author     Dimitry Andric <dim@FreeBSD.org>   2016-07-23 20:41:05 +0000
committer  Dimitry Andric <dim@FreeBSD.org>   2016-07-23 20:41:05 +0000
commit     01095a5d43bbfde13731688ddcf6048ebb8b7721 (patch)
tree       4def12e759965de927d963ac65840d663ef9d1ea /lib/Target
parent     f0f4822ed4b66e3579e92a89f368f8fb860e218e (diff)
download   src-vendor/llvm/llvm-release_39-r276489.tar.gz
           src-vendor/llvm/llvm-release_39-r276489.zip

Vendor import of llvm release_39 branch r276489 (tag: vendor/llvm/llvm-release_39-r276489)
Diffstat (limited to 'lib/Target')
-rw-r--r--lib/Target/AArch64/AArch64.h3
-rw-r--r--lib/Target/AArch64/AArch64.td187
-rw-r--r--lib/Target/AArch64/AArch64A53Fix835769.cpp10
-rw-r--r--lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp40
-rw-r--r--lib/Target/AArch64/AArch64AddressTypePromotion.cpp14
-rw-r--r--lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp128
-rw-r--r--lib/Target/AArch64/AArch64AsmPrinter.cpp67
-rw-r--r--lib/Target/AArch64/AArch64BranchRelaxation.cpp30
-rw-r--r--lib/Target/AArch64/AArch64CallLowering.cpp104
-rw-r--r--lib/Target/AArch64/AArch64CallLowering.h36
-rw-r--r--lib/Target/AArch64/AArch64CallingConvention.td18
-rw-r--r--lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp25
-rw-r--r--lib/Target/AArch64/AArch64CollectLOH.cpp23
-rw-r--r--lib/Target/AArch64/AArch64ConditionOptimizer.cpp26
-rw-r--r--lib/Target/AArch64/AArch64ConditionalCompares.cpp24
-rw-r--r--lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp18
-rw-r--r--lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp215
-rw-r--r--lib/Target/AArch64/AArch64FastISel.cpp81
-rw-r--r--lib/Target/AArch64/AArch64FrameLowering.cpp895
-rw-r--r--lib/Target/AArch64/AArch64FrameLowering.h15
-rw-r--r--lib/Target/AArch64/AArch64ISelDAGToDAG.cpp2400
-rw-r--r--lib/Target/AArch64/AArch64ISelLowering.cpp963
-rw-r--r--lib/Target/AArch64/AArch64ISelLowering.h60
-rw-r--r--lib/Target/AArch64/AArch64InstrAtomics.td51
-rw-r--r--lib/Target/AArch64/AArch64InstrFormats.td25
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.cpp1743
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.h107
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.td193
-rw-r--r--lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp1050
-rw-r--r--lib/Target/AArch64/AArch64MachineFunctionInfo.h38
-rw-r--r--lib/Target/AArch64/AArch64PBQPRegAlloc.cpp2
-rw-r--r--lib/Target/AArch64/AArch64PromoteConstant.cpp327
-rw-r--r--lib/Target/AArch64/AArch64RedundantCopyElimination.cpp182
-rw-r--r--lib/Target/AArch64/AArch64RegisterBankInfo.cpp168
-rw-r--r--lib/Target/AArch64/AArch64RegisterBankInfo.h69
-rw-r--r--lib/Target/AArch64/AArch64RegisterInfo.cpp26
-rw-r--r--lib/Target/AArch64/AArch64RegisterInfo.td2
-rw-r--r--lib/Target/AArch64/AArch64SchedA53.td4
-rw-r--r--lib/Target/AArch64/AArch64SchedA57.td3
-rw-r--r--lib/Target/AArch64/AArch64SchedCyclone.td14
-rw-r--r--lib/Target/AArch64/AArch64SchedKryo.td133
-rw-r--r--lib/Target/AArch64/AArch64SchedKryoDetails.td2358
-rw-r--r--lib/Target/AArch64/AArch64SchedM1.td29
-rw-r--r--lib/Target/AArch64/AArch64SchedVulcan.td855
-rw-r--r--lib/Target/AArch64/AArch64Schedule.td8
-rw-r--r--lib/Target/AArch64/AArch64SelectionDAGInfo.cpp10
-rw-r--r--lib/Target/AArch64/AArch64SelectionDAGInfo.h14
-rw-r--r--lib/Target/AArch64/AArch64StorePairSuppress.cpp9
-rw-r--r--lib/Target/AArch64/AArch64Subtarget.cpp109
-rw-r--r--lib/Target/AArch64/AArch64Subtarget.h122
-rw-r--r--lib/Target/AArch64/AArch64SystemOperands.td1018
-rw-r--r--lib/Target/AArch64/AArch64TargetMachine.cpp138
-rw-r--r--lib/Target/AArch64/AArch64TargetMachine.h12
-rw-r--r--lib/Target/AArch64/AArch64TargetTransformInfo.cpp77
-rw-r--r--lib/Target/AArch64/AArch64TargetTransformInfo.h11
-rw-r--r--lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp214
-rw-r--r--lib/Target/AArch64/AsmParser/Makefile15
-rw-r--r--lib/Target/AArch64/CMakeLists.txt19
-rw-r--r--lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp15
-rw-r--r--lib/Target/AArch64/Disassembler/AArch64Disassembler.h2
-rw-r--r--lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp8
-rw-r--r--lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.h2
-rw-r--r--lib/Target/AArch64/Disassembler/Makefile16
-rw-r--r--lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp149
-rw-r--r--lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h4
-rw-r--r--lib/Target/AArch64/InstPrinter/Makefile15
-rw-r--r--lib/Target/AArch64/LLVMBuild.txt2
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h43
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp230
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp63
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp53
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp22
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h1
-rw-r--r--lib/Target/AArch64/MCTargetDesc/Makefile16
-rw-r--r--lib/Target/AArch64/Makefile25
-rw-r--r--lib/Target/AArch64/TargetInfo/Makefile15
-rw-r--r--lib/Target/AArch64/Utils/AArch64BaseInfo.cpp943
-rw-r--r--lib/Target/AArch64/Utils/AArch64BaseInfo.h1003
-rw-r--r--lib/Target/AArch64/Utils/Makefile16
-rw-r--r--lib/Target/AMDGPU/AMDGPU.h57
-rw-r--r--lib/Target/AMDGPU/AMDGPU.td417
-rw-r--r--lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp3
-rw-r--r--lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp122
-rw-r--r--lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp21
-rw-r--r--lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp469
-rw-r--r--lib/Target/AMDGPU/AMDGPUAsmPrinter.h28
-rw-r--r--lib/Target/AMDGPU/AMDGPUCallLowering.cpp42
-rw-r--r--lib/Target/AMDGPU/AMDGPUCallLowering.h36
-rw-r--r--lib/Target/AMDGPU/AMDGPUCallingConv.td8
-rw-r--r--lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp82
-rw-r--r--lib/Target/AMDGPU/AMDGPUDiagnosticInfoUnsupported.cpp26
-rw-r--r--lib/Target/AMDGPU/AMDGPUDiagnosticInfoUnsupported.h48
-rw-r--r--lib/Target/AMDGPU/AMDGPUFrameLowering.cpp32
-rw-r--r--lib/Target/AMDGPU/AMDGPUFrameLowering.h10
-rw-r--r--lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp793
-rw-r--r--lib/Target/AMDGPU/AMDGPUISelLowering.cpp1484
-rw-r--r--lib/Target/AMDGPU/AMDGPUISelLowering.h78
-rw-r--r--lib/Target/AMDGPU/AMDGPUInstrInfo.cpp307
-rw-r--r--lib/Target/AMDGPU/AMDGPUInstrInfo.h143
-rw-r--r--lib/Target/AMDGPU/AMDGPUInstrInfo.td27
-rw-r--r--lib/Target/AMDGPU/AMDGPUInstructions.td263
-rw-r--r--lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp38
-rw-r--r--lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h6
-rw-r--r--lib/Target/AMDGPU/AMDGPUIntrinsics.td75
-rw-r--r--lib/Target/AMDGPU/AMDGPUMCInstLower.cpp57
-rw-r--r--lib/Target/AMDGPU/AMDGPUMCInstLower.h4
-rw-r--r--lib/Target/AMDGPU/AMDGPUMachineFunction.cpp17
-rw-r--r--lib/Target/AMDGPU/AMDGPUMachineFunction.h34
-rw-r--r--lib/Target/AMDGPU/AMDGPUOpenCLImageTypeLoweringPass.cpp1
-rw-r--r--lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp667
-rw-r--r--lib/Target/AMDGPU/AMDGPURegisterInfo.cpp19
-rw-r--r--lib/Target/AMDGPU/AMDGPURegisterInfo.h21
-rw-r--r--lib/Target/AMDGPU/AMDGPURuntimeMetadata.h138
-rw-r--r--lib/Target/AMDGPU/AMDGPUSubtarget.cpp227
-rw-r--r--lib/Target/AMDGPU/AMDGPUSubtarget.h389
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetMachine.cpp370
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetMachine.h61
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetObjectFile.cpp56
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetObjectFile.h18
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp163
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h49
-rw-r--r--lib/Target/AMDGPU/AMDILCFGStructurizer.cpp201
-rw-r--r--lib/Target/AMDGPU/AMDKernelCodeT.h31
-rw-r--r--lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp2505
-rw-r--r--lib/Target/AMDGPU/AsmParser/CMakeLists.txt2
-rw-r--r--lib/Target/AMDGPU/AsmParser/Makefile15
-rw-r--r--lib/Target/AMDGPU/CIInstructions.td148
-rw-r--r--lib/Target/AMDGPU/CMakeLists.txt25
-rw-r--r--lib/Target/AMDGPU/CaymanInstructions.td47
-rw-r--r--lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp437
-rw-r--r--lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h93
-rw-r--r--lib/Target/AMDGPU/Disassembler/CMakeLists.txt7
-rw-r--r--lib/Target/AMDGPU/Disassembler/LLVMBuild.txt23
-rw-r--r--lib/Target/AMDGPU/EvergreenInstructions.td67
-rw-r--r--lib/Target/AMDGPU/GCNHazardRecognizer.cpp264
-rw-r--r--lib/Target/AMDGPU/GCNHazardRecognizer.h62
-rw-r--r--lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp385
-rw-r--r--lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.h38
-rw-r--r--lib/Target/AMDGPU/InstPrinter/CMakeLists.txt2
-rw-r--r--lib/Target/AMDGPU/InstPrinter/LLVMBuild.txt2
-rw-r--r--lib/Target/AMDGPU/InstPrinter/Makefile15
-rw-r--r--lib/Target/AMDGPU/LLVMBuild.txt7
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp48
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp55
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFStreamer.cpp5
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFStreamer.h3
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUFixupKinds.h7
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp5
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h4
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h4
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp11
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h7
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp4
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h8
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/Makefile16
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp26
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp42
-rw-r--r--lib/Target/AMDGPU/Makefile23
-rw-r--r--lib/Target/AMDGPU/Processors.td23
-rw-r--r--lib/Target/AMDGPU/R600ClauseMergePass.cpp121
-rw-r--r--lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp158
-rw-r--r--lib/Target/AMDGPU/R600Defines.h4
-rw-r--r--lib/Target/AMDGPU/R600EmitClauseMarkers.cpp65
-rw-r--r--lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp30
-rw-r--r--lib/Target/AMDGPU/R600FrameLowering.cpp15
-rw-r--r--lib/Target/AMDGPU/R600FrameLowering.h30
-rw-r--r--lib/Target/AMDGPU/R600ISelLowering.cpp1006
-rw-r--r--lib/Target/AMDGPU/R600ISelLowering.h53
-rw-r--r--lib/Target/AMDGPU/R600InstrInfo.cpp509
-rw-r--r--lib/Target/AMDGPU/R600InstrInfo.h199
-rw-r--r--lib/Target/AMDGPU/R600Instructions.td124
-rw-r--r--lib/Target/AMDGPU/R600Intrinsics.td114
-rw-r--r--lib/Target/AMDGPU/R600MachineFunctionInfo.h7
-rw-r--r--lib/Target/AMDGPU/R600MachineScheduler.cpp136
-rw-r--r--lib/Target/AMDGPU/R600MachineScheduler.h11
-rw-r--r--lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp40
-rw-r--r--lib/Target/AMDGPU/R600Packetizer.cpp103
-rw-r--r--lib/Target/AMDGPU/R600RegisterInfo.cpp11
-rw-r--r--lib/Target/AMDGPU/R600RegisterInfo.h15
-rw-r--r--lib/Target/AMDGPU/R600Schedule.td2
-rw-r--r--lib/Target/AMDGPU/R600TextureIntrinsicsReplacer.cpp303
-rw-r--r--lib/Target/AMDGPU/SIAnnotateControlFlow.cpp88
-rw-r--r--lib/Target/AMDGPU/SIDebuggerInsertNops.cpp96
-rw-r--r--lib/Target/AMDGPU/SIDefines.h148
-rw-r--r--lib/Target/AMDGPU/SIFixSGPRCopies.cpp9
-rw-r--r--lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp219
-rw-r--r--lib/Target/AMDGPU/SIFoldOperands.cpp28
-rw-r--r--lib/Target/AMDGPU/SIFrameLowering.cpp144
-rw-r--r--lib/Target/AMDGPU/SIFrameLowering.h6
-rw-r--r--lib/Target/AMDGPU/SIISelLowering.cpp1725
-rw-r--r--lib/Target/AMDGPU/SIISelLowering.h74
-rw-r--r--lib/Target/AMDGPU/SIInsertWaits.cpp198
-rw-r--r--lib/Target/AMDGPU/SIInstrFormats.td126
-rw-r--r--lib/Target/AMDGPU/SIInstrInfo.cpp1736
-rw-r--r--lib/Target/AMDGPU/SIInstrInfo.h254
-rw-r--r--lib/Target/AMDGPU/SIInstrInfo.td1852
-rw-r--r--lib/Target/AMDGPU/SIInstructions.td1298
-rw-r--r--lib/Target/AMDGPU/SIIntrinsics.td32
-rw-r--r--lib/Target/AMDGPU/SILoadStoreOptimizer.cpp50
-rw-r--r--lib/Target/AMDGPU/SILowerControlFlow.cpp645
-rw-r--r--lib/Target/AMDGPU/SILowerI1Copies.cpp17
-rw-r--r--lib/Target/AMDGPU/SIMachineFunctionInfo.cpp104
-rw-r--r--lib/Target/AMDGPU/SIMachineFunctionInfo.h129
-rw-r--r--lib/Target/AMDGPU/SIMachineScheduler.cpp168
-rw-r--r--lib/Target/AMDGPU/SIMachineScheduler.h10
-rw-r--r--lib/Target/AMDGPU/SIRegisterInfo.cpp604
-rw-r--r--lib/Target/AMDGPU/SIRegisterInfo.h69
-rw-r--r--lib/Target/AMDGPU/SIRegisterInfo.td122
-rw-r--r--lib/Target/AMDGPU/SISchedule.td63
-rw-r--r--lib/Target/AMDGPU/SIShrinkInstructions.cpp152
-rw-r--r--lib/Target/AMDGPU/SITypeRewriter.cpp2
-rw-r--r--lib/Target/AMDGPU/SIWholeQuadMode.cpp509
-rw-r--r--lib/Target/AMDGPU/TargetInfo/Makefile15
-rw-r--r--lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp69
-rw-r--r--lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.h31
-rw-r--r--lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp28
-rw-r--r--lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h7
-rw-r--r--lib/Target/AMDGPU/Utils/AMDKernelCodeTInfo.h165
-rw-r--r--lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp166
-rw-r--r--lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.h39
-rw-r--r--lib/Target/AMDGPU/Utils/CMakeLists.txt2
-rw-r--r--lib/Target/AMDGPU/Utils/Makefile16
-rw-r--r--lib/Target/AMDGPU/VIInstrFormats.td143
-rw-r--r--lib/Target/AMDGPU/VIInstructions.td50
-rw-r--r--lib/Target/ARM/A15SDOptimizer.cpp74
-rw-r--r--lib/Target/ARM/ARM.h4
-rw-r--r--lib/Target/ARM/ARM.td218
-rw-r--r--lib/Target/ARM/ARMAsmPrinter.cpp134
-rw-r--r--lib/Target/ARM/ARMAsmPrinter.h1
-rw-r--r--lib/Target/ARM/ARMBaseInstrInfo.cpp1309
-rw-r--r--lib/Target/ARM/ARMBaseInstrInfo.h131
-rw-r--r--lib/Target/ARM/ARMBaseRegisterInfo.cpp41
-rw-r--r--lib/Target/ARM/ARMBaseRegisterInfo.h12
-rw-r--r--lib/Target/ARM/ARMCallingConv.h2
-rw-r--r--lib/Target/ARM/ARMCallingConv.td52
-rw-r--r--lib/Target/ARM/ARMConstantIslandPass.cpp160
-rw-r--r--lib/Target/ARM/ARMConstantPoolValue.cpp25
-rw-r--r--lib/Target/ARM/ARMConstantPoolValue.h11
-rw-r--r--lib/Target/ARM/ARMExpandPseudoInsts.cpp283
-rw-r--r--lib/Target/ARM/ARMFastISel.cpp143
-rw-r--r--lib/Target/ARM/ARMFrameLowering.cpp162
-rw-r--r--lib/Target/ARM/ARMFrameLowering.h2
-rw-r--r--lib/Target/ARM/ARMHazardRecognizer.cpp3
-rw-r--r--lib/Target/ARM/ARMISelDAGToDAG.cpp1043
-rw-r--r--lib/Target/ARM/ARMISelLowering.cpp1484
-rw-r--r--lib/Target/ARM/ARMISelLowering.h99
-rw-r--r--lib/Target/ARM/ARMInstrFormats.td212
-rw-r--r--lib/Target/ARM/ARMInstrInfo.cpp22
-rw-r--r--lib/Target/ARM/ARMInstrInfo.h3
-rw-r--r--lib/Target/ARM/ARMInstrInfo.td355
-rw-r--r--lib/Target/ARM/ARMInstrThumb.td157
-rw-r--r--lib/Target/ARM/ARMInstrThumb2.td346
-rw-r--r--lib/Target/ARM/ARMInstrVFP.td430
-rw-r--r--lib/Target/ARM/ARMLoadStoreOptimizer.cpp265
-rw-r--r--lib/Target/ARM/ARMMCInstLower.cpp43
-rw-r--r--lib/Target/ARM/ARMMachineFunctionInfo.cpp2
-rw-r--r--lib/Target/ARM/ARMMachineFunctionInfo.h1
-rw-r--r--lib/Target/ARM/ARMOptimizeBarriersPass.cpp8
-rw-r--r--lib/Target/ARM/ARMSchedule.td14
-rw-r--r--lib/Target/ARM/ARMScheduleA8.td2
-rw-r--r--lib/Target/ARM/ARMScheduleA9.td4
-rw-r--r--lib/Target/ARM/ARMScheduleSwift.td2
-rw-r--r--lib/Target/ARM/ARMSelectionDAGInfo.cpp58
-rw-r--r--lib/Target/ARM/ARMSelectionDAGInfo.h42
-rw-r--r--lib/Target/ARM/ARMSubtarget.cpp157
-rw-r--r--lib/Target/ARM/ARMSubtarget.h273
-rw-r--r--lib/Target/ARM/ARMTargetMachine.cpp54
-rw-r--r--lib/Target/ARM/ARMTargetMachine.h31
-rw-r--r--lib/Target/ARM/ARMTargetObjectFile.h8
-rw-r--r--lib/Target/ARM/ARMTargetTransformInfo.cpp42
-rw-r--r--lib/Target/ARM/ARMTargetTransformInfo.h17
-rw-r--r--lib/Target/ARM/AsmParser/ARMAsmParser.cpp309
-rw-r--r--lib/Target/ARM/AsmParser/Makefile15
-rw-r--r--lib/Target/ARM/Disassembler/ARMDisassembler.cpp108
-rw-r--r--lib/Target/ARM/Disassembler/Makefile16
-rw-r--r--lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp120
-rw-r--r--lib/Target/ARM/InstPrinter/ARMInstPrinter.h9
-rw-r--r--lib/Target/ARM/InstPrinter/Makefile15
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h51
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp83
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h11
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMAsmBackendDarwin.h8
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMAsmBackendELF.h3
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMAsmBackendWinCOFF.h1
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h14
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp19
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp6
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h10
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp3
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp120
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp16
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp2
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp9
-rw-r--r--lib/Target/ARM/MCTargetDesc/Makefile16
-rw-r--r--lib/Target/ARM/MLxExpansionPass.cpp6
-rw-r--r--lib/Target/ARM/Makefile24
-rw-r--r--lib/Target/ARM/README.txt18
-rw-r--r--lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp1
-rw-r--r--lib/Target/ARM/TargetInfo/Makefile15
-rw-r--r--lib/Target/ARM/Thumb1FrameLowering.cpp56
-rw-r--r--lib/Target/ARM/Thumb1FrameLowering.h2
-rw-r--r--lib/Target/ARM/Thumb1InstrInfo.cpp19
-rw-r--r--lib/Target/ARM/Thumb1InstrInfo.h8
-rw-r--r--lib/Target/ARM/Thumb2ITBlockPass.cpp11
-rw-r--r--lib/Target/ARM/Thumb2InstrInfo.cpp55
-rw-r--r--lib/Target/ARM/Thumb2InstrInfo.h12
-rw-r--r--lib/Target/ARM/Thumb2SizeReduction.cpp68
-rw-r--r--lib/Target/ARM/ThumbRegisterInfo.cpp120
-rw-r--r--lib/Target/ARM/ThumbRegisterInfo.h5
-rw-r--r--lib/Target/AVR/AVR.h4
-rw-r--r--lib/Target/AVR/AVR.td6
-rw-r--r--lib/Target/AVR/AVRConfig.h15
-rw-r--r--lib/Target/AVR/AVRFrameLowering.h46
-rw-r--r--lib/Target/AVR/AVRISelLowering.h152
-rw-r--r--lib/Target/AVR/AVRInstrFormats.td577
-rw-r--r--lib/Target/AVR/AVRInstrInfo.cpp466
-rw-r--r--lib/Target/AVR/AVRInstrInfo.h110
-rw-r--r--lib/Target/AVR/AVRInstrInfo.td1981
-rw-r--r--lib/Target/AVR/AVRMachineFunctionInfo.h6
-rw-r--r--lib/Target/AVR/AVRRegisterInfo.cpp256
-rw-r--r--lib/Target/AVR/AVRRegisterInfo.h56
-rw-r--r--lib/Target/AVR/AVRSelectionDAGInfo.h11
-rw-r--r--lib/Target/AVR/AVRSubtarget.cpp47
-rw-r--r--lib/Target/AVR/AVRSubtarget.h119
-rw-r--r--lib/Target/AVR/AVRTargetMachine.cpp101
-rw-r--r--lib/Target/AVR/AVRTargetMachine.h51
-rw-r--r--lib/Target/AVR/AVRTargetObjectFile.h5
-rw-r--r--lib/Target/AVR/CMakeLists.txt12
-rw-r--r--lib/Target/AVR/MCTargetDesc/AVRELFStreamer.cpp66
-rw-r--r--lib/Target/AVR/MCTargetDesc/AVRELFStreamer.h29
-rw-r--r--lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp28
-rw-r--r--lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.h31
-rw-r--r--lib/Target/AVR/MCTargetDesc/AVRMCTargetDesc.h57
-rw-r--r--lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.cpp24
-rw-r--r--lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.h32
-rw-r--r--lib/Target/AVR/MCTargetDesc/CMakeLists.txt8
-rw-r--r--lib/Target/AVR/MCTargetDesc/LLVMBuild.txt (renamed from lib/Target/CppBackend/LLVMBuild.txt)18
-rw-r--r--lib/Target/AVR/Makefile19
-rw-r--r--lib/Target/AVR/TODO.md7
-rw-r--r--lib/Target/AVR/TargetInfo/CMakeLists.txt2
-rw-r--r--lib/Target/AVR/TargetInfo/Makefile16
-rw-r--r--lib/Target/BPF/BPFAsmPrinter.cpp28
-rw-r--r--lib/Target/BPF/BPFFrameLowering.h4
-rw-r--r--lib/Target/BPF/BPFISelDAGToDAG.cpp37
-rw-r--r--lib/Target/BPF/BPFISelLowering.cpp148
-rw-r--r--lib/Target/BPF/BPFISelLowering.h13
-rw-r--r--lib/Target/BPF/BPFInstrInfo.cpp12
-rw-r--r--lib/Target/BPF/BPFInstrInfo.h6
-rw-r--r--lib/Target/BPF/BPFMCInstLower.cpp3
-rw-r--r--lib/Target/BPF/BPFSubtarget.h6
-rw-r--r--lib/Target/BPF/BPFTargetMachine.cpp15
-rw-r--r--lib/Target/BPF/BPFTargetMachine.h5
-rw-r--r--lib/Target/BPF/InstPrinter/Makefile16
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp3
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp8
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h4
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp1
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp12
-rw-r--r--lib/Target/BPF/MCTargetDesc/Makefile16
-rw-r--r--lib/Target/BPF/Makefile21
-rw-r--r--lib/Target/BPF/TargetInfo/Makefile16
-rw-r--r--lib/Target/CppBackend/CMakeLists.txt5
-rw-r--r--lib/Target/CppBackend/CPPBackend.cpp2143
-rw-r--r--lib/Target/CppBackend/CPPTargetMachine.h44
-rw-r--r--lib/Target/CppBackend/Makefile16
-rw-r--r--lib/Target/CppBackend/TargetInfo/CMakeLists.txt3
-rw-r--r--lib/Target/CppBackend/TargetInfo/CppBackendTargetInfo.cpp29
-rw-r--r--lib/Target/CppBackend/TargetInfo/Makefile15
-rw-r--r--lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp408
-rw-r--r--lib/Target/Hexagon/AsmParser/Makefile15
-rw-r--r--lib/Target/Hexagon/BitTracker.cpp220
-rw-r--r--lib/Target/Hexagon/BitTracker.h10
-rw-r--r--lib/Target/Hexagon/CMakeLists.txt6
-rw-r--r--lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp7
-rw-r--r--lib/Target/Hexagon/Disassembler/Makefile16
-rw-r--r--lib/Target/Hexagon/Hexagon.td27
-rw-r--r--lib/Target/Hexagon/HexagonAsmPrinter.cpp51
-rw-r--r--lib/Target/Hexagon/HexagonBitSimplify.cpp50
-rw-r--r--lib/Target/Hexagon/HexagonBitTracker.cpp75
-rw-r--r--lib/Target/Hexagon/HexagonBitTracker.h8
-rw-r--r--lib/Target/Hexagon/HexagonBlockRanges.cpp483
-rw-r--r--lib/Target/Hexagon/HexagonBlockRanges.h239
-rw-r--r--lib/Target/Hexagon/HexagonBranchRelaxation.cpp211
-rw-r--r--lib/Target/Hexagon/HexagonCFGOptimizer.cpp48
-rw-r--r--lib/Target/Hexagon/HexagonCommonGEP.cpp14
-rw-r--r--lib/Target/Hexagon/HexagonCopyToCombine.cpp278
-rw-r--r--lib/Target/Hexagon/HexagonEarlyIfConv.cpp19
-rw-r--r--lib/Target/Hexagon/HexagonExpandCondsets.cpp1062
-rw-r--r--lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp357
-rw-r--r--lib/Target/Hexagon/HexagonFixupHwLoops.cpp20
-rw-r--r--lib/Target/Hexagon/HexagonFrameLowering.cpp1269
-rw-r--r--lib/Target/Hexagon/HexagonFrameLowering.h61
-rw-r--r--lib/Target/Hexagon/HexagonGenExtract.cpp3
-rw-r--r--lib/Target/Hexagon/HexagonGenInsert.cpp13
-rw-r--r--lib/Target/Hexagon/HexagonGenMux.cpp21
-rw-r--r--lib/Target/Hexagon/HexagonGenPredicate.cpp28
-rw-r--r--lib/Target/Hexagon/HexagonHardwareLoops.cpp34
-rw-r--r--lib/Target/Hexagon/HexagonISelDAGToDAG.cpp1237
-rw-r--r--lib/Target/Hexagon/HexagonISelLowering.cpp458
-rw-r--r--lib/Target/Hexagon/HexagonISelLowering.h54
-rw-r--r--lib/Target/Hexagon/HexagonInstrAlias.td192
-rw-r--r--lib/Target/Hexagon/HexagonInstrFormats.td14
-rw-r--r--lib/Target/Hexagon/HexagonInstrFormatsV4.td7
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.cpp1067
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.h62
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.td249
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfoV3.td25
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfoV4.td707
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfoV60.td116
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfoVector.td55
-rw-r--r--lib/Target/Hexagon/HexagonIntrinsics.td511
-rw-r--r--lib/Target/Hexagon/HexagonIntrinsicsV4.td193
-rw-r--r--lib/Target/Hexagon/HexagonIntrinsicsV5.td24
-rw-r--r--lib/Target/Hexagon/HexagonIntrinsicsV60.td4
-rw-r--r--lib/Target/Hexagon/HexagonMCInstLower.cpp66
-rw-r--r--lib/Target/Hexagon/HexagonMachineFunctionInfo.h19
-rw-r--r--lib/Target/Hexagon/HexagonMachineScheduler.cpp406
-rw-r--r--lib/Target/Hexagon/HexagonMachineScheduler.h20
-rw-r--r--lib/Target/Hexagon/HexagonNewValueJump.cpp150
-rw-r--r--lib/Target/Hexagon/HexagonOperands.td72
-rw-r--r--lib/Target/Hexagon/HexagonOptAddrMode.cpp663
-rw-r--r--lib/Target/Hexagon/HexagonOptimizeSZextends.cpp3
-rw-r--r--lib/Target/Hexagon/HexagonPeephole.cpp101
-rw-r--r--lib/Target/Hexagon/HexagonRDF.h4
-rw-r--r--lib/Target/Hexagon/HexagonRDFOpt.cpp88
-rw-r--r--lib/Target/Hexagon/HexagonRegisterInfo.cpp108
-rw-r--r--lib/Target/Hexagon/HexagonRegisterInfo.h18
-rw-r--r--lib/Target/Hexagon/HexagonRegisterInfo.td51
-rw-r--r--lib/Target/Hexagon/HexagonScheduleV4.td13
-rw-r--r--lib/Target/Hexagon/HexagonScheduleV55.td186
-rw-r--r--lib/Target/Hexagon/HexagonScheduleV60.td11
-rw-r--r--lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp13
-rw-r--r--lib/Target/Hexagon/HexagonSelectionDAGInfo.h16
-rw-r--r--lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp63
-rw-r--r--lib/Target/Hexagon/HexagonSplitDouble.cpp24
-rw-r--r--lib/Target/Hexagon/HexagonStoreWidening.cpp3
-rw-r--r--lib/Target/Hexagon/HexagonSubtarget.cpp244
-rw-r--r--lib/Target/Hexagon/HexagonSubtarget.h29
-rw-r--r--lib/Target/Hexagon/HexagonSystemInst.td21
-rw-r--r--lib/Target/Hexagon/HexagonTargetMachine.cpp54
-rw-r--r--lib/Target/Hexagon/HexagonTargetMachine.h2
-rw-r--r--lib/Target/Hexagon/HexagonTargetObjectFile.cpp393
-rw-r--r--lib/Target/Hexagon/HexagonTargetObjectFile.h37
-rw-r--r--lib/Target/Hexagon/HexagonVLIWPacketizer.cpp172
-rw-r--r--lib/Target/Hexagon/HexagonVLIWPacketizer.h13
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp638
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h22
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonELFObjectWriter.cpp60
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonFixupKinds.h1
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.cpp12
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp1
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h1
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp6
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h5
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp451
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp6
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp19
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCExpr.cpp55
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCExpr.h18
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp182
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h14
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp1
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp70
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/Makefile16
-rw-r--r--lib/Target/Hexagon/Makefile26
-rw-r--r--lib/Target/Hexagon/RDFCopy.cpp217
-rw-r--r--lib/Target/Hexagon/RDFCopy.h12
-rw-r--r--lib/Target/Hexagon/RDFDeadCode.cpp50
-rw-r--r--lib/Target/Hexagon/RDFDeadCode.h12
-rw-r--r--lib/Target/Hexagon/RDFGraph.cpp55
-rw-r--r--lib/Target/Hexagon/RDFGraph.h33
-rw-r--r--lib/Target/Hexagon/RDFLiveness.cpp100
-rw-r--r--lib/Target/Hexagon/RDFLiveness.h8
-rw-r--r--lib/Target/Hexagon/TargetInfo/Makefile15
-rw-r--r--lib/Target/LLVMBuild.txt2
-rw-r--r--lib/Target/Lanai/AsmParser/CMakeLists.txt7
-rw-r--r--lib/Target/Lanai/AsmParser/LLVMBuild.txt23
-rw-r--r--lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp1213
-rw-r--r--lib/Target/Lanai/CMakeLists.txt35
-rw-r--r--lib/Target/Lanai/Disassembler/CMakeLists.txt3
-rw-r--r--lib/Target/Lanai/Disassembler/LLVMBuild.txt23
-rw-r--r--lib/Target/Lanai/Disassembler/LanaiDisassembler.cpp240
-rw-r--r--lib/Target/Lanai/Disassembler/LanaiDisassembler.h41
-rw-r--r--lib/Target/Lanai/InstPrinter/CMakeLists.txt3
-rw-r--r--lib/Target/Lanai/InstPrinter/LLVMBuild.txt23
-rw-r--r--lib/Target/Lanai/InstPrinter/LanaiInstPrinter.cpp305
-rw-r--r--lib/Target/Lanai/InstPrinter/LanaiInstPrinter.h65
-rw-r--r--lib/Target/Lanai/LLVMBuild.txt45
-rw-r--r--lib/Target/Lanai/Lanai.h51
-rw-r--r--lib/Target/Lanai/Lanai.td47
-rw-r--r--lib/Target/Lanai/LanaiAluCode.h148
-rw-r--r--lib/Target/Lanai/LanaiAsmPrinter.cpp243
-rw-r--r--lib/Target/Lanai/LanaiCallingConv.td50
-rw-r--r--lib/Target/Lanai/LanaiCondCode.h100
-rw-r--r--lib/Target/Lanai/LanaiDelaySlotFiller.cpp263
-rw-r--r--lib/Target/Lanai/LanaiFrameLowering.cpp220
-rw-r--r--lib/Target/Lanai/LanaiFrameLowering.h57
-rw-r--r--lib/Target/Lanai/LanaiISelDAGToDAG.cpp317
-rw-r--r--lib/Target/Lanai/LanaiISelLowering.cpp1437
-rw-r--r--lib/Target/Lanai/LanaiISelLowering.h148
-rw-r--r--lib/Target/Lanai/LanaiInstrFormats.td561
-rw-r--r--lib/Target/Lanai/LanaiInstrInfo.cpp803
-rw-r--r--lib/Target/Lanai/LanaiInstrInfo.h184
-rw-r--r--lib/Target/Lanai/LanaiInstrInfo.td892
-rw-r--r--lib/Target/Lanai/LanaiMCInstLower.cpp140
-rw-r--r--lib/Target/Lanai/LanaiMCInstLower.h48
-rw-r--r--lib/Target/Lanai/LanaiMachineFunctionInfo.cpp23
-rw-r--r--lib/Target/Lanai/LanaiMachineFunctionInfo.h58
-rw-r--r--lib/Target/Lanai/LanaiMemAluCombiner.cpp422
-rw-r--r--lib/Target/Lanai/LanaiRegisterInfo.cpp287
-rw-r--r--lib/Target/Lanai/LanaiRegisterInfo.h63
-rw-r--r--lib/Target/Lanai/LanaiRegisterInfo.td64
-rw-r--r--lib/Target/Lanai/LanaiSchedule.td70
-rw-r--r--lib/Target/Lanai/LanaiSelectionDAGInfo.cpp35
-rw-r--r--lib/Target/Lanai/LanaiSelectionDAGInfo.h36
-rw-r--r--lib/Target/Lanai/LanaiSubtarget.cpp47
-rw-r--r--lib/Target/Lanai/LanaiSubtarget.h76
-rw-r--r--lib/Target/Lanai/LanaiTargetMachine.cpp112
-rw-r--r--lib/Target/Lanai/LanaiTargetMachine.h55
-rw-r--r--lib/Target/Lanai/LanaiTargetObjectFile.cpp123
-rw-r--r--lib/Target/Lanai/LanaiTargetObjectFile.h46
-rw-r--r--lib/Target/Lanai/LanaiTargetTransformInfo.h86
-rw-r--r--lib/Target/Lanai/MCTargetDesc/CMakeLists.txt8
-rw-r--r--lib/Target/Lanai/MCTargetDesc/LLVMBuild.txt23
-rw-r--r--lib/Target/Lanai/MCTargetDesc/LanaiAsmBackend.cpp172
-rw-r--r--lib/Target/Lanai/MCTargetDesc/LanaiBaseInfo.h119
-rw-r--r--lib/Target/Lanai/MCTargetDesc/LanaiELFObjectWriter.cpp95
-rw-r--r--lib/Target/Lanai/MCTargetDesc/LanaiFixupKinds.h43
-rw-r--r--lib/Target/Lanai/MCTargetDesc/LanaiMCAsmInfo.cpp43
-rw-r--r--lib/Target/Lanai/MCTargetDesc/LanaiMCAsmInfo.h31
-rw-r--r--lib/Target/Lanai/MCTargetDesc/LanaiMCCodeEmitter.cpp326
-rw-r--r--lib/Target/Lanai/MCTargetDesc/LanaiMCExpr.cpp60
-rw-r--r--lib/Target/Lanai/MCTargetDesc/LanaiMCExpr.h56
-rw-r--r--lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp149
-rw-r--r--lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.h59
-rw-r--r--lib/Target/Lanai/TargetInfo/CMakeLists.txt3
-rw-r--r--lib/Target/Lanai/TargetInfo/LLVMBuild.txt (renamed from lib/Target/CppBackend/TargetInfo/LLVMBuild.txt)8
-rw-r--r--lib/Target/Lanai/TargetInfo/LanaiTargetInfo.cpp20
-rw-r--r--lib/Target/MSP430/InstPrinter/Makefile15
-rw-r--r--lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp14
-rw-r--r--lib/Target/MSP430/MCTargetDesc/Makefile16
-rw-r--r--lib/Target/MSP430/MSP430BranchSelector.cpp25
-rw-r--r--lib/Target/MSP430/MSP430FrameLowering.cpp40
-rw-r--r--lib/Target/MSP430/MSP430FrameLowering.h6
-rw-r--r--lib/Target/MSP430/MSP430ISelDAGToDAG.cpp134
-rw-r--r--lib/Target/MSP430/MSP430ISelLowering.cpp126
-rw-r--r--lib/Target/MSP430/MSP430ISelLowering.h31
-rw-r--r--lib/Target/MSP430/MSP430InstrInfo.cpp45
-rw-r--r--lib/Target/MSP430/MSP430InstrInfo.h16
-rw-r--r--lib/Target/MSP430/MSP430Subtarget.h6
-rw-r--r--lib/Target/MSP430/MSP430TargetMachine.cpp12
-rw-r--r--lib/Target/MSP430/MSP430TargetMachine.h2
-rw-r--r--lib/Target/MSP430/Makefile23
-rw-r--r--lib/Target/MSP430/TargetInfo/Makefile15
-rw-r--r--lib/Target/Makefile20
-rw-r--r--lib/Target/Mips/AsmParser/Makefile15
-rw-r--r--lib/Target/Mips/AsmParser/MipsAsmParser.cpp1724
-rw-r--r--lib/Target/Mips/CMakeLists.txt1
-rw-r--r--lib/Target/Mips/Disassembler/Makefile16
-rw-r--r--lib/Target/Mips/Disassembler/MipsDisassembler.cpp376
-rw-r--r--lib/Target/Mips/InstPrinter/Makefile16
-rw-r--r--lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp96
-rw-r--r--lib/Target/Mips/InstPrinter/MipsInstPrinter.h4
-rw-r--r--lib/Target/Mips/MCTargetDesc/Makefile17
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.cpp22
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h17
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp47
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsABIInfo.h9
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp77
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h5
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h10
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp614
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h19
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp5
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp415
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h27
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp295
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCExpr.h66
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp55
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp378
-rw-r--r--lib/Target/Mips/Makefile25
-rw-r--r--lib/Target/Mips/MicroMips32r6InstrFormats.td238
-rw-r--r--lib/Target/Mips/MicroMips32r6InstrInfo.td717
-rw-r--r--lib/Target/Mips/MicroMips64r6InstrFormats.td141
-rw-r--r--lib/Target/Mips/MicroMips64r6InstrInfo.td404
-rw-r--r--lib/Target/Mips/MicroMipsDSPInstrFormats.td58
-rw-r--r--lib/Target/Mips/MicroMipsDSPInstrInfo.td79
-rw-r--r--lib/Target/Mips/MicroMipsInstrFPU.td39
-rw-r--r--lib/Target/Mips/MicroMipsInstrFormats.td9
-rw-r--r--lib/Target/Mips/MicroMipsInstrInfo.td360
-rw-r--r--lib/Target/Mips/Mips.h3
-rw-r--r--lib/Target/Mips/Mips.td5
-rw-r--r--lib/Target/Mips/Mips16FrameLowering.cpp1
-rw-r--r--lib/Target/Mips/Mips16HardFloat.cpp28
-rw-r--r--lib/Target/Mips/Mips16ISelDAGToDAG.cpp182
-rw-r--r--lib/Target/Mips/Mips16ISelDAGToDAG.h26
-rw-r--r--lib/Target/Mips/Mips16ISelLowering.cpp137
-rw-r--r--lib/Target/Mips/Mips16ISelLowering.h28
-rw-r--r--lib/Target/Mips/Mips16InstrInfo.cpp25
-rw-r--r--lib/Target/Mips/Mips16InstrInfo.h16
-rw-r--r--lib/Target/Mips/Mips16InstrInfo.td85
-rw-r--r--lib/Target/Mips/Mips16RegisterInfo.cpp3
-rw-r--r--lib/Target/Mips/Mips32r6InstrFormats.td2
-rw-r--r--lib/Target/Mips/Mips32r6InstrInfo.td343
-rw-r--r--lib/Target/Mips/Mips64InstrInfo.td475
-rw-r--r--lib/Target/Mips/Mips64r6InstrInfo.td98
-rw-r--r--lib/Target/Mips/MipsAsmPrinter.cpp65
-rw-r--r--lib/Target/Mips/MipsAsmPrinter.h2
-rw-r--r--lib/Target/Mips/MipsCCState.cpp4
-rw-r--r--lib/Target/Mips/MipsCallingConv.td48
-rw-r--r--lib/Target/Mips/MipsCondMov.td12
-rw-r--r--lib/Target/Mips/MipsConstantIslandPass.cpp114
-rw-r--r--lib/Target/Mips/MipsDSPInstrFormats.td4
-rw-r--r--lib/Target/Mips/MipsDSPInstrInfo.td69
-rw-r--r--lib/Target/Mips/MipsDelaySlotFiller.cpp144
-rw-r--r--lib/Target/Mips/MipsEVAInstrInfo.td73
-rw-r--r--lib/Target/Mips/MipsFastISel.cpp39
-rw-r--r--lib/Target/Mips/MipsFrameLowering.cpp16
-rw-r--r--lib/Target/Mips/MipsFrameLowering.h2
-rw-r--r--lib/Target/Mips/MipsHazardSchedule.cpp147
-rw-r--r--lib/Target/Mips/MipsISelDAGToDAG.cpp53
-rw-r--r--lib/Target/Mips/MipsISelDAGToDAG.h25
-rw-r--r--lib/Target/Mips/MipsISelLowering.cpp419
-rw-r--r--lib/Target/Mips/MipsISelLowering.h96
-rw-r--r--lib/Target/Mips/MipsInstrFPU.td130
-rw-r--r--lib/Target/Mips/MipsInstrFormats.td7
-rw-r--r--lib/Target/Mips/MipsInstrInfo.cpp236
-rw-r--r--lib/Target/Mips/MipsInstrInfo.h26
-rw-r--r--lib/Target/Mips/MipsInstrInfo.td1036
-rw-r--r--lib/Target/Mips/MipsLongBranch.cpp72
-rw-r--r--lib/Target/Mips/MipsMCInstLower.cpp155
-rw-r--r--lib/Target/Mips/MipsMCInstLower.h8
-rw-r--r--lib/Target/Mips/MipsMSAInstrInfo.td220
-rw-r--r--lib/Target/Mips/MipsMachineFunction.cpp16
-rw-r--r--lib/Target/Mips/MipsMachineFunction.h15
-rw-r--r--lib/Target/Mips/MipsOs16.cpp26
-rw-r--r--lib/Target/Mips/MipsRegisterInfo.cpp20
-rw-r--r--lib/Target/Mips/MipsRegisterInfo.h22
-rw-r--r--lib/Target/Mips/MipsRegisterInfo.td13
-rw-r--r--lib/Target/Mips/MipsSEFrameLowering.cpp12
-rw-r--r--lib/Target/Mips/MipsSEISelDAGToDAG.cpp141
-rw-r--r--lib/Target/Mips/MipsSEISelDAGToDAG.h34
-rw-r--r--lib/Target/Mips/MipsSEISelLowering.cpp256
-rw-r--r--lib/Target/Mips/MipsSEISelLowering.h24
-rw-r--r--lib/Target/Mips/MipsSEInstrInfo.cpp94
-rw-r--r--lib/Target/Mips/MipsSEInstrInfo.h13
-rw-r--r--lib/Target/Mips/MipsSERegisterInfo.cpp54
-rw-r--r--lib/Target/Mips/MipsSchedule.td226
-rw-r--r--lib/Target/Mips/MipsScheduleP5600.td2
-rw-r--r--lib/Target/Mips/MipsSubtarget.cpp9
-rw-r--r--lib/Target/Mips/MipsSubtarget.h14
-rw-r--r--lib/Target/Mips/MipsTargetMachine.cpp34
-rw-r--r--lib/Target/Mips/MipsTargetMachine.h13
-rw-r--r--lib/Target/Mips/MipsTargetObjectFile.cpp16
-rw-r--r--lib/Target/Mips/MipsTargetObjectFile.h3
-rw-r--r--lib/Target/Mips/MipsTargetStreamer.h77
-rw-r--r--lib/Target/Mips/TargetInfo/Makefile15
-rw-r--r--lib/Target/NVPTX/CMakeLists.txt2
-rw-r--r--lib/Target/NVPTX/InstPrinter/Makefile15
-rw-r--r--lib/Target/NVPTX/MCTargetDesc/Makefile16
-rw-r--r--lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp7
-rw-r--r--lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp16
-rw-r--r--lib/Target/NVPTX/Makefile23
-rw-r--r--lib/Target/NVPTX/NVPTX.h8
-rw-r--r--lib/Target/NVPTX/NVPTX.td14
-rw-r--r--lib/Target/NVPTX/NVPTXAsmPrinter.cpp52
-rw-r--r--lib/Target/NVPTX/NVPTXAsmPrinter.h6
-rw-r--r--lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp7
-rw-r--r--lib/Target/NVPTX/NVPTXFrameLowering.cpp7
-rw-r--r--lib/Target/NVPTX/NVPTXFrameLowering.h2
-rw-r--r--lib/Target/NVPTX/NVPTXGenericToNVVM.cpp16
-rw-r--r--lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp528
-rw-r--r--lib/Target/NVPTX/NVPTXISelDAGToDAG.h42
-rw-r--r--lib/Target/NVPTX/NVPTXISelLowering.cpp126
-rw-r--r--lib/Target/NVPTX/NVPTXISelLowering.h20
-rw-r--r--lib/Target/NVPTX/NVPTXImageOptimizer.cpp3
-rw-r--r--lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp586
-rw-r--r--lib/Target/NVPTX/NVPTXInstrInfo.cpp61
-rw-r--r--lib/Target/NVPTX/NVPTXInstrInfo.h19
-rw-r--r--lib/Target/NVPTX/NVPTXInstrInfo.td3025
-rw-r--r--lib/Target/NVPTX/NVPTXIntrinsics.td349
-rw-r--r--lib/Target/NVPTX/NVPTXLowerAlloca.cpp3
-rw-r--r--lib/Target/NVPTX/NVPTXLowerKernelArgs.cpp2
-rw-r--r--lib/Target/NVPTX/NVPTXMCExpr.cpp4
-rw-r--r--lib/Target/NVPTX/NVPTXMCExpr.h9
-rw-r--r--lib/Target/NVPTX/NVPTXPeephole.cpp3
-rw-r--r--lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp9
-rw-r--r--lib/Target/NVPTX/NVPTXSection.h1
-rw-r--r--lib/Target/NVPTX/NVPTXSubtarget.h6
-rw-r--r--lib/Target/NVPTX/NVPTXTargetMachine.cpp129
-rw-r--r--lib/Target/NVPTX/NVPTXTargetMachine.h11
-rw-r--r--lib/Target/NVPTX/NVPTXTargetObjectFile.h4
-rw-r--r--lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp2
-rw-r--r--lib/Target/NVPTX/NVPTXTargetTransformInfo.h4
-rw-r--r--lib/Target/NVPTX/NVPTXUtilities.cpp4
-rw-r--r--lib/Target/NVPTX/NVPTXUtilities.h5
-rw-r--r--lib/Target/NVPTX/NVVMIntrRange.cpp148
-rw-r--r--lib/Target/NVPTX/NVVMReflect.cpp178
-rw-r--r--lib/Target/NVPTX/TargetInfo/Makefile15
-rw-r--r--lib/Target/PowerPC/AsmParser/Makefile15
-rw-r--r--lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp66
-rw-r--r--lib/Target/PowerPC/CMakeLists.txt3
-rw-r--r--lib/Target/PowerPC/Disassembler/Makefile16
-rw-r--r--lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp17
-rw-r--r--lib/Target/PowerPC/InstPrinter/Makefile16
-rw-r--r--lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp25
-rw-r--r--lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h2
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/Makefile16
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp4
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp18
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp23
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp18
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp2
-rw-r--r--lib/Target/PowerPC/Makefile24
-rw-r--r--lib/Target/PowerPC/PPC.h22
-rw-r--r--lib/Target/PowerPC/PPC.td71
-rw-r--r--lib/Target/PowerPC/PPCAsmPrinter.cpp237
-rw-r--r--lib/Target/PowerPC/PPCBoolRetToInt.cpp13
-rw-r--r--lib/Target/PowerPC/PPCBranchSelector.cpp15
-rw-r--r--lib/Target/PowerPC/PPCCCState.cpp36
-rw-r--r--lib/Target/PowerPC/PPCCCState.h42
-rw-r--r--lib/Target/PowerPC/PPCCTRLoops.cpp39
-rw-r--r--lib/Target/PowerPC/PPCCallingConv.td25
-rw-r--r--lib/Target/PowerPC/PPCEarlyReturn.cpp20
-rw-r--r--lib/Target/PowerPC/PPCFastISel.cpp71
-rw-r--r--lib/Target/PowerPC/PPCFrameLowering.cpp131
-rw-r--r--lib/Target/PowerPC/PPCFrameLowering.h13
-rw-r--r--lib/Target/PowerPC/PPCHazardRecognizers.cpp7
-rw-r--r--lib/Target/PowerPC/PPCISelDAGToDAG.cpp452
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.cpp2122
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.h273
-rw-r--r--lib/Target/PowerPC/PPCInstr64Bit.td61
-rw-r--r--lib/Target/PowerPC/PPCInstrAltivec.td184
-rw-r--r--lib/Target/PowerPC/PPCInstrFormats.td258
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.cpp410
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.h65
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.td112
-rw-r--r--lib/Target/PowerPC/PPCInstrVSX.td516
-rw-r--r--lib/Target/PowerPC/PPCLoopDataPrefetch.cpp233
-rw-r--r--lib/Target/PowerPC/PPCLoopPreIncPrep.cpp3
-rw-r--r--lib/Target/PowerPC/PPCMCInstLower.cpp42
-rw-r--r--lib/Target/PowerPC/PPCMIPeephole.cpp2
-rw-r--r--lib/Target/PowerPC/PPCMachineFunctionInfo.h10
-rw-r--r--lib/Target/PowerPC/PPCQPXLoadSplat.cpp166
-rw-r--r--lib/Target/PowerPC/PPCRegisterInfo.cpp38
-rw-r--r--lib/Target/PowerPC/PPCRegisterInfo.h1
-rw-r--r--lib/Target/PowerPC/PPCSchedule.td7
-rw-r--r--lib/Target/PowerPC/PPCSchedule440.td3
-rw-r--r--lib/Target/PowerPC/PPCScheduleA2.td7
-rw-r--r--lib/Target/PowerPC/PPCScheduleE500mc.td15
-rw-r--r--lib/Target/PowerPC/PPCScheduleE5500.td19
-rw-r--r--lib/Target/PowerPC/PPCScheduleG5.td17
-rw-r--r--lib/Target/PowerPC/PPCScheduleP7.td3
-rw-r--r--lib/Target/PowerPC/PPCScheduleP8.td3
-rw-r--r--lib/Target/PowerPC/PPCSubtarget.cpp34
-rw-r--r--lib/Target/PowerPC/PPCSubtarget.h29
-rw-r--r--lib/Target/PowerPC/PPCTLSDynamicCall.cpp5
-rw-r--r--lib/Target/PowerPC/PPCTOCRegDeps.cpp5
-rw-r--r--lib/Target/PowerPC/PPCTargetMachine.cpp63
-rw-r--r--lib/Target/PowerPC/PPCTargetMachine.h15
-rw-r--r--lib/Target/PowerPC/PPCTargetObjectFile.cpp2
-rw-r--r--lib/Target/PowerPC/PPCTargetTransformInfo.cpp30
-rw-r--r--lib/Target/PowerPC/PPCTargetTransformInfo.h2
-rw-r--r--lib/Target/PowerPC/PPCVSXCopy.cpp5
-rw-r--r--lib/Target/PowerPC/PPCVSXFMAMutate.cpp68
-rw-r--r--lib/Target/PowerPC/PPCVSXSwapRemoval.cpp43
-rw-r--r--lib/Target/PowerPC/README.txt11
-rw-r--r--lib/Target/PowerPC/README_P9.txt605
-rw-r--r--lib/Target/PowerPC/TargetInfo/Makefile15
-rw-r--r--lib/Target/PowerPC/p9-instrs.txt442
-rw-r--r--lib/Target/README.txt2
-rw-r--r--lib/Target/Sparc/AsmParser/Makefile15
-rw-r--r--lib/Target/Sparc/AsmParser/SparcAsmParser.cpp99
-rw-r--r--lib/Target/Sparc/CMakeLists.txt1
-rw-r--r--lib/Target/Sparc/DelaySlotFiller.cpp39
-rw-r--r--lib/Target/Sparc/Disassembler/Makefile16
-rw-r--r--lib/Target/Sparc/Disassembler/SparcDisassembler.cpp121
-rw-r--r--lib/Target/Sparc/InstPrinter/Makefile16
-rw-r--r--lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp23
-rwxr-xr-xlib/Target/Sparc/LeonFeatures.td91
-rwxr-xr-xlib/Target/Sparc/LeonPasses.cpp933
-rwxr-xr-xlib/Target/Sparc/LeonPasses.h199
-rw-r--r--lib/Target/Sparc/MCTargetDesc/Makefile16
-rw-r--r--lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp3
-rw-r--r--lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp7
-rw-r--r--lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp1
-rw-r--r--lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp35
-rw-r--r--lib/Target/Sparc/Makefile24
-rw-r--r--lib/Target/Sparc/README.txt1
-rw-r--r--lib/Target/Sparc/Sparc.h35
-rw-r--r--lib/Target/Sparc/Sparc.td140
-rw-r--r--lib/Target/Sparc/SparcAsmPrinter.cpp8
-rw-r--r--lib/Target/Sparc/SparcFrameLowering.cpp8
-rw-r--r--lib/Target/Sparc/SparcFrameLowering.h2
-rw-r--r--lib/Target/Sparc/SparcISelDAGToDAG.cpp36
-rw-r--r--lib/Target/Sparc/SparcISelLowering.cpp935
-rw-r--r--lib/Target/Sparc/SparcISelLowering.h107
-rw-r--r--lib/Target/Sparc/SparcInstr64Bit.td43
-rw-r--r--lib/Target/Sparc/SparcInstrAliases.td117
-rw-r--r--lib/Target/Sparc/SparcInstrFormats.td122
-rw-r--r--lib/Target/Sparc/SparcInstrInfo.cpp107
-rw-r--r--lib/Target/Sparc/SparcInstrInfo.h18
-rw-r--r--lib/Target/Sparc/SparcInstrInfo.td509
-rw-r--r--lib/Target/Sparc/SparcMCInstLower.cpp1
-rw-r--r--lib/Target/Sparc/SparcRegisterInfo.cpp10
-rw-r--r--lib/Target/Sparc/SparcRegisterInfo.h3
-rw-r--r--lib/Target/Sparc/SparcRegisterInfo.td78
-rwxr-xr-xlib/Target/Sparc/SparcSchedule.td124
-rw-r--r--lib/Target/Sparc/SparcSubtarget.cpp25
-rw-r--r--lib/Target/Sparc/SparcSubtarget.h50
-rw-r--r--lib/Target/Sparc/SparcTargetMachine.cpp104
-rw-r--r--lib/Target/Sparc/SparcTargetMachine.h21
-rw-r--r--lib/Target/Sparc/TargetInfo/Makefile15
-rw-r--r--lib/Target/SystemZ/AsmParser/Makefile16
-rw-r--r--lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp5
-rw-r--r--lib/Target/SystemZ/CMakeLists.txt1
-rw-r--r--lib/Target/SystemZ/Disassembler/Makefile16
-rw-r--r--lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp57
-rw-r--r--lib/Target/SystemZ/InstPrinter/Makefile16
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/Makefile16
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp3
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp7
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp22
-rw-r--r--lib/Target/SystemZ/Makefile28
-rw-r--r--lib/Target/SystemZ/README.txt30
-rw-r--r--lib/Target/SystemZ/SystemZ.h41
-rw-r--r--lib/Target/SystemZ/SystemZAsmPrinter.cpp200
-rw-r--r--lib/Target/SystemZ/SystemZCallingConv.cpp4
-rw-r--r--lib/Target/SystemZ/SystemZCallingConv.h50
-rw-r--r--lib/Target/SystemZ/SystemZCallingConv.td15
-rw-r--r--lib/Target/SystemZ/SystemZElimCompare.cpp214
-rw-r--r--lib/Target/SystemZ/SystemZFrameLowering.cpp26
-rw-r--r--lib/Target/SystemZ/SystemZFrameLowering.h7
-rw-r--r--lib/Target/SystemZ/SystemZISelDAGToDAG.cpp266
-rw-r--r--lib/Target/SystemZ/SystemZISelLowering.cpp1043
-rw-r--r--lib/Target/SystemZ/SystemZISelLowering.h97
-rw-r--r--lib/Target/SystemZ/SystemZInstrBuilder.h2
-rw-r--r--lib/Target/SystemZ/SystemZInstrFP.td11
-rw-r--r--lib/Target/SystemZ/SystemZInstrFormats.td206
-rw-r--r--lib/Target/SystemZ/SystemZInstrInfo.cpp607
-rw-r--r--lib/Target/SystemZ/SystemZInstrInfo.h85
-rw-r--r--lib/Target/SystemZ/SystemZInstrInfo.td356
-rw-r--r--lib/Target/SystemZ/SystemZLDCleanup.cpp7
-rw-r--r--lib/Target/SystemZ/SystemZLongBranch.cpp19
-rw-r--r--lib/Target/SystemZ/SystemZMachineFunctionInfo.h9
-rw-r--r--lib/Target/SystemZ/SystemZOperands.td8
-rw-r--r--lib/Target/SystemZ/SystemZOperators.td29
-rw-r--r--lib/Target/SystemZ/SystemZProcessors.td8
-rw-r--r--lib/Target/SystemZ/SystemZRegisterInfo.cpp16
-rw-r--r--lib/Target/SystemZ/SystemZRegisterInfo.h9
-rw-r--r--lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp102
-rw-r--r--lib/Target/SystemZ/SystemZSelectionDAGInfo.h36
-rw-r--r--lib/Target/SystemZ/SystemZShortenInst.cpp34
-rw-r--r--lib/Target/SystemZ/SystemZSubtarget.cpp14
-rw-r--r--lib/Target/SystemZ/SystemZSubtarget.h9
-rw-r--r--lib/Target/SystemZ/SystemZTDC.cpp382
-rw-r--r--lib/Target/SystemZ/SystemZTargetMachine.cpp20
-rw-r--r--lib/Target/SystemZ/SystemZTargetMachine.h2
-rw-r--r--lib/Target/SystemZ/TargetInfo/Makefile15
-rw-r--r--lib/Target/Target.cpp22
-rw-r--r--lib/Target/TargetLoweringObjectFile.cpp23
-rw-r--r--lib/Target/TargetMachine.cpp117
-rw-r--r--lib/Target/TargetMachineC.cpp23
-rw-r--r--lib/Target/TargetRecip.cpp10
-rw-r--r--lib/Target/TargetSubtargetInfo.cpp2
-rw-r--r--lib/Target/WebAssembly/CMakeLists.txt7
-rw-r--r--lib/Target/WebAssembly/Disassembler/Makefile16
-rw-r--r--lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp6
-rw-r--r--lib/Target/WebAssembly/InstPrinter/Makefile16
-rw-r--r--lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp52
-rw-r--r--lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h5
-rw-r--r--lib/Target/WebAssembly/MCTargetDesc/Makefile16
-rw-r--r--lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp9
-rw-r--r--lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp7
-rw-r--r--lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp4
-rw-r--r--lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp13
-rw-r--r--lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp19
-rw-r--r--lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h56
-rw-r--r--lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp12
-rw-r--r--lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h9
-rw-r--r--lib/Target/WebAssembly/Makefile26
-rw-r--r--lib/Target/WebAssembly/README.txt95
-rw-r--r--lib/Target/WebAssembly/Relooper.cpp984
-rw-r--r--lib/Target/WebAssembly/Relooper.h186
-rw-r--r--lib/Target/WebAssembly/TargetInfo/Makefile15
-rw-r--r--lib/Target/WebAssembly/WebAssembly.h11
-rw-r--r--lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp14
-rw-r--r--lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp86
-rw-r--r--lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp338
-rw-r--r--lib/Target/WebAssembly/WebAssemblyFastISel.cpp1108
-rw-r--r--lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp296
-rw-r--r--lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp206
-rw-r--r--lib/Target/WebAssembly/WebAssemblyFrameLowering.h21
-rw-r--r--lib/Target/WebAssembly/WebAssemblyISD.def2
-rw-r--r--lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp18
-rw-r--r--lib/Target/WebAssembly/WebAssemblyISelLowering.cpp361
-rw-r--r--lib/Target/WebAssembly/WebAssemblyISelLowering.h25
-rw-r--r--lib/Target/WebAssembly/WebAssemblyInstrControl.td34
-rw-r--r--lib/Target/WebAssembly/WebAssemblyInstrFloat.td16
-rw-r--r--lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp64
-rw-r--r--lib/Target/WebAssembly/WebAssemblyInstrInfo.h12
-rw-r--r--lib/Target/WebAssembly/WebAssemblyInstrInfo.td41
-rw-r--r--lib/Target/WebAssembly/WebAssemblyInstrInteger.td35
-rw-r--r--lib/Target/WebAssembly/WebAssemblyInstrMemory.td608
-rw-r--r--lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp21
-rw-r--r--lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h40
-rw-r--r--lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp105
-rw-r--r--lib/Target/WebAssembly/WebAssemblyPEI.cpp1066
-rw-r--r--lib/Target/WebAssembly/WebAssemblyPeephole.cpp135
-rw-r--r--lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp136
-rw-r--r--lib/Target/WebAssembly/WebAssemblyRegColoring.cpp4
-rw-r--r--lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp32
-rw-r--r--lib/Target/WebAssembly/WebAssemblyRegStackify.cpp722
-rw-r--r--lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp73
-rw-r--r--lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp97
-rw-r--r--lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h6
-rw-r--r--lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp114
-rw-r--r--lib/Target/WebAssembly/WebAssemblyStoreResults.cpp158
-rw-r--r--lib/Target/WebAssembly/WebAssemblySubtarget.cpp12
-rw-r--r--lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp104
-rw-r--r--lib/Target/WebAssembly/WebAssemblyTargetMachine.h4
-rw-r--r--lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp56
-rw-r--r--lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h10
-rw-r--r--lib/Target/WebAssembly/known_gcc_test_failures.txt248
-rw-r--r--lib/Target/X86/AsmParser/Makefile15
-rw-r--r--lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp4
-rw-r--r--lib/Target/X86/AsmParser/X86AsmParser.cpp366
-rw-r--r--lib/Target/X86/AsmParser/X86AsmParserCommon.h2
-rw-r--r--lib/Target/X86/AsmParser/X86Operand.h61
-rw-r--r--lib/Target/X86/CMakeLists.txt7
-rw-r--r--lib/Target/X86/Disassembler/Makefile18
-rw-r--r--lib/Target/X86/Disassembler/X86Disassembler.cpp108
-rw-r--r--lib/Target/X86/Disassembler/X86Disassembler.h112
-rw-r--r--lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp24
-rw-r--r--lib/Target/X86/Disassembler/X86DisassemblerDecoder.h7
-rw-r--r--lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h10
-rw-r--r--lib/Target/X86/InstPrinter/Makefile15
-rw-r--r--lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp23
-rw-r--r--lib/Target/X86/InstPrinter/X86InstComments.cpp837
-rw-r--r--lib/Target/X86/MCTargetDesc/CMakeLists.txt2
-rw-r--r--lib/Target/X86/MCTargetDesc/Makefile16
-rw-r--r--lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp114
-rw-r--r--lib/Target/X86/MCTargetDesc/X86BaseInfo.h31
-rw-r--r--lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp75
-rw-r--r--lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp141
-rw-r--r--lib/Target/X86/MCTargetDesc/X86FixupKinds.h6
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp2
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp618
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp101
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h10
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp119
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp7
-rw-r--r--lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp8
-rw-r--r--lib/Target/X86/Makefile23
-rw-r--r--lib/Target/X86/README-X86-64.txt2
-rw-r--r--lib/Target/X86/README.txt6
-rw-r--r--lib/Target/X86/TargetInfo/Makefile16
-rw-r--r--lib/Target/X86/Utils/Makefile15
-rw-r--r--lib/Target/X86/Utils/X86ShuffleDecode.cpp162
-rw-r--r--lib/Target/X86/Utils/X86ShuffleDecode.h94
-rw-r--r--lib/Target/X86/X86.h15
-rw-r--r--lib/Target/X86/X86.td395
-rw-r--r--lib/Target/X86/X86AsmPrinter.cpp81
-rw-r--r--lib/Target/X86/X86AsmPrinter.h59
-rw-r--r--lib/Target/X86/X86CallFrameOptimization.cpp190
-rw-r--r--lib/Target/X86/X86CallingConv.td60
-rw-r--r--lib/Target/X86/X86ExpandPseudo.cpp82
-rw-r--r--lib/Target/X86/X86FastISel.cpp332
-rw-r--r--lib/Target/X86/X86FixupBWInsts.cpp371
-rw-r--r--lib/Target/X86/X86FixupLEAs.cpp124
-rw-r--r--lib/Target/X86/X86FixupSetCC.cpp186
-rw-r--r--lib/Target/X86/X86FloatingPoint.cpp410
-rw-r--r--lib/Target/X86/X86FrameLowering.cpp529
-rw-r--r--lib/Target/X86/X86FrameLowering.h53
-rw-r--r--lib/Target/X86/X86ISelDAGToDAG.cpp444
-rw-r--r--lib/Target/X86/X86ISelLowering.cpp12158
-rw-r--r--lib/Target/X86/X86ISelLowering.h306
-rw-r--r--lib/Target/X86/X86InstrAVX512.td3106
-rw-r--r--lib/Target/X86/X86InstrBuilder.h30
-rw-r--r--lib/Target/X86/X86InstrCompiler.td216
-rw-r--r--lib/Target/X86/X86InstrControl.td12
-rw-r--r--lib/Target/X86/X86InstrFPStack.td10
-rw-r--r--lib/Target/X86/X86InstrFormats.td2
-rw-r--r--lib/Target/X86/X86InstrFragmentsSIMD.td244
-rw-r--r--lib/Target/X86/X86InstrInfo.cpp2229
-rw-r--r--lib/Target/X86/X86InstrInfo.h268
-rw-r--r--lib/Target/X86/X86InstrInfo.td367
-rw-r--r--lib/Target/X86/X86InstrMMX.td2
-rw-r--r--lib/Target/X86/X86InstrMPX.td4
-rw-r--r--lib/Target/X86/X86InstrSSE.td1473
-rw-r--r--lib/Target/X86/X86InstrSystem.td40
-rw-r--r--lib/Target/X86/X86InstrVMX.td6
-rw-r--r--lib/Target/X86/X86InstrXOP.td214
-rw-r--r--lib/Target/X86/X86IntrinsicsInfo.h928
-rw-r--r--lib/Target/X86/X86MCInstLower.cpp635
-rw-r--r--lib/Target/X86/X86MachineFunctionInfo.h13
-rw-r--r--lib/Target/X86/X86OptimizeLEAs.cpp480
-rw-r--r--lib/Target/X86/X86PadShortFunction.cpp14
-rw-r--r--lib/Target/X86/X86RegisterInfo.cpp52
-rw-r--r--lib/Target/X86/X86RegisterInfo.td63
-rw-r--r--lib/Target/X86/X86Schedule.td15
-rw-r--r--lib/Target/X86/X86ScheduleAtom.td1
-rw-r--r--lib/Target/X86/X86SelectionDAGInfo.cpp17
-rw-r--r--lib/Target/X86/X86SelectionDAGInfo.h26
-rw-r--r--lib/Target/X86/X86ShuffleDecodeConstantPool.cpp218
-rw-r--r--lib/Target/X86/X86ShuffleDecodeConstantPool.h15
-rw-r--r--lib/Target/X86/X86Subtarget.cpp175
-rw-r--r--lib/Target/X86/X86Subtarget.h97
-rw-r--r--lib/Target/X86/X86TargetMachine.cpp94
-rw-r--r--lib/Target/X86/X86TargetMachine.h5
-rw-r--r--lib/Target/X86/X86TargetObjectFile.cpp75
-rw-r--r--lib/Target/X86/X86TargetObjectFile.h13
-rw-r--r--lib/Target/X86/X86TargetTransformInfo.cpp405
-rw-r--r--lib/Target/X86/X86TargetTransformInfo.h5
-rw-r--r--lib/Target/X86/X86VZeroUpper.cpp88
-rw-r--r--lib/Target/X86/X86WinAllocaExpander.cpp294
-rw-r--r--lib/Target/X86/X86WinEHState.cpp464
-rw-r--r--lib/Target/XCore/Disassembler/Makefile16
-rw-r--r--lib/Target/XCore/Disassembler/XCoreDisassembler.cpp2
-rw-r--r--lib/Target/XCore/InstPrinter/Makefile16
-rw-r--r--lib/Target/XCore/MCTargetDesc/Makefile16
-rw-r--r--lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp18
-rw-r--r--lib/Target/XCore/Makefile23
-rw-r--r--lib/Target/XCore/TargetInfo/Makefile16
-rw-r--r--lib/Target/XCore/XCoreAsmPrinter.cpp3
-rw-r--r--lib/Target/XCore/XCoreFrameLowering.cpp37
-rw-r--r--lib/Target/XCore/XCoreFrameLowering.h4
-rw-r--r--lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp5
-rw-r--r--lib/Target/XCore/XCoreISelDAGToDAG.cpp77
-rw-r--r--lib/Target/XCore/XCoreISelLowering.cpp242
-rw-r--r--lib/Target/XCore/XCoreISelLowering.h39
-rw-r--r--lib/Target/XCore/XCoreInstrInfo.cpp68
-rw-r--r--lib/Target/XCore/XCoreInstrInfo.h13
-rw-r--r--lib/Target/XCore/XCoreLowerThreadLocal.cpp12
-rw-r--r--lib/Target/XCore/XCoreSelectionDAGInfo.cpp13
-rw-r--r--lib/Target/XCore/XCoreSelectionDAGInfo.h21
-rw-r--r--lib/Target/XCore/XCoreTargetMachine.cpp14
-rw-r--r--lib/Target/XCore/XCoreTargetMachine.h2
-rw-r--r--lib/Target/XCore/XCoreTargetObjectFile.cpp8
-rw-r--r--lib/Target/XCore/XCoreTargetObjectFile.h3
1040 files changed, 106529 insertions, 53082 deletions
diff --git a/lib/Target/AArch64/AArch64.h b/lib/Target/AArch64/AArch64.h
index 21106c9ad29a..c767c75fce57 100644
--- a/lib/Target/AArch64/AArch64.h
+++ b/lib/Target/AArch64/AArch64.h
@@ -27,6 +27,7 @@ class FunctionPass;
class MachineFunctionPass;
FunctionPass *createAArch64DeadRegisterDefinitions();
+FunctionPass *createAArch64RedundantCopyEliminationPass();
FunctionPass *createAArch64ConditionalCompares();
FunctionPass *createAArch64AdvSIMDScalar();
FunctionPass *createAArch64BranchRelaxation();
@@ -44,6 +45,8 @@ FunctionPass *createAArch64A53Fix835769();
FunctionPass *createAArch64CleanupLocalDynamicTLSPass();
FunctionPass *createAArch64CollectLOHPass();
+
+void initializeAArch64ExpandPseudoPass(PassRegistry&);
} // end namespace llvm
#endif
diff --git a/lib/Target/AArch64/AArch64.td b/lib/Target/AArch64/AArch64.td
index cd3e84d38fe2..b1e881685b0c 100644
--- a/lib/Target/AArch64/AArch64.td
+++ b/lib/Target/AArch64/AArch64.td
@@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
-// Target-independent interfaces which we are implementing
+// Target-independent interfaces which we are implementing.
//===----------------------------------------------------------------------===//
include "llvm/Target/Target.td"
@@ -32,6 +32,9 @@ def FeatureCrypto : SubtargetFeature<"crypto", "HasCrypto", "true",
def FeatureCRC : SubtargetFeature<"crc", "HasCRC", "true",
"Enable ARMv8 CRC-32 checksum instructions">;
+def FeatureRAS : SubtargetFeature<"ras", "HasRAS", "true",
+ "Enable ARMv8 Reliability, Availability and Serviceability Extensions">;
+
def FeaturePerfMon : SubtargetFeature<"perfmon", "HasPerfMon", "true",
"Enable ARMv8 PMUv3 Performance Monitors extension">;
@@ -58,6 +61,50 @@ def FeatureReserveX18 : SubtargetFeature<"reserve-x18", "ReserveX18", "true",
"Reserve X18, making it unavailable "
"as a GPR">;
+def FeatureMergeNarrowLd : SubtargetFeature<"merge-narrow-ld",
+ "MergeNarrowLoads", "true",
+ "Merge narrow load instructions">;
+
+def FeatureUseAA : SubtargetFeature<"use-aa", "UseAA", "true",
+ "Use alias analysis during codegen">;
+
+def FeatureBalanceFPOps : SubtargetFeature<"balance-fp-ops", "BalanceFPOps",
+ "true",
+    "Balance mix of odd and even D-registers for fp multiply(-accumulate) ops">;
+
+def FeaturePredictableSelectIsExpensive : SubtargetFeature<
+ "predictable-select-expensive", "PredictableSelectIsExpensive", "true",
+ "Prefer likely predicted branches over selects">;
+
+def FeatureCustomCheapAsMoveHandling : SubtargetFeature<"custom-cheap-as-move",
+ "CustomAsCheapAsMove", "true",
+ "Use custom code for TargetInstrInfo::isAsCheapAsAMove()">;
+
+def FeaturePostRAScheduler : SubtargetFeature<"use-postra-scheduler",
+ "UsePostRAScheduler", "true", "Schedule again after register allocation">;
+
+def FeatureSlowMisaligned128Store : SubtargetFeature<"slow-misaligned-128store",
+ "Misaligned128StoreIsSlow", "true", "Misaligned 128 bit stores are slow">;
+
+def FeatureAvoidQuadLdStPairs : SubtargetFeature<"no-quad-ldst-pairs",
+ "AvoidQuadLdStPairs", "true",
+ "Do not form quad load/store pair operations">;
+
+def FeatureAlternateSExtLoadCVTF32Pattern : SubtargetFeature<
+ "alternate-sextload-cvt-f32-pattern", "UseAlternateSExtLoadCVTF32Pattern",
+ "true", "Use alternative pattern for sextload convert to f32">;
+
+def FeatureMacroOpFusion : SubtargetFeature<
+ "macroop-fusion", "HasMacroOpFusion", "true",
+ "CPU supports macro op fusion">;
+
+def FeatureDisableLatencySchedHeuristic : SubtargetFeature<
+ "disable-latency-sched-heuristic", "DisableLatencySchedHeuristic", "true",
+ "Disable latency scheduling heuristic">;
+
+def FeatureUseRSqrt : SubtargetFeature<
+ "use-reverse-square-root", "UseRSqrt", "true", "Use reverse square root">;
+
//===----------------------------------------------------------------------===//
// Architectures.
//
@@ -66,7 +113,7 @@ def HasV8_1aOps : SubtargetFeature<"v8.1a", "HasV8_1aOps", "true",
"Support ARM v8.1a instructions", [FeatureCRC]>;
def HasV8_2aOps : SubtargetFeature<"v8.2a", "HasV8_2aOps", "true",
- "Support ARM v8.2a instructions", [HasV8_1aOps]>;
+ "Support ARM v8.2a instructions", [HasV8_1aOps, FeatureRAS]>;
//===----------------------------------------------------------------------===//
// Register File Description
@@ -85,67 +132,145 @@ include "AArch64InstrInfo.td"
def AArch64InstrInfo : InstrInfo;
//===----------------------------------------------------------------------===//
+// Named operands for MRS/MSR/TLBI/...
+//===----------------------------------------------------------------------===//
+
+include "AArch64SystemOperands.td"
+
+//===----------------------------------------------------------------------===//
// AArch64 Processors supported.
//
include "AArch64SchedA53.td"
include "AArch64SchedA57.td"
include "AArch64SchedCyclone.td"
include "AArch64SchedM1.td"
+include "AArch64SchedKryo.td"
+include "AArch64SchedVulcan.td"
def ProcA35 : SubtargetFeature<"a35", "ARMProcFamily", "CortexA35",
- "Cortex-A35 ARM processors",
- [FeatureFPARMv8,
- FeatureNEON,
- FeatureCrypto,
+ "Cortex-A35 ARM processors", [
FeatureCRC,
- FeaturePerfMon]>;
+ FeatureCrypto,
+ FeatureFPARMv8,
+ FeatureNEON,
+ FeaturePerfMon
+ ]>;
def ProcA53 : SubtargetFeature<"a53", "ARMProcFamily", "CortexA53",
- "Cortex-A53 ARM processors",
- [FeatureFPARMv8,
- FeatureNEON,
- FeatureCrypto,
+ "Cortex-A53 ARM processors", [
+ FeatureBalanceFPOps,
FeatureCRC,
- FeaturePerfMon]>;
+ FeatureCrypto,
+ FeatureCustomCheapAsMoveHandling,
+ FeatureFPARMv8,
+ FeatureNEON,
+ FeaturePerfMon,
+ FeaturePostRAScheduler,
+ FeatureUseAA
+ ]>;
def ProcA57 : SubtargetFeature<"a57", "ARMProcFamily", "CortexA57",
- "Cortex-A57 ARM processors",
- [FeatureFPARMv8,
+ "Cortex-A57 ARM processors", [
+ FeatureBalanceFPOps,
+ FeatureCRC,
+ FeatureCrypto,
+ FeatureCustomCheapAsMoveHandling,
+ FeatureFPARMv8,
+ FeatureMergeNarrowLd,
FeatureNEON,
+ FeaturePerfMon,
+ FeaturePostRAScheduler,
+ FeaturePredictableSelectIsExpensive
+ ]>;
+
+def ProcA72 : SubtargetFeature<"a72", "ARMProcFamily", "CortexA72",
+ "Cortex-A72 ARM processors", [
+ FeatureCRC,
FeatureCrypto,
+ FeatureFPARMv8,
+ FeatureNEON,
+ FeaturePerfMon
+ ]>;
+
+def ProcA73 : SubtargetFeature<"a73", "ARMProcFamily", "CortexA73",
+ "Cortex-A73 ARM processors", [
FeatureCRC,
- FeaturePerfMon]>;
+ FeatureCrypto,
+ FeatureFPARMv8,
+ FeatureNEON,
+ FeaturePerfMon
+ ]>;
def ProcCyclone : SubtargetFeature<"cyclone", "ARMProcFamily", "Cyclone",
- "Cyclone",
- [FeatureFPARMv8,
- FeatureNEON,
+ "Cyclone", [
+ FeatureAlternateSExtLoadCVTF32Pattern,
FeatureCrypto,
- FeatureCRC,
+ FeatureDisableLatencySchedHeuristic,
+ FeatureFPARMv8,
+ FeatureMacroOpFusion,
+ FeatureNEON,
FeaturePerfMon,
- FeatureZCRegMove, FeatureZCZeroing]>;
+ FeatureSlowMisaligned128Store,
+ FeatureZCRegMove,
+ FeatureZCZeroing
+ ]>;
def ProcExynosM1 : SubtargetFeature<"exynosm1", "ARMProcFamily", "ExynosM1",
- "Samsung Exynos-M1 processors",
- [FeatureFPARMv8,
- FeatureNEON,
- FeatureCrypto,
+ "Samsung Exynos-M1 processors", [
+ FeatureAvoidQuadLdStPairs,
FeatureCRC,
- FeaturePerfMon]>;
+ FeatureCrypto,
+ FeatureCustomCheapAsMoveHandling,
+ FeatureFPARMv8,
+ FeatureNEON,
+ FeaturePerfMon,
+ FeaturePostRAScheduler,
+ FeatureUseRSqrt
+ ]>;
+
+def ProcKryo : SubtargetFeature<"kryo", "ARMProcFamily", "Kryo",
+ "Qualcomm Kryo processors", [
+ FeatureCRC,
+ FeatureCrypto,
+ FeatureCustomCheapAsMoveHandling,
+ FeatureFPARMv8,
+ FeatureMergeNarrowLd,
+ FeatureNEON,
+ FeaturePerfMon,
+ FeaturePostRAScheduler,
+ FeaturePredictableSelectIsExpensive,
+ FeatureZCZeroing
+ ]>;
+
+def ProcVulcan : SubtargetFeature<"vulcan", "ARMProcFamily", "Vulcan",
+ "Broadcom Vulcan processors", [
+ FeatureCRC,
+ FeatureCrypto,
+ FeatureFPARMv8,
+ FeatureMacroOpFusion,
+ FeatureNEON,
+ FeaturePostRAScheduler,
+ HasV8_1aOps]>;
-def : ProcessorModel<"generic", NoSchedModel, [FeatureFPARMv8,
- FeatureNEON,
- FeatureCRC,
- FeaturePerfMon]>;
+def : ProcessorModel<"generic", NoSchedModel, [
+ FeatureCRC,
+ FeatureFPARMv8,
+ FeatureNEON,
+ FeaturePerfMon,
+ FeaturePostRAScheduler
+ ]>;
// FIXME: Cortex-A35 is currently modelled as a Cortex-A53
def : ProcessorModel<"cortex-a35", CortexA53Model, [ProcA35]>;
def : ProcessorModel<"cortex-a53", CortexA53Model, [ProcA53]>;
def : ProcessorModel<"cortex-a57", CortexA57Model, [ProcA57]>;
-// FIXME: Cortex-A72 is currently modelled as an Cortex-A57.
-def : ProcessorModel<"cortex-a72", CortexA57Model, [ProcA57]>;
+// FIXME: Cortex-A72 and Cortex-A73 are currently modelled as a Cortex-A57.
+def : ProcessorModel<"cortex-a72", CortexA57Model, [ProcA72]>;
+def : ProcessorModel<"cortex-a73", CortexA57Model, [ProcA73]>;
def : ProcessorModel<"cyclone", CycloneModel, [ProcCyclone]>;
def : ProcessorModel<"exynos-m1", ExynosM1Model, [ProcExynosM1]>;
+def : ProcessorModel<"kryo", KryoModel, [ProcKryo]>;
+def : ProcessorModel<"vulcan", VulcanModel, [ProcVulcan]>;
//===----------------------------------------------------------------------===//
// Assembly parser
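Each SubtargetFeature above lowers to a boolean flag on AArch64Subtarget with a generated accessor, and individual passes query that accessor to gate their work. A minimal sketch of the pattern, using the balanceFPOps() accessor produced for FeatureBalanceFPOps (the helper name here is illustrative only):

static bool wantFPBalancing(const MachineFunction &F) {
  // Cores listing FeatureBalanceFPOps (Cortex-A53/A57 above) set the
  // BalanceFPOps flag on their subtarget, so they opt in automatically.
  return F.getSubtarget<AArch64Subtarget>().balanceFPOps();
}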
diff --git a/lib/Target/AArch64/AArch64A53Fix835769.cpp b/lib/Target/AArch64/AArch64A53Fix835769.cpp
index d215d9e831c0..c2cca63f4977 100644
--- a/lib/Target/AArch64/AArch64A53Fix835769.cpp
+++ b/lib/Target/AArch64/AArch64A53Fix835769.cpp
@@ -22,7 +22,6 @@
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -87,6 +86,11 @@ public:
bool runOnMachineFunction(MachineFunction &F) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return "Workaround A53 erratum 835769 pass";
}
@@ -133,8 +137,8 @@ static MachineBasicBlock *getBBFallenThrough(MachineBasicBlock *MBB,
MachineBasicBlock *PrevBB = &*std::prev(MBBI);
for (MachineBasicBlock *S : MBB->predecessors())
- if (S == PrevBB && !TII->AnalyzeBranch(*PrevBB, TBB, FBB, Cond) &&
- !TBB && !FBB)
+ if (S == PrevBB && !TII->analyzeBranch(*PrevBB, TBB, FBB, Cond) && !TBB &&
+ !FBB)
return S;
return nullptr;
diff --git a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
index 3d1ab4e3fc2b..0465e59dc54a 100644
--- a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
+++ b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -43,7 +43,6 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include <list>
using namespace llvm;
#define DEBUG_TYPE "aarch64-a57-fp-load-balancing"
@@ -125,6 +124,11 @@ public:
bool runOnMachineFunction(MachineFunction &F) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return "A57 FP Anti-dependency breaker";
}
@@ -222,7 +226,7 @@ public:
}
/// Return true if MI is a member of the chain.
- bool contains(MachineInstr *MI) { return Insts.count(MI) > 0; }
+ bool contains(MachineInstr &MI) { return Insts.count(&MI) > 0; }
/// Return the number of instructions in the chain.
unsigned size() const {
@@ -248,9 +252,10 @@ public:
MachineInstr *getKill() const { return KillInst; }
/// Return an instruction that can be used as an iterator for the end
/// of the chain. This is the maximum of KillInst (if set) and LastInst.
- MachineBasicBlock::iterator getEnd() const {
+ MachineBasicBlock::iterator end() const {
return ++MachineBasicBlock::iterator(KillInst ? KillInst : LastInst);
}
+ MachineBasicBlock::iterator begin() const { return getStart(); }
/// Can the Kill instruction (assuming one exists) be modified?
bool isKillImmutable() const { return KillIsImmutable; }
@@ -307,9 +312,10 @@ public:
//===----------------------------------------------------------------------===//
bool AArch64A57FPLoadBalancing::runOnMachineFunction(MachineFunction &F) {
- // Don't do anything if this isn't an A53 or A57.
- if (!(F.getSubtarget<AArch64Subtarget>().isCortexA53() ||
- F.getSubtarget<AArch64Subtarget>().isCortexA57()))
+ if (skipFunction(*F.getFunction()))
+ return false;
+
+ if (!F.getSubtarget<AArch64Subtarget>().balanceFPOps())
return false;
bool Changed = false;
@@ -492,15 +498,14 @@ bool AArch64A57FPLoadBalancing::colorChainSet(std::vector<Chain*> GV,
int AArch64A57FPLoadBalancing::scavengeRegister(Chain *G, Color C,
MachineBasicBlock &MBB) {
RegScavenger RS;
- RS.enterBasicBlock(&MBB);
+ RS.enterBasicBlock(MBB);
RS.forward(MachineBasicBlock::iterator(G->getStart()));
// Can we find an appropriate register that is available throughout the life
// of the chain?
unsigned RegClassID = G->getStart()->getDesc().OpInfo[0].RegClass;
BitVector AvailableRegs = RS.getRegsAvailable(TRI->getRegClass(RegClassID));
- for (MachineBasicBlock::iterator I = G->getStart(), E = G->getEnd();
- I != E; ++I) {
+ for (MachineBasicBlock::iterator I = G->begin(), E = G->end(); I != E; ++I) {
RS.forward(I);
AvailableRegs &= RS.getRegsAvailable(TRI->getRegClass(RegClassID));
@@ -530,8 +535,7 @@ int AArch64A57FPLoadBalancing::scavengeRegister(Chain *G, Color C,
for (auto Reg : Ord) {
if (!AvailableRegs[Reg])
continue;
- if ((C == Color::Even && (Reg % 2) == 0) ||
- (C == Color::Odd && (Reg % 2) == 1))
+ if (C == getColor(Reg))
return Reg;
}
@@ -554,16 +558,14 @@ bool AArch64A57FPLoadBalancing::colorChain(Chain *G, Color C,
DEBUG(dbgs() << " - Scavenged register: " << TRI->getName(Reg) << "\n");
std::map<unsigned, unsigned> Substs;
- for (MachineBasicBlock::iterator I = G->getStart(), E = G->getEnd();
- I != E; ++I) {
- if (!G->contains(I) &&
- (&*I != G->getKill() || G->isKillImmutable()))
+ for (MachineInstr &I : *G) {
+ if (!G->contains(I) && (&I != G->getKill() || G->isKillImmutable()))
continue;
// I is a member of G, or I is a mutable instruction that kills G.
std::vector<unsigned> ToErase;
- for (auto &U : I->operands()) {
+ for (auto &U : I.operands()) {
if (U.isReg() && U.isUse() && Substs.find(U.getReg()) != Substs.end()) {
unsigned OrigReg = U.getReg();
U.setReg(Substs[OrigReg]);
@@ -583,11 +585,11 @@ bool AArch64A57FPLoadBalancing::colorChain(Chain *G, Color C,
Substs.erase(J);
// Only change the def if this isn't the last instruction.
- if (&*I != G->getKill()) {
- MachineOperand &MO = I->getOperand(0);
+ if (&I != G->getKill()) {
+ MachineOperand &MO = I.getOperand(0);
bool Change = TransformAll || getColor(MO.getReg()) != C;
- if (G->requiresFixup() && &*I == G->getLast())
+ if (G->requiresFixup() && &I == G->getLast())
Change = false;
if (Change) {
diff --git a/lib/Target/AArch64/AArch64AddressTypePromotion.cpp b/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
index 3afcdfb8b930..4846ef08c983 100644
--- a/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
+++ b/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
@@ -20,10 +20,9 @@
// e = getelementptr ..., i64 a
//
// This is legal to do if the computations are marked with either nsw or nuw
-// markers.
-// Moreover, the current heuristic is simple: it does not create new sext
-// operations, i.e., it gives up when a sext would have forked (e.g., if
-// a = add i32 b, c, two sexts are required to promote the computation).
+// markers. Moreover, the current heuristic is simple: it does not create new
+// sext operations, i.e., it gives up when a sext would have forked (e.g., if
+// a = add i32 b, c, two sexts are required to promote the computation).
//
// FIXME: This pass may be useful for other targets too.
// ===---------------------------------------------------------------------===//
@@ -207,9 +206,7 @@ bool AArch64AddressTypePromotion::shouldGetThrough(const Instruction *Inst) {
}
static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) {
- if (isa<SelectInst>(Inst) && OpIdx == 0)
- return false;
- return true;
+ return !(isa<SelectInst>(Inst) && OpIdx == 0);
}
bool
@@ -481,6 +478,9 @@ void AArch64AddressTypePromotion::analyzeSExtension(Instructions &SExtInsts) {
}
bool AArch64AddressTypePromotion::runOnFunction(Function &F) {
+ if (skipFunction(F))
+ return false;
+
if (!EnableAddressTypePromotion || F.isDeclaration())
return false;
Func = &F;
diff --git a/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp b/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
index 1644d71d2821..d0a2dd3fa1fc 100644
--- a/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
+++ b/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
@@ -76,12 +76,12 @@ private:
// isProfitableToTransform - Predicate function to determine whether an
// instruction should be transformed to its equivalent AdvSIMD scalar
// instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
- bool isProfitableToTransform(const MachineInstr *MI) const;
+ bool isProfitableToTransform(const MachineInstr &MI) const;
// transformInstruction - Perform the transformation of an instruction
// to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
// to be the correct register class, minimizing cross-class copies.
- void transformInstruction(MachineInstr *MI);
+ void transformInstruction(MachineInstr &MI);
// processMachineBasicBlock - Main optimization loop.
bool processMachineBasicBlock(MachineBasicBlock *MBB);
@@ -132,19 +132,19 @@ static bool isFPR64(unsigned Reg, unsigned SubReg,
// getSrcFromCopy - Get the original source register for a GPR64 <--> FPR64
// copy instruction. Return zero_reg if the instruction is not a copy.
-static unsigned getSrcFromCopy(const MachineInstr *MI,
- const MachineRegisterInfo *MRI,
- unsigned &SubReg) {
+static MachineOperand *getSrcFromCopy(MachineInstr *MI,
+ const MachineRegisterInfo *MRI,
+ unsigned &SubReg) {
SubReg = 0;
// The "FMOV Xd, Dn" instruction is the typical form.
if (MI->getOpcode() == AArch64::FMOVDXr ||
MI->getOpcode() == AArch64::FMOVXDr)
- return MI->getOperand(1).getReg();
+ return &MI->getOperand(1);
// A lane zero extract "UMOV.d Xd, Vn[0]" is equivalent. We shouldn't see
// these at this stage, but it's easy to check for.
if (MI->getOpcode() == AArch64::UMOVvi64 && MI->getOperand(2).getImm() == 0) {
SubReg = AArch64::dsub;
- return MI->getOperand(1).getReg();
+ return &MI->getOperand(1);
}
// Or just a plain COPY instruction. This can be directly to/from FPR64,
// or it can be a dsub subreg reference to an FPR128.
@@ -152,18 +152,18 @@ static unsigned getSrcFromCopy(const MachineInstr *MI,
if (isFPR64(MI->getOperand(0).getReg(), MI->getOperand(0).getSubReg(),
MRI) &&
isGPR64(MI->getOperand(1).getReg(), MI->getOperand(1).getSubReg(), MRI))
- return MI->getOperand(1).getReg();
+ return &MI->getOperand(1);
if (isGPR64(MI->getOperand(0).getReg(), MI->getOperand(0).getSubReg(),
MRI) &&
isFPR64(MI->getOperand(1).getReg(), MI->getOperand(1).getSubReg(),
MRI)) {
SubReg = MI->getOperand(1).getSubReg();
- return MI->getOperand(1).getReg();
+ return &MI->getOperand(1);
}
}
// Otherwise, this is some other kind of instruction.
- return 0;
+ return nullptr;
}
// getTransformOpcode - For any opcode for which there is an AdvSIMD equivalent
@@ -189,16 +189,16 @@ static unsigned getTransformOpcode(unsigned Opc) {
return Opc;
}
-static bool isTransformable(const MachineInstr *MI) {
- unsigned Opc = MI->getOpcode();
+static bool isTransformable(const MachineInstr &MI) {
+ unsigned Opc = MI.getOpcode();
return Opc != getTransformOpcode(Opc);
}
// isProfitableToTransform - Predicate function to determine whether an
// instruction should be transformed to its equivalent AdvSIMD scalar
// instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
-bool
-AArch64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
+bool AArch64AdvSIMDScalar::isProfitableToTransform(
+ const MachineInstr &MI) const {
// If this instruction isn't eligible to be transformed (no SIMD equivalent),
// early exit since that's the common case.
if (!isTransformable(MI))
@@ -209,33 +209,33 @@ AArch64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
unsigned NumNewCopies = 3;
unsigned NumRemovableCopies = 0;
- unsigned OrigSrc0 = MI->getOperand(1).getReg();
- unsigned OrigSrc1 = MI->getOperand(2).getReg();
- unsigned Src0 = 0, SubReg0;
- unsigned Src1 = 0, SubReg1;
+ unsigned OrigSrc0 = MI.getOperand(1).getReg();
+ unsigned OrigSrc1 = MI.getOperand(2).getReg();
+ unsigned SubReg0;
+ unsigned SubReg1;
if (!MRI->def_empty(OrigSrc0)) {
MachineRegisterInfo::def_instr_iterator Def =
MRI->def_instr_begin(OrigSrc0);
assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
- Src0 = getSrcFromCopy(&*Def, MRI, SubReg0);
+ MachineOperand *MOSrc0 = getSrcFromCopy(&*Def, MRI, SubReg0);
// If the source was from a copy, we don't need to insert a new copy.
- if (Src0)
+ if (MOSrc0)
--NumNewCopies;
// If there are no other users of the original source, we can delete
// that instruction.
- if (Src0 && MRI->hasOneNonDBGUse(OrigSrc0))
+ if (MOSrc0 && MRI->hasOneNonDBGUse(OrigSrc0))
++NumRemovableCopies;
}
if (!MRI->def_empty(OrigSrc1)) {
MachineRegisterInfo::def_instr_iterator Def =
MRI->def_instr_begin(OrigSrc1);
assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
- Src1 = getSrcFromCopy(&*Def, MRI, SubReg1);
- if (Src1)
+ MachineOperand *MOSrc1 = getSrcFromCopy(&*Def, MRI, SubReg1);
+ if (MOSrc1)
--NumNewCopies;
// If there are no other users of the original source, we can delete
// that instruction.
- if (Src1 && MRI->hasOneNonDBGUse(OrigSrc1))
+ if (MOSrc1 && MRI->hasOneNonDBGUse(OrigSrc1))
++NumRemovableCopies;
}
@@ -244,14 +244,14 @@ AArch64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
// any of the uses is a transformable instruction, it's likely the transforms
// will chain, enabling us to save a copy there, too. This is an aggressive
// heuristic that approximates the graph-based cost analysis described above.
- unsigned Dst = MI->getOperand(0).getReg();
+ unsigned Dst = MI.getOperand(0).getReg();
bool AllUsesAreCopies = true;
for (MachineRegisterInfo::use_instr_nodbg_iterator
Use = MRI->use_instr_nodbg_begin(Dst),
E = MRI->use_instr_nodbg_end();
Use != E; ++Use) {
unsigned SubReg;
- if (getSrcFromCopy(&*Use, MRI, SubReg) || isTransformable(&*Use))
+ if (getSrcFromCopy(&*Use, MRI, SubReg) || isTransformable(*Use))
++NumRemovableCopies;
// If the use is an INSERT_SUBREG, that's still something that can
// directly use the FPR64, so we don't invalidate AllUsesAreCopies. It's
@@ -279,12 +279,11 @@ AArch64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
return TransformAll;
}
-static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr *MI,
+static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr &MI,
unsigned Dst, unsigned Src, bool IsKill) {
- MachineInstrBuilder MIB =
- BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AArch64::COPY),
- Dst)
- .addReg(Src, getKillRegState(IsKill));
+ MachineInstrBuilder MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
+ TII->get(AArch64::COPY), Dst)
+ .addReg(Src, getKillRegState(IsKill));
DEBUG(dbgs() << " adding copy: " << *MIB);
++NumCopiesInserted;
return MIB;
@@ -293,43 +292,56 @@ static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr *MI,
// transformInstruction - Perform the transformation of an instruction
// to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
// to be the correct register class, minimizing cross-class copies.
-void AArch64AdvSIMDScalar::transformInstruction(MachineInstr *MI) {
- DEBUG(dbgs() << "Scalar transform: " << *MI);
+void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
+ DEBUG(dbgs() << "Scalar transform: " << MI);
- MachineBasicBlock *MBB = MI->getParent();
- unsigned OldOpc = MI->getOpcode();
+ MachineBasicBlock *MBB = MI.getParent();
+ unsigned OldOpc = MI.getOpcode();
unsigned NewOpc = getTransformOpcode(OldOpc);
assert(OldOpc != NewOpc && "transform an instruction to itself?!");
// Check if we need a copy for the source registers.
- unsigned OrigSrc0 = MI->getOperand(1).getReg();
- unsigned OrigSrc1 = MI->getOperand(2).getReg();
+ unsigned OrigSrc0 = MI.getOperand(1).getReg();
+ unsigned OrigSrc1 = MI.getOperand(2).getReg();
unsigned Src0 = 0, SubReg0;
unsigned Src1 = 0, SubReg1;
+ bool KillSrc0 = false, KillSrc1 = false;
if (!MRI->def_empty(OrigSrc0)) {
MachineRegisterInfo::def_instr_iterator Def =
MRI->def_instr_begin(OrigSrc0);
assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
- Src0 = getSrcFromCopy(&*Def, MRI, SubReg0);
+ MachineOperand *MOSrc0 = getSrcFromCopy(&*Def, MRI, SubReg0);
// If there are no other users of the original source, we can delete
// that instruction.
- if (Src0 && MRI->hasOneNonDBGUse(OrigSrc0)) {
- assert(Src0 && "Can't delete copy w/o a valid original source!");
- Def->eraseFromParent();
- ++NumCopiesDeleted;
+ if (MOSrc0) {
+ Src0 = MOSrc0->getReg();
+ KillSrc0 = MOSrc0->isKill();
+ // Src0 is going to be reused, thus, it cannot be killed anymore.
+ MOSrc0->setIsKill(false);
+ if (MRI->hasOneNonDBGUse(OrigSrc0)) {
+ assert(MOSrc0 && "Can't delete copy w/o a valid original source!");
+ Def->eraseFromParent();
+ ++NumCopiesDeleted;
+ }
}
}
if (!MRI->def_empty(OrigSrc1)) {
MachineRegisterInfo::def_instr_iterator Def =
MRI->def_instr_begin(OrigSrc1);
assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
- Src1 = getSrcFromCopy(&*Def, MRI, SubReg1);
+ MachineOperand *MOSrc1 = getSrcFromCopy(&*Def, MRI, SubReg1);
// If there are no other users of the original source, we can delete
// that instruction.
- if (Src1 && MRI->hasOneNonDBGUse(OrigSrc1)) {
- assert(Src1 && "Can't delete copy w/o a valid original source!");
- Def->eraseFromParent();
- ++NumCopiesDeleted;
+ if (MOSrc1) {
+ Src1 = MOSrc1->getReg();
+ KillSrc1 = MOSrc1->isKill();
+      // Src1 is going to be reused, thus, it cannot be killed anymore.
+ MOSrc1->setIsKill(false);
+ if (MRI->hasOneNonDBGUse(OrigSrc1)) {
+ assert(MOSrc1 && "Can't delete copy w/o a valid original source!");
+ Def->eraseFromParent();
+ ++NumCopiesDeleted;
+ }
}
}
// If we weren't able to reference the original source directly, create a
@@ -337,12 +349,14 @@ void AArch64AdvSIMDScalar::transformInstruction(MachineInstr *MI) {
if (!Src0) {
SubReg0 = 0;
Src0 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
- insertCopy(TII, MI, Src0, OrigSrc0, true);
+ insertCopy(TII, MI, Src0, OrigSrc0, KillSrc0);
+ KillSrc0 = true;
}
if (!Src1) {
SubReg1 = 0;
Src1 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
- insertCopy(TII, MI, Src1, OrigSrc1, true);
+ insertCopy(TII, MI, Src1, OrigSrc1, KillSrc1);
+ KillSrc1 = true;
}
// Create a vreg for the destination.
@@ -353,17 +367,17 @@ void AArch64AdvSIMDScalar::transformInstruction(MachineInstr *MI) {
// For now, all of the new instructions have the same simple three-register
// form, so no need to special case based on what instruction we're
// building.
- BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(NewOpc), Dst)
- .addReg(Src0, getKillRegState(true), SubReg0)
- .addReg(Src1, getKillRegState(true), SubReg1);
+ BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(NewOpc), Dst)
+ .addReg(Src0, getKillRegState(KillSrc0), SubReg0)
+ .addReg(Src1, getKillRegState(KillSrc1), SubReg1);
// Now copy the result back out to a GPR.
// FIXME: Try to avoid this if all uses could actually just use the FPR64
// directly.
- insertCopy(TII, MI, MI->getOperand(0).getReg(), Dst, true);
+ insertCopy(TII, MI, MI.getOperand(0).getReg(), Dst, true);
// Erase the old instruction.
- MI->eraseFromParent();
+ MI.eraseFromParent();
++NumScalarInsnsUsed;
}
@@ -372,8 +386,7 @@ void AArch64AdvSIMDScalar::transformInstruction(MachineInstr *MI) {
bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
bool Changed = false;
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;) {
- MachineInstr *MI = I;
- ++I;
+ MachineInstr &MI = *I++;
if (isProfitableToTransform(MI)) {
transformInstruction(MI);
Changed = true;
@@ -387,6 +400,9 @@ bool AArch64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
bool Changed = false;
DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n");
+ if (skipFunction(*mf.getFunction()))
+ return false;
+
MRI = &mf.getRegInfo();
TII = mf.getSubtarget().getInstrInfo();
diff --git a/lib/Target/AArch64/AArch64AsmPrinter.cpp b/lib/Target/AArch64/AArch64AsmPrinter.cpp
index ada995bad37e..22374f754603 100644
--- a/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -49,6 +49,7 @@ namespace {
class AArch64AsmPrinter : public AsmPrinter {
AArch64MCInstLower MCInstLowering;
StackMaps SM;
+ const AArch64Subtarget *STI;
public:
AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
@@ -83,11 +84,11 @@ public:
bool runOnMachineFunction(MachineFunction &F) override {
AArch64FI = F.getInfo<AArch64FunctionInfo>();
+ STI = static_cast<const AArch64Subtarget*>(&F.getSubtarget());
return AsmPrinter::runOnMachineFunction(F);
}
private:
- MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
bool printAsmRegInClass(const MachineOperand &MO,
@@ -112,6 +113,9 @@ private:
/// \brief Emit the LOHs contained in AArch64FI.
void EmitLOHs();
+ /// Emit instruction to set float register to zero.
+ void EmitFMov0(const MachineInstr &MI);
+
typedef std::map<const MachineInstr *, MCSymbol *> MInstToMCSymbol;
MInstToMCSymbol LOHInstToLabel;
};
@@ -133,19 +137,6 @@ void AArch64AsmPrinter::EmitEndOfAsmFile(Module &M) {
}
}
-MachineLocation
-AArch64AsmPrinter::getDebugValueLocation(const MachineInstr *MI) const {
- MachineLocation Location;
- assert(MI->getNumOperands() == 4 && "Invalid no. of machine operands!");
- // Frame address. Currently handles register +- offset only.
- if (MI->getOperand(0).isReg() && MI->getOperand(1).isImm())
- Location.set(MI->getOperand(0).getReg(), MI->getOperand(1).getImm());
- else {
- DEBUG(dbgs() << "DBG_VALUE instruction ignored! " << *MI << "\n");
- }
- return Location;
-}
-
void AArch64AsmPrinter::EmitLOHs() {
SmallVector<MCSymbol *, 3> MCArgs;
@@ -238,8 +229,7 @@ bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
const TargetRegisterClass *RC,
bool isVector, raw_ostream &O) {
assert(MO.isReg() && "Should only get here with a register!");
- const AArch64RegisterInfo *RI =
- MF->getSubtarget<AArch64Subtarget>().getRegisterInfo();
+ const TargetRegisterInfo *RI = STI->getRegisterInfo();
unsigned Reg = MO.getReg();
unsigned RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
assert(RI->regsOverlap(RegToPrint, Reg));
@@ -404,16 +394,16 @@ void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
unsigned ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
EncodedBytes = 16;
// Materialize the jump address:
- EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVZWi)
+ EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVZXi)
.addReg(ScratchReg)
.addImm((CallTarget >> 32) & 0xFFFF)
.addImm(32));
- EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKWi)
+ EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
.addReg(ScratchReg)
.addReg(ScratchReg)
.addImm((CallTarget >> 16) & 0xFFFF)
.addImm(16));
- EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKWi)
+ EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
.addReg(ScratchReg)
.addReg(ScratchReg)
.addImm(CallTarget & 0xFFFF)
@@ -430,6 +420,40 @@ void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
}
+void AArch64AsmPrinter::EmitFMov0(const MachineInstr &MI) {
+ unsigned DestReg = MI.getOperand(0).getReg();
+ if (STI->hasZeroCycleZeroing()) {
+ // Convert S/D register to corresponding Q register
+ if (AArch64::S0 <= DestReg && DestReg <= AArch64::S31) {
+ DestReg = AArch64::Q0 + (DestReg - AArch64::S0);
+ } else {
+ assert(AArch64::D0 <= DestReg && DestReg <= AArch64::D31);
+ DestReg = AArch64::Q0 + (DestReg - AArch64::D0);
+ }
+ MCInst MOVI;
+ MOVI.setOpcode(AArch64::MOVIv2d_ns);
+ MOVI.addOperand(MCOperand::createReg(DestReg));
+ MOVI.addOperand(MCOperand::createImm(0));
+ EmitToStreamer(*OutStreamer, MOVI);
+ } else {
+ MCInst FMov;
+ switch (MI.getOpcode()) {
+ default: llvm_unreachable("Unexpected opcode");
+ case AArch64::FMOVS0:
+ FMov.setOpcode(AArch64::FMOVWSr);
+ FMov.addOperand(MCOperand::createReg(DestReg));
+ FMov.addOperand(MCOperand::createReg(AArch64::WZR));
+ break;
+ case AArch64::FMOVD0:
+ FMov.setOpcode(AArch64::FMOVXDr);
+ FMov.addOperand(MCOperand::createReg(DestReg));
+ FMov.addOperand(MCOperand::createReg(AArch64::XZR));
+ break;
+ }
+ EmitToStreamer(*OutStreamer, FMov);
+ }
+}
+
// Simple pseudo-instructions have their lowering (with expansion to real
// instructions) auto-generated.
#include "AArch64GenMCPseudoLowering.inc"
@@ -535,6 +559,11 @@ void AArch64AsmPrinter::EmitInstruction(const MachineInstr *MI) {
return;
}
+ case AArch64::FMOVS0:
+ case AArch64::FMOVD0:
+ EmitFMov0(*MI);
+ return;
+
case TargetOpcode::STACKMAP:
return LowerSTACKMAP(*OutStreamer, SM, *MI);
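The FMOVS0/FMOVD0 handling above picks one of two encodings per subtarget. For an FMOVD0 of d3, the emitted assembly would look roughly like this (a hedged sketch; the register is chosen arbitrarily):

  // With FeatureZCZeroing (e.g. Cyclone): zero the whole vector register,
  // since the MOVI of a Q register is a zero-cycle operation there.
  movi v3.2d, #0
  // Without it: the classic integer-to-FP zero move.
  fmov d3, xzr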
diff --git a/lib/Target/AArch64/AArch64BranchRelaxation.cpp b/lib/Target/AArch64/AArch64BranchRelaxation.cpp
index a614f555a4e9..9ec6ae4118a4 100644
--- a/lib/Target/AArch64/AArch64BranchRelaxation.cpp
+++ b/lib/Target/AArch64/AArch64BranchRelaxation.cpp
@@ -177,7 +177,7 @@ void AArch64BranchRelaxation::scanFunction() {
void AArch64BranchRelaxation::computeBlockSize(const MachineBasicBlock &MBB) {
unsigned Size = 0;
for (const MachineInstr &MI : MBB)
- Size += TII->GetInstSizeInBytes(&MI);
+ Size += TII->GetInstSizeInBytes(MI);
BlockInfo[MBB.getNumber()].Size = Size;
}
@@ -195,7 +195,7 @@ unsigned AArch64BranchRelaxation::getInstrOffset(MachineInstr *MI) const {
// Sum instructions before MI in MBB.
for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
assert(I != MBB->end() && "Didn't find MI in its own basic block?");
- Offset += TII->GetInstSizeInBytes(I);
+ Offset += TII->GetInstSizeInBytes(*I);
}
return Offset;
}
@@ -415,12 +415,12 @@ bool AArch64BranchRelaxation::fixupConditionalBranch(MachineInstr *MI) {
// Analyze the branch so we know how to update the successor lists.
MachineBasicBlock *TBB, *FBB;
SmallVector<MachineOperand, 2> Cond;
- TII->AnalyzeBranch(*MBB, TBB, FBB, Cond, false);
+ TII->analyzeBranch(*MBB, TBB, FBB, Cond, false);
MachineBasicBlock *NewBB = splitBlockBeforeInstr(MI);
// No need for the branch to the next block. We're adding an unconditional
// branch to the destination.
- int delta = TII->GetInstSizeInBytes(&MBB->back());
+ int delta = TII->GetInstSizeInBytes(MBB->back());
BlockInfo[MBB->getNumber()].Size -= delta;
MBB->back().eraseFromParent();
// BlockInfo[SplitBB].Offset is wrong temporarily, fixed below
@@ -446,12 +446,12 @@ bool AArch64BranchRelaxation::fixupConditionalBranch(MachineInstr *MI) {
if (MI->getOpcode() == AArch64::Bcc)
invertBccCondition(MIB);
MIB.addMBB(NextBB);
- BlockInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
+ BlockInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(MBB->back());
BuildMI(MBB, DebugLoc(), TII->get(AArch64::B)).addMBB(DestBB);
- BlockInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
+ BlockInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(MBB->back());
// Remove the old conditional branch. It may or may not still be in MBB.
- BlockInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(MI);
+ BlockInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(*MI);
MI->eraseFromParent();
// Finally, keep the block offsets up to date.
@@ -463,12 +463,13 @@ bool AArch64BranchRelaxation::relaxBranchInstructions() {
bool Changed = false;
// Relaxing branches involves creating new basic blocks, so re-eval
// end() for termination.
- for (auto &MBB : *MF) {
- MachineInstr *MI = MBB.getFirstTerminator();
- if (isConditionalBranch(MI->getOpcode()) &&
- !isBlockInRange(MI, getDestBlock(MI),
- getBranchDisplacementBits(MI->getOpcode()))) {
- fixupConditionalBranch(MI);
+ for (MachineFunction::iterator I = MF->begin(); I != MF->end(); ++I) {
+ MachineBasicBlock &MBB = *I;
+ MachineInstr &MI = *MBB.getFirstTerminator();
+ if (isConditionalBranch(MI.getOpcode()) &&
+ !isBlockInRange(&MI, getDestBlock(&MI),
+ getBranchDisplacementBits(MI.getOpcode()))) {
+ fixupConditionalBranch(&MI);
++NumRelaxed;
Changed = true;
}
@@ -513,8 +514,7 @@ bool AArch64BranchRelaxation::runOnMachineFunction(MachineFunction &mf) {
return MadeChange;
}
-/// createAArch64BranchRelaxation - returns an instance of the constpool
-/// island pass.
+/// Returns an instance of the AArch64 Branch Relaxation pass.
FunctionPass *llvm::createAArch64BranchRelaxation() {
return new AArch64BranchRelaxation();
}
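fixupConditionalBranch() deals with an out-of-range conditional branch by inverting its condition and chaining through an unconditional branch, schematically (a hedged sketch; the labels are illustrative):

  // Before: b.eq reaches only +/-1MB and far_target is out of range.
  b.eq far_target
  // After: invertBccCondition() flips the test and an unconditional B
  // (+/-128MB range) carries the long jump.
  b.ne fallthrough
  b    far_target
fallthrough: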
diff --git a/lib/Target/AArch64/AArch64CallLowering.cpp b/lib/Target/AArch64/AArch64CallLowering.cpp
new file mode 100644
index 000000000000..e3522e63c21c
--- /dev/null
+++ b/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -0,0 +1,104 @@
+//===-- llvm/lib/Target/AArch64/AArch64CallLowering.cpp - Call lowering ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the lowering of LLVM calls to machine code calls for
+/// GlobalISel.
+///
+//===----------------------------------------------------------------------===//
+
+#include "AArch64CallLowering.h"
+#include "AArch64ISelLowering.h"
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+
+using namespace llvm;
+
+#ifndef LLVM_BUILD_GLOBAL_ISEL
+#error "This shouldn't be built without GISel"
+#endif
+
+AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
+ : CallLowering(&TLI) {
+}
+
+bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
+ const Value *Val, unsigned VReg) const {
+ MachineInstr *Return = MIRBuilder.buildInstr(AArch64::RET_ReallyLR);
+ assert(Return && "Unable to build a return instruction?!");
+
+ assert(((Val && VReg) || (!Val && !VReg)) && "Return value without a vreg");
+ if (VReg) {
+ assert(Val->getType()->isIntegerTy() && "Type not supported yet");
+ unsigned Size = Val->getType()->getPrimitiveSizeInBits();
+ assert((Size == 64 || Size == 32) && "Size not supported yet");
+ unsigned ResReg = (Size == 32) ? AArch64::W0 : AArch64::X0;
+ // Set the insertion point to be right before Return.
+ MIRBuilder.setInstr(*Return, /* Before */ true);
+ MachineInstr *Copy =
+ MIRBuilder.buildInstr(TargetOpcode::COPY, ResReg, VReg);
+ (void)Copy;
+ assert(Copy->getNextNode() == Return &&
+ "The insertion did not happen where we expected");
+ MachineInstrBuilder(MIRBuilder.getMF(), Return)
+ .addReg(ResReg, RegState::Implicit);
+ }
+ return true;
+}
+
+bool AArch64CallLowering::lowerFormalArguments(
+ MachineIRBuilder &MIRBuilder, const Function::ArgumentListType &Args,
+ const SmallVectorImpl<unsigned> &VRegs) const {
+ MachineFunction &MF = MIRBuilder.getMF();
+ const Function &F = *MF.getFunction();
+
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
+
+ unsigned NumArgs = Args.size();
+ Function::const_arg_iterator CurOrigArg = Args.begin();
+ const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
+ for (unsigned i = 0; i != NumArgs; ++i, ++CurOrigArg) {
+ MVT ValVT = MVT::getVT(CurOrigArg->getType());
+ CCAssignFn *AssignFn =
+ TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
+ bool Res =
+ AssignFn(i, ValVT, ValVT, CCValAssign::Full, ISD::ArgFlagsTy(), CCInfo);
+ assert(!Res && "Call operand has unhandled type");
+ (void)Res;
+ }
+ assert(ArgLocs.size() == Args.size() &&
+ "We have a different number of location and args?!");
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+
+ assert(VA.isRegLoc() && "Not yet implemented");
+ // Transform the arguments in physical registers into virtual ones.
+ MIRBuilder.getMBB().addLiveIn(VA.getLocReg());
+ MIRBuilder.buildInstr(TargetOpcode::COPY, VRegs[i], VA.getLocReg());
+
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unknown loc info!");
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::BCvt:
+ // We don't care about bitcast.
+ break;
+ case CCValAssign::AExt:
+ case CCValAssign::SExt:
+ case CCValAssign::ZExt:
+ // Zero/Sign extend the register.
+ assert(0 && "Not yet implemented");
+ break;
+ }
+ }
+ return true;
+}
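For a function that simply returns its i32 argument, the two hooks above combine to produce generic MIR along these lines (a hedged sketch; the virtual register numbering is illustrative):

  %0 = COPY %w0               ; lowerFormalArguments: live-in physreg -> vreg
  %w0 = COPY %0               ; lowerReturn: result vreg -> return physreg
  RET_ReallyLR implicit %w0   ; W0 marked as implicitly used by the return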
diff --git a/lib/Target/AArch64/AArch64CallLowering.h b/lib/Target/AArch64/AArch64CallLowering.h
new file mode 100644
index 000000000000..411622803461
--- /dev/null
+++ b/lib/Target/AArch64/AArch64CallLowering.h
@@ -0,0 +1,36 @@
+//===-- llvm/lib/Target/AArch64/AArch64CallLowering.h - Call lowering -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes how to lower LLVM calls to machine code calls.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64CALLLOWERING
+#define LLVM_LIB_TARGET_AARCH64_AARCH64CALLLOWERING
+
+#include "llvm/CodeGen/GlobalISel/CallLowering.h"
+
+namespace llvm {
+
+class AArch64TargetLowering;
+
+class AArch64CallLowering: public CallLowering {
+ public:
+ AArch64CallLowering(const AArch64TargetLowering &TLI);
+
+ bool lowerReturn(MachineIRBuilder &MIRBuiler, const Value *Val,
+ unsigned VReg) const override;
+ bool
+ lowerFormalArguments(MachineIRBuilder &MIRBuilder,
+ const Function::ArgumentListType &Args,
+ const SmallVectorImpl<unsigned> &VRegs) const override;
+};
+} // End of namespace llvm
+#endif
diff --git a/lib/Target/AArch64/AArch64CallingConvention.td b/lib/Target/AArch64/AArch64CallingConvention.td
index 388d64ec4e99..178e3971640e 100644
--- a/lib/Target/AArch64/AArch64CallingConvention.td
+++ b/lib/Target/AArch64/AArch64CallingConvention.td
@@ -45,6 +45,9 @@ def CC_AArch64_AAPCS : CallingConv<[
// supported there.
CCIfNest<CCAssignToReg<[X18]>>,
+ // Pass SwiftSelf in a callee saved register.
+ CCIfSwiftSelf<CCIfType<[i64], CCAssignToRegWithShadow<[X20], [W20]>>>,
+
CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,
// Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
@@ -86,6 +89,8 @@ def RetCC_AArch64_AAPCS : CallingConv<[
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,
+ CCIfSwiftError<CCIfType<[i64], CCAssignToRegWithShadow<[X19], [W19]>>>,
+
// Big endian vectors must be passed as if they were 1-element vectors so that
// their lanes are in a consistent order.
CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
@@ -126,6 +131,12 @@ def CC_AArch64_DarwinPCS : CallingConv<[
// slot is 64-bit.
CCIfByVal<CCPassByVal<8, 8>>,
+ // Pass SwiftSelf in a callee saved register.
+ CCIfSwiftSelf<CCIfType<[i64], CCAssignToRegWithShadow<[X20], [W20]>>>,
+
+ // A SwiftError is passed in X19.
+ CCIfSwiftError<CCIfType<[i64], CCAssignToRegWithShadow<[X19], [W19]>>>,
+
CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,
// Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
@@ -270,6 +281,9 @@ def CSR_AArch64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
// case)
def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>;
+def CSR_AArch64_AAPCS_SwiftError
+ : CalleeSavedRegs<(sub CSR_AArch64_AAPCS, X19)>;
+
// The function used by Darwin to obtain the address of a thread-local variable
// guarantees more than a normal AAPCS function. x16 and x17 are used on the
// fast path for calculation, but other registers except X0 (argument/return)
@@ -310,3 +324,7 @@ def CSR_AArch64_AllRegs
(sequence "Q%u", 0, 31))>;
def CSR_AArch64_NoRegs : CalleeSavedRegs<(add)>;
+
+def CSR_AArch64_RT_MostRegs : CalleeSavedRegs<(add CSR_AArch64_AAPCS,
+ (sequence "X%u", 9, 15))>;
+
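Taken together, the Swift additions pin the context and the error slot to stable registers across a call (a hedged IR sketch; the types and names are illustrative):

  declare swiftcc void @callee(i8* swiftself, i8** swifterror)
  ; swiftself  -> x20: callee-saved, so the context survives the call
  ; swifterror -> x19: excluded from CSR_AArch64_AAPCS_SwiftError above,
  ;               so a callee may legitimately rewrite it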
diff --git a/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp b/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
index 9310ac4a44a2..011a03622ba5 100644
--- a/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
+++ b/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
@@ -39,6 +39,9 @@ struct LDTLSCleanup : public MachineFunctionPass {
LDTLSCleanup() : MachineFunctionPass(ID) {}
bool runOnMachineFunction(MachineFunction &MF) override {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
if (AFI->getNumLocalDynamicTLSAccesses() < 2) {
// No point folding accesses if there isn't at least two.
@@ -69,9 +72,9 @@ struct LDTLSCleanup : public MachineFunctionPass {
break;
if (TLSBaseAddrReg)
- I = replaceTLSBaseAddrCall(I, TLSBaseAddrReg);
+ I = replaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
else
- I = setRegister(I, &TLSBaseAddrReg);
+ I = setRegister(*I, &TLSBaseAddrReg);
Changed = true;
break;
default:
@@ -89,27 +92,27 @@ struct LDTLSCleanup : public MachineFunctionPass {
// Replace the TLS_base_addr instruction I with a copy from
// TLSBaseAddrReg, returning the new instruction.
- MachineInstr *replaceTLSBaseAddrCall(MachineInstr *I,
+ MachineInstr *replaceTLSBaseAddrCall(MachineInstr &I,
unsigned TLSBaseAddrReg) {
- MachineFunction *MF = I->getParent()->getParent();
+ MachineFunction *MF = I.getParent()->getParent();
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
// Insert a Copy from TLSBaseAddrReg to x0, which is where the rest of the
// code sequence assumes the address will be.
- MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
- TII->get(TargetOpcode::COPY),
- AArch64::X0).addReg(TLSBaseAddrReg);
+ MachineInstr *Copy = BuildMI(*I.getParent(), I, I.getDebugLoc(),
+ TII->get(TargetOpcode::COPY), AArch64::X0)
+ .addReg(TLSBaseAddrReg);
// Erase the TLS_base_addr instruction.
- I->eraseFromParent();
+ I.eraseFromParent();
return Copy;
}
// Create a virtual register in *TLSBaseAddrReg, and populate it by
// inserting a copy instruction after I. Returns the new instruction.
- MachineInstr *setRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
- MachineFunction *MF = I->getParent()->getParent();
+ MachineInstr *setRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
+ MachineFunction *MF = I.getParent()->getParent();
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
// Create a virtual register for the TLS base address.
@@ -118,7 +121,7 @@ struct LDTLSCleanup : public MachineFunctionPass {
// Insert a copy from X0 to TLSBaseAddrReg for later.
MachineInstr *Copy =
- BuildMI(*I->getParent(), ++I->getIterator(), I->getDebugLoc(),
+ BuildMI(*I.getParent(), ++I.getIterator(), I.getDebugLoc(),
TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
.addReg(AArch64::X0);
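The two helpers implement a cache-and-reuse rewrite of the TLS base address (a hedged before/after sketch; the pseudo and register names follow the code above):

  // Before: every access re-runs the TLS descriptor call sequence.
  x0 = TLSDESC_CALLSEQ @var      // first access
  x0 = TLSDESC_CALLSEQ @var      // redundant second access
  // After: setRegister() stashes the first result in a virtual register
  // and replaceTLSBaseAddrCall() turns later calls into plain copies.
  x0 = TLSDESC_CALLSEQ @var
  %base = COPY x0
  x0 = COPY %base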
diff --git a/lib/Target/AArch64/AArch64CollectLOH.cpp b/lib/Target/AArch64/AArch64CollectLOH.cpp
index 78c239b11ef3..5eecb3a86856 100644
--- a/lib/Target/AArch64/AArch64CollectLOH.cpp
+++ b/lib/Target/AArch64/AArch64CollectLOH.cpp
@@ -179,6 +179,11 @@ struct AArch64CollectLOH : public MachineFunctionPass {
bool runOnMachineFunction(MachineFunction &MF) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return AARCH64_COLLECT_LOH_NAME;
}
@@ -623,10 +628,7 @@ static void computeADRP(const InstrToInstrs &UseToDefs,
continue;
}
DEBUG(dbgs() << "Record AdrpAdrp:\n" << *L2 << '\n' << *L1 << '\n');
- SmallVector<const MachineInstr *, 2> Args;
- Args.push_back(L2);
- Args.push_back(L1);
- AArch64FI.addLOHDirective(MCLOH_AdrpAdrp, Args);
+ AArch64FI.addLOHDirective(MCLOH_AdrpAdrp, {L2, L1});
++NumADRPSimpleCandidate;
}
#ifdef DEBUG
@@ -760,13 +762,9 @@ static bool registerADRCandidate(const MachineInstr &Use,
"ADD already involved in LOH.");
DEBUG(dbgs() << "Record AdrpAdd\n" << Def << '\n' << Use << '\n');
- SmallVector<const MachineInstr *, 2> Args;
- Args.push_back(&Def);
- Args.push_back(&Use);
-
- AArch64FI.addLOHDirective(Use.getOpcode() == AArch64::ADDXri ? MCLOH_AdrpAdd
- : MCLOH_AdrpLdrGot,
- Args);
+ AArch64FI.addLOHDirective(
+ Use.getOpcode() == AArch64::ADDXri ? MCLOH_AdrpAdd : MCLOH_AdrpLdrGot,
+ {&Def, &Use});
return true;
}
@@ -1036,6 +1034,9 @@ static void collectInvolvedReg(const MachineFunction &MF, MapRegToId &RegToId,
}
bool AArch64CollectLOH::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
const MachineDominatorTree *MDT = &getAnalysis<MachineDominatorTree>();
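The LOH directives recorded here annotate related instruction pairs so the Mach-O linker may contract them. For MCLOH_AdrpAdd the annotated pair looks like this (a hedged sketch; the symbol name is illustrative):

  adrp x8, _var@PAGE          // Def
  add  x8, x8, _var@PAGEOFF   // Use: .loh AdrpAdd lets the linker fuse the
                              // pair into a single adr when the target is near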
diff --git a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
index fc27bfee73d1..8fff381d391e 100644
--- a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
@@ -70,7 +70,6 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -144,10 +143,18 @@ MachineInstr *AArch64ConditionOptimizer::findSuitableCompare(
if (I->getOpcode() != AArch64::Bcc)
return nullptr;
+  // Since we may modify the cmp in this MBB, make sure NZCV does not live out.
+ for (auto SuccBB : MBB->successors())
+ if (SuccBB->isLiveIn(AArch64::NZCV))
+ return nullptr;
+
// Now find the instruction controlling the terminator.
for (MachineBasicBlock::iterator B = MBB->begin(); I != B;) {
--I;
assert(!I->isTerminator() && "Spurious terminator");
+ // Check if there is any use of NZCV between CMP and Bcc.
+ if (I->readsRegister(AArch64::NZCV))
+ return nullptr;
switch (I->getOpcode()) {
// cmp is an alias for subs with a dead destination register.
case AArch64::SUBSWri:
@@ -166,7 +173,7 @@ MachineInstr *AArch64ConditionOptimizer::findSuitableCompare(
DEBUG(dbgs() << "Destination of cmp is not dead, " << *I << '\n');
return nullptr;
}
- return I;
+ return &*I;
}
// Prevent false positive case like:
// cmp w19, #0
@@ -268,13 +275,13 @@ void AArch64ConditionOptimizer::modifyCmp(MachineInstr *CmpMI,
// The fact that this comparison was picked ensures that it's related to the
// first terminator instruction.
- MachineInstr *BrMI = MBB->getFirstTerminator();
+ MachineInstr &BrMI = *MBB->getFirstTerminator();
// Change condition in branch instruction.
- BuildMI(*MBB, BrMI, BrMI->getDebugLoc(), TII->get(AArch64::Bcc))
+ BuildMI(*MBB, BrMI, BrMI.getDebugLoc(), TII->get(AArch64::Bcc))
.addImm(Cmp)
- .addOperand(BrMI->getOperand(1));
- BrMI->eraseFromParent();
+ .addOperand(BrMI.getOperand(1));
+ BrMI.eraseFromParent();
MBB->updateTerminator();
@@ -311,6 +318,9 @@ bool AArch64ConditionOptimizer::adjustTo(MachineInstr *CmpMI,
bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
<< "********** Function: " << MF.getName() << '\n');
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
TII = MF.getSubtarget().getInstrInfo();
DomTree = &getAnalysis<MachineDominatorTree>();
MRI = &MF.getRegInfo();
@@ -327,7 +337,7 @@ bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) {
SmallVector<MachineOperand, 4> HeadCond;
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
- if (TII->AnalyzeBranch(*HBB, TBB, FBB, HeadCond)) {
+ if (TII->analyzeBranch(*HBB, TBB, FBB, HeadCond)) {
continue;
}
@@ -338,7 +348,7 @@ bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) {
SmallVector<MachineOperand, 4> TrueCond;
MachineBasicBlock *TBB_TBB = nullptr, *TBB_FBB = nullptr;
- if (TII->AnalyzeBranch(*TBB, TBB_TBB, TBB_FBB, TrueCond)) {
+ if (TII->analyzeBranch(*TBB, TBB_TBB, TBB_FBB, TrueCond)) {
continue;
}
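The new NZCV checks matter because the pass rewrites the cmp immediate and condition code, which changes the produced flags; any other flag reader would observe the difference (a hedged illustration):

  cmp  w0, #5
  csel w1, w2, w3, lt   // extra NZCV reader between cmp and branch:
  b.lt target           // findSuitableCompare() now bails out (nullptr)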
diff --git a/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/lib/Target/AArch64/AArch64ConditionalCompares.cpp
index df1320fbd4c9..e1b0dc724b39 100644
--- a/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ b/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -18,13 +18,10 @@
//===----------------------------------------------------------------------===//
#include "AArch64.h"
-#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -307,7 +304,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
case AArch64::CBNZW:
case AArch64::CBNZX:
// These can be converted into a ccmp against #0.
- return I;
+ return &*I;
}
++NumCmpTermRejs;
DEBUG(dbgs() << "Flags not used by terminator: " << *I);
@@ -338,7 +335,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
case AArch64::ADDSWrr:
case AArch64::ADDSXrr:
if (isDeadDef(I->getOperand(0).getReg()))
- return I;
+ return &*I;
DEBUG(dbgs() << "Can't convert compare with live destination: " << *I);
++NumLiveDstRejs;
return nullptr;
@@ -346,12 +343,12 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) {
case AArch64::FCMPDrr:
case AArch64::FCMPESrr:
case AArch64::FCMPEDrr:
- return I;
+ return &*I;
}
// Check for flag reads and clobbers.
MIOperands::PhysRegInfo PRI =
- MIOperands(I).analyzePhysReg(AArch64::NZCV, TRI);
+ MIOperands(*I).analyzePhysReg(AArch64::NZCV, TRI);
if (PRI.Read) {
// The ccmp doesn't produce exactly the same flags as the original
@@ -496,7 +493,7 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
// The branch we're looking to eliminate must be analyzable.
HeadCond.clear();
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
- if (TII->AnalyzeBranch(*Head, TBB, FBB, HeadCond)) {
+ if (TII->analyzeBranch(*Head, TBB, FBB, HeadCond)) {
DEBUG(dbgs() << "Head branch not analyzable.\n");
++NumHeadBranchRejs;
return false;
@@ -524,7 +521,7 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) {
CmpBBCond.clear();
TBB = FBB = nullptr;
- if (TII->AnalyzeBranch(*CmpBB, TBB, FBB, CmpBBCond)) {
+ if (TII->analyzeBranch(*CmpBB, TBB, FBB, CmpBBCond)) {
DEBUG(dbgs() << "CmpBB branch not analyzable.\n");
++NumCmpBranchRejs;
return false;
@@ -759,7 +756,6 @@ void initializeAArch64ConditionalComparesPass(PassRegistry &);
INITIALIZE_PASS_BEGIN(AArch64ConditionalCompares, "aarch64-ccmp",
"AArch64 CCMP Pass", false, false)
-INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
INITIALIZE_PASS_END(AArch64ConditionalCompares, "aarch64-ccmp",
@@ -770,7 +766,6 @@ FunctionPass *llvm::createAArch64ConditionalCompares() {
}
void AArch64ConditionalCompares::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<MachineBranchProbabilityInfo>();
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
@@ -849,9 +844,9 @@ bool AArch64ConditionalCompares::shouldConvert() {
// Instruction depths can be computed for all trace instructions above CmpBB.
unsigned HeadDepth =
- Trace.getInstrCycles(CmpConv.Head->getFirstTerminator()).Depth;
+ Trace.getInstrCycles(*CmpConv.Head->getFirstTerminator()).Depth;
unsigned CmpBBDepth =
- Trace.getInstrCycles(CmpConv.CmpBB->getFirstTerminator()).Depth;
+ Trace.getInstrCycles(*CmpConv.CmpBB->getFirstTerminator()).Depth;
DEBUG(dbgs() << "Head depth: " << HeadDepth
<< "\nCmpBB depth: " << CmpBBDepth << '\n');
if (CmpBBDepth > HeadDepth + DelayLimit) {
@@ -891,6 +886,9 @@ bool AArch64ConditionalCompares::tryConvert(MachineBasicBlock *MBB) {
bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
<< "********** Function: " << MF.getName() << '\n');
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
TII = MF.getSubtarget().getInstrInfo();
TRI = MF.getSubtarget().getRegisterInfo();
SchedModel = MF.getSubtarget().getSchedModel();
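For reference, the conditional-compare form this pass builds chains two tests through the single flag register (a hedged sketch; the values are illustrative):

  // if (w0 == 1 && w1 == 2) goto both_true;
  cmp  w0, #1
  ccmp w1, #2, #0, eq   // compares only if 'eq' held, else sets flags to #0
  b.eq both_true        // taken only when both comparisons succeeded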
diff --git a/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp b/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
index 576cf4a74167..7a6f7669db5f 100644
--- a/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
+++ b/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
@@ -48,6 +48,11 @@ public:
bool runOnMachineFunction(MachineFunction &F) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override { return AARCH64_DEAD_REG_DEF_NAME; }
void getAnalysisUsage(AnalysisUsage &AU) const override {
@@ -88,6 +93,12 @@ bool AArch64DeadRegisterDefinitions::processMachineBasicBlock(
DEBUG(dbgs() << " Ignoring, operand is frame index\n");
continue;
}
+ if (MI.definesRegister(AArch64::XZR) || MI.definesRegister(AArch64::WZR)) {
+ // It is not allowed to write to the same register (not even the zero
+ // register) twice in a single instruction.
+ DEBUG(dbgs() << " Ignoring, XZR or WZR already used by the instruction\n");
+ continue;
+ }
for (int i = 0, e = MI.getDesc().getNumDefs(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() && MO.isDead() && MO.isDef()) {
@@ -100,7 +111,7 @@ bool AArch64DeadRegisterDefinitions::processMachineBasicBlock(
continue;
}
// Don't change the register if there's an implicit def of a subreg or
- // supperreg.
+ // superreg.
if (implicitlyDefinesOverlappingReg(MO.getReg(), MI)) {
DEBUG(dbgs() << " Ignoring, implicitly defines overlap reg.\n");
continue;
@@ -123,6 +134,8 @@ bool AArch64DeadRegisterDefinitions::processMachineBasicBlock(
MO.setReg(NewReg);
DEBUG(MI.print(dbgs()));
++NumDeadDefsReplaced;
+ // Only replace one dead register; see the zero-register check above.
+ break;
}
}
}
@@ -136,6 +149,9 @@ bool AArch64DeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
DEBUG(dbgs() << "***** AArch64DeadRegisterDefinitions *****\n");
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
for (auto &MBB : MF)
if (processMachineBasicBlock(MBB))
Changed = true;
diff --git a/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index d24e42a93763..5e477d39e074 100644
--- a/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -17,6 +17,7 @@
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/MathExtras.h"
@@ -46,9 +47,18 @@ public:
private:
bool expandMBB(MachineBasicBlock &MBB);
- bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
+ bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI);
bool expandMOVImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
unsigned BitSize);
+
+ bool expandCMP_SWAP(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ unsigned LdarOp, unsigned StlrOp, unsigned CmpOp,
+ unsigned ExtendImm, unsigned ZeroReg,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandCMP_SWAP_128(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI);
};
char AArch64ExpandPseudo::ID = 0;
}
@@ -403,9 +413,17 @@ bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned BitSize) {
MachineInstr &MI = *MBBI;
+ unsigned DstReg = MI.getOperand(0).getReg();
uint64_t Imm = MI.getOperand(1).getImm();
const unsigned Mask = 0xFFFF;
+ if (DstReg == AArch64::XZR || DstReg == AArch64::WZR) {
+ // Useless def, and we don't want to risk creating an invalid ORR (which
+ // would really write to sp).
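+ // (In the ORR-immediate encoding, Rd == 31 names SP rather than the zero
+ // register, so the usual expansion would write to sp instead of xzr.)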
+ MI.eraseFromParent();
+ return true;
+ }
+
// Try a MOVI instruction (aka ORR-immediate with the zero register).
uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
uint64_t Encoding;
@@ -531,7 +549,6 @@ bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
LastShift = (TZ / 16) * 16;
}
unsigned Imm16 = (Imm >> Shift) & Mask;
- unsigned DstReg = MI.getOperand(0).getReg();
bool DstIsDead = MI.getOperand(0).isDead();
MachineInstrBuilder MIB1 =
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(FirstOpc))
@@ -572,10 +589,178 @@ bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
return true;
}
+static void addPostLoopLiveIns(MachineBasicBlock *MBB, LivePhysRegs &LiveRegs) {
+ for (auto I = LiveRegs.begin(); I != LiveRegs.end(); ++I)
+ MBB->addLiveIn(*I);
+}
+
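+// Expand a CMP_SWAP pseudo into an explicit load-exclusive/store-exclusive
+// loop. A sketch of the control flow built below:
+//   MBB -> LoadCmpBB; LoadCmpBB -> StoreBB or DoneBB;
+//   StoreBB -> LoadCmpBB (retry) or DoneBB
+// DoneBB receives everything that followed the pseudo in the original block.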
+bool AArch64ExpandPseudo::expandCMP_SWAP(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned LdarOp,
+ unsigned StlrOp, unsigned CmpOp, unsigned ExtendImm, unsigned ZeroReg,
+ MachineBasicBlock::iterator &NextMBBI) {
+ MachineInstr &MI = *MBBI;
+ DebugLoc DL = MI.getDebugLoc();
+ MachineOperand &Dest = MI.getOperand(0);
+ unsigned StatusReg = MI.getOperand(1).getReg();
+ MachineOperand &Addr = MI.getOperand(2);
+ MachineOperand &Desired = MI.getOperand(3);
+ MachineOperand &New = MI.getOperand(4);
+
+ LivePhysRegs LiveRegs(&TII->getRegisterInfo());
+ LiveRegs.addLiveOuts(MBB);
+ for (auto I = std::prev(MBB.end()); I != MBBI; --I)
+ LiveRegs.stepBackward(*I);
+
+ MachineFunction *MF = MBB.getParent();
+ auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+ MF->insert(++MBB.getIterator(), LoadCmpBB);
+ MF->insert(++LoadCmpBB->getIterator(), StoreBB);
+ MF->insert(++StoreBB->getIterator(), DoneBB);
+
+ // .Lloadcmp:
+ // ldaxr xDest, [xAddr]
+ // cmp xDest, xDesired
+ // b.ne .Ldone
+ LoadCmpBB->addLiveIn(Addr.getReg());
+ LoadCmpBB->addLiveIn(Dest.getReg());
+ LoadCmpBB->addLiveIn(Desired.getReg());
+ addPostLoopLiveIns(LoadCmpBB, LiveRegs);
+
+ BuildMI(LoadCmpBB, DL, TII->get(LdarOp), Dest.getReg())
+ .addReg(Addr.getReg());
+ BuildMI(LoadCmpBB, DL, TII->get(CmpOp), ZeroReg)
+ .addReg(Dest.getReg(), getKillRegState(Dest.isDead()))
+ .addOperand(Desired)
+ .addImm(ExtendImm);
+ BuildMI(LoadCmpBB, DL, TII->get(AArch64::Bcc))
+ .addImm(AArch64CC::NE)
+ .addMBB(DoneBB)
+ .addReg(AArch64::NZCV, RegState::Implicit | RegState::Kill);
+ LoadCmpBB->addSuccessor(DoneBB);
+ LoadCmpBB->addSuccessor(StoreBB);
+
+ // .Lstore:
+ // stlxr wStatus, xNew, [xAddr]
+ // cbnz wStatus, .Lloadcmp
+ StoreBB->addLiveIn(Addr.getReg());
+ StoreBB->addLiveIn(New.getReg());
+ addPostLoopLiveIns(StoreBB, LiveRegs);
+
+ BuildMI(StoreBB, DL, TII->get(StlrOp), StatusReg)
+ .addOperand(New)
+ .addOperand(Addr);
+ BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
+ .addReg(StatusReg, RegState::Kill)
+ .addMBB(LoadCmpBB);
+ StoreBB->addSuccessor(LoadCmpBB);
+ StoreBB->addSuccessor(DoneBB);
+
+ DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
+ DoneBB->transferSuccessors(&MBB);
+ addPostLoopLiveIns(DoneBB, LiveRegs);
+
+ MBB.addSuccessor(LoadCmpBB);
+
+ NextMBBI = MBB.end();
+ MI.eraseFromParent();
+ return true;
+}
+
+bool AArch64ExpandPseudo::expandCMP_SWAP_128(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI) {
+
+ MachineInstr &MI = *MBBI;
+ DebugLoc DL = MI.getDebugLoc();
+ MachineOperand &DestLo = MI.getOperand(0);
+ MachineOperand &DestHi = MI.getOperand(1);
+ unsigned StatusReg = MI.getOperand(2).getReg();
+ MachineOperand &Addr = MI.getOperand(3);
+ MachineOperand &DesiredLo = MI.getOperand(4);
+ MachineOperand &DesiredHi = MI.getOperand(5);
+ MachineOperand &NewLo = MI.getOperand(6);
+ MachineOperand &NewHi = MI.getOperand(7);
+
+ LivePhysRegs LiveRegs(&TII->getRegisterInfo());
+ LiveRegs.addLiveOuts(MBB);
+ for (auto I = std::prev(MBB.end()); I != MBBI; --I)
+ LiveRegs.stepBackward(*I);
+
+ MachineFunction *MF = MBB.getParent();
+ auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+ MF->insert(++MBB.getIterator(), LoadCmpBB);
+ MF->insert(++LoadCmpBB->getIterator(), StoreBB);
+ MF->insert(++StoreBB->getIterator(), DoneBB);
+
+ // .Lloadcmp:
+ // ldaxp xDestLo, xDestHi, [xAddr]
+ // cmp xDestLo, xDesiredLo
+ // sbcs xDestHi, xDesiredHi
+ // b.ne .Ldone
+ LoadCmpBB->addLiveIn(Addr.getReg());
+ LoadCmpBB->addLiveIn(DestLo.getReg());
+ LoadCmpBB->addLiveIn(DestHi.getReg());
+ LoadCmpBB->addLiveIn(DesiredLo.getReg());
+ LoadCmpBB->addLiveIn(DesiredHi.getReg());
+ addPostLoopLiveIns(LoadCmpBB, LiveRegs);
+
+ BuildMI(LoadCmpBB, DL, TII->get(AArch64::LDAXPX))
+ .addReg(DestLo.getReg(), RegState::Define)
+ .addReg(DestHi.getReg(), RegState::Define)
+ .addReg(Addr.getReg());
+ BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
+ .addReg(DestLo.getReg(), getKillRegState(DestLo.isDead()))
+ .addOperand(DesiredLo)
+ .addImm(0);
+ BuildMI(LoadCmpBB, DL, TII->get(AArch64::SBCSXr), AArch64::XZR)
+ .addReg(DestHi.getReg(), getKillRegState(DestHi.isDead()))
+ .addOperand(DesiredHi);
+ BuildMI(LoadCmpBB, DL, TII->get(AArch64::Bcc))
+ .addImm(AArch64CC::NE)
+ .addMBB(DoneBB)
+ .addReg(AArch64::NZCV, RegState::Implicit | RegState::Kill);
+ LoadCmpBB->addSuccessor(DoneBB);
+ LoadCmpBB->addSuccessor(StoreBB);
+
+ // .Lstore:
+ // stlxp wStatus, xNewLo, xNewHi, [xAddr]
+ // cbnz wStatus, .Lloadcmp
+ StoreBB->addLiveIn(Addr.getReg());
+ StoreBB->addLiveIn(NewLo.getReg());
+ StoreBB->addLiveIn(NewHi.getReg());
+ addPostLoopLiveIns(StoreBB, LiveRegs);
+ BuildMI(StoreBB, DL, TII->get(AArch64::STLXPX), StatusReg)
+ .addOperand(NewLo)
+ .addOperand(NewHi)
+ .addOperand(Addr);
+ BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
+ .addReg(StatusReg, RegState::Kill)
+ .addMBB(LoadCmpBB);
+ StoreBB->addSuccessor(LoadCmpBB);
+ StoreBB->addSuccessor(DoneBB);
+
+ DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
+ DoneBB->transferSuccessors(&MBB);
+ addPostLoopLiveIns(DoneBB, LiveRegs);
+
+ MBB.addSuccessor(LoadCmpBB);
+
+ NextMBBI = MBB.end();
+ MI.eraseFromParent();
+ return true;
+}
+
/// \brief If MBBI references a pseudo instruction that should be expanded here,
/// do the expansion and return true. Otherwise return false.
bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI) {
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI) {
MachineInstr &MI = *MBBI;
unsigned Opcode = MI.getOpcode();
switch (Opcode) {
@@ -717,6 +902,28 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
MI.eraseFromParent();
return true;
}
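+  // For the sub-word widths below, the comparison uses an extending SUBS
+  // (UXTB/UXTH) so only the loaded byte/halfword is compared with Desired;
+  // the word and doubleword widths compare full registers with a plain
+  // LSL #0 shifted SUBS.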
+ case AArch64::CMP_SWAP_8:
+ return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRB, AArch64::STLXRB,
+ AArch64::SUBSWrx,
+ AArch64_AM::getArithExtendImm(AArch64_AM::UXTB, 0),
+ AArch64::WZR, NextMBBI);
+ case AArch64::CMP_SWAP_16:
+ return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRH, AArch64::STLXRH,
+ AArch64::SUBSWrx,
+ AArch64_AM::getArithExtendImm(AArch64_AM::UXTH, 0),
+ AArch64::WZR, NextMBBI);
+ case AArch64::CMP_SWAP_32:
+ return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRW, AArch64::STLXRW,
+ AArch64::SUBSWrs,
+ AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),
+ AArch64::WZR, NextMBBI);
+ case AArch64::CMP_SWAP_64:
+ return expandCMP_SWAP(MBB, MBBI,
+ AArch64::LDAXRX, AArch64::STLXRX, AArch64::SUBSXrs,
+ AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),
+ AArch64::XZR, NextMBBI);
+ case AArch64::CMP_SWAP_128:
+ return expandCMP_SWAP_128(MBB, MBBI, NextMBBI);
}
return false;
}
@@ -729,7 +936,7 @@ bool AArch64ExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
while (MBBI != E) {
MachineBasicBlock::iterator NMBBI = std::next(MBBI);
- Modified |= expandMI(MBB, MBBI);
+ Modified |= expandMI(MBB, MBBI, NMBBI);
MBBI = NMBBI;
}
diff --git a/lib/Target/AArch64/AArch64FastISel.cpp b/lib/Target/AArch64/AArch64FastISel.cpp
index 0ac4b39b0357..e2ab7ab79be1 100644
--- a/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/lib/Target/AArch64/AArch64FastISel.cpp
@@ -37,7 +37,6 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/CommandLine.h"
using namespace llvm;
namespace {
@@ -144,8 +143,8 @@ private:
bool computeCallAddress(const Value *V, Address &Addr);
bool simplifyAddress(Address &Addr, MVT VT);
void addLoadStoreOperands(Address &Addr, const MachineInstrBuilder &MIB,
- unsigned Flags, unsigned ScaleFactor,
- MachineMemOperand *MMO);
+ MachineMemOperand::Flags Flags,
+ unsigned ScaleFactor, MachineMemOperand *MMO);
bool isMemCpySmall(uint64_t Len, unsigned Alignment);
bool tryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
unsigned Alignment);
@@ -439,9 +438,6 @@ unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {
.addReg(ADRPReg)
.addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGEOFF |
AArch64II::MO_NC);
- } else if (OpFlags & AArch64II::MO_CONSTPOOL) {
- // We can't handle addresses loaded from a constant pool quickly yet.
- return 0;
} else {
// ADRP + ADDX
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
@@ -555,10 +551,9 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
// Iterate through the GEP folding the constants into offsets where
// we can.
- gep_type_iterator GTI = gep_type_begin(U);
- for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
- ++i, ++GTI) {
- const Value *Op = *i;
+ for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
+ GTI != E; ++GTI) {
+ const Value *Op = GTI.getOperand();
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
@@ -947,10 +942,7 @@ bool AArch64FastISel::isValueAvailable(const Value *V) const {
return true;
const auto *I = cast<Instruction>(V);
- if (FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB)
- return true;
-
- return false;
+ return FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB;
}
bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
@@ -1048,7 +1040,7 @@ bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
void AArch64FastISel::addLoadStoreOperands(Address &Addr,
const MachineInstrBuilder &MIB,
- unsigned Flags,
+ MachineMemOperand::Flags Flags,
unsigned ScaleFactor,
MachineMemOperand *MMO) {
int64_t Offset = Addr.getOffset() / ScaleFactor;
@@ -1612,8 +1604,8 @@ unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
unsigned AArch64FastISel::emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT,
unsigned LHSReg, bool LHSIsKill,
uint64_t Imm) {
- assert((ISD::AND + 1 == ISD::OR) && (ISD::AND + 2 == ISD::XOR) &&
- "ISD nodes are not consecutive!");
+ static_assert((ISD::AND + 1 == ISD::OR) && (ISD::AND + 2 == ISD::XOR),
+ "ISD nodes are not consecutive!");
static const unsigned OpcTable[3][2] = {
{ AArch64::ANDWri, AArch64::ANDXri },
{ AArch64::ORRWri, AArch64::ORRXri },
@@ -1659,8 +1651,8 @@ unsigned AArch64FastISel::emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT,
unsigned LHSReg, bool LHSIsKill,
unsigned RHSReg, bool RHSIsKill,
uint64_t ShiftImm) {
- assert((ISD::AND + 1 == ISD::OR) && (ISD::AND + 2 == ISD::XOR) &&
- "ISD nodes are not consecutive!");
+ static_assert((ISD::AND + 1 == ISD::OR) && (ISD::AND + 2 == ISD::XOR),
+ "ISD nodes are not consecutive!");
static const unsigned OpcTable[3][2] = {
{ AArch64::ANDWrs, AArch64::ANDXrs },
{ AArch64::ORRWrs, AArch64::ORRXrs },
@@ -1904,6 +1896,21 @@ bool AArch64FastISel::selectLoad(const Instruction *I) {
cast<LoadInst>(I)->isAtomic())
return false;
+ const Value *SV = I->getOperand(0);
+ if (TLI.supportSwiftError()) {
+ // Swifterror values can come from either a function parameter with
+ // swifterror attribute or an alloca with swifterror attribute.
+ if (const Argument *Arg = dyn_cast<Argument>(SV)) {
+ if (Arg->hasSwiftErrorAttr())
+ return false;
+ }
+
+ if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
+ if (Alloca->isSwiftError())
+ return false;
+ }
+ }
+
// See if we can handle this address.
Address Addr;
if (!computeAddress(I->getOperand(0), Addr, I->getType()))
@@ -2068,6 +2075,21 @@ bool AArch64FastISel::selectStore(const Instruction *I) {
cast<StoreInst>(I)->isAtomic())
return false;
+ const Value *PtrV = I->getOperand(1);
+ if (TLI.supportSwiftError()) {
+ // Swifterror values can come from either a function parameter with
+ // swifterror attribute or an alloca with swifterror attribute.
+ if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
+ if (Arg->hasSwiftErrorAttr())
+ return false;
+ }
+
+ if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
+ if (Alloca->isSwiftError())
+ return false;
+ }
+ }
+
// Get the value to be stored into a register. Use the zero register directly
// when possible to avoid an unnecessary copy and a wasted register.
unsigned SrcReg = 0;
@@ -2813,6 +2835,8 @@ bool AArch64FastISel::fastLowerArguments() {
if (F->getAttributes().hasAttribute(Idx, Attribute::ByVal) ||
F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
+ F->getAttributes().hasAttribute(Idx, Attribute::SwiftSelf) ||
+ F->getAttributes().hasAttribute(Idx, Attribute::SwiftError) ||
F->getAttributes().hasAttribute(Idx, Attribute::Nest))
return false;
@@ -3064,7 +3088,8 @@ bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {
return false;
for (auto Flag : CLI.OutFlags)
- if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal())
+ if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal() ||
+ Flag.isSwiftSelf() || Flag.isSwiftError())
return false;
// Set up the argument vectors.
@@ -3646,6 +3671,10 @@ bool AArch64FastISel::selectRet(const Instruction *I) {
if (F.isVarArg())
return false;
+ if (TLI.supportSwiftError() &&
+ F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
+ return false;
+
if (TLI.supportSplitCSR(FuncInfo.MF))
return false;
@@ -4814,18 +4843,18 @@ bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
// Keep a running tab of the total offset to coalesce multiple N = N + Offset
// into a single N = N + TotalOffset.
uint64_t TotalOffs = 0;
- Type *Ty = I->getOperand(0)->getType();
MVT VT = TLI.getPointerTy(DL);
- for (auto OI = std::next(I->op_begin()), E = I->op_end(); OI != E; ++OI) {
- const Value *Idx = *OI;
- if (auto *StTy = dyn_cast<StructType>(Ty)) {
+ for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
+ GTI != E; ++GTI) {
+ const Value *Idx = GTI.getOperand();
+ if (auto *StTy = dyn_cast<StructType>(*GTI)) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
// N = N + Offset
if (Field)
TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
- Ty = StTy->getElementType(Field);
} else {
- Ty = cast<SequentialType>(Ty)->getElementType();
+ Type *Ty = GTI.getIndexedType();
+
// If this is a constant subscript, handle it quickly.
if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->isZero())
diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp
index 3f63d049c34e..82111e5c7259 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -93,6 +93,7 @@
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -127,12 +128,7 @@ bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
unsigned NumBytes = AFI->getLocalStackSize();
- // Note: currently hasFP() is always true for hasCalls(), but that's an
- // implementation detail of the current code, not a strict requirement,
- // so stay safe here and check both.
- if (MFI->hasCalls() || hasFP(MF) || NumBytes > 128)
- return false;
- return true;
+ return !(MFI->hasCalls() || hasFP(MF) || NumBytes > 128);
}
/// hasFP - Return true if the specified function should have a dedicated frame
@@ -140,9 +136,12 @@ bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
- return (MFI->hasCalls() || MFI->hasVarSizedObjects() ||
- MFI->isFrameAddressTaken() || MFI->hasStackMap() ||
- MFI->hasPatchPoint() || RegInfo->needsStackRealignment(MF));
+ // Retain behavior of always omitting the FP for leaf functions when possible.
+ return (MFI->hasCalls() &&
+ MF.getTarget().Options.DisableFramePointerElim(MF)) ||
+ MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken() ||
+ MFI->hasStackMap() || MFI->hasPatchPoint() ||
+ RegInfo->needsStackRealignment(MF);
}
/// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
@@ -155,7 +154,7 @@ AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
return !MF.getFrameInfo()->hasVarSizedObjects();
}
-void AArch64FrameLowering::eliminateCallFramePseudoInstr(
+MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr(
MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
const AArch64InstrInfo *TII =
@@ -170,7 +169,7 @@ void AArch64FrameLowering::eliminateCallFramePseudoInstr(
unsigned Align = getStackAlignment();
int64_t Amount = I->getOperand(0).getImm();
- Amount = RoundUpToAlignment(Amount, Align);
+ Amount = alignTo(Amount, Align);
if (!IsDestroy)
Amount = -Amount;
@@ -186,7 +185,7 @@ void AArch64FrameLowering::eliminateCallFramePseudoInstr(
// 2) For 12-bit <= offset <= 24-bit, we use two instructions. One uses
// LSL #0, and the other uses LSL #12.
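// For example, a 0x12345-byte adjustment can be emitted as
//   sub sp, sp, #0x12, lsl #12
//   sub sp, sp, #0x345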
//
- // Mostly call frames will be allocated at the start of a function so
+ // Most call frames will be allocated at the start of a function so
// this is OK, but it is a limitation that needs dealing with.
assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, Amount, TII);
@@ -198,12 +197,11 @@ void AArch64FrameLowering::eliminateCallFramePseudoInstr(
emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, -CalleePopAmount,
TII);
}
- MBB.erase(I);
+ return MBB.erase(I);
}
void AArch64FrameLowering::emitCalleeSavedFrameMoves(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
- unsigned FramePtr) const {
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineModuleInfo &MMI = MF.getMMI();
@@ -216,75 +214,194 @@ void AArch64FrameLowering::emitCalleeSavedFrameMoves(
if (CSI.empty())
return;
- const DataLayout &TD = MF.getDataLayout();
- bool HasFP = hasFP(MF);
-
- // Calculate amount of bytes used for return address storing.
- int stackGrowth = -TD.getPointerSize(0);
-
- // Calculate offsets.
- int64_t saveAreaOffset = (HasFP ? 2 : 1) * stackGrowth;
- unsigned TotalSkipped = 0;
for (const auto &Info : CSI) {
unsigned Reg = Info.getReg();
- int64_t Offset = MFI->getObjectOffset(Info.getFrameIdx()) -
- getOffsetOfLocalArea() + saveAreaOffset;
-
- // Don't output a new CFI directive if we're re-saving the frame pointer or
- // link register. This happens when the PrologEpilogInserter has inserted an
- // extra "STP" of the frame pointer and link register -- the "emitPrologue"
- // method automatically generates the directives when frame pointers are
- // used. If we generate CFI directives for the extra "STP"s, the linker will
- // lose track of the correct values for the frame pointer and link register.
- if (HasFP && (FramePtr == Reg || Reg == AArch64::LR)) {
- TotalSkipped += stackGrowth;
- continue;
- }
-
+ int64_t Offset =
+ MFI->getObjectOffset(Info.getFrameIdx()) - getOffsetOfLocalArea();
unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
- unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
- nullptr, DwarfReg, Offset - TotalSkipped));
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex)
.setMIFlags(MachineInstr::FrameSetup);
}
}
-/// Get FPOffset by analyzing the first instruction.
-static int getFPOffsetInPrologue(MachineInstr *MBBI) {
- // First instruction must a) allocate the stack and b) have an immediate
- // that is a multiple of -2.
- assert(((MBBI->getOpcode() == AArch64::STPXpre ||
- MBBI->getOpcode() == AArch64::STPDpre) &&
- MBBI->getOperand(3).getReg() == AArch64::SP &&
- MBBI->getOperand(4).getImm() < 0 &&
- (MBBI->getOperand(4).getImm() & 1) == 0));
-
- // Frame pointer is fp = sp - 16. Since the STPXpre subtracts the space
- // required for the callee saved register area we get the frame pointer
- // by addding that offset - 16 = -getImm()*8 - 2*8 = -(getImm() + 2) * 8.
- int FPOffset = -(MBBI->getOperand(4).getImm() + 2) * 8;
- assert(FPOffset >= 0 && "Bad Framepointer Offset");
- return FPOffset;
-}
+// Find a scratch register that we can use at the start of the prologue to
+// re-align the stack pointer. We avoid using callee-save registers since they
+// may appear to be free when this is called from canUseAsPrologue (during
+// shrink wrapping), but then no longer be free when this is called from
+// emitPrologue.
+//
+// FIXME: This is a bit conservative, since in the above case we could use one
+// of the callee-save registers as a scratch temp to re-align the stack pointer,
+// but we would then have to make sure that we were in fact saving at least one
+// callee-save register in the prologue, which is additional complexity that
+// doesn't seem worth the benefit.
+static unsigned findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB) {
+ MachineFunction *MF = MBB->getParent();
+
+ // If MBB is an entry block, use X9 as the scratch register
+ if (&MF->front() == MBB)
+ return AArch64::X9;
+
+ const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
+ LivePhysRegs LiveRegs(&TRI);
+ LiveRegs.addLiveIns(*MBB);
+
+ // Mark callee saved registers as used so we will not choose them.
+ const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
+ const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
+ const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(MF);
+ for (unsigned i = 0; CSRegs[i]; ++i)
+ LiveRegs.addReg(CSRegs[i]);
+
+ // Prefer X9 since it was historically used for the prologue scratch reg.
+ const MachineRegisterInfo &MRI = MF->getRegInfo();
+ if (LiveRegs.available(MRI, AArch64::X9))
+ return AArch64::X9;
-static bool isCSSave(MachineInstr *MBBI) {
- return MBBI->getOpcode() == AArch64::STPXi ||
- MBBI->getOpcode() == AArch64::STPDi ||
- MBBI->getOpcode() == AArch64::STPXpre ||
- MBBI->getOpcode() == AArch64::STPDpre;
+ for (unsigned Reg : AArch64::GPR64RegClass) {
+ if (LiveRegs.available(MRI, Reg))
+ return Reg;
+ }
+ return AArch64::NoRegister;
}
bool AArch64FrameLowering::canUseAsPrologue(
const MachineBasicBlock &MBB) const {
const MachineFunction *MF = MBB.getParent();
+ MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
// Don't need a scratch register if we're not going to re-align the stack.
- // Otherwise, we may need a scratch register to be available and we do not
- // support that for now.
- return !RegInfo->needsStackRealignment(*MF);
+ if (!RegInfo->needsStackRealignment(*MF))
+ return true;
+ // Otherwise, we can use any block as long as it has a scratch register
+ // available.
+ return findScratchNonCalleeSaveRegister(TmpMBB) != AArch64::NoRegister;
+}
+
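+// Decide whether the callee-save spills/restores and the local stack
+// allocation can share a single SP adjustment. A sketch of the combined
+// prologue, assuming 48 bytes of callee-saves plus 16 bytes of locals:
+//   sub sp, sp, #64
+//   stp x22, x21, [sp, #16]
+//   stp x20, x19, [sp, #32]
+//   stp fp, lr, [sp, #48]
+// versus a separate SP decrement for each area when this returns false.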
+bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
+ MachineFunction &MF, unsigned StackBumpBytes) const {
+ AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
+ const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
+
+ if (AFI->getLocalStackSize() == 0)
+ return false;
+
+ // 512 is the maximum immediate for stp/ldp that will be used for
+ // callee-save save/restores
+ if (StackBumpBytes >= 512)
+ return false;
+
+ if (MFI->hasVarSizedObjects())
+ return false;
+
+ if (RegInfo->needsStackRealignment(MF))
+ return false;
+
+ // This isn't strictly necessary, but it simplifies things a bit since the
+ // current RedZone handling code assumes the SP is adjusted by the
+ // callee-save save/restore code.
+ if (canUseRedZone(MF))
+ return false;
+
+ return true;
+}
+
+// Convert callee-save register save/restore instruction to do stack pointer
+// decrement/increment to allocate/deallocate the callee-save stack area by
+// converting store/load to use pre/post increment version.
+static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc) {
+
+ unsigned NewOpc;
+ bool NewIsUnscaled = false;
+ switch (MBBI->getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected callee-save save/restore opcode!");
+ case AArch64::STPXi:
+ NewOpc = AArch64::STPXpre;
+ break;
+ case AArch64::STPDi:
+ NewOpc = AArch64::STPDpre;
+ break;
+ case AArch64::STRXui:
+ NewOpc = AArch64::STRXpre;
+ NewIsUnscaled = true;
+ break;
+ case AArch64::STRDui:
+ NewOpc = AArch64::STRDpre;
+ NewIsUnscaled = true;
+ break;
+ case AArch64::LDPXi:
+ NewOpc = AArch64::LDPXpost;
+ break;
+ case AArch64::LDPDi:
+ NewOpc = AArch64::LDPDpost;
+ break;
+ case AArch64::LDRXui:
+ NewOpc = AArch64::LDRXpost;
+ NewIsUnscaled = true;
+ break;
+ case AArch64::LDRDui:
+ NewOpc = AArch64::LDRDpost;
+ NewIsUnscaled = true;
+ break;
+ }
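+  // For example, with CSStackSizeInc == -48 the first callee-save spill
+  //   stp x22, x21, [sp, #0]
+  // becomes the allocating pre-decrement form
+  //   stp x22, x21, [sp, #-48]!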
+
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc));
+ MIB.addReg(AArch64::SP, RegState::Define);
+
+ // Copy all operands other than the immediate offset.
+ unsigned OpndIdx = 0;
+ for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
+ ++OpndIdx)
+ MIB.addOperand(MBBI->getOperand(OpndIdx));
+
+ assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
+ "Unexpected immediate offset in first/last callee-save save/restore "
+ "instruction!");
+ assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
+ "Unexpected base register in callee-save save/restore instruction!");
+ // Last operand is immediate offset that needs fixing.
+ assert(CSStackSizeInc % 8 == 0);
+ int64_t CSStackSizeIncImm = CSStackSizeInc;
+ if (!NewIsUnscaled)
+ CSStackSizeIncImm /= 8;
+ MIB.addImm(CSStackSizeIncImm);
+
+ MIB.setMIFlags(MBBI->getFlags());
+ MIB.setMemRefs(MBBI->memoperands_begin(), MBBI->memoperands_end());
+
+ return std::prev(MBB.erase(MBBI));
+}
+
+// Fixup callee-save register save/restore instructions to take into account
+// combined SP bump by adding the local stack size to the stack offsets.
+static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI,
+ unsigned LocalStackSize) {
+ unsigned Opc = MI.getOpcode();
+ (void)Opc;
+ assert((Opc == AArch64::STPXi || Opc == AArch64::STPDi ||
+ Opc == AArch64::STRXui || Opc == AArch64::STRDui ||
+ Opc == AArch64::LDPXi || Opc == AArch64::LDPDi ||
+ Opc == AArch64::LDRXui || Opc == AArch64::LDRDui) &&
+ "Unexpected callee-save save/restore opcode!");
+
+ unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
+ assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
+ "Unexpected base register in callee-save save/restore instruction!");
+ // Last operand is immediate offset that needs fixing.
+ MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
+ // All generated opcodes have scaled offsets.
+ assert(LocalStackSize % 8 == 0);
+ OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / 8);
}
void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
@@ -316,40 +433,59 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
// All of the stack allocation is for locals.
AFI->setLocalStackSize(NumBytes);
- // Label used to tie together the PROLOG_LABEL and the MachineMoves.
- MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
-
+ if (!NumBytes)
+ return;
// REDZONE: If the stack size is less than 128 bytes, we don't need
// to actually allocate.
- if (NumBytes && !canUseRedZone(MF)) {
+ if (canUseRedZone(MF))
+ ++NumRedZoneFunctions;
+ else {
emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
MachineInstr::FrameSetup);
+ // Label used to tie together the PROLOG_LABEL and the MachineMoves.
+ MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
// Encode the stack size of the leaf function.
unsigned CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createDefCfaOffset(FrameLabel, -NumBytes));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex)
.setMIFlags(MachineInstr::FrameSetup);
- } else if (NumBytes) {
- ++NumRedZoneFunctions;
}
-
return;
}
- // Only set up FP if we actually need to.
- int FPOffset = 0;
- if (HasFP)
- FPOffset = getFPOffsetInPrologue(MBBI);
+ auto CSStackSize = AFI->getCalleeSavedStackSize();
+ // All of the remaining stack allocations are for locals.
+ AFI->setLocalStackSize(NumBytes - CSStackSize);
- // Move past the saves of the callee-saved registers.
- while (isCSSave(MBBI)) {
- ++MBBI;
- NumBytes -= 16;
+ bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
+ if (CombineSPBump) {
+ emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
+ MachineInstr::FrameSetup);
+ NumBytes = 0;
+ } else if (CSStackSize != 0) {
+ MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(MBB, MBBI, DL, TII,
+ -CSStackSize);
+ NumBytes -= CSStackSize;
}
assert(NumBytes >= 0 && "Negative stack allocation size!?");
+
+ // Move past the saves of the callee-saved registers, fixing up the offsets
+ // and pre-inc if we decided to combine the callee-save and local stack
+ // pointer bump above.
+ MachineBasicBlock::iterator End = MBB.end();
+ while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup)) {
+ if (CombineSPBump)
+ fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize());
+ ++MBBI;
+ }
if (HasFP) {
+ // Only set up FP if we actually need to. Frame pointer is fp = sp - 16.
+ int FPOffset = CSStackSize - 16;
+ if (CombineSPBump)
+ FPOffset += AFI->getLocalStackSize();
+
// Issue sub fp, sp, FPOffset or
// mov fp,sp when FPOffset is zero.
// Note: All stores of callee-saved registers are marked as "FrameSetup".
@@ -358,47 +494,46 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
MachineInstr::FrameSetup);
}
- // All of the remaining stack allocations are for locals.
- AFI->setLocalStackSize(NumBytes);
-
// Allocate space for the rest of the frame.
+ if (NumBytes) {
+ const bool NeedsRealignment = RegInfo->needsStackRealignment(MF);
+ unsigned scratchSPReg = AArch64::SP;
- const unsigned Alignment = MFI->getMaxAlignment();
- const bool NeedsRealignment = RegInfo->needsStackRealignment(MF);
- unsigned scratchSPReg = AArch64::SP;
- if (NumBytes && NeedsRealignment) {
- // Use the first callee-saved register as a scratch register.
- scratchSPReg = AArch64::X9;
- }
+ if (NeedsRealignment) {
+ scratchSPReg = findScratchNonCalleeSaveRegister(&MBB);
+ assert(scratchSPReg != AArch64::NoRegister);
+ }
- // If we're a leaf function, try using the red zone.
- if (NumBytes && !canUseRedZone(MF))
- // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
- // the correct value here, as NumBytes also includes padding bytes,
- // which shouldn't be counted here.
- emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP, -NumBytes, TII,
- MachineInstr::FrameSetup);
+ // If we're a leaf function, try using the red zone.
+ if (!canUseRedZone(MF))
+ // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
+ // the correct value here, as NumBytes also includes padding bytes,
+ // which shouldn't be counted here.
+ emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP, -NumBytes, TII,
+ MachineInstr::FrameSetup);
- if (NumBytes && NeedsRealignment) {
- const unsigned NrBitsToZero = countTrailingZeros(Alignment);
- assert(NrBitsToZero > 1);
- assert(scratchSPReg != AArch64::SP);
-
- // SUB X9, SP, NumBytes
- // -- X9 is temporary register, so shouldn't contain any live data here,
- // -- free to use. This is already produced by emitFrameOffset above.
- // AND SP, X9, 0b11111...0000
- // The logical immediates have a non-trivial encoding. The following
- // formula computes the encoded immediate with all ones but
- // NrBitsToZero zero bits as least significant bits.
- uint32_t andMaskEncoded =
- (1 <<12) // = N
- | ((64-NrBitsToZero) << 6) // immr
- | ((64-NrBitsToZero-1) << 0) // imms
- ;
- BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP)
- .addReg(scratchSPReg, RegState::Kill)
- .addImm(andMaskEncoded);
+ if (NeedsRealignment) {
+ const unsigned Alignment = MFI->getMaxAlignment();
+ const unsigned NrBitsToZero = countTrailingZeros(Alignment);
+ assert(NrBitsToZero > 1);
+ assert(scratchSPReg != AArch64::SP);
+
+ // SUB X9, SP, NumBytes
+ // -- X9 is temporary register, so shouldn't contain any live data here,
+ // -- free to use. This is already produced by emitFrameOffset above.
+ // AND SP, X9, 0b11111...0000
+ // The logical immediates have a non-trivial encoding. The following
+ // formula computes the encoded immediate with all ones but
+ // NrBitsToZero zero bits as least significant bits.
+ uint32_t andMaskEncoded = (1 << 12) // = N
+ | ((64 - NrBitsToZero) << 6) // immr
+ | ((64 - NrBitsToZero - 1) << 0); // imms
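+      // For example, a 16-byte alignment (NrBitsToZero == 4) gives N=1,
+      // immr=60, imms=59, which decodes to the mask 0xFFFFFFFFFFFFFFF0.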
+
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP)
+ .addReg(scratchSPReg, RegState::Kill)
+ .addImm(andMaskEncoded);
+ AFI->setStackRealigned(true);
+ }
}
// If we need a base pointer, set it up here. It's whatever the value of the
@@ -491,21 +626,6 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex)
.setMIFlags(MachineInstr::FrameSetup);
-
- // Record the location of the stored LR
- unsigned LR = RegInfo->getDwarfRegNum(AArch64::LR, true);
- CFIIndex = MMI.addFrameInst(
- MCCFIInstruction::createOffset(nullptr, LR, StackGrowth));
- BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex)
- .setMIFlags(MachineInstr::FrameSetup);
-
- // Record the location of the stored FP
- CFIIndex = MMI.addFrameInst(
- MCCFIInstruction::createOffset(nullptr, Reg, 2 * StackGrowth));
- BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex)
- .setMIFlags(MachineInstr::FrameSetup);
} else {
// Encode the stack size of the leaf function.
unsigned CFIIndex = MMI.addFrameInst(
@@ -515,36 +635,10 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
.setMIFlags(MachineInstr::FrameSetup);
}
- // Now emit the moves for whatever callee saved regs we have.
- emitCalleeSavedFrameMoves(MBB, MBBI, FramePtr);
- }
-}
-
-static bool isCalleeSavedRegister(unsigned Reg, const MCPhysReg *CSRegs) {
- for (unsigned i = 0; CSRegs[i]; ++i)
- if (Reg == CSRegs[i])
- return true;
- return false;
-}
-
-/// Checks whether the given instruction restores callee save registers
-/// and if so returns how many.
-static unsigned getNumCSRestores(MachineInstr &MI, const MCPhysReg *CSRegs) {
- unsigned RtIdx = 0;
- switch (MI.getOpcode()) {
- case AArch64::LDPXpost:
- case AArch64::LDPDpost:
- RtIdx = 1;
- // FALLTHROUGH
- case AArch64::LDPXi:
- case AArch64::LDPDi:
- if (!isCalleeSavedRegister(MI.getOperand(RtIdx).getReg(), CSRegs) ||
- !isCalleeSavedRegister(MI.getOperand(RtIdx + 1).getReg(), CSRegs) ||
- MI.getOperand(RtIdx + 2).getReg() != AArch64::SP)
- return 0;
- return 2;
+ // Now emit the moves for whatever callee saved regs we have (including FP,
+ // LR if those are saved).
+ emitCalleeSavedFrameMoves(MBB, MBBI);
}
- return 0;
}
void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
@@ -552,7 +646,6 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
MachineFrameInfo *MFI = MF.getFrameInfo();
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
- const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL;
bool IsTailCallReturn = false;
@@ -599,7 +692,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
// ---------------------| --- |
// | | | |
// | CalleeSavedReg | | |
- // | (NumRestores * 8) | | |
+ // | (CalleeSavedStackSize)| | |
// | | | |
// ---------------------| | NumBytes
// | | StackSize (StackAdjustUp)
@@ -614,41 +707,74 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
//
// AArch64TargetLowering::LowerCall figures out ArgumentPopSize and keeps
// it as the 2nd argument of AArch64ISD::TC_RETURN.
- NumBytes += ArgumentPopSize;
- unsigned NumRestores = 0;
+ auto CSStackSize = AFI->getCalleeSavedStackSize();
+ bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
+
+ if (!CombineSPBump && CSStackSize != 0)
+ convertCalleeSaveRestoreToSPPrePostIncDec(
+ MBB, std::prev(MBB.getFirstTerminator()), DL, TII, CSStackSize);
+
// Move past the restores of the callee-saved registers.
MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
- const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
MachineBasicBlock::iterator Begin = MBB.begin();
while (LastPopI != Begin) {
--LastPopI;
- unsigned Restores = getNumCSRestores(*LastPopI, CSRegs);
- NumRestores += Restores;
- if (Restores == 0) {
+ if (!LastPopI->getFlag(MachineInstr::FrameDestroy)) {
++LastPopI;
break;
- }
+ } else if (CombineSPBump)
+ fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize());
+ }
+
+ // If there is a single SP update, insert it before the ret and we're done.
+ if (CombineSPBump) {
+ emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
+ NumBytes + ArgumentPopSize, TII,
+ MachineInstr::FrameDestroy);
+ return;
}
- NumBytes -= NumRestores * 8;
+
+ NumBytes -= CSStackSize;
assert(NumBytes >= 0 && "Negative stack allocation size!?");
if (!hasFP(MF)) {
+ bool RedZone = canUseRedZone(MF);
// If this was a redzone leaf function, we don't need to restore the
- // stack pointer.
- if (!canUseRedZone(MF))
- emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, NumBytes,
- TII);
- return;
+ // stack pointer (but we may need to pop stack args for fastcc).
+ if (RedZone && ArgumentPopSize == 0)
+ return;
+
+ bool NoCalleeSaveRestore = CSStackSize == 0;
+ int StackRestoreBytes = RedZone ? 0 : NumBytes;
+ if (NoCalleeSaveRestore)
+ StackRestoreBytes += ArgumentPopSize;
+ emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
+ StackRestoreBytes, TII, MachineInstr::FrameDestroy);
+ // If we were able to combine the local stack pop with the argument pop,
+ // then we're done.
+ if (NoCalleeSaveRestore || ArgumentPopSize == 0)
+ return;
+ NumBytes = 0;
}
// Restore the original stack pointer.
// FIXME: Rather than doing the math here, we should instead just use
// non-post-indexed loads for the restores if we aren't actually going to
// be able to save any instructions.
- if (NumBytes || MFI->hasVarSizedObjects())
+ if (MFI->hasVarSizedObjects() || AFI->isStackRealigned())
emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
- -(NumRestores - 2) * 8, TII, MachineInstr::NoFlags);
+ -CSStackSize + 16, TII, MachineInstr::FrameDestroy);
+ else if (NumBytes)
+ emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, NumBytes, TII,
+ MachineInstr::FrameDestroy);
+
+ // This must be placed after the callee-save restore code because that code
+ // assumes the SP is at the same location as it was after the callee-save save
+ // code in the prologue.
+ if (ArgumentPopSize)
+ emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
+ ArgumentPopSize, TII, MachineInstr::FrameDestroy);
}
/// getFrameIndexReference - Provide a base+offset reference to an FI slot for
@@ -726,86 +852,167 @@ int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF,
}
static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) {
- if (Reg != AArch64::LR)
- return getKillRegState(true);
+ // Do not set a kill flag on values that are also marked as live-in. This
+ // happens with the @llvm.returnaddress intrinsic and with arguments passed in
+ // callee saved registers.
+ // Omitting the kill flags is conservatively correct even if the live-in
+ // is not used after all.
+ bool IsLiveIn = MF.getRegInfo().isLiveIn(Reg);
+ return getKillRegState(!IsLiveIn);
+}
- // LR maybe referred to later by an @llvm.returnaddress intrinsic.
- bool LRLiveIn = MF.getRegInfo().isLiveIn(AArch64::LR);
- bool LRKill = !(LRLiveIn && MF.getFrameInfo()->isReturnAddressTaken());
- return getKillRegState(LRKill);
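+// MachO's compact unwind format encodes callee-saves as adjacent register
+// pairs, so when it is produced the save/restore code below must keep
+// registers paired; functions using swifterror are excluded.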
+static bool produceCompactUnwindFrame(MachineFunction &MF) {
+ const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
+ AttributeSet Attrs = MF.getFunction()->getAttributes();
+ return Subtarget.isTargetMachO() &&
+ !(Subtarget.getTargetLowering()->supportSwiftError() &&
+ Attrs.hasAttrSomewhere(Attribute::SwiftError));
}
-bool AArch64FrameLowering::spillCalleeSavedRegisters(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+
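+// Describes one callee-save slot: a single register, or an adjacent pair
+// that can be saved/restored with one STP/LDP. Offset is the scaled STP/LDP
+// immediate, i.e. units of 8 bytes from SP once the callee-save area has
+// been allocated.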
+struct RegPairInfo {
+ RegPairInfo() : Reg1(AArch64::NoRegister), Reg2(AArch64::NoRegister) {}
+ unsigned Reg1;
+ unsigned Reg2;
+ int FrameIdx;
+ int Offset;
+ bool IsGPR;
+ bool isPaired() const { return Reg2 != AArch64::NoRegister; }
+};
+
+static void computeCalleeSaveRegisterPairs(
+ MachineFunction &MF, const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI, SmallVectorImpl<RegPairInfo> &RegPairs) {
+
+ if (CSI.empty())
+ return;
+
+ AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ CallingConv::ID CC = MF.getFunction()->getCallingConv();
unsigned Count = CSI.size();
- DebugLoc DL;
- assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
+ (void)CC;
+ // MachO's compact unwind format relies on all registers being stored in
+ // pairs.
+ assert((!produceCompactUnwindFrame(MF) ||
+ CC == CallingConv::PreserveMost ||
+ (Count & 1) == 0) &&
+ "Odd number of callee-saved regs to spill!");
+ unsigned Offset = AFI->getCalleeSavedStackSize();
+
+ for (unsigned i = 0; i < Count; ++i) {
+ RegPairInfo RPI;
+ RPI.Reg1 = CSI[i].getReg();
+
+ assert(AArch64::GPR64RegClass.contains(RPI.Reg1) ||
+ AArch64::FPR64RegClass.contains(RPI.Reg1));
+ RPI.IsGPR = AArch64::GPR64RegClass.contains(RPI.Reg1);
+
+ // Add the next reg to the pair if it is in the same register class.
+ if (i + 1 < Count) {
+ unsigned NextReg = CSI[i + 1].getReg();
+ if ((RPI.IsGPR && AArch64::GPR64RegClass.contains(NextReg)) ||
+ (!RPI.IsGPR && AArch64::FPR64RegClass.contains(NextReg)))
+ RPI.Reg2 = NextReg;
+ }
- for (unsigned i = 0; i < Count; i += 2) {
- unsigned idx = Count - i - 2;
- unsigned Reg1 = CSI[idx].getReg();
- unsigned Reg2 = CSI[idx + 1].getReg();
// GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
// list to come in sorted by frame index so that we can issue the store
// pair instructions directly. Assert if we see anything otherwise.
//
// The order of the registers in the list is controlled by
// getCalleeSavedRegs(), so they will always be in-order, as well.
- assert(CSI[idx].getFrameIdx() + 1 == CSI[idx + 1].getFrameIdx() &&
+ assert((!RPI.isPaired() ||
+ (CSI[i].getFrameIdx() + 1 == CSI[i + 1].getFrameIdx())) &&
"Out of order callee saved regs!");
+
+ // MachO's compact unwind format relies on all registers being stored in
+ // adjacent register pairs.
+ assert((!produceCompactUnwindFrame(MF) ||
+ CC == CallingConv::PreserveMost ||
+ (RPI.isPaired() &&
+ ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
+ RPI.Reg1 + 1 == RPI.Reg2))) &&
+ "Callee-save registers not saved as adjacent register pair!");
+
+ RPI.FrameIdx = CSI[i].getFrameIdx();
+
+ if (Count * 8 != AFI->getCalleeSavedStackSize() && !RPI.isPaired()) {
+ // Round up size of non-pair to pair size if we need to pad the
+ // callee-save area to ensure 16-byte alignment.
+ Offset -= 16;
+ assert(MFI->getObjectAlignment(RPI.FrameIdx) <= 16);
+ MFI->setObjectAlignment(RPI.FrameIdx, 16);
+ AFI->setCalleeSaveStackHasFreeSpace(true);
+ } else
+ Offset -= RPI.isPaired() ? 16 : 8;
+ assert(Offset % 8 == 0);
+ RPI.Offset = Offset / 8;
+ assert((RPI.Offset >= -64 && RPI.Offset <= 63) &&
+ "Offset out of bounds for LDP/STP immediate");
+
+ RegPairs.push_back(RPI);
+ if (RPI.isPaired())
+ ++i;
+ }
+}
+
+bool AArch64FrameLowering::spillCalleeSavedRegisters(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ DebugLoc DL;
+ SmallVector<RegPairInfo, 8> RegPairs;
+
+ computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs);
+
+ for (auto RPII = RegPairs.rbegin(), RPIE = RegPairs.rend(); RPII != RPIE;
+ ++RPII) {
+ RegPairInfo RPI = *RPII;
+ unsigned Reg1 = RPI.Reg1;
+ unsigned Reg2 = RPI.Reg2;
unsigned StrOpc;
- assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
- assert((i & 1) == 0 && "Odd index for callee-saved reg spill!");
- // Issue sequence of non-sp increment and pi sp spills for cs regs. The
- // first spill is a pre-increment that allocates the stack.
+
+ // Issue sequence of spills for cs regs. The first spill may be converted
+ // to a pre-decrement store later by emitPrologue if the callee-save stack
+ // area allocation can't be combined with the local stack area allocation.
// For example:
- // stp x22, x21, [sp, #-48]! // addImm(-6)
+ // stp x22, x21, [sp, #0] // addImm(+0)
// stp x20, x19, [sp, #16] // addImm(+2)
// stp fp, lr, [sp, #32] // addImm(+4)
// Rationale: This sequence saves uop updates compared to a sequence of
// pre-increment spills like stp xi,xj,[sp,#-16]!
- // Note: Similar rational and sequence for restores in epilog.
- if (AArch64::GPR64RegClass.contains(Reg1)) {
- assert(AArch64::GPR64RegClass.contains(Reg2) &&
- "Expected GPR64 callee-saved register pair!");
- // For first spill use pre-increment store.
- if (i == 0)
- StrOpc = AArch64::STPXpre;
- else
- StrOpc = AArch64::STPXi;
- } else if (AArch64::FPR64RegClass.contains(Reg1)) {
- assert(AArch64::FPR64RegClass.contains(Reg2) &&
- "Expected FPR64 callee-saved register pair!");
- // For first spill use pre-increment store.
- if (i == 0)
- StrOpc = AArch64::STPDpre;
- else
- StrOpc = AArch64::STPDi;
- } else
- llvm_unreachable("Unexpected callee saved register!");
- DEBUG(dbgs() << "CSR spill: (" << TRI->getName(Reg1) << ", "
- << TRI->getName(Reg2) << ") -> fi#(" << CSI[idx].getFrameIdx()
- << ", " << CSI[idx + 1].getFrameIdx() << ")\n");
- // Compute offset: i = 0 => offset = -Count;
- // i = 2 => offset = -(Count - 2) + Count = 2 = i; etc.
- const int Offset = (i == 0) ? -Count : i;
- assert((Offset >= -64 && Offset <= 63) &&
- "Offset out of bounds for STP immediate");
- MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
- if (StrOpc == AArch64::STPDpre || StrOpc == AArch64::STPXpre)
- MIB.addReg(AArch64::SP, RegState::Define);
+ // Note: Similar rationale and sequence for restores in epilog.
+ if (RPI.IsGPR)
+ StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
+ else
+ StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
+ DEBUG(dbgs() << "CSR spill: (" << TRI->getName(Reg1);
+ if (RPI.isPaired())
+ dbgs() << ", " << TRI->getName(Reg2);
+ dbgs() << ") -> fi#(" << RPI.FrameIdx;
+ if (RPI.isPaired())
+ dbgs() << ", " << RPI.FrameIdx+1;
+ dbgs() << ")\n");
+ MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
MBB.addLiveIn(Reg1);
- MBB.addLiveIn(Reg2);
- MIB.addReg(Reg2, getPrologueDeath(MF, Reg2))
- .addReg(Reg1, getPrologueDeath(MF, Reg1))
+ if (RPI.isPaired()) {
+ MBB.addLiveIn(Reg2);
+ MIB.addReg(Reg2, getPrologueDeath(MF, Reg2));
+ MIB.addMemOperand(MF.getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx + 1),
+ MachineMemOperand::MOStore, 8, 8));
+ }
+ MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
.addReg(AArch64::SP)
- .addImm(Offset) // [sp, #offset * 8], where factor * 8 is implicit
+ .addImm(RPI.Offset) // [sp, #offset*8], where factor*8 is implicit
.setMIFlag(MachineInstr::FrameSetup);
+ MIB.addMemOperand(MF.getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx),
+ MachineMemOperand::MOStore, 8, 8));
}
return true;
}
@@ -816,66 +1023,55 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
const TargetRegisterInfo *TRI) const {
MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
- unsigned Count = CSI.size();
DebugLoc DL;
- assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
+ SmallVector<RegPairInfo, 8> RegPairs;
if (MI != MBB.end())
DL = MI->getDebugLoc();
- for (unsigned i = 0; i < Count; i += 2) {
- unsigned Reg1 = CSI[i].getReg();
- unsigned Reg2 = CSI[i + 1].getReg();
- // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
- // list to come in sorted by frame index so that we can issue the store
- // pair instructions directly. Assert if we see anything otherwise.
- assert(CSI[i].getFrameIdx() + 1 == CSI[i + 1].getFrameIdx() &&
- "Out of order callee saved regs!");
- // Issue sequence of non-sp increment and sp-pi restores for cs regs. Only
- // the last load is sp-pi post-increment and de-allocates the stack:
+ computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs);
+
+ for (auto RPII = RegPairs.begin(), RPIE = RegPairs.end(); RPII != RPIE;
+ ++RPII) {
+ RegPairInfo RPI = *RPII;
+ unsigned Reg1 = RPI.Reg1;
+ unsigned Reg2 = RPI.Reg2;
+
+ // Issue sequence of restores for cs regs. The last restore may be converted
+ // to a post-increment load later by emitEpilogue if the callee-save stack
+ // area allocation can't be combined with the local stack area allocation.
// For example:
// ldp fp, lr, [sp, #32] // addImm(+4)
// ldp x20, x19, [sp, #16] // addImm(+2)
- // ldp x22, x21, [sp], #48 // addImm(+6)
+ // ldp x22, x21, [sp, #0] // addImm(+0)
// Note: see comment in spillCalleeSavedRegisters()
unsigned LdrOpc;
+ if (RPI.IsGPR)
+ LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
+ else
+ LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
+ DEBUG(dbgs() << "CSR restore: (" << TRI->getName(Reg1);
+ if (RPI.isPaired())
+ dbgs() << ", " << TRI->getName(Reg2);
+ dbgs() << ") -> fi#(" << RPI.FrameIdx;
+ if (RPI.isPaired())
+ dbgs() << ", " << RPI.FrameIdx+1;
+ dbgs() << ")\n");
- assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
- assert((i & 1) == 0 && "Odd index for callee-saved reg spill!");
- if (AArch64::GPR64RegClass.contains(Reg1)) {
- assert(AArch64::GPR64RegClass.contains(Reg2) &&
- "Expected GPR64 callee-saved register pair!");
- if (i == Count - 2)
- LdrOpc = AArch64::LDPXpost;
- else
- LdrOpc = AArch64::LDPXi;
- } else if (AArch64::FPR64RegClass.contains(Reg1)) {
- assert(AArch64::FPR64RegClass.contains(Reg2) &&
- "Expected FPR64 callee-saved register pair!");
- if (i == Count - 2)
- LdrOpc = AArch64::LDPDpost;
- else
- LdrOpc = AArch64::LDPDi;
- } else
- llvm_unreachable("Unexpected callee saved register!");
- DEBUG(dbgs() << "CSR restore: (" << TRI->getName(Reg1) << ", "
- << TRI->getName(Reg2) << ") -> fi#(" << CSI[i].getFrameIdx()
- << ", " << CSI[i + 1].getFrameIdx() << ")\n");
-
- // Compute offset: i = 0 => offset = Count - 2; i = 2 => offset = Count - 4;
- // etc.
- const int Offset = (i == Count - 2) ? Count : Count - i - 2;
- assert((Offset >= -64 && Offset <= 63) &&
- "Offset out of bounds for LDP immediate");
MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc));
- if (LdrOpc == AArch64::LDPXpost || LdrOpc == AArch64::LDPDpost)
- MIB.addReg(AArch64::SP, RegState::Define);
-
- MIB.addReg(Reg2, getDefRegState(true))
- .addReg(Reg1, getDefRegState(true))
+ if (RPI.isPaired()) {
+ MIB.addReg(Reg2, getDefRegState(true));
+ MIB.addMemOperand(MF.getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx + 1),
+ MachineMemOperand::MOLoad, 8, 8));
+ }
+ MIB.addReg(Reg1, getDefRegState(true))
.addReg(AArch64::SP)
- .addImm(Offset); // [sp], #offset * 8 or [sp, #offset * 8]
- // where the factor * 8 is implicit
+ .addImm(RPI.Offset) // [sp, #offset*8] where the factor*8 is implicit
+ .setMIFlag(MachineInstr::FrameDestroy);
+ MIB.addMemOperand(MF.getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx),
+ MachineMemOperand::MOLoad, 8, 8));
}
return true;
}
@@ -892,8 +1088,8 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
MF.getSubtarget().getRegisterInfo());
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
- SmallVector<unsigned, 4> UnspilledCSGPRs;
- SmallVector<unsigned, 4> UnspilledCSFPRs;
+ unsigned UnspilledCSGPR = AArch64::NoRegister;
+ unsigned UnspilledCSGPRPaired = AArch64::NoRegister;
// The frame record needs to be created by saving the appropriate registers
if (hasFP(MF)) {
@@ -901,79 +1097,51 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
SavedRegs.set(AArch64::LR);
}
- // Spill the BasePtr if it's used. Do this first thing so that the
- // getCalleeSavedRegs() below will get the right answer.
+ unsigned BasePointerReg = AArch64::NoRegister;
if (RegInfo->hasBasePointer(MF))
- SavedRegs.set(RegInfo->getBaseRegister());
-
- if (RegInfo->needsStackRealignment(MF) && !RegInfo->hasBasePointer(MF))
- SavedRegs.set(AArch64::X9);
+ BasePointerReg = RegInfo->getBaseRegister();
- // If any callee-saved registers are used, the frame cannot be eliminated.
- unsigned NumGPRSpilled = 0;
- unsigned NumFPRSpilled = 0;
bool ExtraCSSpill = false;
- bool CanEliminateFrame = true;
- DEBUG(dbgs() << "*** determineCalleeSaves\nUsed CSRs:");
const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
+ // Figure out which callee-saved registers to save/restore.
+ for (unsigned i = 0; CSRegs[i]; ++i) {
+ const unsigned Reg = CSRegs[i];
+
+ // Add the base pointer register to SavedRegs if it is callee-save.
+ if (Reg == BasePointerReg)
+ SavedRegs.set(Reg);
- // Check pairs of consecutive callee-saved registers.
- for (unsigned i = 0; CSRegs[i]; i += 2) {
- assert(CSRegs[i + 1] && "Odd number of callee-saved registers!");
-
- const unsigned OddReg = CSRegs[i];
- const unsigned EvenReg = CSRegs[i + 1];
- assert((AArch64::GPR64RegClass.contains(OddReg) &&
- AArch64::GPR64RegClass.contains(EvenReg)) ^
- (AArch64::FPR64RegClass.contains(OddReg) &&
- AArch64::FPR64RegClass.contains(EvenReg)) &&
- "Register class mismatch!");
-
- const bool OddRegUsed = SavedRegs.test(OddReg);
- const bool EvenRegUsed = SavedRegs.test(EvenReg);
-
- // Early exit if none of the registers in the register pair is actually
- // used.
- if (!OddRegUsed && !EvenRegUsed) {
- if (AArch64::GPR64RegClass.contains(OddReg)) {
- UnspilledCSGPRs.push_back(OddReg);
- UnspilledCSGPRs.push_back(EvenReg);
- } else {
- UnspilledCSFPRs.push_back(OddReg);
- UnspilledCSFPRs.push_back(EvenReg);
+ bool RegUsed = SavedRegs.test(Reg);
+ unsigned PairedReg = CSRegs[i ^ 1];
+ if (!RegUsed) {
+ if (AArch64::GPR64RegClass.contains(Reg) &&
+ !RegInfo->isReservedReg(MF, Reg)) {
+ UnspilledCSGPR = Reg;
+ UnspilledCSGPRPaired = PairedReg;
}
continue;
}
- unsigned Reg = AArch64::NoRegister;
- // If only one of the registers of the register pair is used, make sure to
- // mark the other one as used as well.
- if (OddRegUsed ^ EvenRegUsed) {
- // Find out which register is the additional spill.
- Reg = OddRegUsed ? EvenReg : OddReg;
- SavedRegs.set(Reg);
+ // MachO's compact unwind format relies on all registers being stored in
+ // pairs.
+ // FIXME: the usual format is actually better if unwinding isn't needed.
+ if (produceCompactUnwindFrame(MF) && !SavedRegs.test(PairedReg)) {
+ SavedRegs.set(PairedReg);
+ if (AArch64::GPR64RegClass.contains(PairedReg) &&
+ !RegInfo->isReservedReg(MF, PairedReg))
+ ExtraCSSpill = true;
}
+ }
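
The pairing logic above finds a register's partner with CSRegs[i ^ 1]: XOR-ing an index with 1 flips between the two slots of an aligned pair, which is what lets the compact-unwind path set PairedReg directly. A minimal sketch of the index arithmetic:

#include <cassert>

int main() {
  // i ^ 1 maps 0<->1, 2<->3, 4<->5, ...: each callee-saved register's
  // partner is the other element of its two-slot pair in CSRegs.
  for (unsigned i = 0; i < 8; ++i)
    assert((i ^ 1) == (i % 2 == 0 ? i + 1 : i - 1));
}
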
- DEBUG(dbgs() << ' ' << PrintReg(OddReg, RegInfo));
- DEBUG(dbgs() << ' ' << PrintReg(EvenReg, RegInfo));
-
- assert(((OddReg == AArch64::LR && EvenReg == AArch64::FP) ||
- (RegInfo->getEncodingValue(OddReg) + 1 ==
- RegInfo->getEncodingValue(EvenReg))) &&
- "Register pair of non-adjacent registers!");
- if (AArch64::GPR64RegClass.contains(OddReg)) {
- NumGPRSpilled += 2;
- // If it's not a reserved register, we can use it in lieu of an
- // emergency spill slot for the register scavenger.
- // FIXME: It would be better to instead keep looking and choose another
- // unspilled register that isn't reserved, if there is one.
- if (Reg != AArch64::NoRegister && !RegInfo->isReservedReg(MF, Reg))
- ExtraCSSpill = true;
- } else
- NumFPRSpilled += 2;
+ DEBUG(dbgs() << "*** determineCalleeSaves\nUsed CSRs:";
+ for (int Reg = SavedRegs.find_first(); Reg != -1;
+ Reg = SavedRegs.find_next(Reg))
+ dbgs() << ' ' << PrintReg(Reg, RegInfo);
+ dbgs() << "\n";);
- CanEliminateFrame = false;
- }
+ // If any callee-saved registers are used, the frame cannot be eliminated.
+ unsigned NumRegsSpilled = SavedRegs.count();
+ bool CanEliminateFrame = NumRegsSpilled == 0;
// FIXME: Set BigStack if any stack slot references may be out of range.
// For now, just conservatively guestimate based on unscaled indexing
@@ -982,8 +1150,7 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
// The CSR spill slots have not been allocated yet, so estimateStackSize
// won't include them.
MachineFrameInfo *MFI = MF.getFrameInfo();
- unsigned CFSize =
- MFI->estimateStackSize(MF) + 8 * (NumGPRSpilled + NumFPRSpilled);
+ unsigned CFSize = MFI->estimateStackSize(MF) + 8 * NumRegsSpilled;
DEBUG(dbgs() << "Estimated stack frame size: " << CFSize << " bytes.\n");
bool BigStack = (CFSize >= 256);
if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
@@ -996,19 +1163,17 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
// above to keep the number of spills even, we don't need to do anything else
// here.
if (BigStack && !ExtraCSSpill) {
-
- // If we're adding a register to spill here, we have to add two of them
- // to keep the number of regs to spill even.
- assert(((UnspilledCSGPRs.size() & 1) == 0) && "Odd number of registers!");
- unsigned Count = 0;
- while (!UnspilledCSGPRs.empty() && Count < 2) {
- unsigned Reg = UnspilledCSGPRs.back();
- UnspilledCSGPRs.pop_back();
- DEBUG(dbgs() << "Spilling " << PrintReg(Reg, RegInfo)
- << " to get a scratch register.\n");
- SavedRegs.set(Reg);
+ if (UnspilledCSGPR != AArch64::NoRegister) {
+ DEBUG(dbgs() << "Spilling " << PrintReg(UnspilledCSGPR, RegInfo)
+ << " to get a scratch register.\n");
+ SavedRegs.set(UnspilledCSGPR);
+ // MachO's compact unwind format relies on all registers being stored in
+ // pairs, so if we need to spill one extra for BigStack, then we need to
+ // store the pair.
+ if (produceCompactUnwindFrame(MF))
+ SavedRegs.set(UnspilledCSGPRPaired);
ExtraCSSpill = true;
- ++Count;
+ NumRegsSpilled = SavedRegs.count();
}
// If we didn't find an extra callee-saved register to spill, create
@@ -1021,4 +1186,14 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
<< " as the emergency spill slot.\n");
}
}
+
+ // Round up to register pair alignment to avoid additional SP adjustment
+ // instructions.
+ AFI->setCalleeSavedStackSize(alignTo(8 * NumRegsSpilled, 16));
+}
+
+bool AArch64FrameLowering::enableStackSlotScavenging(
+ const MachineFunction &MF) const {
+ const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+ return AFI->hasCalleeSaveStackFreeSpace();
}
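
The final setCalleeSavedStackSize call rounds the raw 8 * NumRegsSpilled byte count up to a 16-byte boundary, so an odd register count leaves one spare 8-byte slot in the CSR area; that free space is what hasCalleeSaveStackFreeSpace(), queried by enableStackSlotScavenging() above, can let the scavenger reuse. A minimal sketch of the rounding (alignTo16 stands in for llvm::alignTo(Bytes, 16)):

#include <cassert>
#include <cstdint>

static uint64_t alignTo16(uint64_t Bytes) { return (Bytes + 15) & ~UINT64_C(15); }

int main() {
  assert(alignTo16(8 * 3) == 32); // 3 CSRs: 24 bytes -> 32, one spare slot
  assert(alignTo16(8 * 4) == 32); // 4 CSRs: already 16-byte aligned
}
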
diff --git a/lib/Target/AArch64/AArch64FrameLowering.h b/lib/Target/AArch64/AArch64FrameLowering.h
index 7d8354c38787..f254ea9b70aa 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/lib/Target/AArch64/AArch64FrameLowering.h
@@ -25,12 +25,11 @@ public:
true /*StackRealignable*/) {}
void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned FramePtr) const;
+ MachineBasicBlock::iterator MBBI) const;
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const override;
+ MachineBasicBlock::iterator
+ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
@@ -67,6 +66,12 @@ public:
bool enableShrinkWrapping(const MachineFunction &MF) const override {
return true;
}
+
+ bool enableStackSlotScavenging(const MachineFunction &MF) const override;
+
+private:
+ bool shouldCombineCSRLocalStackBump(MachineFunction &MF,
+ unsigned StackBumpBytes) const;
};
} // End llvm namespace
diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 6c868880bcac..8d649250f656 100644
--- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -57,7 +57,7 @@ public:
return SelectionDAGISel::runOnMachineFunction(MF);
}
- SDNode *Select(SDNode *Node) override;
+ void Select(SDNode *Node) override;
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
@@ -65,8 +65,8 @@ public:
unsigned ConstraintID,
std::vector<SDValue> &OutOps) override;
- SDNode *SelectMLAV64LaneV128(SDNode *N);
- SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
+ bool tryMLAV64LaneV128(SDNode *N);
+ bool tryMULLV64LaneV128(unsigned IntNo, SDNode *N);
bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
@@ -147,28 +147,29 @@ public:
SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
const unsigned SubRegs[]);
- SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
+ void SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
- SDNode *SelectIndexedLoad(SDNode *N, bool &Done);
+ bool tryIndexedLoad(SDNode *N);
- SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
+ void SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
unsigned SubRegIdx);
- SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
+ void SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
unsigned SubRegIdx);
- SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
- SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
+ void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
+ void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
- SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
- SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
- SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
- SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
+ void SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
+ void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
+ void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
+ void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
- SDNode *SelectBitfieldExtractOp(SDNode *N);
- SDNode *SelectBitfieldInsertOp(SDNode *N);
- SDNode *SelectBitfieldInsertInZeroOp(SDNode *N);
+ bool tryBitfieldExtractOp(SDNode *N);
+ bool tryBitfieldExtractOpFromSExt(SDNode *N);
+ bool tryBitfieldInsertOp(SDNode *N);
+ bool tryBitfieldInsertInZeroOp(SDNode *N);
- SDNode *SelectReadRegister(SDNode *N);
- SDNode *SelectWriteRegister(SDNode *N);
+ bool tryReadRegister(SDNode *N);
+ bool tryWriteRegister(SDNode *N);
// Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"
@@ -198,6 +199,9 @@ private:
}
bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
+
+ void SelectCMP_SWAP(SDNode *N);
+
};
} // end anonymous namespace
@@ -328,9 +332,7 @@ static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // It hurts if the value is used at least twice, unless we are optimizing
// for code size.
- if (ForCodeSize || V.hasOneUse())
- return true;
- return false;
+ return ForCodeSize || V.hasOneUse();
}
/// SelectShiftedRegister - Select a "shifted register" operand. If the value
@@ -452,7 +454,7 @@ static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
/// tryMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
-SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
+bool AArch64DAGToDAGISel::tryMLAV64LaneV128(SDNode *N) {
SDLoc dl(N);
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
@@ -467,7 +469,7 @@ SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
if (Op1.getOpcode() != ISD::MUL ||
!checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
LaneIdx))
- return nullptr;
+ return false;
}
SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
@@ -493,10 +495,11 @@ SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
break;
}
- return CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops));
+ return true;
}
-SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
+bool AArch64DAGToDAGISel::tryMULLV64LaneV128(unsigned IntNo, SDNode *N) {
SDLoc dl(N);
SDValue SMULLOp0;
SDValue SMULLOp1;
@@ -504,7 +507,7 @@ SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
LaneIdx))
- return nullptr;
+ return false;
SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
@@ -537,7 +540,8 @@ SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
} else
llvm_unreachable("Unrecognized intrinsic.");
- return CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops));
+ return true;
}
/// Instructions that accept extend modifiers like UXTW expect the register
@@ -610,7 +614,7 @@ static bool isWorthFoldingADDlow(SDValue N) {
// ldar and stlr have much more restrictive addressing modes (just a
// register).
- if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
+ if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getOrdering()))
return false;
}
@@ -687,7 +691,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
const GlobalValue *GV = GAN->getGlobal();
unsigned Alignment = GV->getAlignment();
- Type *Ty = GV->getType()->getElementType();
+ Type *Ty = GV->getValueType();
if (Alignment == 0 && Ty->isSized())
Alignment = DL.getABITypeAlignment(Ty);
@@ -797,10 +801,7 @@ bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
return false;
- if (isWorthFolding(N))
- return true;
-
- return false;
+ return isWorthFolding(N);
}
bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
@@ -1015,8 +1016,8 @@ SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
return SDValue(N, 0);
}
-SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
- unsigned Opc, bool isExt) {
+void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
+ bool isExt) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
@@ -1033,13 +1034,13 @@ SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
Ops.push_back(N->getOperand(1));
Ops.push_back(RegSeq);
Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
- return CurDAG->getMachineNode(Opc, dl, VT, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
}
-SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
+bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
LoadSDNode *LD = cast<LoadSDNode>(N);
if (LD->isUnindexed())
- return nullptr;
+ return false;
EVT VT = LD->getMemoryVT();
EVT DstVT = N->getValueType(0);
ISD::MemIndexedMode AM = LD->getAddressingMode();
@@ -1101,7 +1102,7 @@ SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
} else if (VT.is128BitVector()) {
Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
} else
- return nullptr;
+ return false;
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
@@ -1112,7 +1113,6 @@ SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
MVT::Other, Ops);
// Either way, we're replacing the node, so tell the caller that.
- Done = true;
SDValue LoadedVal = SDValue(Res, 1);
if (InsertTo64) {
SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
@@ -1127,12 +1127,12 @@ SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
ReplaceUses(SDValue(N, 0), LoadedVal);
ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
-
- return nullptr;
+ CurDAG->RemoveDeadNode(N);
+ return true;
}
-SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
- unsigned Opc, unsigned SubRegIdx) {
+void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
+ unsigned SubRegIdx) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
SDValue Chain = N->getOperand(0);
@@ -1149,11 +1149,11 @@ SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
- return nullptr;
+ CurDAG->RemoveDeadNode(N);
}
-SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
- unsigned Opc, unsigned SubRegIdx) {
+void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
+ unsigned Opc, unsigned SubRegIdx) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
SDValue Chain = N->getOperand(0);
@@ -1181,11 +1181,11 @@ SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
// Update the chain
ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
- return nullptr;
+ CurDAG->RemoveDeadNode(N);
}
-SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
- unsigned Opc) {
+void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
SDLoc dl(N);
EVT VT = N->getOperand(2)->getValueType(0);
@@ -1197,11 +1197,11 @@ SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
- return St;
+ ReplaceNode(N, St);
}
-SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
- unsigned Opc) {
+void AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
SDLoc dl(N);
EVT VT = N->getOperand(2)->getValueType(0);
const EVT ResTys[] = {MVT::i64, // Type of the write back register
@@ -1218,7 +1218,7 @@ SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
N->getOperand(0)}; // Chain
SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
- return St;
+ ReplaceNode(N, St);
}
namespace {
@@ -1256,8 +1256,8 @@ static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
V128Reg);
}
-SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
- unsigned Opc) {
+void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
bool Narrow = VT.getSizeInBits() == 64;
@@ -1292,12 +1292,11 @@ SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
}
ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
-
- return Ld;
+ CurDAG->RemoveDeadNode(N);
}
-SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
- unsigned Opc) {
+void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
bool Narrow = VT.getSizeInBits() == 64;
@@ -1348,12 +1347,11 @@ SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
// Update the Chain
ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
-
- return Ld;
+ CurDAG->RemoveDeadNode(N);
}
-SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
- unsigned Opc) {
+void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
SDLoc dl(N);
EVT VT = N->getOperand(2)->getValueType(0);
bool Narrow = VT.getSizeInBits() == 64;
@@ -1379,11 +1377,11 @@ SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
- return St;
+ ReplaceNode(N, St);
}
-SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
- unsigned Opc) {
+void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
SDLoc dl(N);
EVT VT = N->getOperand(2)->getValueType(0);
bool Narrow = VT.getSizeInBits() == 64;
@@ -1414,7 +1412,7 @@ SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
- return St;
+ ReplaceNode(N, St);
}
static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
@@ -1441,25 +1439,25 @@ static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
// form these situations when matching bigger pattern (bitfield insert).
// For unsigned extracts, check for a shift right and mask
- uint64_t And_imm = 0;
- if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
+ uint64_t AndImm = 0;
+ if (!isOpcWithIntImmediate(N, ISD::AND, AndImm))
return false;
const SDNode *Op0 = N->getOperand(0).getNode();
// Because of simplify-demanded-bits in DAGCombine, the mask may have been
// simplified. Try to undo that
- And_imm |= (1 << NumberOfIgnoredLowBits) - 1;
+ AndImm |= (1 << NumberOfIgnoredLowBits) - 1;
// The immediate is a mask of the low bits iff imm & (imm+1) == 0
- if (And_imm & (And_imm + 1))
+ if (AndImm & (AndImm + 1))
return false;
bool ClampMSB = false;
- uint64_t Srl_imm = 0;
+ uint64_t SrlImm = 0;
// Handle the SRL + ANY_EXTEND case.
if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
- isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
+ isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, SrlImm)) {
// Extend the incoming operand of the SRL to 64-bit.
Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
// Make sure to clamp the MSB so that we preserve the semantics of the
@@ -1467,13 +1465,13 @@ static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
ClampMSB = true;
} else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
- Srl_imm)) {
+ SrlImm)) {
// If the shift result was truncated, we can still combine them.
Opd0 = Op0->getOperand(0).getOperand(0);
// Use the type of SRL node.
VT = Opd0->getValueType(0);
- } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
+ } else if (isOpcWithIntImmediate(Op0, ISD::SRL, SrlImm)) {
Opd0 = Op0->getOperand(0);
} else if (BiggerPattern) {
// Let's pretend a 0 shift right has been performed.
@@ -1487,15 +1485,15 @@ static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
// Bail out on large immediates. This happens when no proper
// combining/constant folding was performed.
- if (!BiggerPattern && (Srl_imm <= 0 || Srl_imm >= VT.getSizeInBits())) {
+ if (!BiggerPattern && (SrlImm <= 0 || SrlImm >= VT.getSizeInBits())) {
DEBUG((dbgs() << N
<< ": Found large shift immediate, this should not happen\n"));
return false;
}
- LSB = Srl_imm;
- MSB = Srl_imm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(And_imm)
- : countTrailingOnes<uint64_t>(And_imm)) -
+ LSB = SrlImm;
+ MSB = SrlImm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(AndImm)
+ : countTrailingOnes<uint64_t>(AndImm)) -
1;
if (ClampMSB)
// Since we're moving the extend before the right shift operation, we need
@@ -1508,6 +1506,39 @@ static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
return true;
}
+static bool isBitfieldExtractOpFromSExtInReg(SDNode *N, unsigned &Opc,
+ SDValue &Opd0, unsigned &Immr,
+ unsigned &Imms) {
+ assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
+
+ EVT VT = N->getValueType(0);
+ unsigned BitWidth = VT.getSizeInBits();
+ assert((VT == MVT::i32 || VT == MVT::i64) &&
+ "Type checking must have been done before calling this function");
+
+ SDValue Op = N->getOperand(0);
+ if (Op->getOpcode() == ISD::TRUNCATE) {
+ Op = Op->getOperand(0);
+ VT = Op->getValueType(0);
+ BitWidth = VT.getSizeInBits();
+ }
+
+ uint64_t ShiftImm;
+ if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRL, ShiftImm) &&
+ !isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
+ return false;
+
+ unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
+ if (ShiftImm + Width > BitWidth)
+ return false;
+
+ Opc = (VT == MVT::i32) ? AArch64::SBFMWri : AArch64::SBFMXri;
+ Opd0 = Op.getOperand(0);
+ Immr = ShiftImm;
+ Imms = ShiftImm + Width - 1;
+ return true;
+}
+
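
For the pattern recognized above, the SBFM immediates come out as Immr = ShiftImm and Imms = ShiftImm + Width - 1, i.e. a signed extract of Width bits starting at the shift amount. A minimal sketch checking that arithmetic (sbfx64 is an illustrative emulation of the extract form of SBFM):

#include <cassert>
#include <cstdint>

// SBFM with Imms >= Immr extracts bits [Imms:Immr] and sign-extends:
// sbfx(x, lsb = Immr, width = Imms - Immr + 1).
static int64_t sbfx64(uint64_t X, unsigned Lsb, unsigned Width) {
  return (int64_t)(X << (64 - Lsb - Width)) >> (64 - Width);
}

int main() {
  // sign_extend_inreg(srl(x, 8), i16): Immr = 8, Imms = 8 + 16 - 1 = 23.
  uint64_t X = 0x0000000000F25600;
  assert(sbfx64(X, 8, 16) == (int64_t)(int16_t)(X >> 8));
}
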
static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
SDValue &Opd0, unsigned &LSB,
unsigned &MSB) {
@@ -1522,32 +1553,32 @@ static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
//
// This gets selected into a single UBFM:
//
- // UBFM Value, ShiftImm, BitWide + Srl_imm -1
+  //   UBFM Value, ShiftImm, BitWide + SrlImm - 1
//
if (N->getOpcode() != ISD::SRL)
return false;
- uint64_t And_mask = 0;
- if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
+ uint64_t AndMask = 0;
+ if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, AndMask))
return false;
Opd0 = N->getOperand(0).getOperand(0);
- uint64_t Srl_imm = 0;
- if (!isIntImmediate(N->getOperand(1), Srl_imm))
+ uint64_t SrlImm = 0;
+ if (!isIntImmediate(N->getOperand(1), SrlImm))
return false;
  // Check whether we really have a several-bits extract here.
- unsigned BitWide = 64 - countLeadingOnes(~(And_mask >> Srl_imm));
- if (BitWide && isMask_64(And_mask >> Srl_imm)) {
+ unsigned BitWide = 64 - countLeadingOnes(~(AndMask >> SrlImm));
+ if (BitWide && isMask_64(AndMask >> SrlImm)) {
if (N->getValueType(0) == MVT::i32)
Opc = AArch64::UBFMWri;
else
Opc = AArch64::UBFMXri;
- LSB = Srl_imm;
- MSB = BitWide + Srl_imm - 1;
+ LSB = SrlImm;
+ MSB = BitWide + SrlImm - 1;
return true;
}
@@ -1572,10 +1603,10 @@ static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
return true;
- // we're looking for a shift of a shift
- uint64_t Shl_imm = 0;
- uint64_t Trunc_bits = 0;
- if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
+ // We're looking for a shift of a shift.
+ uint64_t ShlImm = 0;
+ uint64_t TruncBits = 0;
+ if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, ShlImm)) {
Opd0 = N->getOperand(0).getOperand(0);
} else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
@@ -1584,7 +1615,7 @@ static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
// always generate 64bit UBFM. This consistency will help the CSE pass
// later find more redundancy.
Opd0 = N->getOperand(0).getOperand(0);
- Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
+ TruncBits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
VT = Opd0->getValueType(0);
assert(VT == MVT::i64 && "the promoted type should be i64");
} else if (BiggerPattern) {
@@ -1597,21 +1628,21 @@ static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
// Missing combines/constant folding may have left us with strange
// constants.
- if (Shl_imm >= VT.getSizeInBits()) {
+ if (ShlImm >= VT.getSizeInBits()) {
DEBUG((dbgs() << N
<< ": Found large shift immediate, this should not happen\n"));
return false;
}
- uint64_t Srl_imm = 0;
- if (!isIntImmediate(N->getOperand(1), Srl_imm))
+ uint64_t SrlImm = 0;
+ if (!isIntImmediate(N->getOperand(1), SrlImm))
return false;
- assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
+ assert(SrlImm > 0 && SrlImm < VT.getSizeInBits() &&
"bad amount in shift node!");
- int immr = Srl_imm - Shl_imm;
+ int immr = SrlImm - ShlImm;
Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
- Imms = VT.getSizeInBits() - Shl_imm - Trunc_bits - 1;
+ Imms = VT.getSizeInBits() - ShlImm - TruncBits - 1;
// SRA requires a signed extraction
if (VT == MVT::i32)
Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
@@ -1620,6 +1651,30 @@ static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
return true;
}
+bool AArch64DAGToDAGISel::tryBitfieldExtractOpFromSExt(SDNode *N) {
+ assert(N->getOpcode() == ISD::SIGN_EXTEND);
+
+ EVT VT = N->getValueType(0);
+ EVT NarrowVT = N->getOperand(0)->getValueType(0);
+ if (VT != MVT::i64 || NarrowVT != MVT::i32)
+ return false;
+
+ uint64_t ShiftImm;
+ SDValue Op = N->getOperand(0);
+ if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm))
+ return false;
+
+ SDLoc dl(N);
+ // Extend the incoming operand of the shift to 64-bits.
+ SDValue Opd0 = Widen(CurDAG, Op.getOperand(0));
+ unsigned Immr = ShiftImm;
+ unsigned Imms = NarrowVT.getSizeInBits() - 1;
+ SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
+ CurDAG->getTargetConstant(Imms, dl, VT)};
+ CurDAG->SelectNodeTo(N, AArch64::SBFMXri, VT, Ops);
+ return true;
+}
+
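
tryBitfieldExtractOpFromSExt above folds the whole sext(sra w, #c) chain into a single SBFMXri with Immr = c and Imms = 31, a signed extract of bits [31:c] of the widened input. A minimal sketch of the equivalence (arithmetic right shift of a negative value is assumed, as on all mainstream compilers):

#include <cassert>
#include <cstdint>

int main() {
  // sext_i64(sra_i32(w, 12)) == SBFMXri x, #12, #31: a signed extract of
  // bits [31:12] of the 64-bit widened input.
  int32_t W = (int32_t)0x80001234;
  int64_t Folded = (int64_t)((uint64_t)(uint32_t)W << 32) >> (32 + 12);
  assert(Folded == (int64_t)(W >> 12));
}
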
static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
SDValue &Opd0, unsigned &Immr, unsigned &Imms,
unsigned NumberOfIgnoredLowBits = 0,
@@ -1638,6 +1693,9 @@ static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
case ISD::SRL:
case ISD::SRA:
return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);
+
+ case ISD::SIGN_EXTEND_INREG:
+ return isBitfieldExtractOpFromSExtInReg(N, Opc, Opd0, Immr, Imms);
}
unsigned NOpc = N->getMachineOpcode();
@@ -1658,11 +1716,11 @@ static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
return false;
}
-SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
+bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) {
unsigned Opc, Immr, Imms;
SDValue Opd0;
if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
- return nullptr;
+ return false;
EVT VT = N->getValueType(0);
SDLoc dl(N);
@@ -1675,22 +1733,22 @@ SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
- MachineSDNode *Node =
- CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i32,
- SDValue(BFM, 0), SubReg);
- return Node;
+ ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
+ MVT::i32, SDValue(BFM, 0), SubReg));
+ return true;
}
SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
CurDAG->getTargetConstant(Imms, dl, VT)};
- return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
+ CurDAG->SelectNodeTo(N, Opc, VT, Ops);
+ return true;
}
/// Does DstMask form a complementary pair with the mask provided by
/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
/// this asks whether DstMask zeroes precisely those bits that will be set by
/// the other half.
-static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
+static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
unsigned NumberOfIgnoredHighBits, EVT VT) {
assert((VT == MVT::i32 || VT == MVT::i64) &&
"i32 or i64 mask type expected!");
@@ -1851,6 +1909,20 @@ static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
case AArch64::BFMWri:
case AArch64::BFMXri:
return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
+
+ case AArch64::STRBBui:
+ case AArch64::STURBBi:
+ if (UserNode->getOperand(0) != Orig)
+ return;
+ UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xff);
+ return;
+
+ case AArch64::STRHHui:
+ case AArch64::STURHHi:
+ if (UserNode->getOperand(0) != Orig)
+ return;
+ UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xffff);
+ return;
}
}
@@ -1963,36 +2035,129 @@ static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
return true;
}
-// Given a OR operation, check if we have the following pattern
-// ubfm c, b, imm, imm2 (or something that does the same jobs, see
-// isBitfieldExtractOp)
-// d = e & mask2 ; where mask is a binary sequence of 1..10..0 and
-// countTrailingZeros(mask2) == imm2 - imm + 1
-// f = d | c
-// if yes, given reference arguments will be update so that one can replace
-// the OR instruction with:
-// f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
-static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
- SDValue &Src, unsigned &ImmR,
- unsigned &ImmS, const APInt &UsefulBits,
- SelectionDAG *CurDAG) {
+static bool isShiftedMask(uint64_t Mask, EVT VT) {
+ assert(VT == MVT::i32 || VT == MVT::i64);
+ if (VT == MVT::i32)
+ return isShiftedMask_32(Mask);
+ return isShiftedMask_64(Mask);
+}
+
+// Generate a BFI/BFXIL from 'or (and X, MaskImm), OrImm' iff the value being
+// inserted only sets known zero bits.
+static bool tryBitfieldInsertOpFromOrAndImm(SDNode *N, SelectionDAG *CurDAG) {
  assert(N->getOpcode() == ISD::OR && "Expected an OR operation");
- // Set Opc
EVT VT = N->getValueType(0);
- if (VT == MVT::i32)
- Opc = AArch64::BFMWri;
- else if (VT == MVT::i64)
- Opc = AArch64::BFMXri;
- else
+ if (VT != MVT::i32 && VT != MVT::i64)
+ return false;
+
+ unsigned BitWidth = VT.getSizeInBits();
+
+ uint64_t OrImm;
+ if (!isOpcWithIntImmediate(N, ISD::OR, OrImm))
+ return false;
+
+ // Skip this transformation if the ORR immediate can be encoded in the ORR.
+ // Otherwise, we'll trade an AND+ORR for ORR+BFI/BFXIL, which is most likely
+ // performance neutral.
+ if (AArch64_AM::isLogicalImmediate(OrImm, BitWidth))
return false;
+ uint64_t MaskImm;
+ SDValue And = N->getOperand(0);
+ // Must be a single use AND with an immediate operand.
+ if (!And.hasOneUse() ||
+ !isOpcWithIntImmediate(And.getNode(), ISD::AND, MaskImm))
+ return false;
+
+  // Compute the Known Zero for the AND, as this allows us to catch more general
+ // cases than just looking for AND with imm.
+ APInt KnownZero, KnownOne;
+ CurDAG->computeKnownBits(And, KnownZero, KnownOne);
+
+ // Non-zero in the sense that they're not provably zero, which is the key
+ // point if we want to use this value.
+ uint64_t NotKnownZero = (~KnownZero).getZExtValue();
+
+ // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
+ if (!isShiftedMask(KnownZero.getZExtValue(), VT))
+ return false;
+
+ // The bits being inserted must only set those bits that are known to be zero.
+ if ((OrImm & NotKnownZero) != 0) {
+ // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
+ // currently handle this case.
+ return false;
+ }
+
+ // BFI/BFXIL dst, src, #lsb, #width.
+ int LSB = countTrailingOnes(NotKnownZero);
+ int Width = BitWidth - APInt(BitWidth, NotKnownZero).countPopulation();
+
+ // BFI/BFXIL is an alias of BFM, so translate to BFM operands.
+ unsigned ImmR = (BitWidth - LSB) % BitWidth;
+ unsigned ImmS = Width - 1;
+
+  // If we're creating a BFI instruction, avoid cases where we need more
+ // instructions to materialize the BFI constant as compared to the original
+ // ORR. A BFXIL will use the same constant as the original ORR, so the code
+ // should be no worse in this case.
+ bool IsBFI = LSB != 0;
+ uint64_t BFIImm = OrImm >> LSB;
+ if (IsBFI && !AArch64_AM::isLogicalImmediate(BFIImm, BitWidth)) {
+ // We have a BFI instruction and we know the constant can't be materialized
+    // with an ORR-immediate using the zero register.
+ unsigned OrChunks = 0, BFIChunks = 0;
+ for (unsigned Shift = 0; Shift < BitWidth; Shift += 16) {
+ if (((OrImm >> Shift) & 0xFFFF) != 0)
+ ++OrChunks;
+ if (((BFIImm >> Shift) & 0xFFFF) != 0)
+ ++BFIChunks;
+ }
+ if (BFIChunks > OrChunks)
+ return false;
+ }
+
+ // Materialize the constant to be inserted.
+ SDLoc DL(N);
+ unsigned MOVIOpc = VT == MVT::i32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;
+ SDNode *MOVI = CurDAG->getMachineNode(
+ MOVIOpc, DL, VT, CurDAG->getTargetConstant(BFIImm, DL, VT));
+
+ // Create the BFI/BFXIL instruction.
+ SDValue Ops[] = {And.getOperand(0), SDValue(MOVI, 0),
+ CurDAG->getTargetConstant(ImmR, DL, VT),
+ CurDAG->getTargetConstant(ImmS, DL, VT)};
+ unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
+ CurDAG->SelectNodeTo(N, Opc, VT, Ops);
+ return true;
+}
+
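
A hand-worked instance of tryBitfieldInsertOpFromOrAndImm: for or (and x, 0xFFFF00FF), 0x5500, the AND's KnownZero is the shifted mask 0x0000FF00, so LSB = 8, Width = 8, BFIImm = 0x55, and the BFM operands are ImmR = (32 - 8) % 32 = 24, ImmS = 7; 0x5500 itself is not an encodable logical immediate, so the rewrite pays off. A minimal sketch (bfi32 is an illustrative emulation of the BFI alias):

#include <cassert>
#include <cstdint>

// BFI dst, src, #lsb, #width: replace dst[lsb+width-1:lsb] with src[width-1:0].
static uint32_t bfi32(uint32_t Dst, uint32_t Src, unsigned Lsb, unsigned Width) {
  uint32_t FieldMask = (uint32_t)(((UINT64_C(1) << Width) - 1) << Lsb);
  return (Dst & ~FieldMask) | ((Src << Lsb) & FieldMask);
}

int main() {
  uint32_t X = 0xDEADBEEF;
  // MOVi32imm 0x55, then BFI x, #8, #8 reproduces the or/and pair.
  assert(bfi32(X, 0x55, 8, 8) == ((X & 0xFFFF00FF) | 0x5500));
}
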
+static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
+ SelectionDAG *CurDAG) {
+  assert(N->getOpcode() == ISD::OR && "Expected an OR operation");
+
+ EVT VT = N->getValueType(0);
+ if (VT != MVT::i32 && VT != MVT::i64)
+ return false;
+
+ unsigned BitWidth = VT.getSizeInBits();
+
// Because of simplify-demanded-bits in DAGCombine, involved masks may not
// have the expected shape. Try to undo that.
unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
+  // Given an OR operation, check whether we have the following pattern:
+  // ubfm c, b, imm, imm2 (or something that does the same job, see
+  // isBitfieldExtractOp)
+  // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
+  // countTrailingZeros(mask2) == imm2 - imm + 1
+  // f = d | c
+  // If yes, replace the OR instruction with:
+  // f = BFM Opd0, Opd1, LSB, MSB ; where LSB = imm, and MSB = imm2
+
// OR is commutative, check all combinations of operand order and values of
// BiggerPattern, i.e.
// Opd0, Opd1, BiggerPattern=false
@@ -2004,8 +2169,11 @@ static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
// and/or inserting fewer extra instructions.
for (int I = 0; I < 4; ++I) {
+ SDValue Dst, Src;
+ unsigned ImmR, ImmS;
bool BiggerPattern = I / 2;
- SDNode *OrOpd0 = N->getOperand(I % 2).getNode();
+ SDValue OrOpd0Val = N->getOperand(I % 2);
+ SDNode *OrOpd0 = OrOpd0Val.getNode();
SDValue OrOpd1Val = N->getOperand((I + 1) % 2);
SDNode *OrOpd1 = OrOpd1Val.getNode();
@@ -2030,10 +2198,10 @@ static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
// If the mask on the insertee is correct, we have a BFXIL operation. We
// can share the ImmR and ImmS values from the already-computed UBFM.
- } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0),
+ } else if (isBitfieldPositioningOp(CurDAG, OrOpd0Val,
BiggerPattern,
Src, DstLSB, Width)) {
- ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
+ ImmR = (BitWidth - DstLSB) % BitWidth;
ImmS = Width - 1;
} else
continue;
@@ -2069,60 +2237,98 @@ static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
Dst = OrOpd1Val;
    // Both parts match.
+ SDLoc DL(N);
+ SDValue Ops[] = {Dst, Src, CurDAG->getTargetConstant(ImmR, DL, VT),
+ CurDAG->getTargetConstant(ImmS, DL, VT)};
+ unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
+ CurDAG->SelectNodeTo(N, Opc, VT, Ops);
+ return true;
+ }
+
+ // Generate a BFXIL from 'or (and X, Mask0Imm), (and Y, Mask1Imm)' iff
+ // Mask0Imm and ~Mask1Imm are equivalent and one of the MaskImms is a shifted
+ // mask (e.g., 0x000ffff0).
+ uint64_t Mask0Imm, Mask1Imm;
+ SDValue And0 = N->getOperand(0);
+ SDValue And1 = N->getOperand(1);
+ if (And0.hasOneUse() && And1.hasOneUse() &&
+ isOpcWithIntImmediate(And0.getNode(), ISD::AND, Mask0Imm) &&
+ isOpcWithIntImmediate(And1.getNode(), ISD::AND, Mask1Imm) &&
+ APInt(BitWidth, Mask0Imm) == ~APInt(BitWidth, Mask1Imm) &&
+ (isShiftedMask(Mask0Imm, VT) || isShiftedMask(Mask1Imm, VT))) {
+
+ // ORR is commutative, so canonicalize to the form 'or (and X, Mask0Imm),
+ // (and Y, Mask1Imm)' where Mask1Imm is the shifted mask masking off the
+ // bits to be inserted.
+ if (isShiftedMask(Mask0Imm, VT)) {
+ std::swap(And0, And1);
+ std::swap(Mask0Imm, Mask1Imm);
+ }
+
+ SDValue Src = And1->getOperand(0);
+ SDValue Dst = And0->getOperand(0);
+ unsigned LSB = countTrailingZeros(Mask1Imm);
+ int Width = BitWidth - APInt(BitWidth, Mask0Imm).countPopulation();
+
+ // The BFXIL inserts the low-order bits from a source register, so right
+ // shift the needed bits into place.
+ SDLoc DL(N);
+ unsigned ShiftOpc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
+ SDNode *LSR = CurDAG->getMachineNode(
+ ShiftOpc, DL, VT, Src, CurDAG->getTargetConstant(LSB, DL, VT),
+ CurDAG->getTargetConstant(BitWidth - 1, DL, VT));
+
+ // BFXIL is an alias of BFM, so translate to BFM operands.
+ unsigned ImmR = (BitWidth - LSB) % BitWidth;
+ unsigned ImmS = Width - 1;
+
+ // Create the BFXIL instruction.
+ SDValue Ops[] = {Dst, SDValue(LSR, 0),
+ CurDAG->getTargetConstant(ImmR, DL, VT),
+ CurDAG->getTargetConstant(ImmS, DL, VT)};
+ unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
+ CurDAG->SelectNodeTo(N, Opc, VT, Ops);
return true;
}
return false;
}
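
Likewise for the complementary-mask case just above: with Mask0Imm = 0xFFF0000F and Mask1Imm = 0x000FFFF0, LSB = 4 and Width = 16, so Y is shifted right by 4 and inserted via BFM with ImmR = 28, ImmS = 15 (the insert form, 16 bits at bit 4). A minimal sketch reusing the same illustrative bfi32 emulation:

#include <cassert>
#include <cstdint>

static uint32_t bfi32(uint32_t Dst, uint32_t Src, unsigned Lsb, unsigned Width) {
  uint32_t FieldMask = (uint32_t)(((UINT64_C(1) << Width) - 1) << Lsb);
  return (Dst & ~FieldMask) | ((Src << Lsb) & FieldMask);
}

int main() {
  uint32_t X = 0x12345678, Y = 0x9ABCDEF0;
  // LSR tmp, Y, #4; then insert tmp[15:0] into X at bit 4.
  assert(bfi32(X, Y >> 4, 4, 16) == ((X & 0xFFF0000F) | (Y & 0x000FFFF0)));
}
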
-SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
+bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {
if (N->getOpcode() != ISD::OR)
- return nullptr;
+ return false;
- unsigned Opc;
- unsigned LSB, MSB;
- SDValue Opd0, Opd1;
- EVT VT = N->getValueType(0);
APInt NUsefulBits;
getUsefulBits(SDValue(N, 0), NUsefulBits);
// If all bits are not useful, just return UNDEF.
- if (!NUsefulBits)
- return CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, VT);
+ if (!NUsefulBits) {
+ CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
+ return true;
+ }
- if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, NUsefulBits,
- CurDAG))
- return nullptr;
+ if (tryBitfieldInsertOpFromOr(N, NUsefulBits, CurDAG))
+ return true;
- SDLoc dl(N);
- SDValue Ops[] = { Opd0,
- Opd1,
- CurDAG->getTargetConstant(LSB, dl, VT),
- CurDAG->getTargetConstant(MSB, dl, VT) };
- return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
+ return tryBitfieldInsertOpFromOrAndImm(N, CurDAG);
}
/// tryBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
/// equivalent of a left shift by a constant amount followed by an and masking
/// out a contiguous set of bits.
-SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertInZeroOp(SDNode *N) {
+bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {
if (N->getOpcode() != ISD::AND)
- return nullptr;
+ return false;
EVT VT = N->getValueType(0);
- unsigned Opc;
- if (VT == MVT::i32)
- Opc = AArch64::UBFMWri;
- else if (VT == MVT::i64)
- Opc = AArch64::UBFMXri;
- else
- return nullptr;
+ if (VT != MVT::i32 && VT != MVT::i64)
+ return false;
SDValue Op0;
int DstLSB, Width;
if (!isBitfieldPositioningOp(CurDAG, SDValue(N, 0), /*BiggerPattern=*/false,
Op0, DstLSB, Width))
- return nullptr;
+ return false;
// ImmR is the rotate right amount.
unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
@@ -2132,7 +2338,9 @@ SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertInZeroOp(SDNode *N) {
SDLoc DL(N);
SDValue Ops[] = {Op0, CurDAG->getTargetConstant(ImmR, DL, VT),
CurDAG->getTargetConstant(ImmS, DL, VT)};
- return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
+ unsigned Opc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
+ CurDAG->SelectNodeTo(N, Opc, VT, Ops);
+ return true;
}
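
A concrete instance of the UBFIZ match: (x << 8) & 0x00FFFF00 positions a 16-bit field at bit 8, giving DstLSB = 8, Width = 16 and UBFM immediates ImmR = (32 - 8) % 32 = 24, ImmS = 15. A minimal sketch:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 0xCAFEBABE;
  uint32_t Ubfiz = (X & 0xFFFF) << 8; // UBFIZ w, x, #8, #16
  assert(Ubfiz == ((X << 8) & 0x00FFFF00));
}
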
bool
@@ -2214,62 +2422,68 @@ static int getIntOperandFromRegisterString(StringRef RegString) {
// register string argument is either of the form detailed in the ALCE (the
// form described in getIntOperandFromRegisterString) or is a named register
// known by the MRS SysReg mapper.
-SDNode *AArch64DAGToDAGISel::SelectReadRegister(SDNode *N) {
+bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
SDLoc DL(N);
int Reg = getIntOperandFromRegisterString(RegString->getString());
- if (Reg != -1)
- return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
- MVT::Other,
- CurDAG->getTargetConstant(Reg, DL, MVT::i32),
- N->getOperand(0));
+ if (Reg != -1) {
+ ReplaceNode(N, CurDAG->getMachineNode(
+ AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
+ CurDAG->getTargetConstant(Reg, DL, MVT::i32),
+ N->getOperand(0)));
+ return true;
+ }
// Use the sysreg mapper to map the remaining possible strings to the
// value for the register to be used for the instruction operand.
- AArch64SysReg::MRSMapper mapper;
- bool IsValidSpecialReg;
- Reg = mapper.fromString(RegString->getString(),
- Subtarget->getFeatureBits(),
- IsValidSpecialReg);
- if (IsValidSpecialReg)
- return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
- MVT::Other,
- CurDAG->getTargetConstant(Reg, DL, MVT::i32),
- N->getOperand(0));
+ auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
+ if (TheReg && TheReg->Readable &&
+ TheReg->haveFeatures(Subtarget->getFeatureBits()))
+ Reg = TheReg->Encoding;
+ else
+ Reg = AArch64SysReg::parseGenericRegister(RegString->getString());
+
+ if (Reg != -1) {
+ ReplaceNode(N, CurDAG->getMachineNode(
+ AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
+ CurDAG->getTargetConstant(Reg, DL, MVT::i32),
+ N->getOperand(0)));
+ return true;
+ }
- return nullptr;
+ return false;
}
// Lower the write_register intrinsic to an MSR instruction node if the special
// register string argument is either of the form detailed in the ALCE (the
// form described in getIntOperandFromRegisterString) or is a named register
// known by the MSR SysReg mapper.
-SDNode *AArch64DAGToDAGISel::SelectWriteRegister(SDNode *N) {
+bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
SDLoc DL(N);
int Reg = getIntOperandFromRegisterString(RegString->getString());
- if (Reg != -1)
- return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
+ if (Reg != -1) {
+ ReplaceNode(
+ N, CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
CurDAG->getTargetConstant(Reg, DL, MVT::i32),
- N->getOperand(2), N->getOperand(0));
+ N->getOperand(2), N->getOperand(0)));
+ return true;
+ }
// Check if the register was one of those allowed as the pstatefield value in
// the MSR (immediate) instruction. To accept the values allowed in the
// pstatefield for the MSR (immediate) instruction, we also require that an
// immediate value has been provided as an argument; we know that this is
// the case as it has been ensured by semantic checking.
- AArch64PState::PStateMapper PMapper;
- bool IsValidSpecialReg;
- Reg = PMapper.fromString(RegString->getString(),
- Subtarget->getFeatureBits(),
- IsValidSpecialReg);
- if (IsValidSpecialReg) {
+  auto PMapper = AArch64PState::lookupPStateByName(RegString->getString());
+ if (PMapper) {
assert (isa<ConstantSDNode>(N->getOperand(2))
&& "Expected a constant integer expression.");
+ unsigned Reg = PMapper->Encoding;
uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
unsigned State;
if (Reg == AArch64PState::PAN || Reg == AArch64PState::UAO) {
@@ -2279,29 +2493,66 @@ SDNode *AArch64DAGToDAGISel::SelectWriteRegister(SDNode *N) {
assert(Immed < 16 && "Bad imm");
State = AArch64::MSRpstateImm4;
}
- return CurDAG->getMachineNode(State, DL, MVT::Other,
- CurDAG->getTargetConstant(Reg, DL, MVT::i32),
- CurDAG->getTargetConstant(Immed, DL, MVT::i16),
- N->getOperand(0));
+ ReplaceNode(N, CurDAG->getMachineNode(
+ State, DL, MVT::Other,
+ CurDAG->getTargetConstant(Reg, DL, MVT::i32),
+ CurDAG->getTargetConstant(Immed, DL, MVT::i16),
+ N->getOperand(0)));
+ return true;
}
// Use the sysreg mapper to attempt to map the remaining possible strings
// to the value for the register to be used for the MSR (register)
// instruction operand.
- AArch64SysReg::MSRMapper Mapper;
- Reg = Mapper.fromString(RegString->getString(),
- Subtarget->getFeatureBits(),
- IsValidSpecialReg);
+ auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
+ if (TheReg && TheReg->Writeable &&
+ TheReg->haveFeatures(Subtarget->getFeatureBits()))
+ Reg = TheReg->Encoding;
+ else
+ Reg = AArch64SysReg::parseGenericRegister(RegString->getString());
+ if (Reg != -1) {
+ ReplaceNode(N, CurDAG->getMachineNode(
+ AArch64::MSR, DL, MVT::Other,
+ CurDAG->getTargetConstant(Reg, DL, MVT::i32),
+ N->getOperand(2), N->getOperand(0)));
+ return true;
+ }
- if (IsValidSpecialReg)
- return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
- CurDAG->getTargetConstant(Reg, DL, MVT::i32),
- N->getOperand(2), N->getOperand(0));
+ return false;
+}
+
+/// We've got special pseudo-instructions for these operations.
+void AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
+ unsigned Opcode;
+ EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();
+ if (MemTy == MVT::i8)
+ Opcode = AArch64::CMP_SWAP_8;
+ else if (MemTy == MVT::i16)
+ Opcode = AArch64::CMP_SWAP_16;
+ else if (MemTy == MVT::i32)
+ Opcode = AArch64::CMP_SWAP_32;
+ else if (MemTy == MVT::i64)
+ Opcode = AArch64::CMP_SWAP_64;
+ else
+ llvm_unreachable("Unknown AtomicCmpSwap type");
- return nullptr;
+ MVT RegTy = MemTy == MVT::i64 ? MVT::i64 : MVT::i32;
+ SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
+ N->getOperand(0)};
+ SDNode *CmpSwap = CurDAG->getMachineNode(
+ Opcode, SDLoc(N),
+ CurDAG->getVTList(RegTy, MVT::i32, MVT::Other), Ops);
+
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
+ cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);
+
+ ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
+ ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
+ CurDAG->RemoveDeadNode(N);
}
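
The CMP_SWAP_* pseudos keep the whole compare-and-swap atomic through register allocation; they are expanded after regalloc (in AArch64ExpandPseudoInsts.cpp) into a load-exclusive/store-exclusive loop. A rough standalone analogue of what the CMP_SWAP_32 node computes, assuming result 0 is the observed value and result 1 a success/status indication (the helper and its bool flag are illustrative; the real pseudo's second result is an i32 status):

#include <atomic>
#include <cassert>
#include <cstdint>

// Atomically store New if *A == Desired; return the value observed.
static uint32_t cmpSwap32(std::atomic<uint32_t> &A, uint32_t Desired,
                          uint32_t New, bool &Success) {
  uint32_t Expected = Desired;
  Success = A.compare_exchange_strong(Expected, New);
  return Expected; // old value, whether or not the swap happened
}

int main() {
  std::atomic<uint32_t> V{7};
  bool Ok;
  assert(cmpSwap32(V, 7, 9, Ok) == 7 && Ok && V.load() == 9);
  assert(cmpSwap32(V, 7, 1, Ok) == 9 && !Ok && V.load() == 9);
}
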
-SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
+void AArch64DAGToDAGISel::Select(SDNode *Node) {
// Dump information about the Node being selected
DEBUG(errs() << "Selecting: ");
DEBUG(Node->dump(CurDAG));
@@ -2311,54 +2562,61 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
if (Node->isMachineOpcode()) {
DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
- return nullptr;
+ return;
}
  // A few custom selection cases.
- SDNode *ResNode = nullptr;
EVT VT = Node->getValueType(0);
switch (Node->getOpcode()) {
default:
break;
+ case ISD::ATOMIC_CMP_SWAP:
+ SelectCMP_SWAP(Node);
+ return;
+
case ISD::READ_REGISTER:
- if (SDNode *Res = SelectReadRegister(Node))
- return Res;
+ if (tryReadRegister(Node))
+ return;
break;
case ISD::WRITE_REGISTER:
- if (SDNode *Res = SelectWriteRegister(Node))
- return Res;
+ if (tryWriteRegister(Node))
+ return;
break;
case ISD::ADD:
- if (SDNode *I = SelectMLAV64LaneV128(Node))
- return I;
+ if (tryMLAV64LaneV128(Node))
+ return;
break;
case ISD::LOAD: {
// Try to select as an indexed load. Fall through to normal processing
// if we can't.
- bool Done = false;
- SDNode *I = SelectIndexedLoad(Node, Done);
- if (Done)
- return I;
+ if (tryIndexedLoad(Node))
+ return;
break;
}
case ISD::SRL:
case ISD::AND:
case ISD::SRA:
- if (SDNode *I = SelectBitfieldExtractOp(Node))
- return I;
- if (SDNode *I = SelectBitfieldInsertInZeroOp(Node))
- return I;
+ case ISD::SIGN_EXTEND_INREG:
+ if (tryBitfieldExtractOp(Node))
+ return;
+ if (tryBitfieldInsertInZeroOp(Node))
+ return;
+ break;
+
+ case ISD::SIGN_EXTEND:
+ if (tryBitfieldExtractOpFromSExt(Node))
+ return;
break;
case ISD::OR:
- if (SDNode *I = SelectBitfieldInsertOp(Node))
- return I;
+ if (tryBitfieldInsertOp(Node))
+ return;
break;
case ISD::EXTRACT_VECTOR_ELT: {
@@ -2401,19 +2659,25 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
DEBUG(Extract->dumpr(CurDAG));
DEBUG(dbgs() << "\n");
- return Extract.getNode();
+ ReplaceNode(Node, Extract.getNode());
+ return;
}
case ISD::Constant: {
// Materialize zero constants as copies from WZR/XZR. This allows
// the coalescer to propagate these into other instructions.
ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
if (ConstNode->isNullValue()) {
- if (VT == MVT::i32)
- return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
- AArch64::WZR, MVT::i32).getNode();
- else if (VT == MVT::i64)
- return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
- AArch64::XZR, MVT::i64).getNode();
+ if (VT == MVT::i32) {
+ SDValue New = CurDAG->getCopyFromReg(
+ CurDAG->getEntryNode(), SDLoc(Node), AArch64::WZR, MVT::i32);
+ ReplaceNode(Node, New.getNode());
+ return;
+ } else if (VT == MVT::i64) {
+ SDValue New = CurDAG->getCopyFromReg(
+ CurDAG->getEntryNode(), SDLoc(Node), AArch64::XZR, MVT::i64);
+ ReplaceNode(Node, New.getNode());
+ return;
+ }
}
break;
}
@@ -2428,7 +2692,8 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
SDLoc DL(Node);
SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
- return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
+ CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
+ return;
}
case ISD::INTRINSIC_W_CHAIN: {
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
@@ -2450,7 +2715,8 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
- return Ld;
+ ReplaceNode(Node, Ld);
+ return;
}
case Intrinsic::aarch64_stlxp:
case Intrinsic::aarch64_stxp: {
@@ -2471,208 +2737,305 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
- return St;
+ ReplaceNode(Node, St);
+ return;
}
case Intrinsic::aarch64_neon_ld1x2:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld1x3:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld1x4:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
+ return;
+ }
break;
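
The ld1x2/ld1x3/ld1x4 cases above pick the LD1 multi-register forms, which fill consecutive registers with consecutive memory, while the ld2/ld3/ld4 cases that follow pick the de-interleaving LD2/LD3/LD4 forms. A minimal sketch of the difference as seen from ACLE intrinsics (assuming a recent clang targeting AArch64; the function names here are illustrative, not from this file):

    #include <arm_neon.h>

    // LD1Twov16b: {v0.16b, v1.16b} receive 32 consecutive bytes, in order.
    uint8x16x2_t load_contiguous(const uint8_t *p) {
      return vld1q_u8_x2(p);   // lowers via llvm.aarch64.neon.ld1x2
    }

    // LD2Twov16b: even bytes go to v0.16b, odd bytes to v1.16b.
    uint8x16x2_t load_deinterleaved(const uint8_t *p) {
      return vld2q_u8(p);      // lowers via llvm.aarch64.neon.ld2
    }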
case Intrinsic::aarch64_neon_ld2:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld3:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld4:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld2r:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld3r:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld4r:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
+ return;
+ }
break;
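
The ld2r/ld3r/ld4r cases select the load-and-replicate forms (LD2R and friends), which read one element per register and broadcast it to every lane; these back the vldN_dup intrinsic family. A small sketch, again assuming clang's arm_neon.h and an illustrative function name:

    #include <arm_neon.h>

    // LD2Rv8b: reads two bytes, replicating byte 0 across v0.8b and
    // byte 1 across v1.8b.
    uint8x8x2_t load_dup(const uint8_t *p) {
      return vld2_dup_u8(p);   // lowers via llvm.aarch64.neon.ld2r
    }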
case Intrinsic::aarch64_neon_ld2lane:
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectLoadLane(Node, 2, AArch64::LD2i8);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectLoadLane(Node, 2, AArch64::LD2i16);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectLoadLane(Node, 2, AArch64::LD2i32);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectLoadLane(Node, 2, AArch64::LD2i64);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectLoadLane(Node, 2, AArch64::LD2i8);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectLoadLane(Node, 2, AArch64::LD2i16);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectLoadLane(Node, 2, AArch64::LD2i32);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectLoadLane(Node, 2, AArch64::LD2i64);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld3lane:
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectLoadLane(Node, 3, AArch64::LD3i8);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectLoadLane(Node, 3, AArch64::LD3i16);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectLoadLane(Node, 3, AArch64::LD3i32);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectLoadLane(Node, 3, AArch64::LD3i64);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectLoadLane(Node, 3, AArch64::LD3i8);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectLoadLane(Node, 3, AArch64::LD3i16);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectLoadLane(Node, 3, AArch64::LD3i32);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectLoadLane(Node, 3, AArch64::LD3i64);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld4lane:
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectLoadLane(Node, 4, AArch64::LD4i8);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectLoadLane(Node, 4, AArch64::LD4i16);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectLoadLane(Node, 4, AArch64::LD4i32);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectLoadLane(Node, 4, AArch64::LD4i64);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectLoadLane(Node, 4, AArch64::LD4i8);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectLoadLane(Node, 4, AArch64::LD4i16);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectLoadLane(Node, 4, AArch64::LD4i32);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectLoadLane(Node, 4, AArch64::LD4i64);
+ return;
+ }
break;
}
} break;
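
Every hunk in this file applies the same mechanical rewrite: in release_39, SelectionDAGISel::Select returns void instead of SDNode *, so each handler replaces the node itself and the case exits with a bare return. Schematically (condensed from the hunks above, not verbatim code):

    // release_38:  SDNode *AArch64DAGToDAGISel::Select(SDNode *Node);
    // release_39:  void    AArch64DAGToDAGISel::Select(SDNode *Node);

    // Before: hand the replacement node back to the caller.
    if (VT == MVT::v8i8)
      return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);

    // After: the helper performs the replacement internally; the case
    // just signals "handled" by returning.
    if (VT == MVT::v8i8) {
      SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
      return;
    }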
@@ -2682,33 +3045,39 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
default:
break;
case Intrinsic::aarch64_neon_tbl2:
- return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBLv8i8Two
- : AArch64::TBLv16i8Two,
- false);
+ SelectTable(Node, 2,
+ VT == MVT::v8i8 ? AArch64::TBLv8i8Two : AArch64::TBLv16i8Two,
+ false);
+ return;
case Intrinsic::aarch64_neon_tbl3:
- return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
- : AArch64::TBLv16i8Three,
- false);
+ SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
+ : AArch64::TBLv16i8Three,
+ false);
+ return;
case Intrinsic::aarch64_neon_tbl4:
- return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
- : AArch64::TBLv16i8Four,
- false);
+ SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
+ : AArch64::TBLv16i8Four,
+ false);
+ return;
case Intrinsic::aarch64_neon_tbx2:
- return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBXv8i8Two
- : AArch64::TBXv16i8Two,
- true);
+ SelectTable(Node, 2,
+ VT == MVT::v8i8 ? AArch64::TBXv8i8Two : AArch64::TBXv16i8Two,
+ true);
+ return;
case Intrinsic::aarch64_neon_tbx3:
- return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
- : AArch64::TBXv16i8Three,
- true);
+ SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
+ : AArch64::TBXv16i8Three,
+ true);
+ return;
case Intrinsic::aarch64_neon_tbx4:
- return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
- : AArch64::TBXv16i8Four,
- true);
+ SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
+ : AArch64::TBXv16i8Four,
+ true);
+ return;
case Intrinsic::aarch64_neon_smull:
case Intrinsic::aarch64_neon_umull:
- if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
- return N;
+ if (tryMULLV64LaneV128(IntNo, Node))
+ return;
break;
}
break;
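
A related rename shows up in this hunk: selection routines that can fail, such as SelectMULLV64LaneV128, became bool-returning try* helpers (tryMULLV64LaneV128). On false the case breaks instead of returning, so the node falls through to the TableGen-generated matcher. Only the call-site contract is visible in the diff; a sketch of what it implies:

    // Returns true iff a lane-indexed [SU]MULL pattern was selected and
    // Node was replaced; on false the caller breaks out of the switch
    // and default instruction selection handles the node.
    bool AArch64DAGToDAGISel::tryMULLV64LaneV128(unsigned IntNo,
                                                 SDNode *Node);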
@@ -2721,588 +3090,827 @@ SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
default:
break;
case Intrinsic::aarch64_neon_st1x2: {
- if (VT == MVT::v8i8)
- return SelectStore(Node, 2, AArch64::ST1Twov8b);
- else if (VT == MVT::v16i8)
- return SelectStore(Node, 2, AArch64::ST1Twov16b);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectStore(Node, 2, AArch64::ST1Twov4h);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectStore(Node, 2, AArch64::ST1Twov8h);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectStore(Node, 2, AArch64::ST1Twov2s);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectStore(Node, 2, AArch64::ST1Twov4s);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectStore(Node, 2, AArch64::ST1Twov2d);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectStore(Node, 2, AArch64::ST1Twov1d);
+ if (VT == MVT::v8i8) {
+ SelectStore(Node, 2, AArch64::ST1Twov8b);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectStore(Node, 2, AArch64::ST1Twov16b);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectStore(Node, 2, AArch64::ST1Twov4h);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectStore(Node, 2, AArch64::ST1Twov8h);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectStore(Node, 2, AArch64::ST1Twov2s);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectStore(Node, 2, AArch64::ST1Twov4s);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectStore(Node, 2, AArch64::ST1Twov2d);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectStore(Node, 2, AArch64::ST1Twov1d);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st1x3: {
- if (VT == MVT::v8i8)
- return SelectStore(Node, 3, AArch64::ST1Threev8b);
- else if (VT == MVT::v16i8)
- return SelectStore(Node, 3, AArch64::ST1Threev16b);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectStore(Node, 3, AArch64::ST1Threev4h);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectStore(Node, 3, AArch64::ST1Threev8h);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectStore(Node, 3, AArch64::ST1Threev2s);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectStore(Node, 3, AArch64::ST1Threev4s);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectStore(Node, 3, AArch64::ST1Threev2d);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectStore(Node, 3, AArch64::ST1Threev1d);
+ if (VT == MVT::v8i8) {
+ SelectStore(Node, 3, AArch64::ST1Threev8b);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectStore(Node, 3, AArch64::ST1Threev16b);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectStore(Node, 3, AArch64::ST1Threev4h);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectStore(Node, 3, AArch64::ST1Threev8h);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectStore(Node, 3, AArch64::ST1Threev2s);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectStore(Node, 3, AArch64::ST1Threev4s);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectStore(Node, 3, AArch64::ST1Threev2d);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectStore(Node, 3, AArch64::ST1Threev1d);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st1x4: {
- if (VT == MVT::v8i8)
- return SelectStore(Node, 4, AArch64::ST1Fourv8b);
- else if (VT == MVT::v16i8)
- return SelectStore(Node, 4, AArch64::ST1Fourv16b);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectStore(Node, 4, AArch64::ST1Fourv4h);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectStore(Node, 4, AArch64::ST1Fourv8h);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectStore(Node, 4, AArch64::ST1Fourv2s);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectStore(Node, 4, AArch64::ST1Fourv4s);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectStore(Node, 4, AArch64::ST1Fourv2d);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectStore(Node, 4, AArch64::ST1Fourv1d);
+ if (VT == MVT::v8i8) {
+ SelectStore(Node, 4, AArch64::ST1Fourv8b);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectStore(Node, 4, AArch64::ST1Fourv16b);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectStore(Node, 4, AArch64::ST1Fourv4h);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectStore(Node, 4, AArch64::ST1Fourv8h);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectStore(Node, 4, AArch64::ST1Fourv2s);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectStore(Node, 4, AArch64::ST1Fourv4s);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectStore(Node, 4, AArch64::ST1Fourv2d);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectStore(Node, 4, AArch64::ST1Fourv1d);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st2: {
- if (VT == MVT::v8i8)
- return SelectStore(Node, 2, AArch64::ST2Twov8b);
- else if (VT == MVT::v16i8)
- return SelectStore(Node, 2, AArch64::ST2Twov16b);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectStore(Node, 2, AArch64::ST2Twov4h);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectStore(Node, 2, AArch64::ST2Twov8h);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectStore(Node, 2, AArch64::ST2Twov2s);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectStore(Node, 2, AArch64::ST2Twov4s);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectStore(Node, 2, AArch64::ST2Twov2d);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectStore(Node, 2, AArch64::ST1Twov1d);
+ if (VT == MVT::v8i8) {
+ SelectStore(Node, 2, AArch64::ST2Twov8b);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectStore(Node, 2, AArch64::ST2Twov16b);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectStore(Node, 2, AArch64::ST2Twov4h);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectStore(Node, 2, AArch64::ST2Twov8h);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectStore(Node, 2, AArch64::ST2Twov2s);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectStore(Node, 2, AArch64::ST2Twov4s);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectStore(Node, 2, AArch64::ST2Twov2d);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectStore(Node, 2, AArch64::ST1Twov1d);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st3: {
- if (VT == MVT::v8i8)
- return SelectStore(Node, 3, AArch64::ST3Threev8b);
- else if (VT == MVT::v16i8)
- return SelectStore(Node, 3, AArch64::ST3Threev16b);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectStore(Node, 3, AArch64::ST3Threev4h);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectStore(Node, 3, AArch64::ST3Threev8h);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectStore(Node, 3, AArch64::ST3Threev2s);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectStore(Node, 3, AArch64::ST3Threev4s);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectStore(Node, 3, AArch64::ST3Threev2d);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectStore(Node, 3, AArch64::ST1Threev1d);
+ if (VT == MVT::v8i8) {
+ SelectStore(Node, 3, AArch64::ST3Threev8b);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectStore(Node, 3, AArch64::ST3Threev16b);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectStore(Node, 3, AArch64::ST3Threev4h);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectStore(Node, 3, AArch64::ST3Threev8h);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectStore(Node, 3, AArch64::ST3Threev2s);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectStore(Node, 3, AArch64::ST3Threev4s);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectStore(Node, 3, AArch64::ST3Threev2d);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectStore(Node, 3, AArch64::ST1Threev1d);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st4: {
- if (VT == MVT::v8i8)
- return SelectStore(Node, 4, AArch64::ST4Fourv8b);
- else if (VT == MVT::v16i8)
- return SelectStore(Node, 4, AArch64::ST4Fourv16b);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectStore(Node, 4, AArch64::ST4Fourv4h);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectStore(Node, 4, AArch64::ST4Fourv8h);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectStore(Node, 4, AArch64::ST4Fourv2s);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectStore(Node, 4, AArch64::ST4Fourv4s);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectStore(Node, 4, AArch64::ST4Fourv2d);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectStore(Node, 4, AArch64::ST1Fourv1d);
+ if (VT == MVT::v8i8) {
+ SelectStore(Node, 4, AArch64::ST4Fourv8b);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectStore(Node, 4, AArch64::ST4Fourv16b);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectStore(Node, 4, AArch64::ST4Fourv4h);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectStore(Node, 4, AArch64::ST4Fourv8h);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectStore(Node, 4, AArch64::ST4Fourv2s);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectStore(Node, 4, AArch64::ST4Fourv4s);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectStore(Node, 4, AArch64::ST4Fourv2d);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectStore(Node, 4, AArch64::ST1Fourv1d);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st2lane: {
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectStoreLane(Node, 2, AArch64::ST2i8);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectStoreLane(Node, 2, AArch64::ST2i16);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectStoreLane(Node, 2, AArch64::ST2i32);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectStoreLane(Node, 2, AArch64::ST2i64);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectStoreLane(Node, 2, AArch64::ST2i8);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectStoreLane(Node, 2, AArch64::ST2i16);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectStoreLane(Node, 2, AArch64::ST2i32);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectStoreLane(Node, 2, AArch64::ST2i64);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st3lane: {
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectStoreLane(Node, 3, AArch64::ST3i8);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectStoreLane(Node, 3, AArch64::ST3i16);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectStoreLane(Node, 3, AArch64::ST3i32);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectStoreLane(Node, 3, AArch64::ST3i64);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectStoreLane(Node, 3, AArch64::ST3i8);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectStoreLane(Node, 3, AArch64::ST3i16);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectStoreLane(Node, 3, AArch64::ST3i32);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectStoreLane(Node, 3, AArch64::ST3i64);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st4lane: {
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectStoreLane(Node, 4, AArch64::ST4i8);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectStoreLane(Node, 4, AArch64::ST4i16);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectStoreLane(Node, 4, AArch64::ST4i32);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectStoreLane(Node, 4, AArch64::ST4i64);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectStoreLane(Node, 4, AArch64::ST4i8);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectStoreLane(Node, 4, AArch64::ST4i16);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectStoreLane(Node, 4, AArch64::ST4i32);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectStoreLane(Node, 4, AArch64::ST4i64);
+ return;
+ }
break;
}
}
break;
}
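
Note the v1i64/v1f64 rows in the st2/st3/st4 cases above: they select ST1Twov1d/ST1Threev1d/ST1Fourv1d rather than ST2/ST3/ST4, because interleaving vectors that hold a single element degenerates into a plain contiguous store of N registers (the ld2/ld3/ld4 cases use the same trick). Beyond that quirk, each chain is a pure type-to-opcode lookup; a hypothetical helper showing the shape (VTOpcode and lookupOpcode are illustrative names, not upstream code):

    // Assumes the usual LLVM headers (ArrayRef.h, ValueTypes.h).
    struct VTOpcode { MVT::SimpleValueType VT; unsigned Opc; };

    // Returns 0 when no row matches, i.e. the caller falls through to
    // its 'break' and default selection.
    static unsigned lookupOpcode(ArrayRef<VTOpcode> Map, EVT VT) {
      for (const VTOpcode &Row : Map)
        if (VT == Row.VT)
          return Row.Opc;
      return 0;
    }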
case AArch64ISD::LD2post: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD3post: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD4post: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD1x2post: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD1x3post: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD1x4post: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD1DUPpost: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD2DUPpost: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD3DUPpost: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD4DUPpost: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD1LANEpost: {
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
+ return;
+ }
break;
}
case AArch64ISD::LD2LANEpost: {
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
+ return;
+ }
break;
}
case AArch64ISD::LD3LANEpost: {
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
+ return;
+ }
break;
}
case AArch64ISD::LD4LANEpost: {
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
+ return;
+ }
break;
}
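
The *LANEpost cases dispatch purely on lane width: 8-, 16-, 32- and 64-bit lanes each map to a single LDNiN_POST opcode shared by the D- and Q-register types, since the instruction addresses a lane of a 128-bit register either way. These post-indexed nodes are produced by a target DAG combine that folds the address increment into the load; the plain lane form is what the ACLE lane intrinsics reach, e.g. (illustrative function, assuming clang on AArch64):

    #include <arm_neon.h>

    // Selects LD4i16, lane 3; the LD4i16_POST variant is formed later by
    // the combine when the pointer is advanced by the access size.
    uint16x8x4_t load_lane3(const uint16_t *p, uint16x8x4_t acc) {
      return vld4q_lane_u16(p, acc, 3);
    }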
case AArch64ISD::ST2post: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v8i8)
- return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
- else if (VT == MVT::v16i8)
- return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
+ if (VT == MVT::v8i8) {
+ SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST3post: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v8i8)
- return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
- else if (VT == MVT::v16i8)
- return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
+ if (VT == MVT::v8i8) {
+ SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST4post: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v8i8)
- return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
- else if (VT == MVT::v16i8)
- return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
+ if (VT == MVT::v8i8) {
+ SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST1x2post: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v8i8)
- return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
- else if (VT == MVT::v16i8)
- return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
+ if (VT == MVT::v8i8) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST1x3post: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v8i8)
- return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
- else if (VT == MVT::v16i8)
- return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
+ if (VT == MVT::v8i8) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST1x4post: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v8i8)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
- else if (VT == MVT::v16i8)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
+ if (VT == MVT::v8i8) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST2LANEpost: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST3LANEpost: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST4LANEpost: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
+ return;
+ }
break;
}
}
// Select the default instruction
- ResNode = SelectCode(Node);
-
- DEBUG(errs() << "=> ");
- if (ResNode == nullptr || ResNode == Node)
- DEBUG(Node->dump(CurDAG));
- else
- DEBUG(ResNode->dump(CurDAG));
- DEBUG(errs() << "\n");
-
- return ResNode;
+ SelectCode(Node);
}
/// createAArch64ISelDag - This pass converts a legalized DAG into a
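
Annotation: the hunks above (both the post-inc load/store cases, which bucket only on element width so that e.g. every i8-element vector shares the same LDn/STn lane opcode, and the tail of Select) track an LLVM 3.9 API change: Select() and its helpers no longer return the replacement SDNode*, they mutate the DAG themselves and return void. That is why every "return SelectPostStore(...);" becomes "SelectPostStore(...); return;". A standalone sketch of the pattern, using toy types rather than LLVM's:

// Toy types, not LLVM's: illustrates the void-Select refactor only.
struct Node { int Opcode = 0; Node *Replacement = nullptr; };

struct Selector {
  void replaceNode(Node *From, Node *To) { From->Replacement = To; }

  void selectPostStore(Node *N) { replaceNode(N, N); } // helper mutates DAG
  void selectDefault(Node *N) { replaceNode(N, N); }   // tablegen'd fallback

  void select(Node *N) {
    if (N->Opcode == 1) {   // one "case" of the big switch above
      selectPostStore(N);   // old: return selectPostStore(N);
      return;               // new: act, then plain return
    }
    selectDefault(N);       // matches the trailing SelectCode(Node) call
  }
};

int main() {
  Node N{1};
  Selector().select(&N);
  return N.Replacement == &N ? 0 : 1;
}
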
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 92cf1cd71970..d6f2a190d4c8 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -40,12 +40,6 @@ using namespace llvm;
STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumShiftInserts, "Number of vector shift inserts");
-// Place holder until extr generation is tested fully.
-static cl::opt<bool>
-EnableAArch64ExtrGeneration("aarch64-extr-generation", cl::Hidden,
- cl::desc("Allow AArch64 (or (shift)(shift))->extract"),
- cl::init(true));
-
static cl::opt<bool>
EnableAArch64SlrGeneration("aarch64-shift-insert-generation", cl::Hidden,
cl::desc("Allow AArch64 SLI/SRI formation"),
@@ -59,6 +53,13 @@ cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration(
cl::desc("Allow AArch64 Local Dynamic TLS code generation"),
cl::init(false));
+// Disabled because it caused self-hosting failures once returned-attribute
+// inference was enabled.
+static cl::opt<bool>
+EnableThisRetForwarding("aarch64-this-return-forwarding", cl::Hidden,
+ cl::desc("Directly forward this return"),
+ cl::init(false));
+
/// Value type used for condition codes.
static const MVT MVT_CC = MVT::i32;
@@ -225,13 +226,6 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
- // Expand the undefined-at-zero variants to cttz/ctlz to their defined-at-zero
- // counterparts, which AArch64 supports directly.
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
-
setOperationAction(ISD::CTPOP, MVT::i32, Custom);
setOperationAction(ISD::CTPOP, MVT::i64, Custom);
@@ -402,6 +396,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
+
// Lower READCYCLECOUNTER using an mrs from PMCCNTR_EL0.
// This requires the Performance Monitors extension.
if (Subtarget->hasPerfMon())
@@ -476,7 +472,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
// Also, try to fold ADD into CSINC/CSINV..
setTargetDAGCombine(ISD::ADD);
setTargetDAGCombine(ISD::SUB);
-
+ setTargetDAGCombine(ISD::SRL);
setTargetDAGCombine(ISD::XOR);
setTargetDAGCombine(ISD::SINT_TO_FP);
setTargetDAGCombine(ISD::UINT_TO_FP);
@@ -518,7 +514,11 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
MaskAndBranchFoldingIsLegal = true;
EnableExtLdPromotion = true;
+ // Set required alignment.
setMinFunctionAlignment(2);
+ // Set preferred alignments.
+ setPrefFunctionAlignment(STI.getPrefFunctionAlignment());
+ setPrefLoopAlignment(STI.getPrefLoopAlignment());
setHasExtractBitsInsn(true);
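
Annotation: in this LLVM version the alignment setters take log2 values, so the required minimum above is 1 << 2 = 4 bytes (the AArch64 instruction size), while the preferred function/loop alignments are whatever the subtarget reports. A trivial arithmetic check of that convention:

#include <cassert>

int main() {
  unsigned Log2MinFunctionAlignment = 2; // argument to setMinFunctionAlignment
  assert((1u << Log2MinFunctionAlignment) == 4);
  return 0;
}
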
@@ -583,6 +583,18 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
+ setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
+ setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);
+
+ setOperationAction(ISD::CTTZ, MVT::v2i8, Expand);
+ setOperationAction(ISD::CTTZ, MVT::v4i16, Expand);
+ setOperationAction(ISD::CTTZ, MVT::v2i32, Expand);
+ setOperationAction(ISD::CTTZ, MVT::v1i64, Expand);
+ setOperationAction(ISD::CTTZ, MVT::v16i8, Expand);
+ setOperationAction(ISD::CTTZ, MVT::v8i16, Expand);
+ setOperationAction(ISD::CTTZ, MVT::v4i32, Expand);
+ setOperationAction(ISD::CTTZ, MVT::v2i64, Expand);
+
// AArch64 doesn't have MUL.2d:
setOperationAction(ISD::MUL, MVT::v2i64, Expand);
// Custom handling for some quad-vector types to detect MULL.
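
Annotation: marking vector CTTZ as Expand hands it to the legalizer, which rewrites it per element. One standard expansion, attractive on AArch64 because NEON has a cheap population count (CNT), is cttz(x) = ctpop(~x & (x - 1)); it also yields the defined-at-zero answer (the bit width) for x == 0. A scalar check of the identity:

#include <cassert>
#include <cstdint>

static int ctpop8(uint8_t X) {
  int N = 0;
  for (; X; X >>= 1)
    N += X & 1;
  return N;
}

// cttz via popcount: the mask ~X & (X - 1) selects exactly the trailing zeros.
static int cttz8(uint8_t X) { return ctpop8(uint8_t(~X & (X - 1))); }

int main() {
  assert(cttz8(0x28) == 3); // 0b00101000 has three trailing zeros
  assert(cttz8(0x01) == 0);
  assert(cttz8(0x00) == 8); // defined at zero: full bit width
  return 0;
}
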
@@ -623,91 +635,88 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
}
}
- // Prefer likely predicted branches to selects on out-of-order cores.
- if (Subtarget->isCortexA57())
- PredictableSelectIsExpensive = true;
+ PredictableSelectIsExpensive = Subtarget->predictableSelectIsExpensive();
}
-void AArch64TargetLowering::addTypeForNEON(EVT VT, EVT PromotedBitwiseVT) {
+void AArch64TargetLowering::addTypeForNEON(MVT VT, MVT PromotedBitwiseVT) {
if (VT == MVT::v2f32 || VT == MVT::v4f16) {
- setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
- AddPromotedToType(ISD::LOAD, VT.getSimpleVT(), MVT::v2i32);
+ setOperationAction(ISD::LOAD, VT, Promote);
+ AddPromotedToType(ISD::LOAD, VT, MVT::v2i32);
- setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
- AddPromotedToType(ISD::STORE, VT.getSimpleVT(), MVT::v2i32);
+ setOperationAction(ISD::STORE, VT, Promote);
+ AddPromotedToType(ISD::STORE, VT, MVT::v2i32);
} else if (VT == MVT::v2f64 || VT == MVT::v4f32 || VT == MVT::v8f16) {
- setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
- AddPromotedToType(ISD::LOAD, VT.getSimpleVT(), MVT::v2i64);
+ setOperationAction(ISD::LOAD, VT, Promote);
+ AddPromotedToType(ISD::LOAD, VT, MVT::v2i64);
- setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
- AddPromotedToType(ISD::STORE, VT.getSimpleVT(), MVT::v2i64);
+ setOperationAction(ISD::STORE, VT, Promote);
+ AddPromotedToType(ISD::STORE, VT, MVT::v2i64);
}
// Mark vector float intrinsics as expand.
if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64) {
- setOperationAction(ISD::FSIN, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FCOS, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FPOWI, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FPOW, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FLOG, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FLOG2, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FLOG10, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FEXP, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FEXP2, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::FSIN, VT, Expand);
+ setOperationAction(ISD::FCOS, VT, Expand);
+ setOperationAction(ISD::FPOWI, VT, Expand);
+ setOperationAction(ISD::FPOW, VT, Expand);
+ setOperationAction(ISD::FLOG, VT, Expand);
+ setOperationAction(ISD::FLOG2, VT, Expand);
+ setOperationAction(ISD::FLOG10, VT, Expand);
+ setOperationAction(ISD::FEXP, VT, Expand);
+ setOperationAction(ISD::FEXP2, VT, Expand);
// But we do support custom-lowering for FCOPYSIGN.
- setOperationAction(ISD::FCOPYSIGN, VT.getSimpleVT(), Custom);
- }
-
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::AND, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::OR, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::SETCC, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
-
- setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::VSELECT, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::FCOPYSIGN, VT, Custom);
+ }
+
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
+ setOperationAction(ISD::SRL, VT, Custom);
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::AND, VT, Custom);
+ setOperationAction(ISD::OR, VT, Custom);
+ setOperationAction(ISD::SETCC, VT, Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
+
+ setOperationAction(ISD::SELECT, VT, Expand);
+ setOperationAction(ISD::SELECT_CC, VT, Expand);
+ setOperationAction(ISD::VSELECT, VT, Expand);
for (MVT InnerVT : MVT::all_valuetypes())
- setLoadExtAction(ISD::EXTLOAD, InnerVT, VT.getSimpleVT(), Expand);
+ setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
// CNT supports only B element sizes.
if (VT != MVT::v8i8 && VT != MVT::v16i8)
- setOperationAction(ISD::CTPOP, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::CTPOP, VT, Expand);
- setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::UDIV, VT, Expand);
+ setOperationAction(ISD::SDIV, VT, Expand);
+ setOperationAction(ISD::UREM, VT, Expand);
+ setOperationAction(ISD::SREM, VT, Expand);
+ setOperationAction(ISD::FREM, VT, Expand);
- setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::FP_TO_SINT, VT, Custom);
+ setOperationAction(ISD::FP_TO_UINT, VT, Custom);
// [SU][MIN|MAX] are available for all NEON types apart from i64.
- if (!VT.isFloatingPoint() &&
- VT.getSimpleVT() != MVT::v2i64 && VT.getSimpleVT() != MVT::v1i64)
+ if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64)
for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
- setOperationAction(Opcode, VT.getSimpleVT(), Legal);
+ setOperationAction(Opcode, VT, Legal);
// F[MIN|MAX][NUM|NAN] are available for all FP NEON types (not f16 though!).
if (VT.isFloatingPoint() && VT.getVectorElementType() != MVT::f16)
for (unsigned Opcode : {ISD::FMINNAN, ISD::FMAXNAN,
ISD::FMINNUM, ISD::FMAXNUM})
- setOperationAction(Opcode, VT.getSimpleVT(), Legal);
+ setOperationAction(Opcode, VT, Legal);
if (Subtarget->isLittleEndian()) {
for (unsigned im = (unsigned)ISD::PRE_INC;
im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
- setIndexedLoadAction(im, VT.getSimpleVT(), Legal);
- setIndexedStoreAction(im, VT.getSimpleVT(), Legal);
+ setIndexedLoadAction(im, VT, Legal);
+ setIndexedStoreAction(im, VT, Legal);
}
}
}
@@ -804,12 +813,9 @@ bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
if (Subtarget->requiresStrictAlign())
return false;
- // FIXME: This is mostly true for Cyclone, but not necessarily others.
if (Fast) {
- // FIXME: Define an attribute for slow unaligned accesses instead of
- // relying on the CPU type as a proxy.
- // On Cyclone, unaligned 128-bit stores are slow.
- *Fast = !Subtarget->isCyclone() || VT.getStoreSize() != 16 ||
+ // Some CPUs are fine with unaligned stores except for 128-bit ones.
+ *Fast = !Subtarget->isMisaligned128StoreSlow() || VT.getStoreSize() != 16 ||
// See comments in performSTORECombine() for more details about
// these conditions.
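
Annotation: the hunk above replaces a CPU-name check with a subtarget feature. The shape of the new fast-path predicate, reduced to a standalone function (the real code carries further conditions, per performSTORECombine): an unaligned access is considered fast unless the subtarget marks misaligned 128-bit stores as slow and this access is exactly 16 bytes.

#include <cassert>

struct Subtarget { bool Misaligned128StoreSlow; };

static bool fastMisalignedAccess(const Subtarget &ST, unsigned StoreBytes) {
  return !ST.Misaligned128StoreSlow || StoreBytes != 16;
}

int main() {
  Subtarget CycloneLike{true}, Generic{false};
  assert(!fastMisalignedAccess(CycloneLike, 16)); // the one slow case
  assert(fastMisalignedAccess(CycloneLike, 8));
  assert(fastMisalignedAccess(Generic, 16));
  return 0;
}
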
@@ -954,12 +960,14 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
case AArch64ISD::ST4LANEpost: return "AArch64ISD::ST4LANEpost";
case AArch64ISD::SMULL: return "AArch64ISD::SMULL";
case AArch64ISD::UMULL: return "AArch64ISD::UMULL";
+ case AArch64ISD::FRSQRTE: return "AArch64ISD::FRSQRTE";
+ case AArch64ISD::FRECPE: return "AArch64ISD::FRECPE";
}
return nullptr;
}
MachineBasicBlock *
-AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
+AArch64TargetLowering::EmitF128CSEL(MachineInstr &MI,
MachineBasicBlock *MBB) const {
// We materialise the F128CSEL pseudo-instruction as some control flow and a
// phi node:
@@ -976,14 +984,14 @@ AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
MachineFunction *MF = MBB->getParent();
const TargetInstrInfo *TII = Subtarget->getInstrInfo();
const BasicBlock *LLVM_BB = MBB->getBasicBlock();
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
MachineFunction::iterator It = ++MBB->getIterator();
- unsigned DestReg = MI->getOperand(0).getReg();
- unsigned IfTrueReg = MI->getOperand(1).getReg();
- unsigned IfFalseReg = MI->getOperand(2).getReg();
- unsigned CondCode = MI->getOperand(3).getImm();
- bool NZCVKilled = MI->getOperand(4).isKill();
+ unsigned DestReg = MI.getOperand(0).getReg();
+ unsigned IfTrueReg = MI.getOperand(1).getReg();
+ unsigned IfFalseReg = MI.getOperand(2).getReg();
+ unsigned CondCode = MI.getOperand(3).getImm();
+ bool NZCVKilled = MI.getOperand(4).isKill();
MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
@@ -1014,17 +1022,16 @@ AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
.addReg(IfFalseReg)
.addMBB(MBB);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return EndBB;
}
-MachineBasicBlock *
-AArch64TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB) const {
- switch (MI->getOpcode()) {
+MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
+ MachineInstr &MI, MachineBasicBlock *BB) const {
+ switch (MI.getOpcode()) {
default:
#ifndef NDEBUG
- MI->dump();
+ MI.dump();
#endif
llvm_unreachable("Unexpected instruction for custom inserter!");
@@ -1135,6 +1142,35 @@ static void changeFPCCToAArch64CC(ISD::CondCode CC,
}
}
+/// Convert a DAG fp condition code to an AArch64 CC.
+/// This differs from changeFPCCToAArch64CC in that it returns cond codes that
+/// should be AND'ed instead of OR'ed.
+static void changeFPCCToANDAArch64CC(ISD::CondCode CC,
+ AArch64CC::CondCode &CondCode,
+ AArch64CC::CondCode &CondCode2) {
+ CondCode2 = AArch64CC::AL;
+ switch (CC) {
+ default:
+ changeFPCCToAArch64CC(CC, CondCode, CondCode2);
+ assert(CondCode2 == AArch64CC::AL);
+ break;
+ case ISD::SETONE:
+ // (a one b)
+ // == ((a olt b) || (a ogt b))
+ // == ((a ord b) && (a une b))
+ CondCode = AArch64CC::VC;
+ CondCode2 = AArch64CC::NE;
+ break;
+ case ISD::SETUEQ:
+ // (a ueq b)
+ // == ((a uno b) || (a oeq b))
+ // == ((a ule b) && (a uge b))
+ CondCode = AArch64CC::PL;
+ CondCode2 = AArch64CC::LE;
+ break;
+ }
+}
+
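
Annotation: the two identities introduced above are why SETONE and SETUEQ each map to a pair of AND'ed AArch64 condition codes (VC and NE, PL and LE, after an fcmp). They can be verified directly with NaN-aware C++ comparisons:

#include <cassert>
#include <cmath>
#include <limits>

static bool ord(double A, double B) { return !std::isnan(A) && !std::isnan(B); }
static bool une(double A, double B) { return !(A == B); } // true when NaN involved
static bool one(double A, double B) { return A < B || A > B; }
static bool ule(double A, double B) { return !(A > B); }
static bool uge(double A, double B) { return !(A < B); }
static bool ueq(double A, double B) {
  return A == B || std::isnan(A) || std::isnan(B);
}

int main() {
  const double Vals[] = {1.0, 2.0, std::numeric_limits<double>::quiet_NaN()};
  for (double A : Vals)
    for (double B : Vals) {
      assert(one(A, B) == (ord(A, B) && une(A, B))); // (a one b) == ord && une
      assert(ueq(A, B) == (ule(A, B) && uge(A, B))); // (a ueq b) == ule && uge
    }
  return 0;
}
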
/// changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64
/// CC usable with the vector instructions. Fewer operations are available
/// without a real NZCV register, so we have to use less efficient combinations
@@ -1174,11 +1210,18 @@ static bool isLegalArithImmed(uint64_t C) {
}
static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
- SDLoc dl, SelectionDAG &DAG) {
+ const SDLoc &dl, SelectionDAG &DAG) {
EVT VT = LHS.getValueType();
- if (VT.isFloatingPoint())
+ if (VT.isFloatingPoint()) {
+ assert(VT != MVT::f128);
+ if (VT == MVT::f16) {
+ LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS);
+ RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS);
+ VT = MVT::f32;
+ }
return DAG.getNode(AArch64ISD::FCMP, dl, VT, LHS, RHS);
+ }
// The CMP instruction is just an alias for SUBS, and representing it as
// SUBS means that it's possible to get CSE with subtract operations.
@@ -1258,22 +1301,31 @@ static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
/// Create a conditional comparison; Use CCMP, CCMN or FCCMP as appropriate.
static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
ISD::CondCode CC, SDValue CCOp,
- SDValue Condition, unsigned NZCV,
- SDLoc DL, SelectionDAG &DAG) {
+ AArch64CC::CondCode Predicate,
+ AArch64CC::CondCode OutCC,
+ const SDLoc &DL, SelectionDAG &DAG) {
unsigned Opcode = 0;
- if (LHS.getValueType().isFloatingPoint())
+ if (LHS.getValueType().isFloatingPoint()) {
+ assert(LHS.getValueType() != MVT::f128);
+ if (LHS.getValueType() == MVT::f16) {
+ LHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, LHS);
+ RHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, RHS);
+ }
Opcode = AArch64ISD::FCCMP;
- else if (RHS.getOpcode() == ISD::SUB) {
+ } else if (RHS.getOpcode() == ISD::SUB) {
SDValue SubOp0 = RHS.getOperand(0);
if (isNullConstant(SubOp0) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
- // See emitComparison() on why we can only do this for SETEQ and SETNE.
- Opcode = AArch64ISD::CCMN;
- RHS = RHS.getOperand(1);
- }
+ // See emitComparison() on why we can only do this for SETEQ and SETNE.
+ Opcode = AArch64ISD::CCMN;
+ RHS = RHS.getOperand(1);
+ }
}
if (Opcode == 0)
Opcode = AArch64ISD::CCMP;
+ SDValue Condition = DAG.getConstant(Predicate, DL, MVT_CC);
+ AArch64CC::CondCode InvOutCC = AArch64CC::getInvertedCondCode(OutCC);
+ unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvOutCC);
SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
return DAG.getNode(Opcode, DL, MVT_CC, LHS, RHS, NZCVOp, Condition, CCOp);
}
@@ -1284,31 +1336,49 @@ static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
/// at the leaves only; i.e., "not (or (or x y) z)" can be changed to
/// "and (and (not x) (not y)) (not z)"; "not (or (and x y) z)" cannot be
/// brought into such a form.
-static bool isConjunctionDisjunctionTree(const SDValue Val, bool &CanPushNegate,
+static bool isConjunctionDisjunctionTree(const SDValue Val, bool &CanNegate,
unsigned Depth = 0) {
if (!Val.hasOneUse())
return false;
unsigned Opcode = Val->getOpcode();
if (Opcode == ISD::SETCC) {
- CanPushNegate = true;
+ if (Val->getOperand(0).getValueType() == MVT::f128)
+ return false;
+ CanNegate = true;
return true;
}
- // Protect against stack overflow.
- if (Depth > 15)
+ // Protect against exponential runtime and stack overflow.
+ if (Depth > 6)
return false;
if (Opcode == ISD::AND || Opcode == ISD::OR) {
SDValue O0 = Val->getOperand(0);
SDValue O1 = Val->getOperand(1);
- bool CanPushNegateL;
- if (!isConjunctionDisjunctionTree(O0, CanPushNegateL, Depth+1))
+ bool CanNegateL;
+ if (!isConjunctionDisjunctionTree(O0, CanNegateL, Depth+1))
return false;
- bool CanPushNegateR;
- if (!isConjunctionDisjunctionTree(O1, CanPushNegateR, Depth+1))
+ bool CanNegateR;
+ if (!isConjunctionDisjunctionTree(O1, CanNegateR, Depth+1))
return false;
- // We cannot push a negate through an AND operation (it would become an OR),
- // we can however change a (not (or x y)) to (and (not x) (not y)) if we can
- // push the negate through the x/y subtrees.
- CanPushNegate = (Opcode == ISD::OR) && CanPushNegateL && CanPushNegateR;
+
+ if (Opcode == ISD::OR) {
+ // For an OR expression we need to be able to negate at least one side or
+ // we cannot do the transformation at all.
+ if (!CanNegateL && !CanNegateR)
+ return false;
+ // We can however change a (not (or x y)) to (and (not x) (not y)) if we
+ // can negate the x and y subtrees.
+ CanNegate = CanNegateL && CanNegateR;
+ } else {
+ // If the operands are OR expressions then we finally need to negate their
+ // outputs; we can only do that for the operand emitted last, by
+ // negating OutCC, not for both operands.
+ bool NeedsNegOutL = O0->getOpcode() == ISD::OR;
+ bool NeedsNegOutR = O1->getOpcode() == ISD::OR;
+ if (NeedsNegOutL && NeedsNegOutR)
+ return false;
+ // We cannot negate an AND operation (it would become an OR).
+ CanNegate = false;
+ }
return true;
}
return false;
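
Annotation: the rules encoded above, restated on a toy expression type (the f128 leaf check is omitted here): a leaf comparison can always be negated; not(or x y) can become and(not x, not y) only if both subtrees are negatable, and an OR is usable at all only if at least one side is; an AND is never negatable itself, and at most one of its operands may be an OR, since only the operand emitted last gets its output flags inverted for free.

#include <cassert>

enum Kind { Leaf, And, Or };
struct Expr { Kind K; const Expr *L = nullptr, *R = nullptr; };

static bool isCCTree(const Expr &E, bool &CanNegate, unsigned Depth = 0) {
  if (E.K == Leaf) { CanNegate = true; return true; }
  if (Depth > 6) return false;        // same recursion bound as above
  bool NL, NR;
  if (!isCCTree(*E.L, NL, Depth + 1) || !isCCTree(*E.R, NR, Depth + 1))
    return false;
  if (E.K == Or) { CanNegate = NL && NR; return NL || NR; }
  if (E.L->K == Or && E.R->K == Or)   // both outputs would need negating
    return false;
  CanNegate = false;                  // AND itself cannot be negated
  return true;
}

int main() {
  Expr A{Leaf}, B{Leaf}, C{Leaf};
  Expr AB{Or, &A, &B}, BC{Or, &B, &C};
  Expr Both{And, &AB, &BC}, One{And, &AB, &C};
  bool Neg;
  assert(isCCTree(AB, Neg) && Neg);   // or(leaf, leaf): valid, negatable
  assert(!isCCTree(Both, Neg));       // and(or, or): rejected
  assert(isCCTree(One, Neg) && !Neg); // and(or, leaf): valid, not negatable
  return 0;
}
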
@@ -1324,10 +1394,9 @@ static bool isConjunctionDisjunctionTree(const SDValue Val, bool &CanPushNegate,
/// effects pushed to the tree leaves; @p Predicate is an NZCV flag predicate
/// for the comparisons in the current subtree; @p Depth limits the search
/// depth to avoid stack overflow.
-static SDValue emitConjunctionDisjunctionTree(SelectionDAG &DAG, SDValue Val,
- AArch64CC::CondCode &OutCC, bool PushNegate = false,
- SDValue CCOp = SDValue(), AArch64CC::CondCode Predicate = AArch64CC::AL,
- unsigned Depth = 0) {
+static SDValue emitConjunctionDisjunctionTreeRec(SelectionDAG &DAG, SDValue Val,
+ AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp,
+ AArch64CC::CondCode Predicate) {
// We're at a tree leaf, produce a conditional comparison operation.
unsigned Opcode = Val->getOpcode();
if (Opcode == ISD::SETCC) {
@@ -1335,7 +1404,7 @@ static SDValue emitConjunctionDisjunctionTree(SelectionDAG &DAG, SDValue Val,
SDValue RHS = Val->getOperand(1);
ISD::CondCode CC = cast<CondCodeSDNode>(Val->getOperand(2))->get();
bool isInteger = LHS.getValueType().isInteger();
- if (PushNegate)
+ if (Negate)
CC = getSetCCInverse(CC, isInteger);
SDLoc DL(Val);
// Determine OutCC and handle FP special case.
@@ -1344,68 +1413,62 @@ static SDValue emitConjunctionDisjunctionTree(SelectionDAG &DAG, SDValue Val,
} else {
assert(LHS.getValueType().isFloatingPoint());
AArch64CC::CondCode ExtraCC;
- changeFPCCToAArch64CC(CC, OutCC, ExtraCC);
- // Surpisingly some floating point conditions can't be tested with a
- // single condition code. Construct an additional comparison in this case.
- // See comment below on how we deal with OR conditions.
+ changeFPCCToANDAArch64CC(CC, OutCC, ExtraCC);
+ // Some floating point conditions can't be tested with a single condition
+ // code. Construct an additional comparison in this case.
if (ExtraCC != AArch64CC::AL) {
SDValue ExtraCmp;
if (!CCOp.getNode())
ExtraCmp = emitComparison(LHS, RHS, CC, DL, DAG);
- else {
- SDValue ConditionOp = DAG.getConstant(Predicate, DL, MVT_CC);
- // Note that we want the inverse of ExtraCC, so NZCV is not inversed.
- unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(ExtraCC);
- ExtraCmp = emitConditionalComparison(LHS, RHS, CC, CCOp, ConditionOp,
- NZCV, DL, DAG);
- }
+ else
+ ExtraCmp = emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate,
+ ExtraCC, DL, DAG);
CCOp = ExtraCmp;
- Predicate = AArch64CC::getInvertedCondCode(ExtraCC);
- OutCC = AArch64CC::getInvertedCondCode(OutCC);
+ Predicate = ExtraCC;
}
}
// Produce a normal comparison if we are first in the chain
- if (!CCOp.getNode())
+ if (!CCOp)
return emitComparison(LHS, RHS, CC, DL, DAG);
// Otherwise produce a ccmp.
- SDValue ConditionOp = DAG.getConstant(Predicate, DL, MVT_CC);
- AArch64CC::CondCode InvOutCC = AArch64CC::getInvertedCondCode(OutCC);
- unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvOutCC);
- return emitConditionalComparison(LHS, RHS, CC, CCOp, ConditionOp, NZCV, DL,
+ return emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate, OutCC, DL,
DAG);
- } else if ((Opcode != ISD::AND && Opcode != ISD::OR) || !Val->hasOneUse())
- return SDValue();
-
- assert((Opcode == ISD::OR || !PushNegate)
- && "Can only push negate through OR operation");
+ }
+ assert((Opcode == ISD::AND || (Opcode == ISD::OR && Val->hasOneUse())) &&
+ "Valid conjunction/disjunction tree");
// Check if both sides can be transformed.
SDValue LHS = Val->getOperand(0);
SDValue RHS = Val->getOperand(1);
- bool CanPushNegateL;
- if (!isConjunctionDisjunctionTree(LHS, CanPushNegateL, Depth+1))
- return SDValue();
- bool CanPushNegateR;
- if (!isConjunctionDisjunctionTree(RHS, CanPushNegateR, Depth+1))
- return SDValue();
- // Do we need to negate our operands?
- bool NegateOperands = Opcode == ISD::OR;
+ // In case of an OR we need to negate our operands and the result.
+ // (A v B) <=> not(not(A) ^ not(B))
+ bool NegateOpsAndResult = Opcode == ISD::OR;
// We can negate the results of all previous operations by inverting the
- // predicate flags giving us a free negation for one side. For the other side
- // we need to be able to push the negation to the leafs of the tree.
- if (NegateOperands) {
- if (!CanPushNegateL && !CanPushNegateR)
- return SDValue();
- // Order the side where we can push the negate through to LHS.
- if (!CanPushNegateL && CanPushNegateR)
+ // predicate flags giving us a free negation for one side. The other side
+ // must be negatable by itself.
+ if (NegateOpsAndResult) {
+ // See which side we can negate.
+ bool CanNegateL;
+ bool isValidL = isConjunctionDisjunctionTree(LHS, CanNegateL);
+ assert(isValidL && "Valid conjunction/disjunction tree");
+ (void)isValidL;
+
+#ifndef NDEBUG
+ bool CanNegateR;
+ bool isValidR = isConjunctionDisjunctionTree(RHS, CanNegateR);
+ assert(isValidR && "Valid conjunction/disjunction tree");
+ assert((CanNegateL || CanNegateR) && "Valid conjunction/disjunction tree");
+#endif
+
+ // Order the side which we cannot negate to RHS so we can emit it first.
+ if (!CanNegateL)
std::swap(LHS, RHS);
} else {
bool NeedsNegOutL = LHS->getOpcode() == ISD::OR;
- bool NeedsNegOutR = RHS->getOpcode() == ISD::OR;
- if (NeedsNegOutL && NeedsNegOutR)
- return SDValue();
+ assert((!NeedsNegOutL || RHS->getOpcode() != ISD::OR) &&
+ "Valid conjunction/disjunction tree");
// Order the side where we need to negate the output flags to RHS so it
// gets emitted first.
if (NeedsNegOutL)
@@ -1416,24 +1479,39 @@ static SDValue emitConjunctionDisjunctionTree(SelectionDAG &DAG, SDValue Val,
// through if we are already in a PushNegate case, otherwise we can negate
// the "flags to test" afterwards.
AArch64CC::CondCode RHSCC;
- SDValue CmpR = emitConjunctionDisjunctionTree(DAG, RHS, RHSCC, PushNegate,
- CCOp, Predicate, Depth+1);
- if (NegateOperands && !PushNegate)
+ SDValue CmpR = emitConjunctionDisjunctionTreeRec(DAG, RHS, RHSCC, Negate,
+ CCOp, Predicate);
+ if (NegateOpsAndResult && !Negate)
RHSCC = AArch64CC::getInvertedCondCode(RHSCC);
- // Emit LHS. We must push the negate through if we need to negate it.
- SDValue CmpL = emitConjunctionDisjunctionTree(DAG, LHS, OutCC, NegateOperands,
- CmpR, RHSCC, Depth+1);
+ // Emit LHS. We may need to negate it.
+ SDValue CmpL = emitConjunctionDisjunctionTreeRec(DAG, LHS, OutCC,
+ NegateOpsAndResult, CmpR,
+ RHSCC);
+ // If we transformed an OR to an AND then we have to negate the result
- // (or absorb a PushNegate resulting in a double negation).
- if (Opcode == ISD::OR && !PushNegate)
+ // (or absorb the Negate parameter).
+ if (NegateOpsAndResult && !Negate)
OutCC = AArch64CC::getInvertedCondCode(OutCC);
return CmpL;
}
+/// Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain
+/// of CCMP/CFCMP ops. See @ref AArch64CCMP.
+/// \see emitConjunctionDisjunctionTreeRec().
+static SDValue emitConjunctionDisjunctionTree(SelectionDAG &DAG, SDValue Val,
+ AArch64CC::CondCode &OutCC) {
+ bool CanNegate;
+ if (!isConjunctionDisjunctionTree(Val, CanNegate))
+ return SDValue();
+
+ return emitConjunctionDisjunctionTreeRec(DAG, Val, OutCC, false, SDValue(),
+ AArch64CC::AL);
+}
+
/// @}
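
Annotation: what the emitted cmp/ccmp chain buys is branch-free evaluation of a short-circuit condition. For a function like the one below, AArch64 compilers typically produce something close to the commented sequence, though exact output varies by compiler version and optimization level:

// Plausible codegen (approximate, not taken from this commit's tests):
//
//   cmp  w0, #0          // flags = compare(a, 0)
//   ccmp w1, #0, #4, ne  // if ne held: flags = compare(b, 0); else NZCV = 0100 (Z=1)
//   cset w0, ne          // w0 = 1 iff both comparisons came out "ne"
//
bool bothNonZero(int A, int B) { return A != 0 && B != 0; }

int main() { return bothNonZero(3, 4) && !bothNonZero(3, 0) ? 0 : 1; }
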
static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
- SDValue &AArch64cc, SelectionDAG &DAG, SDLoc dl) {
+ SDValue &AArch64cc, SelectionDAG &DAG,
+ const SDLoc &dl) {
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
EVT VT = RHS.getValueType();
uint64_t C = RHSC->getZExtValue();
@@ -1994,7 +2072,7 @@ SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
StructType *RetTy = StructType::get(ArgTy, ArgTy, nullptr);
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
- .setCallee(CallingConv::Fast, RetTy, Callee, std::move(Args), 0);
+ .setCallee(CallingConv::Fast, RetTy, Callee, std::move(Args));
std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
return CallResult.first;
@@ -2096,8 +2174,7 @@ static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
// The values are implicitly truncated so sext vs. zext doesn't matter.
Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl,
- MVT::getVectorVT(TruncVT, NumElts), Ops);
+ return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
}
static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
@@ -2213,7 +2290,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDLoc dl(Op);
switch (IntNo) {
default: return SDValue(); // Don't custom lower most intrinsics.
- case Intrinsic::aarch64_thread_pointer: {
+ case Intrinsic::thread_pointer: {
EVT PtrVT = getPointerTy(DAG.getDataLayout());
return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
}
@@ -2356,6 +2433,8 @@ CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
return CC_AArch64_GHC;
case CallingConv::C:
case CallingConv::Fast:
+ case CallingConv::PreserveMost:
+ case CallingConv::CXX_FAST_TLS:
if (!Subtarget->isTargetDarwin())
return CC_AArch64_AAPCS;
return IsVarArg ? CC_AArch64_DarwinPCS_VarArg : CC_AArch64_DarwinPCS;
@@ -2364,8 +2443,8 @@ CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
SDValue AArch64TargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -2515,13 +2594,14 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
ArgValue = DAG.getExtLoad(
ExtType, DL, VA.getLocVT(), Chain, FIN,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
- MemVT, false, false, false, 0);
+ MemVT);
InVals.push_back(ArgValue);
}
}
// varargs
+ AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
if (isVarArg) {
if (!Subtarget->isTargetDarwin()) {
// The AAPCS variadic function ABI is identical to the non-variadic
@@ -2530,22 +2610,20 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
saveVarArgRegisters(CCInfo, DAG, DL, Chain);
}
- AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
// This will point to the next argument passed via stack.
unsigned StackOffset = CCInfo.getNextStackOffset();
// We currently pass all varargs at 8-byte alignment.
StackOffset = ((StackOffset + 7) & ~7);
- AFI->setVarArgsStackIndex(MFI->CreateFixedObject(4, StackOffset, true));
+ FuncInfo->setVarArgsStackIndex(MFI->CreateFixedObject(4, StackOffset, true));
}
- AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
unsigned StackArgSize = CCInfo.getNextStackOffset();
bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
// This is a non-standard ABI so by fiat I say we're allowed to make full
// use of the stack area to be popped, which must be aligned to 16 bytes in
// any case:
- StackArgSize = RoundUpToAlignment(StackArgSize, 16);
+ StackArgSize = alignTo(StackArgSize, 16);
// If we're expected to restore the stack (e.g. fastcc) then we'll be adding
// a multiple of 16.
@@ -2563,7 +2641,8 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
}
void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
- SelectionDAG &DAG, SDLoc DL,
+ SelectionDAG &DAG,
+ const SDLoc &DL,
SDValue &Chain) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -2590,8 +2669,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
SDValue Store = DAG.getStore(
Val.getValue(1), DL, Val, FIN,
- MachinePointerInfo::getStack(DAG.getMachineFunction(), i * 8), false,
- false, 0);
+ MachinePointerInfo::getStack(DAG.getMachineFunction(), i * 8));
MemOps.push_back(Store);
FIN =
DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getConstant(8, DL, PtrVT));
@@ -2620,8 +2698,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
SDValue Store = DAG.getStore(
Val.getValue(1), DL, Val, FIN,
- MachinePointerInfo::getStack(DAG.getMachineFunction(), i * 16),
- false, false, 0);
+ MachinePointerInfo::getStack(DAG.getMachineFunction(), i * 16));
MemOps.push_back(Store);
FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
DAG.getConstant(16, DL, PtrVT));
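
Annotation: the repeated mechanical change in these store/load hunks is that DAG.getStore/getLoad drop the old trailing isVolatile/isNonTemporal/isInvariant/Alignment arguments in favor of defaulted alignment and flag parameters. The same cleanup pattern in miniature, with toy signatures rather than SelectionDAG's:

enum MemFlag : unsigned { MONone = 0, MONonTemporal = 1, MOInvariant = 2 };

// Old shape: store(Val, Ptr, /*isVolatile=*/false, /*isNonTemporal=*/false, /*Align=*/0)
static void store(int Val, int Ptr, unsigned Align = 0,
                  unsigned Flags = MONone) {
  (void)Val; (void)Ptr; (void)Align; (void)Flags; // toy body
}

int main() {
  store(1, 2);                                  // the common case shrinks
  store(1, 2, /*Alignment=*/8, MONonTemporal | MOInvariant);
  return 0;
}
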
@@ -2640,8 +2717,8 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
/// appropriate copies out of appropriate physical registers.
SDValue AArch64TargetLowering::LowerCallResult(
SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
SDValue ThisVal) const {
CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS
? RetCC_AArch64_WebKit_JS
@@ -2658,7 +2735,7 @@ SDValue AArch64TargetLowering::LowerCallResult(
// Pass 'this' value directly from the argument to return value, to avoid
// reg unit interference
- if (i == 0 && isThisReturn) {
+ if (i == 0 && isThisReturn && EnableThisRetForwarding) {
assert(!VA.needsCustom() && VA.getLocVT() == MVT::i64 &&
"unexpected return calling convention register assignment");
InVals.push_back(ThisVal);
@@ -2688,7 +2765,6 @@ SDValue AArch64TargetLowering::LowerCallResult(
bool AArch64TargetLowering::isEligibleForTailCallOptimization(
SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
- bool isCalleeStructRet, bool isCallerStructRet,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
@@ -2698,7 +2774,7 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
return false;
- const MachineFunction &MF = DAG.getMachineFunction();
+ MachineFunction &MF = DAG.getMachineFunction();
const Function *CallerF = MF.getFunction();
CallingConv::ID CallerCC = CallerF->getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
@@ -2713,9 +2789,7 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
return false;
if (getTargetMachine().Options.GuaranteedTailCallOpt) {
- if (IsTailCallConvention(CalleeCC) && CCMatch)
- return true;
- return false;
+ return IsTailCallConvention(CalleeCC) && CCMatch;
}
// Externally-defined functions with weak linkage should not be
@@ -2742,6 +2816,7 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
assert((!isVarArg || CalleeCC == CallingConv::C) &&
"Unexpected variadic calling convention");
+ LLVMContext &C = *DAG.getContext();
if (isVarArg && !Outs.empty()) {
// At least two cases here: if caller is fastcc then we can't have any
// memory arguments (we'd be expected to clean up the stack afterwards). If
@@ -2750,8 +2825,7 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
// FIXME: for now we take the most conservative of these in both cases:
// disallow all variadic memory operands.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
- *DAG.getContext());
+ CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, true));
for (const CCValAssign &ArgLoc : ArgLocs)
@@ -2759,34 +2833,18 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
return false;
}
- // If the calling conventions do not match, then we'd better make sure the
- // results are returned in the same way as what the caller expects.
+ // Check that the call results are passed in the same way.
+ if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
+ CCAssignFnForCall(CalleeCC, isVarArg),
+ CCAssignFnForCall(CallerCC, isVarArg)))
+ return false;
+ // The callee has to preserve all registers the caller needs to preserve.
+ const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
+ const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
if (!CCMatch) {
- SmallVector<CCValAssign, 16> RVLocs1;
- CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
- *DAG.getContext());
- CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForCall(CalleeCC, isVarArg));
-
- SmallVector<CCValAssign, 16> RVLocs2;
- CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
- *DAG.getContext());
- CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForCall(CallerCC, isVarArg));
-
- if (RVLocs1.size() != RVLocs2.size())
+ const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
+ if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
return false;
- for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
- if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
- return false;
- if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
- return false;
- if (RVLocs1[i].isRegLoc()) {
- if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
- return false;
- } else {
- if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
- return false;
- }
- }
}
// Nothing more to check if the callee is taking no arguments
@@ -2794,16 +2852,22 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
return true;
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
- *DAG.getContext());
+ CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
- // If the stack arguments for this call would fit into our own save area then
- // the call can be made tail.
- return CCInfo.getNextStackOffset() <= FuncInfo->getBytesInStackArgArea();
+ // If the stack arguments for this call do not fit into our own save area then
+ // the call cannot be made tail.
+ if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
+ return false;
+
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
+ return false;
+
+ return true;
}
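
Annotation: the rewritten eligibility test above replaces hand-rolled per-location result comparison with CCState::resultsCompatible plus a register-mask check. The conceptual core of that mask check: a tail call is only safe if every register the caller needs preserved across the call is also preserved by the callee. Real call-preserved masks are arrays of uint32_t compared via regmaskSubsetEqual(); a single word and hypothetical mask values stand in for them here.

#include <cassert>
#include <cstdint>

static bool preservesAtLeast(uint32_t CallerPreserved,
                             uint32_t CalleePreserved) {
  // Every bit set for the caller must also be set for the callee.
  return (CallerPreserved & ~CalleePreserved) == 0;
}

int main() {
  uint32_t AAPCSLike = 0x00FF0000;        // hypothetical callee-saved set
  uint32_t PreserveMostLike = 0x0FFF0000; // strictly larger preserved set
  assert(preservesAtLeast(AAPCSLike, PreserveMostLike));
  assert(!preservesAtLeast(PreserveMostLike, AAPCSLike));
  return 0;
}
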
SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
@@ -2845,7 +2909,8 @@ bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
}
bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const {
- return CallCC == CallingConv::Fast;
+ return CallCC == CallingConv::Fast ||
+ CallCC == CallingConv::PreserveMost;
}
/// LowerCall - Lower a call to a callseq_start + CALL + callseq_end chain,
@@ -2865,7 +2930,6 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
bool IsVarArg = CLI.IsVarArg;
MachineFunction &MF = DAG.getMachineFunction();
- bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
bool IsThisReturn = false;
AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
@@ -2875,8 +2939,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
if (IsTailCall) {
// Check if it's really possible to do a tail call.
IsTailCall = isEligibleForTailCallOptimization(
- Callee, CallConv, IsVarArg, IsStructRet,
- MF.getFunction()->hasStructRetAttr(), Outs, OutVals, Ins, DAG);
+ Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
if (!IsTailCall && CLI.CS && CLI.CS->isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
"site marked musttail");
@@ -2959,7 +3022,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
// Since callee will pop argument stack as a tail call, we must keep the
// popped size 16-byte aligned.
- NumBytes = RoundUpToAlignment(NumBytes, 16);
+ NumBytes = alignTo(NumBytes, 16);
// FPDiff will be negative if this tail call requires more space than we
// would automatically have in our incoming argument space. Positive if we
@@ -3092,8 +3155,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
VA.getValVT() == MVT::i16)
Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
- SDValue Store =
- DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, false, false, 0);
+ SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo);
MemOpChains.push_back(Store);
}
}
@@ -3199,9 +3261,8 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
Chain = DAG.getNode(AArch64ISD::CALL, DL, NodeTys, Ops);
InFlag = Chain.getValue(1);
- uint64_t CalleePopBytes = DoesCalleeRestoreStack(CallConv, TailCallOpt)
- ? RoundUpToAlignment(NumBytes, 16)
- : 0;
+ uint64_t CalleePopBytes =
+ DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0;
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
DAG.getIntPtrConstant(CalleePopBytes, DL, true),
@@ -3232,7 +3293,7 @@ AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const {
+ const SDLoc &DL, SelectionDAG &DAG) const {
CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS
? RetCC_AArch64_WebKit_JS
: RetCC_AArch64_AAPCS;
@@ -3318,26 +3379,6 @@ SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
return DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, GotAddr);
}
- if ((OpFlags & AArch64II::MO_CONSTPOOL) != 0) {
- assert(getTargetMachine().getCodeModel() == CodeModel::Small &&
- "use of MO_CONSTPOOL only supported on small model");
- SDValue Hi = DAG.getTargetConstantPool(GV, PtrVT, 0, 0, AArch64II::MO_PAGE);
- SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, Hi);
- unsigned char LoFlags = AArch64II::MO_PAGEOFF | AArch64II::MO_NC;
- SDValue Lo = DAG.getTargetConstantPool(GV, PtrVT, 0, 0, LoFlags);
- SDValue PoolAddr = DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, Lo);
- SDValue GlobalAddr = DAG.getLoad(
- PtrVT, DL, DAG.getEntryNode(), PoolAddr,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
- /*isVolatile=*/false,
- /*isNonTemporal=*/true,
- /*isInvariant=*/true, 8);
- if (GN->getOffset() != 0)
- return DAG.getNode(ISD::ADD, DL, PtrVT, GlobalAddr,
- DAG.getConstant(GN->getOffset(), DL, PtrVT));
- return GlobalAddr;
- }
-
if (getTargetMachine().getCodeModel() == CodeModel::Large) {
const unsigned char MO_NC = AArch64II::MO_NC;
return DAG.getNode(
@@ -3405,8 +3446,9 @@ AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
SDValue Chain = DAG.getEntryNode();
SDValue FuncTLVGet =
DAG.getLoad(MVT::i64, DL, Chain, DescAddr,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()), false,
- true, true, 8);
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()),
+ /* Alignment = */ 8, MachineMemOperand::MONonTemporal |
+ MachineMemOperand::MOInvariant);
Chain = FuncTLVGet.getValue(1);
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
@@ -3447,18 +3489,16 @@ AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
/// Therefore, a pseudo-instruction (TLSDESC_CALLSEQ) is used to represent the
/// above sequence, and expanded really late in the compilation flow, to ensure
/// the sequence is produced as per above.
-SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr, SDLoc DL,
+SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr,
+ const SDLoc &DL,
SelectionDAG &DAG) const {
EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Chain = DAG.getEntryNode();
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
- SmallVector<SDValue, 2> Ops;
- Ops.push_back(Chain);
- Ops.push_back(SymAddr);
-
- Chain = DAG.getNode(AArch64ISD::TLSDESC_CALLSEQ, DL, NodeTys, Ops);
+ Chain =
+ DAG.getNode(AArch64ISD::TLSDESC_CALLSEQ, DL, NodeTys, {Chain, SymAddr});
SDValue Glue = Chain.getValue(1);
return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
@@ -3888,7 +3928,7 @@ SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS,
SDValue RHS, SDValue TVal,
- SDValue FVal, SDLoc dl,
+ SDValue FVal, const SDLoc &dl,
SelectionDAG &DAG) const {
// Handle f128 first, because it will result in a comparison of some RTLIB
// call result against zero.
@@ -4181,7 +4221,7 @@ SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
getPointerTy(DAG.getDataLayout()));
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
- MachinePointerInfo(SV), false, false, 0);
+ MachinePointerInfo(SV));
}
SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
@@ -4201,7 +4241,7 @@ SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
// void *__stack at offset 0
SDValue Stack = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(), PtrVT);
MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
- MachinePointerInfo(SV), false, false, 8));
+ MachinePointerInfo(SV), /* Alignment = */ 8));
// void *__gr_top at offset 8
int GPRSize = FuncInfo->getVarArgsGPRSize();
@@ -4216,7 +4256,8 @@ SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
DAG.getConstant(GPRSize, DL, PtrVT));
MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
- MachinePointerInfo(SV, 8), false, false, 8));
+ MachinePointerInfo(SV, 8),
+ /* Alignment = */ 8));
}
// void *__vr_top at offset 16
@@ -4231,24 +4272,23 @@ SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
DAG.getConstant(FPRSize, DL, PtrVT));
MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
- MachinePointerInfo(SV, 16), false, false, 8));
+ MachinePointerInfo(SV, 16),
+ /* Alignment = */ 8));
}
// int __gr_offs at offset 24
SDValue GROffsAddr =
DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getConstant(24, DL, PtrVT));
- MemOps.push_back(DAG.getStore(Chain, DL,
- DAG.getConstant(-GPRSize, DL, MVT::i32),
- GROffsAddr, MachinePointerInfo(SV, 24), false,
- false, 4));
+ MemOps.push_back(DAG.getStore(
+ Chain, DL, DAG.getConstant(-GPRSize, DL, MVT::i32), GROffsAddr,
+ MachinePointerInfo(SV, 24), /* Alignment = */ 4));
// int __vr_offs at offset 28
SDValue VROffsAddr =
DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getConstant(28, DL, PtrVT));
- MemOps.push_back(DAG.getStore(Chain, DL,
- DAG.getConstant(-FPRSize, DL, MVT::i32),
- VROffsAddr, MachinePointerInfo(SV, 28), false,
- false, 4));
+ MemOps.push_back(DAG.getStore(
+ Chain, DL, DAG.getConstant(-FPRSize, DL, MVT::i32), VROffsAddr,
+ MachinePointerInfo(SV, 28), /* Alignment = */ 4));
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
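// For reference, a minimal C-level sketch of the AAPCS64 va_list record that
// the five stores above populate. The struct name is illustrative; the field
// names and offsets are the ones the AArch64 PCS defines.
struct aapcs64_va_list {
  void *__stack;  // offset 0:  next stacked argument
  void *__gr_top; // offset 8:  end of the GP register save area
  void *__vr_top; // offset 16: end of the FP/SIMD register save area
  int __gr_offs;  // offset 24: negative byte offset from __gr_top
  int __vr_offs;  // offset 28: negative byte offset from __vr_top
};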
@@ -4287,8 +4327,7 @@ SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
unsigned Align = Op.getConstantOperandVal(3);
auto PtrVT = getPointerTy(DAG.getDataLayout());
- SDValue VAList = DAG.getLoad(PtrVT, DL, Chain, Addr, MachinePointerInfo(V),
- false, false, false, 0);
+ SDValue VAList = DAG.getLoad(PtrVT, DL, Chain, Addr, MachinePointerInfo(V));
Chain = VAList.getValue(1);
if (Align > 8) {
@@ -4318,14 +4357,14 @@ SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
SDValue VANext = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
DAG.getConstant(ArgSize, DL, PtrVT));
// Store the incremented VAList to the legalized pointer
- SDValue APStore = DAG.getStore(Chain, DL, VANext, Addr, MachinePointerInfo(V),
- false, false, 0);
+ SDValue APStore =
+ DAG.getStore(Chain, DL, VANext, Addr, MachinePointerInfo(V));
// Load the actual argument out of the pointer VAList
if (NeedFPTrunc) {
// Load the value as an f64.
- SDValue WideFP = DAG.getLoad(MVT::f64, DL, APStore, VAList,
- MachinePointerInfo(), false, false, false, 0);
+ SDValue WideFP =
+ DAG.getLoad(MVT::f64, DL, APStore, VAList, MachinePointerInfo());
// Round the value down to an f32.
SDValue NarrowFP = DAG.getNode(ISD::FP_ROUND, DL, VT, WideFP.getValue(0),
DAG.getIntPtrConstant(1, DL));
@@ -4334,8 +4373,7 @@ SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
return DAG.getMergeValues(Ops, DL);
}
- return DAG.getLoad(VT, DL, APStore, VAList, MachinePointerInfo(), false,
- false, false, 0);
+ return DAG.getLoad(VT, DL, APStore, VAList, MachinePointerInfo());
}
SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
@@ -4350,7 +4388,7 @@ SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, VT);
while (Depth--)
FrameAddr = DAG.getLoad(VT, DL, DAG.getEntryNode(), FrameAddr,
- MachinePointerInfo(), false, false, false, 0);
+ MachinePointerInfo());
return FrameAddr;
}
@@ -4381,7 +4419,7 @@ SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout()));
return DAG.getLoad(VT, DL, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
- MachinePointerInfo(), false, false, false, 0);
+ MachinePointerInfo());
}
// Return LR, which contains the return address. Mark it an implicit live-in.
@@ -4521,6 +4559,40 @@ bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
// AArch64 Optimization Hooks
//===----------------------------------------------------------------------===//
+/// getEstimate - Return the appropriate estimate DAG node for either the
+/// reciprocal or the reciprocal square root.
+static SDValue getEstimate(const AArch64Subtarget &ST,
+ const AArch64TargetLowering::DAGCombinerInfo &DCI, unsigned Opcode,
+ const SDValue &Operand, unsigned &ExtraSteps) {
+ if (!ST.hasNEON())
+ return SDValue();
+
+ EVT VT = Operand.getValueType();
+
+ std::string RecipOp;
+ RecipOp = (Opcode == AArch64ISD::FRECPE) ? "div" : "sqrt";
+ RecipOp = (VT.isVector() ? "vec-" : "") + RecipOp;
+ RecipOp += (VT.getScalarType() == MVT::f64) ? "d" : "f";
+
+ TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
+ if (!Recips.isEnabled(RecipOp))
+ return SDValue();
+
+ ExtraSteps = Recips.getRefinementSteps(RecipOp);
+ return DCI.DAG.getNode(Opcode, SDLoc(Operand), VT, Operand);
+}
+
+SDValue AArch64TargetLowering::getRecipEstimate(SDValue Operand,
+ DAGCombinerInfo &DCI, unsigned &ExtraSteps) const {
+ return getEstimate(*Subtarget, DCI, AArch64ISD::FRECPE, Operand, ExtraSteps);
+}
+
+SDValue AArch64TargetLowering::getRsqrtEstimate(SDValue Operand,
+ DAGCombinerInfo &DCI, unsigned &ExtraSteps, bool &UseOneConst) const {
+ UseOneConst = true;
+ return getEstimate(*Subtarget, DCI, AArch64ISD::FRSQRTE, Operand, ExtraSteps);
+}
+
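// A scalar sketch of the refinement the combiner layers on top of these
// estimates: ExtraSteps is the Newton-Raphson iteration count, and the loop
// bodies mirror what FRECPS/FRSQRTS compute per step. Illustrative only.
static float refineRecip(float X, float Est, unsigned Steps) {
  for (unsigned I = 0; I < Steps; ++I)
    Est = Est * (2.0f - X * Est); // Newton-Raphson for 1/X (FRECPS step)
  return Est;
}
static float refineRsqrt(float X, float Est, unsigned Steps) {
  for (unsigned I = 0; I < Steps; ++I)
    Est = Est * (3.0f - X * Est * Est) * 0.5f; // 1/sqrt(X) (FRSQRTS step)
  return Est;
}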
//===----------------------------------------------------------------------===//
// AArch64 Inline Assembly Support
//===----------------------------------------------------------------------===//
@@ -4548,6 +4620,27 @@ bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
// is prefixed by the %w modifier. Floating-point and SIMD register operands
// will be output with the v prefix unless prefixed by the %b, %h, %s, %d or
// %q modifier.
+const char *AArch64TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
+ // At this point, we have to lower this constraint to something else, so we
+ // lower it to an "r" or "w". However, by doing this we will force the
+ // result to be in a register, while the X constraint is much more
+ // permissive.
+ //
+ // Although we are correct (we are free to emit anything, without
+ // constraints), we might break use cases that would expect us to be more
+ // efficient and emit something else.
+ if (!Subtarget->hasFPARMv8())
+ return "r";
+
+ if (ConstraintVT.isFloatingPoint())
+ return "w";
+
+ if (ConstraintVT.isVector() &&
+ (ConstraintVT.getSizeInBits() == 64 ||
+ ConstraintVT.getSizeInBits() == 128))
+ return "w";
+
+ return "r";
+}
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
@@ -4642,11 +4735,16 @@ AArch64TargetLowering::getRegForInlineAsmConstraint(
int RegNo;
bool Failed = Constraint.slice(2, Size - 1).getAsInteger(10, RegNo);
if (!Failed && RegNo >= 0 && RegNo <= 31) {
- // v0 - v31 are aliases of q0 - q31.
+ // v0 - v31 are aliases of q0 - q31 or d0 - d31 depending on size.
// By default we'll emit v0-v31 for this unless there's a modifier where
// we'll emit the correct register as well.
- Res.first = AArch64::FPR128RegClass.getRegister(RegNo);
- Res.second = &AArch64::FPR128RegClass;
+ if (VT != MVT::Other && VT.getSizeInBits() == 64) {
+ Res.first = AArch64::FPR64RegClass.getRegister(RegNo);
+ Res.second = &AArch64::FPR64RegClass;
+ } else {
+ Res.first = AArch64::FPR128RegClass.getRegister(RegNo);
+ Res.second = &AArch64::FPR128RegClass;
+ }
}
}
}
@@ -4862,11 +4960,12 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
SmallVector<ShuffleSourceInfo, 2> Sources;
for (unsigned i = 0; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
- if (V.getOpcode() == ISD::UNDEF)
+ if (V.isUndef())
continue;
- else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
+ else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+ !isa<ConstantSDNode>(V.getOperand(1))) {
// A shuffle can only come from building a vector from various
- // elements of other vectors.
+ // elements of other vectors, provided their indices are constant.
return SDValue();
}
@@ -4985,7 +5084,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
int BitsPerShuffleLane = ShuffleVT.getVectorElementType().getSizeInBits();
for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
SDValue Entry = Op.getOperand(i);
- if (Entry.getOpcode() == ISD::UNDEF)
+ if (Entry.isUndef())
continue;
auto Src = std::find(Sources.begin(), Sources.end(), Entry.getOperand(0));
@@ -5018,7 +5117,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
ShuffleOps[i] = Sources[i].ShuffleVec;
SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
- ShuffleOps[1], &Mask[0]);
+ ShuffleOps[1], Mask);
return DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
}
@@ -5304,7 +5403,7 @@ static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) {
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
SDValue RHS, SelectionDAG &DAG,
- SDLoc dl) {
+ const SDLoc &dl) {
unsigned OpNum = (PFEntry >> 26) & 0x0F;
unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1);
unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1);
@@ -5433,35 +5532,34 @@ static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
SDValue V2Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V2);
SDValue Shuffle;
- if (V2.getNode()->getOpcode() == ISD::UNDEF) {
+ if (V2.getNode()->isUndef()) {
if (IndexLen == 8)
V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V1Cst);
Shuffle = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
- DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT,
- makeArrayRef(TBLMask.data(), IndexLen)));
+ DAG.getBuildVector(IndexVT, DL,
+ makeArrayRef(TBLMask.data(), IndexLen)));
} else {
if (IndexLen == 8) {
V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V2Cst);
Shuffle = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst,
- DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT,
- makeArrayRef(TBLMask.data(), IndexLen)));
+ DAG.getBuildVector(IndexVT, DL,
+ makeArrayRef(TBLMask.data(), IndexLen)));
} else {
// FIXME: We cannot, for the moment, emit a TBL2 instruction because we
// cannot currently represent the register constraints on the input
// table registers.
// Shuffle = DAG.getNode(AArch64ISD::TBL2, DL, IndexVT, V1Cst, V2Cst,
- // DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT,
- // &TBLMask[0], IndexLen));
+ // DAG.getBuildVector(IndexVT, DL, &TBLMask[0],
+ // IndexLen));
Shuffle = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, IndexVT,
- DAG.getConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32),
- V1Cst, V2Cst,
- DAG.getNode(ISD::BUILD_VECTOR, DL, IndexVT,
- makeArrayRef(TBLMask.data(), IndexLen)));
+ DAG.getConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32), V1Cst,
+ V2Cst, DAG.getBuildVector(IndexVT, DL,
+ makeArrayRef(TBLMask.data(), IndexLen)));
}
}
return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Shuffle);
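// At the source level the single-table path above corresponds to the NEON
// intrinsic below: TBL selects each result byte from the table by index and
// yields 0 for out-of-range indices. A sketch assuming an AArch64 target
// with <arm_neon.h>; not part of the patch.
#include <arm_neon.h>
uint8x16_t shuffle_bytes(uint8x16_t Table, uint8x16_t Indices) {
  return vqtbl1q_u8(Table, Indices); // one-register TBL, as in tbl1 above
}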
@@ -5496,8 +5594,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
- if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0],
- V1.getValueType().getSimpleVT())) {
+ if (SVN->isSplat()) {
int Lane = SVN->getSplatIndex();
// If this is undef splat, generate it via "just" vdup, if possible.
if (Lane == -1)
@@ -5546,8 +5643,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
Imm *= getExtFactor(V1);
return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2,
DAG.getConstant(Imm, dl, MVT::i32));
- } else if (V2->getOpcode() == ISD::UNDEF &&
- isSingletonEXTMask(ShuffleMask, VT, Imm)) {
+ } else if (V2->isUndef() && isSingletonEXTMask(ShuffleMask, VT, Imm)) {
Imm *= getExtFactor(V1);
return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1,
DAG.getConstant(Imm, dl, MVT::i32));
@@ -5580,8 +5676,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
}
- SDValue Concat = tryFormConcatFromShuffle(Op, DAG);
- if (Concat.getNode())
+ if (SDValue Concat = tryFormConcatFromShuffle(Op, DAG))
return Concat;
bool DstIsLeft;
@@ -5853,8 +5948,7 @@ SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
SelectionDAG &DAG) const {
// Attempt to form a vector S[LR]I from (or (and X, C1), (lsl Y, C2))
if (EnableAArch64SlrGeneration) {
- SDValue Res = tryLowerToSLI(Op.getNode(), DAG);
- if (Res.getNode())
+ if (SDValue Res = tryLowerToSLI(Op.getNode(), DAG))
return Res;
}
@@ -5972,7 +6066,7 @@ static SDValue NormalizeBuildVector(SDValue Op,
}
Ops.push_back(Lane);
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
+ return DAG.getBuildVector(VT, dl, Ops);
}
SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
@@ -6217,7 +6311,7 @@ FailedModImm:
SDValue ConstantValue;
for (unsigned i = 0; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
- if (V.getOpcode() == ISD::UNDEF)
+ if (V.isUndef())
continue;
if (i > 0)
isOnlyLowElement = false;
@@ -6273,7 +6367,7 @@ FailedModImm:
for (unsigned i = 0; i < NumElts; ++i)
Ops.push_back(DAG.getNode(ISD::BITCAST, dl, NewType, Op.getOperand(i)));
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), NewType, NumElts);
- SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);
+ SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
Val = LowerBUILD_VECTOR(Val, DAG);
if (Val.getNode())
return DAG.getNode(ISD::BITCAST, dl, VT, Val);
@@ -6328,7 +6422,7 @@ FailedModImm:
// value is already in an S or D register.
// Do not do this for UNDEF/LOAD nodes because we have better patterns
// for those avoiding the SCALAR_TO_VECTOR/BUILD_VECTOR.
- if (Op0.getOpcode() != ISD::UNDEF && Op0.getOpcode() != ISD::LOAD &&
+ if (!Op0.isUndef() && Op0.getOpcode() != ISD::LOAD &&
(ElemSize == 32 || ElemSize == 64)) {
unsigned SubIdx = ElemSize == 32 ? AArch64::ssub : AArch64::dsub;
MachineSDNode *N =
@@ -6339,7 +6433,7 @@ FailedModImm:
}
for (; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
- if (V.getOpcode() == ISD::UNDEF)
+ if (V.isUndef())
continue;
SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64);
Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
@@ -6580,7 +6674,7 @@ SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
AArch64CC::CondCode CC, bool NoNans, EVT VT,
- SDLoc dl, SelectionDAG &DAG) {
+ const SDLoc &dl, SelectionDAG &DAG) {
EVT SrcVT = LHS.getValueType();
assert(VT.getSizeInBits() == SrcVT.getSizeInBits() &&
"function only supposed to emit natural comparisons");
@@ -6877,12 +6971,10 @@ bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const {
const DataLayout &DL = I->getModule()->getDataLayout();
EVT VT = getValueType(DL, User->getOperand(0)->getType());
- if (isFMAFasterThanFMulAndFAdd(VT) &&
- isOperationLegalOrCustom(ISD::FMA, VT) &&
- (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath))
- return false;
-
- return true;
+ return !(isFMAFasterThanFMulAndFAdd(VT) &&
+ isOperationLegalOrCustom(ISD::FMA, VT) &&
+ (Options.AllowFPOpFusion == FPOpFusion::Fast ||
+ Options.UnsafeFPMath));
}
// All 32-bit GPR operations implicitly zero the high-half of the corresponding
@@ -7183,16 +7275,17 @@ EVT AArch64TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
// 12-bit optionally shifted immediates are legal for adds.
bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {
- if ((Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0))
- return true;
- return false;
+ // Avoid UB for INT64_MIN.
+ if (Immed == std::numeric_limits<int64_t>::min())
+ return false;
+ // Same encoding for add/sub, just flip the sign.
+ Immed = std::abs(Immed);
+ return ((Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0));
}
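// A standalone restatement of the predicate above, under the same rules: an
// immediate is legal if it fits in 12 bits, optionally shifted left by 12.
// Illustrative only; the in-tree function is authoritative.
#include <cstdint>
#include <cstdlib>
static bool fitsAddSubImm(int64_t Imm) {
  if (Imm == INT64_MIN) // std::abs(INT64_MIN) would be undefined behavior
    return false;
  uint64_t U = std::abs(Imm); // add and sub share an encoding, so sign-flip
  return U < (1ULL << 12) || ((U & 0xfff) == 0 && U < (1ULL << 24));
}
// e.g. 4095 and -0x7ff000 are legal; 0x1001 is not.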
// Integer comparisons are implemented with ADDS/SUBS, so the range of valid
// immediates is the same as for an add or a sub.
bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Immed) const {
- if (Immed < 0)
- Immed *= -1;
return isLegalAddImmediate(Immed);
}
@@ -7244,10 +7337,8 @@ bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
// Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2
- if (!AM.Scale || AM.Scale == 1 ||
- (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes))
- return true;
- return false;
+ return !AM.Scale || AM.Scale == 1 ||
+ (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes);
}
int AArch64TargetLowering::getScalingFactorCost(const DataLayout &DL,
@@ -7334,6 +7425,33 @@ bool AArch64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
return Shift < 3;
}
+/// Turn vector tests of the signbit in the form of:
+/// xor (sra X, elt_size(X)-1), -1
+/// into:
+/// cmge X, X, #0
+static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
+ const AArch64Subtarget *Subtarget) {
+ EVT VT = N->getValueType(0);
+ if (!Subtarget->hasNEON() || !VT.isVector())
+ return SDValue();
+
+ // There must be an arithmetic shift right (vashr) before the xor, and the
+ // xor must be a 'not' operation (an xor with all-ones).
+ SDValue Shift = N->getOperand(0);
+ SDValue Ones = N->getOperand(1);
+ if (Shift.getOpcode() != AArch64ISD::VASHR || !Shift.hasOneUse() ||
+ !ISD::isBuildVectorAllOnes(Ones.getNode()))
+ return SDValue();
+
+ // The shift should be smearing the sign bit across each vector element.
+ auto *ShiftAmt = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
+ EVT ShiftEltTy = Shift.getValueType().getVectorElementType();
+ if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1)
+ return SDValue();
+
+ return DAG.getNode(AArch64ISD::CMGEz, SDLoc(N), VT, Shift.getOperand(0));
+}
+
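// The per-lane identity behind the fold above, sketched for one i32 lane:
// smearing the sign bit with an arithmetic shift and negating the result is
// the same as testing x >= 0. Assumes arithmetic >> on signed values, which
// is what VASHR provides. Illustrative only.
#include <cstdint>
static uint32_t geZeroMask(int32_t X) {
  uint32_t Smeared = (uint32_t)(X >> 31); // all-ones iff X is negative
  return ~Smeared;                        // all-ones iff X >= 0, i.e. CMGEz
}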
// Generate SUBS and CSEL for integer abs.
static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
@@ -7362,13 +7480,15 @@ static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
-// performXorCombine - Attempts to handle integer ABS.
static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
+ if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
+ return Cmp;
+
return performIntegerAbsCombine(N, DAG);
}
@@ -7376,6 +7496,10 @@ SDValue
AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG,
std::vector<SDNode *> *Created) const {
+ AttributeSet Attr = DAG.getMachineFunction().getFunction()->getAttributes();
+ if (isIntDivCheap(N->getValueType(0), Attr))
+ return SDValue(N, 0); // Keep the SDIV as-is; the divide is smaller here.
+
// fold (sdiv X, pow2)
EVT VT = N->getValueType(0);
if ((VT != MVT::i32 && VT != MVT::i64) ||
@@ -7426,7 +7550,7 @@ static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
// gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and
// 64-bit is 5 cycles, so this is always a win.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
- APInt Value = C->getAPIntValue();
+ const APInt &Value = C->getAPIntValue();
EVT VT = N->getValueType(0);
SDLoc DL(N);
if (Value.isNonNegative()) {
@@ -7543,9 +7667,8 @@ static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG,
!cast<LoadSDNode>(N0)->isVolatile()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
- LN0->getPointerInfo(), LN0->isVolatile(),
- LN0->isNonTemporal(), LN0->isInvariant(),
- LN0->getAlignment());
+ LN0->getPointerInfo(), LN0->getAlignment(),
+ LN0->getMemOperand()->getFlags());
// Make sure successors of the original load stay after it by updating them
// to use the new Chain.
@@ -7567,7 +7690,8 @@ static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
SDValue Op = N->getOperand(0);
- if (!Op.getValueType().isVector() || Op.getOpcode() != ISD::FMUL)
+ if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
+ Op.getOpcode() != ISD::FMUL)
return SDValue();
SDValue ConstVec = Op->getOperand(1);
@@ -7801,25 +7925,49 @@ static SDValue tryCombineToBSL(SDNode *N,
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) {
// Attempt to form an EXTR from (or (shl VAL1, #N), (srl VAL2, #RegWidth-N))
- if (!EnableAArch64ExtrGeneration)
- return SDValue();
SelectionDAG &DAG = DCI.DAG;
EVT VT = N->getValueType(0);
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
return SDValue();
- SDValue Res = tryCombineToEXTR(N, DCI);
- if (Res.getNode())
+ if (SDValue Res = tryCombineToEXTR(N, DCI))
return Res;
- Res = tryCombineToBSL(N, DCI);
- if (Res.getNode())
+ if (SDValue Res = tryCombineToBSL(N, DCI))
return Res;
return SDValue();
}
+static SDValue performSRLCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ SelectionDAG &DAG = DCI.DAG;
+ EVT VT = N->getValueType(0);
+ if (VT != MVT::i32 && VT != MVT::i64)
+ return SDValue();
+
+ // Canonicalize (srl (bswap i32 x), 16) to (rotr (bswap i32 x), 16), if the
+ // high 16-bits of x are zero. Similarly, canonicalize (srl (bswap i64 x), 32)
+ // to (rotr (bswap i64 x), 32), if the high 32-bits of x are zero.
+ SDValue N0 = N->getOperand(0);
+ if (N0.getOpcode() == ISD::BSWAP) {
+ SDLoc DL(N);
+ SDValue N1 = N->getOperand(1);
+ SDValue N00 = N0.getOperand(0);
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
+ uint64_t ShiftAmt = C->getZExtValue();
+ if (VT == MVT::i32 && ShiftAmt == 16 &&
+ DAG.MaskedValueIsZero(N00, APInt::getHighBitsSet(32, 16)))
+ return DAG.getNode(ISD::ROTR, DL, VT, N0, N1);
+ if (VT == MVT::i64 && ShiftAmt == 32 &&
+ DAG.MaskedValueIsZero(N00, APInt::getHighBitsSet(64, 32)))
+ return DAG.getNode(ISD::ROTR, DL, VT, N0, N1);
+ }
+ }
+ return SDValue();
+}
+
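// The identity this combine relies on, shown for the i32 case: when the top
// 16 bits of x are zero, the low 16 bits of bswap(x) are zero too, so the
// shift and the rotate agree. A sketch assuming the GCC/Clang
// __builtin_bswap32 builtin; illustrative only.
#include <cstdint>
static uint32_t srlOfBswap(uint32_t X) { // caller guarantees (X >> 16) == 0
  return __builtin_bswap32(X) >> 16;
}
static uint32_t rotrOfBswap(uint32_t X) { // equal under the same guarantee
  uint32_t B = __builtin_bswap32(X);
  return (B >> 16) | (B << 16); // rotate right by 16
}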
static SDValue performBitcastCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) {
@@ -8575,15 +8723,15 @@ static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode *St) {
SDValue BasePtr = St->getBasePtr();
SDValue NewST1 =
DAG.getStore(St->getChain(), DL, SplatVal, BasePtr, St->getPointerInfo(),
- St->isVolatile(), St->isNonTemporal(), St->getAlignment());
+ St->getAlignment(), St->getMemOperand()->getFlags());
unsigned Offset = EltOffset;
while (--NumVecElts) {
SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
DAG.getConstant(Offset, DL, MVT::i64));
NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr,
- St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(), Alignment);
+ St->getPointerInfo(), Alignment,
+ St->getMemOperand()->getFlags());
Offset += EltOffset;
}
return NewST1;
@@ -8603,9 +8751,7 @@ static SDValue split16BStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
// be included in TLI.allowsMisalignedMemoryAccesses(), and there should be
// a call to that function here.
- // Cyclone has bad performance on unaligned 16B stores when crossing line and
- // page boundaries. We want to split such stores.
- if (!Subtarget->isCyclone())
+ if (!Subtarget->isMisaligned128StoreSlow())
return SDValue();
// Don't split at -Oz.
@@ -8647,12 +8793,12 @@ static SDValue split16BStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
SDValue BasePtr = S->getBasePtr();
SDValue NewST1 =
DAG.getStore(S->getChain(), DL, SubVector0, BasePtr, S->getPointerInfo(),
- S->isVolatile(), S->isNonTemporal(), S->getAlignment());
+ S->getAlignment(), S->getMemOperand()->getFlags());
SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
DAG.getConstant(8, DL, MVT::i64));
return DAG.getStore(NewST1.getValue(0), DL, SubVector1, OffsetPtr,
- S->getPointerInfo(), S->isVolatile(), S->isNonTemporal(),
- S->getAlignment());
+ S->getPointerInfo(), S->getAlignment(),
+ S->getMemOperand()->getFlags());
}
/// Target-specific DAG combine function for post-increment LD1 (lane) and
@@ -8741,9 +8887,10 @@ static SDValue performPostLD1Combine(SDNode *N,
LoadSDN->getMemOperand());
// Update the uses.
- SmallVector<SDValue, 2> NewResults;
- NewResults.push_back(SDValue(LD, 0)); // The result of load
- NewResults.push_back(SDValue(UpdN.getNode(), 2)); // Chain
+ SDValue NewResults[] = {
+ SDValue(LD, 0), // The result of load
+ SDValue(UpdN.getNode(), 2) // Chain
+ };
DCI.CombineTo(LD, NewResults);
DCI.CombineTo(N, SDValue(UpdN.getNode(), 0)); // Dup/Inserted Result
DCI.CombineTo(User, SDValue(UpdN.getNode(), 1)); // Write back register
@@ -8774,8 +8921,7 @@ static SDValue performSTORECombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG,
const AArch64Subtarget *Subtarget) {
- SDValue Split = split16BStores(N, DCI, DAG, Subtarget);
- if (Split.getNode())
+ if (SDValue Split = split16BStores(N, DCI, DAG, Subtarget))
return Split;
if (Subtarget->supportsAddressTopByteIgnored() &&
@@ -9215,10 +9361,8 @@ bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType) {
}
case ISD::Constant:
case ISD::TargetConstant: {
- if (std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) <
- 1LL << (width - 1))
- return true;
- return false;
+ return std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) <
+ 1LL << (width - 1);
}
}
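// The constant case above accepts exactly the constants whose magnitude is
// below 2^(width-1); restated standalone (sketch only):
#include <cstdlib>
static bool fitsInSignedWidth(long long V, unsigned Width) {
  return std::llabs(V) < (1LL << (Width - 1));
}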
@@ -9286,14 +9430,13 @@ bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType) {
// isEquivalentMaskless() is the code for testing if the AND can be removed
// factored out of the DAG recognition as the DAG can take several forms.
-static
-bool isEquivalentMaskless(unsigned CC, unsigned width,
- ISD::LoadExtType ExtType, signed AddConstant,
- signed CompConstant) {
+static bool isEquivalentMaskless(unsigned CC, unsigned width,
+ ISD::LoadExtType ExtType, int AddConstant,
+ int CompConstant) {
// By being careful about our equations and writing them only in terms of
// symbolic values and well-known constants (0, 1, -1, MaxUInt), we can
// make them generally applicable to all bit widths.
- signed MaxUInt = (1 << width);
+ int MaxUInt = (1 << width);
// For the purposes of these comparisons sign extending the type is
// equivalent to zero extending the add and displacing it by half the integer
@@ -9441,8 +9584,7 @@ SDValue performCONDCombine(SDNode *N,
static SDValue performBRCONDCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) {
- SDValue NV = performCONDCombine(N, DCI, DAG, 2, 3);
- if (NV.getNode())
+ if (SDValue NV = performCONDCombine(N, DCI, DAG, 2, 3))
N = NV.getNode();
SDValue Chain = N->getOperand(0);
SDValue Dest = N->getOperand(1);
@@ -9678,7 +9820,7 @@ static SDValue performSelectCombine(SDNode *N,
// Now duplicate the comparison mask we want across all other lanes.
SmallVector<int, 8> DUPMask(CCVT.getVectorNumElements(), 0);
- SDValue Mask = DAG.getVectorShuffle(CCVT, DL, SetCC, SetCC, DUPMask.data());
+ SDValue Mask = DAG.getVectorShuffle(CCVT, DL, SetCC, SetCC, DUPMask);
Mask = DAG.getNode(ISD::BITCAST, DL,
ResVT.changeVectorElementTypeToInteger(), Mask);
@@ -9716,6 +9858,8 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
return performFDivCombine(N, DAG, Subtarget);
case ISD::OR:
return performORCombine(N, DCI, Subtarget);
+ case ISD::SRL:
+ return performSRLCombine(N, DCI);
case ISD::INTRINSIC_WO_CHAIN:
return performIntrinsicCombine(N, DCI, Subtarget);
case ISD::ANY_EXTEND:
@@ -9829,10 +9973,7 @@ bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
// return instructions to help enable tail call optimizations for this
// instruction.
bool AArch64TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
- if (!CI->isTailCall())
- return false;
-
- return true;
+ return CI->isTailCall();
}
bool AArch64TargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base,
@@ -9935,6 +10076,31 @@ static void ReplaceReductionResults(SDNode *N,
Results.push_back(SplitVal);
}
+static void ReplaceCMP_SWAP_128Results(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) {
+ assert(N->getValueType(0) == MVT::i128 &&
+ "AtomicCmpSwap on types less than 128 should be legal");
+ SDValue Ops[] = {N->getOperand(1),
+ N->getOperand(2)->getOperand(0),
+ N->getOperand(2)->getOperand(1),
+ N->getOperand(3)->getOperand(0),
+ N->getOperand(3)->getOperand(1),
+ N->getOperand(0)};
+ SDNode *CmpSwap = DAG.getMachineNode(
+ AArch64::CMP_SWAP_128, SDLoc(N),
+ DAG.getVTList(MVT::i64, MVT::i64, MVT::i32, MVT::Other), Ops);
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineSDNode::mmo_iterator MemOp = MF.allocateMemRefsArray(1);
+ MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
+ cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);
+
+ Results.push_back(SDValue(CmpSwap, 0));
+ Results.push_back(SDValue(CmpSwap, 1));
+ Results.push_back(SDValue(CmpSwap, 3));
+}
+
void AArch64TargetLowering::ReplaceNodeResults(
SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
switch (N->getOpcode()) {
@@ -9966,11 +10132,16 @@ void AArch64TargetLowering::ReplaceNodeResults(
assert(N->getValueType(0) == MVT::i128 && "unexpected illegal conversion");
// Let normal code take care of it by not adding anything to Results.
return;
+ case ISD::ATOMIC_CMP_SWAP:
+ ReplaceCMP_SWAP_128Results(N, Results, DAG);
+ return;
}
}
bool AArch64TargetLowering::useLoadStackGuardNode() const {
- return true;
+ if (!Subtarget->isTargetAndroid())
+ return true;
+ return TargetLowering::useLoadStackGuardNode();
}
unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const {
@@ -10017,14 +10188,19 @@ AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
bool AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR(
AtomicCmpXchgInst *AI) const {
- return true;
+ // At -O0, fast-regalloc cannot cope with the live vregs necessary to
+ // implement cmpxchg without spilling. If the address being exchanged is also
+ // on the stack and close enough to the spill slot, this can lead to a
+ // situation where the monitor always gets cleared and the atomic operation
+ // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
+ return getTargetMachine().getOptLevel() != 0;
}
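// For context, the shape of the LL/SC loop a cmpxchg normally expands to,
// written as portable C++. On AArch64 the loop becomes LDAXR/STLXR, and any
// spill or reload the -O0 fast allocator inserts between the two can clear
// the exclusive monitor, so the store-conditional fails on every iteration.
// Illustrative only.
#include <atomic>
static bool cmpxchgStrong(std::atomic<long> &V, long Expected, long Desired) {
  return V.compare_exchange_strong(Expected, Desired,
                                   std::memory_order_seq_cst);
}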
Value *AArch64TargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
AtomicOrdering Ord) const {
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
- bool IsAcquire = isAtLeastAcquire(Ord);
+ bool IsAcquire = isAcquireOrStronger(Ord);
// Since i128 isn't legal and intrinsics don't get type-lowered, the ldrexd
// intrinsic must return {i64, i64} and we have to recombine them into a
@@ -10066,7 +10242,7 @@ Value *AArch64TargetLowering::emitStoreConditional(IRBuilder<> &Builder,
Value *Val, Value *Addr,
AtomicOrdering Ord) const {
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- bool IsRelease = isAtLeastRelease(Ord);
+ bool IsRelease = isReleaseOrStronger(Ord);
// Since the intrinsics must have legal type, the i128 intrinsics take two
// parameters: "i64, i64". We must marshal Val into the appropriate form
@@ -10104,6 +10280,22 @@ bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
return false;
}
+Value *AArch64TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
+ if (!Subtarget->isTargetAndroid())
+ return TargetLowering::getIRStackGuard(IRB);
+
+ // Android provides a fixed TLS slot for the stack cookie. See the definition
+ // of TLS_SLOT_STACK_GUARD in
+ // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
+ const unsigned TlsOffset = 0x28;
+ Module *M = IRB.GetInsertBlock()->getParent()->getParent();
+ Function *ThreadPointerFunc =
+ Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
+ return IRB.CreatePointerCast(
+ IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), TlsOffset),
+ Type::getInt8PtrTy(IRB.getContext())->getPointerTo(0));
+}
+
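// A minimal sketch of the address computation emitted above, assuming an
// Android/Bionic AArch64 target and a compiler that provides
// __builtin_thread_pointer (GCC and Clang do). Illustrative only.
static void *androidStackGuardSlot() {
  char *TP = (char *)__builtin_thread_pointer(); // reads TPIDR_EL0
  return TP + 0x28;                              // TLS_SLOT_STACK_GUARD
}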
Value *AArch64TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
if (!Subtarget->isTargetAndroid())
return TargetLowering::getSafeStackPointerLocation(IRB);
@@ -10114,7 +10306,7 @@ Value *AArch64TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) cons
const unsigned TlsOffset = 0x48;
Module *M = IRB.GetInsertBlock()->getParent()->getParent();
Function *ThreadPointerFunc =
- Intrinsic::getDeclaration(M, Intrinsic::aarch64_thread_pointer);
+ Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
return IRB.CreatePointerCast(
IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), TlsOffset),
Type::getInt8PtrTy(IRB.getContext())->getPointerTo(0));
@@ -10166,3 +10358,16 @@ void AArch64TargetLowering::insertCopiesSplitCSR(
.addReg(NewVR);
}
}
+
+bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const {
+ // Integer division on AArch64 is expensive. However, when aggressively
+ // optimizing for code size, we prefer to use a div instruction, as it is
+ // usually smaller than the alternative sequence.
+ // The exception to this is vector division. Since AArch64 doesn't have vector
+ // integer division, leaving the division as-is is a loss even in terms of
+ // size, because it will have to be scalarized, while the alternative code
+ // sequence can be performed in vector form.
+ bool OptSize =
+ Attr.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
+ return OptSize && !VT.isVector();
+}
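// The size trade-off described above, in one line of C: at minimum size a
// scalar divide can stay a single SDIV, while the usual strength-reduced
// expansion is a multiply-high plus shifts. Exact output varies by compiler
// and flags; this is only an illustration.
long divBySeven(long X) { return X / 7; } // -Oz: sdiv; -O2: smulh + shifts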
diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h
index e99616c94068..c87cfed1f892 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/lib/Target/AArch64/AArch64ISelLowering.h
@@ -187,6 +187,10 @@ enum NodeType : unsigned {
SMULL,
UMULL,
+ // Reciprocal estimates.
+ FRECPE,
+ FRSQRTE,
+
// NEON Load/Store with post-increment base updates
LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
LD3post,
@@ -272,11 +276,11 @@ public:
SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
- MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
+ MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
MachineBasicBlock *BB) const;
MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
+ EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const override;
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
@@ -358,6 +362,10 @@ public:
TargetLoweringBase::LegalizeTypeAction
getPreferredVectorAction(EVT VT) const override;
+ /// If the target has a standard location for the stack protector cookie,
+ /// returns the address of that location. Otherwise, returns nullptr.
+ Value *getIRStackGuard(IRBuilder<> &IRB) const override;
+
/// If the target has a standard location for the unsafe stack pointer,
/// returns the address of that location. Otherwise, returns nullptr.
Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;
@@ -378,6 +386,8 @@ public:
return AArch64::X1;
}
+ bool isIntDivCheap(EVT VT, AttributeSet Attr) const override;
+
bool isCheapToSpeculateCttz() const override {
return true;
}
@@ -385,6 +395,12 @@ public:
bool isCheapToSpeculateCtlz() const override {
return true;
}
+
+ bool hasBitPreservingFPLogic(EVT VT) const override {
+ // FIXME: Is this always true? It should be true for vectors at least.
+ return VT == MVT::f32 || VT == MVT::f64;
+ }
+
bool supportSplitCSR(MachineFunction *MF) const override {
return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
@@ -394,6 +410,10 @@ public:
MachineBasicBlock *Entry,
const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
+ bool supportSwiftError() const override {
+ return true;
+ }
+
private:
bool isExtFreeImpl(const Instruction *Ext) const override;
@@ -401,30 +421,30 @@ private:
/// make the right decision when generating code for different targets.
const AArch64Subtarget *Subtarget;
- void addTypeForNEON(EVT VT, EVT PromotedBitwiseVT);
+ void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
void addDRTypeForNEON(MVT VT);
void addQRTypeForNEON(MVT VT);
- SDValue
- LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
- SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const override;
+ SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerCall(CallLoweringInfo & /*CLI*/,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
- SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
- bool isThisReturn, SDValue ThisVal) const;
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
+ SDValue ThisVal) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
bool isEligibleForTailCallOptimization(
SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
- bool isCalleeStructRet, bool isCallerStructRet,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;
@@ -439,7 +459,7 @@ private:
bool IsTailCallConvention(CallingConv::ID CallCC) const;
- void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
+ void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
SDValue &Chain) const;
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
@@ -449,21 +469,21 @@ private:
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
+ const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
SelectionDAG &DAG) const override;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, SDLoc DL,
+ SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
SelectionDAG &DAG) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
- SDValue TVal, SDValue FVal, SDLoc dl,
+ SDValue TVal, SDValue FVal, const SDLoc &dl,
SelectionDAG &DAG) const;
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
@@ -500,6 +520,11 @@ private:
SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
std::vector<SDNode *> *Created) const override;
+ SDValue getRsqrtEstimate(SDValue Operand, DAGCombinerInfo &DCI,
+ unsigned &RefinementSteps,
+ bool &UseOneConstNR) const override;
+ SDValue getRecipEstimate(SDValue Operand, DAGCombinerInfo &DCI,
+ unsigned &RefinementSteps) const override;
unsigned combineRepeatedFPDivisors() const override;
ConstraintType getConstraintType(StringRef Constraint) const override;
@@ -515,6 +540,9 @@ private:
std::pair<unsigned, const TargetRegisterClass *>
getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint, MVT VT) const override;
+
+ const char *LowerXConstraint(EVT ConstraintVT) const override;
+
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const override;
diff --git a/lib/Target/AArch64/AArch64InstrAtomics.td b/lib/Target/AArch64/AArch64InstrAtomics.td
index 4923a1161dfc..59de62ad2877 100644
--- a/lib/Target/AArch64/AArch64InstrAtomics.td
+++ b/lib/Target/AArch64/AArch64InstrAtomics.td
@@ -29,7 +29,7 @@ def : Pat<(atomic_fence (imm), (imm)), (DMB (i32 0xb))>;
class acquiring_load<PatFrag base>
: PatFrag<(ops node:$ptr), (base node:$ptr), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
- return isAtLeastAcquire(Ordering);
+ return isAcquireOrStronger(Ordering);
}]>;
// An atomic load operation that does not need either acquire or release
@@ -37,7 +37,7 @@ class acquiring_load<PatFrag base>
class relaxed_load<PatFrag base>
: PatFrag<(ops node:$ptr), (base node:$ptr), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
- return !isAtLeastAcquire(Ordering);
+ return !isAcquireOrStronger(Ordering);
}]>;
// 8-bit loads
@@ -112,15 +112,16 @@ def : Pat<(relaxed_load<atomic_load_64>
class releasing_store<PatFrag base>
: PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
- assert(Ordering != AcquireRelease && "unexpected store ordering");
- return isAtLeastRelease(Ordering);
+ assert(Ordering != AtomicOrdering::AcquireRelease &&
+ "unexpected store ordering");
+ return isReleaseOrStronger(Ordering);
}]>;
// An atomic store operation that doesn't actually need to be atomic on AArch64.
class relaxed_store<PatFrag base>
: PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
- return !isAtLeastRelease(Ordering);
+ return !isReleaseOrStronger(Ordering);
}]>;
// 8-bit stores
@@ -361,3 +362,43 @@ def : Pat<(stlxr_4 (and GPR64:$val, 0xffffffff), GPR64sp:$addr),
// And clear exclusive.
def : Pat<(int_aarch64_clrex), (CLREX 0xf)>;
+
+//===----------------------------------
+// Atomic cmpxchg for -O0
+//===----------------------------------
+
+// The fast register allocator used during -O0 inserts spills to cover any VRegs
+// live across basic block boundaries. When this happens between an LDXR and an
+// STXR it can clear the exclusive monitor, causing all cmpxchg attempts to
+// fail.
+
+// Unfortunately, this means we have to have an alternative (expanded
+// post-regalloc) path for -O0 compilations. Fortunately this path can be
+// significantly more naive than the standard expansion: we conservatively
+// assume seq_cst, strong cmpxchg and omit clrex on failure.
+
+let Constraints = "@earlyclobber $Rd,@earlyclobber $status",
+ mayLoad = 1, mayStore = 1 in {
+def CMP_SWAP_8 : Pseudo<(outs GPR32:$Rd, GPR32:$status),
+ (ins GPR64:$addr, GPR32:$desired, GPR32:$new), []>,
+ Sched<[WriteAtomic]>;
+
+def CMP_SWAP_16 : Pseudo<(outs GPR32:$Rd, GPR32:$status),
+ (ins GPR64:$addr, GPR32:$desired, GPR32:$new), []>,
+ Sched<[WriteAtomic]>;
+
+def CMP_SWAP_32 : Pseudo<(outs GPR32:$Rd, GPR32:$status),
+ (ins GPR64:$addr, GPR32:$desired, GPR32:$new), []>,
+ Sched<[WriteAtomic]>;
+
+def CMP_SWAP_64 : Pseudo<(outs GPR64:$Rd, GPR32:$status),
+ (ins GPR64:$addr, GPR64:$desired, GPR64:$new), []>,
+ Sched<[WriteAtomic]>;
+}
+
+let Constraints = "@earlyclobber $RdLo,@earlyclobber $RdHi,@earlyclobber $status",
+ mayLoad = 1, mayStore = 1 in
+def CMP_SWAP_128 : Pseudo<(outs GPR64:$RdLo, GPR64:$RdHi, GPR32:$status),
+ (ins GPR64:$addr, GPR64:$desiredLo, GPR64:$desiredHi,
+ GPR64:$newLo, GPR64:$newHi), []>,
+ Sched<[WriteAtomic]>;
diff --git a/lib/Target/AArch64/AArch64InstrFormats.td b/lib/Target/AArch64/AArch64InstrFormats.td
index 6ac2175e5035..34d35e961210 100644
--- a/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/lib/Target/AArch64/AArch64InstrFormats.td
@@ -496,7 +496,7 @@ def imm0_65535 : Operand<i32>, ImmLeaf<i32, [{
return ((uint32_t)Imm) < 65536;
}]> {
let ParserMatchClass = Imm0_65535Operand;
- let PrintMethod = "printHexImm";
+ let PrintMethod = "printImmHex";
}
// imm0_255 predicate - True if the immediate is in the range [0,255].
@@ -505,7 +505,7 @@ def imm0_255 : Operand<i32>, ImmLeaf<i32, [{
return ((uint32_t)Imm) < 256;
}]> {
let ParserMatchClass = Imm0_255Operand;
- let PrintMethod = "printHexImm";
+ let PrintMethod = "printImm";
}
// imm0_127 predicate - True if the immediate is in the range [0,127]
@@ -514,7 +514,7 @@ def imm0_127 : Operand<i32>, ImmLeaf<i32, [{
return ((uint32_t)Imm) < 128;
}]> {
let ParserMatchClass = Imm0_127Operand;
- let PrintMethod = "printHexImm";
+ let PrintMethod = "printImm";
}
// NOTE: These imm0_N operands have to be of type i64 because i64 is the size
@@ -923,10 +923,7 @@ def psbhint_op : Operand<i32> {
// "psb" is an alias for "hint" only for certain values of the CRm:Op2 fields.
if (!MCOp.isImm())
return false;
- bool ValidNamed;
- (void)AArch64PSBHint::PSBHintMapper().toString(MCOp.getImm(),
- STI.getFeatureBits(), ValidNamed);
- return ValidNamed;
+ return AArch64PSBHint::lookupPSBByEncoding(MCOp.getImm()) != nullptr;
}];
}
@@ -1549,7 +1546,7 @@ class ADRI<bit page, string asm, Operand adr, list<dag> pattern>
def movimm32_imm : Operand<i32> {
let ParserMatchClass = Imm0_65535Operand;
let EncoderMethod = "getMoveWideImmOpValue";
- let PrintMethod = "printHexImm";
+ let PrintMethod = "printImm";
}
def movimm32_shift : Operand<i32> {
let PrintMethod = "printShifter";
@@ -9377,7 +9374,8 @@ class BaseCASEncoding<dag oops, dag iops, string asm, string operands,
class BaseCAS<string order, string size, RegisterClass RC>
: BaseCASEncoding<(outs RC:$out),(ins RC:$Rs, RC:$Rt, GPR64sp:$Rn),
"cas" # order # size, "\t$Rs, $Rt, [$Rn]",
- "$out = $Rs",[]> {
+ "$out = $Rs",[]>,
+ Sched<[WriteAtomic]> {
let NP = 1;
}
@@ -9391,7 +9389,8 @@ multiclass CompareAndSwap<bits<1> Acq, bits<1> Rel, string order> {
class BaseCASP<string order, string size, RegisterOperand RC>
: BaseCASEncoding<(outs RC:$out),(ins RC:$Rs, RC:$Rt, GPR64sp:$Rn),
"casp" # order # size, "\t$Rs, $Rt, [$Rn]",
- "$out = $Rs",[]> {
+ "$out = $Rs",[]>,
+ Sched<[WriteAtomic]> {
let NP = 0;
}
@@ -9405,7 +9404,8 @@ multiclass CompareAndSwapPair<bits<1> Acq, bits<1> Rel, string order> {
let Predicates = [HasV8_1a] in
class BaseSWP<string order, string size, RegisterClass RC>
: I<(outs RC:$Rt),(ins RC:$Rs, GPR64sp:$Rn), "swp" # order # size,
- "\t$Rs, $Rt, [$Rn]","",[]> {
+ "\t$Rs, $Rt, [$Rn]","",[]>,
+ Sched<[WriteAtomic]> {
bits<2> Sz;
bit Acq;
bit Rel;
@@ -9436,7 +9436,8 @@ multiclass Swap<bits<1> Acq, bits<1> Rel, string order> {
let Predicates = [HasV8_1a], mayLoad = 1, mayStore = 1, hasSideEffects = 1 in
class BaseLDOPregister<string op, string order, string size, RegisterClass RC>
: I<(outs RC:$Rt),(ins RC:$Rs, GPR64sp:$Rn), "ld" # op # order # size,
- "\t$Rs, $Rt, [$Rn]","",[]> {
+ "\t$Rs, $Rt, [$Rn]","",[]>,
+ Sched<[WriteAtomic]> {
bits<2> Sz;
bit Acq;
bit Rel;
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index f398117de953..0aa4708f35ac 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -22,27 +22,31 @@
#include "llvm/MC/MCInst.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
+#include <algorithm>
using namespace llvm;
#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"
+static LLVM_CONSTEXPR MachineMemOperand::Flags MOSuppressPair =
+ MachineMemOperand::MOTargetFlag1;
+
AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
: AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
RI(STI.getTargetTriple()), Subtarget(STI) {}
/// GetInstSize - Return the number of bytes of code the specified
/// instruction occupies. This returns the maximum number of bytes.
-unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
- const MachineBasicBlock &MBB = *MI->getParent();
+unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr &MI) const {
+ const MachineBasicBlock &MBB = *MI.getParent();
const MachineFunction *MF = MBB.getParent();
const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
- if (MI->getOpcode() == AArch64::INLINEASM)
- return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
+ if (MI.getOpcode() == AArch64::INLINEASM)
+ return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
- const MCInstrDesc &Desc = MI->getDesc();
+ const MCInstrDesc &Desc = MI.getDesc();
switch (Desc.getOpcode()) {
default:
// Anything not explicitly designated otherwise is a normal 4-byte insn.
@@ -89,25 +93,25 @@ static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
}
// Branch analysis.
-bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const {
+bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
// If the block has no terminators, it just falls into the block after it.
MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
if (I == MBB.end())
return false;
- if (!isUnpredicatedTerminator(I))
+ if (!isUnpredicatedTerminator(*I))
return false;
// Get the last instruction in the block.
- MachineInstr *LastInst = I;
+ MachineInstr *LastInst = &*I;
// If there is only one terminator instruction, process it.
unsigned LastOpc = LastInst->getOpcode();
- if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
+ if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
if (isUncondBranchOpcode(LastOpc)) {
TBB = LastInst->getOperand(0).getMBB();
return false;
@@ -121,7 +125,7 @@ bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
}
// Get the instruction before it if it is a terminator.
- MachineInstr *SecondLastInst = I;
+ MachineInstr *SecondLastInst = &*I;
unsigned SecondLastOpc = SecondLastInst->getOpcode();
// If AllowModify is true and the block ends with two or more unconditional
@@ -131,19 +135,19 @@ bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
LastInst->eraseFromParent();
LastInst = SecondLastInst;
LastOpc = LastInst->getOpcode();
- if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
+ if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
// Return now; the only terminator is an unconditional branch.
TBB = LastInst->getOperand(0).getMBB();
return false;
} else {
- SecondLastInst = I;
+ SecondLastInst = &*I;
SecondLastOpc = SecondLastInst->getOpcode();
}
}
}
// If there are three terminators, we don't know what sort of block this is.
- if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
+ if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
return true;
// If the block ends with a B and a Bcc, handle it.
@@ -243,7 +247,7 @@ unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
}
void AArch64InstrInfo::instantiateCondBranch(
- MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
+ MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB,
ArrayRef<MachineOperand> Cond) const {
if (Cond[0].getImm() != -1) {
// Regular Bcc
@@ -259,9 +263,11 @@ void AArch64InstrInfo::instantiateCondBranch(
}
}
-unsigned AArch64InstrInfo::InsertBranch(
- MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
- ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
+unsigned AArch64InstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
@@ -399,8 +405,8 @@ bool AArch64InstrInfo::canInsertSelect(
}
void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DstReg,
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DstReg,
ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg) const {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
@@ -533,8 +539,8 @@ void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
}
/// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
-static bool canBeExpandedToORR(const MachineInstr *MI, unsigned BitSize) {
- uint64_t Imm = MI->getOperand(1).getImm();
+static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize) {
+ uint64_t Imm = MI.getOperand(1).getImm();
uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
uint64_t Encoding;
return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
@@ -542,11 +548,13 @@ static bool canBeExpandedToORR(const MachineInstr *MI, unsigned BitSize) {
// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in the future.
-bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
- if (!Subtarget.isCortexA57() && !Subtarget.isCortexA53())
- return MI->isAsCheapAsAMove();
+bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
+ if (!Subtarget.hasCustomCheapAsMoveHandling())
+ return MI.isAsCheapAsAMove();
+
+ unsigned Imm;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default:
return false;
@@ -555,7 +563,17 @@ bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
case AArch64::ADDXri:
case AArch64::SUBWri:
case AArch64::SUBXri:
- return (MI->getOperand(3).getImm() == 0);
+ return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 ||
+ MI.getOperand(3).getImm() == 0);
+
+ // add/sub on register with shift
+ case AArch64::ADDWrs:
+ case AArch64::ADDXrs:
+ case AArch64::SUBWrs:
+ case AArch64::SUBXrs:
+ Imm = MI.getOperand(3).getImm();
+ return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
+ AArch64_AM::getArithShiftValue(Imm) < 4);
// logical ops on immediate
case AArch64::ANDWri:
@@ -580,12 +598,41 @@ bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
case AArch64::ORRWrr:
case AArch64::ORRXrr:
return true;
+
+ // logical ops on register with shift
+ case AArch64::ANDWrs:
+ case AArch64::ANDXrs:
+ case AArch64::BICWrs:
+ case AArch64::BICXrs:
+ case AArch64::EONWrs:
+ case AArch64::EONXrs:
+ case AArch64::EORWrs:
+ case AArch64::EORXrs:
+ case AArch64::ORNWrs:
+ case AArch64::ORNXrs:
+ case AArch64::ORRWrs:
+ case AArch64::ORRXrs:
+ Imm = MI.getOperand(3).getImm();
+ return (Subtarget.getProcFamily() == AArch64Subtarget::ExynosM1 &&
+ AArch64_AM::getShiftValue(Imm) < 4 &&
+ AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL);
+
// If MOVi32imm or MOVi64imm can be expanded into ORRWri or
// ORRXri, it is as cheap as MOV
case AArch64::MOVi32imm:
return canBeExpandedToORR(MI, 32);
case AArch64::MOVi64imm:
return canBeExpandedToORR(MI, 64);
+
+ // It is cheap to zero out registers if the subtarget has ZeroCycleZeroing
+ // feature.
+ case AArch64::FMOVS0:
+ case AArch64::FMOVD0:
+ return Subtarget.hasZeroCycleZeroing();
+ case TargetOpcode::COPY:
+ return (Subtarget.hasZeroCycleZeroing() &&
+ (MI.getOperand(1).getReg() == AArch64::WZR ||
+ MI.getOperand(1).getReg() == AArch64::XZR));
}
llvm_unreachable("Unknown opcode to check as cheap as a move!");
@@ -611,20 +658,18 @@ bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
}
}
-bool
-AArch64InstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
- MachineInstr *MIb,
- AliasAnalysis *AA) const {
+bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
+ MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
const TargetRegisterInfo *TRI = &getRegisterInfo();
unsigned BaseRegA = 0, BaseRegB = 0;
- int OffsetA = 0, OffsetB = 0;
- int WidthA = 0, WidthB = 0;
+ int64_t OffsetA = 0, OffsetB = 0;
+ unsigned WidthA = 0, WidthB = 0;
- assert(MIa && MIa->mayLoadOrStore() && "MIa must be a load or store.");
- assert(MIb && MIb->mayLoadOrStore() && "MIb must be a load or store.");
+ assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
+ assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
- if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects() ||
- MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
+ if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
+ MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
return false;
// Retrieve the base register, offset from the base register and width. Width
@@ -648,10 +693,10 @@ AArch64InstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
-bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const {
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default:
break;
case AArch64::SUBSWrr:
@@ -667,8 +712,8 @@ bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
case AArch64::ADDSXrs:
case AArch64::ADDSXrx:
// Replace SUBSWrr with SUBWrr if NZCV is not used.
- SrcReg = MI->getOperand(1).getReg();
- SrcReg2 = MI->getOperand(2).getReg();
+ SrcReg = MI.getOperand(1).getReg();
+ SrcReg2 = MI.getOperand(2).getReg();
CmpMask = ~0;
CmpValue = 0;
return true;
@@ -676,17 +721,17 @@ bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
case AArch64::ADDSWri:
case AArch64::SUBSXri:
case AArch64::ADDSXri:
- SrcReg = MI->getOperand(1).getReg();
+ SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
CmpMask = ~0;
    // FIXME: In order to convert CmpValue to 0 or 1, the immediate is
    // compared against zero here.
- CmpValue = (MI->getOperand(2).getImm() != 0);
+ CmpValue = MI.getOperand(2).getImm() != 0;
return true;
case AArch64::ANDSWri:
case AArch64::ANDSXri:
// ANDS does not use the same encoding scheme as the others xxxS
// instructions.
- SrcReg = MI->getOperand(1).getReg();
+ SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
CmpMask = ~0;
    // FIXME: The return value type of decodeLogicalImmediate is uint64_t,
@@ -694,17 +739,17 @@ bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
// the high 32 bits of uint64_t will be lost.
// In fact it causes a bug in spec2006-483.xalancbmk
// CmpValue is only used to compare with zero in OptimizeCompareInstr
- CmpValue = (AArch64_AM::decodeLogicalImmediate(
- MI->getOperand(2).getImm(),
- MI->getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0);
+ CmpValue = AArch64_AM::decodeLogicalImmediate(
+ MI.getOperand(2).getImm(),
+ MI.getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0;
return true;
}
return false;
}
-static bool UpdateOperandRegClass(MachineInstr *Instr) {
- MachineBasicBlock *MBB = Instr->getParent();
+static bool UpdateOperandRegClass(MachineInstr &Instr) {
+ MachineBasicBlock *MBB = Instr.getParent();
assert(MBB && "Can't get MachineBasicBlock here");
MachineFunction *MF = MBB->getParent();
assert(MF && "Can't get MachineFunction here");
@@ -712,11 +757,11 @@ static bool UpdateOperandRegClass(MachineInstr *Instr) {
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
MachineRegisterInfo *MRI = &MF->getRegInfo();
- for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
+ for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx;
++OpIdx) {
- MachineOperand &MO = Instr->getOperand(OpIdx);
+ MachineOperand &MO = Instr.getOperand(OpIdx);
const TargetRegisterClass *OpRegCstraints =
- Instr->getRegClassConstraint(OpIdx, TII, TRI);
+ Instr.getRegClassConstraint(OpIdx, TII, TRI);
// If there's no constraint, there's nothing to do.
if (!OpRegCstraints)
@@ -744,16 +789,16 @@ static bool UpdateOperandRegClass(MachineInstr *Instr) {
/// \brief Return the opcode that does not set flags when possible - otherwise
 /// return the original opcode. The caller is responsible for doing the
 /// actual substitution and legality checking.
-static unsigned convertFlagSettingOpcode(const MachineInstr *MI) {
+static unsigned convertFlagSettingOpcode(const MachineInstr &MI) {
// Don't convert all compare instructions, because for some the zero register
// encoding becomes the sp register.
bool MIDefinesZeroReg = false;
- if (MI->definesRegister(AArch64::WZR) || MI->definesRegister(AArch64::XZR))
+ if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR))
MIDefinesZeroReg = true;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default:
- return MI->getOpcode();
+ return MI.getOpcode();
case AArch64::ADDSWrr:
return AArch64::ADDWrr;
case AArch64::ADDSWri:
@@ -789,60 +834,76 @@ static unsigned convertFlagSettingOpcode(const MachineInstr *MI) {
}
}
-/// True when condition code could be modified on the instruction
-/// trace starting at from and ending at to.
-static bool modifiesConditionCode(MachineInstr *From, MachineInstr *To,
- const bool CheckOnlyCCWrites,
- const TargetRegisterInfo *TRI) {
- // We iterate backward starting \p To until we hit \p From
- MachineBasicBlock::iterator I = To, E = From, B = To->getParent()->begin();
+enum AccessKind {
+ AK_Write = 0x01,
+ AK_Read = 0x10,
+ AK_All = 0x11
+};
+/// True when condition flags are accessed (either by writing or reading)
+/// on the instruction trace starting at From and ending at To.
+///
+/// Note: If From and To are from different blocks it is assumed that the
+/// condition flags are accessed on the path.
+static bool areCFlagsAccessedBetweenInstrs(
+ MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
+ const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) {
// Early exit if To is at the beginning of the BB.
- if (I == B)
+ if (To == To->getParent()->begin())
return true;
- // Check whether the definition of SrcReg is in the same basic block as
- // Compare. If not, assume the condition code gets modified on some path.
+ // Check whether the instructions are in the same basic block
+ // If not, assume the condition flags might get modified somewhere.
if (To->getParent() != From->getParent())
return true;
- // Check that NZCV isn't set on the trace.
- for (--I; I != E; --I) {
- const MachineInstr &Instr = *I;
+ // From must be above To.
+ assert(std::find_if(MachineBasicBlock::reverse_iterator(To),
+ To->getParent()->rend(), [From](MachineInstr &MI) {
+ return MachineBasicBlock::iterator(MI) == From;
+ }) != To->getParent()->rend());
- if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
- (!CheckOnlyCCWrites && Instr.readsRegister(AArch64::NZCV, TRI)))
- // This instruction modifies or uses NZCV after the one we want to
- // change.
- return true;
- if (I == B)
- // We currently don't allow the instruction trace to cross basic
- // block boundaries
+ // We iterate backward starting \p To until we hit \p From.
+ for (--To; To != From; --To) {
+ const MachineInstr &Instr = *To;
+
+    if (((AccessToCheck & AK_Write) &&
+         Instr.modifiesRegister(AArch64::NZCV, TRI)) ||
+        ((AccessToCheck & AK_Read) && Instr.readsRegister(AArch64::NZCV, TRI)))
return true;
}
return false;
}
-/// optimizeCompareInstr - Convert the instruction supplying the argument to the
-/// comparison into one that sets the zero bit in the flags register.
+
+/// Try to optimize a compare instruction. A compare instruction is an
+/// instruction which produces AArch64::NZCV. It is truly a compare
+/// instruction only when there are no uses of its destination register.
+///
+/// The following steps are tried in order:
+/// 1. Convert CmpInstr into an unconditional version.
+/// 2. Remove CmpInstr if there is an instruction above it that produces the
+///    needed condition code, or an instruction that can be converted into
+///    such an instruction. Only comparison with zero is supported.
bool AArch64InstrInfo::optimizeCompareInstr(
- MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
+ MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
int CmpValue, const MachineRegisterInfo *MRI) const {
+ assert(CmpInstr.getParent());
+ assert(MRI);
// Replace SUBSWrr with SUBWrr if NZCV is not used.
- int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
- if (Cmp_NZCV != -1) {
- if (CmpInstr->definesRegister(AArch64::WZR) ||
- CmpInstr->definesRegister(AArch64::XZR)) {
- CmpInstr->eraseFromParent();
+ int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true);
+ if (DeadNZCVIdx != -1) {
+ if (CmpInstr.definesRegister(AArch64::WZR) ||
+ CmpInstr.definesRegister(AArch64::XZR)) {
+ CmpInstr.eraseFromParent();
return true;
}
- unsigned Opc = CmpInstr->getOpcode();
+ unsigned Opc = CmpInstr.getOpcode();
unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
if (NewOpc == Opc)
return false;
const MCInstrDesc &MCID = get(NewOpc);
- CmpInstr->setDesc(MCID);
- CmpInstr->RemoveOperand(Cmp_NZCV);
+ CmpInstr.setDesc(MCID);
+ CmpInstr.RemoveOperand(DeadNZCVIdx);
bool succeeded = UpdateOperandRegClass(CmpInstr);
(void)succeeded;
    assert(succeeded && "Some operands' register classes are incompatible!");
@@ -857,23 +918,21 @@ bool AArch64InstrInfo::optimizeCompareInstr(
return false;
// CmpInstr is a Compare instruction if destination register is not used.
- if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
- return false;
-
- // Get the unique definition of SrcReg.
- MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
- if (!MI)
+ if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
return false;
- bool CheckOnlyCCWrites = false;
- const TargetRegisterInfo *TRI = &getRegisterInfo();
- if (modifiesConditionCode(MI, CmpInstr, CheckOnlyCCWrites, TRI))
- return false;
+ return substituteCmpToZero(CmpInstr, SrcReg, MRI);
+}
- unsigned NewOpc = MI->getOpcode();
- switch (MI->getOpcode()) {
+/// Get opcode of S version of Instr.
+/// If Instr is S version its opcode is returned.
+/// AArch64::INSTRUCTION_LIST_END is returned if Instr does not have S version
+/// or we are not interested in it.
+static unsigned sForm(MachineInstr &Instr) {
+ switch (Instr.getOpcode()) {
default:
- return false;
+ return AArch64::INSTRUCTION_LIST_END;
+
case AArch64::ADDSWrr:
case AArch64::ADDSWri:
case AArch64::ADDSXrr:
@@ -882,116 +941,221 @@ bool AArch64InstrInfo::optimizeCompareInstr(
case AArch64::SUBSWri:
case AArch64::SUBSXrr:
case AArch64::SUBSXri:
- break;
- case AArch64::ADDWrr: NewOpc = AArch64::ADDSWrr; break;
- case AArch64::ADDWri: NewOpc = AArch64::ADDSWri; break;
- case AArch64::ADDXrr: NewOpc = AArch64::ADDSXrr; break;
- case AArch64::ADDXri: NewOpc = AArch64::ADDSXri; break;
- case AArch64::ADCWr: NewOpc = AArch64::ADCSWr; break;
- case AArch64::ADCXr: NewOpc = AArch64::ADCSXr; break;
- case AArch64::SUBWrr: NewOpc = AArch64::SUBSWrr; break;
- case AArch64::SUBWri: NewOpc = AArch64::SUBSWri; break;
- case AArch64::SUBXrr: NewOpc = AArch64::SUBSXrr; break;
- case AArch64::SUBXri: NewOpc = AArch64::SUBSXri; break;
- case AArch64::SBCWr: NewOpc = AArch64::SBCSWr; break;
- case AArch64::SBCXr: NewOpc = AArch64::SBCSXr; break;
- case AArch64::ANDWri: NewOpc = AArch64::ANDSWri; break;
- case AArch64::ANDXri: NewOpc = AArch64::ANDSXri; break;
- }
-
- // Scan forward for the use of NZCV.
- // When checking against MI: if it's a conditional code requires
- // checking of V bit, then this is not safe to do.
- // It is safe to remove CmpInstr if NZCV is redefined or killed.
- // If we are done with the basic block, we need to check whether NZCV is
- // live-out.
- bool IsSafe = false;
- for (MachineBasicBlock::iterator I = CmpInstr,
- E = CmpInstr->getParent()->end();
- !IsSafe && ++I != E;) {
- const MachineInstr &Instr = *I;
- for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
- ++IO) {
- const MachineOperand &MO = Instr.getOperand(IO);
- if (MO.isRegMask() && MO.clobbersPhysReg(AArch64::NZCV)) {
- IsSafe = true;
- break;
- }
- if (!MO.isReg() || MO.getReg() != AArch64::NZCV)
- continue;
- if (MO.isDef()) {
- IsSafe = true;
- break;
- }
+    return Instr.getOpcode();
+
+ case AArch64::ADDWrr: return AArch64::ADDSWrr;
+ case AArch64::ADDWri: return AArch64::ADDSWri;
+ case AArch64::ADDXrr: return AArch64::ADDSXrr;
+ case AArch64::ADDXri: return AArch64::ADDSXri;
+ case AArch64::ADCWr: return AArch64::ADCSWr;
+ case AArch64::ADCXr: return AArch64::ADCSXr;
+ case AArch64::SUBWrr: return AArch64::SUBSWrr;
+ case AArch64::SUBWri: return AArch64::SUBSWri;
+ case AArch64::SUBXrr: return AArch64::SUBSXrr;
+ case AArch64::SUBXri: return AArch64::SUBSXri;
+ case AArch64::SBCWr: return AArch64::SBCSWr;
+ case AArch64::SBCXr: return AArch64::SBCSXr;
+ case AArch64::ANDWri: return AArch64::ANDSWri;
+ case AArch64::ANDXri: return AArch64::ANDSXri;
+ }
+}
- // Decode the condition code.
- unsigned Opc = Instr.getOpcode();
- AArch64CC::CondCode CC;
- switch (Opc) {
- default:
- return false;
- case AArch64::Bcc:
- CC = (AArch64CC::CondCode)Instr.getOperand(IO - 2).getImm();
- break;
- case AArch64::CSINVWr:
- case AArch64::CSINVXr:
- case AArch64::CSINCWr:
- case AArch64::CSINCXr:
- case AArch64::CSELWr:
- case AArch64::CSELXr:
- case AArch64::CSNEGWr:
- case AArch64::CSNEGXr:
- case AArch64::FCSELSrrr:
- case AArch64::FCSELDrrr:
- CC = (AArch64CC::CondCode)Instr.getOperand(IO - 1).getImm();
- break;
- }
+/// Check if AArch64::NZCV should be alive in successors of MBB.
+static bool areCFlagsAliveInSuccessors(MachineBasicBlock *MBB) {
+ for (auto *BB : MBB->successors())
+ if (BB->isLiveIn(AArch64::NZCV))
+ return true;
+ return false;
+}
- // It is not safe to remove Compare instruction if Overflow(V) is used.
- switch (CC) {
- default:
- // NZCV can be used multiple times, we should continue.
- break;
- case AArch64CC::VS:
- case AArch64CC::VC:
- case AArch64CC::GE:
- case AArch64CC::LT:
- case AArch64CC::GT:
- case AArch64CC::LE:
- return false;
- }
+struct UsedNZCV {
+ bool N;
+ bool Z;
+ bool C;
+ bool V;
+  UsedNZCV() : N(false), Z(false), C(false), V(false) {}
+  UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
+ this->N |= UsedFlags.N;
+ this->Z |= UsedFlags.Z;
+ this->C |= UsedFlags.C;
+ this->V |= UsedFlags.V;
+ return *this;
+ }
+};
+
+/// Find a condition code used by the instruction.
+/// Returns AArch64CC::Invalid if either the instruction does not use condition
+/// codes or we don't optimize CmpInstr in the presence of such instructions.
+static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) {
+ switch (Instr.getOpcode()) {
+ default:
+ return AArch64CC::Invalid;
+
+ case AArch64::Bcc: {
+ int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
+ assert(Idx >= 2);
+ return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 2).getImm());
}
+
+ case AArch64::CSINVWr:
+ case AArch64::CSINVXr:
+ case AArch64::CSINCWr:
+ case AArch64::CSINCXr:
+ case AArch64::CSELWr:
+ case AArch64::CSELXr:
+ case AArch64::CSNEGWr:
+ case AArch64::CSNEGXr:
+ case AArch64::FCSELSrrr:
+ case AArch64::FCSELDrrr: {
+ int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV);
+ assert(Idx >= 1);
+ return static_cast<AArch64CC::CondCode>(Instr.getOperand(Idx - 1).getImm());
+ }
+ }
+}
+
+static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
+ assert(CC != AArch64CC::Invalid);
+ UsedNZCV UsedFlags;
+ switch (CC) {
+ default:
+ break;
+
+ case AArch64CC::EQ: // Z set
+ case AArch64CC::NE: // Z clear
+ UsedFlags.Z = true;
+ break;
+
+ case AArch64CC::HI: // Z clear and C set
+ case AArch64CC::LS: // Z set or C clear
+ UsedFlags.Z = true;
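+    // Fall through: HI/LS read C in addition to Z.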
+ case AArch64CC::HS: // C set
+ case AArch64CC::LO: // C clear
+ UsedFlags.C = true;
+ break;
+
+ case AArch64CC::MI: // N set
+ case AArch64CC::PL: // N clear
+ UsedFlags.N = true;
+ break;
+
+ case AArch64CC::VS: // V set
+ case AArch64CC::VC: // V clear
+ UsedFlags.V = true;
+ break;
+
+ case AArch64CC::GT: // Z clear, N and V the same
+ case AArch64CC::LE: // Z set, N and V differ
+ UsedFlags.Z = true;
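+    // Fall through: GT/LE read N and V in addition to Z.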
+ case AArch64CC::GE: // N and V the same
+ case AArch64CC::LT: // N and V differ
+ UsedFlags.N = true;
+ UsedFlags.V = true;
+ break;
}
+ return UsedFlags;
+}
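+
+// For example, getUsedNZCV(AArch64CC::HI) reports Z and C as used, while
+// getUsedNZCV(AArch64CC::GE) reports N and V.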
+
+static bool isADDSRegImm(unsigned Opcode) {
+ return Opcode == AArch64::ADDSWri || Opcode == AArch64::ADDSXri;
+}
+
+static bool isSUBSRegImm(unsigned Opcode) {
+ return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri;
+}
+
+/// Check if CmpInstr can be substituted by MI.
+///
+/// CmpInstr can be substituted when all of the following hold:
+/// - CmpInstr is either 'ADDS %vreg, 0' or 'SUBS %vreg, 0'
+/// - and, MI and CmpInstr are from the same MachineBB
+/// - and, condition flags are not alive in successors of the CmpInstr parent
+/// - and, if MI opcode is the S form there must be no defs of flags between
+/// MI and CmpInstr
+/// or if MI opcode is not the S form there must be neither defs of flags
+/// nor uses of flags between MI and CmpInstr.
+/// - and C/V flags are not used after CmpInstr
+static bool canInstrSubstituteCmpInstr(MachineInstr *MI, MachineInstr *CmpInstr,
+ const TargetRegisterInfo *TRI) {
+ assert(MI);
+ assert(sForm(*MI) != AArch64::INSTRUCTION_LIST_END);
+ assert(CmpInstr);
+
+ const unsigned CmpOpcode = CmpInstr->getOpcode();
+ if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
+ return false;
- // If NZCV is not killed nor re-defined, we should check whether it is
- // live-out. If it is live-out, do not optimize.
- if (!IsSafe) {
- MachineBasicBlock *ParentBlock = CmpInstr->getParent();
- for (auto *MBB : ParentBlock->successors())
- if (MBB->isLiveIn(AArch64::NZCV))
+ if (MI->getParent() != CmpInstr->getParent())
+ return false;
+
+ if (areCFlagsAliveInSuccessors(CmpInstr->getParent()))
+ return false;
+
+ AccessKind AccessToCheck = AK_Write;
+ if (sForm(*MI) != MI->getOpcode())
+ AccessToCheck = AK_All;
+ if (areCFlagsAccessedBetweenInstrs(MI, CmpInstr, TRI, AccessToCheck))
+ return false;
+
+ UsedNZCV NZCVUsedAfterCmp;
+  for (auto I = std::next(CmpInstr->getIterator()),
+            E = CmpInstr->getParent()->instr_end();
+       I != E; ++I) {
+ const MachineInstr &Instr = *I;
+ if (Instr.readsRegister(AArch64::NZCV, TRI)) {
+ AArch64CC::CondCode CC = findCondCodeUsedByInstr(Instr);
+ if (CC == AArch64CC::Invalid) // Unsupported conditional instruction
return false;
+ NZCVUsedAfterCmp |= getUsedNZCV(CC);
+ }
+
+ if (Instr.modifiesRegister(AArch64::NZCV, TRI))
+ break;
}
+
+ return !NZCVUsedAfterCmp.C && !NZCVUsedAfterCmp.V;
+}
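+
+// For example, the substitution is rejected when a later reader needs C or V:
+//   %1 = SUBWrr %2, %3
+//   SUBSWri %wzr, %1, 0     ; cmp %1, #0
+//   b.ge ...                ; GE reads N and V, and the V produced by
+//                           ; SUBSWrr %2, %3 differs from that of %1 - 0.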
+
+/// Substitute an instruction comparing to zero with another instruction
+/// which produces needed condition flags.
+///
+/// Return true on success.
+bool AArch64InstrInfo::substituteCmpToZero(
+ MachineInstr &CmpInstr, unsigned SrcReg,
+ const MachineRegisterInfo *MRI) const {
+ assert(MRI);
+ // Get the unique definition of SrcReg.
+ MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
+ if (!MI)
+ return false;
+
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+
+ unsigned NewOpc = sForm(*MI);
+ if (NewOpc == AArch64::INSTRUCTION_LIST_END)
+ return false;
+
+ if (!canInstrSubstituteCmpInstr(MI, &CmpInstr, TRI))
+ return false;
// Update the instruction to set NZCV.
MI->setDesc(get(NewOpc));
- CmpInstr->eraseFromParent();
- bool succeeded = UpdateOperandRegClass(MI);
+ CmpInstr.eraseFromParent();
+ bool succeeded = UpdateOperandRegClass(*MI);
(void)succeeded;
  assert(succeeded && "Some operands' register classes are incompatible!");
MI->addRegisterDefined(AArch64::NZCV, TRI);
return true;
}
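+
+// A sketch of the rewrite performed by substituteCmpToZero:
+//   %1 = SUBWrr %2, %3
+//   ...                     ; nothing reads or writes NZCV in between
+//   SUBSWri %wzr, %1, 0     ; cmp %1, #0
+//   b.eq ...
+// ==>
+//   %1 = SUBSWrr %2, %3     ; the S-form now produces NZCV
+//   b.eq ...                ; safe, since EQ only reads Z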
-bool
-AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
- if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
+bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
+ if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
return false;
- MachineBasicBlock &MBB = *MI->getParent();
- DebugLoc DL = MI->getDebugLoc();
- unsigned Reg = MI->getOperand(0).getReg();
+ MachineBasicBlock &MBB = *MI.getParent();
+ DebugLoc DL = MI.getDebugLoc();
+ unsigned Reg = MI.getOperand(0).getReg();
const GlobalValue *GV =
- cast<GlobalValue>((*MI->memoperands_begin())->getValue());
+ cast<GlobalValue>((*MI.memoperands_begin())->getValue());
const TargetMachine &TM = MBB.getParent()->getTarget();
unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
const unsigned char MO_NC = AArch64II::MO_NC;
@@ -1000,8 +1164,9 @@ AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
.addGlobalAddress(GV, 0, AArch64II::MO_GOT);
BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
- .addReg(Reg, RegState::Kill).addImm(0)
- .addMemOperand(*MI->memoperands_begin());
+ .addReg(Reg, RegState::Kill)
+ .addImm(0)
+ .addMemOperand(*MI.memoperands_begin());
} else if (TM.getCodeModel() == CodeModel::Large) {
BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
.addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
@@ -1015,8 +1180,9 @@ AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
.addReg(Reg, RegState::Kill)
.addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
- .addReg(Reg, RegState::Kill).addImm(0)
- .addMemOperand(*MI->memoperands_begin());
+ .addReg(Reg, RegState::Kill)
+ .addImm(0)
+ .addMemOperand(*MI.memoperands_begin());
} else {
BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
.addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
@@ -1024,7 +1190,7 @@ AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
.addReg(Reg, RegState::Kill)
.addGlobalAddress(GV, 0, LoFlags)
- .addMemOperand(*MI->memoperands_begin());
+ .addMemOperand(*MI.memoperands_begin());
}
MBB.erase(MI);
@@ -1033,8 +1199,8 @@ AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
}
 /// Return true if this instruction has a shifted register operand and a
 /// non-zero immediate operand.
-bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
- switch (MI->getOpcode()) {
+bool AArch64InstrInfo::hasShiftedReg(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
default:
break;
case AArch64::ADDSWrs:
@@ -1069,8 +1235,8 @@ bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
case AArch64::SUBSXrs:
case AArch64::SUBWrs:
case AArch64::SUBXrs:
- if (MI->getOperand(3).isImm()) {
- unsigned val = MI->getOperand(3).getImm();
+ if (MI.getOperand(3).isImm()) {
+ unsigned val = MI.getOperand(3).getImm();
return (val != 0);
}
break;
@@ -1079,8 +1245,8 @@ bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
}
 /// Return true if this instruction has an extended register operand and a
 /// non-zero immediate operand.
-bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
- switch (MI->getOpcode()) {
+bool AArch64InstrInfo::hasExtendedReg(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
default:
break;
case AArch64::ADDSWrx:
@@ -1095,8 +1261,8 @@ bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
case AArch64::SUBWrx:
case AArch64::SUBXrx:
case AArch64::SUBXrx64:
- if (MI->getOperand(3).isImm()) {
- unsigned val = MI->getOperand(3).getImm();
+ if (MI.getOperand(3).isImm()) {
+ unsigned val = MI.getOperand(3).getImm();
return (val != 0);
}
break;
@@ -1107,51 +1273,51 @@ bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
-bool AArch64InstrInfo::isGPRZero(const MachineInstr *MI) const {
- switch (MI->getOpcode()) {
+bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
default:
break;
case AArch64::MOVZWi:
case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
- if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
- assert(MI->getDesc().getNumOperands() == 3 &&
- MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
+ if (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) {
+ assert(MI.getDesc().getNumOperands() == 3 &&
+ MI.getOperand(2).getImm() == 0 && "invalid MOVZi operands");
return true;
}
break;
case AArch64::ANDWri: // and Rd, Rzr, #imm
- return MI->getOperand(1).getReg() == AArch64::WZR;
+ return MI.getOperand(1).getReg() == AArch64::WZR;
case AArch64::ANDXri:
- return MI->getOperand(1).getReg() == AArch64::XZR;
+ return MI.getOperand(1).getReg() == AArch64::XZR;
case TargetOpcode::COPY:
- return MI->getOperand(1).getReg() == AArch64::WZR;
+ return MI.getOperand(1).getReg() == AArch64::WZR;
}
return false;
}
// Return true if this instruction simply renames a general register without
// modifying bits.
-bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
- switch (MI->getOpcode()) {
+bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
default:
break;
case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs
- unsigned DstReg = MI->getOperand(0).getReg();
+ unsigned DstReg = MI.getOperand(0).getReg();
return (AArch64::GPR32RegClass.contains(DstReg) ||
AArch64::GPR64RegClass.contains(DstReg));
}
case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
- if (MI->getOperand(1).getReg() == AArch64::XZR) {
- assert(MI->getDesc().getNumOperands() == 4 &&
- MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
+ if (MI.getOperand(1).getReg() == AArch64::XZR) {
+ assert(MI.getDesc().getNumOperands() == 4 &&
+ MI.getOperand(3).getImm() == 0 && "invalid ORRrs operands");
return true;
}
break;
case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
- if (MI->getOperand(2).getImm() == 0) {
- assert(MI->getDesc().getNumOperands() == 4 &&
- MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
+ if (MI.getOperand(2).getImm() == 0) {
+ assert(MI.getDesc().getNumOperands() == 4 &&
+ MI.getOperand(3).getImm() == 0 && "invalid ADDXri operands");
return true;
}
break;
@@ -1161,19 +1327,19 @@ bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
 // Return true if this instruction simply renames a floating-point register
 // without modifying bits.
-bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
- switch (MI->getOpcode()) {
+bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
default:
break;
case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b
- unsigned DstReg = MI->getOperand(0).getReg();
+ unsigned DstReg = MI.getOperand(0).getReg();
return (AArch64::FPR64RegClass.contains(DstReg) ||
AArch64::FPR128RegClass.contains(DstReg));
}
case AArch64::ORRv16i8:
- if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
- assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
+ if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
+ assert(MI.getDesc().getNumOperands() == 3 && MI.getOperand(0).isReg() &&
"invalid ORRv16i8 operands");
return true;
}
@@ -1182,9 +1348,9 @@ bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
return false;
}
-unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default:
break;
case AArch64::LDRWui:
@@ -1194,10 +1360,10 @@ unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
case AArch64::LDRSui:
case AArch64::LDRDui:
case AArch64::LDRQui:
- if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
+ if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
+ MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
}
break;
}
@@ -1205,9 +1371,9 @@ unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
return 0;
}
-unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default:
break;
case AArch64::STRWui:
@@ -1217,10 +1383,10 @@ unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
case AArch64::STRSui:
case AArch64::STRDui:
case AArch64::STRQui:
- if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
+ if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() &&
+ MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
}
break;
}
@@ -1230,8 +1396,8 @@ unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
 /// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
-bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
- switch (MI->getOpcode()) {
+bool AArch64InstrInfo::isScaledAddr(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
default:
break;
case AArch64::LDRBBroW:
@@ -1281,7 +1447,7 @@ bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
case AArch64::STRWroX:
case AArch64::STRXroX:
- unsigned Val = MI->getOperand(3).getImm();
+ unsigned Val = MI.getOperand(3).getImm();
AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
}
@@ -1289,36 +1455,96 @@ bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
}
/// Check all MachineMemOperands for a hint to suppress pairing.
-bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
- assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
- "Too many target MO flags");
- for (auto *MM : MI->memoperands()) {
- if (MM->getFlags() &
- (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
- return true;
- }
- }
- return false;
+bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) const {
+ return any_of(MI.memoperands(), [](MachineMemOperand *MMO) {
+ return MMO->getFlags() & MOSuppressPair;
+ });
}
/// Set a flag on the first MachineMemOperand to suppress pairing.
-void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
- if (MI->memoperands_empty())
+void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) const {
+ if (MI.memoperands_empty())
return;
+ (*MI.memoperands_begin())->setFlags(MOSuppressPair);
+}
- assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
- "Too many target MO flags");
- (*MI->memoperands_begin())
- ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
+bool AArch64InstrInfo::isUnscaledLdSt(unsigned Opc) const {
+ switch (Opc) {
+ default:
+ return false;
+ case AArch64::STURSi:
+ case AArch64::STURDi:
+ case AArch64::STURQi:
+ case AArch64::STURBBi:
+ case AArch64::STURHHi:
+ case AArch64::STURWi:
+ case AArch64::STURXi:
+ case AArch64::LDURSi:
+ case AArch64::LDURDi:
+ case AArch64::LDURQi:
+ case AArch64::LDURWi:
+ case AArch64::LDURXi:
+ case AArch64::LDURSWi:
+ case AArch64::LDURHHi:
+ case AArch64::LDURBBi:
+ case AArch64::LDURSBWi:
+ case AArch64::LDURSHWi:
+ return true;
+ }
}
-bool
-AArch64InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
- unsigned &Offset,
- const TargetRegisterInfo *TRI) const {
- switch (LdSt->getOpcode()) {
+bool AArch64InstrInfo::isUnscaledLdSt(MachineInstr &MI) const {
+ return isUnscaledLdSt(MI.getOpcode());
+}
+
+// Is this a candidate for ld/st merging or pairing? For example, we don't
+// touch volatiles or load/stores that have a hint to avoid pair formation.
+bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr &MI) const {
+ // If this is a volatile load/store, don't mess with it.
+ if (MI.hasOrderedMemoryRef())
+ return false;
+
+ // Make sure this is a reg+imm (as opposed to an address reloc).
+ assert(MI.getOperand(1).isReg() && "Expected a reg operand.");
+ if (!MI.getOperand(2).isImm())
+ return false;
+
+ // Can't merge/pair if the instruction modifies the base register.
+ // e.g., ldr x0, [x0]
+ unsigned BaseReg = MI.getOperand(1).getReg();
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+ if (MI.modifiesRegister(BaseReg, TRI))
+ return false;
+
+ // Check if this load/store has a hint to avoid pair formation.
+ // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
+ if (isLdStPairSuppressed(MI))
+ return false;
+
+ // On some CPUs quad load/store pairs are slower than two single load/stores.
+ if (Subtarget.avoidQuadLdStPairs()) {
+ switch (MI.getOpcode()) {
+ default:
+ break;
+
+ case AArch64::LDURQi:
+ case AArch64::STURQi:
+ case AArch64::LDRQui:
+ case AArch64::STRQui:
+ return false;
+ }
+ }
+
+ return true;
+}
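+
+// For example, "ldr x0, [x0]" is rejected above because the load modifies its
+// own base register, and volatile accesses are rejected because
+// hasOrderedMemoryRef() is true for them.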
+
+bool AArch64InstrInfo::getMemOpBaseRegImmOfs(
+ MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset,
+ const TargetRegisterInfo *TRI) const {
+ switch (LdSt.getOpcode()) {
default:
return false;
+ // Scaled instructions.
case AArch64::STRSui:
case AArch64::STRDui:
case AArch64::STRQui:
@@ -1329,29 +1555,45 @@ AArch64InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
case AArch64::LDRQui:
case AArch64::LDRXui:
case AArch64::LDRWui:
- if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
- return false;
- BaseReg = LdSt->getOperand(1).getReg();
- MachineFunction &MF = *LdSt->getParent()->getParent();
- unsigned Width = getRegClass(LdSt->getDesc(), 0, TRI, MF)->getSize();
- Offset = LdSt->getOperand(2).getImm() * Width;
- return true;
+ case AArch64::LDRSWui:
+ // Unscaled instructions.
+ case AArch64::STURSi:
+ case AArch64::STURDi:
+ case AArch64::STURQi:
+ case AArch64::STURXi:
+ case AArch64::STURWi:
+ case AArch64::LDURSi:
+ case AArch64::LDURDi:
+ case AArch64::LDURQi:
+ case AArch64::LDURWi:
+ case AArch64::LDURXi:
+ case AArch64::LDURSWi:
+ unsigned Width;
+ return getMemOpBaseRegImmOfsWidth(LdSt, BaseReg, Offset, Width, TRI);
};
}
bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
- MachineInstr *LdSt, unsigned &BaseReg, int &Offset, int &Width,
+ MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,
const TargetRegisterInfo *TRI) const {
+ assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
// Handle only loads/stores with base register followed by immediate offset.
- if (LdSt->getNumOperands() != 3)
- return false;
- if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
+ if (LdSt.getNumExplicitOperands() == 3) {
+ // Non-paired instruction (e.g., ldr x1, [x0, #8]).
+ if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
+ return false;
+ } else if (LdSt.getNumExplicitOperands() == 4) {
+ // Paired instruction (e.g., ldp x1, x2, [x0, #8]).
+ if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isReg() ||
+ !LdSt.getOperand(3).isImm())
+ return false;
+ } else
return false;
  // Offset is calculated as the immediate operand multiplied by the scaling
  // factor. Unscaled instructions have scaling factor set to 1.
- int Scale = 0;
- switch (LdSt->getOpcode()) {
+ unsigned Scale = 0;
+ switch (LdSt.getOpcode()) {
default:
return false;
case AArch64::LDURQi:
@@ -1392,18 +1634,48 @@ bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
Width = 1;
Scale = 1;
break;
+ case AArch64::LDPQi:
+ case AArch64::LDNPQi:
+ case AArch64::STPQi:
+ case AArch64::STNPQi:
+ Scale = 16;
+ Width = 32;
+ break;
case AArch64::LDRQui:
case AArch64::STRQui:
Scale = Width = 16;
break;
+ case AArch64::LDPXi:
+ case AArch64::LDPDi:
+ case AArch64::LDNPXi:
+ case AArch64::LDNPDi:
+ case AArch64::STPXi:
+ case AArch64::STPDi:
+ case AArch64::STNPXi:
+ case AArch64::STNPDi:
+ Scale = 8;
+ Width = 16;
+ break;
case AArch64::LDRXui:
case AArch64::LDRDui:
case AArch64::STRXui:
case AArch64::STRDui:
Scale = Width = 8;
break;
+ case AArch64::LDPWi:
+ case AArch64::LDPSi:
+ case AArch64::LDNPWi:
+ case AArch64::LDNPSi:
+ case AArch64::STPWi:
+ case AArch64::STPSi:
+ case AArch64::STNPWi:
+ case AArch64::STNPSi:
+ Scale = 4;
+ Width = 8;
+ break;
case AArch64::LDRWui:
case AArch64::LDRSui:
+ case AArch64::LDRSWui:
case AArch64::STRWui:
case AArch64::STRSui:
Scale = Width = 4;
@@ -1420,41 +1692,120 @@ bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
case AArch64::STRBBui:
Scale = Width = 1;
break;
- };
+ }
- BaseReg = LdSt->getOperand(1).getReg();
- Offset = LdSt->getOperand(2).getImm() * Scale;
+ if (LdSt.getNumExplicitOperands() == 3) {
+ BaseReg = LdSt.getOperand(1).getReg();
+ Offset = LdSt.getOperand(2).getImm() * Scale;
+ } else {
+ assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands");
+ BaseReg = LdSt.getOperand(2).getReg();
+ Offset = LdSt.getOperand(3).getImm() * Scale;
+ }
return true;
}
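+
+// Worked example for the paired case above: for "ldp x1, x2, [x0, #24]" the
+// immediate operand holds the scaled value 3, so BaseReg = x0,
+// Offset = 3 * 8 = 24 bytes and Width = 16 bytes (two 8-byte registers).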
+// Scale the unscaled offset. Returns false if the unscaled offset can't be
+// scaled.
+static bool scaleOffset(unsigned Opc, int64_t &Offset) {
+ unsigned OffsetStride = 1;
+ switch (Opc) {
+ default:
+ return false;
+ case AArch64::LDURQi:
+ case AArch64::STURQi:
+ OffsetStride = 16;
+ break;
+ case AArch64::LDURXi:
+ case AArch64::LDURDi:
+ case AArch64::STURXi:
+ case AArch64::STURDi:
+ OffsetStride = 8;
+ break;
+ case AArch64::LDURWi:
+ case AArch64::LDURSi:
+ case AArch64::LDURSWi:
+ case AArch64::STURWi:
+ case AArch64::STURSi:
+ OffsetStride = 4;
+ break;
+ }
+ // If the byte-offset isn't a multiple of the stride, we can't scale this
+ // offset.
+ if (Offset % OffsetStride != 0)
+ return false;
+
+ // Convert the byte-offset used by unscaled into an "element" offset used
+ // by the scaled pair load/store instructions.
+ Offset /= OffsetStride;
+ return true;
+}
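+
+// For example, scaleOffset(AArch64::LDURXi, Offset = 16) rewrites Offset to
+// 2 (16 / 8), while Offset = 12 fails because it is not a multiple of 8.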
+
+static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {
+ if (FirstOpc == SecondOpc)
+ return true;
+ // We can also pair sign-ext and zero-ext instructions.
+ switch (FirstOpc) {
+ default:
+ return false;
+ case AArch64::LDRWui:
+ case AArch64::LDURWi:
+ return SecondOpc == AArch64::LDRSWui || SecondOpc == AArch64::LDURSWi;
+ case AArch64::LDRSWui:
+ case AArch64::LDURSWi:
+ return SecondOpc == AArch64::LDRWui || SecondOpc == AArch64::LDURWi;
+ }
+ // These instructions can't be paired based on their opcodes.
+ return false;
+}
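+
+// For example, an LDRWui and an LDRSWui to adjacent slots are considered
+// pairable here even though one of them sign-extends; the load/store
+// optimizer is expected to materialize the extension when it forms the pair.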
+
/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
-bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
- MachineInstr *SecondLdSt,
- unsigned NumLoads) const {
+bool AArch64InstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
+ MachineInstr &SecondLdSt,
+ unsigned NumLoads) const {
// Only cluster up to a single pair.
if (NumLoads > 1)
return false;
- if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
+
+ // Can we pair these instructions based on their opcodes?
+ unsigned FirstOpc = FirstLdSt.getOpcode();
+ unsigned SecondOpc = SecondLdSt.getOpcode();
+ if (!canPairLdStOpc(FirstOpc, SecondOpc))
+ return false;
+
+ // Can't merge volatiles or load/stores that have a hint to avoid pair
+ // formation, for example.
+ if (!isCandidateToMergeOrPair(FirstLdSt) ||
+ !isCandidateToMergeOrPair(SecondLdSt))
+ return false;
+
+ // isCandidateToMergeOrPair guarantees that operand 2 is an immediate.
+ int64_t Offset1 = FirstLdSt.getOperand(2).getImm();
+ if (isUnscaledLdSt(FirstOpc) && !scaleOffset(FirstOpc, Offset1))
+ return false;
+
+ int64_t Offset2 = SecondLdSt.getOperand(2).getImm();
+ if (isUnscaledLdSt(SecondOpc) && !scaleOffset(SecondOpc, Offset2))
return false;
- // getMemOpBaseRegImmOfs guarantees that oper 2 isImm.
- unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
- // Allow 6 bits of positive range.
- if (Ofs1 > 64)
+
+ // Pairwise instructions have a 7-bit signed offset field.
+ if (Offset1 > 63 || Offset1 < -64)
return false;
+
// The caller should already have ordered First/SecondLdSt by offset.
- unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
- return Ofs1 + 1 == Ofs2;
+ assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
+ return Offset1 + 1 == Offset2;
}
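+
+// For example, "ldr x1, [x0, #8]" followed by "ldr x2, [x0, #16]" has scaled
+// offsets 1 and 2, so the two loads are clustered as an ldp candidate.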
-bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
- MachineInstr *Second) const {
- if (Subtarget.isCyclone()) {
- // Cyclone can fuse CMN, CMP, TST followed by Bcc.
- unsigned SecondOpcode = Second->getOpcode();
+bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr &First,
+ MachineInstr &Second) const {
+ if (Subtarget.hasMacroOpFusion()) {
+ // Fuse CMN, CMP, TST followed by Bcc.
+ unsigned SecondOpcode = Second.getOpcode();
if (SecondOpcode == AArch64::Bcc) {
- switch (First->getOpcode()) {
+ switch (First.getOpcode()) {
default:
return false;
case AArch64::SUBSWri:
@@ -1466,10 +1817,10 @@ bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
return true;
}
}
- // Cyclone B0 also supports ALU operations followed by CBZ/CBNZ.
+ // Fuse ALU operations followed by CBZ/CBNZ.
if (SecondOpcode == AArch64::CBNZW || SecondOpcode == AArch64::CBNZX ||
SecondOpcode == AArch64::CBZW || SecondOpcode == AArch64::CBZX) {
- switch (First->getOpcode()) {
+ switch (First.getOpcode()) {
default:
return false;
case AArch64::ADDWri:
@@ -1491,7 +1842,7 @@ bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(
MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var,
- const MDNode *Expr, DebugLoc DL) const {
+ const MDNode *Expr, const DebugLoc &DL) const {
MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
.addFrameIndex(FrameIx)
.addImm(0)
@@ -1521,7 +1872,7 @@ static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
}
void AArch64InstrInfo::copyPhysRegTuple(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL,
unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
llvm::ArrayRef<unsigned> Indices) const {
assert(Subtarget.hasNEON() &&
@@ -1547,9 +1898,9 @@ void AArch64InstrInfo::copyPhysRegTuple(
}
void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
if (AArch64::GPR32spRegClass.contains(DestReg) &&
(AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
const TargetRegisterInfo *TRI = &getRegisterInfo();
@@ -1818,8 +2169,7 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
if (SrcReg == AArch64::NZCV) {
assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
- BuildMI(MBB, I, DL, get(AArch64::MRS))
- .addReg(DestReg)
+ BuildMI(MBB, I, DL, get(AArch64::MRS), DestReg)
.addImm(AArch64SysReg::NZCV)
.addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
return;
@@ -1879,39 +2229,45 @@ void AArch64InstrInfo::storeRegToStackSlot(
else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() &&
"Unexpected register store without NEON");
- Opc = AArch64::ST1Twov1d, Offset = false;
+ Opc = AArch64::ST1Twov1d;
+ Offset = false;
}
break;
case 24:
if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() &&
"Unexpected register store without NEON");
- Opc = AArch64::ST1Threev1d, Offset = false;
+ Opc = AArch64::ST1Threev1d;
+ Offset = false;
}
break;
case 32:
if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() &&
"Unexpected register store without NEON");
- Opc = AArch64::ST1Fourv1d, Offset = false;
+ Opc = AArch64::ST1Fourv1d;
+ Offset = false;
} else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() &&
"Unexpected register store without NEON");
- Opc = AArch64::ST1Twov2d, Offset = false;
+ Opc = AArch64::ST1Twov2d;
+ Offset = false;
}
break;
case 48:
if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() &&
"Unexpected register store without NEON");
- Opc = AArch64::ST1Threev2d, Offset = false;
+ Opc = AArch64::ST1Threev2d;
+ Offset = false;
}
break;
case 64:
if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() &&
"Unexpected register store without NEON");
- Opc = AArch64::ST1Fourv2d, Offset = false;
+ Opc = AArch64::ST1Fourv2d;
+ Offset = false;
}
break;
}
@@ -1977,39 +2333,45 @@ void AArch64InstrInfo::loadRegFromStackSlot(
else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() &&
"Unexpected register load without NEON");
- Opc = AArch64::LD1Twov1d, Offset = false;
+ Opc = AArch64::LD1Twov1d;
+ Offset = false;
}
break;
case 24:
if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() &&
"Unexpected register load without NEON");
- Opc = AArch64::LD1Threev1d, Offset = false;
+ Opc = AArch64::LD1Threev1d;
+ Offset = false;
}
break;
case 32:
if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() &&
"Unexpected register load without NEON");
- Opc = AArch64::LD1Fourv1d, Offset = false;
+ Opc = AArch64::LD1Fourv1d;
+ Offset = false;
} else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() &&
"Unexpected register load without NEON");
- Opc = AArch64::LD1Twov2d, Offset = false;
+ Opc = AArch64::LD1Twov2d;
+ Offset = false;
}
break;
case 48:
if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() &&
"Unexpected register load without NEON");
- Opc = AArch64::LD1Threev2d, Offset = false;
+ Opc = AArch64::LD1Threev2d;
+ Offset = false;
}
break;
case 64:
if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
assert(Subtarget.hasNEON() &&
"Unexpected register load without NEON");
- Opc = AArch64::LD1Fourv2d, Offset = false;
+ Opc = AArch64::LD1Fourv2d;
+ Offset = false;
}
break;
}
@@ -2024,13 +2386,16 @@ void AArch64InstrInfo::loadRegFromStackSlot(
}
void llvm::emitFrameOffset(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL,
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
unsigned DestReg, unsigned SrcReg, int Offset,
const TargetInstrInfo *TII,
MachineInstr::MIFlag Flag, bool SetNZCV) {
if (DestReg == SrcReg && Offset == 0)
return;
+ assert((DestReg != AArch64::SP || Offset % 16 == 0) &&
+ "SP increment/decrement not 16-byte aligned");
+
bool isSub = Offset < 0;
if (isSub)
Offset = -Offset;
@@ -2082,8 +2447,9 @@ void llvm::emitFrameOffset(MachineBasicBlock &MBB,
}
MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
- MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+ MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, int FrameIndex,
+ LiveIntervals *LIS) const {
// This is a bit of a hack. Consider this instruction:
//
// %vreg0<def> = COPY %SP; GPR64all:%vreg0
@@ -2097,9 +2463,9 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
//
// <rdar://problem/11522048>
//
- if (MI->isCopy()) {
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned SrcReg = MI->getOperand(1).getReg();
+ if (MI.isCopy()) {
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned SrcReg = MI.getOperand(1).getReg();
if (SrcReg == AArch64::SP &&
TargetRegisterInfo::isVirtualRegister(DstReg)) {
MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
@@ -2393,9 +2759,10 @@ void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
NopInst.setOpcode(AArch64::HINT);
NopInst.addOperand(MCOperand::createImm(0));
}
-/// useMachineCombiner - return true when a target supports MachineCombiner
+
+// AArch64 supports MachineCombiner.
bool AArch64InstrInfo::useMachineCombiner() const {
- // AArch64 supports the combiner
return true;
}
//
@@ -2456,37 +2823,75 @@ static bool isCombineInstrCandidate64(unsigned Opc) {
return false;
}
//
+// FP opcodes that can be combined with an FMUL.
+static bool isCombineInstrCandidateFP(const MachineInstr &Inst) {
+ switch (Inst.getOpcode()) {
+ case AArch64::FADDSrr:
+ case AArch64::FADDDrr:
+ case AArch64::FADDv2f32:
+ case AArch64::FADDv2f64:
+ case AArch64::FADDv4f32:
+ case AArch64::FSUBSrr:
+ case AArch64::FSUBDrr:
+ case AArch64::FSUBv2f32:
+ case AArch64::FSUBv2f64:
+ case AArch64::FSUBv4f32:
+ return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
+ default:
+ break;
+ }
+ return false;
+}
+//
// Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate(unsigned Opc) {
return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
}
-static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
- unsigned MulOpc, unsigned ZeroReg) {
+//
+// Utility routine that checks if \p MO is defined by a \p CombineOpc
+// instruction in the basic block \p MBB.
+static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
+ unsigned CombineOpc, unsigned ZeroReg = 0,
+ bool CheckZeroReg = false) {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
MachineInstr *MI = nullptr;
- // We need a virtual register definition.
+
if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
MI = MRI.getUniqueVRegDef(MO.getReg());
// And it needs to be in the trace (otherwise, it won't have a depth).
- if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != MulOpc)
- return false;
-
- assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
- MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
- MI->getOperand(3).isReg() && "MAdd/MSub must have a least 4 regs");
-
- // The third input reg must be zero.
- if (MI->getOperand(3).getReg() != ZeroReg)
+ if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != CombineOpc)
return false;
-
// Must only used by the user we combine with.
if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
return false;
+ if (CheckZeroReg) {
+ assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
+ MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
+ MI->getOperand(3).isReg() && "MAdd/MSub must have a least 4 regs");
+ // The third input reg must be zero.
+ if (MI->getOperand(3).getReg() != ZeroReg)
+ return false;
+ }
+
return true;
}
+//
+// Is \p MO defined by an integer multiply, and can it be combined?
+static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
+ unsigned MulOpc, unsigned ZeroReg) {
+ return canCombine(MBB, MO, MulOpc, ZeroReg, true);
+}
+
+//
+// Is \p MO defined by a floating-point multiply, and can it be combined?
+static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO,
+ unsigned MulOpc) {
+ return canCombine(MBB, MO, MulOpc);
+}
+
// TODO: There are many more machine instruction opcodes to match:
// 1. Other data types (integer, vectors)
// 2. Other math / logic operations (xor, or)
@@ -2522,17 +2927,17 @@ static bool getMaddPatterns(MachineInstr &Root,
bool Found = false;
if (!isCombineInstrCandidate(Opc))
- return 0;
+ return false;
if (isCombineInstrSettingFlag(Opc)) {
int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
// When NZCV is live bail out.
if (Cmp_NZCV == -1)
- return 0;
- unsigned NewOpc = convertFlagSettingOpcode(&Root);
+ return false;
+ unsigned NewOpc = convertFlagSettingOpcode(Root);
// When opcode can't change bail out.
// CHECKME: do we miss any cases for opcode conversion?
if (NewOpc == Opc)
- return 0;
+ return false;
Opc = NewOpc;
}
@@ -2620,7 +3025,230 @@ static bool getMaddPatterns(MachineInstr &Root,
}
return Found;
}
+/// Floating-Point Support
+
+/// Find floating-point instructions that can be turned into fused
+/// multiply-add or multiply-subtract sequences.
+static bool getFMAPatterns(MachineInstr &Root,
+ SmallVectorImpl<MachineCombinerPattern> &Patterns) {
+
+ if (!isCombineInstrCandidateFP(Root))
+    return false;
+ MachineBasicBlock &MBB = *Root.getParent();
+ bool Found = false;
+
+ switch (Root.getOpcode()) {
+ default:
+ assert(false && "Unsupported FP instruction in combiner\n");
+ break;
+ case AArch64::FADDSrr:
+ assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
+ "FADDWrr does not have register operands");
+ if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
+ Patterns.push_back(MachineCombinerPattern::FMULADDS_OP1);
+ Found = true;
+ } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
+ AArch64::FMULv1i32_indexed)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP1);
+ Found = true;
+ }
+ if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
+ Patterns.push_back(MachineCombinerPattern::FMULADDS_OP2);
+ Found = true;
+ } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv1i32_indexed)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv1i32_indexed_OP2);
+ Found = true;
+ }
+ break;
+ case AArch64::FADDDrr:
+ if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
+ Patterns.push_back(MachineCombinerPattern::FMULADDD_OP1);
+ Found = true;
+ } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
+ AArch64::FMULv1i64_indexed)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP1);
+ Found = true;
+ }
+ if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
+ Patterns.push_back(MachineCombinerPattern::FMULADDD_OP2);
+ Found = true;
+ } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv1i64_indexed)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv1i64_indexed_OP2);
+ Found = true;
+ }
+ break;
+ case AArch64::FADDv2f32:
+ if (canCombineWithFMUL(MBB, Root.getOperand(1),
+ AArch64::FMULv2i32_indexed)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP1);
+ Found = true;
+ } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
+ AArch64::FMULv2f32)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP1);
+ Found = true;
+ }
+ if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv2i32_indexed)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv2i32_indexed_OP2);
+ Found = true;
+ } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv2f32)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv2f32_OP2);
+ Found = true;
+ }
+ break;
+ case AArch64::FADDv2f64:
+ if (canCombineWithFMUL(MBB, Root.getOperand(1),
+ AArch64::FMULv2i64_indexed)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP1);
+ Found = true;
+ } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
+ AArch64::FMULv2f64)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP1);
+ Found = true;
+ }
+ if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv2i64_indexed)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv2i64_indexed_OP2);
+ Found = true;
+ } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv2f64)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv2f64_OP2);
+ Found = true;
+ }
+ break;
+ case AArch64::FADDv4f32:
+ if (canCombineWithFMUL(MBB, Root.getOperand(1),
+ AArch64::FMULv4i32_indexed)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP1);
+ Found = true;
+ } else if (canCombineWithFMUL(MBB, Root.getOperand(1),
+ AArch64::FMULv4f32)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP1);
+ Found = true;
+ }
+ if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv4i32_indexed)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv4i32_indexed_OP2);
+ Found = true;
+ } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv4f32)) {
+ Patterns.push_back(MachineCombinerPattern::FMLAv4f32_OP2);
+ Found = true;
+ }
+ break;
+
+ case AArch64::FSUBSrr:
+ if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULSrr)) {
+ Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP1);
+ Found = true;
+ }
+ if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULSrr)) {
+ Patterns.push_back(MachineCombinerPattern::FMULSUBS_OP2);
+ Found = true;
+ } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv1i32_indexed)) {
+ Patterns.push_back(MachineCombinerPattern::FMLSv1i32_indexed_OP2);
+ Found = true;
+ }
+ break;
+ case AArch64::FSUBDrr:
+ if (canCombineWithFMUL(MBB, Root.getOperand(1), AArch64::FMULDrr)) {
+ Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP1);
+ Found = true;
+ }
+ if (canCombineWithFMUL(MBB, Root.getOperand(2), AArch64::FMULDrr)) {
+ Patterns.push_back(MachineCombinerPattern::FMULSUBD_OP2);
+ Found = true;
+ } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv1i64_indexed)) {
+ Patterns.push_back(MachineCombinerPattern::FMLSv1i64_indexed_OP2);
+ Found = true;
+ }
+ break;
+ case AArch64::FSUBv2f32:
+ if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv2i32_indexed)) {
+ Patterns.push_back(MachineCombinerPattern::FMLSv2i32_indexed_OP2);
+ Found = true;
+ } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv2f32)) {
+ Patterns.push_back(MachineCombinerPattern::FMLSv2f32_OP2);
+ Found = true;
+ }
+ break;
+ case AArch64::FSUBv2f64:
+ if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv2i64_indexed)) {
+ Patterns.push_back(MachineCombinerPattern::FMLSv2i64_indexed_OP2);
+ Found = true;
+ } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv2f64)) {
+ Patterns.push_back(MachineCombinerPattern::FMLSv2f64_OP2);
+ Found = true;
+ }
+ break;
+ case AArch64::FSUBv4f32:
+ if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv4i32_indexed)) {
+ Patterns.push_back(MachineCombinerPattern::FMLSv4i32_indexed_OP2);
+ Found = true;
+ } else if (canCombineWithFMUL(MBB, Root.getOperand(2),
+ AArch64::FMULv4f32)) {
+ Patterns.push_back(MachineCombinerPattern::FMLSv4f32_OP2);
+ Found = true;
+ }
+ break;
+ }
+ return Found;
+}
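+// For example (virtual register names illustrative), given
+//   %vreg1 = FMULv2f32 %vreg2, %vreg3
+//   %vreg0 = FADDv2f32 %vreg1, %vreg4
+// the pattern FMLAv2f32_OP1 is recorded, provided the FMUL sits in the same
+// block as the FADD and %vreg1 has no other use.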
+
+/// Return true when a code sequence can improve throughput. It
+/// should be called only for instructions in loops.
+/// \param Pattern - combiner pattern
+bool
+AArch64InstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
+ switch (Pattern) {
+ default:
+ break;
+ case MachineCombinerPattern::FMULADDS_OP1:
+ case MachineCombinerPattern::FMULADDS_OP2:
+ case MachineCombinerPattern::FMULSUBS_OP1:
+ case MachineCombinerPattern::FMULSUBS_OP2:
+ case MachineCombinerPattern::FMULADDD_OP1:
+ case MachineCombinerPattern::FMULADDD_OP2:
+ case MachineCombinerPattern::FMULSUBD_OP1:
+ case MachineCombinerPattern::FMULSUBD_OP2:
+ case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
+ case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
+ case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
+ case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
+ case MachineCombinerPattern::FMLAv2f32_OP2:
+ case MachineCombinerPattern::FMLAv2f32_OP1:
+ case MachineCombinerPattern::FMLAv2f64_OP1:
+ case MachineCombinerPattern::FMLAv2f64_OP2:
+ case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
+ case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
+ case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
+ case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
+ case MachineCombinerPattern::FMLAv4f32_OP1:
+ case MachineCombinerPattern::FMLAv4f32_OP2:
+ case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
+ case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
+ case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
+ case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
+ case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
+ case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
+ case MachineCombinerPattern::FMLSv2f32_OP2:
+ case MachineCombinerPattern::FMLSv2f64_OP2:
+ case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
+ case MachineCombinerPattern::FMLSv4f32_OP2:
+ return true;
+ } // end switch (Pattern)
+ return false;
+}
/// Return true when there is potentially a faster code sequence for an
/// instruction chain ending in \p Root. All potential patterns are listed in
/// the \p Pattern vector. Pattern should be sorted in priority order since the
@@ -2629,28 +3257,35 @@ static bool getMaddPatterns(MachineInstr &Root,
bool AArch64InstrInfo::getMachineCombinerPatterns(
MachineInstr &Root,
SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
+ // Integer patterns
if (getMaddPatterns(Root, Patterns))
return true;
+ // Floating point patterns
+ if (getFMAPatterns(Root, Patterns))
+ return true;
return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
}
-/// genMadd - Generate madd instruction and combine mul and add.
-/// Example:
-/// MUL I=A,B,0
-/// ADD R,I,C
-/// ==> MADD R,A,B,C
-/// \param Root is the ADD instruction
+enum class FMAInstKind { Default, Indexed, Accumulator };
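+// A sketch of the operand orders the three kinds select below (register
+// names illustrative):
+//   Default:     FMADD Rd, Rn(mul), Rm(mul), Ra(acc)
+//   Indexed:     FMLA  Rd, Ra(acc), Rn(mul), Rm(mul), lane
+//   Accumulator: FMLA  Rd, Ra(acc), Rn(mul), Rm(mul)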
+/// genFusedMultiply - Generate fused multiply instructions.
+/// This function supports both integer and floating point instructions.
+/// A typical example:
+/// F|MUL I=A,B,0
+/// F|ADD R,I,C
+/// ==> F|MADD R,A,B,C
+/// \param Root is the F|ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
-/// the MUL. In the example above IdxMulOpd is 1.
-/// \param MaddOpc the opcode fo the madd instruction
-static MachineInstr *genMadd(MachineFunction &MF, MachineRegisterInfo &MRI,
- const TargetInstrInfo *TII, MachineInstr &Root,
- SmallVectorImpl<MachineInstr *> &InsInstrs,
- unsigned IdxMulOpd, unsigned MaddOpc,
- const TargetRegisterClass *RC) {
+/// the F|MUL. In the example above IdxMulOpd is 1.
+/// \param MaddOpc the opcode of the f|madd instruction
+static MachineInstr *
+genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
+ const TargetInstrInfo *TII, MachineInstr &Root,
+ SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd,
+ unsigned MaddOpc, const TargetRegisterClass *RC,
+ FMAInstKind kind = FMAInstKind::Default) {
assert(IdxMulOpd == 1 || IdxMulOpd == 2);
unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
@@ -2672,12 +3307,26 @@ static MachineInstr *genMadd(MachineFunction &MF, MachineRegisterInfo &MRI,
if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
MRI.constrainRegClass(SrcReg2, RC);
- MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
- ResultReg)
- .addReg(SrcReg0, getKillRegState(Src0IsKill))
- .addReg(SrcReg1, getKillRegState(Src1IsKill))
- .addReg(SrcReg2, getKillRegState(Src2IsKill));
- // Insert the MADD
+ MachineInstrBuilder MIB;
+ if (kind == FMAInstKind::Default)
+ MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
+ .addReg(SrcReg0, getKillRegState(Src0IsKill))
+ .addReg(SrcReg1, getKillRegState(Src1IsKill))
+ .addReg(SrcReg2, getKillRegState(Src2IsKill));
+ else if (kind == FMAInstKind::Indexed)
+ MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
+ .addReg(SrcReg2, getKillRegState(Src2IsKill))
+ .addReg(SrcReg0, getKillRegState(Src0IsKill))
+ .addReg(SrcReg1, getKillRegState(Src1IsKill))
+ .addImm(MUL->getOperand(3).getImm());
+ else if (kind == FMAInstKind::Accumulator)
+ MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)
+ .addReg(SrcReg2, getKillRegState(Src2IsKill))
+ .addReg(SrcReg0, getKillRegState(Src0IsKill))
+ .addReg(SrcReg1, getKillRegState(Src1IsKill));
+ else
+ assert(false && "Invalid FMA instruction kind");
+ // Insert the MADD (MADD, FMADD, FMSUB, FMLA, FMLS)
InsInstrs.push_back(MIB);
return MUL;
}
@@ -2765,7 +3414,7 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
Opc = AArch64::MADDXrrr;
RC = &AArch64::GPR64RegClass;
}
- MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
break;
case MachineCombinerPattern::MULADDW_OP2:
case MachineCombinerPattern::MULADDX_OP2:
@@ -2780,7 +3429,7 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
Opc = AArch64::MADDXrrr;
RC = &AArch64::GPR64RegClass;
}
- MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
break;
case MachineCombinerPattern::MULADDWI_OP1:
case MachineCombinerPattern::MULADDXI_OP1: {
@@ -2872,7 +3521,7 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
Opc = AArch64::MSUBXrrr;
RC = &AArch64::GPR64RegClass;
}
- MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
break;
case MachineCombinerPattern::MULSUBWI_OP1:
case MachineCombinerPattern::MULSUBXI_OP1: {
@@ -2917,6 +3566,234 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
}
break;
}
+ // Floating Point Support
+ case MachineCombinerPattern::FMULADDS_OP1:
+ case MachineCombinerPattern::FMULADDD_OP1:
+ // FMUL I=A,B,0
+ // FADD R,I,C
+ // ==> FMADD R,A,B,C
+ // --- Create(FMADD);
+ if (Pattern == MachineCombinerPattern::FMULADDS_OP1) {
+ Opc = AArch64::FMADDSrrr;
+ RC = &AArch64::FPR32RegClass;
+ } else {
+ Opc = AArch64::FMADDDrrr;
+ RC = &AArch64::FPR64RegClass;
+ }
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
+ break;
+ case MachineCombinerPattern::FMULADDS_OP2:
+ case MachineCombinerPattern::FMULADDD_OP2:
+ // FMUL I=A,B,0
+ // FADD R,C,I
+ // ==> FMADD R,A,B,C
+ // --- Create(FMADD);
+ if (Pattern == MachineCombinerPattern::FMULADDS_OP2) {
+ Opc = AArch64::FMADDSrrr;
+ RC = &AArch64::FPR32RegClass;
+ } else {
+ Opc = AArch64::FMADDDrrr;
+ RC = &AArch64::FPR64RegClass;
+ }
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
+ break;
+
+ case MachineCombinerPattern::FMLAv1i32_indexed_OP1:
+ Opc = AArch64::FMLAv1i32_indexed;
+ RC = &AArch64::FPR32RegClass;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
+ FMAInstKind::Indexed);
+ break;
+ case MachineCombinerPattern::FMLAv1i32_indexed_OP2:
+ Opc = AArch64::FMLAv1i32_indexed;
+ RC = &AArch64::FPR32RegClass;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Indexed);
+ break;
+
+ case MachineCombinerPattern::FMLAv1i64_indexed_OP1:
+ Opc = AArch64::FMLAv1i64_indexed;
+ RC = &AArch64::FPR64RegClass;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
+ FMAInstKind::Indexed);
+ break;
+ case MachineCombinerPattern::FMLAv1i64_indexed_OP2:
+ Opc = AArch64::FMLAv1i64_indexed;
+ RC = &AArch64::FPR64RegClass;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Indexed);
+ break;
+
+ case MachineCombinerPattern::FMLAv2i32_indexed_OP1:
+ case MachineCombinerPattern::FMLAv2f32_OP1:
+ RC = &AArch64::FPR64RegClass;
+ if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP1) {
+ Opc = AArch64::FMLAv2i32_indexed;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
+ FMAInstKind::Indexed);
+ } else {
+ Opc = AArch64::FMLAv2f32;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
+ FMAInstKind::Accumulator);
+ }
+ break;
+ case MachineCombinerPattern::FMLAv2i32_indexed_OP2:
+ case MachineCombinerPattern::FMLAv2f32_OP2:
+ RC = &AArch64::FPR64RegClass;
+ if (Pattern == MachineCombinerPattern::FMLAv2i32_indexed_OP2) {
+ Opc = AArch64::FMLAv2i32_indexed;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Indexed);
+ } else {
+ Opc = AArch64::FMLAv2f32;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Accumulator);
+ }
+ break;
+
+ case MachineCombinerPattern::FMLAv2i64_indexed_OP1:
+ case MachineCombinerPattern::FMLAv2f64_OP1:
+ RC = &AArch64::FPR128RegClass;
+ if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP1) {
+ Opc = AArch64::FMLAv2i64_indexed;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
+ FMAInstKind::Indexed);
+ } else {
+ Opc = AArch64::FMLAv2f64;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
+ FMAInstKind::Accumulator);
+ }
+ break;
+ case MachineCombinerPattern::FMLAv2i64_indexed_OP2:
+ case MachineCombinerPattern::FMLAv2f64_OP2:
+ RC = &AArch64::FPR128RegClass;
+ if (Pattern == MachineCombinerPattern::FMLAv2i64_indexed_OP2) {
+ Opc = AArch64::FMLAv2i64_indexed;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Indexed);
+ } else {
+ Opc = AArch64::FMLAv2f64;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Accumulator);
+ }
+ break;
+
+ case MachineCombinerPattern::FMLAv4i32_indexed_OP1:
+ case MachineCombinerPattern::FMLAv4f32_OP1:
+ RC = &AArch64::FPR128RegClass;
+ if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP1) {
+ Opc = AArch64::FMLAv4i32_indexed;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
+ FMAInstKind::Indexed);
+ } else {
+ Opc = AArch64::FMLAv4f32;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC,
+ FMAInstKind::Accumulator);
+ }
+ break;
+
+ case MachineCombinerPattern::FMLAv4i32_indexed_OP2:
+ case MachineCombinerPattern::FMLAv4f32_OP2:
+ RC = &AArch64::FPR128RegClass;
+ if (Pattern == MachineCombinerPattern::FMLAv4i32_indexed_OP2) {
+ Opc = AArch64::FMLAv4i32_indexed;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Indexed);
+ } else {
+ Opc = AArch64::FMLAv4f32;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Accumulator);
+ }
+ break;
+
+ case MachineCombinerPattern::FMULSUBS_OP1:
+ case MachineCombinerPattern::FMULSUBD_OP1: {
+ // FMUL I=A,B,0
+ // FSUB R,I,C
+ // ==> FNMSUB R,A,B,C // = -C + A*B
+ // --- Create(FNMSUB);
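+ // For example (registers illustrative):
+ //   fmul s0, s1, s2
+ //   fsub s3, s0, s4
+ // ==> fnmsub s3, s1, s2, s4   (= s1*s2 - s4)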
+ if (Pattern == MachineCombinerPattern::FMULSUBS_OP1) {
+ Opc = AArch64::FNMSUBSrrr;
+ RC = &AArch64::FPR32RegClass;
+ } else {
+ Opc = AArch64::FNMSUBDrrr;
+ RC = &AArch64::FPR64RegClass;
+ }
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
+ break;
+ }
+ case MachineCombinerPattern::FMULSUBS_OP2:
+ case MachineCombinerPattern::FMULSUBD_OP2: {
+ // FMUL I=A,B,0
+ // FSUB R,C,I
+ // ==> FMSUB R,A,B,C (computes C - A*B)
+ // --- Create(FMSUB);
+ if (Pattern == MachineCombinerPattern::FMULSUBS_OP2) {
+ Opc = AArch64::FMSUBSrrr;
+ RC = &AArch64::FPR32RegClass;
+ } else {
+ Opc = AArch64::FMSUBDrrr;
+ RC = &AArch64::FPR64RegClass;
+ }
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
+ break;
+
+ case MachineCombinerPattern::FMLSv1i32_indexed_OP2:
+ Opc = AArch64::FMLSv1i32_indexed;
+ RC = &AArch64::FPR32RegClass;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Indexed);
+ break;
+
+ case MachineCombinerPattern::FMLSv1i64_indexed_OP2:
+ Opc = AArch64::FMLSv1i64_indexed;
+ RC = &AArch64::FPR64RegClass;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Indexed);
+ break;
+
+ case MachineCombinerPattern::FMLSv2f32_OP2:
+ case MachineCombinerPattern::FMLSv2i32_indexed_OP2:
+ RC = &AArch64::FPR64RegClass;
+ if (Pattern == MachineCombinerPattern::FMLSv2i32_indexed_OP2) {
+ Opc = AArch64::FMLSv2i32_indexed;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Indexed);
+ } else {
+ Opc = AArch64::FMLSv2f32;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Accumulator);
+ }
+ break;
+
+ case MachineCombinerPattern::FMLSv2f64_OP2:
+ case MachineCombinerPattern::FMLSv2i64_indexed_OP2:
+ RC = &AArch64::FPR128RegClass;
+ if (Pattern == MachineCombinerPattern::FMLSv2i64_indexed_OP2) {
+ Opc = AArch64::FMLSv2i64_indexed;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Indexed);
+ } else {
+ Opc = AArch64::FMLSv2f64;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Accumulator);
+ }
+ break;
+
+ case MachineCombinerPattern::FMLSv4f32_OP2:
+ case MachineCombinerPattern::FMLSv4i32_indexed_OP2:
+ RC = &AArch64::FPR128RegClass;
+ if (Pattern == MachineCombinerPattern::FMLSv4i32_indexed_OP2) {
+ Opc = AArch64::FMLSv4i32_indexed;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Indexed);
+ } else {
+ Opc = AArch64::FMLSv4f32;
+ MUL = genFusedMultiply(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC,
+ FMAInstKind::Accumulator);
+ }
+ break;
+ }
} // end switch (Pattern)
// Record MUL and ADD/SUB for deletion
DelInstrs.push_back(MUL);
@@ -2940,14 +3817,23 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
/// to
/// b.<condition code>
///
+/// Replace a compare-and-branch sequence with a TBZ/TBNZ instruction when the
+/// compare's constant operand is a power of 2.
+///
+/// Examples:
+/// and w8, w8, #0x400
+/// cbnz w8, L1
+/// to
+/// tbnz w8, #10, L1
+///
/// \param MI Conditional Branch
/// \return True when the simple conditional branch is generated
///
-bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
+bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {
bool IsNegativeBranch = false;
bool IsTestAndBranch = false;
unsigned TargetBBInMI = 0;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default:
llvm_unreachable("Unknown branch instruction?");
case AArch64::Bcc:
@@ -2976,48 +3862,108 @@ bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
// So we increment a zero register and test for bits other
// than bit 0? Conservatively bail out in case the verifier
// missed this case.
- if (IsTestAndBranch && MI->getOperand(1).getImm())
+ if (IsTestAndBranch && MI.getOperand(1).getImm())
return false;
// Find Definition.
- assert(MI->getParent() && "Incomplete machine instruciton\n");
- MachineBasicBlock *MBB = MI->getParent();
+ assert(MI.getParent() && "Incomplete machine instruction\n");
+ MachineBasicBlock *MBB = MI.getParent();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo *MRI = &MF->getRegInfo();
- unsigned VReg = MI->getOperand(0).getReg();
+ unsigned VReg = MI.getOperand(0).getReg();
if (!TargetRegisterInfo::isVirtualRegister(VReg))
return false;
MachineInstr *DefMI = MRI->getVRegDef(VReg);
- // Look for CSINC
- if (!(DefMI->getOpcode() == AArch64::CSINCWr &&
- DefMI->getOperand(1).getReg() == AArch64::WZR &&
- DefMI->getOperand(2).getReg() == AArch64::WZR) &&
- !(DefMI->getOpcode() == AArch64::CSINCXr &&
- DefMI->getOperand(1).getReg() == AArch64::XZR &&
- DefMI->getOperand(2).getReg() == AArch64::XZR))
- return false;
+ // Look through COPY instructions to find the definition.
+ while (DefMI->isCopy()) {
+ unsigned CopyVReg = DefMI->getOperand(1).getReg();
+ if (!MRI->hasOneNonDBGUse(CopyVReg))
+ return false;
+ if (!MRI->hasOneDef(CopyVReg))
+ return false;
+ DefMI = MRI->getVRegDef(CopyVReg);
+ }
- if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
+ switch (DefMI->getOpcode()) {
+ default:
return false;
+ // Fold AND into a TBZ/TBNZ if the constant operand is a power of 2.
+ case AArch64::ANDWri:
+ case AArch64::ANDXri: {
+ if (IsTestAndBranch)
+ return false;
+ if (DefMI->getParent() != MBB)
+ return false;
+ if (!MRI->hasOneNonDBGUse(VReg))
+ return false;
- AArch64CC::CondCode CC =
- (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
- bool CheckOnlyCCWrites = true;
- // Convert only when the condition code is not modified between
- // the CSINC and the branch. The CC may be used by other
- // instructions in between.
- if (modifiesConditionCode(DefMI, MI, CheckOnlyCCWrites, &getRegisterInfo()))
- return false;
- MachineBasicBlock &RefToMBB = *MBB;
- MachineBasicBlock *TBB = MI->getOperand(TargetBBInMI).getMBB();
- DebugLoc DL = MI->getDebugLoc();
- if (IsNegativeBranch)
- CC = AArch64CC::getInvertedCondCode(CC);
- BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
- MI->eraseFromParent();
- return true;
+ bool Is32Bit = (DefMI->getOpcode() == AArch64::ANDWri);
+ uint64_t Mask = AArch64_AM::decodeLogicalImmediate(
+ DefMI->getOperand(2).getImm(), Is32Bit ? 32 : 64);
+ if (!isPowerOf2_64(Mask))
+ return false;
+
+ MachineOperand &MO = DefMI->getOperand(1);
+ unsigned NewReg = MO.getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(NewReg))
+ return false;
+
+ assert(!MRI->def_empty(NewReg) && "Register must be defined.");
+
+ MachineBasicBlock &RefToMBB = *MBB;
+ MachineBasicBlock *TBB = MI.getOperand(1).getMBB();
+ DebugLoc DL = MI.getDebugLoc();
+ unsigned Imm = Log2_64(Mask);
+ unsigned Opc = (Imm < 32)
+ ? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW)
+ : (IsNegativeBranch ? AArch64::TBNZX : AArch64::TBZX);
+ MachineInstr *NewMI = BuildMI(RefToMBB, MI, DL, get(Opc))
+ .addReg(NewReg)
+ .addImm(Imm)
+ .addMBB(TBB);
+ // Register lives on to the TBZ/TBNZ now.
+ MO.setIsKill(false);
+
+ // For immediates smaller than 32, we need to use the 32-bit
+ // variant (W) in all cases: the 64-bit variant cannot encode
+ // them. Therefore, if the input register is 64-bit, we need to
+ // take its 32-bit sub-register.
+ if (!Is32Bit && Imm < 32)
+ NewMI->getOperand(0).setSubReg(AArch64::sub_32);
+ MI.eraseFromParent();
+ return true;
+ }
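+ // For example (registers illustrative):
+ //   and x8, x9, #0x4
+ //   cbnz x8, L1
+ // ==> tbnz w9, #2, L1
+ // Bit 2 is only encodable in the W variant, hence the sub_32 above.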
+ // Look for CSINC
+ case AArch64::CSINCWr:
+ case AArch64::CSINCXr: {
+ if (!(DefMI->getOperand(1).getReg() == AArch64::WZR &&
+ DefMI->getOperand(2).getReg() == AArch64::WZR) &&
+ !(DefMI->getOperand(1).getReg() == AArch64::XZR &&
+ DefMI->getOperand(2).getReg() == AArch64::XZR))
+ return false;
+
+ if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
+ return false;
+
+ AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
+ // Convert only when the condition code is not modified between
+ // the CSINC and the branch. The CC may be used by other
+ // instructions in between.
+ if (areCFlagsAccessedBetweenInstrs(DefMI, MI, &getRegisterInfo(), AK_Write))
+ return false;
+ MachineBasicBlock &RefToMBB = *MBB;
+ MachineBasicBlock *TBB = MI.getOperand(TargetBBInMI).getMBB();
+ DebugLoc DL = MI.getDebugLoc();
+ if (IsNegativeBranch)
+ CC = AArch64CC::getInvertedCondCode(CC);
+ BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
+ MI.eraseFromParent();
+ return true;
+ }
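+ // For example (registers illustrative):
+ //   cset w8, eq
+ //   cbnz w8, L1
+ // ==> b.eq L1
+ // provided NZCV is not written between the cset and the branch.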
+ }
}
std::pair<unsigned, unsigned>
@@ -3046,7 +3992,6 @@ AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
static const std::pair<unsigned, const char *> TargetFlags[] = {
{MO_GOT, "aarch64-got"},
{MO_NC, "aarch64-nc"},
- {MO_TLS, "aarch64-tls"},
- {MO_CONSTPOOL, "aarch64-constant-pool"}};
+ {MO_TLS, "aarch64-tls"}};
return makeArrayRef(TargetFlags);
}
diff --git a/lib/Target/AArch64/AArch64InstrInfo.h b/lib/Target/AArch64/AArch64InstrInfo.h
index b5bb446f8c16..24bc0e639747 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/lib/Target/AArch64/AArch64InstrInfo.h
@@ -28,12 +28,6 @@ class AArch64Subtarget;
class AArch64TargetMachine;
class AArch64InstrInfo : public AArch64GenInstrInfo {
- // Reserve bits in the MachineMemOperand target hint flags, starting at 1.
- // They will be shifted into MOTargetHintStart when accessed.
- enum TargetMemOperandFlags {
- MOSuppressPair = 1
- };
-
const AArch64RegisterInfo RI;
const AArch64Subtarget &Subtarget;
@@ -45,76 +39,88 @@ public:
/// always be able to get register info as well (through this method).
const AArch64RegisterInfo &getRegisterInfo() const { return RI; }
- unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
+ unsigned GetInstSizeInBytes(const MachineInstr &MI) const;
- bool isAsCheapAsAMove(const MachineInstr *MI) const override;
+ bool isAsCheapAsAMove(const MachineInstr &MI) const override;
bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
unsigned &DstReg, unsigned &SubIdx) const override;
bool
- areMemAccessesTriviallyDisjoint(MachineInstr *MIa, MachineInstr *MIb,
+ areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
AliasAnalysis *AA = nullptr) const override;
- unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
- unsigned isStoreToStackSlot(const MachineInstr *MI,
+ unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// Returns true if there is a shiftable register and the shift value
/// is non-zero.
- bool hasShiftedReg(const MachineInstr *MI) const;
+ bool hasShiftedReg(const MachineInstr &MI) const;
/// Returns true if there is an extendable register and the extending
/// value is non-zero.
- bool hasExtendedReg(const MachineInstr *MI) const;
+ bool hasExtendedReg(const MachineInstr &MI) const;
/// \brief Does this instruction set its full destination register to zero?
- bool isGPRZero(const MachineInstr *MI) const;
+ bool isGPRZero(const MachineInstr &MI) const;
/// \brief Does this instruction rename a GPR without modifying bits?
- bool isGPRCopy(const MachineInstr *MI) const;
+ bool isGPRCopy(const MachineInstr &MI) const;
/// \brief Does this instruction rename an FPR without modifying bits?
- bool isFPRCopy(const MachineInstr *MI) const;
+ bool isFPRCopy(const MachineInstr &MI) const;
/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
- bool isScaledAddr(const MachineInstr *MI) const;
+ bool isScaledAddr(const MachineInstr &MI) const;
/// Return true if pairing the given load or store is hinted to be
/// unprofitable.
- bool isLdStPairSuppressed(const MachineInstr *MI) const;
+ bool isLdStPairSuppressed(const MachineInstr &MI) const;
+
+ /// Return true if the given opcode is an unscaled load/store.
+ bool isUnscaledLdSt(unsigned Opc) const;
+
+ /// Return true if this is an unscaled load/store.
+ bool isUnscaledLdSt(MachineInstr &MI) const;
+
+ /// Return true if this is a load/store that can potentially be paired/merged.
+ bool isCandidateToMergeOrPair(MachineInstr &MI) const;
/// Hint that pairing the given load or store is unprofitable.
- void suppressLdStPair(MachineInstr *MI) const;
+ void suppressLdStPair(MachineInstr &MI) const;
- bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
- unsigned &Offset,
+ bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
+ int64_t &Offset,
const TargetRegisterInfo *TRI) const override;
- bool getMemOpBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
- int &Offset, int &Width,
+ bool getMemOpBaseRegImmOfsWidth(MachineInstr &LdSt, unsigned &BaseReg,
+ int64_t &Offset, unsigned &Width,
const TargetRegisterInfo *TRI) const;
bool enableClusterLoads() const override { return true; }
- bool shouldClusterLoads(MachineInstr *FirstLdSt, MachineInstr *SecondLdSt,
- unsigned NumLoads) const override;
+ bool enableClusterStores() const override { return true; }
+
+ bool shouldClusterMemOps(MachineInstr &FirstLdSt, MachineInstr &SecondLdSt,
+ unsigned NumLoads) const override;
- bool shouldScheduleAdjacent(MachineInstr *First,
- MachineInstr *Second) const override;
+ bool shouldScheduleAdjacent(MachineInstr &First,
+ MachineInstr &Second) const override;
MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
uint64_t Offset, const MDNode *Var,
- const MDNode *Expr, DebugLoc DL) const;
+ const MDNode *Expr,
+ const DebugLoc &DL) const;
void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc, unsigned Opcode,
llvm::ArrayRef<unsigned> Indices) const;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -129,40 +135,47 @@ public:
const TargetRegisterInfo *TRI) const override;
using TargetInstrInfo::foldMemoryOperandImpl;
- MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt,
- int FrameIndex) const override;
+ MachineInstr *
+ foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
+ ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, int FrameIndex,
+ LiveIntervals *LIS = nullptr) const override;
- bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify = false) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const override;
+ const DebugLoc &DL) const override;
bool
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
unsigned, unsigned, int &, int &, int &) const override;
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
- DebugLoc DL, unsigned DstReg, ArrayRef<MachineOperand> Cond,
- unsigned TrueReg, unsigned FalseReg) const override;
+ const DebugLoc &DL, unsigned DstReg,
+ ArrayRef<MachineOperand> Cond, unsigned TrueReg,
+ unsigned FalseReg) const override;
void getNoopForMachoTarget(MCInst &NopInst) const override;
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
- bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+ bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const override;
/// optimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
- bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
+ bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int CmpMask, int CmpValue,
const MachineRegisterInfo *MRI) const override;
- bool optimizeCondBranch(MachineInstr *MI) const override;
+ bool optimizeCondBranch(MachineInstr &MI) const override;
+
+ /// Return true when a code sequence can improve throughput. It
+ /// should be called only for instructions in loops.
+ /// \param Pattern - combiner pattern
+ bool isThroughputPattern(MachineCombinerPattern Pattern) const override;
/// Return true when there is potentially a faster code sequence
/// for an instruction chain ending in <Root>. All potential patterns are
/// listed in the <Patterns> array.
@@ -179,10 +192,10 @@ public:
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
- /// useMachineCombiner - AArch64 supports MachineCombiner
+ /// AArch64 supports MachineCombiner.
bool useMachineCombiner() const override;
- bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
+ bool expandPostRAPseudo(MachineInstr &MI) const override;
std::pair<unsigned, unsigned>
decomposeMachineOperandsTargetFlags(unsigned TF) const override;
@@ -192,9 +205,11 @@ public:
getSerializableBitmaskMachineOperandTargetFlags() const override;
private:
- void instantiateCondBranch(MachineBasicBlock &MBB, DebugLoc DL,
+ void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
MachineBasicBlock *TBB,
ArrayRef<MachineOperand> Cond) const;
+ bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
+ const MachineRegisterInfo *MRI) const;
};
/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
@@ -202,8 +217,8 @@ private:
/// insertion (PEI) pass, where a virtual scratch register may be allocated
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
- DebugLoc DL, unsigned DestReg, unsigned SrcReg, int Offset,
- const TargetInstrInfo *TII,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
+ int Offset, const TargetInstrInfo *TII,
MachineInstr::MIFlag = MachineInstr::NoFlags,
bool SetNZCV = false);
diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td
index d02bc9ff394d..af9ed812e6da 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/lib/Target/AArch64/AArch64InstrInfo.td
@@ -26,6 +26,8 @@ def HasCrypto : Predicate<"Subtarget->hasCrypto()">,
AssemblerPredicate<"FeatureCrypto", "crypto">;
def HasCRC : Predicate<"Subtarget->hasCRC()">,
AssemblerPredicate<"FeatureCRC", "crc">;
+def HasRAS : Predicate<"Subtarget->hasRAS()">,
+ AssemblerPredicate<"FeatureRAS", "ras">;
def HasPerfMon : Predicate<"Subtarget->hasPerfMon()">;
def HasFullFP16 : Predicate<"Subtarget->hasFullFP16()">,
AssemblerPredicate<"FeatureFullFP16", "fullfp16">;
@@ -34,7 +36,8 @@ def HasSPE : Predicate<"Subtarget->hasSPE()">,
def IsLE : Predicate<"Subtarget->isLittleEndian()">;
def IsBE : Predicate<"!Subtarget->isLittleEndian()">;
-def IsCyclone : Predicate<"Subtarget->isCyclone()">;
+def UseAlternateSExtLoadCVTF32
+ : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">;
//===----------------------------------------------------------------------===//
// AArch64-specific DAG Nodes.
@@ -283,6 +286,9 @@ def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
def AArch64smull : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
def AArch64umull : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;
+def AArch64frecpe : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
+def AArch64frsqrte : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
+
def AArch64saddv : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
def AArch64uaddv : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
def AArch64sminv : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
@@ -295,9 +301,6 @@ def AArch64umaxv : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;
//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
-//
-def HasZCZ : Predicate<"Subtarget->hasZeroCycleZeroing()">;
-def NoZCZ : Predicate<"!Subtarget->hasZeroCycleZeroing()">;
def IsDarwin : Predicate<"Subtarget->isTargetDarwin()">;
def IsNotDarwin: Predicate<"!Subtarget->isTargetDarwin()">;
def ForCodeSize : Predicate<"ForCodeSize">;
@@ -312,10 +315,13 @@ include "AArch64InstrFormats.td"
//===----------------------------------------------------------------------===//
let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
+// We set Sched to an empty list because we expect these instructions to simply
+// be removed in most cases.
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt),
- [(AArch64callseq_start timm:$amt)]>;
+ [(AArch64callseq_start timm:$amt)]>, Sched<[]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
- [(AArch64callseq_end timm:$amt1, timm:$amt2)]>;
+ [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
+ Sched<[]>;
} // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1
let isReMaterializable = 1, isCodeGenOnly = 1 in {
@@ -383,6 +389,7 @@ def : InstAlias<"wfe", (HINT 0b010)>;
def : InstAlias<"wfi", (HINT 0b011)>;
def : InstAlias<"sev", (HINT 0b100)>;
def : InstAlias<"sevl", (HINT 0b101)>;
+def : InstAlias<"esb", (HINT 0b10000)>, Requires<[HasRAS]>;
// v8.2a Statistical Profiling extension
def : InstAlias<"psb $op", (HINT psbhint_op:$op)>, Requires<[HasSPE]>;
@@ -528,6 +535,12 @@ def i64imm_32bit : ImmLeaf<i64, [{
return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
}]>;
+def s64imm_32bit : ImmLeaf<i64, [{
+ int64_t Imm64 = static_cast<int64_t>(Imm);
+ return Imm64 >= std::numeric_limits<int32_t>::min() &&
+ Imm64 <= std::numeric_limits<int32_t>::max();
+}]>;
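+// For example, -5 and 0x7fffffff match s64imm_32bit, while 0x100000000 does
+// not: matching immediates survive truncation to 32 bits followed by sign
+// extension, so they can be materialized with MOVi32imm for SMADDL/SMSUBL.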
+
def trunc_imm : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
@@ -679,10 +692,11 @@ def : InstAlias<"negs $dst, $src$shift",
// Unsigned/Signed divide
defm UDIV : Div<0, "udiv", udiv>;
defm SDIV : Div<1, "sdiv", sdiv>;
-let isCodeGenOnly = 1 in {
-defm UDIV_Int : Div<0, "udiv", int_aarch64_udiv>;
-defm SDIV_Int : Div<1, "sdiv", int_aarch64_sdiv>;
-}
+
+def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr $Rn, $Rm)>;
+def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr $Rn, $Rm)>;
+def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr $Rn, $Rm)>;
+def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr $Rn, $Rm)>;
// Variable shift
defm ASRV : Shift<0b10, "asr", sra>;
@@ -734,6 +748,40 @@ def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
(SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
(UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
+
+def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
+ (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
+def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
+ (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
+def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
+ (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
+ (MOVi32imm (trunc_imm imm:$C)), XZR)>;
+
+def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
+ (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
+def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
+ (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
+def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
+ (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
+ (MOVi32imm (trunc_imm imm:$C)), XZR)>;
+
+def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
+ (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
+def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
+ (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
+def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
+ GPR64:$Ra)),
+ (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
+ (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
+
+def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
+ (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
+def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
+ (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
+def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
+ (s64imm_32bit:$C)))),
+ (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
+ (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
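+// For example (register assignment illustrative), with C = 100:
+//   (i64)(s32)w0 * 100 ==> mov w8, #100
+//                          smaddl x0, w0, w8, xzr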
} // AddedComplexity = 5
def : MulAccumWAlias<"mul", MADDWrrr>;
@@ -1089,6 +1137,14 @@ def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
(CSINVWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
(CSINVXr XZR, XZR, (i32 imm:$cc))>;
+def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
+ (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
+def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
+ (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
+def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
+ (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
+def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
+ (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
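+// CSINV Rd, Rn, Rm, cc returns Rn when cc holds and NOT(Rm) otherwise, so
+// inverting the zero register materializes the -1 operand with no extra
+// instruction.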
// The inverse of the condition code from the alias instruction is what is used
// in the aliased instruction. The parser already inverts the condition code
@@ -1158,7 +1214,8 @@ def BR : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
// Create a separate pseudo-instruction for codegen to use so that we don't
// flag lr as used in every function. It'll be restored before the RET by the
// epilogue if it's legitimately used.
-def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]> {
+def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>,
+ Sched<[WriteBrReg]> {
let isTerminator = 1;
let isBarrier = 1;
let isReturn = 1;
@@ -1168,7 +1225,7 @@ def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]> {
// R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
// (which in the usual case is a BLR).
let hasSideEffects = 1 in
-def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []> {
+def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> {
let AsmString = ".tlsdesccall $sym";
}
@@ -1178,7 +1235,8 @@ let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1,
isCodeGenOnly = 1 in
def TLSDESC_CALLSEQ
: Pseudo<(outs), (ins i64imm:$sym),
- [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>;
+ [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>,
+ Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>;
def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
(TLSDESC_CALLSEQ texternalsym:$sym)>;
@@ -2444,13 +2502,32 @@ defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", fp_to_uint>;
defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", fp_to_uint>;
-let isCodeGenOnly = 1 in {
-defm FCVTZS_Int : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", int_aarch64_neon_fcvtzs>;
-defm FCVTZU_Int : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", int_aarch64_neon_fcvtzu>;
-defm FCVTZS_Int : FPToIntegerScaled<0b11, 0b000, "fcvtzs", int_aarch64_neon_fcvtzs>;
-defm FCVTZU_Int : FPToIntegerScaled<0b11, 0b001, "fcvtzu", int_aarch64_neon_fcvtzu>;
+
+multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
+ def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
+ def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
+ def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
+ def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
+ def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
+ def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;
+
+ def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
+ (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
+ def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
+ (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
+ def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
+ (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
+ def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
+ (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
+ def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
+ (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
+ def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
+ (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
}
+defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
+defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;
+
multiclass FPToIntegerPats<SDNode to_int, SDNode round, string INST> {
def : Pat<(i32 (to_int (round f32:$Rn))),
(!cast<Instruction>(INST # UWSr) f32:$Rn)>;
@@ -2485,13 +2562,11 @@ defm UCVTF : IntegerToFP<1, "ucvtf", uint_to_fp>;
defm FMOV : UnscaledConversion<"fmov">;
// Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable
-let isReMaterializable = 1, isCodeGenOnly = 1 in {
+let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
- PseudoInstExpansion<(FMOVWSr FPR32:$Rd, WZR)>,
- Requires<[NoZCZ]>;
+ Sched<[WriteF]>;
def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
- PseudoInstExpansion<(FMOVXDr FPR64:$Rd, XZR)>,
- Requires<[NoZCZ]>;
+ Sched<[WriteF]>;
}
//===----------------------------------------------------------------------===//
@@ -2617,6 +2692,7 @@ def F128CSEL : Pseudo<(outs FPR128:$Rd),
(i32 imm:$cond), NZCV))]> {
let Uses = [NZCV];
let usesCustomInserter = 1;
+ let hasNoSchedulingInfo = 1;
}
@@ -2742,12 +2818,19 @@ defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
int_aarch64_neon_fcvtxn>;
defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>;
defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>;
-let isCodeGenOnly = 1 in {
-defm FCVTZS_Int : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs",
- int_aarch64_neon_fcvtzs>;
-defm FCVTZU_Int : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu",
- int_aarch64_neon_fcvtzu>;
-}
+
+def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
+def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
+def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
+def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>;
+def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>;
+
+def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>;
+def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>;
+def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>;
+def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>;
+def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>;
+
defm FNEG : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", frnd>;
@@ -3318,6 +3401,19 @@ def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
(FRECPEv1i64 FPR64:$Rn)>;
+def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
+ (FRECPEv1i32 FPR32:$Rn)>;
+def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
+ (FRECPEv2f32 V64:$Rn)>;
+def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
+ (FRECPEv4f32 FPR128:$Rn)>;
+def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
+ (FRECPEv1i64 FPR64:$Rn)>;
+def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
+ (FRECPEv1i64 FPR64:$Rn)>;
+def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
+ (FRECPEv2f64 FPR128:$Rn)>;
+
def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
(FRECPXv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
@@ -3330,6 +3426,19 @@ def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
(FRSQRTEv1i64 FPR64:$Rn)>;
+def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
+ (FRSQRTEv1i32 FPR32:$Rn)>;
+def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
+ (FRSQRTEv2f32 V64:$Rn)>;
+def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
+ (FRSQRTEv4f32 FPR128:$Rn)>;
+def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
+ (FRSQRTEv1i64 FPR64:$Rn)>;
+def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
+ (FRSQRTEv1i64 FPR64:$Rn)>;
+def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
+ (FRSQRTEv2f64 FPR128:$Rn)>;
+
// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// Here are the patterns for 8 and 16-bits to float.
@@ -4319,18 +4428,6 @@ def MOVIv2d_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
"movi", ".2d",
[(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;
-
-// Use movi.2d to materialize 0.0 if the HW does zero-cycle zeroing.
-// Complexity is added to break a tie with a plain MOVI.
-let AddedComplexity = 1 in {
-def : Pat<(f32 fpimm0),
- (f32 (EXTRACT_SUBREG (v2i64 (MOVIv2d_ns (i32 0))), ssub))>,
- Requires<[HasZCZ]>;
-def : Pat<(f64 fpimm0),
- (f64 (EXTRACT_SUBREG (v2i64 (MOVIv2d_ns (i32 0))), dsub))>,
- Requires<[HasZCZ]>;
-}
-
def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
@@ -4845,7 +4942,8 @@ class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
0),
dsub)),
0),
- ssub)))>, Requires<[NotForCodeSize, IsCyclone]>;
+ ssub)))>,
+ Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;
def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
(LDRBroW GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
@@ -4898,7 +4996,8 @@ class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
0),
dsub)),
0),
- dsub)))>, Requires<[NotForCodeSize, IsCyclone]>;
+ dsub)))>,
+ Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;
def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
(LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
@@ -5982,7 +6081,7 @@ def : NTStore64Pat<v8i8>;
def : Pat<(nontemporalstore GPR64:$Rt,
(am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
(STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
- (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 0, 31), sub_32),
+ (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
GPR64sp:$Rn, simm7s4:$offset)>;
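// UBFMXri Rt, 32, 63 is LSR Rt, #32, i.e. the high word of Rt; the previous
// (0, 31) encoding extracted the low word twice. For example (value
// illustrative), Rt = 0x1122334455667788 is now stored as the word pair
// (0x55667788, 0x11223344).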
} // AddedComplexity=10
} // Predicates = [IsLE]
@@ -5990,8 +6089,10 @@ def : Pat<(nontemporalstore GPR64:$Rt,
// Tail call return handling. These are all compiler pseudo-instructions,
// so no encoding information or anything like that.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
- def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff),[]>;
- def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>;
+ def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
+ Sched<[WriteBrReg]>;
+ def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
+ Sched<[WriteBrReg]>;
}
def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
diff --git a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 43664df3b861..dca13fc49414 100644
--- a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -33,9 +33,6 @@ using namespace llvm;
#define DEBUG_TYPE "aarch64-ldst-opt"
-/// AArch64AllocLoadStoreOpt - Post-register allocation pass to combine
-/// load / store instructions to form ldp / stp instructions.
-
STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
@@ -45,9 +42,19 @@ STATISTIC(NumNarrowLoadsPromoted, "Number of narrow loads promoted");
STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");
-static cl::opt<unsigned> ScanLimit("aarch64-load-store-scan-limit",
+// The LdStLimit limits how far we search for load/store pairs.
+static cl::opt<unsigned> LdStLimit("aarch64-load-store-scan-limit",
cl::init(20), cl::Hidden);
+// The UpdateLimit limits how far we search for update instructions when we form
+// pre-/post-index instructions.
+static cl::opt<unsigned> UpdateLimit("aarch64-update-scan-limit", cl::init(100),
+ cl::Hidden);
+
+static cl::opt<bool> EnableNarrowLdMerge("enable-narrow-ld-merge", cl::Hidden,
+ cl::init(false),
+ cl::desc("Enable narrow load merge"));
+
namespace llvm {
void initializeAArch64LoadStoreOptPass(PassRegistry &);
}
@@ -88,22 +95,29 @@ struct AArch64LoadStoreOpt : public MachineFunctionPass {
const TargetRegisterInfo *TRI;
const AArch64Subtarget *Subtarget;
+ // Track which registers have been modified and used.
+ BitVector ModifiedRegs, UsedRegs;
+
// Scan the instructions looking for a load/store that can be combined
// with the current instruction into a load/store pair.
// Return the matching instruction if one is found, else MBB->end().
MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
LdStPairFlags &Flags,
- unsigned Limit);
+ unsigned Limit,
+ bool FindNarrowMerge);
// Scan the instructions looking for a store that writes to the address from
// which the current load instruction reads. Return true if one is found.
bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
MachineBasicBlock::iterator &StoreI);
+ // Merge the two instructions indicated into a wider instruction.
+ MachineBasicBlock::iterator
+ mergeNarrowInsns(MachineBasicBlock::iterator I,
+ MachineBasicBlock::iterator MergeMI,
+ const LdStPairFlags &Flags);
+
// Merge the two instructions indicated into a single pair-wise instruction.
- // If MergeForward is true, erase the first instruction and fold its
- // operation into the second. If false, the reverse. Return the instruction
- // following the first instruction (which may change during processing).
MachineBasicBlock::iterator
mergePairedInsns(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator Paired,
@@ -118,8 +132,8 @@ struct AArch64LoadStoreOpt : public MachineFunctionPass {
// be combined with the current instruction (a load or store) using
// pre or post indexed addressing with writeback. Scan forwards.
MachineBasicBlock::iterator
- findMatchingUpdateInsnForward(MachineBasicBlock::iterator I, unsigned Limit,
- int UnscaledOffset);
+ findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
+ int UnscaledOffset, unsigned Limit);
// Scan the instruction list to find a base register update that can
// be combined with the current instruction (a load or store) using
@@ -129,7 +143,7 @@ struct AArch64LoadStoreOpt : public MachineFunctionPass {
// Find an instruction that updates the base register of the ld/st
// instruction.
- bool isMatchingUpdateInsn(MachineInstr *MemMI, MachineInstr *MI,
+ bool isMatchingUpdateInsn(MachineInstr &MemMI, MachineInstr &MI,
unsigned BaseReg, int Offset);
// Merge a pre- or post-index base register update into a ld/st instruction.
@@ -140,17 +154,21 @@ struct AArch64LoadStoreOpt : public MachineFunctionPass {
// Find and merge foldable ldr/str instructions.
bool tryToMergeLdStInst(MachineBasicBlock::iterator &MBBI);
+ // Find and pair ldr/str instructions.
+ bool tryToPairLdStInst(MachineBasicBlock::iterator &MBBI);
+
// Find and promote load instructions which read directly from store.
bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);
- // Check if converting two narrow loads into a single wider load with
- // bitfield extracts could be enabled.
- bool enableNarrowLdMerge(MachineFunction &Fn);
-
bool optimizeBlock(MachineBasicBlock &MBB, bool enableNarrowLdOpt);
bool runOnMachineFunction(MachineFunction &Fn) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return AARCH64_LOAD_STORE_OPT_NAME;
}
@@ -161,37 +179,8 @@ char AArch64LoadStoreOpt::ID = 0;
INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
AARCH64_LOAD_STORE_OPT_NAME, false, false)
-static bool isUnscaledLdSt(unsigned Opc) {
- switch (Opc) {
- default:
- return false;
- case AArch64::STURSi:
- case AArch64::STURDi:
- case AArch64::STURQi:
- case AArch64::STURBBi:
- case AArch64::STURHHi:
- case AArch64::STURWi:
- case AArch64::STURXi:
- case AArch64::LDURSi:
- case AArch64::LDURDi:
- case AArch64::LDURQi:
- case AArch64::LDURWi:
- case AArch64::LDURXi:
- case AArch64::LDURSWi:
- case AArch64::LDURHHi:
- case AArch64::LDURBBi:
- case AArch64::LDURSBWi:
- case AArch64::LDURSHWi:
- return true;
- }
-}
-
-static bool isUnscaledLdSt(MachineInstr *MI) {
- return isUnscaledLdSt(MI->getOpcode());
-}
-
-static unsigned getBitExtrOpcode(MachineInstr *MI) {
- switch (MI->getOpcode()) {
+static unsigned getBitExtrOpcode(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
default:
llvm_unreachable("Unexpected opcode.");
case AArch64::LDRBBui:
@@ -219,10 +208,6 @@ static bool isNarrowStore(unsigned Opc) {
}
}
-static bool isNarrowStore(MachineInstr *MI) {
- return isNarrowStore(MI->getOpcode());
-}
-
static bool isNarrowLoad(unsigned Opc) {
switch (Opc) {
default:
@@ -239,13 +224,17 @@ static bool isNarrowLoad(unsigned Opc) {
}
}
-static bool isNarrowLoad(MachineInstr *MI) {
- return isNarrowLoad(MI->getOpcode());
+static bool isNarrowLoad(MachineInstr &MI) {
+ return isNarrowLoad(MI.getOpcode());
+}
+
+static bool isNarrowLoadOrStore(unsigned Opc) {
+ return isNarrowLoad(Opc) || isNarrowStore(Opc);
}
// Scaling factor for unscaled load or store.
-static int getMemScale(MachineInstr *MI) {
- switch (MI->getOpcode()) {
+static int getMemScale(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
default:
llvm_unreachable("Opcode has unknown scale!");
case AArch64::LDRBBui:
@@ -354,6 +343,37 @@ static unsigned getMatchingNonSExtOpcode(unsigned Opc,
}
}
+static unsigned getMatchingWideOpcode(unsigned Opc) {
+ switch (Opc) {
+ default:
+ llvm_unreachable("Opcode has no wide equivalent!");
+ case AArch64::STRBBui:
+ return AArch64::STRHHui;
+ case AArch64::STRHHui:
+ return AArch64::STRWui;
+ case AArch64::STURBBi:
+ return AArch64::STURHHi;
+ case AArch64::STURHHi:
+ return AArch64::STURWi;
+ case AArch64::STURWi:
+ return AArch64::STURXi;
+ case AArch64::STRWui:
+ return AArch64::STRXui;
+ case AArch64::LDRHHui:
+ case AArch64::LDRSHWui:
+ return AArch64::LDRWui;
+ case AArch64::LDURHHi:
+ case AArch64::LDURSHWi:
+ return AArch64::LDURWi;
+ case AArch64::LDRBBui:
+ case AArch64::LDRSBWui:
+ return AArch64::LDRHHui;
+ case AArch64::LDURBBi:
+ case AArch64::LDURSBWi:
+ return AArch64::LDURHHi;
+ }
+}
+
static unsigned getMatchingPairOpcode(unsigned Opc) {
switch (Opc) {
default:
@@ -367,14 +387,6 @@ static unsigned getMatchingPairOpcode(unsigned Opc) {
case AArch64::STRQui:
case AArch64::STURQi:
return AArch64::STPQi;
- case AArch64::STRBBui:
- return AArch64::STRHHui;
- case AArch64::STRHHui:
- return AArch64::STRWui;
- case AArch64::STURBBi:
- return AArch64::STURHHi;
- case AArch64::STURHHi:
- return AArch64::STURWi;
case AArch64::STRWui:
case AArch64::STURWi:
return AArch64::STPWi;
@@ -399,25 +411,13 @@ static unsigned getMatchingPairOpcode(unsigned Opc) {
case AArch64::LDRSWui:
case AArch64::LDURSWi:
return AArch64::LDPSWi;
- case AArch64::LDRHHui:
- case AArch64::LDRSHWui:
- return AArch64::LDRWui;
- case AArch64::LDURHHi:
- case AArch64::LDURSHWi:
- return AArch64::LDURWi;
- case AArch64::LDRBBui:
- case AArch64::LDRSBWui:
- return AArch64::LDRHHui;
- case AArch64::LDURBBi:
- case AArch64::LDURSBWi:
- return AArch64::LDURHHi;
}
}
-static unsigned isMatchingStore(MachineInstr *LoadInst,
- MachineInstr *StoreInst) {
- unsigned LdOpc = LoadInst->getOpcode();
- unsigned StOpc = StoreInst->getOpcode();
+static unsigned isMatchingStore(MachineInstr &LoadInst,
+ MachineInstr &StoreInst) {
+ unsigned LdOpc = LoadInst.getOpcode();
+ unsigned StOpc = StoreInst.getOpcode();
switch (LdOpc) {
default:
llvm_unreachable("Unsupported load instruction!");
@@ -562,8 +562,8 @@ static unsigned getPostIndexedOpcode(unsigned Opc) {
}
}
-static bool isPairedLdSt(const MachineInstr *MI) {
- switch (MI->getOpcode()) {
+static bool isPairedLdSt(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
default:
return false;
case AArch64::LDPSi:
@@ -581,41 +581,55 @@ static bool isPairedLdSt(const MachineInstr *MI) {
}
}
-static const MachineOperand &getLdStRegOp(const MachineInstr *MI,
+static const MachineOperand &getLdStRegOp(const MachineInstr &MI,
unsigned PairedRegOp = 0) {
assert(PairedRegOp < 2 && "Unexpected register operand idx.");
unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
- return MI->getOperand(Idx);
+ return MI.getOperand(Idx);
}
-static const MachineOperand &getLdStBaseOp(const MachineInstr *MI) {
+static const MachineOperand &getLdStBaseOp(const MachineInstr &MI) {
unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
- return MI->getOperand(Idx);
+ return MI.getOperand(Idx);
}
-static const MachineOperand &getLdStOffsetOp(const MachineInstr *MI) {
+static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI) {
unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
- return MI->getOperand(Idx);
+ return MI.getOperand(Idx);
}
-static bool isLdOffsetInRangeOfSt(MachineInstr *LoadInst,
- MachineInstr *StoreInst) {
+static bool isLdOffsetInRangeOfSt(MachineInstr &LoadInst,
+ MachineInstr &StoreInst,
+ const AArch64InstrInfo *TII) {
assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
int LoadSize = getMemScale(LoadInst);
int StoreSize = getMemScale(StoreInst);
- int UnscaledStOffset = isUnscaledLdSt(StoreInst)
+ int UnscaledStOffset = TII->isUnscaledLdSt(StoreInst)
? getLdStOffsetOp(StoreInst).getImm()
: getLdStOffsetOp(StoreInst).getImm() * StoreSize;
- int UnscaledLdOffset = isUnscaledLdSt(LoadInst)
+ int UnscaledLdOffset = TII->isUnscaledLdSt(LoadInst)
? getLdStOffsetOp(LoadInst).getImm()
: getLdStOffsetOp(LoadInst).getImm() * LoadSize;
return (UnscaledStOffset <= UnscaledLdOffset) &&
(UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
}
+static bool isPromotableZeroStoreOpcode(unsigned Opc) {
+ return isNarrowStore(Opc) || Opc == AArch64::STRWui || Opc == AArch64::STURWi;
+}
+
+static bool isPromotableZeroStoreOpcode(MachineInstr &MI) {
+ return isPromotableZeroStoreOpcode(MI.getOpcode());
+}
+
+static bool isPromotableZeroStoreInst(MachineInstr &MI) {
+ return (isPromotableZeroStoreOpcode(MI)) &&
+ getLdStRegOp(MI).getReg() == AArch64::WZR;
+}
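
The promotion gated by these predicates merges adjacent zero stores into one wider store, e.g. strh wzr, [x0] followed by strh wzr, [x0, #2] becomes str wzr, [x0], and two 32-bit WZR stores become one 64-bit XZR store. A minimal standalone sketch of the opcode test, with a hypothetical enum standing in for the generated AArch64::* values:

```cpp
#include <cassert>

// Hypothetical stand-ins for the generated opcode values the pass inspects.
enum Opcode { STRBBui, STRHHui, STURBBi, STURHHi, STRWui, STURWi, LDRWui };

static bool isNarrowStore(Opcode Opc) {
  return Opc == STRBBui || Opc == STRHHui || Opc == STURBBi || Opc == STURHHi;
}

// Widening must keep storing zero: narrow stores widen to a WZR store, and
// a 32-bit WZR store can widen to a 64-bit XZR store.
static bool isPromotableZeroStoreOpcode(Opcode Opc) {
  return isNarrowStore(Opc) || Opc == STRWui || Opc == STURWi;
}

int main() {
  assert(isPromotableZeroStoreOpcode(STRHHui)); // strh wzr, [x0]
  assert(isPromotableZeroStoreOpcode(STRWui));  // str wzr, [x0]
  assert(!isPromotableZeroStoreOpcode(LDRWui)); // loads never qualify
}
```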
+
MachineBasicBlock::iterator
-AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
- MachineBasicBlock::iterator Paired,
+AArch64LoadStoreOpt::mergeNarrowInsns(MachineBasicBlock::iterator I,
+ MachineBasicBlock::iterator MergeMI,
const LdStPairFlags &Flags) {
MachineBasicBlock::iterator NextI = I;
++NextI;
@@ -623,128 +637,124 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
// to skip one further. Either way we merge will invalidate the iterator,
// and we don't need to scan the new instruction, as it's a pairwise
// instruction, which we're not considering for further action anyway.
- if (NextI == Paired)
+ if (NextI == MergeMI)
++NextI;
- int SExtIdx = Flags.getSExtIdx();
- unsigned Opc =
- SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
- bool IsUnscaled = isUnscaledLdSt(Opc);
- int OffsetStride = IsUnscaled ? getMemScale(I) : 1;
+ unsigned Opc = I->getOpcode();
+ bool IsScaled = !TII->isUnscaledLdSt(Opc);
+ int OffsetStride = IsScaled ? 1 : getMemScale(*I);
bool MergeForward = Flags.getMergeForward();
- unsigned NewOpc = getMatchingPairOpcode(Opc);
// Insert our new paired instruction after whichever of the paired
// instructions MergeForward indicates.
- MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
+ MachineBasicBlock::iterator InsertionPoint = MergeForward ? MergeMI : I;
// Also based on MergeForward is from where we copy the base register operand
// so we get the flags compatible with the input code.
const MachineOperand &BaseRegOp =
- MergeForward ? getLdStBaseOp(Paired) : getLdStBaseOp(I);
+ MergeForward ? getLdStBaseOp(*MergeMI) : getLdStBaseOp(*I);
// Which register is Rt and which is Rt2 depends on the offset order.
MachineInstr *RtMI, *Rt2MI;
- if (getLdStOffsetOp(I).getImm() ==
- getLdStOffsetOp(Paired).getImm() + OffsetStride) {
- RtMI = Paired;
- Rt2MI = I;
- // Here we swapped the assumption made for SExtIdx.
- // I.e., we turn ldp I, Paired into ldp Paired, I.
- // Update the index accordingly.
- if (SExtIdx != -1)
- SExtIdx = (SExtIdx + 1) % 2;
+ if (getLdStOffsetOp(*I).getImm() ==
+ getLdStOffsetOp(*MergeMI).getImm() + OffsetStride) {
+ RtMI = &*MergeMI;
+ Rt2MI = &*I;
} else {
- RtMI = I;
- Rt2MI = Paired;
+ RtMI = &*I;
+ Rt2MI = &*MergeMI;
}
- int OffsetImm = getLdStOffsetOp(RtMI).getImm();
+ int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
+ // Change the scaled offset from small to large type.
+ if (IsScaled) {
+ assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
+ OffsetImm /= 2;
+ }
+ DebugLoc DL = I->getDebugLoc();
+ MachineBasicBlock *MBB = I->getParent();
if (isNarrowLoad(Opc)) {
- // Change the scaled offset from small to large type.
- if (!IsUnscaled) {
- assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
- OffsetImm /= 2;
- }
- MachineInstr *RtNewDest = MergeForward ? I : Paired;
+ MachineInstr *RtNewDest = &*(MergeForward ? I : MergeMI);
// When merging small (< 32 bit) loads for big-endian targets, the order of
// the component parts gets swapped.
if (!Subtarget->isLittleEndian())
std::swap(RtMI, Rt2MI);
// Construct the new load instruction.
MachineInstr *NewMemMI, *BitExtMI1, *BitExtMI2;
- NewMemMI = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
- TII->get(NewOpc))
- .addOperand(getLdStRegOp(RtNewDest))
- .addOperand(BaseRegOp)
- .addImm(OffsetImm)
- .setMemRefs(I->mergeMemRefsWith(*Paired));
+ NewMemMI =
+ BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc)))
+ .addOperand(getLdStRegOp(*RtNewDest))
+ .addOperand(BaseRegOp)
+ .addImm(OffsetImm)
+ .setMemRefs(I->mergeMemRefsWith(*MergeMI));
+ (void)NewMemMI;
DEBUG(
dbgs()
<< "Creating the new load and extract. Replacing instructions:\n ");
DEBUG(I->print(dbgs()));
DEBUG(dbgs() << " ");
- DEBUG(Paired->print(dbgs()));
+ DEBUG(MergeMI->print(dbgs()));
DEBUG(dbgs() << " with instructions:\n ");
DEBUG((NewMemMI)->print(dbgs()));
- int Width = getMemScale(I) == 1 ? 8 : 16;
+ int Width = getMemScale(*I) == 1 ? 8 : 16;
int LSBLow = 0;
int LSBHigh = Width;
int ImmsLow = LSBLow + Width - 1;
int ImmsHigh = LSBHigh + Width - 1;
- MachineInstr *ExtDestMI = MergeForward ? Paired : I;
+ MachineInstr *ExtDestMI = &*(MergeForward ? MergeMI : I);
if ((ExtDestMI == Rt2MI) == Subtarget->isLittleEndian()) {
// Create the bitfield extract for high bits.
- BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
- TII->get(getBitExtrOpcode(Rt2MI)))
- .addOperand(getLdStRegOp(Rt2MI))
- .addReg(getLdStRegOp(RtNewDest).getReg())
- .addImm(LSBHigh)
- .addImm(ImmsHigh);
+ BitExtMI1 =
+ BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(*Rt2MI)))
+ .addOperand(getLdStRegOp(*Rt2MI))
+ .addReg(getLdStRegOp(*RtNewDest).getReg())
+ .addImm(LSBHigh)
+ .addImm(ImmsHigh);
// Create the bitfield extract for low bits.
if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
// For unsigned, prefer to use AND for low bits.
- BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
- TII->get(AArch64::ANDWri))
- .addOperand(getLdStRegOp(RtMI))
- .addReg(getLdStRegOp(RtNewDest).getReg())
+ BitExtMI2 = BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::ANDWri))
+ .addOperand(getLdStRegOp(*RtMI))
+ .addReg(getLdStRegOp(*RtNewDest).getReg())
.addImm(ImmsLow);
} else {
- BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
- TII->get(getBitExtrOpcode(RtMI)))
- .addOperand(getLdStRegOp(RtMI))
- .addReg(getLdStRegOp(RtNewDest).getReg())
- .addImm(LSBLow)
- .addImm(ImmsLow);
+ BitExtMI2 =
+ BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(*RtMI)))
+ .addOperand(getLdStRegOp(*RtMI))
+ .addReg(getLdStRegOp(*RtNewDest).getReg())
+ .addImm(LSBLow)
+ .addImm(ImmsLow);
}
} else {
// Create the bitfield extract for low bits.
if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
// For unsigned, prefer to use AND for low bits.
- BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
- TII->get(AArch64::ANDWri))
- .addOperand(getLdStRegOp(RtMI))
- .addReg(getLdStRegOp(RtNewDest).getReg())
+ BitExtMI1 = BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::ANDWri))
+ .addOperand(getLdStRegOp(*RtMI))
+ .addReg(getLdStRegOp(*RtNewDest).getReg())
.addImm(ImmsLow);
} else {
- BitExtMI1 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
- TII->get(getBitExtrOpcode(RtMI)))
- .addOperand(getLdStRegOp(RtMI))
- .addReg(getLdStRegOp(RtNewDest).getReg())
- .addImm(LSBLow)
- .addImm(ImmsLow);
+ BitExtMI1 =
+ BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(*RtMI)))
+ .addOperand(getLdStRegOp(*RtMI))
+ .addReg(getLdStRegOp(*RtNewDest).getReg())
+ .addImm(LSBLow)
+ .addImm(ImmsLow);
}
// Create the bitfield extract for high bits.
- BitExtMI2 = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
- TII->get(getBitExtrOpcode(Rt2MI)))
- .addOperand(getLdStRegOp(Rt2MI))
- .addReg(getLdStRegOp(RtNewDest).getReg())
- .addImm(LSBHigh)
- .addImm(ImmsHigh);
+ BitExtMI2 =
+ BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(*Rt2MI)))
+ .addOperand(getLdStRegOp(*Rt2MI))
+ .addReg(getLdStRegOp(*RtNewDest).getReg())
+ .addImm(LSBHigh)
+ .addImm(ImmsHigh);
}
+ (void)BitExtMI1;
+ (void)BitExtMI2;
+
DEBUG(dbgs() << " ");
DEBUG((BitExtMI1)->print(dbgs()));
DEBUG(dbgs() << " ");
@@ -753,47 +763,122 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
// Erase the old instructions.
I->eraseFromParent();
- Paired->eraseFromParent();
+ MergeMI->eraseFromParent();
return NextI;
}
+ assert(isPromotableZeroStoreInst(*I) && isPromotableZeroStoreInst(*MergeMI) &&
+ "Expected promotable zero store");
// Construct the new instruction.
MachineInstrBuilder MIB;
- if (isNarrowStore(Opc)) {
- // Change the scaled offset from small to large type.
- if (!IsUnscaled) {
- assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
- OffsetImm /= 2;
+ MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc)))
+ .addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
+ .addOperand(BaseRegOp)
+ .addImm(OffsetImm)
+ .setMemRefs(I->mergeMemRefsWith(*MergeMI));
+ (void)MIB;
+
+ DEBUG(dbgs() << "Creating wider load/store. Replacing instructions:\n ");
+ DEBUG(I->print(dbgs()));
+ DEBUG(dbgs() << " ");
+ DEBUG(MergeMI->print(dbgs()));
+ DEBUG(dbgs() << " with instruction:\n ");
+ DEBUG(((MachineInstr *)MIB)->print(dbgs()));
+ DEBUG(dbgs() << "\n");
+
+ // Erase the old instructions.
+ I->eraseFromParent();
+ MergeMI->eraseFromParent();
+ return NextI;
+}
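
For the load half of this routine, the two original narrow values are recovered from the single wide load with bitfield extracts: UBFX for the high half and AND (or a signed extract for the sign-extending variants) for the low half. A standalone little-endian model of the recovery:

```cpp
#include <cassert>
#include <cstdint>

// Models UBFX(Val, Lsb, Width) for Width < 32.
static uint32_t ubfx(uint32_t Val, unsigned Lsb, unsigned Width) {
  return (Val >> Lsb) & ((1u << Width) - 1);
}

int main() {
  // ldrh w0, [x2]; ldrh w1, [x2, #2]  becomes  ldr w0, [x2] plus extracts.
  uint32_t Wide = 0xBEEF1234;           // value loaded by the wide ldr
  assert(ubfx(Wide, 0, 16) == 0x1234);  // low half: the first ldrh's value
  assert(ubfx(Wide, 16, 16) == 0xBEEF); // high half: the second ldrh's value
}
```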
+
+MachineBasicBlock::iterator
+AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
+ MachineBasicBlock::iterator Paired,
+ const LdStPairFlags &Flags) {
+ MachineBasicBlock::iterator NextI = I;
+ ++NextI;
+ // If NextI is the second of the two instructions to be merged, we need
+  // to skip one further. Either way, the merge will invalidate the iterator,
+ // and we don't need to scan the new instruction, as it's a pairwise
+ // instruction, which we're not considering for further action anyway.
+ if (NextI == Paired)
+ ++NextI;
+
+ int SExtIdx = Flags.getSExtIdx();
+ unsigned Opc =
+ SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
+ bool IsUnscaled = TII->isUnscaledLdSt(Opc);
+ int OffsetStride = IsUnscaled ? getMemScale(*I) : 1;
+
+ bool MergeForward = Flags.getMergeForward();
+ // Insert our new paired instruction after whichever of the paired
+ // instructions MergeForward indicates.
+ MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
+ // Also based on MergeForward is from where we copy the base register operand
+ // so we get the flags compatible with the input code.
+ const MachineOperand &BaseRegOp =
+ MergeForward ? getLdStBaseOp(*Paired) : getLdStBaseOp(*I);
+
+ int Offset = getLdStOffsetOp(*I).getImm();
+ int PairedOffset = getLdStOffsetOp(*Paired).getImm();
+ bool PairedIsUnscaled = TII->isUnscaledLdSt(Paired->getOpcode());
+ if (IsUnscaled != PairedIsUnscaled) {
+ // We're trying to pair instructions that differ in how they are scaled. If
+ // I is scaled then scale the offset of Paired accordingly. Otherwise, do
+ // the opposite (i.e., make Paired's offset unscaled).
+ int MemSize = getMemScale(*Paired);
+ if (PairedIsUnscaled) {
+ // If the unscaled offset isn't a multiple of the MemSize, we can't
+ // pair the operations together.
+ assert(!(PairedOffset % getMemScale(*Paired)) &&
+ "Offset should be a multiple of the stride!");
+ PairedOffset /= MemSize;
+ } else {
+ PairedOffset *= MemSize;
}
- MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
- TII->get(NewOpc))
- .addOperand(getLdStRegOp(I))
- .addOperand(BaseRegOp)
- .addImm(OffsetImm)
- .setMemRefs(I->mergeMemRefsWith(*Paired));
+ }
+
+ // Which register is Rt and which is Rt2 depends on the offset order.
+ MachineInstr *RtMI, *Rt2MI;
+ if (Offset == PairedOffset + OffsetStride) {
+ RtMI = &*Paired;
+ Rt2MI = &*I;
+ // Here we swapped the assumption made for SExtIdx.
+ // I.e., we turn ldp I, Paired into ldp Paired, I.
+ // Update the index accordingly.
+ if (SExtIdx != -1)
+ SExtIdx = (SExtIdx + 1) % 2;
} else {
- // Handle Unscaled
- if (IsUnscaled)
- OffsetImm /= OffsetStride;
- MIB = BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
- TII->get(NewOpc))
- .addOperand(getLdStRegOp(RtMI))
- .addOperand(getLdStRegOp(Rt2MI))
- .addOperand(BaseRegOp)
- .addImm(OffsetImm);
+ RtMI = &*I;
+ Rt2MI = &*Paired;
+ }
+ int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
+ // Scale the immediate offset, if necessary.
+ if (TII->isUnscaledLdSt(RtMI->getOpcode())) {
+ assert(!(OffsetImm % getMemScale(*RtMI)) &&
+ "Unscaled offset cannot be scaled.");
+ OffsetImm /= getMemScale(*RtMI);
}
- (void)MIB;
+ // Construct the new instruction.
+ MachineInstrBuilder MIB;
+ DebugLoc DL = I->getDebugLoc();
+ MachineBasicBlock *MBB = I->getParent();
+ MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingPairOpcode(Opc)))
+ .addOperand(getLdStRegOp(*RtMI))
+ .addOperand(getLdStRegOp(*Rt2MI))
+ .addOperand(BaseRegOp)
+ .addImm(OffsetImm)
+ .setMemRefs(I->mergeMemRefsWith(*Paired));
- // FIXME: Do we need/want to copy the mem operands from the source
- // instructions? Probably. What uses them after this?
+ (void)MIB;
DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n ");
DEBUG(I->print(dbgs()));
DEBUG(dbgs() << " ");
DEBUG(Paired->print(dbgs()));
DEBUG(dbgs() << " with instruction:\n ");
-
if (SExtIdx != -1) {
// Generate the sign extension for the proper result of the ldp.
// I.e., with X1, that would be:
@@ -814,26 +899,23 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
// Insert this definition right after the generated LDP, i.e., before
// InsertionPoint.
MachineInstrBuilder MIBKill =
- BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
- TII->get(TargetOpcode::KILL), DstRegW)
+ BuildMI(*MBB, InsertionPoint, DL, TII->get(TargetOpcode::KILL), DstRegW)
.addReg(DstRegW)
.addReg(DstRegX, RegState::Define);
MIBKill->getOperand(2).setImplicit();
// Create the sign extension.
MachineInstrBuilder MIBSXTW =
- BuildMI(*I->getParent(), InsertionPoint, I->getDebugLoc(),
- TII->get(AArch64::SBFMXri), DstRegX)
+ BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::SBFMXri), DstRegX)
.addReg(DstRegX)
.addImm(0)
.addImm(31);
(void)MIBSXTW;
DEBUG(dbgs() << " Extend operand:\n ");
DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
- DEBUG(dbgs() << "\n");
} else {
DEBUG(((MachineInstr *)MIB)->print(dbgs()));
- DEBUG(dbgs() << "\n");
}
+ DEBUG(dbgs() << "\n");
// Erase the old instructions.
I->eraseFromParent();
@@ -848,10 +930,10 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
MachineBasicBlock::iterator NextI = LoadI;
++NextI;
- int LoadSize = getMemScale(LoadI);
- int StoreSize = getMemScale(StoreI);
- unsigned LdRt = getLdStRegOp(LoadI).getReg();
- unsigned StRt = getLdStRegOp(StoreI).getReg();
+ int LoadSize = getMemScale(*LoadI);
+ int StoreSize = getMemScale(*StoreI);
+ unsigned LdRt = getLdStRegOp(*LoadI).getReg();
+ unsigned StRt = getLdStRegOp(*StoreI).getReg();
bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);
assert((IsStoreXReg ||
@@ -881,15 +963,16 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
// performance and correctness are verified only in little-endian.
if (!Subtarget->isLittleEndian())
return NextI;
- bool IsUnscaled = isUnscaledLdSt(LoadI);
- assert(IsUnscaled == isUnscaledLdSt(StoreI) && "Unsupported ld/st match");
+ bool IsUnscaled = TII->isUnscaledLdSt(*LoadI);
+ assert(IsUnscaled == TII->isUnscaledLdSt(*StoreI) &&
+ "Unsupported ld/st match");
assert(LoadSize <= StoreSize && "Invalid load size");
int UnscaledLdOffset = IsUnscaled
- ? getLdStOffsetOp(LoadI).getImm()
- : getLdStOffsetOp(LoadI).getImm() * LoadSize;
+ ? getLdStOffsetOp(*LoadI).getImm()
+ : getLdStOffsetOp(*LoadI).getImm() * LoadSize;
int UnscaledStOffset = IsUnscaled
- ? getLdStOffsetOp(StoreI).getImm()
- : getLdStOffsetOp(StoreI).getImm() * StoreSize;
+ ? getLdStOffsetOp(*StoreI).getImm()
+ : getLdStOffsetOp(*StoreI).getImm() * StoreSize;
int Width = LoadSize * 8;
int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
int Imms = Immr + Width - 1;
@@ -926,6 +1009,7 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
.addImm(Imms);
}
}
+ (void)BitExtMI;
DEBUG(dbgs() << "Promoting load by replacing :\n ");
DEBUG(StoreI->print(dbgs()));
@@ -944,16 +1028,18 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
/// trackRegDefsUses - Remember what registers the specified instruction uses
/// and modifies.
-static void trackRegDefsUses(const MachineInstr *MI, BitVector &ModifiedRegs,
+static void trackRegDefsUses(const MachineInstr &MI, BitVector &ModifiedRegs,
BitVector &UsedRegs,
const TargetRegisterInfo *TRI) {
- for (const MachineOperand &MO : MI->operands()) {
+ for (const MachineOperand &MO : MI.operands()) {
if (MO.isRegMask())
ModifiedRegs.setBitsNotInMask(MO.getRegMask());
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
if (MO.isDef()) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
ModifiedRegs.set(*AI);
@@ -968,38 +1054,42 @@ static void trackRegDefsUses(const MachineInstr *MI, BitVector &ModifiedRegs,
static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
// Convert the byte-offset used by unscaled into an "element" offset used
// by the scaled pair load/store instructions.
- if (IsUnscaled)
+ if (IsUnscaled) {
+ // If the byte-offset isn't a multiple of the stride, there's no point
+ // trying to match it.
+ if (Offset % OffsetStride)
+ return false;
Offset /= OffsetStride;
-
+ }
return Offset <= 63 && Offset >= -64;
}
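
Restated standalone, the post-patch range check reads as follows (a sketch, not the LLVM function itself): paired instructions encode a 7-bit signed element offset, and an unscaled byte offset must first divide evenly by the element size.

```cpp
#include <cassert>

static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  if (IsUnscaled) {
    if (Offset % OffsetStride)
      return false;          // misaligned byte offset can't be expressed
    Offset /= OffsetStride;  // bytes -> element units
  }
  return Offset <= 63 && Offset >= -64;  // 7-bit signed element offset
}

int main() {
  assert(inBoundsForPair(false, 63, 1));   // scaled, at the upper bound
  assert(!inBoundsForPair(false, 64, 1));  // scaled, out of range
  assert(inBoundsForPair(true, 24, 8));    // 24 bytes = 3 doubleword elements
  assert(!inBoundsForPair(true, 20, 8));   // 20 is not a multiple of 8
}
```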
// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint_64t to int when
-// using RoundUpToAlignment from include/llvm/Support/MathExtras.h.
+// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
static int alignTo(int Num, int PowOf2) {
return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}
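
A quick standalone demonstration of this signed power-of-two round-up (values chosen for illustration):

```cpp
#include <cassert>

static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}

int main() {
  assert(alignTo(5, 4) == 8);   // rounds up to the next multiple
  assert(alignTo(8, 4) == 8);   // already aligned: unchanged
  assert(alignTo(-3, 4) == 0);  // also correct for negative offsets
}
```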
-static bool mayAlias(MachineInstr *MIa, MachineInstr *MIb,
+static bool mayAlias(MachineInstr &MIa, MachineInstr &MIb,
const AArch64InstrInfo *TII) {
// One of the instructions must modify memory.
- if (!MIa->mayStore() && !MIb->mayStore())
+ if (!MIa.mayStore() && !MIb.mayStore())
return false;
// Both instructions must be memory operations.
- if (!MIa->mayLoadOrStore() && !MIb->mayLoadOrStore())
+ if (!MIa.mayLoadOrStore() && !MIb.mayLoadOrStore())
return false;
return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
}
-static bool mayAlias(MachineInstr *MIa,
+static bool mayAlias(MachineInstr &MIa,
SmallVectorImpl<MachineInstr *> &MemInsns,
const AArch64InstrInfo *TII) {
- for (auto &MIb : MemInsns)
- if (mayAlias(MIa, MIb, TII))
+ for (MachineInstr *MIb : MemInsns)
+ if (mayAlias(MIa, *MIb, TII))
return true;
return false;
@@ -1008,40 +1098,43 @@ static bool mayAlias(MachineInstr *MIa,
bool AArch64LoadStoreOpt::findMatchingStore(
MachineBasicBlock::iterator I, unsigned Limit,
MachineBasicBlock::iterator &StoreI) {
- MachineBasicBlock::iterator E = I->getParent()->begin();
+ MachineBasicBlock::iterator B = I->getParent()->begin();
MachineBasicBlock::iterator MBBI = I;
- MachineInstr *FirstMI = I;
- unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
+ MachineInstr &LoadMI = *I;
+ unsigned BaseReg = getLdStBaseOp(LoadMI).getReg();
+
+ // If the load is the first instruction in the block, there's obviously
+ // not any matching store.
+ if (MBBI == B)
+ return false;
// Track which registers have been modified and used between the first insn
// and the second insn.
- BitVector ModifiedRegs, UsedRegs;
- ModifiedRegs.resize(TRI->getNumRegs());
- UsedRegs.resize(TRI->getNumRegs());
+ ModifiedRegs.reset();
+ UsedRegs.reset();
- for (unsigned Count = 0; MBBI != E && Count < Limit;) {
+ unsigned Count = 0;
+ do {
--MBBI;
- MachineInstr *MI = MBBI;
- // Skip DBG_VALUE instructions. Otherwise debug info can affect the
- // optimization by changing how far we scan.
- if (MI->isDebugValue())
- continue;
- // Now that we know this is a real instruction, count it.
- ++Count;
+ MachineInstr &MI = *MBBI;
+
+ // Don't count DBG_VALUE instructions towards the search limit.
+ if (!MI.isDebugValue())
+ ++Count;
// If the load instruction reads directly from the address to which the
// store instruction writes and the stored value is not modified, we can
// promote the load. Since we do not handle stores with pre-/post-index,
// it's unnecessary to check if BaseReg is modified by the store itself.
- if (MI->mayStore() && isMatchingStore(FirstMI, MI) &&
+ if (MI.mayStore() && isMatchingStore(LoadMI, MI) &&
BaseReg == getLdStBaseOp(MI).getReg() &&
- isLdOffsetInRangeOfSt(FirstMI, MI) &&
+ isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&
!ModifiedRegs[getLdStRegOp(MI).getReg()]) {
StoreI = MBBI;
return true;
}
- if (MI->isCall())
+ if (MI.isCall())
return false;
// Update modified / uses register lists.
@@ -1053,139 +1146,165 @@ bool AArch64LoadStoreOpt::findMatchingStore(
return false;
// If we encounter a store aliased with the load, return early.
- if (MI->mayStore() && mayAlias(FirstMI, MI, TII))
+ if (MI.mayStore() && mayAlias(LoadMI, MI, TII))
return false;
- }
+ } while (MBBI != B && Count < Limit);
return false;
}
-/// findMatchingInsn - Scan the instructions looking for a load/store that can
-/// be combined with the current instruction into a load/store pair.
+// Returns true if FirstMI and MI are candidates for merging or pairing.
+// Otherwise, returns false.
+static bool areCandidatesToMergeOrPair(MachineInstr &FirstMI, MachineInstr &MI,
+ LdStPairFlags &Flags,
+ const AArch64InstrInfo *TII) {
+ // If this is volatile or if pairing is suppressed, not a candidate.
+ if (MI.hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
+ return false;
+
+ // We should have already checked FirstMI for pair suppression and volatility.
+ assert(!FirstMI.hasOrderedMemoryRef() &&
+ !TII->isLdStPairSuppressed(FirstMI) &&
+ "FirstMI shouldn't get here if either of these checks are true.");
+
+ unsigned OpcA = FirstMI.getOpcode();
+ unsigned OpcB = MI.getOpcode();
+
+ // Opcodes match: nothing more to check.
+ if (OpcA == OpcB)
+ return true;
+
+ // Try to match a sign-extended load/store with a zero-extended load/store.
+ bool IsValidLdStrOpc, PairIsValidLdStrOpc;
+ unsigned NonSExtOpc = getMatchingNonSExtOpcode(OpcA, &IsValidLdStrOpc);
+ assert(IsValidLdStrOpc &&
+ "Given Opc should be a Load or Store with an immediate");
+ // OpcA will be the first instruction in the pair.
+ if (NonSExtOpc == getMatchingNonSExtOpcode(OpcB, &PairIsValidLdStrOpc)) {
+ Flags.setSExtIdx(NonSExtOpc == (unsigned)OpcA ? 1 : 0);
+ return true;
+ }
+
+ // If the second instruction isn't even a load/store, bail out.
+ if (!PairIsValidLdStrOpc)
+ return false;
+
+ // FIXME: We don't support merging narrow loads/stores with mixed
+ // scaled/unscaled offsets.
+ if (isNarrowLoadOrStore(OpcA) || isNarrowLoadOrStore(OpcB))
+ return false;
+
+ // Try to match an unscaled load/store with a scaled load/store.
+ return TII->isUnscaledLdSt(OpcA) != TII->isUnscaledLdSt(OpcB) &&
+ getMatchingPairOpcode(OpcA) == getMatchingPairOpcode(OpcB);
+
+ // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?
+}
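
The mixed scaled/unscaled match accepted here only works when both offsets can be put in a common unit; the callers (findMatchingInsn and mergePairedInsns) rewrite one offset into the other's units before comparing. A standalone sketch of that normalization, with illustrative names:

```cpp
#include <cassert>

// Convert an unscaled byte offset into scaled element units, failing when
// the byte offset is not a multiple of the access size.
static bool normalizeUnscaledOffset(int ByteOffset, int MemSize,
                                    int &ElemOffset) {
  if (ByteOffset % MemSize)
    return false;
  ElemOffset = ByteOffset / MemSize;
  return true;
}

int main() {
  int Elem;
  assert(normalizeUnscaledOffset(8, 8, Elem) && Elem == 1); // ldur #8 ~ ldr #1
  assert(!normalizeUnscaledOffset(12, 8, Elem));            // misaligned
}
```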
+
+/// Scan the instructions looking for a load/store that can be combined with the
+/// current instruction into a wider equivalent or a load/store pair.
MachineBasicBlock::iterator
AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
- LdStPairFlags &Flags, unsigned Limit) {
+ LdStPairFlags &Flags, unsigned Limit,
+ bool FindNarrowMerge) {
MachineBasicBlock::iterator E = I->getParent()->end();
MachineBasicBlock::iterator MBBI = I;
- MachineInstr *FirstMI = I;
+ MachineInstr &FirstMI = *I;
++MBBI;
- unsigned Opc = FirstMI->getOpcode();
- bool MayLoad = FirstMI->mayLoad();
- bool IsUnscaled = isUnscaledLdSt(FirstMI);
+ bool MayLoad = FirstMI.mayLoad();
+ bool IsUnscaled = TII->isUnscaledLdSt(FirstMI);
unsigned Reg = getLdStRegOp(FirstMI).getReg();
unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
int Offset = getLdStOffsetOp(FirstMI).getImm();
- bool IsNarrowStore = isNarrowStore(Opc);
-
- // For narrow stores, find only the case where the stored value is WZR.
- if (IsNarrowStore && Reg != AArch64::WZR)
- return E;
-
- // Early exit if the first instruction modifies the base register.
- // e.g., ldr x0, [x0]
- if (FirstMI->modifiesRegister(BaseReg, TRI))
- return E;
-
- // Early exit if the offset if not possible to match. (6 bits of positive
- // range, plus allow an extra one in case we find a later insn that matches
- // with Offset-1)
int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
- if (!(isNarrowLoad(Opc) || IsNarrowStore) &&
- !inBoundsForPair(IsUnscaled, Offset, OffsetStride))
- return E;
+ bool IsPromotableZeroStore = isPromotableZeroStoreInst(FirstMI);
// Track which registers have been modified and used between the first insn
// (inclusive) and the second insn.
- BitVector ModifiedRegs, UsedRegs;
- ModifiedRegs.resize(TRI->getNumRegs());
- UsedRegs.resize(TRI->getNumRegs());
+ ModifiedRegs.reset();
+ UsedRegs.reset();
// Remember any instructions that read/write memory between FirstMI and MI.
SmallVector<MachineInstr *, 4> MemInsns;
for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
- MachineInstr *MI = MBBI;
+ MachineInstr &MI = *MBBI;
// Skip DBG_VALUE instructions. Otherwise debug info can affect the
// optimization by changing how far we scan.
- if (MI->isDebugValue())
+ if (MI.isDebugValue())
continue;
// Now that we know this is a real instruction, count it.
++Count;
- bool CanMergeOpc = Opc == MI->getOpcode();
Flags.setSExtIdx(-1);
- if (!CanMergeOpc) {
- bool IsValidLdStrOpc;
- unsigned NonSExtOpc = getMatchingNonSExtOpcode(Opc, &IsValidLdStrOpc);
- assert(IsValidLdStrOpc &&
- "Given Opc should be a Load or Store with an immediate");
- // Opc will be the first instruction in the pair.
- Flags.setSExtIdx(NonSExtOpc == (unsigned)Opc ? 1 : 0);
- CanMergeOpc = NonSExtOpc == getMatchingNonSExtOpcode(MI->getOpcode());
- }
-
- if (CanMergeOpc && getLdStOffsetOp(MI).isImm()) {
- assert(MI->mayLoadOrStore() && "Expected memory operation.");
+ if (areCandidatesToMergeOrPair(FirstMI, MI, Flags, TII) &&
+ getLdStOffsetOp(MI).isImm()) {
+ assert(MI.mayLoadOrStore() && "Expected memory operation.");
// If we've found another instruction with the same opcode, check to see
// if the base and offset are compatible with our starting instruction.
// These instructions all have scaled immediate operands, so we just
// check for +1/-1. Make sure to check the new instruction offset is
// actually an immediate and not a symbolic reference destined for
// a relocation.
- //
- // Pairwise instructions have a 7-bit signed offset field. Single insns
- // have a 12-bit unsigned offset field. To be a valid combine, the
- // final offset must be in range.
unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
int MIOffset = getLdStOffsetOp(MI).getImm();
+ bool MIIsUnscaled = TII->isUnscaledLdSt(MI);
+ if (IsUnscaled != MIIsUnscaled) {
+ // We're trying to pair instructions that differ in how they are scaled.
+ // If FirstMI is scaled then scale the offset of MI accordingly.
+ // Otherwise, do the opposite (i.e., make MI's offset unscaled).
+ int MemSize = getMemScale(MI);
+ if (MIIsUnscaled) {
+ // If the unscaled offset isn't a multiple of the MemSize, we can't
+ // pair the operations together: bail and keep looking.
+ if (MIOffset % MemSize)
+ continue;
+ MIOffset /= MemSize;
+ } else {
+ MIOffset *= MemSize;
+ }
+ }
+
if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
(Offset + OffsetStride == MIOffset))) {
int MinOffset = Offset < MIOffset ? Offset : MIOffset;
- // If this is a volatile load/store that otherwise matched, stop looking
- // as something is going on that we don't have enough information to
- // safely transform. Similarly, stop if we see a hint to avoid pairs.
- if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
- return E;
- // If the resultant immediate offset of merging these instructions
- // is out of range for a pairwise instruction, bail and keep looking.
- bool MIIsUnscaled = isUnscaledLdSt(MI);
- bool IsNarrowLoad = isNarrowLoad(MI->getOpcode());
- if (!IsNarrowLoad &&
- !inBoundsForPair(MIIsUnscaled, MinOffset, OffsetStride)) {
- trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
- MemInsns.push_back(MI);
- continue;
- }
-
- if (IsNarrowLoad || IsNarrowStore) {
+ if (FindNarrowMerge) {
// If the alignment requirements of the scaled wide load/store
- // instruction can't express the offset of the scaled narrow
- // input, bail and keep looking.
- if (!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) {
+ // instruction can't express the offset of the scaled narrow input,
+ // bail and keep looking. For promotable zero stores, allow only when
+ // the stored value is the same (i.e., WZR).
+ if ((!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) ||
+ (IsPromotableZeroStore && Reg != getLdStRegOp(MI).getReg())) {
trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
- MemInsns.push_back(MI);
+ MemInsns.push_back(&MI);
continue;
}
} else {
+ // Pairwise instructions have a 7-bit signed offset field. Single
+ // insns have a 12-bit unsigned offset field. If the resultant
+ // immediate offset of merging these instructions is out of range for
+ // a pairwise instruction, bail and keep looking.
+ if (!inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) {
+ trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
+ MemInsns.push_back(&MI);
+ continue;
+ }
// If the alignment requirements of the paired (scaled) instruction
// can't express the offset of the unscaled input, bail and keep
// looking.
if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
- MemInsns.push_back(MI);
+ MemInsns.push_back(&MI);
continue;
}
}
// If the destination register of the loads is the same register, bail
// and keep looking. A load-pair instruction with both destination
// registers the same is UNPREDICTABLE and will result in an exception.
- // For narrow stores, allow only when the stored value is the same
- // (i.e., WZR).
- if ((MayLoad && Reg == getLdStRegOp(MI).getReg()) ||
- (IsNarrowStore && Reg != getLdStRegOp(MI).getReg())) {
+ if (MayLoad && Reg == getLdStRegOp(MI).getReg()) {
trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
- MemInsns.push_back(MI);
+ MemInsns.push_back(&MI);
continue;
}
@@ -1194,7 +1313,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
// and first alias with the second, we can combine the second into the
// first.
if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
- !(MI->mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
+ !(MI.mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
!mayAlias(MI, MemInsns, TII)) {
Flags.setMergeForward(false);
return MBBI;
@@ -1217,7 +1336,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
// If the instruction wasn't a matching load or store. Stop searching if we
// encounter a call instruction that might modify memory.
- if (MI->isCall())
+ if (MI.isCall())
return E;
// Update modified / uses register lists.
@@ -1229,8 +1348,8 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
return E;
// Update list of instructions that read/write memory.
- if (MI->mayLoadOrStore())
- MemInsns.push_back(MI);
+ if (MI.mayLoadOrStore())
+ MemInsns.push_back(&MI);
}
return E;
}
@@ -1258,22 +1377,24 @@ AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
: getPostIndexedOpcode(I->getOpcode());
MachineInstrBuilder MIB;
- if (!isPairedLdSt(I)) {
+ if (!isPairedLdSt(*I)) {
// Non-paired instruction.
MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
- .addOperand(getLdStRegOp(Update))
- .addOperand(getLdStRegOp(I))
- .addOperand(getLdStBaseOp(I))
- .addImm(Value);
+ .addOperand(getLdStRegOp(*Update))
+ .addOperand(getLdStRegOp(*I))
+ .addOperand(getLdStBaseOp(*I))
+ .addImm(Value)
+ .setMemRefs(I->memoperands_begin(), I->memoperands_end());
} else {
// Paired instruction.
- int Scale = getMemScale(I);
+ int Scale = getMemScale(*I);
MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
- .addOperand(getLdStRegOp(Update))
- .addOperand(getLdStRegOp(I, 0))
- .addOperand(getLdStRegOp(I, 1))
- .addOperand(getLdStBaseOp(I))
- .addImm(Value / Scale);
+ .addOperand(getLdStRegOp(*Update))
+ .addOperand(getLdStRegOp(*I, 0))
+ .addOperand(getLdStRegOp(*I, 1))
+ .addOperand(getLdStBaseOp(*I))
+ .addImm(Value / Scale)
+ .setMemRefs(I->memoperands_begin(), I->memoperands_end());
}
(void)MIB;
@@ -1296,10 +1417,10 @@ AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
return NextI;
}
-bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr *MemMI,
- MachineInstr *MI,
+bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr &MemMI,
+ MachineInstr &MI,
unsigned BaseReg, int Offset) {
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default:
break;
case AArch64::SUBXri:
@@ -1309,20 +1430,20 @@ bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr *MemMI,
case AArch64::ADDXri:
// Make sure it's a vanilla immediate operand, not a relocation or
// anything else we can't handle.
- if (!MI->getOperand(2).isImm())
+ if (!MI.getOperand(2).isImm())
break;
// Watch out for 1 << 12 shifted value.
- if (AArch64_AM::getShiftValue(MI->getOperand(3).getImm()))
+ if (AArch64_AM::getShiftValue(MI.getOperand(3).getImm()))
break;
// The update instruction source and destination register must be the
// same as the load/store base register.
- if (MI->getOperand(0).getReg() != BaseReg ||
- MI->getOperand(1).getReg() != BaseReg)
+ if (MI.getOperand(0).getReg() != BaseReg ||
+ MI.getOperand(1).getReg() != BaseReg)
break;
bool IsPairedInsn = isPairedLdSt(MemMI);
- int UpdateOffset = MI->getOperand(2).getImm();
+ int UpdateOffset = MI.getOperand(2).getImm();
// For non-paired load/store instructions, the immediate must fit in a
// signed 9-bit integer.
if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
@@ -1343,7 +1464,7 @@ bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr *MemMI,
// If we have a non-zero Offset, we check that it matches the amount
// we're adding to the register.
- if (!Offset || Offset == MI->getOperand(2).getImm())
+ if (!Offset || Offset == MI.getOperand(2).getImm())
return true;
break;
}
@@ -1351,9 +1472,9 @@ bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr *MemMI,
}
MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
- MachineBasicBlock::iterator I, unsigned Limit, int UnscaledOffset) {
+ MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) {
MachineBasicBlock::iterator E = I->getParent()->end();
- MachineInstr *MemMI = I;
+ MachineInstr &MemMI = *I;
MachineBasicBlock::iterator MBBI = I;
unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
@@ -1376,22 +1497,20 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
// Track which registers have been modified and used between the first insn
// (inclusive) and the second insn.
- BitVector ModifiedRegs, UsedRegs;
- ModifiedRegs.resize(TRI->getNumRegs());
- UsedRegs.resize(TRI->getNumRegs());
+ ModifiedRegs.reset();
+ UsedRegs.reset();
++MBBI;
- for (unsigned Count = 0; MBBI != E; ++MBBI) {
- MachineInstr *MI = MBBI;
- // Skip DBG_VALUE instructions. Otherwise debug info can affect the
- // optimization by changing how far we scan.
- if (MI->isDebugValue())
+ for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+ // Skip DBG_VALUE instructions.
+ if (MI.isDebugValue())
continue;
// Now that we know this is a real instruction, count it.
++Count;
// If we found a match, return it.
- if (isMatchingUpdateInsn(I, MI, BaseReg, UnscaledOffset))
+ if (isMatchingUpdateInsn(*I, MI, BaseReg, UnscaledOffset))
return MBBI;
// Update the status of what the instruction clobbered and used.
@@ -1409,7 +1528,7 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
MachineBasicBlock::iterator I, unsigned Limit) {
MachineBasicBlock::iterator B = I->getParent()->begin();
MachineBasicBlock::iterator E = I->getParent()->end();
- MachineInstr *MemMI = I;
+ MachineInstr &MemMI = *I;
MachineBasicBlock::iterator MBBI = I;
unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
@@ -1430,22 +1549,19 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
// Track which registers have been modified and used between the first insn
// (inclusive) and the second insn.
- BitVector ModifiedRegs, UsedRegs;
- ModifiedRegs.resize(TRI->getNumRegs());
- UsedRegs.resize(TRI->getNumRegs());
- --MBBI;
- for (unsigned Count = 0; MBBI != B; --MBBI) {
- MachineInstr *MI = MBBI;
- // Skip DBG_VALUE instructions. Otherwise debug info can affect the
- // optimization by changing how far we scan.
- if (MI->isDebugValue())
- continue;
+ ModifiedRegs.reset();
+ UsedRegs.reset();
+ unsigned Count = 0;
+ do {
+ --MBBI;
+ MachineInstr &MI = *MBBI;
- // Now that we know this is a real instruction, count it.
- ++Count;
+ // Don't count DBG_VALUE instructions towards the search limit.
+ if (!MI.isDebugValue())
+ ++Count;
// If we found a match, return it.
- if (isMatchingUpdateInsn(I, MI, BaseReg, Offset))
+ if (isMatchingUpdateInsn(*I, MI, BaseReg, Offset))
return MBBI;
// Update the status of what the instruction clobbered and used.
@@ -1455,15 +1571,15 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
// return early.
if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
return E;
- }
+ } while (MBBI != B && Count < Limit);
return E;
}
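
Both backward scans in this file now share this iteration shape: decrement first, visit every instruction, and charge only non-debug instructions against the search limit. A standalone model of the pattern, with a hypothetical Instr type:

```cpp
#include <cassert>
#include <vector>

struct Instr { bool IsDebugValue; };

// Scan Block backward from the end; DBG_VALUEs are visited but do not
// count toward Limit, so debug info cannot change how far we search.
static unsigned realInsnsScanned(const std::vector<Instr> &Block,
                                 unsigned Limit) {
  if (Block.empty())
    return 0;
  unsigned Count = 0;
  size_t I = Block.size();
  do {
    --I;
    if (!Block[I].IsDebugValue)
      ++Count;
  } while (I != 0 && Count < Limit);
  return Count;
}

int main() {
  std::vector<Instr> Block{{false}, {true}, {false}};
  assert(realInsnsScanned(Block, 8) == 2); // the DBG_VALUE isn't counted
}
```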
bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
MachineBasicBlock::iterator &MBBI) {
- MachineInstr *MI = MBBI;
+ MachineInstr &MI = *MBBI;
// If this is a volatile load, don't mess with it.
- if (MI->hasOrderedMemoryRef())
+ if (MI.hasOrderedMemoryRef())
return false;
// Make sure this is a reg+imm.
@@ -1471,9 +1587,9 @@ bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
if (!getLdStOffsetOp(MI).isImm())
return false;
- // Look backward up to ScanLimit instructions.
+ // Look backward up to LdStLimit instructions.
MachineBasicBlock::iterator StoreI;
- if (findMatchingStore(MBBI, ScanLimit, StoreI)) {
+ if (findMatchingStore(MBBI, LdStLimit, StoreI)) {
++NumLoadsFromStoresPromoted;
// Promote the load. Keeping the iterator straight is a
// pain, so we let the merge routine tell us what the next instruction
@@ -1484,40 +1600,70 @@ bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
return false;
}
+// Find narrow loads that can be converted into a single wider load with
+// bitfield extract instructions. Also merge adjacent zero stores into a wider
+// store.
bool AArch64LoadStoreOpt::tryToMergeLdStInst(
MachineBasicBlock::iterator &MBBI) {
- MachineInstr *MI = MBBI;
- MachineBasicBlock::iterator E = MI->getParent()->end();
- // If this is a volatile load/store, don't mess with it.
- if (MI->hasOrderedMemoryRef())
- return false;
+ assert((isNarrowLoad(*MBBI) || isPromotableZeroStoreOpcode(*MBBI)) &&
+ "Expected narrow op.");
+ MachineInstr &MI = *MBBI;
+ MachineBasicBlock::iterator E = MI.getParent()->end();
- // Make sure this is a reg+imm (as opposed to an address reloc).
- if (!getLdStOffsetOp(MI).isImm())
+ if (!TII->isCandidateToMergeOrPair(MI))
return false;
- // Check if this load/store has a hint to avoid pair formation.
- // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
- if (TII->isLdStPairSuppressed(MI))
+ // For promotable zero stores, the stored value should be WZR.
+ if (isPromotableZeroStoreOpcode(MI) &&
+ getLdStRegOp(MI).getReg() != AArch64::WZR)
return false;
- // Look ahead up to ScanLimit instructions for a pairable instruction.
+  // Look ahead up to LdStLimit instructions for a mergeable instruction.
LdStPairFlags Flags;
- MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, ScanLimit);
- if (Paired != E) {
+ MachineBasicBlock::iterator MergeMI =
+ findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ true);
+ if (MergeMI != E) {
if (isNarrowLoad(MI)) {
++NumNarrowLoadsPromoted;
- } else if (isNarrowStore(MI)) {
+ } else if (isPromotableZeroStoreInst(MI)) {
++NumZeroStoresPromoted;
- } else {
- ++NumPairCreated;
- if (isUnscaledLdSt(MI))
- ++NumUnscaledPairCreated;
}
+ // Keeping the iterator straight is a pain, so we let the merge routine tell
+ // us what the next instruction is after it's done mucking about.
+ MBBI = mergeNarrowInsns(MBBI, MergeMI, Flags);
+ return true;
+ }
+ return false;
+}
- // Merge the loads into a pair. Keeping the iterator straight is a
- // pain, so we let the merge routine tell us what the next instruction
- // is after it's done mucking about.
+// Find loads and stores that can be merged into a single load or store pair
+// instruction.
+bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
+ MachineInstr &MI = *MBBI;
+ MachineBasicBlock::iterator E = MI.getParent()->end();
+
+ if (!TII->isCandidateToMergeOrPair(MI))
+ return false;
+
+ // Early exit if the offset is not possible to match. (6 bits of positive
+ // range, plus allow an extra one in case we find a later insn that matches
+ // with Offset-1)
+ bool IsUnscaled = TII->isUnscaledLdSt(MI);
+ int Offset = getLdStOffsetOp(MI).getImm();
+ int OffsetStride = IsUnscaled ? getMemScale(MI) : 1;
+ if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
+ return false;
+
+ // Look ahead up to LdStLimit instructions for a pairable instruction.
+ LdStPairFlags Flags;
+ MachineBasicBlock::iterator Paired =
+ findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ false);
+ if (Paired != E) {
+ ++NumPairCreated;
+ if (TII->isUnscaledLdSt(MI))
+ ++NumUnscaledPairCreated;
+ // Keeping the iterator straight is a pain, so we let the merge routine tell
+ // us what the next instruction is after it's done mucking about.
MBBI = mergePairedInsns(MBBI, Paired, Flags);
return true;
}
@@ -1527,7 +1673,7 @@ bool AArch64LoadStoreOpt::tryToMergeLdStInst(
bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
bool enableNarrowLdOpt) {
bool Modified = false;
- // Three tranformations to do here:
+  // Four transformations to do here:
// 1) Find loads that directly read from stores and promote them by
// replacing with mov instructions. If the store is wider than the load,
// the load will be replaced with a bitfield extract.
@@ -1536,35 +1682,11 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
// ldrh w2, [x0, #6]
// ; becomes
// str w1, [x0, #4]
- // lsr w2, w1, #16
- // 2) Find narrow loads that can be converted into a single wider load
- // with bitfield extract instructions.
- // e.g.,
- // ldrh w0, [x2]
- // ldrh w1, [x2, #2]
- // ; becomes
- // ldr w0, [x2]
- // ubfx w1, w0, #16, #16
- // and w0, w0, #ffff
- // 3) Find loads and stores that can be merged into a single load or store
- // pair instruction.
- // e.g.,
- // ldr x0, [x2]
- // ldr x1, [x2, #8]
- // ; becomes
- // ldp x0, x1, [x2]
- // 4) Find base register updates that can be merged into the load or store
- // as a base-reg writeback.
- // e.g.,
- // ldr x0, [x2]
- // add x2, x2, #4
- // ; becomes
- // ldr x0, [x2], #4
-
+ // lsr w2, w1, #16
for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
MBBI != E;) {
- MachineInstr *MI = MBBI;
- switch (MI->getOpcode()) {
+ MachineInstr &MI = *MBBI;
+ switch (MI.getOpcode()) {
default:
// Just move on to the next instruction.
++MBBI;
@@ -1586,47 +1708,49 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
++MBBI;
break;
}
- // FIXME: Do the other instructions.
}
}
-
+ // 2) Find narrow loads that can be converted into a single wider load
+ // with bitfield extract instructions.
+ // e.g.,
+ // ldrh w0, [x2]
+ // ldrh w1, [x2, #2]
+ // ; becomes
+ // ldr w0, [x2]
+ // ubfx w1, w0, #16, #16
+ // and w0, w0, #ffff
+ //
+ // Also merge adjacent zero stores into a wider store.
+ // e.g.,
+ // strh wzr, [x0]
+ // strh wzr, [x0, #2]
+ // ; becomes
+ // str wzr, [x0]
for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
enableNarrowLdOpt && MBBI != E;) {
- MachineInstr *MI = MBBI;
- switch (MI->getOpcode()) {
- default:
- // Just move on to the next instruction.
- ++MBBI;
- break;
- // Scaled instructions.
- case AArch64::LDRBBui:
- case AArch64::LDRHHui:
- case AArch64::LDRSBWui:
- case AArch64::LDRSHWui:
- case AArch64::STRBBui:
- case AArch64::STRHHui:
- // Unscaled instructions.
- case AArch64::LDURBBi:
- case AArch64::LDURHHi:
- case AArch64::LDURSBWi:
- case AArch64::LDURSHWi:
- case AArch64::STURBBi:
- case AArch64::STURHHi: {
+ MachineInstr &MI = *MBBI;
+ unsigned Opc = MI.getOpcode();
+ if (isPromotableZeroStoreOpcode(Opc) ||
+ (EnableNarrowLdMerge && isNarrowLoad(Opc))) {
if (tryToMergeLdStInst(MBBI)) {
Modified = true;
- break;
- }
+ } else
+ ++MBBI;
+ } else
++MBBI;
- break;
- }
- // FIXME: Do the other instructions.
- }
}
+ // 3) Find loads and stores that can be merged into a single load or store
+ // pair instruction.
+ // e.g.,
+ // ldr x0, [x2]
+ // ldr x1, [x2, #8]
+ // ; becomes
+ // ldp x0, x1, [x2]
for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
MBBI != E;) {
- MachineInstr *MI = MBBI;
- switch (MI->getOpcode()) {
+ MachineInstr &MI = *MBBI;
+ switch (MI.getOpcode()) {
default:
// Just move on to the next instruction.
++MBBI;
@@ -1655,23 +1779,28 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
case AArch64::LDURWi:
case AArch64::LDURXi:
case AArch64::LDURSWi: {
- if (tryToMergeLdStInst(MBBI)) {
+ if (tryToPairLdStInst(MBBI)) {
Modified = true;
break;
}
++MBBI;
break;
}
- // FIXME: Do the other instructions.
}
}
-
+ // 4) Find base register updates that can be merged into the load or store
+ // as a base-reg writeback.
+ // e.g.,
+ // ldr x0, [x2]
+ // add x2, x2, #4
+ // ; becomes
+ // ldr x0, [x2], #4
for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
MBBI != E;) {
- MachineInstr *MI = MBBI;
+ MachineInstr &MI = *MBBI;
// Do update merging. It's simpler to keep this separate from the above
- // switch, though not strictly necessary.
- unsigned Opc = MI->getOpcode();
+    // switches, though not strictly necessary.
+ unsigned Opc = MI.getOpcode();
switch (Opc) {
default:
// Just move on to the next instruction.
@@ -1726,7 +1855,7 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
// merged into:
// ldr x0, [x20], #32
MachineBasicBlock::iterator Update =
- findMatchingUpdateInsnForward(MBBI, ScanLimit, 0);
+ findMatchingUpdateInsnForward(MBBI, 0, UpdateLimit);
if (Update != E) {
// Merge the update into the ld/st.
MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
@@ -1736,7 +1865,7 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
}
// Don't know how to handle pre/post-index versions, so move to the next
// instruction.
- if (isUnscaledLdSt(Opc)) {
+ if (TII->isUnscaledLdSt(Opc)) {
++MBBI;
break;
}
@@ -1746,7 +1875,7 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
// ldr x1, [x0]
// merged into:
// ldr x1, [x0, #8]!
- Update = findMatchingUpdateInsnBackward(MBBI, ScanLimit);
+ Update = findMatchingUpdateInsnBackward(MBBI, UpdateLimit);
if (Update != E) {
// Merge the update into the ld/st.
MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
@@ -1764,7 +1893,7 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
// add x0, x0, #64
// merged into:
// ldr x1, [x0, #64]!
- Update = findMatchingUpdateInsnForward(MBBI, ScanLimit, UnscaledOffset);
+ Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset, UpdateLimit);
if (Update != E) {
// Merge the update into the ld/st.
MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
@@ -1777,29 +1906,29 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
++MBBI;
break;
}
- // FIXME: Do the other instructions.
}
}
return Modified;
}
-bool AArch64LoadStoreOpt::enableNarrowLdMerge(MachineFunction &Fn) {
- bool ProfitableArch = Subtarget->isCortexA57();
- // FIXME: The benefit from converting narrow loads into a wider load could be
- // microarchitectural as it assumes that a single load with two bitfield
- // extracts is cheaper than two narrow loads. Currently, this conversion is
- // enabled only in cortex-a57 on which performance benefits were verified.
- return ProfitableArch && !Subtarget->requiresStrictAlign();
-}
-
bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
+ if (skipFunction(*Fn.getFunction()))
+ return false;
+
Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
TRI = Subtarget->getRegisterInfo();
+ // Resize the modified and used register bitfield trackers. We do this once
+ // per function and then clear the bitfield each time we optimize a load or
+ // store.
+ ModifiedRegs.resize(TRI->getNumRegs());
+ UsedRegs.resize(TRI->getNumRegs());
+
bool Modified = false;
- bool enableNarrowLdOpt = enableNarrowLdMerge(Fn);
+ bool enableNarrowLdOpt =
+ Subtarget->mergeNarrowLoads() && !Subtarget->requiresStrictAlign();
for (auto &MBB : Fn)
Modified |= optimizeBlock(MBB, enableNarrowLdOpt);
@@ -1809,6 +1938,11 @@ bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep
// loads and stores near one another?
+// FIXME: When pairing store instructions it's very possible for this pass to
+// hoist a store with a KILL marker above another use (without a KILL marker).
+// The resulting IR is invalid, but nothing uses the KILL markers after this
+// pass, so it's never caused a problem in practice.
+
/// createAArch64LoadStoreOptimizationPass - returns an instance of the
/// load / store optimization pass.
FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
diff --git a/lib/Target/AArch64/AArch64MachineFunctionInfo.h b/lib/Target/AArch64/AArch64MachineFunctionInfo.h
index 318f83953505..49e7767741ea 100644
--- a/lib/Target/AArch64/AArch64MachineFunctionInfo.h
+++ b/lib/Target/AArch64/AArch64MachineFunctionInfo.h
@@ -48,6 +48,9 @@ class AArch64FunctionInfo : public MachineFunctionInfo {
/// \brief Amount of stack frame size, not including callee-saved registers.
unsigned LocalStackSize;
+ /// \brief Amount of stack frame size used for saving callee-saved registers.
+ unsigned CalleeSavedStackSize;
+
/// \brief Number of TLS accesses using the special (combinable)
/// _TLS_MODULE_BASE_ symbol.
unsigned NumLocalDynamicTLSAccesses;
@@ -76,18 +79,28 @@ class AArch64FunctionInfo : public MachineFunctionInfo {
/// copies.
bool IsSplitCSR;
+ /// True when the stack gets realigned dynamically because the size of stack
+ /// frame is unknown at compile time. e.g., in case of VLAs.
+ bool StackRealigned;
+
+ /// True when the callee-save stack area has unused gaps that may be used for
+ /// other stack allocations.
+ bool CalleeSaveStackHasFreeSpace;
+
public:
AArch64FunctionInfo()
: BytesInStackArgArea(0), ArgumentStackToRestore(0), HasStackFrame(false),
NumLocalDynamicTLSAccesses(0), VarArgsStackIndex(0), VarArgsGPRIndex(0),
VarArgsGPRSize(0), VarArgsFPRIndex(0), VarArgsFPRSize(0),
- IsSplitCSR(false) {}
+ IsSplitCSR(false), StackRealigned(false),
+ CalleeSaveStackHasFreeSpace(false) {}
explicit AArch64FunctionInfo(MachineFunction &MF)
: BytesInStackArgArea(0), ArgumentStackToRestore(0), HasStackFrame(false),
NumLocalDynamicTLSAccesses(0), VarArgsStackIndex(0), VarArgsGPRIndex(0),
VarArgsGPRSize(0), VarArgsFPRIndex(0), VarArgsFPRSize(0),
- IsSplitCSR(false) {
+ IsSplitCSR(false), StackRealigned(false),
+ CalleeSaveStackHasFreeSpace(false) {
(void)MF;
}
@@ -102,12 +115,25 @@ public:
bool hasStackFrame() const { return HasStackFrame; }
void setHasStackFrame(bool s) { HasStackFrame = s; }
+ bool isStackRealigned() const { return StackRealigned; }
+ void setStackRealigned(bool s) { StackRealigned = s; }
+
+ bool hasCalleeSaveStackFreeSpace() const {
+ return CalleeSaveStackHasFreeSpace;
+ }
+ void setCalleeSaveStackHasFreeSpace(bool s) {
+ CalleeSaveStackHasFreeSpace = s;
+ }
+
bool isSplitCSR() const { return IsSplitCSR; }
void setIsSplitCSR(bool s) { IsSplitCSR = s; }
void setLocalStackSize(unsigned Size) { LocalStackSize = Size; }
unsigned getLocalStackSize() const { return LocalStackSize; }
+ void setCalleeSavedStackSize(unsigned Size) { CalleeSavedStackSize = Size; }
+ unsigned getCalleeSavedStackSize() const { return CalleeSavedStackSize; }
+
void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; }
unsigned getNumLocalDynamicTLSAccesses() const {
return NumLocalDynamicTLSAccesses;
@@ -140,15 +166,15 @@ public:
SmallVector<const MachineInstr *, 3> Args;
public:
- typedef SmallVectorImpl<const MachineInstr *> LOHArgs;
+ typedef ArrayRef<const MachineInstr *> LOHArgs;
- MILOHDirective(MCLOHType Kind, const LOHArgs &Args)
+ MILOHDirective(MCLOHType Kind, LOHArgs Args)
: Kind(Kind), Args(Args.begin(), Args.end()) {
assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
}
MCLOHType getKind() const { return Kind; }
- const LOHArgs &getArgs() const { return Args; }
+ LOHArgs getArgs() const { return Args; }
};
typedef MILOHDirective::LOHArgs MILOHArgs;
@@ -157,7 +183,7 @@ public:
const MILOHContainer &getLOHContainer() const { return LOHContainerSet; }
/// Add a LOH directive of this @p Kind and this @p Args.
- void addLOHDirective(MCLOHType Kind, const MILOHArgs &Args) {
+ void addLOHDirective(MCLOHType Kind, MILOHArgs Args) {
LOHContainerSet.push_back(MILOHDirective(Kind, Args));
LOHRelated.insert(Args.begin(), Args.end());
}
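
The LOHArgs change above replaces a const SmallVectorImpl reference with an ArrayRef passed by value: ArrayRef is a non-owning (pointer, length) view that is trivially copyable, so any contiguous container can bind to it. A tiny standalone model of the idea (not LLVM's ArrayRef):

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

template <typename T> class ArrayRefModel {
  const T *Data = nullptr;
  size_t Length = 0;

public:
  ArrayRefModel(const std::vector<T> &V) : Data(V.data()), Length(V.size()) {}
  const T *begin() const { return Data; }
  const T *end() const { return Data + Length; }
  size_t size() const { return Length; }
};

int main() {
  std::vector<int> Args{1, 2, 3};
  ArrayRefModel<int> View(Args); // cheap copyable view, no allocation
  int Sum = 0;
  for (int X : View)
    Sum += X;
  assert(View.size() == 3 && Sum == 6);
}
```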
diff --git a/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp b/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
index 5394875a6bc1..038162c6f54a 100644
--- a/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
+++ b/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
@@ -320,7 +320,7 @@ void A57ChainingConstraint::addInterChainConstraint(PBQPRAGraph &G, unsigned Rd,
static bool regJustKilledBefore(const LiveIntervals &LIs, unsigned reg,
const MachineInstr &MI) {
const LiveInterval &LI = LIs.getInterval(reg);
- SlotIndex SI = LIs.getInstructionIndex(&MI);
+ SlotIndex SI = LIs.getInstructionIndex(MI);
return LI.expiredAt(SI);
}
diff --git a/lib/Target/AArch64/AArch64PromoteConstant.cpp b/lib/Target/AArch64/AArch64PromoteConstant.cpp
index 79c09d9f058d..b1e40510b2ae 100644
--- a/lib/Target/AArch64/AArch64PromoteConstant.cpp
+++ b/lib/Target/AArch64/AArch64PromoteConstant.cpp
@@ -85,6 +85,21 @@ namespace {
class AArch64PromoteConstant : public ModulePass {
public:
+ struct PromotedConstant {
+ bool ShouldConvert = false;
+ GlobalVariable *GV = nullptr;
+ };
+ typedef SmallDenseMap<Constant *, PromotedConstant, 16> PromotionCacheTy;
+
+ struct UpdateRecord {
+ Constant *C;
+ Instruction *User;
+ unsigned Op;
+
+ UpdateRecord(Constant *C, Instruction *User, unsigned Op)
+ : C(C), User(User), Op(Op) {}
+ };
+
static char ID;
AArch64PromoteConstant() : ModulePass(ID) {}
@@ -94,9 +109,12 @@ public:
/// global variables with module scope.
bool runOnModule(Module &M) override {
DEBUG(dbgs() << getPassName() << '\n');
+ if (skipModule(M))
+ return false;
bool Changed = false;
+ PromotionCacheTy PromotionCache;
for (auto &MF : M) {
- Changed |= runOnFunction(MF);
+ Changed |= runOnFunction(MF, PromotionCache);
}
return Changed;
}
@@ -105,7 +123,7 @@ private:
/// Look for interesting constants used within the given function.
/// Promote them into global variables, load these global variables within
/// the related function, so that the number of inserted loads is minimal.
- bool runOnFunction(Function &F);
+ bool runOnFunction(Function &F, PromotionCacheTy &PromotionCache);
// This transformation requires dominator info
void getAnalysisUsage(AnalysisUsage &AU) const override {
@@ -115,79 +133,72 @@ private:
}
/// Type to store a list of Uses.
- typedef SmallVector<Use *, 4> Uses;
+ typedef SmallVector<std::pair<Instruction *, unsigned>, 4> Uses;
/// Map an insertion point to all the uses it dominates.
typedef DenseMap<Instruction *, Uses> InsertionPoints;
- /// Map a function to the required insertion point of load for a
- /// global variable.
- typedef DenseMap<Function *, InsertionPoints> InsertionPointsPerFunc;
/// Find the closest point that dominates the given Use.
- Instruction *findInsertionPoint(Use &Use);
+ Instruction *findInsertionPoint(Instruction &User, unsigned OpNo);
/// Check if the given insertion point is dominated by an existing
/// insertion point.
/// If true, the given use is added to the list of dominated uses for
/// the related existing point.
/// \param NewPt the insertion point to be checked
- /// \param Use the use to be added into the list of dominated uses
+ /// \param User the user of the constant
+ /// \param OpNo the operand number of the use
/// \param InsertPts existing insertion points
/// \pre NewPt and all instructions in InsertPts belong to the same function
/// \return true if one of the insertion points in InsertPts dominates NewPt,
/// false otherwise
- bool isDominated(Instruction *NewPt, Use &Use, InsertionPoints &InsertPts);
+ bool isDominated(Instruction *NewPt, Instruction *User, unsigned OpNo,
+ InsertionPoints &InsertPts);
/// Check if the given insertion point can be merged with an existing
/// insertion point in a common dominator.
/// If true, the given use is added to the list of the created insertion
/// point.
/// \param NewPt the insertion point to be checked
- /// \param Use the use to be added into the list of dominated uses
+ /// \param User the user of the constant
+ /// \param OpNo the operand number of the use
/// \param InsertPts existing insertion points
/// \pre NewPt and all instructions in InsertPts belong to the same function
/// \pre isDominated returns false for the exact same parameters.
/// \return true if there exists an insertion point in InsertPts that could
/// have been merged with NewPt in a common dominator,
/// false otherwise
- bool tryAndMerge(Instruction *NewPt, Use &Use, InsertionPoints &InsertPts);
+ bool tryAndMerge(Instruction *NewPt, Instruction *User, unsigned OpNo,
+ InsertionPoints &InsertPts);
/// Compute the minimal insertion points to dominate all the interesting
/// uses of a value.
/// Insertion points are grouped per function, and each insertion point
/// contains a list of all the uses it dominates within the related function.
- /// \param Val constant to be examined
- /// \param[out] InsPtsPerFunc output storage of the analysis
- void computeInsertionPoints(Constant *Val,
- InsertionPointsPerFunc &InsPtsPerFunc);
+ /// \param User the user of the constant
+ /// \param OpNo the operand number of the constant
+ /// \param[out] InsertPts output storage of the analysis
+ void computeInsertionPoint(Instruction *User, unsigned OpNo,
+ InsertionPoints &InsertPts);
/// Insert a definition of a new global variable at each point contained in
/// InsertPts and update the related uses (also contained in InsertPts).
- bool insertDefinitions(Constant *Cst, InsertionPointsPerFunc &InsPtsPerFunc);
-
- /// Compute the minimal insertion points to dominate all the interesting
- /// uses of Val and insert a definition of a new global variable
- /// at these points.
- /// Also update the uses of Val accordingly.
- /// Currently a use of Val is considered interesting if:
- /// - Val is not UndefValue
- /// - Val is not zeroinitialized
- /// - Replacing Val per a load of a global variable is valid.
- /// \see shouldConvert for more details
- bool computeAndInsertDefinitions(Constant *Val);
-
- /// Promote the given constant into a global variable if it is expected to
- /// be profitable.
- /// \return true if Cst has been promoted
- bool promoteConstant(Constant *Cst);
+ void insertDefinitions(Function &F, GlobalVariable &GV,
+ InsertionPoints &InsertPts);
+
+ /// Do the constant promotion indicated by the Updates records, keeping track
+ /// of globals in PromotionCache.
+ void promoteConstants(Function &F, SmallVectorImpl<UpdateRecord> &Updates,
+ PromotionCacheTy &PromotionCache);
/// Transfer the list of dominated uses of IPI to NewPt in InsertPts.
/// Append Use to this list and delete the entry of IPI in InsertPts.
- static void appendAndTransferDominatedUses(Instruction *NewPt, Use &Use,
+ static void appendAndTransferDominatedUses(Instruction *NewPt,
+ Instruction *User, unsigned OpNo,
InsertionPoints::iterator &IPI,
InsertionPoints &InsertPts) {
// Record the dominated use.
- IPI->second.push_back(&Use);
+ IPI->second.emplace_back(User, OpNo);
// Transfer the dominated uses of IPI to NewPt
// Inserting into the DenseMap may invalidate existing iterator.
// Keep a copy of the key to find the iterator to erase. Keep a copy of the
@@ -285,10 +296,7 @@ static bool shouldConvertUse(const Constant *Cst, const Instruction *Instr,
// Do not mess with inline asm.
const CallInst *CI = dyn_cast<const CallInst>(Instr);
- if (CI && isa<const InlineAsm>(CI->getCalledValue()))
- return false;
-
- return true;
+ return !(CI && isa<const InlineAsm>(CI->getCalledValue()));
}
/// Check if the given Cst should be converted into
@@ -305,7 +313,7 @@ static bool shouldConvertUse(const Constant *Cst, const Instruction *Instr,
/// for the regular approach, even for float).
/// Again, the simplest solution would be to promote every
/// constant and rematerialize them when they are actually cheap to create.
-static bool shouldConvert(const Constant *Cst) {
+static bool shouldConvertImpl(const Constant *Cst) {
if (isa<const UndefValue>(Cst))
return false;
@@ -328,18 +336,28 @@ static bool shouldConvert(const Constant *Cst) {
return isConstantUsingVectorTy(Cst->getType());
}
-Instruction *AArch64PromoteConstant::findInsertionPoint(Use &Use) {
- Instruction *User = cast<Instruction>(Use.getUser());
+static bool
+shouldConvert(Constant &C,
+ AArch64PromoteConstant::PromotionCacheTy &PromotionCache) {
+ auto Converted = PromotionCache.insert(
+ std::make_pair(&C, AArch64PromoteConstant::PromotedConstant()));
+ if (Converted.second)
+ Converted.first->second.ShouldConvert = shouldConvertImpl(&C);
+ return Converted.first->second.ShouldConvert;
+}
+Instruction *AArch64PromoteConstant::findInsertionPoint(Instruction &User,
+ unsigned OpNo) {
// If this user is a phi, the insertion point is in the related
// incoming basic block.
- if (PHINode *PhiInst = dyn_cast<PHINode>(User))
- return PhiInst->getIncomingBlock(Use.getOperandNo())->getTerminator();
+ if (PHINode *PhiInst = dyn_cast<PHINode>(&User))
+ return PhiInst->getIncomingBlock(OpNo)->getTerminator();
- return User;
+ return &User;
}
-bool AArch64PromoteConstant::isDominated(Instruction *NewPt, Use &Use,
+bool AArch64PromoteConstant::isDominated(Instruction *NewPt, Instruction *User,
+ unsigned OpNo,
InsertionPoints &InsertPts) {
DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(
@@ -358,14 +376,15 @@ bool AArch64PromoteConstant::isDominated(Instruction *NewPt, Use &Use,
DEBUG(dbgs() << "Insertion point dominated by:\n");
DEBUG(IPI.first->print(dbgs()));
DEBUG(dbgs() << '\n');
- IPI.second.push_back(&Use);
+ IPI.second.emplace_back(User, OpNo);
return true;
}
}
return false;
}
-bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt, Use &Use,
+bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt, Instruction *User,
+ unsigned OpNo,
InsertionPoints &InsertPts) {
DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(
*NewPt->getParent()->getParent()).getDomTree();
@@ -385,7 +404,7 @@ bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt, Use &Use,
DEBUG(dbgs() << "Merge insertion point with:\n");
DEBUG(IPI->first->print(dbgs()));
DEBUG(dbgs() << "\nat considered insertion point.\n");
- appendAndTransferDominatedUses(NewPt, Use, IPI, InsertPts);
+ appendAndTransferDominatedUses(NewPt, User, OpNo, IPI, InsertPts);
return true;
}
@@ -409,149 +428,141 @@ bool AArch64PromoteConstant::tryAndMerge(Instruction *NewPt, Use &Use,
DEBUG(dbgs() << '\n');
DEBUG(NewPt->print(dbgs()));
DEBUG(dbgs() << '\n');
- appendAndTransferDominatedUses(NewPt, Use, IPI, InsertPts);
+ appendAndTransferDominatedUses(NewPt, User, OpNo, IPI, InsertPts);
return true;
}
return false;
}
-void AArch64PromoteConstant::computeInsertionPoints(
- Constant *Val, InsertionPointsPerFunc &InsPtsPerFunc) {
- DEBUG(dbgs() << "** Compute insertion points **\n");
- for (Use &Use : Val->uses()) {
- Instruction *User = dyn_cast<Instruction>(Use.getUser());
-
- // If the user is not an Instruction, we cannot modify it.
- if (!User)
- continue;
-
- // Filter out uses that should not be converted.
- if (!shouldConvertUse(Val, User, Use.getOperandNo()))
- continue;
+void AArch64PromoteConstant::computeInsertionPoint(
+ Instruction *User, unsigned OpNo, InsertionPoints &InsertPts) {
+ DEBUG(dbgs() << "Considered use, opidx " << OpNo << ":\n");
+ DEBUG(User->print(dbgs()));
+ DEBUG(dbgs() << '\n');
- DEBUG(dbgs() << "Considered use, opidx " << Use.getOperandNo() << ":\n");
- DEBUG(User->print(dbgs()));
- DEBUG(dbgs() << '\n');
+ Instruction *InsertionPoint = findInsertionPoint(*User, OpNo);
- Instruction *InsertionPoint = findInsertionPoint(Use);
+ DEBUG(dbgs() << "Considered insertion point:\n");
+ DEBUG(InsertionPoint->print(dbgs()));
+ DEBUG(dbgs() << '\n');
- DEBUG(dbgs() << "Considered insertion point:\n");
- DEBUG(InsertionPoint->print(dbgs()));
- DEBUG(dbgs() << '\n');
+ if (isDominated(InsertionPoint, User, OpNo, InsertPts))
+ return;
+ // This insertion point is useful; check if we can merge some insertion
+ // point in a common dominator or if NewPt dominates an existing one.
+ if (tryAndMerge(InsertionPoint, User, OpNo, InsertPts))
+ return;
- // Check if the current insertion point is useless, i.e., it is dominated
- // by another one.
- InsertionPoints &InsertPts =
- InsPtsPerFunc[InsertionPoint->getParent()->getParent()];
- if (isDominated(InsertionPoint, Use, InsertPts))
- continue;
- // This insertion point is useful, check if we can merge some insertion
- // point in a common dominator or if NewPt dominates an existing one.
- if (tryAndMerge(InsertionPoint, Use, InsertPts))
- continue;
-
- DEBUG(dbgs() << "Keep considered insertion point\n");
+ DEBUG(dbgs() << "Keep considered insertion point\n");
- // It is definitely useful by its own
- InsertPts[InsertionPoint].push_back(&Use);
- }
+ // It is definitely useful on its own
+ InsertPts[InsertionPoint].emplace_back(User, OpNo);
}
-bool AArch64PromoteConstant::insertDefinitions(
- Constant *Cst, InsertionPointsPerFunc &InsPtsPerFunc) {
- // We will create one global variable per Module.
- DenseMap<Module *, GlobalVariable *> ModuleToMergedGV;
- bool HasChanged = false;
+static void ensurePromotedGV(Function &F, Constant &C,
+ AArch64PromoteConstant::PromotedConstant &PC) {
+ assert(PC.ShouldConvert &&
+ "Expected that we should convert this to a global");
+ if (PC.GV)
+ return;
+ PC.GV = new GlobalVariable(
+ *F.getParent(), C.getType(), true, GlobalValue::InternalLinkage, nullptr,
+ "_PromotedConst", nullptr, GlobalVariable::NotThreadLocal);
+ PC.GV->setInitializer(&C);
+ DEBUG(dbgs() << "Global replacement: ");
+ DEBUG(PC.GV->print(dbgs()));
+ DEBUG(dbgs() << '\n');
+ ++NumPromoted;
+}
- // Traverse all insertion points in all the function.
- for (const auto &FctToInstPtsIt : InsPtsPerFunc) {
- const InsertionPoints &InsertPts = FctToInstPtsIt.second;
-// Do more checking for debug purposes.
+void AArch64PromoteConstant::insertDefinitions(Function &F,
+ GlobalVariable &PromotedGV,
+ InsertionPoints &InsertPts) {
#ifndef NDEBUG
- DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(
- *FctToInstPtsIt.first).getDomTree();
+ // Do more checking for debug purposes.
+ DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
#endif
- assert(!InsertPts.empty() && "Empty uses does not need a definition");
-
- Module *M = FctToInstPtsIt.first->getParent();
- GlobalVariable *&PromotedGV = ModuleToMergedGV[M];
- if (!PromotedGV) {
- PromotedGV = new GlobalVariable(
- *M, Cst->getType(), true, GlobalValue::InternalLinkage, nullptr,
- "_PromotedConst", nullptr, GlobalVariable::NotThreadLocal);
- PromotedGV->setInitializer(Cst);
- DEBUG(dbgs() << "Global replacement: ");
- DEBUG(PromotedGV->print(dbgs()));
- DEBUG(dbgs() << '\n');
- ++NumPromoted;
- HasChanged = true;
- }
-
- for (const auto &IPI : InsertPts) {
- // Create the load of the global variable.
- IRBuilder<> Builder(IPI.first);
- LoadInst *LoadedCst = Builder.CreateLoad(PromotedGV);
- DEBUG(dbgs() << "**********\n");
- DEBUG(dbgs() << "New def: ");
- DEBUG(LoadedCst->print(dbgs()));
- DEBUG(dbgs() << '\n');
+ assert(!InsertPts.empty() && "Empty uses do not need a definition");
+
+ for (const auto &IPI : InsertPts) {
+ // Create the load of the global variable.
+ IRBuilder<> Builder(IPI.first);
+ LoadInst *LoadedCst = Builder.CreateLoad(&PromotedGV);
+ DEBUG(dbgs() << "**********\n");
+ DEBUG(dbgs() << "New def: ");
+ DEBUG(LoadedCst->print(dbgs()));
+ DEBUG(dbgs() << '\n');
- // Update the dominated uses.
- for (Use *Use : IPI.second) {
+ // Update the dominated uses.
+ for (auto Use : IPI.second) {
#ifndef NDEBUG
- assert(DT.dominates(LoadedCst, findInsertionPoint(*Use)) &&
- "Inserted definition does not dominate all its uses!");
+ assert(DT.dominates(LoadedCst,
+ findInsertionPoint(*Use.first, Use.second)) &&
+ "Inserted definition does not dominate all its uses!");
#endif
- DEBUG(dbgs() << "Use to update " << Use->getOperandNo() << ":");
- DEBUG(Use->getUser()->print(dbgs()));
- DEBUG(dbgs() << '\n');
- Use->set(LoadedCst);
- ++NumPromotedUses;
- }
+ DEBUG({
+ dbgs() << "Use to update " << Use.second << ":";
+ Use.first->print(dbgs());
+ dbgs() << '\n';
+ });
+ Use.first->setOperand(Use.second, LoadedCst);
+ ++NumPromotedUses;
}
}
- return HasChanged;
}
-bool AArch64PromoteConstant::computeAndInsertDefinitions(Constant *Val) {
- InsertionPointsPerFunc InsertPtsPerFunc;
- computeInsertionPoints(Val, InsertPtsPerFunc);
- return insertDefinitions(Val, InsertPtsPerFunc);
-}
-
-bool AArch64PromoteConstant::promoteConstant(Constant *Cst) {
- assert(Cst && "Given variable is not a valid constant.");
-
- if (!shouldConvert(Cst))
- return false;
-
- DEBUG(dbgs() << "******************************\n");
- DEBUG(dbgs() << "Candidate constant: ");
- DEBUG(Cst->print(dbgs()));
- DEBUG(dbgs() << '\n');
-
- return computeAndInsertDefinitions(Cst);
+void AArch64PromoteConstant::promoteConstants(
+ Function &F, SmallVectorImpl<UpdateRecord> &Updates,
+ PromotionCacheTy &PromotionCache) {
+ // Promote the constants.
+ for (auto U = Updates.begin(), E = Updates.end(); U != E;) {
+ DEBUG(dbgs() << "** Compute insertion points **\n");
+ auto First = U;
+ Constant *C = First->C;
+ InsertionPoints InsertPts;
+ do {
+ computeInsertionPoint(U->User, U->Op, InsertPts);
+ } while (++U != E && U->C == C);
+
+ auto &Promotion = PromotionCache[C];
+ ensurePromotedGV(F, *C, Promotion);
+ insertDefinitions(F, *Promotion.GV, InsertPts);
+ }
}
-bool AArch64PromoteConstant::runOnFunction(Function &F) {
+bool AArch64PromoteConstant::runOnFunction(Function &F,
+ PromotionCacheTy &PromotionCache) {
// Look for instructions using constant vectors. Promote each such constant to a
// global variable. Create as few loads of this variable as possible and
// update the uses accordingly.
- bool LocalChange = false;
- SmallPtrSet<Constant *, 8> AlreadyChecked;
-
+ SmallVector<UpdateRecord, 64> Updates;
for (Instruction &I : instructions(&F)) {
// Traverse the operands, looking for constant vectors. Replace them by a
// load of a global variable of constant vector type.
- for (Value *Op : I.operand_values()) {
- Constant *Cst = dyn_cast<Constant>(Op);
+ for (Use &U : I.operands()) {
+ Constant *Cst = dyn_cast<Constant>(U);
// There is no point in promoting global values as they are already
// global. Do not promote constant expressions either, as they may
// require some code expansion.
- if (Cst && !isa<GlobalValue>(Cst) && !isa<ConstantExpr>(Cst) &&
- AlreadyChecked.insert(Cst).second)
- LocalChange |= promoteConstant(Cst);
+ if (!Cst || isa<GlobalValue>(Cst) || isa<ConstantExpr>(Cst))
+ continue;
+
+ // Check if this constant is worth promoting.
+ if (!shouldConvert(*Cst, PromotionCache))
+ continue;
+
+ // Check if this use should be promoted.
+ unsigned OpNo = &U - I.op_begin();
+ if (!shouldConvertUse(Cst, &I, OpNo))
+ continue;
+
+ Updates.emplace_back(Cst, &I, OpNo);
}
}
- return LocalChange;
+
+ if (Updates.empty())
+ return false;
+
+ promoteConstants(F, Updates, PromotionCache);
+ return true;
}
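
The promoteConstants loop above consumes Updates one run at a time: the inner do/while advances U across adjacent records that name the same Constant, and a single promoted global plus one set of insertion points then serves the whole run. A standalone sketch of that run-grouping pattern, with illustrative names in place of the pass's types:

#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Process (key, value) records one run at a time, where a run is a
// maximal span of adjacent records sharing the same key. This mirrors
// the U/First/E iterator walk in promoteConstants above.
void processRuns(const std::vector<std::pair<std::string, int>> &Records) {
  for (auto U = Records.begin(), E = Records.end(); U != E;) {
    auto First = U;
    const std::string &Key = First->first;
    int RunSum = 0;
    do {
      RunSum += U->second; // Stand-in for computeInsertionPoint().
    } while (++U != E && U->first == Key);
    // Stand-in for ensurePromotedGV() + insertDefinitions().
    std::cout << Key << ": " << RunSum << '\n';
  }
}

Note that the pattern only batches records that are adjacent; records for the same key separated by other keys would start a fresh run.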
diff --git a/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
new file mode 100644
index 000000000000..60d8bbd260bb
--- /dev/null
+++ b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
@@ -0,0 +1,182 @@
+//=- AArch64RedundantCopyElimination.cpp - Remove useless copy for AArch64 -=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+// This pass removes unnecessary zero copies in BBs that are targets of
+// cbz/cbnz instructions. For instance, the copy instruction in the code below
+// can be removed because the CBZW jumps to BB#2 when W0 is zero.
+// BB#1:
+// CBZW %W0, <BB#2>
+// BB#2:
+// %W0 = COPY %WZR
+// This pass should be run after register allocation.
+//
+// FIXME: This should be extended to handle any constant other than zero. E.g.,
+// cmp w0, #1
+// b.eq .BB1
+// BB1:
+// mov w0, #1
+//
+// FIXME: This could also be extended to check the whole dominance subtree below
+// the comparison if the compile time regression is acceptable.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-copyelim"
+
+STATISTIC(NumCopiesRemoved, "Number of copies removed.");
+
+namespace llvm {
+void initializeAArch64RedundantCopyEliminationPass(PassRegistry &);
+}
+
+namespace {
+class AArch64RedundantCopyElimination : public MachineFunctionPass {
+ const MachineRegisterInfo *MRI;
+ const TargetRegisterInfo *TRI;
+
+public:
+ static char ID;
+ AArch64RedundantCopyElimination() : MachineFunctionPass(ID) {}
+ bool optimizeCopy(MachineBasicBlock *MBB);
+ bool runOnMachineFunction(MachineFunction &MF) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+ const char *getPassName() const override {
+ return "AArch64 Redundant Copy Elimination";
+ }
+};
+char AArch64RedundantCopyElimination::ID = 0;
+}
+
+INITIALIZE_PASS(AArch64RedundantCopyElimination, "aarch64-copyelim",
+ "AArch64 redundant copy elimination pass", false, false)
+
+static bool guaranteesZeroRegInBlock(MachineInstr &MI, MachineBasicBlock *MBB) {
+ unsigned Opc = MI.getOpcode();
+ // Check if the current basic block is the target block to which the
+ // CBZ/CBNZ instruction jumps when its Wt/Xt is zero.
+ if ((Opc == AArch64::CBZW || Opc == AArch64::CBZX) &&
+ MBB == MI.getOperand(1).getMBB())
+ return true;
+ else if ((Opc == AArch64::CBNZW || Opc == AArch64::CBNZX) &&
+ MBB != MI.getOperand(1).getMBB())
+ return true;
+
+ return false;
+}
+
+bool AArch64RedundantCopyElimination::optimizeCopy(MachineBasicBlock *MBB) {
+ // Check if the current basic block has a single predecessor.
+ if (MBB->pred_size() != 1)
+ return false;
+
+ MachineBasicBlock *PredMBB = *MBB->pred_begin();
+ MachineBasicBlock::iterator CompBr = PredMBB->getLastNonDebugInstr();
+ if (CompBr == PredMBB->end() || PredMBB->succ_size() != 2)
+ return false;
+
+ ++CompBr;
+ do {
+ --CompBr;
+ if (guaranteesZeroRegInBlock(*CompBr, MBB))
+ break;
+ } while (CompBr != PredMBB->begin() && CompBr->isTerminator());
+
+ // If we've not found a CBZ/CBNZ, time to bail out.
+ if (!guaranteesZeroRegInBlock(*CompBr, MBB))
+ return false;
+
+ unsigned TargetReg = CompBr->getOperand(0).getReg();
+ if (!TargetReg)
+ return false;
+ assert(TargetRegisterInfo::isPhysicalRegister(TargetReg) &&
+ "Expect physical register");
+
+ // Remember all registers aliasing with TargetReg.
+ SmallSetVector<unsigned, 8> TargetRegs;
+ for (MCRegAliasIterator AI(TargetReg, TRI, true); AI.isValid(); ++AI)
+ TargetRegs.insert(*AI);
+
+ bool Changed = false;
+ MachineBasicBlock::iterator LastChange = MBB->begin();
+ unsigned SmallestDef = TargetReg;
+ // Remove redundant Copy instructions unless TargetReg is modified.
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;) {
+ MachineInstr *MI = &*I;
+ ++I;
+ if (MI->isCopy() && MI->getOperand(0).isReg() &&
+ MI->getOperand(1).isReg()) {
+
+ unsigned DefReg = MI->getOperand(0).getReg();
+ unsigned SrcReg = MI->getOperand(1).getReg();
+
+ if ((SrcReg == AArch64::XZR || SrcReg == AArch64::WZR) &&
+ !MRI->isReserved(DefReg) &&
+ (TargetReg == DefReg || TRI->isSuperRegister(DefReg, TargetReg))) {
+ DEBUG(dbgs() << "Remove redundant Copy : ");
+ DEBUG((MI)->print(dbgs()));
+
+ MI->eraseFromParent();
+ Changed = true;
+ LastChange = I;
+ NumCopiesRemoved++;
+ SmallestDef =
+ TRI->isSubRegister(SmallestDef, DefReg) ? DefReg : SmallestDef;
+ continue;
+ }
+ }
+
+ if (MI->modifiesRegister(TargetReg, TRI))
+ break;
+ }
+
+ if (!Changed)
+ return false;
+
+ // Otherwise, we have to fix up the use-def chain, starting with the
+ // CBZ/CBNZ. Conservatively mark as much as possible live.
+ CompBr->clearRegisterKills(SmallestDef, TRI);
+
+ if (std::none_of(TargetRegs.begin(), TargetRegs.end(),
+ [&](unsigned Reg) { return MBB->isLiveIn(Reg); }))
+ MBB->addLiveIn(TargetReg);
+
+ // Clear any kills of TargetReg between CompBr and the last removed COPY.
+ for (MachineInstr &MMI :
+ make_range(MBB->begin()->getIterator(), LastChange->getIterator()))
+ MMI.clearRegisterKills(SmallestDef, TRI);
+
+ return true;
+}
+
+bool AArch64RedundantCopyElimination::runOnMachineFunction(
+ MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+ TRI = MF.getSubtarget().getRegisterInfo();
+ MRI = &MF.getRegInfo();
+ bool Changed = false;
+ for (MachineBasicBlock &MBB : MF)
+ Changed |= optimizeCopy(&MBB);
+ return Changed;
+}
+
+FunctionPass *llvm::createAArch64RedundantCopyEliminationPass() {
+ return new AArch64RedundantCopyElimination();
+}
diff --git a/lib/Target/AArch64/AArch64RegisterBankInfo.cpp b/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
new file mode 100644
index 000000000000..0a1831bd9a8c
--- /dev/null
+++ b/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
@@ -0,0 +1,168 @@
+//===- AArch64RegisterBankInfo.cpp -------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file implements the targeting of the RegisterBankInfo class for
+/// AArch64.
+/// \todo This should be generated by TableGen.
+//===----------------------------------------------------------------------===//
+
+#include "AArch64RegisterBankInfo.h"
+#include "AArch64InstrInfo.h" // For XXXRegClassID.
+#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+
+using namespace llvm;
+
+#ifndef LLVM_BUILD_GLOBAL_ISEL
+#error "You shouldn't build this"
+#endif
+
+AArch64RegisterBankInfo::AArch64RegisterBankInfo(const TargetRegisterInfo &TRI)
+ : RegisterBankInfo(AArch64::NumRegisterBanks) {
+ // Initialize the GPR bank.
+ createRegisterBank(AArch64::GPRRegBankID, "GPR");
+ // The GPR register bank is fully defined by all the registers in
+ // GPR64all + its subclasses.
+ addRegBankCoverage(AArch64::GPRRegBankID, AArch64::GPR64allRegClassID, TRI);
+ const RegisterBank &RBGPR = getRegBank(AArch64::GPRRegBankID);
+ (void)RBGPR;
+ assert(RBGPR.covers(*TRI.getRegClass(AArch64::GPR32RegClassID)) &&
+ "Subclass not added?");
+ assert(RBGPR.getSize() == 64 && "GPRs should hold up to 64-bit");
+
+ // Initialize the FPR bank.
+ createRegisterBank(AArch64::FPRRegBankID, "FPR");
+ // The FPR register bank is fully defined by all the registers in
+ // QQQQ + its subclasses.
+ addRegBankCoverage(AArch64::FPRRegBankID, AArch64::QQQQRegClassID, TRI);
+ const RegisterBank &RBFPR = getRegBank(AArch64::FPRRegBankID);
+ (void)RBFPR;
+ assert(RBFPR.covers(*TRI.getRegClass(AArch64::QQRegClassID)) &&
+ "Subclass not added?");
+ assert(RBFPR.covers(*TRI.getRegClass(AArch64::FPR64RegClassID)) &&
+ "Subclass not added?");
+ assert(RBFPR.getSize() == 512 &&
+ "FPRs should hold up to 512-bit via QQQQ sequence");
+
+ // Initialize the CCR bank.
+ createRegisterBank(AArch64::CCRRegBankID, "CCR");
+ addRegBankCoverage(AArch64::CCRRegBankID, AArch64::CCRRegClassID, TRI);
+ const RegisterBank &RBCCR = getRegBank(AArch64::CCRRegBankID);
+ (void)RBCCR;
+ assert(RBCCR.covers(*TRI.getRegClass(AArch64::CCRRegClassID)) &&
+ "Class not added?");
+ assert(RBCCR.getSize() == 32 && "CCR should hold up to 32-bit");
+
+ assert(verify(TRI) && "Invalid register bank information");
+}
+
+unsigned AArch64RegisterBankInfo::copyCost(const RegisterBank &A,
+ const RegisterBank &B,
+ unsigned Size) const {
+ // What do we do with different sizes? For now, a copy is always between
+ // banks of the same size. Other hooks will be introduced for the
+ // different-size cases:
+ // * extract cost.
+ // * build_sequence cost.
+ // TODO: Add more accurate cost for FPR to/from GPR.
+ return RegisterBankInfo::copyCost(A, B, Size);
+}
+
+const RegisterBank &AArch64RegisterBankInfo::getRegBankFromRegClass(
+ const TargetRegisterClass &RC) const {
+ switch (RC.getID()) {
+ case AArch64::FPR8RegClassID:
+ case AArch64::FPR16RegClassID:
+ case AArch64::FPR32RegClassID:
+ case AArch64::FPR64RegClassID:
+ case AArch64::FPR128RegClassID:
+ case AArch64::FPR128_loRegClassID:
+ case AArch64::DDRegClassID:
+ case AArch64::DDDRegClassID:
+ case AArch64::DDDDRegClassID:
+ case AArch64::QQRegClassID:
+ case AArch64::QQQRegClassID:
+ case AArch64::QQQQRegClassID:
+ return getRegBank(AArch64::FPRRegBankID);
+ case AArch64::GPR32commonRegClassID:
+ case AArch64::GPR32RegClassID:
+ case AArch64::GPR32spRegClassID:
+ case AArch64::GPR32sponlyRegClassID:
+ case AArch64::GPR32allRegClassID:
+ case AArch64::GPR64commonRegClassID:
+ case AArch64::GPR64RegClassID:
+ case AArch64::GPR64spRegClassID:
+ case AArch64::GPR64sponlyRegClassID:
+ case AArch64::GPR64allRegClassID:
+ case AArch64::tcGPR64RegClassID:
+ case AArch64::WSeqPairsClassRegClassID:
+ case AArch64::XSeqPairsClassRegClassID:
+ return getRegBank(AArch64::GPRRegBankID);
+ case AArch64::CCRRegClassID:
+ return getRegBank(AArch64::CCRRegBankID);
+ default:
+ llvm_unreachable("Register class not supported");
+ }
+}
+
+RegisterBankInfo::InstructionMappings
+AArch64RegisterBankInfo::getInstrAlternativeMappings(
+ const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ case TargetOpcode::G_OR: {
+ // 32 and 64-bit or can be mapped on either FPR or
+ // GPR for the same cost.
+ const MachineFunction &MF = *MI.getParent()->getParent();
+ const TargetSubtargetInfo &STI = MF.getSubtarget();
+ const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
+ if (Size != 32 && Size != 64)
+ break;
+
+ // If the instruction has any implicit-defs or uses,
+ // do not mess with it.
+ if (MI.getNumOperands() != 3)
+ break;
+ InstructionMappings AltMappings;
+ InstructionMapping GPRMapping(/*ID*/ 1, /*Cost*/ 1, /*NumOperands*/ 3);
+ InstructionMapping FPRMapping(/*ID*/ 2, /*Cost*/ 1, /*NumOperands*/ 3);
+ for (unsigned Idx = 0; Idx != 3; ++Idx) {
+ GPRMapping.setOperandMapping(Idx, Size,
+ getRegBank(AArch64::GPRRegBankID));
+ FPRMapping.setOperandMapping(Idx, Size,
+ getRegBank(AArch64::FPRRegBankID));
+ }
+ AltMappings.emplace_back(std::move(GPRMapping));
+ AltMappings.emplace_back(std::move(FPRMapping));
+ return AltMappings;
+ }
+ default:
+ break;
+ }
+ return RegisterBankInfo::getInstrAlternativeMappings(MI);
+}
+
+void AArch64RegisterBankInfo::applyMappingImpl(
+ const OperandsMapper &OpdMapper) const {
+ switch (OpdMapper.getMI().getOpcode()) {
+ case TargetOpcode::G_OR: {
+ // These IDs must match getInstrAlternativeMappings.
+ assert((OpdMapper.getInstrMapping().getID() == 1 ||
+ OpdMapper.getInstrMapping().getID() == 2) &&
+ "Don't know how to handle that ID");
+ return applyDefaultMapping(OpdMapper);
+ }
+ default:
+ llvm_unreachable("Don't know how to handle that operation");
+ }
+}
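
getInstrAlternativeMappings above reports two equal-cost mappings for a 32- or 64-bit G_OR, one per bank, and leaves the choice to the client. A minimal sketch of how a client might pick among such alternatives by cost; the Mapping struct here is an illustrative stand-in, not the RegisterBankInfo API:

#include <limits>
#include <vector>

// Hypothetical stand-in for an instruction mapping with an ID and cost.
struct Mapping {
  unsigned ID;
  unsigned Cost;
};

// Pick the cheapest alternative; on a tie the first one wins, so with
// the equal-cost GPR/FPR pair above the GPR mapping would be kept
// unless some other heuristic intervenes.
const Mapping *pickMapping(const std::vector<Mapping> &Alts) {
  const Mapping *Best = nullptr;
  unsigned BestCost = std::numeric_limits<unsigned>::max();
  for (const Mapping &M : Alts) {
    if (M.Cost < BestCost) {
      Best = &M;
      BestCost = M.Cost;
    }
  }
  return Best;
}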
diff --git a/lib/Target/AArch64/AArch64RegisterBankInfo.h b/lib/Target/AArch64/AArch64RegisterBankInfo.h
new file mode 100644
index 000000000000..907bcfdea161
--- /dev/null
+++ b/lib/Target/AArch64/AArch64RegisterBankInfo.h
@@ -0,0 +1,69 @@
+//===- AArch64RegisterBankInfo -----------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares the targeting of the RegisterBankInfo class for AArch64.
+/// \todo This should be generated by TableGen.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64REGISTERBANKINFO_H
+#define LLVM_LIB_TARGET_AARCH64_AARCH64REGISTERBANKINFO_H
+
+#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
+
+namespace llvm {
+
+class TargetRegisterInfo;
+
+namespace AArch64 {
+enum {
+ GPRRegBankID = 0, ///< General Purpose Registers: W, X.
+ FPRRegBankID = 1, ///< Floating Point/Vector Registers: B, H, S, D, Q.
+ CCRRegBankID = 2, ///< Conditional register: NZCV.
+ NumRegisterBanks
+};
+} // End AArch64 namespace.
+
+/// This class provides the information for the target register banks.
+class AArch64RegisterBankInfo : public RegisterBankInfo {
+ /// See RegisterBankInfo::applyMapping.
+ void applyMappingImpl(const OperandsMapper &OpdMapper) const override;
+
+public:
+ AArch64RegisterBankInfo(const TargetRegisterInfo &TRI);
+ /// Get the cost of a copy from \p B to \p A, or put differently,
+ /// get the cost of A = COPY B. Since register banks may cover
+ /// different sizes, \p Size specifies the size in bits that will be
+ /// copied around.
+ ///
+ /// \note Since this is a copy, both registers have the same size.
+ unsigned copyCost(const RegisterBank &A, const RegisterBank &B,
+ unsigned Size) const override;
+
+ /// Get a register bank that covers \p RC.
+ ///
+ /// \pre \p RC is a user-defined register class (as opposed to one
+ /// generated by TableGen).
+ ///
+ /// \note The mapping RC -> RegBank could be built while adding the
+ /// coverage for the register banks. However, we do not do it, because,
+ /// at least for now, we only need this information for register classes
+ /// that are used in the description of instructions. In other words,
+ /// there are just a handful of them and we do not want to waste space.
+ ///
+ /// \todo This should be TableGen'ed.
+ const RegisterBank &
+ getRegBankFromRegClass(const TargetRegisterClass &RC) const override;
+
+ /// Get the alternative mappings for \p MI.
+ /// Alternative in the sense of being different from what getInstrMapping returns.
+ InstructionMappings
+ getInstrAlternativeMappings(const MachineInstr &MI) const override;
+};
+} // End llvm namespace.
+#endif
diff --git a/lib/Target/AArch64/AArch64RegisterInfo.cpp b/lib/Target/AArch64/AArch64RegisterInfo.cpp
index 32b4888f2f64..af867da4823d 100644
--- a/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -25,7 +25,6 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetOptions.h"
@@ -51,6 +50,13 @@ AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR() ?
CSR_AArch64_CXX_TLS_Darwin_PE_SaveList :
CSR_AArch64_CXX_TLS_Darwin_SaveList;
+ if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
+ ->supportSwiftError() &&
+ MF->getFunction()->getAttributes().hasAttrSomewhere(
+ Attribute::SwiftError))
+ return CSR_AArch64_AAPCS_SwiftError_SaveList;
+ if (MF->getFunction()->getCallingConv() == CallingConv::PreserveMost)
+ return CSR_AArch64_RT_MostRegs_SaveList;
else
return CSR_AArch64_AAPCS_SaveList;
}
@@ -74,6 +80,12 @@ AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
return CSR_AArch64_AllRegs_RegMask;
if (CC == CallingConv::CXX_FAST_TLS)
return CSR_AArch64_CXX_TLS_Darwin_RegMask;
+ if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
+ ->supportSwiftError() &&
+ MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
+ return CSR_AArch64_AAPCS_SwiftError_RegMask;
+ if (CC == CallingConv::PreserveMost)
+ return CSR_AArch64_RT_MostRegs_RegMask;
else
return CSR_AArch64_AAPCS_RegMask;
}
@@ -190,9 +202,7 @@ bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
// If it's wrong, we'll materialize the constant and still get to the
// object; it's just suboptimal. Negative offsets use the unscaled
// load/store instructions, which have a 9-bit signed immediate.
- if (MFI->getLocalFrameSize() < 256)
- return false;
- return true;
+ return MFI->getLocalFrameSize() >= 256;
}
return false;
@@ -231,9 +241,7 @@ bool AArch64RegisterInfo::requiresFrameIndexScavenging(
bool
AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
- // Only consider eliminating leaf frames.
- if (MFI->hasCalls() || (MF.getTarget().Options.DisableFramePointerElim(MF) &&
- MFI->adjustsStack()))
+ if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI->adjustsStack())
return true;
return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken();
}
@@ -396,8 +404,6 @@ void AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
}
-namespace llvm {
-
unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const {
const AArch64FrameLowering *TFI = getFrameLowering(MF);
@@ -437,5 +443,3 @@ unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
return 16;
}
}
-
-} // namespace llvm
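
The getCalleeSavedRegs/getCallPreservedMask hunks above layer two new cases into the save-list selection: functions that use the swifterror attribute get a SwiftError variant, and the preserve_most calling convention gets the RT_MostRegs list. A simplified sketch of that dispatch shape, with illustrative enum and list names standing in for the real CSR_* arrays:

enum class CC { Default, CXXFastTLS, PreserveMost };

struct FnInfo {
  CC Conv;
  bool IsSplitCSR;    // Stand-in for AArch64FunctionInfo::isSplitCSR().
  bool HasSwiftError; // Stand-in for hasAttrSomewhere(Attribute::SwiftError).
};

// Illustrative list IDs; the real hooks return MCPhysReg arrays and
// register masks generated by TableGen.
enum SaveList {
  AAPCS,
  CXX_TLS_Darwin,
  CXX_TLS_Darwin_PE,
  AAPCS_SwiftError,
  RT_MostRegs
};

SaveList pickSaveList(const FnInfo &F) {
  if (F.Conv == CC::CXXFastTLS)
    return F.IsSplitCSR ? CXX_TLS_Darwin_PE : CXX_TLS_Darwin;
  if (F.HasSwiftError)            // swifterror register must be preserved.
    return AAPCS_SwiftError;
  if (F.Conv == CC::PreserveMost) // Save nearly all registers.
    return RT_MostRegs;
  return AAPCS;                   // Default AAPCS list.
}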
diff --git a/lib/Target/AArch64/AArch64RegisterInfo.td b/lib/Target/AArch64/AArch64RegisterInfo.td
index a8c8b176efa9..5fbaff00a5e7 100644
--- a/lib/Target/AArch64/AArch64RegisterInfo.td
+++ b/lib/Target/AArch64/AArch64RegisterInfo.td
@@ -1,4 +1,4 @@
-//=- AArch64RegisterInfo.td - Describe the AArch64 Regisers --*- tablegen -*-=//
+//=- AArch64RegisterInfo.td - Describe the AArch64 Registers -*- tablegen -*-=//
//
// The LLVM Compiler Infrastructure
//
diff --git a/lib/Target/AArch64/AArch64SchedA53.td b/lib/Target/AArch64/AArch64SchedA53.td
index d709bee7b9eb..93ca079275c8 100644
--- a/lib/Target/AArch64/AArch64SchedA53.td
+++ b/lib/Target/AArch64/AArch64SchedA53.td
@@ -19,13 +19,13 @@
def CortexA53Model : SchedMachineModel {
let MicroOpBufferSize = 0; // Explicitly set to zero since A53 is in-order.
let IssueWidth = 2; // 2 micro-ops are dispatched per cycle.
- let MinLatency = 1 ; // OperandCycles are interpreted as MinLatency.
let LoadLatency = 3; // Optimistic load latency assuming bypass.
// This is overridden by OperandCycles if the
// Itineraries are queried instead.
let MispredictPenalty = 9; // Based on "Cortex-A53 Software Optimisation
// Specification - Instruction Timings"
// v 1.0 Spreadsheet
+ let CompleteModel = 1;
}
@@ -109,6 +109,8 @@ def A53WriteVST2 : SchedWriteRes<[A53UnitLdSt]> { let Latency = 5;
def A53WriteVST3 : SchedWriteRes<[A53UnitLdSt]> { let Latency = 6;
let ResourceCycles = [3]; }
+def : WriteRes<WriteAtomic, []> { let Unsupported = 1; }
+
// Branch
def : WriteRes<WriteBr, [A53UnitB]>;
def : WriteRes<WriteBrReg, [A53UnitB]>;
diff --git a/lib/Target/AArch64/AArch64SchedA57.td b/lib/Target/AArch64/AArch64SchedA57.td
index ca4457af8525..a266351f7ffc 100644
--- a/lib/Target/AArch64/AArch64SchedA57.td
+++ b/lib/Target/AArch64/AArch64SchedA57.td
@@ -30,6 +30,7 @@ def CortexA57Model : SchedMachineModel {
// Enable partial & runtime unrolling. The magic number is chosen based on
// experiments and benchmarking data.
let LoopMicroOpBufferSize = 16;
+ let CompleteModel = 1;
}
//===----------------------------------------------------------------------===//
@@ -96,6 +97,8 @@ def : SchedAlias<WriteV, A57Write_3cyc_1V>;
def : SchedAlias<WriteVLD, A57Write_5cyc_1L>;
def : SchedAlias<WriteVST, A57Write_1cyc_1S>;
+def : WriteRes<WriteAtomic, []> { let Unsupported = 1; }
+
def : WriteRes<WriteSys, []> { let Latency = 1; }
def : WriteRes<WriteBarrier, []> { let Latency = 1; }
def : WriteRes<WriteHint, []> { let Latency = 1; }
diff --git a/lib/Target/AArch64/AArch64SchedCyclone.td b/lib/Target/AArch64/AArch64SchedCyclone.td
index a2a180237789..9fd3ae6818e5 100644
--- a/lib/Target/AArch64/AArch64SchedCyclone.td
+++ b/lib/Target/AArch64/AArch64SchedCyclone.td
@@ -1,4 +1,4 @@
-//=- ARMSchedCyclone.td - AArch64 Cyclone Scheduling Defs ----*- tablegen -*-=//
+//=- AArch64SchedCyclone.td - Cyclone Scheduling Definitions -*- tablegen -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -17,6 +17,7 @@ def CycloneModel : SchedMachineModel {
let MicroOpBufferSize = 192; // Based on the reorder buffer.
let LoadLatency = 4; // Optimistic load latency.
let MispredictPenalty = 16; // 14-19 cycles are typical.
+ let CompleteModel = 1;
}
//===----------------------------------------------------------------------===//
@@ -107,7 +108,7 @@ def WriteX : SchedWriteRes<[]> { let Latency = 0; }
// The move is replaced by a single nop micro-op.
// MOVZ Rd, #0
// AND Rd, Rzr, #imm
-def WriteZPred : SchedPredicate<[{TII->isGPRZero(MI)}]>;
+def WriteZPred : SchedPredicate<[{TII->isGPRZero(*MI)}]>;
def WriteImmZ : SchedWriteVariant<[
SchedVar<WriteZPred, [WriteX]>,
SchedVar<NoSchedPred, [WriteImm]>]>;
@@ -116,8 +117,8 @@ def : InstRW<[WriteImmZ], (instrs MOVZWi,MOVZXi,ANDWri,ANDXri)>;
// Move GPR is a register rename and single nop micro-op.
// ORR Xd, XZR, Xm
// ADD Xd, Xn, #0
-def WriteIMovPred : SchedPredicate<[{TII->isGPRCopy(MI)}]>;
-def WriteVMovPred : SchedPredicate<[{TII->isFPRCopy(MI)}]>;
+def WriteIMovPred : SchedPredicate<[{TII->isGPRCopy(*MI)}]>;
+def WriteVMovPred : SchedPredicate<[{TII->isFPRCopy(*MI)}]>;
def WriteMov : SchedWriteVariant<[
SchedVar<WriteIMovPred, [WriteX]>,
SchedVar<WriteVMovPred, [WriteX]>,
@@ -726,7 +727,7 @@ def : InstRW<[WriteVLDShuffle, WriteAdr, WriteV, WriteV],
def : InstRW<[WriteVLDShuffle, WriteVLDShuffle, WriteV],
(instrs LD3Rv1d,LD3Rv2d)>;
def : InstRW<[WriteVLDShuffle, WriteAdr, WriteVLDShuffle, WriteV],
- (instrs LD3Rv2d_POST,LD3Rv2d_POST)>;
+ (instrs LD3Rv1d_POST,LD3Rv2d_POST)>;
def : InstRW<[WriteVLDShuffle, WriteVLDShuffle, WriteV, WriteV],
(instregex "LD4Fourv(8b|4h|2s)$")>;
@@ -851,6 +852,9 @@ def : InstRW<[WriteAdr, WriteVSTPairShuffle], (instregex "ST4i(8|16|32)_POST")>;
def : InstRW<[WriteVSTShuffle, WriteVSTShuffle], (instrs ST4i64)>;
def : InstRW<[WriteAdr, WriteVSTShuffle, WriteVSTShuffle],(instrs ST4i64_POST)>;
+// Atomic operations are not supported.
+def : WriteRes<WriteAtomic, []> { let Unsupported = 1; }
+
//---
// Unused SchedRead types
//---
diff --git a/lib/Target/AArch64/AArch64SchedKryo.td b/lib/Target/AArch64/AArch64SchedKryo.td
new file mode 100644
index 000000000000..4e491a04c78d
--- /dev/null
+++ b/lib/Target/AArch64/AArch64SchedKryo.td
@@ -0,0 +1,133 @@
+//==- AArch64SchedKryo.td - Qualcomm Kryo Scheduling Defs ---*- tablegen -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the machine model for Qualcomm Kryo to support
+// instruction scheduling and other instruction cost heuristics.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// The issue width is set to five, matching the five issue queues for expanded
+// uops. Now, the latency spreadsheet has information based on fragmented uops,
+// but these do not actually take up an issue queue.
+
+def KryoModel : SchedMachineModel {
+ let IssueWidth = 5; // 5-wide issue for expanded uops
+ let MicroOpBufferSize = 128; // Out-of-order with temporary unified issue buffer
+ let LoadLatency = 4; // Optimistic load latency
+ let MispredictPenalty = 14; // Fetch + Decode/Rename/Dispatch + Branch
+
+ // Enable partial & runtime unrolling. The magic number is chosen based on
+ // experiments and benchmarking data.
+ let LoopMicroOpBufferSize = 16;
+ let CompleteModel = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Define each kind of processor resource and number available on Kryo.
+
+let SchedModel = KryoModel in {
+ def KryoUnitXA : ProcResource<1>; // Type X(A) micro-ops
+ def KryoUnitXB : ProcResource<1>; // Type X(B) micro-ops
+ def KryoUnitYA : ProcResource<1>; // Type Y(A) micro-ops
+ def KryoUnitYB : ProcResource<1>; // Type Y(B) micro-ops
+ def KryoUnitX : ProcResGroup<[KryoUnitXA, // Type X micro-ops
+ KryoUnitXB]>;
+ def KryoUnitY : ProcResGroup<[KryoUnitYA, // Type Y micro-ops
+ KryoUnitYB]>;
+ def KryoUnitXY : ProcResGroup<[KryoUnitXA, // Type XY micro-ops
+ KryoUnitXB,
+ KryoUnitYA,
+ KryoUnitYB]>;
+ def KryoUnitLSA : ProcResource<1>; // Type LS(A) micro-ops
+ def KryoUnitLSB : ProcResource<1>; // Type LS(B) micro-ops
+ def KryoUnitLS : ProcResGroup<[KryoUnitLSA, // Type LS micro-ops
+ KryoUnitLSB]>;
+}
+
+let SchedModel = KryoModel in {
+
+//===----------------------------------------------------------------------===//
+// Map the target-defined scheduler read/write resources and latency for
+// Kryo.
+
+def : WriteRes<WriteImm, [KryoUnitXY]> { let Latency = 1; }
+def : WriteRes<WriteI, [KryoUnitXY]> { let Latency = 1; }
+def : WriteRes<WriteISReg, [KryoUnitXY, KryoUnitXY]>
+ { let Latency = 2; let NumMicroOps = 2; }
+def : WriteRes<WriteIEReg, [KryoUnitXY, KryoUnitXY]>
+ { let Latency = 2; let NumMicroOps = 2; }
+def : WriteRes<WriteExtr, [KryoUnitXY, KryoUnitX]>
+ { let Latency = 2; let NumMicroOps = 2; }
+def : WriteRes<WriteIS, [KryoUnitXY]> { let Latency = 2; }
+def : WriteRes<WriteID32, [KryoUnitXA, KryoUnitY]>
+ { let Latency = 8; let NumMicroOps = 1; } // Fragment -1
+def : WriteRes<WriteID64, [KryoUnitXA, KryoUnitY]>
+ { let Latency = 8; let NumMicroOps = 1; } // Fragment -1
+def : WriteRes<WriteIM32, [KryoUnitX]> { let Latency = 5; }
+def : WriteRes<WriteIM64, [KryoUnitX]> { let Latency = 5; }
+def : WriteRes<WriteBr, [KryoUnitXY]> { let Latency = 1; }
+def : WriteRes<WriteBrReg, [KryoUnitXY]> { let Latency = 1; }
+def : WriteRes<WriteLD, [KryoUnitLS]> { let Latency = 4; }
+def : WriteRes<WriteST, [KryoUnitLS]> { let Latency = 4; }
+def : WriteRes<WriteSTP, [KryoUnitLS]> { let Latency = 4; }
+def : WriteRes<WriteAdr, [KryoUnitXY]> { let Latency = 6; }
+def : WriteRes<WriteLDIdx, [KryoUnitLS]> { let Latency = 4; }
+def : WriteRes<WriteSTIdx, [KryoUnitLS]> { let Latency = 4; }
+def : WriteRes<WriteF, [KryoUnitXY, KryoUnitXY]>
+ { let Latency = 3; let NumMicroOps = 2; }
+def : WriteRes<WriteFCmp, [KryoUnitXY]> { let Latency = 2; }
+def : WriteRes<WriteFCvt, [KryoUnitX]> { let Latency = 4; }
+def : WriteRes<WriteFCopy, [KryoUnitXY]> { let Latency = 6; }
+def : WriteRes<WriteFImm, [KryoUnitXY]> { let Latency = 6; }
+def : WriteRes<WriteFMul, [KryoUnitX, KryoUnitX]>
+ { let Latency = 6; let NumMicroOps = 2; }
+def : WriteRes<WriteFDiv, [KryoUnitXA, KryoUnitY]>
+ { let Latency = 12; let NumMicroOps = 2; } // Fragment -1 / NoRSV +1
+def : WriteRes<WriteV, [KryoUnitXY]> { let Latency = 6; }
+def : WriteRes<WriteVLD, [KryoUnitLS]> { let Latency = 4; }
+def : WriteRes<WriteVST, [KryoUnitLS]> { let Latency = 4; }
+
+def : WriteRes<WriteSys, []> { let Latency = 1; }
+def : WriteRes<WriteBarrier, []> { let Latency = 1; }
+def : WriteRes<WriteHint, []> { let Latency = 1; }
+
+def : WriteRes<WriteLDHi, []> { let Latency = 4; }
+
+def : WriteRes<WriteAtomic, []> { let Unsupported = 1; }
+
+// No forwarding logic is modelled yet.
+def : ReadAdvance<ReadI, 0>;
+def : ReadAdvance<ReadISReg, 0>;
+def : ReadAdvance<ReadIEReg, 0>;
+def : ReadAdvance<ReadIM, 0>;
+def : ReadAdvance<ReadIMA, 0>;
+def : ReadAdvance<ReadID, 0>;
+def : ReadAdvance<ReadExtrHi, 0>;
+def : ReadAdvance<ReadAdrBase, 0>;
+def : ReadAdvance<ReadVLD, 0>;
+
+
+//===----------------------------------------------------------------------===//
+// Specialize the coarse model by associating instruction groups with the
+// subtarget-defined types. As the model is refined, this will override most
+// of the above SchedWriteRes and SchedAlias mappings.
+
+// Miscellaneous
+// -----------------------------------------------------------------------------
+
+def : InstRW<[WriteI], (instrs COPY)>;
+
+
+// Detailed Refinements
+// -----------------------------------------------------------------------------
+include "AArch64SchedKryoDetails.td"
+
+
+} // SchedModel = KryoModel
diff --git a/lib/Target/AArch64/AArch64SchedKryoDetails.td b/lib/Target/AArch64/AArch64SchedKryoDetails.td
new file mode 100644
index 000000000000..426ae6103e4b
--- /dev/null
+++ b/lib/Target/AArch64/AArch64SchedKryoDetails.td
@@ -0,0 +1,2358 @@
+//=- AArch64SchedKryoDetails.td - QC Kryo Scheduling Defs ----*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the uop and latency details for the machine model for the
+// Qualcomm Kryo subtarget.
+//
+//===----------------------------------------------------------------------===//
+
+def KryoWrite_3cyc_X_noRSV_138ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_X_noRSV_138ln],
+ (instregex "(S|U)R?SRA(d|(v2i32|v4i16|v8i8)_shift)")>;
+
+def KryoWrite_3cyc_X_X_139ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_X_X_139ln],
+ (instregex "(S|U)R?SRA(v2i64|v4i32|v8i16|v16i8)_shift")>;
+
+def KryoWrite_4cyc_XY_XY_noRSV_172ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 4; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_4cyc_XY_XY_noRSV_172ln],
+ (instregex "(S|U)ABA(v8i8|v4i16|v2i32)")>;
+def KryoWrite_4cyc_XY_XY_XY_XY_178ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY, KryoUnitXY, KryoUnitXY]> {
+ let Latency = 4; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_4cyc_XY_XY_XY_XY_178ln],
+ (instregex "(S|U)ABA(v16i8|v8i16|v4i32)")>;
+def KryoWrite_3cyc_XY_XY_XY_XY_177ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY, KryoUnitXY, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_XY_XY_177ln],
+ (instregex "(S|U)ABALv.*")>;
+def KryoWrite_3cyc_XY_XY_166ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_166ln],
+ (instregex "(S|U)(ABD|QSUB|RHADD)(v16i8|v8i16|v4i32|v2i64)")>;
+def KryoWrite_3cyc_XY_noRSV_159ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_159ln],
+ (instregex "(S|U)(ABD|RHADD)(v8i8|v4i16|v2i32)")>;
+def KryoWrite_3cyc_XY_XY_165ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_165ln],
+ (instregex "(S|U)ABDLv.*")>;
+def KryoWrite_3cyc_X_noRSV_154ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_X_noRSV_154ln],
+ (instregex "(S|U)ADALP(v8i8|v4i16|v2i32)_v.*")>;
+def KryoWrite_3cyc_X_X_155ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_X_X_155ln],
+ (instregex "(S|U)ADALP(v16i8|v8i16|v4i32)_v.*")>;
+def KryoWrite_2cyc_XY_XY_151ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_151ln],
+ (instregex "(S|U)(ADD|SUB)Lv.*")>;
+def KryoWrite_2cyc_XY_noRSV_148ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_148ln],
+ (instregex "((S|U)ADDLP|ABS)(v2i32|v4i16|v8i8)(_v.*)?")>;
+def KryoWrite_2cyc_XY_XY_150ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_150ln],
+ (instregex "((S|U)ADDLP|ABS)(v2i64|v4i32|v8i16|v16i8)(_v.*)?")>;
+def KryoWrite_3cyc_XY_XY_XY_noRSV_179ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_XY_noRSV_179ln],
+ (instrs SADDLVv4i32v, UADDLVv4i32v)>;
+def KryoWrite_5cyc_XY_XY_XY_noRSV_180ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY, KryoUnitXY]> {
+ let Latency = 5; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_5cyc_XY_XY_XY_noRSV_180ln],
+ (instrs SADDLVv8i16v, UADDLVv8i16v)>;
+def KryoWrite_6cyc_XY_XY_X_noRSV_181ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY, KryoUnitX]> {
+ let Latency = 6; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_6cyc_XY_XY_X_noRSV_181ln],
+ (instrs SADDLVv16i8v, UADDLVv16i8v)>;
+def KryoWrite_3cyc_XY_noRSV_158ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_158ln],
+ (instrs SADDLVv4i16v, UADDLVv4i16v, ADDVv4i16v)>;
+def KryoWrite_4cyc_X_noRSV_169ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_noRSV_169ln],
+ (instrs SADDLVv8i8v, UADDLVv8i8v, ADDVv8i8v)>;
+def KryoWrite_2cyc_XY_XY_XY_XY_176ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY, KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_XY_XY_176ln],
+ (instregex "(S|U)(ADDW|SUBW)v.*")>;
+def KryoWrite_4cyc_X_noRSV_40ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_noRSV_40ln],
+ (instregex "(S|U)CVTFS(W|X)(D|S)ri")>;
+def KryoWrite_4cyc_X_noRSV_97ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_noRSV_97ln],
+ (instregex "(S|U)CVTFU(W|X)(D|S)ri")>;
+def KryoWrite_4cyc_X_noRSV_110ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_noRSV_110ln],
+ (instregex "(S|U)CVTF(v1i32|v2i32|v1i64|v2f32|d|s)(_shift)?")>;
+def KryoWrite_4cyc_X_X_114ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_X_114ln],
+ (instregex "(S|U)CVTF(v2i64|v4i32|v2f64|v4f32)(_shift)?")>;
+def KryoWrite_1cyc_XA_Y_98ln :
+ SchedWriteRes<[KryoUnitXA, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XA_Y_98ln],
+ (instregex "(S|U)DIV(_Int)?(W|X)r")>;
+def KryoWrite_2cyc_XY_XY_152ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_152ln],
+ (instregex "(S|U)H(ADD|SUB)(v16i8|v8i16|v4i32)")>;
+def KryoWrite_2cyc_XY_noRSV_149ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_149ln],
+ (instregex "((S|U)H(ADD|SUB)|ADDP)(v8i8|v4i16|v2i32)")>;
+def KryoWrite_4cyc_X_70ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_4cyc_X_70ln],
+ (instregex "(S|U)(MADDL|MSUBL)rrr")>;
+def KryoWrite_4cyc_X_X_191ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_X_191ln],
+ (instregex "(S|U|SQD)(MLAL|MLSL|MULL)v.*")>;
+def KryoWrite_1cyc_XY_195ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_XY_195ln],
+ (instregex "(S|U)MOVv.*")>;
+def KryoWrite_5cyc_X_71ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_5cyc_X_71ln],
+ (instrs SMULHrr, UMULHrr)>;
+def KryoWrite_3cyc_XY_noRSV_186ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_186ln],
+ (instregex "^(S|U)QADD(v8i8|v4i16|v2i32)")>;
+def KryoWrite_3cyc_XY_XY_187ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_187ln],
+ (instregex "^(S|U)QADD(v16i8|v8i16|v4i32|v2i64)")>;
+def KryoWrite_3cyc_XY_noRSV_69ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_69ln],
+ (instregex "(S|U|SU|US)QADD(v1i8|v1i16|v2i16|v1i32|v1i64)")>;
+def KryoWrite_3cyc_XY_noRSV_248ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_248ln],
+ (instregex "(S|U)QSHLU?(d|s|h|b|(v8i8|v4i16|v2i32)_shift)$")>;
+def KryoWrite_3cyc_XY_XY_250ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_250ln],
+ (instregex "(S|U)(QSHLU?|RSHR)(v16i8|v8i16|v4i32|v2i64)_shift$")>;
+def KryoWrite_3cyc_XY_noRSV_246ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_246ln],
+ (instregex "(S|U)(QSHL|RSHL|QRSHL)(v1i8|v1i16|v1i32|v1i64|v8i8|v4i16|v2i32)$")>;
+def KryoWrite_3cyc_XY_XY_251ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_251ln],
+ (instregex "(S|U)(QSHL|RSHL|QRSHL)(v16i8|v8i16|v4i32|v2i64)$")>;
+def KryoWrite_6cyc_XY_X_238ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitX]> {
+ let Latency = 6; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_6cyc_XY_X_238ln],
+ (instregex "((S|U)QR?SHRN|SQR?SHRUN)(v16i8|v8i16|v4i32)_shift$")>;
+def KryoWrite_3cyc_XY_noRSV_249ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_249ln],
+ (instregex "((S|U)QR?SHRN|SQR?SHRUN)(s|h|b)?")>;
+def KryoWrite_6cyc_XY_X_noRSV_252ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitX]> {
+ let Latency = 6; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_6cyc_XY_X_noRSV_252ln],
+ (instregex "((S|U)QR?SHRN|SQR?SHRUN)(v8i8|v4i16|v2i32)_shift?")>;
+def KryoWrite_3cyc_XY_noRSV_161ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_161ln],
+ (instregex "(S|U)QSUB(v8i8|v4i16|v2i32|v1i64|v1i32|v1i16|v1i8)")>;
+def KryoWrite_3cyc_XY_noRSV_163ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_163ln],
+ (instregex "(S|U)QXTU?N(v16i8|v8i16|v4i32|v8i8|v4i16|v2i32)")>;
+def KryoWrite_3cyc_XY_noRSV_162ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_162ln],
+ (instregex "(S|U)QXTU?N(v1i8|v1i16|v1i32)")>;
+def KryoWrite_3cyc_XY_noRSV_247ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_247ln],
+ (instregex "(S|U)RSHR(d|(v8i8|v4i16|v2i32)_shift)$")>;
+def KryoWrite_2cyc_XY_noRSV_239ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_239ln],
+ (instregex "(S|U)SHL(d|v8i8|v4i16|v2i32|v1i64)$")>;
+def KryoWrite_2cyc_XY_XY_243ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_243ln],
+ (instregex "(S|U)SHL(v16i8|v8i16|v4i32|v2i64)$")>;
+def KryoWrite_2cyc_XY_XY_241ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_241ln],
+ (instregex "(S|U)?SHLL(v16i8|v8i16|v4i32|v8i8|v4i16|v2i32)(_shift)?$")>;
+def KryoWrite_2cyc_XY_noRSV_240ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_240ln],
+ (instregex "((S|U)SHR|SHL)(d|(v8i8|v4i16|v2i32)_shift)$")>;
+def KryoWrite_2cyc_XY_XY_242ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_242ln],
+ (instregex "((S|U)SHR|SHL)(v16i8|v8i16|v4i32|v2i64)_shift$")>;
+def KryoWrite_2cyc_XY_XY_183ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_183ln],
+ (instregex "(S|U)(MAX|MIN)P?(v16i8|v8i16|v4i32)")>;
+def KryoWrite_2cyc_XY_noRSV_182ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_182ln],
+ (instregex "(S|U)(MAX|MIN)P?(v8i8|v4i16|v2i32)")>;
+def KryoWrite_3cyc_XY_noRSV_184ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_184ln],
+ (instregex "(S|U)(MAX|MIN)V(v4i16v|v8i8v|v4i32)")>;
+def KryoWrite_4cyc_X_noRSV_185ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_noRSV_185ln],
+ (instregex "(S|U)(MAX|MIN)V(v16i8v|v8i16v)")>;
+def KryoWrite_2cyc_XY_noRSV_67ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_67ln],
+ (instrs ABSv1i64)>;
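+// The ReadI/ReadISReg entries that follow some writes below are
+// SchedRead operands (from AArch64Schedule.td) for the GPR source
+// operands; they allow read-advance adjustments rather than reserving
+// any unit.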
+def KryoWrite_1cyc_XY_63ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_XY_63ln, ReadI, ReadI],
+ (instregex "ADC.*")>;
+def KryoWrite_1cyc_XY_63_1ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_XY_63_1ln],
+ (instregex "ADR.*")>;
+def KryoWrite_1cyc_XY_62ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_XY_62ln, ReadI],
+ (instregex "ADDS?(W|X)ri")>;
+def KryoWrite_2cyc_XY_XY_64ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_64ln, ReadI, ReadI],
+ (instregex "ADDS?(W|X)r(r|s|x)(64)?")>;
+def KryoWrite_1cyc_XY_noRSV_65ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_65ln],
+ (instrs ADDv1i64)>;
+def KryoWrite_1cyc_XY_noRSV_144ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_144ln],
+ (instregex "(ADD|SUB)(v8i8|v4i16|v2i32|v1i64)")>;
+def KryoWrite_1cyc_XY_XY_146ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_XY_146ln],
+ (instregex "(ADD|SUB)(v16i8|v8i16|v4i32|v2i64)")>;
+def KryoWrite_4cyc_XY_X_noRSV_171ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_4cyc_XY_X_noRSV_171ln],
+ (instregex "(ADD|SUB)HNv.*")>;
+def KryoWrite_1cyc_XY_noRSV_66ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_66ln],
+ (instrs ADDPv2i64p)>;
+def KryoWrite_2cyc_XY_XY_153ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_153ln],
+ (instregex "ADDP(v16i8|v8i16|v4i32|v2i64)")>;
+def KryoWrite_3cyc_XY_XY_noRSV_170ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_noRSV_170ln],
+ (instrs ADDVv4i32v)>;
+def KryoWrite_4cyc_XY_XY_noRSV_173ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 4; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_4cyc_XY_XY_noRSV_173ln],
+ (instrs ADDVv8i16v)>;
+def KryoWrite_5cyc_XY_X_noRSV_174ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_5cyc_XY_X_noRSV_174ln],
+ (instrs ADDVv16i8v)>;
+def KryoWrite_3cyc_XY_XY_X_X_27ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY, KryoUnitX, KryoUnitX]> {
+ let Latency = 3; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_X_X_27ln],
+ (instrs AESDrr, AESErr)>;
+def KryoWrite_2cyc_X_X_22ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_X_X_22ln],
+ (instrs AESIMCrr, AESMCrr)>;
+def KryoWrite_1cyc_XY_noRSV_76ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_76ln],
+ (instregex "((AND|ORN|EOR|EON)S?(Wr[rsi]|v8i8|v4i16|v2i32)|(ORR|BIC)S?(Wr[rs]|v8i8|v4i16|v2i32))")>;
+def KryoWrite_1cyc_XY_XY_79ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_XY_79ln],
+ (instregex "((AND|ORN|EOR|EON)S?(Xr[rsi]|v16i8|v8i16|v4i32)|(ORR|BIC)S?(Xr[rs]|v16i8|v8i16|v4i32))")>;
+def KryoWrite_1cyc_X_72ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_X_72ln],
+ (instregex "(S|U)?BFM.*")>;
+def KryoWrite_1cyc_XY_noRSV_77ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_77ln],
+ (instregex "(BIC|ORR)S?Wri")>;
+def KryoWrite_1cyc_XY_XY_78ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_XY_78ln],
+ (instregex "(BIC|ORR)S?Xri")>;
+def KryoWrite_1cyc_X_noRSV_74ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_X_noRSV_74ln],
+ (instrs BIFv8i8, BITv8i8, BSLv8i8)>;
+def KryoWrite_1cyc_X_X_75ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_X_X_75ln],
+ (instrs BIFv16i8, BITv16i8, BSLv16i8)>;
+def KryoWrite_0cyc_noRSV_11ln :
+ SchedWriteRes<[]> {
+ let Latency = 0; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_0cyc_noRSV_11ln],
+ (instrs BRK, DCPS1, DCPS2, DCPS3, HLT, HVC, ISB, HINT, SMC, SVC)>;
+def KryoWrite_0cyc_XY_16ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 0; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_0cyc_XY_16ln, ReadI],
+ (instregex "(CCMN|CCMP)(W|X)i")>;
+def KryoWrite_0cyc_XY_16_1ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 0; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_0cyc_XY_16_1ln, ReadI, ReadI],
+ (instregex "(CCMN|CCMP)(W|X)r")>;
+def KryoWrite_2cyc_XY_3ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_2cyc_XY_3ln, ReadI],
+ (instregex "(CLS|CLZ)(W|X)r")>;
+def KryoWrite_2cyc_XY_noRSV_7ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_7ln],
+ (instregex "(CLS|CLZ|CNT)(v4i32|v8i16|v16i8)")>;
+def KryoWrite_2cyc_XY_XY_8ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_8ln],
+ (instregex "(CLS|CLZ|CNT)(v2i32|v4i16|v8i8)")>;
+def KryoWrite_2cyc_XY_noRSV_80ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_80ln],
+ (instregex "CM(EQ|GE|HS|GT|HI|TST)(v8i8|v4i16|v2i32|v1i64)$")>;
+def KryoWrite_2cyc_XY_XY_83ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_83ln],
+ (instregex "CM(EQ|GE|HS|GT|HI|TST)(v16i8|v8i16|v4i32|v2i64)$")>;
+def KryoWrite_2cyc_XY_noRSV_81ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_81ln],
+ (instregex "CM(EQ|LE|GE|GT|LT)(v8i8|v4i16|v2i32|v1i64)rz$")>;
+def KryoWrite_2cyc_XY_XY_82ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_82ln],
+ (instregex "CM(EQ|LE|GE|GT|LT)(v16i8|v8i16|v4i32|v2i64)rz$")>;
+def KryoWrite_3cyc_XY_4ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_3cyc_XY_4ln, ReadI, ReadISReg],
+ (instregex "CRC32.*")>;
+def KryoWrite_1cyc_XY_20ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_XY_20ln, ReadI, ReadI],
+ (instregex "CSEL(W|X)r")>;
+def KryoWrite_1cyc_X_17ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_X_17ln, ReadI, ReadI],
+ (instregex "(CSINC|CSNEG)(W|X)r")>;
+def KryoWrite_1cyc_XY_18ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_XY_18ln, ReadI, ReadI],
+ (instregex "(CSINV)(W|X)r")>;
+def KryoWrite_3cyc_LS_X_13ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitX]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_X_13ln],
+ (instrs DRPS)>;
+def KryoWrite_0cyc_LS_10ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 0; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_0cyc_LS_10ln],
+ (instrs DSB, DMB, CLREX)>;
+def KryoWrite_1cyc_X_noRSV_196ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_X_noRSV_196ln],
+ (instregex "DUP(v8i8|v4i16|v2i32)(gpr|lane)")>;
+def KryoWrite_1cyc_X_X_197ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_X_X_197ln],
+ (instregex "DUP(v16i8|v8i16|v4i32|v2i64)(gpr|lane)")>;
+def KryoWrite_3cyc_LS_LS_X_15ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS, KryoUnitX]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_X_15ln],
+ (instrs ERET)>;
+def KryoWrite_1cyc_X_noRSV_207ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_X_noRSV_207ln],
+ (instrs EXTv8i8)>;
+def KryoWrite_1cyc_X_X_212ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_X_X_212ln],
+ (instrs EXTv16i8)>;
+def KryoWrite_2cyc_XY_X_136ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitX]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_X_136ln],
+ (instrs EXTRWrri, EXTRXrri)>;
+def KryoWrite_2cyc_XY_noRSV_35ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_35ln],
+ (instregex "F(MAX|MIN)(NM)?P?(D|S)rr")>;
+def KryoWrite_2cyc_XY_XY_106ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_106ln],
+ (instregex "(F(MAX|MIN)(NM)?P?|FAC(GE|GT)|FCM(EQ|GE|GT))(v2i64p|v2f64|v4f32)")>;
+def KryoWrite_2cyc_XY_noRSV_104ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_104ln],
+ (instregex "(F(MAX|MIN)(NM)?P?|FAC(GE|GT)|FCM(EQ|GE|GT))(v2f32|v2i32p)")>;
+def KryoWrite_3cyc_XY_noRSV_107ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_107ln],
+ (instregex "F(MAX|MIN)(NM)?Vv4i32v")>;
+def KryoWrite_3cyc_XY_noRSV_101ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_101ln],
+ (instregex "FABD(32|64|v2f32)")>;
+def KryoWrite_3cyc_XY_XY_103ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_103ln],
+ (instregex "(FABD|FADD|FSUB|FADDP)(v4f32|v2f64)")>;
+def KryoWrite_1cyc_XY_noRSV_48ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_48ln],
+ (instregex "F(ABS|NEG)(D|S)r")>;
+def KryoWrite_1cyc_XY_noRSV_124ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_124ln],
+ (instregex "F(ABS|NEG)v2f32")>;
+def KryoWrite_1cyc_XY_XY_125ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_XY_125ln],
+ (instregex "F(ABS|NEG)(v2f64|v4f32)")>;
+def KryoWrite_2cyc_XY_noRSV_33ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_33ln],
+ (instregex "(FAC(GE|GT)|FCM(EQ|GE|GT))(32|64)")>;
+def KryoWrite_3cyc_XY_noRSV_30ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_30ln],
+ (instregex "(FADD|FSUB)(D|S)rr")>;
+def KryoWrite_3cyc_XY_noRSV_100ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_100ln],
+ (instregex "(FADD|FSUB|FADDP)v2f32")>;
+def KryoWrite_3cyc_XY_noRSV_29ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_29ln],
+ (instregex "FADDP(v2i32p|v2i64p)")>;
+def KryoWrite_0cyc_XY_31ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 0; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_0cyc_XY_31ln],
+ (instregex "FCCMPE?(D|S)rr")>;
+def KryoWrite_2cyc_XY_noRSV_34ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_34ln],
+ (instregex "FCM(EQ|LE|GE|GT|LT)(v1i32|v1i64)rz")>;
+def KryoWrite_2cyc_XY_XY_36ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_36ln],
+ (instregex "FCM(EQ|LE|GE|GT|LT)(v2i64|v4i32)rz")>;
+def KryoWrite_2cyc_XY_noRSV_105ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_105ln],
+ (instregex "FCM(EQ|LE|GE|GT|LT)v2i32rz")>;
+def KryoWrite_0cyc_XY_32ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 0; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_0cyc_XY_32ln],
+ (instregex "FCMPE?(D|S)r(r|i)")>;
+def KryoWrite_1cyc_XY_noRSV_49ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_49ln],
+ (instrs FCSELDrrr, FCSELSrrr)>;
+def KryoWrite_4cyc_X_noRSV_41ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_noRSV_41ln],
+ (instrs FCVTDHr, FCVTDSr, FCVTHDr, FCVTHSr, FCVTSDr, FCVTSHr)>;
+def KryoWrite_4cyc_X_38ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_4cyc_X_38ln],
+ (instregex "FCVT(((A|N|M|P)(S|U)(S|U)|Z(S|U)_Int(S|U))(W|X)(D|S)ri?|Z(S|U)(d|s))$")>;
+def KryoWrite_4cyc_X_noRSV_113ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_noRSV_113ln],
+ (instregex "FCVT((A|N|M|P)(S|U)|Z(S|U)_Int)(v1i32|v1i64|v2f32)$")>;
+def KryoWrite_4cyc_X_X_117ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_X_117ln],
+ (instregex "FCVT((A|N|M|P)(S|U)|Z(S|U)_Int)(v4f32|v2f64)$")>;
+def KryoWrite_5cyc_X_X_XY_noRSV_119ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitXY]> {
+ let Latency = 5; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_5cyc_X_X_XY_noRSV_119ln],
+ (instregex "FCVTX?N(v2f32|v4f32|v2i32|v4i16|v4i32|v8i16)$")>;
+def KryoWrite_4cyc_X_X_116ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_X_116ln],
+ (instregex "FCVTL(v2i32|v4i16|v4i32|v8i16)$")>;
+def KryoWrite_4cyc_X_noRSV_112ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_noRSV_112ln],
+ (instrs FCVTXNv1i64)>;
+def KryoWrite_4cyc_X_37ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_4cyc_X_37ln],
+ (instregex "FCVTZ(S|U)(S|U)(W|X)(D|S)ri?$")>;
+def KryoWrite_4cyc_X_noRSV_111ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_noRSV_111ln],
+ (instregex "FCVTZ(S|U)(v2f32|v1i32|v1i64|v2i32(_shift)?)$")>;
+def KryoWrite_4cyc_X_X_115ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_X_115ln],
+ (instregex "FCVTZ(S|U)(v2f64|v4f32|(v2i64|v4i32)(_shift)?)$")>;
+def KryoWrite_1cyc_XA_Y_noRSV_43ln :
+ SchedWriteRes<[KryoUnitXA, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_1cyc_XA_Y_noRSV_43ln],
+ (instrs FDIVDrr, FDIVSrr)>;
+def KryoWrite_1cyc_XA_Y_noRSV_121ln :
+ SchedWriteRes<[KryoUnitXA, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_1cyc_XA_Y_noRSV_121ln],
+ (instrs FDIVv2f32)>;
+def KryoWrite_1cyc_XA_Y_XA_Y_123ln :
+ SchedWriteRes<[KryoUnitXA, KryoUnitY, KryoUnitXA, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_1cyc_XA_Y_XA_Y_123ln],
+ (instrs FDIVv2f64, FDIVv4f32)>;
+def KryoWrite_5cyc_X_noRSV_55ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_5cyc_X_noRSV_55ln],
+ (instregex "FN?M(ADD|SUB)Srrr")>;
+def KryoWrite_6cyc_X_noRSV_57ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 6; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_6cyc_X_noRSV_57ln],
+ (instregex "FN?M(ADD|SUB)Drrr")>;
+def KryoWrite_5cyc_X_noRSV_51ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_5cyc_X_noRSV_51ln],
+ (instrs FMLAv2f32, FMLSv2f32, FMLAv1i32_indexed, FMLSv1i32_indexed)>;
+def KryoWrite_5cyc_X_X_56ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_5cyc_X_X_56ln],
+ (instrs FMLAv4f32, FMLSv4f32)>;
+def KryoWrite_6cyc_X_X_61ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 6; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_6cyc_X_X_61ln],
+ (instrs FMLAv2f64, FMLSv2f64)>;
+def KryoWrite_5cyc_X_noRSV_128ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_5cyc_X_noRSV_128ln],
+ (instrs FMLAv2i32_indexed, FMLSv2i32_indexed)>;
+def KryoWrite_5cyc_X_X_131ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_5cyc_X_X_131ln],
+ (instrs FMLAv4i32_indexed, FMLSv4i32_indexed)>;
+def KryoWrite_6cyc_X_X_134ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 6; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_6cyc_X_X_134ln],
+ (instrs FMLAv2i64_indexed, FMLSv2i64_indexed)>;
+def KryoWrite_6cyc_X_noRSV_60ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 6; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_6cyc_X_noRSV_60ln],
+ (instrs FMLAv1i64_indexed, FMLSv1i64_indexed, FMULv1i64_indexed, FMULXv1i64_indexed)>;
+def KryoWrite_1cyc_XY_45ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_XY_45ln],
+ (instregex "FMOV(XDHigh|DXHigh|DX)r")>;
+def KryoWrite_1cyc_XY_noRSV_47ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_47ln],
+ (instregex "FMOV(Di|Dr|Si|Sr|SWr|WSr|XDr|v.*_ns)")>;
+def KryoWrite_5cyc_X_noRSV_53ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_5cyc_X_noRSV_53ln],
+ (instrs FMULv1i32_indexed, FMULXv1i32_indexed)>;
+def KryoWrite_5cyc_X_noRSV_127ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_5cyc_X_noRSV_127ln],
+ (instrs FMULv2f32, FMULXv2f32, FMULv2i32_indexed, FMULXv2i32_indexed)>;
+def KryoWrite_5cyc_X_X_130ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_5cyc_X_X_130ln],
+ (instrs FMULv4f32, FMULXv4f32, FMULv4i32_indexed, FMULXv4i32_indexed)>;
+def KryoWrite_6cyc_X_X_133ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 6; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_6cyc_X_X_133ln],
+ (instrs FMULv2f64, FMULXv2f64, FMULv2i64_indexed, FMULXv2i64_indexed)>;
+def KryoWrite_5cyc_X_noRSV_54ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_5cyc_X_noRSV_54ln],
+ (instrs FMULSrr, FNMULSrr, FMULX32)>;
+def KryoWrite_6cyc_X_noRSV_59ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 6; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_6cyc_X_noRSV_59ln],
+ (instrs FMULDrr, FNMULDrr, FMULX64)>;
+def KryoWrite_3cyc_XY_noRSV_28ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_28ln],
+ (instrs FRECPEv1i32, FRECPEv1i64, FRSQRTEv1i32, FRSQRTEv1i64)>;
+def KryoWrite_3cyc_XY_noRSV_99ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_99ln],
+ (instrs FRECPEv2f32, FRSQRTEv2f32)>;
+def KryoWrite_3cyc_XY_XY_102ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_102ln],
+ (instrs FRECPEv2f64, FRECPEv4f32, FRSQRTEv2f64, FRSQRTEv4f32)>;
+def KryoWrite_5cyc_X_noRSV_52ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_5cyc_X_noRSV_52ln],
+ (instrs FRECPS32, FRSQRTS32)>;
+def KryoWrite_6cyc_X_noRSV_58ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 6; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_6cyc_X_noRSV_58ln],
+ (instrs FRECPS64, FRSQRTS64)>;
+def KryoWrite_5cyc_X_noRSV_126ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_5cyc_X_noRSV_126ln],
+ (instrs FRECPSv2f32, FRSQRTSv2f32)>;
+def KryoWrite_5cyc_X_X_129ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_5cyc_X_X_129ln],
+ (instrs FRECPSv4f32, FRSQRTSv4f32)>;
+def KryoWrite_6cyc_X_X_132ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 6; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_6cyc_X_X_132ln],
+ (instrs FRECPSv2f64, FRSQRTSv2f64)>;
+def KryoWrite_3cyc_XY_noRSV_50ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_50ln],
+ (instrs FRECPXv1i32, FRECPXv1i64)>;
+def KryoWrite_2cyc_XY_noRSV_39ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_39ln],
+ (instregex "FRINT(A|I|M|N|P|X|Z)(S|D)r")>;
+def KryoWrite_2cyc_XY_noRSV_108ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_108ln],
+ (instregex "FRINT(A|I|M|N|P|X|Z)v2f32")>;
+def KryoWrite_2cyc_XY_XY_109ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_109ln],
+ (instregex "FRINT(A|I|M|N|P|X|Z)(v2f64|v4f32)")>;
+def KryoWrite_1cyc_XA_Y_noRSV_42ln :
+ SchedWriteRes<[KryoUnitXA, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_1cyc_XA_Y_noRSV_42ln],
+ (instregex "FSQRT(S|D)r")>;
+def KryoWrite_1cyc_XA_Y_noRSV_120ln :
+ SchedWriteRes<[KryoUnitXA, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_1cyc_XA_Y_noRSV_120ln],
+ (instregex "FSQRTv2f32")>;
+def KryoWrite_1cyc_XA_Y_XA_Y_122ln :
+ SchedWriteRes<[KryoUnitXA, KryoUnitY, KryoUnitXA, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_1cyc_XA_Y_XA_Y_122ln],
+ (instregex "FSQRT(v2f64|v4f32)")>;
+def KryoWrite_1cyc_X_201ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_X_201ln],
+ (instregex "INSv.*")>;
+def KryoWrite_3cyc_LS_255ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_3cyc_LS_255ln],
+ (instregex "LD1(One(v16b|v8h|v4s|v2d)|i64)$")>;
+def KryoWrite_4cyc_LS_X_270ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_LS_X_270ln],
+ (instregex "LD1(i8|i16|i32)$")>;
+def KryoWrite_3cyc_LS_noRSV_285ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_noRSV_285ln],
+ (instregex "LD1One(v8b|v4h|v2s|v1d)$")>;
+def KryoWrite_3cyc_LS_XY_289ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_289ln, WriteAdr],
+ (instregex "LD1(One(v16b|v8h|v4s|v2d)|i64)_POST$")>;
+def KryoWrite_4cyc_LS_XY_X_298ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_4cyc_LS_XY_X_298ln, WriteAdr],
+ (instregex "LD1(i8|i16|i32)_POST$")>;
+def KryoWrite_3cyc_LS_LS_LS_308ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_LS_308ln],
+ (instregex "LD1Three(v16b|v8h|v4s|v2d)$")>;
+def KryoWrite_3cyc_LS_XY_noRSV_317ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_noRSV_317ln, WriteAdr],
+ (instregex "LD1One(v8b|v4h|v2s|v1d)_POST$")>;
+def KryoWrite_3cyc_LS_LS_LS_LS_328ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS, KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_LS_LS_328ln, WriteAdr],
+ (instregex "LD1Four(v16b|v8h|v4s|v2d)_POST$")>;
+def KryoWrite_3cyc_LS_XY_LS_LS_332ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_LS_LS_332ln, WriteAdr],
+ (instregex "LD1Three(v16b|v8h|v4s|v2d)_POST$")>;
+def KryoWrite_3cyc_LS_LS_noRSV_noRSV_noRSV_348ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 5;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_noRSV_noRSV_noRSV_348ln],
+ (instregex "LD1Three(v8b|v4h|v2s|v1d)$")>;
+def KryoWrite_3cyc_LS_XY_LS_LS_LS_351ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS, KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 5;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_LS_LS_LS_351ln],
+ (instregex "LD1Four(v16b|v8h|v4s|v2d)$")>;
+def KryoWrite_3cyc_LS_LS_noRSV_noRSV_noRSV_noRSV_358ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 6;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_noRSV_noRSV_noRSV_noRSV_358ln],
+ (instregex "LD1Four(v8b|v4h|v2s|v1d)$")>;
+def KryoWrite_3cyc_LS_XY_LS_noRSV_noRSV_noRSV_360ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 6;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_LS_noRSV_noRSV_noRSV_360ln, WriteAdr],
+ (instregex "LD1Three(v8b|v4h|v2s|v1d)_POST$")>;
+def KryoWrite_3cyc_LS_XY_LS_noRSV_noRSV_noRSV_noRSV_368ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 7;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_LS_noRSV_noRSV_noRSV_noRSV_368ln, WriteAdr],
+ (instregex "LD1Four(v8b|v4h|v2s|v1d)_POST$")>;
+def KryoWrite_3cyc_LS_LS_281ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_281ln],
+ (instregex "LD(1|2)Two(v16b|v8h|v4s|v2d)$")>;
+def KryoWrite_3cyc_LS_noRSV_noRSV_311ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_noRSV_noRSV_311ln],
+ (instregex "LD(1|2)Two(v8b|v4h|v2s|v1d)$")>;
+def KryoWrite_3cyc_LS_XY_LS_313ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_LS_313ln, WriteAdr],
+ (instregex "LD(1|2)Two(v16b|v8h|v4s|v2d)_POST$")>;
+def KryoWrite_3cyc_LS_XY_noRSV_noRSV_334ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_noRSV_noRSV_334ln, WriteAdr],
+ (instregex "LD(1|2)Two(v8b|v4h|v2s|v1d)_POST$")>;
+def KryoWrite_3cyc_LS_256ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_3cyc_LS_256ln],
+ (instregex "LD1R(v16b|v8h|v4s|v2d)$")>;
+def KryoWrite_3cyc_LS_noRSV_286ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_noRSV_286ln],
+ (instregex "LD1R(v8b|v4h|v2s|v1d)$")>;
+def KryoWrite_3cyc_LS_XY_290ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_290ln, WriteAdr],
+ (instregex "LD1R(v16b|v8h|v4s|v2d)_POST$")>;
+def KryoWrite_3cyc_LS_XY_noRSV_318ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_noRSV_318ln, WriteAdr],
+ (instregex "LD1R(v8b|v4h|v2s|v1d)_POST$")>;
+def KryoWrite_3cyc_LS_257ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_3cyc_LS_257ln],
+ (instregex "LD2i64$")>;
+def KryoWrite_3cyc_LS_XY_291ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_291ln, WriteAdr],
+ (instregex "LD2i64_POST$")>;
+def KryoWrite_4cyc_LS_X_X_296ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_4cyc_LS_X_X_296ln],
+ (instregex "LD2(i8|i16|i32)$")>;
+def KryoWrite_4cyc_LS_XY_X_X_321ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_4cyc_LS_XY_X_X_321ln, WriteAdr],
+ (instregex "LD2(i8|i16|i32)_POST$")>;
+def KryoWrite_3cyc_LS_LS_282ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_282ln],
+ (instregex "LD2R(v16b|v8h|v4s|v2d)$")>;
+def KryoWrite_3cyc_LS_noRSV_noRSV_312ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_noRSV_noRSV_312ln],
+ (instregex "LD2R(v8b|v4h|v2s|v1d)$")>;
+def KryoWrite_3cyc_LS_XY_LS_314ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_LS_314ln, WriteAdr],
+ (instregex "LD2R(v16b|v8h|v4s|v2d)_POST$")>;
+def KryoWrite_3cyc_LS_XY_noRSV_noRSV_335ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_noRSV_noRSV_335ln, WriteAdr],
+ (instregex "LD2R(v8b|v4h|v2s|v1d)_POST$")>;
+def KryoWrite_3cyc_LS_LS_283ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_283ln],
+ (instregex "LD3i64$")>;
+def KryoWrite_3cyc_LS_LS_LS_309ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_LS_309ln],
+ (instregex "LD3Threev2d$")>;
+def KryoWrite_3cyc_LS_XY_LS_315ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_LS_315ln, WriteAdr],
+ (instregex "LD3i64_POST$")>;
+def KryoWrite_4cyc_LS_X_X_X_320ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitX, KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_4cyc_LS_X_X_X_320ln],
+ (instregex "LD3(i8|i16|i32)$")>;
+def KryoWrite_3cyc_LS_XY_LS_LS_331ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_LS_LS_331ln, WriteAdr],
+ (instregex "LD3Threev2d_POST$")>;
+def KryoWrite_4cyc_LS_XY_X_X_X_338ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitX, KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 5;
+}
+def : InstRW<[KryoWrite_4cyc_LS_XY_X_X_X_338ln, WriteAdr],
+ (instregex "LD3(i8|i16|i32)_POST$")>;
+def KryoWrite_4cyc_LS_LS_X_X_X_noRSV_noRSV_noRSV_373ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS, KryoUnitX, KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 8;
+}
+def : InstRW<[KryoWrite_4cyc_LS_LS_X_X_X_noRSV_noRSV_noRSV_373ln],
+ (instregex "LD3Three(v8b|v4h|v2s)$")>;
+def KryoWrite_4cyc_LS_XY_LS_X_X_X_noRSV_noRSV_noRSV_380ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS, KryoUnitX, KryoUnitX,
+ KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 9;
+}
+def : InstRW<[KryoWrite_4cyc_LS_XY_LS_X_X_X_noRSV_noRSV_noRSV_380ln, WriteAdr],
+ (instregex "LD3Three(v8b|v4h|v2s)_POST$")>;
+def KryoWrite_4cyc_LS_LS_X_X_X_LS_LS_X_X_X_381ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitLS, KryoUnitLS, KryoUnitX, KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 10;
+}
+def : InstRW<[KryoWrite_4cyc_LS_LS_X_X_X_LS_LS_X_X_X_381ln],
+ (instregex "LD3Three(v16b|v8h|v4s)$")>;
+def KryoWrite_4cyc_LS_LS_X_X_X_LS_XY_LS_X_X_X_383ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitLS, KryoUnitXY, KryoUnitLS, KryoUnitX, KryoUnitX,
+ KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 11;
+}
+def : InstRW<[KryoWrite_4cyc_LS_LS_X_X_X_LS_XY_LS_X_X_X_383ln, WriteAdr],
+ (instregex "LD3Three(v16b|v8h|v4s)_POST$")>;
+def KryoWrite_3cyc_LS_LS_LS_310ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_LS_310ln],
+ (instregex "LD3R(v16b|v8h|v4s|v2d)$")>;
+def KryoWrite_3cyc_LS_XY_LS_LS_333ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_LS_LS_333ln, WriteAdr],
+ (instregex "LD3R(v16b|v8h|v4s|v2d)_POST$")>;
+def KryoWrite_3cyc_LS_LS_noRSV_noRSV_noRSV_349ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 5;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_noRSV_noRSV_noRSV_349ln],
+ (instregex "LD3R(v8b|v4h|v2s|v1d)$")>;
+def KryoWrite_3cyc_LS_XY_LS_noRSV_noRSV_noRSV_361ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 6;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_LS_noRSV_noRSV_noRSV_361ln, WriteAdr],
+ (instregex "LD3R(v8b|v4h|v2s|v1d)_POST$")>;
+def KryoWrite_3cyc_LS_LS_284ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_284ln],
+ (instregex "LD4i64$")>;
+def KryoWrite_3cyc_LS_XY_LS_316ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_LS_316ln, WriteAdr],
+ (instregex "LD4i64_POST$")>;
+def KryoWrite_3cyc_LS_LS_LS_LS_329ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS, KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_LS_LS_329ln],
+ (instregex "LD4Four(v2d)$")>;
+def KryoWrite_4cyc_LS_X_X_X_X_337ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 5;
+}
+def : InstRW<[KryoWrite_4cyc_LS_X_X_X_X_337ln],
+ (instregex "LD4(i8|i16|i32)$")>;
+def KryoWrite_3cyc_LS_XY_LS_LS_LS_350ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS, KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 5;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_LS_LS_LS_350ln, WriteAdr],
+ (instregex "LD4Four(v2d)_POST$")>;
+def KryoWrite_4cyc_LS_XY_X_X_X_X_355ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 6;
+}
+def : InstRW<[KryoWrite_4cyc_LS_XY_X_X_X_X_355ln, WriteAdr],
+ (instregex "LD4(i8|i16|i32)_POST$")>;
+def KryoWrite_4cyc_LS_LS_X_X_X_X_noRSV_noRSV_noRSV_noRSV_382ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 10;
+}
+def : InstRW<[KryoWrite_4cyc_LS_LS_X_X_X_X_noRSV_noRSV_noRSV_noRSV_382ln],
+ (instregex "LD4Four(v8b|v4h|v2s)$")>;
+def KryoWrite_4cyc_LS_XY_LS_X_X_X_X_noRSV_noRSV_noRSV_noRSV_384ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS, KryoUnitX, KryoUnitX,
+ KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 11;
+}
+def : InstRW<[KryoWrite_4cyc_LS_XY_LS_X_X_X_X_noRSV_noRSV_noRSV_noRSV_384ln, WriteAdr],
+ (instregex "LD4Four(v8b|v4h|v2s)_POST$")>;
+def KryoWrite_4cyc_LS_LS_X_X_X_X_LS_LS_X_X_X_X_386ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitX, KryoUnitLS, KryoUnitLS, KryoUnitX, KryoUnitX,
+ KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 12;
+}
+def : InstRW<[KryoWrite_4cyc_LS_LS_X_X_X_X_LS_LS_X_X_X_X_386ln],
+ (instregex "LD4Four(v16b|v8h|v4s)$")>;
+def KryoWrite_4cyc_LS_LS_X_X_X_X_LS_XY_LS_X_X_X_X_389ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitX, KryoUnitLS, KryoUnitXY, KryoUnitLS, KryoUnitX,
+ KryoUnitX, KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 13;
+}
+def : InstRW<[KryoWrite_4cyc_LS_LS_X_X_X_X_LS_XY_LS_X_X_X_X_389ln, WriteAdr],
+ (instregex "LD4Four(v16b|v8h|v4s)_POST$")>;
+def KryoWrite_3cyc_LS_LS_LS_LS_330ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS, KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_LS_LS_330ln],
+ (instregex "LD4R(v16b|v8h|v4s|v2d)$")>;
+def KryoWrite_3cyc_LS_XY_LS_LS_LS_352ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS, KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 5;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_LS_LS_LS_352ln, WriteAdr],
+ (instregex "LD4R(v16b|v8h|v4s|v2d)_POST$")>;
+def KryoWrite_3cyc_LS_LS_noRSV_noRSV_noRSV_noRSV_359ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 6;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_noRSV_noRSV_noRSV_noRSV_359ln],
+ (instregex "LD4R(v8b|v4h|v2s|v1d)$")>;
+def KryoWrite_3cyc_LS_XY_LS_noRSV_noRSV_noRSV_noRSV_369ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 7;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_LS_noRSV_noRSV_noRSV_noRSV_369ln, WriteAdr],
+ (instregex "LD4R(v8b|v4h|v2s|v1d)_POST$")>;
+def KryoWrite_3cyc_LS_LS_400ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_400ln],
+ (instregex "(LDAX?R(B|H|W|X)|LDAXP(W|X))")>;
+def KryoWrite_3cyc_LS_LS_401ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_401ln, WriteLDHi],
+ (instrs LDNPQi)>;
+def KryoWrite_3cyc_LS_noRSV_noRSV_408ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_noRSV_noRSV_408ln, WriteLDHi],
+ (instrs LDNPDi, LDNPSi)>;
+def KryoWrite_3cyc_LS_394ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_3cyc_LS_394ln, WriteLDHi],
+ (instrs LDNPWi, LDNPXi)>;
+def KryoWrite_3cyc_LS_LS_402ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_402ln, WriteLDHi],
+ (instrs LDPQi)>;
+def KryoWrite_3cyc_LS_noRSV_noRSV_409ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_noRSV_noRSV_409ln, WriteLDHi],
+ (instrs LDPDi, LDPSi)>;
+def KryoWrite_3cyc_LS_XY_LS_410ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY, KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_LS_410ln, WriteLDHi, WriteAdr],
+ (instregex "LDPQ(post|pre)")>;
+def KryoWrite_3cyc_LS_XY_noRSV_noRSV_411ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_noRSV_noRSV_411ln, WriteLDHi, WriteAdr],
+ (instregex "LDP(D|S)(post|pre)")>;
+def KryoWrite_3cyc_LS_393ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_3cyc_LS_393ln, WriteLDHi],
+ (instrs LDPWi, LDPXi)>;
+def KryoWrite_3cyc_LS_XY_403ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_403ln, WriteLDHi, WriteAdr],
+ (instregex "LDP(W|X)(post|pre)")>;
+def KryoWrite_4cyc_LS_395ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 4; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_4cyc_LS_395ln, WriteLDHi],
+ (instrs LDPSWi)>;
+def KryoWrite_4cyc_LS_XY_405ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_LS_XY_405ln, WriteLDHi, WriteAdr],
+ (instrs LDPSWpost, LDPSWpre)>;
+def KryoWrite_3cyc_LS_264ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_3cyc_LS_264ln],
+ (instrs LDRQui, LDRQl)>;
+def KryoWrite_4cyc_X_LS_271ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitLS]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_LS_271ln],
+ (instrs LDRQroW, LDRQroX)>;
+def KryoWrite_3cyc_LS_noRSV_287ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_noRSV_287ln],
+ (instregex "LDR((D|S)l|(D|S|H|B)ui)")>;
+def KryoWrite_3cyc_LS_XY_293ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_293ln, WriteAdr],
+ (instrs LDRQpost, LDRQpre)>;
+def KryoWrite_4cyc_X_LS_noRSV_297ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitLS]> {
+ let Latency = 4; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_4cyc_X_LS_noRSV_297ln],
+ (instregex "LDR(D|S|H|B)ro(W|X)")>;
+def KryoWrite_3cyc_LS_XY_noRSV_319ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_noRSV_319ln, WriteAdr],
+ (instregex "LDR(D|S|H|B)(post|pre)")>;
+def KryoWrite_3cyc_LS_261ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_3cyc_LS_261ln],
+ (instregex "LDR(BB|HH|W|X)ui")>;
+def KryoWrite_3cyc_LS_XY_292ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_XY_292ln, WriteAdr],
+ (instregex "LDR(BB|HH|W|X)(post|pre)")>;
+def KryoWrite_4cyc_X_LS_272ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitLS]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_LS_272ln],
+ (instregex "(LDR(BB|HH|W|X)ro(W|X)|PRFMro(W|X))")>;
+def KryoWrite_3cyc_LS_262ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_3cyc_LS_262ln],
+ (instrs LDRWl, LDRXl)>;
+def KryoWrite_4cyc_LS_268ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 4; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_4cyc_LS_268ln],
+ (instregex "LDRS(BW|BX|HW|HX|W)ui")>;
+def KryoWrite_5cyc_X_LS_273ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitLS]> {
+ let Latency = 5; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_5cyc_X_LS_273ln],
+ (instregex "LDRS(BW|BX|HW|HX|W)ro(W|X)")>;
+def KryoWrite_4cyc_LS_XY_294ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitXY]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_LS_XY_294ln, WriteAdr],
+ (instregex "LDRS(BW|BX|HW|HX|W)(post|pre)")>;
+def KryoWrite_4cyc_LS_269ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 4; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_4cyc_LS_269ln],
+ (instrs LDRSWl)>;
+def KryoWrite_3cyc_LS_260ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_3cyc_LS_260ln],
+ (instregex "LDTR(B|H|W|X)i")>;
+def KryoWrite_4cyc_LS_267ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 4; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_4cyc_LS_267ln],
+ (instregex "LDTRS(BW|BX|HW|HX|W)i")>;
+def KryoWrite_3cyc_LS_263ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_3cyc_LS_263ln],
+ (instrs LDURQi)>;
+def KryoWrite_3cyc_LS_noRSV_288ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_noRSV_288ln],
+ (instregex "LDUR(D|S|H|B)i")>;
+def KryoWrite_3cyc_LS_259ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_3cyc_LS_259ln],
+ (instregex "LDUR(BB|HH|W|X)i")>;
+def KryoWrite_4cyc_LS_266ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 4; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_4cyc_LS_266ln],
+ (instregex "LDURS(B|H)?(W|X)i")>;
+def KryoWrite_3cyc_LS_258ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_3cyc_LS_258ln],
+ (instregex "LDXP(W|X)")>;
+def KryoWrite_3cyc_LS_258_1ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 3; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_3cyc_LS_258_1ln],
+ (instregex "LDXR(B|H|W|X)")>;
+def KryoWrite_2cyc_XY_XY_137ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_137ln],
+ (instrs LSLVWr, LSLVXr)>;
+def KryoWrite_1cyc_XY_135ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_XY_135ln],
+ (instregex "(LS|AS|RO)RV(W|X)r")>;
+def KryoWrite_4cyc_X_84ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_4cyc_X_84ln],
+ (instrs MADDWrrr, MSUBWrrr)>;
+def KryoWrite_5cyc_X_85ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_5cyc_X_85ln],
+ (instrs MADDXrrr, MSUBXrrr)>;
+def KryoWrite_4cyc_X_noRSV_188ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_noRSV_188ln],
+ (instregex "(MLA|MLS|MUL)(v8i8|v4i16|v2i32)(_indexed)?")>;
+def KryoWrite_4cyc_X_X_192ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_X_192ln],
+ (instregex "(MLA|MLS|MUL|SQR?DMULH)(v16i8|v8i16|v4i32)(_indexed)?")>;
+def KryoWrite_1cyc_XY_noRSV_198ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_198ln],
+ (instregex "(MOVI|MVNI)(D|v8b_ns|v2i32|v4i16|v2s_msl)")>;
+def KryoWrite_1cyc_XY_XY_199ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_XY_199ln],
+ (instregex "(MOVI|MVNI)(v2d_ns|v16b_ns|v4i32|v8i16|v4s_msl)")>;
+def KryoWrite_1cyc_X_89ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_X_89ln],
+ (instrs MOVKWi, MOVKXi)>;
+def KryoWrite_1cyc_XY_91ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_XY_91ln],
+ (instrs MOVNWi, MOVNXi)>;
+def KryoWrite_1cyc_XY_90ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_XY_90ln],
+ (instrs MOVZWi, MOVZXi)>;
+def KryoWrite_2cyc_XY_93ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_2cyc_XY_93ln],
+ (instrs MRS)>;
+def KryoWrite_0cyc_X_87ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 0; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_0cyc_X_87ln],
+ (instrs MSRpstateImm4)>;
+def : InstRW<[KryoWrite_0cyc_X_87ln],
+ (instrs MSRpstateImm1)>;
+def KryoWrite_0cyc_XY_88ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 0; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_0cyc_XY_88ln],
+ (instrs MSR)>;
+def KryoWrite_1cyc_XY_noRSV_143ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_143ln],
+ (instregex "NEG(v8i8|v4i16|v2i32|v1i64)")>;
+def KryoWrite_1cyc_XY_XY_145ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_XY_145ln],
+ (instregex "NEG(v16i8|v8i16|v4i32|v2i64)")>;
+def KryoWrite_1cyc_XY_noRSV_193ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_193ln],
+ (instrs NOTv8i8)>;
+def KryoWrite_1cyc_XY_XY_194ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_XY_194ln],
+ (instrs NOTv16i8)>;
+def KryoWrite_2cyc_XY_noRSV_234ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_234ln],
+ (instrs PMULv8i8)>;
+def KryoWrite_2cyc_XY_XY_236ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_236ln],
+ (instrs PMULv16i8)>;
+def KryoWrite_2cyc_XY_XY_235ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_235ln],
+ (instrs PMULLv8i8, PMULLv16i8)>;
+def KryoWrite_3cyc_XY_XY_237ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_237ln],
+ (instrs PMULLv1i64, PMULLv2i64)>;
+def KryoWrite_0cyc_LS_254ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 0; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_0cyc_LS_254ln],
+ (instrs PRFMl, PRFMui)>;
+def KryoWrite_0cyc_LS_253ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 0; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_0cyc_LS_253ln],
+ (instrs PRFUMi)>;
+def KryoWrite_6cyc_XY_X_noRSV_175ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitX]> {
+ let Latency = 6; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_6cyc_XY_X_noRSV_175ln],
+ (instregex "R(ADD|SUB)HNv.*")>;
+def KryoWrite_2cyc_XY_204ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_2cyc_XY_204ln],
+ (instrs RBITWr, RBITXr)>;
+def KryoWrite_2cyc_XY_noRSV_218ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_noRSV_218ln],
+ (instrs RBITv8i8)>;
+def KryoWrite_2cyc_XY_XY_219ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_219ln],
+ (instrs RBITv16i8)>;
+def KryoWrite_1cyc_X_202ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_X_202ln],
+ (instregex "REV(16|32)?(W|X)r")>;
+def KryoWrite_1cyc_XY_noRSV_214ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_214ln],
+ (instregex "REV(16|32|64)(v8i8|v4i16|v2i32)")>;
+def KryoWrite_1cyc_XY_XY_216ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_XY_216ln],
+ (instregex "REV(16|32|64)(v16i8|v8i16|v4i32)")>;
+def KryoWrite_3cyc_X_noRSV_244ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_X_noRSV_244ln],
+ (instregex "S(L|R)I(d|(v8i8|v4i16|v2i32)_shift)")>;
+def KryoWrite_3cyc_X_X_245ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_X_X_245ln],
+ (instregex "S(L|R)I(v16i8|v8i16|v4i32|v2i64)_shift")>;
+def KryoWrite_1cyc_XY_2ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_XY_2ln, ReadI, ReadI],
+ (instregex "SBCS?(W|X)r")>;
+def KryoWrite_2cyc_XA_XA_XA_24ln :
+ SchedWriteRes<[KryoUnitXA, KryoUnitXA, KryoUnitXA]> {
+ let Latency = 2; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_2cyc_XA_XA_XA_24ln],
+ (instrs SHA1Crrr, SHA1Mrrr, SHA1Prrr)>;
+def KryoWrite_1cyc_XY_noRSV_21ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_21ln],
+ (instrs SHA1Hrr)>;
+def KryoWrite_2cyc_X_X_23ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_X_X_23ln],
+ (instrs SHA1SU0rrr, SHA1SU1rr, SHA256SU0rr)>;
+def KryoWrite_4cyc_XA_XA_XA_25ln :
+ SchedWriteRes<[KryoUnitXA, KryoUnitXA, KryoUnitXA]> {
+ let Latency = 4; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_4cyc_XA_XA_XA_25ln],
+ (instrs SHA256Hrrr, SHA256H2rrr)>;
+def KryoWrite_3cyc_XY_XY_X_X_26ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY, KryoUnitX, KryoUnitX]> {
+ let Latency = 3; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_X_X_26ln],
+ (instrs SHA256SU1rrr)>;
+def KryoWrite_4cyc_X_noRSV_189ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_noRSV_189ln],
+ (instregex "SQR?DMULH(v8i8|v4i16|v1i32|v2i32|v1i16)(_indexed)?")>;
+def KryoWrite_3cyc_XY_noRSV_68ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_68ln],
+ (instregex "SQ(ABS|NEG)(v1i8|v1i16|v1i32|v1i64)")>;
+def KryoWrite_3cyc_XY_noRSV_157ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_157ln],
+ (instregex "SQ(ABS|NEG)(v8i8|v4i16|v2i32)")>;
+def KryoWrite_3cyc_XY_XY_164ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_164ln],
+ (instregex "SQ(ABS|NEG)(v16i8|v8i16|v4i32|v2i64)")>;
+def KryoWrite_4cyc_X_noRSV_190ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_4cyc_X_noRSV_190ln],
+ (instregex "SQD(MLAL|MLSL|MULL)(i16|i32)")>;
+def KryoWrite_0cyc_LS_Y_274ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_274ln],
+ (instregex "ST1(One(v8b|v4h|v2s|v1d|v16b|v8h|v4s|v2d)|(i8|i16|i32|i64)|Two(v8b|v4h|v2s|v1d))$")>;
+def KryoWrite_1cyc_LS_Y_X_301ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 3;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_LS_Y_X_301ln],
+ (instregex "ST1(One(v8b|v4h|v2s|v1d|v16b|v8h|v4s|v2d)|(i8|i16|i32|i64)|Two(v8b|v4h|v2s|v1d))_POST$")>;
+def KryoWrite_1cyc_LS_Y_XY_305ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 3;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_LS_Y_XY_305ln],
+ (instregex "ST1(One(v16b|v8h|v4s|v2d)|Two(v8b|v4h|v2s|v1d))_POST$")>;
+def KryoWrite_0cyc_LS_Y_LS_Y_323ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 4;
+}
+def : InstRW<[WriteAdr, KryoWrite_0cyc_LS_Y_LS_Y_323ln],
+ (instregex "ST1(Two(v16b|v8h|v4s|v2d)|(Three|Four)(v8b|v4h|v2s|v1d))_POST$")>;
+def KryoWrite_1cyc_LS_Y_XY_LS_Y_345ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitXY, KryoUnitLS, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 5;
+}
+def : InstRW<[KryoWrite_1cyc_LS_Y_XY_LS_Y_345ln],
+ (instregex "ST1(Two(v16b|v8h|v4s|v2d)|(Three|Four)(v8b|v4h|v2s|v1d))$")>;
+def KryoWrite_0cyc_LS_Y_LS_Y_LS_Y_356ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitLS, KryoUnitY, KryoUnitLS,
+ KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 6;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_LS_Y_LS_Y_356ln],
+ (instregex "ST1Three(v16b|v8h|v4s|v2d)$")>;
+def KryoWrite_1cyc_LS_Y_XY_LS_Y_LS_Y_366ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitXY, KryoUnitLS, KryoUnitY,
+ KryoUnitLS, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 7;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_LS_Y_XY_LS_Y_LS_Y_366ln],
+ (instregex "ST1Three(v16b|v8h|v4s|v2d)_POST$")>;
+def KryoWrite_0cyc_LS_Y_LS_Y_LS_Y_LS_Y_371ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitLS, KryoUnitY, KryoUnitLS,
+ KryoUnitY, KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 8;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_LS_Y_LS_Y_LS_Y_371ln],
+ (instregex "ST1Four(v16b|v8h|v4s|v2d)$")>;
+def KryoWrite_0cyc_LS_Y_LS_Y_XY_LS_Y_LS_Y_377ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitLS, KryoUnitY, KryoUnitXY,
+ KryoUnitLS, KryoUnitY, KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 9;
+}
+def : InstRW<[WriteAdr, KryoWrite_0cyc_LS_Y_LS_Y_XY_LS_Y_LS_Y_377ln],
+ (instregex "ST1Four(v16b|v8h|v4s|v2d)_POST$")>;
+def KryoWrite_0cyc_LS_Y_275ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_275ln],
+ (instregex "ST2(Two(v8b|v4h|v2s|v1d|v16b|v8h|v4s|v2d)|(i8|i16|i32|i64))$")>;
+def KryoWrite_1cyc_LS_Y_XY_306ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 3;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_LS_Y_XY_306ln],
+ (instregex "ST2(Two(v8b|v4h|v2s|v1d)|(i8|i16|i32|i64))_POST$")>;
+def KryoWrite_0cyc_LS_Y_LS_Y_322ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_LS_Y_322ln],
+ (instregex "ST2Two(v16b|v8h|v4s|v2d)$")>;
+def KryoWrite_1cyc_LS_Y_XY_LS_Y_344ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitXY, KryoUnitLS, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 5;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_LS_Y_XY_LS_Y_344ln],
+ (instregex "ST2Two(v16b|v8h|v4s|v2d)_POST$")>;
+def KryoWrite_0cyc_LS_Y_LS_Y_324ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_LS_Y_324ln],
+ (instregex "ST3(Threev1d|(i8|i16|i32|i64))$")>;
+def KryoWrite_1cyc_LS_Y_XY_LS_Y_346ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitXY, KryoUnitLS, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 5;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_LS_Y_XY_LS_Y_346ln],
+ (instregex "ST3(Threev1d|(i8|i16|i32|i64))_POST$")>;
+def KryoWrite_1cyc_X_X_LS_Y_LS_Y_353ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitLS, KryoUnitY, KryoUnitLS,
+ KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 6;
+}
+def : InstRW<[KryoWrite_1cyc_X_X_LS_Y_LS_Y_353ln],
+ (instregex "ST3Three(v8b|v4h|v2s)$")>;
+def KryoWrite_0cyc_LS_Y_LS_Y_LS_Y_357ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitLS, KryoUnitY, KryoUnitLS,
+ KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 6;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_LS_Y_LS_Y_357ln],
+ (instregex "ST3Threev2d$")>;
+def KryoWrite_1cyc_X_X_LS_Y_XY_LS_Y_363ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitLS, KryoUnitY, KryoUnitXY,
+ KryoUnitLS, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 7;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_X_X_LS_Y_XY_LS_Y_363ln],
+ (instregex "ST3Three(v8b|v4h|v2s)_POST$")>;
+def KryoWrite_1cyc_LS_Y_XY_LS_Y_LS_Y_367ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitXY, KryoUnitLS, KryoUnitY,
+ KryoUnitLS, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 7;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_LS_Y_XY_LS_Y_LS_Y_367ln],
+ (instregex "ST3Threev2d_POST$")>;
+def KryoWrite_1cyc_X_X_LS_Y_LS_Y_X_X_LS_Y_LS_Y_385ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitLS, KryoUnitY, KryoUnitLS,
+ KryoUnitY, KryoUnitX, KryoUnitX, KryoUnitLS, KryoUnitY,
+ KryoUnitLS, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 12;
+}
+def : InstRW<[KryoWrite_1cyc_X_X_LS_Y_LS_Y_X_X_LS_Y_LS_Y_385ln],
+ (instregex "ST3Three(v16b|v8h|v4s)$")>;
+def KryoWrite_1cyc_X_X_LS_Y_LS_Y_X_X_LS_Y_XY_LS_Y_388ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitLS, KryoUnitY, KryoUnitLS,
+ KryoUnitY, KryoUnitX, KryoUnitX, KryoUnitLS, KryoUnitY,
+ KryoUnitXY, KryoUnitLS, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 13;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_X_X_LS_Y_LS_Y_X_X_LS_Y_XY_LS_Y_388ln],
+ (instregex "ST3Three(v16b|v8h|v4s)_POST$")>;
+def KryoWrite_0cyc_LS_Y_LS_Y_325ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_LS_Y_325ln],
+ (instregex "ST4(Fourv1d|(i8|i16|i32|i64))$")>;
+def KryoWrite_1cyc_LS_Y_XY_LS_Y_347ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitXY, KryoUnitLS, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 5;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_LS_Y_XY_LS_Y_347ln],
+ (instregex "ST4(Fourv1d|(i8|i16|i32|i64))_POST$")>;
+def KryoWrite_1cyc_X_X_LS_Y_X_X_LS_Y_370ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitLS, KryoUnitY, KryoUnitX,
+ KryoUnitX, KryoUnitLS, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 8;
+}
+def : InstRW<[KryoWrite_1cyc_X_X_LS_Y_X_X_LS_Y_370ln],
+ (instregex "ST4Four(v8b|v4h|v2s)$")>;
+def KryoWrite_0cyc_LS_Y_LS_Y_LS_Y_LS_Y_372ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitLS, KryoUnitY, KryoUnitLS,
+ KryoUnitY, KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 8;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_LS_Y_LS_Y_LS_Y_372ln],
+ (instregex "ST4Fourv2d$")>;
+def KryoWrite_1cyc_X_X_LS_Y_XY_X_X_LS_Y_375ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitLS, KryoUnitY, KryoUnitXY,
+ KryoUnitX, KryoUnitX, KryoUnitLS, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 9;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_X_X_LS_Y_XY_X_X_LS_Y_375ln],
+ (instregex "ST4Four(v8b|v4h|v2s)_POST$")>;
+def KryoWrite_0cyc_LS_Y_LS_Y_XY_LS_Y_LS_Y_379ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitLS, KryoUnitY, KryoUnitXY,
+ KryoUnitLS, KryoUnitY, KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 9;
+}
+def : InstRW<[WriteAdr, KryoWrite_0cyc_LS_Y_LS_Y_XY_LS_Y_LS_Y_379ln],
+ (instregex "ST4Fourv2d_POST$")>;
+def KryoWrite_1cyc_X_X_LS_Y_X_X_LS_Y_X_X_LS_Y_X_X_LS_Y_390ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitLS, KryoUnitY, KryoUnitX,
+ KryoUnitX, KryoUnitLS, KryoUnitY, KryoUnitX, KryoUnitX,
+ KryoUnitLS, KryoUnitY, KryoUnitX, KryoUnitX, KryoUnitLS,
+ KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 16;
+}
+def : InstRW<[KryoWrite_1cyc_X_X_LS_Y_X_X_LS_Y_X_X_LS_Y_X_X_LS_Y_390ln],
+ (instregex "ST4Four(v16b|v8h|v4s)$")>;
+def KryoWrite_1cyc_X_X_LS_Y_X_X_LS_Y_X_X_LS_Y_XY_X_X_LS_Y_392ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitLS, KryoUnitY, KryoUnitX,
+ KryoUnitX, KryoUnitLS, KryoUnitY, KryoUnitX, KryoUnitX,
+ KryoUnitLS, KryoUnitY, KryoUnitXY, KryoUnitX, KryoUnitX,
+ KryoUnitLS, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 17;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_X_X_LS_Y_X_X_LS_Y_X_X_LS_Y_XY_X_X_LS_Y_392ln],
+ (instregex "ST4Four(v16b|v8h|v4s)_POST$")>;
+def KryoWrite_0cyc_LS_LS_Y_299ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_0cyc_LS_LS_Y_299ln],
+ (instregex "STLR(B|H|W|X)")>;
+def KryoWrite_3cyc_LS_LS_Y_307ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitLS, KryoUnitY]> {
+ let Latency = 3; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_3cyc_LS_LS_Y_307ln],
+ (instregex "STLX(P(W|X)|R(B|H|W|X))")>;
+def KryoWrite_0cyc_LS_Y_276ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_276ln],
+ (instrs STNPDi, STNPSi)>;
+def KryoWrite_0cyc_LS_Y_LS_Y_326ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_LS_Y_326ln],
+ (instrs STNPQi)>;
+def KryoWrite_0cyc_LS_Y_280ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_280ln],
+ (instrs STNPWi, STNPXi)>;
+def KryoWrite_0cyc_LS_Y_277ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_277ln],
+ (instregex "STP(D|S)i")>;
+def KryoWrite_1cyc_LS_Y_X_303ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 3;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_LS_Y_X_303ln],
+ (instregex "STP(D|S)(post|pre)")>;
+def KryoWrite_0cyc_LS_Y_LS_Y_327ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_LS_Y_327ln],
+ (instrs STPQi)>;
+def KryoWrite_1cyc_LS_Y_X_LS_Y_343ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitX, KryoUnitLS, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 5;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_LS_Y_X_LS_Y_343ln],
+ (instrs STPQpost, STPQpre)>;
+def KryoWrite_0cyc_LS_Y_279ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_279ln],
+ (instregex "STP(W|X)i")>;
+def KryoWrite_1cyc_LS_X_Y_300ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitX, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 3;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_LS_X_Y_300ln],
+ (instregex "STP(W|X)(post|pre)")>;
+def KryoWrite_0cyc_LS_Y_278ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_278ln],
+ (instregex "STR(Q|D|S|H|B)ui")>;
+def KryoWrite_1cyc_X_LS_Y_295ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitLS, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_1cyc_X_LS_Y_295ln],
+ (instregex "STR(D|S|H|B)ro(W|X)")>;
+def KryoWrite_1cyc_LS_Y_X_304ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY, KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 3;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_LS_Y_X_304ln],
+ (instregex "STR(Q|D|S|H|B)(post|pre)")>;
+def KryoWrite_2cyc_X_LS_Y_XY_LS_Y_354ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitLS, KryoUnitY, KryoUnitXY, KryoUnitLS,
+ KryoUnitY]> {
+ let Latency = 2; let NumMicroOps = 6;
+}
+def : InstRW<[KryoWrite_2cyc_X_LS_Y_XY_LS_Y_354ln],
+ (instregex "STRQro(W|X)")>;
+def KryoWrite_0cyc_LS_Y_399ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_399ln],
+ (instregex "STR(BB|HH|W|X)ui")>;
+def KryoWrite_1cyc_X_LS_Y_406ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitLS, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_1cyc_X_LS_Y_406ln],
+ (instregex "STR(BB|HH|W|X)ro(W|X)")>;
+def KryoWrite_1cyc_LS_X_Y_407ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitX, KryoUnitY]> {
+ let Latency = 1; let NumMicroOps = 3;
+}
+def : InstRW<[WriteAdr, KryoWrite_1cyc_LS_X_Y_407ln],
+ (instregex "STR(BB|HH|W|X)(post|pre)")>;
+def KryoWrite_0cyc_LS_Y_398ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_398ln],
+ (instregex "STTR(B|H|W|X)i")>;
+def KryoWrite_0cyc_LS_Y_396ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_396ln],
+ (instregex "STUR(Q|D|S|H|B)i")>;
+def KryoWrite_0cyc_LS_Y_397ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY]> {
+ let Latency = 0; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_0cyc_LS_Y_397ln],
+ (instregex "STUR(BB|HH|W|X)i")>;
+def KryoWrite_3cyc_LS_Y_404ln :
+ SchedWriteRes<[KryoUnitLS, KryoUnitY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_LS_Y_404ln],
+ (instregex "STX(P(W|X)|R(B|H|W|X))")>;
+def KryoWrite_3cyc_XY_noRSV_160ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_160ln],
+ (instregex "^(SU|US)QADD(v8i8|v4i16|v2i32)")>;
+def KryoWrite_3cyc_XY_XY_167ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_167ln],
+ (instregex "^(SU|US)QADD(v16i8|v8i16|v4i32|v2i64)")>;
+def KryoWrite_1cyc_XY_1ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_1cyc_XY_1ln, ReadI],
+ (instregex "SUBS?(W|X)ri")>;
+def KryoWrite_2cyc_XY_XY_5ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_5ln, ReadI, ReadIEReg],
+ (instregex "SUBS?(W|X)rx")>;
+def KryoWrite_2cyc_XY_XY_5_1ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 2; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_2cyc_XY_XY_5_1ln, ReadI, ReadISReg],
+ (instregex "SUBS?(W|X)rs")>;
+def KryoWrite_1cyc_XY_noRSV_6ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_6ln, ReadI, ReadI],
+ (instregex "SUBS?(W|X)rr")>;
+def KryoWrite_0cyc_LS_9ln :
+ SchedWriteRes<[KryoUnitLS]> {
+ let Latency = 0; let NumMicroOps = 1;
+}
+def : InstRW<[KryoWrite_0cyc_LS_9ln],
+ (instregex "SYSL?xt")>;
+def KryoWrite_1cyc_X_noRSV_205ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_X_noRSV_205ln],
+ (instrs TBLv8i8One)>;
+def KryoWrite_1cyc_X_X_208ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_X_X_208ln],
+ (instrs TBLv16i8One)>;
+def KryoWrite_2cyc_X_X_X_noRSV_222ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitX]> {
+ let Latency = 2; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_2cyc_X_X_X_noRSV_222ln],
+ (instrs TBLv8i8Two)>;
+def KryoWrite_2cyc_X_X_X_X_X_X_224ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitX]> {
+ let Latency = 2; let NumMicroOps = 6;
+}
+def : InstRW<[KryoWrite_2cyc_X_X_X_X_X_X_224ln],
+ (instrs TBLv16i8Two)>;
+def KryoWrite_3cyc_X_X_X_X_X_noRSV_225ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX]> {
+ let Latency = 3; let NumMicroOps = 6;
+}
+def : InstRW<[KryoWrite_3cyc_X_X_X_X_X_noRSV_225ln],
+ (instrs TBLv8i8Three)>;
+def KryoWrite_3cyc_X_X_X_X_X_X_X_noRSV_228ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitX, KryoUnitX]> {
+ let Latency = 3; let NumMicroOps = 8;
+}
+def : InstRW<[KryoWrite_3cyc_X_X_X_X_X_X_X_noRSV_228ln],
+ (instrs TBLv8i8Four)>;
+def KryoWrite_4cyc_X_X_X_X_X_X_X_X_XY_X_X_230ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitXY, KryoUnitX,
+ KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 11;
+}
+def : InstRW<[KryoWrite_4cyc_X_X_X_X_X_X_X_X_XY_X_X_230ln],
+ (instrs TBLv16i8Three)>;
+def KryoWrite_4cyc_X_X_X_X_X_X_X_X_X_X_XY_X_X_X_X_232ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitXY, KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 15;
+}
+def : InstRW<[KryoWrite_4cyc_X_X_X_X_X_X_X_X_X_X_XY_X_X_X_X_232ln],
+ (instrs TBLv16i8Four)>;
+def KryoWrite_2cyc_X_X_noRSV_220ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 2; let NumMicroOps = 3;
+}
+def : InstRW<[KryoWrite_2cyc_X_X_noRSV_220ln],
+ (instrs TBXv8i8One)>;
+def KryoWrite_2cyc_X_X_X_X_221ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX]> {
+ let Latency = 2; let NumMicroOps = 4;
+}
+def : InstRW<[KryoWrite_2cyc_X_X_X_X_221ln],
+ (instrs TBXv16i8One)>;
+def KryoWrite_3cyc_X_X_X_X_noRSV_223ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX]> {
+ let Latency = 3; let NumMicroOps = 5;
+}
+def : InstRW<[KryoWrite_3cyc_X_X_X_X_noRSV_223ln],
+ (instrs TBXv8i8Two)>;
+def KryoWrite_4cyc_X_X_X_X_X_X_noRSV_226ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 7;
+}
+def : InstRW<[KryoWrite_4cyc_X_X_X_X_X_X_noRSV_226ln],
+ (instrs TBXv8i8Three)>;
+def KryoWrite_3cyc_X_X_X_X_X_X_X_X_227ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitX, KryoUnitX, KryoUnitX]> {
+ let Latency = 3; let NumMicroOps = 8;
+}
+def : InstRW<[KryoWrite_3cyc_X_X_X_X_X_X_X_X_227ln],
+ (instrs TBXv16i8Two)>;
+def KryoWrite_4cyc_X_X_X_X_X_X_X_X_noRSV_229ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitX, KryoUnitX, KryoUnitX]> {
+ let Latency = 4; let NumMicroOps = 9;
+}
+def : InstRW<[KryoWrite_4cyc_X_X_X_X_X_X_X_X_noRSV_229ln],
+ (instrs TBXv8i8Four)>;
+def KryoWrite_5cyc_X_X_X_X_X_X_X_X_X_XY_X_X_X_231ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitXY,
+ KryoUnitX, KryoUnitX, KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 13;
+}
+def : InstRW<[KryoWrite_5cyc_X_X_X_X_X_X_X_X_X_XY_X_X_X_231ln],
+ (instrs TBXv16i8Three)>;
+def KryoWrite_5cyc_X_X_X_X_X_X_X_X_X_X_X_XY_X_X_X_X_X_233ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitX, KryoUnitXY, KryoUnitX, KryoUnitX, KryoUnitX,
+ KryoUnitX, KryoUnitX]> {
+ let Latency = 5; let NumMicroOps = 17;
+}
+def : InstRW<[KryoWrite_5cyc_X_X_X_X_X_X_X_X_X_X_X_XY_X_X_X_X_X_233ln],
+ (instrs TBXv16i8Four)>;
+def KryoWrite_1cyc_XY_XY_217ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_XY_217ln],
+ (instregex "((TRN1|TRN2|ZIP1|UZP1|UZP2)v2i64|ZIP2(v2i64|v4i32|v8i16|v16i8))")>;
+def KryoWrite_1cyc_X_X_211ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_X_X_211ln],
+ (instregex "(TRN1|TRN2)(v4i32|v8i16|v16i8)")>;
+def KryoWrite_1cyc_X_XY_213ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_X_XY_213ln],
+ (instregex "(TRN1|TRN2)(v2i32|v4i16|v8i8)")>;
+def KryoWrite_3cyc_XY_noRSV_156ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_noRSV_156ln],
+ (instrs URECPEv2i32, URSQRTEv2i32)>;
+def KryoWrite_3cyc_XY_XY_168ln :
+ SchedWriteRes<[KryoUnitXY, KryoUnitXY]> {
+ let Latency = 3; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_3cyc_XY_XY_168ln],
+ (instrs URECPEv4i32, URSQRTEv4i32)>;
+def KryoWrite_1cyc_X_X_210ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_X_X_210ln],
+ (instregex "(UZP1|UZP2)(v4i32|v8i16|v16i8)")>;
+def KryoWrite_1cyc_X_noRSV_206ln :
+ SchedWriteRes<[KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_X_noRSV_206ln],
+ (instregex "(UZP1|UZP2|ZIP1|ZIP2)(v2i32|v4i16|v8i8)")>;
+def KryoWrite_1cyc_XY_noRSV_215ln :
+ SchedWriteRes<[KryoUnitXY]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_XY_noRSV_215ln],
+ (instregex "XTNv.*")>;
+def KryoWrite_1cyc_X_X_209ln :
+ SchedWriteRes<[KryoUnitX, KryoUnitX]> {
+ let Latency = 1; let NumMicroOps = 2;
+}
+def : InstRW<[KryoWrite_1cyc_X_X_209ln],
+ (instregex "ZIP1(v4i32|v8i16|v16i8)")>;
diff --git a/lib/Target/AArch64/AArch64SchedM1.td b/lib/Target/AArch64/AArch64SchedM1.td
index 6525628dbfd6..2288b8dfc223 100644
--- a/lib/Target/AArch64/AArch64SchedM1.td
+++ b/lib/Target/AArch64/AArch64SchedM1.td
@@ -19,9 +19,8 @@
def ExynosM1Model : SchedMachineModel {
let IssueWidth = 4; // Up to 4 uops per cycle.
- let MinLatency = 0; // OoO.
let MicroOpBufferSize = 96; // ROB size.
- let LoopMicroOpBufferSize = 32; // Instruction queue size.
+ let LoopMicroOpBufferSize = 24; // Based on the instruction queue size.
let LoadLatency = 4; // Optimistic load cases.
let MispredictPenalty = 14; // Minimum branch misprediction penalty.
let CompleteModel = 0; // Use the default model otherwise.
@@ -142,12 +141,13 @@ def : WriteRes<WriteVST, [M1UnitS, M1UnitFST]> { let Latency = 1; }
def : WriteRes<WriteV, [M1UnitFADD]> { let Latency = 3; }
// Other miscellaneous instructions.
-def : WriteRes<WriteSys, []> { let Latency = 1; }
+def : WriteRes<WriteAtomic, []> { let Unsupported = 1; }
def : WriteRes<WriteBarrier, []> { let Latency = 1; }
def : WriteRes<WriteHint, []> { let Latency = 1; }
+def : WriteRes<WriteSys, []> { let Latency = 1; }
//===----------------------------------------------------------------------===//
-// Fast forwarding.
+// Generic fast forwarding.
// TODO: Add FP register forwarding rules.
@@ -187,6 +187,10 @@ def M1WriteNEONH : SchedWriteRes<[M1UnitNALU,
M1UnitFST]> { let Latency = 3; }
def M1WriteNEONI : SchedWriteRes<[M1UnitFST,
M1UnitL]> { let Latency = 9; }
+def M1WriteNEONJ : SchedWriteRes<[M1UnitNMISC,
+ M1UnitFMAC]> { let Latency = 6; }
+def M1WriteNEONK : SchedWriteRes<[M1UnitNMISC,
+ M1UnitFMAC]> { let Latency = 7; }
def M1WriteALU1 : SchedWriteRes<[M1UnitALU]> { let Latency = 1; }
def M1WriteB : SchedWriteRes<[M1UnitB]> { let Latency = 1; }
// FIXME: This is the worst case, conditional branch and link.
@@ -305,8 +309,10 @@ def : InstRW<[M1WriteFVAR15], (instregex "FSQRTv.f32")>;
def : InstRW<[M1WriteFVAR23], (instregex "FSQRTv2f64")>;
def : InstRW<[M1WriteNMISC1], (instregex "^F(MAX|MIN)(NM)?V?v")>;
def : InstRW<[M1WriteNMISC2], (instregex "^F(MAX|MIN)(NM)?Pv")>;
-def : InstRW<[M1WriteFMAC4], (instregex "^FMULX?v")>;
-def : InstRW<[M1WriteFMAC5], (instregex "^FML[AS]v")>;
+def : InstRW<[M1WriteNEONJ], (instregex "^FMULX?v.i")>;
+def : InstRW<[M1WriteFMAC4], (instregex "^FMULX?v.f")>;
+def : InstRW<[M1WriteNEONK], (instregex "^FML[AS]v.i")>;
+def : InstRW<[M1WriteFMAC5], (instregex "^FML[AS]v.f")>;
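+// NOTE (editorial): the "v.i" patterns match the by-element forms (e.g.
+// FMULv4i32_indexed), which now also occupy the NMISC unit at a longer
+// latency, while "v.f" matches the plain vector forms (e.g. FMULv2f32),
+// which stay on the FMAC pipe as before.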
def : InstRW<[M1WriteFCVT3], (instregex "^FRINT[AIMNPXZ]v")>;
// ASIMD miscellaneous instructions.
@@ -337,16 +343,19 @@ def : InstRW<[WriteSequence<[M1WriteNAL12], 4>],
(instregex "^TB[LX]v16i8Four")>;
def : InstRW<[M1WriteNEOND], (instregex "^[SU]MOVv")>;
def : InstRW<[M1WriteNALU1], (instregex "^INSv.+lane")>;
-def : InstRW<[M1WriteNALU1], (instregex "^(TRN|UZP)(1|2)(v8i8|v4i16|v2i32)")>;
-def : InstRW<[M1WriteNALU2], (instregex "^(TRN|UZP)(1|2)(v16i8|v8i16|v4i32|v2i64)")>;
-def : InstRW<[M1WriteNALU1], (instregex "^ZIP(1|2)v")>;
+def : InstRW<[M1WriteNALU1], (instregex "^(TRN|UZP)[12](v8i8|v4i16|v2i32)")>;
+def : InstRW<[M1WriteNALU2], (instregex "^(TRN|UZP)[12](v16i8|v8i16|v4i32|v2i64)")>;
+def : InstRW<[M1WriteNALU1], (instregex "^ZIP[12]v")>;
// ASIMD load instructions.
// ASIMD store instructions.
// Cryptography instructions.
-def : InstRW<[M1WriteNCRYPT1], (instregex "^AES")>;
+def M1WriteAES : SchedWriteRes<[M1UnitNCRYPT]> { let Latency = 1; }
+def M1ReadAES : SchedReadAdvance<1, [M1WriteAES]>;
+def : InstRW<[M1WriteAES, M1ReadAES], (instregex "^AES")>;
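+// NOTE (editorial): the SchedReadAdvance above lets a dependent AES op read
+// its input one cycle early, so a chained AESE -> AESMC pair sees an
+// effective latency of 1 - 1 = 0 cycles, modeling the forwarding between
+// adjacent crypto ops.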
+
def : InstRW<[M1WriteNCRYPT1], (instregex "^PMUL")>;
def : InstRW<[M1WriteNCRYPT1], (instregex "^SHA1(H|SU)")>;
def : InstRW<[M1WriteNCRYPT5], (instregex "^SHA1[CMP]")>;
diff --git a/lib/Target/AArch64/AArch64SchedVulcan.td b/lib/Target/AArch64/AArch64SchedVulcan.td
new file mode 100644
index 000000000000..0aa2462eba83
--- /dev/null
+++ b/lib/Target/AArch64/AArch64SchedVulcan.td
@@ -0,0 +1,855 @@
+//=- AArch64SchedVulcan.td - Vulcan Scheduling Defs ----------*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// 1. Introduction
+//
+// This file defines the machine model for Broadcom Vulcan to support
+// instruction scheduling and other instruction cost heuristics.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// 2. Pipeline Description.
+
+def VulcanModel : SchedMachineModel {
+ let IssueWidth = 4; // 4 micro-ops dispatched at a time.
+ let MicroOpBufferSize = 180; // 180 entries in micro-op re-order buffer.
+ let LoadLatency = 4; // Optimistic load latency.
+ let MispredictPenalty = 12; // Extra cycles for mispredicted branch.
+ // Determined via a mix of micro-arch details and experimentation.
+ let LoopMicroOpBufferSize = 32;
+ let PostRAScheduler = 1; // Using PostRA sched.
+ let CompleteModel = 1;
+}
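+
+// NOTE (editorial): a minimal sketch of how a model like this is attached to
+// a CPU in AArch64.td (the feature list is elided here, not authoritative):
+//
+//   def : ProcessorModel<"vulcan", VulcanModel, [/* subtarget features */]>;
+//
+// TableGen then emits the per-opcode scheduling tables that the machine
+// scheduler consults when compiling for this CPU.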
+
+// Define the issue ports.
+
+// Port 0: ALU, FP/SIMD.
+def VulcanP0 : ProcResource<1>;
+
+// Port 1: ALU, FP/SIMD, integer mul/div.
+def VulcanP1 : ProcResource<1>;
+
+// Port 2: ALU, Branch.
+def VulcanP2 : ProcResource<1>;
+
+// Port 3: Store data.
+def VulcanP3 : ProcResource<1>;
+
+// Port 4: Load/store.
+def VulcanP4 : ProcResource<1>;
+
+// Port 5: Load/store.
+def VulcanP5 : ProcResource<1>;
+
+let SchedModel = VulcanModel in {
+
+// Define groups for the functional units on each
+// issue port. Each group created will be used
+// by a WriteRes later on.
+//
+// NOTE: Some groups only contain one member. This
+// is a way to create names for the various functional
+// units that share a single issue port. For example,
+// "VulcanI1" for ALU ops on port 1 and "VulcanF1" for
+// FP ops on port 1.
+
+// Integer divide and multiply micro-ops only on port 1.
+def VulcanI1 : ProcResGroup<[VulcanP1]>;
+
+// Branch micro-ops only on port 2.
+def VulcanI2 : ProcResGroup<[VulcanP2]>;
+
+// ALU micro-ops on ports 0, 1, and 2.
+def VulcanI012 : ProcResGroup<[VulcanP0, VulcanP1, VulcanP2]>;
+
+// Crypto FP/SIMD micro-ops only on port 1.
+def VulcanF1 : ProcResGroup<[VulcanP1]>;
+
+// FP/SIMD micro-ops on ports 0 and 1.
+def VulcanF01 : ProcResGroup<[VulcanP0, VulcanP1]>;
+
+// Store data micro-ops only on port 3.
+def VulcanSD : ProcResGroup<[VulcanP3]>;
+
+// Load/store micro-ops on ports 4 and 5.
+def VulcanLS01 : ProcResGroup<[VulcanP4, VulcanP5]>;
+
+// 60-entry unified scheduler.
+def VulcanAny : ProcResGroup<[VulcanP0, VulcanP1, VulcanP2,
+ VulcanP3, VulcanP4, VulcanP5]> {
+ let BufferSize = 60;
+}
+
+// Define commonly used write types for InstRW specializations.
+// All definitions follow the format: VulcanWrite_<NumCycles>Cyc_<Resources>.
+
+// 3 cycles on I1.
+def VulcanWrite_3Cyc_I1 : SchedWriteRes<[VulcanI1]> { let Latency = 3; }
+
+// 4 cycles on I1.
+def VulcanWrite_4Cyc_I1 : SchedWriteRes<[VulcanI1]> { let Latency = 4; }
+
+// 1 cycle on I0, I1, or I2.
+def VulcanWrite_1Cyc_I012 : SchedWriteRes<[VulcanI012]> { let Latency = 1; }
+
+// 5 cycles on F1.
+def VulcanWrite_5Cyc_F1 : SchedWriteRes<[VulcanF1]> { let Latency = 5; }
+
+// 7 cycles on F1.
+def VulcanWrite_7Cyc_F1 : SchedWriteRes<[VulcanF1]> { let Latency = 7; }
+
+// 4 cycles on F0 or F1.
+def VulcanWrite_4Cyc_F01 : SchedWriteRes<[VulcanF01]> { let Latency = 4; }
+
+// 5 cycles on F0 or F1.
+def VulcanWrite_5Cyc_F01 : SchedWriteRes<[VulcanF01]> { let Latency = 5; }
+
+// 6 cycles on F0 or F1.
+def VulcanWrite_6Cyc_F01 : SchedWriteRes<[VulcanF01]> { let Latency = 6; }
+
+// 7 cycles on F0 or F1.
+def VulcanWrite_7Cyc_F01 : SchedWriteRes<[VulcanF01]> { let Latency = 7; }
+
+// 8 cycles on F0 or F1.
+def VulcanWrite_8Cyc_F01 : SchedWriteRes<[VulcanF01]> { let Latency = 8; }
+
+// 16 cycles on F0 or F1.
+def VulcanWrite_16Cyc_F01 : SchedWriteRes<[VulcanF01]> {
+ let Latency = 16;
+ let ResourceCycles = [8];
+}
+
+// 23 cycles on F0 or F1.
+def VulcanWrite_23Cyc_F01 : SchedWriteRes<[VulcanF01]> {
+ let Latency = 23;
+ let ResourceCycles = [11];
+}
+
+// 1 cycle on LS0 or LS1.
+def VulcanWrite_1Cyc_LS01 : SchedWriteRes<[VulcanLS01]> { let Latency = 1; }
+
+// 4 cycles on LS0 or LS1.
+def VulcanWrite_4Cyc_LS01 : SchedWriteRes<[VulcanLS01]> { let Latency = 4; }
+
+// 5 cycles on LS0 or LS1.
+def VulcanWrite_5Cyc_LS01 : SchedWriteRes<[VulcanLS01]> { let Latency = 5; }
+
+// 6 cycles on LS0 or LS1.
+def VulcanWrite_6Cyc_LS01 : SchedWriteRes<[VulcanLS01]> { let Latency = 6; }
+
+// 5 cycles on LS0 or LS1 and I0, I1, or I2.
+def VulcanWrite_5Cyc_LS01_I012 : SchedWriteRes<[VulcanLS01, VulcanI012]> {
+ let Latency = 5;
+ let NumMicroOps = 2;
+}
+
+// 6 cycles on LS0 or LS1 and 2 of I0, I1, or I2.
+def VulcanWrite_6Cyc_LS01_I012_I012 :
+ SchedWriteRes<[VulcanLS01, VulcanI012, VulcanI012]> {
+ let Latency = 6;
+ let NumMicroOps = 3;
+}
+
+// 1 cycle on LS0 or LS1 and F0 or F1.
+def VulcanWrite_1Cyc_LS01_F01 : SchedWriteRes<[VulcanLS01, VulcanF01]> {
+ let Latency = 1;
+ let NumMicroOps = 2;
+}
+
+// 5 cycles on LS0 or LS1 and F0 or F1.
+def VulcanWrite_5Cyc_LS01_F01 : SchedWriteRes<[VulcanLS01, VulcanF01]> {
+ let Latency = 5;
+ let NumMicroOps = 2;
+}
+
+// 6 cycles on LS0 or LS1 and F0 or F1.
+def VulcanWrite_6Cyc_LS01_F01 : SchedWriteRes<[VulcanLS01, VulcanF01]> {
+ let Latency = 6;
+ let NumMicroOps = 2;
+}
+
+// 7 cycles on LS0 or LS1 and F0 or F1.
+def VulcanWrite_7Cyc_LS01_F01 : SchedWriteRes<[VulcanLS01, VulcanF01]> {
+ let Latency = 7;
+ let NumMicroOps = 2;
+}
+
+// 8 cycles on LS0 or LS1 and F0 or F1.
+def VulcanWrite_8Cyc_LS01_F01 : SchedWriteRes<[VulcanLS01, VulcanF01]> {
+ let Latency = 8;
+ let NumMicroOps = 2;
+}
+
+// Define commonly used read types.
+
+// No forwarding is provided for these types.
+def : ReadAdvance<ReadI, 0>;
+def : ReadAdvance<ReadISReg, 0>;
+def : ReadAdvance<ReadIEReg, 0>;
+def : ReadAdvance<ReadIM, 0>;
+def : ReadAdvance<ReadIMA, 0>;
+def : ReadAdvance<ReadID, 0>;
+def : ReadAdvance<ReadExtrHi, 0>;
+def : ReadAdvance<ReadAdrBase, 0>;
+def : ReadAdvance<ReadVLD, 0>;
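+
+// NOTE (editorial): a nonzero ReadAdvance would model forwarding by letting
+// the consumer read an operand N cycles before the producer's nominal
+// latency expires; the all-zero entries above simply grant Vulcan no bypass
+// credit. A hypothetical example (not part of this model):
+//
+//   def : ReadAdvance<ReadIMA, 2, [WriteIM32, WriteIM64]>;
+//
+// would let a multiply-accumulate consume its accumulator two cycles early.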
+
+}
+
+
+//===----------------------------------------------------------------------===//
+// 3. Instruction Tables.
+
+let SchedModel = VulcanModel in {
+
+//---
+// 3.1 Branch Instructions
+//---
+
+// Branch, immed
+// Branch and link, immed
+// Compare and branch
+def : WriteRes<WriteBr, [VulcanI2]> { let Latency = 1; }
+
+def : WriteRes<WriteSys, []> { let Latency = 1; }
+def : WriteRes<WriteBarrier, []> { let Latency = 1; }
+def : WriteRes<WriteHint, []> { let Latency = 1; }
+
+def : WriteRes<WriteAtomic, []> { let Unsupported = 1; }
+
+// Branch, register
+// Branch and link, register != LR
+// Branch and link, register = LR
+def : WriteRes<WriteBrReg, [VulcanI2]> { let Latency = 1; }
+
+//---
+// 3.2 Arithmetic and Logical Instructions
+// 3.3 Move and Shift Instructions
+//---
+
+// ALU, basic
+// Conditional compare
+// Conditional select
+// Address generation
+def : WriteRes<WriteI, [VulcanI012]> { let Latency = 1; }
+def : InstRW<[WriteI], (instrs COPY)>;
+
+// ALU, extend and/or shift
+def : WriteRes<WriteISReg, [VulcanI012]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+}
+
+def : WriteRes<WriteIEReg, [VulcanI012]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+}
+
+// Move immed
+def : WriteRes<WriteImm, [VulcanI012]> { let Latency = 1; }
+
+// Variable shift
+def : WriteRes<WriteIS, [VulcanI012]> { let Latency = 1; }
+
+//---
+// 3.4 Divide and Multiply Instructions
+//---
+
+// Divide, W-form
+// Latency range of 13-23. Take the average.
+def : WriteRes<WriteID32, [VulcanI1]> {
+ let Latency = 18;
+ let ResourceCycles = [18];
+}
+
+// Divide, X-form
+// Latency range of 13-39. Take the average.
+def : WriteRes<WriteID64, [VulcanI1]> {
+ let Latency = 26;
+ let ResourceCycles = [26];
+}
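+
+// NOTE (editorial): spelling out the averages above: (13 + 23) / 2 = 18 and
+// (13 + 39) / 2 = 26. Since ResourceCycles matches Latency, the divider on
+// I1 is held for the whole operation, so independent divides serialize.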
+
+// Multiply accumulate, W-form
+def : WriteRes<WriteIM32, [VulcanI012]> { let Latency = 5; }
+
+// Multiply accumulate, X-form
+def : WriteRes<WriteIM64, [VulcanI012]> { let Latency = 5; }
+
+// Bitfield extract, two reg
+def : WriteRes<WriteExtr, [VulcanI012]> { let Latency = 1; }
+
+// Bitfield move, basic
+// Bitfield move, insert
+// NOTE: Handled by WriteIS.
+
+// Count leading
+def : InstRW<[VulcanWrite_3Cyc_I1], (instregex "^CLS(W|X)r$",
+ "^CLZ(W|X)r$")>;
+
+// Reverse bits/bytes
+// NOTE: Handled by WriteI.
+
+//---
+// 3.6 Load Instructions
+// 3.10 FP Load Instructions
+//---
+
+// Load register, literal
+// Load register, unscaled immed
+// Load register, immed unprivileged
+// Load register, unsigned immed
+def : WriteRes<WriteLD, [VulcanLS01]> { let Latency = 4; }
+
+// Load register, immed post-index
+// NOTE: Handled by WriteLD, WriteI.
+// Load register, immed pre-index
+// NOTE: Handled by WriteLD, WriteAdr.
+def : WriteRes<WriteAdr, [VulcanI012]> { let Latency = 1; }
+
+// Load register offset, basic
+// Load register, register offset, scale by 4/8
+// Load register, register offset, scale by 2
+// Load register offset, extend
+// Load register, register offset, extend, scale by 4/8
+// Load register, register offset, extend, scale by 2
+def VulcanWriteLDIdx : SchedWriteVariant<[
+ SchedVar<ScaledIdxPred, [VulcanWrite_6Cyc_LS01_I012_I012]>,
+ SchedVar<NoSchedPred, [VulcanWrite_5Cyc_LS01_I012]>]>;
+def : SchedAlias<WriteLDIdx, VulcanWriteLDIdx>;
+
+def VulcanReadAdrBase : SchedReadVariant<[
+ SchedVar<ScaledIdxPred, [ReadDefault]>,
+ SchedVar<NoSchedPred, [ReadDefault]>]>;
+def : SchedAlias<ReadAdrBase, VulcanReadAdrBase>;
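+
+// NOTE (editorial): the write variant above chooses resources per instance:
+// when ScaledIdxPred holds (the index register is scaled), the load pays a
+// third micro-op on I0-I2 and 6 cycles of latency; otherwise it is the
+// 2-micro-op, 5-cycle form. The read variant resolves to ReadDefault on both
+// arms, so for now it only keeps the ReadAdrBase hook in place for tuning.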
+
+// Load pair, immed offset, normal
+// Load pair, immed offset, signed words, base != SP
+// Load pair, immed offset signed words, base = SP
+// LDP only breaks into *one* LS micro-op, so
+// the resources are handled by WriteLD.
+def : WriteRes<WriteLDHi, []> {
+ let Latency = 5;
+}
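+
+// NOTE (editorial): in practice an LDP is expected to carry [WriteLD,
+// WriteLDHi] (plus WriteAdr for the writeback forms): the first destination
+// pays the 4-cycle LS01 load, and the second arrives one cycle later without
+// occupying a second load/store slot.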
+
+// Load pair, immed pre-index, normal
+// Load pair, immed pre-index, signed words
+// Load pair, immed post-index, normal
+// Load pair, immed post-index, signed words
+// NOTE: Handled by WriteLD, WriteLDHi, WriteAdr.
+
+//--
+// 3.7 Store Instructions
+// 3.11 FP Store Instructions
+//--
+
+// Store register, unscaled immed
+// Store register, immed unprivileged
+// Store register, unsigned immed
+def : WriteRes<WriteST, [VulcanLS01, VulcanSD]> {
+ let Latency = 1;
+ let NumMicroOps = 2;
+}
+
+// Store register, immed post-index
+// NOTE: Handled by WriteAdr, WriteST, ReadAdrBase
+
+// Store register, immed pre-index
+// NOTE: Handled by WriteAdr, WriteST
+
+// Store register, register offset, basic
+// Store register, register offset, scaled by 4/8
+// Store register, register offset, scaled by 2
+// Store register, register offset, extend
+// Store register, register offset, extend, scale by 4/8
+// Store register, register offset, extend, scale by 1
+def : WriteRes<WriteSTIdx, [VulcanLS01, VulcanSD, VulcanI012]> {
+ let Latency = 1;
+ let NumMicroOps = 3;
+}
+
+// Store pair, immed offset, W-form
+// Store pair, immed offset, X-form
+def : WriteRes<WriteSTP, [VulcanLS01, VulcanSD]> {
+ let Latency = 1;
+ let NumMicroOps = 2;
+}
+
+// Store pair, immed post-index, W-form
+// Store pair, immed post-index, X-form
+// Store pair, immed pre-index, W-form
+// Store pair, immed pre-index, X-form
+// NOTE: Handled by WriteAdr, WriteSTP.
+
+//---
+// 3.8 FP Data Processing Instructions
+//---
+
+// FP absolute value
+// FP min/max
+// FP negate
+def : WriteRes<WriteF, [VulcanF01]> { let Latency = 5; }
+
+// FP arithmetic
+def : InstRW<[VulcanWrite_6Cyc_F01], (instregex "^FADD", "^FSUB")>;
+
+// FP compare
+def : WriteRes<WriteFCmp, [VulcanF01]> { let Latency = 5; }
+
+// FP divide, S-form
+// FP square root, S-form
+def : WriteRes<WriteFDiv, [VulcanF01]> {
+ let Latency = 16;
+ let ResourceCycles = [8];
+}
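+
+// NOTE (editorial): Latency = 16 with ResourceCycles = [8] means a dependent
+// use waits 16 cycles, but the divider is reserved for only 8, so independent
+// S-form divides can start every 8 cycles instead of fully serializing.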
+
+// FP divide, D-form
+// FP square root, D-form
+def : InstRW<[VulcanWrite_23Cyc_F01], (instrs FDIVDrr, FSQRTDr)>;
+
+// FP multiply
+// FP multiply accumulate
+def : WriteRes<WriteFMul, [VulcanF01]> { let Latency = 6; }
+
+// FP round to integral
+def : InstRW<[VulcanWrite_7Cyc_F01],
+ (instregex "^FRINT(A|I|M|N|P|X|Z)(Sr|Dr)")>;
+
+// FP select
+def : InstRW<[VulcanWrite_4Cyc_F01], (instregex "^FCSEL")>;
+
+//---
+// 3.9 FP Miscellaneous Instructions
+//---
+
+// FP convert, from vec to vec reg
+// FP convert, from gen to vec reg
+// FP convert, from vec to gen reg
+def : WriteRes<WriteFCvt, [VulcanF01]> { let Latency = 7; }
+
+// FP move, immed
+// FP move, register
+def : WriteRes<WriteFImm, [VulcanF01]> { let Latency = 4; }
+
+// FP transfer, from gen to vec reg
+// FP transfer, from vec to gen reg
+def : WriteRes<WriteFCopy, [VulcanF01]> { let Latency = 4; }
+def : InstRW<[VulcanWrite_5Cyc_F01], (instrs FMOVXDHighr, FMOVDXHighr)>;
+
+//---
+// 3.12 ASIMD Integer Instructions
+//---
+
+// ASIMD absolute diff, D-form
+// ASIMD absolute diff, Q-form
+// ASIMD absolute diff accum, D-form
+// ASIMD absolute diff accum, Q-form
+// ASIMD absolute diff accum long
+// ASIMD absolute diff long
+// ASIMD arith, basic
+// ASIMD arith, complex
+// ASIMD compare
+// ASIMD logical (AND, BIC, EOR)
+// ASIMD max/min, basic
+// ASIMD max/min, reduce, 4H/4S
+// ASIMD max/min, reduce, 8B/8H
+// ASIMD max/min, reduce, 16B
+// ASIMD multiply, D-form
+// ASIMD multiply, Q-form
+// ASIMD multiply accumulate long
+// ASIMD multiply accumulate saturating long
+// ASIMD multiply long
+// ASIMD pairwise add and accumulate
+// ASIMD shift accumulate
+// ASIMD shift by immed, basic
+// ASIMD shift by immed and insert, basic, D-form
+// ASIMD shift by immed and insert, basic, Q-form
+// ASIMD shift by immed, complex
+// ASIMD shift by register, basic, D-form
+// ASIMD shift by register, basic, Q-form
+// ASIMD shift by register, complex, D-form
+// ASIMD shift by register, complex, Q-form
+def : WriteRes<WriteV, [VulcanF01]> { let Latency = 7; }
+
+// ASIMD arith, reduce, 4H/4S
+// ASIMD arith, reduce, 8B/8H
+// ASIMD arith, reduce, 16B
+def : InstRW<[VulcanWrite_5Cyc_F01],
+ (instregex "^ADDVv", "^SADDLVv", "^UADDLVv")>;
+
+// ASIMD logical (MOV, MVN, ORN, ORR)
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^ORRv", "^ORNv", "^NOTv")>;
+
+// ASIMD polynomial (8x8) multiply long
+def : InstRW<[VulcanWrite_5Cyc_F01], (instrs PMULLv8i8, PMULLv16i8)>;
+
+//---
+// 3.13 ASIMD Floating-point Instructions
+//---
+
+// ASIMD FP absolute value
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^FABSv")>;
+
+// ASIMD FP arith, normal, D-form
+// ASIMD FP arith, normal, Q-form
+def : InstRW<[VulcanWrite_6Cyc_F01], (instregex "^FABDv", "^FADDv", "^FSUBv")>;
+
+// ASIMD FP arith, pairwise, D-form
+// ASIMD FP arith, pairwise, Q-form
+def : InstRW<[VulcanWrite_6Cyc_F01], (instregex "^FADDPv")>;
+
+// ASIMD FP compare, D-form
+// ASIMD FP compare, Q-form
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^FACGEv", "^FACGTv")>;
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^FCMEQv", "^FCMGEv",
+ "^FCMGTv", "^FCMLEv",
+ "^FCMLTv")>;
+
+// ASIMD FP convert, long
+// ASIMD FP convert, narrow
+// ASIMD FP convert, other, D-form
+// ASIMD FP convert, other, Q-form
+// NOTE: Handled by WriteV.
+
+// ASIMD FP divide, D-form, F32
+def : InstRW<[VulcanWrite_16Cyc_F01], (instrs FDIVv2f32)>;
+
+// ASIMD FP divide, Q-form, F32
+def : InstRW<[VulcanWrite_16Cyc_F01], (instrs FDIVv4f32)>;
+
+// ASIMD FP divide, Q-form, F64
+def : InstRW<[VulcanWrite_23Cyc_F01], (instrs FDIVv2f64)>;
+
+// ASIMD FP max/min, normal, D-form
+// ASIMD FP max/min, normal, Q-form
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^FMAXv", "^FMAXNMv",
+ "^FMINv", "^FMINNMv")>;
+
+// ASIMD FP max/min, pairwise, D-form
+// ASIMD FP max/min, pairwise, Q-form
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^FMAXPv", "^FMAXNMPv",
+ "^FMINPv", "^FMINNMPv")>;
+
+// ASIMD FP max/min, reduce
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^FMAXVv", "^FMAXNMVv",
+ "^FMINVv", "^FMINNMVv")>;
+
+// ASIMD FP multiply, D-form, FZ
+// ASIMD FP multiply, D-form, no FZ
+// ASIMD FP multiply, Q-form, FZ
+// ASIMD FP multiply, Q-form, no FZ
+def : InstRW<[VulcanWrite_6Cyc_F01], (instregex "^FMULv", "^FMULXv")>;
+
+// ASIMD FP multiply accumulate, D-form, FZ
+// ASIMD FP multiply accumulate, D-form, no FZ
+// ASIMD FP multiply accumulate, Q-form, FZ
+// ASIMD FP multiply accumulate, Q-form, no FZ
+def : InstRW<[VulcanWrite_6Cyc_F01], (instregex "^FMLAv", "^FMLSv")>;
+
+// ASIMD FP negate
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^FNEGv")>;
+
+// ASIMD FP round, D-form
+// ASIMD FP round, Q-form
+// NOTE: Handled by WriteV.
+
+//--
+// 3.14 ASIMD Miscellaneous Instructions
+//--
+
+// ASIMD bit reverse
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^RBITv")>;
+
+// ASIMD bitwise insert, D-form
+// ASIMD bitwise insert, Q-form
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^BIFv", "^BITv", "^BSLv")>;
+
+// ASIMD count, D-form
+// ASIMD count, Q-form
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^CLSv", "^CLZv", "^CNTv")>;
+
+// ASIMD duplicate, gen reg
+// ASIMD duplicate, element
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^DUPv")>;
+
+// ASIMD extract
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^EXTv")>;
+
+// ASIMD extract narrow
+// ASIMD extract narrow, saturating
+// NOTE: Handled by WriteV.
+
+// ASIMD insert, element to element
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^INSv")>;
+
+// ASIMD move, integer immed
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^MOVIv", "^MOVIDv")>;
+
+// ASIMD move, FP immed
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^FMOVv")>;
+
+// ASIMD reciprocal estimate, D-form
+// ASIMD reciprocal estimate, Q-form
+def : InstRW<[VulcanWrite_5Cyc_F01],
+ (instregex "^FRECPEv", "^FRECPXv", "^URECPEv",
+ "^FRSQRTEv", "^URSQRTEv")>;
+
+// ASIMD reciprocal step, D-form, FZ
+// ASIMD reciprocal step, D-form, no FZ
+// ASIMD reciprocal step, Q-form, FZ
+// ASIMD reciprocal step, Q-form, no FZ
+def : InstRW<[VulcanWrite_6Cyc_F01], (instregex "^FRECPSv", "^FRSQRTSv")>;
+
+// ASIMD reverse
+def : InstRW<[VulcanWrite_5Cyc_F01],
+ (instregex "^REV16v", "^REV32v", "^REV64v")>;
+
+// ASIMD table lookup, D-form
+// ASIMD table lookup, Q-form
+def : InstRW<[VulcanWrite_8Cyc_F01], (instregex "^TBLv", "^TBXv")>;
+
+// ASIMD transfer, element to word or dword
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^UMOVv")>;
+
+// ASIMD transfer, element to gen reg
+def : InstRW<[VulcanWrite_6Cyc_F01], (instregex "^SMOVv", "^UMOVv")>;
+
+// ASIMD transfer gen reg to element
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^INSv")>;
+
+// ASIMD transpose
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^TRN1v", "^TRN2v",
+ "^UZP1v", "^UZP2v")>;
+
+// ASIMD unzip/zip
+def : InstRW<[VulcanWrite_5Cyc_F01], (instregex "^ZIP1v", "^ZIP2v")>;
+
+//--
+// 3.15 ASIMD Load Instructions
+//--
+
+// ASIMD load, 1 element, multiple, 1 reg, D-form
+// ASIMD load, 1 element, multiple, 1 reg, Q-form
+def : InstRW<[VulcanWrite_4Cyc_LS01],
+ (instregex "^LD1Onev(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_4Cyc_LS01, WriteAdr],
+ (instregex "^LD1Onev(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD load, 1 element, multiple, 2 reg, D-form
+// ASIMD load, 1 element, multiple, 2 reg, Q-form
+def : InstRW<[VulcanWrite_4Cyc_LS01],
+ (instregex "^LD1Twov(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_4Cyc_LS01, WriteAdr],
+ (instregex "^LD1Twov(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD load, 1 element, multiple, 3 reg, D-form
+// ASIMD load, 1 element, multiple, 3 reg, Q-form
+def : InstRW<[VulcanWrite_5Cyc_LS01],
+ (instregex "^LD1Threev(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_5Cyc_LS01, WriteAdr],
+ (instregex "^LD1Threev(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD load, 1 element, multiple, 4 reg, D-form
+// ASIMD load, 1 element, multiple, 4 reg, Q-form
+def : InstRW<[VulcanWrite_6Cyc_LS01],
+ (instregex "^LD1Fourv(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_6Cyc_LS01, WriteAdr],
+ (instregex "^LD1Fourv(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD load, 1 element, one lane, B/H/S
+// ASIMD load, 1 element, one lane, D
+def : InstRW<[VulcanWrite_5Cyc_LS01_F01], (instregex "^LD1i(8|16|32|64)$")>;
+def : InstRW<[VulcanWrite_5Cyc_LS01_F01, WriteAdr],
+ (instregex "^LD1i(8|16|32|64)_POST$")>;
+
+// ASIMD load, 1 element, all lanes, D-form, B/H/S
+// ASIMD load, 1 element, all lanes, D-form, D
+// ASIMD load, 1 element, all lanes, Q-form
+def : InstRW<[VulcanWrite_5Cyc_LS01_F01],
+ (instregex "^LD1Rv(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_5Cyc_LS01_F01, WriteAdr],
+ (instregex "^LD1Rv(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD load, 2 element, multiple, D-form, B/H/S
+// ASIMD load, 2 element, multiple, Q-form, B/H/S
+// ASIMD load, 2 element, multiple, Q-form, D
+def : InstRW<[VulcanWrite_5Cyc_LS01_F01],
+ (instregex "^LD2Twov(8b|4h|2s|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_5Cyc_LS01_F01, WriteAdr],
+ (instregex "^LD2Twov(8b|4h|2s|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD load, 2 element, one lane, B/H
+// ASIMD load, 2 element, one lane, S
+// ASIMD load, 2 element, one lane, D
+def : InstRW<[VulcanWrite_5Cyc_LS01_F01], (instregex "^LD2i(8|16|32|64)$")>;
+def : InstRW<[VulcanWrite_5Cyc_LS01_F01, WriteAdr],
+ (instregex "^LD2i(8|16|32|64)_POST$")>;
+
+// ASIMD load, 2 element, all lanes, D-form, B/H/S
+// ASIMD load, 2 element, all lanes, D-form, D
+// ASIMD load, 2 element, all lanes, Q-form
+def : InstRW<[VulcanWrite_5Cyc_LS01_F01],
+ (instregex "^LD2Rv(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_5Cyc_LS01_F01, WriteAdr],
+ (instregex "^LD2Rv(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD load, 3 element, multiple, D-form, B/H/S
+// ASIMD load, 3 element, multiple, Q-form, B/H/S
+// ASIMD load, 3 element, multiple, Q-form, D
+def : InstRW<[VulcanWrite_8Cyc_LS01_F01],
+ (instregex "^LD3Threev(8b|4h|2s|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_8Cyc_LS01_F01, WriteAdr],
+ (instregex "^LD3Threev(8b|4h|2s|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD load, 3 element, one lane, B/H
+// ASIMD load, 3 element, one lane, S
+// ASIMD load, 3 element, one lane, D
+def : InstRW<[VulcanWrite_7Cyc_LS01_F01], (instregex "^LD3i(8|16|32|64)$")>;
+def : InstRW<[VulcanWrite_7Cyc_LS01_F01, WriteAdr],
+ (instregex "^LD3i(8|16|32|64)_POST$")>;
+
+// ASIMD load, 3 element, all lanes, D-form, B/H/S
+// ASIMD load, 3 element, all lanes, D-form, D
+// ASIMD load, 3 element, all lanes, Q-form, B/H/S
+// ASIMD load, 3 element, all lanes, Q-form, D
+def : InstRW<[VulcanWrite_7Cyc_LS01_F01],
+ (instregex "^LD3Rv(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_7Cyc_LS01_F01, WriteAdr],
+ (instregex "^LD3Rv(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD load, 4 element, multiple, D-form, B/H/S
+// ASIMD load, 4 element, multiple, Q-form, B/H/S
+// ASIMD load, 4 element, multiple, Q-form, D
+def : InstRW<[VulcanWrite_8Cyc_LS01_F01],
+ (instregex "^LD4Fourv(8b|4h|2s|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_8Cyc_LS01_F01, WriteAdr],
+ (instregex "^LD4Fourv(8b|4h|2s|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD load, 4 element, one lane, B/H
+// ASIMD load, 4 element, one lane, S
+// ASIMD load, 4 element, one lane, D
+def : InstRW<[VulcanWrite_6Cyc_LS01_F01], (instregex "^LD4i(8|16|32|64)$")>;
+def : InstRW<[VulcanWrite_6Cyc_LS01_F01, WriteAdr],
+ (instregex "^LD4i(8|16|32|64)_POST$")>;
+
+// ASIMD load, 4 element, all lanes, D-form, B/H/S
+// ASIMD load, 4 element, all lanes, D-form, D
+// ASIMD load, 4 element, all lanes, Q-form, B/H/S
+// ASIMD load, 4 element, all lanes, Q-form, D
+def : InstRW<[VulcanWrite_6Cyc_LS01_F01],
+ (instregex "^LD4Rv(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_6Cyc_LS01_F01, WriteAdr],
+ (instregex "^LD4Rv(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+
+//--
+// 3.16 ASIMD Store Instructions
+//--
+
+// ASIMD store, 1 element, multiple, 1 reg, D-form
+// ASIMD store, 1 element, multiple, 1 reg, Q-form
+def : InstRW<[VulcanWrite_1Cyc_LS01],
+ (instregex "^ST1Onev(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_1Cyc_LS01, WriteAdr],
+ (instregex "^ST1Onev(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD store, 1 element, multiple, 2 reg, D-form
+// ASIMD store, 1 element, multiple, 2 reg, Q-form
+def : InstRW<[VulcanWrite_1Cyc_LS01],
+ (instregex "^ST1Twov(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_1Cyc_LS01, WriteAdr],
+ (instregex "^ST1Twov(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD store, 1 element, multiple, 3 reg, D-form
+// ASIMD store, 1 element, multiple, 3 reg, Q-form
+def : InstRW<[VulcanWrite_1Cyc_LS01],
+ (instregex "^ST1Threev(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_1Cyc_LS01, WriteAdr],
+ (instregex "^ST1Threev(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD store, 1 element, multiple, 4 reg, D-form
+// ASIMD store, 1 element, multiple, 4 reg, Q-form
+def : InstRW<[VulcanWrite_1Cyc_LS01],
+ (instregex "^ST1Fourv(8b|4h|2s|1d|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_1Cyc_LS01, WriteAdr],
+ (instregex "^ST1Fourv(8b|4h|2s|1d|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD store, 1 element, one lane, B/H/S
+// ASIMD store, 1 element, one lane, D
+def : InstRW<[VulcanWrite_1Cyc_LS01_F01],
+ (instregex "^ST1i(8|16|32|64)$")>;
+def : InstRW<[VulcanWrite_1Cyc_LS01_F01, WriteAdr],
+ (instregex "^ST1i(8|16|32|64)_POST$")>;
+
+// ASIMD store, 2 element, multiple, D-form, B/H/S
+// ASIMD store, 2 element, multiple, Q-form, B/H/S
+// ASIMD store, 2 element, multiple, Q-form, D
+def : InstRW<[VulcanWrite_1Cyc_LS01_F01],
+ (instregex "^ST2Twov(8b|4h|2s|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_1Cyc_LS01_F01, WriteAdr],
+ (instregex "^ST2Twov(8b|4h|2s|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD store, 2 element, one lane, B/H/S
+// ASIMD store, 2 element, one lane, D
+def : InstRW<[VulcanWrite_1Cyc_LS01_F01],
+ (instregex "^ST2i(8|16|32|64)$")>;
+def : InstRW<[VulcanWrite_1Cyc_LS01_F01, WriteAdr],
+ (instregex "^ST2i(8|16|32|64)_POST$")>;
+
+// ASIMD store, 3 element, multiple, D-form, B/H/S
+// ASIMD store, 3 element, multiple, Q-form, B/H/S
+// ASIMD store, 3 element, multiple, Q-form, D
+def : InstRW<[VulcanWrite_1Cyc_LS01_F01],
+ (instregex "^ST3Threev(8b|4h|2s|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_1Cyc_LS01_F01, WriteAdr],
+ (instregex "^ST3Threev(8b|4h|2s|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD store, 3 element, one lane, B/H
+// ASIMD store, 3 element, one lane, S
+// ASIMD store, 3 element, one lane, D
+def : InstRW<[VulcanWrite_1Cyc_LS01_F01], (instregex "^ST3i(8|16|32|64)$")>;
+def : InstRW<[VulcanWrite_1Cyc_LS01_F01, WriteAdr],
+ (instregex "^ST3i(8|16|32|64)_POST$")>;
+
+// ASIMD store, 4 element, multiple, D-form, B/H/S
+// ASIMD store, 4 element, multiple, Q-form, B/H/S
+// ASIMD store, 4 element, multiple, Q-form, D
+def : InstRW<[VulcanWrite_1Cyc_LS01_F01],
+ (instregex "^ST4Fourv(8b|4h|2s|16b|8h|4s|2d)$")>;
+def : InstRW<[VulcanWrite_1Cyc_LS01_F01, WriteAdr],
+ (instregex "^ST4Fourv(8b|4h|2s|16b|8h|4s|2d)_POST$")>;
+
+// ASIMD store, 4 element, one lane, B/H
+// ASIMD store, 4 element, one lane, S
+// ASIMD store, 4 element, one lane, D
+def : InstRW<[VulcanWrite_1Cyc_LS01_F01], (instregex "^ST4i(8|16|32|64)$")>;
+def : InstRW<[VulcanWrite_1Cyc_LS01_F01, WriteAdr],
+ (instregex "^ST4i(8|16|32|64)_POST$")>;
+
+//--
+// 3.17 Cryptography Extensions
+//--
+
+// Crypto AES ops
+def : InstRW<[VulcanWrite_5Cyc_F1], (instregex "^AES")>;
+
+// Crypto polynomial (64x64) multiply long
+def : InstRW<[VulcanWrite_5Cyc_F1], (instrs PMULLv1i64, PMULLv2i64)>;
+
+// Crypto SHA1 xor ops
+// Crypto SHA1 schedule acceleration ops
+// Crypto SHA256 schedule acceleration op (1 u-op)
+// Crypto SHA256 schedule acceleration op (2 u-ops)
+// Crypto SHA256 hash acceleration ops
+def : InstRW<[VulcanWrite_7Cyc_F1], (instregex "^SHA")>;
+
+//--
+// 3.18 CRC
+//--
+
+// CRC checksum ops
+def : InstRW<[VulcanWrite_4Cyc_I1], (instregex "^CRC32")>;
+
+} // SchedModel = VulcanModel
diff --git a/lib/Target/AArch64/AArch64Schedule.td b/lib/Target/AArch64/AArch64Schedule.td
index eaa9110ab1bc..ce81f48acf71 100644
--- a/lib/Target/AArch64/AArch64Schedule.td
+++ b/lib/Target/AArch64/AArch64Schedule.td
@@ -51,15 +51,15 @@ def WriteSTIdx : SchedWrite; // Store to a register index (maybe scaled).
def ReadAdrBase : SchedRead; // Read the base register of a reg-offset LD/ST.
// Predicate for determining when a shiftable register is shifted.
-def RegShiftedPred : SchedPredicate<[{TII->hasShiftedReg(MI)}]>;
+def RegShiftedPred : SchedPredicate<[{TII->hasShiftedReg(*MI)}]>;
// Predicate for determining when an extendable register is extended.
-def RegExtendedPred : SchedPredicate<[{TII->hasExtendedReg(MI)}]>;
+def RegExtendedPred : SchedPredicate<[{TII->hasExtendedReg(*MI)}]>;
// ScaledIdxPred is true if a WriteLDIdx operand will be
// scaled. Subtargets can use this to dynamically select resources and
// latency for WriteLDIdx and ReadAdrBase.
-def ScaledIdxPred : SchedPredicate<[{TII->isScaledAddr(MI)}]>;
+def ScaledIdxPred : SchedPredicate<[{TII->isScaledAddr(*MI)}]>;
// Serialized two-level address load.
// EXAMPLE: LOADGot
@@ -92,6 +92,8 @@ def WriteV : SchedWrite; // Vector ops.
def WriteVLD : SchedWrite; // Vector loads.
def WriteVST : SchedWrite; // Vector stores.
+def WriteAtomic : SchedWrite; // Atomic memory operations (CAS, Swap, LDOP)
+
// Read the unwritten lanes of the VLD's destination registers.
def ReadVLD : SchedRead;
diff --git a/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp b/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
index f40293021d74..66a8f332513a 100644
--- a/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
+++ b/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
@@ -17,7 +17,7 @@ using namespace llvm;
#define DEBUG_TYPE "aarch64-selectiondag-info"
SDValue AArch64SelectionDAGInfo::EmitTargetCodeForMemset(
- SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src,
+ SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const {
// Check to see if there is a specialized entry-point for memory zeroing.
@@ -44,10 +44,16 @@ SDValue AArch64SelectionDAGInfo::EmitTargetCodeForMemset(
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl).setChain(Chain)
.setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
- DAG.getExternalSymbol(bzeroEntry, IntPtr), std::move(Args), 0)
+ DAG.getExternalSymbol(bzeroEntry, IntPtr), std::move(Args))
.setDiscardResult();
std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
return CallResult.second;
}
return SDValue();
}
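+// NOTE (editorial): returning true here defers FMA formation from the DAG
+// combiner to the machine combiner, which can weigh fused multiply-adds
+// against this subtarget's scheduling model per pattern; the hook is only
+// enabled at -O3 (CodeGenOpt::Aggressive) below.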
+bool AArch64SelectionDAGInfo::generateFMAsInMachineCombiner(
+ CodeGenOpt::Level OptLevel) const {
+ if (OptLevel >= CodeGenOpt::Aggressive)
+ return true;
+ return false;
+}
diff --git a/lib/Target/AArch64/AArch64SelectionDAGInfo.h b/lib/Target/AArch64/AArch64SelectionDAGInfo.h
index 97421b45b122..7e4f11091226 100644
--- a/lib/Target/AArch64/AArch64SelectionDAGInfo.h
+++ b/lib/Target/AArch64/AArch64SelectionDAGInfo.h
@@ -7,24 +7,24 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the AArch64 subclass for TargetSelectionDAGInfo.
+// This file defines the AArch64 subclass for SelectionDAGTargetInfo.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64SELECTIONDAGINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64SELECTIONDAGINFO_H
-#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
namespace llvm {
-class AArch64SelectionDAGInfo : public TargetSelectionDAGInfo {
+class AArch64SelectionDAGInfo : public SelectionDAGTargetInfo {
public:
-
- SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
- SDValue Dst, SDValue Src, SDValue Size,
- unsigned Align, bool isVolatile,
+ SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl,
+ SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const override;
+ bool generateFMAsInMachineCombiner(CodeGenOpt::Level OptLevel) const override;
};
}
diff --git a/lib/Target/AArch64/AArch64StorePairSuppress.cpp b/lib/Target/AArch64/AArch64StorePairSuppress.cpp
index 1c6b15790ea9..f904b2379416 100644
--- a/lib/Target/AArch64/AArch64StorePairSuppress.cpp
+++ b/lib/Target/AArch64/AArch64StorePairSuppress.cpp
@@ -115,6 +115,9 @@ bool AArch64StorePairSuppress::isNarrowFPStore(const MachineInstr &MI) {
}
bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
const TargetSubtargetInfo &ST = MF.getSubtarget();
TII = static_cast<const AArch64InstrInfo *>(ST.getInstrInfo());
TRI = ST.getRegisterInfo();
@@ -141,8 +144,8 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
if (!isNarrowFPStore(MI))
continue;
unsigned BaseReg;
- unsigned Offset;
- if (TII->getMemOpBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
+ int64_t Offset;
+ if (TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI)) {
if (PrevBaseReg == BaseReg) {
// If this block can take STPs, skip ahead to the next block.
if (!SuppressSTP && shouldAddSTPToBlock(MI.getParent()))
@@ -150,7 +153,7 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
// Otherwise, continue unpairing the stores in this block.
DEBUG(dbgs() << "Unpairing store " << MI << "\n");
SuppressSTP = true;
- TII->suppressLdStPair(&MI);
+ TII->suppressLdStPair(MI);
}
PrevBaseReg = BaseReg;
} else
diff --git a/lib/Target/AArch64/AArch64Subtarget.cpp b/lib/Target/AArch64/AArch64Subtarget.cpp
index f6ee8cf47a6a..7dd8ccbe6c25 100644
--- a/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -11,10 +11,9 @@
//
//===----------------------------------------------------------------------===//
+#include "AArch64Subtarget.h"
#include "AArch64InstrInfo.h"
#include "AArch64PBQPRegAlloc.h"
-#include "AArch64Subtarget.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/TargetRegistry.h"
@@ -44,58 +43,83 @@ AArch64Subtarget::initializeSubtargetDependencies(StringRef FS) {
CPUString = "generic";
ParseSubtargetFeatures(CPUString, FS);
+ initializeProperties();
+
return *this;
}
+void AArch64Subtarget::initializeProperties() {
+ // Initialize CPU-specific properties. We should add a tablegen feature for
+ // this in the future so we can specify it together with the subtarget
+ // features.
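+ // CacheLineSize is in bytes; the Pref*Alignment values are log2 alignments.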
+ switch (ARMProcFamily) {
+ case Cyclone:
+ CacheLineSize = 64;
+ PrefetchDistance = 280;
+ MinPrefetchStride = 2048;
+ MaxPrefetchIterationsAhead = 3;
+ break;
+ case CortexA57:
+ MaxInterleaveFactor = 4;
+ break;
+ case ExynosM1:
+ PrefFunctionAlignment = 4;
+ PrefLoopAlignment = 3;
+ break;
+ case Kryo:
+ MaxInterleaveFactor = 4;
+ VectorInsertExtractBaseCost = 2;
+ CacheLineSize = 128;
+ PrefetchDistance = 740;
+ MinPrefetchStride = 1024;
+ MaxPrefetchIterationsAhead = 11;
+ break;
+ case Vulcan:
+ MaxInterleaveFactor = 4;
+ break;
+ case CortexA35: break;
+ case CortexA53: break;
+ case CortexA72: break;
+ case CortexA73: break;
+ case Others: break;
+ }
+}
+
AArch64Subtarget::AArch64Subtarget(const Triple &TT, const std::string &CPU,
const std::string &FS,
const TargetMachine &TM, bool LittleEndian)
- : AArch64GenSubtargetInfo(TT, CPU, FS), ARMProcFamily(Others),
- HasV8_1aOps(false), HasV8_2aOps(false), HasFPARMv8(false), HasNEON(false),
- HasCrypto(false), HasCRC(false), HasPerfMon(false), HasFullFP16(false),
- HasZeroCycleRegMove(false), HasZeroCycleZeroing(false),
- StrictAlign(false), ReserveX18(TT.isOSDarwin()), IsLittle(LittleEndian),
- CPUString(CPU), TargetTriple(TT), FrameLowering(),
+ : AArch64GenSubtargetInfo(TT, CPU, FS), ReserveX18(TT.isOSDarwin()),
+ IsLittle(LittleEndian), CPUString(CPU), TargetTriple(TT), FrameLowering(),
InstrInfo(initializeSubtargetDependencies(FS)), TSInfo(),
- TLInfo(TM, *this) {}
+ TLInfo(TM, *this), GISel() {}
+
+const CallLowering *AArch64Subtarget::getCallLowering() const {
+ assert(GISel && "Access to GlobalISel APIs not set");
+ return GISel->getCallLowering();
+}
+
+const RegisterBankInfo *AArch64Subtarget::getRegBankInfo() const {
+ assert(GISel && "Access to GlobalISel APIs not set");
+ return GISel->getRegBankInfo();
+}
-/// ClassifyGlobalReference - Find the target operand flags that describe
-/// how a global value should be referenced for the current subtarget.
+/// Find the target operand flags that describe how a global value should be
+/// referenced for the current subtarget.
unsigned char
AArch64Subtarget::ClassifyGlobalReference(const GlobalValue *GV,
- const TargetMachine &TM) const {
- bool isDef = GV->isStrongDefinitionForLinker();
-
+ const TargetMachine &TM) const {
// MachO large model always goes via a GOT, simply to get a single 8-byte
// absolute relocation on all global addresses.
if (TM.getCodeModel() == CodeModel::Large && isTargetMachO())
return AArch64II::MO_GOT;
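+ // Any global not known to be DSO-local must be accessed through the GOT.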
+ if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
+ return AArch64II::MO_GOT;
+
// The small code model's direct accesses use ADRP, which cannot necessarily
// produce the value 0 (if the code is above 4GB).
- if (TM.getCodeModel() == CodeModel::Small && GV->hasExternalWeakLinkage()) {
- // In PIC mode use the GOT, but in absolute mode use a constant pool load.
- if (TM.getRelocationModel() == Reloc::Static)
- return AArch64II::MO_CONSTPOOL;
- else
- return AArch64II::MO_GOT;
- }
-
- // If symbol visibility is hidden, the extra load is not needed if
- // the symbol is definitely defined in the current translation unit.
-
- // The handling of non-hidden symbols in PIC mode is rather target-dependent:
- // + On MachO, if the symbol is defined in this module the GOT can be
- // skipped.
- // + On ELF, the R_AARCH64_COPY relocation means that even symbols actually
- // defined could end up in unexpected places. Use a GOT.
- if (TM.getRelocationModel() != Reloc::Static && GV->hasDefaultVisibility()) {
- if (isTargetMachO())
- return isDef ? AArch64II::MO_NO_FLAG : AArch64II::MO_GOT;
- else
- // No need to go through the GOT for local symbols on ELF.
- return GV->hasLocalLinkage() ? AArch64II::MO_NO_FLAG : AArch64II::MO_GOT;
- }
+ if (TM.getCodeModel() == CodeModel::Small && GV->hasExternalWeakLinkage())
+ return AArch64II::MO_GOT;
return AArch64II::MO_NO_FLAG;
}
@@ -114,8 +138,7 @@ const char *AArch64Subtarget::getBZeroEntry() const {
}
void AArch64Subtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
- MachineInstr *begin, MachineInstr *end,
- unsigned NumRegionInstrs) const {
+ unsigned NumRegionInstrs) const {
// An LNT run (at least on Cyclone) showed reasonably significant gains for
// bi-directional scheduling, e.g. on 253.perlbmk.
Policy.OnlyTopDown = false;
@@ -123,8 +146,7 @@ void AArch64Subtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
// Enabling or disabling the latency heuristic is a close call: it seems to
// help almost no benchmark on out-of-order architectures, while regressing
// register pressure on a few benchmarks.
- if (isCyclone())
- Policy.DisableLatencyHeuristic = true;
+ Policy.DisableLatencyHeuristic = DisableLatencySchedHeuristic;
}
bool AArch64Subtarget::enableEarlyIfConversion() const {
@@ -146,8 +168,5 @@ bool AArch64Subtarget::supportsAddressTopByteIgnored() const {
std::unique_ptr<PBQPRAConstraint>
AArch64Subtarget::getCustomPBQPConstraints() const {
- if (!isCortexA57())
- return nullptr;
-
- return llvm::make_unique<A57ChainingConstraint>();
+ return balanceFPOps() ? llvm::make_unique<A57ChainingConstraint>() : nullptr;
}
diff --git a/lib/Target/AArch64/AArch64Subtarget.h b/lib/Target/AArch64/AArch64Subtarget.h
index 151133b2f32c..16a35405c892 100644
--- a/lib/Target/AArch64/AArch64Subtarget.h
+++ b/lib/Target/AArch64/AArch64Subtarget.h
@@ -19,6 +19,7 @@
#include "AArch64InstrInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64SelectionDAGInfo.h"
+#include "llvm/CodeGen/GlobalISel/GISelAccessor.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <string>
@@ -32,38 +33,64 @@ class StringRef;
class Triple;
class AArch64Subtarget : public AArch64GenSubtargetInfo {
-protected:
- enum ARMProcFamilyEnum {
+public:
+ enum ARMProcFamilyEnum : uint8_t {
Others,
CortexA35,
CortexA53,
CortexA57,
+ CortexA72,
+ CortexA73,
Cyclone,
- ExynosM1
+ ExynosM1,
+ Kryo,
+ Vulcan
};
+protected:
/// ARMProcFamily - ARM processor family: Cortex-A53, Cortex-A57, and others.
- ARMProcFamilyEnum ARMProcFamily;
+ ARMProcFamilyEnum ARMProcFamily = Others;
- bool HasV8_1aOps;
- bool HasV8_2aOps;
+ bool HasV8_1aOps = false;
+ bool HasV8_2aOps = false;
- bool HasFPARMv8;
- bool HasNEON;
- bool HasCrypto;
- bool HasCRC;
- bool HasPerfMon;
- bool HasFullFP16;
- bool HasSPE;
+ bool HasFPARMv8 = false;
+ bool HasNEON = false;
+ bool HasCrypto = false;
+ bool HasCRC = false;
+ bool HasRAS = false;
+ bool HasPerfMon = false;
+ bool HasFullFP16 = false;
+ bool HasSPE = false;
// HasZeroCycleRegMove - Has zero-cycle register mov instructions.
- bool HasZeroCycleRegMove;
+ bool HasZeroCycleRegMove = false;
// HasZeroCycleZeroing - Has zero-cycle zeroing instructions.
- bool HasZeroCycleZeroing;
+ bool HasZeroCycleZeroing = false;
// StrictAlign - Disallow unaligned memory accesses.
- bool StrictAlign;
+ bool StrictAlign = false;
+ bool MergeNarrowLoads = false;
+ bool UseAA = false;
+ bool PredictableSelectIsExpensive = false;
+ bool BalanceFPOps = false;
+ bool CustomAsCheapAsMove = false;
+ bool UsePostRAScheduler = false;
+ bool Misaligned128StoreIsSlow = false;
+ bool AvoidQuadLdStPairs = false;
+ bool UseAlternateSExtLoadCVTF32Pattern = false;
+ bool HasMacroOpFusion = false;
+ bool DisableLatencySchedHeuristic = false;
+ bool UseRSqrt = false;
+ uint8_t MaxInterleaveFactor = 2;
+ uint8_t VectorInsertExtractBaseCost = 3;
+ uint16_t CacheLineSize = 0;
+ uint16_t PrefetchDistance = 0;
+ uint16_t MinPrefetchStride = 1;
+ unsigned MaxPrefetchIterationsAhead = UINT_MAX;
+ unsigned PrefFunctionAlignment = 0;
+ unsigned PrefLoopAlignment = 0;
// ReserveX18 - X18 is not available as a general purpose register.
bool ReserveX18;
@@ -80,12 +107,20 @@ protected:
AArch64InstrInfo InstrInfo;
AArch64SelectionDAGInfo TSInfo;
AArch64TargetLowering TLInfo;
+ /// Gathers the access points to the GlobalISel-related APIs.
+ /// This is used to avoid ifndefs spreading around while GISel is
+ /// an optional library.
+ std::unique_ptr<GISelAccessor> GISel;
+
private:
/// initializeSubtargetDependencies - Initializes using CPUString and the
/// passed in feature string so that we can use initializer lists for
/// subtarget initialization.
AArch64Subtarget &initializeSubtargetDependencies(StringRef FS);
+ /// Initialize properties based on the selected processor family.
+ void initializeProperties();
+
public:
/// This constructor initializes the data members to match that
/// of the specified triple.
@@ -93,6 +128,11 @@ public:
const std::string &FS, const TargetMachine &TM,
bool LittleEndian);
+ /// This object will take ownership of \p GISel.
+ void setGISelAccessor(GISelAccessor &GISel) {
+ this->GISel.reset(&GISel);
+ }
+
const AArch64SelectionDAGInfo *getSelectionDAGInfo() const override {
return &TSInfo;
}
@@ -106,10 +146,20 @@ public:
const AArch64RegisterInfo *getRegisterInfo() const override {
return &getInstrInfo()->getRegisterInfo();
}
+ const CallLowering *getCallLowering() const override;
+ const RegisterBankInfo *getRegBankInfo() const override;
const Triple &getTargetTriple() const { return TargetTriple; }
bool enableMachineScheduler() const override { return true; }
bool enablePostRAScheduler() const override {
- return isGeneric() || isCortexA53() || isCortexA57();
+ return UsePostRAScheduler;
+ }
+
+ /// Returns the ARM processor family.
+ /// Avoid this function! CPU specifics should be kept local to this class
+ /// and preferably modeled with SubtargetFeatures or properties in
+ /// initializeProperties().
+ ARMProcFamilyEnum getProcFamily() const {
+ return ARMProcFamily;
}
bool hasV8_1aOps() const { return HasV8_1aOps; }
@@ -126,6 +176,33 @@ public:
bool hasNEON() const { return HasNEON; }
bool hasCrypto() const { return HasCrypto; }
bool hasCRC() const { return HasCRC; }
+ bool hasRAS() const { return HasRAS; }
+ bool mergeNarrowLoads() const { return MergeNarrowLoads; }
+ bool balanceFPOps() const { return BalanceFPOps; }
+ bool predictableSelectIsExpensive() const {
+ return PredictableSelectIsExpensive;
+ }
+ bool hasCustomCheapAsMoveHandling() const { return CustomAsCheapAsMove; }
+ bool isMisaligned128StoreSlow() const { return Misaligned128StoreIsSlow; }
+ bool avoidQuadLdStPairs() const { return AvoidQuadLdStPairs; }
+ bool useAlternateSExtLoadCVTF32Pattern() const {
+ return UseAlternateSExtLoadCVTF32Pattern;
+ }
+ bool hasMacroOpFusion() const { return HasMacroOpFusion; }
+ bool useRSqrt() const { return UseRSqrt; }
+ unsigned getMaxInterleaveFactor() const { return MaxInterleaveFactor; }
+ unsigned getVectorInsertExtractBaseCost() const {
+ return VectorInsertExtractBaseCost;
+ }
+ unsigned getCacheLineSize() const { return CacheLineSize; }
+ unsigned getPrefetchDistance() const { return PrefetchDistance; }
+ unsigned getMinPrefetchStride() const { return MinPrefetchStride; }
+ unsigned getMaxPrefetchIterationsAhead() const {
+ return MaxPrefetchIterationsAhead;
+ }
+ unsigned getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
+ unsigned getPrefLoopAlignment() const { return PrefLoopAlignment; }
+
/// CPU has TBI (top byte of addresses is ignored during HW address
/// translation) and OS enables it.
bool supportsAddressTopByteIgnored() const;
@@ -146,13 +223,7 @@ public:
bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
- bool isGeneric() const { return CPUString == "generic"; }
- bool isCyclone() const { return CPUString == "cyclone"; }
- bool isCortexA57() const { return CPUString == "cortex-a57"; }
- bool isCortexA53() const { return CPUString == "cortex-a53"; }
- bool isExynosM1() const { return CPUString == "exynos-m1"; }
-
- bool useAA() const override { return isCortexA53(); }
+ bool useAA() const override { return UseAA; }
/// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size
/// that still makes it profitable to inline the call.
@@ -174,8 +245,7 @@ public:
/// returns null.
const char *getBZeroEntry() const;
- void overrideSchedPolicy(MachineSchedPolicy &Policy, MachineInstr *begin,
- MachineInstr *end,
+ void overrideSchedPolicy(MachineSchedPolicy &Policy,
unsigned NumRegionInstrs) const override;
bool enableEarlyIfConversion() const override;
diff --git a/lib/Target/AArch64/AArch64SystemOperands.td b/lib/Target/AArch64/AArch64SystemOperands.td
new file mode 100644
index 000000000000..a3736c0868fb
--- /dev/null
+++ b/lib/Target/AArch64/AArch64SystemOperands.td
@@ -0,0 +1,1018 @@
+//===- AArch64SystemOperands.td ----------------------------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the symbolic operands permitted for various kinds of
+// AArch64 system instruction.
+//
+//===----------------------------------------------------------------------===//
+
+include "llvm/TableGen/SearchableTable.td"
+
+//===----------------------------------------------------------------------===//
+// AT (address translate) instruction options.
+//===----------------------------------------------------------------------===//
+
+class AT<string name, bits<2> op0, bits<3> op1, bits<4> crn, bits<4> crm,
+ bits<3> op2> : SearchableTable {
+ let SearchableFields = ["Name", "Encoding"];
+ let EnumValueField = "Encoding";
+
+ string Name = name;
+ bits<16> Encoding;
+ let Encoding{15-14} = op0;
+ let Encoding{13-11} = op1;
+ let Encoding{10-7} = crn;
+ let Encoding{6-3} = crm;
+ let Encoding{2-0} = op2;
+}
+
+def : AT<"S1E1R", 0b01, 0b000, 0b0111, 0b1000, 0b000>;
+def : AT<"S1E2R", 0b01, 0b100, 0b0111, 0b1000, 0b000>;
+def : AT<"S1E3R", 0b01, 0b110, 0b0111, 0b1000, 0b000>;
+def : AT<"S1E1W", 0b01, 0b000, 0b0111, 0b1000, 0b001>;
+def : AT<"S1E2W", 0b01, 0b100, 0b0111, 0b1000, 0b001>;
+def : AT<"S1E3W", 0b01, 0b110, 0b0111, 0b1000, 0b001>;
+def : AT<"S1E0R", 0b01, 0b000, 0b0111, 0b1000, 0b010>;
+def : AT<"S1E0W", 0b01, 0b000, 0b0111, 0b1000, 0b011>;
+def : AT<"S12E1R", 0b01, 0b100, 0b0111, 0b1000, 0b100>;
+def : AT<"S12E1W", 0b01, 0b100, 0b0111, 0b1000, 0b101>;
+def : AT<"S12E0R", 0b01, 0b100, 0b0111, 0b1000, 0b110>;
+def : AT<"S12E0W", 0b01, 0b100, 0b0111, 0b1000, 0b111>;
+def : AT<"S1E1RP", 0b01, 0b000, 0b0111, 0b1001, 0b000>;
+def : AT<"S1E1WP", 0b01, 0b000, 0b0111, 0b1001, 0b001>;
+
+
+//===----------------------------------------------------------------------===//
+// DMB/DSB (data barrier) instruction options.
+//===----------------------------------------------------------------------===//
+
+class DB<string name, bits<4> encoding> : SearchableTable {
+ let SearchableFields = ["Name", "Encoding"];
+ let EnumValueField = "Encoding";
+
+ string Name = name;
+ bits<4> Encoding = encoding;
+}
+
+def : DB<"oshld", 0x1>;
+def : DB<"oshst", 0x2>;
+def : DB<"osh", 0x3>;
+def : DB<"nshld", 0x5>;
+def : DB<"nshst", 0x6>;
+def : DB<"nsh", 0x7>;
+def : DB<"ishld", 0x9>;
+def : DB<"ishst", 0xa>;
+def : DB<"ish", 0xb>;
+def : DB<"ld", 0xd>;
+def : DB<"st", 0xe>;
+def : DB<"sy", 0xf>;
+
+//===----------------------------------------------------------------------===//
+// DC (data cache maintenance) instruction options.
+//===----------------------------------------------------------------------===//
+
+class DC<string name, bits<2> op0, bits<3> op1, bits<4> crn, bits<4> crm,
+ bits<3> op2> : SearchableTable {
+ let SearchableFields = ["Name", "Encoding"];
+ let EnumValueField = "Encoding";
+
+ string Name = name;
+ bits<16> Encoding;
+ let Encoding{15-14} = op0;
+ let Encoding{13-11} = op1;
+ let Encoding{10-7} = crn;
+ let Encoding{6-3} = crm;
+ let Encoding{2-0} = op2;
+}
+
+def : DC<"ZVA", 0b01, 0b011, 0b0111, 0b0100, 0b001>;
+def : DC<"IVAC", 0b01, 0b000, 0b0111, 0b0110, 0b001>;
+def : DC<"ISW", 0b01, 0b000, 0b0111, 0b0110, 0b010>;
+def : DC<"CVAC", 0b01, 0b011, 0b0111, 0b1010, 0b001>;
+def : DC<"CSW", 0b01, 0b000, 0b0111, 0b1010, 0b010>;
+def : DC<"CVAU", 0b01, 0b011, 0b0111, 0b1011, 0b001>;
+def : DC<"CIVAC", 0b01, 0b011, 0b0111, 0b1110, 0b001>;
+def : DC<"CISW", 0b01, 0b000, 0b0111, 0b1110, 0b010>;
+
+//===----------------------------------------------------------------------===//
+// IC (instruction cache maintenance) instruction options.
+//===----------------------------------------------------------------------===//
+
+class IC<string name, bits<3> op1, bits<4> crn, bits<4> crm, bits<3> op2,
+ bit needsreg> : SearchableTable {
+ let SearchableFields = ["Name", "Encoding"];
+ let EnumValueField = "Encoding";
+
+ string Name = name;
+ bits<14> Encoding;
+ let Encoding{13-11} = op1;
+ let Encoding{10-7} = crn;
+ let Encoding{6-3} = crm;
+ let Encoding{2-0} = op2;
+ bit NeedsReg = needsreg;
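+ // If set, the operation takes a register operand (e.g. the address for IVAU).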
+}
+
+def : IC<"IALLUIS", 0b000, 0b0111, 0b0001, 0b000, 0>;
+def : IC<"IALLU", 0b000, 0b0111, 0b0101, 0b000, 0>;
+def : IC<"IVAU", 0b000, 0b0111, 0b0001, 0b000, 1>;
+
+//===----------------------------------------------------------------------===//
+// ISB (instruction-fetch barrier) instruction options.
+//===----------------------------------------------------------------------===//
+
+class ISB<string name, bits<4> encoding> : SearchableTable {
+ let SearchableFields = ["Name", "Encoding"];
+ let EnumValueField = "Encoding";
+
+ string Name = name;
+ bits<4> Encoding;
+ let Encoding = encoding;
+}
+
+def : ISB<"sy", 0xf>;
+
+//===----------------------------------------------------------------------===//
+// PRFM (prefetch) instruction options.
+//===----------------------------------------------------------------------===//
+
+class PRFM<string name, bits<5> encoding> : SearchableTable {
+ let SearchableFields = ["Name", "Encoding"];
+ let EnumValueField = "Encoding";
+
+ string Name = name;
+ bits<5> Encoding;
+ let Encoding = encoding;
+}
+
+def : PRFM<"pldl1keep", 0x00>;
+def : PRFM<"pldl1strm", 0x01>;
+def : PRFM<"pldl2keep", 0x02>;
+def : PRFM<"pldl2strm", 0x03>;
+def : PRFM<"pldl3keep", 0x04>;
+def : PRFM<"pldl3strm", 0x05>;
+def : PRFM<"plil1keep", 0x08>;
+def : PRFM<"plil1strm", 0x09>;
+def : PRFM<"plil2keep", 0x0a>;
+def : PRFM<"plil2strm", 0x0b>;
+def : PRFM<"plil3keep", 0x0c>;
+def : PRFM<"plil3strm", 0x0d>;
+def : PRFM<"pstl1keep", 0x10>;
+def : PRFM<"pstl1strm", 0x11>;
+def : PRFM<"pstl2keep", 0x12>;
+def : PRFM<"pstl2strm", 0x13>;
+def : PRFM<"pstl3keep", 0x14>;
+def : PRFM<"pstl3strm", 0x15>;
+
+//===----------------------------------------------------------------------===//
+// PState instruction options.
+//===----------------------------------------------------------------------===//
+
+class PState<string name, bits<5> encoding> : SearchableTable {
+ let SearchableFields = ["Name", "Encoding"];
+ let EnumValueField = "Encoding";
+
+ string Name = name;
+ bits<5> Encoding;
+ let Encoding = encoding;
+ code Requires = [{ {} }];
+}
+
+def : PState<"SPSel", 0b00101>;
+def : PState<"DAIFSet", 0b11110>;
+def : PState<"DAIFClr", 0b11111>;
+// v8.1a "Privileged Access Never" extension-specific PStates
+let Requires = [{ {AArch64::HasV8_1aOps} }] in
+def : PState<"PAN", 0b00100>;
+// v8.2a "User Access Override" extension-specific PStates
+let Requires = [{ {AArch64::HasV8_2aOps} }] in
+def : PState<"UAO", 0b00011>;
+
+
+//===----------------------------------------------------------------------===//
+// PSB instruction options.
+//===----------------------------------------------------------------------===//
+
+class PSB<string name, bits<5> encoding> : SearchableTable {
+ let SearchableFields = ["Name", "Encoding"];
+ let EnumValueField = "Encoding";
+
+ string Name = name;
+ bits<5> Encoding;
+ let Encoding = encoding;
+}
+
+def : PSB<"csync", 0x11>;
+
+//===----------------------------------------------------------------------===//
+// TLBI (translation lookaside buffer invalidate) instruction options.
+//===----------------------------------------------------------------------===//
+
+class TLBI<string name, bits<2> op0, bits<3> op1, bits<4> crn, bits<4> crm,
+ bits<3> op2, bit needsreg = 1> : SearchableTable {
+ let SearchableFields = ["Name", "Encoding"];
+ let EnumValueField = "Encoding";
+
+ string Name = name;
+ bits<16> Encoding;
+ let Encoding{15-14} = op0;
+ let Encoding{13-11} = op1;
+ let Encoding{10-7} = crn;
+ let Encoding{6-3} = crm;
+ let Encoding{2-0} = op2;
+ bit NeedsReg = needsreg;
+}
+
+def : TLBI<"IPAS2E1IS", 0b01, 0b100, 0b1000, 0b0000, 0b001>;
+def : TLBI<"IPAS2LE1IS", 0b01, 0b100, 0b1000, 0b0000, 0b101>;
+def : TLBI<"VMALLE1IS", 0b01, 0b000, 0b1000, 0b0011, 0b000, 0>;
+def : TLBI<"ALLE2IS", 0b01, 0b100, 0b1000, 0b0011, 0b000, 0>;
+def : TLBI<"ALLE3IS", 0b01, 0b110, 0b1000, 0b0011, 0b000, 0>;
+def : TLBI<"VAE1IS", 0b01, 0b000, 0b1000, 0b0011, 0b001>;
+def : TLBI<"VAE2IS", 0b01, 0b100, 0b1000, 0b0011, 0b001>;
+def : TLBI<"VAE3IS", 0b01, 0b110, 0b1000, 0b0011, 0b001>;
+def : TLBI<"ASIDE1IS", 0b01, 0b000, 0b1000, 0b0011, 0b010>;
+def : TLBI<"VAAE1IS", 0b01, 0b000, 0b1000, 0b0011, 0b011>;
+def : TLBI<"ALLE1IS", 0b01, 0b100, 0b1000, 0b0011, 0b100, 0>;
+def : TLBI<"VALE1IS", 0b01, 0b000, 0b1000, 0b0011, 0b101>;
+def : TLBI<"VALE2IS", 0b01, 0b100, 0b1000, 0b0011, 0b101>;
+def : TLBI<"VALE3IS", 0b01, 0b110, 0b1000, 0b0011, 0b101>;
+def : TLBI<"VMALLS12E1IS", 0b01, 0b100, 0b1000, 0b0011, 0b110, 0>;
+def : TLBI<"VAALE1IS", 0b01, 0b000, 0b1000, 0b0011, 0b111>;
+def : TLBI<"IPAS2E1", 0b01, 0b100, 0b1000, 0b0100, 0b001>;
+def : TLBI<"IPAS2LE1", 0b01, 0b100, 0b1000, 0b0100, 0b101>;
+def : TLBI<"VMALLE1", 0b01, 0b000, 0b1000, 0b0111, 0b000, 0>;
+def : TLBI<"ALLE2", 0b01, 0b100, 0b1000, 0b0111, 0b000, 0>;
+def : TLBI<"ALLE3", 0b01, 0b110, 0b1000, 0b0111, 0b000, 0>;
+def : TLBI<"VAE1", 0b01, 0b000, 0b1000, 0b0111, 0b001>;
+def : TLBI<"VAE2", 0b01, 0b100, 0b1000, 0b0111, 0b001>;
+def : TLBI<"VAE3", 0b01, 0b110, 0b1000, 0b0111, 0b001>;
+def : TLBI<"ASIDE1", 0b01, 0b000, 0b1000, 0b0111, 0b010>;
+def : TLBI<"VAAE1", 0b01, 0b000, 0b1000, 0b0111, 0b011>;
+def : TLBI<"ALLE1", 0b01, 0b100, 0b1000, 0b0111, 0b100, 0>;
+def : TLBI<"VALE1", 0b01, 0b000, 0b1000, 0b0111, 0b101>;
+def : TLBI<"VALE2", 0b01, 0b100, 0b1000, 0b0111, 0b101>;
+def : TLBI<"VALE3", 0b01, 0b110, 0b1000, 0b0111, 0b101>;
+def : TLBI<"VMALLS12E1", 0b01, 0b100, 0b1000, 0b0111, 0b110, 0>;
+def : TLBI<"VAALE1", 0b01, 0b000, 0b1000, 0b0111, 0b111>;
+
+
+//===----------------------------------------------------------------------===//
+// MRS/MSR (system register read/write) instruction options.
+//===----------------------------------------------------------------------===//
+
+class SysReg<string name, bits<2> op0, bits<3> op1, bits<4> crn, bits<4> crm,
+ bits<3> op2> : SearchableTable {
+ let SearchableFields = ["Name", "Encoding"];
+ let EnumValueField = "Encoding";
+
+ string Name = name;
+ bits<16> Encoding;
+ let Encoding{15-14} = op0;
+ let Encoding{13-11} = op1;
+ let Encoding{10-7} = crn;
+ let Encoding{6-3} = crm;
+ let Encoding{2-0} = op2;
+ bit Readable = ?;
+ bit Writeable = ?;
+ code Requires = [{ {} }];
+}
+
+class RWSysReg<string name, bits<2> op0, bits<3> op1, bits<4> crn, bits<4> crm,
+ bits<3> op2>
+ : SysReg<name, op0, op1, crn, crm, op2> {
+ let Readable = 1;
+ let Writeable = 1;
+}
+
+class ROSysReg<string name, bits<2> op0, bits<3> op1, bits<4> crn, bits<4> crm,
+ bits<3> op2>
+ : SysReg<name, op0, op1, crn, crm, op2> {
+ let Readable = 1;
+ let Writeable = 0;
+}
+
+class WOSysReg<string name, bits<2> op0, bits<3> op1, bits<4> crn, bits<4> crm,
+ bits<3> op2>
+ : SysReg<name, op0, op1, crn, crm, op2> {
+ let Readable = 0;
+ let Writeable = 1;
+}
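+// The assembler uses the Readable/Writeable bits to diagnose an MRS of a
+// write-only register or an MSR of a read-only one.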
+
+//===----------------------
+// Read-only regs
+//===----------------------
+
+// Op0 Op1 CRn CRm Op2
+def : ROSysReg<"MDCCSR_EL0", 0b10, 0b011, 0b0000, 0b0001, 0b000>;
+def : ROSysReg<"DBGDTRRX_EL0", 0b10, 0b011, 0b0000, 0b0101, 0b000>;
+def : ROSysReg<"MDRAR_EL1", 0b10, 0b000, 0b0001, 0b0000, 0b000>;
+def : ROSysReg<"OSLSR_EL1", 0b10, 0b000, 0b0001, 0b0001, 0b100>;
+def : ROSysReg<"DBGAUTHSTATUS_EL1", 0b10, 0b000, 0b0111, 0b1110, 0b110>;
+def : ROSysReg<"PMCEID0_EL0", 0b11, 0b011, 0b1001, 0b1100, 0b110>;
+def : ROSysReg<"PMCEID1_EL0", 0b11, 0b011, 0b1001, 0b1100, 0b111>;
+def : ROSysReg<"MIDR_EL1", 0b11, 0b000, 0b0000, 0b0000, 0b000>;
+def : ROSysReg<"CCSIDR_EL1", 0b11, 0b001, 0b0000, 0b0000, 0b000>;
+def : ROSysReg<"CLIDR_EL1", 0b11, 0b001, 0b0000, 0b0000, 0b001>;
+def : ROSysReg<"CTR_EL0", 0b11, 0b011, 0b0000, 0b0000, 0b001>;
+def : ROSysReg<"MPIDR_EL1", 0b11, 0b000, 0b0000, 0b0000, 0b101>;
+def : ROSysReg<"REVIDR_EL1", 0b11, 0b000, 0b0000, 0b0000, 0b110>;
+def : ROSysReg<"AIDR_EL1", 0b11, 0b001, 0b0000, 0b0000, 0b111>;
+def : ROSysReg<"DCZID_EL0", 0b11, 0b011, 0b0000, 0b0000, 0b111>;
+def : ROSysReg<"ID_PFR0_EL1", 0b11, 0b000, 0b0000, 0b0001, 0b000>;
+def : ROSysReg<"ID_PFR1_EL1", 0b11, 0b000, 0b0000, 0b0001, 0b001>;
+def : ROSysReg<"ID_DFR0_EL1", 0b11, 0b000, 0b0000, 0b0001, 0b010>;
+def : ROSysReg<"ID_AFR0_EL1", 0b11, 0b000, 0b0000, 0b0001, 0b011>;
+def : ROSysReg<"ID_MMFR0_EL1", 0b11, 0b000, 0b0000, 0b0001, 0b100>;
+def : ROSysReg<"ID_MMFR1_EL1", 0b11, 0b000, 0b0000, 0b0001, 0b101>;
+def : ROSysReg<"ID_MMFR2_EL1", 0b11, 0b000, 0b0000, 0b0001, 0b110>;
+def : ROSysReg<"ID_MMFR3_EL1", 0b11, 0b000, 0b0000, 0b0001, 0b111>;
+def : ROSysReg<"ID_ISAR0_EL1", 0b11, 0b000, 0b0000, 0b0010, 0b000>;
+def : ROSysReg<"ID_ISAR1_EL1", 0b11, 0b000, 0b0000, 0b0010, 0b001>;
+def : ROSysReg<"ID_ISAR2_EL1", 0b11, 0b000, 0b0000, 0b0010, 0b010>;
+def : ROSysReg<"ID_ISAR3_EL1", 0b11, 0b000, 0b0000, 0b0010, 0b011>;
+def : ROSysReg<"ID_ISAR4_EL1", 0b11, 0b000, 0b0000, 0b0010, 0b100>;
+def : ROSysReg<"ID_ISAR5_EL1", 0b11, 0b000, 0b0000, 0b0010, 0b101>;
+def : ROSysReg<"ID_AA64PFR0_EL1", 0b11, 0b000, 0b0000, 0b0100, 0b000>;
+def : ROSysReg<"ID_AA64PFR1_EL1", 0b11, 0b000, 0b0000, 0b0100, 0b001>;
+def : ROSysReg<"ID_AA64DFR0_EL1", 0b11, 0b000, 0b0000, 0b0101, 0b000>;
+def : ROSysReg<"ID_AA64DFR1_EL1", 0b11, 0b000, 0b0000, 0b0101, 0b001>;
+def : ROSysReg<"ID_AA64AFR0_EL1", 0b11, 0b000, 0b0000, 0b0101, 0b100>;
+def : ROSysReg<"ID_AA64AFR1_EL1", 0b11, 0b000, 0b0000, 0b0101, 0b101>;
+def : ROSysReg<"ID_AA64ISAR0_EL1", 0b11, 0b000, 0b0000, 0b0110, 0b000>;
+def : ROSysReg<"ID_AA64ISAR1_EL1", 0b11, 0b000, 0b0000, 0b0110, 0b001>;
+def : ROSysReg<"ID_AA64MMFR0_EL1", 0b11, 0b000, 0b0000, 0b0111, 0b000>;
+def : ROSysReg<"ID_AA64MMFR1_EL1", 0b11, 0b000, 0b0000, 0b0111, 0b001>;
+def : ROSysReg<"ID_AA64MMFR2_EL1", 0b11, 0b000, 0b0000, 0b0111, 0b010> {
+ let Requires = [{ {AArch64::HasV8_2aOps} }];
+}
+def : ROSysReg<"MVFR0_EL1", 0b11, 0b000, 0b0000, 0b0011, 0b000>;
+def : ROSysReg<"MVFR1_EL1", 0b11, 0b000, 0b0000, 0b0011, 0b001>;
+def : ROSysReg<"MVFR2_EL1", 0b11, 0b000, 0b0000, 0b0011, 0b010>;
+def : ROSysReg<"RVBAR_EL1", 0b11, 0b000, 0b1100, 0b0000, 0b001>;
+def : ROSysReg<"RVBAR_EL2", 0b11, 0b100, 0b1100, 0b0000, 0b001>;
+def : ROSysReg<"RVBAR_EL3", 0b11, 0b110, 0b1100, 0b0000, 0b001>;
+def : ROSysReg<"ISR_EL1", 0b11, 0b000, 0b1100, 0b0001, 0b000>;
+def : ROSysReg<"CNTPCT_EL0", 0b11, 0b011, 0b1110, 0b0000, 0b001>;
+def : ROSysReg<"CNTVCT_EL0", 0b11, 0b011, 0b1110, 0b0000, 0b010>;
+def : ROSysReg<"ID_MMFR4_EL1", 0b11, 0b000, 0b0000, 0b0010, 0b110>;
+
+// Trace registers
+// Op0 Op1 CRn CRm Op2
+def : ROSysReg<"TRCSTATR", 0b10, 0b001, 0b0000, 0b0011, 0b000>;
+def : ROSysReg<"TRCIDR8", 0b10, 0b001, 0b0000, 0b0000, 0b110>;
+def : ROSysReg<"TRCIDR9", 0b10, 0b001, 0b0000, 0b0001, 0b110>;
+def : ROSysReg<"TRCIDR10", 0b10, 0b001, 0b0000, 0b0010, 0b110>;
+def : ROSysReg<"TRCIDR11", 0b10, 0b001, 0b0000, 0b0011, 0b110>;
+def : ROSysReg<"TRCIDR12", 0b10, 0b001, 0b0000, 0b0100, 0b110>;
+def : ROSysReg<"TRCIDR13", 0b10, 0b001, 0b0000, 0b0101, 0b110>;
+def : ROSysReg<"TRCIDR0", 0b10, 0b001, 0b0000, 0b1000, 0b111>;
+def : ROSysReg<"TRCIDR1", 0b10, 0b001, 0b0000, 0b1001, 0b111>;
+def : ROSysReg<"TRCIDR2", 0b10, 0b001, 0b0000, 0b1010, 0b111>;
+def : ROSysReg<"TRCIDR3", 0b10, 0b001, 0b0000, 0b1011, 0b111>;
+def : ROSysReg<"TRCIDR4", 0b10, 0b001, 0b0000, 0b1100, 0b111>;
+def : ROSysReg<"TRCIDR5", 0b10, 0b001, 0b0000, 0b1101, 0b111>;
+def : ROSysReg<"TRCIDR6", 0b10, 0b001, 0b0000, 0b1110, 0b111>;
+def : ROSysReg<"TRCIDR7", 0b10, 0b001, 0b0000, 0b1111, 0b111>;
+def : ROSysReg<"TRCOSLSR", 0b10, 0b001, 0b0001, 0b0001, 0b100>;
+def : ROSysReg<"TRCPDSR", 0b10, 0b001, 0b0001, 0b0101, 0b100>;
+def : ROSysReg<"TRCDEVAFF0", 0b10, 0b001, 0b0111, 0b1010, 0b110>;
+def : ROSysReg<"TRCDEVAFF1", 0b10, 0b001, 0b0111, 0b1011, 0b110>;
+def : ROSysReg<"TRCLSR", 0b10, 0b001, 0b0111, 0b1101, 0b110>;
+def : ROSysReg<"TRCAUTHSTATUS", 0b10, 0b001, 0b0111, 0b1110, 0b110>;
+def : ROSysReg<"TRCDEVARCH", 0b10, 0b001, 0b0111, 0b1111, 0b110>;
+def : ROSysReg<"TRCDEVID", 0b10, 0b001, 0b0111, 0b0010, 0b111>;
+def : ROSysReg<"TRCDEVTYPE", 0b10, 0b001, 0b0111, 0b0011, 0b111>;
+def : ROSysReg<"TRCPIDR4", 0b10, 0b001, 0b0111, 0b0100, 0b111>;
+def : ROSysReg<"TRCPIDR5", 0b10, 0b001, 0b0111, 0b0101, 0b111>;
+def : ROSysReg<"TRCPIDR6", 0b10, 0b001, 0b0111, 0b0110, 0b111>;
+def : ROSysReg<"TRCPIDR7", 0b10, 0b001, 0b0111, 0b0111, 0b111>;
+def : ROSysReg<"TRCPIDR0", 0b10, 0b001, 0b0111, 0b1000, 0b111>;
+def : ROSysReg<"TRCPIDR1", 0b10, 0b001, 0b0111, 0b1001, 0b111>;
+def : ROSysReg<"TRCPIDR2", 0b10, 0b001, 0b0111, 0b1010, 0b111>;
+def : ROSysReg<"TRCPIDR3", 0b10, 0b001, 0b0111, 0b1011, 0b111>;
+def : ROSysReg<"TRCCIDR0", 0b10, 0b001, 0b0111, 0b1100, 0b111>;
+def : ROSysReg<"TRCCIDR1", 0b10, 0b001, 0b0111, 0b1101, 0b111>;
+def : ROSysReg<"TRCCIDR2", 0b10, 0b001, 0b0111, 0b1110, 0b111>;
+def : ROSysReg<"TRCCIDR3", 0b10, 0b001, 0b0111, 0b1111, 0b111>;
+
+// GICv3 registers
+// Op0 Op1 CRn CRm Op2
+def : ROSysReg<"ICC_IAR1_EL1", 0b11, 0b000, 0b1100, 0b1100, 0b000>;
+def : ROSysReg<"ICC_IAR0_EL1", 0b11, 0b000, 0b1100, 0b1000, 0b000>;
+def : ROSysReg<"ICC_HPPIR1_EL1", 0b11, 0b000, 0b1100, 0b1100, 0b010>;
+def : ROSysReg<"ICC_HPPIR0_EL1", 0b11, 0b000, 0b1100, 0b1000, 0b010>;
+def : ROSysReg<"ICC_RPR_EL1", 0b11, 0b000, 0b1100, 0b1011, 0b011>;
+def : ROSysReg<"ICH_VTR_EL2", 0b11, 0b100, 0b1100, 0b1011, 0b001>;
+def : ROSysReg<"ICH_EISR_EL2", 0b11, 0b100, 0b1100, 0b1011, 0b011>;
+def : ROSysReg<"ICH_ELSR_EL2", 0b11, 0b100, 0b1100, 0b1011, 0b101>;
+
+// v8.1a "Limited Ordering Regions" extension-specific system register
+// Op0 Op1 CRn CRm Op2
+let Requires = [{ {AArch64::HasV8_1aOps} }] in
+def : ROSysReg<"LORID_EL1", 0b11, 0b000, 0b1010, 0b0100, 0b111>;
+
+// v8.2a "RAS extension" registers
+// Op0 Op1 CRn CRm Op2
+let Requires = [{ {AArch64::FeatureRAS} }] in {
+def : ROSysReg<"ERRIDR_EL1", 0b11, 0b000, 0b0101, 0b0011, 0b000>;
+def : ROSysReg<"ERXFR_EL1", 0b11, 0b000, 0b0101, 0b0100, 0b000>;
+}
+
+//===----------------------
+// Write-only regs
+//===----------------------
+
+// Op0 Op1 CRn CRm Op2
+def : WOSysReg<"DBGDTRTX_EL0", 0b10, 0b011, 0b0000, 0b0101, 0b000>;
+def : WOSysReg<"OSLAR_EL1", 0b10, 0b000, 0b0001, 0b0000, 0b100>;
+def : WOSysReg<"PMSWINC_EL0", 0b11, 0b011, 0b1001, 0b1100, 0b100>;
+
+// Trace Registers
+// Op0 Op1 CRn CRm Op2
+def : WOSysReg<"TRCOSLAR", 0b10, 0b001, 0b0001, 0b0000, 0b100>;
+def : WOSysReg<"TRCLAR", 0b10, 0b001, 0b0111, 0b1100, 0b110>;
+
+// GICv3 registers
+// Op0 Op1 CRn CRm Op2
+def : WOSysReg<"ICC_EOIR1_EL1", 0b11, 0b000, 0b1100, 0b1100, 0b001>;
+def : WOSysReg<"ICC_EOIR0_EL1", 0b11, 0b000, 0b1100, 0b1000, 0b001>;
+def : WOSysReg<"ICC_DIR_EL1", 0b11, 0b000, 0b1100, 0b1011, 0b001>;
+def : WOSysReg<"ICC_SGI1R_EL1", 0b11, 0b000, 0b1100, 0b1011, 0b101>;
+def : WOSysReg<"ICC_ASGI1R_EL1", 0b11, 0b000, 0b1100, 0b1011, 0b110>;
+def : WOSysReg<"ICC_SGI0R_EL1", 0b11, 0b000, 0b1100, 0b1011, 0b111>;
+
+//===----------------------
+// Read-write regs
+//===----------------------
+
+// Op0 Op1 CRn CRm Op2
+def : RWSysReg<"OSDTRRX_EL1", 0b10, 0b000, 0b0000, 0b0000, 0b010>;
+def : RWSysReg<"OSDTRTX_EL1", 0b10, 0b000, 0b0000, 0b0011, 0b010>;
+def : RWSysReg<"TEECR32_EL1", 0b10, 0b010, 0b0000, 0b0000, 0b000>;
+def : RWSysReg<"MDCCINT_EL1", 0b10, 0b000, 0b0000, 0b0010, 0b000>;
+def : RWSysReg<"MDSCR_EL1", 0b10, 0b000, 0b0000, 0b0010, 0b010>;
+def : RWSysReg<"DBGDTR_EL0", 0b10, 0b011, 0b0000, 0b0100, 0b000>;
+def : RWSysReg<"OSECCR_EL1", 0b10, 0b000, 0b0000, 0b0110, 0b010>;
+def : RWSysReg<"DBGVCR32_EL2", 0b10, 0b100, 0b0000, 0b0111, 0b000>;
+def : RWSysReg<"DBGBVR0_EL1", 0b10, 0b000, 0b0000, 0b0000, 0b100>;
+def : RWSysReg<"DBGBVR1_EL1", 0b10, 0b000, 0b0000, 0b0001, 0b100>;
+def : RWSysReg<"DBGBVR2_EL1", 0b10, 0b000, 0b0000, 0b0010, 0b100>;
+def : RWSysReg<"DBGBVR3_EL1", 0b10, 0b000, 0b0000, 0b0011, 0b100>;
+def : RWSysReg<"DBGBVR4_EL1", 0b10, 0b000, 0b0000, 0b0100, 0b100>;
+def : RWSysReg<"DBGBVR5_EL1", 0b10, 0b000, 0b0000, 0b0101, 0b100>;
+def : RWSysReg<"DBGBVR6_EL1", 0b10, 0b000, 0b0000, 0b0110, 0b100>;
+def : RWSysReg<"DBGBVR7_EL1", 0b10, 0b000, 0b0000, 0b0111, 0b100>;
+def : RWSysReg<"DBGBVR8_EL1", 0b10, 0b000, 0b0000, 0b1000, 0b100>;
+def : RWSysReg<"DBGBVR9_EL1", 0b10, 0b000, 0b0000, 0b1001, 0b100>;
+def : RWSysReg<"DBGBVR10_EL1", 0b10, 0b000, 0b0000, 0b1010, 0b100>;
+def : RWSysReg<"DBGBVR11_EL1", 0b10, 0b000, 0b0000, 0b1011, 0b100>;
+def : RWSysReg<"DBGBVR12_EL1", 0b10, 0b000, 0b0000, 0b1100, 0b100>;
+def : RWSysReg<"DBGBVR13_EL1", 0b10, 0b000, 0b0000, 0b1101, 0b100>;
+def : RWSysReg<"DBGBVR14_EL1", 0b10, 0b000, 0b0000, 0b1110, 0b100>;
+def : RWSysReg<"DBGBVR15_EL1", 0b10, 0b000, 0b0000, 0b1111, 0b100>;
+def : RWSysReg<"DBGBCR0_EL1", 0b10, 0b000, 0b0000, 0b0000, 0b101>;
+def : RWSysReg<"DBGBCR1_EL1", 0b10, 0b000, 0b0000, 0b0001, 0b101>;
+def : RWSysReg<"DBGBCR2_EL1", 0b10, 0b000, 0b0000, 0b0010, 0b101>;
+def : RWSysReg<"DBGBCR3_EL1", 0b10, 0b000, 0b0000, 0b0011, 0b101>;
+def : RWSysReg<"DBGBCR4_EL1", 0b10, 0b000, 0b0000, 0b0100, 0b101>;
+def : RWSysReg<"DBGBCR5_EL1", 0b10, 0b000, 0b0000, 0b0101, 0b101>;
+def : RWSysReg<"DBGBCR6_EL1", 0b10, 0b000, 0b0000, 0b0110, 0b101>;
+def : RWSysReg<"DBGBCR7_EL1", 0b10, 0b000, 0b0000, 0b0111, 0b101>;
+def : RWSysReg<"DBGBCR8_EL1", 0b10, 0b000, 0b0000, 0b1000, 0b101>;
+def : RWSysReg<"DBGBCR9_EL1", 0b10, 0b000, 0b0000, 0b1001, 0b101>;
+def : RWSysReg<"DBGBCR10_EL1", 0b10, 0b000, 0b0000, 0b1010, 0b101>;
+def : RWSysReg<"DBGBCR11_EL1", 0b10, 0b000, 0b0000, 0b1011, 0b101>;
+def : RWSysReg<"DBGBCR12_EL1", 0b10, 0b000, 0b0000, 0b1100, 0b101>;
+def : RWSysReg<"DBGBCR13_EL1", 0b10, 0b000, 0b0000, 0b1101, 0b101>;
+def : RWSysReg<"DBGBCR14_EL1", 0b10, 0b000, 0b0000, 0b1110, 0b101>;
+def : RWSysReg<"DBGBCR15_EL1", 0b10, 0b000, 0b0000, 0b1111, 0b101>;
+def : RWSysReg<"DBGWVR0_EL1", 0b10, 0b000, 0b0000, 0b0000, 0b110>;
+def : RWSysReg<"DBGWVR1_EL1", 0b10, 0b000, 0b0000, 0b0001, 0b110>;
+def : RWSysReg<"DBGWVR2_EL1", 0b10, 0b000, 0b0000, 0b0010, 0b110>;
+def : RWSysReg<"DBGWVR3_EL1", 0b10, 0b000, 0b0000, 0b0011, 0b110>;
+def : RWSysReg<"DBGWVR4_EL1", 0b10, 0b000, 0b0000, 0b0100, 0b110>;
+def : RWSysReg<"DBGWVR5_EL1", 0b10, 0b000, 0b0000, 0b0101, 0b110>;
+def : RWSysReg<"DBGWVR6_EL1", 0b10, 0b000, 0b0000, 0b0110, 0b110>;
+def : RWSysReg<"DBGWVR7_EL1", 0b10, 0b000, 0b0000, 0b0111, 0b110>;
+def : RWSysReg<"DBGWVR8_EL1", 0b10, 0b000, 0b0000, 0b1000, 0b110>;
+def : RWSysReg<"DBGWVR9_EL1", 0b10, 0b000, 0b0000, 0b1001, 0b110>;
+def : RWSysReg<"DBGWVR10_EL1", 0b10, 0b000, 0b0000, 0b1010, 0b110>;
+def : RWSysReg<"DBGWVR11_EL1", 0b10, 0b000, 0b0000, 0b1011, 0b110>;
+def : RWSysReg<"DBGWVR12_EL1", 0b10, 0b000, 0b0000, 0b1100, 0b110>;
+def : RWSysReg<"DBGWVR13_EL1", 0b10, 0b000, 0b0000, 0b1101, 0b110>;
+def : RWSysReg<"DBGWVR14_EL1", 0b10, 0b000, 0b0000, 0b1110, 0b110>;
+def : RWSysReg<"DBGWVR15_EL1", 0b10, 0b000, 0b0000, 0b1111, 0b110>;
+def : RWSysReg<"DBGWCR0_EL1", 0b10, 0b000, 0b0000, 0b0000, 0b111>;
+def : RWSysReg<"DBGWCR1_EL1", 0b10, 0b000, 0b0000, 0b0001, 0b111>;
+def : RWSysReg<"DBGWCR2_EL1", 0b10, 0b000, 0b0000, 0b0010, 0b111>;
+def : RWSysReg<"DBGWCR3_EL1", 0b10, 0b000, 0b0000, 0b0011, 0b111>;
+def : RWSysReg<"DBGWCR4_EL1", 0b10, 0b000, 0b0000, 0b0100, 0b111>;
+def : RWSysReg<"DBGWCR5_EL1", 0b10, 0b000, 0b0000, 0b0101, 0b111>;
+def : RWSysReg<"DBGWCR6_EL1", 0b10, 0b000, 0b0000, 0b0110, 0b111>;
+def : RWSysReg<"DBGWCR7_EL1", 0b10, 0b000, 0b0000, 0b0111, 0b111>;
+def : RWSysReg<"DBGWCR8_EL1", 0b10, 0b000, 0b0000, 0b1000, 0b111>;
+def : RWSysReg<"DBGWCR9_EL1", 0b10, 0b000, 0b0000, 0b1001, 0b111>;
+def : RWSysReg<"DBGWCR10_EL1", 0b10, 0b000, 0b0000, 0b1010, 0b111>;
+def : RWSysReg<"DBGWCR11_EL1", 0b10, 0b000, 0b0000, 0b1011, 0b111>;
+def : RWSysReg<"DBGWCR12_EL1", 0b10, 0b000, 0b0000, 0b1100, 0b111>;
+def : RWSysReg<"DBGWCR13_EL1", 0b10, 0b000, 0b0000, 0b1101, 0b111>;
+def : RWSysReg<"DBGWCR14_EL1", 0b10, 0b000, 0b0000, 0b1110, 0b111>;
+def : RWSysReg<"DBGWCR15_EL1", 0b10, 0b000, 0b0000, 0b1111, 0b111>;
+def : RWSysReg<"TEEHBR32_EL1", 0b10, 0b010, 0b0001, 0b0000, 0b000>;
+def : RWSysReg<"OSDLR_EL1", 0b10, 0b000, 0b0001, 0b0011, 0b100>;
+def : RWSysReg<"DBGPRCR_EL1", 0b10, 0b000, 0b0001, 0b0100, 0b100>;
+def : RWSysReg<"DBGCLAIMSET_EL1", 0b10, 0b000, 0b0111, 0b1000, 0b110>;
+def : RWSysReg<"DBGCLAIMCLR_EL1", 0b10, 0b000, 0b0111, 0b1001, 0b110>;
+def : RWSysReg<"CSSELR_EL1", 0b11, 0b010, 0b0000, 0b0000, 0b000>;
+def : RWSysReg<"VPIDR_EL2", 0b11, 0b100, 0b0000, 0b0000, 0b000>;
+def : RWSysReg<"VMPIDR_EL2", 0b11, 0b100, 0b0000, 0b0000, 0b101>;
+def : RWSysReg<"CPACR_EL1", 0b11, 0b000, 0b0001, 0b0000, 0b010>;
+def : RWSysReg<"SCTLR_EL1", 0b11, 0b000, 0b0001, 0b0000, 0b000>;
+def : RWSysReg<"SCTLR_EL2", 0b11, 0b100, 0b0001, 0b0000, 0b000>;
+def : RWSysReg<"SCTLR_EL3", 0b11, 0b110, 0b0001, 0b0000, 0b000>;
+def : RWSysReg<"ACTLR_EL1", 0b11, 0b000, 0b0001, 0b0000, 0b001>;
+def : RWSysReg<"ACTLR_EL2", 0b11, 0b100, 0b0001, 0b0000, 0b001>;
+def : RWSysReg<"ACTLR_EL3", 0b11, 0b110, 0b0001, 0b0000, 0b001>;
+def : RWSysReg<"HCR_EL2", 0b11, 0b100, 0b0001, 0b0001, 0b000>;
+def : RWSysReg<"SCR_EL3", 0b11, 0b110, 0b0001, 0b0001, 0b000>;
+def : RWSysReg<"MDCR_EL2", 0b11, 0b100, 0b0001, 0b0001, 0b001>;
+def : RWSysReg<"SDER32_EL3", 0b11, 0b110, 0b0001, 0b0001, 0b001>;
+def : RWSysReg<"CPTR_EL2", 0b11, 0b100, 0b0001, 0b0001, 0b010>;
+def : RWSysReg<"CPTR_EL3", 0b11, 0b110, 0b0001, 0b0001, 0b010>;
+def : RWSysReg<"HSTR_EL2", 0b11, 0b100, 0b0001, 0b0001, 0b011>;
+def : RWSysReg<"HACR_EL2", 0b11, 0b100, 0b0001, 0b0001, 0b111>;
+def : RWSysReg<"MDCR_EL3", 0b11, 0b110, 0b0001, 0b0011, 0b001>;
+def : RWSysReg<"TTBR0_EL1", 0b11, 0b000, 0b0010, 0b0000, 0b000>;
+def : RWSysReg<"TTBR0_EL2", 0b11, 0b100, 0b0010, 0b0000, 0b000>;
+def : RWSysReg<"TTBR0_EL3", 0b11, 0b110, 0b0010, 0b0000, 0b000>;
+def : RWSysReg<"TTBR1_EL1", 0b11, 0b000, 0b0010, 0b0000, 0b001>;
+def : RWSysReg<"TCR_EL1", 0b11, 0b000, 0b0010, 0b0000, 0b010>;
+def : RWSysReg<"TCR_EL2", 0b11, 0b100, 0b0010, 0b0000, 0b010>;
+def : RWSysReg<"TCR_EL3", 0b11, 0b110, 0b0010, 0b0000, 0b010>;
+def : RWSysReg<"VTTBR_EL2", 0b11, 0b100, 0b0010, 0b0001, 0b000>;
+def : RWSysReg<"VTCR_EL2", 0b11, 0b100, 0b0010, 0b0001, 0b010>;
+def : RWSysReg<"DACR32_EL2", 0b11, 0b100, 0b0011, 0b0000, 0b000>;
+def : RWSysReg<"SPSR_EL1", 0b11, 0b000, 0b0100, 0b0000, 0b000>;
+def : RWSysReg<"SPSR_EL2", 0b11, 0b100, 0b0100, 0b0000, 0b000>;
+def : RWSysReg<"SPSR_EL3", 0b11, 0b110, 0b0100, 0b0000, 0b000>;
+def : RWSysReg<"ELR_EL1", 0b11, 0b000, 0b0100, 0b0000, 0b001>;
+def : RWSysReg<"ELR_EL2", 0b11, 0b100, 0b0100, 0b0000, 0b001>;
+def : RWSysReg<"ELR_EL3", 0b11, 0b110, 0b0100, 0b0000, 0b001>;
+def : RWSysReg<"SP_EL0", 0b11, 0b000, 0b0100, 0b0001, 0b000>;
+def : RWSysReg<"SP_EL1", 0b11, 0b100, 0b0100, 0b0001, 0b000>;
+def : RWSysReg<"SP_EL2", 0b11, 0b110, 0b0100, 0b0001, 0b000>;
+def : RWSysReg<"SPSel", 0b11, 0b000, 0b0100, 0b0010, 0b000>;
+def : RWSysReg<"NZCV", 0b11, 0b011, 0b0100, 0b0010, 0b000>;
+def : RWSysReg<"DAIF", 0b11, 0b011, 0b0100, 0b0010, 0b001>;
+def : RWSysReg<"CurrentEL", 0b11, 0b000, 0b0100, 0b0010, 0b010>;
+def : RWSysReg<"SPSR_irq", 0b11, 0b100, 0b0100, 0b0011, 0b000>;
+def : RWSysReg<"SPSR_abt", 0b11, 0b100, 0b0100, 0b0011, 0b001>;
+def : RWSysReg<"SPSR_und", 0b11, 0b100, 0b0100, 0b0011, 0b010>;
+def : RWSysReg<"SPSR_fiq", 0b11, 0b100, 0b0100, 0b0011, 0b011>;
+def : RWSysReg<"FPCR", 0b11, 0b011, 0b0100, 0b0100, 0b000>;
+def : RWSysReg<"FPSR", 0b11, 0b011, 0b0100, 0b0100, 0b001>;
+def : RWSysReg<"DSPSR_EL0", 0b11, 0b011, 0b0100, 0b0101, 0b000>;
+def : RWSysReg<"DLR_EL0", 0b11, 0b011, 0b0100, 0b0101, 0b001>;
+def : RWSysReg<"IFSR32_EL2", 0b11, 0b100, 0b0101, 0b0000, 0b001>;
+def : RWSysReg<"AFSR0_EL1", 0b11, 0b000, 0b0101, 0b0001, 0b000>;
+def : RWSysReg<"AFSR0_EL2", 0b11, 0b100, 0b0101, 0b0001, 0b000>;
+def : RWSysReg<"AFSR0_EL3", 0b11, 0b110, 0b0101, 0b0001, 0b000>;
+def : RWSysReg<"AFSR1_EL1", 0b11, 0b000, 0b0101, 0b0001, 0b001>;
+def : RWSysReg<"AFSR1_EL2", 0b11, 0b100, 0b0101, 0b0001, 0b001>;
+def : RWSysReg<"AFSR1_EL3", 0b11, 0b110, 0b0101, 0b0001, 0b001>;
+def : RWSysReg<"ESR_EL1", 0b11, 0b000, 0b0101, 0b0010, 0b000>;
+def : RWSysReg<"ESR_EL2", 0b11, 0b100, 0b0101, 0b0010, 0b000>;
+def : RWSysReg<"ESR_EL3", 0b11, 0b110, 0b0101, 0b0010, 0b000>;
+def : RWSysReg<"FPEXC32_EL2", 0b11, 0b100, 0b0101, 0b0011, 0b000>;
+def : RWSysReg<"FAR_EL1", 0b11, 0b000, 0b0110, 0b0000, 0b000>;
+def : RWSysReg<"FAR_EL2", 0b11, 0b100, 0b0110, 0b0000, 0b000>;
+def : RWSysReg<"FAR_EL3", 0b11, 0b110, 0b0110, 0b0000, 0b000>;
+def : RWSysReg<"HPFAR_EL2", 0b11, 0b100, 0b0110, 0b0000, 0b100>;
+def : RWSysReg<"PAR_EL1", 0b11, 0b000, 0b0111, 0b0100, 0b000>;
+def : RWSysReg<"PMCR_EL0", 0b11, 0b011, 0b1001, 0b1100, 0b000>;
+def : RWSysReg<"PMCNTENSET_EL0", 0b11, 0b011, 0b1001, 0b1100, 0b001>;
+def : RWSysReg<"PMCNTENCLR_EL0", 0b11, 0b011, 0b1001, 0b1100, 0b010>;
+def : RWSysReg<"PMOVSCLR_EL0", 0b11, 0b011, 0b1001, 0b1100, 0b011>;
+def : RWSysReg<"PMSELR_EL0", 0b11, 0b011, 0b1001, 0b1100, 0b101>;
+def : RWSysReg<"PMCCNTR_EL0", 0b11, 0b011, 0b1001, 0b1101, 0b000>;
+def : RWSysReg<"PMXEVTYPER_EL0", 0b11, 0b011, 0b1001, 0b1101, 0b001>;
+def : RWSysReg<"PMXEVCNTR_EL0", 0b11, 0b011, 0b1001, 0b1101, 0b010>;
+def : RWSysReg<"PMUSERENR_EL0", 0b11, 0b011, 0b1001, 0b1110, 0b000>;
+def : RWSysReg<"PMINTENSET_EL1", 0b11, 0b000, 0b1001, 0b1110, 0b001>;
+def : RWSysReg<"PMINTENCLR_EL1", 0b11, 0b000, 0b1001, 0b1110, 0b010>;
+def : RWSysReg<"PMOVSSET_EL0", 0b11, 0b011, 0b1001, 0b1110, 0b011>;
+def : RWSysReg<"MAIR_EL1", 0b11, 0b000, 0b1010, 0b0010, 0b000>;
+def : RWSysReg<"MAIR_EL2", 0b11, 0b100, 0b1010, 0b0010, 0b000>;
+def : RWSysReg<"MAIR_EL3", 0b11, 0b110, 0b1010, 0b0010, 0b000>;
+def : RWSysReg<"AMAIR_EL1", 0b11, 0b000, 0b1010, 0b0011, 0b000>;
+def : RWSysReg<"AMAIR_EL2", 0b11, 0b100, 0b1010, 0b0011, 0b000>;
+def : RWSysReg<"AMAIR_EL3", 0b11, 0b110, 0b1010, 0b0011, 0b000>;
+def : RWSysReg<"VBAR_EL1", 0b11, 0b000, 0b1100, 0b0000, 0b000>;
+def : RWSysReg<"VBAR_EL2", 0b11, 0b100, 0b1100, 0b0000, 0b000>;
+def : RWSysReg<"VBAR_EL3", 0b11, 0b110, 0b1100, 0b0000, 0b000>;
+def : RWSysReg<"RMR_EL1", 0b11, 0b000, 0b1100, 0b0000, 0b010>;
+def : RWSysReg<"RMR_EL2", 0b11, 0b100, 0b1100, 0b0000, 0b010>;
+def : RWSysReg<"RMR_EL3", 0b11, 0b110, 0b1100, 0b0000, 0b010>;
+def : RWSysReg<"CONTEXTIDR_EL1", 0b11, 0b000, 0b1101, 0b0000, 0b001>;
+def : RWSysReg<"TPIDR_EL0", 0b11, 0b011, 0b1101, 0b0000, 0b010>;
+def : RWSysReg<"TPIDR_EL2", 0b11, 0b100, 0b1101, 0b0000, 0b010>;
+def : RWSysReg<"TPIDR_EL3", 0b11, 0b110, 0b1101, 0b0000, 0b010>;
+def : RWSysReg<"TPIDRRO_EL0", 0b11, 0b011, 0b1101, 0b0000, 0b011>;
+def : RWSysReg<"TPIDR_EL1", 0b11, 0b000, 0b1101, 0b0000, 0b100>;
+def : RWSysReg<"CNTFRQ_EL0", 0b11, 0b011, 0b1110, 0b0000, 0b000>;
+def : RWSysReg<"CNTVOFF_EL2", 0b11, 0b100, 0b1110, 0b0000, 0b011>;
+def : RWSysReg<"CNTKCTL_EL1", 0b11, 0b000, 0b1110, 0b0001, 0b000>;
+def : RWSysReg<"CNTHCTL_EL2", 0b11, 0b100, 0b1110, 0b0001, 0b000>;
+def : RWSysReg<"CNTP_TVAL_EL0", 0b11, 0b011, 0b1110, 0b0010, 0b000>;
+def : RWSysReg<"CNTHP_TVAL_EL2", 0b11, 0b100, 0b1110, 0b0010, 0b000>;
+def : RWSysReg<"CNTPS_TVAL_EL1", 0b11, 0b111, 0b1110, 0b0010, 0b000>;
+def : RWSysReg<"CNTP_CTL_EL0", 0b11, 0b011, 0b1110, 0b0010, 0b001>;
+def : RWSysReg<"CNTHP_CTL_EL2", 0b11, 0b100, 0b1110, 0b0010, 0b001>;
+def : RWSysReg<"CNTPS_CTL_EL1", 0b11, 0b111, 0b1110, 0b0010, 0b001>;
+def : RWSysReg<"CNTP_CVAL_EL0", 0b11, 0b011, 0b1110, 0b0010, 0b010>;
+def : RWSysReg<"CNTHP_CVAL_EL2", 0b11, 0b100, 0b1110, 0b0010, 0b010>;
+def : RWSysReg<"CNTPS_CVAL_EL1", 0b11, 0b111, 0b1110, 0b0010, 0b010>;
+def : RWSysReg<"CNTV_TVAL_EL0", 0b11, 0b011, 0b1110, 0b0011, 0b000>;
+def : RWSysReg<"CNTV_CTL_EL0", 0b11, 0b011, 0b1110, 0b0011, 0b001>;
+def : RWSysReg<"CNTV_CVAL_EL0", 0b11, 0b011, 0b1110, 0b0011, 0b010>;
+def : RWSysReg<"PMEVCNTR0_EL0", 0b11, 0b011, 0b1110, 0b1000, 0b000>;
+def : RWSysReg<"PMEVCNTR1_EL0", 0b11, 0b011, 0b1110, 0b1000, 0b001>;
+def : RWSysReg<"PMEVCNTR2_EL0", 0b11, 0b011, 0b1110, 0b1000, 0b010>;
+def : RWSysReg<"PMEVCNTR3_EL0", 0b11, 0b011, 0b1110, 0b1000, 0b011>;
+def : RWSysReg<"PMEVCNTR4_EL0", 0b11, 0b011, 0b1110, 0b1000, 0b100>;
+def : RWSysReg<"PMEVCNTR5_EL0", 0b11, 0b011, 0b1110, 0b1000, 0b101>;
+def : RWSysReg<"PMEVCNTR6_EL0", 0b11, 0b011, 0b1110, 0b1000, 0b110>;
+def : RWSysReg<"PMEVCNTR7_EL0", 0b11, 0b011, 0b1110, 0b1000, 0b111>;
+def : RWSysReg<"PMEVCNTR8_EL0", 0b11, 0b011, 0b1110, 0b1001, 0b000>;
+def : RWSysReg<"PMEVCNTR9_EL0", 0b11, 0b011, 0b1110, 0b1001, 0b001>;
+def : RWSysReg<"PMEVCNTR10_EL0", 0b11, 0b011, 0b1110, 0b1001, 0b010>;
+def : RWSysReg<"PMEVCNTR11_EL0", 0b11, 0b011, 0b1110, 0b1001, 0b011>;
+def : RWSysReg<"PMEVCNTR12_EL0", 0b11, 0b011, 0b1110, 0b1001, 0b100>;
+def : RWSysReg<"PMEVCNTR13_EL0", 0b11, 0b011, 0b1110, 0b1001, 0b101>;
+def : RWSysReg<"PMEVCNTR14_EL0", 0b11, 0b011, 0b1110, 0b1001, 0b110>;
+def : RWSysReg<"PMEVCNTR15_EL0", 0b11, 0b011, 0b1110, 0b1001, 0b111>;
+def : RWSysReg<"PMEVCNTR16_EL0", 0b11, 0b011, 0b1110, 0b1010, 0b000>;
+def : RWSysReg<"PMEVCNTR17_EL0", 0b11, 0b011, 0b1110, 0b1010, 0b001>;
+def : RWSysReg<"PMEVCNTR18_EL0", 0b11, 0b011, 0b1110, 0b1010, 0b010>;
+def : RWSysReg<"PMEVCNTR19_EL0", 0b11, 0b011, 0b1110, 0b1010, 0b011>;
+def : RWSysReg<"PMEVCNTR20_EL0", 0b11, 0b011, 0b1110, 0b1010, 0b100>;
+def : RWSysReg<"PMEVCNTR21_EL0", 0b11, 0b011, 0b1110, 0b1010, 0b101>;
+def : RWSysReg<"PMEVCNTR22_EL0", 0b11, 0b011, 0b1110, 0b1010, 0b110>;
+def : RWSysReg<"PMEVCNTR23_EL0", 0b11, 0b011, 0b1110, 0b1010, 0b111>;
+def : RWSysReg<"PMEVCNTR24_EL0", 0b11, 0b011, 0b1110, 0b1011, 0b000>;
+def : RWSysReg<"PMEVCNTR25_EL0", 0b11, 0b011, 0b1110, 0b1011, 0b001>;
+def : RWSysReg<"PMEVCNTR26_EL0", 0b11, 0b011, 0b1110, 0b1011, 0b010>;
+def : RWSysReg<"PMEVCNTR27_EL0", 0b11, 0b011, 0b1110, 0b1011, 0b011>;
+def : RWSysReg<"PMEVCNTR28_EL0", 0b11, 0b011, 0b1110, 0b1011, 0b100>;
+def : RWSysReg<"PMEVCNTR29_EL0", 0b11, 0b011, 0b1110, 0b1011, 0b101>;
+def : RWSysReg<"PMEVCNTR30_EL0", 0b11, 0b011, 0b1110, 0b1011, 0b110>;
+def : RWSysReg<"PMCCFILTR_EL0", 0b11, 0b011, 0b1110, 0b1111, 0b111>;
+def : RWSysReg<"PMEVTYPER0_EL0", 0b11, 0b011, 0b1110, 0b1100, 0b000>;
+def : RWSysReg<"PMEVTYPER1_EL0", 0b11, 0b011, 0b1110, 0b1100, 0b001>;
+def : RWSysReg<"PMEVTYPER2_EL0", 0b11, 0b011, 0b1110, 0b1100, 0b010>;
+def : RWSysReg<"PMEVTYPER3_EL0", 0b11, 0b011, 0b1110, 0b1100, 0b011>;
+def : RWSysReg<"PMEVTYPER4_EL0", 0b11, 0b011, 0b1110, 0b1100, 0b100>;
+def : RWSysReg<"PMEVTYPER5_EL0", 0b11, 0b011, 0b1110, 0b1100, 0b101>;
+def : RWSysReg<"PMEVTYPER6_EL0", 0b11, 0b011, 0b1110, 0b1100, 0b110>;
+def : RWSysReg<"PMEVTYPER7_EL0", 0b11, 0b011, 0b1110, 0b1100, 0b111>;
+def : RWSysReg<"PMEVTYPER8_EL0", 0b11, 0b011, 0b1110, 0b1101, 0b000>;
+def : RWSysReg<"PMEVTYPER9_EL0", 0b11, 0b011, 0b1110, 0b1101, 0b001>;
+def : RWSysReg<"PMEVTYPER10_EL0", 0b11, 0b011, 0b1110, 0b1101, 0b010>;
+def : RWSysReg<"PMEVTYPER11_EL0", 0b11, 0b011, 0b1110, 0b1101, 0b011>;
+def : RWSysReg<"PMEVTYPER12_EL0", 0b11, 0b011, 0b1110, 0b1101, 0b100>;
+def : RWSysReg<"PMEVTYPER13_EL0", 0b11, 0b011, 0b1110, 0b1101, 0b101>;
+def : RWSysReg<"PMEVTYPER14_EL0", 0b11, 0b011, 0b1110, 0b1101, 0b110>;
+def : RWSysReg<"PMEVTYPER15_EL0", 0b11, 0b011, 0b1110, 0b1101, 0b111>;
+def : RWSysReg<"PMEVTYPER16_EL0", 0b11, 0b011, 0b1110, 0b1110, 0b000>;
+def : RWSysReg<"PMEVTYPER17_EL0", 0b11, 0b011, 0b1110, 0b1110, 0b001>;
+def : RWSysReg<"PMEVTYPER18_EL0", 0b11, 0b011, 0b1110, 0b1110, 0b010>;
+def : RWSysReg<"PMEVTYPER19_EL0", 0b11, 0b011, 0b1110, 0b1110, 0b011>;
+def : RWSysReg<"PMEVTYPER20_EL0", 0b11, 0b011, 0b1110, 0b1110, 0b100>;
+def : RWSysReg<"PMEVTYPER21_EL0", 0b11, 0b011, 0b1110, 0b1110, 0b101>;
+def : RWSysReg<"PMEVTYPER22_EL0", 0b11, 0b011, 0b1110, 0b1110, 0b110>;
+def : RWSysReg<"PMEVTYPER23_EL0", 0b11, 0b011, 0b1110, 0b1110, 0b111>;
+def : RWSysReg<"PMEVTYPER24_EL0", 0b11, 0b011, 0b1110, 0b1111, 0b000>;
+def : RWSysReg<"PMEVTYPER25_EL0", 0b11, 0b011, 0b1110, 0b1111, 0b001>;
+def : RWSysReg<"PMEVTYPER26_EL0", 0b11, 0b011, 0b1110, 0b1111, 0b010>;
+def : RWSysReg<"PMEVTYPER27_EL0", 0b11, 0b011, 0b1110, 0b1111, 0b011>;
+def : RWSysReg<"PMEVTYPER28_EL0", 0b11, 0b011, 0b1110, 0b1111, 0b100>;
+def : RWSysReg<"PMEVTYPER29_EL0", 0b11, 0b011, 0b1110, 0b1111, 0b101>;
+def : RWSysReg<"PMEVTYPER30_EL0", 0b11, 0b011, 0b1110, 0b1111, 0b110>;
+
+// Trace registers
+// Op0 Op1 CRn CRm Op2
+def : RWSysReg<"TRCPRGCTLR", 0b10, 0b001, 0b0000, 0b0001, 0b000>;
+def : RWSysReg<"TRCPROCSELR", 0b10, 0b001, 0b0000, 0b0010, 0b000>;
+def : RWSysReg<"TRCCONFIGR", 0b10, 0b001, 0b0000, 0b0100, 0b000>;
+def : RWSysReg<"TRCAUXCTLR", 0b10, 0b001, 0b0000, 0b0110, 0b000>;
+def : RWSysReg<"TRCEVENTCTL0R", 0b10, 0b001, 0b0000, 0b1000, 0b000>;
+def : RWSysReg<"TRCEVENTCTL1R", 0b10, 0b001, 0b0000, 0b1001, 0b000>;
+def : RWSysReg<"TRCSTALLCTLR", 0b10, 0b001, 0b0000, 0b1011, 0b000>;
+def : RWSysReg<"TRCTSCTLR", 0b10, 0b001, 0b0000, 0b1100, 0b000>;
+def : RWSysReg<"TRCSYNCPR", 0b10, 0b001, 0b0000, 0b1101, 0b000>;
+def : RWSysReg<"TRCCCCTLR", 0b10, 0b001, 0b0000, 0b1110, 0b000>;
+def : RWSysReg<"TRCBBCTLR", 0b10, 0b001, 0b0000, 0b1111, 0b000>;
+def : RWSysReg<"TRCTRACEIDR", 0b10, 0b001, 0b0000, 0b0000, 0b001>;
+def : RWSysReg<"TRCQCTLR", 0b10, 0b001, 0b0000, 0b0001, 0b001>;
+def : RWSysReg<"TRCVICTLR", 0b10, 0b001, 0b0000, 0b0000, 0b010>;
+def : RWSysReg<"TRCVIIECTLR", 0b10, 0b001, 0b0000, 0b0001, 0b010>;
+def : RWSysReg<"TRCVISSCTLR", 0b10, 0b001, 0b0000, 0b0010, 0b010>;
+def : RWSysReg<"TRCVIPCSSCTLR", 0b10, 0b001, 0b0000, 0b0011, 0b010>;
+def : RWSysReg<"TRCVDCTLR", 0b10, 0b001, 0b0000, 0b1000, 0b010>;
+def : RWSysReg<"TRCVDSACCTLR", 0b10, 0b001, 0b0000, 0b1001, 0b010>;
+def : RWSysReg<"TRCVDARCCTLR", 0b10, 0b001, 0b0000, 0b1010, 0b010>;
+def : RWSysReg<"TRCSEQEVR0", 0b10, 0b001, 0b0000, 0b0000, 0b100>;
+def : RWSysReg<"TRCSEQEVR1", 0b10, 0b001, 0b0000, 0b0001, 0b100>;
+def : RWSysReg<"TRCSEQEVR2", 0b10, 0b001, 0b0000, 0b0010, 0b100>;
+def : RWSysReg<"TRCSEQRSTEVR", 0b10, 0b001, 0b0000, 0b0110, 0b100>;
+def : RWSysReg<"TRCSEQSTR", 0b10, 0b001, 0b0000, 0b0111, 0b100>;
+def : RWSysReg<"TRCEXTINSELR", 0b10, 0b001, 0b0000, 0b1000, 0b100>;
+def : RWSysReg<"TRCCNTRLDVR0", 0b10, 0b001, 0b0000, 0b0000, 0b101>;
+def : RWSysReg<"TRCCNTRLDVR1", 0b10, 0b001, 0b0000, 0b0001, 0b101>;
+def : RWSysReg<"TRCCNTRLDVR2", 0b10, 0b001, 0b0000, 0b0010, 0b101>;
+def : RWSysReg<"TRCCNTRLDVR3", 0b10, 0b001, 0b0000, 0b0011, 0b101>;
+def : RWSysReg<"TRCCNTCTLR0", 0b10, 0b001, 0b0000, 0b0100, 0b101>;
+def : RWSysReg<"TRCCNTCTLR1", 0b10, 0b001, 0b0000, 0b0101, 0b101>;
+def : RWSysReg<"TRCCNTCTLR2", 0b10, 0b001, 0b0000, 0b0110, 0b101>;
+def : RWSysReg<"TRCCNTCTLR3", 0b10, 0b001, 0b0000, 0b0111, 0b101>;
+def : RWSysReg<"TRCCNTVR0", 0b10, 0b001, 0b0000, 0b1000, 0b101>;
+def : RWSysReg<"TRCCNTVR1", 0b10, 0b001, 0b0000, 0b1001, 0b101>;
+def : RWSysReg<"TRCCNTVR2", 0b10, 0b001, 0b0000, 0b1010, 0b101>;
+def : RWSysReg<"TRCCNTVR3", 0b10, 0b001, 0b0000, 0b1011, 0b101>;
+def : RWSysReg<"TRCIMSPEC0", 0b10, 0b001, 0b0000, 0b0000, 0b111>;
+def : RWSysReg<"TRCIMSPEC1", 0b10, 0b001, 0b0000, 0b0001, 0b111>;
+def : RWSysReg<"TRCIMSPEC2", 0b10, 0b001, 0b0000, 0b0010, 0b111>;
+def : RWSysReg<"TRCIMSPEC3", 0b10, 0b001, 0b0000, 0b0011, 0b111>;
+def : RWSysReg<"TRCIMSPEC4", 0b10, 0b001, 0b0000, 0b0100, 0b111>;
+def : RWSysReg<"TRCIMSPEC5", 0b10, 0b001, 0b0000, 0b0101, 0b111>;
+def : RWSysReg<"TRCIMSPEC6", 0b10, 0b001, 0b0000, 0b0110, 0b111>;
+def : RWSysReg<"TRCIMSPEC7", 0b10, 0b001, 0b0000, 0b0111, 0b111>;
+def : RWSysReg<"TRCRSCTLR2", 0b10, 0b001, 0b0001, 0b0010, 0b000>;
+def : RWSysReg<"TRCRSCTLR3", 0b10, 0b001, 0b0001, 0b0011, 0b000>;
+def : RWSysReg<"TRCRSCTLR4", 0b10, 0b001, 0b0001, 0b0100, 0b000>;
+def : RWSysReg<"TRCRSCTLR5", 0b10, 0b001, 0b0001, 0b0101, 0b000>;
+def : RWSysReg<"TRCRSCTLR6", 0b10, 0b001, 0b0001, 0b0110, 0b000>;
+def : RWSysReg<"TRCRSCTLR7", 0b10, 0b001, 0b0001, 0b0111, 0b000>;
+def : RWSysReg<"TRCRSCTLR8", 0b10, 0b001, 0b0001, 0b1000, 0b000>;
+def : RWSysReg<"TRCRSCTLR9", 0b10, 0b001, 0b0001, 0b1001, 0b000>;
+def : RWSysReg<"TRCRSCTLR10", 0b10, 0b001, 0b0001, 0b1010, 0b000>;
+def : RWSysReg<"TRCRSCTLR11", 0b10, 0b001, 0b0001, 0b1011, 0b000>;
+def : RWSysReg<"TRCRSCTLR12", 0b10, 0b001, 0b0001, 0b1100, 0b000>;
+def : RWSysReg<"TRCRSCTLR13", 0b10, 0b001, 0b0001, 0b1101, 0b000>;
+def : RWSysReg<"TRCRSCTLR14", 0b10, 0b001, 0b0001, 0b1110, 0b000>;
+def : RWSysReg<"TRCRSCTLR15", 0b10, 0b001, 0b0001, 0b1111, 0b000>;
+def : RWSysReg<"TRCRSCTLR16", 0b10, 0b001, 0b0001, 0b0000, 0b001>;
+def : RWSysReg<"TRCRSCTLR17", 0b10, 0b001, 0b0001, 0b0001, 0b001>;
+def : RWSysReg<"TRCRSCTLR18", 0b10, 0b001, 0b0001, 0b0010, 0b001>;
+def : RWSysReg<"TRCRSCTLR19", 0b10, 0b001, 0b0001, 0b0011, 0b001>;
+def : RWSysReg<"TRCRSCTLR20", 0b10, 0b001, 0b0001, 0b0100, 0b001>;
+def : RWSysReg<"TRCRSCTLR21", 0b10, 0b001, 0b0001, 0b0101, 0b001>;
+def : RWSysReg<"TRCRSCTLR22", 0b10, 0b001, 0b0001, 0b0110, 0b001>;
+def : RWSysReg<"TRCRSCTLR23", 0b10, 0b001, 0b0001, 0b0111, 0b001>;
+def : RWSysReg<"TRCRSCTLR24", 0b10, 0b001, 0b0001, 0b1000, 0b001>;
+def : RWSysReg<"TRCRSCTLR25", 0b10, 0b001, 0b0001, 0b1001, 0b001>;
+def : RWSysReg<"TRCRSCTLR26", 0b10, 0b001, 0b0001, 0b1010, 0b001>;
+def : RWSysReg<"TRCRSCTLR27", 0b10, 0b001, 0b0001, 0b1011, 0b001>;
+def : RWSysReg<"TRCRSCTLR28", 0b10, 0b001, 0b0001, 0b1100, 0b001>;
+def : RWSysReg<"TRCRSCTLR29", 0b10, 0b001, 0b0001, 0b1101, 0b001>;
+def : RWSysReg<"TRCRSCTLR30", 0b10, 0b001, 0b0001, 0b1110, 0b001>;
+def : RWSysReg<"TRCRSCTLR31", 0b10, 0b001, 0b0001, 0b1111, 0b001>;
+def : RWSysReg<"TRCSSCCR0", 0b10, 0b001, 0b0001, 0b0000, 0b010>;
+def : RWSysReg<"TRCSSCCR1", 0b10, 0b001, 0b0001, 0b0001, 0b010>;
+def : RWSysReg<"TRCSSCCR2", 0b10, 0b001, 0b0001, 0b0010, 0b010>;
+def : RWSysReg<"TRCSSCCR3", 0b10, 0b001, 0b0001, 0b0011, 0b010>;
+def : RWSysReg<"TRCSSCCR4", 0b10, 0b001, 0b0001, 0b0100, 0b010>;
+def : RWSysReg<"TRCSSCCR5", 0b10, 0b001, 0b0001, 0b0101, 0b010>;
+def : RWSysReg<"TRCSSCCR6", 0b10, 0b001, 0b0001, 0b0110, 0b010>;
+def : RWSysReg<"TRCSSCCR7", 0b10, 0b001, 0b0001, 0b0111, 0b010>;
+def : RWSysReg<"TRCSSCSR0", 0b10, 0b001, 0b0001, 0b1000, 0b010>;
+def : RWSysReg<"TRCSSCSR1", 0b10, 0b001, 0b0001, 0b1001, 0b010>;
+def : RWSysReg<"TRCSSCSR2", 0b10, 0b001, 0b0001, 0b1010, 0b010>;
+def : RWSysReg<"TRCSSCSR3", 0b10, 0b001, 0b0001, 0b1011, 0b010>;
+def : RWSysReg<"TRCSSCSR4", 0b10, 0b001, 0b0001, 0b1100, 0b010>;
+def : RWSysReg<"TRCSSCSR5", 0b10, 0b001, 0b0001, 0b1101, 0b010>;
+def : RWSysReg<"TRCSSCSR6", 0b10, 0b001, 0b0001, 0b1110, 0b010>;
+def : RWSysReg<"TRCSSCSR7", 0b10, 0b001, 0b0001, 0b1111, 0b010>;
+def : RWSysReg<"TRCSSPCICR0", 0b10, 0b001, 0b0001, 0b0000, 0b011>;
+def : RWSysReg<"TRCSSPCICR1", 0b10, 0b001, 0b0001, 0b0001, 0b011>;
+def : RWSysReg<"TRCSSPCICR2", 0b10, 0b001, 0b0001, 0b0010, 0b011>;
+def : RWSysReg<"TRCSSPCICR3", 0b10, 0b001, 0b0001, 0b0011, 0b011>;
+def : RWSysReg<"TRCSSPCICR4", 0b10, 0b001, 0b0001, 0b0100, 0b011>;
+def : RWSysReg<"TRCSSPCICR5", 0b10, 0b001, 0b0001, 0b0101, 0b011>;
+def : RWSysReg<"TRCSSPCICR6", 0b10, 0b001, 0b0001, 0b0110, 0b011>;
+def : RWSysReg<"TRCSSPCICR7", 0b10, 0b001, 0b0001, 0b0111, 0b011>;
+def : RWSysReg<"TRCPDCR", 0b10, 0b001, 0b0001, 0b0100, 0b100>;
+def : RWSysReg<"TRCACVR0", 0b10, 0b001, 0b0010, 0b0000, 0b000>;
+def : RWSysReg<"TRCACVR1", 0b10, 0b001, 0b0010, 0b0010, 0b000>;
+def : RWSysReg<"TRCACVR2", 0b10, 0b001, 0b0010, 0b0100, 0b000>;
+def : RWSysReg<"TRCACVR3", 0b10, 0b001, 0b0010, 0b0110, 0b000>;
+def : RWSysReg<"TRCACVR4", 0b10, 0b001, 0b0010, 0b1000, 0b000>;
+def : RWSysReg<"TRCACVR5", 0b10, 0b001, 0b0010, 0b1010, 0b000>;
+def : RWSysReg<"TRCACVR6", 0b10, 0b001, 0b0010, 0b1100, 0b000>;
+def : RWSysReg<"TRCACVR7", 0b10, 0b001, 0b0010, 0b1110, 0b000>;
+def : RWSysReg<"TRCACVR8", 0b10, 0b001, 0b0010, 0b0000, 0b001>;
+def : RWSysReg<"TRCACVR9", 0b10, 0b001, 0b0010, 0b0010, 0b001>;
+def : RWSysReg<"TRCACVR10", 0b10, 0b001, 0b0010, 0b0100, 0b001>;
+def : RWSysReg<"TRCACVR11", 0b10, 0b001, 0b0010, 0b0110, 0b001>;
+def : RWSysReg<"TRCACVR12", 0b10, 0b001, 0b0010, 0b1000, 0b001>;
+def : RWSysReg<"TRCACVR13", 0b10, 0b001, 0b0010, 0b1010, 0b001>;
+def : RWSysReg<"TRCACVR14", 0b10, 0b001, 0b0010, 0b1100, 0b001>;
+def : RWSysReg<"TRCACVR15", 0b10, 0b001, 0b0010, 0b1110, 0b001>;
+def : RWSysReg<"TRCACATR0", 0b10, 0b001, 0b0010, 0b0000, 0b010>;
+def : RWSysReg<"TRCACATR1", 0b10, 0b001, 0b0010, 0b0010, 0b010>;
+def : RWSysReg<"TRCACATR2", 0b10, 0b001, 0b0010, 0b0100, 0b010>;
+def : RWSysReg<"TRCACATR3", 0b10, 0b001, 0b0010, 0b0110, 0b010>;
+def : RWSysReg<"TRCACATR4", 0b10, 0b001, 0b0010, 0b1000, 0b010>;
+def : RWSysReg<"TRCACATR5", 0b10, 0b001, 0b0010, 0b1010, 0b010>;
+def : RWSysReg<"TRCACATR6", 0b10, 0b001, 0b0010, 0b1100, 0b010>;
+def : RWSysReg<"TRCACATR7", 0b10, 0b001, 0b0010, 0b1110, 0b010>;
+def : RWSysReg<"TRCACATR8", 0b10, 0b001, 0b0010, 0b0000, 0b011>;
+def : RWSysReg<"TRCACATR9", 0b10, 0b001, 0b0010, 0b0010, 0b011>;
+def : RWSysReg<"TRCACATR10", 0b10, 0b001, 0b0010, 0b0100, 0b011>;
+def : RWSysReg<"TRCACATR11", 0b10, 0b001, 0b0010, 0b0110, 0b011>;
+def : RWSysReg<"TRCACATR12", 0b10, 0b001, 0b0010, 0b1000, 0b011>;
+def : RWSysReg<"TRCACATR13", 0b10, 0b001, 0b0010, 0b1010, 0b011>;
+def : RWSysReg<"TRCACATR14", 0b10, 0b001, 0b0010, 0b1100, 0b011>;
+def : RWSysReg<"TRCACATR15", 0b10, 0b001, 0b0010, 0b1110, 0b011>;
+def : RWSysReg<"TRCDVCVR0", 0b10, 0b001, 0b0010, 0b0000, 0b100>;
+def : RWSysReg<"TRCDVCVR1", 0b10, 0b001, 0b0010, 0b0100, 0b100>;
+def : RWSysReg<"TRCDVCVR2", 0b10, 0b001, 0b0010, 0b1000, 0b100>;
+def : RWSysReg<"TRCDVCVR3", 0b10, 0b001, 0b0010, 0b1100, 0b100>;
+def : RWSysReg<"TRCDVCVR4", 0b10, 0b001, 0b0010, 0b0000, 0b101>;
+def : RWSysReg<"TRCDVCVR5", 0b10, 0b001, 0b0010, 0b0100, 0b101>;
+def : RWSysReg<"TRCDVCVR6", 0b10, 0b001, 0b0010, 0b1000, 0b101>;
+def : RWSysReg<"TRCDVCVR7", 0b10, 0b001, 0b0010, 0b1100, 0b101>;
+def : RWSysReg<"TRCDVCMR0", 0b10, 0b001, 0b0010, 0b0000, 0b110>;
+def : RWSysReg<"TRCDVCMR1", 0b10, 0b001, 0b0010, 0b0100, 0b110>;
+def : RWSysReg<"TRCDVCMR2", 0b10, 0b001, 0b0010, 0b1000, 0b110>;
+def : RWSysReg<"TRCDVCMR3", 0b10, 0b001, 0b0010, 0b1100, 0b110>;
+def : RWSysReg<"TRCDVCMR4", 0b10, 0b001, 0b0010, 0b0000, 0b111>;
+def : RWSysReg<"TRCDVCMR5", 0b10, 0b001, 0b0010, 0b0100, 0b111>;
+def : RWSysReg<"TRCDVCMR6", 0b10, 0b001, 0b0010, 0b1000, 0b111>;
+def : RWSysReg<"TRCDVCMR7", 0b10, 0b001, 0b0010, 0b1100, 0b111>;
+def : RWSysReg<"TRCCIDCVR0", 0b10, 0b001, 0b0011, 0b0000, 0b000>;
+def : RWSysReg<"TRCCIDCVR1", 0b10, 0b001, 0b0011, 0b0010, 0b000>;
+def : RWSysReg<"TRCCIDCVR2", 0b10, 0b001, 0b0011, 0b0100, 0b000>;
+def : RWSysReg<"TRCCIDCVR3", 0b10, 0b001, 0b0011, 0b0110, 0b000>;
+def : RWSysReg<"TRCCIDCVR4", 0b10, 0b001, 0b0011, 0b1000, 0b000>;
+def : RWSysReg<"TRCCIDCVR5", 0b10, 0b001, 0b0011, 0b1010, 0b000>;
+def : RWSysReg<"TRCCIDCVR6", 0b10, 0b001, 0b0011, 0b1100, 0b000>;
+def : RWSysReg<"TRCCIDCVR7", 0b10, 0b001, 0b0011, 0b1110, 0b000>;
+def : RWSysReg<"TRCVMIDCVR0", 0b10, 0b001, 0b0011, 0b0000, 0b001>;
+def : RWSysReg<"TRCVMIDCVR1", 0b10, 0b001, 0b0011, 0b0010, 0b001>;
+def : RWSysReg<"TRCVMIDCVR2", 0b10, 0b001, 0b0011, 0b0100, 0b001>;
+def : RWSysReg<"TRCVMIDCVR3", 0b10, 0b001, 0b0011, 0b0110, 0b001>;
+def : RWSysReg<"TRCVMIDCVR4", 0b10, 0b001, 0b0011, 0b1000, 0b001>;
+def : RWSysReg<"TRCVMIDCVR5", 0b10, 0b001, 0b0011, 0b1010, 0b001>;
+def : RWSysReg<"TRCVMIDCVR6", 0b10, 0b001, 0b0011, 0b1100, 0b001>;
+def : RWSysReg<"TRCVMIDCVR7", 0b10, 0b001, 0b0011, 0b1110, 0b001>;
+def : RWSysReg<"TRCCIDCCTLR0", 0b10, 0b001, 0b0011, 0b0000, 0b010>;
+def : RWSysReg<"TRCCIDCCTLR1", 0b10, 0b001, 0b0011, 0b0001, 0b010>;
+def : RWSysReg<"TRCVMIDCCTLR0", 0b10, 0b001, 0b0011, 0b0010, 0b010>;
+def : RWSysReg<"TRCVMIDCCTLR1", 0b10, 0b001, 0b0011, 0b0011, 0b010>;
+def : RWSysReg<"TRCITCTRL", 0b10, 0b001, 0b0111, 0b0000, 0b100>;
+def : RWSysReg<"TRCCLAIMSET", 0b10, 0b001, 0b0111, 0b1000, 0b110>;
+def : RWSysReg<"TRCCLAIMCLR", 0b10, 0b001, 0b0111, 0b1001, 0b110>;
+
+// GICv3 registers
+// Op0 Op1 CRn CRm Op2
+def : RWSysReg<"ICC_BPR1_EL1", 0b11, 0b000, 0b1100, 0b1100, 0b011>;
+def : RWSysReg<"ICC_BPR0_EL1", 0b11, 0b000, 0b1100, 0b1000, 0b011>;
+def : RWSysReg<"ICC_PMR_EL1", 0b11, 0b000, 0b0100, 0b0110, 0b000>;
+def : RWSysReg<"ICC_CTLR_EL1", 0b11, 0b000, 0b1100, 0b1100, 0b100>;
+def : RWSysReg<"ICC_CTLR_EL3", 0b11, 0b110, 0b1100, 0b1100, 0b100>;
+def : RWSysReg<"ICC_SRE_EL1", 0b11, 0b000, 0b1100, 0b1100, 0b101>;
+def : RWSysReg<"ICC_SRE_EL2", 0b11, 0b100, 0b1100, 0b1001, 0b101>;
+def : RWSysReg<"ICC_SRE_EL3", 0b11, 0b110, 0b1100, 0b1100, 0b101>;
+def : RWSysReg<"ICC_IGRPEN0_EL1", 0b11, 0b000, 0b1100, 0b1100, 0b110>;
+def : RWSysReg<"ICC_IGRPEN1_EL1", 0b11, 0b000, 0b1100, 0b1100, 0b111>;
+def : RWSysReg<"ICC_IGRPEN1_EL3", 0b11, 0b110, 0b1100, 0b1100, 0b111>;
+def : RWSysReg<"ICC_SEIEN_EL1", 0b11, 0b000, 0b1100, 0b1101, 0b000>;
+def : RWSysReg<"ICC_AP0R0_EL1", 0b11, 0b000, 0b1100, 0b1000, 0b100>;
+def : RWSysReg<"ICC_AP0R1_EL1", 0b11, 0b000, 0b1100, 0b1000, 0b101>;
+def : RWSysReg<"ICC_AP0R2_EL1", 0b11, 0b000, 0b1100, 0b1000, 0b110>;
+def : RWSysReg<"ICC_AP0R3_EL1", 0b11, 0b000, 0b1100, 0b1000, 0b111>;
+def : RWSysReg<"ICC_AP1R0_EL1", 0b11, 0b000, 0b1100, 0b1001, 0b000>;
+def : RWSysReg<"ICC_AP1R1_EL1", 0b11, 0b000, 0b1100, 0b1001, 0b001>;
+def : RWSysReg<"ICC_AP1R2_EL1", 0b11, 0b000, 0b1100, 0b1001, 0b010>;
+def : RWSysReg<"ICC_AP1R3_EL1", 0b11, 0b000, 0b1100, 0b1001, 0b011>;
+def : RWSysReg<"ICH_AP0R0_EL2", 0b11, 0b100, 0b1100, 0b1000, 0b000>;
+def : RWSysReg<"ICH_AP0R1_EL2", 0b11, 0b100, 0b1100, 0b1000, 0b001>;
+def : RWSysReg<"ICH_AP0R2_EL2", 0b11, 0b100, 0b1100, 0b1000, 0b010>;
+def : RWSysReg<"ICH_AP0R3_EL2", 0b11, 0b100, 0b1100, 0b1000, 0b011>;
+def : RWSysReg<"ICH_AP1R0_EL2", 0b11, 0b100, 0b1100, 0b1001, 0b000>;
+def : RWSysReg<"ICH_AP1R1_EL2", 0b11, 0b100, 0b1100, 0b1001, 0b001>;
+def : RWSysReg<"ICH_AP1R2_EL2", 0b11, 0b100, 0b1100, 0b1001, 0b010>;
+def : RWSysReg<"ICH_AP1R3_EL2", 0b11, 0b100, 0b1100, 0b1001, 0b011>;
+def : RWSysReg<"ICH_HCR_EL2", 0b11, 0b100, 0b1100, 0b1011, 0b000>;
+def : RWSysReg<"ICH_MISR_EL2", 0b11, 0b100, 0b1100, 0b1011, 0b010>;
+def : RWSysReg<"ICH_VMCR_EL2", 0b11, 0b100, 0b1100, 0b1011, 0b111>;
+def : RWSysReg<"ICH_VSEIR_EL2", 0b11, 0b100, 0b1100, 0b1001, 0b100>;
+def : RWSysReg<"ICH_LR0_EL2", 0b11, 0b100, 0b1100, 0b1100, 0b000>;
+def : RWSysReg<"ICH_LR1_EL2", 0b11, 0b100, 0b1100, 0b1100, 0b001>;
+def : RWSysReg<"ICH_LR2_EL2", 0b11, 0b100, 0b1100, 0b1100, 0b010>;
+def : RWSysReg<"ICH_LR3_EL2", 0b11, 0b100, 0b1100, 0b1100, 0b011>;
+def : RWSysReg<"ICH_LR4_EL2", 0b11, 0b100, 0b1100, 0b1100, 0b100>;
+def : RWSysReg<"ICH_LR5_EL2", 0b11, 0b100, 0b1100, 0b1100, 0b101>;
+def : RWSysReg<"ICH_LR6_EL2", 0b11, 0b100, 0b1100, 0b1100, 0b110>;
+def : RWSysReg<"ICH_LR7_EL2", 0b11, 0b100, 0b1100, 0b1100, 0b111>;
+def : RWSysReg<"ICH_LR8_EL2", 0b11, 0b100, 0b1100, 0b1101, 0b000>;
+def : RWSysReg<"ICH_LR9_EL2", 0b11, 0b100, 0b1100, 0b1101, 0b001>;
+def : RWSysReg<"ICH_LR10_EL2", 0b11, 0b100, 0b1100, 0b1101, 0b010>;
+def : RWSysReg<"ICH_LR11_EL2", 0b11, 0b100, 0b1100, 0b1101, 0b011>;
+def : RWSysReg<"ICH_LR12_EL2", 0b11, 0b100, 0b1100, 0b1101, 0b100>;
+def : RWSysReg<"ICH_LR13_EL2", 0b11, 0b100, 0b1100, 0b1101, 0b101>;
+def : RWSysReg<"ICH_LR14_EL2", 0b11, 0b100, 0b1100, 0b1101, 0b110>;
+def : RWSysReg<"ICH_LR15_EL2", 0b11, 0b100, 0b1100, 0b1101, 0b111>;
+
+// v8.1a "Privileged Access Never" extension-specific system registers
+let Requires = [{ {AArch64::HasV8_1aOps} }] in
+def : RWSysReg<"PAN", 0b11, 0b000, 0b0100, 0b0010, 0b011>;
+
+// v8.1a "Limited Ordering Regions" extension-specific system registers
+// Op0 Op1 CRn CRm Op2
+let Requires = [{ {AArch64::HasV8_1aOps} }] in {
+def : RWSysReg<"LORSA_EL1", 0b11, 0b000, 0b1010, 0b0100, 0b000>;
+def : RWSysReg<"LOREA_EL1", 0b11, 0b000, 0b1010, 0b0100, 0b001>;
+def : RWSysReg<"LORN_EL1", 0b11, 0b000, 0b1010, 0b0100, 0b010>;
+def : RWSysReg<"LORC_EL1", 0b11, 0b000, 0b1010, 0b0100, 0b011>;
+}
+
+// v8.1a "Virtualization hos extensions" system registers
+// Op0 Op1 CRn CRm Op2
+let Requires = [{ {AArch64::HasV8_1aOps} }] in {
+def : RWSysReg<"TTBR1_EL2", 0b11, 0b100, 0b0010, 0b0000, 0b001>;
+def : RWSysReg<"CONTEXTIDR_EL2", 0b11, 0b100, 0b1101, 0b0000, 0b001>;
+def : RWSysReg<"CNTHV_TVAL_EL2", 0b11, 0b100, 0b1110, 0b0011, 0b000>;
+def : RWSysReg<"CNTHV_CVAL_EL2", 0b11, 0b100, 0b1110, 0b0011, 0b010>;
+def : RWSysReg<"CNTHV_CTL_EL2", 0b11, 0b100, 0b1110, 0b0011, 0b001>;
+def : RWSysReg<"SCTLR_EL12", 0b11, 0b101, 0b0001, 0b0000, 0b000>;
+def : RWSysReg<"CPACR_EL12", 0b11, 0b101, 0b0001, 0b0000, 0b010>;
+def : RWSysReg<"TTBR0_EL12", 0b11, 0b101, 0b0010, 0b0000, 0b000>;
+def : RWSysReg<"TTBR1_EL12", 0b11, 0b101, 0b0010, 0b0000, 0b001>;
+def : RWSysReg<"TCR_EL12", 0b11, 0b101, 0b0010, 0b0000, 0b010>;
+def : RWSysReg<"AFSR0_EL12", 0b11, 0b101, 0b0101, 0b0001, 0b000>;
+def : RWSysReg<"AFSR1_EL12", 0b11, 0b101, 0b0101, 0b0001, 0b001>;
+def : RWSysReg<"ESR_EL12", 0b11, 0b101, 0b0101, 0b0010, 0b000>;
+def : RWSysReg<"FAR_EL12", 0b11, 0b101, 0b0110, 0b0000, 0b000>;
+def : RWSysReg<"MAIR_EL12", 0b11, 0b101, 0b1010, 0b0010, 0b000>;
+def : RWSysReg<"AMAIR_EL12", 0b11, 0b101, 0b1010, 0b0011, 0b000>;
+def : RWSysReg<"VBAR_EL12", 0b11, 0b101, 0b1100, 0b0000, 0b000>;
+def : RWSysReg<"CONTEXTIDR_EL12", 0b11, 0b101, 0b1101, 0b0000, 0b001>;
+def : RWSysReg<"CNTKCTL_EL12", 0b11, 0b101, 0b1110, 0b0001, 0b000>;
+def : RWSysReg<"CNTP_TVAL_EL02", 0b11, 0b101, 0b1110, 0b0010, 0b000>;
+def : RWSysReg<"CNTP_CTL_EL02", 0b11, 0b101, 0b1110, 0b0010, 0b001>;
+def : RWSysReg<"CNTP_CVAL_EL02", 0b11, 0b101, 0b1110, 0b0010, 0b010>;
+def : RWSysReg<"CNTV_TVAL_EL02", 0b11, 0b101, 0b1110, 0b0011, 0b000>;
+def : RWSysReg<"CNTV_CTL_EL02", 0b11, 0b101, 0b1110, 0b0011, 0b001>;
+def : RWSysReg<"CNTV_CVAL_EL02", 0b11, 0b101, 0b1110, 0b0011, 0b010>;
+def : RWSysReg<"SPSR_EL12", 0b11, 0b101, 0b0100, 0b0000, 0b000>;
+def : RWSysReg<"ELR_EL12", 0b11, 0b101, 0b0100, 0b0000, 0b001>;
+}
+// v8.2a registers
+// Op0 Op1 CRn CRm Op2
+let Requires = [{ {AArch64::HasV8_2aOps} }] in
+def : RWSysReg<"UAO", 0b11, 0b000, 0b0100, 0b0010, 0b100>;
+
+// v8.2a "Statistical Profiling extension" registers
+// Op0 Op1 CRn CRm Op2
+let Requires = [{ {AArch64::FeatureSPE} }] in {
+def : RWSysReg<"PMBLIMITR_EL1", 0b11, 0b000, 0b1001, 0b1010, 0b000>;
+def : RWSysReg<"PMBPTR_EL1", 0b11, 0b000, 0b1001, 0b1010, 0b001>;
+def : RWSysReg<"PMBSR_EL1", 0b11, 0b000, 0b1001, 0b1010, 0b011>;
+def : RWSysReg<"PMBIDR_EL1", 0b11, 0b000, 0b1001, 0b1010, 0b111>;
+def : RWSysReg<"PMSCR_EL2", 0b11, 0b100, 0b1001, 0b1001, 0b000>;
+def : RWSysReg<"PMSCR_EL12", 0b11, 0b101, 0b1001, 0b1001, 0b000>;
+def : RWSysReg<"PMSCR_EL1", 0b11, 0b000, 0b1001, 0b1001, 0b000>;
+def : RWSysReg<"PMSICR_EL1", 0b11, 0b000, 0b1001, 0b1001, 0b010>;
+def : RWSysReg<"PMSIRR_EL1", 0b11, 0b000, 0b1001, 0b1001, 0b011>;
+def : RWSysReg<"PMSFCR_EL1", 0b11, 0b000, 0b1001, 0b1001, 0b100>;
+def : RWSysReg<"PMSEVFR_EL1", 0b11, 0b000, 0b1001, 0b1001, 0b101>;
+def : RWSysReg<"PMSLATFR_EL1", 0b11, 0b000, 0b1001, 0b1001, 0b110>;
+def : RWSysReg<"PMSIDR_EL1", 0b11, 0b000, 0b1001, 0b1001, 0b111>;
+}
+
+// v8.2a "RAS extension" registers
+// Op0 Op1 CRn CRm Op2
+let Requires = [{ {AArch64::FeatureRAS} }] in {
+def : RWSysReg<"ERRSELR_EL1", 0b11, 0b000, 0b0101, 0b0011, 0b001>;
+def : RWSysReg<"ERXCTLR_EL1", 0b11, 0b000, 0b0101, 0b0100, 0b001>;
+def : RWSysReg<"ERXSTATUS_EL1", 0b11, 0b000, 0b0101, 0b0100, 0b010>;
+def : RWSysReg<"ERXADDR_EL1", 0b11, 0b000, 0b0101, 0b0100, 0b011>;
+def : RWSysReg<"ERXMISC0_EL1", 0b11, 0b000, 0b0101, 0b0101, 0b000>;
+def : RWSysReg<"ERXMISC1_EL1", 0b11, 0b000, 0b0101, 0b0101, 0b001>;
+def : RWSysReg<"DISR_EL1", 0b11, 0b000, 0b1100, 0b0001, 0b001>;
+def : RWSysReg<"VDISR_EL2", 0b11, 0b100, 0b1100, 0b0001, 0b001>;
+def : RWSysReg<"VSESR_EL2", 0b11, 0b100, 0b0101, 0b0010, 0b011>;
+}
+
+// Cyclone specific system registers
+// Op0 Op1 CRn CRm Op2
+let Requires = [{ {AArch64::ProcCyclone} }] in
+def : RWSysReg<"CPM_IOACC_CTL_EL3", 0b11, 0b111, 0b1111, 0b0010, 0b000>;
diff --git a/lib/Target/AArch64/AArch64TargetMachine.cpp b/lib/Target/AArch64/AArch64TargetMachine.cpp
index c52c5544fc7e..0b6345ff8011 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -11,13 +11,19 @@
//===----------------------------------------------------------------------===//
#include "AArch64.h"
+#include "AArch64CallLowering.h"
+#include "AArch64RegisterBankInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
+#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
+#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
@@ -58,6 +64,11 @@ EnableDeadRegisterElimination("aarch64-dead-def-elimination", cl::Hidden,
cl::init(true));
static cl::opt<bool>
+EnableRedundantCopyElimination("aarch64-redundant-copy-elim",
+ cl::desc("Enable the redundant copy elimination pass"),
+ cl::init(true), cl::Hidden);
+
+static cl::opt<bool>
EnableLoadStoreOpt("aarch64-load-store-opt", cl::desc("Enable the load/store pair"
" optimization pass"), cl::init(true), cl::Hidden);
@@ -92,11 +103,19 @@ static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("aarch64-global-merge", cl::Hidden,
cl::desc("Enable the global merge pass"));
+static cl::opt<bool>
+ EnableLoopDataPrefetch("aarch64-loop-data-prefetch", cl::Hidden,
+ cl::desc("Enable the loop data prefetch pass"),
+ cl::init(true));
+
extern "C" void LLVMInitializeAArch64Target() {
// Register the target.
RegisterTargetMachine<AArch64leTargetMachine> X(TheAArch64leTarget);
RegisterTargetMachine<AArch64beTargetMachine> Y(TheAArch64beTarget);
RegisterTargetMachine<AArch64leTargetMachine> Z(TheARM64Target);
+ auto PR = PassRegistry::getPassRegistry();
+ initializeGlobalISel(*PR);
+ initializeAArch64ExpandPseudoPass(*PR);
}
//===----------------------------------------------------------------------===//
@@ -114,29 +133,79 @@ static std::string computeDataLayout(const Triple &TT, bool LittleEndian) {
if (TT.isOSBinFormatMachO())
return "e-m:o-i64:64-i128:128-n32:64-S128";
if (LittleEndian)
- return "e-m:e-i64:64-i128:128-n32:64-S128";
- return "E-m:e-i64:64-i128:128-n32:64-S128";
+ return "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
+ return "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
}
-/// TargetMachine ctor - Create an AArch64 architecture model.
+// Helper function to set up the defaults for reciprocals.
+static void initReciprocals(AArch64TargetMachine &TM, AArch64Subtarget &ST) {
+ // For the estimates, convergence is quadratic, so essentially the number of
+  // digits is doubled after each iteration. On ARMv8, the minimum architected
+  // accuracy of the initial estimate is 2^-8. Therefore, the numbers of extra
+ // steps to refine the result for float (23 mantissa bits) and for double
+ // (52 mantissa bits) are 2 and 3, respectively.
+ unsigned ExtraStepsF = 2,
+ ExtraStepsD = ExtraStepsF + 1;
+ bool UseRsqrt = ST.useRSqrt();
+
+ TM.Options.Reciprocals.setDefaults("sqrtf", UseRsqrt, ExtraStepsF);
+ TM.Options.Reciprocals.setDefaults("sqrtd", UseRsqrt, ExtraStepsD);
+ TM.Options.Reciprocals.setDefaults("vec-sqrtf", UseRsqrt, ExtraStepsF);
+ TM.Options.Reciprocals.setDefaults("vec-sqrtd", UseRsqrt, ExtraStepsD);
+
+ TM.Options.Reciprocals.setDefaults("divf", false, ExtraStepsF);
+ TM.Options.Reciprocals.setDefaults("divd", false, ExtraStepsD);
+ TM.Options.Reciprocals.setDefaults("vec-divf", false, ExtraStepsF);
+ TM.Options.Reciprocals.setDefaults("vec-divd", false, ExtraStepsD);
+}
+
+static Reloc::Model getEffectiveRelocModel(const Triple &TT,
+ Optional<Reloc::Model> RM) {
+ // AArch64 Darwin is always PIC.
+ if (TT.isOSDarwin())
+ return Reloc::PIC_;
+ // On ELF platforms the default static relocation model has a smart enough
+ // linker to cope with referencing external symbols defined in a shared
+ // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
+ if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
+ return Reloc::Static;
+ return *RM;
+}
+
+/// Create an AArch64 architecture model.
///
-AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT,
- StringRef CPU, StringRef FS,
- const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL,
- bool LittleEndian)
+AArch64TargetMachine::AArch64TargetMachine(
+ const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
+ const TargetOptions &Options, Optional<Reloc::Model> RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL, bool LittleEndian)
// This nested ternary is horrible, but DL needs to be properly
// initialized before TLInfo is constructed.
: LLVMTargetMachine(T, computeDataLayout(TT, LittleEndian), TT, CPU, FS,
- Options, RM, CM, OL),
+ Options, getEffectiveRelocModel(TT, RM), CM, OL),
TLOF(createTLOF(getTargetTriple())),
- isLittle(LittleEndian) {
+ Subtarget(TT, CPU, FS, *this, LittleEndian) {
+ initReciprocals(*this, Subtarget);
initAsmInfo();
}
AArch64TargetMachine::~AArch64TargetMachine() {}
+#ifdef LLVM_BUILD_GLOBAL_ISEL
+namespace {
+struct AArch64GISelActualAccessor : public GISelAccessor {
+ std::unique_ptr<CallLowering> CallLoweringInfo;
+ std::unique_ptr<RegisterBankInfo> RegBankInfo;
+ const CallLowering *getCallLowering() const override {
+ return CallLoweringInfo.get();
+ }
+ const RegisterBankInfo *getRegBankInfo() const override {
+ return RegBankInfo.get();
+ }
+};
+} // End anonymous namespace.
+#endif
+
const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
Attribute CPUAttr = F.getFnAttribute("target-cpu");
@@ -156,7 +225,18 @@ AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
// function that reside in TargetOptions.
resetTargetOptions(F);
I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
- isLittle);
+ Subtarget.isLittleEndian());
+#ifndef LLVM_BUILD_GLOBAL_ISEL
+ GISelAccessor *GISel = new GISelAccessor();
+#else
+ AArch64GISelActualAccessor *GISel =
+ new AArch64GISelActualAccessor();
+ GISel->CallLoweringInfo.reset(
+ new AArch64CallLowering(*I->getTargetLowering()));
+ GISel->RegBankInfo.reset(
+ new AArch64RegisterBankInfo(*I->getRegisterInfo()));
+#endif
+ I->setGISelAccessor(*GISel);
}
return I.get();
}
@@ -165,16 +245,16 @@ void AArch64leTargetMachine::anchor() { }
AArch64leTargetMachine::AArch64leTargetMachine(
const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
+ const TargetOptions &Options, Optional<Reloc::Model> RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL)
: AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
void AArch64beTargetMachine::anchor() { }
AArch64beTargetMachine::AArch64beTargetMachine(
const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
+ const TargetOptions &Options, Optional<Reloc::Model> RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL)
: AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
namespace {
@@ -194,6 +274,10 @@ public:
void addIRPasses() override;
bool addPreISel() override;
bool addInstSelector() override;
+#ifdef LLVM_BUILD_GLOBAL_ISEL
+ bool addIRTranslator() override;
+ bool addRegBankSelect() override;
+#endif
bool addILPOpts() override;
void addPreRegAlloc() override;
void addPostRegAlloc() override;
@@ -223,6 +307,13 @@ void AArch64PassConfig::addIRPasses() {
if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
addPass(createCFGSimplificationPass());
+ // Run LoopDataPrefetch
+ //
+ // Run this before LSR to remove the multiplies involved in computing the
+ // pointer values N iterations ahead.
+ if (TM->getOptLevel() != CodeGenOpt::None && EnableLoopDataPrefetch)
+ addPass(createLoopDataPrefetchPass());
+
TargetPassConfig::addIRPasses();
// Match interleaved memory accesses to ldN/stN intrinsics.
@@ -278,6 +369,17 @@ bool AArch64PassConfig::addInstSelector() {
return false;
}
+#ifdef LLVM_BUILD_GLOBAL_ISEL
+bool AArch64PassConfig::addIRTranslator() {
+ addPass(new IRTranslator());
+ return false;
+}
+bool AArch64PassConfig::addRegBankSelect() {
+ addPass(new RegBankSelect());
+ return false;
+}
+#endif
+
bool AArch64PassConfig::addILPOpts() {
if (EnableCondOpt)
addPass(createAArch64ConditionOptimizerPass());
@@ -303,6 +405,10 @@ void AArch64PassConfig::addPreRegAlloc() {
}
void AArch64PassConfig::addPostRegAlloc() {
+ // Remove redundant copy instructions.
+ if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
+ addPass(createAArch64RedundantCopyEliminationPass());
+
// Change dead register definitions to refer to the zero register.
if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
addPass(createAArch64DeadRegisterDefinitions());
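Note on the ExtraSteps constants in initReciprocals above: with quadratic
convergence, each Newton-Raphson iteration doubles the number of correct
bits, starting from the architected 2^-8 estimate. A quick check of the
arithmetic (illustrative code, not part of the patch):

#include <cassert>

int main() {
  unsigned GoodBits = 8; // minimum architected accuracy of the estimate
  unsigned StepsF = 0, StepsD = 0;
  for (; GoodBits < 24; ++StepsF, ++StepsD) // float: 23 mantissa bits + 1
    GoodBits *= 2;
  for (; GoodBits < 53; ++StepsD)           // double: 52 mantissa bits + 1
    GoodBits *= 2;
  assert(StepsF == 2 && StepsD == 3);       // matches ExtraStepsF/ExtraStepsD
}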
diff --git a/lib/Target/AArch64/AArch64TargetMachine.h b/lib/Target/AArch64/AArch64TargetMachine.h
index 8d49a29386ac..b44107b065bd 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.h
+++ b/lib/Target/AArch64/AArch64TargetMachine.h
@@ -29,7 +29,7 @@ protected:
public:
AArch64TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL, bool IsLittleEndian);
~AArch64TargetMachine() override;
@@ -46,28 +46,28 @@ public:
}
private:
- bool isLittle;
+ AArch64Subtarget Subtarget;
};
-// AArch64leTargetMachine - AArch64 little endian target machine.
+// AArch64 little endian target machine.
//
class AArch64leTargetMachine : public AArch64TargetMachine {
virtual void anchor();
public:
AArch64leTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
-// AArch64beTargetMachine - AArch64 big endian target machine.
+// AArch64 big endian target machine.
//
class AArch64beTargetMachine : public AArch64TargetMachine {
virtual void anchor();
public:
AArch64beTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
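Note: the GISelAccessor indirection introduced in the .cpp changes above lets
a build configured without LLVM_BUILD_GLOBAL_ISEL keep the same subtarget
interface; the stub base class can simply answer nullptr. A stripped-down
sketch of the pattern (placeholder types, not the LLVM headers):

#include <cassert>
#include <memory>

struct CallLowering {}; // stand-in for llvm::CallLowering

struct GISelAccessor { // stub used when GlobalISel is not built
  virtual ~GISelAccessor() = default;
  virtual const CallLowering *getCallLowering() const { return nullptr; }
};

struct ActualAccessor : GISelAccessor { // installed when GlobalISel is built
  std::unique_ptr<CallLowering> CL = std::make_unique<CallLowering>();
  const CallLowering *getCallLowering() const override { return CL.get(); }
};

int main() {
  GISelAccessor Stub;
  ActualAccessor Real;
  assert(!Stub.getCallLowering() && Real.getCallLowering());
}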
diff --git a/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 9af0e6444789..ecf4d93068a4 100644
--- a/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -291,6 +291,61 @@ int AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
+int AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode, Type *Dst,
+ VectorType *VecTy,
+ unsigned Index) {
+
+ // Make sure we were given a valid extend opcode.
+ assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
+ "Invalid opcode");
+
+ // We are extending an element we extract from a vector, so the source type
+ // of the extend is the element type of the vector.
+ auto *Src = VecTy->getElementType();
+
+ // Sign- and zero-extends are for integer types only.
+ assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");
+
+ // Get the cost for the extract. We compute the cost (if any) for the extend
+ // below.
+ auto Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);
+
+ // Legalize the types.
+ auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
+ auto DstVT = TLI->getValueType(DL, Dst);
+ auto SrcVT = TLI->getValueType(DL, Src);
+
+ // If the resulting type is still a vector and the destination type is legal,
+ // we may get the extension for free. If not, get the default cost for the
+ // extend.
+ if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
+ return Cost + getCastInstrCost(Opcode, Dst, Src);
+
+ // The destination type should be larger than the element type. If not, get
+ // the default cost for the extend.
+ if (DstVT.getSizeInBits() < SrcVT.getSizeInBits())
+ return Cost + getCastInstrCost(Opcode, Dst, Src);
+
+ switch (Opcode) {
+ default:
+ llvm_unreachable("Opcode should be either SExt or ZExt");
+
+ // For sign-extends, we only need a smov, which performs the extension
+ // automatically.
+ case Instruction::SExt:
+ return Cost;
+
+ // For zero-extends, the extend is performed automatically by a umov unless
+ // the destination type is i64 and the element type is i8 or i16.
+ case Instruction::ZExt:
+ if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
+ return Cost;
+ }
+
+ // If we are unable to perform the extend for free, get the default cost.
+ return Cost + getCastInstrCost(Opcode, Dst, Src);
+}
+
int AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index) {
assert(Val->isVectorTy() && "This must be a vector type");
@@ -313,7 +368,7 @@ int AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
}
// All other insert/extracts cost this much.
- return 3;
+ return ST->getVectorInsertExtractBaseCost();
}
int AArch64TTIImpl::getArithmeticInstrCost(
@@ -472,9 +527,7 @@ int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
}
unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
- if (ST->isCortexA57())
- return 4;
- return 2;
+ return ST->getMaxInterleaveFactor();
}
void AArch64TTIImpl::getUnrollingPreferences(Loop *L,
@@ -571,3 +624,19 @@ bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
}
return true;
}
+
+unsigned AArch64TTIImpl::getCacheLineSize() {
+ return ST->getCacheLineSize();
+}
+
+unsigned AArch64TTIImpl::getPrefetchDistance() {
+ return ST->getPrefetchDistance();
+}
+
+unsigned AArch64TTIImpl::getMinPrefetchStride() {
+ return ST->getMinPrefetchStride();
+}
+
+unsigned AArch64TTIImpl::getMaxPrefetchIterationsAhead() {
+ return ST->getMaxPrefetchIterationsAhead();
+}
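Note on the free-extension rule in getExtractWithExtendCost above: smov
sign-extends the extracted lane as part of the move, while umov zero-extends
only to 32 bits, so a zero-extend is charged exactly when the destination is
i64 and the element is narrower than i32. A standalone restatement of that
decision (assumption-level sketch, not the LLVM API):

#include <cassert>

enum class Ext { SExt, ZExt };

// True when the extend folds into the lane move (smov/umov) for free.
static bool extendIsFree(Ext Kind, unsigned DstBits, unsigned SrcBits) {
  if (Kind == Ext::SExt)
    return true;                         // smov sign-extends to 32 or 64 bits
  return DstBits != 64 || SrcBits == 32; // umov zero-extends to 32 bits only
}

int main() {
  assert(extendIsFree(Ext::SExt, 64, 8));   // smov x0, v0.b[i]
  assert(extendIsFree(Ext::ZExt, 32, 16));  // umov w0, v0.h[i]
  assert(!extendIsFree(Ext::ZExt, 64, 16)); // charged per the rule above
}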
diff --git a/lib/Target/AArch64/AArch64TargetTransformInfo.h b/lib/Target/AArch64/AArch64TargetTransformInfo.h
index ec58c4fe309f..4f2e8310d769 100644
--- a/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -99,6 +99,9 @@ public:
int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
+ int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
+ unsigned Index);
+
int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
int getArithmeticInstrCost(
@@ -127,6 +130,14 @@ public:
int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
ArrayRef<unsigned> Indices, unsigned Alignment,
unsigned AddressSpace);
+
+ unsigned getCacheLineSize();
+
+ unsigned getPrefetchDistance();
+
+ unsigned getMinPrefetchStride();
+
+ unsigned getMaxPrefetchIterationsAhead();
/// @}
};
diff --git a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 394c8e78581f..aebc370333e3 100644
--- a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -13,7 +13,6 @@
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
@@ -24,13 +23,14 @@
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>
@@ -70,6 +70,8 @@ private:
bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
bool showMatchError(SMLoc Loc, unsigned ErrCode);
+ bool parseDirectiveArch(SMLoc L);
+ bool parseDirectiveCPU(SMLoc L);
bool parseDirectiveWord(unsigned Size, SMLoc L);
bool parseDirectiveInst(SMLoc L);
@@ -866,14 +868,7 @@ public:
if (!CE) return false;
uint64_t Value = CE->getValue();
- if (RegWidth == 32)
- Value &= 0xffffffffULL;
-
- // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
- if (Value == 0 && Shift != 0)
- return false;
-
- return (Value & ~(0xffffULL << Shift)) == 0;
+ return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
}
template<int RegWidth, int Shift>
@@ -884,16 +879,7 @@ public:
if (!CE) return false;
uint64_t Value = CE->getValue();
- // MOVZ takes precedence over MOVN.
- for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
- if ((Value & ~(0xffffULL << MOVZShift)) == 0)
- return false;
-
- Value = ~Value;
- if (RegWidth == 32)
- Value &= 0xffffffffULL;
-
- return (Value & ~(0xffffULL << Shift)) == 0;
+ return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
}
bool isFPImm() const { return Kind == k_FPImm; }
@@ -2087,12 +2073,9 @@ AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
return MatchOperand_ParseFail;
}
- bool Valid;
- auto Mapper = AArch64PRFM::PRFMMapper();
- StringRef Name =
- Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
- Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
- S, getContext()));
+ auto PRFM = AArch64PRFM::lookupPRFMByEncoding(MCE->getValue());
+ Operands.push_back(AArch64Operand::CreatePrefetch(
+ prfop, PRFM ? PRFM->Name : "", S, getContext()));
return MatchOperand_Success;
}
@@ -2101,18 +2084,15 @@ AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
return MatchOperand_ParseFail;
}
- bool Valid;
- auto Mapper = AArch64PRFM::PRFMMapper();
- unsigned prfop =
- Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
- if (!Valid) {
+ auto PRFM = AArch64PRFM::lookupPRFMByName(Tok.getString());
+ if (!PRFM) {
TokError("pre-fetch hint expected");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat identifier token.
- Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
- S, getContext()));
+ Operands.push_back(AArch64Operand::CreatePrefetch(
+ PRFM->Encoding, Tok.getString(), S, getContext()));
return MatchOperand_Success;
}
@@ -2127,18 +2107,15 @@ AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
return MatchOperand_ParseFail;
}
- bool Valid;
- auto Mapper = AArch64PSBHint::PSBHintMapper();
- unsigned psbhint =
- Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
- if (!Valid) {
+ auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
+ if (!PSB) {
TokError("invalid operand for instruction");
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat identifier token.
- Operands.push_back(AArch64Operand::CreatePSBHint(psbhint, Tok.getString(),
- S, getContext()));
+ Operands.push_back(AArch64Operand::CreatePSBHint(
+ PSB->Encoding, Tok.getString(), S, getContext()));
return MatchOperand_Success;
}
@@ -2762,12 +2739,9 @@ AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
Error(ExprLoc, "barrier operand out of range");
return MatchOperand_ParseFail;
}
- bool Valid;
- auto Mapper = AArch64DB::DBarrierMapper();
- StringRef Name =
- Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
- Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
- ExprLoc, getContext()));
+ auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
+ Operands.push_back(AArch64Operand::CreateBarrier(
+ MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
return MatchOperand_Success;
}
@@ -2776,23 +2750,20 @@ AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
return MatchOperand_ParseFail;
}
- bool Valid;
- auto Mapper = AArch64DB::DBarrierMapper();
- unsigned Opt =
- Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
- if (!Valid) {
+ auto DB = AArch64DB::lookupDBByName(Tok.getString());
+ if (!DB) {
TokError("invalid barrier option name");
return MatchOperand_ParseFail;
}
// The only valid named option for ISB is 'sy'
- if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
+ if (Mnemonic == "isb" && DB->Encoding != AArch64DB::sy) {
TokError("'sy' or #imm operand expected");
return MatchOperand_ParseFail;
}
- Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
- getLoc(), getContext()));
+ Operands.push_back(AArch64Operand::CreateBarrier(
+ DB->Encoding, Tok.getString(), getLoc(), getContext()));
Parser.Lex(); // Consume the option
return MatchOperand_Success;
@@ -2806,28 +2777,22 @@ AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
if (Tok.isNot(AsmToken::Identifier))
return MatchOperand_NoMatch;
- bool IsKnown;
- auto MRSMapper = AArch64SysReg::MRSMapper();
- uint32_t MRSReg = MRSMapper.fromString(Tok.getString(),
- getSTI().getFeatureBits(), IsKnown);
- assert(IsKnown == (MRSReg != -1U) &&
- "register should be -1 if and only if it's unknown");
-
- auto MSRMapper = AArch64SysReg::MSRMapper();
- uint32_t MSRReg = MSRMapper.fromString(Tok.getString(),
- getSTI().getFeatureBits(), IsKnown);
- assert(IsKnown == (MSRReg != -1U) &&
- "register should be -1 if and only if it's unknown");
-
- auto PStateMapper = AArch64PState::PStateMapper();
- uint32_t PStateField =
- PStateMapper.fromString(Tok.getString(),
- getSTI().getFeatureBits(), IsKnown);
- assert(IsKnown == (PStateField != -1U) &&
- "register should be -1 if and only if it's unknown");
-
- Operands.push_back(AArch64Operand::CreateSysReg(
- Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
+ int MRSReg, MSRReg;
+ auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
+ if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
+ MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
+ MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
+ } else
+ MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
+
+ auto PState = AArch64PState::lookupPStateByName(Tok.getString());
+ unsigned PStateImm = -1;
+ if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
+ PStateImm = PState->Encoding;
+
+ Operands.push_back(
+ AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
+ PStateImm, getContext()));
Parser.Lex(); // Eat identifier
return MatchOperand_Success;
@@ -4195,6 +4160,10 @@ bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
StringRef IDVal = DirectiveID.getIdentifier();
SMLoc Loc = DirectiveID.getLoc();
+ if (IDVal == ".arch")
+ return parseDirectiveArch(Loc);
+ if (IDVal == ".cpu")
+ return parseDirectiveCPU(Loc);
if (IDVal == ".hword")
return parseDirectiveWord(2, Loc);
if (IDVal == ".word")
@@ -4216,6 +4185,99 @@ bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
return parseDirectiveLOH(IDVal, Loc);
}
+static const struct {
+ const char *Name;
+ const FeatureBitset Features;
+} ExtensionMap[] = {
+ { "crc", {AArch64::FeatureCRC} },
+ { "crypto", {AArch64::FeatureCrypto} },
+ { "fp", {AArch64::FeatureFPARMv8} },
+ { "simd", {AArch64::FeatureNEON} },
+
+ // FIXME: Unsupported extensions
+ { "lse", {} },
+ { "pan", {} },
+ { "lor", {} },
+ { "rdma", {} },
+ { "profile", {} },
+};
+
+/// parseDirectiveArch
+/// ::= .arch token
+bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
+ SMLoc ArchLoc = getLoc();
+
+ StringRef Arch, ExtensionString;
+ std::tie(Arch, ExtensionString) =
+ getParser().parseStringToEndOfStatement().trim().split('+');
+
+ unsigned ID = AArch64::parseArch(Arch);
+ if (ID == ARM::AK_INVALID) {
+ Error(ArchLoc, "unknown arch name");
+ return false;
+ }
+
+ MCSubtargetInfo &STI = copySTI();
+ STI.setDefaultFeatures("", "");
+ if (!ExtensionString.empty())
+ STI.setDefaultFeatures("", ("+" + ExtensionString).str());
+ setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
+
+ return false;
+}
+
+/// parseDirectiveCPU
+/// ::= .cpu id
+bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
+ SMLoc CPULoc = getLoc();
+
+ StringRef CPU, ExtensionString;
+ std::tie(CPU, ExtensionString) =
+ getParser().parseStringToEndOfStatement().trim().split('+');
+
+ SmallVector<StringRef, 4> RequestedExtensions;
+ if (!ExtensionString.empty())
+ ExtensionString.split(RequestedExtensions, '+');
+
+  // FIXME: This is using tablegen data, but should be moved to ARMTargetParser
+ // once that is tablegen'ed
+ if (!getSTI().isCPUStringValid(CPU)) {
+ Error(CPULoc, "unknown CPU name");
+ return false;
+ }
+
+ MCSubtargetInfo &STI = copySTI();
+ STI.setDefaultFeatures(CPU, "");
+
+ FeatureBitset Features = STI.getFeatureBits();
+ for (auto Name : RequestedExtensions) {
+ bool EnableFeature = true;
+
+ if (Name.startswith_lower("no")) {
+ EnableFeature = false;
+ Name = Name.substr(2);
+ }
+
+ for (const auto &Extension : ExtensionMap) {
+ if (Extension.Name != Name)
+ continue;
+
+ if (Extension.Features.none())
+ report_fatal_error("unsupported architectural extension: " + Name);
+
+ FeatureBitset ToggleFeatures = EnableFeature
+ ? (~Features & Extension.Features)
+ : ( Features & Extension.Features);
+ uint64_t Features =
+ ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
+ setAvailableFeatures(Features);
+
+ break;
+ }
+ }
+ return false;
+}
+
/// parseDirectiveWord
/// ::= .word [ expression (, expression)* ]
bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
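Note on the ToggleFeatures expression in parseDirectiveCPU above: it selects
exactly the bits that must change -- the extension bits not yet set when
enabling, or the bits currently set when disabling -- so that the subsequent
toggle lands on the requested state. A small demonstration with std::bitset
standing in for FeatureBitset (illustrative only):

#include <bitset>
#include <cassert>

int main() {
  std::bitset<8> Features("00000101"), Ext("00000110");

  // Enable: flip only the extension bits that are currently clear.
  std::bitset<8> Toggle = ~Features & Ext;
  Features ^= Toggle; // what STI.ToggleFeature amounts to
  assert(Features == std::bitset<8>("00000111"));

  // Disable: flip only the extension bits that are currently set.
  Toggle = Features & Ext;
  Features ^= Toggle;
  assert(Features == std::bitset<8>("00000001"));
}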
diff --git a/lib/Target/AArch64/AsmParser/Makefile b/lib/Target/AArch64/AsmParser/Makefile
deleted file mode 100644
index 00268c76f8e8..000000000000
--- a/lib/Target/AArch64/AsmParser/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/AArch64/AsmParser/Makefile ---------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAArch64AsmParser
-
-# Hack: we need to include 'main' ARM target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AArch64/CMakeLists.txt b/lib/Target/AArch64/CMakeLists.txt
index f26327ff84ad..a79960ea9605 100644
--- a/lib/Target/AArch64/CMakeLists.txt
+++ b/lib/Target/AArch64/CMakeLists.txt
@@ -12,8 +12,25 @@ tablegen(LLVM AArch64GenFastISel.inc -gen-fast-isel)
tablegen(LLVM AArch64GenCallingConv.inc -gen-callingconv)
tablegen(LLVM AArch64GenSubtargetInfo.inc -gen-subtarget)
tablegen(LLVM AArch64GenDisassemblerTables.inc -gen-disassembler)
+tablegen(LLVM AArch64GenSystemOperands.inc -gen-searchable-tables)
+
add_public_tablegen_target(AArch64CommonTableGen)
+# List of all GlobalISel files.
+set(GLOBAL_ISEL_FILES
+ AArch64CallLowering.cpp
+ AArch64RegisterBankInfo.cpp
+ )
+
+# Add GlobalISel files to the dependencies if the user wants to build it.
+if(LLVM_BUILD_GLOBAL_ISEL)
+ set(GLOBAL_ISEL_BUILD_FILES ${GLOBAL_ISEL_FILES})
+else()
+ set(GLOBAL_ISEL_BUILD_FILES"")
+ set(LLVM_OPTIONAL_SOURCES LLVMGlobalISel ${GLOBAL_ISEL_FILES})
+endif()
+
+
add_llvm_target(AArch64CodeGen
AArch64A57FPLoadBalancing.cpp
AArch64AddressTypePromotion.cpp
@@ -29,6 +46,7 @@ add_llvm_target(AArch64CodeGen
AArch64A53Fix835769.cpp
AArch64FrameLowering.cpp
AArch64ConditionOptimizer.cpp
+ AArch64RedundantCopyElimination.cpp
AArch64ISelDAGToDAG.cpp
AArch64ISelLowering.cpp
AArch64InstrInfo.cpp
@@ -43,6 +61,7 @@ add_llvm_target(AArch64CodeGen
AArch64TargetMachine.cpp
AArch64TargetObjectFile.cpp
AArch64TargetTransformInfo.cpp
+ ${GLOBAL_ISEL_BUILD_FILES}
)
add_dependencies(LLVMAArch64CodeGen intrinsics_gen)
diff --git a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
index f1f968e73123..fe6ea31b9061 100644
--- a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
+++ b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
@@ -1523,13 +1523,12 @@ static DecodeStatus DecodeSystemPStateInstruction(llvm::MCInst &Inst,
Inst.addOperand(MCOperand::createImm(pstate_field));
Inst.addOperand(MCOperand::createImm(crm));
- bool ValidNamed;
- const AArch64Disassembler *Dis =
+ const AArch64Disassembler *Dis =
static_cast<const AArch64Disassembler *>(Decoder);
- (void)AArch64PState::PStateMapper().toString(pstate_field,
- Dis->getSubtargetInfo().getFeatureBits(), ValidNamed);
-
- return ValidNamed ? Success : Fail;
+ auto PState = AArch64PState::lookupPStateByEncoding(pstate_field);
+ if (PState && PState->haveFeatures(Dis->getSubtargetInfo().getFeatureBits()))
+ return Success;
+ return Fail;
}
static DecodeStatus DecodeTestAndBranch(llvm::MCInst &Inst, uint32_t insn,
@@ -1574,7 +1573,7 @@ static DecodeStatus DecodeWSeqPairsClassRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
- return DecodeGPRSeqPairsClassRegisterClass(Inst,
+ return DecodeGPRSeqPairsClassRegisterClass(Inst,
AArch64::WSeqPairsClassRegClassID,
RegNo, Addr, Decoder);
}
@@ -1583,7 +1582,7 @@ static DecodeStatus DecodeXSeqPairsClassRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Addr,
const void *Decoder) {
- return DecodeGPRSeqPairsClassRegisterClass(Inst,
+ return DecodeGPRSeqPairsClassRegisterClass(Inst,
AArch64::XSeqPairsClassRegClassID,
RegNo, Addr, Decoder);
}
diff --git a/lib/Target/AArch64/Disassembler/AArch64Disassembler.h b/lib/Target/AArch64/Disassembler/AArch64Disassembler.h
index 7fb57adfeeba..e475e505e7d1 100644
--- a/lib/Target/AArch64/Disassembler/AArch64Disassembler.h
+++ b/lib/Target/AArch64/Disassembler/AArch64Disassembler.h
@@ -13,7 +13,7 @@
#ifndef LLVM_LIB_TARGET_AARCH64_DISASSEMBLER_AARCH64DISASSEMBLER_H
#define LLVM_LIB_TARGET_AARCH64_DISASSEMBLER_AARCH64DISASSEMBLER_H
-#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
namespace llvm {
diff --git a/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp b/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
index 82bc949927ce..19d0ba2e1c41 100644
--- a/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
+++ b/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
@@ -134,9 +134,11 @@ bool AArch64ExternalSymbolizer::tryAddingSymbolicOperand(
if (ReferenceType == LLVMDisassembler_ReferenceType_Out_LitPool_SymAddr)
CommentStream << "literal pool symbol address: " << ReferenceName;
else if (ReferenceType ==
- LLVMDisassembler_ReferenceType_Out_LitPool_CstrAddr)
- CommentStream << "literal pool for: \"" << ReferenceName << "\"";
- else if (ReferenceType ==
+ LLVMDisassembler_ReferenceType_Out_LitPool_CstrAddr) {
+ CommentStream << "literal pool for: \"";
+ CommentStream.write_escaped(ReferenceName);
+ CommentStream << "\"";
+ } else if (ReferenceType ==
LLVMDisassembler_ReferenceType_Out_Objc_CFString_Ref)
CommentStream << "Objc cfstring ref: @\"" << ReferenceName << "\"";
else if (ReferenceType ==
diff --git a/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.h b/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.h
index 12b8450b13c6..49e844963797 100644
--- a/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.h
+++ b/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.h
@@ -14,7 +14,7 @@
#ifndef LLVM_LIB_TARGET_AARCH64_DISASSEMBLER_AARCH64EXTERNALSYMBOLIZER_H
#define LLVM_LIB_TARGET_AARCH64_DISASSEMBLER_AARCH64EXTERNALSYMBOLIZER_H
-#include "llvm/MC/MCExternalSymbolizer.h"
+#include "llvm/MC/MCDisassembler/MCExternalSymbolizer.h"
namespace llvm {
diff --git a/lib/Target/AArch64/Disassembler/Makefile b/lib/Target/AArch64/Disassembler/Makefile
deleted file mode 100644
index 741bb817a633..000000000000
--- a/lib/Target/AArch64/Disassembler/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/AArch64/Disassembler/Makefile ------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAArch64Disassembler
-
-# Hack: we need to include 'main' arm target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
index d8a810824370..b4f85204714f 100644
--- a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
+++ b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
@@ -219,6 +219,54 @@ void AArch64InstPrinter::printInst(const MCInst *MI, raw_ostream &O,
return;
}
+ // MOVZ, MOVN and "ORR wzr, #imm" instructions are aliases for MOV, but their
+ // domains overlap so they need to be prioritized. The chain is "MOVZ lsl #0 >
+  // MOVZ lsl #N > MOVN lsl #0 > MOVN lsl #N > ORR". The highest-priority
+  // instruction in the chain that can represent the move is printed as the
+  // MOV alias; the rest get printed normally.
+ if ((Opcode == AArch64::MOVZXi || Opcode == AArch64::MOVZWi) &&
+ MI->getOperand(1).isImm() && MI->getOperand(2).isImm()) {
+ int RegWidth = Opcode == AArch64::MOVZXi ? 64 : 32;
+ int Shift = MI->getOperand(2).getImm();
+ uint64_t Value = (uint64_t)MI->getOperand(1).getImm() << Shift;
+
+    if (AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth)) {
+ O << "\tmov\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #"
+ << formatImm(SignExtend64(Value, RegWidth));
+ return;
+ }
+ }
+
+ if ((Opcode == AArch64::MOVNXi || Opcode == AArch64::MOVNWi) &&
+ MI->getOperand(1).isImm() && MI->getOperand(2).isImm()) {
+ int RegWidth = Opcode == AArch64::MOVNXi ? 64 : 32;
+ int Shift = MI->getOperand(2).getImm();
+ uint64_t Value = ~((uint64_t)MI->getOperand(1).getImm() << Shift);
+ if (RegWidth == 32)
+ Value = Value & 0xffffffff;
+
+ if (AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth)) {
+ O << "\tmov\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #"
+ << formatImm(SignExtend64(Value, RegWidth));
+ return;
+ }
+ }
+
+ if ((Opcode == AArch64::ORRXri || Opcode == AArch64::ORRWri) &&
+ (MI->getOperand(1).getReg() == AArch64::XZR ||
+ MI->getOperand(1).getReg() == AArch64::WZR) &&
+ MI->getOperand(2).isImm()) {
+ int RegWidth = Opcode == AArch64::ORRXri ? 64 : 32;
+ uint64_t Value = AArch64_AM::decodeLogicalImmediate(
+ MI->getOperand(2).getImm(), RegWidth);
+ if (!AArch64_AM::isAnyMOVWMovAlias(Value, RegWidth)) {
+ O << "\tmov\t" << getRegisterName(MI->getOperand(0).getReg()) << ", #"
+ << formatImm(SignExtend64(Value, RegWidth));
+ return;
+ }
+ }
+
if (!printAliasInstr(MI, STI, O))
printInstruction(MI, STI, O);
@@ -928,14 +976,21 @@ void AArch64InstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
unsigned Reg = Op.getReg();
O << getRegisterName(Reg);
} else if (Op.isImm()) {
- O << '#' << Op.getImm();
+ printImm(MI, OpNo, STI, O);
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
Op.getExpr()->print(O, &MAI);
}
}
-void AArch64InstPrinter::printHexImm(const MCInst *MI, unsigned OpNo,
+void AArch64InstPrinter::printImm(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ O << "#" << formatImm(Op.getImm());
+}
+
+void AArch64InstPrinter::printImmHex(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
@@ -981,12 +1036,12 @@ void AArch64InstPrinter::printAddSubImm(const MCInst *MI, unsigned OpNum,
assert(Val == MO.getImm() && "Add/sub immediate out of range!");
unsigned Shift =
AArch64_AM::getShiftValue(MI->getOperand(OpNum + 1).getImm());
- O << '#' << Val;
+ O << '#' << formatImm(Val);
if (Shift != 0)
printShifter(MI, OpNum + 1, STI, O);
if (CommentStream)
- *CommentStream << '=' << (Val << Shift) << '\n';
+ *CommentStream << '=' << formatImm(Val << Shift) << '\n';
} else {
assert(MO.isExpr() && "Unexpected operand type!");
MO.getExpr()->print(O, &MAI);
@@ -1104,14 +1159,14 @@ template<int Scale>
void AArch64InstPrinter::printImmScale(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
- O << '#' << Scale * MI->getOperand(OpNum).getImm();
+ O << '#' << formatImm(Scale * MI->getOperand(OpNum).getImm());
}
void AArch64InstPrinter::printUImm12Offset(const MCInst *MI, unsigned OpNum,
unsigned Scale, raw_ostream &O) {
const MCOperand MO = MI->getOperand(OpNum);
if (MO.isImm()) {
- O << "#" << (MO.getImm() * Scale);
+ O << "#" << formatImm(MO.getImm() * Scale);
} else {
assert(MO.isExpr() && "Unexpected operand type!");
MO.getExpr()->print(O, &MAI);
@@ -1123,7 +1178,7 @@ void AArch64InstPrinter::printAMIndexedWB(const MCInst *MI, unsigned OpNum,
const MCOperand MO1 = MI->getOperand(OpNum + 1);
O << '[' << getRegisterName(MI->getOperand(OpNum).getReg());
if (MO1.isImm()) {
- O << ", #" << (MO1.getImm() * Scale);
+ O << ", #" << formatImm(MO1.getImm() * Scale);
} else {
assert(MO1.isExpr() && "Unexpected operand type!");
O << ", ";
@@ -1136,26 +1191,22 @@ void AArch64InstPrinter::printPrefetchOp(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned prfop = MI->getOperand(OpNum).getImm();
- bool Valid;
- StringRef Name =
- AArch64PRFM::PRFMMapper().toString(prfop, STI.getFeatureBits(), Valid);
- if (Valid)
- O << Name;
+ auto PRFM = AArch64PRFM::lookupPRFMByEncoding(prfop);
+ if (PRFM)
+ O << PRFM->Name;
else
- O << '#' << prfop;
+ O << '#' << formatImm(prfop);
}
void AArch64InstPrinter::printPSBHintOp(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned psbhintop = MI->getOperand(OpNum).getImm();
- bool Valid;
- StringRef Name =
- AArch64PSBHint::PSBHintMapper().toString(psbhintop, STI.getFeatureBits(), Valid);
- if (Valid)
- O << Name;
+ auto PSB = AArch64PSBHint::lookupPSBByEncoding(psbhintop);
+ if (PSB)
+ O << PSB->Name;
else
- O << '#' << psbhintop;
+ O << '#' << formatImm(psbhintop);
}
void AArch64InstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum,
@@ -1310,7 +1361,7 @@ void AArch64InstPrinter::printAlignedLabel(const MCInst *MI, unsigned OpNum,
// If the label has already been resolved to an immediate offset (say, when
// we're running the disassembler), just print the immediate.
if (Op.isImm()) {
- O << "#" << (Op.getImm() * 4);
+ O << "#" << formatImm(Op.getImm() * 4);
return;
}
@@ -1335,7 +1386,7 @@ void AArch64InstPrinter::printAdrpLabel(const MCInst *MI, unsigned OpNum,
// If the label has already been resolved to an immediate offset (say, when
// we're running the disassembler), just print the immediate.
if (Op.isImm()) {
- O << "#" << (Op.getImm() * (1 << 12));
+ O << "#" << formatImm(Op.getImm() * (1 << 12));
return;
}
@@ -1349,15 +1400,15 @@ void AArch64InstPrinter::printBarrierOption(const MCInst *MI, unsigned OpNo,
unsigned Val = MI->getOperand(OpNo).getImm();
unsigned Opcode = MI->getOpcode();
- bool Valid;
StringRef Name;
- if (Opcode == AArch64::ISB)
- Name = AArch64ISB::ISBMapper().toString(Val, STI.getFeatureBits(),
- Valid);
- else
- Name = AArch64DB::DBarrierMapper().toString(Val, STI.getFeatureBits(),
- Valid);
- if (Valid)
+ if (Opcode == AArch64::ISB) {
+ auto ISB = AArch64ISB::lookupISBByEncoding(Val);
+ Name = ISB ? ISB->Name : "";
+ } else {
+ auto DB = AArch64DB::lookupDBByEncoding(Val);
+ Name = DB ? DB->Name : "";
+ }
+ if (!Name.empty())
O << Name;
else
O << "#" << Val;
@@ -1368,10 +1419,19 @@ void AArch64InstPrinter::printMRSSystemRegister(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
unsigned Val = MI->getOperand(OpNo).getImm();
- auto Mapper = AArch64SysReg::MRSMapper();
- std::string Name = Mapper.toString(Val, STI.getFeatureBits());
+ // Horrible hack for the one register that has identical encodings but
+ // different names in MSR and MRS. Because of this, one of MRS and MSR is
+  // going to get the wrong entry.
+ if (Val == AArch64SysReg::DBGDTRRX_EL0) {
+ O << "DBGDTRRX_EL0";
+ return;
+ }
- O << StringRef(Name).upper();
+ const AArch64SysReg::SysReg *Reg = AArch64SysReg::lookupSysRegByEncoding(Val);
+ if (Reg && Reg->Readable && Reg->haveFeatures(STI.getFeatureBits()))
+ O << Reg->Name;
+ else
+ O << AArch64SysReg::genericRegisterString(Val);
}
void AArch64InstPrinter::printMSRSystemRegister(const MCInst *MI, unsigned OpNo,
@@ -1379,10 +1439,19 @@ void AArch64InstPrinter::printMSRSystemRegister(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
unsigned Val = MI->getOperand(OpNo).getImm();
- auto Mapper = AArch64SysReg::MSRMapper();
- std::string Name = Mapper.toString(Val, STI.getFeatureBits());
+ // Horrible hack for the one register that has identical encodings but
+ // different names in MSR and MRS. Because of this, one of MRS and MSR is
+  // going to get the wrong entry.
+ if (Val == AArch64SysReg::DBGDTRTX_EL0) {
+ O << "DBGDTRTX_EL0";
+ return;
+ }
- O << StringRef(Name).upper();
+ const AArch64SysReg::SysReg *Reg = AArch64SysReg::lookupSysRegByEncoding(Val);
+ if (Reg && Reg->Writeable && Reg->haveFeatures(STI.getFeatureBits()))
+ O << Reg->Name;
+ else
+ O << AArch64SysReg::genericRegisterString(Val);
}
void AArch64InstPrinter::printSystemPStateField(const MCInst *MI, unsigned OpNo,
@@ -1390,13 +1459,11 @@ void AArch64InstPrinter::printSystemPStateField(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
unsigned Val = MI->getOperand(OpNo).getImm();
- bool Valid;
- StringRef Name =
- AArch64PState::PStateMapper().toString(Val, STI.getFeatureBits(), Valid);
- if (Valid)
- O << Name.upper();
+ auto PState = AArch64PState::lookupPStateByEncoding(Val);
+ if (PState && PState->haveFeatures(STI.getFeatureBits()))
+ O << PState->Name;
else
- O << "#" << Val;
+ O << "#" << formatImm(Val);
}
void AArch64InstPrinter::printSIMDType10Operand(const MCInst *MI, unsigned OpNo,
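Note: for the W-register MOVN branch above, the printed immediate is the
32-bit result pattern sign-extended to 64 bits, so "movn w0, #1" comes out
as "mov w0, #-2". A worked check of that arithmetic (signExtend64 here
mirrors llvm::SignExtend64 but is a local sketch):

#include <cassert>
#include <cstdint>

static int64_t signExtend64(uint64_t X, unsigned Bits) {
  return int64_t(X << (64 - Bits)) >> (64 - Bits);
}

int main() {
  unsigned Shift = 0;
  uint64_t Value = ~(uint64_t(1) << Shift) & 0xffffffff; // 0xFFFFFFFE
  assert(signExtend64(Value, 32) == -2); // printed as: mov w0, #-2
}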
diff --git a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
index ea68d9848b42..65dca99ed04e 100644
--- a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
+++ b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
@@ -49,7 +49,9 @@ protected:
// Operand printers
void printOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
raw_ostream &O);
- void printHexImm(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
+ void printImm(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
+ raw_ostream &O);
+ void printImmHex(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
raw_ostream &O);
void printPostIncOperand(const MCInst *MI, unsigned OpNo, unsigned Imm,
raw_ostream &O);
diff --git a/lib/Target/AArch64/InstPrinter/Makefile b/lib/Target/AArch64/InstPrinter/Makefile
deleted file mode 100644
index b17e8d080119..000000000000
--- a/lib/Target/AArch64/InstPrinter/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/AArch64/AsmPrinter/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAArch64AsmPrinter
-
-# Hack: we need to include 'main' arm target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AArch64/LLVMBuild.txt b/lib/Target/AArch64/LLVMBuild.txt
index 642c18394a67..0196c505ba3c 100644
--- a/lib/Target/AArch64/LLVMBuild.txt
+++ b/lib/Target/AArch64/LLVMBuild.txt
@@ -31,5 +31,5 @@ has_jit = 1
type = Library
name = AArch64CodeGen
parent = AArch64
-required_libraries = AArch64AsmPrinter AArch64Desc AArch64Info AArch64Utils Analysis AsmPrinter CodeGen Core MC Scalar SelectionDAG Support Target
+required_libraries = AArch64AsmPrinter AArch64Desc AArch64Info AArch64Utils Analysis AsmPrinter CodeGen Core MC Scalar SelectionDAG Support Target GlobalISel
add_to_library_groups = AArch64
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h b/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
index 648b1dfc8c5e..3e5ef4df4706 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
@@ -753,6 +753,49 @@ static inline uint64_t decodeAdvSIMDModImmType12(uint8_t Imm) {
return (EncVal << 32) | EncVal;
}
+inline static bool isAnyMOVZMovAlias(uint64_t Value, int RegWidth) {
+ for (int Shift = 0; Shift <= RegWidth - 16; Shift += 16)
+ if ((Value & ~(0xffffULL << Shift)) == 0)
+ return true;
+
+ return false;
+}
+
+inline static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth) {
+ if (RegWidth == 32)
+ Value &= 0xffffffffULL;
+
+ // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
+ if (Value == 0 && Shift != 0)
+ return false;
+
+ return (Value & ~(0xffffULL << Shift)) == 0;
+}
+
+inline static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth) {
+ // MOVZ takes precedence over MOVN.
+ if (isAnyMOVZMovAlias(Value, RegWidth))
+ return false;
+
+ Value = ~Value;
+ if (RegWidth == 32)
+ Value &= 0xffffffffULL;
+
+ return isMOVZMovAlias(Value, Shift, RegWidth);
+}
+
+inline static bool isAnyMOVWMovAlias(uint64_t Value, int RegWidth) {
+ if (isAnyMOVZMovAlias(Value, RegWidth))
+ return true;
+
+ // It's not a MOVZ, but it might be a MOVN.
+ Value = ~Value;
+ if (RegWidth == 32)
+ Value &= 0xffffffffULL;
+
+ return isAnyMOVZMovAlias(Value, RegWidth);
+}
+
} // end namespace AArch64_AM
} // end namespace llvm
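
The four predicates added above decide when a plain mov may be printed or parsed as an alias of MOVZ or MOVN, with MOVZ taking precedence. A usage sketch, assuming the header above is reachable on the include path:

#include "MCTargetDesc/AArch64AddressingModes.h"
#include <cassert>
using namespace llvm::AArch64_AM;

int main() {
  // 0xabcd0000 is one 16-bit chunk shifted by 16, so it is a MOVZ alias.
  assert(isMOVZMovAlias(0xabcd0000ULL, /*Shift=*/16, /*RegWidth=*/32));
  // 0xffff5678 is not a MOVZ alias, but its 32-bit complement 0xa987 is,
  // so "mov w0, #0xffff5678" can be encoded as a MOVN alias.
  assert(!isAnyMOVZMovAlias(0xffff5678ULL, 32));
  assert(isMOVNMovAlias(0xffff5678ULL, /*Shift=*/0, /*RegWidth=*/32));
  // #0 canonically uses "lsl #0", so only Shift == 0 matches zero.
  assert(isMOVZMovAlias(0, 0, 64) && !isMOVZMovAlias(0, 16, 64));
}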
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
index 7624c7240d68..27993246eb07 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -12,6 +12,7 @@
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
@@ -28,9 +29,12 @@ namespace {
class AArch64AsmBackend : public MCAsmBackend {
static const unsigned PCRelFlagVal =
MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;
public:
+  bool IsLittleEndian;
- AArch64AsmBackend(const Target &T) : MCAsmBackend() {}
+ AArch64AsmBackend(const Target &T, bool IsLittleEndian)
+ : MCAsmBackend(), IsLittleEndian(IsLittleEndian) {}
unsigned getNumFixupKinds() const override {
return AArch64::NumTargetFixupKinds;
@@ -74,12 +78,15 @@ public:
bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
const MCRelaxableFragment *DF,
const MCAsmLayout &Layout) const override;
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const override;
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override;
bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
void HandleAssemblerFlag(MCAssemblerFlag Flag) {}
unsigned getPointerSize() const { return 8; }
+
+  unsigned getFixupKindContainerSizeInBytes(unsigned Kind) const;
};
} // end anonymous namespace
@@ -129,14 +136,16 @@ static unsigned AdrImmBits(unsigned Value) {
return (hi19 << 5) | (lo2 << 29);
}
-static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) {
+static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
+ MCContext *Ctx) {
+ unsigned Kind = Fixup.getKind();
int64_t SignedValue = static_cast<int64_t>(Value);
switch (Kind) {
default:
llvm_unreachable("Unknown fixup kind!");
case AArch64::fixup_aarch64_pcrel_adr_imm21:
- if (SignedValue > 2097151 || SignedValue < -2097152)
- report_fatal_error("fixup value out of range");
+ if (Ctx && (SignedValue > 2097151 || SignedValue < -2097152))
+ Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
return AdrImmBits(Value & 0x1fffffULL);
case AArch64::fixup_aarch64_pcrel_adrp_imm21:
return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
@@ -144,54 +153,66 @@ static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) {
case AArch64::fixup_aarch64_pcrel_branch19:
// Signed 21-bit immediate
if (SignedValue > 2097151 || SignedValue < -2097152)
- report_fatal_error("fixup value out of range");
+      if (Ctx)
+        Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
+ if (Ctx && (Value & 0x3))
+ Ctx->reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
// Low two bits are not encoded.
return (Value >> 2) & 0x7ffff;
case AArch64::fixup_aarch64_add_imm12:
case AArch64::fixup_aarch64_ldst_imm12_scale1:
// Unsigned 12-bit immediate
- if (Value >= 0x1000)
- report_fatal_error("invalid imm12 fixup value");
+ if (Ctx && Value >= 0x1000)
+ Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
return Value;
case AArch64::fixup_aarch64_ldst_imm12_scale2:
// Unsigned 12-bit immediate which gets multiplied by 2
- if (Value & 1 || Value >= 0x2000)
- report_fatal_error("invalid imm12 fixup value");
+ if (Ctx && (Value >= 0x2000))
+ Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
+ if (Ctx && (Value & 0x1))
+ Ctx->reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
return Value >> 1;
case AArch64::fixup_aarch64_ldst_imm12_scale4:
// Unsigned 12-bit immediate which gets multiplied by 4
- if (Value & 3 || Value >= 0x4000)
- report_fatal_error("invalid imm12 fixup value");
+ if (Ctx && (Value >= 0x4000))
+ Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
+ if (Ctx && (Value & 0x3))
+ Ctx->reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
return Value >> 2;
case AArch64::fixup_aarch64_ldst_imm12_scale8:
// Unsigned 12-bit immediate which gets multiplied by 8
- if (Value & 7 || Value >= 0x8000)
- report_fatal_error("invalid imm12 fixup value");
+ if (Ctx && (Value >= 0x8000))
+ Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
+ if (Ctx && (Value & 0x7))
+ Ctx->reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
return Value >> 3;
case AArch64::fixup_aarch64_ldst_imm12_scale16:
// Unsigned 12-bit immediate which gets multiplied by 16
- if (Value & 15 || Value >= 0x10000)
- report_fatal_error("invalid imm12 fixup value");
+ if (Ctx && (Value >= 0x10000))
+ Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
+ if (Ctx && (Value & 0xf))
+ Ctx->reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
return Value >> 4;
case AArch64::fixup_aarch64_movw:
- report_fatal_error("no resolvable MOVZ/MOVK fixups supported yet");
+ if (Ctx)
+ Ctx->reportError(Fixup.getLoc(),
+ "no resolvable MOVZ/MOVK fixups supported yet");
return Value;
case AArch64::fixup_aarch64_pcrel_branch14:
// Signed 16-bit immediate
- if (SignedValue > 32767 || SignedValue < -32768)
- report_fatal_error("fixup value out of range");
+ if (Ctx && (SignedValue > 32767 || SignedValue < -32768))
+ Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
// Low two bits are not encoded (4-byte alignment assumed).
- if (Value & 0x3)
- report_fatal_error("fixup not sufficiently aligned");
+ if (Ctx && (Value & 0x3))
+ Ctx->reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
return (Value >> 2) & 0x3fff;
case AArch64::fixup_aarch64_pcrel_branch26:
case AArch64::fixup_aarch64_pcrel_call26:
// Signed 28-bit immediate
- if (SignedValue > 134217727 || SignedValue < -134217728)
- report_fatal_error("fixup value out of range");
+ if (Ctx && (SignedValue > 134217727 || SignedValue < -134217728))
+ Ctx->reportError(Fixup.getLoc(), "fixup value out of range");
// Low two bits are not encoded (4-byte alignment assumed).
- if (Value & 0x3)
- report_fatal_error("fixup not sufficiently aligned");
+ if (Ctx && (Value & 0x3))
+ Ctx->reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
return (Value >> 2) & 0x3ffffff;
case FK_Data_1:
case FK_Data_2:
@@ -201,6 +222,45 @@ static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) {
}
}
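
adjustFixupValue now doubles as a validator: called with a non-null MCContext (from processFixupValue, below) it reports recoverable errors at the fixup's source location, while a null context (from applyFixup) means the value was already validated and only the re-encoding is wanted. A self-contained sketch of that pattern, with hypothetical stand-in types:

#include <cstdint>
#include <cstdio>

// Stand-in for MCContext; only the diagnostic sink matters here.
struct Context {
  void reportError(const char *Msg) { std::fprintf(stderr, "error: %s\n", Msg); }
};

// Mirrors the imm12/scale-4 case above: diagnose when a context is
// available, but always return the (possibly bogus) encoding so the
// caller can keep going after reporting.
static uint64_t adjustImm12Scale4(uint64_t Value, Context *Ctx) {
  if (Ctx && Value >= 0x4000)
    Ctx->reportError("fixup value out of range");
  if (Ctx && (Value & 0x3))
    Ctx->reportError("fixup must be 4-byte aligned");
  return Value >> 2;
}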
+/// getFixupKindContainerSizeInBytes - The number of bytes of the container
+/// holding a big-endian fixup value, or 0 if the fixup is little-endian.
+unsigned AArch64AsmBackend::getFixupKindContainerSizeInBytes(unsigned Kind) const {
+ if (IsLittleEndian)
+ return 0;
+
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown fixup kind!");
+
+ case FK_Data_1:
+ return 1;
+ case FK_Data_2:
+ return 2;
+ case FK_Data_4:
+ return 4;
+ case FK_Data_8:
+ return 8;
+
+ case AArch64::fixup_aarch64_tlsdesc_call:
+ case AArch64::fixup_aarch64_movw:
+ case AArch64::fixup_aarch64_pcrel_branch14:
+ case AArch64::fixup_aarch64_add_imm12:
+ case AArch64::fixup_aarch64_ldst_imm12_scale1:
+ case AArch64::fixup_aarch64_ldst_imm12_scale2:
+ case AArch64::fixup_aarch64_ldst_imm12_scale4:
+ case AArch64::fixup_aarch64_ldst_imm12_scale8:
+ case AArch64::fixup_aarch64_ldst_imm12_scale16:
+ case AArch64::fixup_aarch64_ldr_pcrel_imm19:
+ case AArch64::fixup_aarch64_pcrel_branch19:
+ case AArch64::fixup_aarch64_pcrel_adr_imm21:
+ case AArch64::fixup_aarch64_pcrel_adrp_imm21:
+ case AArch64::fixup_aarch64_pcrel_branch26:
+ case AArch64::fixup_aarch64_pcrel_call26:
+ // Instructions are always little endian
+ return 0;
+ }
+}
+
void AArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
unsigned DataSize, uint64_t Value,
bool IsPCRel) const {
@@ -209,7 +269,7 @@ void AArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
return; // Doesn't change encoding.
MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
// Apply any target-specific value adjustments.
- Value = adjustFixupValue(Fixup.getKind(), Value);
+ Value = adjustFixupValue(Fixup, Value, nullptr);
// Shift the value into position.
Value <<= Info.TargetOffset;
@@ -217,10 +277,25 @@ void AArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
unsigned Offset = Fixup.getOffset();
assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");
+  // Container size in bytes for big-endian fixups (0 means little-endian).
+  unsigned FullSizeInBytes = getFixupKindContainerSizeInBytes(Fixup.getKind());
+
// For each byte of the fragment that the fixup touches, mask in the
// bits from the fixup value.
- for (unsigned i = 0; i != NumBytes; ++i)
- Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
+  if (FullSizeInBytes == 0) {
+ // Handle as little-endian
+ for (unsigned i = 0; i != NumBytes; ++i) {
+ Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
+ }
+ } else {
+ // Handle as big-endian
+    assert((Offset + FullSizeInBytes) <= DataSize && "Invalid fixup size!");
+    assert(NumBytes <= FullSizeInBytes && "Invalid fixup size!");
+    for (unsigned i = 0; i != NumBytes; ++i) {
+      unsigned Idx = FullSizeInBytes - 1 - i;
+ Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
+ }
+ }
}
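
On big-endian targets, data fixups are written most-significant byte first within their container, while instruction encodings stay little-endian; that is what the container-size query distinguishes. A standalone sketch of the byte-placement loop above (buffer and sizes illustrative):

#include <cassert>
#include <cstdint>

// OR a fixup value into a buffer. FullSize == 0 means little-endian;
// otherwise it is the big-endian container size in bytes (e.g. 4 for
// FK_Data_4), and the value is mirrored within that container.
static void placeFixup(uint8_t *Data, unsigned Offset, uint64_t Value,
                       unsigned NumBytes, unsigned FullSize) {
  if (FullSize == 0) {
    for (unsigned i = 0; i != NumBytes; ++i)
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
  } else {
    assert(NumBytes <= FullSize && "fixup larger than its container");
    for (unsigned i = 0; i != NumBytes; ++i)
      Data[Offset + FullSize - 1 - i] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}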
bool AArch64AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
@@ -239,6 +314,7 @@ bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
}
void AArch64AsmBackend::relaxInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI,
MCInst &Res) const {
llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}
@@ -264,14 +340,14 @@ namespace CU {
enum CompactUnwindEncodings {
/// \brief A "frameless" leaf function, where no non-volatile registers are
/// saved. The return remains in LR throughout the function.
- UNWIND_AArch64_MODE_FRAMELESS = 0x02000000,
+ UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,
/// \brief No compact unwind encoding available. Instead the low 23-bits of
/// the compact unwind encoding is the offset of the DWARF FDE in the
/// __eh_frame section. This mode is never used in object files. It is only
/// generated by the linker in final linked images, which have only DWARF info
/// for a function.
- UNWIND_AArch64_MODE_DWARF = 0x03000000,
+ UNWIND_ARM64_MODE_DWARF = 0x03000000,
/// \brief This is a standard arm64 prologue where FP/LR are immediately
/// pushed on the stack, then SP is copied to FP. If there are any
@@ -279,18 +355,18 @@ enum CompactUnwindEncodings {
 /// in a contiguous range right below the saved FP/LR pair. Any subset of the
/// five X pairs and four D pairs can be saved, but the memory layout must be
/// in register number order.
- UNWIND_AArch64_MODE_FRAME = 0x04000000,
+ UNWIND_ARM64_MODE_FRAME = 0x04000000,
/// \brief Frame register pair encodings.
- UNWIND_AArch64_FRAME_X19_X20_PAIR = 0x00000001,
- UNWIND_AArch64_FRAME_X21_X22_PAIR = 0x00000002,
- UNWIND_AArch64_FRAME_X23_X24_PAIR = 0x00000004,
- UNWIND_AArch64_FRAME_X25_X26_PAIR = 0x00000008,
- UNWIND_AArch64_FRAME_X27_X28_PAIR = 0x00000010,
- UNWIND_AArch64_FRAME_D8_D9_PAIR = 0x00000100,
- UNWIND_AArch64_FRAME_D10_D11_PAIR = 0x00000200,
- UNWIND_AArch64_FRAME_D12_D13_PAIR = 0x00000400,
- UNWIND_AArch64_FRAME_D14_D15_PAIR = 0x00000800
+ UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
+ UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
+ UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
+ UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
+ UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
+ UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
+ UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
+ UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
+ UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};
} // end CU namespace
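
The renamed constants compose bitwise: a mode in the top byte, saved-register pair bits in the low bits, and, for frameless functions, the 16-byte-aligned stack size starting at bit 12 (encodeStackAdjustment, below). A worked example under those definitions:

#include <cstdint>

int main() {
  const uint32_t Frameless = 0x02000000, Frame = 0x04000000;
  const uint32_t X19_X20 = 0x00000001, D8_D9 = 0x00000100;

  // Frameless leaf with a 64-byte stack: 0x02000000 | (64/16) << 12.
  uint32_t A = Frameless | ((64 / 16) << 12); // == 0x02004000
  // Framed function saving x19/x20 and d8/d9.
  uint32_t B = Frame | X19_X20 | D8_D9;       // == 0x04000101
  (void)A; (void)B;
  return 0;
}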
@@ -300,7 +376,7 @@ class DarwinAArch64AsmBackend : public AArch64AsmBackend {
const MCRegisterInfo &MRI;
/// \brief Encode compact unwind stack adjustment for frameless functions.
- /// See UNWIND_AArch64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
+ /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
/// The stack size always needs to be 16 byte aligned.
uint32_t encodeStackAdjustment(uint32_t StackSize) const {
return (StackSize / 16) << 12;
@@ -308,7 +384,7 @@ class DarwinAArch64AsmBackend : public AArch64AsmBackend {
public:
DarwinAArch64AsmBackend(const Target &T, const MCRegisterInfo &MRI)
- : AArch64AsmBackend(T), MRI(MRI) {}
+ : AArch64AsmBackend(T, /*IsLittleEndian*/true), MRI(MRI) {}
MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
return createAArch64MachObjectWriter(OS, MachO::CPU_TYPE_ARM64,
@@ -319,7 +395,7 @@ public:
uint32_t generateCompactUnwindEncoding(
ArrayRef<MCCFIInstruction> Instrs) const override {
if (Instrs.empty())
- return CU::UNWIND_AArch64_MODE_FRAMELESS;
+ return CU::UNWIND_ARM64_MODE_FRAMELESS;
bool HasFP = false;
unsigned StackSize = 0;
@@ -331,7 +407,7 @@ public:
switch (Inst.getOperation()) {
default:
// Cannot handle this directive: bail out.
- return CU::UNWIND_AArch64_MODE_DWARF;
+ return CU::UNWIND_ARM64_MODE_DWARF;
case MCCFIInstruction::OpDefCfa: {
// Defines a frame pointer.
assert(getXRegFromWReg(MRI.getLLVMRegNum(Inst.getRegister(), true)) ==
@@ -356,7 +432,7 @@ public:
"Pushing invalid registers for frame!");
// Indicate that the function has a frame.
- CompactUnwindEncoding |= CU::UNWIND_AArch64_MODE_FRAME;
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
HasFP = true;
break;
}
@@ -370,11 +446,11 @@ public:
// `.cfi_offset' instructions with the appropriate registers specified.
unsigned Reg1 = MRI.getLLVMRegNum(Inst.getRegister(), true);
if (i + 1 == e)
- return CU::UNWIND_AArch64_MODE_DWARF;
+ return CU::UNWIND_ARM64_MODE_DWARF;
const MCCFIInstruction &Inst2 = Instrs[++i];
if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
- return CU::UNWIND_AArch64_MODE_DWARF;
+ return CU::UNWIND_ARM64_MODE_DWARF;
unsigned Reg2 = MRI.getLLVMRegNum(Inst2.getRegister(), true);
// N.B. The encodings must be in register number order, and the X
@@ -390,19 +466,19 @@ public:
if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
(CompactUnwindEncoding & 0xF1E) == 0)
- CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X19_X20_PAIR;
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
(CompactUnwindEncoding & 0xF1C) == 0)
- CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X21_X22_PAIR;
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
(CompactUnwindEncoding & 0xF18) == 0)
- CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X23_X24_PAIR;
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
(CompactUnwindEncoding & 0xF10) == 0)
- CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X25_X26_PAIR;
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
(CompactUnwindEncoding & 0xF00) == 0)
- CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_X27_X28_PAIR;
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
else {
Reg1 = getDRegFromBReg(Reg1);
Reg2 = getDRegFromBReg(Reg2);
@@ -413,18 +489,18 @@ public:
// D14/D15 pair = 0x00000800
if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
(CompactUnwindEncoding & 0xE00) == 0)
- CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D8_D9_PAIR;
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
(CompactUnwindEncoding & 0xC00) == 0)
- CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D10_D11_PAIR;
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
(CompactUnwindEncoding & 0x800) == 0)
- CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D12_D13_PAIR;
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
- CompactUnwindEncoding |= CU::UNWIND_AArch64_FRAME_D14_D15_PAIR;
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
else
// A pair was pushed which we cannot handle.
- return CU::UNWIND_AArch64_MODE_DWARF;
+ return CU::UNWIND_ARM64_MODE_DWARF;
}
break;
@@ -436,9 +512,9 @@ public:
// With compact unwind info we can only represent stack adjustments of up
// to 65520 bytes.
if (StackSize > 65520)
- return CU::UNWIND_AArch64_MODE_DWARF;
+ return CU::UNWIND_ARM64_MODE_DWARF;
- CompactUnwindEncoding |= CU::UNWIND_AArch64_MODE_FRAMELESS;
+ CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
}
@@ -453,10 +529,9 @@ namespace {
class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
uint8_t OSABI;
- bool IsLittleEndian;
ELFAArch64AsmBackend(const Target &T, uint8_t OSABI, bool IsLittleEndian)
- : AArch64AsmBackend(T), OSABI(OSABI), IsLittleEndian(IsLittleEndian) {}
+ : AArch64AsmBackend(T, IsLittleEndian), OSABI(OSABI) {}
MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
return createAArch64ELFObjectWriter(OS, OSABI, IsLittleEndian);
@@ -466,9 +541,6 @@ public:
const MCFixup &Fixup, const MCFragment *DF,
const MCValue &Target, uint64_t &Value,
bool &IsResolved) override;
-
- void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
- uint64_t Value, bool IsPCRel) const override;
};
void ELFAArch64AsmBackend::processFixupValue(
@@ -489,34 +561,14 @@ void ELFAArch64AsmBackend::processFixupValue(
// to the linker -- a relocation!
if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21)
IsResolved = false;
-}
-
-// Returns whether this fixup is based on an address in the .eh_frame section,
-// and therefore should be byte swapped.
-// FIXME: Should be replaced with something more principled.
-static bool isByteSwappedFixup(const MCExpr *E) {
- MCValue Val;
- if (!E->evaluateAsRelocatable(Val, nullptr, nullptr))
- return false;
- if (!Val.getSymA() || Val.getSymA()->getSymbol().isUndefined())
- return false;
-
- const MCSectionELF *SecELF =
- dyn_cast<MCSectionELF>(&Val.getSymA()->getSymbol().getSection());
- return SecELF->getSectionName() == ".eh_frame";
+ // Try to get the encoded value for the fixup as-if we're mapping it into
+ // the instruction. This allows adjustFixupValue() to issue a diagnostic
+ // if the value is invalid.
+ if (IsResolved)
+ (void)adjustFixupValue(Fixup, Value, &Asm.getContext());
}
-void ELFAArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
- unsigned DataSize, uint64_t Value,
- bool IsPCRel) const {
- // store fixups in .eh_frame section in big endian order
- if (!IsLittleEndian && Fixup.getKind() == FK_Data_4) {
- if (isByteSwappedFixup(Fixup.getValue()))
- Value = ByteSwap_32(unsigned(Value));
- }
- AArch64AsmBackend::applyFixup (Fixup, Data, DataSize, Value, IsPCRel);
-}
}
MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
index 1f516d1db896..4b4c4097b97b 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
@@ -15,6 +15,7 @@
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/ErrorHandling.h"
@@ -29,8 +30,8 @@ public:
~AArch64ELFObjectWriter() override;
protected:
- unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel) const override;
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
private:
};
@@ -43,9 +44,10 @@ AArch64ELFObjectWriter::AArch64ELFObjectWriter(uint8_t OSABI,
AArch64ELFObjectWriter::~AArch64ELFObjectWriter() {}
-unsigned AArch64ELFObjectWriter::GetRelocType(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsPCRel) const {
+unsigned AArch64ELFObjectWriter::getRelocType(MCContext &Ctx,
+ const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
AArch64MCExpr::VariantKind RefKind =
static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
@@ -61,6 +63,9 @@ unsigned AArch64ELFObjectWriter::GetRelocType(const MCValue &Target,
if (IsPCRel) {
switch ((unsigned)Fixup.getKind()) {
+ case FK_Data_1:
+ Ctx.reportError(Fixup.getLoc(), "1-byte data relocations not supported");
+ return ELF::R_AARCH64_NONE;
case FK_Data_2:
return ELF::R_AARCH64_PREL16;
case FK_Data_4:
@@ -79,7 +84,9 @@ unsigned AArch64ELFObjectWriter::GetRelocType(const MCValue &Target,
return ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
if (SymLoc == AArch64MCExpr::VK_TLSDESC && !IsNC)
return ELF::R_AARCH64_TLSDESC_ADR_PAGE21;
- llvm_unreachable("invalid symbol kind for ADRP relocation");
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid symbol kind for ADRP relocation");
+ return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_pcrel_branch26:
return ELF::R_AARCH64_JUMP26;
case AArch64::fixup_aarch64_pcrel_call26:
@@ -93,10 +100,14 @@ unsigned AArch64ELFObjectWriter::GetRelocType(const MCValue &Target,
case AArch64::fixup_aarch64_pcrel_branch19:
return ELF::R_AARCH64_CONDBR19;
default:
- llvm_unreachable("Unsupported pc-relative fixup kind");
+ Ctx.reportError(Fixup.getLoc(), "Unsupported pc-relative fixup kind");
+ return ELF::R_AARCH64_NONE;
}
} else {
switch ((unsigned)Fixup.getKind()) {
+ case FK_Data_1:
+ Ctx.reportError(Fixup.getLoc(), "1-byte data relocations not supported");
+ return ELF::R_AARCH64_NONE;
case FK_Data_2:
return ELF::R_AARCH64_ABS16;
case FK_Data_4:
@@ -121,8 +132,9 @@ unsigned AArch64ELFObjectWriter::GetRelocType(const MCValue &Target,
if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
return ELF::R_AARCH64_ADD_ABS_LO12_NC;
- report_fatal_error("invalid fixup for add (uimm12) instruction");
- return 0;
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid fixup for add (uimm12) instruction");
+ return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_ldst_imm12_scale1:
if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
return ELF::R_AARCH64_LDST8_ABS_LO12_NC;
@@ -135,8 +147,9 @@ unsigned AArch64ELFObjectWriter::GetRelocType(const MCValue &Target,
if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
return ELF::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC;
- report_fatal_error("invalid fixup for 8-bit load/store instruction");
- return 0;
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid fixup for 8-bit load/store instruction");
+ return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_ldst_imm12_scale2:
if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
return ELF::R_AARCH64_LDST16_ABS_LO12_NC;
@@ -149,8 +162,9 @@ unsigned AArch64ELFObjectWriter::GetRelocType(const MCValue &Target,
if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
return ELF::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC;
- report_fatal_error("invalid fixup for 16-bit load/store instruction");
- return 0;
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid fixup for 16-bit load/store instruction");
+ return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_ldst_imm12_scale4:
if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
return ELF::R_AARCH64_LDST32_ABS_LO12_NC;
@@ -163,8 +177,9 @@ unsigned AArch64ELFObjectWriter::GetRelocType(const MCValue &Target,
if (SymLoc == AArch64MCExpr::VK_TPREL && IsNC)
return ELF::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC;
- report_fatal_error("invalid fixup for 32-bit load/store instruction");
- return 0;
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid fixup for 32-bit load/store instruction");
+ return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_ldst_imm12_scale8:
if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
return ELF::R_AARCH64_LDST64_ABS_LO12_NC;
@@ -183,14 +198,16 @@ unsigned AArch64ELFObjectWriter::GetRelocType(const MCValue &Target,
if (SymLoc == AArch64MCExpr::VK_TLSDESC && IsNC)
return ELF::R_AARCH64_TLSDESC_LD64_LO12_NC;
- report_fatal_error("invalid fixup for 64-bit load/store instruction");
- return 0;
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid fixup for 64-bit load/store instruction");
+ return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_ldst_imm12_scale16:
if (SymLoc == AArch64MCExpr::VK_ABS && IsNC)
return ELF::R_AARCH64_LDST128_ABS_LO12_NC;
- report_fatal_error("invalid fixup for 128-bit load/store instruction");
- return 0;
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid fixup for 128-bit load/store instruction");
+ return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_movw:
if (RefKind == AArch64MCExpr::VK_ABS_G3)
return ELF::R_AARCH64_MOVW_UABS_G3;
@@ -236,12 +253,14 @@ unsigned AArch64ELFObjectWriter::GetRelocType(const MCValue &Target,
return ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
if (RefKind == AArch64MCExpr::VK_GOTTPREL_G0_NC)
return ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
- report_fatal_error("invalid fixup for movz/movk instruction");
- return 0;
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid fixup for movz/movk instruction");
+ return ELF::R_AARCH64_NONE;
case AArch64::fixup_aarch64_tlsdesc_call:
return ELF::R_AARCH64_TLSDESC_CALL;
default:
- llvm_unreachable("Unknown ELF relocation type");
+ Ctx.reportError(Fixup.getLoc(), "Unknown ELF relocation type");
+ return ELF::R_AARCH64_NONE;
}
}
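
Throughout getRelocType the old fatal errors become context diagnostics paired with an R_AARCH64_NONE return, so one bad fixup no longer aborts the whole assembly. The selection itself runs on two axes, PC-relative versus absolute and then fixup kind; a self-contained sketch with illustrative stand-in enumerators:

#include <cstdio>

enum Reloc { NONE, PREL16, PREL32, ABS16, ABS32 };

// Mirrors the FK_Data_* handling above: 1-byte data relocations are
// rejected on both axes, and the error is reported rather than fatal.
static Reloc selectDataReloc(bool IsPCRel, unsigned SizeInBytes) {
  switch (SizeInBytes) {
  case 1:
    std::fprintf(stderr, "error: 1-byte data relocations not supported\n");
    return NONE; // harmless sentinel; assembly continues
  case 2:
    return IsPCRel ? PREL16 : ABS16;
  case 4:
    return IsPCRel ? PREL32 : ABS32;
  default:
    return NONE;
  }
}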
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
index 7d8e79bc63c8..7b9ff8fa0503 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
@@ -154,24 +154,6 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
- /// getSIMDShift64OpValue - Return the encoded value for the
- // shift-by-immediate AdvSIMD instructions.
- uint32_t getSIMDShift64OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
-
- uint32_t getSIMDShift64_32OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
-
- uint32_t getSIMDShift32OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
-
- uint32_t getSIMDShift16OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
-
unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
const MCSubtargetInfo &STI) const;
@@ -428,41 +410,6 @@ AArch64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
llvm_unreachable("Invalid value for vector shift amount!");
}
-uint32_t
-AArch64MCCodeEmitter::getSIMDShift64OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- const MCOperand &MO = MI.getOperand(OpIdx);
- assert(MO.isImm() && "Expected an immediate value for the shift amount!");
- return 64 - (MO.getImm());
-}
-
-uint32_t AArch64MCCodeEmitter::getSIMDShift64_32OpValue(
- const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- const MCOperand &MO = MI.getOperand(OpIdx);
- assert(MO.isImm() && "Expected an immediate value for the shift amount!");
- return 64 - (MO.getImm() | 32);
-}
-
-uint32_t
-AArch64MCCodeEmitter::getSIMDShift32OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- const MCOperand &MO = MI.getOperand(OpIdx);
- assert(MO.isImm() && "Expected an immediate value for the shift amount!");
- return 32 - (MO.getImm() | 16);
-}
-
-uint32_t
-AArch64MCCodeEmitter::getSIMDShift16OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- const MCOperand &MO = MI.getOperand(OpIdx);
- assert(MO.isImm() && "Expected an immediate value for the shift amount!");
- return 16 - (MO.getImm() | 8);
-}
-
/// getFixedPointScaleOpValue - Return the encoded value for the
// FP-to-fixed-point scale factor.
uint32_t AArch64MCCodeEmitter::getFixedPointScaleOpValue(
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
index 9f7bed0d3b12..702780621208 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
@@ -15,7 +15,6 @@
#include "AArch64ELFStreamer.h"
#include "AArch64MCAsmInfo.h"
#include "InstPrinter/AArch64InstPrinter.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
@@ -72,10 +71,8 @@ static MCAsmInfo *createAArch64MCAsmInfo(const MCRegisterInfo &MRI,
return MAI;
}
-static MCCodeGenInfo *createAArch64MCCodeGenInfo(const Triple &TT,
- Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
+static void adjustCodeGenOpts(const Triple &TT, Reloc::Model RM,
+ CodeModel::Model &CM) {
assert((TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()) &&
"Only expect Darwin and ELF targets");
@@ -89,19 +86,6 @@ static MCCodeGenInfo *createAArch64MCCodeGenInfo(const Triple &TT,
else if (CM != CodeModel::Small && CM != CodeModel::Large)
report_fatal_error(
"Only small and large code models are allowed on AArch64");
-
- // AArch64 Darwin is always PIC.
- if (TT.isOSDarwin())
- RM = Reloc::PIC_;
- // On ELF platforms the default static relocation model has a smart enough
- // linker to cope with referencing external symbols defined in a shared
- // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
- else if (RM == Reloc::Default || RM == Reloc::DynamicNoPIC)
- RM = Reloc::Static;
-
- MCCodeGenInfo *X = new MCCodeGenInfo();
- X->initMCCodeGenInfo(RM, CM, OL);
- return X;
}
static MCInstPrinter *createAArch64MCInstPrinter(const Triple &T,
@@ -140,7 +124,7 @@ extern "C" void LLVMInitializeAArch64TargetMC() {
RegisterMCAsmInfoFn X(*T, createAArch64MCAsmInfo);
// Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(*T, createAArch64MCCodeGenInfo);
+ TargetRegistry::registerMCAdjustCodeGenOpts(*T, adjustCodeGenOpts);
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(*T, createAArch64MCInstrInfo);
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
index 342384437c6a..39414cc0c6a5 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
@@ -15,7 +15,6 @@
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64MCTARGETDESC_H
#include "llvm/Support/DataTypes.h"
-#include <string>
namespace llvm {
class formatted_raw_ostream;
diff --git a/lib/Target/AArch64/MCTargetDesc/Makefile b/lib/Target/AArch64/MCTargetDesc/Makefile
deleted file mode 100644
index 5779ac5ac60a..000000000000
--- a/lib/Target/AArch64/MCTargetDesc/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/AArch64/TargetDesc/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAArch64Desc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AArch64/Makefile b/lib/Target/AArch64/Makefile
deleted file mode 100644
index f356c5850413..000000000000
--- a/lib/Target/AArch64/Makefile
+++ /dev/null
@@ -1,25 +0,0 @@
-##===- lib/Target/AArch64/Makefile -------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMAArch64CodeGen
-TARGET = AArch64
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = AArch64GenRegisterInfo.inc AArch64GenInstrInfo.inc \
- AArch64GenAsmWriter.inc AArch64GenAsmWriter1.inc \
- AArch64GenDAGISel.inc \
- AArch64GenCallingConv.inc AArch64GenAsmMatcher.inc \
- AArch64GenSubtargetInfo.inc AArch64GenMCCodeEmitter.inc \
- AArch64GenFastISel.inc AArch64GenDisassemblerTables.inc \
- AArch64GenMCPseudoLowering.inc
-
-DIRS = TargetInfo InstPrinter AsmParser Disassembler MCTargetDesc Utils
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AArch64/TargetInfo/Makefile b/lib/Target/AArch64/TargetInfo/Makefile
deleted file mode 100644
index 9dc9aa4bccf7..000000000000
--- a/lib/Target/AArch64/TargetInfo/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/AArch64/TargetInfo/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAArch64Info
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp b/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp
index cde1c6df2608..e65ba1f2401d 100644
--- a/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp
+++ b/lib/Target/AArch64/Utils/AArch64BaseInfo.cpp
@@ -11,858 +11,84 @@
//
//===----------------------------------------------------------------------===//
#include "AArch64BaseInfo.h"
-#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Regex.h"
using namespace llvm;
-StringRef AArch64NamedImmMapper::toString(uint32_t Value,
- const FeatureBitset& FeatureBits, bool &Valid) const {
- for (unsigned i = 0; i < NumMappings; ++i) {
- if (Mappings[i].isValueEqual(Value, FeatureBits)) {
- Valid = true;
- return Mappings[i].Name;
- }
+namespace llvm {
+ namespace AArch64AT {
+#define GET_AT_IMPL
+#include "AArch64GenSystemOperands.inc"
}
-
- Valid = false;
- return StringRef();
}
-uint32_t AArch64NamedImmMapper::fromString(StringRef Name,
- const FeatureBitset& FeatureBits, bool &Valid) const {
- std::string LowerCaseName = Name.lower();
- for (unsigned i = 0; i < NumMappings; ++i) {
- if (Mappings[i].isNameEqual(LowerCaseName, FeatureBits)) {
- Valid = true;
- return Mappings[i].Value;
- }
- }
- Valid = false;
- return -1;
+namespace llvm {
+ namespace AArch64DB {
+#define GET_DB_IMPL
+#include "AArch64GenSystemOperands.inc"
+ }
}
-bool AArch64NamedImmMapper::validImm(uint32_t Value) const {
- return Value < TooBigImm;
+namespace llvm {
+ namespace AArch64DC {
+#define GET_DC_IMPL
+#include "AArch64GenSystemOperands.inc"
+ }
}
-const AArch64NamedImmMapper::Mapping AArch64AT::ATMapper::ATMappings[] = {
- {"s1e1r", S1E1R, {}},
- {"s1e2r", S1E2R, {}},
- {"s1e3r", S1E3R, {}},
- {"s1e1w", S1E1W, {}},
- {"s1e2w", S1E2W, {}},
- {"s1e3w", S1E3W, {}},
- {"s1e0r", S1E0R, {}},
- {"s1e0w", S1E0W, {}},
- {"s12e1r", S12E1R, {}},
- {"s12e1w", S12E1W, {}},
- {"s12e0r", S12E0R, {}},
- {"s12e0w", S12E0W, {}},
-};
-
-AArch64AT::ATMapper::ATMapper()
- : AArch64NamedImmMapper(ATMappings, 0) {}
-
-const AArch64NamedImmMapper::Mapping AArch64DB::DBarrierMapper::DBarrierMappings[] = {
- {"oshld", OSHLD, {}},
- {"oshst", OSHST, {}},
- {"osh", OSH, {}},
- {"nshld", NSHLD, {}},
- {"nshst", NSHST, {}},
- {"nsh", NSH, {}},
- {"ishld", ISHLD, {}},
- {"ishst", ISHST, {}},
- {"ish", ISH, {}},
- {"ld", LD, {}},
- {"st", ST, {}},
- {"sy", SY, {}}
-};
-
-AArch64DB::DBarrierMapper::DBarrierMapper()
- : AArch64NamedImmMapper(DBarrierMappings, 16u) {}
-
-const AArch64NamedImmMapper::Mapping AArch64DC::DCMapper::DCMappings[] = {
- {"zva", ZVA, {}},
- {"ivac", IVAC, {}},
- {"isw", ISW, {}},
- {"cvac", CVAC, {}},
- {"csw", CSW, {}},
- {"cvau", CVAU, {}},
- {"civac", CIVAC, {}},
- {"cisw", CISW, {}}
-};
-
-AArch64DC::DCMapper::DCMapper()
- : AArch64NamedImmMapper(DCMappings, 0) {}
-
-const AArch64NamedImmMapper::Mapping AArch64IC::ICMapper::ICMappings[] = {
- {"ialluis", IALLUIS, {}},
- {"iallu", IALLU, {}},
- {"ivau", IVAU, {}}
-};
-
-AArch64IC::ICMapper::ICMapper()
- : AArch64NamedImmMapper(ICMappings, 0) {}
-
-const AArch64NamedImmMapper::Mapping AArch64ISB::ISBMapper::ISBMappings[] = {
- {"sy", SY, {}},
-};
-
-AArch64ISB::ISBMapper::ISBMapper()
- : AArch64NamedImmMapper(ISBMappings, 16) {}
-
-const AArch64NamedImmMapper::Mapping AArch64PRFM::PRFMMapper::PRFMMappings[] = {
- {"pldl1keep", PLDL1KEEP, {}},
- {"pldl1strm", PLDL1STRM, {}},
- {"pldl2keep", PLDL2KEEP, {}},
- {"pldl2strm", PLDL2STRM, {}},
- {"pldl3keep", PLDL3KEEP, {}},
- {"pldl3strm", PLDL3STRM, {}},
- {"plil1keep", PLIL1KEEP, {}},
- {"plil1strm", PLIL1STRM, {}},
- {"plil2keep", PLIL2KEEP, {}},
- {"plil2strm", PLIL2STRM, {}},
- {"plil3keep", PLIL3KEEP, {}},
- {"plil3strm", PLIL3STRM, {}},
- {"pstl1keep", PSTL1KEEP, {}},
- {"pstl1strm", PSTL1STRM, {}},
- {"pstl2keep", PSTL2KEEP, {}},
- {"pstl2strm", PSTL2STRM, {}},
- {"pstl3keep", PSTL3KEEP, {}},
- {"pstl3strm", PSTL3STRM, {}}
-};
-
-AArch64PRFM::PRFMMapper::PRFMMapper()
- : AArch64NamedImmMapper(PRFMMappings, 32) {}
-
-const AArch64NamedImmMapper::Mapping AArch64PState::PStateMapper::PStateMappings[] = {
- {"spsel", SPSel, {}},
- {"daifset", DAIFSet, {}},
- {"daifclr", DAIFClr, {}},
-
- // v8.1a "Privileged Access Never" extension-specific PStates
- {"pan", PAN, {AArch64::HasV8_1aOps}},
-
- // v8.2a
- {"uao", UAO, {AArch64::HasV8_2aOps}},
-};
-
-AArch64PState::PStateMapper::PStateMapper()
- : AArch64NamedImmMapper(PStateMappings, 0) {}
-
-const AArch64NamedImmMapper::Mapping AArch64PSBHint::PSBHintMapper::PSBHintMappings[] = {
- // v8.2a "Statistical Profiling" extension-specific PSB operand
- {"csync", CSync, {AArch64::FeatureSPE}},
-};
-
-AArch64PSBHint::PSBHintMapper::PSBHintMapper()
- : AArch64NamedImmMapper(PSBHintMappings, 0) {}
-
-const AArch64NamedImmMapper::Mapping AArch64SysReg::MRSMapper::MRSMappings[] = {
- {"mdccsr_el0", MDCCSR_EL0, {}},
- {"dbgdtrrx_el0", DBGDTRRX_EL0, {}},
- {"mdrar_el1", MDRAR_EL1, {}},
- {"oslsr_el1", OSLSR_EL1, {}},
- {"dbgauthstatus_el1", DBGAUTHSTATUS_EL1, {}},
- {"pmceid0_el0", PMCEID0_EL0, {}},
- {"pmceid1_el0", PMCEID1_EL0, {}},
- {"midr_el1", MIDR_EL1, {}},
- {"ccsidr_el1", CCSIDR_EL1, {}},
- {"clidr_el1", CLIDR_EL1, {}},
- {"ctr_el0", CTR_EL0, {}},
- {"mpidr_el1", MPIDR_EL1, {}},
- {"revidr_el1", REVIDR_EL1, {}},
- {"aidr_el1", AIDR_EL1, {}},
- {"dczid_el0", DCZID_EL0, {}},
- {"id_pfr0_el1", ID_PFR0_EL1, {}},
- {"id_pfr1_el1", ID_PFR1_EL1, {}},
- {"id_dfr0_el1", ID_DFR0_EL1, {}},
- {"id_afr0_el1", ID_AFR0_EL1, {}},
- {"id_mmfr0_el1", ID_MMFR0_EL1, {}},
- {"id_mmfr1_el1", ID_MMFR1_EL1, {}},
- {"id_mmfr2_el1", ID_MMFR2_EL1, {}},
- {"id_mmfr3_el1", ID_MMFR3_EL1, {}},
- {"id_mmfr4_el1", ID_MMFR4_EL1, {}},
- {"id_isar0_el1", ID_ISAR0_EL1, {}},
- {"id_isar1_el1", ID_ISAR1_EL1, {}},
- {"id_isar2_el1", ID_ISAR2_EL1, {}},
- {"id_isar3_el1", ID_ISAR3_EL1, {}},
- {"id_isar4_el1", ID_ISAR4_EL1, {}},
- {"id_isar5_el1", ID_ISAR5_EL1, {}},
- {"id_aa64pfr0_el1", ID_A64PFR0_EL1, {}},
- {"id_aa64pfr1_el1", ID_A64PFR1_EL1, {}},
- {"id_aa64dfr0_el1", ID_A64DFR0_EL1, {}},
- {"id_aa64dfr1_el1", ID_A64DFR1_EL1, {}},
- {"id_aa64afr0_el1", ID_A64AFR0_EL1, {}},
- {"id_aa64afr1_el1", ID_A64AFR1_EL1, {}},
- {"id_aa64isar0_el1", ID_A64ISAR0_EL1, {}},
- {"id_aa64isar1_el1", ID_A64ISAR1_EL1, {}},
- {"id_aa64mmfr0_el1", ID_A64MMFR0_EL1, {}},
- {"id_aa64mmfr1_el1", ID_A64MMFR1_EL1, {}},
- {"id_aa64mmfr2_el1", ID_A64MMFR2_EL1, {AArch64::HasV8_2aOps}},
- {"mvfr0_el1", MVFR0_EL1, {}},
- {"mvfr1_el1", MVFR1_EL1, {}},
- {"mvfr2_el1", MVFR2_EL1, {}},
- {"rvbar_el1", RVBAR_EL1, {}},
- {"rvbar_el2", RVBAR_EL2, {}},
- {"rvbar_el3", RVBAR_EL3, {}},
- {"isr_el1", ISR_EL1, {}},
- {"cntpct_el0", CNTPCT_EL0, {}},
- {"cntvct_el0", CNTVCT_EL0, {}},
-
- // Trace registers
- {"trcstatr", TRCSTATR, {}},
- {"trcidr8", TRCIDR8, {}},
- {"trcidr9", TRCIDR9, {}},
- {"trcidr10", TRCIDR10, {}},
- {"trcidr11", TRCIDR11, {}},
- {"trcidr12", TRCIDR12, {}},
- {"trcidr13", TRCIDR13, {}},
- {"trcidr0", TRCIDR0, {}},
- {"trcidr1", TRCIDR1, {}},
- {"trcidr2", TRCIDR2, {}},
- {"trcidr3", TRCIDR3, {}},
- {"trcidr4", TRCIDR4, {}},
- {"trcidr5", TRCIDR5, {}},
- {"trcidr6", TRCIDR6, {}},
- {"trcidr7", TRCIDR7, {}},
- {"trcoslsr", TRCOSLSR, {}},
- {"trcpdsr", TRCPDSR, {}},
- {"trcdevaff0", TRCDEVAFF0, {}},
- {"trcdevaff1", TRCDEVAFF1, {}},
- {"trclsr", TRCLSR, {}},
- {"trcauthstatus", TRCAUTHSTATUS, {}},
- {"trcdevarch", TRCDEVARCH, {}},
- {"trcdevid", TRCDEVID, {}},
- {"trcdevtype", TRCDEVTYPE, {}},
- {"trcpidr4", TRCPIDR4, {}},
- {"trcpidr5", TRCPIDR5, {}},
- {"trcpidr6", TRCPIDR6, {}},
- {"trcpidr7", TRCPIDR7, {}},
- {"trcpidr0", TRCPIDR0, {}},
- {"trcpidr1", TRCPIDR1, {}},
- {"trcpidr2", TRCPIDR2, {}},
- {"trcpidr3", TRCPIDR3, {}},
- {"trccidr0", TRCCIDR0, {}},
- {"trccidr1", TRCCIDR1, {}},
- {"trccidr2", TRCCIDR2, {}},
- {"trccidr3", TRCCIDR3, {}},
-
- // GICv3 registers
- {"icc_iar1_el1", ICC_IAR1_EL1, {}},
- {"icc_iar0_el1", ICC_IAR0_EL1, {}},
- {"icc_hppir1_el1", ICC_HPPIR1_EL1, {}},
- {"icc_hppir0_el1", ICC_HPPIR0_EL1, {}},
- {"icc_rpr_el1", ICC_RPR_EL1, {}},
- {"ich_vtr_el2", ICH_VTR_EL2, {}},
- {"ich_eisr_el2", ICH_EISR_EL2, {}},
- {"ich_elsr_el2", ICH_ELSR_EL2, {}},
-
- // v8.1a "Limited Ordering Regions" extension-specific system registers
- {"lorid_el1", LORID_EL1, {AArch64::HasV8_1aOps}},
-};
-
-AArch64SysReg::MRSMapper::MRSMapper() {
- InstMappings = &MRSMappings[0];
- NumInstMappings = llvm::array_lengthof(MRSMappings);
+namespace llvm {
+ namespace AArch64IC {
+#define GET_IC_IMPL
+#include "AArch64GenSystemOperands.inc"
+ }
}
-const AArch64NamedImmMapper::Mapping AArch64SysReg::MSRMapper::MSRMappings[] = {
- {"dbgdtrtx_el0", DBGDTRTX_EL0, {}},
- {"oslar_el1", OSLAR_EL1, {}},
- {"pmswinc_el0", PMSWINC_EL0, {}},
-
- // Trace registers
- {"trcoslar", TRCOSLAR, {}},
- {"trclar", TRCLAR, {}},
-
- // GICv3 registers
- {"icc_eoir1_el1", ICC_EOIR1_EL1, {}},
- {"icc_eoir0_el1", ICC_EOIR0_EL1, {}},
- {"icc_dir_el1", ICC_DIR_EL1, {}},
- {"icc_sgi1r_el1", ICC_SGI1R_EL1, {}},
- {"icc_asgi1r_el1", ICC_ASGI1R_EL1, {}},
- {"icc_sgi0r_el1", ICC_SGI0R_EL1, {}},
-};
-
-AArch64SysReg::MSRMapper::MSRMapper() {
- InstMappings = &MSRMappings[0];
- NumInstMappings = llvm::array_lengthof(MSRMappings);
+namespace llvm {
+ namespace AArch64ISB {
+#define GET_ISB_IMPL
+#include "AArch64GenSystemOperands.inc"
+ }
+}
+namespace llvm {
+ namespace AArch64PRFM {
+#define GET_PRFM_IMPL
+#include "AArch64GenSystemOperands.inc"
+ }
}
+namespace llvm {
+ namespace AArch64PState {
+#define GET_PSTATE_IMPL
+#include "AArch64GenSystemOperands.inc"
+ }
+}
-const AArch64NamedImmMapper::Mapping AArch64SysReg::SysRegMapper::SysRegMappings[] = {
- {"osdtrrx_el1", OSDTRRX_EL1, {}},
- {"osdtrtx_el1", OSDTRTX_EL1, {}},
- {"teecr32_el1", TEECR32_EL1, {}},
- {"mdccint_el1", MDCCINT_EL1, {}},
- {"mdscr_el1", MDSCR_EL1, {}},
- {"dbgdtr_el0", DBGDTR_EL0, {}},
- {"oseccr_el1", OSECCR_EL1, {}},
- {"dbgvcr32_el2", DBGVCR32_EL2, {}},
- {"dbgbvr0_el1", DBGBVR0_EL1, {}},
- {"dbgbvr1_el1", DBGBVR1_EL1, {}},
- {"dbgbvr2_el1", DBGBVR2_EL1, {}},
- {"dbgbvr3_el1", DBGBVR3_EL1, {}},
- {"dbgbvr4_el1", DBGBVR4_EL1, {}},
- {"dbgbvr5_el1", DBGBVR5_EL1, {}},
- {"dbgbvr6_el1", DBGBVR6_EL1, {}},
- {"dbgbvr7_el1", DBGBVR7_EL1, {}},
- {"dbgbvr8_el1", DBGBVR8_EL1, {}},
- {"dbgbvr9_el1", DBGBVR9_EL1, {}},
- {"dbgbvr10_el1", DBGBVR10_EL1, {}},
- {"dbgbvr11_el1", DBGBVR11_EL1, {}},
- {"dbgbvr12_el1", DBGBVR12_EL1, {}},
- {"dbgbvr13_el1", DBGBVR13_EL1, {}},
- {"dbgbvr14_el1", DBGBVR14_EL1, {}},
- {"dbgbvr15_el1", DBGBVR15_EL1, {}},
- {"dbgbcr0_el1", DBGBCR0_EL1, {}},
- {"dbgbcr1_el1", DBGBCR1_EL1, {}},
- {"dbgbcr2_el1", DBGBCR2_EL1, {}},
- {"dbgbcr3_el1", DBGBCR3_EL1, {}},
- {"dbgbcr4_el1", DBGBCR4_EL1, {}},
- {"dbgbcr5_el1", DBGBCR5_EL1, {}},
- {"dbgbcr6_el1", DBGBCR6_EL1, {}},
- {"dbgbcr7_el1", DBGBCR7_EL1, {}},
- {"dbgbcr8_el1", DBGBCR8_EL1, {}},
- {"dbgbcr9_el1", DBGBCR9_EL1, {}},
- {"dbgbcr10_el1", DBGBCR10_EL1, {}},
- {"dbgbcr11_el1", DBGBCR11_EL1, {}},
- {"dbgbcr12_el1", DBGBCR12_EL1, {}},
- {"dbgbcr13_el1", DBGBCR13_EL1, {}},
- {"dbgbcr14_el1", DBGBCR14_EL1, {}},
- {"dbgbcr15_el1", DBGBCR15_EL1, {}},
- {"dbgwvr0_el1", DBGWVR0_EL1, {}},
- {"dbgwvr1_el1", DBGWVR1_EL1, {}},
- {"dbgwvr2_el1", DBGWVR2_EL1, {}},
- {"dbgwvr3_el1", DBGWVR3_EL1, {}},
- {"dbgwvr4_el1", DBGWVR4_EL1, {}},
- {"dbgwvr5_el1", DBGWVR5_EL1, {}},
- {"dbgwvr6_el1", DBGWVR6_EL1, {}},
- {"dbgwvr7_el1", DBGWVR7_EL1, {}},
- {"dbgwvr8_el1", DBGWVR8_EL1, {}},
- {"dbgwvr9_el1", DBGWVR9_EL1, {}},
- {"dbgwvr10_el1", DBGWVR10_EL1, {}},
- {"dbgwvr11_el1", DBGWVR11_EL1, {}},
- {"dbgwvr12_el1", DBGWVR12_EL1, {}},
- {"dbgwvr13_el1", DBGWVR13_EL1, {}},
- {"dbgwvr14_el1", DBGWVR14_EL1, {}},
- {"dbgwvr15_el1", DBGWVR15_EL1, {}},
- {"dbgwcr0_el1", DBGWCR0_EL1, {}},
- {"dbgwcr1_el1", DBGWCR1_EL1, {}},
- {"dbgwcr2_el1", DBGWCR2_EL1, {}},
- {"dbgwcr3_el1", DBGWCR3_EL1, {}},
- {"dbgwcr4_el1", DBGWCR4_EL1, {}},
- {"dbgwcr5_el1", DBGWCR5_EL1, {}},
- {"dbgwcr6_el1", DBGWCR6_EL1, {}},
- {"dbgwcr7_el1", DBGWCR7_EL1, {}},
- {"dbgwcr8_el1", DBGWCR8_EL1, {}},
- {"dbgwcr9_el1", DBGWCR9_EL1, {}},
- {"dbgwcr10_el1", DBGWCR10_EL1, {}},
- {"dbgwcr11_el1", DBGWCR11_EL1, {}},
- {"dbgwcr12_el1", DBGWCR12_EL1, {}},
- {"dbgwcr13_el1", DBGWCR13_EL1, {}},
- {"dbgwcr14_el1", DBGWCR14_EL1, {}},
- {"dbgwcr15_el1", DBGWCR15_EL1, {}},
- {"teehbr32_el1", TEEHBR32_EL1, {}},
- {"osdlr_el1", OSDLR_EL1, {}},
- {"dbgprcr_el1", DBGPRCR_EL1, {}},
- {"dbgclaimset_el1", DBGCLAIMSET_EL1, {}},
- {"dbgclaimclr_el1", DBGCLAIMCLR_EL1, {}},
- {"csselr_el1", CSSELR_EL1, {}},
- {"vpidr_el2", VPIDR_EL2, {}},
- {"vmpidr_el2", VMPIDR_EL2, {}},
- {"sctlr_el1", SCTLR_EL1, {}},
- {"sctlr_el2", SCTLR_EL2, {}},
- {"sctlr_el3", SCTLR_EL3, {}},
- {"actlr_el1", ACTLR_EL1, {}},
- {"actlr_el2", ACTLR_EL2, {}},
- {"actlr_el3", ACTLR_EL3, {}},
- {"cpacr_el1", CPACR_EL1, {}},
- {"hcr_el2", HCR_EL2, {}},
- {"scr_el3", SCR_EL3, {}},
- {"mdcr_el2", MDCR_EL2, {}},
- {"sder32_el3", SDER32_EL3, {}},
- {"cptr_el2", CPTR_EL2, {}},
- {"cptr_el3", CPTR_EL3, {}},
- {"hstr_el2", HSTR_EL2, {}},
- {"hacr_el2", HACR_EL2, {}},
- {"mdcr_el3", MDCR_EL3, {}},
- {"ttbr0_el1", TTBR0_EL1, {}},
- {"ttbr0_el2", TTBR0_EL2, {}},
- {"ttbr0_el3", TTBR0_EL3, {}},
- {"ttbr1_el1", TTBR1_EL1, {}},
- {"tcr_el1", TCR_EL1, {}},
- {"tcr_el2", TCR_EL2, {}},
- {"tcr_el3", TCR_EL3, {}},
- {"vttbr_el2", VTTBR_EL2, {}},
- {"vtcr_el2", VTCR_EL2, {}},
- {"dacr32_el2", DACR32_EL2, {}},
- {"spsr_el1", SPSR_EL1, {}},
- {"spsr_el2", SPSR_EL2, {}},
- {"spsr_el3", SPSR_EL3, {}},
- {"elr_el1", ELR_EL1, {}},
- {"elr_el2", ELR_EL2, {}},
- {"elr_el3", ELR_EL3, {}},
- {"sp_el0", SP_EL0, {}},
- {"sp_el1", SP_EL1, {}},
- {"sp_el2", SP_EL2, {}},
- {"spsel", SPSel, {}},
- {"nzcv", NZCV, {}},
- {"daif", DAIF, {}},
- {"currentel", CurrentEL, {}},
- {"spsr_irq", SPSR_irq, {}},
- {"spsr_abt", SPSR_abt, {}},
- {"spsr_und", SPSR_und, {}},
- {"spsr_fiq", SPSR_fiq, {}},
- {"fpcr", FPCR, {}},
- {"fpsr", FPSR, {}},
- {"dspsr_el0", DSPSR_EL0, {}},
- {"dlr_el0", DLR_EL0, {}},
- {"ifsr32_el2", IFSR32_EL2, {}},
- {"afsr0_el1", AFSR0_EL1, {}},
- {"afsr0_el2", AFSR0_EL2, {}},
- {"afsr0_el3", AFSR0_EL3, {}},
- {"afsr1_el1", AFSR1_EL1, {}},
- {"afsr1_el2", AFSR1_EL2, {}},
- {"afsr1_el3", AFSR1_EL3, {}},
- {"esr_el1", ESR_EL1, {}},
- {"esr_el2", ESR_EL2, {}},
- {"esr_el3", ESR_EL3, {}},
- {"fpexc32_el2", FPEXC32_EL2, {}},
- {"far_el1", FAR_EL1, {}},
- {"far_el2", FAR_EL2, {}},
- {"far_el3", FAR_EL3, {}},
- {"hpfar_el2", HPFAR_EL2, {}},
- {"par_el1", PAR_EL1, {}},
- {"pmcr_el0", PMCR_EL0, {}},
- {"pmcntenset_el0", PMCNTENSET_EL0, {}},
- {"pmcntenclr_el0", PMCNTENCLR_EL0, {}},
- {"pmovsclr_el0", PMOVSCLR_EL0, {}},
- {"pmselr_el0", PMSELR_EL0, {}},
- {"pmccntr_el0", PMCCNTR_EL0, {}},
- {"pmxevtyper_el0", PMXEVTYPER_EL0, {}},
- {"pmxevcntr_el0", PMXEVCNTR_EL0, {}},
- {"pmuserenr_el0", PMUSERENR_EL0, {}},
- {"pmintenset_el1", PMINTENSET_EL1, {}},
- {"pmintenclr_el1", PMINTENCLR_EL1, {}},
- {"pmovsset_el0", PMOVSSET_EL0, {}},
- {"mair_el1", MAIR_EL1, {}},
- {"mair_el2", MAIR_EL2, {}},
- {"mair_el3", MAIR_EL3, {}},
- {"amair_el1", AMAIR_EL1, {}},
- {"amair_el2", AMAIR_EL2, {}},
- {"amair_el3", AMAIR_EL3, {}},
- {"vbar_el1", VBAR_EL1, {}},
- {"vbar_el2", VBAR_EL2, {}},
- {"vbar_el3", VBAR_EL3, {}},
- {"rmr_el1", RMR_EL1, {}},
- {"rmr_el2", RMR_EL2, {}},
- {"rmr_el3", RMR_EL3, {}},
- {"contextidr_el1", CONTEXTIDR_EL1, {}},
- {"tpidr_el0", TPIDR_EL0, {}},
- {"tpidr_el2", TPIDR_EL2, {}},
- {"tpidr_el3", TPIDR_EL3, {}},
- {"tpidrro_el0", TPIDRRO_EL0, {}},
- {"tpidr_el1", TPIDR_EL1, {}},
- {"cntfrq_el0", CNTFRQ_EL0, {}},
- {"cntvoff_el2", CNTVOFF_EL2, {}},
- {"cntkctl_el1", CNTKCTL_EL1, {}},
- {"cnthctl_el2", CNTHCTL_EL2, {}},
- {"cntp_tval_el0", CNTP_TVAL_EL0, {}},
- {"cnthp_tval_el2", CNTHP_TVAL_EL2, {}},
- {"cntps_tval_el1", CNTPS_TVAL_EL1, {}},
- {"cntp_ctl_el0", CNTP_CTL_EL0, {}},
- {"cnthp_ctl_el2", CNTHP_CTL_EL2, {}},
- {"cntps_ctl_el1", CNTPS_CTL_EL1, {}},
- {"cntp_cval_el0", CNTP_CVAL_EL0, {}},
- {"cnthp_cval_el2", CNTHP_CVAL_EL2, {}},
- {"cntps_cval_el1", CNTPS_CVAL_EL1, {}},
- {"cntv_tval_el0", CNTV_TVAL_EL0, {}},
- {"cntv_ctl_el0", CNTV_CTL_EL0, {}},
- {"cntv_cval_el0", CNTV_CVAL_EL0, {}},
- {"pmevcntr0_el0", PMEVCNTR0_EL0, {}},
- {"pmevcntr1_el0", PMEVCNTR1_EL0, {}},
- {"pmevcntr2_el0", PMEVCNTR2_EL0, {}},
- {"pmevcntr3_el0", PMEVCNTR3_EL0, {}},
- {"pmevcntr4_el0", PMEVCNTR4_EL0, {}},
- {"pmevcntr5_el0", PMEVCNTR5_EL0, {}},
- {"pmevcntr6_el0", PMEVCNTR6_EL0, {}},
- {"pmevcntr7_el0", PMEVCNTR7_EL0, {}},
- {"pmevcntr8_el0", PMEVCNTR8_EL0, {}},
- {"pmevcntr9_el0", PMEVCNTR9_EL0, {}},
- {"pmevcntr10_el0", PMEVCNTR10_EL0, {}},
- {"pmevcntr11_el0", PMEVCNTR11_EL0, {}},
- {"pmevcntr12_el0", PMEVCNTR12_EL0, {}},
- {"pmevcntr13_el0", PMEVCNTR13_EL0, {}},
- {"pmevcntr14_el0", PMEVCNTR14_EL0, {}},
- {"pmevcntr15_el0", PMEVCNTR15_EL0, {}},
- {"pmevcntr16_el0", PMEVCNTR16_EL0, {}},
- {"pmevcntr17_el0", PMEVCNTR17_EL0, {}},
- {"pmevcntr18_el0", PMEVCNTR18_EL0, {}},
- {"pmevcntr19_el0", PMEVCNTR19_EL0, {}},
- {"pmevcntr20_el0", PMEVCNTR20_EL0, {}},
- {"pmevcntr21_el0", PMEVCNTR21_EL0, {}},
- {"pmevcntr22_el0", PMEVCNTR22_EL0, {}},
- {"pmevcntr23_el0", PMEVCNTR23_EL0, {}},
- {"pmevcntr24_el0", PMEVCNTR24_EL0, {}},
- {"pmevcntr25_el0", PMEVCNTR25_EL0, {}},
- {"pmevcntr26_el0", PMEVCNTR26_EL0, {}},
- {"pmevcntr27_el0", PMEVCNTR27_EL0, {}},
- {"pmevcntr28_el0", PMEVCNTR28_EL0, {}},
- {"pmevcntr29_el0", PMEVCNTR29_EL0, {}},
- {"pmevcntr30_el0", PMEVCNTR30_EL0, {}},
- {"pmccfiltr_el0", PMCCFILTR_EL0, {}},
- {"pmevtyper0_el0", PMEVTYPER0_EL0, {}},
- {"pmevtyper1_el0", PMEVTYPER1_EL0, {}},
- {"pmevtyper2_el0", PMEVTYPER2_EL0, {}},
- {"pmevtyper3_el0", PMEVTYPER3_EL0, {}},
- {"pmevtyper4_el0", PMEVTYPER4_EL0, {}},
- {"pmevtyper5_el0", PMEVTYPER5_EL0, {}},
- {"pmevtyper6_el0", PMEVTYPER6_EL0, {}},
- {"pmevtyper7_el0", PMEVTYPER7_EL0, {}},
- {"pmevtyper8_el0", PMEVTYPER8_EL0, {}},
- {"pmevtyper9_el0", PMEVTYPER9_EL0, {}},
- {"pmevtyper10_el0", PMEVTYPER10_EL0, {}},
- {"pmevtyper11_el0", PMEVTYPER11_EL0, {}},
- {"pmevtyper12_el0", PMEVTYPER12_EL0, {}},
- {"pmevtyper13_el0", PMEVTYPER13_EL0, {}},
- {"pmevtyper14_el0", PMEVTYPER14_EL0, {}},
- {"pmevtyper15_el0", PMEVTYPER15_EL0, {}},
- {"pmevtyper16_el0", PMEVTYPER16_EL0, {}},
- {"pmevtyper17_el0", PMEVTYPER17_EL0, {}},
- {"pmevtyper18_el0", PMEVTYPER18_EL0, {}},
- {"pmevtyper19_el0", PMEVTYPER19_EL0, {}},
- {"pmevtyper20_el0", PMEVTYPER20_EL0, {}},
- {"pmevtyper21_el0", PMEVTYPER21_EL0, {}},
- {"pmevtyper22_el0", PMEVTYPER22_EL0, {}},
- {"pmevtyper23_el0", PMEVTYPER23_EL0, {}},
- {"pmevtyper24_el0", PMEVTYPER24_EL0, {}},
- {"pmevtyper25_el0", PMEVTYPER25_EL0, {}},
- {"pmevtyper26_el0", PMEVTYPER26_EL0, {}},
- {"pmevtyper27_el0", PMEVTYPER27_EL0, {}},
- {"pmevtyper28_el0", PMEVTYPER28_EL0, {}},
- {"pmevtyper29_el0", PMEVTYPER29_EL0, {}},
- {"pmevtyper30_el0", PMEVTYPER30_EL0, {}},
-
- // Trace registers
- {"trcprgctlr", TRCPRGCTLR, {}},
- {"trcprocselr", TRCPROCSELR, {}},
- {"trcconfigr", TRCCONFIGR, {}},
- {"trcauxctlr", TRCAUXCTLR, {}},
- {"trceventctl0r", TRCEVENTCTL0R, {}},
- {"trceventctl1r", TRCEVENTCTL1R, {}},
- {"trcstallctlr", TRCSTALLCTLR, {}},
- {"trctsctlr", TRCTSCTLR, {}},
- {"trcsyncpr", TRCSYNCPR, {}},
- {"trcccctlr", TRCCCCTLR, {}},
- {"trcbbctlr", TRCBBCTLR, {}},
- {"trctraceidr", TRCTRACEIDR, {}},
- {"trcqctlr", TRCQCTLR, {}},
- {"trcvictlr", TRCVICTLR, {}},
- {"trcviiectlr", TRCVIIECTLR, {}},
- {"trcvissctlr", TRCVISSCTLR, {}},
- {"trcvipcssctlr", TRCVIPCSSCTLR, {}},
- {"trcvdctlr", TRCVDCTLR, {}},
- {"trcvdsacctlr", TRCVDSACCTLR, {}},
- {"trcvdarcctlr", TRCVDARCCTLR, {}},
- {"trcseqevr0", TRCSEQEVR0, {}},
- {"trcseqevr1", TRCSEQEVR1, {}},
- {"trcseqevr2", TRCSEQEVR2, {}},
- {"trcseqrstevr", TRCSEQRSTEVR, {}},
- {"trcseqstr", TRCSEQSTR, {}},
- {"trcextinselr", TRCEXTINSELR, {}},
- {"trccntrldvr0", TRCCNTRLDVR0, {}},
- {"trccntrldvr1", TRCCNTRLDVR1, {}},
- {"trccntrldvr2", TRCCNTRLDVR2, {}},
- {"trccntrldvr3", TRCCNTRLDVR3, {}},
- {"trccntctlr0", TRCCNTCTLR0, {}},
- {"trccntctlr1", TRCCNTCTLR1, {}},
- {"trccntctlr2", TRCCNTCTLR2, {}},
- {"trccntctlr3", TRCCNTCTLR3, {}},
- {"trccntvr0", TRCCNTVR0, {}},
- {"trccntvr1", TRCCNTVR1, {}},
- {"trccntvr2", TRCCNTVR2, {}},
- {"trccntvr3", TRCCNTVR3, {}},
- {"trcimspec0", TRCIMSPEC0, {}},
- {"trcimspec1", TRCIMSPEC1, {}},
- {"trcimspec2", TRCIMSPEC2, {}},
- {"trcimspec3", TRCIMSPEC3, {}},
- {"trcimspec4", TRCIMSPEC4, {}},
- {"trcimspec5", TRCIMSPEC5, {}},
- {"trcimspec6", TRCIMSPEC6, {}},
- {"trcimspec7", TRCIMSPEC7, {}},
- {"trcrsctlr2", TRCRSCTLR2, {}},
- {"trcrsctlr3", TRCRSCTLR3, {}},
- {"trcrsctlr4", TRCRSCTLR4, {}},
- {"trcrsctlr5", TRCRSCTLR5, {}},
- {"trcrsctlr6", TRCRSCTLR6, {}},
- {"trcrsctlr7", TRCRSCTLR7, {}},
- {"trcrsctlr8", TRCRSCTLR8, {}},
- {"trcrsctlr9", TRCRSCTLR9, {}},
- {"trcrsctlr10", TRCRSCTLR10, {}},
- {"trcrsctlr11", TRCRSCTLR11, {}},
- {"trcrsctlr12", TRCRSCTLR12, {}},
- {"trcrsctlr13", TRCRSCTLR13, {}},
- {"trcrsctlr14", TRCRSCTLR14, {}},
- {"trcrsctlr15", TRCRSCTLR15, {}},
- {"trcrsctlr16", TRCRSCTLR16, {}},
- {"trcrsctlr17", TRCRSCTLR17, {}},
- {"trcrsctlr18", TRCRSCTLR18, {}},
- {"trcrsctlr19", TRCRSCTLR19, {}},
- {"trcrsctlr20", TRCRSCTLR20, {}},
- {"trcrsctlr21", TRCRSCTLR21, {}},
- {"trcrsctlr22", TRCRSCTLR22, {}},
- {"trcrsctlr23", TRCRSCTLR23, {}},
- {"trcrsctlr24", TRCRSCTLR24, {}},
- {"trcrsctlr25", TRCRSCTLR25, {}},
- {"trcrsctlr26", TRCRSCTLR26, {}},
- {"trcrsctlr27", TRCRSCTLR27, {}},
- {"trcrsctlr28", TRCRSCTLR28, {}},
- {"trcrsctlr29", TRCRSCTLR29, {}},
- {"trcrsctlr30", TRCRSCTLR30, {}},
- {"trcrsctlr31", TRCRSCTLR31, {}},
- {"trcssccr0", TRCSSCCR0, {}},
- {"trcssccr1", TRCSSCCR1, {}},
- {"trcssccr2", TRCSSCCR2, {}},
- {"trcssccr3", TRCSSCCR3, {}},
- {"trcssccr4", TRCSSCCR4, {}},
- {"trcssccr5", TRCSSCCR5, {}},
- {"trcssccr6", TRCSSCCR6, {}},
- {"trcssccr7", TRCSSCCR7, {}},
- {"trcsscsr0", TRCSSCSR0, {}},
- {"trcsscsr1", TRCSSCSR1, {}},
- {"trcsscsr2", TRCSSCSR2, {}},
- {"trcsscsr3", TRCSSCSR3, {}},
- {"trcsscsr4", TRCSSCSR4, {}},
- {"trcsscsr5", TRCSSCSR5, {}},
- {"trcsscsr6", TRCSSCSR6, {}},
- {"trcsscsr7", TRCSSCSR7, {}},
- {"trcsspcicr0", TRCSSPCICR0, {}},
- {"trcsspcicr1", TRCSSPCICR1, {}},
- {"trcsspcicr2", TRCSSPCICR2, {}},
- {"trcsspcicr3", TRCSSPCICR3, {}},
- {"trcsspcicr4", TRCSSPCICR4, {}},
- {"trcsspcicr5", TRCSSPCICR5, {}},
- {"trcsspcicr6", TRCSSPCICR6, {}},
- {"trcsspcicr7", TRCSSPCICR7, {}},
- {"trcpdcr", TRCPDCR, {}},
- {"trcacvr0", TRCACVR0, {}},
- {"trcacvr1", TRCACVR1, {}},
- {"trcacvr2", TRCACVR2, {}},
- {"trcacvr3", TRCACVR3, {}},
- {"trcacvr4", TRCACVR4, {}},
- {"trcacvr5", TRCACVR5, {}},
- {"trcacvr6", TRCACVR6, {}},
- {"trcacvr7", TRCACVR7, {}},
- {"trcacvr8", TRCACVR8, {}},
- {"trcacvr9", TRCACVR9, {}},
- {"trcacvr10", TRCACVR10, {}},
- {"trcacvr11", TRCACVR11, {}},
- {"trcacvr12", TRCACVR12, {}},
- {"trcacvr13", TRCACVR13, {}},
- {"trcacvr14", TRCACVR14, {}},
- {"trcacvr15", TRCACVR15, {}},
- {"trcacatr0", TRCACATR0, {}},
- {"trcacatr1", TRCACATR1, {}},
- {"trcacatr2", TRCACATR2, {}},
- {"trcacatr3", TRCACATR3, {}},
- {"trcacatr4", TRCACATR4, {}},
- {"trcacatr5", TRCACATR5, {}},
- {"trcacatr6", TRCACATR6, {}},
- {"trcacatr7", TRCACATR7, {}},
- {"trcacatr8", TRCACATR8, {}},
- {"trcacatr9", TRCACATR9, {}},
- {"trcacatr10", TRCACATR10, {}},
- {"trcacatr11", TRCACATR11, {}},
- {"trcacatr12", TRCACATR12, {}},
- {"trcacatr13", TRCACATR13, {}},
- {"trcacatr14", TRCACATR14, {}},
- {"trcacatr15", TRCACATR15, {}},
- {"trcdvcvr0", TRCDVCVR0, {}},
- {"trcdvcvr1", TRCDVCVR1, {}},
- {"trcdvcvr2", TRCDVCVR2, {}},
- {"trcdvcvr3", TRCDVCVR3, {}},
- {"trcdvcvr4", TRCDVCVR4, {}},
- {"trcdvcvr5", TRCDVCVR5, {}},
- {"trcdvcvr6", TRCDVCVR6, {}},
- {"trcdvcvr7", TRCDVCVR7, {}},
- {"trcdvcmr0", TRCDVCMR0, {}},
- {"trcdvcmr1", TRCDVCMR1, {}},
- {"trcdvcmr2", TRCDVCMR2, {}},
- {"trcdvcmr3", TRCDVCMR3, {}},
- {"trcdvcmr4", TRCDVCMR4, {}},
- {"trcdvcmr5", TRCDVCMR5, {}},
- {"trcdvcmr6", TRCDVCMR6, {}},
- {"trcdvcmr7", TRCDVCMR7, {}},
- {"trccidcvr0", TRCCIDCVR0, {}},
- {"trccidcvr1", TRCCIDCVR1, {}},
- {"trccidcvr2", TRCCIDCVR2, {}},
- {"trccidcvr3", TRCCIDCVR3, {}},
- {"trccidcvr4", TRCCIDCVR4, {}},
- {"trccidcvr5", TRCCIDCVR5, {}},
- {"trccidcvr6", TRCCIDCVR6, {}},
- {"trccidcvr7", TRCCIDCVR7, {}},
- {"trcvmidcvr0", TRCVMIDCVR0, {}},
- {"trcvmidcvr1", TRCVMIDCVR1, {}},
- {"trcvmidcvr2", TRCVMIDCVR2, {}},
- {"trcvmidcvr3", TRCVMIDCVR3, {}},
- {"trcvmidcvr4", TRCVMIDCVR4, {}},
- {"trcvmidcvr5", TRCVMIDCVR5, {}},
- {"trcvmidcvr6", TRCVMIDCVR6, {}},
- {"trcvmidcvr7", TRCVMIDCVR7, {}},
- {"trccidcctlr0", TRCCIDCCTLR0, {}},
- {"trccidcctlr1", TRCCIDCCTLR1, {}},
- {"trcvmidcctlr0", TRCVMIDCCTLR0, {}},
- {"trcvmidcctlr1", TRCVMIDCCTLR1, {}},
- {"trcitctrl", TRCITCTRL, {}},
- {"trcclaimset", TRCCLAIMSET, {}},
- {"trcclaimclr", TRCCLAIMCLR, {}},
-
- // GICv3 registers
- {"icc_bpr1_el1", ICC_BPR1_EL1, {}},
- {"icc_bpr0_el1", ICC_BPR0_EL1, {}},
- {"icc_pmr_el1", ICC_PMR_EL1, {}},
- {"icc_ctlr_el1", ICC_CTLR_EL1, {}},
- {"icc_ctlr_el3", ICC_CTLR_EL3, {}},
- {"icc_sre_el1", ICC_SRE_EL1, {}},
- {"icc_sre_el2", ICC_SRE_EL2, {}},
- {"icc_sre_el3", ICC_SRE_EL3, {}},
- {"icc_igrpen0_el1", ICC_IGRPEN0_EL1, {}},
- {"icc_igrpen1_el1", ICC_IGRPEN1_EL1, {}},
- {"icc_igrpen1_el3", ICC_IGRPEN1_EL3, {}},
- {"icc_seien_el1", ICC_SEIEN_EL1, {}},
- {"icc_ap0r0_el1", ICC_AP0R0_EL1, {}},
- {"icc_ap0r1_el1", ICC_AP0R1_EL1, {}},
- {"icc_ap0r2_el1", ICC_AP0R2_EL1, {}},
- {"icc_ap0r3_el1", ICC_AP0R3_EL1, {}},
- {"icc_ap1r0_el1", ICC_AP1R0_EL1, {}},
- {"icc_ap1r1_el1", ICC_AP1R1_EL1, {}},
- {"icc_ap1r2_el1", ICC_AP1R2_EL1, {}},
- {"icc_ap1r3_el1", ICC_AP1R3_EL1, {}},
- {"ich_ap0r0_el2", ICH_AP0R0_EL2, {}},
- {"ich_ap0r1_el2", ICH_AP0R1_EL2, {}},
- {"ich_ap0r2_el2", ICH_AP0R2_EL2, {}},
- {"ich_ap0r3_el2", ICH_AP0R3_EL2, {}},
- {"ich_ap1r0_el2", ICH_AP1R0_EL2, {}},
- {"ich_ap1r1_el2", ICH_AP1R1_EL2, {}},
- {"ich_ap1r2_el2", ICH_AP1R2_EL2, {}},
- {"ich_ap1r3_el2", ICH_AP1R3_EL2, {}},
- {"ich_hcr_el2", ICH_HCR_EL2, {}},
- {"ich_misr_el2", ICH_MISR_EL2, {}},
- {"ich_vmcr_el2", ICH_VMCR_EL2, {}},
- {"ich_vseir_el2", ICH_VSEIR_EL2, {}},
- {"ich_lr0_el2", ICH_LR0_EL2, {}},
- {"ich_lr1_el2", ICH_LR1_EL2, {}},
- {"ich_lr2_el2", ICH_LR2_EL2, {}},
- {"ich_lr3_el2", ICH_LR3_EL2, {}},
- {"ich_lr4_el2", ICH_LR4_EL2, {}},
- {"ich_lr5_el2", ICH_LR5_EL2, {}},
- {"ich_lr6_el2", ICH_LR6_EL2, {}},
- {"ich_lr7_el2", ICH_LR7_EL2, {}},
- {"ich_lr8_el2", ICH_LR8_EL2, {}},
- {"ich_lr9_el2", ICH_LR9_EL2, {}},
- {"ich_lr10_el2", ICH_LR10_EL2, {}},
- {"ich_lr11_el2", ICH_LR11_EL2, {}},
- {"ich_lr12_el2", ICH_LR12_EL2, {}},
- {"ich_lr13_el2", ICH_LR13_EL2, {}},
- {"ich_lr14_el2", ICH_LR14_EL2, {}},
- {"ich_lr15_el2", ICH_LR15_EL2, {}},
-
- // Cyclone registers
- {"cpm_ioacc_ctl_el3", CPM_IOACC_CTL_EL3, {AArch64::ProcCyclone}},
-
- // v8.1a "Privileged Access Never" extension-specific system registers
- {"pan", PAN, {AArch64::HasV8_1aOps}},
-
- // v8.1a "Limited Ordering Regions" extension-specific system registers
- {"lorsa_el1", LORSA_EL1, {AArch64::HasV8_1aOps}},
- {"lorea_el1", LOREA_EL1, {AArch64::HasV8_1aOps}},
- {"lorn_el1", LORN_EL1, {AArch64::HasV8_1aOps}},
- {"lorc_el1", LORC_EL1, {AArch64::HasV8_1aOps}},
-
- // v8.1a "Virtualization host extensions" system registers
- {"ttbr1_el2", TTBR1_EL2, {AArch64::HasV8_1aOps}},
- {"contextidr_el2", CONTEXTIDR_EL2, {AArch64::HasV8_1aOps}},
- {"cnthv_tval_el2", CNTHV_TVAL_EL2, {AArch64::HasV8_1aOps}},
- {"cnthv_cval_el2", CNTHV_CVAL_EL2, {AArch64::HasV8_1aOps}},
- {"cnthv_ctl_el2", CNTHV_CTL_EL2, {AArch64::HasV8_1aOps}},
- {"sctlr_el12", SCTLR_EL12, {AArch64::HasV8_1aOps}},
- {"cpacr_el12", CPACR_EL12, {AArch64::HasV8_1aOps}},
- {"ttbr0_el12", TTBR0_EL12, {AArch64::HasV8_1aOps}},
- {"ttbr1_el12", TTBR1_EL12, {AArch64::HasV8_1aOps}},
- {"tcr_el12", TCR_EL12, {AArch64::HasV8_1aOps}},
- {"afsr0_el12", AFSR0_EL12, {AArch64::HasV8_1aOps}},
- {"afsr1_el12", AFSR1_EL12, {AArch64::HasV8_1aOps}},
- {"esr_el12", ESR_EL12, {AArch64::HasV8_1aOps}},
- {"far_el12", FAR_EL12, {AArch64::HasV8_1aOps}},
- {"mair_el12", MAIR_EL12, {AArch64::HasV8_1aOps}},
- {"amair_el12", AMAIR_EL12, {AArch64::HasV8_1aOps}},
- {"vbar_el12", VBAR_EL12, {AArch64::HasV8_1aOps}},
- {"contextidr_el12", CONTEXTIDR_EL12, {AArch64::HasV8_1aOps}},
- {"cntkctl_el12", CNTKCTL_EL12, {AArch64::HasV8_1aOps}},
- {"cntp_tval_el02", CNTP_TVAL_EL02, {AArch64::HasV8_1aOps}},
- {"cntp_ctl_el02", CNTP_CTL_EL02, {AArch64::HasV8_1aOps}},
- {"cntp_cval_el02", CNTP_CVAL_EL02, {AArch64::HasV8_1aOps}},
- {"cntv_tval_el02", CNTV_TVAL_EL02, {AArch64::HasV8_1aOps}},
- {"cntv_ctl_el02", CNTV_CTL_EL02, {AArch64::HasV8_1aOps}},
- {"cntv_cval_el02", CNTV_CVAL_EL02, {AArch64::HasV8_1aOps}},
- {"spsr_el12", SPSR_EL12, {AArch64::HasV8_1aOps}},
- {"elr_el12", ELR_EL12, {AArch64::HasV8_1aOps}},
-
- // v8.2a registers
- {"uao", UAO, {AArch64::HasV8_2aOps}},
-
- // v8.2a "Statistical Profiling extension" registers
- {"pmblimitr_el1", PMBLIMITR_EL1, {AArch64::FeatureSPE}},
- {"pmbptr_el1", PMBPTR_EL1, {AArch64::FeatureSPE}},
- {"pmbsr_el1", PMBSR_EL1, {AArch64::FeatureSPE}},
- {"pmbidr_el1", PMBIDR_EL1, {AArch64::FeatureSPE}},
- {"pmscr_el2", PMSCR_EL2, {AArch64::FeatureSPE}},
- {"pmscr_el12", PMSCR_EL12, {AArch64::FeatureSPE}},
- {"pmscr_el1", PMSCR_EL1, {AArch64::FeatureSPE}},
- {"pmsicr_el1", PMSICR_EL1, {AArch64::FeatureSPE}},
- {"pmsirr_el1", PMSIRR_EL1, {AArch64::FeatureSPE}},
- {"pmsfcr_el1", PMSFCR_EL1, {AArch64::FeatureSPE}},
- {"pmsevfr_el1", PMSEVFR_EL1, {AArch64::FeatureSPE}},
- {"pmslatfr_el1", PMSLATFR_EL1, {AArch64::FeatureSPE}},
- {"pmsidr_el1", PMSIDR_EL1, {AArch64::FeatureSPE}},
-};
-
-uint32_t
-AArch64SysReg::SysRegMapper::fromString(StringRef Name,
- const FeatureBitset& FeatureBits, bool &Valid) const {
- std::string NameLower = Name.lower();
-
- // First search the registers shared by all
- for (unsigned i = 0; i < array_lengthof(SysRegMappings); ++i) {
- if (SysRegMappings[i].isNameEqual(NameLower, FeatureBits)) {
- Valid = true;
- return SysRegMappings[i].Value;
- }
+namespace llvm {
+ namespace AArch64PSBHint {
+#define GET_PSB_IMPL
+#include "AArch64GenSystemOperands.inc"
}
+}
- // Now try the instruction-specific registers (either read-only or
- // write-only).
- for (unsigned i = 0; i < NumInstMappings; ++i) {
- if (InstMappings[i].isNameEqual(NameLower, FeatureBits)) {
- Valid = true;
- return InstMappings[i].Value;
- }
+namespace llvm {
+ namespace AArch64SysReg {
+#define GET_SYSREG_IMPL
+#include "AArch64GenSystemOperands.inc"
}
+}
+uint32_t AArch64SysReg::parseGenericRegister(StringRef Name) {
// Try to parse an S<op0>_<op1>_<Cn>_<Cm>_<op2> register name
- Regex GenericRegPattern("^s([0-3])_([0-7])_c([0-9]|1[0-5])_c([0-9]|1[0-5])_([0-7])$");
+ Regex GenericRegPattern("^S([0-3])_([0-7])_C([0-9]|1[0-5])_C([0-9]|1[0-5])_([0-7])$");
+ std::string UpperName = Name.upper();
SmallVector<StringRef, 5> Ops;
- if (!GenericRegPattern.match(NameLower, &Ops)) {
- Valid = false;
+ if (!GenericRegPattern.match(UpperName, &Ops))
return -1;
- }
uint32_t Op0 = 0, Op1 = 0, CRn = 0, CRm = 0, Op2 = 0;
uint32_t Bits;
@@ -873,28 +99,10 @@ AArch64SysReg::SysRegMapper::fromString(StringRef Name,
Ops[5].getAsInteger(10, Op2);
Bits = (Op0 << 14) | (Op1 << 11) | (CRn << 7) | (CRm << 3) | Op2;
- Valid = true;
return Bits;
}
-std::string
-AArch64SysReg::SysRegMapper::toString(uint32_t Bits,
- const FeatureBitset& FeatureBits) const {
- // First search the registers shared by all
- for (unsigned i = 0; i < array_lengthof(SysRegMappings); ++i) {
- if (SysRegMappings[i].isValueEqual(Bits, FeatureBits)) {
- return SysRegMappings[i].Name;
- }
- }
-
- // Now try the instruction-specific registers (either read-only or
- // write-only).
- for (unsigned i = 0; i < NumInstMappings; ++i) {
- if (InstMappings[i].isValueEqual(Bits, FeatureBits)) {
- return InstMappings[i].Name;
- }
- }
-
+std::string AArch64SysReg::genericRegisterString(uint32_t Bits) {
assert(Bits < 0x10000);
uint32_t Op0 = (Bits >> 14) & 0x3;
uint32_t Op1 = (Bits >> 11) & 0x7;
@@ -902,44 +110,13 @@ AArch64SysReg::SysRegMapper::toString(uint32_t Bits,
uint32_t CRm = (Bits >> 3) & 0xf;
uint32_t Op2 = Bits & 0x7;
- return "s" + utostr(Op0)+ "_" + utostr(Op1) + "_c" + utostr(CRn)
- + "_c" + utostr(CRm) + "_" + utostr(Op2);
+ return "S" + utostr(Op0) + "_" + utostr(Op1) + "_C" + utostr(CRn) + "_C" +
+ utostr(CRm) + "_" + utostr(Op2);
}
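
The two helpers above are exact inverses: parseGenericRegister() packs the
five fields of an S<op0>_<op1>_C<n>_C<m>_<op2> name into a 16-bit value, and
genericRegisterString() unpacks it again. A standalone sanity check follows;
it is not part of this patch, the packing is simply restated from the code
above:

    #include <cassert>
    #include <cstdint>

    // Field widths: Op0:2 bits, Op1:3, CRn:4, CRm:4, Op2:3 -> 16 bits.
    static uint32_t packSysReg(uint32_t Op0, uint32_t Op1, uint32_t CRn,
                               uint32_t CRm, uint32_t Op2) {
      return (Op0 << 14) | (Op1 << 11) | (CRn << 7) | (CRm << 3) | Op2;
    }

    int main() {
      // "S3_0_C4_C2_3" packs to 0xc213, the same encoding the deleted
      // table listed for PAN (op0=11, op1=000, CRn=0100, CRm=0010, op2=011).
      assert(packSysReg(3, 0, 4, 2, 3) == 0xc213);
      return 0;
    }
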
-const AArch64NamedImmMapper::Mapping AArch64TLBI::TLBIMapper::TLBIMappings[] = {
- {"ipas2e1is", IPAS2E1IS, {}},
- {"ipas2le1is", IPAS2LE1IS, {}},
- {"vmalle1is", VMALLE1IS, {}},
- {"alle2is", ALLE2IS, {}},
- {"alle3is", ALLE3IS, {}},
- {"vae1is", VAE1IS, {}},
- {"vae2is", VAE2IS, {}},
- {"vae3is", VAE3IS, {}},
- {"aside1is", ASIDE1IS, {}},
- {"vaae1is", VAAE1IS, {}},
- {"alle1is", ALLE1IS, {}},
- {"vale1is", VALE1IS, {}},
- {"vale2is", VALE2IS, {}},
- {"vale3is", VALE3IS, {}},
- {"vmalls12e1is", VMALLS12E1IS, {}},
- {"vaale1is", VAALE1IS, {}},
- {"ipas2e1", IPAS2E1, {}},
- {"ipas2le1", IPAS2LE1, {}},
- {"vmalle1", VMALLE1, {}},
- {"alle2", ALLE2, {}},
- {"alle3", ALLE3, {}},
- {"vae1", VAE1, {}},
- {"vae2", VAE2, {}},
- {"vae3", VAE3, {}},
- {"aside1", ASIDE1, {}},
- {"vaae1", VAAE1, {}},
- {"alle1", ALLE1, {}},
- {"vale1", VALE1, {}},
- {"vale2", VALE2, {}},
- {"vale3", VALE3, {}},
- {"vmalls12e1", VMALLS12E1, {}},
- {"vaale1", VAALE1, {}}
-};
-
-AArch64TLBI::TLBIMapper::TLBIMapper()
- : AArch64NamedImmMapper(TLBIMappings, 0) {}
+namespace llvm {
+ namespace AArch64TLBI {
+#define GET_TLBI_IMPL
+#include "AArch64GenSystemOperands.inc"
+ }
+}
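
Each GET_*_IMPL block above pulls table definitions and lookup functions out
of AArch64GenSystemOperands.inc, which TableGen emits from the new
AArch64SystemOperands.td; the generated file itself is not part of this
diff. As a rough sketch of the shape of that code (the entry values are
taken from the tables deleted above, but the table and function names are
assumptions about the generator's output):

    // Inside namespace llvm::AArch64TLBI; analogous blocks exist for the
    // other operand kinds. NeedsReg mirrors the old NeedsRegister() switch.
    const TLBI TLBIsList[] = {
        {"ipas2e1is", 0x6401, true},  // needs an Xt operand
        {"vmalle1is", 0x4418, false}, // takes no register
        // ... one row per TLBI operation ...
    };

    const TLBI *lookupTLBIByName(StringRef Name) {
      for (const TLBI &E : TLBIsList)  // sketch only: the real generated
        if (Name.equals_lower(E.Name)) // code searches a sorted index
          return &E;
      return nullptr;
    }
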
diff --git a/lib/Target/AArch64/Utils/AArch64BaseInfo.h b/lib/Target/AArch64/Utils/AArch64BaseInfo.h
index e63627eae123..dcc39176031c 100644
--- a/lib/Target/AArch64/Utils/AArch64BaseInfo.h
+++ b/lib/Target/AArch64/Utils/AArch64BaseInfo.h
@@ -266,231 +266,85 @@ inline static unsigned getNZCVToSatisfyCondCode(CondCode Code) {
}
} // end namespace AArch64CC
-/// Instances of this class can perform bidirectional mapping from random
-/// identifier strings to operand encodings. For example "MSR" takes a named
-/// system-register which must be encoded somehow and decoded for printing. This
-/// central location means that the information for those transformations is not
-/// duplicated and remains in sync.
-///
-/// FIXME: currently the algorithm is a completely unoptimised linear
-/// search. Obviously this could be improved, but we would probably want to work
-/// out just how often these instructions are emitted before working on it. It
-/// might even be optimal to just reorder the tables for the common instructions
-/// rather than changing the algorithm.
-struct AArch64NamedImmMapper {
- struct Mapping {
+namespace AArch64AT{
+ struct AT {
const char *Name;
- uint32_t Value;
- // Set of features this mapping is available for
- // Zero value of FeatureBitSet means the mapping is always available
- FeatureBitset FeatureBitSet;
-
- bool isNameEqual(std::string Other,
- const FeatureBitset& FeatureBits) const {
- if (FeatureBitSet.any() &&
- (FeatureBitSet & FeatureBits).none())
- return false;
- return Name == Other;
- }
-
- bool isValueEqual(uint32_t Other,
- const FeatureBitset& FeatureBits) const {
- if (FeatureBitSet.any() &&
- (FeatureBitSet & FeatureBits).none())
- return false;
- return Value == Other;
- }
- };
-
- template<int N>
- AArch64NamedImmMapper(const Mapping (&Mappings)[N], uint32_t TooBigImm)
- : Mappings(&Mappings[0]), NumMappings(N), TooBigImm(TooBigImm) {}
-
- // Maps value to string, depending on availability for FeatureBits given
- StringRef toString(uint32_t Value, const FeatureBitset& FeatureBits,
- bool &Valid) const;
- // Maps string to value, depending on availability for FeatureBits given
- uint32_t fromString(StringRef Name, const FeatureBitset& FeatureBits,
- bool &Valid) const;
-
- /// Many of the instructions allow an alternative assembly form consisting of
- /// a simple immediate. Currently the only valid forms are ranges [0, N) where
- /// N being 0 indicates no immediate syntax-form is allowed.
- bool validImm(uint32_t Value) const;
-protected:
- const Mapping *Mappings;
- size_t NumMappings;
- uint32_t TooBigImm;
-};
-
-namespace AArch64AT {
- enum ATValues {
- Invalid = -1, // Op0 Op1 CRn CRm Op2
- S1E1R = 0x43c0, // 01 000 0111 1000 000
- S1E2R = 0x63c0, // 01 100 0111 1000 000
- S1E3R = 0x73c0, // 01 110 0111 1000 000
- S1E1W = 0x43c1, // 01 000 0111 1000 001
- S1E2W = 0x63c1, // 01 100 0111 1000 001
- S1E3W = 0x73c1, // 01 110 0111 1000 001
- S1E0R = 0x43c2, // 01 000 0111 1000 010
- S1E0W = 0x43c3, // 01 000 0111 1000 011
- S12E1R = 0x63c4, // 01 100 0111 1000 100
- S12E1W = 0x63c5, // 01 100 0111 1000 101
- S12E0R = 0x63c6, // 01 100 0111 1000 110
- S12E0W = 0x63c7, // 01 100 0111 1000 111
- S1E1RP = 0x43c8, // 01 000 0111 1001 000
- S1E1WP = 0x43c9 // 01 000 0111 1001 001
+ uint16_t Encoding;
};
- struct ATMapper : AArch64NamedImmMapper {
- const static Mapping ATMappings[];
-
- ATMapper();
- };
+ #define GET_AT_DECL
+ #include "AArch64GenSystemOperands.inc"
}
namespace AArch64DB {
- enum DBValues {
- Invalid = -1,
- OSHLD = 0x1,
- OSHST = 0x2,
- OSH = 0x3,
- NSHLD = 0x5,
- NSHST = 0x6,
- NSH = 0x7,
- ISHLD = 0x9,
- ISHST = 0xa,
- ISH = 0xb,
- LD = 0xd,
- ST = 0xe,
- SY = 0xf
+ struct DB {
+ const char *Name;
+ uint16_t Encoding;
};
- struct DBarrierMapper : AArch64NamedImmMapper {
- const static Mapping DBarrierMappings[];
-
- DBarrierMapper();
- };
+ #define GET_DB_DECL
+ #include "AArch64GenSystemOperands.inc"
}
namespace AArch64DC {
- enum DCValues {
- Invalid = -1, // Op1 CRn CRm Op2
- ZVA = 0x5ba1, // 01 011 0111 0100 001
- IVAC = 0x43b1, // 01 000 0111 0110 001
- ISW = 0x43b2, // 01 000 0111 0110 010
- CVAC = 0x5bd1, // 01 011 0111 1010 001
- CSW = 0x43d2, // 01 000 0111 1010 010
- CVAU = 0x5bd9, // 01 011 0111 1011 001
- CIVAC = 0x5bf1, // 01 011 0111 1110 001
- CISW = 0x43f2 // 01 000 0111 1110 010
- };
-
- struct DCMapper : AArch64NamedImmMapper {
- const static Mapping DCMappings[];
-
- DCMapper();
+ struct DC {
+ const char *Name;
+ uint16_t Encoding;
};
+ #define GET_DC_DECL
+ #include "AArch64GenSystemOperands.inc"
}
namespace AArch64IC {
- enum ICValues {
- Invalid = -1, // Op1 CRn CRm Op2
- IALLUIS = 0x0388, // 000 0111 0001 000
- IALLU = 0x03a8, // 000 0111 0101 000
- IVAU = 0x1ba9 // 011 0111 0101 001
- };
-
-
- struct ICMapper : AArch64NamedImmMapper {
- const static Mapping ICMappings[];
-
- ICMapper();
+ struct IC {
+ const char *Name;
+ uint16_t Encoding;
+ bool NeedsReg;
};
-
- static inline bool NeedsRegister(ICValues Val) {
- return Val == IVAU;
- }
+ #define GET_IC_DECL
+ #include "AArch64GenSystemOperands.inc"
}
namespace AArch64ISB {
- enum ISBValues {
- Invalid = -1,
- SY = 0xf
- };
- struct ISBMapper : AArch64NamedImmMapper {
- const static Mapping ISBMappings[];
-
- ISBMapper();
+ struct ISB {
+ const char *Name;
+ uint16_t Encoding;
};
+ #define GET_ISB_DECL
+ #include "AArch64GenSystemOperands.inc"
}
namespace AArch64PRFM {
- enum PRFMValues {
- Invalid = -1,
- PLDL1KEEP = 0x00,
- PLDL1STRM = 0x01,
- PLDL2KEEP = 0x02,
- PLDL2STRM = 0x03,
- PLDL3KEEP = 0x04,
- PLDL3STRM = 0x05,
- PLIL1KEEP = 0x08,
- PLIL1STRM = 0x09,
- PLIL2KEEP = 0x0a,
- PLIL2STRM = 0x0b,
- PLIL3KEEP = 0x0c,
- PLIL3STRM = 0x0d,
- PSTL1KEEP = 0x10,
- PSTL1STRM = 0x11,
- PSTL2KEEP = 0x12,
- PSTL2STRM = 0x13,
- PSTL3KEEP = 0x14,
- PSTL3STRM = 0x15
- };
-
- struct PRFMMapper : AArch64NamedImmMapper {
- const static Mapping PRFMMappings[];
-
- PRFMMapper();
+ struct PRFM {
+ const char *Name;
+ uint16_t Encoding;
};
+ #define GET_PRFM_DECL
+ #include "AArch64GenSystemOperands.inc"
}
namespace AArch64PState {
- enum PStateValues {
- Invalid = -1,
- SPSel = 0x05,
- DAIFSet = 0x1e,
- DAIFClr = 0x1f,
-
- // v8.1a "Privileged Access Never" extension-specific PStates
- PAN = 0x04,
-
- // v8.2a "User Access Override" extension-specific PStates
- UAO = 0x03
- };
-
- struct PStateMapper : AArch64NamedImmMapper {
- const static Mapping PStateMappings[];
+ struct PState {
+ const char *Name;
+ uint16_t Encoding;
+ FeatureBitset FeaturesRequired;
- PStateMapper();
+ bool haveFeatures(FeatureBitset ActiveFeatures) const {
+ return (FeaturesRequired & ActiveFeatures) == FeaturesRequired;
+ }
};
-
+ #define GET_PSTATE_DECL
+ #include "AArch64GenSystemOperands.inc"
}
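
Note the semantic change hidden in haveFeatures(): the new check is a subset
test, so every required feature must be active, whereas the old
Mapping::isNameEqual() accepted any non-empty intersection with the
mapping's feature set. A minimal illustration of the subset test:

    #include "llvm/MC/SubtargetFeature.h"
    using llvm::FeatureBitset;

    // Bit indices below are stand-ins for real AArch64::* feature values.
    bool demo() {
      FeatureBitset Required;
      Required.set(1);               // pretend: AArch64::HasV8_1aOps
      FeatureBitset Active;
      Active.set(1);                 // the subtarget has v8.1a...
      Active.set(7);                 // ...plus an unrelated feature
      return (Required & Active) == Required; // true: all required bits set
    }
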
namespace AArch64PSBHint {
- enum PSBHintValues {
- Invalid = -1,
- // v8.2a "Statistical Profiling" extension-specific PSB operands
- CSync = 0x11, // psb csync = hint #0x11
- };
-
- struct PSBHintMapper : AArch64NamedImmMapper {
- const static Mapping PSBHintMappings[];
-
- PSBHintMapper();
+ struct PSB {
+ const char *Name;
+ uint16_t Encoding;
};
-
+ #define GET_PSB_DECL
+ #include "AArch64GenSystemOperands.inc"
}
namespace AArch64SE {
@@ -574,754 +428,36 @@ AArch64StringToVectorLayout(StringRef LayoutStr) {
}
namespace AArch64SysReg {
- enum SysRegROValues {
- MDCCSR_EL0 = 0x9808, // 10 011 0000 0001 000
- DBGDTRRX_EL0 = 0x9828, // 10 011 0000 0101 000
- MDRAR_EL1 = 0x8080, // 10 000 0001 0000 000
- OSLSR_EL1 = 0x808c, // 10 000 0001 0001 100
- DBGAUTHSTATUS_EL1 = 0x83f6, // 10 000 0111 1110 110
- PMCEID0_EL0 = 0xdce6, // 11 011 1001 1100 110
- PMCEID1_EL0 = 0xdce7, // 11 011 1001 1100 111
- MIDR_EL1 = 0xc000, // 11 000 0000 0000 000
- CCSIDR_EL1 = 0xc800, // 11 001 0000 0000 000
- CLIDR_EL1 = 0xc801, // 11 001 0000 0000 001
- CTR_EL0 = 0xd801, // 11 011 0000 0000 001
- MPIDR_EL1 = 0xc005, // 11 000 0000 0000 101
- REVIDR_EL1 = 0xc006, // 11 000 0000 0000 110
- AIDR_EL1 = 0xc807, // 11 001 0000 0000 111
- DCZID_EL0 = 0xd807, // 11 011 0000 0000 111
- ID_PFR0_EL1 = 0xc008, // 11 000 0000 0001 000
- ID_PFR1_EL1 = 0xc009, // 11 000 0000 0001 001
- ID_DFR0_EL1 = 0xc00a, // 11 000 0000 0001 010
- ID_AFR0_EL1 = 0xc00b, // 11 000 0000 0001 011
- ID_MMFR0_EL1 = 0xc00c, // 11 000 0000 0001 100
- ID_MMFR1_EL1 = 0xc00d, // 11 000 0000 0001 101
- ID_MMFR2_EL1 = 0xc00e, // 11 000 0000 0001 110
- ID_MMFR3_EL1 = 0xc00f, // 11 000 0000 0001 111
- ID_ISAR0_EL1 = 0xc010, // 11 000 0000 0010 000
- ID_ISAR1_EL1 = 0xc011, // 11 000 0000 0010 001
- ID_ISAR2_EL1 = 0xc012, // 11 000 0000 0010 010
- ID_ISAR3_EL1 = 0xc013, // 11 000 0000 0010 011
- ID_ISAR4_EL1 = 0xc014, // 11 000 0000 0010 100
- ID_ISAR5_EL1 = 0xc015, // 11 000 0000 0010 101
- ID_A64PFR0_EL1 = 0xc020, // 11 000 0000 0100 000
- ID_A64PFR1_EL1 = 0xc021, // 11 000 0000 0100 001
- ID_A64DFR0_EL1 = 0xc028, // 11 000 0000 0101 000
- ID_A64DFR1_EL1 = 0xc029, // 11 000 0000 0101 001
- ID_A64AFR0_EL1 = 0xc02c, // 11 000 0000 0101 100
- ID_A64AFR1_EL1 = 0xc02d, // 11 000 0000 0101 101
- ID_A64ISAR0_EL1 = 0xc030, // 11 000 0000 0110 000
- ID_A64ISAR1_EL1 = 0xc031, // 11 000 0000 0110 001
- ID_A64MMFR0_EL1 = 0xc038, // 11 000 0000 0111 000
- ID_A64MMFR1_EL1 = 0xc039, // 11 000 0000 0111 001
- ID_A64MMFR2_EL1 = 0xc03a, // 11 000 0000 0111 010
- MVFR0_EL1 = 0xc018, // 11 000 0000 0011 000
- MVFR1_EL1 = 0xc019, // 11 000 0000 0011 001
- MVFR2_EL1 = 0xc01a, // 11 000 0000 0011 010
- RVBAR_EL1 = 0xc601, // 11 000 1100 0000 001
- RVBAR_EL2 = 0xe601, // 11 100 1100 0000 001
- RVBAR_EL3 = 0xf601, // 11 110 1100 0000 001
- ISR_EL1 = 0xc608, // 11 000 1100 0001 000
- CNTPCT_EL0 = 0xdf01, // 11 011 1110 0000 001
- CNTVCT_EL0 = 0xdf02, // 11 011 1110 0000 010
- ID_MMFR4_EL1 = 0xc016, // 11 000 0000 0010 110
-
- // Trace registers
- TRCSTATR = 0x8818, // 10 001 0000 0011 000
- TRCIDR8 = 0x8806, // 10 001 0000 0000 110
- TRCIDR9 = 0x880e, // 10 001 0000 0001 110
- TRCIDR10 = 0x8816, // 10 001 0000 0010 110
- TRCIDR11 = 0x881e, // 10 001 0000 0011 110
- TRCIDR12 = 0x8826, // 10 001 0000 0100 110
- TRCIDR13 = 0x882e, // 10 001 0000 0101 110
- TRCIDR0 = 0x8847, // 10 001 0000 1000 111
- TRCIDR1 = 0x884f, // 10 001 0000 1001 111
- TRCIDR2 = 0x8857, // 10 001 0000 1010 111
- TRCIDR3 = 0x885f, // 10 001 0000 1011 111
- TRCIDR4 = 0x8867, // 10 001 0000 1100 111
- TRCIDR5 = 0x886f, // 10 001 0000 1101 111
- TRCIDR6 = 0x8877, // 10 001 0000 1110 111
- TRCIDR7 = 0x887f, // 10 001 0000 1111 111
- TRCOSLSR = 0x888c, // 10 001 0001 0001 100
- TRCPDSR = 0x88ac, // 10 001 0001 0101 100
- TRCDEVAFF0 = 0x8bd6, // 10 001 0111 1010 110
- TRCDEVAFF1 = 0x8bde, // 10 001 0111 1011 110
- TRCLSR = 0x8bee, // 10 001 0111 1101 110
- TRCAUTHSTATUS = 0x8bf6, // 10 001 0111 1110 110
- TRCDEVARCH = 0x8bfe, // 10 001 0111 1111 110
- TRCDEVID = 0x8b97, // 10 001 0111 0010 111
- TRCDEVTYPE = 0x8b9f, // 10 001 0111 0011 111
- TRCPIDR4 = 0x8ba7, // 10 001 0111 0100 111
- TRCPIDR5 = 0x8baf, // 10 001 0111 0101 111
- TRCPIDR6 = 0x8bb7, // 10 001 0111 0110 111
- TRCPIDR7 = 0x8bbf, // 10 001 0111 0111 111
- TRCPIDR0 = 0x8bc7, // 10 001 0111 1000 111
- TRCPIDR1 = 0x8bcf, // 10 001 0111 1001 111
- TRCPIDR2 = 0x8bd7, // 10 001 0111 1010 111
- TRCPIDR3 = 0x8bdf, // 10 001 0111 1011 111
- TRCCIDR0 = 0x8be7, // 10 001 0111 1100 111
- TRCCIDR1 = 0x8bef, // 10 001 0111 1101 111
- TRCCIDR2 = 0x8bf7, // 10 001 0111 1110 111
- TRCCIDR3 = 0x8bff, // 10 001 0111 1111 111
-
- // GICv3 registers
- ICC_IAR1_EL1 = 0xc660, // 11 000 1100 1100 000
- ICC_IAR0_EL1 = 0xc640, // 11 000 1100 1000 000
- ICC_HPPIR1_EL1 = 0xc662, // 11 000 1100 1100 010
- ICC_HPPIR0_EL1 = 0xc642, // 11 000 1100 1000 010
- ICC_RPR_EL1 = 0xc65b, // 11 000 1100 1011 011
- ICH_VTR_EL2 = 0xe659, // 11 100 1100 1011 001
- ICH_EISR_EL2 = 0xe65b, // 11 100 1100 1011 011
- ICH_ELSR_EL2 = 0xe65d // 11 100 1100 1011 101
- };
-
- enum SysRegWOValues {
- DBGDTRTX_EL0 = 0x9828, // 10 011 0000 0101 000
- OSLAR_EL1 = 0x8084, // 10 000 0001 0000 100
- PMSWINC_EL0 = 0xdce4, // 11 011 1001 1100 100
-
- // Trace Registers
- TRCOSLAR = 0x8884, // 10 001 0001 0000 100
- TRCLAR = 0x8be6, // 10 001 0111 1100 110
-
- // GICv3 registers
- ICC_EOIR1_EL1 = 0xc661, // 11 000 1100 1100 001
- ICC_EOIR0_EL1 = 0xc641, // 11 000 1100 1000 001
- ICC_DIR_EL1 = 0xc659, // 11 000 1100 1011 001
- ICC_SGI1R_EL1 = 0xc65d, // 11 000 1100 1011 101
- ICC_ASGI1R_EL1 = 0xc65e, // 11 000 1100 1011 110
- ICC_SGI0R_EL1 = 0xc65f // 11 000 1100 1011 111
- };
-
- enum SysRegValues {
- Invalid = -1, // Op0 Op1 CRn CRm Op2
- OSDTRRX_EL1 = 0x8002, // 10 000 0000 0000 010
- OSDTRTX_EL1 = 0x801a, // 10 000 0000 0011 010
- TEECR32_EL1 = 0x9000, // 10 010 0000 0000 000
- MDCCINT_EL1 = 0x8010, // 10 000 0000 0010 000
- MDSCR_EL1 = 0x8012, // 10 000 0000 0010 010
- DBGDTR_EL0 = 0x9820, // 10 011 0000 0100 000
- OSECCR_EL1 = 0x8032, // 10 000 0000 0110 010
- DBGVCR32_EL2 = 0xa038, // 10 100 0000 0111 000
- DBGBVR0_EL1 = 0x8004, // 10 000 0000 0000 100
- DBGBVR1_EL1 = 0x800c, // 10 000 0000 0001 100
- DBGBVR2_EL1 = 0x8014, // 10 000 0000 0010 100
- DBGBVR3_EL1 = 0x801c, // 10 000 0000 0011 100
- DBGBVR4_EL1 = 0x8024, // 10 000 0000 0100 100
- DBGBVR5_EL1 = 0x802c, // 10 000 0000 0101 100
- DBGBVR6_EL1 = 0x8034, // 10 000 0000 0110 100
- DBGBVR7_EL1 = 0x803c, // 10 000 0000 0111 100
- DBGBVR8_EL1 = 0x8044, // 10 000 0000 1000 100
- DBGBVR9_EL1 = 0x804c, // 10 000 0000 1001 100
- DBGBVR10_EL1 = 0x8054, // 10 000 0000 1010 100
- DBGBVR11_EL1 = 0x805c, // 10 000 0000 1011 100
- DBGBVR12_EL1 = 0x8064, // 10 000 0000 1100 100
- DBGBVR13_EL1 = 0x806c, // 10 000 0000 1101 100
- DBGBVR14_EL1 = 0x8074, // 10 000 0000 1110 100
- DBGBVR15_EL1 = 0x807c, // 10 000 0000 1111 100
- DBGBCR0_EL1 = 0x8005, // 10 000 0000 0000 101
- DBGBCR1_EL1 = 0x800d, // 10 000 0000 0001 101
- DBGBCR2_EL1 = 0x8015, // 10 000 0000 0010 101
- DBGBCR3_EL1 = 0x801d, // 10 000 0000 0011 101
- DBGBCR4_EL1 = 0x8025, // 10 000 0000 0100 101
- DBGBCR5_EL1 = 0x802d, // 10 000 0000 0101 101
- DBGBCR6_EL1 = 0x8035, // 10 000 0000 0110 101
- DBGBCR7_EL1 = 0x803d, // 10 000 0000 0111 101
- DBGBCR8_EL1 = 0x8045, // 10 000 0000 1000 101
- DBGBCR9_EL1 = 0x804d, // 10 000 0000 1001 101
- DBGBCR10_EL1 = 0x8055, // 10 000 0000 1010 101
- DBGBCR11_EL1 = 0x805d, // 10 000 0000 1011 101
- DBGBCR12_EL1 = 0x8065, // 10 000 0000 1100 101
- DBGBCR13_EL1 = 0x806d, // 10 000 0000 1101 101
- DBGBCR14_EL1 = 0x8075, // 10 000 0000 1110 101
- DBGBCR15_EL1 = 0x807d, // 10 000 0000 1111 101
- DBGWVR0_EL1 = 0x8006, // 10 000 0000 0000 110
- DBGWVR1_EL1 = 0x800e, // 10 000 0000 0001 110
- DBGWVR2_EL1 = 0x8016, // 10 000 0000 0010 110
- DBGWVR3_EL1 = 0x801e, // 10 000 0000 0011 110
- DBGWVR4_EL1 = 0x8026, // 10 000 0000 0100 110
- DBGWVR5_EL1 = 0x802e, // 10 000 0000 0101 110
- DBGWVR6_EL1 = 0x8036, // 10 000 0000 0110 110
- DBGWVR7_EL1 = 0x803e, // 10 000 0000 0111 110
- DBGWVR8_EL1 = 0x8046, // 10 000 0000 1000 110
- DBGWVR9_EL1 = 0x804e, // 10 000 0000 1001 110
- DBGWVR10_EL1 = 0x8056, // 10 000 0000 1010 110
- DBGWVR11_EL1 = 0x805e, // 10 000 0000 1011 110
- DBGWVR12_EL1 = 0x8066, // 10 000 0000 1100 110
- DBGWVR13_EL1 = 0x806e, // 10 000 0000 1101 110
- DBGWVR14_EL1 = 0x8076, // 10 000 0000 1110 110
- DBGWVR15_EL1 = 0x807e, // 10 000 0000 1111 110
- DBGWCR0_EL1 = 0x8007, // 10 000 0000 0000 111
- DBGWCR1_EL1 = 0x800f, // 10 000 0000 0001 111
- DBGWCR2_EL1 = 0x8017, // 10 000 0000 0010 111
- DBGWCR3_EL1 = 0x801f, // 10 000 0000 0011 111
- DBGWCR4_EL1 = 0x8027, // 10 000 0000 0100 111
- DBGWCR5_EL1 = 0x802f, // 10 000 0000 0101 111
- DBGWCR6_EL1 = 0x8037, // 10 000 0000 0110 111
- DBGWCR7_EL1 = 0x803f, // 10 000 0000 0111 111
- DBGWCR8_EL1 = 0x8047, // 10 000 0000 1000 111
- DBGWCR9_EL1 = 0x804f, // 10 000 0000 1001 111
- DBGWCR10_EL1 = 0x8057, // 10 000 0000 1010 111
- DBGWCR11_EL1 = 0x805f, // 10 000 0000 1011 111
- DBGWCR12_EL1 = 0x8067, // 10 000 0000 1100 111
- DBGWCR13_EL1 = 0x806f, // 10 000 0000 1101 111
- DBGWCR14_EL1 = 0x8077, // 10 000 0000 1110 111
- DBGWCR15_EL1 = 0x807f, // 10 000 0000 1111 111
- TEEHBR32_EL1 = 0x9080, // 10 010 0001 0000 000
- OSDLR_EL1 = 0x809c, // 10 000 0001 0011 100
- DBGPRCR_EL1 = 0x80a4, // 10 000 0001 0100 100
- DBGCLAIMSET_EL1 = 0x83c6, // 10 000 0111 1000 110
- DBGCLAIMCLR_EL1 = 0x83ce, // 10 000 0111 1001 110
- CSSELR_EL1 = 0xd000, // 11 010 0000 0000 000
- VPIDR_EL2 = 0xe000, // 11 100 0000 0000 000
- VMPIDR_EL2 = 0xe005, // 11 100 0000 0000 101
- CPACR_EL1 = 0xc082, // 11 000 0001 0000 010
- SCTLR_EL1 = 0xc080, // 11 000 0001 0000 000
- SCTLR_EL2 = 0xe080, // 11 100 0001 0000 000
- SCTLR_EL3 = 0xf080, // 11 110 0001 0000 000
- ACTLR_EL1 = 0xc081, // 11 000 0001 0000 001
- ACTLR_EL2 = 0xe081, // 11 100 0001 0000 001
- ACTLR_EL3 = 0xf081, // 11 110 0001 0000 001
- HCR_EL2 = 0xe088, // 11 100 0001 0001 000
- SCR_EL3 = 0xf088, // 11 110 0001 0001 000
- MDCR_EL2 = 0xe089, // 11 100 0001 0001 001
- SDER32_EL3 = 0xf089, // 11 110 0001 0001 001
- CPTR_EL2 = 0xe08a, // 11 100 0001 0001 010
- CPTR_EL3 = 0xf08a, // 11 110 0001 0001 010
- HSTR_EL2 = 0xe08b, // 11 100 0001 0001 011
- HACR_EL2 = 0xe08f, // 11 100 0001 0001 111
- MDCR_EL3 = 0xf099, // 11 110 0001 0011 001
- TTBR0_EL1 = 0xc100, // 11 000 0010 0000 000
- TTBR0_EL2 = 0xe100, // 11 100 0010 0000 000
- TTBR0_EL3 = 0xf100, // 11 110 0010 0000 000
- TTBR1_EL1 = 0xc101, // 11 000 0010 0000 001
- TCR_EL1 = 0xc102, // 11 000 0010 0000 010
- TCR_EL2 = 0xe102, // 11 100 0010 0000 010
- TCR_EL3 = 0xf102, // 11 110 0010 0000 010
- VTTBR_EL2 = 0xe108, // 11 100 0010 0001 000
- VTCR_EL2 = 0xe10a, // 11 100 0010 0001 010
- DACR32_EL2 = 0xe180, // 11 100 0011 0000 000
- SPSR_EL1 = 0xc200, // 11 000 0100 0000 000
- SPSR_EL2 = 0xe200, // 11 100 0100 0000 000
- SPSR_EL3 = 0xf200, // 11 110 0100 0000 000
- ELR_EL1 = 0xc201, // 11 000 0100 0000 001
- ELR_EL2 = 0xe201, // 11 100 0100 0000 001
- ELR_EL3 = 0xf201, // 11 110 0100 0000 001
- SP_EL0 = 0xc208, // 11 000 0100 0001 000
- SP_EL1 = 0xe208, // 11 100 0100 0001 000
- SP_EL2 = 0xf208, // 11 110 0100 0001 000
- SPSel = 0xc210, // 11 000 0100 0010 000
- NZCV = 0xda10, // 11 011 0100 0010 000
- DAIF = 0xda11, // 11 011 0100 0010 001
- CurrentEL = 0xc212, // 11 000 0100 0010 010
- SPSR_irq = 0xe218, // 11 100 0100 0011 000
- SPSR_abt = 0xe219, // 11 100 0100 0011 001
- SPSR_und = 0xe21a, // 11 100 0100 0011 010
- SPSR_fiq = 0xe21b, // 11 100 0100 0011 011
- FPCR = 0xda20, // 11 011 0100 0100 000
- FPSR = 0xda21, // 11 011 0100 0100 001
- DSPSR_EL0 = 0xda28, // 11 011 0100 0101 000
- DLR_EL0 = 0xda29, // 11 011 0100 0101 001
- IFSR32_EL2 = 0xe281, // 11 100 0101 0000 001
- AFSR0_EL1 = 0xc288, // 11 000 0101 0001 000
- AFSR0_EL2 = 0xe288, // 11 100 0101 0001 000
- AFSR0_EL3 = 0xf288, // 11 110 0101 0001 000
- AFSR1_EL1 = 0xc289, // 11 000 0101 0001 001
- AFSR1_EL2 = 0xe289, // 11 100 0101 0001 001
- AFSR1_EL3 = 0xf289, // 11 110 0101 0001 001
- ESR_EL1 = 0xc290, // 11 000 0101 0010 000
- ESR_EL2 = 0xe290, // 11 100 0101 0010 000
- ESR_EL3 = 0xf290, // 11 110 0101 0010 000
- FPEXC32_EL2 = 0xe298, // 11 100 0101 0011 000
- FAR_EL1 = 0xc300, // 11 000 0110 0000 000
- FAR_EL2 = 0xe300, // 11 100 0110 0000 000
- FAR_EL3 = 0xf300, // 11 110 0110 0000 000
- HPFAR_EL2 = 0xe304, // 11 100 0110 0000 100
- PAR_EL1 = 0xc3a0, // 11 000 0111 0100 000
- PMCR_EL0 = 0xdce0, // 11 011 1001 1100 000
- PMCNTENSET_EL0 = 0xdce1, // 11 011 1001 1100 001
- PMCNTENCLR_EL0 = 0xdce2, // 11 011 1001 1100 010
- PMOVSCLR_EL0 = 0xdce3, // 11 011 1001 1100 011
- PMSELR_EL0 = 0xdce5, // 11 011 1001 1100 101
- PMCCNTR_EL0 = 0xdce8, // 11 011 1001 1101 000
- PMXEVTYPER_EL0 = 0xdce9, // 11 011 1001 1101 001
- PMXEVCNTR_EL0 = 0xdcea, // 11 011 1001 1101 010
- PMUSERENR_EL0 = 0xdcf0, // 11 011 1001 1110 000
- PMINTENSET_EL1 = 0xc4f1, // 11 000 1001 1110 001
- PMINTENCLR_EL1 = 0xc4f2, // 11 000 1001 1110 010
- PMOVSSET_EL0 = 0xdcf3, // 11 011 1001 1110 011
- MAIR_EL1 = 0xc510, // 11 000 1010 0010 000
- MAIR_EL2 = 0xe510, // 11 100 1010 0010 000
- MAIR_EL3 = 0xf510, // 11 110 1010 0010 000
- AMAIR_EL1 = 0xc518, // 11 000 1010 0011 000
- AMAIR_EL2 = 0xe518, // 11 100 1010 0011 000
- AMAIR_EL3 = 0xf518, // 11 110 1010 0011 000
- VBAR_EL1 = 0xc600, // 11 000 1100 0000 000
- VBAR_EL2 = 0xe600, // 11 100 1100 0000 000
- VBAR_EL3 = 0xf600, // 11 110 1100 0000 000
- RMR_EL1 = 0xc602, // 11 000 1100 0000 010
- RMR_EL2 = 0xe602, // 11 100 1100 0000 010
- RMR_EL3 = 0xf602, // 11 110 1100 0000 010
- CONTEXTIDR_EL1 = 0xc681, // 11 000 1101 0000 001
- TPIDR_EL0 = 0xde82, // 11 011 1101 0000 010
- TPIDR_EL2 = 0xe682, // 11 100 1101 0000 010
- TPIDR_EL3 = 0xf682, // 11 110 1101 0000 010
- TPIDRRO_EL0 = 0xde83, // 11 011 1101 0000 011
- TPIDR_EL1 = 0xc684, // 11 000 1101 0000 100
- CNTFRQ_EL0 = 0xdf00, // 11 011 1110 0000 000
- CNTVOFF_EL2 = 0xe703, // 11 100 1110 0000 011
- CNTKCTL_EL1 = 0xc708, // 11 000 1110 0001 000
- CNTHCTL_EL2 = 0xe708, // 11 100 1110 0001 000
- CNTP_TVAL_EL0 = 0xdf10, // 11 011 1110 0010 000
- CNTHP_TVAL_EL2 = 0xe710, // 11 100 1110 0010 000
- CNTPS_TVAL_EL1 = 0xff10, // 11 111 1110 0010 000
- CNTP_CTL_EL0 = 0xdf11, // 11 011 1110 0010 001
- CNTHP_CTL_EL2 = 0xe711, // 11 100 1110 0010 001
- CNTPS_CTL_EL1 = 0xff11, // 11 111 1110 0010 001
- CNTP_CVAL_EL0 = 0xdf12, // 11 011 1110 0010 010
- CNTHP_CVAL_EL2 = 0xe712, // 11 100 1110 0010 010
- CNTPS_CVAL_EL1 = 0xff12, // 11 111 1110 0010 010
- CNTV_TVAL_EL0 = 0xdf18, // 11 011 1110 0011 000
- CNTV_CTL_EL0 = 0xdf19, // 11 011 1110 0011 001
- CNTV_CVAL_EL0 = 0xdf1a, // 11 011 1110 0011 010
- PMEVCNTR0_EL0 = 0xdf40, // 11 011 1110 1000 000
- PMEVCNTR1_EL0 = 0xdf41, // 11 011 1110 1000 001
- PMEVCNTR2_EL0 = 0xdf42, // 11 011 1110 1000 010
- PMEVCNTR3_EL0 = 0xdf43, // 11 011 1110 1000 011
- PMEVCNTR4_EL0 = 0xdf44, // 11 011 1110 1000 100
- PMEVCNTR5_EL0 = 0xdf45, // 11 011 1110 1000 101
- PMEVCNTR6_EL0 = 0xdf46, // 11 011 1110 1000 110
- PMEVCNTR7_EL0 = 0xdf47, // 11 011 1110 1000 111
- PMEVCNTR8_EL0 = 0xdf48, // 11 011 1110 1001 000
- PMEVCNTR9_EL0 = 0xdf49, // 11 011 1110 1001 001
- PMEVCNTR10_EL0 = 0xdf4a, // 11 011 1110 1001 010
- PMEVCNTR11_EL0 = 0xdf4b, // 11 011 1110 1001 011
- PMEVCNTR12_EL0 = 0xdf4c, // 11 011 1110 1001 100
- PMEVCNTR13_EL0 = 0xdf4d, // 11 011 1110 1001 101
- PMEVCNTR14_EL0 = 0xdf4e, // 11 011 1110 1001 110
- PMEVCNTR15_EL0 = 0xdf4f, // 11 011 1110 1001 111
- PMEVCNTR16_EL0 = 0xdf50, // 11 011 1110 1010 000
- PMEVCNTR17_EL0 = 0xdf51, // 11 011 1110 1010 001
- PMEVCNTR18_EL0 = 0xdf52, // 11 011 1110 1010 010
- PMEVCNTR19_EL0 = 0xdf53, // 11 011 1110 1010 011
- PMEVCNTR20_EL0 = 0xdf54, // 11 011 1110 1010 100
- PMEVCNTR21_EL0 = 0xdf55, // 11 011 1110 1010 101
- PMEVCNTR22_EL0 = 0xdf56, // 11 011 1110 1010 110
- PMEVCNTR23_EL0 = 0xdf57, // 11 011 1110 1010 111
- PMEVCNTR24_EL0 = 0xdf58, // 11 011 1110 1011 000
- PMEVCNTR25_EL0 = 0xdf59, // 11 011 1110 1011 001
- PMEVCNTR26_EL0 = 0xdf5a, // 11 011 1110 1011 010
- PMEVCNTR27_EL0 = 0xdf5b, // 11 011 1110 1011 011
- PMEVCNTR28_EL0 = 0xdf5c, // 11 011 1110 1011 100
- PMEVCNTR29_EL0 = 0xdf5d, // 11 011 1110 1011 101
- PMEVCNTR30_EL0 = 0xdf5e, // 11 011 1110 1011 110
- PMCCFILTR_EL0 = 0xdf7f, // 11 011 1110 1111 111
- PMEVTYPER0_EL0 = 0xdf60, // 11 011 1110 1100 000
- PMEVTYPER1_EL0 = 0xdf61, // 11 011 1110 1100 001
- PMEVTYPER2_EL0 = 0xdf62, // 11 011 1110 1100 010
- PMEVTYPER3_EL0 = 0xdf63, // 11 011 1110 1100 011
- PMEVTYPER4_EL0 = 0xdf64, // 11 011 1110 1100 100
- PMEVTYPER5_EL0 = 0xdf65, // 11 011 1110 1100 101
- PMEVTYPER6_EL0 = 0xdf66, // 11 011 1110 1100 110
- PMEVTYPER7_EL0 = 0xdf67, // 11 011 1110 1100 111
- PMEVTYPER8_EL0 = 0xdf68, // 11 011 1110 1101 000
- PMEVTYPER9_EL0 = 0xdf69, // 11 011 1110 1101 001
- PMEVTYPER10_EL0 = 0xdf6a, // 11 011 1110 1101 010
- PMEVTYPER11_EL0 = 0xdf6b, // 11 011 1110 1101 011
- PMEVTYPER12_EL0 = 0xdf6c, // 11 011 1110 1101 100
- PMEVTYPER13_EL0 = 0xdf6d, // 11 011 1110 1101 101
- PMEVTYPER14_EL0 = 0xdf6e, // 11 011 1110 1101 110
- PMEVTYPER15_EL0 = 0xdf6f, // 11 011 1110 1101 111
- PMEVTYPER16_EL0 = 0xdf70, // 11 011 1110 1110 000
- PMEVTYPER17_EL0 = 0xdf71, // 11 011 1110 1110 001
- PMEVTYPER18_EL0 = 0xdf72, // 11 011 1110 1110 010
- PMEVTYPER19_EL0 = 0xdf73, // 11 011 1110 1110 011
- PMEVTYPER20_EL0 = 0xdf74, // 11 011 1110 1110 100
- PMEVTYPER21_EL0 = 0xdf75, // 11 011 1110 1110 101
- PMEVTYPER22_EL0 = 0xdf76, // 11 011 1110 1110 110
- PMEVTYPER23_EL0 = 0xdf77, // 11 011 1110 1110 111
- PMEVTYPER24_EL0 = 0xdf78, // 11 011 1110 1111 000
- PMEVTYPER25_EL0 = 0xdf79, // 11 011 1110 1111 001
- PMEVTYPER26_EL0 = 0xdf7a, // 11 011 1110 1111 010
- PMEVTYPER27_EL0 = 0xdf7b, // 11 011 1110 1111 011
- PMEVTYPER28_EL0 = 0xdf7c, // 11 011 1110 1111 100
- PMEVTYPER29_EL0 = 0xdf7d, // 11 011 1110 1111 101
- PMEVTYPER30_EL0 = 0xdf7e, // 11 011 1110 1111 110
-
- // Trace registers
- TRCPRGCTLR = 0x8808, // 10 001 0000 0001 000
- TRCPROCSELR = 0x8810, // 10 001 0000 0010 000
- TRCCONFIGR = 0x8820, // 10 001 0000 0100 000
- TRCAUXCTLR = 0x8830, // 10 001 0000 0110 000
- TRCEVENTCTL0R = 0x8840, // 10 001 0000 1000 000
- TRCEVENTCTL1R = 0x8848, // 10 001 0000 1001 000
- TRCSTALLCTLR = 0x8858, // 10 001 0000 1011 000
- TRCTSCTLR = 0x8860, // 10 001 0000 1100 000
- TRCSYNCPR = 0x8868, // 10 001 0000 1101 000
- TRCCCCTLR = 0x8870, // 10 001 0000 1110 000
- TRCBBCTLR = 0x8878, // 10 001 0000 1111 000
- TRCTRACEIDR = 0x8801, // 10 001 0000 0000 001
- TRCQCTLR = 0x8809, // 10 001 0000 0001 001
- TRCVICTLR = 0x8802, // 10 001 0000 0000 010
- TRCVIIECTLR = 0x880a, // 10 001 0000 0001 010
- TRCVISSCTLR = 0x8812, // 10 001 0000 0010 010
- TRCVIPCSSCTLR = 0x881a, // 10 001 0000 0011 010
- TRCVDCTLR = 0x8842, // 10 001 0000 1000 010
- TRCVDSACCTLR = 0x884a, // 10 001 0000 1001 010
- TRCVDARCCTLR = 0x8852, // 10 001 0000 1010 010
- TRCSEQEVR0 = 0x8804, // 10 001 0000 0000 100
- TRCSEQEVR1 = 0x880c, // 10 001 0000 0001 100
- TRCSEQEVR2 = 0x8814, // 10 001 0000 0010 100
- TRCSEQRSTEVR = 0x8834, // 10 001 0000 0110 100
- TRCSEQSTR = 0x883c, // 10 001 0000 0111 100
- TRCEXTINSELR = 0x8844, // 10 001 0000 1000 100
- TRCCNTRLDVR0 = 0x8805, // 10 001 0000 0000 101
- TRCCNTRLDVR1 = 0x880d, // 10 001 0000 0001 101
- TRCCNTRLDVR2 = 0x8815, // 10 001 0000 0010 101
- TRCCNTRLDVR3 = 0x881d, // 10 001 0000 0011 101
- TRCCNTCTLR0 = 0x8825, // 10 001 0000 0100 101
- TRCCNTCTLR1 = 0x882d, // 10 001 0000 0101 101
- TRCCNTCTLR2 = 0x8835, // 10 001 0000 0110 101
- TRCCNTCTLR3 = 0x883d, // 10 001 0000 0111 101
- TRCCNTVR0 = 0x8845, // 10 001 0000 1000 101
- TRCCNTVR1 = 0x884d, // 10 001 0000 1001 101
- TRCCNTVR2 = 0x8855, // 10 001 0000 1010 101
- TRCCNTVR3 = 0x885d, // 10 001 0000 1011 101
- TRCIMSPEC0 = 0x8807, // 10 001 0000 0000 111
- TRCIMSPEC1 = 0x880f, // 10 001 0000 0001 111
- TRCIMSPEC2 = 0x8817, // 10 001 0000 0010 111
- TRCIMSPEC3 = 0x881f, // 10 001 0000 0011 111
- TRCIMSPEC4 = 0x8827, // 10 001 0000 0100 111
- TRCIMSPEC5 = 0x882f, // 10 001 0000 0101 111
- TRCIMSPEC6 = 0x8837, // 10 001 0000 0110 111
- TRCIMSPEC7 = 0x883f, // 10 001 0000 0111 111
- TRCRSCTLR2 = 0x8890, // 10 001 0001 0010 000
- TRCRSCTLR3 = 0x8898, // 10 001 0001 0011 000
- TRCRSCTLR4 = 0x88a0, // 10 001 0001 0100 000
- TRCRSCTLR5 = 0x88a8, // 10 001 0001 0101 000
- TRCRSCTLR6 = 0x88b0, // 10 001 0001 0110 000
- TRCRSCTLR7 = 0x88b8, // 10 001 0001 0111 000
- TRCRSCTLR8 = 0x88c0, // 10 001 0001 1000 000
- TRCRSCTLR9 = 0x88c8, // 10 001 0001 1001 000
- TRCRSCTLR10 = 0x88d0, // 10 001 0001 1010 000
- TRCRSCTLR11 = 0x88d8, // 10 001 0001 1011 000
- TRCRSCTLR12 = 0x88e0, // 10 001 0001 1100 000
- TRCRSCTLR13 = 0x88e8, // 10 001 0001 1101 000
- TRCRSCTLR14 = 0x88f0, // 10 001 0001 1110 000
- TRCRSCTLR15 = 0x88f8, // 10 001 0001 1111 000
- TRCRSCTLR16 = 0x8881, // 10 001 0001 0000 001
- TRCRSCTLR17 = 0x8889, // 10 001 0001 0001 001
- TRCRSCTLR18 = 0x8891, // 10 001 0001 0010 001
- TRCRSCTLR19 = 0x8899, // 10 001 0001 0011 001
- TRCRSCTLR20 = 0x88a1, // 10 001 0001 0100 001
- TRCRSCTLR21 = 0x88a9, // 10 001 0001 0101 001
- TRCRSCTLR22 = 0x88b1, // 10 001 0001 0110 001
- TRCRSCTLR23 = 0x88b9, // 10 001 0001 0111 001
- TRCRSCTLR24 = 0x88c1, // 10 001 0001 1000 001
- TRCRSCTLR25 = 0x88c9, // 10 001 0001 1001 001
- TRCRSCTLR26 = 0x88d1, // 10 001 0001 1010 001
- TRCRSCTLR27 = 0x88d9, // 10 001 0001 1011 001
- TRCRSCTLR28 = 0x88e1, // 10 001 0001 1100 001
- TRCRSCTLR29 = 0x88e9, // 10 001 0001 1101 001
- TRCRSCTLR30 = 0x88f1, // 10 001 0001 1110 001
- TRCRSCTLR31 = 0x88f9, // 10 001 0001 1111 001
- TRCSSCCR0 = 0x8882, // 10 001 0001 0000 010
- TRCSSCCR1 = 0x888a, // 10 001 0001 0001 010
- TRCSSCCR2 = 0x8892, // 10 001 0001 0010 010
- TRCSSCCR3 = 0x889a, // 10 001 0001 0011 010
- TRCSSCCR4 = 0x88a2, // 10 001 0001 0100 010
- TRCSSCCR5 = 0x88aa, // 10 001 0001 0101 010
- TRCSSCCR6 = 0x88b2, // 10 001 0001 0110 010
- TRCSSCCR7 = 0x88ba, // 10 001 0001 0111 010
- TRCSSCSR0 = 0x88c2, // 10 001 0001 1000 010
- TRCSSCSR1 = 0x88ca, // 10 001 0001 1001 010
- TRCSSCSR2 = 0x88d2, // 10 001 0001 1010 010
- TRCSSCSR3 = 0x88da, // 10 001 0001 1011 010
- TRCSSCSR4 = 0x88e2, // 10 001 0001 1100 010
- TRCSSCSR5 = 0x88ea, // 10 001 0001 1101 010
- TRCSSCSR6 = 0x88f2, // 10 001 0001 1110 010
- TRCSSCSR7 = 0x88fa, // 10 001 0001 1111 010
- TRCSSPCICR0 = 0x8883, // 10 001 0001 0000 011
- TRCSSPCICR1 = 0x888b, // 10 001 0001 0001 011
- TRCSSPCICR2 = 0x8893, // 10 001 0001 0010 011
- TRCSSPCICR3 = 0x889b, // 10 001 0001 0011 011
- TRCSSPCICR4 = 0x88a3, // 10 001 0001 0100 011
- TRCSSPCICR5 = 0x88ab, // 10 001 0001 0101 011
- TRCSSPCICR6 = 0x88b3, // 10 001 0001 0110 011
- TRCSSPCICR7 = 0x88bb, // 10 001 0001 0111 011
- TRCPDCR = 0x88a4, // 10 001 0001 0100 100
- TRCACVR0 = 0x8900, // 10 001 0010 0000 000
- TRCACVR1 = 0x8910, // 10 001 0010 0010 000
- TRCACVR2 = 0x8920, // 10 001 0010 0100 000
- TRCACVR3 = 0x8930, // 10 001 0010 0110 000
- TRCACVR4 = 0x8940, // 10 001 0010 1000 000
- TRCACVR5 = 0x8950, // 10 001 0010 1010 000
- TRCACVR6 = 0x8960, // 10 001 0010 1100 000
- TRCACVR7 = 0x8970, // 10 001 0010 1110 000
- TRCACVR8 = 0x8901, // 10 001 0010 0000 001
- TRCACVR9 = 0x8911, // 10 001 0010 0010 001
- TRCACVR10 = 0x8921, // 10 001 0010 0100 001
- TRCACVR11 = 0x8931, // 10 001 0010 0110 001
- TRCACVR12 = 0x8941, // 10 001 0010 1000 001
- TRCACVR13 = 0x8951, // 10 001 0010 1010 001
- TRCACVR14 = 0x8961, // 10 001 0010 1100 001
- TRCACVR15 = 0x8971, // 10 001 0010 1110 001
- TRCACATR0 = 0x8902, // 10 001 0010 0000 010
- TRCACATR1 = 0x8912, // 10 001 0010 0010 010
- TRCACATR2 = 0x8922, // 10 001 0010 0100 010
- TRCACATR3 = 0x8932, // 10 001 0010 0110 010
- TRCACATR4 = 0x8942, // 10 001 0010 1000 010
- TRCACATR5 = 0x8952, // 10 001 0010 1010 010
- TRCACATR6 = 0x8962, // 10 001 0010 1100 010
- TRCACATR7 = 0x8972, // 10 001 0010 1110 010
- TRCACATR8 = 0x8903, // 10 001 0010 0000 011
- TRCACATR9 = 0x8913, // 10 001 0010 0010 011
- TRCACATR10 = 0x8923, // 10 001 0010 0100 011
- TRCACATR11 = 0x8933, // 10 001 0010 0110 011
- TRCACATR12 = 0x8943, // 10 001 0010 1000 011
- TRCACATR13 = 0x8953, // 10 001 0010 1010 011
- TRCACATR14 = 0x8963, // 10 001 0010 1100 011
- TRCACATR15 = 0x8973, // 10 001 0010 1110 011
- TRCDVCVR0 = 0x8904, // 10 001 0010 0000 100
- TRCDVCVR1 = 0x8924, // 10 001 0010 0100 100
- TRCDVCVR2 = 0x8944, // 10 001 0010 1000 100
- TRCDVCVR3 = 0x8964, // 10 001 0010 1100 100
- TRCDVCVR4 = 0x8905, // 10 001 0010 0000 101
- TRCDVCVR5 = 0x8925, // 10 001 0010 0100 101
- TRCDVCVR6 = 0x8945, // 10 001 0010 1000 101
- TRCDVCVR7 = 0x8965, // 10 001 0010 1100 101
- TRCDVCMR0 = 0x8906, // 10 001 0010 0000 110
- TRCDVCMR1 = 0x8926, // 10 001 0010 0100 110
- TRCDVCMR2 = 0x8946, // 10 001 0010 1000 110
- TRCDVCMR3 = 0x8966, // 10 001 0010 1100 110
- TRCDVCMR4 = 0x8907, // 10 001 0010 0000 111
- TRCDVCMR5 = 0x8927, // 10 001 0010 0100 111
- TRCDVCMR6 = 0x8947, // 10 001 0010 1000 111
- TRCDVCMR7 = 0x8967, // 10 001 0010 1100 111
- TRCCIDCVR0 = 0x8980, // 10 001 0011 0000 000
- TRCCIDCVR1 = 0x8990, // 10 001 0011 0010 000
- TRCCIDCVR2 = 0x89a0, // 10 001 0011 0100 000
- TRCCIDCVR3 = 0x89b0, // 10 001 0011 0110 000
- TRCCIDCVR4 = 0x89c0, // 10 001 0011 1000 000
- TRCCIDCVR5 = 0x89d0, // 10 001 0011 1010 000
- TRCCIDCVR6 = 0x89e0, // 10 001 0011 1100 000
- TRCCIDCVR7 = 0x89f0, // 10 001 0011 1110 000
- TRCVMIDCVR0 = 0x8981, // 10 001 0011 0000 001
- TRCVMIDCVR1 = 0x8991, // 10 001 0011 0010 001
- TRCVMIDCVR2 = 0x89a1, // 10 001 0011 0100 001
- TRCVMIDCVR3 = 0x89b1, // 10 001 0011 0110 001
- TRCVMIDCVR4 = 0x89c1, // 10 001 0011 1000 001
- TRCVMIDCVR5 = 0x89d1, // 10 001 0011 1010 001
- TRCVMIDCVR6 = 0x89e1, // 10 001 0011 1100 001
- TRCVMIDCVR7 = 0x89f1, // 10 001 0011 1110 001
- TRCCIDCCTLR0 = 0x8982, // 10 001 0011 0000 010
- TRCCIDCCTLR1 = 0x898a, // 10 001 0011 0001 010
- TRCVMIDCCTLR0 = 0x8992, // 10 001 0011 0010 010
- TRCVMIDCCTLR1 = 0x899a, // 10 001 0011 0011 010
- TRCITCTRL = 0x8b84, // 10 001 0111 0000 100
- TRCCLAIMSET = 0x8bc6, // 10 001 0111 1000 110
- TRCCLAIMCLR = 0x8bce, // 10 001 0111 1001 110
-
- // GICv3 registers
- ICC_BPR1_EL1 = 0xc663, // 11 000 1100 1100 011
- ICC_BPR0_EL1 = 0xc643, // 11 000 1100 1000 011
- ICC_PMR_EL1 = 0xc230, // 11 000 0100 0110 000
- ICC_CTLR_EL1 = 0xc664, // 11 000 1100 1100 100
- ICC_CTLR_EL3 = 0xf664, // 11 110 1100 1100 100
- ICC_SRE_EL1 = 0xc665, // 11 000 1100 1100 101
- ICC_SRE_EL2 = 0xe64d, // 11 100 1100 1001 101
- ICC_SRE_EL3 = 0xf665, // 11 110 1100 1100 101
- ICC_IGRPEN0_EL1 = 0xc666, // 11 000 1100 1100 110
- ICC_IGRPEN1_EL1 = 0xc667, // 11 000 1100 1100 111
- ICC_IGRPEN1_EL3 = 0xf667, // 11 110 1100 1100 111
- ICC_SEIEN_EL1 = 0xc668, // 11 000 1100 1101 000
- ICC_AP0R0_EL1 = 0xc644, // 11 000 1100 1000 100
- ICC_AP0R1_EL1 = 0xc645, // 11 000 1100 1000 101
- ICC_AP0R2_EL1 = 0xc646, // 11 000 1100 1000 110
- ICC_AP0R3_EL1 = 0xc647, // 11 000 1100 1000 111
- ICC_AP1R0_EL1 = 0xc648, // 11 000 1100 1001 000
- ICC_AP1R1_EL1 = 0xc649, // 11 000 1100 1001 001
- ICC_AP1R2_EL1 = 0xc64a, // 11 000 1100 1001 010
- ICC_AP1R3_EL1 = 0xc64b, // 11 000 1100 1001 011
- ICH_AP0R0_EL2 = 0xe640, // 11 100 1100 1000 000
- ICH_AP0R1_EL2 = 0xe641, // 11 100 1100 1000 001
- ICH_AP0R2_EL2 = 0xe642, // 11 100 1100 1000 010
- ICH_AP0R3_EL2 = 0xe643, // 11 100 1100 1000 011
- ICH_AP1R0_EL2 = 0xe648, // 11 100 1100 1001 000
- ICH_AP1R1_EL2 = 0xe649, // 11 100 1100 1001 001
- ICH_AP1R2_EL2 = 0xe64a, // 11 100 1100 1001 010
- ICH_AP1R3_EL2 = 0xe64b, // 11 100 1100 1001 011
- ICH_HCR_EL2 = 0xe658, // 11 100 1100 1011 000
- ICH_MISR_EL2 = 0xe65a, // 11 100 1100 1011 010
- ICH_VMCR_EL2 = 0xe65f, // 11 100 1100 1011 111
- ICH_VSEIR_EL2 = 0xe64c, // 11 100 1100 1001 100
- ICH_LR0_EL2 = 0xe660, // 11 100 1100 1100 000
- ICH_LR1_EL2 = 0xe661, // 11 100 1100 1100 001
- ICH_LR2_EL2 = 0xe662, // 11 100 1100 1100 010
- ICH_LR3_EL2 = 0xe663, // 11 100 1100 1100 011
- ICH_LR4_EL2 = 0xe664, // 11 100 1100 1100 100
- ICH_LR5_EL2 = 0xe665, // 11 100 1100 1100 101
- ICH_LR6_EL2 = 0xe666, // 11 100 1100 1100 110
- ICH_LR7_EL2 = 0xe667, // 11 100 1100 1100 111
- ICH_LR8_EL2 = 0xe668, // 11 100 1100 1101 000
- ICH_LR9_EL2 = 0xe669, // 11 100 1100 1101 001
- ICH_LR10_EL2 = 0xe66a, // 11 100 1100 1101 010
- ICH_LR11_EL2 = 0xe66b, // 11 100 1100 1101 011
- ICH_LR12_EL2 = 0xe66c, // 11 100 1100 1101 100
- ICH_LR13_EL2 = 0xe66d, // 11 100 1100 1101 101
- ICH_LR14_EL2 = 0xe66e, // 11 100 1100 1101 110
- ICH_LR15_EL2 = 0xe66f, // 11 100 1100 1101 111
-
- // v8.1a "Privileged Access Never" extension-specific system registers
- PAN = 0xc213, // 11 000 0100 0010 011
-
- // v8.1a "Limited Ordering Regions" extension-specific system registers
- LORSA_EL1 = 0xc520, // 11 000 1010 0100 000
- LOREA_EL1 = 0xc521, // 11 000 1010 0100 001
- LORN_EL1 = 0xc522, // 11 000 1010 0100 010
- LORC_EL1 = 0xc523, // 11 000 1010 0100 011
- LORID_EL1 = 0xc527, // 11 000 1010 0100 111
-
- // v8.1a "Virtualization host extensions" system registers
- TTBR1_EL2 = 0xe101, // 11 100 0010 0000 001
- CONTEXTIDR_EL2 = 0xe681, // 11 100 1101 0000 001
- CNTHV_TVAL_EL2 = 0xe718, // 11 100 1110 0011 000
- CNTHV_CVAL_EL2 = 0xe71a, // 11 100 1110 0011 010
- CNTHV_CTL_EL2 = 0xe719, // 11 100 1110 0011 001
- SCTLR_EL12 = 0xe880, // 11 101 0001 0000 000
- CPACR_EL12 = 0xe882, // 11 101 0001 0000 010
- TTBR0_EL12 = 0xe900, // 11 101 0010 0000 000
- TTBR1_EL12 = 0xe901, // 11 101 0010 0000 001
- TCR_EL12 = 0xe902, // 11 101 0010 0000 010
- AFSR0_EL12 = 0xea88, // 11 101 0101 0001 000
- AFSR1_EL12 = 0xea89, // 11 101 0101 0001 001
- ESR_EL12 = 0xea90, // 11 101 0101 0010 000
- FAR_EL12 = 0xeb00, // 11 101 0110 0000 000
- MAIR_EL12 = 0xed10, // 11 101 1010 0010 000
- AMAIR_EL12 = 0xed18, // 11 101 1010 0011 000
- VBAR_EL12 = 0xee00, // 11 101 1100 0000 000
- CONTEXTIDR_EL12 = 0xee81, // 11 101 1101 0000 001
- CNTKCTL_EL12 = 0xef08, // 11 101 1110 0001 000
- CNTP_TVAL_EL02 = 0xef10, // 11 101 1110 0010 000
- CNTP_CTL_EL02 = 0xef11, // 11 101 1110 0010 001
- CNTP_CVAL_EL02 = 0xef12, // 11 101 1110 0010 010
- CNTV_TVAL_EL02 = 0xef18, // 11 101 1110 0011 000
- CNTV_CTL_EL02 = 0xef19, // 11 101 1110 0011 001
- CNTV_CVAL_EL02 = 0xef1a, // 11 101 1110 0011 010
- SPSR_EL12 = 0xea00, // 11 101 0100 0000 000
- ELR_EL12 = 0xea01, // 11 101 0100 0000 001
-
- // v8.2a registers
- UAO = 0xc214, // 11 000 0100 0010 100
-
- // v8.2a "Statistical Profiling extension" registers
- PMBLIMITR_EL1 = 0xc4d0, // 11 000 1001 1010 000
- PMBPTR_EL1 = 0xc4d1, // 11 000 1001 1010 001
- PMBSR_EL1 = 0xc4d3, // 11 000 1001 1010 011
- PMBIDR_EL1 = 0xc4d7, // 11 000 1001 1010 111
- PMSCR_EL2 = 0xe4c8, // 11 100 1001 1001 000
- PMSCR_EL12 = 0xecc8, // 11 101 1001 1001 000
- PMSCR_EL1 = 0xc4c8, // 11 000 1001 1001 000
- PMSICR_EL1 = 0xc4ca, // 11 000 1001 1001 010
- PMSIRR_EL1 = 0xc4cb, // 11 000 1001 1001 011
- PMSFCR_EL1 = 0xc4cc, // 11 000 1001 1001 100
- PMSEVFR_EL1 = 0xc4cd, // 11 000 1001 1001 101
- PMSLATFR_EL1 = 0xc4ce, // 11 000 1001 1001 110
- PMSIDR_EL1 = 0xc4cf, // 11 000 1001 1001 111
+ struct SysReg {
+ const char *Name;
+ unsigned Encoding;
+ bool Readable;
+ bool Writeable;
+ FeatureBitset FeaturesRequired;
- // Cyclone specific system registers
- CPM_IOACC_CTL_EL3 = 0xff90,
+ bool haveFeatures(FeatureBitset ActiveFeatures) const {
+ return (FeaturesRequired & ActiveFeatures) == FeaturesRequired;
+ }
};
- // Note that these do not inherit from AArch64NamedImmMapper. This class is
- // sufficiently different in its behaviour that I don't believe it's worth
- // burdening the common AArch64NamedImmMapper with abstractions only needed in
- // this one case.
- struct SysRegMapper {
- static const AArch64NamedImmMapper::Mapping SysRegMappings[];
+ #define GET_SYSREG_DECL
+ #include "AArch64GenSystemOperands.inc"
- const AArch64NamedImmMapper::Mapping *InstMappings;
- size_t NumInstMappings;
+ const SysReg *lookupSysRegByName(StringRef);
+ const SysReg *lookupSysRegByEncoding(uint16_t);
- SysRegMapper() { }
- uint32_t fromString(StringRef Name, const FeatureBitset& FeatureBits,
- bool &Valid) const;
- std::string toString(uint32_t Bits, const FeatureBitset& FeatureBits) const;
- };
-
- struct MSRMapper : SysRegMapper {
- static const AArch64NamedImmMapper::Mapping MSRMappings[];
- MSRMapper();
- };
-
- struct MRSMapper : SysRegMapper {
- static const AArch64NamedImmMapper::Mapping MRSMappings[];
- MRSMapper();
- };
-
- uint32_t ParseGenericRegister(StringRef Name, bool &Valid);
+ uint32_t parseGenericRegister(StringRef Name);
+ std::string genericRegisterString(uint32_t Bits);
}
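
Taken together these replace both SysRegMapper::fromString and the old
ParseGenericRegister. A sketch of how a caller such as the assembly parser
might combine them (the function and variable names here are illustrative,
and case normalisation is elided):

    uint32_t resolveMRSOperand(StringRef Name, const FeatureBitset &Features,
                               bool &Valid) {
      // Named registers first: the operand must be readable (this is MRS)
      // and available under the subtarget's feature bits.
      if (const AArch64SysReg::SysReg *R =
              AArch64SysReg::lookupSysRegByName(Name)) {
        if (R->Readable && R->haveFeatures(Features)) {
          Valid = true;
          return R->Encoding;
        }
      }
      // Fall back to the generic S<op0>_<op1>_C<n>_C<m>_<op2> spelling;
      // parseGenericRegister() returns -1 when the name does not match.
      uint32_t Bits = AArch64SysReg::parseGenericRegister(Name);
      Valid = (Bits != uint32_t(-1));
      return Bits;
    }
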
namespace AArch64TLBI {
- enum TLBIValues {
- Invalid = -1, // Op0 Op1 CRn CRm Op2
- IPAS2E1IS = 0x6401, // 01 100 1000 0000 001
- IPAS2LE1IS = 0x6405, // 01 100 1000 0000 101
- VMALLE1IS = 0x4418, // 01 000 1000 0011 000
- ALLE2IS = 0x6418, // 01 100 1000 0011 000
- ALLE3IS = 0x7418, // 01 110 1000 0011 000
- VAE1IS = 0x4419, // 01 000 1000 0011 001
- VAE2IS = 0x6419, // 01 100 1000 0011 001
- VAE3IS = 0x7419, // 01 110 1000 0011 001
- ASIDE1IS = 0x441a, // 01 000 1000 0011 010
- VAAE1IS = 0x441b, // 01 000 1000 0011 011
- ALLE1IS = 0x641c, // 01 100 1000 0011 100
- VALE1IS = 0x441d, // 01 000 1000 0011 101
- VALE2IS = 0x641d, // 01 100 1000 0011 101
- VALE3IS = 0x741d, // 01 110 1000 0011 101
- VMALLS12E1IS = 0x641e, // 01 100 1000 0011 110
- VAALE1IS = 0x441f, // 01 000 1000 0011 111
- IPAS2E1 = 0x6421, // 01 100 1000 0100 001
- IPAS2LE1 = 0x6425, // 01 100 1000 0100 101
- VMALLE1 = 0x4438, // 01 000 1000 0111 000
- ALLE2 = 0x6438, // 01 100 1000 0111 000
- ALLE3 = 0x7438, // 01 110 1000 0111 000
- VAE1 = 0x4439, // 01 000 1000 0111 001
- VAE2 = 0x6439, // 01 100 1000 0111 001
- VAE3 = 0x7439, // 01 110 1000 0111 001
- ASIDE1 = 0x443a, // 01 000 1000 0111 010
- VAAE1 = 0x443b, // 01 000 1000 0111 011
- ALLE1 = 0x643c, // 01 100 1000 0111 100
- VALE1 = 0x443d, // 01 000 1000 0111 101
- VALE2 = 0x643d, // 01 100 1000 0111 101
- VALE3 = 0x743d, // 01 110 1000 0111 101
- VMALLS12E1 = 0x643e, // 01 100 1000 0111 110
- VAALE1 = 0x443f // 01 000 1000 0111 111
- };
-
- struct TLBIMapper : AArch64NamedImmMapper {
- const static Mapping TLBIMappings[];
-
- TLBIMapper();
+ struct TLBI {
+ const char *Name;
+ uint16_t Encoding;
+ bool NeedsReg;
};
-
- static inline bool NeedsRegister(TLBIValues Val) {
- switch (Val) {
- case VMALLE1IS:
- case ALLE2IS:
- case ALLE3IS:
- case ALLE1IS:
- case VMALLS12E1IS:
- case VMALLE1:
- case ALLE2:
- case ALLE3:
- case ALLE1:
- case VMALLS12E1:
- return false;
- default:
- return true;
- }
- }
+ #define GET_TLBI_DECL
+ #include "AArch64GenSystemOperands.inc"
}
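
The per-entry NeedsReg flag replaces the hand-written NeedsRegister() switch
deleted above, so operand validation becomes a table lookup. Assuming the
GET_TLBI_DECL section declares a lookupTLBIByName analogous to the sysreg
lookups, a parser check might look like:

    // Does "tlbi <op>" / "tlbi <op>, <Xt>" have the right operand count?
    bool isValidTLBIOperandCount(StringRef Op, bool HasXtOperand) {
      const AArch64TLBI::TLBI *E = AArch64TLBI::lookupTLBIByName(Op);
      return E && E->NeedsReg == HasXtOperand;
    }
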
namespace AArch64II {
@@ -1379,12 +515,7 @@ namespace AArch64II {
/// thread-local symbol. On Darwin, only one type of thread-local access
/// exists (pre linker-relaxation), but on ELF the TLSModel used for the
/// referee will affect interpretation.
- MO_TLS = 0x40,
-
- /// MO_CONSTPOOL - This flag indicates that a symbol operand represents
- /// the address of a constant pool entry for the symbol, rather than the
- /// address of the symbol itself.
- MO_CONSTPOOL = 0x80
+ MO_TLS = 0x40
};
} // end namespace AArch64II
diff --git a/lib/Target/AArch64/Utils/Makefile b/lib/Target/AArch64/Utils/Makefile
deleted file mode 100644
index 0b80f82f2b99..000000000000
--- a/lib/Target/AArch64/Utils/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/AArch64/Utils/Makefile -------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAArch64Utils
-
-# Hack: we need to include 'main' AArch64 target directory to grab private
-# headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AMDGPU/AMDGPU.h b/lib/Target/AMDGPU/AMDGPU.h
index 4f718e1ca310..7e59710a427a 100644
--- a/lib/Target/AMDGPU/AMDGPU.h
+++ b/lib/Target/AMDGPU/AMDGPU.h
@@ -8,8 +8,8 @@
/// \file
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_AMDGPU_H
-#define LLVM_LIB_TARGET_R600_AMDGPU_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPU_H
+#define LLVM_LIB_TARGET_AMDGPU_AMDGPU_H
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetMachine.h"
@@ -29,7 +29,6 @@ class TargetMachine;
// R600 Passes
FunctionPass *createR600VectorRegMerger(TargetMachine &tm);
-FunctionPass *createR600TextureIntrinsicsReplacer();
FunctionPass *createR600ExpandSpecialInstrsPass(TargetMachine &tm);
FunctionPass *createR600EmitClauseMarkers();
FunctionPass *createR600ClauseMergePass(TargetMachine &tm);
@@ -44,12 +43,14 @@ FunctionPass *createSIFoldOperandsPass();
FunctionPass *createSILowerI1CopiesPass();
FunctionPass *createSIShrinkInstructionsPass();
FunctionPass *createSILoadStoreOptimizerPass(TargetMachine &tm);
-FunctionPass *createSILowerControlFlowPass(TargetMachine &tm);
+FunctionPass *createSIWholeQuadModePass();
+FunctionPass *createSILowerControlFlowPass();
FunctionPass *createSIFixControlFlowLiveIntervalsPass();
FunctionPass *createSIFixSGPRCopiesPass();
-FunctionPass *createSIFixSGPRLiveRangesPass();
FunctionPass *createSICodeEmitterPass(formatted_raw_ostream &OS);
-FunctionPass *createSIInsertWaits(TargetMachine &tm);
+FunctionPass *createSIDebuggerInsertNopsPass();
+FunctionPass *createSIInsertWaitsPass();
+FunctionPass *createAMDGPUCodeGenPreparePass(const TargetMachine *TM = nullptr);
ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C);
@@ -60,6 +61,9 @@ extern char &AMDGPUAnnotateKernelFeaturesID;
void initializeSIFoldOperandsPass(PassRegistry &);
extern char &SIFoldOperandsID;
+void initializeSIShrinkInstructionsPass(PassRegistry&);
+extern char &SIShrinkInstructionsID;
+
void initializeSIFixSGPRCopiesPass(PassRegistry &);
extern char &SIFixSGPRCopiesID;
@@ -69,8 +73,19 @@ extern char &SILowerI1CopiesID;
void initializeSILoadStoreOptimizerPass(PassRegistry &);
extern char &SILoadStoreOptimizerID;
+void initializeSIWholeQuadModePass(PassRegistry &);
+extern char &SIWholeQuadModeID;
+
+void initializeSILowerControlFlowPass(PassRegistry &);
+extern char &SILowerControlFlowPassID;
+
+
// Passes common to R600 and SI
-FunctionPass *createAMDGPUPromoteAlloca(const AMDGPUSubtarget &ST);
+FunctionPass *createAMDGPUPromoteAlloca(const TargetMachine *TM = nullptr);
+void initializeAMDGPUPromoteAllocaPass(PassRegistry&);
+extern char &AMDGPUPromoteAllocaID;
+
+FunctionPass *createAMDGPUAddDivergenceMetadata(const AMDGPUSubtarget &ST);
Pass *createAMDGPUStructurizeCFGPass();
FunctionPass *createAMDGPUISelDag(TargetMachine &tm);
ModulePass *createAMDGPUAlwaysInlinePass();
@@ -80,12 +95,21 @@ FunctionPass *createAMDGPUAnnotateUniformValues();
void initializeSIFixControlFlowLiveIntervalsPass(PassRegistry&);
extern char &SIFixControlFlowLiveIntervalsID;
-void initializeSIFixSGPRLiveRangesPass(PassRegistry&);
-extern char &SIFixSGPRLiveRangesID;
-
void initializeAMDGPUAnnotateUniformValuesPass(PassRegistry&);
extern char &AMDGPUAnnotateUniformValuesPassID;
+void initializeAMDGPUCodeGenPreparePass(PassRegistry&);
+extern char &AMDGPUCodeGenPrepareID;
+
+void initializeSIAnnotateControlFlowPass(PassRegistry&);
+extern char &SIAnnotateControlFlowPassID;
+
+void initializeSIDebuggerInsertNopsPass(PassRegistry&);
+extern char &SIDebuggerInsertNopsID;
+
+void initializeSIInsertWaitsPass(PassRegistry&);
+extern char &SIInsertWaitsID;
+
extern Target TheAMDGPUTarget;
extern Target TheGCNTarget;
@@ -101,15 +125,6 @@ enum TargetIndex {
} // End namespace llvm
-namespace ShaderType {
- enum Type {
- PIXEL = 0,
- VERTEX = 1,
- GEOMETRY = 2,
- COMPUTE = 3
- };
-}
-
/// OpenCL uses address spaces to differentiate between
/// various memory regions on the hardware. On the CPU
/// all of the address spaces point to the same memory,
@@ -120,7 +135,7 @@ namespace AMDGPUAS {
enum AddressSpaces : unsigned {
PRIVATE_ADDRESS = 0, ///< Address space for private memory.
GLOBAL_ADDRESS = 1, ///< Address space for global memory (RAT0, VTX0).
- CONSTANT_ADDRESS = 2, ///< Address space for constant memory
+ CONSTANT_ADDRESS = 2, ///< Address space for constant memory (VTX2)
LOCAL_ADDRESS = 3, ///< Address space for local memory.
FLAT_ADDRESS = 4, ///< Address space for flat memory.
REGION_ADDRESS = 5, ///< Address space for region memory.
@@ -148,8 +163,6 @@ enum AddressSpaces : unsigned {
CONSTANT_BUFFER_13 = 21,
CONSTANT_BUFFER_14 = 22,
CONSTANT_BUFFER_15 = 23,
- ADDRESS_NONE = 24, ///< Address space for unknown memory.
- LAST_ADDRESS = ADDRESS_NONE,
// Some places use this if the address space can't be determined.
UNKNOWN_ADDRESS_SPACE = ~0u
diff --git a/lib/Target/AMDGPU/AMDGPU.td b/lib/Target/AMDGPU/AMDGPU.td
index 844d89c737bf..72c455354411 100644
--- a/lib/Target/AMDGPU/AMDGPU.td
+++ b/lib/Target/AMDGPU/AMDGPU.td
@@ -1,182 +1,121 @@
-//===-- AMDGPU.td - AMDGPU Tablegen files ------------------*- tablegen -*-===//
+//===-- AMDGPU.td - AMDGPU Tablegen files --------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
-//===----------------------------------------------------------------------===//
+//===------------------------------------------------------------===//
include "llvm/Target/Target.td"
-//===----------------------------------------------------------------------===//
-// Subtarget Features
-//===----------------------------------------------------------------------===//
-
-// Debugging Features
-
-def FeatureDumpCode : SubtargetFeature <"DumpCode",
- "DumpCode",
- "true",
- "Dump MachineInstrs in the CodeEmitter">;
-
-def FeatureDumpCodeLower : SubtargetFeature <"dumpcode",
- "DumpCode",
- "true",
- "Dump MachineInstrs in the CodeEmitter">;
-
-def FeatureIRStructurizer : SubtargetFeature <"disable-irstructurizer",
- "EnableIRStructurizer",
- "false",
- "Disable IR Structurizer">;
-
-def FeaturePromoteAlloca : SubtargetFeature <"promote-alloca",
- "EnablePromoteAlloca",
- "true",
- "Enable promote alloca pass">;
-
-// Target features
-
-def FeatureIfCvt : SubtargetFeature <"disable-ifcvt",
- "EnableIfCvt",
- "false",
- "Disable the if conversion pass">;
+//===------------------------------------------------------------===//
+// Subtarget Features (device properties)
+//===------------------------------------------------------------===//
def FeatureFP64 : SubtargetFeature<"fp64",
- "FP64",
- "true",
- "Enable double precision operations">;
-
-def FeatureFP64Denormals : SubtargetFeature<"fp64-denormals",
- "FP64Denormals",
- "true",
- "Enable double precision denormal handling",
- [FeatureFP64]>;
+ "FP64",
+ "true",
+ "Enable double precision operations"
+>;
def FeatureFastFMAF32 : SubtargetFeature<"fast-fmaf",
- "FastFMAF32",
- "true",
- "Assuming f32 fma is at least as fast as mul + add",
- []>;
-
-// Some instructions do not support denormals despite this flag. Using
-// fp32 denormals also causes instructions to run at the double
-// precision rate for the device.
-def FeatureFP32Denormals : SubtargetFeature<"fp32-denormals",
- "FP32Denormals",
- "true",
- "Enable single precision denormal handling">;
+ "FastFMAF32",
+ "true",
+ "Assuming f32 fma is at least as fast as mul + add"
+>;
-def Feature64BitPtr : SubtargetFeature<"64BitPtr",
- "Is64bit",
- "true",
- "Specify if 64-bit addressing should be used">;
+def HalfRate64Ops : SubtargetFeature<"half-rate-64-ops",
+ "HalfRate64Ops",
+ "true",
+ "Most fp64 instructions are half rate instead of quarter"
+>;
def FeatureR600ALUInst : SubtargetFeature<"R600ALUInst",
- "R600ALUInst",
- "false",
- "Older version of ALU instructions encoding">;
+ "R600ALUInst",
+ "false",
+ "Older version of ALU instructions encoding"
+>;
def FeatureVertexCache : SubtargetFeature<"HasVertexCache",
- "HasVertexCache",
- "true",
- "Specify use of dedicated vertex cache">;
+ "HasVertexCache",
+ "true",
+ "Specify use of dedicated vertex cache"
+>;
def FeatureCaymanISA : SubtargetFeature<"caymanISA",
- "CaymanISA",
- "true",
- "Use Cayman ISA">;
+ "CaymanISA",
+ "true",
+ "Use Cayman ISA"
+>;
def FeatureCFALUBug : SubtargetFeature<"cfalubug",
- "CFALUBug",
- "true",
- "GPU has CF_ALU bug">;
-
-// XXX - This should probably be removed once enabled by default
-def FeatureEnableLoadStoreOpt : SubtargetFeature <"load-store-opt",
- "EnableLoadStoreOpt",
- "true",
- "Enable SI load/store optimizer pass">;
-
-// Performance debugging feature. Allow using DS instruction immediate
-// offsets even if the base pointer can't be proven to be base. On SI,
-// base pointer values that won't give the same result as a 16-bit add
-// are not safe to fold, but this will override the conservative test
-// for the base pointer.
-def FeatureEnableUnsafeDSOffsetFolding : SubtargetFeature <"unsafe-ds-offset-folding",
- "EnableUnsafeDSOffsetFolding",
- "true",
- "Force using DS instruction immediate offsets on SI">;
-
-def FeatureFlatForGlobal : SubtargetFeature<"flat-for-global",
- "FlatForGlobal",
- "true",
- "Force to generate flat instruction for global">;
+ "CFALUBug",
+ "true",
+ "GPU has CF_ALU bug"
+>;
def FeatureFlatAddressSpace : SubtargetFeature<"flat-address-space",
- "FlatAddressSpace",
- "true",
- "Support flat address space">;
+ "FlatAddressSpace",
+ "true",
+ "Support flat address space"
+>;
-def FeatureXNACK : SubtargetFeature<"xnack",
- "EnableXNACK",
- "true",
- "Enable XNACK support">;
+def FeatureUnalignedBufferAccess : SubtargetFeature<"unaligned-buffer-access",
+ "UnalignedBufferAccess",
+ "true",
+ "Support unaligned global loads and stores"
+>;
-def FeatureVGPRSpilling : SubtargetFeature<"vgpr-spilling",
- "EnableVGPRSpilling",
- "true",
- "Enable spilling of VGPRs to scratch memory">;
+def FeatureXNACK : SubtargetFeature<"xnack",
+ "EnableXNACK",
+ "true",
+ "Enable XNACK support"
+>;
def FeatureSGPRInitBug : SubtargetFeature<"sgpr-init-bug",
- "SGPRInitBug",
- "true",
- "VI SGPR initilization bug requiring a fixed SGPR allocation size">;
-
-def FeatureEnableHugeScratchBuffer : SubtargetFeature<"huge-scratch-buffer",
- "EnableHugeScratchBuffer",
- "true",
- "Enable scratch buffer sizes greater than 128 GB">;
-
-def FeatureEnableSIScheduler : SubtargetFeature<"si-scheduler",
- "EnableSIScheduler",
- "true",
- "Enable SI Machine Scheduler">;
+ "SGPRInitBug",
+ "true",
+ "VI SGPR initilization bug requiring a fixed SGPR allocation size"
+>;
class SubtargetFeatureFetchLimit <string Value> :
SubtargetFeature <"fetch"#Value,
- "TexVTXClauseSize",
- Value,
- "Limit the maximum number of fetches in a clause to "#Value>;
+ "TexVTXClauseSize",
+ Value,
+ "Limit the maximum number of fetches in a clause to "#Value
+>;
def FeatureFetchLimit8 : SubtargetFeatureFetchLimit <"8">;
def FeatureFetchLimit16 : SubtargetFeatureFetchLimit <"16">;
class SubtargetFeatureWavefrontSize <int Value> : SubtargetFeature<
- "wavefrontsize"#Value,
- "WavefrontSize",
- !cast<string>(Value),
- "The number of threads per wavefront">;
+ "wavefrontsize"#Value,
+ "WavefrontSize",
+ !cast<string>(Value),
+ "The number of threads per wavefront"
+>;
def FeatureWavefrontSize16 : SubtargetFeatureWavefrontSize<16>;
def FeatureWavefrontSize32 : SubtargetFeatureWavefrontSize<32>;
def FeatureWavefrontSize64 : SubtargetFeatureWavefrontSize<64>;
class SubtargetFeatureLDSBankCount <int Value> : SubtargetFeature <
- "ldsbankcount"#Value,
- "LDSBankCount",
- !cast<string>(Value),
- "The number of LDS banks per compute unit.">;
+ "ldsbankcount"#Value,
+ "LDSBankCount",
+ !cast<string>(Value),
+ "The number of LDS banks per compute unit."
+>;
def FeatureLDSBankCount16 : SubtargetFeatureLDSBankCount<16>;
def FeatureLDSBankCount32 : SubtargetFeatureLDSBankCount<32>;
class SubtargetFeatureISAVersion <int Major, int Minor, int Stepping>
: SubtargetFeature <
- "isaver"#Major#"."#Minor#"."#Stepping,
- "IsaVersion",
- "ISAVersion"#Major#"_"#Minor#"_"#Stepping,
- "Instruction set version number"
+ "isaver"#Major#"."#Minor#"."#Stepping,
+ "IsaVersion",
+ "ISAVersion"#Major#"_"#Minor#"_"#Stepping,
+ "Instruction set version number"
>;
def FeatureISAVersion7_0_0 : SubtargetFeatureISAVersion <7,0,0>;
@@ -186,36 +125,145 @@ def FeatureISAVersion8_0_1 : SubtargetFeatureISAVersion <8,0,1>;
def FeatureISAVersion8_0_3 : SubtargetFeatureISAVersion <8,0,3>;
class SubtargetFeatureLocalMemorySize <int Value> : SubtargetFeature<
- "localmemorysize"#Value,
- "LocalMemorySize",
- !cast<string>(Value),
- "The size of local memory in bytes">;
+ "localmemorysize"#Value,
+ "LocalMemorySize",
+ !cast<string>(Value),
+ "The size of local memory in bytes"
+>;
def FeatureGCN : SubtargetFeature<"gcn",
- "IsGCN",
- "true",
- "GCN or newer GPU">;
+ "IsGCN",
+ "true",
+ "GCN or newer GPU"
+>;
def FeatureGCN1Encoding : SubtargetFeature<"gcn1-encoding",
- "GCN1Encoding",
- "true",
- "Encoding format for SI and CI">;
+ "GCN1Encoding",
+ "true",
+ "Encoding format for SI and CI"
+>;
def FeatureGCN3Encoding : SubtargetFeature<"gcn3-encoding",
- "GCN3Encoding",
- "true",
- "Encoding format for VI">;
+ "GCN3Encoding",
+ "true",
+ "Encoding format for VI"
+>;
def FeatureCIInsts : SubtargetFeature<"ci-insts",
- "CIInsts",
- "true",
- "Additional intstructions for CI+">;
+ "CIInsts",
+ "true",
+ "Additional intstructions for CI+"
+>;
+
+def FeatureSMemRealTime : SubtargetFeature<"s-memrealtime",
+ "HasSMemRealTime",
+ "true",
+ "Has s_memrealtime instruction"
+>;
+
+def Feature16BitInsts : SubtargetFeature<"16-bit-insts",
+ "Has16BitInsts",
+ "true",
+ "Has i16/f16 instructions"
+>;
+
+//===------------------------------------------------------------===//
+// Subtarget Features (options and debugging)
+//===------------------------------------------------------------===//
+
+// Some instructions do not support denormals despite this flag. Using
+// fp32 denormals also causes instructions to run at the double
+// precision rate for the device.
+def FeatureFP32Denormals : SubtargetFeature<"fp32-denormals",
+ "FP32Denormals",
+ "true",
+ "Enable single precision denormal handling"
+>;
+
+def FeatureFP64Denormals : SubtargetFeature<"fp64-denormals",
+ "FP64Denormals",
+ "true",
+ "Enable double precision denormal handling",
+ [FeatureFP64]
+>;
+
+def FeatureFPExceptions : SubtargetFeature<"fp-exceptions",
+ "FPExceptions",
+ "true",
+ "Enable floating point exceptions"
+>;
+
+class FeatureMaxPrivateElementSize<int size> : SubtargetFeature<
+ "max-private-element-size-"#size,
+ "MaxPrivateElementSize",
+ !cast<string>(size),
+ "Maximum private access size may be "#size
+>;
+
+def FeatureMaxPrivateElementSize4 : FeatureMaxPrivateElementSize<4>;
+def FeatureMaxPrivateElementSize8 : FeatureMaxPrivateElementSize<8>;
+def FeatureMaxPrivateElementSize16 : FeatureMaxPrivateElementSize<16>;
+
+def FeatureVGPRSpilling : SubtargetFeature<"vgpr-spilling",
+ "EnableVGPRSpilling",
+ "true",
+ "Enable spilling of VGPRs to scratch memory"
+>;
+
+def FeatureDumpCode : SubtargetFeature <"DumpCode",
+ "DumpCode",
+ "true",
+ "Dump MachineInstrs in the CodeEmitter"
+>;
+
+def FeatureDumpCodeLower : SubtargetFeature <"dumpcode",
+ "DumpCode",
+ "true",
+ "Dump MachineInstrs in the CodeEmitter"
+>;
+
+def FeaturePromoteAlloca : SubtargetFeature <"promote-alloca",
+ "EnablePromoteAlloca",
+ "true",
+ "Enable promote alloca pass"
+>;
+
+// XXX - This should probably be removed once enabled by default
+def FeatureEnableLoadStoreOpt : SubtargetFeature <"load-store-opt",
+ "EnableLoadStoreOpt",
+ "true",
+ "Enable SI load/store optimizer pass"
+>;
+
+// Performance debugging feature. Allow using DS instruction immediate
+// offsets even if the base pointer can't be proven to be the base. On SI,
+// base pointer values that won't give the same result as a 16-bit add
+// are not safe to fold, but this will override the conservative test
+// for the base pointer.
+def FeatureEnableUnsafeDSOffsetFolding : SubtargetFeature <
+ "unsafe-ds-offset-folding",
+ "EnableUnsafeDSOffsetFolding",
+ "true",
+ "Force using DS instruction immediate offsets on SI"
+>;
+
+def FeatureEnableSIScheduler : SubtargetFeature<"si-scheduler",
+ "EnableSIScheduler",
+ "true",
+ "Enable SI Machine Scheduler"
+>;
+
+def FeatureFlatForGlobal : SubtargetFeature<"flat-for-global",
+ "FlatForGlobal",
+ "true",
+ "Force to generate flat instruction for global"
+>;
// Dummy feature used to disable assembler instructions.
def FeatureDisable : SubtargetFeature<"",
- "FeatureDisable","true",
- "Dummy feature to disable assembler"
- " instructions">;
+ "FeatureDisable","true",
+ "Dummy feature to disable assembler instructions"
+>;
class SubtargetFeatureGeneration <string Value,
list<SubtargetFeature> Implies> :
@@ -227,33 +275,66 @@ def FeatureLocalMemorySize32768 : SubtargetFeatureLocalMemorySize<32768>;
def FeatureLocalMemorySize65536 : SubtargetFeatureLocalMemorySize<65536>;
def FeatureR600 : SubtargetFeatureGeneration<"R600",
- [FeatureR600ALUInst, FeatureFetchLimit8, FeatureLocalMemorySize0]>;
+ [FeatureR600ALUInst, FeatureFetchLimit8, FeatureLocalMemorySize0]
+>;
def FeatureR700 : SubtargetFeatureGeneration<"R700",
- [FeatureFetchLimit16, FeatureLocalMemorySize0]>;
+ [FeatureFetchLimit16, FeatureLocalMemorySize0]
+>;
def FeatureEvergreen : SubtargetFeatureGeneration<"EVERGREEN",
- [FeatureFetchLimit16, FeatureLocalMemorySize32768]>;
+ [FeatureFetchLimit16, FeatureLocalMemorySize32768]
+>;
def FeatureNorthernIslands : SubtargetFeatureGeneration<"NORTHERN_ISLANDS",
- [FeatureFetchLimit16, FeatureWavefrontSize64,
- FeatureLocalMemorySize32768]
+ [FeatureFetchLimit16, FeatureWavefrontSize64,
+ FeatureLocalMemorySize32768]
>;
def FeatureSouthernIslands : SubtargetFeatureGeneration<"SOUTHERN_ISLANDS",
- [Feature64BitPtr, FeatureFP64, FeatureLocalMemorySize32768,
- FeatureWavefrontSize64, FeatureGCN, FeatureGCN1Encoding,
- FeatureLDSBankCount32]>;
+ [FeatureFP64, FeatureLocalMemorySize32768,
+ FeatureWavefrontSize64, FeatureGCN, FeatureGCN1Encoding,
+ FeatureLDSBankCount32]
+>;
def FeatureSeaIslands : SubtargetFeatureGeneration<"SEA_ISLANDS",
- [Feature64BitPtr, FeatureFP64, FeatureLocalMemorySize65536,
- FeatureWavefrontSize64, FeatureGCN, FeatureFlatAddressSpace,
- FeatureGCN1Encoding, FeatureCIInsts]>;
+ [FeatureFP64, FeatureLocalMemorySize65536,
+ FeatureWavefrontSize64, FeatureGCN, FeatureFlatAddressSpace,
+ FeatureGCN1Encoding, FeatureCIInsts]
+>;
def FeatureVolcanicIslands : SubtargetFeatureGeneration<"VOLCANIC_ISLANDS",
- [Feature64BitPtr, FeatureFP64, FeatureLocalMemorySize65536,
- FeatureWavefrontSize64, FeatureFlatAddressSpace, FeatureGCN,
- FeatureGCN3Encoding, FeatureCIInsts]>;
+ [FeatureFP64, FeatureLocalMemorySize65536,
+ FeatureWavefrontSize64, FeatureFlatAddressSpace, FeatureGCN,
+ FeatureGCN3Encoding, FeatureCIInsts, Feature16BitInsts,
+ FeatureSMemRealTime
+ ]
+>;
+
+//===----------------------------------------------------------------------===//
+// Debugger related subtarget features.
+//===----------------------------------------------------------------------===//
+
+def FeatureDebuggerInsertNops : SubtargetFeature<
+ "amdgpu-debugger-insert-nops",
+ "DebuggerInsertNops",
+ "true",
+ "Insert one nop instruction for each high level source statement"
+>;
+
+def FeatureDebuggerReserveRegs : SubtargetFeature<
+ "amdgpu-debugger-reserve-regs",
+ "DebuggerReserveRegs",
+ "true",
+ "Reserve registers for debugger usage"
+>;
+
+def FeatureDebuggerEmitPrologue : SubtargetFeature<
+ "amdgpu-debugger-emit-prologue",
+ "DebuggerEmitPrologue",
+ "true",
+ "Emit debugger prologue"
+>;
//===----------------------------------------------------------------------===//
@@ -283,6 +364,7 @@ def NullALU : InstrItinClass;
//===----------------------------------------------------------------------===//
def TruePredicate : Predicate<"true">;
+
def isSICI : Predicate<
"Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS ||"
"Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS"
@@ -292,6 +374,13 @@ def isVI : Predicate <
"Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS">,
AssemblerPredicate<"FeatureGCN3Encoding">;
+def isCIVI : Predicate <
+ "Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS || "
+ "Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS"
+>, AssemblerPredicate<"FeatureCIInsts">;
+
+def HasFlatAddressSpace : Predicate<"Subtarget->hasFlatAddressSpace()">;
+
class PredicateControl {
Predicate SubtargetPredicate;
Predicate SIAssemblerPredicate = isSICI;
diff --git a/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp b/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
index ad267d350850..63f5fb3cdf00 100644
--- a/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
+++ b/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
@@ -45,9 +45,8 @@ bool AMDGPUAlwaysInline::runOnModule(Module &M) {
for (Function *F : FuncsToClone) {
ValueToValueMapTy VMap;
- Function *NewFunc = CloneFunction(F, VMap, false);
+ Function *NewFunc = CloneFunction(F, VMap);
NewFunc->setLinkage(GlobalValue::InternalLinkage);
- M.getFunctionList().push_back(NewFunc);
F->replaceAllUsesWith(NewFunc);
}
diff --git a/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp b/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
index 378183927242..0910b2877b09 100644
--- a/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
+++ b/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
@@ -24,6 +25,8 @@ namespace {
class AMDGPUAnnotateKernelFeatures : public ModulePass {
private:
+ static bool hasAddrSpaceCast(const Function &F);
+
void addAttrToCallers(Function *Intrin, StringRef AttrName);
bool addAttrsForIntrinsics(Module &M, ArrayRef<StringRef[2]>);
@@ -40,6 +43,11 @@ public:
AU.setPreservesAll();
ModulePass::getAnalysisUsage(AU);
}
+
+ static bool visitConstantExpr(const ConstantExpr *CE);
+ static bool visitConstantExprsRecursively(
+ const Constant *EntryC,
+ SmallPtrSet<const Constant *, 8> &ConstantExprVisited);
};
}
@@ -48,12 +56,87 @@ char AMDGPUAnnotateKernelFeatures::ID = 0;
char &llvm::AMDGPUAnnotateKernelFeaturesID = AMDGPUAnnotateKernelFeatures::ID;
+INITIALIZE_PASS(AMDGPUAnnotateKernelFeatures, DEBUG_TYPE,
+ "Add AMDGPU function attributes", false, false)
+
+
+// The queue ptr is only needed when casting to flat, not from it.
+static bool castRequiresQueuePtr(unsigned SrcAS) {
+ return SrcAS == AMDGPUAS::LOCAL_ADDRESS || SrcAS == AMDGPUAS::PRIVATE_ADDRESS;
+}
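+// For example, a cast such as
+//   %f = addrspacecast i32 addrspace(3)* %p to i32 addrspace(4)*
+// (local to flat) needs the aperture held via the queue ptr, while the
+// flat-to-local direction does not.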
+
+static bool castRequiresQueuePtr(const AddrSpaceCastInst *ASC) {
+ return castRequiresQueuePtr(ASC->getSrcAddressSpace());
+}
+
+bool AMDGPUAnnotateKernelFeatures::visitConstantExpr(const ConstantExpr *CE) {
+ if (CE->getOpcode() == Instruction::AddrSpaceCast) {
+ unsigned SrcAS = CE->getOperand(0)->getType()->getPointerAddressSpace();
+ return castRequiresQueuePtr(SrcAS);
+ }
+
+ return false;
+}
+
+bool AMDGPUAnnotateKernelFeatures::visitConstantExprsRecursively(
+ const Constant *EntryC,
+ SmallPtrSet<const Constant *, 8> &ConstantExprVisited) {
-INITIALIZE_PASS_BEGIN(AMDGPUAnnotateKernelFeatures, DEBUG_TYPE,
- "Add AMDGPU function attributes", false, false)
-INITIALIZE_PASS_END(AMDGPUAnnotateKernelFeatures, DEBUG_TYPE,
- "Add AMDGPU function attributes", false, false)
+ if (!ConstantExprVisited.insert(EntryC).second)
+ return false;
+ SmallVector<const Constant *, 16> Stack;
+ Stack.push_back(EntryC);
+
+ while (!Stack.empty()) {
+ const Constant *C = Stack.pop_back_val();
+
+ // Check this constant expression.
+ if (const auto *CE = dyn_cast<ConstantExpr>(C)) {
+ if (visitConstantExpr(CE))
+ return true;
+ }
+
+ // Visit all sub-expressions.
+ for (const Use &U : C->operands()) {
+ const auto *OpC = dyn_cast<Constant>(U);
+ if (!OpC)
+ continue;
+
+ if (!ConstantExprVisited.insert(OpC).second)
+ continue;
+
+ Stack.push_back(OpC);
+ }
+ }
+
+ return false;
+}
+
+// Return true if an addrspacecast is used that requires the queue ptr.
+bool AMDGPUAnnotateKernelFeatures::hasAddrSpaceCast(const Function &F) {
+ SmallPtrSet<const Constant *, 8> ConstantExprVisited;
+
+ for (const BasicBlock &BB : F) {
+ for (const Instruction &I : BB) {
+ if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
+ if (castRequiresQueuePtr(ASC))
+ return true;
+ }
+
+ for (const Use &U : I.operands()) {
+ const auto *OpC = dyn_cast<Constant>(U);
+ if (!OpC)
+ continue;
+
+ if (visitConstantExprsRecursively(OpC, ConstantExprVisited))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
void AMDGPUAnnotateKernelFeatures::addAttrToCallers(Function *Intrin,
StringRef AttrName) {
@@ -89,35 +172,46 @@ bool AMDGPUAnnotateKernelFeatures::runOnModule(Module &M) {
static const StringRef IntrinsicToAttr[][2] = {
// .x omitted
+ { "llvm.amdgcn.workitem.id.y", "amdgpu-work-item-id-y" },
+ { "llvm.amdgcn.workitem.id.z", "amdgpu-work-item-id-z" },
+
+ { "llvm.amdgcn.workgroup.id.y", "amdgpu-work-group-id-y" },
+ { "llvm.amdgcn.workgroup.id.z", "amdgpu-work-group-id-z" },
+
{ "llvm.r600.read.tgid.y", "amdgpu-work-group-id-y" },
{ "llvm.r600.read.tgid.z", "amdgpu-work-group-id-z" },
// .x omitted
{ "llvm.r600.read.tidig.y", "amdgpu-work-item-id-y" },
{ "llvm.r600.read.tidig.z", "amdgpu-work-item-id-z" }
-
};
static const StringRef HSAIntrinsicToAttr[][2] = {
- { "llvm.r600.read.local.size.x", "amdgpu-dispatch-ptr" },
- { "llvm.r600.read.local.size.y", "amdgpu-dispatch-ptr" },
- { "llvm.r600.read.local.size.z", "amdgpu-dispatch-ptr" },
-
- { "llvm.r600.read.global.size.x", "amdgpu-dispatch-ptr" },
- { "llvm.r600.read.global.size.y", "amdgpu-dispatch-ptr" },
- { "llvm.r600.read.global.size.z", "amdgpu-dispatch-ptr" },
- { "llvm.amdgcn.dispatch.ptr", "amdgpu-dispatch-ptr" }
+ { "llvm.amdgcn.dispatch.ptr", "amdgpu-dispatch-ptr" },
+ { "llvm.amdgcn.queue.ptr", "amdgpu-queue-ptr" }
};
+ // TODO: We should not add the attributes if the known compile time workgroup
+ // size is 1 for y/z.
+
// TODO: Intrinsics that require queue ptr.
// We do not need to note the x workitem or workgroup id because they are
// always initialized.
bool Changed = addAttrsForIntrinsics(M, IntrinsicToAttr);
- if (TT.getOS() == Triple::AMDHSA)
+ if (TT.getOS() == Triple::AMDHSA) {
Changed |= addAttrsForIntrinsics(M, HSAIntrinsicToAttr);
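+    // A kernel that casts from local or private to flat also needs the
+    // queue ptr, even if it calls no intrinsic that requires it.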
+ for (Function &F : M) {
+ if (F.hasFnAttribute("amdgpu-queue-ptr"))
+ continue;
+
+ if (hasAddrSpaceCast(F))
+ F.addFnAttr("amdgpu-queue-ptr");
+ }
+ }
+
return Changed;
}
diff --git a/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp b/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
index dfddc345f286..2010cc952265 100644
--- a/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
+++ b/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
@@ -43,6 +43,7 @@ public:
AU.setPreservesAll();
}
+ void visitBranchInst(BranchInst &I);
void visitLoadInst(LoadInst &I);
};
@@ -57,13 +58,28 @@ INITIALIZE_PASS_END(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
char AMDGPUAnnotateUniformValues::ID = 0;
+static void setUniformMetadata(Instruction *I) {
+ I->setMetadata("amdgpu.uniform", MDNode::get(I->getContext(), {}));
+}
+
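+// Mark uniform conditional branches so that later passes can select
+// scalar control flow for them instead of divergent vector branches.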
+void AMDGPUAnnotateUniformValues::visitBranchInst(BranchInst &I) {
+ if (I.isUnconditional())
+ return;
+
+ Value *Cond = I.getCondition();
+ if (!DA->isUniform(Cond))
+ return;
+
+ setUniformMetadata(I.getParent()->getTerminator());
+}
+
void AMDGPUAnnotateUniformValues::visitLoadInst(LoadInst &I) {
Value *Ptr = I.getPointerOperand();
if (!DA->isUniform(Ptr))
return;
if (Instruction *PtrI = dyn_cast<Instruction>(Ptr))
- PtrI->setMetadata("amdgpu.uniform", MDNode::get(I.getContext(), {}));
+ setUniformMetadata(PtrI);
}
@@ -72,6 +88,9 @@ bool AMDGPUAnnotateUniformValues::doInitialization(Module &M) {
}
bool AMDGPUAnnotateUniformValues::runOnFunction(Function &F) {
+ if (skipFunction(F))
+ return false;
+
DA = &getAnalysis<DivergenceAnalysis>();
visit(F);
diff --git a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 1239dfb235ef..cfe6346fb6b1 100644
--- a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -28,8 +28,10 @@
#include "R600RegisterInfo.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
+#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
@@ -37,7 +39,9 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "AMDGPURuntimeMetadata.h"
+using namespace ::AMDGPU;
using namespace llvm;
// TODO: This should get the default rounding mode from the kernel. We just set
@@ -61,7 +65,7 @@ using namespace llvm;
// instructions to run at the double precision rate for the device so it's
// probably best to just report no single precision denormals.
static uint32_t getFPMode(const MachineFunction &F) {
- const AMDGPUSubtarget& ST = F.getSubtarget<AMDGPUSubtarget>();
+ const SISubtarget& ST = F.getSubtarget<SISubtarget>();
// TODO: Is there any real use for the flush in only / flush out only modes?
uint32_t FP32Denormals =
@@ -104,10 +108,12 @@ void AMDGPUAsmPrinter::EmitStartOfAsmFile(Module &M) {
AMDGPUTargetStreamer *TS =
static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer());
- TS->EmitDirectiveHSACodeObjectVersion(1, 0);
+ TS->EmitDirectiveHSACodeObjectVersion(2, 1);
+
AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(STI->getFeatureBits());
TS->EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor, ISA.Stepping,
"AMD", "AMDGPU");
+ emitStartOfRuntimeMetadata(M);
}
void AMDGPUAsmPrinter::EmitFunctionBodyStart() {
@@ -132,54 +138,13 @@ void AMDGPUAsmPrinter::EmitFunctionEntryLabel() {
AsmPrinter::EmitFunctionEntryLabel();
}
-static bool isModuleLinkage(const GlobalValue *GV) {
- switch (GV->getLinkage()) {
- case GlobalValue::InternalLinkage:
- case GlobalValue::CommonLinkage:
- return true;
- case GlobalValue::ExternalLinkage:
- return false;
- default: llvm_unreachable("unknown linkage type");
- }
-}
-
void AMDGPUAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
- if (TM.getTargetTriple().getOS() != Triple::AMDHSA) {
- AsmPrinter::EmitGlobalVariable(GV);
- return;
- }
-
- if (GV->isDeclaration() || GV->getLinkage() == GlobalValue::PrivateLinkage) {
- AsmPrinter::EmitGlobalVariable(GV);
- return;
- }
-
// Group segment variables aren't emitted in HSA.
if (AMDGPU::isGroupSegment(GV))
return;
- AMDGPUTargetStreamer *TS =
- static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer());
- if (isModuleLinkage(GV)) {
- TS->EmitAMDGPUHsaModuleScopeGlobal(GV->getName());
- } else {
- TS->EmitAMDGPUHsaProgramScopeGlobal(GV->getName());
- }
-
- MCSymbolELF *GVSym = cast<MCSymbolELF>(getSymbol(GV));
- const DataLayout &DL = getDataLayout();
-
- // Emit the size
- uint64_t Size = DL.getTypeAllocSize(GV->getType()->getElementType());
- OutStreamer->emitELFSize(GVSym, MCConstantExpr::create(Size, OutContext));
- OutStreamer->PushSection();
- OutStreamer->SwitchSection(
- getObjFileLowering().SectionForGlobal(GV, *Mang, TM));
- const Constant *C = GV->getInitializer();
- OutStreamer->EmitLabel(GVSym);
- EmitGlobalConstant(DL, C);
- OutStreamer->PopSection();
+ AsmPrinter::EmitGlobalVariable(GV);
}
bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
@@ -230,6 +195,20 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
false);
OutStreamer->emitRawComment(" ScratchSize: " + Twine(KernelInfo.ScratchSize),
false);
+ OutStreamer->emitRawComment(" LDSByteSize: " + Twine(KernelInfo.LDSSize) +
+ " bytes/workgroup (compile time only)", false);
+
+ OutStreamer->emitRawComment(" ReservedVGPRFirst: " + Twine(KernelInfo.ReservedVGPRFirst),
+ false);
+ OutStreamer->emitRawComment(" ReservedVGPRCount: " + Twine(KernelInfo.ReservedVGPRCount),
+ false);
+
+ if (MF.getSubtarget<SISubtarget>().debuggerEmitPrologue()) {
+ OutStreamer->emitRawComment(" DebuggerWavefrontPrivateSegmentOffsetSGPR: s" +
+ Twine(KernelInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR), false);
+ OutStreamer->emitRawComment(" DebuggerPrivateSegmentBufferSGPR: s" +
+ Twine(KernelInfo.DebuggerPrivateSegmentBufferSGPR), false);
+ }
OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:USER_SGPR: " +
Twine(G_00B84C_USER_SGPR(KernelInfo.ComputePGMRSrc2)),
@@ -268,15 +247,16 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
}
}
+ emitRuntimeMetadata(*MF.getFunction());
+
return false;
}
void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
unsigned MaxGPR = 0;
bool killPixel = false;
- const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
- const R600RegisterInfo *RI =
- static_cast<const R600RegisterInfo *>(STM.getRegisterInfo());
+ const R600Subtarget &STM = MF.getSubtarget<R600Subtarget>();
+ const R600RegisterInfo *RI = STM.getRegisterInfo();
const R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
for (const MachineBasicBlock &MBB : MF) {
@@ -299,23 +279,23 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
}
unsigned RsrcReg;
- if (STM.getGeneration() >= AMDGPUSubtarget::EVERGREEN) {
+ if (STM.getGeneration() >= R600Subtarget::EVERGREEN) {
// Evergreen / Northern Islands
- switch (MFI->getShaderType()) {
+ switch (MF.getFunction()->getCallingConv()) {
default: // Fall through
- case ShaderType::COMPUTE: RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break;
- case ShaderType::GEOMETRY: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break;
- case ShaderType::PIXEL: RsrcReg = R_028844_SQ_PGM_RESOURCES_PS; break;
- case ShaderType::VERTEX: RsrcReg = R_028860_SQ_PGM_RESOURCES_VS; break;
+ case CallingConv::AMDGPU_CS: RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break;
+ case CallingConv::AMDGPU_GS: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break;
+ case CallingConv::AMDGPU_PS: RsrcReg = R_028844_SQ_PGM_RESOURCES_PS; break;
+ case CallingConv::AMDGPU_VS: RsrcReg = R_028860_SQ_PGM_RESOURCES_VS; break;
}
} else {
// R600 / R700
- switch (MFI->getShaderType()) {
+ switch (MF.getFunction()->getCallingConv()) {
default: // Fall through
- case ShaderType::GEOMETRY: // Fall through
- case ShaderType::COMPUTE: // Fall through
- case ShaderType::VERTEX: RsrcReg = R_028868_SQ_PGM_RESOURCES_VS; break;
- case ShaderType::PIXEL: RsrcReg = R_028850_SQ_PGM_RESOURCES_PS; break;
+ case CallingConv::AMDGPU_GS: // Fall through
+ case CallingConv::AMDGPU_CS: // Fall through
+ case CallingConv::AMDGPU_VS: RsrcReg = R_028868_SQ_PGM_RESOURCES_VS; break;
+ case CallingConv::AMDGPU_PS: RsrcReg = R_028850_SQ_PGM_RESOURCES_PS; break;
}
}
@@ -325,23 +305,23 @@ void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
OutStreamer->EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4);
OutStreamer->EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4);
- if (MFI->getShaderType() == ShaderType::COMPUTE) {
+ if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
OutStreamer->EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
- OutStreamer->EmitIntValue(RoundUpToAlignment(MFI->LDSSize, 4) >> 2, 4);
+ OutStreamer->EmitIntValue(alignTo(MFI->LDSSize, 4) >> 2, 4);
}
}
void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
const MachineFunction &MF) const {
- const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
+ const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
uint64_t CodeSize = 0;
unsigned MaxSGPR = 0;
unsigned MaxVGPR = 0;
bool VCCUsed = false;
bool FlatUsed = false;
- const SIRegisterInfo *RI =
- static_cast<const SIRegisterInfo *>(STM.getRegisterInfo());
+ const SIRegisterInfo *RI = STM.getRegisterInfo();
+ const SIInstrInfo *TII = STM.getInstrInfo();
for (const MachineBasicBlock &MBB : MF) {
for (const MachineInstr &MI : MBB) {
@@ -351,8 +331,7 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
if (MI.isDebugValue())
continue;
- // FIXME: This is reporting 0 for many instructions.
- CodeSize += MI.getDesc().Size;
+ CodeSize += TII->getInstSizeInBytes(MI);
unsigned numOperands = MI.getNumOperands();
for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
@@ -366,6 +345,8 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
unsigned reg = MO.getReg();
switch (reg) {
case AMDGPU::EXEC:
+ case AMDGPU::EXEC_LO:
+ case AMDGPU::EXEC_HI:
case AMDGPU::SCC:
case AMDGPU::M0:
continue;
@@ -382,17 +363,32 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
FlatUsed = true;
continue;
+ case AMDGPU::TBA:
+ case AMDGPU::TBA_LO:
+ case AMDGPU::TBA_HI:
+ case AMDGPU::TMA:
+ case AMDGPU::TMA_LO:
+ case AMDGPU::TMA_HI:
+ llvm_unreachable("Trap Handler registers should not be used");
+ continue;
+
default:
break;
}
if (AMDGPU::SReg_32RegClass.contains(reg)) {
+ if (AMDGPU::TTMP_32RegClass.contains(reg)) {
+ llvm_unreachable("Trap Handler registers should not be used");
+ }
isSGPR = true;
width = 1;
} else if (AMDGPU::VGPR_32RegClass.contains(reg)) {
isSGPR = false;
width = 1;
} else if (AMDGPU::SReg_64RegClass.contains(reg)) {
+ if (AMDGPU::TTMP_64RegClass.contains(reg)) {
+ llvm_unreachable("Trap Handler registers should not be used");
+ }
isSGPR = true;
width = 2;
} else if (AMDGPU::VReg_64RegClass.contains(reg)) {
@@ -438,7 +434,7 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
if (VCCUsed)
ExtraSGPRs = 2;
- if (STM.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ if (STM.getGeneration() < SISubtarget::VOLCANIC_ISLANDS) {
if (FlatUsed)
ExtraSGPRs = 4;
} else {
@@ -451,23 +447,54 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
MaxSGPR += ExtraSGPRs;
+ // Record first reserved register and reserved register count fields, and
+ // update max register counts if "amdgpu-debugger-reserve-regs" attribute was
+ // specified.
+ if (STM.debuggerReserveRegs()) {
+ ProgInfo.ReservedVGPRFirst = MaxVGPR + 1;
+ ProgInfo.ReservedVGPRCount = MFI->getDebuggerReservedVGPRCount();
+ MaxVGPR += MFI->getDebuggerReservedVGPRCount();
+ }
+
+ // Update DebuggerWavefrontPrivateSegmentOffsetSGPR and
+ // DebuggerPrivateSegmentBufferSGPR fields if "amdgpu-debugger-emit-prologue"
+ // attribute was specified.
+ if (STM.debuggerEmitPrologue()) {
+ ProgInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR =
+ RI->getHWRegIndex(MFI->getScratchWaveOffsetReg());
+ ProgInfo.DebuggerPrivateSegmentBufferSGPR =
+ RI->getHWRegIndex(MFI->getScratchRSrcReg());
+ }
+
// We found the maximum register index. They start at 0, so add one to get the
// number of registers.
ProgInfo.NumVGPR = MaxVGPR + 1;
ProgInfo.NumSGPR = MaxSGPR + 1;
if (STM.hasSGPRInitBug()) {
- if (ProgInfo.NumSGPR > AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG) {
+ if (ProgInfo.NumSGPR > SISubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG) {
LLVMContext &Ctx = MF.getFunction()->getContext();
- Ctx.emitError("too many SGPRs used with the SGPR init bug");
+ DiagnosticInfoResourceLimit Diag(*MF.getFunction(),
+ "SGPRs with SGPR init bug",
+ ProgInfo.NumSGPR, DS_Error);
+ Ctx.diagnose(Diag);
}
- ProgInfo.NumSGPR = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG;
+ ProgInfo.NumSGPR = SISubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG;
}
if (MFI->NumUserSGPRs > STM.getMaxNumUserSGPRs()) {
LLVMContext &Ctx = MF.getFunction()->getContext();
- Ctx.emitError("too many user SGPRs used");
+ DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "user SGPRs",
+ MFI->NumUserSGPRs, DS_Error);
+ Ctx.diagnose(Diag);
+ }
+
+ if (MFI->LDSSize > static_cast<unsigned>(STM.getLocalMemorySize())) {
+ LLVMContext &Ctx = MF.getFunction()->getContext();
+ DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "local memory",
+ MFI->LDSSize, DS_Error);
+ Ctx.diagnose(Diag);
}
ProgInfo.VGPRBlocks = (ProgInfo.NumVGPR - 1) / 4;
@@ -476,21 +503,20 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
// register.
ProgInfo.FloatMode = getFPMode(MF);
- // XXX: Not quite sure what this does, but sc seems to unset this.
ProgInfo.IEEEMode = 0;
- // Do not clamp NAN to 0.
- ProgInfo.DX10Clamp = 0;
+ // Make the clamp modifier return 0 on NaN input.
+ ProgInfo.DX10Clamp = 1;
const MachineFrameInfo *FrameInfo = MF.getFrameInfo();
- ProgInfo.ScratchSize = FrameInfo->estimateStackSize(MF);
+ ProgInfo.ScratchSize = FrameInfo->getStackSize();
ProgInfo.FlatUsed = FlatUsed;
ProgInfo.VCCUsed = VCCUsed;
ProgInfo.CodeLen = CodeSize;
unsigned LDSAlignShift;
- if (STM.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
+ if (STM.getGeneration() < SISubtarget::SEA_ISLANDS) {
// LDS is allocated in 64 dword blocks.
LDSAlignShift = 8;
} else {
@@ -503,7 +529,7 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
ProgInfo.LDSSize = MFI->LDSSize + LDSSpillSize;
ProgInfo.LDSBlocks =
- RoundUpToAlignment(ProgInfo.LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;
+ alignTo(ProgInfo.LDSSize, 1ULL << LDSAlignShift) >> LDSAlignShift;
// Scratch is allocated in 256 dword blocks.
unsigned ScratchAlignShift = 10;
@@ -511,8 +537,9 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
// is used by the entire wave. ProgInfo.ScratchSize is the amount of
// scratch memory used per thread.
ProgInfo.ScratchBlocks =
- RoundUpToAlignment(ProgInfo.ScratchSize * STM.getWavefrontSize(),
- 1 << ScratchAlignShift) >> ScratchAlignShift;
+ alignTo(ProgInfo.ScratchSize * STM.getWavefrontSize(),
+ 1ULL << ScratchAlignShift) >>
+ ScratchAlignShift;
ProgInfo.ComputePGMRSrc1 =
S_00B848_VGPRS(ProgInfo.VGPRBlocks) |
@@ -544,23 +571,23 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
S_00B84C_EXCP_EN(0);
}
-static unsigned getRsrcReg(unsigned ShaderType) {
- switch (ShaderType) {
+static unsigned getRsrcReg(CallingConv::ID CallConv) {
+ switch (CallConv) {
default: // Fall through
- case ShaderType::COMPUTE: return R_00B848_COMPUTE_PGM_RSRC1;
- case ShaderType::GEOMETRY: return R_00B228_SPI_SHADER_PGM_RSRC1_GS;
- case ShaderType::PIXEL: return R_00B028_SPI_SHADER_PGM_RSRC1_PS;
- case ShaderType::VERTEX: return R_00B128_SPI_SHADER_PGM_RSRC1_VS;
+ case CallingConv::AMDGPU_CS: return R_00B848_COMPUTE_PGM_RSRC1;
+ case CallingConv::AMDGPU_GS: return R_00B228_SPI_SHADER_PGM_RSRC1_GS;
+ case CallingConv::AMDGPU_PS: return R_00B028_SPI_SHADER_PGM_RSRC1_PS;
+ case CallingConv::AMDGPU_VS: return R_00B128_SPI_SHADER_PGM_RSRC1_VS;
}
}
void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF,
const SIProgramInfo &KernelInfo) {
- const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
+ const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
- unsigned RsrcReg = getRsrcReg(MFI->getShaderType());
+ unsigned RsrcReg = getRsrcReg(MF.getFunction()->getCallingConv());
- if (MFI->getShaderType() == ShaderType::COMPUTE) {
+ if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
OutStreamer->EmitIntValue(R_00B848_COMPUTE_PGM_RSRC1, 4);
OutStreamer->EmitIntValue(KernelInfo.ComputePGMRSrc1, 4);
@@ -577,13 +604,13 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF,
OutStreamer->EmitIntValue(RsrcReg, 4);
OutStreamer->EmitIntValue(S_00B028_VGPRS(KernelInfo.VGPRBlocks) |
S_00B028_SGPRS(KernelInfo.SGPRBlocks), 4);
- if (STM.isVGPRSpillingEnabled(MFI)) {
+ if (STM.isVGPRSpillingEnabled(*MF.getFunction())) {
OutStreamer->EmitIntValue(R_0286E8_SPI_TMPRING_SIZE, 4);
OutStreamer->EmitIntValue(S_0286E8_WAVESIZE(KernelInfo.ScratchBlocks), 4);
}
}
- if (MFI->getShaderType() == ShaderType::PIXEL) {
+ if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_PS) {
OutStreamer->EmitIntValue(R_00B02C_SPI_SHADER_PGM_RSRC2_PS, 4);
OutStreamer->EmitIntValue(S_00B02C_EXTRA_LDS_SIZE(KernelInfo.LDSBlocks), 4);
OutStreamer->EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4);
@@ -591,12 +618,31 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF,
OutStreamer->EmitIntValue(R_0286D0_SPI_PS_INPUT_ADDR, 4);
OutStreamer->EmitIntValue(MFI->getPSInputAddr(), 4);
}
+
+ OutStreamer->EmitIntValue(R_SPILLED_SGPRS, 4);
+ OutStreamer->EmitIntValue(MFI->getNumSpilledSGPRs(), 4);
+ OutStreamer->EmitIntValue(R_SPILLED_VGPRS, 4);
+ OutStreamer->EmitIntValue(MFI->getNumSpilledVGPRs(), 4);
+}
+
+// Map the private element size in bytes to its amd_element_byte_size_t
+// encoding (log2 based).
+static amd_element_byte_size_t getElementByteSizeValue(unsigned Size) {
+ switch (Size) {
+ case 4:
+ return AMD_ELEMENT_4_BYTES;
+ case 8:
+ return AMD_ELEMENT_8_BYTES;
+ case 16:
+ return AMD_ELEMENT_16_BYTES;
+ default:
+ llvm_unreachable("invalid private_element_size");
+ }
}
void AMDGPUAsmPrinter::EmitAmdKernelCodeT(const MachineFunction &MF,
const SIProgramInfo &KernelInfo) const {
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
- const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
+ const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
amd_kernel_code_t header;
AMDGPU::initDefaultAMDKernelCodeT(header, STM.getFeatureBits());
@@ -606,6 +652,11 @@ void AMDGPUAsmPrinter::EmitAmdKernelCodeT(const MachineFunction &MF,
(KernelInfo.ComputePGMRSrc2 << 32);
header.code_properties = AMD_CODE_PROPERTY_IS_PTR64;
+
+ AMD_HSA_BITS_SET(header.code_properties,
+ AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE,
+ getElementByteSizeValue(STM.getMaxPrivateElementSize()));
+
if (MFI->hasPrivateSegmentBuffer()) {
header.code_properties |=
AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER;
@@ -646,6 +697,9 @@ void AMDGPUAsmPrinter::EmitAmdKernelCodeT(const MachineFunction &MF,
if (MFI->hasDispatchPtr())
header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR;
+ if (STM.debuggerSupported())
+ header.code_properties |= AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED;
+
if (STM.isXNACKEnabled())
header.code_properties |= AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED;
@@ -654,9 +708,20 @@ void AMDGPUAsmPrinter::EmitAmdKernelCodeT(const MachineFunction &MF,
header.workitem_vgpr_count = KernelInfo.NumVGPR;
header.workitem_private_segment_byte_size = KernelInfo.ScratchSize;
header.workgroup_group_segment_byte_size = KernelInfo.LDSSize;
+ header.reserved_vgpr_first = KernelInfo.ReservedVGPRFirst;
+ header.reserved_vgpr_count = KernelInfo.ReservedVGPRCount;
+
+ if (STM.debuggerEmitPrologue()) {
+ header.debug_wavefront_private_segment_offset_sgpr =
+ KernelInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR;
+ header.debug_private_segment_buffer_sgpr =
+ KernelInfo.DebuggerPrivateSegmentBufferSGPR;
+ }
AMDGPUTargetStreamer *TS =
static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer());
+
+ OutStreamer->SwitchSection(getObjFileLowering().getTextSection());
TS->EmitAMDKernelCodeT(header);
}
@@ -680,3 +745,227 @@ bool AMDGPUAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
*TM.getSubtargetImpl(*MF->getFunction())->getRegisterInfo());
return false;
}
+
+// Emit a key and an integer value for runtime metadata.
+static void emitRuntimeMDIntValue(std::unique_ptr<MCStreamer> &Streamer,
+ RuntimeMD::Key K, uint64_t V,
+ unsigned Size) {
+ Streamer->EmitIntValue(K, 1);
+ Streamer->EmitIntValue(V, Size);
+}
+
+// Emit a key and a string value for runtime metadata.
+static void emitRuntimeMDStringValue(std::unique_ptr<MCStreamer> &Streamer,
+ RuntimeMD::Key K, StringRef S) {
+ Streamer->EmitIntValue(K, 1);
+ Streamer->EmitIntValue(S.size(), 4);
+ Streamer->EmitBytes(S);
+}
+
+// Emit a key and three integer values for runtime metadata.
+// The three integer values are obtained from MDNode \p Node;
+static void emitRuntimeMDThreeIntValues(std::unique_ptr<MCStreamer> &Streamer,
+ RuntimeMD::Key K, MDNode *Node,
+ unsigned Size) {
+ Streamer->EmitIntValue(K, 1);
+ Streamer->EmitIntValue(mdconst::extract<ConstantInt>(
+ Node->getOperand(0))->getZExtValue(), Size);
+ Streamer->EmitIntValue(mdconst::extract<ConstantInt>(
+ Node->getOperand(1))->getZExtValue(), Size);
+ Streamer->EmitIntValue(mdconst::extract<ConstantInt>(
+ Node->getOperand(2))->getZExtValue(), Size);
+}
+
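+// Runtime metadata is a byte stream of key/value pairs emitted into its
+// own ELF section for the language runtime to parse at load time.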
+void AMDGPUAsmPrinter::emitStartOfRuntimeMetadata(const Module &M) {
+ OutStreamer->SwitchSection(getObjFileLowering().getContext()
+ .getELFSection(RuntimeMD::SectionName, ELF::SHT_PROGBITS, 0));
+
+ emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyMDVersion,
+ RuntimeMD::MDVersion << 8 | RuntimeMD::MDRevision, 2);
+ if (auto MD = M.getNamedMetadata("opencl.ocl.version")) {
+ emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyLanguage,
+ RuntimeMD::OpenCL_C, 1);
+ auto Node = MD->getOperand(0);
+ unsigned short Major = mdconst::extract<ConstantInt>(Node->getOperand(0))
+ ->getZExtValue();
+ unsigned short Minor = mdconst::extract<ConstantInt>(Node->getOperand(1))
+ ->getZExtValue();
+ emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyLanguageVersion,
+ Major * 100 + Minor * 10, 2);
+ }
+}
+
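+// Build the OpenCL C spelling of \p Ty, e.g. <4 x i32> becomes "int4",
+// or "uint4" when isSigned is false.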
+static std::string getOCLTypeName(Type *Ty, bool isSigned) {
+ if (VectorType* VecTy = dyn_cast<VectorType>(Ty)) {
+ Type* EleTy = VecTy->getElementType();
+ unsigned Size = VecTy->getVectorNumElements();
+ return (Twine(getOCLTypeName(EleTy, isSigned)) + Twine(Size)).str();
+ }
+ switch (Ty->getTypeID()) {
+ case Type::HalfTyID: return "half";
+ case Type::FloatTyID: return "float";
+ case Type::DoubleTyID: return "double";
+ case Type::IntegerTyID: {
+ if (!isSigned)
+ return (Twine('u') + Twine(getOCLTypeName(Ty, true))).str();
+ auto IntTy = cast<IntegerType>(Ty);
+ auto BW = IntTy->getIntegerBitWidth();
+ switch (BW) {
+ case 8:
+ return "char";
+ case 16:
+ return "short";
+ case 32:
+ return "int";
+ case 64:
+ return "long";
+ default:
+ return (Twine('i') + Twine(BW)).str();
+ }
+ }
+ default:
+ llvm_unreachable("invalid type");
+ }
+}
+
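+// Map \p Ty to its runtime metadata value type, looking through vector
+// and pointer types; \p TypeName supplies the signedness ("u" prefix).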
+static RuntimeMD::KernelArg::ValueType getRuntimeMDValueType(
+ Type *Ty, StringRef TypeName) {
+ if (auto VT = dyn_cast<VectorType>(Ty))
+ return getRuntimeMDValueType(VT->getElementType(), TypeName);
+ else if (auto PT = dyn_cast<PointerType>(Ty))
+ return getRuntimeMDValueType(PT->getElementType(), TypeName);
+ else if (Ty->isHalfTy())
+ return RuntimeMD::KernelArg::F16;
+ else if (Ty->isFloatTy())
+ return RuntimeMD::KernelArg::F32;
+ else if (Ty->isDoubleTy())
+ return RuntimeMD::KernelArg::F64;
+ else if (IntegerType* intTy = dyn_cast<IntegerType>(Ty)) {
+ bool Signed = !TypeName.startswith("u");
+ switch (intTy->getIntegerBitWidth()) {
+ case 8:
+ return Signed ? RuntimeMD::KernelArg::I8 : RuntimeMD::KernelArg::U8;
+ case 16:
+ return Signed ? RuntimeMD::KernelArg::I16 : RuntimeMD::KernelArg::U16;
+ case 32:
+ return Signed ? RuntimeMD::KernelArg::I32 : RuntimeMD::KernelArg::U32;
+ case 64:
+ return Signed ? RuntimeMD::KernelArg::I64 : RuntimeMD::KernelArg::U64;
+ default:
+ // Runtime does not recognize other integer types. Report as
+ // struct type.
+ return RuntimeMD::KernelArg::Struct;
+ }
+ } else
+ return RuntimeMD::KernelArg::Struct;
+}
+
+void AMDGPUAsmPrinter::emitRuntimeMetadata(const Function &F) {
+ if (!F.getMetadata("kernel_arg_type"))
+ return;
+
+ MCContext &Context = getObjFileLowering().getContext();
+ OutStreamer->SwitchSection(
+ Context.getELFSection(RuntimeMD::SectionName, ELF::SHT_PROGBITS, 0));
+ OutStreamer->EmitIntValue(RuntimeMD::KeyKernelBegin, 1);
+ emitRuntimeMDStringValue(OutStreamer, RuntimeMD::KeyKernelName, F.getName());
+
+ for (auto &Arg:F.args()) {
+ // Emit KeyArgBegin.
+ unsigned I = Arg.getArgNo();
+ OutStreamer->EmitIntValue(RuntimeMD::KeyArgBegin, 1);
+
+ // Emit KeyArgSize and KeyArgAlign.
+ auto T = Arg.getType();
+ auto DL = F.getParent()->getDataLayout();
+ emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyArgSize,
+ DL.getTypeAllocSize(T), 4);
+ emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyArgAlign,
+ DL.getABITypeAlignment(T), 4);
+
+ // Emit KeyArgTypeName.
+ auto TypeName = dyn_cast<MDString>(F.getMetadata(
+ "kernel_arg_type")->getOperand(I))->getString();
+ emitRuntimeMDStringValue(OutStreamer, RuntimeMD::KeyArgTypeName, TypeName);
+
+ // Emit KeyArgName.
+ if (auto ArgNameMD = F.getMetadata("kernel_arg_name")) {
+ auto ArgName = cast<MDString>(ArgNameMD->getOperand(
+ I))->getString();
+ emitRuntimeMDStringValue(OutStreamer, RuntimeMD::KeyArgName, ArgName);
+ }
+
+ // Emit KeyArgIsVolatile, KeyArgIsRestrict, KeyArgIsConst and KeyArgIsPipe.
+ auto TypeQual = cast<MDString>(F.getMetadata(
+ "kernel_arg_type_qual")->getOperand(I))->getString();
+ SmallVector<StringRef, 1> SplitQ;
+ TypeQual.split(SplitQ, " ", -1, false /* drop empty entries */);
+ for (auto &I:SplitQ) {
+ auto Key = StringSwitch<RuntimeMD::Key>(I)
+ .Case("volatile", RuntimeMD::KeyArgIsVolatile)
+ .Case("restrict", RuntimeMD::KeyArgIsRestrict)
+ .Case("const", RuntimeMD::KeyArgIsConst)
+ .Case("pipe", RuntimeMD::KeyArgIsPipe)
+ .Default(RuntimeMD::KeyNull);
+ OutStreamer->EmitIntValue(Key, 1);
+ }
+
+ // Emit KeyArgTypeKind.
+ auto BaseTypeName = cast<MDString>(
+ F.getMetadata("kernel_arg_base_type")->getOperand(I))->getString();
+ auto TypeKind = StringSwitch<RuntimeMD::KernelArg::TypeKind>(BaseTypeName)
+ .Case("sampler_t", RuntimeMD::KernelArg::Sampler)
+ .Case("queue_t", RuntimeMD::KernelArg::Queue)
+ .Cases("image1d_t", "image1d_array_t", "image1d_buffer_t",
+ "image2d_t" , "image2d_array_t", RuntimeMD::KernelArg::Image)
+ .Cases("image2d_depth_t", "image2d_array_depth_t",
+ "image2d_msaa_t", "image2d_array_msaa_t",
+ "image2d_msaa_depth_t", RuntimeMD::KernelArg::Image)
+ .Cases("image2d_array_msaa_depth_t", "image3d_t",
+ RuntimeMD::KernelArg::Image)
+ .Default(isa<PointerType>(T) ? RuntimeMD::KernelArg::Pointer :
+ RuntimeMD::KernelArg::Value);
+ emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyArgTypeKind, TypeKind, 1);
+
+ // Emit KeyArgValueType.
+ emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyArgValueType,
+ getRuntimeMDValueType(T, BaseTypeName), 2);
+
+ // Emit KeyArgAccQual.
+ auto AccQual = cast<MDString>(F.getMetadata(
+ "kernel_arg_access_qual")->getOperand(I))->getString();
+ auto AQ = StringSwitch<RuntimeMD::KernelArg::AccessQualifer>(AccQual)
+ .Case("read_only", RuntimeMD::KernelArg::ReadOnly)
+ .Case("write_only", RuntimeMD::KernelArg::WriteOnly)
+ .Case("read_write", RuntimeMD::KernelArg::ReadWrite)
+ .Default(RuntimeMD::KernelArg::None);
+ emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyArgAccQual,
+ AQ, 1);
+
+ // Emit KeyArgAddrQual.
+ if (isa<PointerType>(T))
+ emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyArgAddrQual,
+ T->getPointerAddressSpace(), 1);
+
+ // Emit KeyArgEnd
+ OutStreamer->EmitIntValue(RuntimeMD::KeyArgEnd, 1);
+ }
+
+ // Emit KeyReqdWorkGroupSize, KeyWorkGroupSizeHint, and KeyVecTypeHint.
+ if (auto RWGS = F.getMetadata("reqd_work_group_size"))
+ emitRuntimeMDThreeIntValues(OutStreamer, RuntimeMD::KeyReqdWorkGroupSize,
+ RWGS, 4);
+ if (auto WGSH = F.getMetadata("work_group_size_hint"))
+ emitRuntimeMDThreeIntValues(OutStreamer, RuntimeMD::KeyWorkGroupSizeHint,
+ WGSH, 4);
+ if (auto VTH = F.getMetadata("vec_type_hint")) {
+ auto TypeName = getOCLTypeName(cast<ValueAsMetadata>(
+ VTH->getOperand(0))->getType(), mdconst::extract<ConstantInt>(
+ VTH->getOperand(1))->getZExtValue());
+ emitRuntimeMDStringValue(OutStreamer, RuntimeMD::KeyVecTypeHint,
+ TypeName);
+ }
+
+ // Emit KeyKernelEnd
+ OutStreamer->EmitIntValue(RuntimeMD::KeyKernelEnd, 1);
+}
diff --git a/lib/Target/AMDGPU/AMDGPUAsmPrinter.h b/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
index 99d4091670fe..7b04c539520d 100644
--- a/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
+++ b/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
@@ -12,15 +12,15 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUASMPRINTER_H
-#define LLVM_LIB_TARGET_R600_AMDGPUASMPRINTER_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUASMPRINTER_H
+#define LLVM_LIB_TARGET_AMDGPU_AMDGPUASMPRINTER_H
#include "llvm/CodeGen/AsmPrinter.h"
#include <vector>
namespace llvm {
-class AMDGPUAsmPrinter : public AsmPrinter {
+class AMDGPUAsmPrinter final : public AsmPrinter {
private:
struct SIProgramInfo {
SIProgramInfo() :
@@ -40,6 +40,10 @@ private:
NumVGPR(0),
NumSGPR(0),
FlatUsed(false),
+ ReservedVGPRFirst(0),
+ ReservedVGPRCount(0),
+ DebuggerWavefrontPrivateSegmentOffsetSGPR((uint16_t)-1),
+ DebuggerPrivateSegmentBufferSGPR((uint16_t)-1),
VCCUsed(false),
CodeLen(0) {}
@@ -67,6 +71,20 @@ private:
uint32_t LDSSize;
bool FlatUsed;
+  // If ReservedVGPRCount is 0, then this value must also be 0. Otherwise,
+  // this is the first fixed VGPR number reserved.
+ uint16_t ReservedVGPRFirst;
+ // The number of consecutive VGPRs reserved.
+ uint16_t ReservedVGPRCount;
+
+ // Fixed SGPR number used to hold wave scratch offset for entire kernel
+ // execution, or uint16_t(-1) if the register is not used or not known.
+ uint16_t DebuggerWavefrontPrivateSegmentOffsetSGPR;
+ // Fixed SGPR number of the first 4 SGPRs used to hold scratch V# for entire
+ // kernel execution, or uint16_t(-1) if the register is not used or not
+ // known.
+ uint16_t DebuggerPrivateSegmentBufferSGPR;
+
// Bonus information for debugging.
bool VCCUsed;
uint64_t CodeLen;
@@ -109,6 +127,10 @@ public:
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &O) override;
+ void emitStartOfRuntimeMetadata(const Module &M);
+
+ void emitRuntimeMetadata(const Function &F);
+
protected:
std::vector<std::string> DisasmLines, HexLines;
size_t DisasmLineMaxLen;
diff --git a/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
new file mode 100644
index 000000000000..1a1da8a254a7
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -0,0 +1,42 @@
+//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the lowering of LLVM calls to machine code calls for
+/// GlobalISel.
+///
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPUCallLowering.h"
+#include "AMDGPUISelLowering.h"
+
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+
+using namespace llvm;
+
+#ifndef LLVM_BUILD_GLOBAL_ISEL
+#error "This shouldn't be built without GISel"
+#endif
+
+AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
+ : CallLowering(&TLI) {
+}
+
+bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
+ const Value *Val, unsigned VReg) const {
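+  // TODO: Implement; returning true reports success without emitting code.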
+ return true;
+}
+
+bool AMDGPUCallLowering::lowerFormalArguments(
+ MachineIRBuilder &MIRBuilder, const Function::ArgumentListType &Args,
+ const SmallVectorImpl<unsigned> &VRegs) const {
+ // TODO: Implement once there are generic loads/stores.
+ return true;
+}
diff --git a/lib/Target/AMDGPU/AMDGPUCallLowering.h b/lib/Target/AMDGPU/AMDGPUCallLowering.h
new file mode 100644
index 000000000000..61174bacdac3
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUCallLowering.h
@@ -0,0 +1,36 @@
+//===- lib/Target/AMDGPU/AMDGPUCallLowering.h - Call lowering -*- C++ -*---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes how to lower LLVM calls to machine code calls.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUCALLLOWERING_H
+#define LLVM_LIB_TARGET_AMDGPU_AMDGPUCALLLOWERING_H
+
+#include "llvm/CodeGen/GlobalISel/CallLowering.h"
+
+namespace llvm {
+
+class AMDGPUTargetLowering;
+
+class AMDGPUCallLowering: public CallLowering {
+ public:
+ AMDGPUCallLowering(const AMDGPUTargetLowering &TLI);
+
+ bool lowerReturn(MachineIRBuilder &MIRBuiler, const Value *Val,
+ unsigned VReg) const override;
+ bool
+ lowerFormalArguments(MachineIRBuilder &MIRBuilder,
+ const Function::ArgumentListType &Args,
+ const SmallVectorImpl<unsigned> &VRegs) const override;
+};
+} // End of namespace llvm
+#endif
diff --git a/lib/Target/AMDGPU/AMDGPUCallingConv.td b/lib/Target/AMDGPU/AMDGPUCallingConv.td
index b0db26124a0c..47dfa4992068 100644
--- a/lib/Target/AMDGPU/AMDGPUCallingConv.td
+++ b/lib/Target/AMDGPU/AMDGPUCallingConv.td
@@ -110,21 +110,19 @@ def CC_R600 : CallingConv<[
// Calling convention for compute kernels
def CC_AMDGPU_Kernel : CallingConv<[
- CCCustom<"allocateStack">
+ CCCustom<"allocateKernArg">
]>;
def CC_AMDGPU : CallingConv<[
CCIf<"static_cast<const AMDGPUSubtarget&>"
"(State.getMachineFunction().getSubtarget()).getGeneration() >="
"AMDGPUSubtarget::SOUTHERN_ISLANDS && "
- "State.getMachineFunction().getInfo<SIMachineFunctionInfo>()"
- "->getShaderType() == ShaderType::COMPUTE",
+ "!AMDGPU::isShader(State.getCallingConv())",
CCDelegateTo<CC_AMDGPU_Kernel>>,
CCIf<"static_cast<const AMDGPUSubtarget&>"
"(State.getMachineFunction().getSubtarget()).getGeneration() < "
"AMDGPUSubtarget::SOUTHERN_ISLANDS && "
- "State.getMachineFunction().getInfo<R600MachineFunctionInfo>()"
- "->getShaderType() == ShaderType::COMPUTE",
+ "!AMDGPU::isShader(State.getCallingConv())",
CCDelegateTo<CC_AMDGPU_Kernel>>,
CCIf<"static_cast<const AMDGPUSubtarget&>"
"(State.getMachineFunction().getSubtarget()).getGeneration() >= "
diff --git a/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
new file mode 100644
index 000000000000..3b415774df49
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -0,0 +1,82 @@
+//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This pass does miscellaneous AMDGPU optimizations on IR before instruction
+/// selection.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "AMDGPUSubtarget.h"
+
+#include "llvm/Analysis/DivergenceAnalysis.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "amdgpu-codegenprepare"
+
+using namespace llvm;
+
+namespace {
+
+class AMDGPUCodeGenPrepare : public FunctionPass,
+ public InstVisitor<AMDGPUCodeGenPrepare> {
+ DivergenceAnalysis *DA;
+ const TargetMachine *TM;
+
+public:
+ static char ID;
+ AMDGPUCodeGenPrepare(const TargetMachine *TM = nullptr) :
+ FunctionPass(ID),
+ TM(TM) { }
+
+ bool doInitialization(Module &M) override;
+ bool runOnFunction(Function &F) override;
+
+ const char *getPassName() const override {
+ return "AMDGPU IR optimizations";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DivergenceAnalysis>();
+ AU.setPreservesAll();
+ }
+};
+
+} // End anonymous namespace
+
+bool AMDGPUCodeGenPrepare::doInitialization(Module &M) {
+ return false;
+}
+
+bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
+ if (!TM || skipFunction(F))
+ return false;
+
+ DA = &getAnalysis<DivergenceAnalysis>();
+ visit(F);
+
+ return true;
+}
+
+INITIALIZE_TM_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
+ "AMDGPU IR optimizations", false, false)
+INITIALIZE_PASS_DEPENDENCY(DivergenceAnalysis)
+INITIALIZE_TM_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE,
+ "AMDGPU IR optimizations", false, false)
+
+char AMDGPUCodeGenPrepare::ID = 0;
+
+FunctionPass *llvm::createAMDGPUCodeGenPreparePass(const TargetMachine *TM) {
+ return new AMDGPUCodeGenPrepare(TM);
+}
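
The pass is fully wired up (initialization, analysis dependency, TargetMachine
plumbing) but declares no InstVisitor overloads yet, so visit(F) currently does
nothing. A hypothetical hook would slot into the class like the sketch below;
visitBinaryOperator here is illustrative, though DivergenceAnalysis does expose
the isDivergent query:

    // Called by InstVisitor::visit(F) for every BinaryOperator in the function.
    void visitBinaryOperator(BinaryOperator &I) {
      if (DA->isDivergent(&I)) {
        // Divergent values must stay in per-lane (vector) form; uniform ones
        // are candidates for cheaper scalar rewrites.
      }
    }
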
diff --git a/lib/Target/AMDGPU/AMDGPUDiagnosticInfoUnsupported.cpp b/lib/Target/AMDGPU/AMDGPUDiagnosticInfoUnsupported.cpp
deleted file mode 100644
index 2f6b3022dd6e..000000000000
--- a/lib/Target/AMDGPU/AMDGPUDiagnosticInfoUnsupported.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-//===-- AMDGPUDiagnosticInfoUnsupported.cpp -------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPUDiagnosticInfoUnsupported.h"
-
-using namespace llvm;
-
-DiagnosticInfoUnsupported::DiagnosticInfoUnsupported(
- const Function &Fn,
- const Twine &Desc,
- DiagnosticSeverity Severity)
- : DiagnosticInfo(getKindID(), Severity),
- Description(Desc),
- Fn(Fn) { }
-
-int DiagnosticInfoUnsupported::KindID = 0;
-
-void DiagnosticInfoUnsupported::print(DiagnosticPrinter &DP) const {
- DP << "unsupported " << getDescription() << " in " << Fn.getName();
-}
diff --git a/lib/Target/AMDGPU/AMDGPUDiagnosticInfoUnsupported.h b/lib/Target/AMDGPU/AMDGPUDiagnosticInfoUnsupported.h
deleted file mode 100644
index 0fd37e1ede6b..000000000000
--- a/lib/Target/AMDGPU/AMDGPUDiagnosticInfoUnsupported.h
+++ /dev/null
@@ -1,48 +0,0 @@
-//===-- AMDGPUDiagnosticInfoUnsupported.h - Error reporting -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUDIAGNOSTICINFOUNSUPPORTED_H
-#define LLVM_LIB_TARGET_AMDGPU_AMDGPUDIAGNOSTICINFOUNSUPPORTED_H
-
-#include "llvm/IR/DiagnosticInfo.h"
-#include "llvm/IR/DiagnosticPrinter.h"
-
-namespace llvm {
-
-/// Diagnostic information for unimplemented or unsupported feature reporting.
-class DiagnosticInfoUnsupported : public DiagnosticInfo {
-private:
- const Twine &Description;
- const Function &Fn;
-
- static int KindID;
-
- static int getKindID() {
- if (KindID == 0)
- KindID = llvm::getNextAvailablePluginDiagnosticKind();
- return KindID;
- }
-
-public:
- DiagnosticInfoUnsupported(const Function &Fn, const Twine &Desc,
- DiagnosticSeverity Severity = DS_Error);
-
- const Function &getFunction() const { return Fn; }
- const Twine &getDescription() const { return Description; }
-
- void print(DiagnosticPrinter &DP) const override;
-
- static bool classof(const DiagnosticInfo *DI) {
- return DI->getKind() == getKindID();
- }
-};
-
-}
-
-#endif
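
This target-local class is redundant now that llvm/IR/DiagnosticInfo.h provides
a generic DiagnosticInfoUnsupported; the include swaps in AMDGPUISelDAGToDAG.cpp
and AMDGPUISelLowering.cpp below pick up the generic one. Call sites keep the
same shape, roughly:

    DiagnosticInfoUnsupported NotImplemented(*MF.getFunction(),
                                             "addrspacecast not implemented");
    CurDAG->getContext()->diagnose(NotImplemented);
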
diff --git a/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp b/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp
index 4d84d281d998..bbc28b885721 100644
--- a/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp
+++ b/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp
@@ -7,12 +7,13 @@
//
//==-----------------------------------------------------------------------===//
//
-// Interface to describe a layout of a stack frame on a AMDIL target machine
+// Interface to describe the layout of a stack frame on an AMDGPU target machine.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
-#include "R600MachineFunctionInfo.h"
+#include "AMDGPUSubtarget.h"
+
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Instructions.h"
@@ -57,7 +58,7 @@ unsigned AMDGPUFrameLowering::getStackWidth(const MachineFunction &MF) const {
// T2.Y = stack[1].y
// T3.X = stack[1].z
// T3.Y = stack[1].w
- //
+ //
// StackWidth = 4:
// T0.X = stack[0].x
// T0.Y = stack[0].y
@@ -75,7 +76,8 @@ int AMDGPUFrameLowering::getFrameIndexReference(const MachineFunction &MF,
int FI,
unsigned &FrameReg) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
+ const AMDGPURegisterInfo *RI
+ = MF.getSubtarget<AMDGPUSubtarget>().getRegisterInfo();
// Fill in FrameReg output argument.
FrameReg = RI->getFrameRegister(MF);
@@ -87,32 +89,16 @@ int AMDGPUFrameLowering::getFrameIndexReference(const MachineFunction &MF,
int UpperBound = FI == -1 ? MFI->getNumObjects() : FI;
for (int i = MFI->getObjectIndexBegin(); i < UpperBound; ++i) {
- OffsetBytes = RoundUpToAlignment(OffsetBytes, MFI->getObjectAlignment(i));
+ OffsetBytes = alignTo(OffsetBytes, MFI->getObjectAlignment(i));
OffsetBytes += MFI->getObjectSize(i);
// Each register holds 4 bytes, so we must always align the offset to at
// least 4 bytes, so that 2 frame objects won't share the same register.
- OffsetBytes = RoundUpToAlignment(OffsetBytes, 4);
+ OffsetBytes = alignTo(OffsetBytes, 4);
}
if (FI != -1)
- OffsetBytes = RoundUpToAlignment(OffsetBytes, MFI->getObjectAlignment(FI));
+ OffsetBytes = alignTo(OffsetBytes, MFI->getObjectAlignment(FI));
return OffsetBytes / (getStackWidth(MF) * 4);
}
-const TargetFrameLowering::SpillSlot *
-AMDGPUFrameLowering::getCalleeSavedSpillSlots(unsigned &NumEntries) const {
- NumEntries = 0;
- return nullptr;
-}
-void AMDGPUFrameLowering::emitPrologue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {}
-void
-AMDGPUFrameLowering::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
-}
-
-bool
-AMDGPUFrameLowering::hasFP(const MachineFunction &MF) const {
- return false;
-}
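
A worked example of getFrameIndexReference above: with getStackWidth(MF) == 1
and two objects (object 0: 6 bytes, 4-byte aligned; object 1: 4 bytes, 4-byte
aligned), a query for FI == 1 computes

    OffsetBytes = alignTo(0, 4)  // = 0, place object 0
    OffsetBytes += 6             // = 6
    OffsetBytes = alignTo(6, 4)  // = 8, each register holds 4 bytes
    OffsetBytes = alignTo(8, 4)  // = 8, align for object 1 itself
    return 8 / (1 * 4);          // = 2, object 1 starts at stack register 2
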
diff --git a/lib/Target/AMDGPU/AMDGPUFrameLowering.h b/lib/Target/AMDGPU/AMDGPUFrameLowering.h
index 257a3da40589..513848a1d887 100644
--- a/lib/Target/AMDGPU/AMDGPUFrameLowering.h
+++ b/lib/Target/AMDGPU/AMDGPUFrameLowering.h
@@ -32,13 +32,13 @@ public:
/// \returns The number of 32-bit sub-registers that are used when storing
/// values to the stack.
unsigned getStackWidth(const MachineFunction &MF) const;
+
int getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const override;
- const SpillSlot *
- getCalleeSavedSpillSlots(unsigned &NumEntries) const override;
- void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
- bool hasFP(const MachineFunction &MF) const override;
+
+ bool hasFP(const MachineFunction &MF) const override {
+ return false;
+ }
};
} // namespace llvm
#endif
diff --git a/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index b33040b4d06a..23c9352ce273 100644
--- a/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -1,4 +1,4 @@
-//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
+//===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ----===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,30 +12,44 @@
//
//===----------------------------------------------------------------------===//
-#include "AMDGPUDiagnosticInfoUnsupported.h"
#include "AMDGPUInstrInfo.h"
+#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
-#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
-#include "R600InstrInfo.h"
-#include "SIDefines.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/IR/Function.h"
+#include "llvm/IR/DiagnosticInfo.h"
using namespace llvm;
+namespace llvm {
+class R600InstrInfo;
+}
+
//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//
namespace {
+
+static bool isCBranchSCC(const SDNode *N) {
+ assert(N->getOpcode() == ISD::BRCOND);
+ if (!N->hasOneUse())
+ return false;
+
+ SDValue Cond = N->getOperand(1);
+ if (Cond.getOpcode() == ISD::CopyToReg)
+ Cond = Cond.getOperand(2);
+ return Cond.getOpcode() == ISD::SETCC &&
+ Cond.getOperand(0).getValueType() == MVT::i32 && Cond.hasOneUse();
+}
+
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
@@ -47,7 +61,7 @@ public:
AMDGPUDAGToDAGISel(TargetMachine &TM);
virtual ~AMDGPUDAGToDAGISel();
bool runOnMachineFunction(MachineFunction &MF) override;
- SDNode *Select(SDNode *N) override;
+ void Select(SDNode *N) override;
const char *getPassName() const override;
void PreprocessISelDAG() override;
void PostprocessISelDAG() override;
@@ -59,28 +73,8 @@ private:
bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
- // Complex pattern selectors
- bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
- bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
- bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
-
- static bool checkType(const Value *ptr, unsigned int addrspace);
- static bool checkPrivateAddress(const MachineMemOperand *Op);
-
- static bool isGlobalStore(const StoreSDNode *N);
- static bool isFlatStore(const StoreSDNode *N);
- static bool isPrivateStore(const StoreSDNode *N);
- static bool isLocalStore(const StoreSDNode *N);
- static bool isRegionStore(const StoreSDNode *N);
-
- bool isCPLoad(const LoadSDNode *N) const;
- bool isConstantLoad(const LoadSDNode *N, int cbID) const;
- bool isGlobalLoad(const LoadSDNode *N) const;
- bool isFlatLoad(const LoadSDNode *N) const;
- bool isParamLoad(const LoadSDNode *N) const;
- bool isPrivateLoad(const LoadSDNode *N) const;
- bool isLocalLoad(const LoadSDNode *N) const;
- bool isRegionLoad(const LoadSDNode *N) const;
+ bool isConstantLoad(const MemSDNode *N, int cbID) const;
+ bool isUniformBr(const SDNode *N) const;
SDNode *glueCopyToM0(SDNode *N) const;
@@ -111,7 +105,20 @@ private:
SDValue &Offset, SDValue &GLC, SDValue &SLC,
SDValue &TFE) const;
bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
- SDValue &Offset, SDValue &GLC) const;
+ SDValue &Offset, SDValue &SLC) const;
+ bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
+ SDValue &Offset) const;
+ bool SelectMUBUFConstant(SDValue Constant,
+ SDValue &SOffset,
+ SDValue &ImmOffset) const;
+ bool SelectMUBUFIntrinsicOffset(SDValue Offset, SDValue &SOffset,
+ SDValue &ImmOffset) const;
+ bool SelectMUBUFIntrinsicVOffset(SDValue Offset, SDValue &SOffset,
+ SDValue &ImmOffset, SDValue &VOffset) const;
+
+ bool SelectFlat(SDValue Addr, SDValue &VAddr,
+ SDValue &SLC, SDValue &TFE) const;
+
bool SelectSMRDOffset(SDValue ByteOffsetNode, SDValue &Offset,
bool &Imm) const;
bool SelectSMRD(SDValue Addr, SDValue &SBase, SDValue &Offset,
@@ -122,7 +129,7 @@ private:
bool SelectSMRDBufferImm(SDValue Addr, SDValue &Offset) const;
bool SelectSMRDBufferImm32(SDValue Addr, SDValue &Offset) const;
bool SelectSMRDBufferSgpr(SDValue Addr, SDValue &Offset) const;
- SDNode *SelectAddrSpaceCast(SDNode *N);
+ bool SelectMOVRELOffset(SDValue Index, SDValue &Base, SDValue &Offset) const;
bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
bool SelectVOP3NoMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
@@ -136,13 +143,15 @@ private:
SDValue &Clamp,
SDValue &Omod) const;
- SDNode *SelectADD_SUB_I64(SDNode *N);
- SDNode *SelectDIV_SCALE(SDNode *N);
+ void SelectADD_SUB_I64(SDNode *N);
+ void SelectDIV_SCALE(SDNode *N);
- SDNode *getS_BFE(unsigned Opcode, SDLoc DL, SDValue Val,
+ SDNode *getS_BFE(unsigned Opcode, const SDLoc &DL, SDValue Val,
uint32_t Offset, uint32_t Width);
- SDNode *SelectS_BFEFromShifts(SDNode *N);
- SDNode *SelectS_BFE(SDNode *N);
+ void SelectS_BFEFromShifts(SDNode *N);
+ void SelectS_BFE(SDNode *N);
+ void SelectBRCOND(SDNode *N);
+ void SelectATOMIC_CMP_SWAP(SDNode *N);
// Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
@@ -159,7 +168,7 @@ AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
: SelectionDAGISel(TM) {}
bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
- Subtarget = &static_cast<const AMDGPUSubtarget &>(MF.getSubtarget());
+ Subtarget = &MF.getSubtarget<AMDGPUSubtarget>();
return SelectionDAGISel::runOnMachineFunction(MF);
}
@@ -207,64 +216,9 @@ const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
}
}
-bool AMDGPUDAGToDAGISel::SelectADDRParam(
- SDValue Addr, SDValue& R1, SDValue& R2) {
-
- if (Addr.getOpcode() == ISD::FrameIndex) {
- if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
- R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
- R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
- } else {
- R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
- }
- } else if (Addr.getOpcode() == ISD::ADD) {
- R1 = Addr.getOperand(0);
- R2 = Addr.getOperand(1);
- } else {
- R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
- }
- return true;
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
- if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
- Addr.getOpcode() == ISD::TargetGlobalAddress) {
- return false;
- }
- return SelectADDRParam(Addr, R1, R2);
-}
-
-
-bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
- if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
- Addr.getOpcode() == ISD::TargetGlobalAddress) {
- return false;
- }
-
- if (Addr.getOpcode() == ISD::FrameIndex) {
- if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
- R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
- R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i64);
- } else {
- R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i64);
- }
- } else if (Addr.getOpcode() == ISD::ADD) {
- R1 = Addr.getOperand(0);
- R2 = Addr.getOperand(1);
- } else {
- R1 = Addr;
- R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i64);
- }
- return true;
-}
-
SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N) const {
if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
- !checkType(cast<MemSDNode>(N)->getMemOperand()->getValue(),
- AMDGPUAS::LOCAL_ADDRESS))
+ cast<MemSDNode>(N)->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
return N;
const SITargetLowering& Lowering =
@@ -304,14 +258,15 @@ static unsigned selectSGPRVectorRegClassID(unsigned NumVectorElts) {
llvm_unreachable("invalid vector size");
}
-SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
+void AMDGPUDAGToDAGISel::Select(SDNode *N) {
unsigned int Opc = N->getOpcode();
if (N->isMachineOpcode()) {
N->setNodeId(-1);
- return nullptr; // Already selected.
+ return; // Already selected.
}
- if (isa<AtomicSDNode>(N))
+ if (isa<AtomicSDNode>(N) ||
+ (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC))
N = glueCopyToM0(N);
switch (Opc) {
@@ -325,7 +280,8 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
break;
- return SelectADD_SUB_I64(N);
+ SelectADD_SUB_I64(N);
+ return;
}
case ISD::SCALAR_TO_VECTOR:
case AMDGPUISD::BUILD_VERTICAL_VECTOR:
@@ -359,8 +315,9 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
if (NumVectorElts == 1) {
- return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
- N->getOperand(0), RegClass);
+ CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
+ RegClass);
+ return;
}
assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
@@ -400,8 +357,8 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
if (!IsRegSeq)
break;
- return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
- RegSeqArgs);
+ CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs);
+ return;
}
case ISD::BUILD_PAIR: {
SDValue RC, SubReg0, SubReg1;
@@ -422,8 +379,9 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
}
const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
N->getOperand(1), SubReg1 };
- return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
- DL, N->getValueType(0), Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
+ N->getValueType(0), Ops));
+ return;
}
case ISD::Constant:
@@ -452,8 +410,9 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
};
- return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
- N->getValueType(0), Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
+ N->getValueType(0), Ops));
+ return;
}
case ISD::LOAD:
case ISD::STORE: {
@@ -487,11 +446,13 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
uint32_t OffsetVal = Offset->getZExtValue();
uint32_t WidthVal = Width->getZExtValue();
- return getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32, SDLoc(N),
- N->getOperand(0), OffsetVal, WidthVal);
+ ReplaceNode(N, getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
+ SDLoc(N), N->getOperand(0), OffsetVal, WidthVal));
+ return;
}
case AMDGPUISD::DIV_SCALE: {
- return SelectDIV_SCALE(N);
+ SelectDIV_SCALE(N);
+ return;
}
case ISD::CopyToReg: {
const SITargetLowering& Lowering =
@@ -499,139 +460,48 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
Lowering.legalizeTargetIndependentNode(N, *CurDAG);
break;
}
- case ISD::ADDRSPACECAST:
- return SelectAddrSpaceCast(N);
case ISD::AND:
case ISD::SRL:
case ISD::SRA:
+ case ISD::SIGN_EXTEND_INREG:
if (N->getValueType(0) != MVT::i32 ||
Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
break;
- return SelectS_BFE(N);
+ SelectS_BFE(N);
+ return;
+ case ISD::BRCOND:
+ SelectBRCOND(N);
+ return;
+
+ case AMDGPUISD::ATOMIC_CMP_SWAP:
+ SelectATOMIC_CMP_SWAP(N);
+ return;
}
- return SelectCode(N);
+ SelectCode(N);
}
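
Select returns void throughout this file now: each case either mutates N in
place with CurDAG->SelectNodeTo or hands a replacement to ReplaceNode, matching
the void Select override declared earlier in this patch. The recurring shape
(SelectFoo, Opc, VTs and Ops are illustrative):

    void AMDGPUDAGToDAGISel::SelectFoo(SDNode *N) {
      MachineSDNode *New = CurDAG->getMachineNode(Opc, SDLoc(N), VTs, Ops);
      ReplaceNode(N, New);  // moves all uses of N to New, then deletes N
    }
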
-bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
- assert(AS != 0 && "Use checkPrivateAddress instead.");
- if (!Ptr)
+bool AMDGPUDAGToDAGISel::isConstantLoad(const MemSDNode *N, int CbId) const {
+ if (!N->readMem())
return false;
-
- return Ptr->getType()->getPointerAddressSpace() == AS;
-}
-
-bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) {
- if (Op->getPseudoValue())
- return true;
-
- if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType()))
- return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
-
- return false;
-}
-
-bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
- return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
- const Value *MemVal = N->getMemOperand()->getValue();
- return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
- !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
- !checkType(MemVal, AMDGPUAS::REGION_ADDRESS));
-}
-
-bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
- return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isFlatStore(const StoreSDNode *N) {
- return checkType(N->getMemOperand()->getValue(), AMDGPUAS::FLAT_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
- return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
- const Value *MemVal = N->getMemOperand()->getValue();
if (CbId == -1)
- return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS);
+ return N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;
- return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
+ return N->getAddressSpace() == AMDGPUAS::CONSTANT_BUFFER_0 + CbId;
}
-bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
- if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS)
- if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
- N->getMemoryVT().bitsLT(MVT::i32))
- return true;
-
- return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
- return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
- return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isFlatLoad(const LoadSDNode *N) const {
- return checkType(N->getMemOperand()->getValue(), AMDGPUAS::FLAT_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
- return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
- MachineMemOperand *MMO = N->getMemOperand();
- if (checkPrivateAddress(N->getMemOperand())) {
- if (MMO) {
- const PseudoSourceValue *PSV = MMO->getPseudoValue();
- if (PSV && PSV->isConstantPool()) {
- return true;
- }
- }
- }
- return false;
-}
-
-bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
- if (checkPrivateAddress(N->getMemOperand())) {
- // Check to make sure we are not a constant pool load or a constant load
- // that is marked as a private load
- if (isCPLoad(N) || isConstantLoad(N, -1)) {
- return false;
- }
- }
-
- const Value *MemVal = N->getMemOperand()->getValue();
- if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
- !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
- !checkType(MemVal, AMDGPUAS::FLAT_ADDRESS) &&
- !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) &&
- !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) &&
- !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) &&
- !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)) {
- return true;
- }
- return false;
+bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
+ const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
+ const Instruction *Term = BB->getTerminator();
+ return Term->getMetadata("amdgpu.uniform") ||
+ Term->getMetadata("structurizecfg.uniform");
}
const char *AMDGPUDAGToDAGISel::getPassName() const {
return "AMDGPU DAG->DAG Pattern Instruction Selection";
}
-#ifdef DEBUGTMP
-#undef INT64_C
-#endif
-#undef DEBUGTMP
-
//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//
@@ -705,7 +575,7 @@ bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
return true;
}
-SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
+void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
SDLoc DL(N);
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
@@ -728,7 +598,6 @@ SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
SDValue AddLoArgs[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
-
unsigned Opc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
@@ -745,12 +614,12 @@ SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
SDValue(AddHi,0),
Sub1,
};
- return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
+ CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
}
// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
-SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
+void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
SDLoc SL(N);
EVT VT = N->getValueType(0);
@@ -766,7 +635,7 @@ SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
SelectVOP3Mods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
SelectVOP3Mods(N->getOperand(1), Ops[3], Ops[2]);
SelectVOP3Mods(N->getOperand(2), Ops[5], Ops[4]);
- return CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
+ CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
}
bool AMDGPUDAGToDAGISel::isDSOffsetLegal(const SDValue &Base, unsigned Offset,
@@ -786,6 +655,7 @@ bool AMDGPUDAGToDAGISel::isDSOffsetLegal(const SDValue &Base, unsigned Offset,
bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
+ SDLoc DL(Addr);
if (CurDAG->isBaseWithConstantOffset(Addr)) {
SDValue N0 = Addr.getOperand(0);
SDValue N1 = Addr.getOperand(1);
@@ -793,7 +663,7 @@ bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
// (add n0, c0)
Base = N0;
- Offset = N1;
+ Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
return true;
}
} else if (Addr.getOpcode() == ISD::SUB) {
@@ -801,7 +671,6 @@ bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
int64_t ByteOffset = C->getSExtValue();
if (isUInt<16>(ByteOffset)) {
- SDLoc DL(Addr);
SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
// XXX - This is kind of hacky. Create a dummy sub node so we can check
@@ -816,7 +685,7 @@ bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
Zero, Addr.getOperand(1));
Base = SDValue(MachineSub, 0);
- Offset = Addr.getOperand(0);
+ Offset = CurDAG->getTargetConstant(ByteOffset, DL, MVT::i16);
return true;
}
}
@@ -834,7 +703,7 @@ bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
DL, MVT::i32, Zero);
Base = SDValue(MovZero, 0);
- Offset = Addr;
+ Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
return true;
}
}
@@ -932,8 +801,10 @@ bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
SDLoc DL(Addr);
- GLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
- SLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
+ if (!GLC.getNode())
+ GLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
+ if (!SLC.getNode())
+ SLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
TFE = CurDAG->getTargetConstant(0, DL, MVT::i1);
Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
@@ -961,9 +832,11 @@ bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
}
if (isLegalMUBUFImmOffset(C1)) {
- Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
- return true;
- } else if (isUInt<32>(C1->getZExtValue())) {
+ Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
+ return true;
+ }
+
+ if (isUInt<32>(C1->getZExtValue())) {
// Illegal offset, store it in soffset.
Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
SOffset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
@@ -1045,14 +918,13 @@ bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
if (CurDAG->isBaseWithConstantOffset(Addr)) {
SDValue N0 = Addr.getOperand(0);
SDValue N1 = Addr.getOperand(1);
+
// Offsets in vaddr must be positive.
- if (CurDAG->SignBitIsZero(N0)) {
- ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
- if (isLegalMUBUFImmOffset(C1)) {
- VAddr = N0;
- ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
- return true;
- }
+ ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
+ if (isLegalMUBUFImmOffset(C1)) {
+ VAddr = N0;
+ ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
+ return true;
}
}
@@ -1091,13 +963,118 @@ bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
}
bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
+ SDValue &Soffset, SDValue &Offset) const {
+ SDValue GLC, SLC, TFE;
+
+ return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE);
+}
+bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
SDValue &Soffset, SDValue &Offset,
- SDValue &GLC) const {
- SDValue SLC, TFE;
+ SDValue &SLC) const {
+ SDValue GLC, TFE;
return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE);
}
+bool AMDGPUDAGToDAGISel::SelectMUBUFConstant(SDValue Constant,
+ SDValue &SOffset,
+ SDValue &ImmOffset) const {
+ SDLoc DL(Constant);
+ uint32_t Imm = cast<ConstantSDNode>(Constant)->getZExtValue();
+ uint32_t Overflow = 0;
+
+ if (Imm >= 4096) {
+ if (Imm <= 4095 + 64) {
+ // Use an SOffset inline constant for 1..64
+ Overflow = Imm - 4095;
+ Imm = 4095;
+ } else {
+ // Try to keep the same value in SOffset for adjacent loads, so that
+ // the corresponding register contents can be re-used.
+ //
+ // Load values with all low-bits set into SOffset, so that a larger
+ // range of values can be covered using s_movk_i32
+ uint32_t High = (Imm + 1) & ~4095;
+ uint32_t Low = (Imm + 1) & 4095;
+ Imm = Low;
+ Overflow = High - 1;
+ }
+ }
+
+ // There is a hardware bug in SI and CI which prevents address clamping in
+ // MUBUF instructions from working correctly with SOffsets. The immediate
+ // offset is unaffected.
+ if (Overflow > 0 &&
+ Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
+ return false;
+
+ ImmOffset = CurDAG->getTargetConstant(Imm, DL, MVT::i16);
+
+ if (Overflow <= 64)
+ SOffset = CurDAG->getTargetConstant(Overflow, DL, MVT::i32);
+ else
+ SOffset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
+ CurDAG->getTargetConstant(Overflow, DL, MVT::i32)),
+ 0);
+
+ return true;
+}
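+
+// Two worked cases for the split above:
+//   Imm = 4100: 4096 <= 4100 <= 4159, so Overflow = 4100 - 4095 = 5 (fits an
+//   SOffset inline constant) and ImmOffset stays 4095; 4095 + 5 = 4100.
+//   Imm = 5000: High = (5001 & ~4095) = 4096 and Low = 5001 & 4095 = 905, so
+//   ImmOffset = 905 with Overflow = 4095 in SOffset; all low bits of 4095 are
+//   set, so neighbouring loads can share one s_movk_i32; 4095 + 905 = 5000.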
+
+bool AMDGPUDAGToDAGISel::SelectMUBUFIntrinsicOffset(SDValue Offset,
+ SDValue &SOffset,
+ SDValue &ImmOffset) const {
+ SDLoc DL(Offset);
+
+ if (!isa<ConstantSDNode>(Offset))
+ return false;
+
+ return SelectMUBUFConstant(Offset, SOffset, ImmOffset);
+}
+
+bool AMDGPUDAGToDAGISel::SelectMUBUFIntrinsicVOffset(SDValue Offset,
+ SDValue &SOffset,
+ SDValue &ImmOffset,
+ SDValue &VOffset) const {
+ SDLoc DL(Offset);
+
+ // Don't generate an unnecessary voffset for constant offsets.
+ if (isa<ConstantSDNode>(Offset)) {
+ SDValue Tmp1, Tmp2;
+
+ // When necessary, use a voffset on CI and earlier anyway to work around
+ // the hardware bug.
+ if (Subtarget->getGeneration() > AMDGPUSubtarget::SEA_ISLANDS ||
+ SelectMUBUFConstant(Offset, Tmp1, Tmp2))
+ return false;
+ }
+
+ if (CurDAG->isBaseWithConstantOffset(Offset)) {
+ SDValue N0 = Offset.getOperand(0);
+ SDValue N1 = Offset.getOperand(1);
+ if (cast<ConstantSDNode>(N1)->getSExtValue() >= 0 &&
+ SelectMUBUFConstant(N1, SOffset, ImmOffset)) {
+ VOffset = N0;
+ return true;
+ }
+ }
+
+ SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
+ ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
+ VOffset = Offset;
+
+ return true;
+}
+
+bool AMDGPUDAGToDAGISel::SelectFlat(SDValue Addr,
+ SDValue &VAddr,
+ SDValue &SLC,
+ SDValue &TFE) const {
+ VAddr = Addr;
+ TFE = SLC = CurDAG->getTargetConstant(0, SDLoc(), MVT::i1);
+ return true;
+}
+
///
/// \param EncodedOffset This is the immediate value that will be encoded
/// directly into the instruction. On SI/CI the \p EncodedOffset
@@ -1213,71 +1190,33 @@ bool AMDGPUDAGToDAGISel::SelectSMRDBufferSgpr(SDValue Addr,
!isa<ConstantSDNode>(Offset);
}
-// FIXME: This is incorrect and only enough to be able to compile.
-SDNode *AMDGPUDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
- AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(N);
- SDLoc DL(N);
-
- const MachineFunction &MF = CurDAG->getMachineFunction();
- DiagnosticInfoUnsupported NotImplemented(*MF.getFunction(),
- "addrspacecast not implemented");
- CurDAG->getContext()->diagnose(NotImplemented);
-
- assert(Subtarget->hasFlatAddressSpace() &&
- "addrspacecast only supported with flat address space!");
-
- assert((ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
- ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) &&
- "Can only cast to / from flat address space!");
-
- // The flat instructions read the address as the index of the VGPR holding the
- // address, so casting should just be reinterpreting the base VGPR, so just
- // insert trunc / bitcast / zext.
-
- SDValue Src = ASC->getOperand(0);
- EVT DestVT = ASC->getValueType(0);
- EVT SrcVT = Src.getValueType();
-
- unsigned SrcSize = SrcVT.getSizeInBits();
- unsigned DestSize = DestVT.getSizeInBits();
-
- if (SrcSize > DestSize) {
- assert(SrcSize == 64 && DestSize == 32);
- return CurDAG->getMachineNode(
- TargetOpcode::EXTRACT_SUBREG,
- DL,
- DestVT,
- Src,
- CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32));
- }
-
- if (DestSize > SrcSize) {
- assert(SrcSize == 32 && DestSize == 64);
-
- // FIXME: This is probably wrong, we should never be defining
- // a register class with both VGPRs and SGPRs
- SDValue RC = CurDAG->getTargetConstant(AMDGPU::VS_64RegClassID, DL,
- MVT::i32);
+bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index,
+ SDValue &Base,
+ SDValue &Offset) const {
+ SDLoc DL(Index);
- const SDValue Ops[] = {
- RC,
- Src,
- CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
- SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
- CurDAG->getConstant(0, DL, MVT::i32)), 0),
- CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
- };
+ if (CurDAG->isBaseWithConstantOffset(Index)) {
+ SDValue N0 = Index.getOperand(0);
+ SDValue N1 = Index.getOperand(1);
+ ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
- return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
- DL, N->getValueType(0), Ops);
+ // (add n0, c0)
+ Base = N0;
+ Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
+ return true;
}
- assert(SrcSize == 64 && DestSize == 64);
- return CurDAG->getNode(ISD::BITCAST, DL, DestVT, Src).getNode();
+ if (isa<ConstantSDNode>(Index))
+ return false;
+
+ Base = Index;
+ Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
+ return true;
}
-SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, SDLoc DL, SDValue Val,
- uint32_t Offset, uint32_t Width) {
+SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, const SDLoc &DL,
+ SDValue Val, uint32_t Offset,
+ uint32_t Width) {
// Transformation function, pack the offset and width of a BFE into
// the format expected by the S_BFE_I32 / S_BFE_U32. In the second
// source, bits [5:0] contain the offset and bits [22:16] the width.
@@ -1287,7 +1226,7 @@ SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, SDLoc DL, SDValue Val,
return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
}
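// For example, extracting Width = 8 bits starting at Offset = 16 packs to
// (8 << 16) | 16 = 0x80010, which becomes the second operand of S_BFE_U32.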
-SDNode *AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
+void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
// "(a << b) srl c)" ---> "BFE_U32 a, (c-b), (32-c)
// "(a << b) sra c)" ---> "BFE_I32 a, (c-b), (32-c)
// Predicate: 0 < b <= c < 32
@@ -1304,14 +1243,15 @@ SDNode *AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
bool Signed = N->getOpcode() == ISD::SRA;
unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
- return getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0),
- CVal - BVal, 32 - CVal);
+ ReplaceNode(N, getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0), CVal - BVal,
+ 32 - CVal));
+ return;
}
}
- return SelectCode(N);
+ SelectCode(N);
}
-SDNode *AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
+void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
switch (N->getOpcode()) {
case ISD::AND:
if (N->getOperand(0).getOpcode() == ISD::SRL) {
@@ -1328,8 +1268,9 @@ SDNode *AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
if (isMask_32(MaskVal)) {
uint32_t WidthVal = countPopulation(MaskVal);
- return getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N), Srl.getOperand(0),
- ShiftVal, WidthVal);
+ ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
+ Srl.getOperand(0), ShiftVal, WidthVal));
+ return;
}
}
}
@@ -1349,20 +1290,139 @@ SDNode *AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
if (isMask_32(MaskVal)) {
uint32_t WidthVal = countPopulation(MaskVal);
- return getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N), And.getOperand(0),
- ShiftVal, WidthVal);
+ ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
+ And.getOperand(0), ShiftVal, WidthVal));
+ return;
}
}
- } else if (N->getOperand(0).getOpcode() == ISD::SHL)
- return SelectS_BFEFromShifts(N);
+ } else if (N->getOperand(0).getOpcode() == ISD::SHL) {
+ SelectS_BFEFromShifts(N);
+ return;
+ }
break;
case ISD::SRA:
- if (N->getOperand(0).getOpcode() == ISD::SHL)
- return SelectS_BFEFromShifts(N);
+ if (N->getOperand(0).getOpcode() == ISD::SHL) {
+ SelectS_BFEFromShifts(N);
+ return;
+ }
break;
+
+ case ISD::SIGN_EXTEND_INREG: {
+ // sext_inreg (srl x, 16), i8 -> bfe_i32 x, 16, 8
+ SDValue Src = N->getOperand(0);
+ if (Src.getOpcode() != ISD::SRL)
+ break;
+
+ const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
+ if (!Amt)
+ break;
+
+ unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
+ ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_I32, SDLoc(N), Src.getOperand(0),
+ Amt->getZExtValue(), Width));
+ return;
+ }
}
- return SelectCode(N);
+ SelectCode(N);
+}
+
+void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
+ SDValue Cond = N->getOperand(1);
+
+ if (isCBranchSCC(N)) {
+ // This brcond will use S_CBRANCH_SCC*, so let tablegen handle it.
+ SelectCode(N);
+ return;
+ }
+
+ // The result of VOPC instructions is or'd against ~EXEC before it is
+ // written to vcc or another SGPR. This means that the value '1' is always
+ // written to the corresponding bit for results that are masked. In order
+ // to correctly check against vccz, we need to and VCC with the EXEC
+ // register in order to clear the value from the masked bits.
+
+ SDLoc SL(N);
+
+ SDNode *MaskedCond =
+ CurDAG->getMachineNode(AMDGPU::S_AND_B64, SL, MVT::i1,
+ CurDAG->getRegister(AMDGPU::EXEC, MVT::i1),
+ Cond);
+ SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, AMDGPU::VCC,
+ SDValue(MaskedCond, 0),
+ SDValue()); // Passing SDValue() adds a
+ // glue output.
+ CurDAG->SelectNodeTo(N, AMDGPU::S_CBRANCH_VCCNZ, MVT::Other,
+ N->getOperand(2), // Basic Block
+ VCC.getValue(0), // Chain
+ VCC.getValue(1)); // Glue
+ return;
+}
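+// Concretely: with EXEC = 0b0011 (lanes 0 and 1 active) and the compare false
+// in both active lanes, the raw VOPC result reads 0b1100 because the inactive
+// lanes were or'd with ~EXEC; S_AND_B64 with EXEC clears that to 0b0000, so
+// S_CBRANCH_VCCNZ correctly sees vccz and falls through.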
+
+// This is here because there isn't a way to use the generated sub0_sub1 as the
+// subreg index to EXTRACT_SUBREG in tablegen.
+void AMDGPUDAGToDAGISel::SelectATOMIC_CMP_SWAP(SDNode *N) {
+ MemSDNode *Mem = cast<MemSDNode>(N);
+ unsigned AS = Mem->getAddressSpace();
+ if (AS == AMDGPUAS::FLAT_ADDRESS) {
+ SelectCode(N);
+ return;
+ }
+
+ MVT VT = N->getSimpleValueType(0);
+ bool Is32 = (VT == MVT::i32);
+ SDLoc SL(N);
+
+ MachineSDNode *CmpSwap = nullptr;
+ if (Subtarget->hasAddr64()) {
+ SDValue SRsrc, VAddr, SOffset, Offset, GLC, SLC;
+
+ if (SelectMUBUFAddr64(Mem->getBasePtr(), SRsrc, VAddr, SOffset, Offset, SLC)) {
+ unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_RTN_ADDR64 :
+ AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_RTN_ADDR64;
+ SDValue CmpVal = Mem->getOperand(2);
+
+ // XXX - Do we care about glue operands?
+
+ SDValue Ops[] = {
+ CmpVal, VAddr, SRsrc, SOffset, Offset, SLC, Mem->getChain()
+ };
+
+ CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
+ }
+ }
+
+ if (!CmpSwap) {
+ SDValue SRsrc, SOffset, Offset, SLC;
+ if (SelectMUBUFOffset(Mem->getBasePtr(), SRsrc, SOffset, Offset, SLC)) {
+ unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_RTN_OFFSET :
+ AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_RTN_OFFSET;
+
+ SDValue CmpVal = Mem->getOperand(2);
+ SDValue Ops[] = {
+ CmpVal, SRsrc, SOffset, Offset, SLC, Mem->getChain()
+ };
+
+ CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
+ }
+ }
+
+ if (!CmpSwap) {
+ SelectCode(N);
+ return;
+ }
+
+ MachineSDNode::mmo_iterator MMOs = MF->allocateMemRefsArray(1);
+ *MMOs = Mem->getMemOperand();
+ CmpSwap->setMemRefs(MMOs, MMOs + 1);
+
+ unsigned SubReg = Is32 ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
+ SDValue Extract
+ = CurDAG->getTargetExtractSubreg(SubReg, SL, VT, SDValue(CmpSwap, 0));
+
+ ReplaceUses(SDValue(N, 0), Extract);
+ ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 1));
+ CurDAG->RemoveDeadNode(N);
}
bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
@@ -1432,62 +1492,59 @@ bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src,
}
void AMDGPUDAGToDAGISel::PreprocessISelDAG() {
- bool Modified = false;
-
- // XXX - Other targets seem to be able to do this without a worklist.
- SmallVector<LoadSDNode *, 8> LoadsToReplace;
- SmallVector<StoreSDNode *, 8> StoresToReplace;
-
- for (SDNode &Node : CurDAG->allnodes()) {
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(&Node)) {
- EVT VT = LD->getValueType(0);
- if (VT != MVT::i64 || LD->getExtensionType() != ISD::NON_EXTLOAD)
- continue;
-
- // To simplify the TableGen patters, we replace all i64 loads with v2i32
- // loads. Alternatively, we could promote i64 loads to v2i32 during DAG
- // legalization, however, so places (ExpandUnalignedLoad) in the DAG
- // legalizer assume that if i64 is legal, so doing this promotion early
- // can cause problems.
- LoadsToReplace.push_back(LD);
- } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(&Node)) {
- // Handle i64 stores here for the same reason mentioned above for loads.
- SDValue Value = ST->getValue();
- if (Value.getValueType() != MVT::i64 || ST->isTruncatingStore())
- continue;
- StoresToReplace.push_back(ST);
+ MachineFrameInfo *MFI = CurDAG->getMachineFunction().getFrameInfo();
+
+ // Handle the perverse case where a frame index is being stored. We don't
+ // want to see multiple frame index operands on the same instruction since
+ // it complicates things and violates some assumptions about frame index
+ // lowering.
+ for (int I = MFI->getObjectIndexBegin(), E = MFI->getObjectIndexEnd();
+ I != E; ++I) {
+ SDValue FI = CurDAG->getTargetFrameIndex(I, MVT::i32);
+
+ // It's possible that we have a frame index defined in the function that
+ // isn't used in this block.
+ if (FI.use_empty())
+ continue;
+
+ // Skip over the AssertZext inserted during lowering.
+ SDValue EffectiveFI = FI;
+ auto It = FI->use_begin();
+ if (It->getOpcode() == ISD::AssertZext && FI->hasOneUse()) {
+ EffectiveFI = SDValue(*It, 0);
+ It = EffectiveFI->use_begin();
}
- }
-
- for (LoadSDNode *LD : LoadsToReplace) {
- SDLoc SL(LD);
-
- SDValue NewLoad = CurDAG->getLoad(MVT::v2i32, SL, LD->getChain(),
- LD->getBasePtr(), LD->getMemOperand());
- SDValue BitCast = CurDAG->getNode(ISD::BITCAST, SL,
- MVT::i64, NewLoad);
- CurDAG->ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLoad.getValue(1));
- CurDAG->ReplaceAllUsesOfValueWith(SDValue(LD, 0), BitCast);
- Modified = true;
- }
- for (StoreSDNode *ST : StoresToReplace) {
- SDValue NewValue = CurDAG->getNode(ISD::BITCAST, SDLoc(ST),
- MVT::v2i32, ST->getValue());
- const SDValue StoreOps[] = {
- ST->getChain(),
- NewValue,
- ST->getBasePtr(),
- ST->getOffset()
- };
+ for (auto It = EffectiveFI->use_begin(); !It.atEnd(); ) {
+ SDUse &Use = It.getUse();
+ SDNode *User = Use.getUser();
+ unsigned OpIdx = It.getOperandNo();
+ ++It;
+
+ if (MemSDNode *M = dyn_cast<MemSDNode>(User)) {
+ unsigned PtrIdx = M->getOpcode() == ISD::STORE ? 2 : 1;
+ if (OpIdx == PtrIdx)
+ continue;
+
+ unsigned OpN = M->getNumOperands();
+ SDValue NewOps[8];
+
+ assert(OpN < array_lengthof(NewOps));
+ for (unsigned Op = 0; Op != OpN; ++Op) {
+ if (Op != OpIdx) {
+ NewOps[Op] = M->getOperand(Op);
+ continue;
+ }
+
+ MachineSDNode *Mov = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
+ SDLoc(M), MVT::i32, FI);
+ NewOps[Op] = SDValue(Mov, 0);
+ }
- CurDAG->UpdateNodeOperands(ST, StoreOps);
- Modified = true;
+ CurDAG->UpdateNodeOperands(M, makeArrayRef(NewOps, OpN));
+ }
+ }
}
-
- // XXX - Is this necessary?
- if (Modified)
- CurDAG->RemoveDeadNodes();
}
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 1a59a460ee7d..352423ed3ad6 100644
--- a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -15,7 +15,6 @@
#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
-#include "AMDGPUDiagnosticInfoUnsupported.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPURegisterInfo.h"
@@ -28,16 +27,19 @@
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
-
+#include "llvm/IR/DiagnosticInfo.h"
+#include "SIInstrInfo.h"
using namespace llvm;
-static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
- CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
- unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
- ArgFlags.getOrigAlign());
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+static bool allocateKernArg(unsigned ValNo, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ MachineFunction &MF = State.getMachineFunction();
+ AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
+ uint64_t Offset = MFI->allocateKernArg(ValVT.getStoreSize(),
+ ArgFlags.getOrigAlign());
+ State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
return true;
}
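// For example, assuming allocateKernArg just bumps an aligned running offset,
// an (i32, i64, i32) kernel signature lands at kernarg byte offsets 0, 8, and
// 16: the i64 is padded up from 4 to 8 to satisfy its alignment.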
@@ -53,60 +55,104 @@ EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}
-// Type for a vector that will be loaded to.
-EVT AMDGPUTargetLowering::getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT) {
+EVT AMDGPUTargetLowering::getEquivalentBitType(LLVMContext &Ctx, EVT VT) {
unsigned StoreSize = VT.getStoreSizeInBits();
if (StoreSize <= 32)
- return EVT::getIntegerVT(Ctx, 32);
+ return EVT::getIntegerVT(Ctx, StoreSize);
return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}
-AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
+AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
const AMDGPUSubtarget &STI)
: TargetLowering(TM), Subtarget(&STI) {
- setOperationAction(ISD::Constant, MVT::i32, Legal);
- setOperationAction(ISD::Constant, MVT::i64, Legal);
- setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
- setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
+ // Lower floating point store/load to integer store/load to reduce the number
+ // of patterns in tablegen.
+ setOperationAction(ISD::LOAD, MVT::f32, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);
- setOperationAction(ISD::BR_JT, MVT::Other, Expand);
- setOperationAction(ISD::BRIND, MVT::Other, Expand);
+ setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
- // This is totally unsupported, just custom lower to produce an error.
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
+ setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
- // We need to custom lower some of the intrinsics
- setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+ setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);
- // Library functions. These default to Expand, but we have instructions
- // for them.
- setOperationAction(ISD::FCEIL, MVT::f32, Legal);
- setOperationAction(ISD::FEXP2, MVT::f32, Legal);
- setOperationAction(ISD::FPOW, MVT::f32, Legal);
- setOperationAction(ISD::FLOG2, MVT::f32, Legal);
- setOperationAction(ISD::FABS, MVT::f32, Legal);
- setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
- setOperationAction(ISD::FRINT, MVT::f32, Legal);
- setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
- setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
- setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
+ setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);
- setOperationAction(ISD::FROUND, MVT::f32, Custom);
- setOperationAction(ISD::FROUND, MVT::f64, Custom);
+ setOperationAction(ISD::LOAD, MVT::i64, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
- setOperationAction(ISD::FREM, MVT::f32, Custom);
- setOperationAction(ISD::FREM, MVT::f64, Custom);
+ setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);
- // v_mad_f32 does not support denormals according to some sources.
- if (!Subtarget->hasFP32Denormals())
- setOperationAction(ISD::FMAD, MVT::f32, Legal);
+ setOperationAction(ISD::LOAD, MVT::f64, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);
- // Expand to fneg + fadd.
- setOperationAction(ISD::FSUB, MVT::f64, Expand);
+ setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);
+
+ // There are no 64-bit extloads. These should be done as a 32-bit extload and
+ // an extension to 64-bit.
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
+ }
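+ // So a sign-extending i64 load of an i8 legalizes as a 32-bit sextload
+ // followed by a separate extension, (sextload i8 -> i32) then
+ // (sign_extend i32 -> i64), matching the two operations the hardware has.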
+
+ for (MVT VT : MVT::integer_valuetypes()) {
+ if (VT == MVT::i64)
+ continue;
+
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
+
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);
+
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
+ }
+
+ for (MVT VT : MVT::integer_vector_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
+ }
+
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
+
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);
+
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
- // Lower floating point store/load to integer store/load to reduce the number
- // of patterns in tablegen.
setOperationAction(ISD::STORE, MVT::f32, Promote);
AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);
@@ -122,51 +168,99 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
setOperationAction(ISD::STORE, MVT::v16f32, Promote);
AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);
+ setOperationAction(ISD::STORE, MVT::i64, Promote);
+ AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
+
+ setOperationAction(ISD::STORE, MVT::v2i64, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);
+
setOperationAction(ISD::STORE, MVT::f64, Promote);
- AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);
+ AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);
setOperationAction(ISD::STORE, MVT::v2f64, Promote);
- AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v2i64);
+ AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);
- // Custom lowering of vector stores is required for local address space
- // stores.
- setOperationAction(ISD::STORE, MVT::v4i32, Custom);
-
- setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
- setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);
+ setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
- // XXX: This can be change to Custom, once ExpandVectorStores can
- // handle 64-bit stores.
+ setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);
setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
- setTruncStoreAction(MVT::i64, MVT::i16, Expand);
- setTruncStoreAction(MVT::i64, MVT::i8, Expand);
+ setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
+ setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
+ setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
+
setTruncStoreAction(MVT::i64, MVT::i1, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i8, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i16, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i32, Expand);
+
setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
- setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);
+ setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
+ setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
+ setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);
+ setTruncStoreAction(MVT::f32, MVT::f16, Expand);
+ setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
+ setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
+ setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);
- setOperationAction(ISD::LOAD, MVT::f32, Promote);
- AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);
+ setTruncStoreAction(MVT::f64, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
- setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
- AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
+ setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
+ setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);
- setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
- AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
+ setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
+ setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);
- setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
- AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);
+ setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
+ setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);
- setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
- AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);
- setOperationAction(ISD::LOAD, MVT::f64, Promote);
- AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);
+ setOperationAction(ISD::Constant, MVT::i32, Legal);
+ setOperationAction(ISD::Constant, MVT::i64, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
- setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
- AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v2i64);
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ setOperationAction(ISD::BRIND, MVT::Other, Expand);
+
+ // This is totally unsupported; just custom lower to produce an error.
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
+
+ // We need to custom lower some of the intrinsics
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+ setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
+
+ // Library functions. These default to Expand, but we have instructions
+ // for them.
+ setOperationAction(ISD::FCEIL, MVT::f32, Legal);
+ setOperationAction(ISD::FEXP2, MVT::f32, Legal);
+ setOperationAction(ISD::FPOW, MVT::f32, Legal);
+ setOperationAction(ISD::FLOG2, MVT::f32, Legal);
+ setOperationAction(ISD::FABS, MVT::f32, Legal);
+ setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
+ setOperationAction(ISD::FRINT, MVT::f32, Legal);
+ setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
+ setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
+ setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
+
+ setOperationAction(ISD::FROUND, MVT::f32, Custom);
+ setOperationAction(ISD::FROUND, MVT::f64, Custom);
+
+ setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
+ setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);
+
+ setOperationAction(ISD::FREM, MVT::f32, Custom);
+ setOperationAction(ISD::FREM, MVT::f64, Custom);
+
+ // v_mad_f32 does not support denormals according to some sources.
+ if (!Subtarget->hasFP32Denormals())
+ setOperationAction(ISD::FMAD, MVT::f32, Legal);
+
+ // Expand to fneg + fadd.
+ setOperationAction(ISD::FSUB, MVT::f64, Expand);
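
The Expand action here relies on the IEEE identity a - b == a + (-b); as a minimal scalar sketch of the expansion (illustrative C++; the function name is ours, not part of the patch):

  double fsub_expanded(double a, double b) {
    return a + (-b); // FNEG on b, then FADD
  }
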
setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
@@ -179,31 +273,6 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);
- // There are no 64-bit extloads. These should be done as a 32-bit extload and
- // an extension to 64-bit.
- for (MVT VT : MVT::integer_valuetypes()) {
- setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
- }
-
- for (MVT VT : MVT::integer_vector_valuetypes()) {
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
- }
-
- setOperationAction(ISD::BR_CC, MVT::i1, Expand);
-
if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
setOperationAction(ISD::FCEIL, MVT::f64, Custom);
setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
@@ -219,28 +288,13 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
-
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
-
- setTruncStoreAction(MVT::f32, MVT::f16, Expand);
- setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
- setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
- setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);
-
- setTruncStoreAction(MVT::f64, MVT::f16, Expand);
- setTruncStoreAction(MVT::f64, MVT::f32, Expand);
-
const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
for (MVT VT : ScalarIntVTs) {
- setOperationAction(ISD::SREM, VT, Expand);
+ // These should use [SU]DIVREM, so set them to expand
setOperationAction(ISD::SDIV, VT, Expand);
+ setOperationAction(ISD::UDIV, VT, Expand);
+ setOperationAction(ISD::SREM, VT, Expand);
+ setOperationAction(ISD::UREM, VT, Expand);
// GPU does not have divrem function for signed or unsigned.
setOperationAction(ISD::SDIVREM, VT, Custom);
@@ -284,17 +338,24 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
if (Subtarget->hasFFBH())
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
- else
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
-
- if (!Subtarget->hasFFBL())
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
+ if (Subtarget->hasFFBL())
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Legal);
setOperationAction(ISD::CTLZ, MVT::i64, Custom);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
+ // We only really have 32-bit BFE instructions (and 16-bit on VI).
+ //
+ // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
+ // effort to match them now. We want this to be false for i64 cases when the
+ // extraction isn't restricted to the upper or lower half. Ideally we would
+ // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
+ // span the midpoint are probably relatively rare, so don't worry about them
+ // for now.
+ if (Subtarget->hasBFE())
+ setHasExtractBitsInsn(true);
+
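
For reference, the 32-bit signed BFE advertised by hasBFE() computes the following (a sketch assuming 0 < Width and Offset + Width <= 32; the helper name is ours):

  #include <cstdint>
  int32_t bfe_i32(int32_t Src, unsigned Offset, unsigned Width) {
    uint32_t Shl = (uint32_t)Src << (32 - Offset - Width);
    return (int32_t)Shl >> (32 - Width); // arithmetic shift sign-extends
  }

An i64 extract such as bits [28, 36) straddles bit 32 and needs more than one of these, which is the midpoint case described above.
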
static const MVT::SimpleValueType VectorIntTypes[] = {
MVT::v2i32, MVT::v4i32
};
@@ -334,9 +395,7 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
setOperationAction(ISD::BSWAP, VT, Expand);
setOperationAction(ISD::CTPOP, VT, Expand);
setOperationAction(ISD::CTTZ, VT, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::CTLZ, VT, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
}
@@ -366,24 +425,20 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
setOperationAction(ISD::FSIN, VT, Expand);
setOperationAction(ISD::FSUB, VT, Expand);
setOperationAction(ISD::FNEG, VT, Expand);
- setOperationAction(ISD::SELECT, VT, Expand);
setOperationAction(ISD::VSELECT, VT, Expand);
setOperationAction(ISD::SELECT_CC, VT, Expand);
setOperationAction(ISD::FCOPYSIGN, VT, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
}
- setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
- setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);
-
- setTargetDAGCombine(ISD::SHL);
- setTargetDAGCombine(ISD::MUL);
- setTargetDAGCombine(ISD::SELECT);
- setTargetDAGCombine(ISD::SELECT_CC);
- setTargetDAGCombine(ISD::STORE);
+ // This causes a select to be unrolled rather than expanded with bit
+ // operations. This is generally better, but the alternative of using BFI
+ // instructions may be better if the select sources are SGPRs.
+ setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
+ AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);
- setTargetDAGCombine(ISD::FADD);
- setTargetDAGCombine(ISD::FSUB);
+ setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
+ AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);
setBooleanContents(ZeroOrNegativeOneBooleanContent);
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
@@ -394,7 +449,7 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
// SI at least has hardware support for floating point exceptions, but no way
// of using or handling them is implemented. They are also optional in OpenCL
// (Section 7.3)
- setHasFloatingPointExceptions(false);
+ setHasFloatingPointExceptions(Subtarget->hasFPExceptions());
setSelectIsExpensive(false);
PredictableSelectIsExpensive = false;
@@ -415,6 +470,18 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
MaxStoresPerMemcpy = 4096;
MaxStoresPerMemmove = 4096;
MaxStoresPerMemset = 4096;
+
+ setTargetDAGCombine(ISD::BITCAST);
+ setTargetDAGCombine(ISD::AND);
+ setTargetDAGCombine(ISD::SHL);
+ setTargetDAGCombine(ISD::SRA);
+ setTargetDAGCombine(ISD::SRL);
+ setTargetDAGCombine(ISD::MUL);
+ setTargetDAGCombine(ISD::SELECT);
+ setTargetDAGCombine(ISD::SELECT_CC);
+ setTargetDAGCombine(ISD::STORE);
+ setTargetDAGCombine(ISD::FADD);
+ setTargetDAGCombine(ISD::FSUB);
}
//===----------------------------------------------------------------------===//
@@ -467,15 +534,17 @@ bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
EVT CastTy) const {
- if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
- return true;
- unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
- unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();
+ assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());
- return ((LScalarSize <= CastScalarSize) ||
- (CastScalarSize >= 32) ||
- (LScalarSize < 32));
+ if (LoadTy.getScalarType() == MVT::i32)
+ return false;
+
+ unsigned LScalarSize = LoadTy.getScalarSizeInBits();
+ unsigned CastScalarSize = CastTy.getScalarSizeInBits();
+
+ return (LScalarSize < CastScalarSize) ||
+ (CastScalarSize >= 32);
}
// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
@@ -578,14 +647,13 @@ void AMDGPUTargetLowering::AnalyzeReturn(CCState &State,
State.AnalyzeReturn(Outs, RetCC_SI);
}
-SDValue AMDGPUTargetLowering::LowerReturn(
- SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const {
- return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
+SDValue
+AMDGPUTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &DL, SelectionDAG &DAG) const {
+ return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
}
//===---------------------------------------------------------------------===//
@@ -606,32 +674,38 @@ SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
FuncName = G->getGlobal()->getName();
- DiagnosticInfoUnsupported NoCalls(Fn, "call to function " + FuncName);
+ DiagnosticInfoUnsupported NoCalls(
+ Fn, "unsupported call to function " + FuncName, CLI.DL.getDebugLoc());
DAG.getContext()->diagnose(NoCalls);
- return SDValue();
+
+ for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
+ InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
+
+ return DAG.getEntryNode();
}
SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
const Function &Fn = *DAG.getMachineFunction().getFunction();
- DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "dynamic alloca");
+ DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
+ SDLoc(Op).getDebugLoc());
DAG.getContext()->diagnose(NoDynamicAlloca);
- return SDValue();
+ auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
+ return DAG.getMergeValues(Ops, SDLoc());
}
SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default:
- Op.getNode()->dump();
+ Op->dump(&DAG);
    llvm_unreachable("Custom lowering code for this "
"instruction is not implemented yet!");
break;
case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
- case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
@@ -666,24 +740,6 @@ void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
// ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
// nothing here and let the illegal result integer be handled normally.
return;
- case ISD::LOAD: {
- SDNode *Node = LowerLOAD(SDValue(N, 0), DAG).getNode();
- if (!Node)
- return;
-
- Results.push_back(SDValue(Node, 0));
- Results.push_back(SDValue(Node, 1));
- // XXX: LLVM seems not to replace Chain Value inside CustomWidenLowerNode
- // function
- DAG.ReplaceAllUsesOfValueWith(SDValue(N,1), SDValue(Node, 1));
- return;
- }
- case ISD::STORE: {
- SDValue Lowered = LowerSTORE(SDValue(N, 0), DAG);
- if (Lowered.getNode())
- Results.push_back(Lowered);
- return;
- }
default:
return;
}
@@ -712,16 +768,16 @@ SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
EVT VT = EVT::getEVT(InitTy);
PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
return DAG.getStore(Chain, DL, DAG.getConstant(*CI, DL, VT), InitPtr,
- MachinePointerInfo(UndefValue::get(PtrTy)), false,
- false, TD.getPrefTypeAlignment(InitTy));
+ MachinePointerInfo(UndefValue::get(PtrTy)),
+ TD.getPrefTypeAlignment(InitTy));
}
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
EVT VT = EVT::getEVT(CFP->getType());
PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, DL, VT), InitPtr,
- MachinePointerInfo(UndefValue::get(PtrTy)), false,
- false, TD.getPrefTypeAlignment(CFP->getType()));
+ MachinePointerInfo(UndefValue::get(PtrTy)),
+ TD.getPrefTypeAlignment(CFP->getType()));
}
if (StructType *ST = dyn_cast<StructType>(InitTy)) {
@@ -769,8 +825,8 @@ SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
EVT VT = EVT::getEVT(InitTy);
PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
return DAG.getStore(Chain, DL, DAG.getUNDEF(VT), InitPtr,
- MachinePointerInfo(UndefValue::get(PtrTy)), false,
- false, TD.getPrefTypeAlignment(InitTy));
+ MachinePointerInfo(UndefValue::get(PtrTy)),
+ TD.getPrefTypeAlignment(InitTy));
}
Init->dump();
@@ -782,10 +838,7 @@ static bool hasDefinedInitializer(const GlobalValue *GV) {
if (!GVar || !GVar->hasInitializer())
return false;
- if (isa<UndefValue>(GVar->getInitializer()))
- return false;
-
- return true;
+ return !isa<UndefValue>(GVar->getInitializer());
}
SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
@@ -797,6 +850,11 @@ SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
const GlobalValue *GV = G->getGlobal();
switch (G->getAddressSpace()) {
+ case AMDGPUAS::CONSTANT_ADDRESS: {
+ MVT ConstPtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
+ SDValue GA = DAG.getTargetGlobalAddress(GV, SDLoc(G), ConstPtrVT);
+ return DAG.getNode(AMDGPUISD::CONST_DATA_PTR, SDLoc(G), ConstPtrVT, GA);
+ }
case AMDGPUAS::LOCAL_ADDRESS: {
// XXX: What does the value of G->getOffset() mean?
assert(G->getOffset() == 0 &&
@@ -808,11 +866,16 @@ SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
unsigned Offset;
if (MFI->LocalMemoryObjects.count(GV) == 0) {
- uint64_t Size = DL.getTypeAllocSize(GV->getType()->getElementType());
- Offset = MFI->LDSSize;
+ unsigned Align = GV->getAlignment();
+ if (Align == 0)
+ Align = DL.getABITypeAlignment(GV->getValueType());
+
+ // TODO: We should sort these to minimize wasted space due to alignment
+ // padding. Currently the padding is decided by the first encountered use
+ // during lowering.
+ Offset = MFI->LDSSize = alignTo(MFI->LDSSize, Align);
MFI->LocalMemoryObjects[GV] = Offset;
- // XXX: Account for alignment?
- MFI->LDSSize += Size;
+ MFI->LDSSize += DL.getTypeAllocSize(GV->getValueType());
} else {
Offset = MFI->LocalMemoryObjects[GV];
}
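
alignTo() rounds the running LDSSize up to the next multiple of Align before the object is placed; a sketch of the same computation (helper name ours):

  uint64_t align_to(uint64_t Value, uint64_t Align) { // Align != 0
    return (Value + Align - 1) / Align * Align;       // round up
  }
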
@@ -820,50 +883,11 @@ SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
return DAG.getConstant(Offset, SDLoc(Op),
getPointerTy(DL, AMDGPUAS::LOCAL_ADDRESS));
}
- case AMDGPUAS::CONSTANT_ADDRESS: {
- MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
- Type *EltType = GV->getType()->getElementType();
- unsigned Size = DL.getTypeAllocSize(EltType);
- unsigned Alignment = DL.getPrefTypeAlignment(EltType);
-
- MVT PrivPtrVT = getPointerTy(DL, AMDGPUAS::PRIVATE_ADDRESS);
- MVT ConstPtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
-
- int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
- SDValue InitPtr = DAG.getFrameIndex(FI, PrivPtrVT);
-
- const GlobalVariable *Var = cast<GlobalVariable>(GV);
- if (!Var->hasInitializer()) {
- // This has no use, but bugpoint will hit it.
- return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
- }
-
- const Constant *Init = Var->getInitializer();
- SmallVector<SDNode*, 8> WorkList;
-
- for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
- E = DAG.getEntryNode()->use_end(); I != E; ++I) {
- if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
- continue;
- WorkList.push_back(*I);
- }
- SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
- for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
- E = WorkList.end(); I != E; ++I) {
- SmallVector<SDValue, 8> Ops;
- Ops.push_back(Chain);
- for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
- Ops.push_back((*I)->getOperand(i));
- }
- DAG.UpdateNodeOperands(*I, Ops);
- }
- return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
- }
}
const Function &Fn = *DAG.getMachineFunction().getFunction();
- DiagnosticInfoUnsupported BadInit(Fn,
- "initializer for address space");
+ DiagnosticInfoUnsupported BadInit(
+ Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
DAG.getContext()->diagnose(BadInit);
return SDValue();
}
@@ -875,7 +899,7 @@ SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
for (const SDUse &U : Op->ops())
DAG.ExtractVectorElements(U.get(), Args);
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
+ return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}
SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
@@ -887,23 +911,7 @@ SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
VT.getVectorNumElements());
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
-}
-
-SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
- SelectionDAG &DAG) const {
-
- MachineFunction &MF = DAG.getMachineFunction();
- const AMDGPUFrameLowering *TFL = Subtarget->getFrameLowering();
-
- FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);
-
- unsigned FrameIndex = FIN->getIndex();
- unsigned IgnoredFrameReg;
- unsigned Offset =
- TFL->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);
- return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF), SDLoc(Op),
- Op.getValueType());
+ return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}
SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
@@ -914,121 +922,10 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
switch (IntrinsicID) {
default: return Op;
- case AMDGPUIntrinsic::AMDGPU_abs:
- case AMDGPUIntrinsic::AMDIL_abs: // Legacy name.
- return LowerIntrinsicIABS(Op, DAG);
- case AMDGPUIntrinsic::AMDGPU_lrp:
- return LowerIntrinsicLRP(Op, DAG);
-
- case AMDGPUIntrinsic::AMDGPU_clamp:
- case AMDGPUIntrinsic::AMDIL_clamp: // Legacy name.
+ case AMDGPUIntrinsic::AMDGPU_clamp: // Legacy name.
return DAG.getNode(AMDGPUISD::CLAMP, DL, VT,
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
- case Intrinsic::AMDGPU_div_scale: {
- // 3rd parameter required to be a constant.
- const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
- if (!Param)
- return DAG.getUNDEF(VT);
-
- // Translate to the operands expected by the machine instruction. The
- // first parameter must be the same as the first instruction.
- SDValue Numerator = Op.getOperand(1);
- SDValue Denominator = Op.getOperand(2);
-
- // Note this order is opposite of the machine instruction's operations,
- // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
- // intrinsic has the numerator as the first operand to match a normal
- // division operation.
-
- SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
-
- return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
- Denominator, Numerator);
- }
-
- case Intrinsic::AMDGPU_div_fmas:
- return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
- Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
- Op.getOperand(4));
-
- case Intrinsic::AMDGPU_div_fixup:
- return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
- Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
-
- case Intrinsic::AMDGPU_trig_preop:
- return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
- Op.getOperand(1), Op.getOperand(2));
-
- case Intrinsic::AMDGPU_rcp:
- return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
-
- case Intrinsic::AMDGPU_rsq:
- return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
-
- case AMDGPUIntrinsic::AMDGPU_legacy_rsq:
- return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
-
- case Intrinsic::AMDGPU_rsq_clamped:
- if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
- Type *Type = VT.getTypeForEVT(*DAG.getContext());
- APFloat Max = APFloat::getLargest(Type->getFltSemantics());
- APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
-
- SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
- SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
- DAG.getConstantFP(Max, DL, VT));
- return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
- DAG.getConstantFP(Min, DL, VT));
- } else {
- return DAG.getNode(AMDGPUISD::RSQ_CLAMPED, DL, VT, Op.getOperand(1));
- }
-
- case Intrinsic::AMDGPU_ldexp:
- return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, Op.getOperand(1),
- Op.getOperand(2));
-
- case AMDGPUIntrinsic::AMDGPU_imax:
- return DAG.getNode(ISD::SMAX, DL, VT, Op.getOperand(1),
- Op.getOperand(2));
- case AMDGPUIntrinsic::AMDGPU_umax:
- return DAG.getNode(ISD::UMAX, DL, VT, Op.getOperand(1),
- Op.getOperand(2));
- case AMDGPUIntrinsic::AMDGPU_imin:
- return DAG.getNode(ISD::SMIN, DL, VT, Op.getOperand(1),
- Op.getOperand(2));
- case AMDGPUIntrinsic::AMDGPU_umin:
- return DAG.getNode(ISD::UMIN, DL, VT, Op.getOperand(1),
- Op.getOperand(2));
-
- case AMDGPUIntrinsic::AMDGPU_umul24:
- return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT,
- Op.getOperand(1), Op.getOperand(2));
-
- case AMDGPUIntrinsic::AMDGPU_imul24:
- return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT,
- Op.getOperand(1), Op.getOperand(2));
-
- case AMDGPUIntrinsic::AMDGPU_umad24:
- return DAG.getNode(AMDGPUISD::MAD_U24, DL, VT,
- Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
-
- case AMDGPUIntrinsic::AMDGPU_imad24:
- return DAG.getNode(AMDGPUISD::MAD_I24, DL, VT,
- Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
-
- case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte0:
- return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Op.getOperand(1));
-
- case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte1:
- return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE1, DL, VT, Op.getOperand(1));
-
- case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte2:
- return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE2, DL, VT, Op.getOperand(1));
-
- case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte3:
- return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE3, DL, VT, Op.getOperand(1));
-
case AMDGPUIntrinsic::AMDGPU_bfe_i32:
return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
Op.getOperand(1),
@@ -1040,69 +937,13 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
-
- case AMDGPUIntrinsic::AMDGPU_bfi:
- return DAG.getNode(AMDGPUISD::BFI, DL, VT,
- Op.getOperand(1),
- Op.getOperand(2),
- Op.getOperand(3));
-
- case AMDGPUIntrinsic::AMDGPU_bfm:
- return DAG.getNode(AMDGPUISD::BFM, DL, VT,
- Op.getOperand(1),
- Op.getOperand(2));
-
- case Intrinsic::AMDGPU_class:
- return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
- Op.getOperand(1), Op.getOperand(2));
-
- case AMDGPUIntrinsic::AMDIL_exp: // Legacy name.
- return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
-
- case AMDGPUIntrinsic::AMDIL_round_nearest: // Legacy name.
- return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
- case AMDGPUIntrinsic::AMDGPU_trunc: // Legacy name.
- return DAG.getNode(ISD::FTRUNC, DL, VT, Op.getOperand(1));
- case AMDGPUIntrinsic::AMDGPU_brev: // Legacy name
- return DAG.getNode(ISD::BITREVERSE, DL, VT, Op.getOperand(1));
}
}
-///IABS(a) = SMAX(sub(0, a), a)
-SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
- SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
- Op.getOperand(1));
-
- return DAG.getNode(ISD::SMAX, DL, VT, Neg, Op.getOperand(1));
-}
-
-/// Linear Interpolation
-/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
-SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT VT = Op.getValueType();
- // TODO: Should this propagate fast-math-flags?
- SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
- DAG.getConstantFP(1.0f, DL, MVT::f32),
- Op.getOperand(1));
- SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
- Op.getOperand(3));
- return DAG.getNode(ISD::FADD, DL, VT,
- DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
- OneSubAC);
-}
-
/// \brief Generate Min/Max node
-SDValue AMDGPUTargetLowering::CombineFMinMaxLegacy(SDLoc DL,
- EVT VT,
- SDValue LHS,
- SDValue RHS,
- SDValue True,
- SDValue False,
+SDValue AMDGPUTargetLowering::CombineFMinMaxLegacy(const SDLoc &DL, EVT VT,
+ SDValue LHS, SDValue RHS,
+ SDValue True, SDValue False,
SDValue CC,
DAGCombinerInfo &DCI) const {
if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
@@ -1176,56 +1017,48 @@ SDValue AMDGPUTargetLowering::CombineFMinMaxLegacy(SDLoc DL,
return SDValue();
}
-SDValue AMDGPUTargetLowering::ScalarizeVectorLoad(const SDValue Op,
- SelectionDAG &DAG) const {
- LoadSDNode *Load = cast<LoadSDNode>(Op);
- EVT MemVT = Load->getMemoryVT();
- EVT MemEltVT = MemVT.getVectorElementType();
+std::pair<SDValue, SDValue>
+AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc SL(Op);
- EVT LoadVT = Op.getValueType();
- EVT EltVT = LoadVT.getVectorElementType();
- EVT PtrVT = Load->getBasePtr().getValueType();
+ SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
- unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
- SmallVector<SDValue, 8> Loads;
- SmallVector<SDValue, 8> Chains;
+ const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
+ const SDValue One = DAG.getConstant(1, SL, MVT::i32);
- SDLoc SL(Op);
- unsigned MemEltSize = MemEltVT.getStoreSize();
- MachinePointerInfo SrcValue(Load->getMemOperand()->getValue());
+ SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
+ SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
- DAG.getConstant(i * MemEltSize, SL, PtrVT));
+ return std::make_pair(Lo, Hi);
+}
- SDValue NewLoad
- = DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
- Load->getChain(), Ptr,
- SrcValue.getWithOffset(i * MemEltSize),
- MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
- Load->isInvariant(), Load->getAlignment());
- Loads.push_back(NewLoad.getValue(0));
- Chains.push_back(NewLoad.getValue(1));
- }
+SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc SL(Op);
- SDValue Ops[] = {
- DAG.getNode(ISD::BUILD_VECTOR, SL, LoadVT, Loads),
- DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains)
- };
+ SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
+ const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
+}
- return DAG.getMergeValues(Ops, SL);
+SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc SL(Op);
+
+ SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
+ const SDValue One = DAG.getConstant(1, SL, MVT::i32);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
}
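
All three helpers lean on the same reinterpretation; in scalar terms, with the little-endian lane order this target uses (helper names ours):

  #include <cstdint>
  uint32_t lo_half(uint64_t X) { return (uint32_t)X; }         // element 0
  uint32_t hi_half(uint64_t X) { return (uint32_t)(X >> 32); } // element 1
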
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
SelectionDAG &DAG) const {
+ LoadSDNode *Load = cast<LoadSDNode>(Op);
EVT VT = Op.getValueType();
+
   // If this is a 2-element vector, we really want to scalarize and not create
   // weird 1-element vectors.
if (VT.getVectorNumElements() == 2)
- return ScalarizeVectorLoad(Op, DAG);
+ return scalarizeVectorLoad(Load, DAG);
- LoadSDNode *Load = cast<LoadSDNode>(Op);
SDValue BasePtr = Load->getBasePtr();
EVT PtrVT = BasePtr.getValueType();
EVT MemVT = Load->getMemoryVT();
@@ -1245,22 +1078,15 @@ SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
unsigned BaseAlign = Load->getAlignment();
unsigned HiAlign = MinAlign(BaseAlign, Size);
- SDValue LoLoad
- = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
- Load->getChain(), BasePtr,
- SrcValue,
- LoMemVT, Load->isVolatile(), Load->isNonTemporal(),
- Load->isInvariant(), BaseAlign);
-
+ SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
+ Load->getChain(), BasePtr, SrcValue, LoMemVT,
+ BaseAlign, Load->getMemOperand()->getFlags());
SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
DAG.getConstant(Size, SL, PtrVT));
-
- SDValue HiLoad
- = DAG.getExtLoad(Load->getExtensionType(), SL, HiVT,
- Load->getChain(), HiPtr,
- SrcValue.getWithOffset(LoMemVT.getStoreSize()),
- HiMemVT, Load->isVolatile(), Load->isNonTemporal(),
- Load->isInvariant(), HiAlign);
+ SDValue HiLoad =
+ DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
+ HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
+ HiMemVT, HiAlign, Load->getMemOperand()->getFlags());
SDValue Ops[] = {
DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
@@ -1271,6 +1097,8 @@ SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
return DAG.getMergeValues(Ops, SL);
}
+// FIXME: This isn't doing anything for SI. This should be used in a target
+// combine during type legalization.
SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
SelectionDAG &DAG) const {
StoreSDNode *Store = cast<StoreSDNode>(Op);
@@ -1317,48 +1145,15 @@ SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
if (PackedSize < 32) {
EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
- Store->getMemOperand()->getPointerInfo(),
- PackedVT,
- Store->isNonTemporal(), Store->isVolatile(),
- Store->getAlignment());
+ Store->getMemOperand()->getPointerInfo(), PackedVT,
+ Store->getAlignment(),
+ Store->getMemOperand()->getFlags());
}
return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
Store->getMemOperand()->getPointerInfo(),
- Store->isVolatile(), Store->isNonTemporal(),
- Store->getAlignment());
-}
-
-SDValue AMDGPUTargetLowering::ScalarizeVectorStore(SDValue Op,
- SelectionDAG &DAG) const {
- StoreSDNode *Store = cast<StoreSDNode>(Op);
- EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
- EVT EltVT = Store->getValue().getValueType().getVectorElementType();
- EVT PtrVT = Store->getBasePtr().getValueType();
- unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
- SDLoc SL(Op);
-
- SmallVector<SDValue, 8> Chains;
-
- unsigned EltSize = MemEltVT.getStoreSize();
- MachinePointerInfo SrcValue(Store->getMemOperand()->getValue());
-
- for (unsigned i = 0, e = NumElts; i != e; ++i) {
- SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
- Store->getValue(),
- DAG.getConstant(i, SL, MVT::i32));
-
- SDValue Offset = DAG.getConstant(i * MemEltVT.getStoreSize(), SL, PtrVT);
- SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Store->getBasePtr(), Offset);
- SDValue NewStore =
- DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
- SrcValue.getWithOffset(i * EltSize),
- MemEltVT, Store->isNonTemporal(), Store->isVolatile(),
- Store->getAlignment());
- Chains.push_back(NewStore);
- }
-
- return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains);
+ Store->getAlignment(),
+ Store->getMemOperand()->getFlags());
}
SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
@@ -1370,7 +1165,7 @@ SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
   // If this is a 2-element vector, we really want to scalarize and not create
   // weird 1-element vectors.
if (VT.getVectorNumElements() == 2)
- return ScalarizeVectorStore(Op, DAG);
+ return scalarizeVectorStore(Store, DAG);
EVT MemVT = Store->getMemoryVT();
SDValue Chain = Store->getChain();
@@ -1395,171 +1190,21 @@ SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
unsigned Size = LoMemVT.getStoreSize();
unsigned HiAlign = MinAlign(BaseAlign, Size);
- SDValue LoStore
- = DAG.getTruncStore(Chain, SL, Lo,
- BasePtr,
- SrcValue,
- LoMemVT,
- Store->isNonTemporal(),
- Store->isVolatile(),
- BaseAlign);
- SDValue HiStore
- = DAG.getTruncStore(Chain, SL, Hi,
- HiPtr,
- SrcValue.getWithOffset(Size),
- HiMemVT,
- Store->isNonTemporal(),
- Store->isVolatile(),
- HiAlign);
+ SDValue LoStore =
+ DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
+ Store->getMemOperand()->getFlags());
+ SDValue HiStore =
+ DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
+ HiMemVT, HiAlign, Store->getMemOperand()->getFlags());
return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
}
-
-SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
- SDLoc DL(Op);
- LoadSDNode *Load = cast<LoadSDNode>(Op);
- ISD::LoadExtType ExtType = Load->getExtensionType();
- EVT VT = Op.getValueType();
- EVT MemVT = Load->getMemoryVT();
-
- if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
- assert(VT == MVT::i1 && "Only i1 non-extloads expected");
- // FIXME: Copied from PPC
- // First, load into 32 bits, then truncate to 1 bit.
-
- SDValue Chain = Load->getChain();
- SDValue BasePtr = Load->getBasePtr();
- MachineMemOperand *MMO = Load->getMemOperand();
-
- SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
- BasePtr, MVT::i8, MMO);
-
- SDValue Ops[] = {
- DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD),
- NewLD.getValue(1)
- };
-
- return DAG.getMergeValues(Ops, DL);
- }
-
- if (Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS ||
- Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
- ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
- return SDValue();
-
- // <SI && AS=PRIVATE && EXTLOAD && size < 32bit,
- // register (2-)byte extract.
-
- // Get Register holding the target.
- SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
- DAG.getConstant(2, DL, MVT::i32));
- // Load the Register.
- SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
- Load->getChain(), Ptr,
- DAG.getTargetConstant(0, DL, MVT::i32),
- Op.getOperand(2));
-
- // Get offset within the register.
- SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
- Load->getBasePtr(),
- DAG.getConstant(0x3, DL, MVT::i32));
-
- // Bit offset of target byte (byteIdx * 8).
- SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
- DAG.getConstant(3, DL, MVT::i32));
-
- // Shift to the right.
- Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);
-
- // Eliminate the upper bits by setting them to ...
- EVT MemEltVT = MemVT.getScalarType();
-
- // ... ones.
- if (ExtType == ISD::SEXTLOAD) {
- SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
-
- SDValue Ops[] = {
- DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode),
- Load->getChain()
- };
-
- return DAG.getMergeValues(Ops, DL);
- }
-
- // ... or zeros.
- SDValue Ops[] = {
- DAG.getZeroExtendInReg(Ret, DL, MemEltVT),
- Load->getChain()
- };
-
- return DAG.getMergeValues(Ops, DL);
-}
-
-SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
- SDLoc DL(Op);
- SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
- if (Result.getNode()) {
- return Result;
- }
-
- StoreSDNode *Store = cast<StoreSDNode>(Op);
- SDValue Chain = Store->getChain();
- if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
- Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
- Store->getValue().getValueType().isVector()) {
- return SplitVectorStore(Op, DAG);
- }
-
- EVT MemVT = Store->getMemoryVT();
- if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
- MemVT.bitsLT(MVT::i32)) {
- unsigned Mask = 0;
- if (Store->getMemoryVT() == MVT::i8) {
- Mask = 0xff;
- } else if (Store->getMemoryVT() == MVT::i16) {
- Mask = 0xffff;
- }
- SDValue BasePtr = Store->getBasePtr();
- SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
- DAG.getConstant(2, DL, MVT::i32));
- SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
- Chain, Ptr,
- DAG.getTargetConstant(0, DL, MVT::i32));
-
- SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
- DAG.getConstant(0x3, DL, MVT::i32));
-
- SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
- DAG.getConstant(3, DL, MVT::i32));
-
- SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
- Store->getValue());
-
- SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);
-
- SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
- MaskedValue, ShiftAmt);
-
- SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32,
- DAG.getConstant(Mask, DL, MVT::i32),
- ShiftAmt);
- DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
- DAG.getConstant(0xffffffff, DL, MVT::i32));
- Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);
-
- SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
- return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
- Chain, Value, Ptr,
- DAG.getTargetConstant(0, DL, MVT::i32));
- }
- return SDValue();
-}
-
// This is a shortcut for integer division because we have fast i32<->f32
// conversions, and fast f32 reciprocal instructions. The fractional part of a
-// float is enough to accurately represent up to a 24-bit integer.
-SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const {
+// float is enough to accurately represent up to a 24-bit signed integer.
+SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
+ bool Sign) const {
SDLoc DL(Op);
EVT VT = Op.getValueType();
SDValue LHS = Op.getOperand(0);
@@ -1567,20 +1212,26 @@ SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool
MVT IntVT = MVT::i32;
MVT FltVT = MVT::f32;
- ISD::NodeType ToFp = sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
- ISD::NodeType ToInt = sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
+ unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
+ if (LHSSignBits < 9)
+ return SDValue();
- if (VT.isVector()) {
- unsigned NElts = VT.getVectorNumElements();
- IntVT = MVT::getVectorVT(MVT::i32, NElts);
- FltVT = MVT::getVectorVT(MVT::f32, NElts);
- }
+ unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
+ if (RHSSignBits < 9)
+ return SDValue();
+
+ unsigned BitSize = VT.getSizeInBits();
+ unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
+ unsigned DivBits = BitSize - SignBits;
+ if (Sign)
+ ++DivBits;
- unsigned BitSize = VT.getScalarType().getSizeInBits();
+ ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
+ ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
SDValue jq = DAG.getConstant(1, DL, IntVT);
- if (sign) {
+ if (Sign) {
// char|short jq = ia ^ ib;
jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
@@ -1590,18 +1241,13 @@ SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool
// jq = jq | 0x1
jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
-
- // jq = (int)jq
- jq = DAG.getSExtOrTrunc(jq, DL, IntVT);
}
// int ia = (int)LHS;
- SDValue ia = sign ?
- DAG.getSExtOrTrunc(LHS, DL, IntVT) : DAG.getZExtOrTrunc(LHS, DL, IntVT);
+ SDValue ia = LHS;
  // int ib = (int)RHS;
- SDValue ib = sign ?
- DAG.getSExtOrTrunc(RHS, DL, IntVT) : DAG.getZExtOrTrunc(RHS, DL, IntVT);
+ SDValue ib = RHS;
// float fa = (float)ia;
SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);
@@ -1609,8 +1255,6 @@ SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool
// float fb = (float)ib;
SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);
- // TODO: Should this propagate fast-math-flags?
- // float fq = native_divide(fa, fb);
SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
@@ -1621,8 +1265,7 @@ SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool
SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);
// float fr = mad(fqneg, fb, fa);
- SDValue fr = DAG.getNode(ISD::FADD, DL, FltVT,
- DAG.getNode(ISD::FMUL, DL, FltVT, fqneg, fb), fa);
+ SDValue fr = DAG.getNode(ISD::FMAD, DL, FltVT, fqneg, fb, fa);
// int iq = (int)fq;
SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
@@ -1641,9 +1284,6 @@ SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool
// jq = (cv ? jq : 0);
jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
- // dst = trunc/extend to legal type
- iq = sign ? DAG.getSExtOrTrunc(iq, DL, VT) : DAG.getZExtOrTrunc(iq, DL, VT);
-
// dst = iq + jq;
SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
@@ -1651,11 +1291,19 @@ SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool
SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
- SDValue Res[2] = {
- Div,
- Rem
- };
- return DAG.getMergeValues(Res, DL);
+ // Truncate to the number of bits this divide really needs.
+ if (Sign) {
+ SDValue InRegSize
+ = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
+ Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
+ Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
+ } else {
+ SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
+ Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
+ Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
+ }
+
+ return DAG.getMergeValues({ Div, Rem }, DL);
}
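
Requiring at least 9 sign bits on each operand leaves at most 24 significant bits (sign included), and an f32 significand (23 stored bits plus the implicit leading one) represents every such integer exactly, so the reciprocal path cannot lose the quotient. A scalar rendering of the signed sequence above (illustrative C++, assuming b != 0; it mirrors the DAG nodes, not the emitted ISA):

  #include <cmath>
  #include <cstdint>
  void divrem24(int32_t a, int32_t b, int32_t &q, int32_t &r) {
    int32_t jq = ((a ^ b) >> 30) | 1;    // +1/-1, matching the quotient sign
    float fa = (float)a, fb = (float)b;
    float fq = fa * (1.0f / fb);         // RCP + FMUL
    int32_t iq = (int32_t)fq;            // trunc toward zero
    float fr = std::fabs(-fq * fb + fa); // |FMAD(fqneg, fb, fa)|
    q = iq + (fr >= std::fabs(fb) ? jq : 0);
    r = a - q * b;
  }

The final SIGN_EXTEND_INREG/AND pair above only re-canonicalizes the result to DivBits; for in-range inputs q and r are already exact.
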
void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
@@ -1686,10 +1334,11 @@ void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
LHS_Lo, RHS_Lo);
- SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, Res.getValue(0), zero);
- SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, Res.getValue(1), zero);
- Results.push_back(DIV);
- Results.push_back(REM);
+ SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), zero});
+ SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), zero});
+
+ Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
+ Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
return;
}
@@ -1698,7 +1347,8 @@ void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ);
- SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, zero);
+ SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, zero});
+ REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ);
SDValue DIV_Lo = zero;
@@ -1718,7 +1368,7 @@ void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
// Add LHS high bit
REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
- SDValue BIT = DAG.getConstant(1 << bitPos, DL, HalfVT);
+ SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETUGE);
DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
@@ -1728,7 +1378,8 @@ void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
}
- SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, DIV_Lo, DIV_Hi);
+ SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
+ DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
Results.push_back(DIV);
Results.push_back(REM);
}
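
Conceptually, the loop above performs one restoring-division step per quotient bit, with SETUGE selects standing in for the branch; a plain-C++ reference of the same algorithm (helper name ours, assumes D != 0):

  #include <cstdint>
  uint64_t udiv64(uint64_t N, uint64_t D, uint64_t &Rem) {
    uint64_t Q = 0, R = 0;
    for (int Bit = 63; Bit >= 0; --Bit) {
      R = (R << 1) | ((N >> Bit) & 1); // shift in the next numerator bit
      if (R >= D) {                    // the SETUGE select above
        R -= D;
        Q |= 1ULL << Bit;
      }
    }
    Rem = R;
    return Q;
  }
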
@@ -1744,19 +1395,14 @@ SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
return DAG.getMergeValues(Results, DL);
}
- SDValue Num = Op.getOperand(0);
- SDValue Den = Op.getOperand(1);
-
if (VT == MVT::i32) {
- if (DAG.MaskedValueIsZero(Num, APInt::getHighBitsSet(32, 8)) &&
- DAG.MaskedValueIsZero(Den, APInt::getHighBitsSet(32, 8))) {
- // TODO: We technically could do this for i64, but shouldn't that just be
- // handled by something generally reducing 64-bit division on 32-bit
- // values to 32-bit?
- return LowerDIVREM24(Op, DAG, false);
- }
+ if (SDValue Res = LowerDIVREM24(Op, DAG, false))
+ return Res;
}
+ SDValue Num = Op.getOperand(0);
+ SDValue Den = Op.getOperand(1);
+
// RCP = URECIP(Den) = 2^32 / Den + e
// e is rounding error.
SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);
@@ -1864,11 +1510,11 @@ SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
SDValue Zero = DAG.getConstant(0, DL, VT);
SDValue NegOne = DAG.getConstant(-1, DL, VT);
- if (VT == MVT::i32 &&
- DAG.ComputeNumSignBits(LHS) > 8 &&
- DAG.ComputeNumSignBits(RHS) > 8) {
- return LowerDIVREM24(Op, DAG, true);
+ if (VT == MVT::i32) {
+ if (SDValue Res = LowerDIVREM24(Op, DAG, true))
+ return Res;
}
+
if (VT == MVT::i64 &&
DAG.ComputeNumSignBits(LHS) > 32 &&
DAG.ComputeNumSignBits(RHS) > 32) {
@@ -1954,7 +1600,8 @@ SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}
-static SDValue extractF64Exponent(SDValue Hi, SDLoc SL, SelectionDAG &DAG) {
+static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
+ SelectionDAG &DAG) {
const unsigned FractBits = 52;
const unsigned ExpBits = 11;
@@ -1992,8 +1639,7 @@ SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
  // Extend back to 64 bits.
- SDValue SignBit64 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
- Zero, SignBit);
+ SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
@@ -2391,7 +2037,7 @@ SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
MVT::i32, FloorMul);
SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
- SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Lo, Hi);
+ SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi});
return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
}
@@ -2437,7 +2083,7 @@ SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
for (unsigned I = 0; I < NElts; ++I)
Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
- return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args);
+ return DAG.getBuildVector(VT, DL, Args);
}
//===----------------------------------------------------------------------===//
@@ -2476,8 +2122,8 @@ static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
}
template <typename IntTy>
-static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
- uint32_t Offset, uint32_t Width, SDLoc DL) {
+static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
+ uint32_t Width, const SDLoc &DL) {
if (Width + Offset < 32) {
uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
@@ -2487,55 +2133,175 @@ static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
}
-static bool usesAllNormalStores(SDNode *LoadVal) {
- for (SDNode::use_iterator I = LoadVal->use_begin(); !I.atEnd(); ++I) {
- if (!ISD::isNormalStore(*I))
- return false;
+static bool hasVolatileUser(SDNode *Val) {
+ for (SDNode *U : Val->uses()) {
+ if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
+ if (M->isVolatile())
+ return true;
+ }
}
+ return false;
+}
+
+bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
+ // i32 vectors are the canonical memory type.
+ if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT))
+ return false;
+
+ if (!VT.isByteSized())
+ return false;
+
+ unsigned Size = VT.getStoreSize();
+
+ if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector())
+ return false;
+
+ if (Size == 3 || (Size > 4 && (Size % 4 != 0)))
+ return false;
+
return true;
}
-// If we have a copy of an illegal type, replace it with a load / store of an
-// equivalently sized legal type. This avoids intermediate bit pack / unpack
-// instructions emitted when handling extloads and truncstores. Ideally we could
-// recognize the pack / unpack pattern to eliminate it.
+// Replace a load of an illegal type with a load of a bitcast to a friendlier
+// type.
+SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ if (!DCI.isBeforeLegalize())
+ return SDValue();
+
+ LoadSDNode *LN = cast<LoadSDNode>(N);
+ if (LN->isVolatile() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
+ return SDValue();
+
+ SDLoc SL(N);
+ SelectionDAG &DAG = DCI.DAG;
+ EVT VT = LN->getMemoryVT();
+
+ unsigned Size = VT.getStoreSize();
+ unsigned Align = LN->getAlignment();
+ if (Align < Size && isTypeLegal(VT)) {
+ bool IsFast;
+ unsigned AS = LN->getAddressSpace();
+
+ // Expand unaligned loads earlier than legalization. Due to visitation order
+ // problems during legalization, the emitted instructions to pack and unpack
+ // the bytes again are not eliminated in the case of an unaligned copy.
+ if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) {
+ SDValue Ops[2];
+ std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
+ return DAG.getMergeValues(Ops, SDLoc(N));
+ }
+
+ if (!IsFast)
+ return SDValue();
+ }
+
+ if (!shouldCombineMemoryType(VT))
+ return SDValue();
+
+ EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
+
+ SDValue NewLoad
+ = DAG.getLoad(NewVT, SL, LN->getChain(),
+ LN->getBasePtr(), LN->getMemOperand());
+
+ SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad);
+ DCI.CombineTo(N, BC, NewLoad.getValue(1));
+ return SDValue(N, 0);
+}
+
+// Replace store of an illegal type with a store of a bitcast to a friendlier
+// type.
SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
if (!DCI.isBeforeLegalize())
return SDValue();
StoreSDNode *SN = cast<StoreSDNode>(N);
- SDValue Value = SN->getValue();
- EVT VT = Value.getValueType();
+ if (SN->isVolatile() || !ISD::isNormalStore(SN))
+ return SDValue();
- if (isTypeLegal(VT) || SN->isVolatile() ||
- !ISD::isNormalLoad(Value.getNode()) || VT.getSizeInBits() < 8)
+ EVT VT = SN->getMemoryVT();
+ unsigned Size = VT.getStoreSize();
+
+ SDLoc SL(N);
+ SelectionDAG &DAG = DCI.DAG;
+ unsigned Align = SN->getAlignment();
+ if (Align < Size && isTypeLegal(VT)) {
+ bool IsFast;
+ unsigned AS = SN->getAddressSpace();
+
+ // Expand unaligned stores earlier than legalization. Due to visitation
+ // order problems during legalization, the emitted instructions to pack and
+ // unpack the bytes again are not eliminated in the case of an unaligned
+ // copy.
+ if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast))
+ return expandUnalignedStore(SN, DAG);
+
+ if (!IsFast)
+ return SDValue();
+ }
+
+ if (!shouldCombineMemoryType(VT))
+ return SDValue();
+
+ EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
+ SDValue Val = SN->getValue();
+
+ //DCI.AddToWorklist(Val.getNode());
+
+ bool OtherUses = !Val.hasOneUse();
+ SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val);
+ if (OtherUses) {
+ SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal);
+ DAG.ReplaceAllUsesOfValueWith(Val, CastBack);
+ }
+
+ return DAG.getStore(SN->getChain(), SL, CastVal,
+ SN->getBasePtr(), SN->getMemOperand());
+}
+
+// TODO: Should repeat for other bit ops.
+SDValue AMDGPUTargetLowering::performAndCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ if (N->getValueType(0) != MVT::i64)
return SDValue();
- LoadSDNode *LoadVal = cast<LoadSDNode>(Value);
- if (LoadVal->isVolatile() || !usesAllNormalStores(LoadVal))
+ // Break up a 64-bit AND with a constant into two 32-bit ANDs. This will
+ // typically happen anyway for a VALU 64-bit AND, and it exposes other 32-bit
+ // integer combine opportunities, since most 64-bit operations are decomposed
+ // this way.
+ // TODO: We won't want this for SALU especially if it is an inline immediate.
+ const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (!RHS)
return SDValue();
- EVT MemVT = LoadVal->getMemoryVT();
+ uint64_t Val = RHS->getZExtValue();
+ if (Lo_32(Val) != 0 && Hi_32(Val) != 0 && !RHS->hasOneUse()) {
+ // If either half of the constant is 0, this is really a 32-bit and, so
+ // split it. If we can re-use the full materialized constant, keep it.
+ return SDValue();
+ }
SDLoc SL(N);
SelectionDAG &DAG = DCI.DAG;
- EVT LoadVT = getEquivalentMemType(*DAG.getContext(), MemVT);
- SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
- LoadVT, SL,
- LoadVal->getChain(),
- LoadVal->getBasePtr(),
- LoadVal->getOffset(),
- LoadVT,
- LoadVal->getMemOperand());
+ SDValue Lo, Hi;
+ std::tie(Lo, Hi) = split64BitValue(N->getOperand(0), DAG);
- SDValue CastLoad = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad.getValue(0));
- DCI.CombineTo(LoadVal, CastLoad, NewLoad.getValue(1), false);
+ SDValue LoRHS = DAG.getConstant(Lo_32(Val), SL, MVT::i32);
+ SDValue HiRHS = DAG.getConstant(Hi_32(Val), SL, MVT::i32);
- return DAG.getStore(SN->getChain(), SL, NewLoad,
- SN->getBasePtr(), SN->getMemOperand());
+ SDValue LoAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Lo, LoRHS);
+ SDValue HiAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, HiRHS);
+
+ // Re-visit the ands. It's possible we eliminated one of them and it could
+ // simplify the vector.
+ DCI.AddToWorklist(Lo.getNode());
+ DCI.AddToWorklist(Hi.getNode());
+
+ SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
+ return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
}
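
The identity being exploited, in scalar form (helper name ours; Lo_32/Hi_32 are the llvm::Lo_32/Hi_32 used above):

  #include <cstdint>
  uint64_t and64_split(uint64_t X, uint64_t C) {
    uint32_t Lo = (uint32_t)X & (uint32_t)C;                 // Lo_32 halves
    uint32_t Hi = (uint32_t)(X >> 32) & (uint32_t)(C >> 32); // Hi_32 halves
    return ((uint64_t)Hi << 32) | Lo;
  }

When either half of C is zero the corresponding 32-bit AND folds away, which is why only the both-halves-nonzero, multi-use case keeps the materialized 64-bit constant.
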
SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
@@ -2543,14 +2309,17 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
if (N->getValueType(0) != MVT::i64)
return SDValue();
- // i64 (shl x, 32) -> (build_pair 0, x)
+ // i64 (shl x, C) -> (build_pair 0, (shl x, C -32))
- // Doing this with moves theoretically helps MI optimizations that understand
- // copies. 2 v_mov_b32_e32 will have the same code size / cycle count as
- // v_lshl_b64. In the SALU case, I think this is slightly worse since it
- // doubles the code size and I'm unsure about cycle count.
+ // On some subtargets, a 64-bit shift is a quarter-rate instruction. In the
+ // common case, splitting this into a move and a 32-bit shift is faster and
+ // the same code size.
const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
- if (!RHS || RHS->getZExtValue() != 32)
+ if (!RHS)
+ return SDValue();
+
+ unsigned RHSVal = RHS->getZExtValue();
+ if (RHSVal < 32)
return SDValue();
SDValue LHS = N->getOperand(0);
@@ -2558,11 +2327,85 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
SDLoc SL(N);
SelectionDAG &DAG = DCI.DAG;
- // Extract low 32-bits.
+ SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
+
SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
+ SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
- return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64, Zero, Lo);
+
+ SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
+ return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
+}
+
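
For C >= 32 every bit that survives the shift comes from the low half of x,
so one 32-bit shift plus a zero low word reproduces the 64-bit result. A
hypothetical standalone check of that identity:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t X = 0x00000001deadbeefULL;
      for (unsigned C = 32; C < 64; ++C) {
        uint32_t Hi = uint32_t(X) << (C - 32); // NewShift on the low half
        uint64_t Pair = uint64_t(Hi) << 32;    // build_pair 0, NewShift
        assert(Pair == (X << C));
      }
      return 0;
    }
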
+SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ if (N->getValueType(0) != MVT::i64)
+ return SDValue();
+
+ const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (!RHS)
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc SL(N);
+ unsigned RHSVal = RHS->getZExtValue();
+
+ // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31)
+ if (RHSVal == 32) {
+ SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
+ SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
+ DAG.getConstant(31, SL, MVT::i32));
+
+ SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
+ return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
+ }
+
+ // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
+ if (RHSVal == 63) {
+ SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
+ SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
+ DAG.getConstant(31, SL, MVT::i32));
+ SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
+ return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
+ }
+
+ return SDValue();
+}
+
+SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ if (N->getValueType(0) != MVT::i64)
+ return SDValue();
+
+ const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (!RHS)
+ return SDValue();
+
+ unsigned ShiftAmt = RHS->getZExtValue();
+ if (ShiftAmt < 32)
+ return SDValue();
+
+ // srl i64:x, C for C >= 32
+ // =>
+ // build_pair (srl hi_32(x), C - 32), 0
+
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc SL(N);
+
+ SDValue One = DAG.getConstant(1, SL, MVT::i32);
+ SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
+
+ SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, N->getOperand(0));
+ SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32,
+ VecOp, One);
+
+ SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
+ SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
+
+ SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
+
+ return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
}
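
The sra and srl combines above mirror the shl case: once the shift amount
reaches 32, only the high half of x matters, with srl zero-filling the new
high word and sra replicating the sign bit. A hypothetical check of the
identities involved (assumes two's complement conversions and arithmetic
right shift on signed values, which C++20 guarantees):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t X = 0xfedcba9876543210ULL;
      uint32_t Hi = uint32_t(X >> 32);                 // hi_32(x)
      int32_t Sign = int32_t(Hi) >> 31;                // sra hi_32(x), 31
      // (sra x, 32) == build_pair hi_32(x), sign
      assert(((uint64_t(uint32_t(Sign)) << 32) | Hi) ==
             uint64_t(int64_t(X) >> 32));
      // (sra x, 63) == build_pair sign, sign
      assert(((uint64_t(uint32_t(Sign)) << 32) | uint32_t(Sign)) ==
             uint64_t(int64_t(X) >> 63));
      // srl x, C == build_pair (srl hi_32(x), C - 32), 0 for C >= 32
      for (unsigned C = 32; C < 64; ++C)
        assert(uint64_t(Hi >> (C - 32)) == (X >> C));
      return 0;
    }
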
SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
@@ -2610,8 +2453,8 @@ static bool isCtlzOpc(unsigned Opc) {
// type VT.
// Need to match pre-legalized type because the generic legalization inserts the
// add/sub between the select and compare.
-static SDValue getFFBH_U32(const TargetLowering &TLI,
- SelectionDAG &DAG, SDLoc SL, SDValue Op) {
+static SDValue getFFBH_U32(const TargetLowering &TLI, SelectionDAG &DAG,
+ const SDLoc &SL, SDValue Op) {
EVT VT = Op.getValueType();
EVT LegalVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
if (LegalVT != MVT::i32)
@@ -2634,10 +2477,8 @@ static SDValue getFFBH_U32(const TargetLowering &TLI,
// against the bitwidth.
//
// TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
-SDValue AMDGPUTargetLowering::performCtlzCombine(SDLoc SL,
- SDValue Cond,
- SDValue LHS,
- SDValue RHS,
+SDValue AMDGPUTargetLowering::performCtlzCombine(const SDLoc &SL, SDValue Cond,
+ SDValue LHS, SDValue RHS,
DAGCombinerInfo &DCI) const {
ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
if (!CmpRhs || !CmpRhs->isNullValue())
@@ -2680,8 +2521,13 @@ SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
SDValue True = N->getOperand(1);
SDValue False = N->getOperand(2);
- if (VT == MVT::f32 && Cond.hasOneUse())
- return CombineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
+ if (VT == MVT::f32 && Cond.hasOneUse()) {
+    SDValue MinMax =
+        CombineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
+    // Revisiting this node would let us catch min3/max3/med3 patterns, but
+    // doing so is currently disabled:
+    // DCI.AddToWorklist(MinMax.getNode());
+ return MinMax;
+ }
// There's no reason to not do this if the condition has other uses.
return performCtlzCombine(SDLoc(N), Cond, True, False, DCI);
@@ -2695,12 +2541,62 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
switch(N->getOpcode()) {
default:
break;
+ case ISD::BITCAST: {
+ EVT DestVT = N->getValueType(0);
+ if (DestVT.getSizeInBits() != 64 && !DestVT.isVector())
+ break;
+
+ // Fold bitcasts of constants.
+ //
+ // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
+ // TODO: Generalize and move to DAGCombiner
+ SDValue Src = N->getOperand(0);
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
+ assert(Src.getValueType() == MVT::i64);
+ SDLoc SL(N);
+ uint64_t CVal = C->getZExtValue();
+ return DAG.getNode(ISD::BUILD_VECTOR, SL, DestVT,
+ DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
+ DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
+ }
+
+ if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
+ const APInt &Val = C->getValueAPF().bitcastToAPInt();
+ SDLoc SL(N);
+ uint64_t CVal = Val.getZExtValue();
+ SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
+ DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
+ DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
+
+ return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
+ }
+
+ break;
+ }
case ISD::SHL: {
if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
break;
return performShlCombine(N, DCI);
}
+ case ISD::SRL: {
+ if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
+ break;
+
+ return performSrlCombine(N, DCI);
+ }
+ case ISD::SRA: {
+ if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
+ break;
+
+ return performSraCombine(N, DCI);
+ }
+ case ISD::AND: {
+ if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
+ break;
+
+ return performAndCombine(N, DCI);
+ }
case ISD::MUL:
return performMulCombine(N, DCI);
case AMDGPUISD::MUL_I24:
@@ -2797,7 +2693,8 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
break;
}
-
+ case ISD::LOAD:
+ return performLoadCombine(N, DCI);
case ISD::STORE:
return performStoreCombine(N, DCI);
}
@@ -2840,20 +2737,6 @@ void AMDGPUTargetLowering::getOriginalFunctionArgs(
}
}
-bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
- if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
- return CFP->isExactlyValue(1.0);
- }
- return isAllOnesConstant(Op);
-}
-
-bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
- if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
- return CFP->getValueAPF().isZero();
- }
- return isNullConstant(Op);
-}
-
SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
const TargetRegisterClass *RC,
unsigned Reg, EVT VT) const {
@@ -2889,10 +2772,11 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
// AMDIL DAG nodes
NODE_NAME_CASE(CALL);
NODE_NAME_CASE(UMUL);
- NODE_NAME_CASE(RET_FLAG);
NODE_NAME_CASE(BRANCH_COND);
// AMDGPU DAG nodes
+ NODE_NAME_CASE(ENDPGM)
+ NODE_NAME_CASE(RETURN)
NODE_NAME_CASE(DWORDADDR)
NODE_NAME_CASE(FRACT)
NODE_NAME_CASE(CLAMP)
@@ -2906,6 +2790,9 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(FMIN3)
NODE_NAME_CASE(SMIN3)
NODE_NAME_CASE(UMIN3)
+ NODE_NAME_CASE(FMED3)
+ NODE_NAME_CASE(SMED3)
+ NODE_NAME_CASE(UMED3)
NODE_NAME_CASE(URECIP)
NODE_NAME_CASE(DIV_SCALE)
NODE_NAME_CASE(DIV_FMAS)
@@ -2914,7 +2801,7 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(RCP)
NODE_NAME_CASE(RSQ)
NODE_NAME_CASE(RSQ_LEGACY)
- NODE_NAME_CASE(RSQ_CLAMPED)
+ NODE_NAME_CASE(RSQ_CLAMP)
NODE_NAME_CASE(LDEXP)
NODE_NAME_CASE(FP_CLASS)
NODE_NAME_CASE(DOT4)
@@ -2934,7 +2821,6 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(CONST_ADDRESS)
NODE_NAME_CASE(REGISTER_LOAD)
NODE_NAME_CASE(REGISTER_STORE)
- NODE_NAME_CASE(LOAD_CONSTANT)
NODE_NAME_CASE(LOAD_INPUT)
NODE_NAME_CASE(SAMPLE)
NODE_NAME_CASE(SAMPLEB)
@@ -2946,13 +2832,18 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(CVT_F32_UBYTE3)
NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
NODE_NAME_CASE(CONST_DATA_PTR)
+ NODE_NAME_CASE(PC_ADD_REL_OFFSET)
case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
NODE_NAME_CASE(SENDMSG)
NODE_NAME_CASE(INTERP_MOV)
NODE_NAME_CASE(INTERP_P1)
NODE_NAME_CASE(INTERP_P2)
NODE_NAME_CASE(STORE_MSKOR)
+ NODE_NAME_CASE(LOAD_CONSTANT)
NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
+ NODE_NAME_CASE(ATOMIC_CMP_SWAP)
+ NODE_NAME_CASE(ATOMIC_INC)
+ NODE_NAME_CASE(ATOMIC_DEC)
case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
}
return nullptr;
@@ -2998,21 +2889,6 @@ SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
return SDValue();
}
-static void computeKnownBitsForMinMax(const SDValue Op0,
- const SDValue Op1,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) {
- APInt Op0Zero, Op0One;
- APInt Op1Zero, Op1One;
- DAG.computeKnownBits(Op0, Op0Zero, Op0One, Depth);
- DAG.computeKnownBits(Op1, Op1Zero, Op1One, Depth);
-
- KnownZero = Op0Zero & Op1Zero;
- KnownOne = Op0One & Op1One;
-}
-
void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
const SDValue Op,
APInt &KnownZero,
@@ -3029,22 +2905,6 @@ void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
switch (Opc) {
default:
break;
- case ISD::INTRINSIC_WO_CHAIN: {
- // FIXME: The intrinsic should just use the node.
- switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
- case AMDGPUIntrinsic::AMDGPU_imax:
- case AMDGPUIntrinsic::AMDGPU_umax:
- case AMDGPUIntrinsic::AMDGPU_imin:
- case AMDGPUIntrinsic::AMDGPU_umin:
- computeKnownBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
- KnownZero, KnownOne, DAG, Depth);
- break;
- default:
- break;
- }
-
- break;
- }
case AMDGPUISD::CARRY:
case AMDGPUISD::BORROW: {
KnownZero = APInt::getHighBitsSet(32, 31);
diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.h b/lib/Target/AMDGPU/AMDGPUISelLowering.h
index 37925416a9c4..c2c758592d1c 100644
--- a/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUISELLOWERING_H
-#define LLVM_LIB_TARGET_R600_AMDGPUISELLOWERING_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H
+#define LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H
#include "llvm/Target/TargetLowering.h"
@@ -28,12 +28,10 @@ class AMDGPUTargetLowering : public TargetLowering {
protected:
const AMDGPUSubtarget *Subtarget;
-private:
SDValue LowerConstantInitializer(const Constant* Init, const GlobalValue *GV,
const SDValue &InitPtr,
SDValue Chain,
SelectionDAG &DAG) const;
- SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
@@ -67,42 +65,43 @@ private:
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
+protected:
+ bool shouldCombineMemoryType(EVT VT) const;
+ SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue performAndCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performShlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
- SDValue performCtlzCombine(SDLoc SL, SDValue Cond, SDValue LHS, SDValue RHS,
- DAGCombinerInfo &DCI) const;
+ SDValue performCtlzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS,
+ SDValue RHS, DAGCombinerInfo &DCI) const;
SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;
-protected:
static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);
- static EVT getEquivalentLoadRegType(LLVMContext &Context, EVT VT);
+ static EVT getEquivalentBitType(LLVMContext &Context, EVT VT);
virtual SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
SelectionDAG &DAG) const;
- /// \brief Split a vector load into a scalar load of each component.
- SDValue ScalarizeVectorLoad(SDValue Op, SelectionDAG &DAG) const;
+ /// Return 64-bit value Op as two 32-bit integers.
+ std::pair<SDValue, SDValue> split64BitValue(SDValue Op,
+ SelectionDAG &DAG) const;
+ SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const;
+ SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const;
/// \brief Split a vector load into 2 loads of half the vector.
SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;
- /// \brief Split a vector store into a scalar store of each component.
- SDValue ScalarizeVectorStore(SDValue Op, SelectionDAG &DAG) const;
-
/// \brief Split a vector store into 2 stores of half the vector.
SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const;
void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &Results) const;
- bool isHWTrueValue(SDValue Op) const;
- bool isHWFalseValue(SDValue Op) const;
-
/// The SelectionDAGBuilder will automatically promote function arguments
/// with illegal types. However, this does not work for the AMDGPU targets
/// since the function arguments are stored in memory as these illegal types.
@@ -119,7 +118,7 @@ protected:
const SmallVectorImpl<ISD::OutputArg> &Outs) const;
public:
- AMDGPUTargetLowering(TargetMachine &TM, const AMDGPUSubtarget &STI);
+ AMDGPUTargetLowering(const TargetMachine &TM, const AMDGPUSubtarget &STI);
bool isFAbsFree(EVT VT) const override;
bool isFNegFree(EVT VT) const override;
@@ -141,7 +140,7 @@ public:
ISD::LoadExtType ExtType,
EVT ExtVT) const override;
- bool isLoadBitCastBeneficial(EVT, EVT) const override;
+ bool isLoadBitCastBeneficial(EVT, EVT) const final;
bool storeOfVectorConstantIsCheap(EVT MemVT,
unsigned NumElem,
@@ -150,11 +149,10 @@ public:
bool isCheapToSpeculateCttz() const override;
bool isCheapToSpeculateCtlz() const override;
- SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
- bool isVarArg,
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const override;
+ const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
+ SelectionDAG &DAG) const override;
SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
@@ -167,16 +165,9 @@ public:
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const override;
- SDValue LowerIntrinsicIABS(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerIntrinsicLRP(SDValue Op, SelectionDAG &DAG) const;
- SDValue CombineFMinMaxLegacy(SDLoc DL,
- EVT VT,
- SDValue LHS,
- SDValue RHS,
- SDValue True,
- SDValue False,
- SDValue CC,
- DAGCombinerInfo &DCI) const;
+ SDValue CombineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS,
+ SDValue RHS, SDValue True, SDValue False,
+ SDValue CC, DAGCombinerInfo &DCI) const;
const char* getTargetNodeName(unsigned Opcode) const override;
@@ -189,9 +180,7 @@ public:
unsigned &RefinementSteps) const override;
virtual SDNode *PostISelFolding(MachineSDNode *N,
- SelectionDAG &DAG) const {
- return N;
- }
+ SelectionDAG &DAG) const = 0;
/// \brief Determine which of the bits specified in \p Mask are known to be
/// either zero or one and return them in the \p KnownZero and \p KnownOne
@@ -214,8 +203,9 @@ public:
unsigned Reg, EVT VT) const;
enum ImplicitParameter {
- GRID_DIM,
- GRID_OFFSET
+ FIRST_IMPLICIT,
+ GRID_DIM = FIRST_IMPLICIT,
+ GRID_OFFSET,
};
/// \brief Helper function that returns the byte offset of the given
@@ -231,9 +221,10 @@ enum NodeType : unsigned {
FIRST_NUMBER = ISD::BUILTIN_OP_END,
CALL, // Function call based on a single integer
UMUL, // 32bit unsigned multiplication
- RET_FLAG,
BRANCH_COND,
// End AMDIL ISD Opcodes
+ ENDPGM,
+ RETURN,
DWORDADDR,
FRACT,
CLAMP,
@@ -250,6 +241,9 @@ enum NodeType : unsigned {
FMIN3,
SMIN3,
UMIN3,
+ FMED3,
+ SMED3,
+ UMED3,
URECIP,
DIV_SCALE,
DIV_FMAS,
@@ -261,7 +255,7 @@ enum NodeType : unsigned {
RCP,
RSQ,
RSQ_LEGACY,
- RSQ_CLAMPED,
+ RSQ_CLAMP,
LDEXP,
FP_CLASS,
DOT4,
@@ -307,10 +301,14 @@ enum NodeType : unsigned {
INTERP_MOV,
INTERP_P1,
INTERP_P2,
+ PC_ADD_REL_OFFSET,
FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
STORE_MSKOR,
LOAD_CONSTANT,
TBUFFER_STORE_FORMAT,
+ ATOMIC_CMP_SWAP,
+ ATOMIC_INC,
+ ATOMIC_DEC,
LAST_AMDGPU_ISD_NUMBER
};
diff --git a/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp b/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
index a266e711af5b..9a00ecb24ebe 100644
--- a/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
@@ -30,163 +30,8 @@ using namespace llvm;
// Pin the vtable to this file.
void AMDGPUInstrInfo::anchor() {}
-AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &st)
- : AMDGPUGenInstrInfo(-1, -1), ST(st) {}
-
-const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
- return RI;
-}
-
-bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
- unsigned &SrcReg, unsigned &DstReg,
- unsigned &SubIdx) const {
-// TODO: Implement this function
- return false;
-}
-
-unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
-// TODO: Implement this function
- return 0;
-}
-
-unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
- int &FrameIndex) const {
-// TODO: Implement this function
- return 0;
-}
-
-bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
- const MachineMemOperand *&MMO,
- int &FrameIndex) const {
-// TODO: Implement this function
- return false;
-}
-unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
-// TODO: Implement this function
- return 0;
-}
-unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
- int &FrameIndex) const {
-// TODO: Implement this function
- return 0;
-}
-bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
- const MachineMemOperand *&MMO,
- int &FrameIndex) const {
-// TODO: Implement this function
- return false;
-}
-
-MachineInstr *
-AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const {
-// TODO: Implement this function
- return nullptr;
-}
-
-void
-AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned SrcReg, bool isKill,
- int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- llvm_unreachable("Not Implemented");
-}
-
-void
-AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- llvm_unreachable("Not Implemented");
-}
-
-bool AMDGPUInstrInfo::expandPostRAPseudo (MachineBasicBlock::iterator MI) const {
- MachineBasicBlock *MBB = MI->getParent();
- int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
- AMDGPU::OpName::addr);
- // addr is a custom operand with multiple MI operands, and only the
- // first MI operand is given a name.
- int RegOpIdx = OffsetOpIdx + 1;
- int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
- AMDGPU::OpName::chan);
- if (isRegisterLoad(*MI)) {
- int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
- AMDGPU::OpName::dst);
- unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
- unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
- unsigned Address = calculateIndirectAddress(RegIndex, Channel);
- unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
- if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
- buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
- getIndirectAddrRegClass()->getRegister(Address));
- } else {
- buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
- Address, OffsetReg);
- }
- } else if (isRegisterStore(*MI)) {
- int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
- AMDGPU::OpName::val);
- unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
- unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
- unsigned Address = calculateIndirectAddress(RegIndex, Channel);
- unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
- if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
- buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
- MI->getOperand(ValOpIdx).getReg());
- } else {
- buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
- calculateIndirectAddress(RegIndex, Channel),
- OffsetReg);
- }
- } else {
- return false;
- }
-
- MBB->erase(MI);
- return true;
-}
-
-MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
- MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
-// TODO: Implement this function
- return nullptr;
-}
-MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
- MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
- // TODO: Implement this function
- return nullptr;
-}
-bool
-AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
- unsigned Reg, bool UnfoldLoad,
- bool UnfoldStore,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
- // TODO: Implement this function
- return false;
-}
-
-bool
-AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
- SmallVectorImpl<SDNode*> &NewNodes) const {
- // TODO: Implement this function
- return false;
-}
-
-unsigned
-AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
- bool UnfoldLoad, bool UnfoldStore,
- unsigned *LoadRegIndex) const {
- // TODO: Implement this function
- return 0;
-}
+AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &ST)
+ : AMDGPUGenInstrInfo(-1, -1), ST(ST) {}
bool AMDGPUInstrInfo::enableClusterLoads() const {
return true;
@@ -214,106 +59,6 @@ bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}
-bool
-AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
- const {
- // TODO: Implement this function
- return true;
-}
-void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const {
- // TODO: Implement this function
-}
-
-bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
- // TODO: Implement this function
- return false;
-}
-
-bool AMDGPUInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
- ArrayRef<MachineOperand> Pred2) const {
- // TODO: Implement this function
- return false;
-}
-
-bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const {
- // TODO: Implement this function
- return false;
-}
-
-bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
- // TODO: Implement this function
- return MI->getDesc().isPredicable();
-}
-
-bool
-AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
- // TODO: Implement this function
- return true;
-}
-
-bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
- return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
-}
-
-bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
- return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
-}
-
-int AMDGPUInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
- const MachineRegisterInfo &MRI = MF.getRegInfo();
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- int Offset = -1;
-
- if (MFI->getNumObjects() == 0) {
- return -1;
- }
-
- if (MRI.livein_empty()) {
- return 0;
- }
-
- const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
- for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
- LE = MRI.livein_end();
- LI != LE; ++LI) {
- unsigned Reg = LI->first;
- if (TargetRegisterInfo::isVirtualRegister(Reg) ||
- !IndirectRC->contains(Reg))
- continue;
-
- unsigned RegIndex;
- unsigned RegEnd;
- for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
- ++RegIndex) {
- if (IndirectRC->getRegister(RegIndex) == Reg)
- break;
- }
- Offset = std::max(Offset, (int)RegIndex);
- }
-
- return Offset + 1;
-}
-
-int AMDGPUInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
- int Offset = 0;
- const MachineFrameInfo *MFI = MF.getFrameInfo();
-
- // Variable sized objects are not supported
- assert(!MFI->hasVarSizedObjects());
-
- if (MFI->getNumObjects() == 0) {
- return -1;
- }
-
- unsigned IgnoredFrameReg;
- Offset = MF.getSubtarget().getFrameLowering()->getFrameIndexReference(
- MF, -1, IgnoredFrameReg);
-
- return getIndirectIndexBegin(MF) + Offset;
-}
-
int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
switch (Channels) {
default: return Opcode;
@@ -323,35 +68,44 @@ int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
}
}
+// This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td
+enum SIEncodingFamily {
+ SI = 0,
+ VI = 1
+};
+
// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
namespace llvm {
namespace AMDGPU {
static int getMCOpcode(uint16_t Opcode, unsigned Gen) {
- return getMCOpcodeGen(Opcode, (enum Subtarget)Gen);
+ return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}
}
}
-// This must be kept in sync with the SISubtarget class in SIInstrInfo.td
-enum SISubtarget {
- SI = 0,
- VI = 1
-};
-
-static enum SISubtarget AMDGPUSubtargetToSISubtarget(unsigned Gen) {
- switch (Gen) {
- default:
- return SI;
+static SIEncodingFamily subtargetEncodingFamily(const AMDGPUSubtarget &ST) {
+ switch (ST.getGeneration()) {
+ case AMDGPUSubtarget::SOUTHERN_ISLANDS:
+ case AMDGPUSubtarget::SEA_ISLANDS:
+ return SIEncodingFamily::SI;
case AMDGPUSubtarget::VOLCANIC_ISLANDS:
- return VI;
+ return SIEncodingFamily::VI;
+
+ // FIXME: This should never be called for r600 GPUs.
+ case AMDGPUSubtarget::R600:
+ case AMDGPUSubtarget::R700:
+ case AMDGPUSubtarget::EVERGREEN:
+ case AMDGPUSubtarget::NORTHERN_ISLANDS:
+ return SIEncodingFamily::SI;
}
+
+ llvm_unreachable("Unknown subtarget generation!");
}
int AMDGPUInstrInfo::pseudoToMCOpcode(int Opcode) const {
- int MCOp = AMDGPU::getMCOpcode(
- Opcode, AMDGPUSubtargetToSISubtarget(ST.getGeneration()));
+ int MCOp = AMDGPU::getMCOpcode(Opcode, subtargetEncodingFamily(ST));
// -1 means that Opcode is already a native instruction.
if (MCOp == -1)
@@ -364,14 +118,3 @@ int AMDGPUInstrInfo::pseudoToMCOpcode(int Opcode) const {
return MCOp;
}
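
A hypothetical caller following the contract documented in the header
(-1 means no MC equivalent exists; non-pseudo opcodes map to themselves):

    #include <cassert>
    // Sketch only; resolveForEncoding is not part of this patch.
    static unsigned resolveForEncoding(const AMDGPUInstrInfo &TII,
                                       unsigned Opcode) {
      int MCOp = TII.pseudoToMCOpcode(Opcode);
      assert(MCOp != -1 && "pseudo instruction has no encoding here");
      return static_cast<unsigned>(MCOp);
    }
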
-
-ArrayRef<std::pair<int, const char *>>
-AMDGPUInstrInfo::getSerializableTargetIndices() const {
- static const std::pair<int, const char *> TargetIndices[] = {
- {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
- {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
- {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
- {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
- {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
- return makeArrayRef(TargetIndices);
-}
diff --git a/lib/Target/AMDGPU/AMDGPUInstrInfo.h b/lib/Target/AMDGPU/AMDGPUInstrInfo.h
index 53e8b23b3d62..a59eafadeb93 100644
--- a/lib/Target/AMDGPU/AMDGPUInstrInfo.h
+++ b/lib/Target/AMDGPU/AMDGPUInstrInfo.h
@@ -13,12 +13,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUINSTRINFO_H
-#define LLVM_LIB_TARGET_R600_AMDGPUINSTRINFO_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUINSTRINFO_H
+#define LLVM_LIB_TARGET_AMDGPU_AMDGPUINSTRINFO_H
-#include "AMDGPURegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include <map>
#define GET_INSTRINFO_HEADER
#define GET_INSTRINFO_ENUM
@@ -39,78 +37,12 @@ class MachineInstrBuilder;
class AMDGPUInstrInfo : public AMDGPUGenInstrInfo {
private:
- const AMDGPURegisterInfo RI;
- virtual void anchor();
-protected:
const AMDGPUSubtarget &ST;
-public:
- explicit AMDGPUInstrInfo(const AMDGPUSubtarget &st);
- virtual const AMDGPURegisterInfo &getRegisterInfo() const = 0;
-
- bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
- unsigned &DstReg, unsigned &SubIdx) const override;
-
- unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const override;
- unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
- int &FrameIndex) const override;
- bool hasLoadFromStackSlot(const MachineInstr *MI,
- const MachineMemOperand *&MMO,
- int &FrameIndex) const override;
- unsigned isStoreFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
- unsigned isStoreFromStackSlotPostFE(const MachineInstr *MI,
- int &FrameIndex) const;
- bool hasStoreFromStackSlot(const MachineInstr *MI,
- const MachineMemOperand *&MMO,
- int &FrameIndex) const;
-
- MachineInstr *
- convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const override;
-
-
- bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
-
- void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const override;
- void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const override;
-
-protected:
- MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt,
- int FrameIndex) const override;
- MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt,
- MachineInstr *LoadMI) const override;
+ virtual void anchor();
public:
- /// \returns the smallest register index that will be accessed by an indirect
- /// read or write or -1 if indirect addressing is not used by this program.
- int getIndirectIndexBegin(const MachineFunction &MF) const;
-
- /// \returns the largest register index that will be accessed by an indirect
- /// read or write or -1 if indirect addressing is not used by this program.
- int getIndirectIndexEnd(const MachineFunction &MF) const;
-
- bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
- unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
- SmallVectorImpl<MachineInstr *> &NewMIs) const override;
- bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
- SmallVectorImpl<SDNode *> &NewNodes) const override;
- unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
- bool UnfoldLoad, bool UnfoldStore,
- unsigned *LoadRegIndex = nullptr) const override;
+  explicit AMDGPUInstrInfo(const AMDGPUSubtarget &ST);
bool enableClusterLoads() const override;
@@ -118,81 +50,14 @@ public:
int64_t Offset1, int64_t Offset2,
unsigned NumLoads) const override;
- bool
- ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
- void insertNoop(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const override;
- bool isPredicated(const MachineInstr *MI) const override;
- bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
- ArrayRef<MachineOperand> Pred2) const override;
- bool DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const override;
- bool isPredicable(MachineInstr *MI) const override;
- bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;
-
- // Helper functions that check the opcode for status information
- bool isRegisterStore(const MachineInstr &MI) const;
- bool isRegisterLoad(const MachineInstr &MI) const;
-
/// \brief Return a target-specific opcode if Opcode is a pseudo instruction.
/// Return -1 if the target-specific opcode for the pseudo instruction does
/// not exist. If Opcode is not a pseudo instruction, this is identity.
int pseudoToMCOpcode(int Opcode) const;
- /// \brief Return the descriptor of the target-specific machine instruction
- /// that corresponds to the specified pseudo or native opcode.
- const MCInstrDesc &getMCOpcodeFromPseudo(unsigned Opcode) const {
- return get(pseudoToMCOpcode(Opcode));
- }
-
- ArrayRef<std::pair<int, const char *>>
- getSerializableTargetIndices() const override;
-
-//===---------------------------------------------------------------------===//
-// Pure virtual funtions to be implemented by sub-classes.
-//===---------------------------------------------------------------------===//
-
- virtual bool isMov(unsigned opcode) const = 0;
-
- /// \brief Calculate the "Indirect Address" for the given \p RegIndex and
- /// \p Channel
- ///
- /// We model indirect addressing using a virtual address space that can be
- /// accesed with loads and stores. The "Indirect Address" is the memory
- /// address in this virtual address space that maps to the given \p RegIndex
- /// and \p Channel.
- virtual unsigned calculateIndirectAddress(unsigned RegIndex,
- unsigned Channel) const = 0;
-
- /// \returns The register class to be used for loading and storing values
- /// from an "Indirect Address" .
- virtual const TargetRegisterClass *getIndirectAddrRegClass() const = 0;
-
- /// \brief Build instruction(s) for an indirect register write.
- ///
- /// \returns The instruction that performs the indirect register write
- virtual MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- unsigned ValueReg, unsigned Address,
- unsigned OffsetReg) const = 0;
-
- /// \brief Build instruction(s) for an indirect register read.
- ///
- /// \returns The instruction that performs the indirect register read
- virtual MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- unsigned ValueReg, unsigned Address,
- unsigned OffsetReg) const = 0;
-
- /// \brief Build a MOV instruction.
- virtual MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- unsigned DstReg, unsigned SrcReg) const = 0;
-
/// \brief Given a MIMG \p Opcode that writes all 4 channels, return the
/// equivalent opcode that writes \p Channels Channels.
int getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const;
-
};
namespace AMDGPU {
diff --git a/lib/Target/AMDGPU/AMDGPUInstrInfo.td b/lib/Target/AMDGPU/AMDGPUInstrInfo.td
index 575dfe413658..2b13bb9079ea 100644
--- a/lib/Target/AMDGPU/AMDGPUInstrInfo.td
+++ b/lib/Target/AMDGPU/AMDGPUInstrInfo.td
@@ -44,6 +44,11 @@ def AMDGPUFmasOp : SDTypeProfile<1, 4,
// AMDGPU DAG Nodes
//
+def AMDGPUconstdata_ptr : SDNode<
+ "AMDGPUISD::CONST_DATA_PTR", SDTypeProfile <1, 1, [SDTCisVT<0, iPTR>,
+                                                     SDTCisVT<1, iPTR>]>
+>;
+
// This argument to this node is a dword address.
def AMDGPUdwordaddr : SDNode<"AMDGPUISD::DWORDADDR", SDTIntUnaryOp>;
@@ -63,7 +68,7 @@ def AMDGPUrsq : SDNode<"AMDGPUISD::RSQ", SDTFPUnaryOp>;
def AMDGPUrsq_legacy : SDNode<"AMDGPUISD::RSQ_LEGACY", SDTFPUnaryOp>;
// out = 1.0 / sqrt(a) result clamped to +/- max_float.
-def AMDGPUrsq_clamped : SDNode<"AMDGPUISD::RSQ_CLAMPED", SDTFPUnaryOp>;
+def AMDGPUrsq_clamp : SDNode<"AMDGPUISD::RSQ_CLAMP", SDTFPUnaryOp>;
def AMDGPUldexp : SDNode<"AMDGPUISD::LDEXP", AMDGPULdExpOp>;
@@ -183,6 +188,11 @@ def AMDGPUstore_mskor : SDNode<"AMDGPUISD::STORE_MSKOR",
SDTypeProfile<0, 2, []>,
[SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+def AMDGPUatomic_cmp_swap : SDNode<"AMDGPUISD::ATOMIC_CMP_SWAP",
+ SDTypeProfile<1, 2, [SDTCisPtrTy<1>, SDTCisVec<2>]>,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
+ SDNPMemOperand]>;
+
def AMDGPUround : SDNode<"ISD::FROUND",
SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>>;
@@ -209,6 +219,16 @@ def AMDGPUmad_i24 : SDNode<"AMDGPUISD::MAD_I24", AMDGPUDTIntTernaryOp,
[]
>;
+def AMDGPUsmed3 : SDNode<"AMDGPUISD::SMED3", AMDGPUDTIntTernaryOp,
+ []
+>;
+
+def AMDGPUumed3 : SDNode<"AMDGPUISD::UMED3", AMDGPUDTIntTernaryOp,
+ []
+>;
+
+def AMDGPUfmed3 : SDNode<"AMDGPUISD::FMED3", SDTFPTernaryOp, []>;
+
def AMDGPUsendmsg : SDNode<"AMDGPUISD::SENDMSG",
SDTypeProfile<0, 1, [SDTCisInt<0>]>,
[SDNPHasChain, SDNPInGlue]>;
@@ -241,5 +261,8 @@ def IL_brcond : SDNode<"AMDGPUISD::BRANCH_COND", SDTIL_BRCond, [SDNPHasChai
//===----------------------------------------------------------------------===//
// Call/Return DAG Nodes
//===----------------------------------------------------------------------===//
-def IL_retflag : SDNode<"AMDGPUISD::RET_FLAG", SDTNone,
+def AMDGPUendpgm : SDNode<"AMDGPUISD::ENDPGM", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue]>;
+
+def AMDGPUreturn : SDNode<"AMDGPUISD::RETURN", SDTNone,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
diff --git a/lib/Target/AMDGPU/AMDGPUInstructions.td b/lib/Target/AMDGPU/AMDGPUInstructions.td
index 2a7ce6a47176..6761b4b5df95 100644
--- a/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -12,7 +12,8 @@
//
//===----------------------------------------------------------------------===//
-class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern> : Instruction {
+class AMDGPUInst <dag outs, dag ins, string asm = "",
+ list<dag> pattern = []> : Instruction {
field bit isRegisterLoad = 0;
field bit isRegisterStore = 0;
@@ -23,15 +24,22 @@ class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern> : Instructio
let Pattern = pattern;
let Itinerary = NullALU;
+  // SoftFail is a field the disassembler can use to let an instruction fail
+  // to match without killing the whole decode process. It is mainly used for
+  // ARM, but Tablegen expects this field to exist or it fails to build the
+  // decode table.
+ field bits<64> SoftFail = 0;
+
+ let DecoderNamespace = Namespace;
+
let TSFlags{63} = isRegisterLoad;
let TSFlags{62} = isRegisterStore;
}
-class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
- : AMDGPUInst<outs, ins, asm, pattern> {
+class AMDGPUShaderInst <dag outs, dag ins, string asm = "",
+ list<dag> pattern = []> : AMDGPUInst<outs, ins, asm, pattern> {
field bits<32> Inst = 0xffffffff;
-
}
def FP32Denormals : Predicate<"Subtarget.hasFP32Denormals()">;
@@ -41,6 +49,13 @@ def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;
def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;
+// 32-bit VALU immediate operand that uses the constant bus.
+def u32kimm : Operand<i32> {
+ let OperandNamespace = "AMDGPU";
+ let OperandType = "OPERAND_KIMM32";
+ let PrintMethod = "printU32ImmOperand";
+}
+
let OperandType = "OPERAND_IMMEDIATE" in {
def u32imm : Operand<i32> {
@@ -146,6 +161,17 @@ def COND_NULL : PatLeaf <
[{(void)N; return false;}]
>;
+
+//===----------------------------------------------------------------------===//
+// Misc. PatFrags
+//===----------------------------------------------------------------------===//
+
+class HasOneUseBinOp<SDPatternOperator op> : PatFrag<
+ (ops node:$src0, node:$src1),
+ (op $src0, $src1),
+ [{ return N->hasOneUse(); }]
+>;
+
//===----------------------------------------------------------------------===//
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//
@@ -168,21 +194,58 @@ def truncstorei8_private : PrivateStore <truncstorei8>;
def truncstorei16_private : PrivateStore <truncstorei16>;
def store_private : PrivateStore <store>;
-def global_store : PatFrag<(ops node:$val, node:$ptr),
- (store node:$val, node:$ptr), [{
- return isGlobalStore(dyn_cast<StoreSDNode>(N));
+class GlobalMemOp <dag ops, dag frag> : PatFrag <ops, frag, [{
+ return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;
// Global address space loads
-def global_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+class GlobalLoad <SDPatternOperator op> : GlobalMemOp <
+ (ops node:$ptr), (op node:$ptr)
+>;
+
+def global_load : GlobalLoad <load>;
+
+// Global address space stores
+class GlobalStore <SDPatternOperator op> : GlobalMemOp <
+ (ops node:$value, node:$ptr), (op node:$value, node:$ptr)
+>;
+
+def global_store : GlobalStore <store>;
+def global_store_atomic : GlobalStore<atomic_store>;
+
+
+class ConstantMemOp <dag ops, dag frag> : PatFrag <ops, frag, [{
+ return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;
}]>;
// Constant address space loads
-def constant_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+class ConstantLoad <SDPatternOperator op> : ConstantMemOp <
+ (ops node:$ptr), (op node:$ptr)
+>;
+
+def constant_load : ConstantLoad<load>;
+
+class LocalMemOp <dag ops, dag frag> : PatFrag <ops, frag, [{
+ return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
+}]>;
+
+// Local address space loads
+class LocalLoad <SDPatternOperator op> : LocalMemOp <
+ (ops node:$ptr), (op node:$ptr)
+>;
+
+class LocalStore <SDPatternOperator op> : LocalMemOp <
+ (ops node:$value, node:$ptr), (op node:$value, node:$ptr)
+>;
+
+class FlatMemOp <dag ops, dag frag> : PatFrag <ops, frag, [{
+  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
}]>;
+class FlatLoad <SDPatternOperator op> : FlatMemOp <
+ (ops node:$ptr), (op node:$ptr)
+>;
+
class AZExtLoadBase <SDPatternOperator ld_node>: PatFrag<(ops node:$ptr),
(ld_node node:$ptr), [{
LoadSDNode *L = cast<LoadSDNode>(N);
@@ -196,29 +259,14 @@ def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
-def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
- return isGlobalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
- return isGlobalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
+def az_extloadi8_global : GlobalLoad <az_extloadi8>;
+def sextloadi8_global : GlobalLoad <sextloadi8>;
-def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
- return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
-}]>;
+def az_extloadi8_constant : ConstantLoad <az_extloadi8>;
+def sextloadi8_constant : ConstantLoad <sextloadi8>;
-def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
- return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
-}]>;
-
-def az_extloadi8_local : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
- return isLocalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def sextloadi8_local : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
- return isLocalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
+def az_extloadi8_local : LocalLoad <az_extloadi8>;
+def sextloadi8_local : LocalLoad <sextloadi8>;
def extloadi8_private : PrivateLoad <az_extloadi8>;
def sextloadi8_private : PrivateLoad <sextloadi8>;
@@ -227,29 +275,14 @@ def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
-def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
- return isGlobalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
- return isGlobalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
- return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
-}]>;
-
-def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
- return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
-}]>;
+def az_extloadi16_global : GlobalLoad <az_extloadi16>;
+def sextloadi16_global : GlobalLoad <sextloadi16>;
-def az_extloadi16_local : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
- return isLocalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
+def az_extloadi16_constant : ConstantLoad <az_extloadi16>;
+def sextloadi16_constant : ConstantLoad <sextloadi16>;
-def sextloadi16_local : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
- return isLocalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
+def az_extloadi16_local : LocalLoad <az_extloadi16>;
+def sextloadi16_local : LocalLoad <sextloadi16>;
def extloadi16_private : PrivateLoad <az_extloadi16>;
def sextloadi16_private : PrivateLoad <sextloadi16>;
@@ -258,49 +291,20 @@ def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
-def az_extloadi32_global : PatFrag<(ops node:$ptr),
- (az_extloadi32 node:$ptr), [{
- return isGlobalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
+def az_extloadi32_global : GlobalLoad <az_extloadi32>;
-def az_extloadi32_flat : PatFrag<(ops node:$ptr),
- (az_extloadi32 node:$ptr), [{
- return isFlatLoad(dyn_cast<LoadSDNode>(N));
-}]>;
+def az_extloadi32_flat : FlatLoad <az_extloadi32>;
-def az_extloadi32_constant : PatFrag<(ops node:$ptr),
- (az_extloadi32 node:$ptr), [{
- return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
-}]>;
+def az_extloadi32_constant : ConstantLoad <az_extloadi32>;
-def truncstorei8_global : PatFrag<(ops node:$val, node:$ptr),
- (truncstorei8 node:$val, node:$ptr), [{
- return isGlobalStore(dyn_cast<StoreSDNode>(N));
-}]>;
-
-def truncstorei16_global : PatFrag<(ops node:$val, node:$ptr),
- (truncstorei16 node:$val, node:$ptr), [{
- return isGlobalStore(dyn_cast<StoreSDNode>(N));
-}]>;
+def truncstorei8_global : GlobalStore <truncstorei8>;
+def truncstorei16_global : GlobalStore <truncstorei16>;
-def local_store : PatFrag<(ops node:$val, node:$ptr),
- (store node:$val, node:$ptr), [{
- return isLocalStore(dyn_cast<StoreSDNode>(N));
-}]>;
+def local_store : LocalStore <store>;
+def truncstorei8_local : LocalStore <truncstorei8>;
+def truncstorei16_local : LocalStore <truncstorei16>;
-def truncstorei8_local : PatFrag<(ops node:$val, node:$ptr),
- (truncstorei8 node:$val, node:$ptr), [{
- return isLocalStore(dyn_cast<StoreSDNode>(N));
-}]>;
-
-def truncstorei16_local : PatFrag<(ops node:$val, node:$ptr),
- (truncstorei16 node:$val, node:$ptr), [{
- return isLocalStore(dyn_cast<StoreSDNode>(N));
-}]>;
-
-def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- return isLocalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
+def local_load : LocalLoad <load>;
class Aligned8Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
return cast<MemSDNode>(N)->getAlignment() % 8 == 0;
@@ -370,6 +374,12 @@ class global_binary_atomic_op<SDNode atomic_op> : PatFrag<
[{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;}]
>;
+class flat_binary_atomic_op<SDNode atomic_op> : PatFrag<
+ (ops node:$ptr, node:$value),
+ (atomic_op node:$ptr, node:$value),
+ [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;}]
+>;
+
def atomic_swap_global : global_binary_atomic_op<atomic_swap>;
def atomic_add_global : global_binary_atomic_op<atomic_load_add>;
def atomic_and_global : global_binary_atomic_op<atomic_load_and>;
@@ -381,6 +391,26 @@ def atomic_umax_global : global_binary_atomic_op<atomic_load_umax>;
def atomic_umin_global : global_binary_atomic_op<atomic_load_umin>;
def atomic_xor_global : global_binary_atomic_op<atomic_load_xor>;
+def atomic_cmp_swap_global : global_binary_atomic_op<AMDGPUatomic_cmp_swap>;
+def atomic_cmp_swap_global_nortn : PatFrag<
+ (ops node:$ptr, node:$value),
+ (atomic_cmp_swap_global node:$ptr, node:$value),
+ [{ return SDValue(N, 0).use_empty(); }]
+>;
+
+def atomic_swap_flat : flat_binary_atomic_op<atomic_swap>;
+def atomic_add_flat : flat_binary_atomic_op<atomic_load_add>;
+def atomic_and_flat : flat_binary_atomic_op<atomic_load_and>;
+def atomic_max_flat : flat_binary_atomic_op<atomic_load_max>;
+def atomic_min_flat : flat_binary_atomic_op<atomic_load_min>;
+def atomic_or_flat : flat_binary_atomic_op<atomic_load_or>;
+def atomic_sub_flat : flat_binary_atomic_op<atomic_load_sub>;
+def atomic_umax_flat : flat_binary_atomic_op<atomic_load_umax>;
+def atomic_umin_flat : flat_binary_atomic_op<atomic_load_umin>;
+def atomic_xor_flat : flat_binary_atomic_op<atomic_load_xor>;
+
+def atomic_cmp_swap_flat : flat_binary_atomic_op<AMDGPUatomic_cmp_swap>;
+
//===----------------------------------------------------------------------===//
// Misc Pattern Fragments
//===----------------------------------------------------------------------===//
@@ -392,6 +422,7 @@ int TWO_PI_INV = 0x3e22f983;
int FP_UINT_MAX_PLUS_1 = 0x4f800000; // 1 << 32 in floating point encoding
int FP32_NEG_ONE = 0xbf800000;
int FP32_ONE = 0x3f800000;
+int FP64_ONE = 0x3ff0000000000000;
}
def CONST : Constants;
@@ -570,6 +601,25 @@ class ROTRPattern <Instruction BIT_ALIGN> : Pat <
(BIT_ALIGN $src0, $src0, $src1)
>;
+// This matches 16 permutations of
+// max(min(x, y), min(max(x, y), z))
+class IntMed3Pat<Instruction med3Inst,
+ SDPatternOperator max,
+ SDPatternOperator max_oneuse,
+ SDPatternOperator min_oneuse> : Pat<
+ (max (min_oneuse i32:$src0, i32:$src1),
+ (min_oneuse (max_oneuse i32:$src0, i32:$src1), i32:$src2)),
+ (med3Inst $src0, $src1, $src2)
+>;
+
+let Properties = [SDNPCommutative, SDNPAssociative] in {
+def smax_oneuse : HasOneUseBinOp<smax>;
+def smin_oneuse : HasOneUseBinOp<smin>;
+def umax_oneuse : HasOneUseBinOp<umax>;
+def umin_oneuse : HasOneUseBinOp<umin>;
+} // Properties = [SDNPCommutative, SDNPAssociative]
+
+
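
The *_oneuse fragments above matter because a med3 match folds three min/max
nodes into a single instruction; an inner node with other users would have to
be kept anyway. The pattern itself encodes the classic median-of-three
identity, which this hypothetical standalone check exercises for every
argument order:

    #include <algorithm>
    #include <cassert>

    int main() {
      int Vals[] = {-5, 0, 7}; // sorted, so next_permutation visits all orders
      do {
        int X = Vals[0], Y = Vals[1], Z = Vals[2];
        int Med3 = std::max(std::min(X, Y), std::min(std::max(X, Y), Z));
        assert(Med3 == 0 && "median of {-5, 0, 7} is always 0");
      } while (std::next_permutation(Vals, Vals + 3));
      return 0;
    }
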
// 24-bit arithmetic patterns
def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;
@@ -587,13 +637,6 @@ def cvt_flr_i32_f32 : PatFrag <
[{ (void)N; return TM.Options.NoNaNsFPMath; }]
>;
-/*
-class UMUL24Pattern <Instruction UMUL24> : Pat <
- (mul U24:$x, U24:$y),
- (UMUL24 $x, $y)
->;
-*/
-
class IMad24Pat<Instruction Inst> : Pat <
(add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
(Inst $src0, $src1, $src2)
@@ -604,30 +647,6 @@ class UMad24Pat<Instruction Inst> : Pat <
(Inst $src0, $src1, $src2)
>;
-multiclass Expand24IBitOps<Instruction MulInst, Instruction AddInst> {
- def _expand_imad24 : Pat <
- (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2),
- (AddInst (MulInst $src0, $src1), $src2)
- >;
-
- def _expand_imul24 : Pat <
- (AMDGPUmul_i24 i32:$src0, i32:$src1),
- (MulInst $src0, $src1)
- >;
-}
-
-multiclass Expand24UBitOps<Instruction MulInst, Instruction AddInst> {
- def _expand_umad24 : Pat <
- (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2),
- (AddInst (MulInst $src0, $src1), $src2)
- >;
-
- def _expand_umul24 : Pat <
- (AMDGPUmul_u24 i32:$src0, i32:$src1),
- (MulInst $src0, $src1)
- >;
-}
-
class RcpPat<Instruction RcpInst, ValueType vt> : Pat <
(fdiv FP_ONE, vt:$src),
(RcpInst $src)
diff --git a/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp b/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp
index e94bb6013d83..791872a9db40 100644
--- a/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp
@@ -20,46 +20,44 @@
using namespace llvm;
-#define GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
-#include "AMDGPUGenIntrinsics.inc"
-#undef GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
-
AMDGPUIntrinsicInfo::AMDGPUIntrinsicInfo()
: TargetIntrinsicInfo() {}
-std::string AMDGPUIntrinsicInfo::getName(unsigned IntrID, Type **Tys,
- unsigned numTys) const {
- static const char *const names[] = {
+static const char *const IntrinsicNameTable[] = {
#define GET_INTRINSIC_NAME_TABLE
#include "AMDGPUGenIntrinsics.inc"
#undef GET_INTRINSIC_NAME_TABLE
- };
+};
+std::string AMDGPUIntrinsicInfo::getName(unsigned IntrID, Type **Tys,
+ unsigned numTys) const {
if (IntrID < Intrinsic::num_intrinsics) {
     return std::string();
}
assert(IntrID < AMDGPUIntrinsic::num_AMDGPU_intrinsics &&
"Invalid intrinsic ID");
- std::string Result(names[IntrID - Intrinsic::num_intrinsics]);
+ std::string Result(IntrinsicNameTable[IntrID - Intrinsic::num_intrinsics]);
return Result;
}
-unsigned AMDGPUIntrinsicInfo::lookupName(const char *Name,
+unsigned AMDGPUIntrinsicInfo::lookupName(const char *NameData,
unsigned Len) const {
- if (!StringRef(Name, Len).startswith("llvm."))
+ StringRef Name(NameData, Len);
+ if (!Name.startswith("llvm."))
return 0; // All intrinsics start with 'llvm.'
-#define GET_FUNCTION_RECOGNIZER
-#include "AMDGPUGenIntrinsics.inc"
-#undef GET_FUNCTION_RECOGNIZER
- AMDGPUIntrinsic::ID IntrinsicID =
- (AMDGPUIntrinsic::ID)Intrinsic::not_intrinsic;
- IntrinsicID = getIntrinsicForGCCBuiltin("AMDGPU", Name);
-
- if (IntrinsicID != (AMDGPUIntrinsic::ID)Intrinsic::not_intrinsic) {
- return IntrinsicID;
+ // Look for a name match in our table. If the intrinsic is not overloaded,
+ // require an exact match. If it is overloaded, require a prefix match. The
+  // AMDGPU enum starts at Intrinsic::num_intrinsics.
+ int Idx = Intrinsic::lookupLLVMIntrinsicByName(IntrinsicNameTable, Name);
+ if (Idx >= 0) {
+ bool IsPrefixMatch = Name.size() > strlen(IntrinsicNameTable[Idx]);
+ return IsPrefixMatch == isOverloaded(Idx + 1)
+ ? Intrinsic::num_intrinsics + Idx
+ : 0;
}
+
return 0;
}
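
The exact-versus-prefix rule distinguishes overloaded intrinsics, whose names
carry a mangled type suffix. For instance, the non-overloaded int_AMDGPU_kill
accepts only the exact string "llvm.AMDGPU.kill", while the overloaded
int_AMDGPU_clamp (llvm_anyfloat_ty) also accepts "llvm.AMDGPU.clamp.f32",
where the query is strictly longer than the table entry. A trivial restating
of the accept rule under those assumptions:

    #include <cassert>
    #include <cstring>
    #include <string>

    int main() {
      const char *Entry = "llvm.AMDGPU.clamp";     // overloaded table entry
      std::string Query = "llvm.AMDGPU.clamp.f32"; // type-mangled query
      bool IsPrefixMatch = Query.size() > std::strlen(Entry);
      bool Overloaded = true;
      assert(IsPrefixMatch == Overloaded && "accepted by lookupName");
      return 0;
    }
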
diff --git a/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h b/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h
index 4c95b5ec0974..f4173929259c 100644
--- a/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h
+++ b/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h
@@ -11,8 +11,8 @@
/// \brief Interface for the AMDGPU Implementation of the Intrinsic Info class.
//
//===-----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUINTRINSICINFO_H
-#define LLVM_LIB_TARGET_R600_AMDGPUINTRINSICINFO_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUINTRINSICINFO_H
+#define LLVM_LIB_TARGET_AMDGPU_AMDGPUINTRINSICINFO_H
#include "llvm/IR/Intrinsics.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
@@ -31,7 +31,7 @@ enum ID {
} // end namespace AMDGPUIntrinsic
-class AMDGPUIntrinsicInfo : public TargetIntrinsicInfo {
+class AMDGPUIntrinsicInfo final : public TargetIntrinsicInfo {
public:
AMDGPUIntrinsicInfo();
std::string getName(unsigned IntrId, Type **Tys = nullptr,
diff --git a/lib/Target/AMDGPU/AMDGPUIntrinsics.td b/lib/Target/AMDGPU/AMDGPUIntrinsics.td
index 1de3546485b1..2127391f18e7 100644
--- a/lib/Target/AMDGPU/AMDGPUIntrinsics.td
+++ b/lib/Target/AMDGPU/AMDGPUIntrinsics.td
@@ -12,79 +12,26 @@
//===----------------------------------------------------------------------===//
let TargetPrefix = "AMDGPU", isTarget = 1 in {
-
- def int_AMDGPU_store_output : Intrinsic<[], [llvm_float_ty, llvm_i32_ty], []>;
- def int_AMDGPU_swizzle : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_abs : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_arl : Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
- def int_AMDGPU_cndlt : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
- def int_AMDGPU_div : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
- def int_AMDGPU_fract : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
def int_AMDGPU_clamp : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
- // This is named backwards (instead of rsq_legacy) so we don't have
- // to define it with the public builtins intrinsics. This is a
- // workaround for how intrinsic names are parsed. If the name is
- // llvm.AMDGPU.rsq.legacy, the parser assumes that you meant
- // llvm.AMDGPU.rsq.{f32 | f64} and incorrectly mangled the name.
- def int_AMDGPU_legacy_rsq : Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
-
- def int_AMDGPU_dp4 : Intrinsic<[llvm_float_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_AMDGPU_kill : Intrinsic<[], [llvm_float_ty], []>;
def int_AMDGPU_kilp : Intrinsic<[], [], []>;
- def int_AMDGPU_lrp : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
- def int_AMDGPU_mul : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
- def int_AMDGPU_pow : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
- def int_AMDGPU_seq : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
- def int_AMDGPU_sgt : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
- def int_AMDGPU_sge : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
- def int_AMDGPU_sle : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
- def int_AMDGPU_sne : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
- def int_AMDGPU_mullit : Intrinsic<[llvm_v4f32_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
- def int_AMDGPU_tex : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_txb : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_txf : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_txq : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_txd : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_txl : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_trunc : Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
- def int_AMDGPU_ddx : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_ddy : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_imax : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_imin : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_umax : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_umin : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_umul24 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_imul24 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_imad24 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_umad24 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_cvt_f32_ubyte0 : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_cvt_f32_ubyte1 : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_cvt_f32_ubyte2 : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_cvt_f32_ubyte3 : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_AMDGPU_flbit_i32 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ // Deprecated in favor of separate int_amdgcn_cube* intrinsics.
def int_AMDGPU_cube : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
- def int_AMDGPU_bfi : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ // Deprecated in favor of expanded bit operations
def int_AMDGPU_bfe_i32 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_AMDGPU_bfe_u32 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_bfm : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_brev : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_flbit_i32 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
- def int_AMDGPU_barrier_local : Intrinsic<[], [], [IntrConvergent]>;
- def int_AMDGPU_barrier_global : Intrinsic<[], [], [IntrConvergent]>;
-}
-
-// Legacy names for compatibility.
-let TargetPrefix = "AMDIL", isTarget = 1 in {
- def int_AMDIL_abs : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
- def int_AMDIL_fraction : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
- def int_AMDIL_clamp : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
- def int_AMDIL_exp : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
- def int_AMDIL_round_nearest : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
-}
-let TargetPrefix = "TGSI", isTarget = 1 in {
+ // Deprecated in favor of llvm.amdgcn.rsq
+ def int_AMDGPU_rsq : Intrinsic<
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
+ >;
- def int_TGSI_lit_z : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],[IntrNoMem]>;
+ // Deprecated in favor of llvm.amdgcn.read.workdim
+ def int_AMDGPU_read_workdim : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
}
include "SIIntrinsics.td"
diff --git a/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp b/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
index dfc652f31da5..ad8d3e4d3545 100644
--- a/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
+++ b/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
@@ -15,9 +15,9 @@
#include "AMDGPUMCInstLower.h"
#include "AMDGPUAsmPrinter.h"
+#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "InstPrinter/AMDGPUInstPrinter.h"
-#include "R600InstrInfo.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
@@ -37,8 +37,14 @@
using namespace llvm;
AMDGPUMCInstLower::AMDGPUMCInstLower(MCContext &ctx, const AMDGPUSubtarget &st):
- Ctx(ctx), ST(st)
-{ }
+ Ctx(ctx), ST(st) { }
+
+static MCSymbolRefExpr::VariantKind getVariantKind(unsigned MOFlags) {
+ switch (MOFlags) {
+ default: return MCSymbolRefExpr::VK_None;
+ case SIInstrInfo::MO_GOTPCREL: return MCSymbolRefExpr::VK_GOTPCREL;
+ }
+}
void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
@@ -70,11 +76,16 @@ void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
case MachineOperand::MO_GlobalAddress: {
const GlobalValue *GV = MO.getGlobal();
MCSymbol *Sym = Ctx.getOrCreateSymbol(StringRef(GV->getName()));
- MCOp = MCOperand::createExpr(MCSymbolRefExpr::create(Sym, Ctx));
+ const MCExpr *SymExpr =
+ MCSymbolRefExpr::create(Sym, getVariantKind(MO.getTargetFlags()), Ctx);
+ const MCExpr *Expr = MCBinaryExpr::createAdd(SymExpr,
+ MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
+ MCOp = MCOperand::createExpr(Expr);
break;
}
case MachineOperand::MO_ExternalSymbol: {
MCSymbol *Sym = Ctx.getOrCreateSymbol(StringRef(MO.getSymbolName()));
+ Sym->setExternal(true);
const MCSymbolRefExpr *Expr = MCSymbolRefExpr::create(Sym, Ctx);
MCOp = MCOperand::createExpr(Expr);
break;
@@ -88,13 +99,13 @@ void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const AMDGPUSubtarget &STI = MF->getSubtarget<AMDGPUSubtarget>();
AMDGPUMCInstLower MCInstLowering(OutContext, STI);
-#ifdef _DEBUG
StringRef Err;
- if (!STI.getInstrInfo()->verifyInstruction(MI, Err)) {
- errs() << "Warning: Illegal instruction detected: " << Err << "\n";
+ if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) {
+ LLVMContext &C = MI->getParent()->getParent()->getFunction()->getContext();
+ C.emitError("Illegal instruction detected: " + Err);
MI->dump();
}
-#endif
+
if (MI->isBundle()) {
const MachineBasicBlock *MBB = MI->getParent();
MachineBasicBlock::const_instr_iterator I = ++MI->getIterator();
@@ -103,6 +114,29 @@ void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
++I;
}
} else {
+ // We don't want SI_MASK_BRANCH/SI_RETURN encoded. They are placeholder
+ // terminator instructions and should only be printed as comments.
+ if (MI->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
+ if (isVerbose()) {
+ SmallVector<char, 16> BBStr;
+ raw_svector_ostream Str(BBStr);
+
+ const MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
+ const MCSymbolRefExpr *Expr
+ = MCSymbolRefExpr::create(MBB->getSymbol(), OutContext);
+ Expr->print(Str, MAI);
+ OutStreamer->emitRawComment(" mask branch " + BBStr);
+ }
+
+ return;
+ }
+
+ if (MI->getOpcode() == AMDGPU::SI_RETURN) {
+ if (isVerbose())
+ OutStreamer->emitRawComment(" return");
+ return;
+ }
+
MCInst TmpInst;
MCInstLowering.lower(MI, TmpInst);
EmitToStreamer(*OutStreamer, TmpInst);
@@ -114,10 +148,9 @@ void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
raw_string_ostream DisasmStream(DisasmLine);
AMDGPUInstPrinter InstPrinter(*TM.getMCAsmInfo(),
- *MF->getSubtarget().getInstrInfo(),
- *MF->getSubtarget().getRegisterInfo());
- InstPrinter.printInst(&TmpInst, DisasmStream, StringRef(),
- MF->getSubtarget());
+ *STI.getInstrInfo(),
+ *STI.getRegisterInfo());
+ InstPrinter.printInst(&TmpInst, DisasmStream, StringRef(), STI);
// Disassemble instruction/operands to hex representation.
SmallVector<MCFixup, 4> Fixups;
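
The MO_GlobalAddress case above now always folds the operand offset into the
MC expression and tags the symbol reference with a variant kind, so a
GOT-relative access prints as "sym@GOTPCREL + offset". A self-contained sketch
of the same construction (buildGlobalExpr is a hypothetical name, not an API
in the tree):

#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"

using namespace llvm;

// Build "sym + offset", optionally with a GOTPCREL variant on the symbol,
// mirroring the MO_GlobalAddress lowering in the hunk above.
static const MCExpr *buildGlobalExpr(MCContext &Ctx, MCSymbol *Sym,
                                     int64_t Offset, bool IsGOTPCREL) {
  MCSymbolRefExpr::VariantKind Kind =
      IsGOTPCREL ? MCSymbolRefExpr::VK_GOTPCREL : MCSymbolRefExpr::VK_None;
  const MCExpr *SymExpr = MCSymbolRefExpr::create(Sym, Kind, Ctx);
  return MCBinaryExpr::createAdd(SymExpr,
                                 MCConstantExpr::create(Offset, Ctx), Ctx);
}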
diff --git a/lib/Target/AMDGPU/AMDGPUMCInstLower.h b/lib/Target/AMDGPU/AMDGPUMCInstLower.h
index d322fe072b2b..957dcd0de8ef 100644
--- a/lib/Target/AMDGPU/AMDGPUMCInstLower.h
+++ b/lib/Target/AMDGPU/AMDGPUMCInstLower.h
@@ -8,8 +8,8 @@
/// \file
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUMCINSTLOWER_H
-#define LLVM_LIB_TARGET_R600_AMDGPUMCINSTLOWER_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUMCINSTLOWER_H
+#define LLVM_LIB_TARGET_AMDGPU_AMDGPUMCINSTLOWER_H
namespace llvm {
diff --git a/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp b/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
index 54137177e4c0..44516dab04f1 100644
--- a/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
+++ b/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
@@ -1,8 +1,5 @@
#include "AMDGPUMachineFunction.h"
-#include "AMDGPU.h"
-#include "Utils/AMDGPUBaseInfo.h"
-#include "llvm/IR/Attributes.h"
-#include "llvm/IR/Function.h"
+
using namespace llvm;
// Pin the vtable to this file.
@@ -10,11 +7,17 @@ void AMDGPUMachineFunction::anchor() {}
AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) :
MachineFunctionInfo(),
- ShaderType(ShaderType::COMPUTE),
+ KernArgSize(0),
+ MaxKernArgAlign(0),
LDSSize(0),
ABIArgOffset(0),
ScratchSize(0),
- IsKernel(true) {
+ IsKernel(MF.getFunction()->getCallingConv() == llvm::CallingConv::AMDGPU_KERNEL ||
+ MF.getFunction()->getCallingConv() == llvm::CallingConv::SPIR_KERNEL) {
+}
- ShaderType = AMDGPU::getShaderType(*MF.getFunction());
+bool AMDGPUMachineFunction::isKernel() const {
+ return IsKernel;
}
diff --git a/lib/Target/AMDGPU/AMDGPUMachineFunction.h b/lib/Target/AMDGPU/AMDGPUMachineFunction.h
index 46fcee874887..6b31f63e1a9d 100644
--- a/lib/Target/AMDGPU/AMDGPUMachineFunction.h
+++ b/lib/Target/AMDGPU/AMDGPUMachineFunction.h
@@ -1,4 +1,4 @@
-//===-- R600MachineFunctionInfo.h - R600 Machine Function Info ----*- C++ -*-=//
+//===-- AMDGPUMachineFunctionInfo.h -------------------------------*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -6,12 +6,9 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-/// \file
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUMACHINEFUNCTION_H
-#define LLVM_LIB_TARGET_R600_AMDGPUMACHINEFUNCTION_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUMACHINEFUNCTION_H
+#define LLVM_LIB_TARGET_AMDGPU_AMDGPUMACHINEFUNCTION_H
#include "llvm/CodeGen/MachineFunction.h"
#include <map>
@@ -19,11 +16,25 @@
namespace llvm {
class AMDGPUMachineFunction : public MachineFunctionInfo {
+ uint64_t KernArgSize;
+ unsigned MaxKernArgAlign;
+
virtual void anchor();
- unsigned ShaderType;
public:
AMDGPUMachineFunction(const MachineFunction &MF);
+
+ uint64_t allocateKernArg(uint64_t Size, unsigned Align) {
+ assert(isPowerOf2_32(Align));
+ KernArgSize = alignTo(KernArgSize, Align);
+
+ uint64_t Result = KernArgSize;
+ KernArgSize += Size;
+
+ MaxKernArgAlign = std::max(Align, MaxKernArgAlign);
+ return Result;
+ }
+
/// A map to keep track of local memory objects and their offsets within
/// the local memory space.
std::map<const GlobalValue *, unsigned> LocalMemoryObjects;
@@ -33,14 +44,7 @@ public:
/// Start of implicit kernel args
unsigned ABIArgOffset;
- unsigned getShaderType() const {
- return ShaderType;
- }
-
- bool isKernel() const {
- // FIXME: Assume everything is a kernel until function calls are supported.
- return true;
- }
+ bool isKernel() const;
unsigned ScratchSize;
bool IsKernel;
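
allocateKernArg above is a bump allocator: each request first rounds the
running offset up to the argument's alignment, then advances it by the
argument's size, while tracking the largest alignment seen. A self-contained
sketch of the same layout rule, with llvm::alignTo spelled out as bit
arithmetic (the argument sizes are an arbitrary example):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>

static uint64_t KernArgSize = 0;
static unsigned MaxKernArgAlign = 0;

// Same logic as AMDGPUMachineFunction::allocateKernArg, restated standalone.
uint64_t allocateKernArg(uint64_t Size, unsigned Align) {
  assert((Align & (Align - 1)) == 0 && "alignment must be a power of two");
  KernArgSize = (KernArgSize + Align - 1) & ~uint64_t(Align - 1); // alignTo
  uint64_t Result = KernArgSize;
  KernArgSize += Size;
  MaxKernArgAlign = std::max(Align, MaxKernArgAlign);
  return Result;
}

int main() {
  std::cout << allocateKernArg(4, 4) << '\n';   // 0:  e.g. an i32
  std::cout << allocateKernArg(16, 16) << '\n'; // 16: e.g. <4 x float>; bytes 4..15 are padding
  std::cout << allocateKernArg(8, 8) << '\n';   // 32
}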
diff --git a/lib/Target/AMDGPU/AMDGPUOpenCLImageTypeLoweringPass.cpp b/lib/Target/AMDGPU/AMDGPUOpenCLImageTypeLoweringPass.cpp
index 554bf1da81f5..8bc7b53435be 100644
--- a/lib/Target/AMDGPU/AMDGPUOpenCLImageTypeLoweringPass.cpp
+++ b/lib/Target/AMDGPU/AMDGPUOpenCLImageTypeLoweringPass.cpp
@@ -25,7 +25,6 @@
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
-#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/Passes.h"
diff --git a/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index 87d50d587059..775463809634 100644
--- a/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -16,7 +16,8 @@
#include "AMDGPUSubtarget.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -26,79 +27,317 @@ using namespace llvm;
namespace {
-class AMDGPUPromoteAlloca : public FunctionPass,
- public InstVisitor<AMDGPUPromoteAlloca> {
-
- static char ID;
+// FIXME: This can create globals so should be a module pass.
+class AMDGPUPromoteAlloca : public FunctionPass {
+private:
+ const TargetMachine *TM;
Module *Mod;
- const AMDGPUSubtarget &ST;
- int LocalMemAvailable;
+ const DataLayout *DL;
+ MDNode *MaxWorkGroupSizeRange;
+
+ // FIXME: This should be per-kernel.
+ uint32_t LocalMemLimit;
+ uint32_t CurrentLocalMemUsage;
+
+ bool IsAMDGCN;
+ bool IsAMDHSA;
+
+ std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
+ Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);
+
+ /// BaseAlloca is the alloca root the search started from.
+ /// Val may be that alloca or a recursive user of it.
+ bool collectUsesWithPtrTypes(Value *BaseAlloca,
+ Value *Val,
+ std::vector<Value*> &WorkList) const;
+
+ /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
+ /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
+ /// Returns true if both operands are derived from the same alloca. Val should
+ /// be the same value as one of the input operands of UseInst.
+ bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
+ Instruction *UseInst,
+ int OpIdx0, int OpIdx1) const;
public:
- AMDGPUPromoteAlloca(const AMDGPUSubtarget &st) : FunctionPass(ID), ST(st),
- LocalMemAvailable(0) { }
+ static char ID;
+
+ AMDGPUPromoteAlloca(const TargetMachine *TM_ = nullptr) :
+ FunctionPass(ID),
+ TM(TM_),
+ Mod(nullptr),
+ DL(nullptr),
+ MaxWorkGroupSizeRange(nullptr),
+ LocalMemLimit(0),
+ CurrentLocalMemUsage(0),
+ IsAMDGCN(false),
+ IsAMDHSA(false) { }
+
bool doInitialization(Module &M) override;
bool runOnFunction(Function &F) override;
- const char *getPassName() const override { return "AMDGPU Promote Alloca"; }
- void visitAlloca(AllocaInst &I);
+
+ const char *getPassName() const override {
+ return "AMDGPU Promote Alloca";
+ }
+
+ void handleAlloca(AllocaInst &I);
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ FunctionPass::getAnalysisUsage(AU);
+ }
};
} // End anonymous namespace
char AMDGPUPromoteAlloca::ID = 0;
+INITIALIZE_TM_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
+ "AMDGPU promote alloca to vector or LDS", false, false)
+
+char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
+
bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
+ if (!TM)
+ return false;
+
Mod = &M;
+ DL = &Mod->getDataLayout();
+
+ // The maximum workitem id.
+ //
+ // FIXME: Should be a subtarget property. The runtime-enforced maximum is
+ // usually 256.
+ MDBuilder MDB(Mod->getContext());
+ MaxWorkGroupSizeRange = MDB.createRange(APInt(32, 0), APInt(32, 2048));
+
+ const Triple &TT = TM->getTargetTriple();
+
+ IsAMDGCN = TT.getArch() == Triple::amdgcn;
+ IsAMDHSA = TT.getOS() == Triple::AMDHSA;
+
return false;
}
bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
+ if (!TM || skipFunction(F))
+ return false;
- FunctionType *FTy = F.getFunctionType();
-
- LocalMemAvailable = ST.getLocalMemorySize();
+ const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);
+ if (!ST.isPromoteAllocaEnabled())
+ return false;
+ FunctionType *FTy = F.getFunctionType();
// If the function has any arguments in the local address space, then it's
// possible these arguments require the entire local memory space, so
// we cannot use local memory in the pass.
- for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
- Type *ParamTy = FTy->getParamType(i);
- if (ParamTy->isPointerTy() &&
- ParamTy->getPointerAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
- LocalMemAvailable = 0;
- DEBUG(dbgs() << "Function has local memory argument. Promoting to "
+ for (Type *ParamTy : FTy->params()) {
+ PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
+ if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
+ LocalMemLimit = 0;
+ DEBUG(dbgs() << "Function has local memory argument. Promoting to "
"local memory disabled.\n");
- break;
+ return false;
}
}
- if (LocalMemAvailable > 0) {
- // Check how much local memory is being used by global objects
- for (Module::global_iterator I = Mod->global_begin(),
- E = Mod->global_end(); I != E; ++I) {
- GlobalVariable *GV = &*I;
- PointerType *GVTy = GV->getType();
- if (GVTy->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
+ LocalMemLimit = ST.getLocalMemorySize();
+ if (LocalMemLimit == 0)
+ return false;
+
+ const DataLayout &DL = Mod->getDataLayout();
+
+ // Check how much local memory is being used by global objects
+ CurrentLocalMemUsage = 0;
+ for (GlobalVariable &GV : Mod->globals()) {
+ if (GV.getType()->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
+ continue;
+
+ for (const User *U : GV.users()) {
+ const Instruction *Use = dyn_cast<Instruction>(U);
+ if (!Use)
continue;
- for (Value::use_iterator U = GV->use_begin(),
- UE = GV->use_end(); U != UE; ++U) {
- Instruction *Use = dyn_cast<Instruction>(*U);
- if (!Use)
- continue;
- if (Use->getParent()->getParent() == &F)
- LocalMemAvailable -=
- Mod->getDataLayout().getTypeAllocSize(GVTy->getElementType());
+
+ if (Use->getParent()->getParent() == &F) {
+ unsigned Align = GV.getAlignment();
+ if (Align == 0)
+ Align = DL.getABITypeAlignment(GV.getValueType());
+
+ // FIXME: Try to account for padding here. The padding is currently
+ // determined from the inverse order of uses in the function. I'm not
+ // sure if the use list order is in any way connected to this, so the
+ // total reported size is likely incorrect.
+ uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
+ CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
+ CurrentLocalMemUsage += AllocSize;
+ break;
}
}
}
- LocalMemAvailable = std::max(0, LocalMemAvailable);
- DEBUG(dbgs() << LocalMemAvailable << "bytes free in local memory.\n");
+ unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage);
- visit(F);
+ // Restrict local memory usage so that we don't drastically reduce occupancy,
+ // unless it is already significantly reduced.
- return false;
+ // TODO: Have some sort of hint or other heuristics to guess occupancy based
+ // on other factors.
+ unsigned OccupancyHint
+ = AMDGPU::getIntegerAttribute(F, "amdgpu-max-waves-per-eu", 0);
+ if (OccupancyHint == 0)
+ OccupancyHint = 7;
+
+ // Clamp to max value.
+ OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerCU());
+
+ // Check the hint but ignore it if it's obviously wrong from the existing LDS
+ // usage.
+ MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);
+
+ // Round up to the next tier of usage.
+ unsigned MaxSizeWithWaveCount
+ = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy);
+
+ // Program is possibly broken by using more local mem than available.
+ if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
+ return false;
+
+ LocalMemLimit = MaxSizeWithWaveCount;
+
+ DEBUG(
+ dbgs() << F.getName() << " uses " << CurrentLocalMemUsage << " bytes of LDS\n"
+ << " Rounding size to " << MaxSizeWithWaveCount
+ << " with a maximum occupancy of " << MaxOccupancy << '\n'
+ << " and " << (LocalMemLimit - CurrentLocalMemUsage)
+ << " available for promotion\n"
+ );
+
+ BasicBlock &EntryBB = *F.begin();
+ for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E; ) {
+ AllocaInst *AI = dyn_cast<AllocaInst>(I);
+
+ ++I;
+ if (AI)
+ handleAlloca(*AI);
+ }
+
+ return true;
+}
+
+std::pair<Value *, Value *>
+AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
+ if (!IsAMDHSA) {
+ Function *LocalSizeYFn
+ = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
+ Function *LocalSizeZFn
+ = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);
+
+ CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
+ CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});
+
+ LocalSizeY->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);
+ LocalSizeZ->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);
+
+ return std::make_pair(LocalSizeY, LocalSizeZ);
+ }
+
+ // We must read the size out of the dispatch pointer.
+ assert(IsAMDGCN);
+
+ // We are indexing into this struct, and want to extract the workgroup_size_*
+ // fields.
+ //
+ // typedef struct hsa_kernel_dispatch_packet_s {
+ // uint16_t header;
+ // uint16_t setup;
+ // uint16_t workgroup_size_x ;
+ // uint16_t workgroup_size_y;
+ // uint16_t workgroup_size_z;
+ // uint16_t reserved0;
+ // uint32_t grid_size_x ;
+ // uint32_t grid_size_y ;
+ // uint32_t grid_size_z;
+ //
+ // uint32_t private_segment_size;
+ // uint32_t group_segment_size;
+ // uint64_t kernel_object;
+ //
+ // #ifdef HSA_LARGE_MODEL
+ // void *kernarg_address;
+ // #elif defined HSA_LITTLE_ENDIAN
+ // void *kernarg_address;
+ // uint32_t reserved1;
+ // #else
+ // uint32_t reserved1;
+ // void *kernarg_address;
+ // #endif
+ // uint64_t reserved2;
+ // hsa_signal_t completion_signal; // uint64_t wrapper
+ // } hsa_kernel_dispatch_packet_t
+ //
+ Function *DispatchPtrFn
+ = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
+
+ CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
+ DispatchPtr->addAttribute(AttributeSet::ReturnIndex, Attribute::NoAlias);
+ DispatchPtr->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
+
+ // Size of the dispatch packet struct.
+ DispatchPtr->addDereferenceableAttr(AttributeSet::ReturnIndex, 64);
+
+ Type *I32Ty = Type::getInt32Ty(Mod->getContext());
+ Value *CastDispatchPtr = Builder.CreateBitCast(
+ DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));
+
+ // We could do a single 64-bit load here, but it's likely that the basic
+ // 32-bit and extract sequence is already present, and it is probably easier
+ // to CSE this. The loads should be mergeable later anyway.
+ Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 1);
+ LoadInst *LoadXY = Builder.CreateAlignedLoad(GEPXY, 4);
+
+ Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(CastDispatchPtr, 2);
+ LoadInst *LoadZU = Builder.CreateAlignedLoad(GEPZU, 4);
+
+ MDNode *MD = llvm::MDNode::get(Mod->getContext(), None);
+ LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
+ LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
+ LoadZU->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);
+
+ // Extract y component. Upper half of LoadZU should be zero already.
+ Value *Y = Builder.CreateLShr(LoadXY, 16);
+
+ return std::make_pair(Y, LoadZU);
+}
+
+Value *AMDGPUPromoteAlloca::getWorkitemID(IRBuilder<> &Builder, unsigned N) {
+ Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;
+
+ switch (N) {
+ case 0:
+ IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_x
+ : Intrinsic::r600_read_tidig_x;
+ break;
+ case 1:
+ IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_y
+ : Intrinsic::r600_read_tidig_y;
+ break;
+
+ case 2:
+ IntrID = IsAMDGCN ? Intrinsic::amdgcn_workitem_id_z
+ : Intrinsic::r600_read_tidig_z;
+ break;
+ default:
+ llvm_unreachable("invalid dimension");
+ }
+
+ Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
+ CallInst *CI = Builder.CreateCall(WorkitemIdFn);
+ CI->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);
+
+ return CI;
}
static VectorType *arrayTypeToVecType(Type *ArrayTy) {
@@ -151,17 +390,16 @@ static bool canVectorizeInst(Instruction *Inst, User *User) {
}
static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
- Type *AllocaTy = Alloca->getAllocatedType();
+ ArrayType *AllocaTy = dyn_cast<ArrayType>(Alloca->getAllocatedType());
- DEBUG(dbgs() << "Alloca Candidate for vectorization \n");
+ DEBUG(dbgs() << "Alloca candidate for vectorization\n");
// FIXME: There is no reason why we can't support larger arrays, we
// are just being conservative for now.
- if (!AllocaTy->isArrayTy() ||
- AllocaTy->getArrayElementType()->isVectorTy() ||
- AllocaTy->getArrayNumElements() > 4) {
-
- DEBUG(dbgs() << " Cannot convert type to vector");
+ if (!AllocaTy ||
+ AllocaTy->getElementType()->isVectorTy() ||
+ AllocaTy->getNumElements() > 4) {
+ DEBUG(dbgs() << " Cannot convert type to vector\n");
return false;
}
@@ -200,9 +438,8 @@ static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
DEBUG(dbgs() << " Converting alloca to vector "
<< *AllocaTy << " -> " << *VectorTy << '\n');
- for (std::vector<Value*>::iterator I = WorkList.begin(),
- E = WorkList.end(); I != E; ++I) {
- Instruction *Inst = cast<Instruction>(*I);
+ for (Value *V : WorkList) {
+ Instruction *Inst = cast<Instruction>(V);
IRBuilder<> Builder(Inst);
switch (Inst->getOpcode()) {
case Instruction::Load: {
@@ -239,44 +476,163 @@ static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
return true;
}
-static bool collectUsesWithPtrTypes(Value *Val, std::vector<Value*> &WorkList) {
- bool Success = true;
+static bool isCallPromotable(CallInst *CI) {
+ // TODO: We might be able to handle some cases where the callee is a
+ // constantexpr bitcast of a function.
+ if (!CI->getCalledFunction())
+ return false;
+
+ IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
+ if (!II)
+ return false;
+
+ switch (II->getIntrinsicID()) {
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ case Intrinsic::memset:
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::invariant_start:
+ case Intrinsic::invariant_end:
+ case Intrinsic::invariant_group_barrier:
+ case Intrinsic::objectsize:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
+ Value *Val,
+ Instruction *Inst,
+ int OpIdx0,
+ int OpIdx1) const {
+ // Figure out which operand is the one we might not be promoting.
+ Value *OtherOp = Inst->getOperand(OpIdx0);
+ if (Val == OtherOp)
+ OtherOp = Inst->getOperand(OpIdx1);
+
+ if (isa<ConstantPointerNull>(OtherOp))
+ return true;
+
+ Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
+ if (!isa<AllocaInst>(OtherObj))
+ return false;
+
+ // TODO: We should be able to replace undefs with the right pointer type.
+
+ // TODO: If we know the other base object is another promotable
+ // alloca, not necessarily this alloca, we can do this. The
+ // important part is both must have the same address space at
+ // the end.
+ if (OtherObj != BaseAlloca) {
+ DEBUG(dbgs() << "Found a binary instruction with another alloca object\n");
+ return false;
+ }
+
+ return true;
+}
+
+bool AMDGPUPromoteAlloca::collectUsesWithPtrTypes(
+ Value *BaseAlloca,
+ Value *Val,
+ std::vector<Value*> &WorkList) const {
+
for (User *User : Val->users()) {
- if(std::find(WorkList.begin(), WorkList.end(), User) != WorkList.end())
+ if (std::find(WorkList.begin(), WorkList.end(), User) != WorkList.end())
continue;
+
if (CallInst *CI = dyn_cast<CallInst>(User)) {
- // TODO: We might be able to handle some cases where the callee is a
- // constantexpr bitcast of a function.
- if (!CI->getCalledFunction())
+ if (!isCallPromotable(CI))
return false;
WorkList.push_back(User);
continue;
}
- // FIXME: Correctly handle ptrtoint instructions.
- Instruction *UseInst = dyn_cast<Instruction>(User);
- if (UseInst && UseInst->getOpcode() == Instruction::PtrToInt)
+ Instruction *UseInst = cast<Instruction>(User);
+ if (UseInst->getOpcode() == Instruction::PtrToInt)
return false;
- if (StoreInst *SI = dyn_cast_or_null<StoreInst>(UseInst)) {
+ if (LoadInst *LI = dyn_cast_or_null<LoadInst>(UseInst)) {
+ if (LI->isVolatile())
+ return false;
+
+ continue;
+ }
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
+ if (SI->isVolatile())
+ return false;
+
// Reject if the stored value is not the pointer operand.
if (SI->getPointerOperand() != Val)
return false;
+ } else if (AtomicRMWInst *RMW = dyn_cast_or_null<AtomicRMWInst>(UseInst)) {
+ if (RMW->isVolatile())
+ return false;
+ } else if (AtomicCmpXchgInst *CAS
+ = dyn_cast_or_null<AtomicCmpXchgInst>(UseInst)) {
+ if (CAS->isVolatile())
+ return false;
+ }
+
+ // Only promote an icmp if the other compare operand is derived from the
+ // same alloca being promoted (or is a null pointer constant).
+ if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
+ if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
+ return false;
+
+ // May need to rewrite constant operands.
+ WorkList.push_back(ICmp);
}
if (!User->getType()->isPointerTy())
continue;
- WorkList.push_back(User);
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
+ // Be conservative if an address could be computed outside the bounds of
+ // the alloca.
+ if (!GEP->isInBounds())
+ return false;
+ }
- Success &= collectUsesWithPtrTypes(User, WorkList);
+ // Only promote a select if we know that the other select operand is from
+ // another pointer that will also be promoted.
+ if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
+ if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
+ return false;
+ }
+
+ // Repeat for phis.
+ if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
+ // TODO: Handle more complex cases. We should be able to replace loops
+ // over arrays.
+ switch (Phi->getNumIncomingValues()) {
+ case 1:
+ break;
+ case 2:
+ if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
+ return false;
+ break;
+ default:
+ return false;
+ }
+ }
+
+ WorkList.push_back(User);
+ if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
+ return false;
}
- return Success;
+
+ return true;
}
-void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
- if (!I.isStaticAlloca())
+// FIXME: Should try to pick the most likely to be profitable allocas first.
+void AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I) {
+ // Array allocations are probably not worth handling, since an allocation of
+ // the array type is the canonical form.
+ if (!I.isStaticAlloca() || I.isArrayAllocation())
return;
IRBuilder<> Builder(&I);
@@ -286,95 +642,144 @@ void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
DEBUG(dbgs() << "Trying to promote " << I << '\n');
- if (tryPromoteAllocaToVector(&I))
+ if (tryPromoteAllocaToVector(&I)) {
+ DEBUG(dbgs() << " alloca is not a candidate for vectorization.\n");
+ return;
+ }
+
+ const Function &ContainingFunction = *I.getParent()->getParent();
+
+ // Don't promote the alloca to LDS for shader calling conventions as the work
+ // item ID intrinsics are not supported for these calling conventions.
+ // Furthermore not all LDS is available for some of the stages.
+ if (AMDGPU::isShader(ContainingFunction.getCallingConv()))
return;
- DEBUG(dbgs() << " alloca is not a candidate for vectorization.\n");
+ // FIXME: We should also try to get this value from the reqd_work_group_size
+ // function attribute if it is available.
+ unsigned WorkGroupSize = AMDGPU::getMaximumWorkGroupSize(ContainingFunction);
- // FIXME: This is the maximum work group size. We should try to get
- // value from the reqd_work_group_size function attribute if it is
- // available.
- unsigned WorkGroupSize = 256;
- int AllocaSize =
- WorkGroupSize * Mod->getDataLayout().getTypeAllocSize(AllocaTy);
+ const DataLayout &DL = Mod->getDataLayout();
- if (AllocaSize > LocalMemAvailable) {
- DEBUG(dbgs() << " Not enough local memory to promote alloca.\n");
+ unsigned Align = I.getAlignment();
+ if (Align == 0)
+ Align = DL.getABITypeAlignment(I.getAllocatedType());
+
+ // FIXME: This computed padding is likely wrong since it depends on inverse
+ // usage order.
+ //
+ // FIXME: It is also possible that if we're allowed to use all of the memory
+ // we could end up using more than the maximum due to alignment padding.
+
+ uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
+ uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
+ NewSize += AllocSize;
+
+ if (NewSize > LocalMemLimit) {
+ DEBUG(dbgs() << " " << AllocSize
+ << " bytes of local memory not available to promote\n");
return;
}
+ CurrentLocalMemUsage = NewSize;
+
std::vector<Value*> WorkList;
- if (!collectUsesWithPtrTypes(&I, WorkList)) {
+ if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
DEBUG(dbgs() << " Do not know how to convert all uses\n");
return;
}
DEBUG(dbgs() << "Promoting alloca to local memory\n");
- LocalMemAvailable -= AllocaSize;
- Type *GVTy = ArrayType::get(I.getAllocatedType(), 256);
+ Function *F = I.getParent()->getParent();
+
+ Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
GlobalVariable *GV = new GlobalVariable(
- *Mod, GVTy, false, GlobalValue::ExternalLinkage, 0, I.getName(), 0,
- GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
-
- FunctionType *FTy = FunctionType::get(
- Type::getInt32Ty(Mod->getContext()), false);
- AttributeSet AttrSet;
- AttrSet.addAttribute(Mod->getContext(), 0, Attribute::ReadNone);
-
- Value *ReadLocalSizeY = Mod->getOrInsertFunction(
- "llvm.r600.read.local.size.y", FTy, AttrSet);
- Value *ReadLocalSizeZ = Mod->getOrInsertFunction(
- "llvm.r600.read.local.size.z", FTy, AttrSet);
- Value *ReadTIDIGX = Mod->getOrInsertFunction(
- "llvm.r600.read.tidig.x", FTy, AttrSet);
- Value *ReadTIDIGY = Mod->getOrInsertFunction(
- "llvm.r600.read.tidig.y", FTy, AttrSet);
- Value *ReadTIDIGZ = Mod->getOrInsertFunction(
- "llvm.r600.read.tidig.z", FTy, AttrSet);
-
- Value *TCntY = Builder.CreateCall(ReadLocalSizeY, {});
- Value *TCntZ = Builder.CreateCall(ReadLocalSizeZ, {});
- Value *TIdX = Builder.CreateCall(ReadTIDIGX, {});
- Value *TIdY = Builder.CreateCall(ReadTIDIGY, {});
- Value *TIdZ = Builder.CreateCall(ReadTIDIGZ, {});
-
- Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ);
+ *Mod, GVTy, false, GlobalValue::InternalLinkage,
+ UndefValue::get(GVTy),
+ Twine(F->getName()) + Twine('.') + I.getName(),
+ nullptr,
+ GlobalVariable::NotThreadLocal,
+ AMDGPUAS::LOCAL_ADDRESS);
+ GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
+ GV->setAlignment(I.getAlignment());
+
+ Value *TCntY, *TCntZ;
+
+ std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
+ Value *TIdX = getWorkitemID(Builder, 0);
+ Value *TIdY = getWorkitemID(Builder, 1);
+ Value *TIdZ = getWorkitemID(Builder, 2);
+
+ Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
Tmp0 = Builder.CreateMul(Tmp0, TIdX);
- Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ);
+ Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
TID = Builder.CreateAdd(TID, TIdZ);
- std::vector<Value*> Indices;
- Indices.push_back(Constant::getNullValue(Type::getInt32Ty(Mod->getContext())));
- Indices.push_back(TID);
+ Value *Indices[] = {
+ Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
+ TID
+ };
- Value *Offset = Builder.CreateGEP(GVTy, GV, Indices);
+ Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
I.mutateType(Offset->getType());
I.replaceAllUsesWith(Offset);
I.eraseFromParent();
- for (std::vector<Value*>::iterator i = WorkList.begin(),
- e = WorkList.end(); i != e; ++i) {
- Value *V = *i;
+ for (Value *V : WorkList) {
CallInst *Call = dyn_cast<CallInst>(V);
if (!Call) {
- Type *EltTy = V->getType()->getPointerElementType();
- PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);
+ if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
+ Value *Src0 = CI->getOperand(0);
+ Type *EltTy = Src0->getType()->getPointerElementType();
+ PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);
+
+ if (isa<ConstantPointerNull>(CI->getOperand(0)))
+ CI->setOperand(0, ConstantPointerNull::get(NewTy));
+
+ if (isa<ConstantPointerNull>(CI->getOperand(1)))
+ CI->setOperand(1, ConstantPointerNull::get(NewTy));
+
+ continue;
+ }
// The operand's value should be corrected on its own.
if (isa<AddrSpaceCastInst>(V))
continue;
+ Type *EltTy = V->getType()->getPointerElementType();
+ PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);
+
// FIXME: It doesn't really make sense to try to do this for all
// instructions.
V->mutateType(NewTy);
+
+ // Adjust the types of any constant operands.
+ if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
+ if (isa<ConstantPointerNull>(SI->getOperand(1)))
+ SI->setOperand(1, ConstantPointerNull::get(NewTy));
+
+ if (isa<ConstantPointerNull>(SI->getOperand(2)))
+ SI->setOperand(2, ConstantPointerNull::get(NewTy));
+ } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
+ for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
+ if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
+ Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
+ }
+ }
+
continue;
}
IntrinsicInst *Intr = dyn_cast<IntrinsicInst>(Call);
if (!Intr) {
+ // FIXME: What is this for? It doesn't make sense to promote arbitrary
+ // function calls. If the call is to a defined function that can also be
+ // promoted, we should be able to do this once that function is also
+ // rewritten.
+
std::vector<Type*> ArgTypes;
for (unsigned ArgIdx = 0, ArgEnd = Call->getNumArgOperands();
ArgIdx != ArgEnd; ++ArgIdx) {
@@ -405,6 +810,14 @@ void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
Intr->eraseFromParent();
continue;
}
+ case Intrinsic::memmove: {
+ MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
+ Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getRawSource(),
+ MemMove->getLength(), MemMove->getAlignment(),
+ MemMove->isVolatile());
+ Intr->eraseFromParent();
+ continue;
+ }
case Intrinsic::memset: {
MemSetInst *MemSet = cast<MemSetInst>(Intr);
Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
@@ -413,6 +826,28 @@ void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
Intr->eraseFromParent();
continue;
}
+ case Intrinsic::invariant_start:
+ case Intrinsic::invariant_end:
+ case Intrinsic::invariant_group_barrier:
+ Intr->eraseFromParent();
+ // FIXME: I think the invariant marker should still theoretically apply,
+ // but the intrinsics need to be changed to accept pointers with any
+ // address space.
+ continue;
+ case Intrinsic::objectsize: {
+ Value *Src = Intr->getOperand(0);
+ Type *SrcTy = Src->getType()->getPointerElementType();
+ Function *ObjectSize = Intrinsic::getDeclaration(Mod,
+ Intrinsic::objectsize,
+ { Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS) }
+ );
+
+ CallInst *NewCall
+ = Builder.CreateCall(ObjectSize, { Src, Intr->getOperand(1) });
+ Intr->replaceAllUsesWith(NewCall);
+ Intr->eraseFromParent();
+ continue;
+ }
default:
Intr->dump();
llvm_unreachable("Don't know how to promote alloca intrinsic use.");
@@ -420,6 +855,6 @@ void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
}
}
-FunctionPass *llvm::createAMDGPUPromoteAlloca(const AMDGPUSubtarget &ST) {
- return new AMDGPUPromoteAlloca(ST);
+FunctionPass *llvm::createAMDGPUPromoteAlloca(const TargetMachine *TM) {
+ return new AMDGPUPromoteAlloca(TM);
}
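
The replacement GEP that handleAlloca builds indexes a WorkGroupSize-element
LDS array by the linearized workitem id, computed above as
TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ, so every workitem in the
group gets a private slot. A standalone arithmetic sketch (the 4x4x4 group
size is chosen arbitrarily):

#include <cstdint>
#include <iostream>

// Linear index the pass materializes with IRBuilder; a bijection from
// (TIdX, TIdY, TIdZ) onto [0, TCntX*TCntY*TCntZ).
uint32_t linearWorkitemID(uint32_t TIdX, uint32_t TIdY, uint32_t TIdZ,
                          uint32_t TCntY, uint32_t TCntZ) {
  return TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ;
}

int main() {
  std::cout << linearWorkitemID(0, 0, 0, 4, 4) << '\n'; // 0
  std::cout << linearWorkitemID(0, 1, 2, 4, 4) << '\n'; // 6
  std::cout << linearWorkitemID(3, 3, 3, 4, 4) << '\n'; // 63, the last slot
}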
diff --git a/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp b/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
index 3ca0eca3417f..941f2d8a468a 100644
--- a/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
@@ -24,20 +24,14 @@ AMDGPURegisterInfo::AMDGPURegisterInfo() : AMDGPUGenRegisterInfo(0) {}
// they are not supported at this time.
//===----------------------------------------------------------------------===//
-const MCPhysReg AMDGPURegisterInfo::CalleeSavedReg = AMDGPU::NoRegister;
+// Dummy so that RegisterClassInfo does not crash.
+static const MCPhysReg CalleeSavedReg = AMDGPU::NoRegister;
-const MCPhysReg*
-AMDGPURegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
+const MCPhysReg *AMDGPURegisterInfo::getCalleeSavedRegs(
+ const MachineFunction *) const {
return &CalleeSavedReg;
}
-void AMDGPURegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
- int SPAdj,
- unsigned FIOperandNum,
- RegScavenger *RS) const {
- llvm_unreachable("Subroutines not supported yet");
-}
-
unsigned AMDGPURegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return AMDGPU::NoRegister;
}
@@ -54,10 +48,5 @@ unsigned AMDGPURegisterInfo::getSubRegFromChannel(unsigned Channel) const {
return SubRegs[Channel];
}
-unsigned AMDGPURegisterInfo::getIndirectSubReg(unsigned IndirectIndex) const {
-
- return getSubRegFromChannel(IndirectIndex);
-}
-
#define GET_REGINFO_TARGET_DESC
#include "AMDGPUGenRegisterInfo.inc"
diff --git a/lib/Target/AMDGPU/AMDGPURegisterInfo.h b/lib/Target/AMDGPU/AMDGPURegisterInfo.h
index 0344834328f6..ef51aad95dce 100644
--- a/lib/Target/AMDGPU/AMDGPURegisterInfo.h
+++ b/lib/Target/AMDGPU/AMDGPURegisterInfo.h
@@ -13,10 +13,9 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUREGISTERINFO_H
-#define LLVM_LIB_TARGET_R600_AMDGPUREGISTERINFO_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUREGISTERINFO_H
+#define LLVM_LIB_TARGET_AMDGPU_AMDGPUREGISTERINFO_H
-#include "llvm/ADT/BitVector.h"
#include "llvm/Target/TargetRegisterInfo.h"
#define GET_REGINFO_HEADER
@@ -29,30 +28,14 @@ class AMDGPUSubtarget;
class TargetInstrInfo;
struct AMDGPURegisterInfo : public AMDGPUGenRegisterInfo {
- static const MCPhysReg CalleeSavedReg;
-
AMDGPURegisterInfo();
- BitVector getReservedRegs(const MachineFunction &MF) const override {
- assert(!"Unimplemented"); return BitVector();
- }
-
- virtual unsigned getHWRegIndex(unsigned Reg) const {
- assert(!"Unimplemented"); return 0;
- }
-
/// \returns the sub reg enum value for the given \p Channel
/// (e.g. getSubRegFromChannel(0) -> AMDGPU::sub0)
unsigned getSubRegFromChannel(unsigned Channel) const;
const MCPhysReg* getCalleeSavedRegs(const MachineFunction *MF) const override;
- void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
- unsigned FIOperandNum,
- RegScavenger *RS) const override;
unsigned getFrameRegister(const MachineFunction &MF) const override;
-
- unsigned getIndirectSubReg(unsigned IndirectIndex) const;
-
};
} // End namespace llvm
diff --git a/lib/Target/AMDGPU/AMDGPURuntimeMetadata.h b/lib/Target/AMDGPU/AMDGPURuntimeMetadata.h
new file mode 100644
index 000000000000..40f639434507
--- /dev/null
+++ b/lib/Target/AMDGPU/AMDGPURuntimeMetadata.h
@@ -0,0 +1,138 @@
+//===-- AMDGPURuntimeMetadata.h - AMDGPU Runtime Metadata -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+///
+/// Enums and structure types used by runtime metadata.
+///
+/// The runtime requests certain information (metadata) about kernels in order
+/// to execute them and to answer queries about them.
+/// The metadata is represented as a byte stream in an ELF section of a
+/// binary (code object). The byte stream consists of key-value pairs.
+/// Each key is an 8 bit unsigned integer. Each value can be an integer,
+/// a string, or a stream of key-value pairs. There are 3 levels of key-value
+/// pair streams. At the beginning of the ELF section is the top level
+/// key-value pair stream. A kernel-level key-value pair stream starts after
+/// encountering KeyKernelBegin and ends immediately before encountering
+/// KeyKernelEnd. A kernel-argument-level key-value pair stream starts
+/// after encountering KeyArgBegin and ends immediately before encountering
+/// KeyArgEnd. A kernel-level key-value pair stream can only appear in a top
+/// level key-value pair stream. A kernel-argument-level key-value pair stream
+/// can only appear in a kernel-level key-value pair stream.
+///
+/// The format should be kept backward compatible. New enum values and bit
+/// fields should be appended at the end. It is suggested to bump up the
+/// revision number whenever the format changes and to document the change
+/// in this header.
+///
+//
+//===----------------------------------------------------------------------===//
+//
+#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPURUNTIMEMETADATA_H
+#define LLVM_LIB_TARGET_AMDGPU_AMDGPURUNTIMEMETADATA_H
+
+#include <stdint.h>
+
+namespace AMDGPU {
+
+namespace RuntimeMD {
+
+ // Version and revision of runtime metadata
+ const unsigned char MDVersion = 1;
+ const unsigned char MDRevision = 0;
+
+ // ELF section name containing runtime metadata
+ const char SectionName[] = ".AMDGPU.runtime_metadata";
+
+ // Enumeration values of keys in runtime metadata.
+ enum Key {
+ KeyNull = 0, // Placeholder. Ignored when encountered
+ KeyMDVersion = 1, // Runtime metadata version
+ KeyLanguage = 2, // Language
+ KeyLanguageVersion = 3, // Language version
+ KeyKernelBegin = 4, // Beginning of kernel-level stream
+ KeyKernelEnd = 5, // End of kernel-level stream
+ KeyKernelName = 6, // Kernel name
+ KeyArgBegin = 7, // Beginning of kernel-arg-level stream
+ KeyArgEnd = 8, // End of kernel-arg-level stream
+ KeyArgSize = 9, // Kernel arg size
+ KeyArgAlign = 10, // Kernel arg alignment
+ KeyArgTypeName = 11, // Kernel argument type name
+ KeyArgName = 12, // Kernel argument name
+ KeyArgTypeKind = 13, // Kernel argument type kind
+ KeyArgValueType = 14, // Kernel argument value type
+ KeyArgAddrQual = 15, // Kernel argument address qualifier
+ KeyArgAccQual = 16, // Kernel argument access qualifier
+ KeyArgIsConst = 17, // Kernel argument is const qualified
+ KeyArgIsRestrict = 18, // Kernel argument is restrict qualified
+ KeyArgIsVolatile = 19, // Kernel argument is volatile qualified
+ KeyArgIsPipe = 20, // Kernel argument is pipe qualified
+ KeyReqdWorkGroupSize = 21, // Required work group size
+ KeyWorkGroupSizeHint = 22, // Work group size hint
+ KeyVecTypeHint = 23, // Vector type hint
+ KeyKernelIndex = 24, // Kernel index for device enqueue
+ KeySGPRs = 25, // Number of SGPRs
+ KeyVGPRs = 26, // Number of VGPRs
+ KeyMinWavesPerSIMD = 27, // Minimum number of waves per SIMD
+ KeyMaxWavesPerSIMD = 28, // Maximum number of waves per SIMD
+ KeyFlatWorkGroupSizeLimits = 29, // Flat work group size limits
+ KeyMaxWorkGroupSize = 30, // Maximum work group size
+ KeyNoPartialWorkGroups = 31, // No partial work groups
+ };
+
+ enum Language : uint8_t {
+ OpenCL_C = 0,
+ HCC = 1,
+ OpenMP = 2,
+ OpenCL_CPP = 3,
+ };
+
+ enum LanguageVersion : uint16_t {
+ V100 = 100,
+ V110 = 110,
+ V120 = 120,
+ V200 = 200,
+ V210 = 210,
+ };
+
+ namespace KernelArg {
+ enum TypeKind : uint8_t {
+ Value = 0,
+ Pointer = 1,
+ Image = 2,
+ Sampler = 3,
+ Queue = 4,
+ };
+
+ enum ValueType : uint16_t {
+ Struct = 0,
+ I8 = 1,
+ U8 = 2,
+ I16 = 3,
+ U16 = 4,
+ F16 = 5,
+ I32 = 6,
+ U32 = 7,
+ F32 = 8,
+ I64 = 9,
+ U64 = 10,
+ F64 = 11,
+ };
+
+ enum AccessQualifier : uint8_t {
+ None = 0,
+ ReadOnly = 1,
+ WriteOnly = 2,
+ ReadWrite = 3,
+ };
+ } // namespace KernelArg
+} // namespace RuntimeMD
+} // namespace AMDGPU
+
+#endif // LLVM_LIB_TARGET_AMDGPU_AMDGPURUNTIMEMETADATA_H
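
The header fixes the key numbering but deliberately leaves the concrete value
encodings to the emitter. The sketch below is illustrative only: it assumes
one-byte keys, a one-byte integer for KeyMDVersion, and strings encoded as a
32-bit little-endian length followed by raw bytes; those encoding choices and
the kernel name "vec_add" are assumptions, not the documented wire format.

#include <cstdint>
#include <string>
#include <vector>

// ASSUMED encodings; the backend's emitter is authoritative.
enum Key : uint8_t { KeyMDVersion = 1, KeyKernelBegin = 4, KeyKernelEnd = 5,
                     KeyKernelName = 6 };

static void emitString(std::vector<uint8_t> &S, const std::string &V) {
  uint32_t Len = static_cast<uint32_t>(V.size());
  for (int I = 0; I < 4; ++I)          // little-endian length prefix
    S.push_back((Len >> (8 * I)) & 0xff);
  S.insert(S.end(), V.begin(), V.end());
}

std::vector<uint8_t> emitExample() {
  std::vector<uint8_t> S;
  S.push_back(KeyMDVersion);
  S.push_back(1);                      // MDVersion = 1
  S.push_back(KeyKernelBegin);         // open the kernel-level stream
  S.push_back(KeyKernelName);
  emitString(S, "vec_add");            // hypothetical kernel name
  S.push_back(KeyKernelEnd);           // close the kernel-level stream
  return S;
}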
diff --git a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index 7d70fa73da29..10fa9cf46737 100644
--- a/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -15,7 +15,6 @@
#include "AMDGPUSubtarget.h"
#include "R600ISelLowering.h"
#include "R600InstrInfo.h"
-#include "R600MachineScheduler.h"
#include "SIFrameLowering.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
@@ -32,6 +31,8 @@ using namespace llvm;
#define GET_SUBTARGETINFO_CTOR
#include "AMDGPUGenSubtargetInfo.inc"
+AMDGPUSubtarget::~AMDGPUSubtarget() {}
+
AMDGPUSubtarget &
AMDGPUSubtarget::initializeSubtargetDependencies(const Triple &TT,
StringRef GPU, StringRef FS) {
@@ -44,14 +45,11 @@ AMDGPUSubtarget::initializeSubtargetDependencies(const Triple &TT,
// for SI has the unhelpful behavior that it unsets everything else if you
// disable it.
- SmallString<256> FullFS("+promote-alloca,+fp64-denormals,");
+ SmallString<256> FullFS("+promote-alloca,+fp64-denormals,+load-store-opt,");
if (isAmdHsaOS()) // Turn on FlatForGlobal for HSA.
- FullFS += "+flat-for-global,";
+ FullFS += "+flat-for-global,+unaligned-buffer-access,";
FullFS += FS;
- if (GPU == "" && TT.getArch() == Triple::amdgcn)
- GPU = "SI";
-
ParseSubtargetFeatures(GPU, FullFS);
// FIXME: I don't think Evergreen has any useful support for
@@ -61,52 +59,142 @@ AMDGPUSubtarget::initializeSubtargetDependencies(const Triple &TT,
FP32Denormals = false;
FP64Denormals = false;
}
+
+ // Set defaults if needed.
+ if (MaxPrivateElementSize == 0)
+ MaxPrivateElementSize = 4;
+
return *this;
}
AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
- TargetMachine &TM)
- : AMDGPUGenSubtargetInfo(TT, GPU, FS), DevName(GPU), Is64bit(false),
- DumpCode(false), R600ALUInst(false), HasVertexCache(false),
- TexVTXClauseSize(0), Gen(AMDGPUSubtarget::R600), FP64(false),
- FP64Denormals(false), FP32Denormals(false), FastFMAF32(false),
- CaymanISA(false), FlatAddressSpace(false), FlatForGlobal(false),
- EnableIRStructurizer(true), EnablePromoteAlloca(false), EnableIfCvt(true),
- EnableLoadStoreOpt(false), EnableUnsafeDSOffsetFolding(false),
- EnableXNACK(false),
- WavefrontSize(0), CFALUBug(false), LocalMemorySize(0),
- EnableVGPRSpilling(false), SGPRInitBug(false), IsGCN(false),
- GCN1Encoding(false), GCN3Encoding(false), CIInsts(false), LDSBankCount(0),
- IsaVersion(ISAVersion0_0_0), EnableHugeScratchBuffer(false),
- EnableSIScheduler(false), FrameLowering(nullptr),
- InstrItins(getInstrItineraryForCPU(GPU)), TargetTriple(TT) {
-
+ const TargetMachine &TM)
+ : AMDGPUGenSubtargetInfo(TT, GPU, FS),
+ TargetTriple(TT),
+ Gen(TT.getArch() == Triple::amdgcn ? SOUTHERN_ISLANDS : R600),
+ IsaVersion(ISAVersion0_0_0),
+ WavefrontSize(64),
+ LocalMemorySize(0),
+ LDSBankCount(0),
+ MaxPrivateElementSize(0),
+
+ FastFMAF32(false),
+ HalfRate64Ops(false),
+
+ FP32Denormals(false),
+ FP64Denormals(false),
+ FPExceptions(false),
+ FlatForGlobal(false),
+ UnalignedBufferAccess(false),
+
+ EnableXNACK(false),
+ DebuggerInsertNops(false),
+ DebuggerReserveRegs(false),
+ DebuggerEmitPrologue(false),
+
+ EnableVGPRSpilling(false),
+ EnablePromoteAlloca(false),
+ EnableLoadStoreOpt(false),
+ EnableUnsafeDSOffsetFolding(false),
+ EnableSIScheduler(false),
+ DumpCode(false),
+
+ FP64(false),
+ IsGCN(false),
+ GCN1Encoding(false),
+ GCN3Encoding(false),
+ CIInsts(false),
+ SGPRInitBug(false),
+ HasSMemRealTime(false),
+ Has16BitInsts(false),
+ FlatAddressSpace(false),
+
+ R600ALUInst(false),
+ CaymanISA(false),
+ CFALUBug(false),
+ HasVertexCache(false),
+ TexVTXClauseSize(0),
+
+ FeatureDisable(false),
+ InstrItins(getInstrItineraryForCPU(GPU)) {
initializeSubtargetDependencies(TT, GPU, FS);
+}
- const unsigned MaxStackAlign = 64 * 16; // Maximum stack alignment (long16)
-
- if (getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
- InstrInfo.reset(new R600InstrInfo(*this));
- TLInfo.reset(new R600TargetLowering(TM, *this));
-
- // FIXME: Should have R600 specific FrameLowering
- FrameLowering.reset(new AMDGPUFrameLowering(
- TargetFrameLowering::StackGrowsUp,
- MaxStackAlign,
- 0));
- } else {
- InstrInfo.reset(new SIInstrInfo(*this));
- TLInfo.reset(new SITargetLowering(TM, *this));
- FrameLowering.reset(new SIFrameLowering(
- TargetFrameLowering::StackGrowsUp,
- MaxStackAlign,
- 0));
+// FIXME: These limits are for SI. Did they change with the larger maximum LDS
+// size?
+unsigned AMDGPUSubtarget::getMaxLocalMemSizeWithWaveCount(unsigned NWaves) const {
+ switch (NWaves) {
+ case 10:
+ return 1638;
+ case 9:
+ return 1820;
+ case 8:
+ return 2048;
+ case 7:
+ return 2340;
+ case 6:
+ return 2730;
+ case 5:
+ return 3276;
+ case 4:
+ return 4096;
+ case 3:
+ return 5461;
+ case 2:
+ return 8192;
+ default:
+ return getLocalMemorySize();
}
}
-unsigned AMDGPUSubtarget::getStackEntrySize() const {
- assert(getGeneration() <= NORTHERN_ISLANDS);
- switch(getWavefrontSize()) {
+unsigned AMDGPUSubtarget::getOccupancyWithLocalMemSize(uint32_t Bytes) const {
+ if (Bytes <= 1638)
+ return 10;
+
+ if (Bytes <= 1820)
+ return 9;
+
+ if (Bytes <= 2048)
+ return 8;
+
+ if (Bytes <= 2340)
+ return 7;
+
+ if (Bytes <= 2730)
+ return 6;
+
+ if (Bytes <= 3276)
+ return 5;
+
+ if (Bytes <= 4096)
+ return 4;
+
+ if (Bytes <= 5461)
+ return 3;
+
+ if (Bytes <= 8192)
+ return 2;
+
+ return 1;
+}
+
+R600Subtarget::R600Subtarget(const Triple &TT, StringRef GPU, StringRef FS,
+ const TargetMachine &TM) :
+ AMDGPUSubtarget(TT, GPU, FS, TM),
+ InstrInfo(*this),
+ FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0),
+ TLInfo(TM, *this) {}
+
+SISubtarget::SISubtarget(const Triple &TT, StringRef GPU, StringRef FS,
+ const TargetMachine &TM) :
+ AMDGPUSubtarget(TT, GPU, FS, TM),
+ InstrInfo(*this),
+ FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0),
+ TLInfo(TM, *this),
+ GISel() {}
+
+unsigned R600Subtarget::getStackEntrySize() const {
+ switch (getWavefrontSize()) {
case 16:
return 8;
case 32:
@@ -118,37 +206,36 @@ unsigned AMDGPUSubtarget::getStackEntrySize() const {
}
}
-unsigned AMDGPUSubtarget::getAmdKernelCodeChipID() const {
- switch(getGeneration()) {
- default: llvm_unreachable("ChipID unknown");
- case SEA_ISLANDS: return 12;
- }
-}
-
-AMDGPU::IsaVersion AMDGPUSubtarget::getIsaVersion() const {
- return AMDGPU::getIsaVersion(getFeatureBits());
+void SISubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
+ unsigned NumRegionInstrs) const {
+ // Track register pressure so the scheduler can try to decrease
+ // pressure once register usage is above the threshold defined by
+ // SIRegisterInfo::getRegPressureSetLimit()
+ Policy.ShouldTrackPressure = true;
+
+ // Enabling both top down and bottom up scheduling seems to give us less
+ // register spills than just using one of these approaches on its own.
+ Policy.OnlyTopDown = false;
+ Policy.OnlyBottomUp = false;
+
+ // Enabling ShouldTrackLaneMasks crashes the SI Machine Scheduler.
+ if (!enableSIScheduler())
+ Policy.ShouldTrackLaneMasks = true;
}
-bool AMDGPUSubtarget::isVGPRSpillingEnabled(
- const SIMachineFunctionInfo *MFI) const {
- return MFI->getShaderType() == ShaderType::COMPUTE || EnableVGPRSpilling;
+bool SISubtarget::isVGPRSpillingEnabled(const Function& F) const {
+ return EnableVGPRSpilling || !AMDGPU::isShader(F.getCallingConv());
}
-void AMDGPUSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
- MachineInstr *begin,
- MachineInstr *end,
- unsigned NumRegionInstrs) const {
- if (getGeneration() >= SOUTHERN_ISLANDS) {
-
- // Track register pressure so the scheduler can try to decrease
- // pressure once register usage is above the threshold defined by
- // SIRegisterInfo::getRegPressureSetLimit()
- Policy.ShouldTrackPressure = true;
-
- // Enabling both top down and bottom up scheduling seems to give us less
- // register spills than just using one of these approaches on its own.
- Policy.OnlyTopDown = false;
- Policy.OnlyBottomUp = false;
+unsigned SISubtarget::getAmdKernelCodeChipID() const {
+ switch (getGeneration()) {
+ case SEA_ISLANDS:
+ return 12;
+ default:
+ llvm_unreachable("ChipID unknown");
}
}
+AMDGPU::IsaVersion SISubtarget::getIsaVersion() const {
+ return AMDGPU::getIsaVersion(getFeatureBits());
+}
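
getOccupancyWithLocalMemSize and getMaxLocalMemSizeWithWaveCount above are
inverse tier tables: an LDS footprint maps to the highest wave count whose
per-wave budget still covers it, and that wave count maps back to the budget
AMDGPUPromoteAlloca is allowed to grow into. A standalone sketch with the same
SI byte figures (the 32768-byte fallback is only an example value):

#include <cstdint>
#include <iostream>

unsigned occupancyWithLocalMemSize(uint32_t Bytes) {
  const uint32_t Tier[] = {1638, 1820, 2048, 2340, 2730, 3276, 4096, 5461, 8192};
  for (unsigned I = 0; I != 9; ++I)
    if (Bytes <= Tier[I])
      return 10 - I;         // 10 waves at <= 1638 bytes, down to 2 at 8192
  return 1;
}

uint32_t maxLocalMemSizeWithWaveCount(unsigned NWaves, uint32_t LocalMemSize) {
  const uint32_t Tier[] = {1638, 1820, 2048, 2340, 2730, 3276, 4096, 5461, 8192};
  if (NWaves >= 2 && NWaves <= 10)
    return Tier[10 - NWaves];
  return LocalMemSize;       // 1 wave (or out of range): the whole LDS
}

int main() {
  // 3000 bytes already used: at most 5 waves fit, and the budget rounds up
  // to that tier's 3276 bytes, leaving 276 bytes for promotion.
  unsigned Occ = occupancyWithLocalMemSize(3000);                  // 5
  std::cout << Occ << ' ' << maxLocalMemSizeWithWaveCount(Occ, 32768) << '\n';
}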
diff --git a/lib/Target/AMDGPU/AMDGPUSubtarget.h b/lib/Target/AMDGPU/AMDGPUSubtarget.h
index 49c94f1eceb8..3fe61aa449e0 100644
--- a/lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ b/lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -16,12 +16,14 @@
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUSUBTARGET_H
#include "AMDGPU.h"
-#include "AMDGPUFrameLowering.h"
-#include "AMDGPUInstrInfo.h"
-#include "AMDGPUISelLowering.h"
-#include "AMDGPUSubtarget.h"
+#include "R600InstrInfo.h"
+#include "R600ISelLowering.h"
+#include "R600FrameLowering.h"
+#include "SIInstrInfo.h"
+#include "SIISelLowering.h"
+#include "SIFrameLowering.h"
#include "Utils/AMDGPUBaseInfo.h"
-#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/GlobalISel/GISelAccessor.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#define GET_SUBTARGETINFO_HEADER
@@ -30,9 +32,9 @@
namespace llvm {
class SIMachineFunctionInfo;
+class StringRef;
class AMDGPUSubtarget : public AMDGPUGenSubtargetInfo {
-
public:
enum Generation {
R600 = 0,
@@ -45,10 +47,6 @@ public:
};
enum {
- FIXED_SGPR_COUNT_FOR_INIT_BUG = 80
- };
-
- enum {
ISAVersion0_0_0,
ISAVersion7_0_0,
ISAVersion7_0_1,
@@ -57,114 +55,116 @@ public:
ISAVersion8_0_3
};
-private:
- std::string DevName;
- bool Is64bit;
- bool DumpCode;
- bool R600ALUInst;
- bool HasVertexCache;
- short TexVTXClauseSize;
+protected:
+ // Basic subtarget description.
+ Triple TargetTriple;
Generation Gen;
- bool FP64;
- bool FP64Denormals;
- bool FP32Denormals;
+ unsigned IsaVersion;
+ unsigned WavefrontSize;
+ int LocalMemorySize;
+ int LDSBankCount;
+ unsigned MaxPrivateElementSize;
+
+ // Possibly statically set by tablegen, but may want to be overridden.
bool FastFMAF32;
- bool CaymanISA;
- bool FlatAddressSpace;
+ bool HalfRate64Ops;
+
+ // Dynamically set bits that enable features.
+ bool FP32Denormals;
+ bool FP64Denormals;
+ bool FPExceptions;
bool FlatForGlobal;
- bool EnableIRStructurizer;
+ bool UnalignedBufferAccess;
+ bool EnableXNACK;
+ bool DebuggerInsertNops;
+ bool DebuggerReserveRegs;
+ bool DebuggerEmitPrologue;
+
+ // Used as options.
+ bool EnableVGPRSpilling;
bool EnablePromoteAlloca;
- bool EnableIfCvt;
bool EnableLoadStoreOpt;
bool EnableUnsafeDSOffsetFolding;
- bool EnableXNACK;
- unsigned WavefrontSize;
- bool CFALUBug;
- int LocalMemorySize;
- bool EnableVGPRSpilling;
- bool SGPRInitBug;
+ bool EnableSIScheduler;
+ bool DumpCode;
+
+ // Subtarget properties set statically by tablegen.
+ bool FP64;
bool IsGCN;
bool GCN1Encoding;
bool GCN3Encoding;
bool CIInsts;
+ bool SGPRInitBug;
+ bool HasSMemRealTime;
+ bool Has16BitInsts;
+ bool FlatAddressSpace;
+ bool R600ALUInst;
+ bool CaymanISA;
+ bool CFALUBug;
+ bool HasVertexCache;
+ short TexVTXClauseSize;
+
+ // Dummy feature to use for assembler in tablegen.
bool FeatureDisable;
- int LDSBankCount;
- unsigned IsaVersion;
- bool EnableHugeScratchBuffer;
- bool EnableSIScheduler;
- std::unique_ptr<AMDGPUFrameLowering> FrameLowering;
- std::unique_ptr<AMDGPUTargetLowering> TLInfo;
- std::unique_ptr<AMDGPUInstrInfo> InstrInfo;
InstrItineraryData InstrItins;
- Triple TargetTriple;
public:
- AMDGPUSubtarget(const Triple &TT, StringRef CPU, StringRef FS,
- TargetMachine &TM);
+ AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
+ const TargetMachine &TM);
+ virtual ~AMDGPUSubtarget();
AMDGPUSubtarget &initializeSubtargetDependencies(const Triple &TT,
StringRef GPU, StringRef FS);
- const AMDGPUFrameLowering *getFrameLowering() const override {
- return FrameLowering.get();
- }
- const AMDGPUInstrInfo *getInstrInfo() const override {
- return InstrInfo.get();
- }
- const AMDGPURegisterInfo *getRegisterInfo() const override {
- return &InstrInfo->getRegisterInfo();
- }
- AMDGPUTargetLowering *getTargetLowering() const override {
- return TLInfo.get();
- }
+ const AMDGPUInstrInfo *getInstrInfo() const override;
+ const AMDGPUFrameLowering *getFrameLowering() const override;
+ const AMDGPUTargetLowering *getTargetLowering() const override;
+ const AMDGPURegisterInfo *getRegisterInfo() const override;
+
const InstrItineraryData *getInstrItineraryData() const override {
return &InstrItins;
}
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
- bool is64bit() const {
- return Is64bit;
- }
-
- bool hasVertexCache() const {
- return HasVertexCache;
- }
-
- short getTexVTXClauseSize() const {
- return TexVTXClauseSize;
+ bool isAmdHsaOS() const {
+ return TargetTriple.getOS() == Triple::AMDHSA;
}
Generation getGeneration() const {
return Gen;
}
- bool hasHWFP64() const {
- return FP64;
+ unsigned getWavefrontSize() const {
+ return WavefrontSize;
}
- bool hasCaymanISA() const {
- return CaymanISA;
+ int getLocalMemorySize() const {
+ return LocalMemorySize;
}
- bool hasFP32Denormals() const {
- return FP32Denormals;
+ int getLDSBankCount() const {
+ return LDSBankCount;
}
- bool hasFP64Denormals() const {
- return FP64Denormals;
+ unsigned getMaxPrivateElementSize() const {
+ return MaxPrivateElementSize;
+ }
+
+ bool hasHWFP64() const {
+ return FP64;
}
bool hasFastFMAF32() const {
return FastFMAF32;
}
- bool hasFlatAddressSpace() const {
- return FlatAddressSpace;
+ bool hasHalfRate64Ops() const {
+ return HalfRate64Ops;
}
- bool useFlatForGlobal() const {
- return FlatForGlobal;
+ bool hasAddr64() const {
+ return (getGeneration() < VOLCANIC_ISLANDS);
}
bool hasBFE() const {
@@ -214,116 +214,249 @@ public:
return (getGeneration() >= EVERGREEN);
}
- bool IsIRStructurizerEnabled() const {
- return EnableIRStructurizer;
+ bool hasCaymanISA() const {
+ return CaymanISA;
}
bool isPromoteAllocaEnabled() const {
return EnablePromoteAlloca;
}
- bool isIfCvtEnabled() const {
- return EnableIfCvt;
+ bool unsafeDSOffsetFoldingEnabled() const {
+ return EnableUnsafeDSOffsetFolding;
}
- bool loadStoreOptEnabled() const {
- return EnableLoadStoreOpt;
+ bool dumpCode() const {
+ return DumpCode;
}
- bool unsafeDSOffsetFoldingEnabled() const {
- return EnableUnsafeDSOffsetFolding;
+ /// Return the maximum amount of LDS that can be used without restricting
+ /// the occupancy below WaveCount.
+ unsigned getMaxLocalMemSizeWithWaveCount(unsigned WaveCount) const;
+
+ /// Inverse of getMaxLocalMemSizeWithWaveCount. Return the maximum wave count
+ /// if the given LDS memory size is the only constraint.
+ unsigned getOccupancyWithLocalMemSize(uint32_t Bytes) const;
+
+ bool hasFP32Denormals() const {
+ return FP32Denormals;
}
- unsigned getWavefrontSize() const {
- return WavefrontSize;
+ bool hasFP64Denormals() const {
+ return FP64Denormals;
}
- unsigned getStackEntrySize() const;
+ bool hasFPExceptions() const {
+ return FPExceptions;
+ }
- bool hasCFAluBug() const {
- assert(getGeneration() <= NORTHERN_ISLANDS);
- return CFALUBug;
+ bool useFlatForGlobal() const {
+ return FlatForGlobal;
}
- int getLocalMemorySize() const {
- return LocalMemorySize;
+ bool hasUnalignedBufferAccess() const {
+ return UnalignedBufferAccess;
}
- bool hasSGPRInitBug() const {
- return SGPRInitBug;
+ bool isXNACKEnabled() const {
+ return EnableXNACK;
}
- int getLDSBankCount() const {
- return LDSBankCount;
+ unsigned getMaxWavesPerCU() const {
+ if (getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
+ return 10;
+
+ // FIXME: Not sure what this is for other subtargets.
+ return 8;
}
- unsigned getAmdKernelCodeChipID() const;
+ /// \brief Returns the offset in bytes from the start of the input buffer
+ /// of the first explicit kernel argument.
+ unsigned getExplicitKernelArgOffset() const {
+ return isAmdHsaOS() ? 0 : 36;
+ }
- AMDGPU::IsaVersion getIsaVersion() const;
+ unsigned getStackAlignment() const {
+ // Scratch is allocated in 256 dword per wave blocks.
+ return 4 * 256 / getWavefrontSize();
+ }
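
A quick check of the arithmetic in getStackAlignment() above: scratch is handed out in blocks of 256 dwords per wave, so the per-lane alignment is the block size in bytes divided by the lane count. Assuming the usual 64-lane GCN wavefront (the real value comes from WavefrontSize):

constexpr unsigned DwordBytes = 4;
constexpr unsigned BlockDwords = 256;
constexpr unsigned WaveSize = 64; // assumed wave64
constexpr unsigned StackAlign = DwordBytes * BlockDwords / WaveSize;
static_assert(StackAlign == 16, "wave64 yields 16-byte stack alignment");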
bool enableMachineScheduler() const override {
return true;
}
- void overrideSchedPolicy(MachineSchedPolicy &Policy,
- MachineInstr *begin, MachineInstr *end,
- unsigned NumRegionInstrs) const override;
+ bool enableSubRegLiveness() const override {
+ return true;
+ }
+};
- // Helper functions to simplify if statements
- bool isTargetELF() const {
- return false;
+class R600Subtarget final : public AMDGPUSubtarget {
+private:
+ R600InstrInfo InstrInfo;
+ R600FrameLowering FrameLowering;
+ R600TargetLowering TLInfo;
+
+public:
+ R600Subtarget(const Triple &TT, StringRef CPU, StringRef FS,
+ const TargetMachine &TM);
+
+ const R600InstrInfo *getInstrInfo() const override {
+ return &InstrInfo;
}
- StringRef getDeviceName() const {
- return DevName;
+ const R600FrameLowering *getFrameLowering() const override {
+ return &FrameLowering;
}
- bool enableHugeScratchBuffer() const {
- return EnableHugeScratchBuffer;
+ const R600TargetLowering *getTargetLowering() const override {
+ return &TLInfo;
}
- bool enableSIScheduler() const {
- return EnableSIScheduler;
+ const R600RegisterInfo *getRegisterInfo() const override {
+ return &InstrInfo.getRegisterInfo();
}
- bool dumpCode() const {
- return DumpCode;
+ bool hasCFAluBug() const {
+ return CFALUBug;
}
- bool r600ALUEncoding() const {
- return R600ALUInst;
+
+ bool hasVertexCache() const {
+ return HasVertexCache;
}
- bool isAmdHsaOS() const {
- return TargetTriple.getOS() == Triple::AMDHSA;
+
+ short getTexVTXClauseSize() const {
+ return TexVTXClauseSize;
}
- bool isVGPRSpillingEnabled(const SIMachineFunctionInfo *MFI) const;
- bool isXNACKEnabled() const {
- return EnableXNACK;
+ unsigned getStackEntrySize() const;
+};
+
+class SISubtarget final : public AMDGPUSubtarget {
+public:
+ enum {
+ FIXED_SGPR_COUNT_FOR_INIT_BUG = 80
+ };
+
+private:
+ SIInstrInfo InstrInfo;
+ SIFrameLowering FrameLowering;
+ SITargetLowering TLInfo;
+ std::unique_ptr<GISelAccessor> GISel;
+
+public:
+ SISubtarget(const Triple &TT, StringRef CPU, StringRef FS,
+ const TargetMachine &TM);
+
+ const SIInstrInfo *getInstrInfo() const override {
+ return &InstrInfo;
}
- unsigned getMaxWavesPerCU() const {
- if (getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
- return 10;
+ const SIFrameLowering *getFrameLowering() const override {
+ return &FrameLowering;
+ }
- // FIXME: Not sure what this is for other subtagets.
- llvm_unreachable("do not know max waves per CU for this subtarget.");
+ const SITargetLowering *getTargetLowering() const override {
+ return &TLInfo;
}
- bool enableSubRegLiveness() const override {
- return true;
+ const CallLowering *getCallLowering() const override {
+ assert(GISel && "Access to GlobalISel APIs not set");
+ return GISel->getCallLowering();
}
- /// \brief Returns the offset in bytes from the start of the input buffer
- /// of the first explicit kernel argument.
- unsigned getExplicitKernelArgOffset() const {
- return isAmdHsaOS() ? 0 : 36;
+ const SIRegisterInfo *getRegisterInfo() const override {
+ return &InstrInfo.getRegisterInfo();
+ }
+
+ void setGISelAccessor(GISelAccessor &GISel) {
+ this->GISel.reset(&GISel);
}
+ void overrideSchedPolicy(MachineSchedPolicy &Policy,
+ unsigned NumRegionInstrs) const override;
+
+ bool isVGPRSpillingEnabled(const Function& F) const;
+
+ unsigned getAmdKernelCodeChipID() const;
+
+ AMDGPU::IsaVersion getIsaVersion() const;
+
unsigned getMaxNumUserSGPRs() const {
return 16;
}
+
+ bool hasFlatAddressSpace() const {
+ return FlatAddressSpace;
+ }
+
+ bool hasSMemRealTime() const {
+ return HasSMemRealTime;
+ }
+
+ bool has16BitInsts() const {
+ return Has16BitInsts;
+ }
+
+ bool enableSIScheduler() const {
+ return EnableSIScheduler;
+ }
+
+ bool debuggerSupported() const {
+ return debuggerInsertNops() && debuggerReserveRegs() &&
+ debuggerEmitPrologue();
+ }
+
+ bool debuggerInsertNops() const {
+ return DebuggerInsertNops;
+ }
+
+ bool debuggerReserveRegs() const {
+ return DebuggerReserveRegs;
+ }
+
+ bool debuggerEmitPrologue() const {
+ return DebuggerEmitPrologue;
+ }
+
+ bool loadStoreOptEnabled() const {
+ return EnableLoadStoreOpt;
+ }
+
+ bool hasSGPRInitBug() const {
+ return SGPRInitBug;
+ }
};
+
+inline const AMDGPUInstrInfo *AMDGPUSubtarget::getInstrInfo() const {
+ if (getGeneration() >= SOUTHERN_ISLANDS)
+ return static_cast<const SISubtarget *>(this)->getInstrInfo();
+
+ return static_cast<const R600Subtarget *>(this)->getInstrInfo();
+}
+
+inline const AMDGPUFrameLowering *AMDGPUSubtarget::getFrameLowering() const {
+ if (getGeneration() >= SOUTHERN_ISLANDS)
+ return static_cast<const SISubtarget *>(this)->getFrameLowering();
+
+ return static_cast<const R600Subtarget *>(this)->getFrameLowering();
+}
+
+inline const AMDGPUTargetLowering *AMDGPUSubtarget::getTargetLowering() const {
+ if (getGeneration() >= SOUTHERN_ISLANDS)
+ return static_cast<const SISubtarget *>(this)->getTargetLowering();
+
+ return static_cast<const R600Subtarget *>(this)->getTargetLowering();
+}
+
+inline const AMDGPURegisterInfo *AMDGPUSubtarget::getRegisterInfo() const {
+ if (getGeneration() >= SOUTHERN_ISLANDS)
+ return static_cast<const SISubtarget *>(this)->getRegisterInfo();
+
+ return static_cast<const R600Subtarget *>(this)->getRegisterInfo();
+}
+
} // End namespace llvm
#endif
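
The inline accessors at the end of the header replace the old unique_ptr members with a downcast keyed on the generation: callers that only have an AMDGPUSubtarget keep a single entry point, while R600Subtarget and SISubtarget expose concrete return types. A compilable sketch of the idiom, with all names (Subtarget, R600Sub, SISub) hypothetical:

struct InstrInfo { };
struct R600InstrInfo final : InstrInfo { };
struct SIInstrInfo final : InstrInfo { };

class Subtarget {
public:
  enum Generation { R600Gen, SIGen };
  explicit Subtarget(Generation G) : Gen(G) {}
  Generation getGeneration() const { return Gen; }
  inline const InstrInfo *getInstrInfo() const; // dispatches on generation
private:
  Generation Gen;
};

class R600Sub final : public Subtarget {
  R600InstrInfo II;
public:
  R600Sub() : Subtarget(R600Gen) {}
  const R600InstrInfo *getInstrInfo() const { return &II; }
};

class SISub final : public Subtarget {
  SIInstrInfo II;
public:
  SISub() : Subtarget(SIGen) {}
  const SIInstrInfo *getInstrInfo() const { return &II; }
};

// Safe because the generation uniquely determines which final subclass
// was constructed, mirroring the SOUTHERN_ISLANDS cutover above.
inline const InstrInfo *Subtarget::getInstrInfo() const {
  if (getGeneration() >= SIGen)
    return static_cast<const SISub *>(this)->getInstrInfo();
  return static_cast<const R600Sub *>(this)->getInstrInfo();
}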
diff --git a/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 519ae5cc748d..3e53f52c689f 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -14,19 +14,23 @@
//===----------------------------------------------------------------------===//
#include "AMDGPUTargetMachine.h"
-#include "AMDGPUTargetObjectFile.h"
#include "AMDGPU.h"
+#include "AMDGPUCallLowering.h"
+#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "R600ISelLowering.h"
#include "R600InstrInfo.h"
#include "R600MachineScheduler.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
+
#include "llvm/Analysis/Passes.h"
+#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/IR/LegacyPassManager.h"
@@ -34,10 +38,35 @@
#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"
-#include <llvm/CodeGen/Passes.h>
+#include "llvm/Transforms/Scalar/GVN.h"
+#include "llvm/Transforms/Vectorize.h"
using namespace llvm;
+static cl::opt<bool> EnableR600StructurizeCFG(
+ "r600-ir-structurize",
+ cl::desc("Use StructurizeCFG IR pass"),
+ cl::init(true));
+
+static cl::opt<bool> EnableSROA(
+ "amdgpu-sroa",
+ cl::desc("Run SROA after promote alloca pass"),
+ cl::ReallyHidden,
+ cl::init(true));
+
+static cl::opt<bool> EnableR600IfConvert(
+ "r600-if-convert",
+ cl::desc("Use if conversion pass"),
+ cl::ReallyHidden,
+ cl::init(true));
+
+// Option to disable vectorizer for tests.
+static cl::opt<bool> EnableLoadStoreVectorizer(
+ "amdgpu-load-store-vectorizer",
+ cl::desc("Enable load store vectorizer"),
+ cl::init(false),
+ cl::Hidden);
+
extern "C" void LLVMInitializeAMDGPUTarget() {
// Register the target
RegisterTargetMachine<R600TargetMachine> X(TheAMDGPUTarget);
@@ -47,17 +76,22 @@ extern "C" void LLVMInitializeAMDGPUTarget() {
initializeSILowerI1CopiesPass(*PR);
initializeSIFixSGPRCopiesPass(*PR);
initializeSIFoldOperandsPass(*PR);
- initializeSIFixSGPRLiveRangesPass(*PR);
+ initializeSIShrinkInstructionsPass(*PR);
initializeSIFixControlFlowLiveIntervalsPass(*PR);
initializeSILoadStoreOptimizerPass(*PR);
initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
initializeAMDGPUAnnotateUniformValuesPass(*PR);
+ initializeAMDGPUPromoteAllocaPass(*PR);
+ initializeAMDGPUCodeGenPreparePass(*PR);
+ initializeSIAnnotateControlFlowPass(*PR);
+ initializeSIDebuggerInsertNopsPass(*PR);
+ initializeSIInsertWaitsPass(*PR);
+ initializeSIWholeQuadModePass(*PR);
+ initializeSILowerControlFlowPass(*PR);
}
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
- if (TT.getOS() == Triple::AMDHSA)
- return make_unique<AMDGPUHSATargetObjectFile>();
-
return make_unique<AMDGPUTargetObjectFile>();
}
@@ -73,60 +107,156 @@ static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
createSIMachineScheduler);
-static std::string computeDataLayout(const Triple &TT) {
- std::string Ret = "e-p:32:32";
-
- if (TT.getArch() == Triple::amdgcn) {
- // 32-bit private, local, and region pointers. 64-bit global and constant.
- Ret += "-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64";
+static StringRef computeDataLayout(const Triple &TT) {
+ if (TT.getArch() == Triple::r600) {
+ // 32-bit pointers.
+ return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
+ "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
}
- Ret += "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
- "-v512:512-v1024:1024-v2048:2048-n32:64";
+ // 32-bit private, local, and region pointers. 64-bit global, constant and
+ // flat.
+ return "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32"
+ "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
+ "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
+}
+
+LLVM_READNONE
+static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
+ if (!GPU.empty())
+ return GPU;
- return Ret;
+ // HSA only supports CI+, so change the default GPU to a CI for HSA.
+ if (TT.getArch() == Triple::amdgcn)
+ return (TT.getOS() == Triple::AMDHSA) ? "kaveri" : "tahiti";
+
+ return "r600";
+}
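
getGPUOrDefault encodes two defaults worth calling out: a bare amdgcn triple falls back to tahiti (an SI part), but amdhsa bumps the default to kaveri because HSA requires CI or newer hardware. A tiny mirror of that decision tree (a hypothetical helper, not the LLVM function):

#include <string>

std::string gpuOrDefault(bool IsGCN, bool IsHSA, const std::string &GPU) {
  if (!GPU.empty())
    return GPU;                         // an explicit -mcpu always wins
  if (IsGCN)
    return IsHSA ? "kaveri" : "tahiti"; // HSA needs CI+, so no SI default
  return "r600";
}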
+
+static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
+ // The AMDGPU toolchain only supports generating shared objects, so we
+ // must always use PIC.
+ return Reloc::PIC_;
}
AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
- TargetOptions Options, Reloc::Model RM,
+ TargetOptions Options,
+ Optional<Reloc::Model> RM,
CodeModel::Model CM,
CodeGenOpt::Level OptLevel)
- : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options, RM, CM,
- OptLevel),
- TLOF(createTLOF(getTargetTriple())), Subtarget(TT, CPU, FS, *this),
- IntrinsicInfo() {
+ : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
+ FS, Options, getEffectiveRelocModel(RM), CM, OptLevel),
+ TLOF(createTLOF(getTargetTriple())),
+ IntrinsicInfo() {
setRequiresStructuredCFG(true);
initAsmInfo();
}
AMDGPUTargetMachine::~AMDGPUTargetMachine() { }
+StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
+ Attribute GPUAttr = F.getFnAttribute("target-cpu");
+ return GPUAttr.hasAttribute(Attribute::None) ?
+ getTargetCPU() : GPUAttr.getValueAsString();
+}
+
+StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
+ Attribute FSAttr = F.getFnAttribute("target-features");
+
+ return FSAttr.hasAttribute(Attribute::None) ?
+ getTargetFeatureString() :
+ FSAttr.getValueAsString();
+}
+
//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//
R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
- StringRef FS, StringRef CPU,
- TargetOptions Options, Reloc::Model RM,
+ StringRef CPU, StringRef FS,
+ TargetOptions Options,
+ Optional<Reloc::Model> RM,
CodeModel::Model CM, CodeGenOpt::Level OL)
- : AMDGPUTargetMachine(T, TT, FS, CPU, Options, RM, CM, OL) {}
+ : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
+
+const R600Subtarget *R600TargetMachine::getSubtargetImpl(
+ const Function &F) const {
+ StringRef GPU = getGPUName(F);
+ StringRef FS = getFeatureString(F);
+
+ SmallString<128> SubtargetKey(GPU);
+ SubtargetKey.append(FS);
+
+ auto &I = SubtargetMap[SubtargetKey];
+ if (!I) {
+ // This needs to be done before we create a new subtarget since any
+ // creation will depend on the TM and the code generation flags on the
+ // function that reside in TargetOptions.
+ resetTargetOptions(F);
+ I = llvm::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
+ }
+
+ return I.get();
+}
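
getSubtargetImpl above memoizes one subtarget per distinct (GPU, feature string) pair, so functions carrying different target-cpu/target-features attributes each get a matching subtarget without rebuilding one per function. The same caching idiom with standard containers standing in for StringMap and SmallString (all names illustrative):

#include <memory>
#include <string>
#include <unordered_map>

struct Subtarget { // stand-in for R600Subtarget / SISubtarget
  std::string GPU, Features;
};

class SubtargetCache {
  mutable std::unordered_map<std::string, std::unique_ptr<Subtarget>> Map;

public:
  const Subtarget *get(const std::string &GPU, const std::string &FS) const {
    auto &Slot = Map[GPU + FS]; // key is the concatenated GPU + features
    if (!Slot)                  // construct lazily, once per unique key
      Slot.reset(new Subtarget{GPU, FS});
    return Slot.get();
  }
};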
//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//
+#ifdef LLVM_BUILD_GLOBAL_ISEL
+namespace {
+struct SIGISelActualAccessor : public GISelAccessor {
+ std::unique_ptr<AMDGPUCallLowering> CallLoweringInfo;
+ const AMDGPUCallLowering *getCallLowering() const override {
+ return CallLoweringInfo.get();
+ }
+};
+} // End anonymous namespace.
+#endif
+
GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
- StringRef FS, StringRef CPU,
- TargetOptions Options, Reloc::Model RM,
+ StringRef CPU, StringRef FS,
+ TargetOptions Options,
+ Optional<Reloc::Model> RM,
CodeModel::Model CM, CodeGenOpt::Level OL)
- : AMDGPUTargetMachine(T, TT, FS, CPU, Options, RM, CM, OL) {}
+ : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
+
+const SISubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
+ StringRef GPU = getGPUName(F);
+ StringRef FS = getFeatureString(F);
+
+ SmallString<128> SubtargetKey(GPU);
+ SubtargetKey.append(FS);
+
+ auto &I = SubtargetMap[SubtargetKey];
+ if (!I) {
+ // This needs to be done before we create a new subtarget since any
+ // creation will depend on the TM and the code generation flags on the
+ // function that reside in TargetOptions.
+ resetTargetOptions(F);
+ I = llvm::make_unique<SISubtarget>(TargetTriple, GPU, FS, *this);
+
+#ifndef LLVM_BUILD_GLOBAL_ISEL
+ GISelAccessor *GISel = new GISelAccessor();
+#else
+ SIGISelActualAccessor *GISel = new SIGISelActualAccessor();
+ GISel->CallLoweringInfo.reset(
+ new AMDGPUCallLowering(*I->getTargetLowering()));
+#endif
+
+ I->setGISelAccessor(*GISel);
+ }
+
+ return I.get();
+}
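
The GISelAccessor indirection above exists so the subtarget never has to know at compile time whether GlobalISel was built: without LLVM_BUILD_GLOBAL_ISEL the subtarget receives a base accessor whose getters return null (tripping the assert in SISubtarget::getCallLowering), and with it the accessor owns the real call-lowering object. A stripped-down sketch of the pattern, with illustrative names:

struct CallLowering { }; // stand-in for AMDGPUCallLowering

// Base accessor: getters return null, so a build without GlobalISel still
// links; any accidental use is caught by the subtarget's assert.
struct GISelAccessor {
  virtual ~GISelAccessor() {}
  virtual const CallLowering *getCallLowering() const { return nullptr; }
};

// Real accessor, only instantiated when GlobalISel is compiled in.
struct ActualGISelAccessor final : GISelAccessor {
  CallLowering Lowering;
  const CallLowering *getCallLowering() const override { return &Lowering; }
};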
//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//
namespace {
+
class AMDGPUPassConfig : public TargetPassConfig {
public:
AMDGPUPassConfig(TargetMachine *TM, PassManagerBase &PM)
@@ -142,16 +272,8 @@ public:
return getTM<AMDGPUTargetMachine>();
}
- ScheduleDAGInstrs *
- createMachineScheduler(MachineSchedContext *C) const override {
- const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
- if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
- return createR600MachineScheduler(C);
- else if (ST.enableSIScheduler())
- return createSIMachineScheduler(C);
- return nullptr;
- }
-
+ void addEarlyCSEOrGVNPass();
+ void addStraightLineScalarOptimizationPasses();
void addIRPasses() override;
void addCodeGenPrepare() override;
bool addPreISel() override;
@@ -159,27 +281,44 @@ public:
bool addGCPasses() override;
};
-class R600PassConfig : public AMDGPUPassConfig {
+class R600PassConfig final : public AMDGPUPassConfig {
public:
R600PassConfig(TargetMachine *TM, PassManagerBase &PM)
: AMDGPUPassConfig(TM, PM) { }
+ ScheduleDAGInstrs *createMachineScheduler(
+ MachineSchedContext *C) const override {
+ return createR600MachineScheduler(C);
+ }
+
bool addPreISel() override;
void addPreRegAlloc() override;
void addPreSched2() override;
void addPreEmitPass() override;
};
-class GCNPassConfig : public AMDGPUPassConfig {
+class GCNPassConfig final : public AMDGPUPassConfig {
public:
GCNPassConfig(TargetMachine *TM, PassManagerBase &PM)
: AMDGPUPassConfig(TM, PM) { }
+
+ GCNTargetMachine &getGCNTargetMachine() const {
+ return getTM<GCNTargetMachine>();
+ }
+
+ ScheduleDAGInstrs *
+ createMachineScheduler(MachineSchedContext *C) const override;
+
bool addPreISel() override;
+ void addMachineSSAOptimization() override;
bool addInstSelector() override;
+#ifdef LLVM_BUILD_GLOBAL_ISEL
+ bool addIRTranslator() override;
+ bool addRegBankSelect() override;
+#endif
void addFastRegAlloc(FunctionPass *RegAllocPass) override;
void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
void addPreRegAlloc() override;
- void addPostRegAlloc() override;
void addPreSched2() override;
void addPreEmitPass() override;
};
@@ -188,12 +327,39 @@ public:
TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
return TargetIRAnalysis([this](const Function &F) {
- return TargetTransformInfo(
- AMDGPUTTIImpl(this, F.getParent()->getDataLayout()));
+ return TargetTransformInfo(AMDGPUTTIImpl(this, F));
});
}
+void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
+ if (getOptLevel() == CodeGenOpt::Aggressive)
+ addPass(createGVNPass());
+ else
+ addPass(createEarlyCSEPass());
+}
+
+void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
+ addPass(createSeparateConstOffsetFromGEPPass());
+ addPass(createSpeculativeExecutionPass());
+ // ReassociateGEPs exposes more opportunities for SLSR. See
+ // the example in reassociate-geps-and-slsr.ll.
+ addPass(createStraightLineStrengthReducePass());
+ // SeparateConstOffsetFromGEP and SLSR create common expressions that GVN or
+ // EarlyCSE can reuse.
+ addEarlyCSEOrGVNPass();
+ // Run NaryReassociate after EarlyCSE/GVN to be more effective.
+ addPass(createNaryReassociatePass());
+ // NaryReassociate on GEPs creates redundant common expressions, so run
+ // EarlyCSE after it.
+ addPass(createEarlyCSEPass());
+}
+
void AMDGPUPassConfig::addIRPasses() {
+ // There is no reason to run these.
+ disablePass(&StackMapLivenessID);
+ disablePass(&FuncletLayoutID);
+ disablePass(&PatchableFunctionID);
+
// Function calls are not supported, so make sure we inline everything.
addPass(createAMDGPUAlwaysInlinePass());
addPass(createAlwaysInlinerPass());
@@ -207,24 +373,43 @@ void AMDGPUPassConfig::addIRPasses() {
// Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
addPass(createAMDGPUOpenCLImageTypeLoweringPass());
+ const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
+ if (TM.getOptLevel() > CodeGenOpt::None) {
+ addPass(createAMDGPUPromoteAlloca(&TM));
+
+ if (EnableSROA)
+ addPass(createSROAPass());
+ }
+
+ addStraightLineScalarOptimizationPasses();
+
TargetPassConfig::addIRPasses();
+
+ // EarlyCSE is not always strong enough to clean up what LSR produces. For
+ // example, GVN can combine
+ //
+ // %0 = add %a, %b
+ // %1 = add %b, %a
+ //
+ // and
+ //
+ // %0 = shl nsw %a, 2
+ // %1 = shl %a, 2
+ //
+ // but EarlyCSE can do neither of them.
+ if (getOptLevel() != CodeGenOpt::None)
+ addEarlyCSEOrGVNPass();
}
void AMDGPUPassConfig::addCodeGenPrepare() {
- const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
- if (ST.isPromoteAllocaEnabled()) {
- addPass(createAMDGPUPromoteAlloca(ST));
- addPass(createSROAPass());
- }
TargetPassConfig::addCodeGenPrepare();
+
+ if (EnableLoadStoreVectorizer)
+ addPass(createLoadStoreVectorizerPass());
}
-bool
-AMDGPUPassConfig::addPreISel() {
- const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
+bool AMDGPUPassConfig::addPreISel() {
addPass(createFlattenCFGPass());
- if (ST.IsIRStructurizerEnabled())
- addPass(createStructurizeCFGPass());
return false;
}
@@ -244,7 +429,9 @@ bool AMDGPUPassConfig::addGCPasses() {
bool R600PassConfig::addPreISel() {
AMDGPUPassConfig::addPreISel();
- addPass(createR600TextureIntrinsicsReplacer());
+
+ if (EnableR600StructurizeCFG)
+ addPass(createStructurizeCFGPass());
return false;
}
@@ -253,9 +440,8 @@ void R600PassConfig::addPreRegAlloc() {
}
void R600PassConfig::addPreSched2() {
- const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
addPass(createR600EmitClauseMarkers(), false);
- if (ST.isIfCvtEnabled())
+ if (EnableR600IfConvert)
addPass(&IfConverterID, false);
addPass(createR600ClauseMergePass(*TM), false);
}
@@ -276,32 +462,62 @@ TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
// GCN Pass Setup
//===----------------------------------------------------------------------===//
+ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
+ MachineSchedContext *C) const {
+ const SISubtarget &ST = C->MF->getSubtarget<SISubtarget>();
+ if (ST.enableSIScheduler())
+ return createSIMachineScheduler(C);
+ return nullptr;
+}
+
bool GCNPassConfig::addPreISel() {
AMDGPUPassConfig::addPreISel();
// FIXME: We need to run a pass to propagate the attributes when calls are
// supported.
addPass(&AMDGPUAnnotateKernelFeaturesID);
-
+ addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
addPass(createSinkingPass());
addPass(createSITypeRewriter());
- addPass(createSIAnnotateControlFlowPass());
addPass(createAMDGPUAnnotateUniformValues());
+ addPass(createSIAnnotateControlFlowPass());
return false;
}
+void GCNPassConfig::addMachineSSAOptimization() {
+ TargetPassConfig::addMachineSSAOptimization();
+
+ // We want to fold operands after PeepholeOptimizer has run (or as part of
+ // it), because it will eliminate extra copies making it easier to fold the
+ // real source operand. We want to eliminate dead instructions after, so that
+ // we see fewer uses of the copies. We then need to clean up the dead
+ // instructions leftover after the operands are folded as well.
+ //
+ // XXX - Can we get away without running DeadMachineInstructionElim again?
+ addPass(&SIFoldOperandsID);
+ addPass(&DeadMachineInstructionElimID);
+}
+
bool GCNPassConfig::addInstSelector() {
AMDGPUPassConfig::addInstSelector();
addPass(createSILowerI1CopiesPass());
addPass(&SIFixSGPRCopiesID);
- addPass(createSIFoldOperandsPass());
return false;
}
-void GCNPassConfig::addPreRegAlloc() {
- const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
+#ifdef LLVM_BUILD_GLOBAL_ISEL
+bool GCNPassConfig::addIRTranslator() {
+ addPass(new IRTranslator());
+ return false;
+}
+bool GCNPassConfig::addRegBankSelect() {
+ return false;
+}
+#endif
+
+void GCNPassConfig::addPreRegAlloc() {
// This needs to be run directly before register allocation because
// earlier passes might recompute live intervals.
// TODO: handle CodeGenOpt::None; fast RA ignores spill weights set by the pass
@@ -309,42 +525,48 @@ void GCNPassConfig::addPreRegAlloc() {
insertPass(&MachineSchedulerID, &SIFixControlFlowLiveIntervalsID);
}
- if (getOptLevel() > CodeGenOpt::None && ST.loadStoreOptEnabled()) {
+ if (getOptLevel() > CodeGenOpt::None) {
// Don't do this with no optimizations since it throws away debug info by
// merging nonadjacent loads.
// This should be run after scheduling, but before register allocation. It
// also needs extra copies to the address operand to be eliminated.
+
+ // FIXME: Move pre-RA and remove extra reg coalescer run.
insertPass(&MachineSchedulerID, &SILoadStoreOptimizerID);
insertPass(&MachineSchedulerID, &RegisterCoalescerID);
}
- addPass(createSIShrinkInstructionsPass(), false);
+
+ addPass(createSIShrinkInstructionsPass());
+ addPass(createSIWholeQuadModePass());
}
void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
- addPass(&SIFixSGPRLiveRangesID);
TargetPassConfig::addFastRegAlloc(RegAllocPass);
}
void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
- // We want to run this after LiveVariables is computed to avoid computing them
- // twice.
- // FIXME: We shouldn't disable the verifier here. r249087 introduced a failure
- // that needs to be fixed.
- insertPass(&LiveVariablesID, &SIFixSGPRLiveRangesID, /*VerifyAfter=*/false);
TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}
-void GCNPassConfig::addPostRegAlloc() {
- addPass(createSIShrinkInstructionsPass(), false);
-}
-
void GCNPassConfig::addPreSched2() {
}
void GCNPassConfig::addPreEmitPass() {
- addPass(createSIInsertWaits(*TM), false);
- addPass(createSILowerControlFlowPass(*TM), false);
+ // The hazard recognizer that runs as part of the post-ra scheduler does not
+ // guarantee to be able handle all hazards correctly. This is because if there
+ // are multiple scheduling regions in a basic block, the regions are scheduled
+ // bottom up, so when we begin to schedule a region we don't know what
+ // instructions were emitted directly before it.
+ //
+ // Here we add a stand-alone hazard recognizer pass which can handle all
+ // cases.
+ addPass(&PostRAHazardRecognizerID);
+
+ addPass(createSIInsertWaitsPass());
+ addPass(createSIShrinkInstructionsPass());
+ addPass(createSILowerControlFlowPass());
+ addPass(createSIDebuggerInsertNopsPass());
}
TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
diff --git a/lib/Target/AMDGPU/AMDGPUTargetMachine.h b/lib/Target/AMDGPU/AMDGPUTargetMachine.h
index 236e3f824030..b0eb3a9a15f7 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetMachine.h
+++ b/lib/Target/AMDGPU/AMDGPUTargetMachine.h
@@ -12,15 +12,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUTARGETMACHINE_H
-#define LLVM_LIB_TARGET_R600_AMDGPUTARGETMACHINE_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETMACHINE_H
+#define LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETMACHINE_H
-#include "AMDGPUFrameLowering.h"
-#include "AMDGPUInstrInfo.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
-#include "R600ISelLowering.h"
-#include "llvm/IR/DataLayout.h"
namespace llvm {
@@ -29,23 +25,23 @@ namespace llvm {
//===----------------------------------------------------------------------===//
class AMDGPUTargetMachine : public LLVMTargetMachine {
-private:
-
protected:
std::unique_ptr<TargetLoweringObjectFile> TLOF;
- AMDGPUSubtarget Subtarget;
AMDGPUIntrinsicInfo IntrinsicInfo;
+ StringRef getGPUName(const Function &F) const;
+ StringRef getFeatureString(const Function &F) const;
+
public:
- AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef FS,
- StringRef CPU, TargetOptions Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OL);
+ AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, TargetOptions Options,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
~AMDGPUTargetMachine();
- const AMDGPUSubtarget *getSubtargetImpl() const { return &Subtarget; }
- const AMDGPUSubtarget *getSubtargetImpl(const Function &) const override {
- return &Subtarget;
- }
+ const AMDGPUSubtarget *getSubtargetImpl() const;
+ const AMDGPUSubtarget *getSubtargetImpl(const Function &) const override;
+
const AMDGPUIntrinsicInfo *getIntrinsicInfo() const override {
return &IntrinsicInfo;
}
@@ -60,30 +56,47 @@ public:
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//
-class R600TargetMachine : public AMDGPUTargetMachine {
+class R600TargetMachine final : public AMDGPUTargetMachine {
+private:
+ mutable StringMap<std::unique_ptr<R600Subtarget>> SubtargetMap;
public:
- R600TargetMachine(const Target &T, const Triple &TT, StringRef FS,
- StringRef CPU, TargetOptions Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OL);
+ R600TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, TargetOptions Options,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+
+ const R600Subtarget *getSubtargetImpl(const Function &) const override;
};
//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//
-class GCNTargetMachine : public AMDGPUTargetMachine {
+class GCNTargetMachine final : public AMDGPUTargetMachine {
+private:
+ mutable StringMap<std::unique_ptr<SISubtarget>> SubtargetMap;
public:
- GCNTargetMachine(const Target &T, const Triple &TT, StringRef FS,
- StringRef CPU, TargetOptions Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OL);
+ GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, TargetOptions Options,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+
+ const SISubtarget *getSubtargetImpl(const Function &) const override;
};
+inline const AMDGPUSubtarget *AMDGPUTargetMachine::getSubtargetImpl(
+ const Function &F) const {
+ if (getTargetTriple().getArch() == Triple::amdgcn)
+ return static_cast<const GCNTargetMachine *>(this)->getSubtargetImpl(F);
+ return static_cast<const R600TargetMachine *>(this)->getSubtargetImpl(F);
+}
+
} // End namespace llvm
#endif
diff --git a/lib/Target/AMDGPU/AMDGPUTargetObjectFile.cpp b/lib/Target/AMDGPU/AMDGPUTargetObjectFile.cpp
index e050f21091ba..03d1e2c764de 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetObjectFile.cpp
+++ b/lib/Target/AMDGPU/AMDGPUTargetObjectFile.cpp
@@ -29,59 +29,3 @@ MCSection *AMDGPUTargetObjectFile::SelectSectionForGlobal(const GlobalValue *GV,
return TargetLoweringObjectFileELF::SelectSectionForGlobal(GV, Kind, Mang, TM);
}
-
-//===----------------------------------------------------------------------===//
-// HSA Object File
-//===----------------------------------------------------------------------===//
-
-
-void AMDGPUHSATargetObjectFile::Initialize(MCContext &Ctx,
- const TargetMachine &TM){
- TargetLoweringObjectFileELF::Initialize(Ctx, TM);
- InitializeELF(TM.Options.UseInitArray);
-
- TextSection = AMDGPU::getHSATextSection(Ctx);
-
- DataGlobalAgentSection = AMDGPU::getHSADataGlobalAgentSection(Ctx);
- DataGlobalProgramSection = AMDGPU::getHSADataGlobalProgramSection(Ctx);
-
- RodataReadonlyAgentSection = AMDGPU::getHSARodataReadonlyAgentSection(Ctx);
-}
-
-bool AMDGPUHSATargetObjectFile::isAgentAllocationSection(
- const char *SectionName) const {
- return cast<MCSectionELF>(DataGlobalAgentSection)
- ->getSectionName()
- .equals(SectionName);
-}
-
-bool AMDGPUHSATargetObjectFile::isAgentAllocation(const GlobalValue *GV) const {
- // Read-only segments can only have agent allocation.
- return AMDGPU::isReadOnlySegment(GV) ||
- (AMDGPU::isGlobalSegment(GV) && GV->hasSection() &&
- isAgentAllocationSection(GV->getSection()));
-}
-
-bool AMDGPUHSATargetObjectFile::isProgramAllocation(
- const GlobalValue *GV) const {
- // The default for global segments is program allocation.
- return AMDGPU::isGlobalSegment(GV) && !isAgentAllocation(GV);
-}
-
-MCSection *AMDGPUHSATargetObjectFile::SelectSectionForGlobal(
- const GlobalValue *GV, SectionKind Kind,
- Mangler &Mang,
- const TargetMachine &TM) const {
- if (Kind.isText() && !GV->hasComdat())
- return getTextSection();
-
- if (AMDGPU::isGlobalSegment(GV)) {
- if (isAgentAllocation(GV))
- return DataGlobalAgentSection;
-
- if (isProgramAllocation(GV))
- return DataGlobalProgramSection;
- }
-
- return AMDGPUTargetObjectFile::SelectSectionForGlobal(GV, Kind, Mang, TM);
-}
diff --git a/lib/Target/AMDGPU/AMDGPUTargetObjectFile.h b/lib/Target/AMDGPU/AMDGPUTargetObjectFile.h
index 921341ebb897..f530e0952a74 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetObjectFile.h
+++ b/lib/Target/AMDGPU/AMDGPUTargetObjectFile.h
@@ -28,24 +28,6 @@ class AMDGPUTargetObjectFile : public TargetLoweringObjectFileELF {
const TargetMachine &TM) const override;
};
-class AMDGPUHSATargetObjectFile final : public AMDGPUTargetObjectFile {
-private:
- MCSection *DataGlobalAgentSection;
- MCSection *DataGlobalProgramSection;
- MCSection *RodataReadonlyAgentSection;
-
- bool isAgentAllocationSection(const char *SectionName) const;
- bool isAgentAllocation(const GlobalValue *GV) const;
- bool isProgramAllocation(const GlobalValue *GV) const;
-
-public:
- void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
-
- MCSection *SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler &Mang,
- const TargetMachine &TM) const override;
-};
-
} // end namespace llvm
#endif
diff --git a/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 54a003d6a9cf..3d630fe3ea9d 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -21,6 +21,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
@@ -28,6 +29,7 @@ using namespace llvm;
#define DEBUG_TYPE "AMDGPUtti"
+
void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L,
TTI::UnrollingPreferences &UP) {
UP.Threshold = 300; // Twice the default.
@@ -78,11 +80,127 @@ unsigned AMDGPUTTIImpl::getRegisterBitWidth(bool Vector) {
return Vector ? 0 : 32;
}
+unsigned AMDGPUTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) {
+ switch (AddrSpace) {
+ case AMDGPUAS::GLOBAL_ADDRESS:
+ case AMDGPUAS::CONSTANT_ADDRESS:
+ case AMDGPUAS::FLAT_ADDRESS:
+ return 128;
+ case AMDGPUAS::LOCAL_ADDRESS:
+ case AMDGPUAS::REGION_ADDRESS:
+ return 64;
+ case AMDGPUAS::PRIVATE_ADDRESS:
+ return 8 * ST->getMaxPrivateElementSize();
+ default:
+ if (ST->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS &&
+ (AddrSpace == AMDGPUAS::PARAM_D_ADDRESS ||
+ AddrSpace == AMDGPUAS::PARAM_I_ADDRESS ||
+ (AddrSpace >= AMDGPUAS::CONSTANT_BUFFER_0 &&
+ AddrSpace <= AMDGPUAS::CONSTANT_BUFFER_15)))
+ return 128;
+ llvm_unreachable("unhandled address space");
+ }
+}
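
To make the width table above concrete: with a MaxPrivateElementSize of 4 bytes (an assumed value; it actually comes from the subtarget features), private accesses vectorize to at most 32 bits per slot, while global, constant and flat allow full 128-bit dwordx4 accesses and LDS tops out at 64 bits:

constexpr unsigned MaxPrivateElementSize = 4; // assumed subtarget default
constexpr unsigned PrivateBits = 8 * MaxPrivateElementSize;
constexpr unsigned GlobalBits = 128;          // dwordx4 loads/stores
constexpr unsigned LocalBits = 64;            // e.g. ds_read_b64
static_assert(PrivateBits == 32, "one dword per private slot");
static_assert(GlobalBits / 32 == 4 && LocalBits / 32 == 2, "dword counts");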
+
unsigned AMDGPUTTIImpl::getMaxInterleaveFactor(unsigned VF) {
// Semi-arbitrary large amount.
return 64;
}
+int AMDGPUTTIImpl::getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
+ TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
+ TTI::OperandValueProperties Opd2PropInfo) {
+
+ EVT OrigTy = TLI->getValueType(DL, Ty);
+ if (!OrigTy.isSimple()) {
+ return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
+ Opd1PropInfo, Opd2PropInfo);
+ }
+
+ // Legalize the type.
+ std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
+ int ISD = TLI->InstructionOpcodeToISD(Opcode);
+
+ // We don't have any legal vector operations, only legal vector types, so we
+ // need to account for the cost of splitting vectors.
+ unsigned NElts = LT.second.isVector() ?
+ LT.second.getVectorNumElements() : 1;
+
+ MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;
+
+ switch (ISD) {
+ case ISD::SHL:
+ case ISD::SRL:
+ case ISD::SRA: {
+ if (SLT == MVT::i64)
+ return get64BitInstrCost() * LT.first * NElts;
+
+ // i32
+ return getFullRateInstrCost() * LT.first * NElts;
+ }
+ case ISD::ADD:
+ case ISD::SUB:
+ case ISD::AND:
+ case ISD::OR:
+ case ISD::XOR: {
+ if (SLT == MVT::i64) {
+ // and, or and xor are typically split into 2 VALU instructions.
+ return 2 * getFullRateInstrCost() * LT.first * NElts;
+ }
+
+ return LT.first * NElts * getFullRateInstrCost();
+ }
+ case ISD::MUL: {
+ const int QuarterRateCost = getQuarterRateInstrCost();
+ if (SLT == MVT::i64) {
+ const int FullRateCost = getFullRateInstrCost();
+ return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
+ }
+
+ // i32
+ return QuarterRateCost * NElts * LT.first;
+ }
+ case ISD::FADD:
+ case ISD::FSUB:
+ case ISD::FMUL:
+ if (SLT == MVT::f64)
+ return LT.first * NElts * get64BitInstrCost();
+
+ if (SLT == MVT::f32 || SLT == MVT::f16)
+ return LT.first * NElts * getFullRateInstrCost();
+ break;
+
+ case ISD::FDIV:
+ case ISD::FREM:
+ // FIXME: frem should be handled separately. Most of its cost is the fdiv
+ // it contains, but the current lowering is also not entirely correct.
+ if (SLT == MVT::f64) {
+ int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();
+
+ // Add cost of workaround.
+ if (ST->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
+ Cost += 3 * getFullRateInstrCost();
+
+ return LT.first * Cost * NElts;
+ }
+
+ // Assuming no fp32 denormals lowering.
+ if (SLT == MVT::f32 || SLT == MVT::f16) {
+ assert(!ST->hasFP32Denormals() && "will change when supported");
+ int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();
+ return LT.first * NElts * Cost;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
+ Opd1PropInfo, Opd2PropInfo);
+}
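
Plugging the rate helpers into the i64 multiply case gives a feel for the scale: with TCC_Basic == 1, a full-rate op costs 1 and a quarter-rate op costs 3, so one legalized i64 mul comes to 4*3 + 4*1 = 16 basic units before the LT.first and NElts multipliers. A worked check of that arithmetic (the v2i64 line assumes a hypothetical legalization with LT.first == 1 and NElts == 2):

constexpr int TCCBasic = 1;               // TargetTransformInfo::TCC_Basic
constexpr int FullRate = TCCBasic;        // getFullRateInstrCost()
constexpr int QuarterRate = 3 * TCCBasic; // getQuarterRateInstrCost()

// Per the ISD::MUL case above: four quarter-rate partial products plus
// (2 * 2) full-rate adds.
constexpr int I64MulCost = 4 * QuarterRate + (2 * 2) * FullRate;
static_assert(I64MulCost == 16, "one i64 mul is 16 basic units");
static_assert(I64MulCost * 1 * 2 == 32, "a split-free v2i64 mul doubles it");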
+
unsigned AMDGPUTTIImpl::getCFInstrCost(unsigned Opcode) {
// XXX - For some reason this isn't called for switch.
switch (Opcode) {
@@ -98,6 +216,11 @@ int AMDGPUTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
unsigned Index) {
switch (Opcode) {
case Instruction::ExtractElement:
+ case Instruction::InsertElement:
+ // Extracts are just reads of a subregister, so are free. Inserts are
+ // considered free because we don't want to have any cost for scalarizing
+ // operations, and we don't have to copy into a different register class.
+
// Dynamic indexing isn't free and is best avoided.
return Index == ~0u ? 2 : 0;
default:
@@ -115,6 +238,9 @@ static bool isIntrinsicSourceOfDivergence(const TargetIntrinsicInfo *TII,
// IntrinsicsAMDGPU.td
break;
+ case Intrinsic::amdgcn_workitem_id_x:
+ case Intrinsic::amdgcn_workitem_id_y:
+ case Intrinsic::amdgcn_workitem_id_z:
case Intrinsic::amdgcn_interp_p1:
case Intrinsic::amdgcn_interp_p2:
case Intrinsic::amdgcn_mbcnt_hi:
@@ -122,6 +248,31 @@ static bool isIntrinsicSourceOfDivergence(const TargetIntrinsicInfo *TII,
case Intrinsic::r600_read_tidig_x:
case Intrinsic::r600_read_tidig_y:
case Intrinsic::r600_read_tidig_z:
+ case Intrinsic::amdgcn_image_atomic_swap:
+ case Intrinsic::amdgcn_image_atomic_add:
+ case Intrinsic::amdgcn_image_atomic_sub:
+ case Intrinsic::amdgcn_image_atomic_smin:
+ case Intrinsic::amdgcn_image_atomic_umin:
+ case Intrinsic::amdgcn_image_atomic_smax:
+ case Intrinsic::amdgcn_image_atomic_umax:
+ case Intrinsic::amdgcn_image_atomic_and:
+ case Intrinsic::amdgcn_image_atomic_or:
+ case Intrinsic::amdgcn_image_atomic_xor:
+ case Intrinsic::amdgcn_image_atomic_inc:
+ case Intrinsic::amdgcn_image_atomic_dec:
+ case Intrinsic::amdgcn_image_atomic_cmpswap:
+ case Intrinsic::amdgcn_buffer_atomic_swap:
+ case Intrinsic::amdgcn_buffer_atomic_add:
+ case Intrinsic::amdgcn_buffer_atomic_sub:
+ case Intrinsic::amdgcn_buffer_atomic_smin:
+ case Intrinsic::amdgcn_buffer_atomic_umin:
+ case Intrinsic::amdgcn_buffer_atomic_smax:
+ case Intrinsic::amdgcn_buffer_atomic_umax:
+ case Intrinsic::amdgcn_buffer_atomic_and:
+ case Intrinsic::amdgcn_buffer_atomic_or:
+ case Intrinsic::amdgcn_buffer_atomic_xor:
+ case Intrinsic::amdgcn_buffer_atomic_cmpswap:
+ case Intrinsic::amdgcn_ps_live:
return true;
}
@@ -129,18 +280,17 @@ static bool isIntrinsicSourceOfDivergence(const TargetIntrinsicInfo *TII,
switch (TII->lookupName((const char *)Name.bytes_begin(), Name.size())) {
default:
return false;
- case AMDGPUIntrinsic::SI_tid:
case AMDGPUIntrinsic::SI_fs_interp:
+ case AMDGPUIntrinsic::SI_fs_constant:
return true;
}
}
static bool isArgPassedInSGPR(const Argument *A) {
const Function *F = A->getParent();
- unsigned ShaderType = AMDGPU::getShaderType(*F);
// Arguments to compute shaders are never a source of divergence.
- if (ShaderType == ShaderType::COMPUTE)
+ if (!AMDGPU::isShader(F->getCallingConv()))
return true;
// For non-compute shaders, SGPR inputs are marked with either inreg or byval.
@@ -169,6 +319,13 @@ bool AMDGPUTTIImpl::isSourceOfDivergence(const Value *V) const {
if (const LoadInst *Load = dyn_cast<LoadInst>(V))
return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
+ // Atomics are divergent because they are executed sequentially: when an
+ // atomic operation refers to the same address in each thread, then each
+ // thread after the first sees the value written by the previous thread as
+ // its original value.
+ if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
+ return true;
+
if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
const TargetMachine &TM = getTLI()->getTargetMachine();
return isIntrinsicSourceOfDivergence(TM.getIntrinsicInfo(), Intrinsic);
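
The atomics rule above is worth a concrete illustration: even when every lane passes identical operands, the returned previous value differs per lane, and a uniform input producing non-uniform results is the definition of divergence. A host-side analogue using std::atomic, with OS threads standing in for the lanes of a wave:

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  std::atomic<int> Counter{0};
  std::vector<std::thread> Lanes;
  int Old[4];

  // Four "lanes" execute the same atomic with the same arguments...
  for (int L = 0; L != 4; ++L)
    Lanes.emplace_back([&Counter, &Old, L] { Old[L] = Counter.fetch_add(1); });
  for (std::thread &T : Lanes)
    T.join();

  // ...yet each observes a different previous value, some permutation of
  // 0, 1, 2, 3, so the result cannot be treated as uniform.
  for (int L = 0; L != 4; ++L)
    std::printf("lane %d saw %d\n", L, Old[L]);
}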
diff --git a/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
index 976afb03443b..a82a07458086 100644
--- a/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
+++ b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -14,18 +14,18 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUTARGETTRANSFORMINFO_H
-#define LLVM_LIB_TARGET_R600_AMDGPUTARGETTRANSFORMINFO_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H
+#define LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETTRANSFORMINFO_H
#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
-#include "llvm/Target/TargetLowering.h"
namespace llvm {
+class AMDGPUTargetLowering;
-class AMDGPUTTIImpl : public BasicTTIImplBase<AMDGPUTTIImpl> {
+class AMDGPUTTIImpl final : public BasicTTIImplBase<AMDGPUTTIImpl> {
typedef BasicTTIImplBase<AMDGPUTTIImpl> BaseT;
typedef TargetTransformInfo TTI;
friend BaseT;
@@ -36,10 +36,33 @@ class AMDGPUTTIImpl : public BasicTTIImplBase<AMDGPUTTIImpl> {
const AMDGPUSubtarget *getST() const { return ST; }
const AMDGPUTargetLowering *getTLI() const { return TLI; }
+
+ static inline int getFullRateInstrCost() {
+ return TargetTransformInfo::TCC_Basic;
+ }
+
+ static inline int getHalfRateInstrCost() {
+ return 2 * TargetTransformInfo::TCC_Basic;
+ }
+
+ // TODO: The size is usually 8 bytes, but it takes 4x as many cycles. Maybe
+ // it should be 2 or 4.
+ static inline int getQuarterRateInstrCost() {
+ return 3 * TargetTransformInfo::TCC_Basic;
+ }
+
+ // On some parts, normal fp64 operations are half rate, and on others
+ // quarter rate. This also applies to some integer operations.
+ inline int get64BitInstrCost() const {
+ return ST->hasHalfRate64Ops() ?
+ getHalfRateInstrCost() : getQuarterRateInstrCost();
+ }
+
public:
- explicit AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const DataLayout &DL)
- : BaseT(TM, DL), ST(TM->getSubtargetImpl()),
- TLI(ST->getTargetLowering()) {}
+ explicit AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
+ : BaseT(TM, F.getParent()->getDataLayout()),
+ ST(TM->getSubtargetImpl(F)),
+ TLI(ST->getTargetLowering()) {}
// Provide value semantics. MSVC requires that we spell all of these out.
AMDGPUTTIImpl(const AMDGPUTTIImpl &Arg)
@@ -54,17 +77,27 @@ public:
TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) {
assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
- return ST->hasBCNT(TyWidth) ? TTI::PSK_FastHardware : TTI::PSK_Software;
+ return TTI::PSK_FastHardware;
}
unsigned getNumberOfRegisters(bool Vector);
unsigned getRegisterBitWidth(bool Vector);
+ unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace);
unsigned getMaxInterleaveFactor(unsigned VF);
+ int getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty,
+ TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
+ TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
+ TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
+ TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None);
+
unsigned getCFInstrCost(unsigned Opcode);
int getVectorInstrCost(unsigned Opcode, Type *ValTy, unsigned Index);
bool isSourceOfDivergence(const Value *V) const;
+
+ unsigned getVectorSplitCost() { return 0; }
};
} // end namespace llvm
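
One last numeric check on the rate helpers above: with TCC_Basic == 1 they return 1, 2 and 3 (quarter rate approximated as 3 rather than 4, per the TODO), so get64BitInstrCost() evaluates to 2 on parts with HalfRate64Ops and 3 everywhere else:

constexpr int TCCBasic = 1; // TargetTransformInfo::TCC_Basic
constexpr int Full = TCCBasic, Half = 2 * TCCBasic, Quarter = 3 * TCCBasic;

constexpr int cost64(bool HasHalfRate64Ops) {
  return HasHalfRate64Ops ? Half : Quarter; // mirrors get64BitInstrCost()
}
static_assert(cost64(true) == 2 && cost64(false) == 3, "64-bit op cost scale");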
diff --git a/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
index 917efd149e00..21de76396b16 100644
--- a/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
+++ b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
@@ -50,8 +50,6 @@ STATISTIC(numSerialPatternMatch, "CFGStructurizer number of serial pattern "
"matched");
STATISTIC(numIfPatternMatch, "CFGStructurizer number of if pattern "
"matched");
-STATISTIC(numLoopcontPatternMatch, "CFGStructurizer number of loop-continue "
- "pattern matched");
STATISTIC(numClonedBlock, "CFGStructurizer cloned blocks");
STATISTIC(numClonedInstr, "CFGStructurizer cloned instructions");
@@ -162,7 +160,7 @@ public:
bool prepare();
bool runOnMachineFunction(MachineFunction &MF) override {
- TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
+ TII = MF.getSubtarget<R600Subtarget>().getInstrInfo();
TRI = &TII->getRegisterInfo();
DEBUG(MF.dump(););
OrderedBlks.clear();
@@ -213,7 +211,6 @@ protected:
int getSCCNum(MachineBasicBlock *MBB) const;
MachineBasicBlock *getLoopLandInfo(MachineLoop *LoopRep) const;
bool hasBackEdge(MachineBasicBlock *MBB) const;
- static unsigned getLoopDepth(MachineLoop *LoopRep);
bool isRetiredBlock(MachineBasicBlock *MBB) const;
bool isActiveLoophead(MachineBasicBlock *MBB) const;
PathToKind singlePathTo(MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB,
@@ -229,16 +226,15 @@ protected:
// Function originally from CFGStructTraits
void insertInstrEnd(MachineBasicBlock *MBB, int NewOpcode,
- DebugLoc DL = DebugLoc());
+ const DebugLoc &DL = DebugLoc());
MachineInstr *insertInstrBefore(MachineBasicBlock *MBB, int NewOpcode,
- DebugLoc DL = DebugLoc());
+ const DebugLoc &DL = DebugLoc());
MachineInstr *insertInstrBefore(MachineBasicBlock::iterator I, int NewOpcode);
void insertCondBranchBefore(MachineBasicBlock::iterator I, int NewOpcode,
- DebugLoc DL);
+ const DebugLoc &DL);
void insertCondBranchBefore(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I, int NewOpcode, int RegNum,
- DebugLoc DL);
- void insertCondBranchEnd(MachineBasicBlock *MBB, int NewOpcode, int RegNum);
+ MachineBasicBlock::iterator I, int NewOpcode,
+ int RegNum, const DebugLoc &DL);
static int getBranchNzeroOpcode(int OldOpcode);
static int getBranchZeroOpcode(int OldOpcode);
static int getContinueNzeroOpcode(int OldOpcode);
@@ -257,7 +253,6 @@ protected:
/// instruction. Such move instruction "belong to" the loop backward-edge.
MachineInstr *getLoopendBlockBranchInstr(MachineBasicBlock *MBB);
static MachineInstr *getReturnInstr(MachineBasicBlock *MBB);
- static MachineInstr *getContinueInstr(MachineBasicBlock *MBB);
static bool isReturnBlock(MachineBasicBlock *MBB);
static void cloneSuccessorList(MachineBasicBlock *DstMBB,
MachineBasicBlock *SrcMBB) ;
@@ -276,11 +271,7 @@ protected:
int ifPatternMatch(MachineBasicBlock *MBB);
int loopendPatternMatch();
int mergeLoop(MachineLoop *LoopRep);
- int loopcontPatternMatch(MachineLoop *LoopRep, MachineBasicBlock *LoopHeader);
- void handleLoopcontBlock(MachineBasicBlock *ContingMBB,
- MachineLoop *ContingLoop, MachineBasicBlock *ContMBB,
- MachineLoop *ContLoop);
/// return true iff src1Blk->succ_size() == 0 && src1Blk and src2Blk are in
/// the same loop with LoopLandInfo without explicitly keeping track of
/// loopContBlks and loopBreakBlks, this is a method to get the information.
@@ -337,13 +328,7 @@ protected:
MachineBasicBlock *DstMBB, MachineBasicBlock::iterator I);
void recordSccnum(MachineBasicBlock *MBB, int SCCNum);
void retireBlock(MachineBasicBlock *MBB);
- void setLoopLandBlock(MachineLoop *LoopRep, MachineBasicBlock *MBB = nullptr);
- MachineBasicBlock *findNearestCommonPostDom(std::set<MachineBasicBlock *>&);
- /// This is work around solution for findNearestCommonDominator not available
- /// to post dom a proper fix should go to Dominators.h.
- MachineBasicBlock *findNearestCommonPostDom(MachineBasicBlock *MBB1,
- MachineBasicBlock *MBB2);
private:
MBBInfoMap BlockInfoMap;
@@ -376,10 +361,6 @@ bool AMDGPUCFGStructurizer::hasBackEdge(MachineBasicBlock *MBB) const {
return MBB->isSuccessor(LoopHeader);
}
-unsigned AMDGPUCFGStructurizer::getLoopDepth(MachineLoop *LoopRep) {
- return LoopRep ? LoopRep->getLoopDepth() : 0;
-}
-
bool AMDGPUCFGStructurizer::isRetiredBlock(MachineBasicBlock *MBB) const {
MBBInfoMap::const_iterator It = BlockInfoMap.find(MBB);
if (It == BlockInfoMap.end())
@@ -442,7 +423,8 @@ bool AMDGPUCFGStructurizer::needMigrateBlock(MachineBasicBlock *MBB) const {
void AMDGPUCFGStructurizer::reversePredicateSetter(
MachineBasicBlock::iterator I) {
- while (I--) {
+ assert(static_cast<MachineInstr *>(I) && "Expected valid iterator");
+ for (;; --I) {
if (I->getOpcode() == AMDGPU::PRED_X) {
switch (static_cast<MachineInstr *>(I)->getOperand(2).getImm()) {
case OPCODE_IS_ZERO_INT:
@@ -469,16 +451,17 @@ void AMDGPUCFGStructurizer::reversePredicateSetter(
}
void AMDGPUCFGStructurizer::insertInstrEnd(MachineBasicBlock *MBB,
- int NewOpcode, DebugLoc DL) {
- MachineInstr *MI = MBB->getParent()
- ->CreateMachineInstr(TII->get(NewOpcode), DL);
+ int NewOpcode, const DebugLoc &DL) {
+ MachineInstr *MI =
+ MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DL);
MBB->push_back(MI);
//assume the instruction doesn't take any reg operand ...
SHOWNEWINSTR(MI);
}
MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore(MachineBasicBlock *MBB,
- int NewOpcode, DebugLoc DL) {
+ int NewOpcode,
+ const DebugLoc &DL) {
MachineInstr *MI =
MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DL);
if (MBB->begin() != MBB->end())
@@ -502,7 +485,7 @@ MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore(
}
void AMDGPUCFGStructurizer::insertCondBranchBefore(
- MachineBasicBlock::iterator I, int NewOpcode, DebugLoc DL) {
+ MachineBasicBlock::iterator I, int NewOpcode, const DebugLoc &DL) {
MachineInstr *OldMI = &(*I);
MachineBasicBlock *MBB = OldMI->getParent();
MachineFunction *MF = MBB->getParent();
@@ -514,9 +497,9 @@ void AMDGPUCFGStructurizer::insertCondBranchBefore(
//erase later oldInstr->eraseFromParent();
}
-void AMDGPUCFGStructurizer::insertCondBranchBefore(MachineBasicBlock *blk,
- MachineBasicBlock::iterator I, int NewOpcode, int RegNum,
- DebugLoc DL) {
+void AMDGPUCFGStructurizer::insertCondBranchBefore(
+ MachineBasicBlock *blk, MachineBasicBlock::iterator I, int NewOpcode,
+ int RegNum, const DebugLoc &DL) {
MachineFunction *MF = blk->getParent();
MachineInstr *NewInstr = MF->CreateMachineInstr(TII->get(NewOpcode), DL);
//insert before
@@ -525,16 +508,6 @@ void AMDGPUCFGStructurizer::insertCondBranchBefore(MachineBasicBlock *blk,
SHOWNEWINSTR(NewInstr);
}
-void AMDGPUCFGStructurizer::insertCondBranchEnd(MachineBasicBlock *MBB,
- int NewOpcode, int RegNum) {
- MachineFunction *MF = MBB->getParent();
- MachineInstr *NewInstr =
- MF->CreateMachineInstr(TII->get(NewOpcode), DebugLoc());
- MBB->push_back(NewInstr);
- MachineInstrBuilder(*MF, NewInstr).addReg(RegNum, false);
- SHOWNEWINSTR(NewInstr);
-}
-
int AMDGPUCFGStructurizer::getBranchNzeroOpcode(int OldOpcode) {
switch(OldOpcode) {
case AMDGPU::JUMP_COND:
@@ -664,16 +637,6 @@ MachineInstr *AMDGPUCFGStructurizer::getReturnInstr(MachineBasicBlock *MBB) {
return nullptr;
}
-MachineInstr *AMDGPUCFGStructurizer::getContinueInstr(MachineBasicBlock *MBB) {
- MachineBasicBlock::reverse_iterator It = MBB->rbegin();
- if (It != MBB->rend()) {
- MachineInstr *MI = &(*It);
- if (MI->getOpcode() == AMDGPU::CONTINUE)
- return MI;
- }
- return nullptr;
-}
-
bool AMDGPUCFGStructurizer::isReturnBlock(MachineBasicBlock *MBB) {
MachineInstr *MI = getReturnInstr(MBB);
bool IsReturn = (MBB->succ_size() == 0);
@@ -697,11 +660,8 @@ MachineBasicBlock *AMDGPUCFGStructurizer::clone(MachineBasicBlock *MBB) {
MachineFunction *Func = MBB->getParent();
MachineBasicBlock *NewMBB = Func->CreateMachineBasicBlock();
Func->push_back(NewMBB); //insert to function
- for (MachineBasicBlock::iterator It = MBB->begin(), E = MBB->end();
- It != E; ++It) {
- MachineInstr *MI = Func->CloneMachineInstr(It);
- NewMBB->push_back(MI);
- }
+ for (const MachineInstr &It : *MBB)
+ NewMBB->push_back(Func->CloneMachineInstr(&It));
return NewMBB;
}
@@ -727,7 +687,7 @@ void AMDGPUCFGStructurizer::wrapup(MachineBasicBlock *MBB) {
while (It != E) {
if (Pre->getOpcode() == AMDGPU::CONTINUE
&& It->getOpcode() == AMDGPU::ENDLOOP)
- ContInstr.push_back(Pre);
+ ContInstr.push_back(&*Pre);
Pre = It;
++It;
}
@@ -923,7 +883,7 @@ bool AMDGPUCFGStructurizer::run() {
if (!Finish) {
DEBUG(FuncRep->viewCFG());
- llvm_unreachable("IRREDUCIBLE_CFG");
+ report_fatal_error("IRREDUCIBLE_CFG");
}
return true;
@@ -1145,34 +1105,6 @@ int AMDGPUCFGStructurizer::mergeLoop(MachineLoop *LoopRep) {
return 1;
}
-int AMDGPUCFGStructurizer::loopcontPatternMatch(MachineLoop *LoopRep,
- MachineBasicBlock *LoopHeader) {
- int NumCont = 0;
- SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> ContMBB;
- typedef GraphTraits<Inverse<MachineBasicBlock *> > GTIM;
- GTIM::ChildIteratorType It = GTIM::child_begin(LoopHeader),
- E = GTIM::child_end(LoopHeader);
- for (; It != E; ++It) {
- MachineBasicBlock *MBB = *It;
- if (LoopRep->contains(MBB)) {
- handleLoopcontBlock(MBB, MLI->getLoopFor(MBB),
- LoopHeader, LoopRep);
- ContMBB.push_back(MBB);
- ++NumCont;
- }
- }
-
- for (SmallVectorImpl<MachineBasicBlock *>::iterator It = ContMBB.begin(),
- E = ContMBB.end(); It != E; ++It) {
- (*It)->removeSuccessor(LoopHeader, true);
- }
-
- numLoopcontPatternMatch += NumCont;
-
- return NumCont;
-}
-
-
bool AMDGPUCFGStructurizer::isSameloopDetachedContbreak(
MachineBasicBlock *Src1MBB, MachineBasicBlock *Src2MBB) {
if (Src1MBB->succ_size() == 0) {
@@ -1413,10 +1345,10 @@ int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
MachineBasicBlock::iterator I = insertInstrBefore(LandBlk, AMDGPU::ENDIF);
if (LandBlkHasOtherPred) {
- llvm_unreachable("Extra register needed to handle CFG");
+ report_fatal_error("Extra register needed to handle CFG");
unsigned CmpResReg =
HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC);
- llvm_unreachable("Extra compare instruction needed to handle CFG");
+ report_fatal_error("Extra compare instruction needed to handle CFG");
insertCondBranchBefore(LandBlk, I, AMDGPU::IF_PREDICATE_SET,
CmpResReg, DebugLoc());
}
@@ -1433,7 +1365,7 @@ int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
    // need to unconditionally insert the assignment to ensure that a path
    // from its predecessor, rather than headBlk, has a valid value in
    // initReg if (initVal != 1).
- llvm_unreachable("Extra register needed to handle CFG");
+ report_fatal_error("Extra register needed to handle CFG");
}
insertInstrBefore(I, AMDGPU::ELSE);
@@ -1442,7 +1374,7 @@ int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
    // need to unconditionally insert the assignment to ensure that a path
    // from its predecessor, rather than headBlk, has a valid value in
    // initReg if (initVal != 0).
- llvm_unreachable("Extra register needed to handle CFG");
+ report_fatal_error("Extra register needed to handle CFG");
}
if (LandBlkHasOtherPred) {
@@ -1454,7 +1386,7 @@ int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
PE = LandBlk->pred_end(); PI != PE; ++PI) {
MachineBasicBlock *MBB = *PI;
if (MBB != TrueMBB && MBB != FalseMBB)
- llvm_unreachable("Extra register needed to handle CFG");
+ report_fatal_error("Extra register needed to handle CFG");
}
}
DEBUG(
@@ -1468,17 +1400,6 @@ int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
return NumNewBlk;
}
-void AMDGPUCFGStructurizer::handleLoopcontBlock(MachineBasicBlock *ContingMBB,
- MachineLoop *ContingLoop, MachineBasicBlock *ContMBB,
- MachineLoop *ContLoop) {
- DEBUG(dbgs() << "loopcontPattern cont = BB" << ContingMBB->getNumber()
- << " header = BB" << ContMBB->getNumber() << "\n";
- dbgs() << "Trying to continue loop-depth = "
- << getLoopDepth(ContLoop)
- << " from loop-depth = " << getLoopDepth(ContingLoop) << "\n";);
- settleLoopcontBlock(ContingMBB, ContMBB);
-}
-
void AMDGPUCFGStructurizer::mergeSerialBlock(MachineBasicBlock *DstMBB,
MachineBasicBlock *SrcMBB) {
DEBUG(
@@ -1809,76 +1730,6 @@ void AMDGPUCFGStructurizer::retireBlock(MachineBasicBlock *MBB) {
&& "can't retire block yet");
}
-void AMDGPUCFGStructurizer::setLoopLandBlock(MachineLoop *loopRep,
- MachineBasicBlock *MBB) {
- MachineBasicBlock *&TheEntry = LLInfoMap[loopRep];
- if (!MBB) {
- MBB = FuncRep->CreateMachineBasicBlock();
- FuncRep->push_back(MBB); //insert to function
- SHOWNEWBLK(MBB, "DummyLandingBlock for loop without break: ");
- }
- TheEntry = MBB;
- DEBUG(
- dbgs() << "setLoopLandBlock loop-header = BB"
- << loopRep->getHeader()->getNumber()
- << " landing-block = BB" << MBB->getNumber() << "\n";
- );
-}
-
-MachineBasicBlock *
-AMDGPUCFGStructurizer::findNearestCommonPostDom(MachineBasicBlock *MBB1,
- MachineBasicBlock *MBB2) {
-
- if (PDT->dominates(MBB1, MBB2))
- return MBB1;
- if (PDT->dominates(MBB2, MBB1))
- return MBB2;
-
- MachineDomTreeNode *Node1 = PDT->getNode(MBB1);
- MachineDomTreeNode *Node2 = PDT->getNode(MBB2);
-
- // Handle newly cloned node.
- if (!Node1 && MBB1->succ_size() == 1)
- return findNearestCommonPostDom(*MBB1->succ_begin(), MBB2);
- if (!Node2 && MBB2->succ_size() == 1)
- return findNearestCommonPostDom(MBB1, *MBB2->succ_begin());
-
- if (!Node1 || !Node2)
- return nullptr;
-
- Node1 = Node1->getIDom();
- while (Node1) {
- if (PDT->dominates(Node1, Node2))
- return Node1->getBlock();
- Node1 = Node1->getIDom();
- }
-
- return nullptr;
-}
-
-MachineBasicBlock *
-AMDGPUCFGStructurizer::findNearestCommonPostDom(
- std::set<MachineBasicBlock *> &MBBs) {
- MachineBasicBlock *CommonDom;
- std::set<MachineBasicBlock *>::const_iterator It = MBBs.begin();
- std::set<MachineBasicBlock *>::const_iterator E = MBBs.end();
- for (CommonDom = *It; It != E && CommonDom; ++It) {
- MachineBasicBlock *MBB = *It;
- if (MBB != CommonDom)
- CommonDom = findNearestCommonPostDom(MBB, CommonDom);
- }
-
- DEBUG(
- dbgs() << "Common post dominator for exit blocks is ";
- if (CommonDom)
- dbgs() << "BB" << CommonDom->getNumber() << "\n";
- else
- dbgs() << "NULL\n";
- );
-
- return CommonDom;
-}
-
char AMDGPUCFGStructurizer::ID = 0;
} // end anonymous namespace
diff --git a/lib/Target/AMDGPU/AMDKernelCodeT.h b/lib/Target/AMDGPU/AMDKernelCodeT.h
index a9ba60c8cbad..5d243e949fd3 100644
--- a/lib/Target/AMDGPU/AMDKernelCodeT.h
+++ b/lib/Target/AMDGPU/AMDKernelCodeT.h
@@ -44,6 +44,15 @@ enum amd_code_version_t {
AMD_CODE_VERSION_MINOR = 1
};
+// Sets the val bits for the specified mask in the dst packed instance.
+#define AMD_HSA_BITS_SET(dst, mask, val) \
+ dst &= (~(1 << mask ## _SHIFT) & ~mask); \
+ dst |= (((val) << mask ## _SHIFT) & mask)
+
+// Gets the bits for the specified mask from the src packed instance.
+#define AMD_HSA_BITS_GET(src, mask) \
+ ((src & mask) >> mask ## _SHIFT)
+
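The two macros above pair with the *_SHIFT/*_WIDTH/mask triples defined throughout this header. A minimal standalone sketch of how they compose (not part of the patch; the macros are restated and the field is illustrative so the snippet compiles on its own):

    #include <cassert>
    #include <cstdint>

    #define AMD_HSA_BITS_SET(dst, mask, val) \
      dst &= (~(1 << mask ## _SHIFT) & ~mask); \
      dst |= (((val) << mask ## _SHIFT) & mask)

    #define AMD_HSA_BITS_GET(src, mask) \
      ((src & mask) >> mask ## _SHIFT)

    // Illustrative 2-bit field at bit 17, shaped like the enum entries below.
    enum {
      DEMO_FIELD_SHIFT = 17,
      DEMO_FIELD_WIDTH = 2,
      DEMO_FIELD = ((1 << DEMO_FIELD_WIDTH) - 1) << DEMO_FIELD_SHIFT
    };

    int main() {
      uint32_t Props = 0xFFFFFFFFu;            // all bits set
      AMD_HSA_BITS_SET(Props, DEMO_FIELD, 2);  // clear the field, then store 2
      assert(AMD_HSA_BITS_GET(Props, DEMO_FIELD) == 2);
      // Bits outside the field are untouched.
      assert((Props | DEMO_FIELD) == 0xFFFFFFFFu);
      return 0;
    }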
/// The values used to define the number of bytes to use for the
/// swizzle element size.
enum amd_element_byte_size_t {
@@ -118,10 +127,14 @@ enum amd_code_property_mask_t {
AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_WIDTH = 1,
AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT,
+ AMD_CODE_PROPERTY_RESERVED1_SHIFT = 10,
+ AMD_CODE_PROPERTY_RESERVED1_WIDTH = 6,
+ AMD_CODE_PROPERTY_RESERVED1 = ((1 << AMD_CODE_PROPERTY_RESERVED1_WIDTH) - 1) << AMD_CODE_PROPERTY_RESERVED1_SHIFT,
+
/// Control wave ID base counter for GDS ordered-append. Used to set
/// COMPUTE_DISPATCH_INITIATOR.ORDERED_APPEND_ENBL. (Not sure if
/// ORDERED_APPEND_MODE also needs to be settable)
- AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT = 10,
+ AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT = 16,
AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_WIDTH = 1,
AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS = ((1 << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT,
@@ -146,7 +159,7 @@ enum amd_code_property_mask_t {
/// is generally DWORD.
///
 /// Use values from the amd_element_byte_size_t enum.
- AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT = 11,
+ AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT = 17,
AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_WIDTH = 2,
AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE = ((1 << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_WIDTH) - 1) << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT,
@@ -155,7 +168,7 @@ enum amd_code_property_mask_t {
/// HSA_MACHINE_LARGE. Must also match
/// SH_MEM_CONFIG.PTR32 (GFX6 (SI)/GFX7 (CI)),
/// SH_MEM_CONFIG.ADDRESS_MODE (GFX8 (VI)+).
- AMD_CODE_PROPERTY_IS_PTR64_SHIFT = 13,
+ AMD_CODE_PROPERTY_IS_PTR64_SHIFT = 19,
AMD_CODE_PROPERTY_IS_PTR64_WIDTH = 1,
AMD_CODE_PROPERTY_IS_PTR64 = ((1 << AMD_CODE_PROPERTY_IS_PTR64_WIDTH) - 1) << AMD_CODE_PROPERTY_IS_PTR64_SHIFT,
@@ -167,18 +180,22 @@ enum amd_code_property_mask_t {
/// workitem_private_segment_byte_size only specifies the statically
/// know private segment size, and additional space must be added
/// for the call stack.
- AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT = 14,
+ AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT = 20,
AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_WIDTH = 1,
AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK = ((1 << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_WIDTH) - 1) << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT,
/// Indicate if code generated has support for debugging.
- AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT = 15,
+ AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT = 21,
AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_WIDTH = 1,
AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED = ((1 << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_WIDTH) - 1) << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT,
- AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT = 15,
+ AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT = 22,
AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_WIDTH = 1,
- AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED = ((1 << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_WIDTH) - 1) << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT
+ AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED = ((1 << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_WIDTH) - 1) << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT,
+
+ AMD_CODE_PROPERTY_RESERVED2_SHIFT = 23,
+ AMD_CODE_PROPERTY_RESERVED2_WIDTH = 9,
+ AMD_CODE_PROPERTY_RESERVED2 = ((1 << AMD_CODE_PROPERTY_RESERVED2_WIDTH) - 1) << AMD_CODE_PROPERTY_RESERVED2_SHIFT
};
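The renumbering above moves every field that previously started at bit 10 up by six bits, inserting RESERVED1 in bits 10..15 and adding RESERVED2 to pad the word. A compile-time sanity check over the new layout (a sketch, not in the patch; it uses only the enumerators defined above):

    static_assert(AMD_CODE_PROPERTY_RESERVED1_SHIFT == 10,
                  "RESERVED1 occupies bits 10..15");
    static_assert(AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT ==
                  AMD_CODE_PROPERTY_RESERVED1_SHIFT +
                  AMD_CODE_PROPERTY_RESERVED1_WIDTH,
                  "ORDERED_APPEND_GDS moved from bit 10 to bit 16");
    static_assert(AMD_CODE_PROPERTY_RESERVED2_SHIFT +
                  AMD_CODE_PROPERTY_RESERVED2_WIDTH == 32,
                  "RESERVED2 pads the 32-bit property word");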
/// @brief The hsa_ext_control_directives_t specifies the values for the HSAIL
diff --git a/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index d9f753f40133..efcf1b23adaa 100644
--- a/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -1,4 +1,4 @@
-//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
+//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,15 +7,17 @@
//
//===----------------------------------------------------------------------===//
+#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
-#include "Utils/AMDGPUBaseInfo.h"
-#include "AMDKernelCodeT.h"
#include "SIDefines.h"
+#include "Utils/AMDGPUBaseInfo.h"
+#include "Utils/AMDKernelCodeTUtils.h"
+#include "Utils/AMDGPUAsmUtils.h"
#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
@@ -25,16 +27,17 @@
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
-#include "llvm/MC/MCTargetAsmParser.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
using namespace llvm;
@@ -42,6 +45,8 @@ namespace {
struct OptionalOperand;
+enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
+
class AMDGPUOperand : public MCParsedAsmOperand {
enum KindTy {
Token,
@@ -55,19 +60,74 @@ class AMDGPUOperand : public MCParsedAsmOperand {
public:
AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}
- MCContext *Ctx;
+ typedef std::unique_ptr<AMDGPUOperand> Ptr;
+
+ struct Modifiers {
+ bool Abs;
+ bool Neg;
+ bool Sext;
+
+ bool hasFPModifiers() const { return Abs || Neg; }
+ bool hasIntModifiers() const { return Sext; }
+ bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
+
+ int64_t getFPModifiersOperand() const {
+ int64_t Operand = 0;
+ Operand |= Abs ? SISrcMods::ABS : 0;
+ Operand |= Neg ? SISrcMods::NEG : 0;
+ return Operand;
+ }
+
+ int64_t getIntModifiersOperand() const {
+ int64_t Operand = 0;
+ Operand |= Sext ? SISrcMods::SEXT : 0;
+ return Operand;
+ }
+
+ int64_t getModifiersOperand() const {
+ assert(!(hasFPModifiers() && hasIntModifiers())
+ && "fp and int modifiers should not be used simultaneously");
+ if (hasFPModifiers()) {
+ return getFPModifiersOperand();
+ } else if (hasIntModifiers()) {
+ return getIntModifiersOperand();
+ } else {
+ return 0;
+ }
+ }
+
+ friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
+ };
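getModifiersOperand() packs these flags into the source-modifier operand that precedes a source in the VOP3/SDWA encodings. A standalone sketch of that packing (the SrcModsSketch bit values are assumptions for illustration; NEG and SEXT are distinct encodings that may share a bit position):

    #include <cassert>
    #include <cstdint>

    namespace SrcModsSketch {
    enum : int64_t { NEG = 1, ABS = 2, SEXT = 1 };
    }

    struct ModsSketch {
      bool Abs, Neg, Sext;
      int64_t packed() const {
        assert(!((Abs || Neg) && Sext) && "fp and int mods are exclusive");
        if (Abs || Neg)
          return (Abs ? SrcModsSketch::ABS : 0) | (Neg ? SrcModsSketch::NEG : 0);
        return Sext ? SrcModsSketch::SEXT : 0;
      }
    };

    int main() {
      ModsSketch NegAbs = {true, true, false};    // e.g. "-|v0|"
      ModsSketch SextOnly = {false, false, true}; // e.g. "sext(v1)"
      assert(NegAbs.packed() == (SrcModsSketch::ABS | SrcModsSketch::NEG));
      assert(SextOnly.packed() == SrcModsSketch::SEXT);
      return 0;
    }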
enum ImmTy {
ImmTyNone,
- ImmTyDSOffset0,
- ImmTyDSOffset1,
ImmTyGDS,
+ ImmTyOffen,
+ ImmTyIdxen,
+ ImmTyAddr64,
ImmTyOffset,
+ ImmTyOffset0,
+ ImmTyOffset1,
ImmTyGLC,
ImmTySLC,
ImmTyTFE,
- ImmTyClamp,
- ImmTyOMod
+ ImmTyClampSI,
+ ImmTyOModSI,
+ ImmTyDppCtrl,
+ ImmTyDppRowMask,
+ ImmTyDppBankMask,
+ ImmTyDppBoundCtrl,
+ ImmTySdwaDstSel,
+ ImmTySdwaSrc0Sel,
+ ImmTySdwaSrc1Sel,
+ ImmTySdwaDstUnused,
+ ImmTyDMask,
+ ImmTyUNorm,
+ ImmTyDA,
+ ImmTyR128,
+ ImmTyLWE,
+ ImmTyHwreg,
+ ImmTySendMsg,
};
struct TokOp {
@@ -79,11 +139,12 @@ public:
bool IsFPImm;
ImmTy Type;
int64_t Val;
+ Modifiers Mods;
};
struct RegOp {
unsigned RegNo;
- int Modifiers;
+ Modifiers Mods;
const MCRegisterInfo *TRI;
const MCSubtargetInfo *STI;
bool IsForcedVOP3;
@@ -96,175 +157,323 @@ public:
const MCExpr *Expr;
};
- void addImmOperands(MCInst &Inst, unsigned N) const {
- Inst.addOperand(MCOperand::createImm(getImm()));
+ bool isToken() const override {
+ if (Kind == Token)
+ return true;
+
+ if (Kind != Expression || !Expr)
+ return false;
+
+ // When parsing operands, we can't always tell if something was meant to be
+ // a token, like 'gds', or an expression that references a global variable.
+ // In this case, we assume the string is an expression, and if we need to
+ // interpret it as a token, then we treat the symbol name as the token.
+ return isa<MCSymbolRefExpr>(Expr);
}
- StringRef getToken() const {
- return StringRef(Tok.Data, Tok.Length);
+ bool isImm() const override {
+ return Kind == Immediate;
}
- void addRegOperands(MCInst &Inst, unsigned N) const {
- Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
+ bool isInlinableImm() const {
+ if (!isImmTy(ImmTyNone)) {
+ // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
+ return false;
+ }
+ // TODO: We should avoid using host float here. It would be better to
+ // check the float bit values, which is what a few other places do.
+ // We've had bot failures before due to weird NaN support on mips hosts.
+ const float F = BitsToFloat(Imm.Val);
+ // TODO: Add 1/(2*pi) for VI
+ return (Imm.Val <= 64 && Imm.Val >= -16) ||
+ (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
+ F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
}
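The accepted set mirrors the hardware's inline constants: integers in [-16, 64] plus a short list of FP values. A standalone sketch under the same host-float assumption the TODO above flags:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Mirrors the inline-constant test above; host IEEE-754 single
    // precision is assumed (the TODO notes the real code should compare
    // bit patterns instead).
    static bool isInlinableSketch(int64_t Val) {
      float F;
      uint32_t Bits = static_cast<uint32_t>(Val);
      std::memcpy(&F, &Bits, sizeof(F));  // stand-in for BitsToFloat()
      return (Val <= 64 && Val >= -16) ||
             (F == 0.0f || F == 0.5f || F == -0.5f || F == 1.0f || F == -1.0f ||
              F == 2.0f || F == -2.0f || F == 4.0f || F == -4.0f);
    }

    int main() {
      assert(isInlinableSketch(42));          // small integers inline
      assert(isInlinableSketch(0x3F000000));  // bit pattern of 0.5f inlines
      assert(!isInlinableSketch(100));        // plain 100 needs a literal
      return 0;
    }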
- void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
- if (isReg())
- addRegOperands(Inst, N);
- else
- addImmOperands(Inst, N);
+ bool isRegKind() const {
+ return Kind == Register;
}
- void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
- Inst.addOperand(MCOperand::createImm(
- Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
- addRegOperands(Inst, N);
+ bool isReg() const override {
+ return isRegKind() && !Reg.Mods.hasModifiers();
+ }
+
+ bool isRegOrImmWithInputMods() const {
+ return isRegKind() || isInlinableImm();
+ }
+
+ bool isImmTy(ImmTy ImmT) const {
+ return isImm() && Imm.Type == ImmT;
+ }
+
+ bool isImmModifier() const {
+ return isImm() && Imm.Type != ImmTyNone;
+ }
+
+ bool isClampSI() const { return isImmTy(ImmTyClampSI); }
+ bool isOModSI() const { return isImmTy(ImmTyOModSI); }
+ bool isDMask() const { return isImmTy(ImmTyDMask); }
+ bool isUNorm() const { return isImmTy(ImmTyUNorm); }
+ bool isDA() const { return isImmTy(ImmTyDA); }
+ bool isR128() const { return isImmTy(ImmTyR128); }
+ bool isLWE() const { return isImmTy(ImmTyLWE); }
+ bool isOffen() const { return isImmTy(ImmTyOffen); }
+ bool isIdxen() const { return isImmTy(ImmTyIdxen); }
+ bool isAddr64() const { return isImmTy(ImmTyAddr64); }
+ bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
+ bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
+ bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
+ bool isGDS() const { return isImmTy(ImmTyGDS); }
+ bool isGLC() const { return isImmTy(ImmTyGLC); }
+ bool isSLC() const { return isImmTy(ImmTySLC); }
+ bool isTFE() const { return isImmTy(ImmTyTFE); }
+ bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
+ bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
+ bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
+ bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
+ bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
+ bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
+ bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
+
+ bool isMod() const {
+ return isClampSI() || isOModSI();
}
- void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
- if (isImm())
- addImmOperands(Inst, N);
- else {
- assert(isExpr());
- Inst.addOperand(MCOperand::createExpr(Expr));
- }
+ bool isRegOrImm() const {
+ return isReg() || isImm();
}
- bool defaultTokenHasSuffix() const {
- StringRef Token(Tok.Data, Tok.Length);
+ bool isRegClass(unsigned RCID) const {
+ return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
+ }
- return Token.endswith("_e32") || Token.endswith("_e64");
+ bool isSCSrc32() const {
+ return isInlinableImm() || isRegClass(AMDGPU::SReg_32RegClassID);
}
- bool isToken() const override {
- return Kind == Token;
+ bool isSCSrc64() const {
+ return isInlinableImm() || isRegClass(AMDGPU::SReg_64RegClassID);
}
- bool isImm() const override {
- return Kind == Immediate;
+ bool isSSrc32() const {
+ return isImm() || isSCSrc32() || isExpr();
}
- bool isInlineImm() const {
- float F = BitsToFloat(Imm.Val);
- // TODO: Add 0.5pi for VI
- return isImm() && ((Imm.Val <= 64 && Imm.Val >= -16) ||
- (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
- F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0));
+ bool isSSrc64() const {
+ // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
+ // See isVSrc64().
+ return isImm() || isSCSrc64();
}
- bool isDSOffset0() const {
- assert(isImm());
- return Imm.Type == ImmTyDSOffset0;
+ bool isVCSrc32() const {
+ return isInlinableImm() || isRegClass(AMDGPU::VS_32RegClassID);
}
- bool isDSOffset1() const {
- assert(isImm());
- return Imm.Type == ImmTyDSOffset1;
+ bool isVCSrc64() const {
+ return isInlinableImm() || isRegClass(AMDGPU::VS_64RegClassID);
}
- int64_t getImm() const {
- return Imm.Val;
+ bool isVSrc32() const {
+ return isImm() || isVCSrc32();
}
- enum ImmTy getImmTy() const {
- assert(isImm());
- return Imm.Type;
+ bool isVSrc64() const {
+ // TODO: Check if the 64-bit value (coming from assembly source) can be
+ // narrowed to 32 bits (in the instruction stream). That require knowledge
+ // of instruction type (unsigned/signed, floating or "untyped"/B64),
+ // see [AMD GCN3 ISA 6.3.1].
+ // TODO: How are 64-bit values formed from 32-bit literals in _B64 insns?
+ return isImm() || isVCSrc64();
}
- bool isRegKind() const {
- return Kind == Register;
+ bool isMem() const override {
+ return false;
}
- bool isReg() const override {
- return Kind == Register && Reg.Modifiers == -1;
+ bool isExpr() const {
+ return Kind == Expression;
}
- bool isRegWithInputMods() const {
- return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
+ bool isSoppBrTarget() const {
+ return isExpr() || isImm();
}
- void setModifiers(unsigned Mods) {
- assert(isReg());
- Reg.Modifiers = Mods;
+ bool isSWaitCnt() const;
+ bool isHwreg() const;
+ bool isSendMsg() const;
+ bool isSMRDOffset() const;
+ bool isSMRDLiteralOffset() const;
+ bool isDPPCtrl() const;
+
+ StringRef getExpressionAsToken() const {
+ assert(isExpr());
+ const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
+ return S->getSymbol().getName();
}
- bool hasModifiers() const {
- assert(isRegKind());
- return Reg.Modifiers != -1;
+
+ StringRef getToken() const {
+ assert(isToken());
+
+ if (Kind == Expression)
+ return getExpressionAsToken();
+
+ return StringRef(Tok.Data, Tok.Length);
+ }
+
+ int64_t getImm() const {
+ assert(isImm());
+ return Imm.Val;
+ }
+
+ enum ImmTy getImmTy() const {
+ assert(isImm());
+ return Imm.Type;
}
unsigned getReg() const override {
return Reg.RegNo;
}
- bool isRegOrImm() const {
- return isReg() || isImm();
+ SMLoc getStartLoc() const override {
+ return StartLoc;
}
- bool isRegClass(unsigned RCID) const {
- return Reg.TRI->getRegClass(RCID).contains(getReg());
+ SMLoc getEndLoc() const override {
+ return EndLoc;
}
- bool isSCSrc32() const {
- return isInlineImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
+ Modifiers getModifiers() const {
+ assert(isRegKind() || isImmTy(ImmTyNone));
+ return isRegKind() ? Reg.Mods : Imm.Mods;
}
- bool isSSrc32() const {
- return isImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
+ void setModifiers(Modifiers Mods) {
+ assert(isRegKind() || isImmTy(ImmTyNone));
+ if (isRegKind())
+ Reg.Mods = Mods;
+ else
+ Imm.Mods = Mods;
}
- bool isSSrc64() const {
- return isImm() || isInlineImm() ||
- (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
+ bool hasModifiers() const {
+ return getModifiers().hasModifiers();
}
-
- bool isSCSrc64() const {
- return (isReg() && isRegClass(AMDGPU::SReg_64RegClassID)) || isInlineImm();
+
+ bool hasFPModifiers() const {
+ return getModifiers().hasFPModifiers();
}
- bool isVCSrc32() const {
- return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
+ bool hasIntModifiers() const {
+ return getModifiers().hasIntModifiers();
}
- bool isVCSrc64() const {
- return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
+ void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const {
+ if (isImmTy(ImmTyNone) && ApplyModifiers && Imm.Mods.hasFPModifiers()) {
+ // Apply modifiers to immediate value
+ int64_t Val = Imm.Val;
+ bool Negate = Imm.Mods.Neg; // Only negate can get here
+ if (Imm.IsFPImm) {
+ APFloat F(BitsToFloat(Val));
+ if (Negate) {
+ F.changeSign();
+ }
+ Val = F.bitcastToAPInt().getZExtValue();
+ } else {
+ Val = Negate ? -Val : Val;
+ }
+ Inst.addOperand(MCOperand::createImm(Val));
+ } else {
+ Inst.addOperand(MCOperand::createImm(getImm()));
+ }
}
- bool isVSrc32() const {
- return isImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
+ void addRegOperands(MCInst &Inst, unsigned N) const {
+ Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
}
- bool isVSrc64() const {
- return isImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
+ void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
+ if (isRegKind())
+ addRegOperands(Inst, N);
+ else if (isExpr())
+ Inst.addOperand(MCOperand::createExpr(Expr));
+ else
+ addImmOperands(Inst, N);
}
- bool isMem() const override {
- return false;
+ void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
+ Modifiers Mods = getModifiers();
+ Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
+ if (isRegKind()) {
+ addRegOperands(Inst, N);
+ } else {
+ addImmOperands(Inst, N, false);
+ }
}
- bool isExpr() const {
- return Kind == Expression;
+ void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
+ assert(!hasIntModifiers());
+ addRegOrImmWithInputModsOperands(Inst, N);
}
- bool isSoppBrTarget() const {
- return isExpr() || isImm();
+ void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
+ assert(!hasFPModifiers());
+ addRegOrImmWithInputModsOperands(Inst, N);
}
- SMLoc getStartLoc() const override {
- return StartLoc;
+ void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
+ if (isImm())
+ addImmOperands(Inst, N);
+ else {
+ assert(isExpr());
+ Inst.addOperand(MCOperand::createExpr(Expr));
+ }
}
- SMLoc getEndLoc() const override {
- return EndLoc;
+ void printImmTy(raw_ostream& OS, ImmTy Type) const {
+ switch (Type) {
+ case ImmTyNone: OS << "None"; break;
+ case ImmTyGDS: OS << "GDS"; break;
+ case ImmTyOffen: OS << "Offen"; break;
+ case ImmTyIdxen: OS << "Idxen"; break;
+ case ImmTyAddr64: OS << "Addr64"; break;
+ case ImmTyOffset: OS << "Offset"; break;
+ case ImmTyOffset0: OS << "Offset0"; break;
+ case ImmTyOffset1: OS << "Offset1"; break;
+ case ImmTyGLC: OS << "GLC"; break;
+ case ImmTySLC: OS << "SLC"; break;
+ case ImmTyTFE: OS << "TFE"; break;
+ case ImmTyClampSI: OS << "ClampSI"; break;
+ case ImmTyOModSI: OS << "OModSI"; break;
+ case ImmTyDppCtrl: OS << "DppCtrl"; break;
+ case ImmTyDppRowMask: OS << "DppRowMask"; break;
+ case ImmTyDppBankMask: OS << "DppBankMask"; break;
+ case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
+ case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
+ case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
+ case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
+ case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
+ case ImmTyDMask: OS << "DMask"; break;
+ case ImmTyUNorm: OS << "UNorm"; break;
+ case ImmTyDA: OS << "DA"; break;
+ case ImmTyR128: OS << "R128"; break;
+ case ImmTyLWE: OS << "LWE"; break;
+ case ImmTyHwreg: OS << "Hwreg"; break;
+ case ImmTySendMsg: OS << "SendMsg"; break;
+ }
}
void print(raw_ostream &OS) const override {
switch (Kind) {
case Register:
- OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
+ OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
break;
case Immediate:
- OS << getImm();
+ OS << '<' << getImm();
+ if (getImmTy() != ImmTyNone) {
+ OS << " type: "; printImmTy(OS, getImmTy());
+ }
+ OS << " mods: " << Imm.Mods << '>';
break;
case Token:
OS << '\'' << getToken() << '\'';
@@ -275,20 +484,21 @@ public:
}
}
- static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
- enum ImmTy Type = ImmTyNone,
- bool IsFPImm = false) {
+ static AMDGPUOperand::Ptr CreateImm(int64_t Val, SMLoc Loc,
+ enum ImmTy Type = ImmTyNone,
+ bool IsFPImm = false) {
auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
Op->Imm.Val = Val;
Op->Imm.IsFPImm = IsFPImm;
Op->Imm.Type = Type;
+ Op->Imm.Mods = {false, false, false};
Op->StartLoc = Loc;
Op->EndLoc = Loc;
return Op;
}
- static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
- bool HasExplicitEncodingSize = true) {
+ static AMDGPUOperand::Ptr CreateToken(StringRef Str, SMLoc Loc,
+ bool HasExplicitEncodingSize = true) {
auto Res = llvm::make_unique<AMDGPUOperand>(Token);
Res->Tok.Data = Str.data();
Res->Tok.Length = Str.size();
@@ -297,43 +507,43 @@ public:
return Res;
}
- static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
- SMLoc E,
- const MCRegisterInfo *TRI,
- const MCSubtargetInfo *STI,
- bool ForceVOP3) {
+ static AMDGPUOperand::Ptr CreateReg(unsigned RegNo, SMLoc S,
+ SMLoc E,
+ const MCRegisterInfo *TRI,
+ const MCSubtargetInfo *STI,
+ bool ForceVOP3) {
auto Op = llvm::make_unique<AMDGPUOperand>(Register);
Op->Reg.RegNo = RegNo;
Op->Reg.TRI = TRI;
Op->Reg.STI = STI;
- Op->Reg.Modifiers = -1;
+ Op->Reg.Mods = {false, false, false};
Op->Reg.IsForcedVOP3 = ForceVOP3;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
- static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
+ static AMDGPUOperand::Ptr CreateExpr(const class MCExpr *Expr, SMLoc S) {
auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
Op->Expr = Expr;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
-
- bool isDSOffset() const;
- bool isDSOffset01() const;
- bool isSWaitCnt() const;
- bool isMubufOffset() const;
- bool isSMRDOffset() const;
- bool isSMRDLiteralOffset() const;
};
+raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
+ OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
+ return OS;
+}
+
class AMDGPUAsmParser : public MCTargetAsmParser {
const MCInstrInfo &MII;
MCAsmParser &Parser;
unsigned ForcedEncodingSize;
+ bool ForcedDPP;
+ bool ForcedSDWA;
bool isSI() const {
return AMDGPU::isSI(getSTI());
@@ -373,9 +583,11 @@ private:
bool ParseSectionDirectiveHSADataGlobalAgent();
bool ParseSectionDirectiveHSADataGlobalProgram();
bool ParseSectionDirectiveHSARodataReadonlyAgent();
+ bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
+ bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);
+ void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, bool IsAtomic, bool IsAtomicReturn);
public:
-public:
enum AMDGPUMatchResultTy {
Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
};
@@ -384,7 +596,9 @@ public:
const MCInstrInfo &MII,
const MCTargetOptions &Options)
: MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
- ForcedEncodingSize(0) {
+ ForcedEncodingSize(0),
+ ForcedDPP(false),
+ ForcedSDWA(false) {
MCAsmParserExtension::Initialize(Parser);
if (getSTI().getFeatureBits().none()) {
@@ -393,6 +607,21 @@ public:
}
setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
+
+ {
+ // TODO: make these pre-defined variables read-only.
+ // Currently there is no suitable machinery in core llvm-mc for this:
+ // MCSymbol::isRedefinable is intended for another purpose, and
+ // AsmParser::parseDirectiveSet() cannot be specialized for a specific target.
+ AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
+ MCContext &Ctx = getContext();
+ MCSymbol *Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
+ Sym->setVariableValue(MCConstantExpr::create(Isa.Major, Ctx));
+ Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
+ Sym->setVariableValue(MCConstantExpr::create(Isa.Minor, Ctx));
+ Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
+ Sym->setVariableValue(MCConstantExpr::create(Isa.Stepping, Ctx));
+ }
}
AMDGPUTargetStreamer &getTargetStreamer() {
@@ -400,84 +629,117 @@ public:
return static_cast<AMDGPUTargetStreamer &>(TS);
}
- unsigned getForcedEncodingSize() const {
- return ForcedEncodingSize;
- }
-
- void setForcedEncodingSize(unsigned Size) {
- ForcedEncodingSize = Size;
- }
+ void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
+ void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
+ void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }
- bool isForcedVOP3() const {
- return ForcedEncodingSize == 64;
- }
+ unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
+ bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
+ bool isForcedDPP() const { return ForcedDPP; }
+ bool isForcedSDWA() const { return ForcedSDWA; }
+ std::unique_ptr<AMDGPUOperand> parseRegister();
bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
unsigned checkTargetMatchPredicate(MCInst &Inst) override;
+ unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
+ unsigned Kind) override;
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands, MCStreamer &Out,
uint64_t &ErrorInfo,
bool MatchingInlineAsm) override;
bool ParseDirective(AsmToken DirectiveID) override;
OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
+ StringRef parseMnemonicSuffix(StringRef Name);
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc, OperandVector &Operands) override;
- OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
- int64_t Default = 0);
+ OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
OperandVector &Operands,
- enum AMDGPUOperand::ImmTy ImmTy =
- AMDGPUOperand::ImmTyNone);
+ enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
+ bool (*ConvertResult)(int64_t&) = 0);
OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
- enum AMDGPUOperand::ImmTy ImmTy =
- AMDGPUOperand::ImmTyNone);
- OperandMatchResultTy parseOptionalOps(
- const ArrayRef<OptionalOperand> &OptionalOps,
- OperandVector &Operands);
+ enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
+ OperandMatchResultTy parseStringWithPrefix(StringRef Prefix, StringRef &Value);
+ OperandMatchResultTy parseImm(OperandVector &Operands);
+ OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
+ OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands);
+ OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands);
void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
void cvtDS(MCInst &Inst, const OperandVector &Operands);
- OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
- OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
- OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);
bool parseCnt(int64_t &IntVal);
OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
- OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
+ OperandMatchResultTy parseHwreg(OperandVector &Operands);
- OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
- OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
- void cvtFlat(MCInst &Inst, const OperandVector &Operands);
+private:
+ struct OperandInfoTy {
+ int64_t Id;
+ bool IsSymbolic;
+ OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
+ };
- void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
- OperandMatchResultTy parseOffset(OperandVector &Operands);
- OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
- OperandMatchResultTy parseGLC(OperandVector &Operands);
- OperandMatchResultTy parseSLC(OperandVector &Operands);
- OperandMatchResultTy parseTFE(OperandVector &Operands);
+ bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
+ bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
+public:
+ OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
- OperandMatchResultTy parseDMask(OperandVector &Operands);
- OperandMatchResultTy parseUNorm(OperandVector &Operands);
- OperandMatchResultTy parseR128(OperandVector &Operands);
+ OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
+ OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
+ void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
+ void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
+ void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
+ AMDGPUOperand::Ptr defaultGLC() const;
+ AMDGPUOperand::Ptr defaultSLC() const;
+ AMDGPUOperand::Ptr defaultTFE() const;
+
+ AMDGPUOperand::Ptr defaultDMask() const;
+ AMDGPUOperand::Ptr defaultUNorm() const;
+ AMDGPUOperand::Ptr defaultDA() const;
+ AMDGPUOperand::Ptr defaultR128() const;
+ AMDGPUOperand::Ptr defaultLWE() const;
+ AMDGPUOperand::Ptr defaultSMRDOffset() const;
+ AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
+
+ OperandMatchResultTy parseOModOperand(OperandVector &Operands);
+
+ void cvtId(MCInst &Inst, const OperandVector &Operands);
+ void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
- OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
+
+ void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
+ void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
+
+ OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
+ AMDGPUOperand::Ptr defaultRowMask() const;
+ AMDGPUOperand::Ptr defaultBankMask() const;
+ AMDGPUOperand::Ptr defaultBoundCtrl() const;
+ void cvtDPP(MCInst &Inst, const OperandVector &Operands);
+
+ OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
+ AMDGPUOperand::ImmTy Type);
+ OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
+ void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
+ void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
+ void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
+ void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
+ uint64_t BasicInstType);
};
struct OptionalOperand {
const char *Name;
AMDGPUOperand::ImmTy Type;
bool IsBit;
- int64_t Default;
bool (*ConvertResult)(int64_t&);
};
}
-static int getRegClass(bool IsVgpr, unsigned RegWidth) {
- if (IsVgpr) {
+static int getRegClass(RegisterKind Is, unsigned RegWidth) {
+ if (Is == IS_VGPR) {
switch (RegWidth) {
default: return -1;
case 1: return AMDGPU::VGPR_32RegClassID;
@@ -487,109 +749,379 @@ static int getRegClass(bool IsVgpr, unsigned RegWidth) {
case 8: return AMDGPU::VReg_256RegClassID;
case 16: return AMDGPU::VReg_512RegClassID;
}
+ } else if (Is == IS_TTMP) {
+ switch (RegWidth) {
+ default: return -1;
+ case 1: return AMDGPU::TTMP_32RegClassID;
+ case 2: return AMDGPU::TTMP_64RegClassID;
+ case 4: return AMDGPU::TTMP_128RegClassID;
+ }
+ } else if (Is == IS_SGPR) {
+ switch (RegWidth) {
+ default: return -1;
+ case 1: return AMDGPU::SGPR_32RegClassID;
+ case 2: return AMDGPU::SGPR_64RegClassID;
+ case 4: return AMDGPU::SGPR_128RegClassID;
+ case 8: return AMDGPU::SReg_256RegClassID;
+ case 16: return AMDGPU::SReg_512RegClassID;
+ }
}
-
- switch (RegWidth) {
- default: return -1;
- case 1: return AMDGPU::SGPR_32RegClassID;
- case 2: return AMDGPU::SGPR_64RegClassID;
- case 4: return AMDGPU::SReg_128RegClassID;
- case 8: return AMDGPU::SReg_256RegClassID;
- case 16: return AMDGPU::SReg_512RegClassID;
- }
+ return -1;
}
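getRegClass() maps a register kind plus tuple width (in dwords) to a register class ID, or -1 when no such class exists. A standalone sketch of the SGPR/TTMP arms above (VGPR widths elided; the width itself stands in for the class ID):

    #include <cassert>

    enum KindSketch { SGPR, TTMP };

    // -1 means "no such class"; e.g. there is no 256-bit TTMP tuple.
    static int regClassSketch(KindSketch Kind, unsigned W) {
      if (Kind == TTMP)
        return (W == 1 || W == 2 || W == 4) ? static_cast<int>(W) : -1;
      return (W == 1 || W == 2 || W == 4 || W == 8 || W == 16)
                 ? static_cast<int>(W) : -1;
    }

    int main() {
      assert(regClassSketch(SGPR, 2) == 2);    // "s[2:3]"   -> SGPR_64
      assert(regClassSketch(TTMP, 4) == 4);    // "ttmp[4:7]" -> TTMP_128
      assert(regClassSketch(TTMP, 8) == -1);   // no 256-bit TTMP class
      return 0;
    }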
-static unsigned getRegForName(StringRef RegName) {
-
+static unsigned getSpecialRegForName(StringRef RegName) {
return StringSwitch<unsigned>(RegName)
.Case("exec", AMDGPU::EXEC)
.Case("vcc", AMDGPU::VCC)
.Case("flat_scratch", AMDGPU::FLAT_SCR)
.Case("m0", AMDGPU::M0)
.Case("scc", AMDGPU::SCC)
+ .Case("tba", AMDGPU::TBA)
+ .Case("tma", AMDGPU::TMA)
.Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
.Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
.Case("vcc_lo", AMDGPU::VCC_LO)
.Case("vcc_hi", AMDGPU::VCC_HI)
.Case("exec_lo", AMDGPU::EXEC_LO)
.Case("exec_hi", AMDGPU::EXEC_HI)
+ .Case("tma_lo", AMDGPU::TMA_LO)
+ .Case("tma_hi", AMDGPU::TMA_HI)
+ .Case("tba_lo", AMDGPU::TBA_LO)
+ .Case("tba_hi", AMDGPU::TBA_HI)
.Default(0);
}
bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
- const AsmToken Tok = Parser.getTok();
- StartLoc = Tok.getLoc();
- EndLoc = Tok.getEndLoc();
+ auto R = parseRegister();
+ if (!R) return true;
+ assert(R->isReg());
+ RegNo = R->getReg();
+ StartLoc = R->getStartLoc();
+ EndLoc = R->getEndLoc();
+ return false;
+}
+
+bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
+{
+ switch (RegKind) {
+ case IS_SPECIAL:
+ if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
+ if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
+ if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
+ if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
+ if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
+ return false;
+ case IS_VGPR:
+ case IS_SGPR:
+ case IS_TTMP:
+ if (Reg1 != Reg + RegWidth) { return false; }
+ RegWidth++;
+ return true;
+ default:
+ assert(false); return false;
+ }
+}
+
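For the list form "[s0,s1,s2,s3]", the IS_VGPR/IS_SGPR/IS_TTMP arm above merges an element only if it is exactly the next consecutive register, widening the tuple each time. A standalone sketch of that rule:

    #include <cassert>

    static bool addNextSketch(unsigned &Reg, unsigned &RegWidth,
                              unsigned Reg1) {
      if (Reg1 != Reg + RegWidth)
        return false;
      ++RegWidth;
      return true;
    }

    int main() {
      unsigned Reg = 4, Width = 1;           // "[s4"
      assert(addNextSketch(Reg, Width, 5));  // ",s5" -> width 2
      assert(addNextSketch(Reg, Width, 6));  // ",s6" -> width 3
      assert(!addNextSketch(Reg, Width, 8)); // ",s8" -> rejected, gap
      return 0;
    }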
+bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
+{
+ const MCRegisterInfo *TRI = getContext().getRegisterInfo();
+ if (getLexer().is(AsmToken::Identifier)) {
+ StringRef RegName = Parser.getTok().getString();
+ if ((Reg = getSpecialRegForName(RegName))) {
+ Parser.Lex();
+ RegKind = IS_SPECIAL;
+ } else {
+ unsigned RegNumIndex = 0;
+ if (RegName[0] == 'v') {
+ RegNumIndex = 1;
+ RegKind = IS_VGPR;
+ } else if (RegName[0] == 's') {
+ RegNumIndex = 1;
+ RegKind = IS_SGPR;
+ } else if (RegName.startswith("ttmp")) {
+ RegNumIndex = strlen("ttmp");
+ RegKind = IS_TTMP;
+ } else {
+ return false;
+ }
+ if (RegName.size() > RegNumIndex) {
+ // Single 32-bit register: vXX.
+ if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
+ return false;
+ Parser.Lex();
+ RegWidth = 1;
+ } else {
+ // Range of registers: v[XX:YY]. ":YY" is optional.
+ Parser.Lex();
+ int64_t RegLo, RegHi;
+ if (getLexer().isNot(AsmToken::LBrac))
+ return false;
+ Parser.Lex();
+
+ if (getParser().parseAbsoluteExpression(RegLo))
+ return false;
+
+ const bool isRBrace = getLexer().is(AsmToken::RBrac);
+ if (!isRBrace && getLexer().isNot(AsmToken::Colon))
+ return false;
+ Parser.Lex();
+
+ if (isRBrace) {
+ RegHi = RegLo;
+ } else {
+ if (getParser().parseAbsoluteExpression(RegHi))
+ return false;
+
+ if (getLexer().isNot(AsmToken::RBrac))
+ return false;
+ Parser.Lex();
+ }
+ RegNum = (unsigned) RegLo;
+ RegWidth = (RegHi - RegLo) + 1;
+ }
+ }
+ } else if (getLexer().is(AsmToken::LBrac)) {
+ // List of consecutive registers: [s0,s1,s2,s3]
+ Parser.Lex();
+ if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth))
+ return false;
+ if (RegWidth != 1)
+ return false;
+ RegisterKind RegKind1;
+ unsigned Reg1, RegNum1, RegWidth1;
+ do {
+ if (getLexer().is(AsmToken::Comma)) {
+ Parser.Lex();
+ } else if (getLexer().is(AsmToken::RBrac)) {
+ Parser.Lex();
+ break;
+ } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
+ if (RegWidth1 != 1) {
+ return false;
+ }
+ if (RegKind1 != RegKind) {
+ return false;
+ }
+ if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
+ return false;
+ }
+ } else {
+ return false;
+ }
+ } while (true);
+ } else {
+ return false;
+ }
+ switch (RegKind) {
+ case IS_SPECIAL:
+ RegNum = 0;
+ RegWidth = 1;
+ break;
+ case IS_VGPR:
+ case IS_SGPR:
+ case IS_TTMP:
+ {
+ unsigned Size = 1;
+ if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
+ // SGPR and TTMP registers must be aligned. The maximum required alignment is 4 dwords.
+ Size = std::min(RegWidth, 4u);
+ }
+ if (RegNum % Size != 0)
+ return false;
+ RegNum = RegNum / Size;
+ int RCID = getRegClass(RegKind, RegWidth);
+ if (RCID == -1)
+ return false;
+ const MCRegisterClass RC = TRI->getRegClass(RCID);
+ if (RegNum >= RC.getNumRegs())
+ return false;
+ Reg = RC.getRegister(RegNum);
+ break;
+ }
+
+ default:
+ assert(false); return false;
+ }
+
+ if (!subtargetHasRegister(*TRI, Reg))
+ return false;
+ return true;
+}
+
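The RegNum % Size check above enforces that SGPR and TTMP tuples start at an index aligned to min(width, 4) dwords; the division then converts the dword index into an index within the aligned class. A standalone sketch of the predicate:

    #include <algorithm>
    #include <cassert>

    static bool tupleAligned(unsigned RegNum, unsigned RegWidth) {
      unsigned Size = std::min(RegWidth, 4u);
      return RegNum % Size == 0;
    }

    int main() {
      assert(tupleAligned(2, 2));   // "s[2:3]" is accepted
      assert(!tupleAligned(3, 2));  // "s[3:4]" is rejected
      assert(tupleAligned(8, 8));   // "s[8:15]" needs only 4-dword alignment
      return 0;
    }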
+std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
+ const auto &Tok = Parser.getTok();
+ SMLoc StartLoc = Tok.getLoc();
+ SMLoc EndLoc = Tok.getEndLoc();
const MCRegisterInfo *TRI = getContext().getRegisterInfo();
- StringRef RegName = Tok.getString();
- RegNo = getRegForName(RegName);
+ RegisterKind RegKind;
+ unsigned Reg, RegNum, RegWidth;
- if (RegNo) {
+ if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
+ return nullptr;
+ }
+ return AMDGPUOperand::CreateReg(Reg, StartLoc, EndLoc,
+ TRI, &getSTI(), false);
+}
+
+AMDGPUAsmParser::OperandMatchResultTy
+AMDGPUAsmParser::parseImm(OperandVector &Operands) {
+ bool Minus = false;
+ if (getLexer().getKind() == AsmToken::Minus) {
+ Minus = true;
Parser.Lex();
- return !subtargetHasRegister(*TRI, RegNo);
}
- // Match vgprs and sgprs
- if (RegName[0] != 's' && RegName[0] != 'v')
- return true;
+ SMLoc S = Parser.getTok().getLoc();
+ switch(getLexer().getKind()) {
+ case AsmToken::Integer: {
+ int64_t IntVal;
+ if (getParser().parseAbsoluteExpression(IntVal))
+ return MatchOperand_ParseFail;
+ if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
+ Error(S, "invalid immediate: only 32-bit values are legal");
+ return MatchOperand_ParseFail;
+ }
- bool IsVgpr = RegName[0] == 'v';
- unsigned RegWidth;
- unsigned RegIndexInClass;
- if (RegName.size() > 1) {
- // We have a 32-bit register
- RegWidth = 1;
- if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
- return true;
+ if (Minus)
+ IntVal *= -1;
+ Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
+ return MatchOperand_Success;
+ }
+ case AsmToken::Real: {
+ // FIXME: We should emit an error if a double precision floating-point
+ // value is used. I'm not sure the best way to detect this.
+ int64_t IntVal;
+ if (getParser().parseAbsoluteExpression(IntVal))
+ return MatchOperand_ParseFail;
+
+ APFloat F((float)BitsToDouble(IntVal));
+ if (Minus)
+ F.changeSign();
+ Operands.push_back(
+ AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S,
+ AMDGPUOperand::ImmTyNone, true));
+ return MatchOperand_Success;
+ }
+ default:
+ return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
+ }
+}
+
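parseImm() above rejects any literal that needs more than 32 bits, accepting values representable as either int32 or uint32. A standalone sketch of that range test:

    #include <cassert>
    #include <cstdint>

    // Equivalent of the isInt<32>() || isUInt<32>() check above.
    static bool fitsIn32(int64_t V) {
      return (V >= INT32_MIN && V <= INT32_MAX) || (V >= 0 && V <= UINT32_MAX);
    }

    int main() {
      assert(fitsIn32(-1));              // sign-extended 32-bit
      assert(fitsIn32(0xFFFFFFFFLL));    // zero-extended 32-bit
      assert(!fitsIn32(0x100000000LL));  // needs more than 32 bits
      return 0;
    }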
+AMDGPUAsmParser::OperandMatchResultTy
+AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands) {
+ auto res = parseImm(Operands);
+ if (res != MatchOperand_NoMatch) {
+ return res;
+ }
+
+ if (auto R = parseRegister()) {
+ assert(R->isReg());
+ R->Reg.IsForcedVOP3 = isForcedVOP3();
+ Operands.push_back(std::move(R));
+ return MatchOperand_Success;
+ }
+ return MatchOperand_ParseFail;
+}
+
+AMDGPUAsmParser::OperandMatchResultTy
+AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands) {
+ // XXX: During parsing we can't determine whether a minus sign means a
+ // negate modifier or a negative immediate value.
+ // By default we assume it is a modifier.
+ bool Negate = false, Abs = false, Abs2 = false;
+
 if (getLexer().getKind() == AsmToken::Minus) {
Parser.Lex();
- } else {
- // We have a register greater than 32-bits.
+ Negate = true;
+ }
- int64_t RegLo, RegHi;
+ if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
Parser.Lex();
- if (getLexer().isNot(AsmToken::LBrac))
- return true;
+ Abs2 = true;
+ if (getLexer().isNot(AsmToken::LParen)) {
+ Error(Parser.getTok().getLoc(), "expected left paren after abs");
+ return MatchOperand_ParseFail;
+ }
+ Parser.Lex();
+ }
+ if (getLexer().getKind() == AsmToken::Pipe) {
+ if (Abs2) {
+ Error(Parser.getTok().getLoc(), "expected register or immediate");
+ return MatchOperand_ParseFail;
+ }
Parser.Lex();
- if (getParser().parseAbsoluteExpression(RegLo))
- return true;
+ Abs = true;
+ }
- if (getLexer().isNot(AsmToken::Colon))
- return true;
+ auto Res = parseRegOrImm(Operands);
+ if (Res != MatchOperand_Success) {
+ return Res;
+ }
+ AMDGPUOperand::Modifiers Mods = {false, false, false};
+ if (Negate) {
+ Mods.Neg = true;
+ }
+ if (Abs) {
+ if (getLexer().getKind() != AsmToken::Pipe) {
+ Error(Parser.getTok().getLoc(), "expected vertical bar");
+ return MatchOperand_ParseFail;
+ }
Parser.Lex();
- if (getParser().parseAbsoluteExpression(RegHi))
- return true;
+ Mods.Abs = true;
+ }
+ if (Abs2) {
+ if (getLexer().isNot(AsmToken::RParen)) {
+ Error(Parser.getTok().getLoc(), "expected closing parentheses");
+ return MatchOperand_ParseFail;
+ }
+ Parser.Lex();
+ Mods.Abs = true;
+ }
- if (getLexer().isNot(AsmToken::RBrac))
- return true;
+ if (Mods.hasFPModifiers()) {
+ AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
+ Op.setModifiers(Mods);
+ }
+ return MatchOperand_Success;
+}
- Parser.Lex();
- RegWidth = (RegHi - RegLo) + 1;
- if (IsVgpr) {
- // VGPR registers aren't aligned.
- RegIndexInClass = RegLo;
- } else {
- // SGPR registers are aligned. Max alignment is 4 dwords.
- unsigned Size = std::min(RegWidth, 4u);
- if (RegLo % Size != 0)
- return true;
+AMDGPUAsmParser::OperandMatchResultTy
+AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands) {
+ bool Sext = false;
- RegIndexInClass = RegLo / Size;
+ if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "sext") {
+ Parser.Lex();
+ Sext = true;
+ if (getLexer().isNot(AsmToken::LParen)) {
+ Error(Parser.getTok().getLoc(), "expected left paren after sext");
+ return MatchOperand_ParseFail;
}
+ Parser.Lex();
}
- int RCID = getRegClass(IsVgpr, RegWidth);
- if (RCID == -1)
- return true;
-
- const MCRegisterClass RC = TRI->getRegClass(RCID);
- if (RegIndexInClass >= RC.getNumRegs())
- return true;
+ auto Res = parseRegOrImm(Operands);
+ if (Res != MatchOperand_Success) {
+ return Res;
+ }
- RegNo = RC.getRegister(RegIndexInClass);
- return !subtargetHasRegister(*TRI, RegNo);
+ AMDGPUOperand::Modifiers Mods = {false, false, false};
+ if (Sext) {
+ if (getLexer().isNot(AsmToken::RParen)) {
+ Error(Parser.getTok().getLoc(), "expected closing parentheses");
+ return MatchOperand_ParseFail;
+ }
+ Parser.Lex();
+ Mods.Sext = true;
+ }
+
+ if (Mods.hasIntModifiers()) {
+ AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
+ Op.setModifiers(Mods);
+ }
+ return MatchOperand_Success;
}
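Taken together, the two parsers accept the following source spellings and modifier flags (an illustrative table printed by a trivial program; note the pipe and abs() spellings cannot be mixed on one operand):

    #include <cstdio>

    int main() {
      std::puts("-v0      -> Neg");
      std::puts("|v0|     -> Abs");
      std::puts("abs(v0)  -> Abs");
      std::puts("-|v0|    -> Neg, Abs");
      std::puts("-abs(v0) -> Neg, Abs");
      std::puts("sext(v0) -> Sext");
      return 0;
    }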
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
@@ -597,7 +1129,9 @@ unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
- (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
+ (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
+ (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
+ (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
return Match_InvalidOperand;
if ((TSFlags & SIInstrFlags::VOP3) &&
@@ -608,7 +1142,6 @@ unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
return Match_Success;
}
-
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out,
@@ -632,31 +1165,8 @@ bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SMLoc ErrorLoc = IDLoc;
if (ErrorInfo != ~0ULL) {
if (ErrorInfo >= Operands.size()) {
- if (isForcedVOP3()) {
- // If 64-bit encoding has been forced we can end up with no
- // clamp or omod operands if none of the registers have modifiers,
- // so we need to add these to the operand list.
- AMDGPUOperand &LastOp =
- ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
- if (LastOp.isRegKind() ||
- (LastOp.isImm() &&
- LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
- SMLoc S = Parser.getTok().getLoc();
- Operands.push_back(AMDGPUOperand::CreateImm(0, S,
- AMDGPUOperand::ImmTyClamp));
- Operands.push_back(AMDGPUOperand::CreateImm(0, S,
- AMDGPUOperand::ImmTyOMod));
- bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
- Out, ErrorInfo,
- MatchingInlineAsm);
- if (!Res)
- return Res;
- }
-
- }
return Error(IDLoc, "too few operands for instruction");
}
-
ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
if (ErrorLoc == SMLoc())
ErrorLoc = IDLoc;
@@ -762,164 +1272,12 @@ bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
amd_kernel_code_t &Header) {
-
- if (getLexer().isNot(AsmToken::Equal))
- return TokError("expected '='");
- Lex();
-
- if (getLexer().isNot(AsmToken::Integer))
- return TokError("amd_kernel_code_t values must be integers");
-
- uint64_t Value = getLexer().getTok().getIntVal();
+ SmallString<40> ErrStr;
+ raw_svector_ostream Err(ErrStr);
+ if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
+ return TokError(Err.str());
+ }
Lex();
-
- if (ID == "kernel_code_version_major")
- Header.amd_kernel_code_version_major = Value;
- else if (ID == "kernel_code_version_minor")
- Header.amd_kernel_code_version_minor = Value;
- else if (ID == "machine_kind")
- Header.amd_machine_kind = Value;
- else if (ID == "machine_version_major")
- Header.amd_machine_version_major = Value;
- else if (ID == "machine_version_minor")
- Header.amd_machine_version_minor = Value;
- else if (ID == "machine_version_stepping")
- Header.amd_machine_version_stepping = Value;
- else if (ID == "kernel_code_entry_byte_offset")
- Header.kernel_code_entry_byte_offset = Value;
- else if (ID == "kernel_code_prefetch_byte_size")
- Header.kernel_code_prefetch_byte_size = Value;
- else if (ID == "max_scratch_backing_memory_byte_size")
- Header.max_scratch_backing_memory_byte_size = Value;
- else if (ID == "compute_pgm_rsrc1_vgprs")
- Header.compute_pgm_resource_registers |= S_00B848_VGPRS(Value);
- else if (ID == "compute_pgm_rsrc1_sgprs")
- Header.compute_pgm_resource_registers |= S_00B848_SGPRS(Value);
- else if (ID == "compute_pgm_rsrc1_priority")
- Header.compute_pgm_resource_registers |= S_00B848_PRIORITY(Value);
- else if (ID == "compute_pgm_rsrc1_float_mode")
- Header.compute_pgm_resource_registers |= S_00B848_FLOAT_MODE(Value);
- else if (ID == "compute_pgm_rsrc1_priv")
- Header.compute_pgm_resource_registers |= S_00B848_PRIV(Value);
- else if (ID == "compute_pgm_rsrc1_dx10_clamp")
- Header.compute_pgm_resource_registers |= S_00B848_DX10_CLAMP(Value);
- else if (ID == "compute_pgm_rsrc1_debug_mode")
- Header.compute_pgm_resource_registers |= S_00B848_DEBUG_MODE(Value);
- else if (ID == "compute_pgm_rsrc1_ieee_mode")
- Header.compute_pgm_resource_registers |= S_00B848_IEEE_MODE(Value);
- else if (ID == "compute_pgm_rsrc2_scratch_en")
- Header.compute_pgm_resource_registers |= (S_00B84C_SCRATCH_EN(Value) << 32);
- else if (ID == "compute_pgm_rsrc2_user_sgpr")
- Header.compute_pgm_resource_registers |= (S_00B84C_USER_SGPR(Value) << 32);
- else if (ID == "compute_pgm_rsrc2_tgid_x_en")
- Header.compute_pgm_resource_registers |= (S_00B84C_TGID_X_EN(Value) << 32);
- else if (ID == "compute_pgm_rsrc2_tgid_y_en")
- Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Y_EN(Value) << 32);
- else if (ID == "compute_pgm_rsrc2_tgid_z_en")
- Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Z_EN(Value) << 32);
- else if (ID == "compute_pgm_rsrc2_tg_size_en")
- Header.compute_pgm_resource_registers |= (S_00B84C_TG_SIZE_EN(Value) << 32);
- else if (ID == "compute_pgm_rsrc2_tidig_comp_cnt")
- Header.compute_pgm_resource_registers |=
- (S_00B84C_TIDIG_COMP_CNT(Value) << 32);
- else if (ID == "compute_pgm_rsrc2_excp_en_msb")
- Header.compute_pgm_resource_registers |=
- (S_00B84C_EXCP_EN_MSB(Value) << 32);
- else if (ID == "compute_pgm_rsrc2_lds_size")
- Header.compute_pgm_resource_registers |= (S_00B84C_LDS_SIZE(Value) << 32);
- else if (ID == "compute_pgm_rsrc2_excp_en")
- Header.compute_pgm_resource_registers |= (S_00B84C_EXCP_EN(Value) << 32);
- else if (ID == "compute_pgm_resource_registers")
- Header.compute_pgm_resource_registers = Value;
- else if (ID == "enable_sgpr_private_segment_buffer")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT);
- else if (ID == "enable_sgpr_dispatch_ptr")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT);
- else if (ID == "enable_sgpr_queue_ptr")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT);
- else if (ID == "enable_sgpr_kernarg_segment_ptr")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT);
- else if (ID == "enable_sgpr_dispatch_id")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT);
- else if (ID == "enable_sgpr_flat_scratch_init")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT);
- else if (ID == "enable_sgpr_private_segment_size")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT);
- else if (ID == "enable_sgpr_grid_workgroup_count_x")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT);
- else if (ID == "enable_sgpr_grid_workgroup_count_y")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT);
- else if (ID == "enable_sgpr_grid_workgroup_count_z")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT);
- else if (ID == "enable_ordered_append_gds")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT);
- else if (ID == "private_element_size")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT);
- else if (ID == "is_ptr64")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_IS_PTR64_SHIFT);
- else if (ID == "is_dynamic_callstack")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT);
- else if (ID == "is_debug_enabled")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT);
- else if (ID == "is_xnack_enabled")
- Header.code_properties |=
- (Value << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT);
- else if (ID == "workitem_private_segment_byte_size")
- Header.workitem_private_segment_byte_size = Value;
- else if (ID == "workgroup_group_segment_byte_size")
- Header.workgroup_group_segment_byte_size = Value;
- else if (ID == "gds_segment_byte_size")
- Header.gds_segment_byte_size = Value;
- else if (ID == "kernarg_segment_byte_size")
- Header.kernarg_segment_byte_size = Value;
- else if (ID == "workgroup_fbarrier_count")
- Header.workgroup_fbarrier_count = Value;
- else if (ID == "wavefront_sgpr_count")
- Header.wavefront_sgpr_count = Value;
- else if (ID == "workitem_vgpr_count")
- Header.workitem_vgpr_count = Value;
- else if (ID == "reserved_vgpr_first")
- Header.reserved_vgpr_first = Value;
- else if (ID == "reserved_vgpr_count")
- Header.reserved_vgpr_count = Value;
- else if (ID == "reserved_sgpr_first")
- Header.reserved_sgpr_first = Value;
- else if (ID == "reserved_sgpr_count")
- Header.reserved_sgpr_count = Value;
- else if (ID == "debug_wavefront_private_segment_offset_sgpr")
- Header.debug_wavefront_private_segment_offset_sgpr = Value;
- else if (ID == "debug_private_segment_buffer_sgpr")
- Header.debug_private_segment_buffer_sgpr = Value;
- else if (ID == "kernarg_segment_alignment")
- Header.kernarg_segment_alignment = Value;
- else if (ID == "group_segment_alignment")
- Header.group_segment_alignment = Value;
- else if (ID == "private_segment_alignment")
- Header.private_segment_alignment = Value;
- else if (ID == "wavefront_size")
- Header.wavefront_size = Value;
- else if (ID == "call_convention")
- Header.call_convention = Value;
- else if (ID == "runtime_loader_kernel_symbol")
- Header.runtime_loader_kernel_symbol = Value;
- else
- return TokError("amd_kernel_code_t value not recognized.");
-
return false;
}
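
The chain removed above also documents how amd_kernel_code_t packs both COMPUTE_PGM_RSRC words into one 64-bit field: rsrc1 bits land in [31:0], and each rsrc2 field is shifted into [63:32]. A minimal standalone sketch of that packing follows; the encoder functions here are simplified stand-ins for the real S_00B848_*/S_00B84C_* macros, which are not shown in this diff.

#include <cstdint>
#include <cstdio>

// Simplified stand-ins for the S_00B848_* (rsrc1) and S_00B84C_* (rsrc2)
// field encoders; only the shift pattern matters for this illustration.
static uint64_t encodeVgprs(uint64_t Granules) { return Granules & 0x3f; } // rsrc1 [5:0]
static uint64_t encodeScratchEn(uint64_t V) { return V & 0x1; }            // rsrc2 bit 0

int main() {
  uint64_t Regs = 0;
  Regs |= encodeVgprs(4);            // compute_pgm_rsrc1_vgprs = 4
  Regs |= encodeScratchEn(1) << 32;  // compute_pgm_rsrc2_scratch_en = 1
  std::printf("0x%016llx\n", (unsigned long long)Regs); // 0x0000000100000004
  return 0;
}
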
@@ -930,9 +1288,6 @@ bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
while (true) {
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return TokError("amd_kernel_code_t values must begin on a new line");
-
// Lex EndOfStatement. This is in a while loop, because lexing a comment
// will set the current token to EndOfStatement.
while(getLexer().is(AsmToken::EndOfStatement))
@@ -1026,7 +1381,7 @@ bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
if (IDVal == ".amd_kernel_code_t")
return ParseDirectiveAMDKernelCodeT();
- if (IDVal == ".hsatext" || IDVal == ".text")
+ if (IDVal == ".hsatext")
return ParseSectionDirectiveHSAText();
if (IDVal == ".amdgpu_hsa_kernel")
@@ -1078,19 +1433,6 @@ bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
return true;
}
-static bool operandsHaveModifiers(const OperandVector &Operands) {
-
- for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
- const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
- if (Op.isRegKind() && Op.hasModifiers())
- return true;
- if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
- Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
- return true;
- }
- return false;
-}
-
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
@@ -1107,113 +1449,59 @@ AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
getLexer().is(AsmToken::EndOfStatement))
return ResTy;
- bool Negate = false, Abs = false;
- if (getLexer().getKind()== AsmToken::Minus) {
- Parser.Lex();
- Negate = true;
- }
+ ResTy = parseRegOrImm(Operands);
- if (getLexer().getKind() == AsmToken::Pipe) {
- Parser.Lex();
- Abs = true;
- }
-
- switch(getLexer().getKind()) {
- case AsmToken::Integer: {
- SMLoc S = Parser.getTok().getLoc();
- int64_t IntVal;
- if (getParser().parseAbsoluteExpression(IntVal))
- return MatchOperand_ParseFail;
- if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
- Error(S, "invalid immediate: only 32-bit values are legal");
- return MatchOperand_ParseFail;
- }
-
- if (Negate)
- IntVal *= -1;
- Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
- return MatchOperand_Success;
- }
- case AsmToken::Real: {
- // FIXME: We should emit an error if a double precision floating-point
- // value is used. I'm not sure the best way to detect this.
- SMLoc S = Parser.getTok().getLoc();
- int64_t IntVal;
- if (getParser().parseAbsoluteExpression(IntVal))
- return MatchOperand_ParseFail;
+ if (ResTy == MatchOperand_Success)
+ return ResTy;
- APFloat F((float)BitsToDouble(IntVal));
- if (Negate)
- F.changeSign();
- Operands.push_back(
- AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
+ if (getLexer().getKind() == AsmToken::Identifier) {
+ // If this identifier is a symbol, we want to create an expression for it.
+ // It is a little difficult to distinguish between a symbol name and
+ // an instruction flag like 'gds'. In order to do this, we parse
+ // all tokens as expressions and then treat the symbol name as the token
+ // string when we want to interpret the operand as a token.
+ const auto &Tok = Parser.getTok();
+ SMLoc S = Tok.getLoc();
+ const MCExpr *Expr = nullptr;
+ if (!Parser.parseExpression(Expr)) {
+ Operands.push_back(AMDGPUOperand::CreateExpr(Expr, S));
return MatchOperand_Success;
}
- case AsmToken::Identifier: {
- SMLoc S, E;
- unsigned RegNo;
- if (!ParseRegister(RegNo, S, E)) {
-
- bool HasModifiers = operandsHaveModifiers(Operands);
- unsigned Modifiers = 0;
- if (Negate)
- Modifiers |= 0x1;
-
- if (Abs) {
- if (getLexer().getKind() != AsmToken::Pipe)
- return MatchOperand_ParseFail;
- Parser.Lex();
- Modifiers |= 0x2;
- }
-
- if (Modifiers && !HasModifiers) {
- // We are adding a modifier to src1 or src2 and previous sources
- // don't have modifiers, so we need to go back and empty modifiers
- // for each previous source.
- for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;
- --PrevRegIdx) {
-
- AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
- RegOp.setModifiers(0);
- }
- }
-
-
- Operands.push_back(AMDGPUOperand::CreateReg(
- RegNo, S, E, getContext().getRegisterInfo(), &getSTI(),
- isForcedVOP3()));
-
- if (HasModifiers || Modifiers) {
- AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
- RegOp.setModifiers(Modifiers);
-
- }
- } else {
- Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
- S));
- Parser.Lex();
- }
- return MatchOperand_Success;
- }
- default:
- return MatchOperand_NoMatch;
+ Operands.push_back(AMDGPUOperand::CreateToken(Tok.getString(), Tok.getLoc()));
+ Parser.Lex();
+ return MatchOperand_Success;
}
+ return MatchOperand_NoMatch;
}
-bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
- StringRef Name,
- SMLoc NameLoc, OperandVector &Operands) {
-
+StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
// Clear any forced encodings from the previous instruction.
setForcedEncodingSize(0);
+ setForcedDPP(false);
+ setForcedSDWA(false);
- if (Name.endswith("_e64"))
+ if (Name.endswith("_e64")) {
setForcedEncodingSize(64);
- else if (Name.endswith("_e32"))
+ return Name.substr(0, Name.size() - 4);
+ } else if (Name.endswith("_e32")) {
setForcedEncodingSize(32);
+ return Name.substr(0, Name.size() - 4);
+ } else if (Name.endswith("_dpp")) {
+ setForcedDPP(true);
+ return Name.substr(0, Name.size() - 4);
+ } else if (Name.endswith("_sdwa")) {
+ setForcedSDWA(true);
+ return Name.substr(0, Name.size() - 5);
+ }
+ return Name;
+}
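
parseMnemonicSuffix lets one matcher entry serve every encoding of a mnemonic: the writer appends a suffix to force an encoding, and the parser strips it before matching. Below is a rough standalone model of the _e32/_e64 handling under that assumption; the dpp/sdwa cases are omitted for brevity and the mnemonic is just an example.

#include <cassert>
#include <string>

// Returns the bare mnemonic; ForcedSize reports 32 or 64 when an
// _e32/_e64 suffix forced the encoding, 0 otherwise.
static std::string stripEncodingSuffix(std::string Name, unsigned &ForcedSize) {
  ForcedSize = 0;
  auto endsWith = [&](const std::string &S) {
    return Name.size() >= S.size() &&
           Name.compare(Name.size() - S.size(), S.size(), S) == 0;
  };
  if (endsWith("_e64")) { ForcedSize = 64; Name.resize(Name.size() - 4); }
  else if (endsWith("_e32")) { ForcedSize = 32; Name.resize(Name.size() - 4); }
  return Name;
}

int main() {
  unsigned Size;
  assert(stripEncodingSuffix("v_add_f32_e64", Size) == "v_add_f32" && Size == 64);
  return 0;
}
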
+bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
+ StringRef Name,
+ SMLoc NameLoc, OperandVector &Operands) {
// Add the instruction mnemonic
+ Name = parseMnemonicSuffix(Name);
Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));
while (!getLexer().is(AsmToken::EndOfStatement)) {
@@ -1225,20 +1513,21 @@ bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
switch (Res) {
case MatchOperand_Success: break;
- case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
- "failed parsing operand.");
- case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
- "not a valid operand.");
+ case MatchOperand_ParseFail:
+ Error(getLexer().getLoc(), "failed parsing operand.");
+ while (!getLexer().is(AsmToken::EndOfStatement)) {
+ Parser.Lex();
+ }
+ return true;
+ case MatchOperand_NoMatch:
+ Error(getLexer().getLoc(), "not a valid operand.");
+ while (!getLexer().is(AsmToken::EndOfStatement)) {
+ Parser.Lex();
+ }
+ return true;
}
}
- // Once we reach end of statement, continue parsing so we can add default
- // values for optional arguments.
- AMDGPUAsmParser::OperandMatchResultTy Res;
- while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
- if (Res != MatchOperand_Success)
- return Error(getLexer().getLoc(), "failed parsing operand.");
- }
return false;
}
@@ -1247,22 +1536,14 @@ bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
//===----------------------------------------------------------------------===//
AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
- int64_t Default) {
-
- // We are at the end of the statement, and this is a default argument, so
- // use a default value.
- if (getLexer().is(AsmToken::EndOfStatement)) {
- Int = Default;
- return MatchOperand_Success;
- }
-
+AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
switch(getLexer().getKind()) {
default: return MatchOperand_NoMatch;
case AsmToken::Identifier: {
- StringRef OffsetName = Parser.getTok().getString();
- if (!OffsetName.equals(Prefix))
+ StringRef Name = Parser.getTok().getString();
+ if (!Name.equals(Prefix)) {
return MatchOperand_NoMatch;
+ }
Parser.Lex();
if (getLexer().isNot(AsmToken::Colon))
@@ -1282,16 +1563,21 @@ AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
- enum AMDGPUOperand::ImmTy ImmTy) {
+ enum AMDGPUOperand::ImmTy ImmTy,
+ bool (*ConvertResult)(int64_t&)) {
SMLoc S = Parser.getTok().getLoc();
- int64_t Offset = 0;
+ int64_t Value = 0;
- AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
+ AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
if (Res != MatchOperand_Success)
return Res;
- Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
+ if (ConvertResult && !ConvertResult(Value)) {
+ return MatchOperand_ParseFail;
+ }
+
+ Operands.push_back(AMDGPUOperand::CreateImm(Value, S, ImmTy));
return MatchOperand_Success;
}
@@ -1327,101 +1613,52 @@ AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
return MatchOperand_Success;
}
-static bool operandsHasOptionalOp(const OperandVector &Operands,
- const OptionalOperand &OOp) {
- for (unsigned i = 0; i < Operands.size(); i++) {
- const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
- if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
- (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
- return true;
+typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
+void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
+ OptionalImmIndexMap& OptionalIdx,
+ enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
+ auto i = OptionalIdx.find(ImmT);
+ if (i != OptionalIdx.end()) {
+ unsigned Idx = i->second;
+ ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
+ } else {
+ Inst.addOperand(MCOperand::createImm(Default));
}
- return false;
}
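
addOptionalImmOperand is what keeps the MCInst operand layout fixed even when the writer omits optional operands: values that were parsed are added via their recorded index, and missing ones fall back to the default. A small model of that behaviour, with the LLVM types simplified to plain integers:

#include <cassert>
#include <cstdint>
#include <map>
#include <vector>

int main() {
  enum ImmTy { Offset, GDS };          // stand-ins for AMDGPUOperand::ImmTy
  std::vector<int64_t> Parsed = {16};  // e.g. "offset:16" was seen at index 0
  std::map<ImmTy, unsigned> OptionalIdx = {{Offset, 0}};
  std::vector<int64_t> Inst;           // stand-in for the MCInst operand list

  auto addOptional = [&](ImmTy T, int64_t Default) {
    auto It = OptionalIdx.find(T);
    Inst.push_back(It != OptionalIdx.end() ? Parsed[It->second] : Default);
  };
  addOptional(Offset, 0);              // present: emits the parsed 16
  addOptional(GDS, 0);                 // absent: emits the default 0
  assert(Inst[0] == 16 && Inst[1] == 0);
  return 0;
}
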
AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
- OperandVector &Operands) {
- SMLoc S = Parser.getTok().getLoc();
- for (const OptionalOperand &Op : OptionalOps) {
- if (operandsHasOptionalOp(Operands, Op))
- continue;
- AMDGPUAsmParser::OperandMatchResultTy Res;
- int64_t Value;
- if (Op.IsBit) {
- Res = parseNamedBit(Op.Name, Operands, Op.Type);
- if (Res == MatchOperand_NoMatch)
- continue;
- return Res;
- }
-
- Res = parseIntWithPrefix(Op.Name, Value, Op.Default);
-
- if (Res == MatchOperand_NoMatch)
- continue;
-
- if (Res != MatchOperand_Success)
- return Res;
+AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
+ if (getLexer().isNot(AsmToken::Identifier)) {
+ return MatchOperand_NoMatch;
+ }
+ StringRef Tok = Parser.getTok().getString();
+ if (Tok != Prefix) {
+ return MatchOperand_NoMatch;
+ }
- if (Op.ConvertResult && !Op.ConvertResult(Value)) {
- return MatchOperand_ParseFail;
- }
+ Parser.Lex();
+ if (getLexer().isNot(AsmToken::Colon)) {
+ return MatchOperand_ParseFail;
+ }
- Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
- return MatchOperand_Success;
+ Parser.Lex();
+ if (getLexer().isNot(AsmToken::Identifier)) {
+ return MatchOperand_ParseFail;
}
- return MatchOperand_NoMatch;
+
+ Value = Parser.getTok().getString();
+ return MatchOperand_Success;
}
//===----------------------------------------------------------------------===//
// ds
//===----------------------------------------------------------------------===//
-static const OptionalOperand DSOptionalOps [] = {
- {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
- {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
-};
-
-static const OptionalOperand DSOptionalOpsOff01 [] = {
- {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
- {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
- {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
-};
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
- return parseOptionalOps(DSOptionalOps, Operands);
-}
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
- return parseOptionalOps(DSOptionalOpsOff01, Operands);
-}
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
- SMLoc S = Parser.getTok().getLoc();
- AMDGPUAsmParser::OperandMatchResultTy Res =
- parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
- if (Res == MatchOperand_NoMatch) {
- Operands.push_back(AMDGPUOperand::CreateImm(0, S,
- AMDGPUOperand::ImmTyOffset));
- Res = MatchOperand_Success;
- }
- return Res;
-}
-
-bool AMDGPUOperand::isDSOffset() const {
- return isImm() && isUInt<16>(getImm());
-}
-
-bool AMDGPUOperand::isDSOffset01() const {
- return isImm() && isUInt<8>(getImm());
-}
-
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
const OperandVector &Operands) {
- std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
+ OptionalImmIndexMap OptionalIdx;
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1436,13 +1673,10 @@ void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
OptionalIdx[Op.getImmTy()] = i;
}
- unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
- unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
- unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
- ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
- ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
- ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
@@ -1469,12 +1703,11 @@ void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
OptionalIdx[Op.getImmTy()] = i;
}
- unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
- ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
if (!GDSOnly) {
- unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
- ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
}
Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
@@ -1516,7 +1749,7 @@ bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
CntMask = 0x7;
CntShift = 4;
} else if (CntName == "lgkmcnt") {
- CntMask = 0x7;
+ CntMask = 0xf;
CntShift = 8;
} else {
return true;
@@ -1532,8 +1765,8 @@ AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
// Disable all counters by default.
// vmcnt [3:0]
// expcnt [6:4]
- // lgkmcnt [10:8]
- int64_t CntVal = 0x77f;
+ // lgkmcnt [11:8]
+ int64_t CntVal = 0xf7f;
SMLoc S = Parser.getTok().getLoc();
switch(getLexer().getKind()) {
@@ -1555,10 +1788,298 @@ AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
return MatchOperand_Success;
}
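
The two hunks above widen lgkmcnt from 3 to 4 bits, so the s_waitcnt fields are vmcnt [3:0], expcnt [6:4], lgkmcnt [11:8], and the all-disabled default becomes 0xf7f. A worked sketch of how parseCnt folds one counter into the encoding (clear the field, then OR the new value in):

#include <cstdint>
#include <cstdio>

static uint32_t setField(uint32_t Enc, uint32_t Val, uint32_t Mask,
                         uint32_t Shift) {
  Enc &= ~(Mask << Shift);            // clear the counter's field
  return Enc | ((Val & Mask) << Shift);
}

int main() {
  uint32_t Enc = 0xf7f;               // default: all counters disabled
  Enc = setField(Enc, 0, 0xf, 0);     // vmcnt(0)
  Enc = setField(Enc, 0, 0xf, 8);     // lgkmcnt(0), 4-bit mask after the fix
  std::printf("0x%03x\n", Enc);       // prints 0x070: expcnt left at 7
  return 0;
}
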
+bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg,
+                                          int64_t &Offset, int64_t &Width) {
+ using namespace llvm::AMDGPU::Hwreg;
+
+ if (Parser.getTok().getString() != "hwreg")
+ return true;
+ Parser.Lex();
+
+ if (getLexer().isNot(AsmToken::LParen))
+ return true;
+ Parser.Lex();
+
+ if (getLexer().is(AsmToken::Identifier)) {
+ HwReg.IsSymbolic = true;
+ HwReg.Id = ID_UNKNOWN_;
+ const StringRef tok = Parser.getTok().getString();
+ for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
+ if (tok == IdSymbolic[i]) {
+ HwReg.Id = i;
+ break;
+ }
+ }
+ Parser.Lex();
+ } else {
+ HwReg.IsSymbolic = false;
+ if (getLexer().isNot(AsmToken::Integer))
+ return true;
+ if (getParser().parseAbsoluteExpression(HwReg.Id))
+ return true;
+ }
+
+ if (getLexer().is(AsmToken::RParen)) {
+ Parser.Lex();
+ return false;
+ }
+
+ // optional params
+ if (getLexer().isNot(AsmToken::Comma))
+ return true;
+ Parser.Lex();
+
+ if (getLexer().isNot(AsmToken::Integer))
+ return true;
+ if (getParser().parseAbsoluteExpression(Offset))
+ return true;
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return true;
+ Parser.Lex();
+
+ if (getLexer().isNot(AsmToken::Integer))
+ return true;
+ if (getParser().parseAbsoluteExpression(Width))
+ return true;
+
+ if (getLexer().isNot(AsmToken::RParen))
+ return true;
+ Parser.Lex();
+
+ return false;
+}
+
+AMDGPUAsmParser::OperandMatchResultTy
+AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
+ using namespace llvm::AMDGPU::Hwreg;
+
+ int64_t Imm16Val = 0;
+ SMLoc S = Parser.getTok().getLoc();
+
+ switch(getLexer().getKind()) {
+ default: return MatchOperand_NoMatch;
+ case AsmToken::Integer:
+ // The operand can be an integer value.
+ if (getParser().parseAbsoluteExpression(Imm16Val))
+ return MatchOperand_NoMatch;
+ if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
+ Error(S, "invalid immediate: only 16-bit values are legal");
+ // Do not return an error code, but create an imm operand anyway and proceed
+ // to the next operand, if any. That avoids unnecessary error messages.
+ }
+ break;
+
+ case AsmToken::Identifier: {
+ OperandInfoTy HwReg(ID_UNKNOWN_);
+ int64_t Offset = OFFSET_DEFAULT_;
+ int64_t Width = WIDTH_M1_DEFAULT_ + 1;
+ if (parseHwregConstruct(HwReg, Offset, Width))
+ return MatchOperand_ParseFail;
+ if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
+ if (HwReg.IsSymbolic)
+ Error(S, "invalid symbolic name of hardware register");
+ else
+ Error(S, "invalid code of hardware register: only 6-bit values are legal");
+ }
+ if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
+ Error(S, "invalid bit offset: only 5-bit values are legal");
+ if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
+ Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
+ Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
+ }
+ break;
+ }
+ Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
+ return MatchOperand_Success;
+}
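
A worked example of the hwreg() immediate built above. The field layout is inferred from the diagnostics (6-bit id, 5-bit offset, width stored as width-1 in 5 bits) and assumed to be packed contiguously as id [5:0], offset [10:6], width-1 [15:11]; the real shift constants live in the llvm::AMDGPU::Hwreg namespace, which this diff does not show.

#include <cassert>
#include <cstdint>

// Assumed layout: id in [5:0], bit offset in [10:6], width-1 in [15:11].
static uint16_t encodeHwreg(uint16_t Id, uint16_t Offset, uint16_t Width) {
  return Id | (Offset << 6) | ((Width - 1) << 11);
}

int main() {
  // hwreg(4, 8, 16): register id 4, starting at bit 8, 16 bits wide.
  assert(encodeHwreg(4, 8, 16) == 0x7a04);
  return 0;
}
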
+
bool AMDGPUOperand::isSWaitCnt() const {
return isImm();
}
+bool AMDGPUOperand::isHwreg() const {
+ return isImmTy(ImmTyHwreg);
+}
+
+bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg,
+                                            OperandInfoTy &Operation,
+                                            int64_t &StreamId) {
+ using namespace llvm::AMDGPU::SendMsg;
+
+ if (Parser.getTok().getString() != "sendmsg")
+ return true;
+ Parser.Lex();
+
+ if (getLexer().isNot(AsmToken::LParen))
+ return true;
+ Parser.Lex();
+
+ if (getLexer().is(AsmToken::Identifier)) {
+ Msg.IsSymbolic = true;
+ Msg.Id = ID_UNKNOWN_;
+ const std::string tok = Parser.getTok().getString();
+ for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
+ switch(i) {
+ default: continue; // Omit gaps.
+ case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
+ }
+ if (tok == IdSymbolic[i]) {
+ Msg.Id = i;
+ break;
+ }
+ }
+ Parser.Lex();
+ } else {
+ Msg.IsSymbolic = false;
+ if (getLexer().isNot(AsmToken::Integer))
+ return true;
+ if (getParser().parseAbsoluteExpression(Msg.Id))
+ return true;
+ if (getLexer().is(AsmToken::Integer))
+ if (getParser().parseAbsoluteExpression(Msg.Id))
+ Msg.Id = ID_UNKNOWN_;
+ }
+ if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
+ return false;
+
+ if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
+ if (getLexer().isNot(AsmToken::RParen))
+ return true;
+ Parser.Lex();
+ return false;
+ }
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return true;
+ Parser.Lex();
+
+ assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
+ Operation.Id = ID_UNKNOWN_;
+ if (getLexer().is(AsmToken::Identifier)) {
+ Operation.IsSymbolic = true;
+ const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
+ const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
+ const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
+ const StringRef Tok = Parser.getTok().getString();
+ for (int i = F; i < L; ++i) {
+ if (Tok == S[i]) {
+ Operation.Id = i;
+ break;
+ }
+ }
+ Parser.Lex();
+ } else {
+ Operation.IsSymbolic = false;
+ if (getLexer().isNot(AsmToken::Integer))
+ return true;
+ if (getParser().parseAbsoluteExpression(Operation.Id))
+ return true;
+ }
+
+ if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
+ // Stream id is optional.
+ if (getLexer().is(AsmToken::RParen)) {
+ Parser.Lex();
+ return false;
+ }
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return true;
+ Parser.Lex();
+
+ if (getLexer().isNot(AsmToken::Integer))
+ return true;
+ if (getParser().parseAbsoluteExpression(StreamId))
+ return true;
+ }
+
+ if (getLexer().isNot(AsmToken::RParen))
+ return true;
+ Parser.Lex();
+ return false;
+}
+
+AMDGPUAsmParser::OperandMatchResultTy
+AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
+ using namespace llvm::AMDGPU::SendMsg;
+
+ int64_t Imm16Val = 0;
+ SMLoc S = Parser.getTok().getLoc();
+
+ switch(getLexer().getKind()) {
+ default:
+ return MatchOperand_NoMatch;
+ case AsmToken::Integer:
+ // The operand can be an integer value.
+ if (getParser().parseAbsoluteExpression(Imm16Val))
+ return MatchOperand_NoMatch;
+ if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
+ Error(S, "invalid immediate: only 16-bit values are legal");
+ // Do not return an error code, but create an imm operand anyway and proceed
+ // to the next operand, if any. That avoids unnecessary error messages.
+ }
+ break;
+ case AsmToken::Identifier: {
+ OperandInfoTy Msg(ID_UNKNOWN_);
+ OperandInfoTy Operation(OP_UNKNOWN_);
+ int64_t StreamId = STREAM_ID_DEFAULT_;
+ if (parseSendMsgConstruct(Msg, Operation, StreamId))
+ return MatchOperand_ParseFail;
+ do {
+ // Validate and encode message ID.
+ if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
+ || Msg.Id == ID_SYSMSG)) {
+ if (Msg.IsSymbolic)
+ Error(S, "invalid/unsupported symbolic name of message");
+ else
+ Error(S, "invalid/unsupported code of message");
+ break;
+ }
+ Imm16Val = (Msg.Id << ID_SHIFT_);
+ // Validate and encode operation ID.
+ if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
+ if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
+ if (Operation.IsSymbolic)
+ Error(S, "invalid symbolic name of GS_OP");
+ else
+ Error(S, "invalid code of GS_OP: only 2-bit values are legal");
+ break;
+ }
+ if (Operation.Id == OP_GS_NOP
+ && Msg.Id != ID_GS_DONE) {
+ Error(S, "invalid GS_OP: NOP is for GS_DONE only");
+ break;
+ }
+ Imm16Val |= (Operation.Id << OP_SHIFT_);
+ }
+ if (Msg.Id == ID_SYSMSG) {
+ if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
+ if (Operation.IsSymbolic)
+ Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
+ else
+ Error(S, "invalid/unsupported code of SYSMSG_OP");
+ break;
+ }
+ Imm16Val |= (Operation.Id << OP_SHIFT_);
+ }
+ // Validate and encode stream ID.
+ if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
+ if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
+ Error(S, "invalid stream id: only 2-bit values are legal");
+ break;
+ }
+ Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
+ }
+ } while (0);
+ }
+ break;
+ }
+ Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
+ return MatchOperand_Success;
+}
+
+bool AMDGPUOperand::isSendMsg() const {
+ return isImmTy(ImmTySendMsg);
+}
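
A worked example of the sendmsg() immediate assembled above. The diagnostics pin the operation and stream id to 2 bits each; the layout (message id [3:0], operation [6:4], stream id [9:8]) and the ids MSG_GS = 2, GS_OP_EMIT = 2 are assumptions here, since the real values come from the llvm::AMDGPU::SendMsg namespace not shown in this diff.

#include <cassert>
#include <cstdint>

// Assumed layout: message id in [3:0], operation in [6:4], stream in [9:8].
static uint16_t encodeSendMsg(uint16_t MsgId, uint16_t OpId, uint16_t StreamId) {
  return MsgId | (OpId << 4) | (StreamId << 8);
}

int main() {
  // s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT, 1), with MSG_GS = 2, GS_OP_EMIT = 2.
  assert(encodeSendMsg(2, 2, 1) == 0x122);
  return 0;
}
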
+
//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//
@@ -1587,33 +2108,26 @@ AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
}
//===----------------------------------------------------------------------===//
-// flat
+// mubuf
//===----------------------------------------------------------------------===//
-static const OptionalOperand FlatOptionalOps [] = {
- {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
- {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
- {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
-};
-
-static const OptionalOperand FlatAtomicOptionalOps [] = {
- {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
- {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
-};
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyGLC);
+}
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
- return parseOptionalOps(FlatOptionalOps, Operands);
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySLC);
}
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
- return parseOptionalOps(FlatAtomicOptionalOps, Operands);
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyTFE);
}
-void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
- const OperandVector &Operands) {
- std::map<AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
+void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
+ const OperandVector &Operands,
+ bool IsAtomic, bool IsAtomicReturn) {
+ OptionalImmIndexMap OptionalIdx;
+ assert(IsAtomicReturn ? IsAtomic : true);
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1624,129 +2138,128 @@ void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
continue;
}
- // Handle 'glc' token which is sometimes hard-coded into the
+ // Handle the case where soffset is an immediate
+ if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
+ Op.addImmOperands(Inst, 1);
+ continue;
+ }
+
+ // Handle tokens like 'offen' which are sometimes hard-coded into the
// asm string. There are no MCInst operands for these.
- if (Op.isToken())
+ if (Op.isToken()) {
continue;
+ }
+ assert(Op.isImm());
// Handle optional arguments
OptionalIdx[Op.getImmTy()] = i;
-
}
- // flat atomic instructions don't have a glc argument.
- if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
- unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
- ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
+ // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
+ if (IsAtomicReturn) {
+ MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
+ Inst.insert(I, *I);
}
- unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
- unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
-
- ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
- ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
+ if (!IsAtomic) { // glc is hard-coded.
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
+ }
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
//===----------------------------------------------------------------------===//
-// mubuf
+// mimg
//===----------------------------------------------------------------------===//
-static const OptionalOperand MubufOptionalOps [] = {
- {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
- {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
- {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
- {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
-};
+void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
+ unsigned I = 1;
+ const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
+ for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
+ ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
+ }
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
- return parseOptionalOps(MubufOptionalOps, Operands);
-}
+ OptionalImmIndexMap OptionalIdx;
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
- return parseIntWithPrefix("offset", Operands);
-}
+ for (unsigned E = Operands.size(); I != E; ++I) {
+ AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
- return parseNamedBit("glc", Operands);
-}
+ // Add the register arguments
+ if (Op.isRegOrImm()) {
+ Op.addRegOrImmOperands(Inst, 1);
+ continue;
+ } else if (Op.isImmModifier()) {
+ OptionalIdx[Op.getImmTy()] = I;
+ } else {
+ assert(false);
+ }
+ }
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
- return parseNamedBit("slc", Operands);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
- return parseNamedBit("tfe", Operands);
-}
+void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
+ unsigned I = 1;
+ const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
+ for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
+ ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
+ }
-bool AMDGPUOperand::isMubufOffset() const {
- return isImm() && isUInt<12>(getImm());
-}
+ // Add src, same as dst
+ ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
-void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
- const OperandVector &Operands) {
- std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
+ OptionalImmIndexMap OptionalIdx;
- for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
- AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
+ for (unsigned E = Operands.size(); I != E; ++I) {
+ AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
// Add the register arguments
- if (Op.isReg()) {
- Op.addRegOperands(Inst, 1);
- continue;
- }
-
- // Handle the case where soffset is an immediate
- if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
- Op.addImmOperands(Inst, 1);
- continue;
- }
-
- // Handle tokens like 'offen' which are sometimes hard-coded into the
- // asm string. There are no MCInst operands for these.
- if (Op.isToken()) {
+ if (Op.isRegOrImm()) {
+ Op.addRegOrImmOperands(Inst, 1);
continue;
+ } else if (Op.isImmModifier()) {
+ OptionalIdx[Op.getImmTy()] = I;
+ } else {
+ assert(false);
}
- assert(Op.isImm());
-
- // Handle optional arguments
- OptionalIdx[Op.getImmTy()] = i;
}
- assert(OptionalIdx.size() == 4);
-
- unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
- unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
- unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
- unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
+}
- ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
- ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
- ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
- ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDMask);
}
-//===----------------------------------------------------------------------===//
-// mimg
-//===----------------------------------------------------------------------===//
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
+}
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
- return parseIntWithPrefix("dmask", Operands);
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDA);
}
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
- return parseNamedBit("unorm", Operands);
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyR128);
}
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseR128(OperandVector &Operands) {
- return parseNamedBit("r128", Operands);
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyLWE);
}
//===----------------------------------------------------------------------===//
@@ -1766,6 +2279,14 @@ bool AMDGPUOperand::isSMRDLiteralOffset() const {
return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
+}
+
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
+}
+
//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//
@@ -1792,91 +2313,435 @@ static bool ConvertOmodDiv(int64_t &Div) {
return false;
}
-static const OptionalOperand VOP3OptionalOps [] = {
- {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
- {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
- {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
+static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
+ if (BoundCtrl == 0) {
+ BoundCtrl = 1;
+ return true;
+ } else if (BoundCtrl == -1) {
+ BoundCtrl = 0;
+ return true;
+ }
+ return false;
+}
+
+// Note: the order in this table matches the order of operands in AsmString.
+static const OptionalOperand AMDGPUOptionalOperandTable[] = {
+ {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
+ {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
+ {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
+ {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
+ {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
+ {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
+ {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
+ {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
+ {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
+ {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
+ {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
+ {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
+ {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
+ {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
+ {"r128", AMDGPUOperand::ImmTyR128, true, nullptr},
+ {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
+ {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
+ {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
+ {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
+ {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
+ {"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
+ {"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
+ {"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
+ {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
};
-static bool isVOP3(OperandVector &Operands) {
- if (operandsHaveModifiers(Operands))
- return true;
+AMDGPUAsmParser::OperandMatchResultTy
+AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
+ OperandMatchResultTy res;
+ for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
+ // try to parse any optional operand here
+ if (Op.IsBit) {
+ res = parseNamedBit(Op.Name, Operands, Op.Type);
+ } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
+ res = parseOModOperand(Operands);
+ } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
+ Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
+ Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
+ res = parseSDWASel(Operands, Op.Name, Op.Type);
+ } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
+ res = parseSDWADstUnused(Operands);
+ } else {
+ res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
+ }
+ if (res != MatchOperand_NoMatch) {
+ return res;
+ }
+ }
+ return MatchOperand_NoMatch;
+}
- AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);
+AMDGPUAsmParser::OperandMatchResultTy
+AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
+ StringRef Name = Parser.getTok().getString();
+ if (Name == "mul") {
+ return parseIntWithPrefix("mul", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
+ } else if (Name == "div") {
+ return parseIntWithPrefix("div", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
+ } else {
+ return MatchOperand_NoMatch;
+ }
+}
- if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
- return true;
+void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
+ unsigned I = 1;
+ const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
+ for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
+ ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
+ }
+ for (unsigned E = Operands.size(); I != E; ++I)
+ ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
+}
- if (Operands.size() >= 5)
- return true;
+void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
+ uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
+ if (TSFlags & SIInstrFlags::VOP3) {
+ cvtVOP3(Inst, Operands);
+ } else {
+ cvtId(Inst, Operands);
+ }
+}
- if (Operands.size() > 3) {
- AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
- if (Src1Op.getReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
- Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
- return true;
+void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
+ OptionalImmIndexMap OptionalIdx;
+ unsigned I = 1;
+ const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
+ for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
+ ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
+ }
+
+ for (unsigned E = Operands.size(); I != E; ++I) {
+ AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
+ if (Op.isRegOrImmWithInputMods()) {
+ // only fp modifiers allowed in VOP3
+ Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
+ } else if (Op.isImm()) {
+ OptionalIdx[Op.getImmTy()] = I;
+ } else {
+ assert(false);
+ }
+ }
+
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
+}
+
+//===----------------------------------------------------------------------===//
+// dpp
+//===----------------------------------------------------------------------===//
+
+bool AMDGPUOperand::isDPPCtrl() const {
+ bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
+ if (result) {
+ int64_t Imm = getImm();
+ return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
+ ((Imm >= 0x101) && (Imm <= 0x10f)) ||
+ ((Imm >= 0x111) && (Imm <= 0x11f)) ||
+ ((Imm >= 0x121) && (Imm <= 0x12f)) ||
+ (Imm == 0x130) ||
+ (Imm == 0x134) ||
+ (Imm == 0x138) ||
+ (Imm == 0x13c) ||
+ (Imm == 0x140) ||
+ (Imm == 0x141) ||
+ (Imm == 0x142) ||
+ (Imm == 0x143);
}
return false;
}
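
The valid-range check above and the quad_perm parsing below follow from the DPP control encoding: each of the four lane selectors in quad_perm:[a,b,c,d] occupies two bits, giving a | b<<2 | c<<4 | d<<6 in the 0x000-0x0ff range. A worked sketch:

#include <cassert>
#include <cstdint>

// quad_perm:[a,b,c,d] packs four 2-bit lane selectors, low to high.
static uint16_t encodeQuadPerm(unsigned A, unsigned B, unsigned C, unsigned D) {
  return (A & 3) | ((B & 3) << 2) | ((C & 3) << 4) | ((D & 3) << 6);
}

int main() {
  // quad_perm:[0,1,2,3] is the identity permutation, encoding 0xe4.
  assert(encodeQuadPerm(0, 1, 2, 3) == 0xe4);
  return 0;
}
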
AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
-
- // The value returned by this function may change after parsing
- // an operand so store the original value here.
- bool HasModifiers = operandsHaveModifiers(Operands);
-
- bool IsVOP3 = isVOP3(Operands);
- if (HasModifiers || IsVOP3 ||
- getLexer().isNot(AsmToken::EndOfStatement) ||
- getForcedEncodingSize() == 64) {
-
- AMDGPUAsmParser::OperandMatchResultTy Res =
- parseOptionalOps(VOP3OptionalOps, Operands);
-
- if (!HasModifiers && Res == MatchOperand_Success) {
- // We have added a modifier operation, so we need to make sure all
- // previous register operands have modifiers
- for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
- AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
- if (Op.isReg())
- Op.setModifiers(0);
+AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
+ SMLoc S = Parser.getTok().getLoc();
+ StringRef Prefix;
+ int64_t Int;
+
+ if (getLexer().getKind() == AsmToken::Identifier) {
+ Prefix = Parser.getTok().getString();
+ } else {
+ return MatchOperand_NoMatch;
+ }
+
+ if (Prefix == "row_mirror") {
+ Int = 0x140;
+ } else if (Prefix == "row_half_mirror") {
+ Int = 0x141;
+ } else {
+ // Check to prevent parseDPPCtrl from eating invalid tokens
+ if (Prefix != "quad_perm"
+ && Prefix != "row_shl"
+ && Prefix != "row_shr"
+ && Prefix != "row_ror"
+ && Prefix != "wave_shl"
+ && Prefix != "wave_rol"
+ && Prefix != "wave_shr"
+ && Prefix != "wave_ror"
+ && Prefix != "row_bcast") {
+ return MatchOperand_NoMatch;
+ }
+
+ Parser.Lex();
+ if (getLexer().isNot(AsmToken::Colon))
+ return MatchOperand_ParseFail;
+
+ if (Prefix == "quad_perm") {
+ // quad_perm:[%d,%d,%d,%d]
+ Parser.Lex();
+ if (getLexer().isNot(AsmToken::LBrac))
+ return MatchOperand_ParseFail;
+
+ Parser.Lex();
+ if (getLexer().isNot(AsmToken::Integer))
+ return MatchOperand_ParseFail;
+ Int = getLexer().getTok().getIntVal();
+
+ Parser.Lex();
+ if (getLexer().isNot(AsmToken::Comma))
+ return MatchOperand_ParseFail;
+ Parser.Lex();
+ if (getLexer().isNot(AsmToken::Integer))
+ return MatchOperand_ParseFail;
+ Int += (getLexer().getTok().getIntVal() << 2);
+
+ Parser.Lex();
+ if (getLexer().isNot(AsmToken::Comma))
+ return MatchOperand_ParseFail;
+ Parser.Lex();
+ if (getLexer().isNot(AsmToken::Integer))
+ return MatchOperand_ParseFail;
+ Int += (getLexer().getTok().getIntVal() << 4);
+
+ Parser.Lex();
+ if (getLexer().isNot(AsmToken::Comma))
+ return MatchOperand_ParseFail;
+ Parser.Lex();
+ if (getLexer().isNot(AsmToken::Integer))
+ return MatchOperand_ParseFail;
+ Int += (getLexer().getTok().getIntVal() << 6);
+
+ Parser.Lex();
+ if (getLexer().isNot(AsmToken::RBrac))
+ return MatchOperand_ParseFail;
+
+ } else {
+ // sel:%d
+ Parser.Lex();
+ if (getLexer().isNot(AsmToken::Integer))
+ return MatchOperand_ParseFail;
+ Int = getLexer().getTok().getIntVal();
+
+ if (Prefix == "row_shl") {
+ Int |= 0x100;
+ } else if (Prefix == "row_shr") {
+ Int |= 0x110;
+ } else if (Prefix == "row_ror") {
+ Int |= 0x120;
+ } else if (Prefix == "wave_shl") {
+ Int = 0x130;
+ } else if (Prefix == "wave_rol") {
+ Int = 0x134;
+ } else if (Prefix == "wave_shr") {
+ Int = 0x138;
+ } else if (Prefix == "wave_ror") {
+ Int = 0x13C;
+ } else if (Prefix == "row_bcast") {
+ if (Int == 15) {
+ Int = 0x142;
+ } else if (Int == 31) {
+ Int = 0x143;
+ } else {
+ return MatchOperand_ParseFail;
+ }
+ } else {
+ return MatchOperand_ParseFail;
}
}
- return Res;
}
- return MatchOperand_NoMatch;
+ Parser.Lex(); // eat last token
+
+ Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
+ AMDGPUOperand::ImmTyDppCtrl));
+ return MatchOperand_Success;
}
-void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
+ return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
+}
+
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
+ return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
+}
+
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
+}
+
+void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
+ OptionalImmIndexMap OptionalIdx;
- unsigned i = 1;
+ unsigned I = 1;
const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
- if (Desc.getNumDefs() > 0) {
- ((AMDGPUOperand &)*Operands[i++]).addRegOperands(Inst, 1);
+ for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
+ ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
}
- std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
+ for (unsigned E = Operands.size(); I != E; ++I) {
+ AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
+ // Add the register arguments
+ if (Op.isRegOrImmWithInputMods()) {
+ // Only float modifiers supported in DPP
+ Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
+ } else if (Op.isDPPCtrl()) {
+ Op.addImmOperands(Inst, 1);
+ } else if (Op.isImm()) {
+ // Handle optional arguments
+ OptionalIdx[Op.getImmTy()] = I;
+ } else {
+ llvm_unreachable("Invalid operand type");
+ }
+ }
- if (operandsHaveModifiers(Operands)) {
- for (unsigned e = Operands.size(); i != e; ++i) {
- AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
+}
- if (Op.isRegWithInputMods()) {
- ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
- continue;
- }
- OptionalIdx[Op.getImmTy()] = i;
- }
+//===----------------------------------------------------------------------===//
+// sdwa
+//===----------------------------------------------------------------------===//
- unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
- unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];
+AMDGPUAsmParser::OperandMatchResultTy
+AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
+ AMDGPUOperand::ImmTy Type) {
+ SMLoc S = Parser.getTok().getLoc();
+ StringRef Value;
+ AMDGPUAsmParser::OperandMatchResultTy res;
- ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
- ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
- } else {
- for (unsigned e = Operands.size(); i != e; ++i)
- ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
+ res = parseStringWithPrefix(Prefix, Value);
+ if (res != MatchOperand_Success) {
+ return res;
+ }
+
+ int64_t Int;
+ Int = StringSwitch<int64_t>(Value)
+ .Case("BYTE_0", 0)
+ .Case("BYTE_1", 1)
+ .Case("BYTE_2", 2)
+ .Case("BYTE_3", 3)
+ .Case("WORD_0", 4)
+ .Case("WORD_1", 5)
+ .Case("DWORD", 6)
+ .Default(0xffffffff);
+ Parser.Lex(); // eat last token
+
+ if (Int == 0xffffffff) {
+ return MatchOperand_ParseFail;
+ }
+
+ Operands.push_back(AMDGPUOperand::CreateImm(Int, S, Type));
+ return MatchOperand_Success;
+}
+
+AMDGPUAsmParser::OperandMatchResultTy
+AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
+ SMLoc S = Parser.getTok().getLoc();
+ StringRef Value;
+ AMDGPUAsmParser::OperandMatchResultTy res;
+
+ res = parseStringWithPrefix("dst_unused", Value);
+ if (res != MatchOperand_Success) {
+ return res;
+ }
+
+ int64_t Int;
+ Int = StringSwitch<int64_t>(Value)
+ .Case("UNUSED_PAD", 0)
+ .Case("UNUSED_SEXT", 1)
+ .Case("UNUSED_PRESERVE", 2)
+ .Default(0xffffffff);
+ Parser.Lex(); // eat last token
+
+ if (Int == 0xffffffff) {
+ return MatchOperand_ParseFail;
+ }
+
+ Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
+ AMDGPUOperand::ImmTySdwaDstUnused));
+ return MatchOperand_Success;
+}
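
The two StringSwitch tables above define the textual SDWA selectors: BYTE_0..BYTE_3 map to 0..3, WORD_0/WORD_1 to 4/5, DWORD to 6, and UNUSED_PAD/UNUSED_SEXT/UNUSED_PRESERVE to 0/1/2, with anything else failing the parse. A minimal model of the dst_sel lookup:

#include <cassert>
#include <cstdint>
#include <map>
#include <string>

int main() {
  const std::map<std::string, int64_t> Sel = {
      {"BYTE_0", 0}, {"BYTE_1", 1}, {"BYTE_2", 2}, {"BYTE_3", 3},
      {"WORD_0", 4}, {"WORD_1", 5}, {"DWORD", 6}};
  auto It = Sel.find("WORD_1");
  assert(It != Sel.end() && It->second == 5); // dst_sel:WORD_1 encodes as 5
  assert(Sel.count("WORD_2") == 0);           // would be MatchOperand_ParseFail
  return 0;
}
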
+
+void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
+ cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
+}
+
+void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
+ cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
+}
+
+void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
+ cvtSDWA(Inst, Operands, SIInstrFlags::VOPC);
+}
+
+void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
+ uint64_t BasicInstType) {
+ OptionalImmIndexMap OptionalIdx;
+
+ unsigned I = 1;
+ const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
+ for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
+ ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
+ }
+
+ for (unsigned E = Operands.size(); I != E; ++I) {
+ AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
+ // Add the register arguments
+ if (BasicInstType == SIInstrFlags::VOPC &&
+ Op.isReg() &&
+ Op.Reg.RegNo == AMDGPU::VCC) {
+ // VOPC sdwa uses the "vcc" token as dst. Skip it.
+ continue;
+ } else if (Op.isRegOrImmWithInputMods()) {
+ Op.addRegOrImmWithInputModsOperands(Inst, 2);
+ } else if (Op.isImm()) {
+ // Handle optional arguments
+ OptionalIdx[Op.getImmTy()] = I;
+ } else {
+ llvm_unreachable("Invalid operand type");
+ }
+ }
+
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
+
+ if (Inst.getOpcode() == AMDGPU::V_NOP_sdwa) {
+ // V_NOP_sdwa has no optional sdwa arguments
+ return;
+ }
+ switch (BasicInstType) {
+ case SIInstrFlags::VOP1: {
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
+ break;
+ }
+ case SIInstrFlags::VOP2: {
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
+ break;
+ }
+ case SIInstrFlags::VOPC: {
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
+ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
+ break;
+ }
+ default:
+ llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
}
}
@@ -1890,3 +2755,37 @@ extern "C" void LLVMInitializeAMDGPUAsmParser() {
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"
+
+// This function should be defined after the auto-generated include so that
+// we have the MatchClassKind enum defined.
+unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
+ unsigned Kind) {
+ // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
+ // But MatchInstructionImpl() expects to meet token and fails to validate
+ // operand. This method checks if we are given immediate operand but expect to
+ // get corresponding token.
+ AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
+ switch (Kind) {
+ case MCK_addr64:
+ return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
+ case MCK_gds:
+ return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
+ case MCK_glc:
+ return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
+ case MCK_idxen:
+ return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
+ case MCK_offen:
+ return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
+ case MCK_SSrc32:
+ // When operands have expression values, they will return true for isToken,
+ // because it is not possible to distinguish between a token and an
+ // expression at parse time. MatchInstructionImpl() will always try to
+ // match an operand as a token, when isToken returns true, and when the
+ // name of the expression is not a valid token, the match will fail,
+ // so we need to handle it here.
+ return Operand.isSSrc32() ? Match_Success : Match_InvalidOperand;
+ case MCK_SoppBrTarget:
+ return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
+ default: return Match_InvalidOperand;
+ }
+}
diff --git a/lib/Target/AMDGPU/AsmParser/CMakeLists.txt b/lib/Target/AMDGPU/AsmParser/CMakeLists.txt
index 21ddc4eb83d2..70be7bb6eb36 100644
--- a/lib/Target/AMDGPU/AsmParser/CMakeLists.txt
+++ b/lib/Target/AMDGPU/AsmParser/CMakeLists.txt
@@ -1,3 +1,5 @@
add_llvm_library(LLVMAMDGPUAsmParser
AMDGPUAsmParser.cpp
)
+
+add_dependencies(LLVMAMDGPUAsmParser LLVMAMDGPUUtils)
diff --git a/lib/Target/AMDGPU/AsmParser/Makefile b/lib/Target/AMDGPU/AsmParser/Makefile
deleted file mode 100644
index 5ad219028036..000000000000
--- a/lib/Target/AMDGPU/AsmParser/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/AMDGPU/AsmParser/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAMDGPUAsmParser
-
-# Hack: we need to include 'main' AMDGPU target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AMDGPU/CIInstructions.td b/lib/Target/AMDGPU/CIInstructions.td
index c543814cae0d..f9a9f79126bd 100644
--- a/lib/Target/AMDGPU/CIInstructions.td
+++ b/lib/Target/AMDGPU/CIInstructions.td
@@ -25,14 +25,6 @@
// BUFFER_LOAD_DWORDX3
// BUFFER_STORE_DWORDX3
-
-def isCIVI : Predicate <
- "Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS || "
- "Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS"
->, AssemblerPredicate<"FeatureCIInsts">;
-
-def HasFlatAddressSpace : Predicate<"Subtarget->hasFlatAddressSpace()">;
-
//===----------------------------------------------------------------------===//
// VOP1 Instructions
//===----------------------------------------------------------------------===//
@@ -108,9 +100,11 @@ defm S_DCACHE_INV_VOL : SMRD_Inval <smrd<0x1d, 0x22>,
// MUBUF Instructions
//===----------------------------------------------------------------------===//
+let DisableSIDecoder = 1 in {
defm BUFFER_WBINVL1_VOL : MUBUF_Invalidate <mubuf<0x70, 0x3f>,
"buffer_wbinvl1_vol", int_amdgcn_buffer_wbinvl1_vol
>;
+}
//===----------------------------------------------------------------------===//
// Flat Instructions
@@ -159,129 +153,114 @@ defm FLAT_STORE_DWORDX3 : FLAT_Store_Helper <
flat<0x1f, 0x1e>, "flat_store_dwordx3", VReg_96
>;
defm FLAT_ATOMIC_SWAP : FLAT_ATOMIC <
- flat<0x30, 0x40>, "flat_atomic_swap", VGPR_32
+ flat<0x30, 0x40>, "flat_atomic_swap", VGPR_32, i32, atomic_swap_flat
>;
defm FLAT_ATOMIC_CMPSWAP : FLAT_ATOMIC <
- flat<0x31, 0x41>, "flat_atomic_cmpswap", VGPR_32, VReg_64
+ flat<0x31, 0x41>, "flat_atomic_cmpswap", VGPR_32, i32,
+ atomic_cmp_swap_flat, v2i32, VReg_64
>;
defm FLAT_ATOMIC_ADD : FLAT_ATOMIC <
- flat<0x32, 0x42>, "flat_atomic_add", VGPR_32
+ flat<0x32, 0x42>, "flat_atomic_add", VGPR_32, i32, atomic_add_flat
>;
defm FLAT_ATOMIC_SUB : FLAT_ATOMIC <
- flat<0x33, 0x43>, "flat_atomic_sub", VGPR_32
+ flat<0x33, 0x43>, "flat_atomic_sub", VGPR_32, i32, atomic_sub_flat
>;
defm FLAT_ATOMIC_SMIN : FLAT_ATOMIC <
- flat<0x35, 0x44>, "flat_atomic_smin", VGPR_32
+ flat<0x35, 0x44>, "flat_atomic_smin", VGPR_32, i32, atomic_min_flat
>;
defm FLAT_ATOMIC_UMIN : FLAT_ATOMIC <
- flat<0x36, 0x45>, "flat_atomic_umin", VGPR_32
+ flat<0x36, 0x45>, "flat_atomic_umin", VGPR_32, i32, atomic_umin_flat
>;
defm FLAT_ATOMIC_SMAX : FLAT_ATOMIC <
- flat<0x37, 0x46>, "flat_atomic_smax", VGPR_32
+ flat<0x37, 0x46>, "flat_atomic_smax", VGPR_32, i32, atomic_max_flat
>;
defm FLAT_ATOMIC_UMAX : FLAT_ATOMIC <
- flat<0x38, 0x47>, "flat_atomic_umax", VGPR_32
+ flat<0x38, 0x47>, "flat_atomic_umax", VGPR_32, i32, atomic_umax_flat
>;
defm FLAT_ATOMIC_AND : FLAT_ATOMIC <
- flat<0x39, 0x48>, "flat_atomic_and", VGPR_32
+ flat<0x39, 0x48>, "flat_atomic_and", VGPR_32, i32, atomic_and_flat
>;
defm FLAT_ATOMIC_OR : FLAT_ATOMIC <
- flat<0x3a, 0x49>, "flat_atomic_or", VGPR_32
+ flat<0x3a, 0x49>, "flat_atomic_or", VGPR_32, i32, atomic_or_flat
>;
defm FLAT_ATOMIC_XOR : FLAT_ATOMIC <
- flat<0x3b, 0x4a>, "flat_atomic_xor", VGPR_32
+ flat<0x3b, 0x4a>, "flat_atomic_xor", VGPR_32, i32, atomic_xor_flat
>;
defm FLAT_ATOMIC_INC : FLAT_ATOMIC <
- flat<0x3c, 0x4b>, "flat_atomic_inc", VGPR_32
+ flat<0x3c, 0x4b>, "flat_atomic_inc", VGPR_32, i32, atomic_inc_flat
>;
defm FLAT_ATOMIC_DEC : FLAT_ATOMIC <
- flat<0x3d, 0x4c>, "flat_atomic_dec", VGPR_32
+ flat<0x3d, 0x4c>, "flat_atomic_dec", VGPR_32, i32, atomic_dec_flat
>;
defm FLAT_ATOMIC_SWAP_X2 : FLAT_ATOMIC <
- flat<0x50, 0x60>, "flat_atomic_swap_x2", VReg_64
+ flat<0x50, 0x60>, "flat_atomic_swap_x2", VReg_64, i64, atomic_swap_flat
>;
defm FLAT_ATOMIC_CMPSWAP_X2 : FLAT_ATOMIC <
- flat<0x51, 0x61>, "flat_atomic_cmpswap_x2", VReg_64, VReg_128
+ flat<0x51, 0x61>, "flat_atomic_cmpswap_x2", VReg_64, i64,
+ atomic_cmp_swap_flat, v2i64, VReg_128
>;
defm FLAT_ATOMIC_ADD_X2 : FLAT_ATOMIC <
- flat<0x52, 0x62>, "flat_atomic_add_x2", VReg_64
+ flat<0x52, 0x62>, "flat_atomic_add_x2", VReg_64, i64, atomic_add_flat
>;
defm FLAT_ATOMIC_SUB_X2 : FLAT_ATOMIC <
- flat<0x53, 0x63>, "flat_atomic_sub_x2", VReg_64
+ flat<0x53, 0x63>, "flat_atomic_sub_x2", VReg_64, i64, atomic_sub_flat
>;
defm FLAT_ATOMIC_SMIN_X2 : FLAT_ATOMIC <
- flat<0x55, 0x64>, "flat_atomic_smin_x2", VReg_64
+ flat<0x55, 0x64>, "flat_atomic_smin_x2", VReg_64, i64, atomic_min_flat
>;
defm FLAT_ATOMIC_UMIN_X2 : FLAT_ATOMIC <
- flat<0x56, 0x65>, "flat_atomic_umin_x2", VReg_64
+ flat<0x56, 0x65>, "flat_atomic_umin_x2", VReg_64, i64, atomic_umin_flat
>;
defm FLAT_ATOMIC_SMAX_X2 : FLAT_ATOMIC <
- flat<0x57, 0x66>, "flat_atomic_smax_x2", VReg_64
+ flat<0x57, 0x66>, "flat_atomic_smax_x2", VReg_64, i64, atomic_max_flat
>;
defm FLAT_ATOMIC_UMAX_X2 : FLAT_ATOMIC <
- flat<0x58, 0x67>, "flat_atomic_umax_x2", VReg_64
+ flat<0x58, 0x67>, "flat_atomic_umax_x2", VReg_64, i64, atomic_umax_flat
>;
defm FLAT_ATOMIC_AND_X2 : FLAT_ATOMIC <
- flat<0x59, 0x68>, "flat_atomic_and_x2", VReg_64
+ flat<0x59, 0x68>, "flat_atomic_and_x2", VReg_64, i64, atomic_and_flat
>;
defm FLAT_ATOMIC_OR_X2 : FLAT_ATOMIC <
- flat<0x5a, 0x69>, "flat_atomic_or_x2", VReg_64
+ flat<0x5a, 0x69>, "flat_atomic_or_x2", VReg_64, i64, atomic_or_flat
>;
defm FLAT_ATOMIC_XOR_X2 : FLAT_ATOMIC <
- flat<0x5b, 0x6a>, "flat_atomic_xor_x2", VReg_64
+ flat<0x5b, 0x6a>, "flat_atomic_xor_x2", VReg_64, i64, atomic_xor_flat
>;
defm FLAT_ATOMIC_INC_X2 : FLAT_ATOMIC <
- flat<0x5c, 0x6b>, "flat_atomic_inc_x2", VReg_64
+ flat<0x5c, 0x6b>, "flat_atomic_inc_x2", VReg_64, i64, atomic_inc_flat
>;
defm FLAT_ATOMIC_DEC_X2 : FLAT_ATOMIC <
- flat<0x5d, 0x6c>, "flat_atomic_dec_x2", VReg_64
+ flat<0x5d, 0x6c>, "flat_atomic_dec_x2", VReg_64, i64, atomic_dec_flat
>;
} // End SubtargetPredicate = isCIVI
// CI Only flat instructions
-let SubtargetPredicate = isCI, VIAssemblerPredicate = DisableInst in {
+let SubtargetPredicate = isCI, VIAssemblerPredicate = DisableInst, DisableVIDecoder = 1 in {
defm FLAT_ATOMIC_FCMPSWAP : FLAT_ATOMIC <
- flat<0x3e>, "flat_atomic_fcmpswap", VGPR_32, VReg_64
+ flat<0x3e>, "flat_atomic_fcmpswap", VGPR_32, f32,
+ null_frag, v2f32, VReg_64
>;
defm FLAT_ATOMIC_FMIN : FLAT_ATOMIC <
- flat<0x3f>, "flat_atomic_fmin", VGPR_32
+ flat<0x3f>, "flat_atomic_fmin", VGPR_32, f32
>;
defm FLAT_ATOMIC_FMAX : FLAT_ATOMIC <
- flat<0x40>, "flat_atomic_fmax", VGPR_32
+ flat<0x40>, "flat_atomic_fmax", VGPR_32, f32
>;
defm FLAT_ATOMIC_FCMPSWAP_X2 : FLAT_ATOMIC <
- flat<0x5e>, "flat_atomic_fcmpswap_x2", VReg_64, VReg_128
+ flat<0x5e>, "flat_atomic_fcmpswap_x2", VReg_64, f64,
+ null_frag, v2f64, VReg_128
>;
defm FLAT_ATOMIC_FMIN_X2 : FLAT_ATOMIC <
- flat<0x5f>, "flat_atomic_fmin_x2", VReg_64
+ flat<0x5f>, "flat_atomic_fmin_x2", VReg_64, f64
>;
defm FLAT_ATOMIC_FMAX_X2 : FLAT_ATOMIC <
- flat<0x60>, "flat_atomic_fmax_x2", VReg_64
+ flat<0x60>, "flat_atomic_fmax_x2", VReg_64, f64
>;
-} // End let SubtargetPredicate = isCI, VIAssemblerPredicate = DisableInst
-
-let Predicates = [isCI] in {
-
-// Convert (x - floor(x)) to fract(x)
-def : Pat <
- (f32 (fsub (f32 (VOP3Mods f32:$x, i32:$mods)),
- (f32 (ffloor (f32 (VOP3Mods f32:$x, i32:$mods)))))),
- (V_FRACT_F32_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
->;
-
-// Convert (x + (-floor(x))) to fract(x)
-def : Pat <
- (f64 (fadd (f64 (VOP3Mods f64:$x, i32:$mods)),
- (f64 (fneg (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))))))),
- (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
->;
-
-} // End Predicates = [isCI]
-
+} // End SubtargetPredicate = isCI, VIAssemblerPredicate = DisableInst, DisableVIDecoder = 1
//===----------------------------------------------------------------------===//
// Flat Patterns
@@ -289,12 +268,17 @@ def : Pat <
let Predicates = [isCIVI] in {
-// Patterns for global loads with no offset
+// Patterns for global loads with no offset.
class FlatLoadPat <FLAT inst, SDPatternOperator node, ValueType vt> : Pat <
(vt (node i64:$addr)),
(inst $addr, 0, 0, 0)
>;
+class FlatLoadAtomicPat <FLAT inst, SDPatternOperator node, ValueType vt> : Pat <
+ (vt (node i64:$addr)),
+ (inst $addr, 1, 0, 0)
+>;
+
def : FlatLoadPat <FLAT_LOAD_UBYTE, flat_az_extloadi8, i32>;
def : FlatLoadPat <FLAT_LOAD_SBYTE, flat_sextloadi8, i32>;
def : FlatLoadPat <FLAT_LOAD_USHORT, flat_az_extloadi16, i32>;
@@ -303,9 +287,20 @@ def : FlatLoadPat <FLAT_LOAD_DWORD, flat_load, i32>;
def : FlatLoadPat <FLAT_LOAD_DWORDX2, flat_load, v2i32>;
def : FlatLoadPat <FLAT_LOAD_DWORDX4, flat_load, v4i32>;
+def : FlatLoadAtomicPat <FLAT_LOAD_DWORD, atomic_flat_load, i32>;
+def : FlatLoadAtomicPat <FLAT_LOAD_DWORDX2, atomic_flat_load, i64>;
+
+
class FlatStorePat <FLAT inst, SDPatternOperator node, ValueType vt> : Pat <
(node vt:$data, i64:$addr),
- (inst $data, $addr, 0, 0, 0)
+ (inst $addr, $data, 0, 0, 0)
+>;
+
+class FlatStoreAtomicPat <FLAT inst, SDPatternOperator node, ValueType vt> : Pat <
+  // Atomic stores follow the atomic binop convention, so the address comes
+  // first.
+ (node i64:$addr, vt:$data),
+ (inst $addr, $data, 1, 0, 0)
>;
def : FlatStorePat <FLAT_STORE_BYTE, flat_truncstorei8, i32>;
@@ -314,20 +309,41 @@ def : FlatStorePat <FLAT_STORE_DWORD, flat_store, i32>;
def : FlatStorePat <FLAT_STORE_DWORDX2, flat_store, v2i32>;
def : FlatStorePat <FLAT_STORE_DWORDX4, flat_store, v4i32>;
-class FlatAtomicPat <FLAT inst, SDPatternOperator node, ValueType vt> : Pat <
- (vt (node i64:$addr, vt:$data)),
+def : FlatStoreAtomicPat <FLAT_STORE_DWORD, atomic_flat_store, i32>;
+def : FlatStoreAtomicPat <FLAT_STORE_DWORDX2, atomic_flat_store, i64>;
+
+class FlatAtomicPat <FLAT inst, SDPatternOperator node, ValueType vt,
+ ValueType data_vt = vt> : Pat <
+ (vt (node i64:$addr, data_vt:$data)),
(inst $addr, $data, 0, 0)
>;
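+
+// Note: data_vt defaults to vt, so plain atomics name a single type; the
+// cmpswap patterns below pass a doubled data type (v2i32/v2i64) because the
+// data operand is assumed to carry the swap and compare values as one
+// register pair.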
def : FlatAtomicPat <FLAT_ATOMIC_ADD_RTN, atomic_add_global, i32>;
-def : FlatAtomicPat <FLAT_ATOMIC_AND_RTN, atomic_and_global, i32>;
def : FlatAtomicPat <FLAT_ATOMIC_SUB_RTN, atomic_sub_global, i32>;
+def : FlatAtomicPat <FLAT_ATOMIC_INC_RTN, atomic_inc_global, i32>;
+def : FlatAtomicPat <FLAT_ATOMIC_DEC_RTN, atomic_dec_global, i32>;
+def : FlatAtomicPat <FLAT_ATOMIC_AND_RTN, atomic_and_global, i32>;
def : FlatAtomicPat <FLAT_ATOMIC_SMAX_RTN, atomic_max_global, i32>;
def : FlatAtomicPat <FLAT_ATOMIC_UMAX_RTN, atomic_umax_global, i32>;
def : FlatAtomicPat <FLAT_ATOMIC_SMIN_RTN, atomic_min_global, i32>;
def : FlatAtomicPat <FLAT_ATOMIC_UMIN_RTN, atomic_umin_global, i32>;
def : FlatAtomicPat <FLAT_ATOMIC_OR_RTN, atomic_or_global, i32>;
def : FlatAtomicPat <FLAT_ATOMIC_SWAP_RTN, atomic_swap_global, i32>;
+def : FlatAtomicPat <FLAT_ATOMIC_CMPSWAP_RTN, atomic_cmp_swap_global, i32, v2i32>;
def : FlatAtomicPat <FLAT_ATOMIC_XOR_RTN, atomic_xor_global, i32>;
+def : FlatAtomicPat <FLAT_ATOMIC_ADD_X2_RTN, atomic_add_global, i64>;
+def : FlatAtomicPat <FLAT_ATOMIC_SUB_X2_RTN, atomic_sub_global, i64>;
+def : FlatAtomicPat <FLAT_ATOMIC_INC_X2_RTN, atomic_inc_global, i64>;
+def : FlatAtomicPat <FLAT_ATOMIC_DEC_X2_RTN, atomic_dec_global, i64>;
+def : FlatAtomicPat <FLAT_ATOMIC_AND_X2_RTN, atomic_and_global, i64>;
+def : FlatAtomicPat <FLAT_ATOMIC_SMAX_X2_RTN, atomic_max_global, i64>;
+def : FlatAtomicPat <FLAT_ATOMIC_UMAX_X2_RTN, atomic_umax_global, i64>;
+def : FlatAtomicPat <FLAT_ATOMIC_SMIN_X2_RTN, atomic_min_global, i64>;
+def : FlatAtomicPat <FLAT_ATOMIC_UMIN_X2_RTN, atomic_umin_global, i64>;
+def : FlatAtomicPat <FLAT_ATOMIC_OR_X2_RTN, atomic_or_global, i64>;
+def : FlatAtomicPat <FLAT_ATOMIC_SWAP_X2_RTN, atomic_swap_global, i64>;
+def : FlatAtomicPat <FLAT_ATOMIC_CMPSWAP_X2_RTN, atomic_cmp_swap_global, i64, v2i64>;
+def : FlatAtomicPat <FLAT_ATOMIC_XOR_X2_RTN, atomic_xor_global, i64>;
+
} // End Predicates = [isCIVI]
diff --git a/lib/Target/AMDGPU/CMakeLists.txt b/lib/Target/AMDGPU/CMakeLists.txt
index b9ef0e821763..45825c9cc76a 100644
--- a/lib/Target/AMDGPU/CMakeLists.txt
+++ b/lib/Target/AMDGPU/CMakeLists.txt
@@ -10,15 +10,30 @@ tablegen(LLVM AMDGPUGenMCCodeEmitter.inc -gen-emitter)
tablegen(LLVM AMDGPUGenDFAPacketizer.inc -gen-dfa-packetizer)
tablegen(LLVM AMDGPUGenAsmWriter.inc -gen-asm-writer)
tablegen(LLVM AMDGPUGenAsmMatcher.inc -gen-asm-matcher)
+tablegen(LLVM AMDGPUGenDisassemblerTables.inc -gen-disassembler)
add_public_tablegen_target(AMDGPUCommonTableGen)
+# List of all GlobalISel files.
+set(GLOBAL_ISEL_FILES
+ AMDGPUCallLowering.cpp
+ )
+
+# Add GlobalISel files to the dependencies if the user wants to build it.
+if(LLVM_BUILD_GLOBAL_ISEL)
+ set(GLOBAL_ISEL_BUILD_FILES ${GLOBAL_ISEL_FILES})
+else()
+  set(GLOBAL_ISEL_BUILD_FILES "")
+ set(LLVM_OPTIONAL_SOURCES LLVMGlobalISel ${GLOBAL_ISEL_FILES})
+endif()
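+
+# LLVM_OPTIONAL_SOURCES is assumed here to mark the GlobalISel sources as
+# intentionally unbuilt, so LLVM's check that every source file belongs to
+# some target does not fail when LLVM_BUILD_GLOBAL_ISEL is off.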
+
+
add_llvm_target(AMDGPUCodeGen
AMDILCFGStructurizer.cpp
AMDGPUAlwaysInlinePass.cpp
AMDGPUAnnotateKernelFeatures.cpp
AMDGPUAnnotateUniformValues.cpp
AMDGPUAsmPrinter.cpp
- AMDGPUDiagnosticInfoUnsupported.cpp
+ AMDGPUCodeGenPrepare.cpp
AMDGPUFrameLowering.cpp
AMDGPUTargetObjectFile.cpp
AMDGPUIntrinsicInfo.cpp
@@ -33,10 +48,12 @@ add_llvm_target(AMDGPUCodeGen
AMDGPUInstrInfo.cpp
AMDGPUPromoteAlloca.cpp
AMDGPURegisterInfo.cpp
+ GCNHazardRecognizer.cpp
R600ClauseMergePass.cpp
R600ControlFlowFinalizer.cpp
R600EmitClauseMarkers.cpp
R600ExpandSpecialInstrs.cpp
+ R600FrameLowering.cpp
R600InstrInfo.cpp
R600ISelLowering.cpp
R600MachineFunctionInfo.cpp
@@ -44,11 +61,10 @@ add_llvm_target(AMDGPUCodeGen
R600OptimizeVectorRegisters.cpp
R600Packetizer.cpp
R600RegisterInfo.cpp
- R600TextureIntrinsicsReplacer.cpp
SIAnnotateControlFlow.cpp
+ SIDebuggerInsertNops.cpp
SIFixControlFlowLiveIntervals.cpp
SIFixSGPRCopies.cpp
- SIFixSGPRLiveRanges.cpp
SIFoldOperands.cpp
SIFrameLowering.cpp
SIInsertWaits.cpp
@@ -62,10 +78,13 @@ add_llvm_target(AMDGPUCodeGen
SIRegisterInfo.cpp
SIShrinkInstructions.cpp
SITypeRewriter.cpp
+ SIWholeQuadMode.cpp
+ ${GLOBAL_ISEL_BUILD_FILES}
)
add_subdirectory(AsmParser)
add_subdirectory(InstPrinter)
+add_subdirectory(Disassembler)
add_subdirectory(TargetInfo)
add_subdirectory(MCTargetDesc)
add_subdirectory(Utils)
diff --git a/lib/Target/AMDGPU/CaymanInstructions.td b/lib/Target/AMDGPU/CaymanInstructions.td
index a6c3785c815b..98bc6e856ea2 100644
--- a/lib/Target/AMDGPU/CaymanInstructions.td
+++ b/lib/Target/AMDGPU/CaymanInstructions.td
@@ -51,7 +51,6 @@ def : RsqPat<RECIPSQRT_IEEE_cm, f32>;
def : POW_Common <LOG_IEEE_cm, EXP_IEEE_cm, MUL>;
defm DIV_cm : DIV_Common<RECIP_IEEE_cm>;
-defm : Expand24UBitOps<MULLO_UINT_cm, ADD_INT>;
// RECIP_UINT emulation for Cayman
// The multiplication scales from [0,1] to the unsigned integer range
@@ -203,27 +202,53 @@ def VTX_READ_PARAM_128_cm : VTX_READ_128_cm <0,
//===----------------------------------------------------------------------===//
// 8-bit reads
-def VTX_READ_GLOBAL_8_cm : VTX_READ_8_cm <1,
- [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
+def VTX_READ_ID1_8_cm : VTX_READ_8_cm <1,
+ [(set i32:$dst_gpr, (vtx_id1_az_extloadi8 ADDRVTX_READ:$src_gpr))]
>;
-def VTX_READ_GLOBAL_16_cm : VTX_READ_16_cm <1,
- [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))]
+// 16-bit reads
+def VTX_READ_ID1_16_cm : VTX_READ_16_cm <1,
+ [(set i32:$dst_gpr, (vtx_id1_az_extloadi16 ADDRVTX_READ:$src_gpr))]
>;
// 32-bit reads
-def VTX_READ_GLOBAL_32_cm : VTX_READ_32_cm <1,
- [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+def VTX_READ_ID1_32_cm : VTX_READ_32_cm <1,
+ [(set i32:$dst_gpr, (vtx_id1_load ADDRVTX_READ:$src_gpr))]
>;
// 64-bit reads
-def VTX_READ_GLOBAL_64_cm : VTX_READ_64_cm <1,
- [(set v2i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+def VTX_READ_ID1_64_cm : VTX_READ_64_cm <1,
+ [(set v2i32:$dst_gpr, (vtx_id1_load ADDRVTX_READ:$src_gpr))]
>;
// 128-bit reads
-def VTX_READ_GLOBAL_128_cm : VTX_READ_128_cm <1,
- [(set v4i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+def VTX_READ_ID1_128_cm : VTX_READ_128_cm <1,
+ [(set v4i32:$dst_gpr, (vtx_id1_load ADDRVTX_READ:$src_gpr))]
+>;
+
+// 8-bit reads
+def VTX_READ_ID2_8_cm : VTX_READ_8_cm <2,
+ [(set i32:$dst_gpr, (vtx_id2_az_extloadi8 ADDRVTX_READ:$src_gpr))]
+>;
+
+// 16-bit reads
+def VTX_READ_ID2_16_cm : VTX_READ_16_cm <2,
+ [(set i32:$dst_gpr, (vtx_id2_az_extloadi16 ADDRVTX_READ:$src_gpr))]
+>;
+
+// 32-bit reads
+def VTX_READ_ID2_32_cm : VTX_READ_32_cm <2,
+ [(set i32:$dst_gpr, (vtx_id2_load ADDRVTX_READ:$src_gpr))]
+>;
+
+// 64-bit reads
+def VTX_READ_ID2_64_cm : VTX_READ_64_cm <2,
+ [(set v2i32:$dst_gpr, (vtx_id2_load ADDRVTX_READ:$src_gpr))]
+>;
+
+// 128-bit reads
+def VTX_READ_ID2_128_cm : VTX_READ_128_cm <2,
+ [(set v4i32:$dst_gpr, (vtx_id2_load ADDRVTX_READ:$src_gpr))]
>;
} // End isCayman
diff --git a/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
new file mode 100644
index 000000000000..e11de855fe5f
--- /dev/null
+++ b/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -0,0 +1,437 @@
+//===-- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+///
+/// This file contains the definition of the AMDGPU ISA disassembler.
+//
+//===----------------------------------------------------------------------===//
+
+// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?
+
+#include "AMDGPUDisassembler.h"
+#include "AMDGPU.h"
+#include "AMDGPURegisterInfo.h"
+#include "SIDefines.h"
+#include "Utils/AMDGPUBaseInfo.h"
+
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCFixedLenDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/TargetRegistry.h"
+
+
+using namespace llvm;
+
+#define DEBUG_TYPE "amdgpu-disassembler"
+
+typedef llvm::MCDisassembler::DecodeStatus DecodeStatus;
+
+
+inline static MCDisassembler::DecodeStatus
+addOperand(MCInst &Inst, const MCOperand& Opnd) {
+ Inst.addOperand(Opnd);
+ return Opnd.isValid() ?
+ MCDisassembler::Success :
+ MCDisassembler::SoftFail;
+}
+
+#define DECODE_OPERAND2(RegClass, DecName) \
+static DecodeStatus Decode##RegClass##RegisterClass(MCInst &Inst, \
+ unsigned Imm, \
+ uint64_t /*Addr*/, \
+ const void *Decoder) { \
+ auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
+ return addOperand(Inst, DAsm->decodeOperand_##DecName(Imm)); \
+}
+
+#define DECODE_OPERAND(RegClass) DECODE_OPERAND2(RegClass, RegClass)
+
+DECODE_OPERAND(VGPR_32)
+DECODE_OPERAND(VS_32)
+DECODE_OPERAND(VS_64)
+
+DECODE_OPERAND(VReg_64)
+DECODE_OPERAND(VReg_96)
+DECODE_OPERAND(VReg_128)
+
+DECODE_OPERAND(SReg_32)
+DECODE_OPERAND(SReg_32_XM0)
+DECODE_OPERAND(SReg_64)
+DECODE_OPERAND(SReg_128)
+DECODE_OPERAND(SReg_256)
+DECODE_OPERAND(SReg_512)
+
+#define GET_SUBTARGETINFO_ENUM
+#include "AMDGPUGenSubtargetInfo.inc"
+#undef GET_SUBTARGETINFO_ENUM
+
+#include "AMDGPUGenDisassemblerTables.inc"
+
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+
+template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
+ assert(Bytes.size() >= sizeof(T));
+ const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
+ Bytes = Bytes.slice(sizeof(T));
+ return Res;
+}
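+
+// Usage sketch for eatBytes() (illustrative): it reads a little-endian value
+// from the front of the window and shrinks the window:
+//   ArrayRef<uint8_t> B = ...;             // raw instruction bytes
+//   uint64_t QW = eatBytes<uint64_t>(B);   // consumes 8 bytes
+//   uint32_t Lit = eatBytes<uint32_t>(B);  // consumes 4 more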
+
+DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
+ MCInst &MI,
+ uint64_t Inst,
+ uint64_t Address) const {
+ assert(MI.getOpcode() == 0);
+ assert(MI.getNumOperands() == 0);
+ MCInst TmpInst;
+ const auto SavedBytes = Bytes;
+ if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
+ MI = TmpInst;
+ return MCDisassembler::Success;
+ }
+ Bytes = SavedBytes;
+ return MCDisassembler::Fail;
+}
+
+DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
+ ArrayRef<uint8_t> Bytes_,
+ uint64_t Address,
+ raw_ostream &WS,
+ raw_ostream &CS) const {
+ CommentStream = &CS;
+
+ // ToDo: AMDGPUDisassembler supports only VI ISA.
+ assert(AMDGPU::isVI(STI) && "Can disassemble only VI ISA.");
+
+ const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
+ Bytes = Bytes_.slice(0, MaxInstBytesNum);
+
+ DecodeStatus Res = MCDisassembler::Fail;
+ do {
+    // ToDo: it would be better to switch on encoding length using some bit
+    // predicate, but such a predicate is not known yet, so try everything we
+    // can.
+
+ // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
+ // encodings
+ if (Bytes.size() >= 8) {
+ const uint64_t QW = eatBytes<uint64_t>(Bytes);
+ Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
+ if (Res) break;
+
+ Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
+ if (Res) break;
+ }
+
+ // Reinitialize Bytes as DPP64 could have eaten too much
+ Bytes = Bytes_.slice(0, MaxInstBytesNum);
+
+ // Try decode 32-bit instruction
+ if (Bytes.size() < 4) break;
+ const uint32_t DW = eatBytes<uint32_t>(Bytes);
+ Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
+ if (Res) break;
+
+ Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
+ if (Res) break;
+
+ if (Bytes.size() < 4) break;
+ const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
+ Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
+ if (Res) break;
+
+ Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
+ } while (false);
+
+ Size = Res ? (MaxInstBytesNum - Bytes.size()) : 0;
+ return Res;
+}
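+
+// Note on the decode order above: the widest, most specific tables go first
+// (64-bit DPP/SDWA, whose encodings overlap VOP1/VOP2), then the 32-bit VI
+// and generic tables, then the 64-bit VI and generic tables; Bytes is
+// rewound between attempts so every table sees the same window.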
+
+const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
+ return getContext().getRegisterInfo()->
+ getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
+}
+
+inline
+MCOperand AMDGPUDisassembler::errOperand(unsigned V,
+ const Twine& ErrMsg) const {
+ *CommentStream << "Error: " + ErrMsg;
+
+ // ToDo: add support for error operands to MCInst.h
+ // return MCOperand::createError(V);
+ return MCOperand();
+}
+
+inline
+MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
+ return MCOperand::createReg(RegId);
+}
+
+inline
+MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
+ unsigned Val) const {
+ const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
+ if (Val >= RegCl.getNumRegs())
+ return errOperand(Val, Twine(getRegClassName(RegClassID)) +
+ ": unknown register " + Twine(Val));
+ return createRegOperand(RegCl.getRegister(Val));
+}
+
+inline
+MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
+ unsigned Val) const {
+  // ToDo: SI/CI have 104 SGPRs, VI has 102.
+  // Here we accept as much as we can and let the assembler sort it out.
+ int shift = 0;
+ switch (SRegClassID) {
+ case AMDGPU::SGPR_32RegClassID:
+ case AMDGPU::TTMP_32RegClassID:
+ break;
+ case AMDGPU::SGPR_64RegClassID:
+ case AMDGPU::TTMP_64RegClassID:
+ shift = 1;
+ break;
+ case AMDGPU::SGPR_128RegClassID:
+ case AMDGPU::TTMP_128RegClassID:
+ // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
+ // this bundle?
+ case AMDGPU::SReg_256RegClassID:
+ // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
+ // this bundle?
+ case AMDGPU::SReg_512RegClassID:
+ shift = 2;
+ break;
+ // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
+ // this bundle?
+ default:
+ assert(false);
+ break;
+ }
+ if (Val % (1 << shift))
+ *CommentStream << "Warning: " << getRegClassName(SRegClassID)
+ << ": scalar reg isn't aligned " << Val;
+ return createRegOperand(SRegClassID, Val >> shift);
+}
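+
+// Worked example (assuming SGPR_64 register 1 is s[2:3]): encoded scalar
+// operands index 32-bit lanes, so the pair s[2:3] arrives as Val == 2 and,
+// with shift == 1, becomes register 1 of the 64-bit class; an odd Val would
+// trigger the alignment warning above.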
+
+MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
+ return decodeSrcOp(OPW32, Val);
+}
+
+MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
+ return decodeSrcOp(OPW64, Val);
+}
+
+MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
+ return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
+}
+
+MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
+ return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
+}
+
+MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
+ return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
+}
+
+MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
+ return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
+}
+
+MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
+  // The TableGen-generated disassembler doesn't care about operand types;
+  // only the register class is left, so an SSrc_32 operand turns into
+  // SReg_32, and therefore we accept immediates and literals here as well.
+ return decodeSrcOp(OPW32, Val);
+}
+
+MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0(unsigned Val) const {
+ // SReg_32_XM0 is SReg_32 without M0
+ return decodeOperand_SReg_32(Val);
+}
+
+MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
+ // see decodeOperand_SReg_32 comment
+ return decodeSrcOp(OPW64, Val);
+}
+
+MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
+ return decodeSrcOp(OPW128, Val);
+}
+
+MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
+ return createSRegOperand(AMDGPU::SReg_256RegClassID, Val);
+}
+
+MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
+ return createSRegOperand(AMDGPU::SReg_512RegClassID, Val);
+}
+
+
+MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
+  // For now all literal constants are assumed to be unsigned integers.
+ // ToDo: deal with signed/unsigned 64-bit integer constants
+ // ToDo: deal with float/double constants
+ if (Bytes.size() < 4)
+ return errOperand(0, "cannot read literal, inst bytes left " +
+ Twine(Bytes.size()));
+ return MCOperand::createImm(eatBytes<uint32_t>(Bytes));
+}
+
+MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
+ using namespace AMDGPU::EncValues;
+ assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
+ return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
+ (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
+ (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
+ // Cast prevents negative overflow.
+}
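+
+// Worked examples (assuming INLINE_INTEGER_C_MIN == 128 and
+// INLINE_INTEGER_C_POSITIVE_MAX == 192, the standard GCN encoding):
+//   Imm 128 -> 0, 129 -> 1, 192 -> 64, 193 -> -1, 208 -> -16.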
+
+MCOperand AMDGPUDisassembler::decodeFPImmed(bool Is32, unsigned Imm) {
+ assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
+ && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);
+  // ToDo: case 248: 1/(2*PI) is allowed only on VI.
+  // ToDo: AMDGPUInstPrinter does not support 1/(2*PI); it considers 1/(2*PI)
+  // a literal constant.
+ float V = 0.0f;
+ switch (Imm) {
+ case 240: V = 0.5f; break;
+ case 241: V = -0.5f; break;
+ case 242: V = 1.0f; break;
+ case 243: V = -1.0f; break;
+ case 244: V = 2.0f; break;
+ case 245: V = -2.0f; break;
+ case 246: V = 4.0f; break;
+ case 247: V = -4.0f; break;
+ case 248: return MCOperand::createImm(Is32 ? // 1/(2*PI)
+ 0x3e22f983 :
+ 0x3fc45f306dc9c882);
+ default: break;
+ }
+ return MCOperand::createImm(Is32? FloatToBits(V) : DoubleToBits(V));
+}
+
+unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
+ using namespace AMDGPU;
+ assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
+ switch (Width) {
+  default: // fallthrough to OPW32
+ case OPW32: return VGPR_32RegClassID;
+ case OPW64: return VReg_64RegClassID;
+ case OPW128: return VReg_128RegClassID;
+ }
+}
+
+unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
+ using namespace AMDGPU;
+ assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
+ switch (Width) {
+  default: // fallthrough to OPW32
+ case OPW32: return SGPR_32RegClassID;
+ case OPW64: return SGPR_64RegClassID;
+ case OPW128: return SGPR_128RegClassID;
+ }
+}
+
+unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
+ using namespace AMDGPU;
+ assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
+ switch (Width) {
+  default: // fallthrough to OPW32
+ case OPW32: return TTMP_32RegClassID;
+ case OPW64: return TTMP_64RegClassID;
+ case OPW128: return TTMP_128RegClassID;
+ }
+}
+
+MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
+ using namespace AMDGPU::EncValues;
+ assert(Val < 512); // enum9
+
+ if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
+ return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
+ }
+ if (Val <= SGPR_MAX) {
+ assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
+ return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
+ }
+ if (TTMP_MIN <= Val && Val <= TTMP_MAX) {
+ return createSRegOperand(getTtmpClassId(Width), Val - TTMP_MIN);
+ }
+
+ assert(Width == OPW32 || Width == OPW64);
+ const bool Is32 = (Width == OPW32);
+
+ if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
+ return decodeIntImmed(Val);
+
+ if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
+ return decodeFPImmed(Is32, Val);
+
+ if (Val == LITERAL_CONST)
+ return decodeLiteralConstant();
+
+ return Is32 ? decodeSpecialReg32(Val) : decodeSpecialReg64(Val);
+}
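+
+// Summary of the 9-bit source decode above: VGPR and SGPR ranges become
+// register operands, the TTMP range becomes trap temporaries, the inline
+// ranges become immediates, LITERAL_CONST pulls a 32-bit literal from the
+// instruction stream, and whatever remains is tried as a special register
+// (vcc, exec, m0, ...).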
+
+MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
+ using namespace AMDGPU;
+ switch (Val) {
+ case 102: return createRegOperand(getMCReg(FLAT_SCR_LO, STI));
+ case 103: return createRegOperand(getMCReg(FLAT_SCR_HI, STI));
+ // ToDo: no support for xnack_mask_lo/_hi register
+ case 104:
+ case 105: break;
+ case 106: return createRegOperand(VCC_LO);
+ case 107: return createRegOperand(VCC_HI);
+ case 108: return createRegOperand(TBA_LO);
+ case 109: return createRegOperand(TBA_HI);
+ case 110: return createRegOperand(TMA_LO);
+ case 111: return createRegOperand(TMA_HI);
+ case 124: return createRegOperand(M0);
+ case 126: return createRegOperand(EXEC_LO);
+ case 127: return createRegOperand(EXEC_HI);
+ // ToDo: no support for vccz register
+ case 251: break;
+ // ToDo: no support for execz register
+ case 252: break;
+ case 253: return createRegOperand(SCC);
+ default: break;
+ }
+ return errOperand(Val, "unknown operand encoding " + Twine(Val));
+}
+
+MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
+ using namespace AMDGPU;
+ switch (Val) {
+ case 102: return createRegOperand(getMCReg(FLAT_SCR, STI));
+ case 106: return createRegOperand(VCC);
+ case 108: return createRegOperand(TBA);
+ case 110: return createRegOperand(TMA);
+ case 126: return createRegOperand(EXEC);
+ default: break;
+ }
+ return errOperand(Val, "unknown operand encoding " + Twine(Val));
+}
+
+static MCDisassembler *createAMDGPUDisassembler(const Target &T,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new AMDGPUDisassembler(STI, Ctx);
+}
+
+extern "C" void LLVMInitializeAMDGPUDisassembler() {
+ TargetRegistry::RegisterMCDisassembler(TheGCNTarget, createAMDGPUDisassembler);
+}
diff --git a/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h b/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
new file mode 100644
index 000000000000..dff26a044bf5
--- /dev/null
+++ b/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
@@ -0,0 +1,93 @@
+//===-- AMDGPUDisassembler.h - Disassembler for AMDGPU ISA -----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+///
+/// This file contains the declaration of the AMDGPU ISA disassembler.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AMDGPU_DISASSEMBLER_AMDGPUDISASSEMBLER_H
+#define LLVM_LIB_TARGET_AMDGPU_DISASSEMBLER_AMDGPUDISASSEMBLER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
+
+namespace llvm {
+
+ class MCContext;
+ class MCInst;
+ class MCOperand;
+ class MCSubtargetInfo;
+ class Twine;
+
+ class AMDGPUDisassembler : public MCDisassembler {
+ private:
+ mutable ArrayRef<uint8_t> Bytes;
+
+ public:
+ AMDGPUDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx) :
+ MCDisassembler(STI, Ctx) {}
+
+ ~AMDGPUDisassembler() {}
+
+ DecodeStatus getInstruction(MCInst &MI, uint64_t &Size,
+ ArrayRef<uint8_t> Bytes, uint64_t Address,
+ raw_ostream &WS, raw_ostream &CS) const override;
+
+ const char* getRegClassName(unsigned RegClassID) const;
+
+ MCOperand createRegOperand(unsigned int RegId) const;
+ MCOperand createRegOperand(unsigned RegClassID, unsigned Val) const;
+ MCOperand createSRegOperand(unsigned SRegClassID, unsigned Val) const;
+
+ MCOperand errOperand(unsigned V, const llvm::Twine& ErrMsg) const;
+
+ DecodeStatus tryDecodeInst(const uint8_t* Table,
+ MCInst &MI,
+ uint64_t Inst,
+ uint64_t Address) const;
+
+ MCOperand decodeOperand_VGPR_32(unsigned Val) const;
+ MCOperand decodeOperand_VS_32(unsigned Val) const;
+ MCOperand decodeOperand_VS_64(unsigned Val) const;
+
+ MCOperand decodeOperand_VReg_64(unsigned Val) const;
+ MCOperand decodeOperand_VReg_96(unsigned Val) const;
+ MCOperand decodeOperand_VReg_128(unsigned Val) const;
+
+ MCOperand decodeOperand_SReg_32(unsigned Val) const;
+ MCOperand decodeOperand_SReg_32_XM0(unsigned Val) const;
+ MCOperand decodeOperand_SReg_64(unsigned Val) const;
+ MCOperand decodeOperand_SReg_128(unsigned Val) const;
+ MCOperand decodeOperand_SReg_256(unsigned Val) const;
+ MCOperand decodeOperand_SReg_512(unsigned Val) const;
+
+ enum OpWidthTy {
+ OPW32,
+ OPW64,
+ OPW128,
+ OPW_LAST_,
+ OPW_FIRST_ = OPW32
+ };
+ unsigned getVgprClassId(const OpWidthTy Width) const;
+ unsigned getSgprClassId(const OpWidthTy Width) const;
+ unsigned getTtmpClassId(const OpWidthTy Width) const;
+
+ static MCOperand decodeIntImmed(unsigned Imm);
+ static MCOperand decodeFPImmed(bool Is32, unsigned Imm);
+ MCOperand decodeLiteralConstant() const;
+
+ MCOperand decodeSrcOp(const OpWidthTy Width, unsigned Val) const;
+ MCOperand decodeSpecialReg32(unsigned Val) const;
+ MCOperand decodeSpecialReg64(unsigned Val) const;
+ };
+} // namespace llvm
+
+#endif //LLVM_LIB_TARGET_AMDGPU_DISASSEMBLER_AMDGPUDISASSEMBLER_H
diff --git a/lib/Target/AMDGPU/Disassembler/CMakeLists.txt b/lib/Target/AMDGPU/Disassembler/CMakeLists.txt
new file mode 100644
index 000000000000..fb9231576919
--- /dev/null
+++ b/lib/Target/AMDGPU/Disassembler/CMakeLists.txt
@@ -0,0 +1,7 @@
+include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
+
+add_llvm_library(LLVMAMDGPUDisassembler
+ AMDGPUDisassembler.cpp
+ )
+
+add_dependencies(LLVMAMDGPUDisassembler AMDGPUCommonTableGen LLVMAMDGPUUtils)
diff --git a/lib/Target/AMDGPU/Disassembler/LLVMBuild.txt b/lib/Target/AMDGPU/Disassembler/LLVMBuild.txt
new file mode 100644
index 000000000000..c9005f8a7884
--- /dev/null
+++ b/lib/Target/AMDGPU/Disassembler/LLVMBuild.txt
@@ -0,0 +1,23 @@
+;===- ./lib/Target/AMDGPU/Disassembler/LLVMBuild.txt ------------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = AMDGPUDisassembler
+parent = AMDGPU
+required_libraries = AMDGPUDesc AMDGPUInfo AMDGPUUtils MC MCDisassembler Support
+add_to_library_groups = AMDGPU
diff --git a/lib/Target/AMDGPU/EvergreenInstructions.td b/lib/Target/AMDGPU/EvergreenInstructions.td
index 2245f1417e53..94f05cc41aff 100644
--- a/lib/Target/AMDGPU/EvergreenInstructions.td
+++ b/lib/Target/AMDGPU/EvergreenInstructions.td
@@ -85,8 +85,6 @@ def COS_eg : COS_Common<0x8E>;
def : POW_Common <LOG_IEEE_eg, EXP_IEEE_eg, MUL>;
def : Pat<(fsqrt f32:$src), (MUL $src, (RECIPSQRT_CLAMPED_eg $src))>;
-defm : Expand24IBitOps<MULLO_INT_eg, ADD_INT>;
-
//===----------------------------------------------------------------------===//
// Memory read/write instructions
//===----------------------------------------------------------------------===//
@@ -212,23 +210,23 @@ class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern>
// VTX Read from parameter memory space
//===----------------------------------------------------------------------===//
-def VTX_READ_PARAM_8_eg : VTX_READ_8_eg <0,
+def VTX_READ_PARAM_8_eg : VTX_READ_8_eg <3,
[(set i32:$dst_gpr, (load_param_exti8 ADDRVTX_READ:$src_gpr))]
>;
-def VTX_READ_PARAM_16_eg : VTX_READ_16_eg <0,
+def VTX_READ_PARAM_16_eg : VTX_READ_16_eg <3,
[(set i32:$dst_gpr, (load_param_exti16 ADDRVTX_READ:$src_gpr))]
>;
-def VTX_READ_PARAM_32_eg : VTX_READ_32_eg <0,
+def VTX_READ_PARAM_32_eg : VTX_READ_32_eg <3,
[(set i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
>;
-def VTX_READ_PARAM_64_eg : VTX_READ_64_eg <0,
+def VTX_READ_PARAM_64_eg : VTX_READ_64_eg <3,
[(set v2i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
>;
-def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0,
+def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <3,
[(set v4i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
>;
@@ -237,27 +235,53 @@ def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0,
//===----------------------------------------------------------------------===//
// 8-bit reads
-def VTX_READ_GLOBAL_8_eg : VTX_READ_8_eg <1,
- [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
+def VTX_READ_ID1_8_eg : VTX_READ_8_eg <1,
+ [(set i32:$dst_gpr, (vtx_id1_az_extloadi8 ADDRVTX_READ:$src_gpr))]
+>;
+
+// 16-bit reads
+def VTX_READ_ID1_16_eg : VTX_READ_16_eg <1,
+ [(set i32:$dst_gpr, (vtx_id1_az_extloadi16 ADDRVTX_READ:$src_gpr))]
+>;
+
+// 32-bit reads
+def VTX_READ_ID1_32_eg : VTX_READ_32_eg <1,
+ [(set i32:$dst_gpr, (vtx_id1_load ADDRVTX_READ:$src_gpr))]
+>;
+
+// 64-bit reads
+def VTX_READ_ID1_64_eg : VTX_READ_64_eg <1,
+ [(set v2i32:$dst_gpr, (vtx_id1_load ADDRVTX_READ:$src_gpr))]
+>;
+
+// 128-bit reads
+def VTX_READ_ID1_128_eg : VTX_READ_128_eg <1,
+ [(set v4i32:$dst_gpr, (vtx_id1_load ADDRVTX_READ:$src_gpr))]
+>;
+
+// 8-bit reads
+def VTX_READ_ID2_8_eg : VTX_READ_8_eg <2,
+ [(set i32:$dst_gpr, (vtx_id2_az_extloadi8 ADDRVTX_READ:$src_gpr))]
>;
-def VTX_READ_GLOBAL_16_eg : VTX_READ_16_eg <1,
- [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))]
+// 16-bit reads
+def VTX_READ_ID2_16_eg : VTX_READ_16_eg <2,
+ [(set i32:$dst_gpr, (vtx_id2_az_extloadi16 ADDRVTX_READ:$src_gpr))]
>;
// 32-bit reads
-def VTX_READ_GLOBAL_32_eg : VTX_READ_32_eg <1,
- [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+def VTX_READ_ID2_32_eg : VTX_READ_32_eg <2,
+ [(set i32:$dst_gpr, (vtx_id2_load ADDRVTX_READ:$src_gpr))]
>;
// 64-bit reads
-def VTX_READ_GLOBAL_64_eg : VTX_READ_64_eg <1,
- [(set v2i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+def VTX_READ_ID2_64_eg : VTX_READ_64_eg <2,
+ [(set v2i32:$dst_gpr, (vtx_id2_load ADDRVTX_READ:$src_gpr))]
>;
// 128-bit reads
-def VTX_READ_GLOBAL_128_eg : VTX_READ_128_eg <1,
- [(set v4i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
+def VTX_READ_ID2_128_eg : VTX_READ_128_eg <2,
+ [(set v4i32:$dst_gpr, (vtx_id2_load ADDRVTX_READ:$src_gpr))]
>;
} // End Predicates = [isEG]
@@ -356,8 +380,6 @@ let hasSideEffects = 1 in {
def MOVA_INT_eg : R600_1OP <0xCC, "MOVA_INT", [], VecALU>;
}
-def TGSI_LIT_Z_eg : TGSI_LIT_Z_Common<MUL_LIT_eg, LOG_CLAMPED_eg, EXP_IEEE_eg>;
-
def FLT_TO_INT_eg : FLT_TO_INT_Common<0x50> {
let Pattern = [];
let Itinerary = AnyALU;
@@ -372,7 +394,7 @@ def FLT_TO_UINT_eg : FLT_TO_UINT_Common<0x9A> {
def UINT_TO_FLT_eg : UINT_TO_FLT_Common<0x9C>;
def GROUP_BARRIER : InstR600 <
- (outs), (ins), " GROUP_BARRIER", [(int_AMDGPU_barrier_local), (int_AMDGPU_barrier_global)], AnyALU>,
+ (outs), (ins), " GROUP_BARRIER", [(int_r600_group_barrier)], AnyALU>,
R600ALU_Word0,
R600ALU_Word1_OP2 <0x54> {
@@ -401,11 +423,6 @@ def GROUP_BARRIER : InstR600 <
let ALUInst = 1;
}
-def : Pat <
- (int_AMDGPU_barrier_global),
- (GROUP_BARRIER)
->;
-
//===----------------------------------------------------------------------===//
// LDS Instructions
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
new file mode 100644
index 000000000000..29b1f79187d5
--- /dev/null
+++ b/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -0,0 +1,264 @@
+//===-- GCNHazardRecognizers.cpp - GCN Hazard Recognizer Impls ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements hazard recognizers for scheduling on GCN processors.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GCNHazardRecognizer.h"
+#include "AMDGPUSubtarget.h"
+#include "SIInstrInfo.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Hazard Recognizer Implementation
+//===----------------------------------------------------------------------===//
+
+GCNHazardRecognizer::GCNHazardRecognizer(const MachineFunction &MF) :
+ CurrCycleInstr(nullptr),
+ MF(MF),
+ ST(MF.getSubtarget<SISubtarget>()) {
+ MaxLookAhead = 5;
+}
+
+void GCNHazardRecognizer::EmitInstruction(SUnit *SU) {
+ EmitInstruction(SU->getInstr());
+}
+
+void GCNHazardRecognizer::EmitInstruction(MachineInstr *MI) {
+ CurrCycleInstr = MI;
+}
+
+ScheduleHazardRecognizer::HazardType
+GCNHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
+ MachineInstr *MI = SU->getInstr();
+
+ if (SIInstrInfo::isSMRD(*MI) && checkSMRDHazards(MI) > 0)
+ return NoopHazard;
+
+ if (SIInstrInfo::isVMEM(*MI) && checkVMEMHazards(MI) > 0)
+ return NoopHazard;
+
+ if (SIInstrInfo::isDPP(*MI) && checkDPPHazards(MI) > 0)
+ return NoopHazard;
+
+ return NoHazard;
+}
+
+unsigned GCNHazardRecognizer::PreEmitNoops(SUnit *SU) {
+ return PreEmitNoops(SU->getInstr());
+}
+
+unsigned GCNHazardRecognizer::PreEmitNoops(MachineInstr *MI) {
+ if (SIInstrInfo::isSMRD(*MI))
+ return std::max(0, checkSMRDHazards(MI));
+
+ if (SIInstrInfo::isVMEM(*MI))
+ return std::max(0, checkVMEMHazards(MI));
+
+ if (SIInstrInfo::isDPP(*MI))
+ return std::max(0, checkDPPHazards(MI));
+
+ return 0;
+}
+
+void GCNHazardRecognizer::EmitNoop() {
+ EmittedInstrs.push_front(nullptr);
+}
+
+void GCNHazardRecognizer::AdvanceCycle() {
+
+ // When the scheduler detects a stall, it will call AdvanceCycle() without
+ // emitting any instructions.
+ if (!CurrCycleInstr)
+ return;
+
+ const SIInstrInfo *TII = ST.getInstrInfo();
+ unsigned NumWaitStates = TII->getNumWaitStates(*CurrCycleInstr);
+
+ // Keep track of emitted instructions
+ EmittedInstrs.push_front(CurrCycleInstr);
+
+ // Add a nullptr for each additional wait state after the first. Make sure
+ // not to add more than getMaxLookAhead() items to the list, since we
+ // truncate the list to that size right after this loop.
+ for (unsigned i = 1, e = std::min(NumWaitStates, getMaxLookAhead());
+ i < e; ++i) {
+ EmittedInstrs.push_front(nullptr);
+ }
+
+  // getMaxLookAhead() is the largest number of wait states we will ever need
+ // to insert, so there is no point in keeping track of more than that many
+ // wait states.
+ EmittedInstrs.resize(getMaxLookAhead());
+
+ CurrCycleInstr = nullptr;
+}
+
+void GCNHazardRecognizer::RecedeCycle() {
+ llvm_unreachable("hazard recognizer does not support bottom-up scheduling.");
+}
+
+//===----------------------------------------------------------------------===//
+// Helper Functions
+//===----------------------------------------------------------------------===//
+
+int GCNHazardRecognizer::getWaitStatesSinceDef(
+ unsigned Reg, function_ref<bool(MachineInstr *)> IsHazardDef) {
+ const SIRegisterInfo *TRI = ST.getRegisterInfo();
+
+ int WaitStates = -1;
+ for (MachineInstr *MI : EmittedInstrs) {
+ ++WaitStates;
+ if (!MI || !IsHazardDef(MI))
+ continue;
+ if (MI->modifiesRegister(Reg, TRI))
+ return WaitStates;
+ }
+ return std::numeric_limits<int>::max();
+}
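+
+// Worked example: EmittedInstrs is ordered most-recent-first, so if the
+// hazardous def is the second entry this returns 1, and a rule needing 4
+// wait states makes the callers below request 4 - 1 = 3 noops.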
+
+//===----------------------------------------------------------------------===//
+// No-op Hazard Detection
+//===----------------------------------------------------------------------===//
+
+static void addRegsToSet(iterator_range<MachineInstr::const_mop_iterator> Ops,
+ std::set<unsigned> &Set) {
+ for (const MachineOperand &Op : Ops) {
+ if (Op.isReg())
+ Set.insert(Op.getReg());
+ }
+}
+
+int GCNHazardRecognizer::checkSMEMSoftClauseHazards(MachineInstr *SMEM) {
+  // SMEM soft clauses are only present on VI+.
+ if (ST.getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
+ return 0;
+
+ // A soft-clause is any group of consecutive SMEM instructions. The
+ // instructions in this group may return out of order and/or may be
+ // replayed (i.e. the same instruction issued more than once).
+ //
+ // In order to handle these situations correctly we need to make sure
+  // that when a clause has more than one instruction, no instruction in the
+  // clause writes to a register that is read by another instruction in the
+  // clause (including itself). If we encounter this situation, we need to
+  // break the clause by inserting a non-SMEM instruction.
+
+ std::set<unsigned> ClauseDefs;
+ std::set<unsigned> ClauseUses;
+
+ for (MachineInstr *MI : EmittedInstrs) {
+
+ // When we hit a non-SMEM instruction then we have passed the start of the
+ // clause and we can stop.
+ if (!MI || !SIInstrInfo::isSMRD(*MI))
+ break;
+
+ addRegsToSet(MI->defs(), ClauseDefs);
+ addRegsToSet(MI->uses(), ClauseUses);
+ }
+
+ if (ClauseDefs.empty())
+ return 0;
+
+ // FIXME: When we support stores, we need to make sure not to put loads and
+ // stores in the same clause if they use the same address. For now, just
+ // start a new clause whenever we see a store.
+ if (SMEM->mayStore())
+ return 1;
+
+ addRegsToSet(SMEM->defs(), ClauseDefs);
+ addRegsToSet(SMEM->uses(), ClauseUses);
+
+ std::vector<unsigned> Result(std::max(ClauseDefs.size(), ClauseUses.size()));
+ std::vector<unsigned>::iterator End;
+
+ End = std::set_intersection(ClauseDefs.begin(), ClauseDefs.end(),
+ ClauseUses.begin(), ClauseUses.end(), Result.begin());
+
+ // If the set of defs and uses intersect then we cannot add this instruction
+ // to the clause, so we have a hazard.
+ if (End != Result.begin())
+ return 1;
+
+ return 0;
+}
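+
+// Illustrative clause hazard (hypothetical registers):
+//   s_load_dwordx2 s[0:1], s[4:5], 0x0
+//   s_load_dword   s2, s[0:1], 0x4    ; base s[0:1] is defined in-clause
+// The second load reads a register the first one defines, the def/use sets
+// intersect, and the check above returns 1 to force a clause break.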
+
+int GCNHazardRecognizer::checkSMRDHazards(MachineInstr *SMRD) {
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+ const SIInstrInfo *TII = ST.getInstrInfo();
+ int WaitStatesNeeded = 0;
+
+ WaitStatesNeeded = checkSMEMSoftClauseHazards(SMRD);
+
+ // This SMRD hazard only affects SI.
+ if (ST.getGeneration() != SISubtarget::SOUTHERN_ISLANDS)
+ return WaitStatesNeeded;
+
+  // A read of an SGPR by an SMRD instruction requires 4 wait states when the
+ // SGPR was written by a VALU instruction.
+ int SmrdSgprWaitStates = 4;
+ auto IsHazardDefFn = [TII] (MachineInstr *MI) { return TII->isVALU(*MI); };
+
+ for (const MachineOperand &Use : SMRD->uses()) {
+ if (!Use.isReg())
+ continue;
+ int WaitStatesNeededForUse =
+ SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn);
+ WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
+ }
+ return WaitStatesNeeded;
+}
+
+int GCNHazardRecognizer::checkVMEMHazards(MachineInstr* VMEM) {
+ const SIInstrInfo *TII = ST.getInstrInfo();
+
+ if (ST.getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
+ return 0;
+
+ const SIRegisterInfo &TRI = TII->getRegisterInfo();
+
+ // A read of an SGPR by a VMEM instruction requires 5 wait states when the
+  // SGPR was written by a VALU instruction.
+ int VmemSgprWaitStates = 5;
+ int WaitStatesNeeded = 0;
+ auto IsHazardDefFn = [TII] (MachineInstr *MI) { return TII->isVALU(*MI); };
+
+ for (const MachineOperand &Use : VMEM->uses()) {
+ if (!Use.isReg() || TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
+ continue;
+
+ int WaitStatesNeededForUse =
+ VmemSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn);
+ WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
+ }
+ return WaitStatesNeeded;
+}
+
+int GCNHazardRecognizer::checkDPPHazards(MachineInstr *DPP) {
+ const SIRegisterInfo *TRI = ST.getRegisterInfo();
+
+ // Check for DPP VGPR read after VALU VGPR write.
+ int DppVgprWaitStates = 2;
+ int WaitStatesNeeded = 0;
+
+ for (const MachineOperand &Use : DPP->uses()) {
+ if (!Use.isReg() || !TRI->isVGPR(MF.getRegInfo(), Use.getReg()))
+ continue;
+ int WaitStatesNeededForUse =
+ DppVgprWaitStates - getWaitStatesSinceDef(Use.getReg());
+ WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
+ }
+
+ return WaitStatesNeeded;
+}
diff --git a/lib/Target/AMDGPU/GCNHazardRecognizer.h b/lib/Target/AMDGPU/GCNHazardRecognizer.h
new file mode 100644
index 000000000000..d82041c5f174
--- /dev/null
+++ b/lib/Target/AMDGPU/GCNHazardRecognizer.h
@@ -0,0 +1,62 @@
+//===-- GCNHazardRecognizers.h - GCN Hazard Recognizers ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines hazard recognizers for scheduling on GCN processors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AMDGPUHAZARDRECOGNIZERS_H
+#define LLVM_LIB_TARGET_AMDGPUHAZARDRECOGNIZERS_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include <list>
+
+namespace llvm {
+
+class MachineFunction;
+class MachineInstr;
+class ScheduleDAG;
+class SIInstrInfo;
+class SISubtarget;
+
+class GCNHazardRecognizer final : public ScheduleHazardRecognizer {
+ // This variable stores the instruction that has been emitted this cycle. It
+ // will be added to EmittedInstrs, when AdvanceCycle() or RecedeCycle() is
+ // called.
+ MachineInstr *CurrCycleInstr;
+ std::list<MachineInstr*> EmittedInstrs;
+ const MachineFunction &MF;
+ const SISubtarget &ST;
+
+ int getWaitStatesSinceDef(unsigned Reg,
+ function_ref<bool(MachineInstr *)> IsHazardDef =
+ [](MachineInstr *) { return true; });
+
+ int checkSMEMSoftClauseHazards(MachineInstr *SMEM);
+ int checkSMRDHazards(MachineInstr *SMRD);
+ int checkVMEMHazards(MachineInstr* VMEM);
+ int checkDPPHazards(MachineInstr *DPP);
+public:
+ GCNHazardRecognizer(const MachineFunction &MF);
+ // We can only issue one instruction per cycle.
+ bool atIssueLimit() const override { return true; }
+ void EmitInstruction(SUnit *SU) override;
+ void EmitInstruction(MachineInstr *MI) override;
+ HazardType getHazardType(SUnit *SU, int Stalls) override;
+ void EmitNoop() override;
+ unsigned PreEmitNoops(SUnit *SU) override;
+ unsigned PreEmitNoops(MachineInstr *) override;
+ void AdvanceCycle() override;
+ void RecedeCycle() override;
+};
+
+} // end namespace llvm
+
+#endif //LLVM_LIB_TARGET_AMDGPUHAZARDRECOGNIZERS_H
diff --git a/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp b/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp
index a187de88f639..2932d3bb1580 100644
--- a/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp
+++ b/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp
@@ -11,6 +11,7 @@
#include "AMDGPUInstPrinter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
+#include "Utils/AMDGPUAsmUtils.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
@@ -18,6 +19,8 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include <string>
+
using namespace llvm;
void AMDGPUInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
@@ -28,6 +31,11 @@ void AMDGPUInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
printAnnotation(OS, Annot);
}
+void AMDGPUInstPrinter::printU4ImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ O << formatHex(MI->getOperand(OpNo).getImm() & 0xf);
+}
+
void AMDGPUInstPrinter::printU8ImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
O << formatHex(MI->getOperand(OpNo).getImm() & 0xff);
@@ -43,6 +51,11 @@ void AMDGPUInstPrinter::printU32ImmOperand(const MCInst *MI, unsigned OpNo,
O << formatHex(MI->getOperand(OpNo).getImm() & 0xffffffff);
}
+void AMDGPUInstPrinter::printU4ImmDecOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ O << formatDec(MI->getOperand(OpNo).getImm() & 0xf);
+}
+
void AMDGPUInstPrinter::printU8ImmDecOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
O << formatDec(MI->getOperand(OpNo).getImm() & 0xff);
@@ -53,22 +66,26 @@ void AMDGPUInstPrinter::printU16ImmDecOperand(const MCInst *MI, unsigned OpNo,
O << formatDec(MI->getOperand(OpNo).getImm() & 0xffff);
}
+void AMDGPUInstPrinter::printNamedBit(const MCInst* MI, unsigned OpNo,
+ raw_ostream& O, StringRef BitName) {
+ if (MI->getOperand(OpNo).getImm()) {
+ O << ' ' << BitName;
+ }
+}
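+
+// With this helper, e.g. printGLC() below emits " glc" when the operand's
+// immediate is non-zero and nothing otherwise, matching the hand-written
+// versions it replaces.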
+
void AMDGPUInstPrinter::printOffen(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- if (MI->getOperand(OpNo).getImm())
- O << " offen";
+ printNamedBit(MI, OpNo, O, "offen");
}
void AMDGPUInstPrinter::printIdxen(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- if (MI->getOperand(OpNo).getImm())
- O << " idxen";
+ printNamedBit(MI, OpNo, O, "idxen");
}
void AMDGPUInstPrinter::printAddr64(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- if (MI->getOperand(OpNo).getImm())
- O << " addr64";
+ printNamedBit(MI, OpNo, O, "addr64");
}
void AMDGPUInstPrinter::printMBUFOffset(const MCInst *MI, unsigned OpNo,
@@ -79,7 +96,7 @@ void AMDGPUInstPrinter::printMBUFOffset(const MCInst *MI, unsigned OpNo,
}
}
-void AMDGPUInstPrinter::printDSOffset(const MCInst *MI, unsigned OpNo,
+void AMDGPUInstPrinter::printOffset(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
uint16_t Imm = MI->getOperand(OpNo).getImm();
if (Imm != 0) {
@@ -88,7 +105,7 @@ void AMDGPUInstPrinter::printDSOffset(const MCInst *MI, unsigned OpNo,
}
}
-void AMDGPUInstPrinter::printDSOffset0(const MCInst *MI, unsigned OpNo,
+void AMDGPUInstPrinter::printOffset0(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
if (MI->getOperand(OpNo).getImm()) {
O << " offset0:";
@@ -96,7 +113,7 @@ void AMDGPUInstPrinter::printDSOffset0(const MCInst *MI, unsigned OpNo,
}
}
-void AMDGPUInstPrinter::printDSOffset1(const MCInst *MI, unsigned OpNo,
+void AMDGPUInstPrinter::printOffset1(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
if (MI->getOperand(OpNo).getImm()) {
O << " offset1:";
@@ -104,28 +121,62 @@ void AMDGPUInstPrinter::printDSOffset1(const MCInst *MI, unsigned OpNo,
}
}
+void AMDGPUInstPrinter::printSMRDOffset(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ printU32ImmOperand(MI, OpNo, O);
+}
+
+void AMDGPUInstPrinter::printSMRDLiteralOffset(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ printU32ImmOperand(MI, OpNo, O);
+}
+
void AMDGPUInstPrinter::printGDS(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- if (MI->getOperand(OpNo).getImm())
- O << " gds";
+ printNamedBit(MI, OpNo, O, "gds");
}
void AMDGPUInstPrinter::printGLC(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- if (MI->getOperand(OpNo).getImm())
- O << " glc";
+ printNamedBit(MI, OpNo, O, "glc");
}
void AMDGPUInstPrinter::printSLC(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- if (MI->getOperand(OpNo).getImm())
- O << " slc";
+ printNamedBit(MI, OpNo, O, "slc");
}
void AMDGPUInstPrinter::printTFE(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- if (MI->getOperand(OpNo).getImm())
- O << " tfe";
+ printNamedBit(MI, OpNo, O, "tfe");
+}
+
+void AMDGPUInstPrinter::printDMask(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ if (MI->getOperand(OpNo).getImm()) {
+ O << " dmask:";
+ printU16ImmOperand(MI, OpNo, O);
+ }
+}
+
+void AMDGPUInstPrinter::printUNorm(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ printNamedBit(MI, OpNo, O, "unorm");
+}
+
+void AMDGPUInstPrinter::printDA(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ printNamedBit(MI, OpNo, O, "da");
+}
+
+void AMDGPUInstPrinter::printR128(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ printNamedBit(MI, OpNo, O, "r128");
+}
+
+void AMDGPUInstPrinter::printLWE(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ printNamedBit(MI, OpNo, O, "lwe");
}
void AMDGPUInstPrinter::printRegOperand(unsigned reg, raw_ostream &O,
@@ -152,6 +203,18 @@ void AMDGPUInstPrinter::printRegOperand(unsigned reg, raw_ostream &O,
case AMDGPU::VCC_HI:
O << "vcc_hi";
return;
+ case AMDGPU::TBA_LO:
+ O << "tba_lo";
+ return;
+ case AMDGPU::TBA_HI:
+ O << "tba_hi";
+ return;
+ case AMDGPU::TMA_LO:
+ O << "tma_lo";
+ return;
+ case AMDGPU::TMA_HI:
+ O << "tma_hi";
+ return;
case AMDGPU::EXEC_LO:
O << "exec_lo";
return;
@@ -168,62 +231,73 @@ void AMDGPUInstPrinter::printRegOperand(unsigned reg, raw_ostream &O,
break;
}
- char Type;
- unsigned NumRegs;
+  // The low 8 bits of the encoding value are the register index, for both VGPRs
+ // and SGPRs.
+ unsigned RegIdx = MRI.getEncodingValue(reg) & ((1 << 8) - 1);
+ unsigned NumRegs;
if (MRI.getRegClass(AMDGPU::VGPR_32RegClassID).contains(reg)) {
- Type = 'v';
+ O << 'v';
NumRegs = 1;
} else if (MRI.getRegClass(AMDGPU::SGPR_32RegClassID).contains(reg)) {
- Type = 's';
+ O << 's';
NumRegs = 1;
} else if (MRI.getRegClass(AMDGPU::VReg_64RegClassID).contains(reg)) {
- Type = 'v';
    O << 'v';
NumRegs = 2;
- } else if (MRI.getRegClass(AMDGPU::SReg_64RegClassID).contains(reg)) {
- Type = 's';
+ } else if (MRI.getRegClass(AMDGPU::SGPR_64RegClassID).contains(reg)) {
+ O << 's';
NumRegs = 2;
} else if (MRI.getRegClass(AMDGPU::VReg_128RegClassID).contains(reg)) {
- Type = 'v';
+ O << 'v';
NumRegs = 4;
- } else if (MRI.getRegClass(AMDGPU::SReg_128RegClassID).contains(reg)) {
- Type = 's';
+ } else if (MRI.getRegClass(AMDGPU::SGPR_128RegClassID).contains(reg)) {
+ O << 's';
NumRegs = 4;
} else if (MRI.getRegClass(AMDGPU::VReg_96RegClassID).contains(reg)) {
- Type = 'v';
+ O << 'v';
NumRegs = 3;
} else if (MRI.getRegClass(AMDGPU::VReg_256RegClassID).contains(reg)) {
- Type = 'v';
+ O << 'v';
NumRegs = 8;
} else if (MRI.getRegClass(AMDGPU::SReg_256RegClassID).contains(reg)) {
- Type = 's';
+ O << 's';
NumRegs = 8;
} else if (MRI.getRegClass(AMDGPU::VReg_512RegClassID).contains(reg)) {
- Type = 'v';
+ O << 'v';
NumRegs = 16;
} else if (MRI.getRegClass(AMDGPU::SReg_512RegClassID).contains(reg)) {
- Type = 's';
+ O << 's';
NumRegs = 16;
+ } else if (MRI.getRegClass(AMDGPU::TTMP_64RegClassID).contains(reg)) {
+ O << "ttmp";
+ NumRegs = 2;
+ RegIdx -= 112; // Trap temps start at offset 112. TODO: Get this from tablegen.
+ } else if (MRI.getRegClass(AMDGPU::TTMP_128RegClassID).contains(reg)) {
+ O << "ttmp";
+ NumRegs = 4;
+ RegIdx -= 112; // Trap temps start at offset 112. TODO: Get this from tablegen.
} else {
O << getRegisterName(reg);
return;
}
- // The low 8 bits of the encoding value is the register index, for both VGPRs
- // and SGPRs.
- unsigned RegIdx = MRI.getEncodingValue(reg) & ((1 << 8) - 1);
if (NumRegs == 1) {
- O << Type << RegIdx;
+ O << RegIdx;
return;
}
- O << Type << '[' << RegIdx << ':' << (RegIdx + NumRegs - 1) << ']';
+ O << '[' << RegIdx << ':' << (RegIdx + NumRegs - 1) << ']';
}
void AMDGPUInstPrinter::printVOPDst(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
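  // VOP instructions share a mnemonic across encodings; the suffix is chosen
  // from the TSFlags: _e64 for VOP3, _dpp for DPP, _sdwa for SDWA, otherwise
  // the 32-bit _e32 encoding.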
if (MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::VOP3)
O << "_e64 ";
+ else if (MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::DPP)
+ O << "_dpp ";
+ else if (MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::SDWA)
+ O << "_sdwa ";
else
O << "_e32 ";
@@ -345,12 +419,13 @@ void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
const MCExpr *Exp = Op.getExpr();
Exp->print(O, &MAI);
} else {
- llvm_unreachable("unknown operand type in printOperand");
+ O << "/*INV_OP*/";
}
}
-void AMDGPUInstPrinter::printOperandAndMods(const MCInst *MI, unsigned OpNo,
- raw_ostream &O) {
+void AMDGPUInstPrinter::printOperandAndFPInputMods(const MCInst *MI,
+ unsigned OpNo,
+ raw_ostream &O) {
unsigned InputModifiers = MI->getOperand(OpNo).getImm();
if (InputModifiers & SISrcMods::NEG)
O << '-';
@@ -361,6 +436,122 @@ void AMDGPUInstPrinter::printOperandAndMods(const MCInst *MI, unsigned OpNo,
O << '|';
}
+void AMDGPUInstPrinter::printOperandAndIntInputMods(const MCInst *MI,
+ unsigned OpNo,
+ raw_ostream &O) {
+ unsigned InputModifiers = MI->getOperand(OpNo).getImm();
+ if (InputModifiers & SISrcMods::SEXT)
+ O << "sext(";
+ printOperand(MI, OpNo + 1, O);
+ if (InputModifiers & SISrcMods::SEXT)
+ O << ')';
+}
+
+void AMDGPUInstPrinter::printDPPCtrl(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned Imm = MI->getOperand(OpNo).getImm();
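+  // DPP control encodings below: 0x000-0x0ff quad_perm, 0x101-0x10f row_shl,
+  // 0x111-0x11f row_shr, 0x121-0x12f row_ror, then fixed wave/row operations.
+  // E.g. Imm == 0x1b prints " quad_perm:[3,2,1,0]".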
+ if (Imm <= 0x0ff) {
+ O << " quad_perm:[";
+ O << formatDec(Imm & 0x3) << ',';
+ O << formatDec((Imm & 0xc) >> 2) << ',';
+ O << formatDec((Imm & 0x30) >> 4) << ',';
+ O << formatDec((Imm & 0xc0) >> 6) << ']';
+ } else if ((Imm >= 0x101) && (Imm <= 0x10f)) {
+ O << " row_shl:";
+ printU4ImmDecOperand(MI, OpNo, O);
+ } else if ((Imm >= 0x111) && (Imm <= 0x11f)) {
+ O << " row_shr:";
+ printU4ImmDecOperand(MI, OpNo, O);
+ } else if ((Imm >= 0x121) && (Imm <= 0x12f)) {
+ O << " row_ror:";
+ printU4ImmDecOperand(MI, OpNo, O);
+ } else if (Imm == 0x130) {
+ O << " wave_shl:1";
+ } else if (Imm == 0x134) {
+ O << " wave_rol:1";
+ } else if (Imm == 0x138) {
+ O << " wave_shr:1";
+ } else if (Imm == 0x13c) {
+ O << " wave_ror:1";
+ } else if (Imm == 0x140) {
+ O << " row_mirror";
+ } else if (Imm == 0x141) {
+ O << " row_half_mirror";
+ } else if (Imm == 0x142) {
+ O << " row_bcast:15";
+ } else if (Imm == 0x143) {
+ O << " row_bcast:31";
+ } else {
+ llvm_unreachable("Invalid dpp_ctrl value");
+ }
+}
+
+void AMDGPUInstPrinter::printRowMask(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ O << " row_mask:";
+ printU4ImmOperand(MI, OpNo, O);
+}
+
+void AMDGPUInstPrinter::printBankMask(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ O << " bank_mask:";
+ printU4ImmOperand(MI, OpNo, O);
+}
+
+void AMDGPUInstPrinter::printBoundCtrl(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned Imm = MI->getOperand(OpNo).getImm();
+ if (Imm) {
+ O << " bound_ctrl:0"; // XXX - this syntax is used in sp3
+ }
+}
+
+void AMDGPUInstPrinter::printSDWASel(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned Imm = MI->getOperand(OpNo).getImm();
+ switch (Imm) {
+ case 0: O << "BYTE_0"; break;
+ case 1: O << "BYTE_1"; break;
+ case 2: O << "BYTE_2"; break;
+ case 3: O << "BYTE_3"; break;
+ case 4: O << "WORD_0"; break;
+ case 5: O << "WORD_1"; break;
+ case 6: O << "DWORD"; break;
+ default: llvm_unreachable("Invalid SDWA data select operand");
+ }
+}
+
+void AMDGPUInstPrinter::printSDWADstSel(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ O << "dst_sel:";
+ printSDWASel(MI, OpNo, O);
+}
+
+void AMDGPUInstPrinter::printSDWASrc0Sel(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ O << "src0_sel:";
+ printSDWASel(MI, OpNo, O);
+}
+
+void AMDGPUInstPrinter::printSDWASrc1Sel(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ O << "src1_sel:";
+ printSDWASel(MI, OpNo, O);
+}
+
+void AMDGPUInstPrinter::printSDWADstUnused(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ O << "dst_unused:";
+ unsigned Imm = MI->getOperand(OpNo).getImm();
+ switch (Imm) {
+ case 0: O << "UNUSED_PAD"; break;
+ case 1: O << "UNUSED_SEXT"; break;
+ case 2: O << "UNUSED_PRESERVE"; break;
+ default: llvm_unreachable("Invalid SDWA dest_unused operand");
+ }
+}
+
void AMDGPUInstPrinter::printInterpSlot(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
unsigned Imm = MI->getOperand(OpNum).getImm();
@@ -395,9 +586,17 @@ void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo,
}
}
+void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O, char Asm) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ assert(Op.isImm());
+ if (Op.getImm() == 1)
+ O << Asm;
+}
+
void AMDGPUInstPrinter::printAbs(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- printIfSet(MI, OpNo, O, "|");
+ printIfSet(MI, OpNo, O, '|');
}
void AMDGPUInstPrinter::printClamp(const MCInst *MI, unsigned OpNo,
@@ -424,8 +623,15 @@ void AMDGPUInstPrinter::printOModSI(const MCInst *MI, unsigned OpNo,
void AMDGPUInstPrinter::printLiteral(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- int32_t Imm = MI->getOperand(OpNo).getImm();
- O << Imm << '(' << BitsToFloat(Imm) << ')';
+ const MCOperand &Op = MI->getOperand(OpNo);
+ assert(Op.isImm() || Op.isExpr());
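+  // Immediates print both the raw value and its float reinterpretation;
+  // relocatable literals print as '@' followed by the expression.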
+ if (Op.isImm()) {
+ int64_t Imm = Op.getImm();
+ O << Imm << '(' << BitsToFloat(Imm) << ')';
+ }
+ if (Op.isExpr()) {
+ Op.getExpr()->print(O << '@', &MAI);
+ }
}
void AMDGPUInstPrinter::printLast(const MCInst *MI, unsigned OpNo,
@@ -435,7 +641,7 @@ void AMDGPUInstPrinter::printLast(const MCInst *MI, unsigned OpNo,
void AMDGPUInstPrinter::printNeg(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- printIfSet(MI, OpNo, O, "-");
+ printIfSet(MI, OpNo, O, '-');
}
void AMDGPUInstPrinter::printOMOD(const MCInst *MI, unsigned OpNo,
@@ -456,7 +662,7 @@ void AMDGPUInstPrinter::printOMOD(const MCInst *MI, unsigned OpNo,
void AMDGPUInstPrinter::printRel(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- printIfSet(MI, OpNo, O, "+");
+ printIfSet(MI, OpNo, O, '+');
}
void AMDGPUInstPrinter::printUpdateExecMask(const MCInst *MI, unsigned OpNo,
@@ -585,43 +791,49 @@ void AMDGPUInstPrinter::printKCache(const MCInst *MI, unsigned OpNo,
void AMDGPUInstPrinter::printSendMsg(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- unsigned SImm16 = MI->getOperand(OpNo).getImm();
- unsigned Msg = SImm16 & 0xF;
- if (Msg == 2 || Msg == 3) {
- unsigned Op = (SImm16 >> 4) & 0xF;
- if (Msg == 3)
- O << "Gs_done(";
- else
- O << "Gs(";
- if (Op == 0) {
- O << "nop";
- } else {
- unsigned Stream = (SImm16 >> 8) & 0x3;
- if (Op == 1)
- O << "cut";
- else if (Op == 2)
- O << "emit";
- else if (Op == 3)
- O << "emit-cut";
- O << " stream " << Stream;
+ using namespace llvm::AMDGPU::SendMsg;
+
+ const unsigned SImm16 = MI->getOperand(OpNo).getImm();
+ const unsigned Id = SImm16 & ID_MASK_;
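+  // The symbolic masks and shifts describe the same layout the old printer
+  // decoded by hand: message id in bits [3:0], GS op in [6:4], stream id in
+  // bits [9:8].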
+ do {
+ if (Id == ID_INTERRUPT) {
+ if ((SImm16 & ~ID_MASK_) != 0) // Unused/unknown bits must be 0.
+ break;
+ O << "sendmsg(" << IdSymbolic[Id] << ')';
+ return;
}
- O << "), [m0] ";
- } else if (Msg == 1)
- O << "interrupt ";
- else if (Msg == 15)
- O << "system ";
- else
- O << "unknown(" << Msg << ") ";
+ if (Id == ID_GS || Id == ID_GS_DONE) {
+ if ((SImm16 & ~(ID_MASK_|OP_GS_MASK_|STREAM_ID_MASK_)) != 0) // Unused/unknown bits must be 0.
+ break;
+ const unsigned OpGs = (SImm16 & OP_GS_MASK_) >> OP_SHIFT_;
+ const unsigned StreamId = (SImm16 & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
+      if (OpGs == OP_GS_NOP && Id != ID_GS_DONE) // NOP is only valid with GS_DONE.
+ break;
+ if (OpGs == OP_GS_NOP && StreamId != 0) // NOP does not use/define stream id bits.
+ break;
+ O << "sendmsg(" << IdSymbolic[Id] << ", " << OpGsSymbolic[OpGs];
+ if (OpGs != OP_GS_NOP) { O << ", " << StreamId; }
+ O << ')';
+ return;
+ }
+ if (Id == ID_SYSMSG) {
+ if ((SImm16 & ~(ID_MASK_|OP_SYS_MASK_)) != 0) // Unused/unknown bits must be 0.
+ break;
+ const unsigned OpSys = (SImm16 & OP_SYS_MASK_) >> OP_SHIFT_;
+      if (!(OP_SYS_FIRST_ <= OpSys && OpSys < OP_SYS_LAST_)) // Unused/unknown.
+ break;
+ O << "sendmsg(" << IdSymbolic[Id] << ", " << OpSysSymbolic[OpSys] << ')';
+ return;
+ }
+ } while (0);
+ O << SImm16; // Unknown simm16 code.
}
void AMDGPUInstPrinter::printWaitFlag(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- // Note: Mask values are taken from SIInsertWaits.cpp and not from ISA docs
- // SIInsertWaits.cpp bits usage does not match ISA docs description but it
- // works so it might be a misprint in docs.
unsigned SImm16 = MI->getOperand(OpNo).getImm();
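  // s_waitcnt layout: vmcnt in bits [3:0], expcnt in [6:4], lgkmcnt in [11:8].
  // A counter at its maximum value means "no wait" and is omitted, so
  // SImm16 == 0 prints all three: "vmcnt(0) expcnt(0) lgkmcnt(0)".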
unsigned Vmcnt = SImm16 & 0xF;
- unsigned Expcnt = (SImm16 >> 4) & 0xF;
+ unsigned Expcnt = (SImm16 >> 4) & 0x7;
unsigned Lgkmcnt = (SImm16 >> 8) & 0xF;
bool NeedSpace = false;
@@ -638,11 +850,32 @@ void AMDGPUInstPrinter::printWaitFlag(const MCInst *MI, unsigned OpNo,
NeedSpace = true;
}
- if (Lgkmcnt != 0x7) {
+ if (Lgkmcnt != 0xF) {
if (NeedSpace)
O << ' ';
O << "lgkmcnt(" << Lgkmcnt << ')';
}
}
+void AMDGPUInstPrinter::printHwreg(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ using namespace llvm::AMDGPU::Hwreg;
+
+ unsigned SImm16 = MI->getOperand(OpNo).getImm();
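+  // The simm16 packs a register id, a bit offset and a width-minus-one field;
+  // known ids print symbolically and the default offset/width pair is omitted.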
+ const unsigned Id = (SImm16 & ID_MASK_) >> ID_SHIFT_;
+ const unsigned Offset = (SImm16 & OFFSET_MASK_) >> OFFSET_SHIFT_;
+ const unsigned Width = ((SImm16 & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1;
+
+ O << "hwreg(";
+ if (ID_SYMBOLIC_FIRST_ <= Id && Id < ID_SYMBOLIC_LAST_) {
+ O << IdSymbolic[Id];
+ } else {
+ O << Id;
+ }
+ if (Width != WIDTH_M1_DEFAULT_ + 1 || Offset != OFFSET_DEFAULT_) {
+ O << ", " << Offset << ", " << Width;
+ }
+ O << ')';
+}
+
#include "AMDGPUGenAsmWriter.inc"
diff --git a/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.h b/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.h
index 90541d86132d..f5a290f16045 100644
--- a/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.h
+++ b/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.h
@@ -10,8 +10,8 @@
/// \file
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_INSTPRINTER_AMDGPUINSTPRINTER_H
-#define LLVM_LIB_TARGET_R600_INSTPRINTER_AMDGPUINSTPRINTER_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_INSTPRINTER_AMDGPUINSTPRINTER_H
+#define LLVM_LIB_TARGET_AMDGPU_INSTPRINTER_AMDGPUINSTPRINTER_H
#include "llvm/MC/MCInstPrinter.h"
@@ -33,37 +33,60 @@ public:
const MCRegisterInfo &MRI);
private:
+ void printU4ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU8ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printU4ImmDecOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU8ImmDecOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU16ImmDecOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU32ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printNamedBit(const MCInst* MI, unsigned OpNo, raw_ostream& O,
+ StringRef BitName);
void printOffen(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printIdxen(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printAddr64(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printMBUFOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printDSOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printDSOffset0(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printDSOffset1(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printOffset0(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printOffset1(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printSMRDOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printSMRDLiteralOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printGDS(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printGLC(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printSLC(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printTFE(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printDMask(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printUNorm(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printDA(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printR128(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printLWE(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printRegOperand(unsigned RegNo, raw_ostream &O);
void printVOPDst(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printImmediate32(uint32_t I, raw_ostream &O);
void printImmediate64(uint64_t I, raw_ostream &O);
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printOperandAndMods(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printOperandAndFPInputMods(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printOperandAndIntInputMods(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printDPPCtrl(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printRowMask(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printBankMask(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printBoundCtrl(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printSDWASel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printSDWADstSel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printSDWASrc0Sel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printSDWASrc1Sel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printSDWADstUnused(const MCInst *MI, unsigned OpNo, raw_ostream &O);
static void printInterpSlot(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printMemOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
static void printIfSet(const MCInst *MI, unsigned OpNo, raw_ostream &O,
StringRef Asm, StringRef Default = "");
+ static void printIfSet(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O, char Asm);
static void printAbs(const MCInst *MI, unsigned OpNo, raw_ostream &O);
static void printClamp(const MCInst *MI, unsigned OpNo, raw_ostream &O);
static void printClampSI(const MCInst *MI, unsigned OpNo, raw_ostream &O);
static void printOModSI(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- static void printLiteral(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printLiteral(const MCInst *MI, unsigned OpNo, raw_ostream &O);
static void printLast(const MCInst *MI, unsigned OpNo, raw_ostream &O);
static void printNeg(const MCInst *MI, unsigned OpNo, raw_ostream &O);
static void printOMOD(const MCInst *MI, unsigned OpNo, raw_ostream &O);
@@ -79,6 +102,7 @@ private:
static void printKCache(const MCInst *MI, unsigned OpNo, raw_ostream &O);
static void printSendMsg(const MCInst *MI, unsigned OpNo, raw_ostream &O);
static void printWaitFlag(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ static void printHwreg(const MCInst *MI, unsigned OpNo, raw_ostream &O);
};
} // End namespace llvm
diff --git a/lib/Target/AMDGPU/InstPrinter/CMakeLists.txt b/lib/Target/AMDGPU/InstPrinter/CMakeLists.txt
index ce63bd553b9c..7191ff2c4577 100644
--- a/lib/Target/AMDGPU/InstPrinter/CMakeLists.txt
+++ b/lib/Target/AMDGPU/InstPrinter/CMakeLists.txt
@@ -1,3 +1,5 @@
add_llvm_library(LLVMAMDGPUAsmPrinter
AMDGPUInstPrinter.cpp
)
+
+add_dependencies(LLVMAMDGPUAsmPrinter LLVMAMDGPUUtils)
diff --git a/lib/Target/AMDGPU/InstPrinter/LLVMBuild.txt b/lib/Target/AMDGPU/InstPrinter/LLVMBuild.txt
index fdb43844dc63..30c2670316c8 100644
--- a/lib/Target/AMDGPU/InstPrinter/LLVMBuild.txt
+++ b/lib/Target/AMDGPU/InstPrinter/LLVMBuild.txt
@@ -19,6 +19,6 @@
type = Library
name = AMDGPUAsmPrinter
parent = AMDGPU
-required_libraries = MC Support
+required_libraries = MC Support AMDGPUUtils
add_to_library_groups = AMDGPU
diff --git a/lib/Target/AMDGPU/InstPrinter/Makefile b/lib/Target/AMDGPU/InstPrinter/Makefile
deleted file mode 100644
index 4e48ac7e28a9..000000000000
--- a/lib/Target/AMDGPU/InstPrinter/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-#===- lib/Target/R600/AsmPrinter/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAMDGPUAsmPrinter
-
-# Hack: we need to include 'main' x86 target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AMDGPU/LLVMBuild.txt b/lib/Target/AMDGPU/LLVMBuild.txt
index 38c5489586f1..bbdd17737cf0 100644
--- a/lib/Target/AMDGPU/LLVMBuild.txt
+++ b/lib/Target/AMDGPU/LLVMBuild.txt
@@ -1,4 +1,4 @@
-;===- ./lib/Target/AMDIL/LLVMBuild.txt -------------------------*- Conf -*--===;
+;===- ./lib/Target/AMDGPU/LLVMBuild.txt ------------------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
@@ -16,7 +16,7 @@
;===------------------------------------------------------------------------===;
[common]
-subdirectories = AsmParser InstPrinter MCTargetDesc TargetInfo Utils
+subdirectories = AsmParser Disassembler InstPrinter MCTargetDesc TargetInfo Utils
[component_0]
type = TargetGroup
@@ -24,10 +24,11 @@ name = AMDGPU
parent = Target
has_asmparser = 1
has_asmprinter = 1
+has_disassembler = 1
[component_1]
type = Library
name = AMDGPUCodeGen
parent = AMDGPU
-required_libraries = Analysis AsmPrinter CodeGen Core IPO MC AMDGPUAsmParser AMDGPUAsmPrinter AMDGPUDesc AMDGPUInfo AMDGPUUtils Scalar SelectionDAG Support Target TransformUtils
+required_libraries = Analysis AsmPrinter CodeGen Core IPO MC AMDGPUAsmPrinter AMDGPUDesc AMDGPUInfo AMDGPUUtils Scalar SelectionDAG Support Target TransformUtils Vectorize
add_to_library_groups = AMDGPU
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
index 60e8c8f3d303..1cb9d21408c6 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
@@ -53,7 +53,8 @@ public:
const MCAsmLayout &Layout) const override {
return false;
}
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override {
assert(!"Not implemented");
}
bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
@@ -73,12 +74,17 @@ void AMDGPUMCObjectWriter::writeObject(MCAssembler &Asm,
static unsigned getFixupKindNumBytes(unsigned Kind) {
switch (Kind) {
+ case FK_SecRel_1:
case FK_Data_1:
return 1;
+ case FK_SecRel_2:
case FK_Data_2:
return 2;
+ case FK_SecRel_4:
case FK_Data_4:
+ case FK_PCRel_4:
return 4;
+ case FK_SecRel_8:
case FK_Data_8:
return 8;
default:
@@ -92,32 +98,15 @@ void AMDGPUAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
switch ((unsigned)Fixup.getKind()) {
case AMDGPU::fixup_si_sopp_br: {
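+    // SOPP branch targets are encoded in words relative to the instruction
+    // following the branch, so a branch to the next instruction (Value == 4)
+    // encodes as 0.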
+ int64_t BrImm = ((int64_t)Value - 4) / 4;
+ if (!isInt<16>(BrImm))
+ report_fatal_error("branch size exceeds simm16");
+
uint16_t *Dst = (uint16_t*)(Data + Fixup.getOffset());
- *Dst = (Value - 4) / 4;
+ *Dst = BrImm;
break;
}
- case AMDGPU::fixup_si_rodata: {
- uint32_t *Dst = (uint32_t*)(Data + Fixup.getOffset());
- // We emit constant data at the end of the text section and generate its
- // address using the following code sequence:
- // s_getpc_b64 s[0:1]
- // s_add_u32 s0, s0, $symbol
- // s_addc_u32 s1, s1, 0
- //
- // s_getpc_b64 returns the address of the s_add_u32 instruction and then
- // the fixup replaces $symbol with a literal constant, which is a
- // pc-relative offset from the encoding of the $symbol operand to the
- // constant data.
- //
- // What we want here is an offset from the start of the s_add_u32
- // instruction to the constant data, but since the encoding of $symbol
- // starts 4 bytes after the start of the add instruction, we end up
- // with an offset that is 4 bytes too small. This requires us to
- // add 4 to the fixup value before applying it.
- *Dst = Value + 4;
- break;
- }
default: {
// FIXME: Copied from AArch64
unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
@@ -144,7 +133,6 @@ const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
const static MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
// name offset bits flags
{ "fixup_si_sopp_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_si_rodata", 0, 32, MCFixupKindInfo::FKF_IsPCRel }
};
if (Kind < FirstTargetFixupKind)
@@ -167,13 +155,15 @@ namespace {
class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
bool Is64Bit;
+ bool HasRelocationAddend;
public:
- ELFAMDGPUAsmBackend(const Target &T, bool Is64Bit) :
- AMDGPUAsmBackend(T), Is64Bit(Is64Bit) { }
+ ELFAMDGPUAsmBackend(const Target &T, const Triple &TT) :
+ AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
+ HasRelocationAddend(TT.getOS() == Triple::AMDHSA) { }
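+  // 64-bit ELF is keyed on the amdgcn arch; only amdhsa targets use RELA
+  // relocations with explicit addends.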
MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
- return createAMDGPUELFObjectWriter(Is64Bit, OS);
+ return createAMDGPUELFObjectWriter(Is64Bit, HasRelocationAddend, OS);
}
};
@@ -182,8 +172,6 @@ public:
MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
const MCRegisterInfo &MRI,
const Triple &TT, StringRef CPU) {
- Triple TargetTriple(TT);
-
// Use 64-bit ELF for amdgcn
- return new ELFAMDGPUAsmBackend(T, TargetTriple.getArch() == Triple::amdgcn);
+ return new ELFAMDGPUAsmBackend(T, TT);
}
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp
index 820f17df8960..b4e3b8e896bd 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp
@@ -18,23 +18,56 @@ namespace {
class AMDGPUELFObjectWriter : public MCELFObjectTargetWriter {
public:
- AMDGPUELFObjectWriter(bool Is64Bit);
+ AMDGPUELFObjectWriter(bool Is64Bit, bool HasRelocationAddend);
protected:
- unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel) const override {
- return Fixup.getKind();
- }
-
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
};
} // End anonymous namespace
-AMDGPUELFObjectWriter::AMDGPUELFObjectWriter(bool Is64Bit)
- : MCELFObjectTargetWriter(Is64Bit, ELF::ELFOSABI_AMDGPU_HSA,
- ELF::EM_AMDGPU, false) { }
+AMDGPUELFObjectWriter::AMDGPUELFObjectWriter(bool Is64Bit,
+ bool HasRelocationAddend)
+ : MCELFObjectTargetWriter(Is64Bit,
+ ELF::ELFOSABI_AMDGPU_HSA,
+ ELF::EM_AMDGPU,
+ HasRelocationAddend) { }
+
+unsigned AMDGPUELFObjectWriter::getRelocType(MCContext &Ctx,
+ const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ // SCRATCH_RSRC_DWORD[01] is a special global variable that represents
+ // the scratch buffer.
+ if (Target.getSymA()->getSymbol().getName() == "SCRATCH_RSRC_DWORD0")
+ return ELF::R_AMDGPU_ABS32_LO;
+ if (Target.getSymA()->getSymbol().getName() == "SCRATCH_RSRC_DWORD1")
+ return ELF::R_AMDGPU_ABS32_HI;
+
+ switch (Target.getAccessVariant()) {
+ default:
+ break;
+ case MCSymbolRefExpr::VK_GOTPCREL:
+ return ELF::R_AMDGPU_GOTPCREL;
+ }
+
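+  // Otherwise derive the relocation from the generic fixup kind.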
+ switch (Fixup.getKind()) {
+ default: break;
+ case FK_PCRel_4:
+ return ELF::R_AMDGPU_REL32;
+ case FK_SecRel_4:
+ return ELF::R_AMDGPU_ABS32;
+ }
+
+ llvm_unreachable("unhandled relocation type");
+}
+
-MCObjectWriter *llvm::createAMDGPUELFObjectWriter(bool Is64Bit, raw_pwrite_stream &OS) {
- MCELFObjectTargetWriter *MOTW = new AMDGPUELFObjectWriter(Is64Bit);
+MCObjectWriter *llvm::createAMDGPUELFObjectWriter(bool Is64Bit,
+ bool HasRelocationAddend,
+ raw_pwrite_stream &OS) {
+ MCELFObjectTargetWriter *MOTW =
+ new AMDGPUELFObjectWriter(Is64Bit, HasRelocationAddend);
return createELFObjectWriter(MOTW, OS, true);
}
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFStreamer.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFStreamer.cpp
index 9ff9fe794d2b..43338a5bebd2 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFStreamer.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFStreamer.cpp
@@ -12,11 +12,6 @@
using namespace llvm;
-void AMDGPUELFStreamer::InitSections(bool NoExecStack) {
- // Start with the .hsatext section by default.
- SwitchSection(AMDGPU::getHSATextSection(getContext()));
-}
-
MCELFStreamer *llvm::createAMDGPUELFStreamer(MCContext &Context,
MCAsmBackend &MAB,
raw_pwrite_stream &OS,
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFStreamer.h b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFStreamer.h
index 488d7e74d741..5319b65d65f9 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFStreamer.h
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFStreamer.h
@@ -1,4 +1,4 @@
-//===-------- AMDGPUELFStreamer.h - ELF Object Output ---------------------===//
+//===-------- AMDGPUELFStreamer.h - ELF Object Output -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -29,7 +29,6 @@ public:
MCCodeEmitter *Emitter)
: MCELFStreamer(Context, MAB, OS, Emitter) { }
- virtual void InitSections(bool NoExecStac) override;
};
MCELFStreamer *createAMDGPUELFStreamer(MCContext &Context, MCAsmBackend &MAB,
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUFixupKinds.h b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUFixupKinds.h
index 59a9178082f6..20c1adfbc6b9 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUFixupKinds.h
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUFixupKinds.h
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUFIXUPKINDS_H
-#define LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUFIXUPKINDS_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUFIXUPKINDS_H
+#define LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUFIXUPKINDS_H
#include "llvm/MC/MCFixup.h"
@@ -18,9 +18,6 @@ enum Fixups {
/// 16-bit PC relative fixup for SOPP branch instructions.
fixup_si_sopp_br = FirstTargetFixupKind,
- /// fixup for global addresses with constant initializers
- fixup_si_rodata,
-
// Marker
LastTargetFixupKind,
NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
index 4bc80a028936..1655591abf39 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
@@ -9,12 +9,15 @@
//===----------------------------------------------------------------------===//
#include "AMDGPUMCAsmInfo.h"
+#include "llvm/ADT/Triple.h"
using namespace llvm;
+
AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Triple &TT) : MCAsmInfoELF() {
HasSingleParameterDotFile = false;
//===------------------------------------------------------------------===//
- MaxInstLength = 16;
+ MinInstAlignment = 4;
+ MaxInstLength = (TT.getArch() == Triple::amdgcn) ? 8 : 16;
SeparatorString = "\n";
CommentString = ";";
PrivateLabelPrefix = "";
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h
index a546961705d7..8cb33a3179cd 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUMCASMINFO_H
-#define LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUMCASMINFO_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUMCASMINFO_H
+#define LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUMCASMINFO_H
#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h
index c95742762233..c942ea904085 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUMCCODEEMITTER_H
-#define LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUMCCODEEMITTER_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUMCCODEEMITTER_H
+#define LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUMCCODEEMITTER_H
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
index f70409470276..a0d9aab114fc 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
@@ -18,7 +18,6 @@
#include "AMDGPUTargetStreamer.h"
#include "InstPrinter/AMDGPUInstPrinter.h"
#include "SIDefines.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
@@ -56,15 +55,6 @@ createAMDGPUMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
return createAMDGPUMCSubtargetInfoImpl(TT, CPU, FS);
}
-static MCCodeGenInfo *createAMDGPUMCCodeGenInfo(const Triple &TT,
- Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
- X->initMCCodeGenInfo(RM, CM, OL);
- return X;
-}
-
static MCInstPrinter *createAMDGPUMCInstPrinter(const Triple &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
@@ -99,7 +89,6 @@ extern "C" void LLVMInitializeAMDGPUTargetMC() {
for (Target *T : {&TheAMDGPUTarget, &TheGCNTarget}) {
RegisterMCAsmInfo<AMDGPUMCAsmInfo> X(*T);
- TargetRegistry::RegisterMCCodeGenInfo(*T, createAMDGPUMCCodeGenInfo);
TargetRegistry::RegisterMCInstrInfo(*T, createAMDGPUMCInstrInfo);
TargetRegistry::RegisterMCRegInfo(*T, createAMDGPUMCRegisterInfo);
TargetRegistry::RegisterMCSubtargetInfo(*T, createAMDGPUMCSubtargetInfo);
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h
index 5d1b86b8c0c2..9ab7940812ba 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h
@@ -13,13 +13,13 @@
//===----------------------------------------------------------------------===//
//
-#ifndef LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUMCTARGETDESC_H
-#define LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUMCTARGETDESC_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUMCTARGETDESC_H
+#define LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUMCTARGETDESC_H
#include "llvm/Support/DataTypes.h"
-#include "llvm/ADT/StringRef.h"
namespace llvm {
+class StringRef;
class MCAsmBackend;
class MCCodeEmitter;
class MCContext;
@@ -47,6 +47,7 @@ MCAsmBackend *createAMDGPUAsmBackend(const Target &T, const MCRegisterInfo &MRI,
const Triple &TT, StringRef CPU);
MCObjectWriter *createAMDGPUELFObjectWriter(bool Is64Bit,
+ bool HasRelocationAddend,
raw_pwrite_stream &OS);
} // End llvm namespace
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
index b91134d2ee9b..83dcaacb738f 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
@@ -312,10 +312,6 @@ AMDGPUTargetELFStreamer::EmitAMDKernelCodeT(const amd_kernel_code_t &Header) {
MCStreamer &OS = getStreamer();
OS.PushSection();
- // The MCObjectFileInfo that is available to the assembler is a generic
- // implementation and not AMDGPUHSATargetObjectFile, so we can't use
- // MCObjectFileInfo::getTextSection() here for fetching the HSATextSection.
- OS.SwitchSection(AMDGPU::getHSATextSection(OS.getContext()));
OS.EmitBytes(StringRef((const char*)&Header, sizeof(Header)));
OS.PopSection();
}
diff --git a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
index 83bb728f541c..b3d59e8f396e 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
@@ -7,16 +7,16 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUTARGETSTREAMER_H
-#define LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUTARGETSTREAMER_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUTARGETSTREAMER_H
+#define LLVM_LIB_TARGET_AMDGPU_MCTARGETDESC_AMDGPUTARGETSTREAMER_H
#include "AMDKernelCodeT.h"
#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/Debug.h"
+
namespace llvm {
class MCELFStreamer;
+class MCSymbol;
class AMDGPUTargetStreamer : public MCTargetStreamer {
public:
diff --git a/lib/Target/AMDGPU/MCTargetDesc/Makefile b/lib/Target/AMDGPU/MCTargetDesc/Makefile
deleted file mode 100644
index 5ad68662d98c..000000000000
--- a/lib/Target/AMDGPU/MCTargetDesc/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/AMDGPU/TargetDesc/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAMDGPUDesc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp b/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
index 3c1142dd664b..5e8e6ceb7ca2 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
@@ -15,6 +15,7 @@
//===----------------------------------------------------------------------===//
#include "R600Defines.h"
+#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/MC/MCCodeEmitter.h"
@@ -51,12 +52,9 @@ public:
const MCSubtargetInfo &STI) const override;
private:
- void EmitByte(unsigned int byte, raw_ostream &OS) const;
-
void Emit(uint32_t value, raw_ostream &OS) const;
void Emit(uint64_t value, raw_ostream &OS) const;
- unsigned getHWRegChan(unsigned reg) const;
unsigned getHWReg(unsigned regNo) const;
};
@@ -142,10 +140,6 @@ void R600MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
}
}
-void R600MCCodeEmitter::EmitByte(unsigned int Byte, raw_ostream &OS) const {
- OS.write((uint8_t) Byte & 0xff);
-}
-
void R600MCCodeEmitter::Emit(uint32_t Value, raw_ostream &OS) const {
support::endian::Writer<support::little>(OS).write(Value);
}
@@ -154,17 +148,13 @@ void R600MCCodeEmitter::Emit(uint64_t Value, raw_ostream &OS) const {
support::endian::Writer<support::little>(OS).write(Value);
}
-unsigned R600MCCodeEmitter::getHWRegChan(unsigned reg) const {
- return MRI.getEncodingValue(reg) >> HW_CHAN_SHIFT;
-}
-
unsigned R600MCCodeEmitter::getHWReg(unsigned RegNo) const {
return MRI.getEncodingValue(RegNo) & HW_REG_MASK;
}
uint64_t R600MCCodeEmitter::getMachineOpValue(const MCInst &MI,
const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixup,
+ SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
if (MO.isReg()) {
if (HAS_NATIVE_OPERANDS(MCII.get(MI.getOpcode()).TSFlags))
@@ -172,6 +162,18 @@ uint64_t R600MCCodeEmitter::getMachineOpValue(const MCInst &MI,
return getHWReg(MO.getReg());
}
+ if (MO.isExpr()) {
+    // We put rodata at the end of the code section, then map the entire
+    // code section as a vtx buf, so the section-relative address is the
+    // correct one.
+    // Each R600 literal instruction has two operands. We can't easily get
+    // the order of the current one, so compare against the first one and
+    // adjust the offset.
+ const unsigned offset = (&MO == &MI.getOperand(0)) ? 0 : 4;
+ Fixups.push_back(MCFixup::create(offset, MO.getExpr(), FK_SecRel_4, MI.getLoc()));
+ return 0;
+ }
+
assert(MO.isImm());
return MO.getImm();
}
diff --git a/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp b/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
index 9eb3dadbc5e2..71b585c25ac5 100644
--- a/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
@@ -162,20 +162,30 @@ static uint32_t getLit64Encoding(uint64_t Val) {
uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
unsigned OpSize) const {
- if (MO.isExpr())
- return 255;
- assert(!MO.isFPImm());
+ int64_t Imm;
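+  // Expressions that fold to a constant can use the inline-constant
+  // encodings; any other expression has to take the literal slot (255).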
+ if (MO.isExpr()) {
+ const MCConstantExpr *C = dyn_cast<MCConstantExpr>(MO.getExpr());
+ if (!C)
+ return 255;
+
+ Imm = C->getValue();
+ } else {
- if (!MO.isImm())
- return ~0;
+ assert(!MO.isFPImm());
+
+ if (!MO.isImm())
+ return ~0;
+
+ Imm = MO.getImm();
+ }
if (OpSize == 4)
- return getLit32Encoding(static_cast<uint32_t>(MO.getImm()));
+ return getLit32Encoding(static_cast<uint32_t>(Imm));
assert(OpSize == 8);
- return getLit64Encoding(static_cast<uint64_t>(MO.getImm()));
+ return getLit64Encoding(static_cast<uint64_t>(Imm));
}
void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
@@ -213,7 +223,11 @@ void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
if (Op.isImm())
Imm = Op.getImm();
- else if (!Op.isExpr()) // Exprs will be replaced with a fixup value.
+    else if (Op.isExpr()) {
+      // Constant exprs fold to an immediate; other exprs are replaced with a
+      // fixup value later.
+      if (const MCConstantExpr *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
+        Imm = C->getValue();
+    } else // Neither an immediate nor an expr.
llvm_unreachable("Must be immediate or expr");
for (unsigned j = 0; j < 4; j++) {
@@ -247,10 +261,14 @@ uint64_t SIMCCodeEmitter::getMachineOpValue(const MCInst &MI,
if (MO.isReg())
return MRI.getEncodingValue(MO.getReg());
- if (MO.isExpr()) {
- const MCSymbolRefExpr *Expr = cast<MCSymbolRefExpr>(MO.getExpr());
- MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_rodata;
- Fixups.push_back(MCFixup::create(4, Expr, Kind, MI.getLoc()));
+ if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
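+    // External symbols get absolute 4-byte data fixups resolved by the
+    // linker; everything else is PC-relative from the literal operand.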
+ const MCSymbolRefExpr *Expr = dyn_cast<MCSymbolRefExpr>(MO.getExpr());
+ MCFixupKind Kind;
+ if (Expr && Expr->getSymbol().isExternal())
+ Kind = FK_Data_4;
+ else
+ Kind = FK_PCRel_4;
+ Fixups.push_back(MCFixup::create(4, MO.getExpr(), Kind, MI.getLoc()));
}
// Figure out the operand number, needed for isSrcOperand check
diff --git a/lib/Target/AMDGPU/Makefile b/lib/Target/AMDGPU/Makefile
deleted file mode 100644
index 219f34daa24f..000000000000
--- a/lib/Target/AMDGPU/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-##===- lib/Target/R600/Makefile ---------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMAMDGPUCodeGen
-TARGET = AMDGPU
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = AMDGPUGenRegisterInfo.inc AMDGPUGenInstrInfo.inc \
- AMDGPUGenDAGISel.inc AMDGPUGenSubtargetInfo.inc \
- AMDGPUGenMCCodeEmitter.inc AMDGPUGenCallingConv.inc \
- AMDGPUGenIntrinsics.inc AMDGPUGenDFAPacketizer.inc \
- AMDGPUGenAsmWriter.inc AMDGPUGenAsmMatcher.inc
-
-DIRS = AsmParser InstPrinter TargetInfo MCTargetDesc Utils
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AMDGPU/Processors.td b/lib/Target/AMDGPU/Processors.td
index 4300d972d46b..f5f1eb14e993 100644
--- a/lib/Target/AMDGPU/Processors.td
+++ b/lib/Target/AMDGPU/Processors.td
@@ -13,11 +13,8 @@ class Proc<string Name, ProcessorItineraries itin, list<SubtargetFeature> Featur
//===----------------------------------------------------------------------===//
// R600
//===----------------------------------------------------------------------===//
-def : Proc<"", R600_VLIW5_Itin,
- [FeatureR600, FeatureVertexCache]>;
-
def : Proc<"r600", R600_VLIW5_Itin,
- [FeatureR600 , FeatureVertexCache, FeatureWavefrontSize64]>;
+ [FeatureR600, FeatureVertexCache, FeatureWavefrontSize64]>;
def : Proc<"r630", R600_VLIW5_Itin,
[FeatureR600, FeatureVertexCache, FeatureWavefrontSize32]>;
@@ -84,11 +81,11 @@ def : Proc<"cayman", R600_VLIW4_Itin,
//===----------------------------------------------------------------------===//
def : ProcessorModel<"SI", SIFullSpeedModel,
- [FeatureSouthernIslands, FeatureFastFMAF32]
+ [FeatureSouthernIslands, FeatureFastFMAF32, HalfRate64Ops]
>;
-def : ProcessorModel<"tahiti", SIFullSpeedModel,
- [FeatureSouthernIslands, FeatureFastFMAF32]
+def : ProcessorModel<"tahiti", SIFullSpeedModel,
+ [FeatureSouthernIslands, FeatureFastFMAF32, HalfRate64Ops]
>;
def : ProcessorModel<"pitcairn", SIQuarterSpeedModel, [FeatureSouthernIslands]>;
@@ -116,8 +113,8 @@ def : ProcessorModel<"kaveri", SIQuarterSpeedModel,
>;
def : ProcessorModel<"hawaii", SIFullSpeedModel,
- [FeatureSeaIslands, FeatureFastFMAF32, FeatureLDSBankCount32,
- FeatureISAVersion7_0_1]
+ [FeatureSeaIslands, FeatureFastFMAF32, HalfRate64Ops,
+ FeatureLDSBankCount32, FeatureISAVersion7_0_1]
>;
def : ProcessorModel<"mullins", SIQuarterSpeedModel,
@@ -148,3 +145,11 @@ def : ProcessorModel<"fiji", SIQuarterSpeedModel,
def : ProcessorModel<"stoney", SIQuarterSpeedModel,
[FeatureVolcanicIslands, FeatureISAVersion8_0_1, FeatureLDSBankCount16]
>;
+
+def : ProcessorModel<"polaris10", SIQuarterSpeedModel,
+ [FeatureVolcanicIslands, FeatureISAVersion8_0_1, FeatureLDSBankCount32]
+>;
+
+def : ProcessorModel<"polaris11", SIQuarterSpeedModel,
+ [FeatureVolcanicIslands, FeatureISAVersion8_0_1, FeatureLDSBankCount32]
+>;
diff --git a/lib/Target/AMDGPU/R600ClauseMergePass.cpp b/lib/Target/AMDGPU/R600ClauseMergePass.cpp
index 3cb90218a7d5..3ccde79e2df4 100644
--- a/lib/Target/AMDGPU/R600ClauseMergePass.cpp
+++ b/lib/Target/AMDGPU/R600ClauseMergePass.cpp
@@ -31,8 +31,8 @@ using namespace llvm;
namespace {
-static bool isCFAlu(const MachineInstr *MI) {
- switch (MI->getOpcode()) {
+static bool isCFAlu(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
case AMDGPU::CF_ALU:
case AMDGPU::CF_ALU_PUSH_BEFORE:
return true;
@@ -47,19 +47,19 @@ private:
static char ID;
const R600InstrInfo *TII;
- unsigned getCFAluSize(const MachineInstr *MI) const;
- bool isCFAluEnabled(const MachineInstr *MI) const;
+ unsigned getCFAluSize(const MachineInstr &MI) const;
+ bool isCFAluEnabled(const MachineInstr &MI) const;
  /// The IfCvt pass can generate "disabled" ALU clause markers that need to
  /// be removed, with their content merged into the previous ALU clause.
  /// This function parses instructions after a CFAlu until it finds either a
  /// disabled CFAlu, whose content it merges, or an enabled CFAlu.
- void cleanPotentialDisabledCFAlu(MachineInstr *CFAlu) const;
+ void cleanPotentialDisabledCFAlu(MachineInstr &CFAlu) const;
  /// Check whether LatrCFAlu can be merged into RootCFAlu and do so if
  /// possible.
- bool mergeIfPossible(MachineInstr *RootCFAlu, const MachineInstr *LatrCFAlu)
- const;
+ bool mergeIfPossible(MachineInstr &RootCFAlu,
+ const MachineInstr &LatrCFAlu) const;
public:
R600ClauseMergePass(TargetMachine &tm) : MachineFunctionPass(ID) { }
@@ -71,38 +71,40 @@ public:
char R600ClauseMergePass::ID = 0;
-unsigned R600ClauseMergePass::getCFAluSize(const MachineInstr *MI) const {
+unsigned R600ClauseMergePass::getCFAluSize(const MachineInstr &MI) const {
assert(isCFAlu(MI));
- return MI->getOperand(
- TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::COUNT)).getImm();
+ return MI
+ .getOperand(TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::COUNT))
+ .getImm();
}
-bool R600ClauseMergePass::isCFAluEnabled(const MachineInstr *MI) const {
+bool R600ClauseMergePass::isCFAluEnabled(const MachineInstr &MI) const {
assert(isCFAlu(MI));
- return MI->getOperand(
- TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::Enabled)).getImm();
+ return MI
+ .getOperand(TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::Enabled))
+ .getImm();
}
-void R600ClauseMergePass::cleanPotentialDisabledCFAlu(MachineInstr *CFAlu)
- const {
+void R600ClauseMergePass::cleanPotentialDisabledCFAlu(
+ MachineInstr &CFAlu) const {
int CntIdx = TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::COUNT);
- MachineBasicBlock::iterator I = CFAlu, E = CFAlu->getParent()->end();
+ MachineBasicBlock::iterator I = CFAlu, E = CFAlu.getParent()->end();
I++;
do {
- while (I!= E && !isCFAlu(I))
+ while (I != E && !isCFAlu(*I))
I++;
if (I == E)
return;
- MachineInstr *MI = I++;
+ MachineInstr &MI = *I++;
if (isCFAluEnabled(MI))
break;
- CFAlu->getOperand(CntIdx).setImm(getCFAluSize(CFAlu) + getCFAluSize(MI));
- MI->eraseFromParent();
+ CFAlu.getOperand(CntIdx).setImm(getCFAluSize(CFAlu) + getCFAluSize(MI));
+ MI.eraseFromParent();
} while (I != E);
}
-bool R600ClauseMergePass::mergeIfPossible(MachineInstr *RootCFAlu,
- const MachineInstr *LatrCFAlu) const {
+bool R600ClauseMergePass::mergeIfPossible(MachineInstr &RootCFAlu,
+ const MachineInstr &LatrCFAlu) const {
assert(isCFAlu(RootCFAlu) && isCFAlu(LatrCFAlu));
int CntIdx = TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::COUNT);
unsigned RootInstCount = getCFAluSize(RootCFAlu),
@@ -112,7 +114,7 @@ bool R600ClauseMergePass::mergeIfPossible(MachineInstr *RootCFAlu,
DEBUG(dbgs() << "Excess inst counts\n");
return false;
}
- if (RootCFAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
+ if (RootCFAlu.getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
return false;
// Is KCache Bank 0 compatible ?
int Mode0Idx =
@@ -121,12 +123,12 @@ bool R600ClauseMergePass::mergeIfPossible(MachineInstr *RootCFAlu,
TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_BANK0);
int KBank0LineIdx =
TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_ADDR0);
- if (LatrCFAlu->getOperand(Mode0Idx).getImm() &&
- RootCFAlu->getOperand(Mode0Idx).getImm() &&
- (LatrCFAlu->getOperand(KBank0Idx).getImm() !=
- RootCFAlu->getOperand(KBank0Idx).getImm() ||
- LatrCFAlu->getOperand(KBank0LineIdx).getImm() !=
- RootCFAlu->getOperand(KBank0LineIdx).getImm())) {
+ if (LatrCFAlu.getOperand(Mode0Idx).getImm() &&
+ RootCFAlu.getOperand(Mode0Idx).getImm() &&
+ (LatrCFAlu.getOperand(KBank0Idx).getImm() !=
+ RootCFAlu.getOperand(KBank0Idx).getImm() ||
+ LatrCFAlu.getOperand(KBank0LineIdx).getImm() !=
+ RootCFAlu.getOperand(KBank0LineIdx).getImm())) {
DEBUG(dbgs() << "Wrong KC0\n");
return false;
}
@@ -137,56 +139,61 @@ bool R600ClauseMergePass::mergeIfPossible(MachineInstr *RootCFAlu,
TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_BANK1);
int KBank1LineIdx =
TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_ADDR1);
- if (LatrCFAlu->getOperand(Mode1Idx).getImm() &&
- RootCFAlu->getOperand(Mode1Idx).getImm() &&
- (LatrCFAlu->getOperand(KBank1Idx).getImm() !=
- RootCFAlu->getOperand(KBank1Idx).getImm() ||
- LatrCFAlu->getOperand(KBank1LineIdx).getImm() !=
- RootCFAlu->getOperand(KBank1LineIdx).getImm())) {
+ if (LatrCFAlu.getOperand(Mode1Idx).getImm() &&
+ RootCFAlu.getOperand(Mode1Idx).getImm() &&
+ (LatrCFAlu.getOperand(KBank1Idx).getImm() !=
+ RootCFAlu.getOperand(KBank1Idx).getImm() ||
+ LatrCFAlu.getOperand(KBank1LineIdx).getImm() !=
+ RootCFAlu.getOperand(KBank1LineIdx).getImm())) {
DEBUG(dbgs() << "Wrong KC0\n");
return false;
}
- if (LatrCFAlu->getOperand(Mode0Idx).getImm()) {
- RootCFAlu->getOperand(Mode0Idx).setImm(
- LatrCFAlu->getOperand(Mode0Idx).getImm());
- RootCFAlu->getOperand(KBank0Idx).setImm(
- LatrCFAlu->getOperand(KBank0Idx).getImm());
- RootCFAlu->getOperand(KBank0LineIdx).setImm(
- LatrCFAlu->getOperand(KBank0LineIdx).getImm());
+ if (LatrCFAlu.getOperand(Mode0Idx).getImm()) {
+ RootCFAlu.getOperand(Mode0Idx).setImm(
+ LatrCFAlu.getOperand(Mode0Idx).getImm());
+ RootCFAlu.getOperand(KBank0Idx).setImm(
+ LatrCFAlu.getOperand(KBank0Idx).getImm());
+ RootCFAlu.getOperand(KBank0LineIdx)
+ .setImm(LatrCFAlu.getOperand(KBank0LineIdx).getImm());
}
- if (LatrCFAlu->getOperand(Mode1Idx).getImm()) {
- RootCFAlu->getOperand(Mode1Idx).setImm(
- LatrCFAlu->getOperand(Mode1Idx).getImm());
- RootCFAlu->getOperand(KBank1Idx).setImm(
- LatrCFAlu->getOperand(KBank1Idx).getImm());
- RootCFAlu->getOperand(KBank1LineIdx).setImm(
- LatrCFAlu->getOperand(KBank1LineIdx).getImm());
+ if (LatrCFAlu.getOperand(Mode1Idx).getImm()) {
+ RootCFAlu.getOperand(Mode1Idx).setImm(
+ LatrCFAlu.getOperand(Mode1Idx).getImm());
+ RootCFAlu.getOperand(KBank1Idx).setImm(
+ LatrCFAlu.getOperand(KBank1Idx).getImm());
+ RootCFAlu.getOperand(KBank1LineIdx)
+ .setImm(LatrCFAlu.getOperand(KBank1LineIdx).getImm());
}
- RootCFAlu->getOperand(CntIdx).setImm(CumuledInsts);
- RootCFAlu->setDesc(TII->get(LatrCFAlu->getOpcode()));
+ RootCFAlu.getOperand(CntIdx).setImm(CumuledInsts);
+ RootCFAlu.setDesc(TII->get(LatrCFAlu.getOpcode()));
return true;
}
bool R600ClauseMergePass::runOnMachineFunction(MachineFunction &MF) {
- TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
+ const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
+ TII = ST.getInstrInfo();
+
for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
BB != BB_E; ++BB) {
MachineBasicBlock &MBB = *BB;
MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
MachineBasicBlock::iterator LatestCFAlu = E;
while (I != E) {
- MachineInstr *MI = I++;
+ MachineInstr &MI = *I++;
if ((!TII->canBeConsideredALU(MI) && !isCFAlu(MI)) ||
- TII->mustBeLastInClause(MI->getOpcode()))
+ TII->mustBeLastInClause(MI.getOpcode()))
LatestCFAlu = E;
if (!isCFAlu(MI))
continue;
cleanPotentialDisabledCFAlu(MI);
- if (LatestCFAlu != E && mergeIfPossible(LatestCFAlu, MI)) {
- MI->eraseFromParent();
+ if (LatestCFAlu != E && mergeIfPossible(*LatestCFAlu, MI)) {
+ MI.eraseFromParent();
} else {
- assert(MI->getOperand(8).getImm() && "CF ALU instruction disabled");
+ assert(MI.getOperand(8).getImm() && "CF ALU instruction disabled");
LatestCFAlu = MI;
}
}
diff --git a/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp b/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
index bd80bb211b4f..d5bda4a8303e 100644
--- a/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
+++ b/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
@@ -39,16 +39,16 @@ struct CFStack {
FIRST_NON_WQM_PUSH_W_FULL_ENTRY = 3
};
- const AMDGPUSubtarget *ST;
+ const R600Subtarget *ST;
std::vector<StackItem> BranchStack;
std::vector<StackItem> LoopStack;
unsigned MaxStackSize;
unsigned CurrentEntries;
unsigned CurrentSubEntries;
- CFStack(const AMDGPUSubtarget *st, unsigned ShaderType) : ST(st),
+ CFStack(const R600Subtarget *st, CallingConv::ID cc) : ST(st),
// We need to reserve a stack entry for CALL_FS in vertex shaders.
- MaxStackSize(ShaderType == ShaderType::VERTEX ? 1 : 0),
+ MaxStackSize(cc == CallingConv::AMDGPU_VS ? 1 : 0),
CurrentEntries(0), CurrentSubEntries(0) { }
unsigned getLoopDepth();
@@ -119,7 +119,7 @@ unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
return 0;
case CFStack::FIRST_NON_WQM_PUSH:
assert(!ST->hasCaymanISA());
- if (ST->getGeneration() <= AMDGPUSubtarget::R700) {
+ if (ST->getGeneration() <= R600Subtarget::R700) {
// +1 For the push operation.
// +2 Extra space required.
return 3;
@@ -132,7 +132,7 @@ unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
return 2;
}
case CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY:
- assert(ST->getGeneration() >= AMDGPUSubtarget::EVERGREEN);
+ assert(ST->getGeneration() >= R600Subtarget::EVERGREEN);
// +1 For the push operation.
// +1 Extra space required.
return 2;
@@ -142,8 +142,8 @@ unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
}
void CFStack::updateMaxStackSize() {
- unsigned CurrentStackSize = CurrentEntries +
- (RoundUpToAlignment(CurrentSubEntries, 4) / 4);
+ unsigned CurrentStackSize =
+ CurrentEntries + (alignTo(CurrentSubEntries, 4) / 4);
MaxStackSize = std::max(CurrentStackSize, MaxStackSize);
}
@@ -159,7 +159,7 @@ void CFStack::pushBranch(unsigned Opcode, bool isWQM) {
// See comment in
// CFStack::getSubEntrySize()
else if (CurrentEntries > 0 &&
- ST->getGeneration() > AMDGPUSubtarget::EVERGREEN &&
+ ST->getGeneration() > R600Subtarget::EVERGREEN &&
!ST->hasCaymanISA() &&
!branchStackContains(CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY))
Item = CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY;
@@ -220,10 +220,10 @@ private:
const R600InstrInfo *TII;
const R600RegisterInfo *TRI;
unsigned MaxFetchInst;
- const AMDGPUSubtarget *ST;
+ const R600Subtarget *ST;
- bool IsTrivialInst(MachineInstr *MI) const {
- switch (MI->getOpcode()) {
+ bool IsTrivialInst(MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
case AMDGPU::KILL:
case AMDGPU::RETURN:
return true;
@@ -234,7 +234,7 @@ private:
const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
unsigned Opcode = 0;
- bool isEg = (ST->getGeneration() >= AMDGPUSubtarget::EVERGREEN);
+ bool isEg = (ST->getGeneration() >= R600Subtarget::EVERGREEN);
switch (CFI) {
case CF_TC:
Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
@@ -278,11 +278,12 @@ private:
return TII->get(Opcode);
}
- bool isCompatibleWithClause(const MachineInstr *MI,
- std::set<unsigned> &DstRegs) const {
+ bool isCompatibleWithClause(const MachineInstr &MI,
+ std::set<unsigned> &DstRegs) const {
unsigned DstMI, SrcMI;
- for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
- E = MI->operands_end(); I != E; ++I) {
+ for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
+ E = MI.operands_end();
+ I != E; ++I) {
const MachineOperand &MO = *I;
if (!MO.isReg())
continue;
@@ -318,20 +319,20 @@ private:
MachineBasicBlock::iterator ClauseHead = I;
std::vector<MachineInstr *> ClauseContent;
unsigned AluInstCount = 0;
- bool IsTex = TII->usesTextureCache(ClauseHead);
+ bool IsTex = TII->usesTextureCache(*ClauseHead);
std::set<unsigned> DstRegs;
for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
- if (IsTrivialInst(I))
+ if (IsTrivialInst(*I))
continue;
if (AluInstCount >= MaxFetchInst)
break;
- if ((IsTex && !TII->usesTextureCache(I)) ||
- (!IsTex && !TII->usesVertexCache(I)))
+ if ((IsTex && !TII->usesTextureCache(*I)) ||
+ (!IsTex && !TII->usesVertexCache(*I)))
break;
- if (!isCompatibleWithClause(I, DstRegs))
+ if (!isCompatibleWithClause(*I, DstRegs))
break;
AluInstCount ++;
- ClauseContent.push_back(I);
+ ClauseContent.push_back(&*I);
}
MachineInstr *MIb = BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
getHWInstrDesc(IsTex?CF_TC:CF_VC))
@@ -340,28 +341,37 @@ private:
return ClauseFile(MIb, std::move(ClauseContent));
}
- void getLiteral(MachineInstr *MI, std::vector<int64_t> &Lits) const {
+ void getLiteral(MachineInstr &MI, std::vector<MachineOperand *> &Lits) const {
static const unsigned LiteralRegs[] = {
AMDGPU::ALU_LITERAL_X,
AMDGPU::ALU_LITERAL_Y,
AMDGPU::ALU_LITERAL_Z,
AMDGPU::ALU_LITERAL_W
};
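    // Identical immediates are folded onto one of the four per-group literal
    // slots; globals never match the isImm() test and always get a new slot.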
- const SmallVector<std::pair<MachineOperand *, int64_t>, 3 > Srcs =
+ const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs =
TII->getSrcs(MI);
- for (unsigned i = 0, e = Srcs.size(); i < e; ++i) {
- if (Srcs[i].first->getReg() != AMDGPU::ALU_LITERAL_X)
+    for (const auto &Src : Srcs) {
+ if (Src.first->getReg() != AMDGPU::ALU_LITERAL_X)
continue;
- int64_t Imm = Srcs[i].second;
- std::vector<int64_t>::iterator It =
- std::find(Lits.begin(), Lits.end(), Imm);
+ int64_t Imm = Src.second;
+      std::vector<MachineOperand *>::iterator It =
+          std::find_if(Lits.begin(), Lits.end(), [&](MachineOperand *val) {
+            return val->isImm() && (val->getImm() == Imm);
+          });
+
+ // Get corresponding Operand
+ MachineOperand &Operand = MI.getOperand(
+ TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::literal));
+
if (It != Lits.end()) {
+ // Reuse existing literal reg
unsigned Index = It - Lits.begin();
- Srcs[i].first->setReg(LiteralRegs[Index]);
+ Src.first->setReg(LiteralRegs[Index]);
} else {
+ // Allocate new literal reg
assert(Lits.size() < 4 && "Too many literals in Instruction Group");
- Srcs[i].first->setReg(LiteralRegs[Lits.size()]);
- Lits.push_back(Imm);
+ Src.first->setReg(LiteralRegs[Lits.size()]);
+ Lits.push_back(&Operand);
}
}
}
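
getLiteral maps each distinct literal in an instruction group onto one of the four ALU_LITERAL_{X,Y,Z,W} slots, reusing a slot when an equal immediate is already present; with this change the table keeps MachineOperand pointers so a slot can also carry a global address. A sketch of the slot-assignment policy on plain integers (names here are illustrative, not from the patch):

    #include <cassert>
    #include <cstdio>
    #include <vector>

    // Assign Imm to one of four literal slots, reusing a slot on a repeat.
    static unsigned assignLiteralSlot(std::vector<long long> &Lits,
                                      long long Imm) {
      for (unsigned i = 0, e = Lits.size(); i != e; ++i)
        if (Lits[i] == Imm)
          return i;                         // reuse the existing slot
      assert(Lits.size() < 4 && "Too many literals in Instruction Group");
      Lits.push_back(Imm);                  // allocate a new slot
      return Lits.size() - 1;
    }

    int main() {
      std::vector<long long> Lits;
      unsigned A = assignLiteralSlot(Lits, 42);
      unsigned B = assignLiteralSlot(Lits, 7);
      unsigned C = assignLiteralSlot(Lits, 42);
      std::printf("%u %u %u\n", A, B, C);   // prints "0 1 0"
      return 0;
    }
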
@@ -384,56 +394,66 @@ private:
ClauseFile
MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
const {
- MachineBasicBlock::iterator ClauseHead = I;
+ MachineInstr &ClauseHead = *I;
std::vector<MachineInstr *> ClauseContent;
I++;
for (MachineBasicBlock::instr_iterator E = MBB.instr_end(); I != E;) {
- if (IsTrivialInst(I)) {
+ if (IsTrivialInst(*I)) {
++I;
continue;
}
if (!I->isBundle() && !TII->isALUInstr(I->getOpcode()))
break;
- std::vector<int64_t> Literals;
+    std::vector<MachineOperand *> Literals;
if (I->isBundle()) {
- MachineInstr *DeleteMI = I;
+ MachineInstr &DeleteMI = *I;
MachineBasicBlock::instr_iterator BI = I.getInstrIterator();
while (++BI != E && BI->isBundledWithPred()) {
BI->unbundleFromPred();
- for (unsigned i = 0, e = BI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = BI->getOperand(i);
+ for (MachineOperand &MO : BI->operands()) {
if (MO.isReg() && MO.isInternalRead())
MO.setIsInternalRead(false);
}
- getLiteral(&*BI, Literals);
+ getLiteral(*BI, Literals);
ClauseContent.push_back(&*BI);
}
I = BI;
- DeleteMI->eraseFromParent();
+ DeleteMI.eraseFromParent();
} else {
- getLiteral(I, Literals);
- ClauseContent.push_back(I);
+ getLiteral(*I, Literals);
+ ClauseContent.push_back(&*I);
I++;
}
- for (unsigned i = 0, e = Literals.size(); i < e; i+=2) {
- unsigned literal0 = Literals[i];
- unsigned literal2 = (i + 1 < e)?Literals[i + 1]:0;
- MachineInstr *MILit = BuildMI(MBB, I, I->getDebugLoc(),
- TII->get(AMDGPU::LITERALS))
- .addImm(literal0)
- .addImm(literal2);
+ for (unsigned i = 0, e = Literals.size(); i < e; i += 2) {
+ MachineInstrBuilder MILit = BuildMI(MBB, I, I->getDebugLoc(),
+ TII->get(AMDGPU::LITERALS));
+ if (Literals[i]->isImm()) {
+ MILit.addImm(Literals[i]->getImm());
+ } else {
+ MILit.addGlobalAddress(Literals[i]->getGlobal(),
+ Literals[i]->getOffset());
+ }
+ if (i + 1 < e) {
+ if (Literals[i + 1]->isImm()) {
+ MILit.addImm(Literals[i + 1]->getImm());
+ } else {
+ MILit.addGlobalAddress(Literals[i + 1]->getGlobal(),
+ Literals[i + 1]->getOffset());
+ }
+ } else
+ MILit.addImm(0);
ClauseContent.push_back(MILit);
}
}
assert(ClauseContent.size() < 128 && "ALU clause is too big");
- ClauseHead->getOperand(7).setImm(ClauseContent.size() - 1);
- return ClauseFile(ClauseHead, std::move(ClauseContent));
+ ClauseHead.getOperand(7).setImm(ClauseContent.size() - 1);
+ return ClauseFile(&ClauseHead, std::move(ClauseContent));
}
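
Each AMDGPU::LITERALS packet emitted above carries two 32-bit payloads, so the collected literals are consumed in pairs and an odd tail is padded with a zero immediate; new in this patch, a slot may hold a global address instead of an immediate. The pairing logic in isolation, as a sketch with printf standing in for BuildMI:

    #include <cstdio>
    #include <vector>

    // Emit literals two per packet, padding an odd trailing literal with 0.
    static void emitLiteralPackets(const std::vector<long long> &Literals) {
      for (unsigned i = 0, e = Literals.size(); i < e; i += 2) {
        long long Lo = Literals[i];
        long long Hi = (i + 1 < e) ? Literals[i + 1] : 0; // pad the odd tail
        std::printf("LITERALS %lld %lld\n", Lo, Hi);
      }
    }

    int main() {
      emitLiteralPackets({1, 2, 3}); // LITERALS 1 2 / LITERALS 3 0
      return 0;
    }
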
void
EmitFetchClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
unsigned &CfCount) {
- CounterPropagateAddr(Clause.first, CfCount);
+ CounterPropagateAddr(*Clause.first, CfCount);
MachineBasicBlock *BB = Clause.first->getParent();
BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::FETCH_CLAUSE))
.addImm(CfCount);
@@ -447,7 +467,7 @@ private:
EmitALUClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
unsigned &CfCount) {
Clause.first->getOperand(0).setImm(0);
- CounterPropagateAddr(Clause.first, CfCount);
+ CounterPropagateAddr(*Clause.first, CfCount);
MachineBasicBlock *BB = Clause.first->getParent();
BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::ALU_CLAUSE))
.addImm(CfCount);
@@ -457,13 +477,13 @@ private:
CfCount += Clause.second.size();
}
- void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const {
- MI->getOperand(0).setImm(Addr + MI->getOperand(0).getImm());
+ void CounterPropagateAddr(MachineInstr &MI, unsigned Addr) const {
+ MI.getOperand(0).setImm(Addr + MI.getOperand(0).getImm());
}
void CounterPropagateAddr(const std::set<MachineInstr *> &MIs,
unsigned Addr) const {
for (MachineInstr *MI : MIs) {
- CounterPropagateAddr(MI, Addr);
+ CounterPropagateAddr(*MI, Addr);
}
}
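
CounterPropagateAddr patches operand 0 of a recorded control-flow instruction from a clause-relative offset to an absolute address once the clause's position is known. The same fix-up on a plain container, as a sketch:

    #include <cassert>
    #include <vector>

    // Turn clause-relative branch targets absolute by adding the clause's
    // final address, as CounterPropagateAddr does per instruction.
    static void propagateAddr(std::vector<int> &Targets, int Addr) {
      for (int &Imm : Targets)
        Imm += Addr; // relative -> absolute
    }

    int main() {
      std::vector<int> Targets = {2, 5};
      propagateAddr(Targets, 100); // clause placed at address 100
      assert(Targets[0] == 102 && Targets[1] == 105);
      return 0;
    }
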
@@ -472,20 +492,21 @@ public:
: MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), ST(nullptr) {}
bool runOnMachineFunction(MachineFunction &MF) override {
- ST = &MF.getSubtarget<AMDGPUSubtarget>();
+ ST = &MF.getSubtarget<R600Subtarget>();
MaxFetchInst = ST->getTexVTXClauseSize();
- TII = static_cast<const R600InstrInfo *>(ST->getInstrInfo());
- TRI = static_cast<const R600RegisterInfo *>(ST->getRegisterInfo());
+ TII = ST->getInstrInfo();
+ TRI = ST->getRegisterInfo();
+
R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
- CFStack CFStack(ST, MFI->getShaderType());
+ CFStack CFStack(ST, MF.getFunction()->getCallingConv());
for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
++MB) {
MachineBasicBlock &MBB = *MB;
unsigned CfCount = 0;
std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
std::vector<MachineInstr * > IfThenElseStack;
- if (MFI->getShaderType() == ShaderType::VERTEX) {
+ if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_VS) {
BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
getHWInstrDesc(CF_CALL_FS));
CfCount++;
@@ -493,10 +514,10 @@ public:
std::vector<ClauseFile> FetchClauses, AluClauses;
std::vector<MachineInstr *> LastAlu(1);
std::vector<MachineInstr *> ToPopAfter;
-
+
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E;) {
- if (TII->usesTextureCache(I) || TII->usesVertexCache(I)) {
+ if (TII->usesTextureCache(*I) || TII->usesVertexCache(*I)) {
DEBUG(dbgs() << CfCount << ":"; I->dump(););
FetchClauses.push_back(MakeFetchClause(MBB, I));
CfCount++;
@@ -508,7 +529,7 @@ public:
if (MI->getOpcode() != AMDGPU::ENDIF)
LastAlu.back() = nullptr;
if (MI->getOpcode() == AMDGPU::CF_ALU)
- LastAlu.back() = MI;
+ LastAlu.back() = &*MI;
I++;
bool RequiresWorkAround =
CFStack.requiresWorkAroundForInst(MI->getOpcode());
@@ -571,7 +592,7 @@ public:
case AMDGPU::ELSE: {
MachineInstr * JumpInst = IfThenElseStack.back();
IfThenElseStack.pop_back();
- CounterPropagateAddr(JumpInst, CfCount);
+ CounterPropagateAddr(*JumpInst, CfCount);
MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
getHWInstrDesc(CF_ELSE))
.addImm(0)
@@ -595,10 +616,10 @@ public:
DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
CfCount++;
}
-
+
MachineInstr *IfOrElseInst = IfThenElseStack.back();
IfThenElseStack.pop_back();
- CounterPropagateAddr(IfOrElseInst, CfCount);
+ CounterPropagateAddr(*IfOrElseInst, CfCount);
IfOrElseInst->getOperand(1).setImm(1);
LastAlu.pop_back();
MI->eraseFromParent();
@@ -625,15 +646,16 @@ public:
case AMDGPU::RETURN: {
BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END));
CfCount++;
- MI->eraseFromParent();
if (CfCount % 2) {
BuildMI(MBB, I, MBB.findDebugLoc(MI), TII->get(AMDGPU::PAD));
CfCount++;
}
+ MI->eraseFromParent();
for (unsigned i = 0, e = FetchClauses.size(); i < e; i++)
EmitFetchClause(I, FetchClauses[i], CfCount);
for (unsigned i = 0, e = AluClauses.size(); i < e; i++)
EmitALUClause(I, AluClauses[i], CfCount);
+ break;
}
default:
if (TII->isExport(MI->getOpcode())) {
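
The reshuffled RETURN case above is a correctness fix, not a cleanup: the PAD builder calls MBB.findDebugLoc(MI), so erasing MI before that call read through a dead instruction, and the missing break let control fall through into the default (export) handling with MI already gone. A minimal sketch of the use-after-erase hazard, using a std::list in place of the instruction list:

    #include <cassert>
    #include <list>

    int main() {
      std::list<int> MBB = {1, 2, 3};
      std::list<int>::iterator MI = MBB.begin();
      int Loc = *MI;  // last use of *MI comes first (as in the fixed code) ...
      MBB.erase(MI);  // ... and only then is MI erased.
      assert(Loc == 1);
      return 0;
    }
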
diff --git a/lib/Target/AMDGPU/R600Defines.h b/lib/Target/AMDGPU/R600Defines.h
index 51d87eda31d1..534461adc59f 100644
--- a/lib/Target/AMDGPU/R600Defines.h
+++ b/lib/Target/AMDGPU/R600Defines.h
@@ -8,8 +8,8 @@
/// \file
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_R600DEFINES_H
-#define LLVM_LIB_TARGET_R600_R600DEFINES_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_R600DEFINES_H
+#define LLVM_LIB_TARGET_AMDGPU_R600DEFINES_H
#include "llvm/MC/MCRegisterInfo.h"
diff --git a/lib/Target/AMDGPU/R600EmitClauseMarkers.cpp b/lib/Target/AMDGPU/R600EmitClauseMarkers.cpp
index fdc20302f4a3..93ed5be94a54 100644
--- a/lib/Target/AMDGPU/R600EmitClauseMarkers.cpp
+++ b/lib/Target/AMDGPU/R600EmitClauseMarkers.cpp
@@ -38,8 +38,8 @@ private:
const R600InstrInfo *TII;
int Address;
- unsigned OccupiedDwords(MachineInstr *MI) const {
- switch (MI->getOpcode()) {
+ unsigned OccupiedDwords(MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
case AMDGPU::INTERP_PAIR_XY:
case AMDGPU::INTERP_PAIR_ZW:
case AMDGPU::INTERP_VEC_LOAD:
@@ -53,17 +53,17 @@ private:
// These will be expanded to two ALU instructions in the
// ExpandSpecialInstructions pass.
- if (TII->isLDSRetInstr(MI->getOpcode()))
+ if (TII->isLDSRetInstr(MI.getOpcode()))
return 2;
- if(TII->isVector(*MI) ||
- TII->isCubeOp(MI->getOpcode()) ||
- TII->isReductionOp(MI->getOpcode()))
+ if (TII->isVector(MI) || TII->isCubeOp(MI.getOpcode()) ||
+ TII->isReductionOp(MI.getOpcode()))
return 4;
unsigned NumLiteral = 0;
- for (MachineInstr::mop_iterator It = MI->operands_begin(),
- E = MI->operands_end(); It != E; ++It) {
+ for (MachineInstr::mop_iterator It = MI.operands_begin(),
+ E = MI.operands_end();
+ It != E; ++It) {
MachineOperand &MO = *It;
if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X)
++NumLiteral;
@@ -71,12 +71,12 @@ private:
return 1 + NumLiteral;
}
- bool isALU(const MachineInstr *MI) const {
- if (TII->isALUInstr(MI->getOpcode()))
+ bool isALU(const MachineInstr &MI) const {
+ if (TII->isALUInstr(MI.getOpcode()))
return true;
- if (TII->isVector(*MI) || TII->isCubeOp(MI->getOpcode()))
+ if (TII->isVector(MI) || TII->isCubeOp(MI.getOpcode()))
return true;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
case AMDGPU::PRED_X:
case AMDGPU::INTERP_PAIR_XY:
case AMDGPU::INTERP_PAIR_ZW:
@@ -89,8 +89,8 @@ private:
}
}
- bool IsTrivialInst(MachineInstr *MI) const {
- switch (MI->getOpcode()) {
+ bool IsTrivialInst(MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
case AMDGPU::KILL:
case AMDGPU::RETURN:
case AMDGPU::IMPLICIT_DEF:
@@ -114,18 +114,20 @@ private:
((((Sel >> 2) - 512) & 4095) >> 5) << 1);
}
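
The expression above mechanically decodes a constant selector: shift by 2 to go from a byte selector to a dword index, rebase by 512 and wrap to 12 bits, take the 32-dword line, and scale by 2. Spelled out as a sketch; the field interpretations in the comments are a reading of the expression, not taken from the patch:

    #include <cstdio>

    static unsigned kcacheIndex(unsigned Sel) {
      unsigned Dword = Sel >> 2;            // byte selector -> dword index
      unsigned Rel = (Dword - 512) & 4095;  // rebase, wrap to 12 bits
      return (Rel >> 5) << 1;               // 32-dword line, scaled by 2
    }

    int main() {
      std::printf("%u\n", kcacheIndex((512 + 64) << 2)); // line 2 -> prints 4
      return 0;
    }
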
- bool SubstituteKCacheBank(MachineInstr *MI,
- std::vector<std::pair<unsigned, unsigned> > &CachedConsts,
- bool UpdateInstr = true) const {
+ bool
+ SubstituteKCacheBank(MachineInstr &MI,
+ std::vector<std::pair<unsigned, unsigned>> &CachedConsts,
+ bool UpdateInstr = true) const {
std::vector<std::pair<unsigned, unsigned> > UsedKCache;
- if (!TII->isALUInstr(MI->getOpcode()) && MI->getOpcode() != AMDGPU::DOT_4)
+ if (!TII->isALUInstr(MI.getOpcode()) && MI.getOpcode() != AMDGPU::DOT_4)
return true;
- const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Consts =
+ const SmallVectorImpl<std::pair<MachineOperand *, int64_t>> &Consts =
TII->getSrcs(MI);
- assert((TII->isALUInstr(MI->getOpcode()) ||
- MI->getOpcode() == AMDGPU::DOT_4) && "Can't assign Const");
+ assert(
+ (TII->isALUInstr(MI.getOpcode()) || MI.getOpcode() == AMDGPU::DOT_4) &&
+ "Can't assign Const");
for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
if (Consts[i].first->getReg() != AMDGPU::ALU_CONST)
continue;
@@ -194,9 +196,9 @@ private:
// in the clause.
unsigned LastUseCount = 0;
for (MachineBasicBlock::iterator UseI = Def; UseI != BBEnd; ++UseI) {
- AluInstCount += OccupiedDwords(UseI);
+ AluInstCount += OccupiedDwords(*UseI);
// Make sure we won't need to end the clause due to KCache limitations.
- if (!SubstituteKCacheBank(UseI, KCacheBanks, false))
+ if (!SubstituteKCacheBank(*UseI, KCacheBanks, false))
return false;
// We have reached the maximum instruction limit before finding the
@@ -230,9 +232,9 @@ private:
bool PushBeforeModifier = false;
unsigned AluInstCount = 0;
for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
- if (IsTrivialInst(I))
+ if (IsTrivialInst(*I))
continue;
- if (!isALU(I))
+ if (!isALU(*I))
break;
if (AluInstCount > TII->getMaxAlusPerClause())
break;
@@ -245,7 +247,7 @@ private:
// clause as predicated alus).
if (AluInstCount > 0)
break;
- if (TII->getFlagOp(I).getImm() & MO_FLAG_PUSH)
+ if (TII->getFlagOp(*I).getImm() & MO_FLAG_PUSH)
PushBeforeModifier = true;
AluInstCount ++;
continue;
@@ -267,16 +269,16 @@ private:
if (!canClauseLocalKillFitInClause(AluInstCount, KCacheBanks, I, E))
break;
- if (!SubstituteKCacheBank(I, KCacheBanks))
+ if (!SubstituteKCacheBank(*I, KCacheBanks))
break;
- AluInstCount += OccupiedDwords(I);
+ AluInstCount += OccupiedDwords(*I);
}
unsigned Opcode = PushBeforeModifier ?
AMDGPU::CF_ALU_PUSH_BEFORE : AMDGPU::CF_ALU;
BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead), TII->get(Opcode))
// We don't use the ADDR field until R600ControlFlowFinalizer pass, where
// it is safe to assume it is 0. However if we always put 0 here, the ifcvt
- // pass may assume that identical ALU clause starter at the beginning of a
+    // pass may assume that identical ALU clause starters at the beginning of a
    // true and false branch can be factorized, which is not the case.
.addImm(Address++) // ADDR
.addImm(KCacheBanks.empty()?0:KCacheBanks[0].first) // KB0
@@ -298,7 +300,8 @@ public:
}
bool runOnMachineFunction(MachineFunction &MF) override {
- TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
+ const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
+ TII = ST.getInstrInfo();
for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
BB != BB_E; ++BB) {
@@ -307,7 +310,7 @@ public:
if (I->getOpcode() == AMDGPU::CF_ALU)
continue; // BB was already parsed
for (MachineBasicBlock::iterator E = MBB.end(); I != E;) {
- if (isALU(I))
+ if (isALU(*I))
I = MakeALUClause(MBB, I);
else
++I;
diff --git a/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp b/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
index 211d392e8fcc..0385b6283f37 100644
--- a/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
+++ b/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
@@ -29,7 +29,6 @@ using namespace llvm;
namespace {
class R600ExpandSpecialInstrsPass : public MachineFunctionPass {
-
private:
static char ID;
const R600InstrInfo *TII;
@@ -61,12 +60,13 @@ void R600ExpandSpecialInstrsPass::SetFlagInNewMI(MachineInstr *NewMI,
int OpIdx = TII->getOperandIdx(*OldMI, Op);
if (OpIdx > -1) {
uint64_t Val = OldMI->getOperand(OpIdx).getImm();
- TII->setImmOperand(NewMI, Op, Val);
+ TII->setImmOperand(*NewMI, Op, Val);
}
}
bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
- TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
+ const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
+ TII = ST.getInstrInfo();
const R600RegisterInfo &TRI = TII->getRegisterInfo();
@@ -107,11 +107,11 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
MI.getOperand(0).getReg(), // dst
MI.getOperand(1).getReg(), // src0
AMDGPU::ZERO); // src1
- TII->addFlag(PredSet, 0, MO_FLAG_MASK);
+ TII->addFlag(*PredSet, 0, MO_FLAG_MASK);
if (Flags & MO_FLAG_PUSH) {
- TII->setImmOperand(PredSet, AMDGPU::OpName::update_exec_mask, 1);
+ TII->setImmOperand(*PredSet, AMDGPU::OpName::update_exec_mask, 1);
} else {
- TII->setImmOperand(PredSet, AMDGPU::OpName::update_pred, 1);
+ TII->setImmOperand(*PredSet, AMDGPU::OpName::update_pred, 1);
}
MI.eraseFromParent();
continue;
@@ -137,9 +137,9 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
BMI->bundleWithPred();
}
if (Chan >= 2)
- TII->addFlag(BMI, 0, MO_FLAG_MASK);
+ TII->addFlag(*BMI, 0, MO_FLAG_MASK);
if (Chan != 3)
- TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
+ TII->addFlag(*BMI, 0, MO_FLAG_NOT_LAST);
}
MI.eraseFromParent();
@@ -166,9 +166,9 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
BMI->bundleWithPred();
}
if (Chan < 2)
- TII->addFlag(BMI, 0, MO_FLAG_MASK);
+ TII->addFlag(*BMI, 0, MO_FLAG_MASK);
if (Chan != 3)
- TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
+ TII->addFlag(*BMI, 0, MO_FLAG_NOT_LAST);
}
MI.eraseFromParent();
@@ -189,7 +189,7 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
BMI->bundleWithPred();
}
if (Chan != 3)
- TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
+ TII->addFlag(*BMI, 0, MO_FLAG_NOT_LAST);
}
MI.eraseFromParent();
@@ -212,10 +212,10 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
BMI->bundleWithPred();
}
if (Mask) {
- TII->addFlag(BMI, 0, MO_FLAG_MASK);
+ TII->addFlag(*BMI, 0, MO_FLAG_MASK);
}
if (Chan != 3)
- TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
+ TII->addFlag(*BMI, 0, MO_FLAG_NOT_LAST);
unsigned Opcode = BMI->getOpcode();
    // While not strictly necessary from the hw point of view, we force
// all src operands of a dot4 inst to belong to the same slot.
@@ -330,10 +330,10 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
if (Chan != 0)
NewMI->bundleWithPred();
if (Mask) {
- TII->addFlag(NewMI, 0, MO_FLAG_MASK);
+ TII->addFlag(*NewMI, 0, MO_FLAG_MASK);
}
if (NotLast) {
- TII->addFlag(NewMI, 0, MO_FLAG_NOT_LAST);
+ TII->addFlag(*NewMI, 0, MO_FLAG_NOT_LAST);
}
SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::clamp);
SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::literal);
diff --git a/lib/Target/AMDGPU/R600FrameLowering.cpp b/lib/Target/AMDGPU/R600FrameLowering.cpp
new file mode 100644
index 000000000000..dd5681ff5e8b
--- /dev/null
+++ b/lib/Target/AMDGPU/R600FrameLowering.cpp
@@ -0,0 +1,15 @@
+//===----------------------- R600FrameLowering.cpp ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "R600FrameLowering.h"
+
+using namespace llvm;
+
+R600FrameLowering::~R600FrameLowering() {
+}
diff --git a/lib/Target/AMDGPU/R600FrameLowering.h b/lib/Target/AMDGPU/R600FrameLowering.h
new file mode 100644
index 000000000000..5fe4e0d201ac
--- /dev/null
+++ b/lib/Target/AMDGPU/R600FrameLowering.h
@@ -0,0 +1,30 @@
+//===--------------------- R600FrameLowering.h ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AMDGPU_R600FRAMELOWERING_H
+#define LLVM_LIB_TARGET_AMDGPU_R600FRAMELOWERING_H
+
+#include "AMDGPUFrameLowering.h"
+
+namespace llvm {
+
+class R600FrameLowering : public AMDGPUFrameLowering {
+public:
+ R600FrameLowering(StackDirection D, unsigned StackAl, int LAO,
+ unsigned TransAl = 1) :
+ AMDGPUFrameLowering(D, StackAl, LAO, TransAl) {}
+ virtual ~R600FrameLowering();
+
+ void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const {}
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {}
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/lib/Target/AMDGPU/R600ISelLowering.cpp b/lib/Target/AMDGPU/R600ISelLowering.cpp
index 124a9c6e0f56..8f78edd76a51 100644
--- a/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -30,18 +30,61 @@
using namespace llvm;
-R600TargetLowering::R600TargetLowering(TargetMachine &TM,
- const AMDGPUSubtarget &STI)
+R600TargetLowering::R600TargetLowering(const TargetMachine &TM,
+ const R600Subtarget &STI)
: AMDGPUTargetLowering(TM, STI), Gen(STI.getGeneration()) {
- addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass);
addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass);
- addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass);
addRegisterClass(MVT::i32, &AMDGPU::R600_Reg32RegClass);
addRegisterClass(MVT::v2f32, &AMDGPU::R600_Reg64RegClass);
addRegisterClass(MVT::v2i32, &AMDGPU::R600_Reg64RegClass);
+ addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass);
+ addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass);
computeRegisterProperties(STI.getRegisterInfo());
+ // Legalize loads and stores to the private address space.
+ setOperationAction(ISD::LOAD, MVT::i32, Custom);
+ setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
+ setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
+
+ // EXTLOAD should be the same as ZEXTLOAD. It is legal for some address
+ // spaces, so it is custom lowered to handle those where it isn't.
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Custom);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Custom);
+
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Custom);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Custom);
+
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Custom);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Custom);
+ }
+
+ // Workaround for LegalizeDAG asserting on expansion of i1 vector loads.
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, MVT::v2i1, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, MVT::v2i1, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, MVT::v2i1, Expand);
+
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i1, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i1, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i1, Expand);
+
+
+ setOperationAction(ISD::STORE, MVT::i8, Custom);
+ setOperationAction(ISD::STORE, MVT::i32, Custom);
+ setOperationAction(ISD::STORE, MVT::v2i32, Custom);
+ setOperationAction(ISD::STORE, MVT::v4i32, Custom);
+
+ setTruncStoreAction(MVT::i32, MVT::i8, Custom);
+ setTruncStoreAction(MVT::i32, MVT::i16, Custom);
+
+ // Workaround for LegalizeDAG asserting on expansion of i1 vector stores.
+ setTruncStoreAction(MVT::v2i32, MVT::v2i1, Expand);
+ setTruncStoreAction(MVT::v4i32, MVT::v4i1, Expand);
+
// Set condition code actions
setCondCodeAction(ISD::SETO, MVT::f32, Expand);
setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
@@ -73,10 +116,6 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM,
setOperationAction(ISD::FSUB, MVT::f32, Expand);
- setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
- setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
- setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i1, Custom);
-
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
@@ -122,37 +161,6 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM,
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Expand);
-
- // Legalize loads and stores to the private address space.
- setOperationAction(ISD::LOAD, MVT::i32, Custom);
- setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
- setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
-
- // EXTLOAD should be the same as ZEXTLOAD. It is legal for some address
- // spaces, so it is custom lowered to handle those where it isn't.
- for (MVT VT : MVT::integer_valuetypes()) {
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Custom);
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Custom);
-
- setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Custom);
- setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Custom);
-
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Custom);
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Custom);
- }
-
- setOperationAction(ISD::STORE, MVT::i8, Custom);
- setOperationAction(ISD::STORE, MVT::i32, Custom);
- setOperationAction(ISD::STORE, MVT::v2i32, Custom);
- setOperationAction(ISD::STORE, MVT::v4i32, Custom);
- setTruncStoreAction(MVT::i32, MVT::i8, Custom);
- setTruncStoreAction(MVT::i32, MVT::i16, Custom);
-
- setOperationAction(ISD::LOAD, MVT::i32, Custom);
- setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Custom);
@@ -165,12 +173,6 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM,
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
- setTargetDAGCombine(ISD::FP_ROUND);
- setTargetDAGCombine(ISD::FP_TO_SINT);
- setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
- setTargetDAGCombine(ISD::SELECT_CC);
- setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
-
// We don't have 64-bit shifts. Thus we need either SHX i64 or SHX_PARTS i32
// to be Legal/Custom in order to avoid library calls.
setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
@@ -188,119 +190,138 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM,
}
setSchedulingPreference(Sched::Source);
+
+
+ setTargetDAGCombine(ISD::FP_ROUND);
+ setTargetDAGCombine(ISD::FP_TO_SINT);
+ setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
+ setTargetDAGCombine(ISD::SELECT_CC);
+ setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
+}
+
+const R600Subtarget *R600TargetLowering::getSubtarget() const {
+ return static_cast<const R600Subtarget *>(Subtarget);
}
static inline bool isEOP(MachineBasicBlock::iterator I) {
return std::next(I)->getOpcode() == AMDGPU::RETURN;
}
-MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
- MachineInstr * MI, MachineBasicBlock * BB) const {
+MachineBasicBlock *
+R600TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *BB) const {
MachineFunction * MF = BB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
- MachineBasicBlock::iterator I = *MI;
- const R600InstrInfo *TII =
- static_cast<const R600InstrInfo *>(Subtarget->getInstrInfo());
+ MachineBasicBlock::iterator I = MI;
+ const R600InstrInfo *TII = getSubtarget()->getInstrInfo();
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default:
// Replace LDS_*_RET instruction that don't have any uses with the
// equivalent LDS_*_NORET instruction.
- if (TII->isLDSRetInstr(MI->getOpcode())) {
- int DstIdx = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
+ if (TII->isLDSRetInstr(MI.getOpcode())) {
+ int DstIdx = TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
assert(DstIdx != -1);
MachineInstrBuilder NewMI;
// FIXME: getLDSNoRetOp method only handles LDS_1A1D LDS ops. Add
// LDS_1A2D support and remove this special case.
- if (!MRI.use_empty(MI->getOperand(DstIdx).getReg()) ||
- MI->getOpcode() == AMDGPU::LDS_CMPST_RET)
+ if (!MRI.use_empty(MI.getOperand(DstIdx).getReg()) ||
+ MI.getOpcode() == AMDGPU::LDS_CMPST_RET)
return BB;
NewMI = BuildMI(*BB, I, BB->findDebugLoc(I),
- TII->get(AMDGPU::getLDSNoRetOp(MI->getOpcode())));
- for (unsigned i = 1, e = MI->getNumOperands(); i < e; ++i) {
- NewMI.addOperand(MI->getOperand(i));
+ TII->get(AMDGPU::getLDSNoRetOp(MI.getOpcode())));
+ for (unsigned i = 1, e = MI.getNumOperands(); i < e; ++i) {
+ NewMI.addOperand(MI.getOperand(i));
}
} else {
return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
}
break;
case AMDGPU::CLAMP_R600: {
- MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
- AMDGPU::MOV,
- MI->getOperand(0).getReg(),
- MI->getOperand(1).getReg());
- TII->addFlag(NewMI, 0, MO_FLAG_CLAMP);
+ MachineInstr *NewMI = TII->buildDefaultInstruction(
+ *BB, I, AMDGPU::MOV, MI.getOperand(0).getReg(),
+ MI.getOperand(1).getReg());
+ TII->addFlag(*NewMI, 0, MO_FLAG_CLAMP);
break;
}
case AMDGPU::FABS_R600: {
- MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
- AMDGPU::MOV,
- MI->getOperand(0).getReg(),
- MI->getOperand(1).getReg());
- TII->addFlag(NewMI, 0, MO_FLAG_ABS);
+ MachineInstr *NewMI = TII->buildDefaultInstruction(
+ *BB, I, AMDGPU::MOV, MI.getOperand(0).getReg(),
+ MI.getOperand(1).getReg());
+ TII->addFlag(*NewMI, 0, MO_FLAG_ABS);
break;
}
case AMDGPU::FNEG_R600: {
- MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
- AMDGPU::MOV,
- MI->getOperand(0).getReg(),
- MI->getOperand(1).getReg());
- TII->addFlag(NewMI, 0, MO_FLAG_NEG);
+ MachineInstr *NewMI = TII->buildDefaultInstruction(
+ *BB, I, AMDGPU::MOV, MI.getOperand(0).getReg(),
+ MI.getOperand(1).getReg());
+ TII->addFlag(*NewMI, 0, MO_FLAG_NEG);
break;
}
case AMDGPU::MASK_WRITE: {
- unsigned maskedRegister = MI->getOperand(0).getReg();
+ unsigned maskedRegister = MI.getOperand(0).getReg();
assert(TargetRegisterInfo::isVirtualRegister(maskedRegister));
MachineInstr * defInstr = MRI.getVRegDef(maskedRegister);
- TII->addFlag(defInstr, 0, MO_FLAG_MASK);
+ TII->addFlag(*defInstr, 0, MO_FLAG_MASK);
break;
}
case AMDGPU::MOV_IMM_F32:
- TII->buildMovImm(*BB, I, MI->getOperand(0).getReg(),
- MI->getOperand(1).getFPImm()->getValueAPF()
- .bitcastToAPInt().getZExtValue());
+ TII->buildMovImm(*BB, I, MI.getOperand(0).getReg(), MI.getOperand(1)
+ .getFPImm()
+ ->getValueAPF()
+ .bitcastToAPInt()
+ .getZExtValue());
break;
case AMDGPU::MOV_IMM_I32:
- TII->buildMovImm(*BB, I, MI->getOperand(0).getReg(),
- MI->getOperand(1).getImm());
+ TII->buildMovImm(*BB, I, MI.getOperand(0).getReg(),
+ MI.getOperand(1).getImm());
break;
+ case AMDGPU::MOV_IMM_GLOBAL_ADDR: {
+    // TODO: Perhaps combine this instruction with the next if possible.
+ auto MIB = TII->buildDefaultInstruction(
+ *BB, MI, AMDGPU::MOV, MI.getOperand(0).getReg(), AMDGPU::ALU_LITERAL_X);
+ int Idx = TII->getOperandIdx(*MIB, AMDGPU::OpName::literal);
+    // TODO: Overwriting the literal operand in place like this is rather ugly.
+ MIB->getOperand(Idx) = MI.getOperand(1);
+ break;
+ }
case AMDGPU::CONST_COPY: {
- MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, MI, AMDGPU::MOV,
- MI->getOperand(0).getReg(), AMDGPU::ALU_CONST);
- TII->setImmOperand(NewMI, AMDGPU::OpName::src0_sel,
- MI->getOperand(1).getImm());
+ MachineInstr *NewMI = TII->buildDefaultInstruction(
+ *BB, MI, AMDGPU::MOV, MI.getOperand(0).getReg(), AMDGPU::ALU_CONST);
+ TII->setImmOperand(*NewMI, AMDGPU::OpName::src0_sel,
+ MI.getOperand(1).getImm());
break;
}
case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
case AMDGPU::RAT_WRITE_CACHELESS_64_eg:
case AMDGPU::RAT_WRITE_CACHELESS_128_eg: {
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
- .addOperand(MI->getOperand(0))
- .addOperand(MI->getOperand(1))
- .addImm(isEOP(I)); // Set End of program bit
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(1))
+ .addImm(isEOP(I)); // Set End of program bit
break;
}
case AMDGPU::RAT_STORE_TYPED_eg: {
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
- .addOperand(MI->getOperand(0))
- .addOperand(MI->getOperand(1))
- .addOperand(MI->getOperand(2))
- .addImm(isEOP(I)); // Set End of program bit
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(1))
+ .addOperand(MI.getOperand(2))
+ .addImm(isEOP(I)); // Set End of program bit
break;
}
case AMDGPU::TXD: {
unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
- MachineOperand &RID = MI->getOperand(4);
- MachineOperand &SID = MI->getOperand(5);
- unsigned TextureId = MI->getOperand(6).getImm();
+ MachineOperand &RID = MI.getOperand(4);
+ MachineOperand &SID = MI.getOperand(5);
+ unsigned TextureId = MI.getOperand(6).getImm();
unsigned SrcX = 0, SrcY = 1, SrcZ = 2, SrcW = 3;
unsigned CTX = 1, CTY = 1, CTZ = 1, CTW = 1;
@@ -333,75 +354,77 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
CTZ = 0;
break;
}
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
- .addOperand(MI->getOperand(3))
- .addImm(SrcX)
- .addImm(SrcY)
- .addImm(SrcZ)
- .addImm(SrcW)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(1)
- .addImm(2)
- .addImm(3)
- .addOperand(RID)
- .addOperand(SID)
- .addImm(CTX)
- .addImm(CTY)
- .addImm(CTZ)
- .addImm(CTW);
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
- .addOperand(MI->getOperand(2))
- .addImm(SrcX)
- .addImm(SrcY)
- .addImm(SrcZ)
- .addImm(SrcW)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(1)
- .addImm(2)
- .addImm(3)
- .addOperand(RID)
- .addOperand(SID)
- .addImm(CTX)
- .addImm(CTY)
- .addImm(CTZ)
- .addImm(CTW);
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H),
+ T0)
+ .addOperand(MI.getOperand(3))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V),
+ T1)
+ .addOperand(MI.getOperand(2))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_G))
- .addOperand(MI->getOperand(0))
- .addOperand(MI->getOperand(1))
- .addImm(SrcX)
- .addImm(SrcY)
- .addImm(SrcZ)
- .addImm(SrcW)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(1)
- .addImm(2)
- .addImm(3)
- .addOperand(RID)
- .addOperand(SID)
- .addImm(CTX)
- .addImm(CTY)
- .addImm(CTZ)
- .addImm(CTW)
- .addReg(T0, RegState::Implicit)
- .addReg(T1, RegState::Implicit);
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(1))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW)
+ .addReg(T0, RegState::Implicit)
+ .addReg(T1, RegState::Implicit);
break;
}
case AMDGPU::TXD_SHADOW: {
unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
- MachineOperand &RID = MI->getOperand(4);
- MachineOperand &SID = MI->getOperand(5);
- unsigned TextureId = MI->getOperand(6).getImm();
+ MachineOperand &RID = MI.getOperand(4);
+ MachineOperand &SID = MI.getOperand(5);
+ unsigned TextureId = MI.getOperand(6).getImm();
unsigned SrcX = 0, SrcY = 1, SrcZ = 2, SrcW = 3;
unsigned CTX = 1, CTY = 1, CTZ = 1, CTW = 1;
@@ -435,99 +458,101 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
break;
}
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
- .addOperand(MI->getOperand(3))
- .addImm(SrcX)
- .addImm(SrcY)
- .addImm(SrcZ)
- .addImm(SrcW)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(1)
- .addImm(2)
- .addImm(3)
- .addOperand(RID)
- .addOperand(SID)
- .addImm(CTX)
- .addImm(CTY)
- .addImm(CTZ)
- .addImm(CTW);
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
- .addOperand(MI->getOperand(2))
- .addImm(SrcX)
- .addImm(SrcY)
- .addImm(SrcZ)
- .addImm(SrcW)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(1)
- .addImm(2)
- .addImm(3)
- .addOperand(RID)
- .addOperand(SID)
- .addImm(CTX)
- .addImm(CTY)
- .addImm(CTZ)
- .addImm(CTW);
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H),
+ T0)
+ .addOperand(MI.getOperand(3))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V),
+ T1)
+ .addOperand(MI.getOperand(2))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_C_G))
- .addOperand(MI->getOperand(0))
- .addOperand(MI->getOperand(1))
- .addImm(SrcX)
- .addImm(SrcY)
- .addImm(SrcZ)
- .addImm(SrcW)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(1)
- .addImm(2)
- .addImm(3)
- .addOperand(RID)
- .addOperand(SID)
- .addImm(CTX)
- .addImm(CTY)
- .addImm(CTZ)
- .addImm(CTW)
- .addReg(T0, RegState::Implicit)
- .addReg(T1, RegState::Implicit);
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(1))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW)
+ .addReg(T0, RegState::Implicit)
+ .addReg(T1, RegState::Implicit);
break;
}
case AMDGPU::BRANCH:
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
- .addOperand(MI->getOperand(0));
- break;
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
+ .addOperand(MI.getOperand(0));
+ break;
case AMDGPU::BRANCH_COND_f32: {
MachineInstr *NewMI =
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
- AMDGPU::PREDICATE_BIT)
- .addOperand(MI->getOperand(1))
- .addImm(OPCODE_IS_NOT_ZERO)
- .addImm(0); // Flags
- TII->addFlag(NewMI, 0, MO_FLAG_PUSH);
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
+ AMDGPU::PREDICATE_BIT)
+ .addOperand(MI.getOperand(1))
+ .addImm(OPCODE_IS_NOT_ZERO)
+ .addImm(0); // Flags
+ TII->addFlag(*NewMI, 0, MO_FLAG_PUSH);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
- .addOperand(MI->getOperand(0))
- .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
+ .addOperand(MI.getOperand(0))
+ .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
break;
}
case AMDGPU::BRANCH_COND_i32: {
MachineInstr *NewMI =
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
- AMDGPU::PREDICATE_BIT)
- .addOperand(MI->getOperand(1))
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
+ AMDGPU::PREDICATE_BIT)
+ .addOperand(MI.getOperand(1))
.addImm(OPCODE_IS_NOT_ZERO_INT)
.addImm(0); // Flags
- TII->addFlag(NewMI, 0, MO_FLAG_PUSH);
+ TII->addFlag(*NewMI, 0, MO_FLAG_PUSH);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
- .addOperand(MI->getOperand(0))
- .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
+ .addOperand(MI.getOperand(0))
+ .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
break;
}
@@ -535,7 +560,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
case AMDGPU::R600_ExportSwz: {
    // Instruction is left unmodified if it's not the last one of its type
bool isLastInstructionOfItsType = true;
- unsigned InstExportType = MI->getOperand(1).getImm();
+ unsigned InstExportType = MI.getOperand(1).getImm();
for (MachineBasicBlock::iterator NextExportInst = std::next(I),
EndBlock = BB->end(); NextExportInst != EndBlock;
NextExportInst = std::next(NextExportInst)) {
@@ -552,17 +577,17 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
bool EOP = isEOP(I);
if (!EOP && !isLastInstructionOfItsType)
return BB;
- unsigned CfInst = (MI->getOpcode() == AMDGPU::EG_ExportSwz)? 84 : 40;
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
- .addOperand(MI->getOperand(0))
- .addOperand(MI->getOperand(1))
- .addOperand(MI->getOperand(2))
- .addOperand(MI->getOperand(3))
- .addOperand(MI->getOperand(4))
- .addOperand(MI->getOperand(5))
- .addOperand(MI->getOperand(6))
- .addImm(CfInst)
- .addImm(EOP);
+ unsigned CfInst = (MI.getOpcode() == AMDGPU::EG_ExportSwz) ? 84 : 40;
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(1))
+ .addOperand(MI.getOperand(2))
+ .addOperand(MI.getOperand(3))
+ .addOperand(MI.getOperand(4))
+ .addOperand(MI.getOperand(5))
+ .addOperand(MI.getOperand(6))
+ .addImm(CfInst)
+ .addImm(EOP);
break;
}
case AMDGPU::RETURN: {
@@ -576,7 +601,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
}
}
- MI->eraseFromParent();
+ MI.eraseFromParent();
return BB;
}
@@ -610,18 +635,13 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
+ case ISD::FrameIndex: return lowerFrameIndex(Op, DAG);
case ISD::INTRINSIC_VOID: {
SDValue Chain = Op.getOperand(0);
unsigned IntrinsicID =
cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
switch (IntrinsicID) {
- case AMDGPUIntrinsic::AMDGPU_store_output: {
- int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
- unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex);
- MFI->LiveOuts.push_back(Reg);
- return DAG.getCopyToReg(Chain, SDLoc(Op), Reg, Op.getOperand(2));
- }
- case AMDGPUIntrinsic::R600_store_swizzle: {
+ case AMDGPUIntrinsic::r600_store_swizzle: {
SDLoc DL(Op);
const SDValue Args[8] = {
Chain,
@@ -649,114 +669,48 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
SDLoc DL(Op);
switch(IntrinsicID) {
default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
- case AMDGPUIntrinsic::R600_load_input: {
- int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
- unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex);
- MachineFunction &MF = DAG.getMachineFunction();
- MachineRegisterInfo &MRI = MF.getRegInfo();
- MRI.addLiveIn(Reg);
- return DAG.getCopyFromReg(DAG.getEntryNode(),
- SDLoc(DAG.getEntryNode()), Reg, VT);
- }
-
- case AMDGPUIntrinsic::R600_interp_input: {
- int slot = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
- int ijb = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
- MachineSDNode *interp;
- if (ijb < 0) {
- const R600InstrInfo *TII =
- static_cast<const R600InstrInfo *>(Subtarget->getInstrInfo());
- interp = DAG.getMachineNode(AMDGPU::INTERP_VEC_LOAD, DL,
- MVT::v4f32, DAG.getTargetConstant(slot / 4, DL, MVT::i32));
- return DAG.getTargetExtractSubreg(
- TII->getRegisterInfo().getSubRegFromChannel(slot % 4),
- DL, MVT::f32, SDValue(interp, 0));
- }
- MachineFunction &MF = DAG.getMachineFunction();
- MachineRegisterInfo &MRI = MF.getRegInfo();
- unsigned RegisterI = AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb);
- unsigned RegisterJ = AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb + 1);
- MRI.addLiveIn(RegisterI);
- MRI.addLiveIn(RegisterJ);
- SDValue RegisterINode = DAG.getCopyFromReg(DAG.getEntryNode(),
- SDLoc(DAG.getEntryNode()), RegisterI, MVT::f32);
- SDValue RegisterJNode = DAG.getCopyFromReg(DAG.getEntryNode(),
- SDLoc(DAG.getEntryNode()), RegisterJ, MVT::f32);
-
- if (slot % 4 < 2)
- interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_XY, DL,
- MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4, DL, MVT::i32),
- RegisterJNode, RegisterINode);
- else
- interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL,
- MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4, DL, MVT::i32),
- RegisterJNode, RegisterINode);
- return SDValue(interp, slot % 2);
- }
- case AMDGPUIntrinsic::R600_interp_xy:
- case AMDGPUIntrinsic::R600_interp_zw: {
- int slot = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
- MachineSDNode *interp;
- SDValue RegisterINode = Op.getOperand(2);
- SDValue RegisterJNode = Op.getOperand(3);
-
- if (IntrinsicID == AMDGPUIntrinsic::R600_interp_xy)
- interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_XY, DL,
- MVT::f32, MVT::f32, DAG.getTargetConstant(slot, DL, MVT::i32),
- RegisterJNode, RegisterINode);
- else
- interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL,
- MVT::f32, MVT::f32, DAG.getTargetConstant(slot, DL, MVT::i32),
- RegisterJNode, RegisterINode);
- return DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f32,
- SDValue(interp, 0), SDValue(interp, 1));
- }
- case AMDGPUIntrinsic::R600_tex:
- case AMDGPUIntrinsic::R600_texc:
- case AMDGPUIntrinsic::R600_txl:
- case AMDGPUIntrinsic::R600_txlc:
- case AMDGPUIntrinsic::R600_txb:
- case AMDGPUIntrinsic::R600_txbc:
- case AMDGPUIntrinsic::R600_txf:
- case AMDGPUIntrinsic::R600_txq:
- case AMDGPUIntrinsic::R600_ddx:
- case AMDGPUIntrinsic::R600_ddy:
- case AMDGPUIntrinsic::R600_ldptr: {
+ case AMDGPUIntrinsic::r600_tex:
+ case AMDGPUIntrinsic::r600_texc:
+ case AMDGPUIntrinsic::r600_txl:
+ case AMDGPUIntrinsic::r600_txlc:
+ case AMDGPUIntrinsic::r600_txb:
+ case AMDGPUIntrinsic::r600_txbc:
+ case AMDGPUIntrinsic::r600_txf:
+ case AMDGPUIntrinsic::r600_txq:
+ case AMDGPUIntrinsic::r600_ddx:
+ case AMDGPUIntrinsic::r600_ddy: {
unsigned TextureOp;
switch (IntrinsicID) {
- case AMDGPUIntrinsic::R600_tex:
+ case AMDGPUIntrinsic::r600_tex:
TextureOp = 0;
break;
- case AMDGPUIntrinsic::R600_texc:
+ case AMDGPUIntrinsic::r600_texc:
TextureOp = 1;
break;
- case AMDGPUIntrinsic::R600_txl:
+ case AMDGPUIntrinsic::r600_txl:
TextureOp = 2;
break;
- case AMDGPUIntrinsic::R600_txlc:
+ case AMDGPUIntrinsic::r600_txlc:
TextureOp = 3;
break;
- case AMDGPUIntrinsic::R600_txb:
+ case AMDGPUIntrinsic::r600_txb:
TextureOp = 4;
break;
- case AMDGPUIntrinsic::R600_txbc:
+ case AMDGPUIntrinsic::r600_txbc:
TextureOp = 5;
break;
- case AMDGPUIntrinsic::R600_txf:
+ case AMDGPUIntrinsic::r600_txf:
TextureOp = 6;
break;
- case AMDGPUIntrinsic::R600_txq:
+ case AMDGPUIntrinsic::r600_txq:
TextureOp = 7;
break;
- case AMDGPUIntrinsic::R600_ddx:
+ case AMDGPUIntrinsic::r600_ddx:
TextureOp = 8;
break;
- case AMDGPUIntrinsic::R600_ddy:
+ case AMDGPUIntrinsic::r600_ddy:
TextureOp = 9;
break;
- case AMDGPUIntrinsic::R600_ldptr:
- TextureOp = 10;
- break;
default:
llvm_unreachable("Unknow Texture Operation");
}
@@ -784,7 +738,7 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
};
return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, MVT::v4f32, TexArgs);
}
- case AMDGPUIntrinsic::AMDGPU_dp4: {
+ case AMDGPUIntrinsic::r600_dot4: {
SDValue Args[8] = {
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
DAG.getConstant(0, DL, MVT::i32)),
@@ -806,6 +760,11 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
return DAG.getNode(AMDGPUISD::DOT4, DL, MVT::f32, Args);
}
+ case Intrinsic::r600_implicitarg_ptr: {
+ MVT PtrVT = getPointerTy(DAG.getDataLayout(), AMDGPUAS::PARAM_I_ADDRESS);
+ uint32_t ByteOffset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
+ return DAG.getConstant(ByteOffset, DL, PtrVT);
+ }
case Intrinsic::r600_read_ngroups_x:
return LowerImplicitParameter(DAG, VT, DL, 0);
case Intrinsic::r600_read_ngroups_y:
@@ -825,7 +784,8 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
case Intrinsic::r600_read_local_size_z:
return LowerImplicitParameter(DAG, VT, DL, 8);
- case Intrinsic::AMDGPU_read_workdim: {
+ case Intrinsic::r600_read_workdim:
+ case AMDGPUIntrinsic::AMDGPU_read_workdim: { // Legacy name.
uint32_t ByteOffset = getImplicitParameterOffset(MFI, GRID_DIM);
return LowerImplicitParameter(DAG, VT, DL, ByteOffset / 4);
}
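
These intrinsics are lowered to 32-bit loads at fixed dword offsets in the implicit-parameter buffer: ngroups_x sits at dword 0 and local_size_z at dword 8, with the work-dim value at an offset computed past the explicit kernel arguments. A sketch of that layout as an offset table; the slots between 0 and 8 are an assumption consistent with the visible call sites:

    #include <cstdint>
    #include <cstdio>

    // Presumed dword layout of the implicit-parameter buffer.
    enum ImplicitParamDword : uint32_t {
      NGROUPS_X = 0, NGROUPS_Y = 1, NGROUPS_Z = 2,
      GLOBAL_SIZE_X = 3, GLOBAL_SIZE_Y = 4, GLOBAL_SIZE_Z = 5,
      LOCAL_SIZE_X = 6, LOCAL_SIZE_Y = 7, LOCAL_SIZE_Z = 8
    };

    // LowerImplicitParameter issues the load at DwordOffset * 4 bytes.
    static uint32_t byteOffset(ImplicitParamDword P) { return P * 4; }

    int main() {
      std::printf("%u\n", byteOffset(LOCAL_SIZE_Z)); // prints 32
      return 0;
    }
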
@@ -848,14 +808,14 @@ SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
case Intrinsic::r600_read_tidig_z:
return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
AMDGPU::T0_Z, VT);
- case Intrinsic::AMDGPU_rsq:
- // XXX - I'm assuming SI's RSQ_LEGACY matches R600's behavior.
- return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
- case AMDGPUIntrinsic::AMDGPU_fract:
- case AMDGPUIntrinsic::AMDIL_fraction: // Legacy name.
- return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
+ case Intrinsic::r600_recipsqrt_ieee:
+ return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
+
+ case Intrinsic::r600_recipsqrt_clamped:
+ return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
}
+
// break out of case ISD::INTRINSIC_WO_CHAIN in switch(Op.getOpcode())
break;
}
@@ -950,6 +910,22 @@ SDValue R600TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
return vectorToVerticalVector(DAG, Insert);
}
+SDValue R600TargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
+ SDValue Op,
+ SelectionDAG &DAG) const {
+
+ GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
+ if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
+ return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
+
+ const DataLayout &DL = DAG.getDataLayout();
+ const GlobalValue *GV = GSD->getGlobal();
+ MVT ConstPtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
+
+ SDValue GA = DAG.getTargetGlobalAddress(GV, SDLoc(GSD), ConstPtrVT);
+ return DAG.getNode(AMDGPUISD::CONST_DATA_PTR, SDLoc(GSD), ConstPtrVT, GA);
+}
+
SDValue R600TargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
// On hw >= R700, COS/SIN input must be between -1. and 1.
// Thus we lower them to TRIG ( FRACT ( x / 2Pi + 0.5) - 0.5)
@@ -977,7 +953,7 @@ SDValue R600TargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
SDValue TrigVal = DAG.getNode(TrigNode, DL, VT,
DAG.getNode(ISD::FADD, DL, VT, FractPart,
DAG.getConstantFP(-0.5, DL, MVT::f32)));
- if (Gen >= AMDGPUSubtarget::R700)
+ if (Gen >= R600Subtarget::R700)
return TrigVal;
// On R600 hw, COS/SIN input must be between -Pi and Pi.
return DAG.getNode(ISD::FMUL, DL, VT, TrigVal,
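
The lowering follows the comment above: compute FRACT(x / 2*Pi + 0.5) - 0.5, which lands in [-0.5, 0.5) and differs from x by a whole number of turns; R700+ consumes that directly, while older chips get the value multiplied back toward [-Pi, Pi) by the FMUL above. A numeric check of the range-reduction identity, as a sketch:

    #include <cmath>
    #include <cstdio>

    const float TwoPi = 6.28318530718f;

    // TrigVal = FRACT(x / 2pi + 0.5) - 0.5, so 2pi * TrigVal == x (mod 2pi).
    static float reduce(float X) {
      float T = X / TwoPi + 0.5f;
      return (T - std::floor(T)) - 0.5f;
    }

    int main() {
      float X = 100.0f;
      std::printf("%f %f\n", std::sin(X), std::sin(TwoPi * reduce(X)));
      // both print ~ -0.506366
      return 0;
    }
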
@@ -1088,7 +1064,7 @@ SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const {
}
SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
- SDLoc DL,
+ const SDLoc &DL,
unsigned DwordOffset) const {
unsigned ByteOffset = DwordOffset * 4;
PointerType * PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
@@ -1099,8 +1075,7 @@ SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
return DAG.getLoad(VT, DL, DAG.getEntryNode(),
DAG.getConstant(ByteOffset, DL, MVT::i32), // PTR
- MachinePointerInfo(ConstantPointerNull::get(PtrType)),
- false, false, false, 0);
+ MachinePointerInfo(ConstantPointerNull::get(PtrType)));
}
bool R600TargetLowering::isZero(SDValue Op) const {
@@ -1113,6 +1088,20 @@ bool R600TargetLowering::isZero(SDValue Op) const {
}
}
+bool R600TargetLowering::isHWTrueValue(SDValue Op) const {
+ if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
+ return CFP->isExactlyValue(1.0);
+ }
+ return isAllOnesConstant(Op);
+}
+
+bool R600TargetLowering::isHWFalseValue(SDValue Op) const {
+ if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
+ return CFP->getValueAPF().isZero();
+ }
+ return isNullConstant(Op);
+}
+
SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
EVT VT = Op.getValueType();
@@ -1311,19 +1300,73 @@ void R600TargetLowering::getStackAddress(unsigned StackWidth,
}
}
+SDValue R600TargetLowering::lowerPrivateTruncStore(StoreSDNode *Store,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Store);
+
+ unsigned Mask = 0;
+ if (Store->getMemoryVT() == MVT::i8) {
+ Mask = 0xff;
+ } else if (Store->getMemoryVT() == MVT::i16) {
+ Mask = 0xffff;
+ }
+
+ SDValue Chain = Store->getChain();
+ SDValue BasePtr = Store->getBasePtr();
+ EVT MemVT = Store->getMemoryVT();
+
+ SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
+ DAG.getConstant(2, DL, MVT::i32));
+ SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
+ Chain, Ptr,
+ DAG.getTargetConstant(0, DL, MVT::i32));
+
+ SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
+ DAG.getConstant(0x3, DL, MVT::i32));
+
+ SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
+ DAG.getConstant(3, DL, MVT::i32));
+
+ SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
+ Store->getValue());
+
+ SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);
+
+ SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
+ MaskedValue, ShiftAmt);
+
+ SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32,
+ DAG.getConstant(Mask, DL, MVT::i32),
+ ShiftAmt);
+ DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
+ DAG.getConstant(0xffffffff, DL, MVT::i32));
+ Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);
+
+ SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
+ return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
+ Chain, Value, Ptr,
+ DAG.getTargetConstant(0, DL, MVT::i32));
+}
+
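
lowerPrivateTruncStore above is a read-modify-write: load the 32-bit slot that contains the target byte or short, clear that lane with a shifted mask, OR in the shifted value, and write the dword back. The same bit manipulation on plain integers, as a sketch:

    #include <cassert>
    #include <cstdint>

    // Store a sub-dword Value at ByteAddr inside a dword-addressed buffer;
    // Mask is 0xff for i8 or 0xffff for i16.
    static void truncStore(uint32_t *Buf, uint32_t ByteAddr, uint32_t Value,
                           uint32_t Mask) {
      uint32_t Ptr = ByteAddr >> 2;            // containing dword
      uint32_t ShiftAmt = (ByteAddr & 3) * 8;  // bit offset of the target lane
      uint32_t DstMask = ~(Mask << ShiftAmt);  // clear the lane ...
      Buf[Ptr] = (Buf[Ptr] & DstMask) | ((Value & Mask) << ShiftAmt); // ... OR in
    }

    int main() {
      uint32_t Buf[1] = {0xaabbccdd};
      truncStore(Buf, 1, 0x11, 0xff); // overwrite byte 1
      assert(Buf[0] == 0xaabb11dd);
      return 0;
    }
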
SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
- SDLoc DL(Op);
+ if (SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG))
+ return Result;
+
StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
- SDValue Chain = Op.getOperand(0);
- SDValue Value = Op.getOperand(1);
- SDValue Ptr = Op.getOperand(2);
+ unsigned AS = StoreNode->getAddressSpace();
+ SDValue Value = StoreNode->getValue();
+ EVT ValueVT = Value.getValueType();
- SDValue Result = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
- if (Result.getNode()) {
- return Result;
+ if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::PRIVATE_ADDRESS) &&
+ ValueVT.isVector()) {
+ return SplitVectorStore(Op, DAG);
}
- if (StoreNode->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS) {
+ SDLoc DL(Op);
+ SDValue Chain = StoreNode->getChain();
+ SDValue Ptr = StoreNode->getBasePtr();
+
+ if (AS == AMDGPUAS::GLOBAL_ADDRESS) {
if (StoreNode->isTruncatingStore()) {
EVT VT = Value.getValueType();
assert(VT.bitsLE(MVT::i32));
@@ -1352,13 +1395,13 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
DAG.getConstant(0, DL, MVT::i32),
Mask
};
- SDValue Input = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, Src);
+ SDValue Input = DAG.getBuildVector(MVT::v4i32, DL, Src);
SDValue Args[3] = { Chain, Input, DWordAddr };
return DAG.getMemIntrinsicNode(AMDGPUISD::STORE_MSKOR, DL,
Op->getVTList(), Args, MemVT,
StoreNode->getMemOperand());
} else if (Ptr->getOpcode() != AMDGPUISD::DWORDADDR &&
- Value.getValueType().bitsGE(MVT::i32)) {
+ ValueVT.bitsGE(MVT::i32)) {
// Convert pointer from byte address to dword address.
Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, Ptr.getValueType(),
DAG.getNode(ISD::SRL, DL, Ptr.getValueType(),
@@ -1373,21 +1416,16 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
}
}
- EVT ValueVT = Value.getValueType();
-
- if (StoreNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
+ if (AS != AMDGPUAS::PRIVATE_ADDRESS)
return SDValue();
- }
- SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
- if (Ret.getNode()) {
- return Ret;
- }
- // Lowering for indirect addressing
+ EVT MemVT = StoreNode->getMemoryVT();
+ if (MemVT.bitsLT(MVT::i32))
+ return lowerPrivateTruncStore(StoreNode, DAG);
+ // Lowering for indirect addressing
const MachineFunction &MF = DAG.getMachineFunction();
- const AMDGPUFrameLowering *TFL =
- static_cast<const AMDGPUFrameLowering *>(Subtarget->getFrameLowering());
+ const R600FrameLowering *TFL = getSubtarget()->getFrameLowering();
unsigned StackWidth = TFL->getStackWidth(MF);
Ptr = stackPtrToRegIndex(Ptr, StackWidth, DAG);
@@ -1465,37 +1503,81 @@ ConstantAddressBlock(unsigned AddressSpace) {
}
}
-SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
-{
- EVT VT = Op.getValueType();
+SDValue R600TargetLowering::lowerPrivateExtLoad(SDValue Op,
+ SelectionDAG &DAG) const {
SDLoc DL(Op);
- LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
- SDValue Chain = Op.getOperand(0);
- SDValue Ptr = Op.getOperand(1);
- SDValue LoweredLoad;
+ LoadSDNode *Load = cast<LoadSDNode>(Op);
+ ISD::LoadExtType ExtType = Load->getExtensionType();
+ EVT MemVT = Load->getMemoryVT();
+
+  // Pre-SI, private-address extending loads narrower than 32 bits are
+  // lowered to a 32-bit register read followed by a byte/short extract.
+
+ // Get Register holding the target.
+ SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
+ DAG.getConstant(2, DL, MVT::i32));
+ // Load the Register.
+ SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
+ Load->getChain(),
+ Ptr,
+ DAG.getTargetConstant(0, DL, MVT::i32),
+ Op.getOperand(2));
+
+ // Get offset within the register.
+ SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
+ Load->getBasePtr(),
+ DAG.getConstant(0x3, DL, MVT::i32));
+
+ // Bit offset of target byte (byteIdx * 8).
+ SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
+ DAG.getConstant(3, DL, MVT::i32));
+
+ // Shift to the right.
+ Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);
+
+ // Eliminate the upper bits by setting them to ...
+ EVT MemEltVT = MemVT.getScalarType();
+
+ // ... ones.
+ if (ExtType == ISD::SEXTLOAD) {
+ SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
+
+ SDValue Ops[] = {
+ DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode),
+ Load->getChain()
+ };
- if (SDValue Ret = AMDGPUTargetLowering::LowerLOAD(Op, DAG))
- return Ret;
+ return DAG.getMergeValues(Ops, DL);
+ }
+
+ // ... or zeros.
+ SDValue Ops[] = {
+ DAG.getZeroExtendInReg(Ret, DL, MemEltVT),
+ Load->getChain()
+ };
- // Lower loads constant address space global variable loads
- if (LoadNode->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
- isa<GlobalVariable>(GetUnderlyingObject(
- LoadNode->getMemOperand()->getValue(), DAG.getDataLayout()))) {
+ return DAG.getMergeValues(Ops, DL);
+}
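The node sequence built in lowerPrivateExtLoad reduces to shift-and-mask arithmetic on the dword containing the target byte or short. A standalone C++ sketch of that arithmetic, with hypothetical helper names that are not part of this patch:

#include <cstdint>

// The private buffer is modelled as an array of dwords; REGISTER_LOAD
// above indexes it with the byte address shifted right by 2.
uint32_t loadPrivateZExt8(const uint32_t *Memory, uint32_t ByteAddr) {
  uint32_t Dword = Memory[ByteAddr >> 2];   // SRL by 2: dword index
  uint32_t ShiftAmt = (ByteAddr & 0x3) * 8; // AND 0x3, then SHL by 3
  return (Dword >> ShiftAmt) & 0xff;        // SRL, then zero-extend-in-reg
}

int32_t loadPrivateSExt8(const uint32_t *Memory, uint32_t ByteAddr) {
  uint32_t Byte = loadPrivateZExt8(Memory, ByteAddr);
  return int32_t(Byte << 24) >> 24; // SIGN_EXTEND_INREG from i8
}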
- SDValue Ptr = DAG.getZExtOrTrunc(
- LoadNode->getBasePtr(), DL,
- getPointerTy(DAG.getDataLayout(), AMDGPUAS::PRIVATE_ADDRESS));
- Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
- DAG.getConstant(2, DL, MVT::i32));
- return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op->getVTList(),
- LoadNode->getChain(), Ptr,
- DAG.getTargetConstant(0, DL, MVT::i32),
- Op.getOperand(2));
+SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+ LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
+ unsigned AS = LoadNode->getAddressSpace();
+ EVT MemVT = LoadNode->getMemoryVT();
+ ISD::LoadExtType ExtType = LoadNode->getExtensionType();
+
+ if (AS == AMDGPUAS::PRIVATE_ADDRESS &&
+ ExtType != ISD::NON_EXTLOAD && MemVT.bitsLT(MVT::i32)) {
+ return lowerPrivateExtLoad(Op, DAG);
}
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ SDValue Chain = LoadNode->getChain();
+ SDValue Ptr = LoadNode->getBasePtr();
+
if (LoadNode->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && VT.isVector()) {
SDValue MergedValues[2] = {
- ScalarizeVectorLoad(Op, DAG),
+ scalarizeVectorLoad(LoadNode, DAG),
Chain
};
return DAG.getMergeValues(MergedValues, DL);
@@ -1526,8 +1608,7 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
NewVT = VT;
NumElements = VT.getVectorNumElements();
}
- Result = DAG.getNode(ISD::BUILD_VECTOR, DL, NewVT,
- makeArrayRef(Slots, NumElements));
+ Result = DAG.getBuildVector(NewVT, DL, makeArrayRef(Slots, NumElements));
} else {
    // A non-constant ptr can't be folded; keep it as a v4f32 load.
Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
@@ -1550,6 +1631,8 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
return DAG.getMergeValues(MergedValues, DL);
}
+ SDValue LoweredLoad;
+
// For most operations returning SDValue() will result in the node being
// expanded by the DAG Legalizer. This is not the case for ISD::LOAD, so we
// need to manually expand loads that may be legal in some address spaces and
@@ -1560,12 +1643,9 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
if (LoadNode->getExtensionType() == ISD::SEXTLOAD) {
EVT MemVT = LoadNode->getMemoryVT();
assert(!MemVT.isVector() && (MemVT == MVT::i16 || MemVT == MVT::i8));
- SDValue NewLoad = DAG.getExtLoad(ISD::EXTLOAD, DL, VT, Chain, Ptr,
- LoadNode->getPointerInfo(), MemVT,
- LoadNode->isVolatile(),
- LoadNode->isNonTemporal(),
- LoadNode->isInvariant(),
- LoadNode->getAlignment());
+ SDValue NewLoad = DAG.getExtLoad(
+ ISD::EXTLOAD, DL, VT, Chain, Ptr, LoadNode->getPointerInfo(), MemVT,
+ LoadNode->getAlignment(), LoadNode->getMemOperand()->getFlags());
SDValue Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, NewLoad,
DAG.getValueType(MemVT));
@@ -1579,8 +1659,7 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
// Lowering for indirect addressing
const MachineFunction &MF = DAG.getMachineFunction();
- const AMDGPUFrameLowering *TFL =
- static_cast<const AMDGPUFrameLowering *>(Subtarget->getFrameLowering());
+ const R600FrameLowering *TFL = getSubtarget()->getFrameLowering();
unsigned StackWidth = TFL->getStackWidth(MF);
Ptr = stackPtrToRegIndex(Ptr, StackWidth, DAG);
@@ -1590,6 +1669,7 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
EVT ElemVT = VT.getVectorElementType();
SDValue Loads[4];
+ assert(NumElemVT <= 4);
assert(NumElemVT >= StackWidth && "Stack width cannot be greater than "
"vector width in load");
@@ -1603,11 +1683,8 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
DAG.getTargetConstant(Channel, DL, MVT::i32),
Op.getOperand(2));
}
- for (unsigned i = NumElemVT; i < 4; ++i) {
- Loads[i] = DAG.getUNDEF(ElemVT);
- }
- EVT TargetVT = EVT::getVectorVT(*DAG.getContext(), ElemVT, 4);
- LoweredLoad = DAG.getNode(ISD::BUILD_VECTOR, DL, TargetVT, Loads);
+ EVT TargetVT = EVT::getVectorVT(*DAG.getContext(), ElemVT, NumElemVT);
+ LoweredLoad = DAG.getBuildVector(TargetVT, DL, makeArrayRef(Loads, NumElemVT));
} else {
LoweredLoad = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, VT,
Chain, Ptr,
@@ -1632,16 +1709,28 @@ SDValue R600TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
Chain, Jump, Cond);
}
+SDValue R600TargetLowering::lowerFrameIndex(SDValue Op,
+ SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ const R600FrameLowering *TFL = getSubtarget()->getFrameLowering();
+
+ FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);
+
+ unsigned FrameIndex = FIN->getIndex();
+ unsigned IgnoredFrameReg;
+ unsigned Offset =
+ TFL->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);
+ return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF), SDLoc(Op),
+ Op.getValueType());
+}
+
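lowerFrameIndex folds the whole frame-index reference into a constant: the slot offset from getFrameIndexReference is scaled by 4 bytes per channel times the stack width. A minimal numeric sketch under that reading (hypothetical function name):

// Slot offset N lands at byte N * 4 * StackWidth in the private buffer,
// e.g. frameIndexByteOffset(3, 2) == 24.
unsigned frameIndexByteOffset(unsigned SlotOffset, unsigned StackWidth) {
  return SlotOffset * 4 * StackWidth;
}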
/// XXX Only kernel functions are supported, so we can assume for now that
/// every function is a kernel function, but in the future we should use
/// separate calling conventions for kernel and non-kernel functions.
SDValue R600TargetLowering::LowerFormalArguments(
- SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
@@ -1664,7 +1753,7 @@ SDValue R600TargetLowering::LowerFormalArguments(
MemVT = MemVT.getVectorElementType();
}
- if (MFI->getShaderType() != ShaderType::COMPUTE) {
+ if (AMDGPU::isShader(CallConv)) {
unsigned Reg = MF.addLiveIn(VA.getLocReg(), &AMDGPU::R600_Reg128RegClass);
SDValue Register = DAG.getCopyFromReg(Chain, DL, Reg, VT);
InVals.push_back(Register);
@@ -1699,11 +1788,11 @@ SDValue R600TargetLowering::LowerFormalArguments(
unsigned Offset = 36 + VA.getLocMemOffset();
MachinePointerInfo PtrInfo(UndefValue::get(PtrTy), PartOffset - ValBase);
- SDValue Arg = DAG.getLoad(ISD::UNINDEXED, Ext, VT, DL, Chain,
- DAG.getConstant(Offset, DL, MVT::i32),
- DAG.getUNDEF(MVT::i32),
- PtrInfo,
- MemVT, false, true, true, 4);
+ SDValue Arg = DAG.getLoad(
+ ISD::UNINDEXED, Ext, VT, DL, Chain,
+ DAG.getConstant(Offset, DL, MVT::i32), DAG.getUNDEF(MVT::i32), PtrInfo,
+ MemVT, /* Alignment = */ 4,
+ MachineMemOperand::MONonTemporal | MachineMemOperand::MOInvariant);
// 4 is the preferred alignment for the CONSTANT memory space.
InVals.push_back(Arg);
@@ -1719,6 +1808,26 @@ EVT R600TargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
return VT.changeVectorElementTypeToInteger();
}
+bool R600TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
+ unsigned AddrSpace,
+ unsigned Align,
+ bool *IsFast) const {
+ if (IsFast)
+ *IsFast = false;
+
+ if (!VT.isSimple() || VT == MVT::Other)
+ return false;
+
+ if (VT.bitsLT(MVT::i32))
+ return false;
+
+ // TODO: This is a rough estimate.
+ if (IsFast)
+ *IsFast = true;
+
+ return VT.bitsGT(MVT::i32) && Align % 4 == 0;
+}
+
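Reduced to a bit width, the new override reads as below; a sketch for illustration only, not a drop-in replacement (the MVT::Other check is folded into the simple-type test):

bool allowsMisaligned(bool IsSimpleType, unsigned SizeInBits, unsigned Align,
                      bool *IsFast) {
  if (IsFast)
    *IsFast = false;
  if (!IsSimpleType || SizeInBits < 32) // !VT.isSimple(), VT.bitsLT(MVT::i32)
    return false;
  if (IsFast)
    *IsFast = true; // the "rough estimate" noted in the TODO above
  return SizeInBits > 32 && Align % 4 == 0;
}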
static SDValue CompactSwizzlableVector(
SelectionDAG &DAG, SDValue VectorEntry,
DenseMap<unsigned, unsigned> &RemapSwizzle) {
@@ -1732,7 +1841,7 @@ static SDValue CompactSwizzlableVector(
};
for (unsigned i = 0; i < 4; i++) {
- if (NewBldVec[i].getOpcode() == ISD::UNDEF)
+ if (NewBldVec[i].isUndef())
      // We mask the write here to teach later passes that the ith element of
      // this vector is undef. Thus we can use it to reduce 128-bit register
      // usage, break false dependencies and additionally make assembly
      // easier to read.
@@ -1747,7 +1856,7 @@ static SDValue CompactSwizzlableVector(
}
}
- if (NewBldVec[i].getOpcode() == ISD::UNDEF)
+ if (NewBldVec[i].isUndef())
continue;
for (unsigned j = 0; j < i; j++) {
if (NewBldVec[i] == NewBldVec[j]) {
@@ -1758,8 +1867,8 @@ static SDValue CompactSwizzlableVector(
}
}
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
- VectorEntry.getValueType(), NewBldVec);
+ return DAG.getBuildVector(VectorEntry.getValueType(), SDLoc(VectorEntry),
+ NewBldVec);
}
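The RemapSwizzle table this helper fills (and OptimizeSwizzle later consumes) is just an old-lane to new-lane map. A toy illustration of the duplicate-folding step, with plain integers standing in for SDValues and the undef/write-mask handling omitted:

#include <map>

void compactLanes(const int (&Lanes)[4], std::map<unsigned, unsigned> &Remap) {
  for (unsigned i = 0; i < 4; ++i) {
    Remap[i] = i;                 // lane survives by default
    for (unsigned j = 0; j < i; ++j) {
      if (Lanes[j] == Lanes[i]) { // duplicate: reuse the earlier lane
        Remap[i] = j;
        break;
      }
    }
  }
}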
static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
@@ -1796,14 +1905,13 @@ static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
}
}
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
- VectorEntry.getValueType(), NewBldVec);
+ return DAG.getBuildVector(VectorEntry.getValueType(), SDLoc(VectorEntry),
+ NewBldVec);
}
-
-SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector,
- SDValue Swz[4], SelectionDAG &DAG,
- SDLoc DL) const {
+SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector, SDValue Swz[4],
+ SelectionDAG &DAG,
+ const SDLoc &DL) const {
assert(BuildVector.getOpcode() == ISD::BUILD_VECTOR);
// Old -> New swizzle values
DenseMap<unsigned, unsigned> SwizzleRemap;
@@ -1886,7 +1994,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
SDLoc dl(N);
// If the inserted element is an UNDEF, just use the input vector.
- if (InVal.getOpcode() == ISD::UNDEF)
+ if (InVal.isUndef())
return InVec;
EVT VT = InVec.getValueType();
@@ -1907,7 +2015,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
Ops.append(InVec.getNode()->op_begin(),
InVec.getNode()->op_end());
- } else if (InVec.getOpcode() == ISD::UNDEF) {
+ } else if (InVec.isUndef()) {
unsigned NElts = VT.getVectorNumElements();
Ops.append(NElts, DAG.getUNDEF(InVal.getValueType()));
} else {
@@ -1927,7 +2035,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
}
// Return the new vector
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
+ return DAG.getBuildVector(VT, dl, Ops);
}
// Extract_vec (Build_vector) generated by custom lowering
@@ -1953,8 +2061,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
case ISD::SELECT_CC: {
// Try common optimizations
- SDValue Ret = AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
- if (Ret.getNode())
+ if (SDValue Ret = AMDGPUTargetLowering::PerformDAGCombine(N, DCI))
return Ret;
// fold selectcc (selectcc x, y, a, b, cc), b, a, b, seteq ->
@@ -2053,13 +2160,14 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}
-static bool
-FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg,
- SDValue &Abs, SDValue &Sel, SDValue &Imm, SelectionDAG &DAG) {
- const R600InstrInfo *TII =
- static_cast<const R600InstrInfo *>(DAG.getSubtarget().getInstrInfo());
+bool R600TargetLowering::FoldOperand(SDNode *ParentNode, unsigned SrcIdx,
+ SDValue &Src, SDValue &Neg, SDValue &Abs,
+ SDValue &Sel, SDValue &Imm,
+ SelectionDAG &DAG) const {
+ const R600InstrInfo *TII = getSubtarget()->getInstrInfo();
if (!Src.isMachineOpcode())
return false;
+
switch (Src.getMachineOpcode()) {
case AMDGPU::FNEG_R600:
if (!Neg.getNode())
@@ -2127,6 +2235,13 @@ FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg,
Src = DAG.getRegister(AMDGPU::ALU_CONST, MVT::f32);
return true;
}
+ case AMDGPU::MOV_IMM_GLOBAL_ADDR:
+    // Check if the Imm slot is already in use, as in MOV_IMM_* below.
+ if (cast<ConstantSDNode>(Imm)->getZExtValue())
+ return false;
+ Imm = Src.getOperand(0);
+ Src = DAG.getRegister(AMDGPU::ALU_LITERAL_X, MVT::i32);
+ return true;
case AMDGPU::MOV_IMM_I32:
case AMDGPU::MOV_IMM_F32: {
unsigned ImmReg = AMDGPU::ALU_LITERAL_X;
@@ -2177,14 +2292,13 @@ FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg,
}
}
-
/// \brief Fold the instructions after selecting them
SDNode *R600TargetLowering::PostISelFolding(MachineSDNode *Node,
SelectionDAG &DAG) const {
- const R600InstrInfo *TII =
- static_cast<const R600InstrInfo *>(DAG.getSubtarget().getInstrInfo());
+ const R600InstrInfo *TII = getSubtarget()->getInstrInfo();
if (!Node->isMachineOpcode())
return Node;
+
unsigned Opcode = Node->getMachineOpcode();
SDValue FakeOp;
diff --git a/lib/Target/AMDGPU/R600ISelLowering.h b/lib/Target/AMDGPU/R600ISelLowering.h
index 4dbac97af2a1..2fb6ee25caa9 100644
--- a/lib/Target/AMDGPU/R600ISelLowering.h
+++ b/lib/Target/AMDGPU/R600ISelLowering.h
@@ -12,55 +12,69 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_R600ISELLOWERING_H
-#define LLVM_LIB_TARGET_R600_R600ISELLOWERING_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_R600ISELLOWERING_H
+#define LLVM_LIB_TARGET_AMDGPU_R600ISELLOWERING_H
#include "AMDGPUISelLowering.h"
namespace llvm {
class R600InstrInfo;
+class R600Subtarget;
-class R600TargetLowering : public AMDGPUTargetLowering {
+class R600TargetLowering final : public AMDGPUTargetLowering {
public:
- R600TargetLowering(TargetMachine &TM, const AMDGPUSubtarget &STI);
- MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock * BB) const override;
+ R600TargetLowering(const TargetMachine &TM, const R600Subtarget &STI);
+
+ const R600Subtarget *getSubtarget() const;
+
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *BB) const override;
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
void ReplaceNodeResults(SDNode * N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const override;
- SDValue LowerFormalArguments(
- SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const override;
+ SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &,
EVT VT) const override;
+ bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS,
+ unsigned Align,
+ bool *IsFast) const override;
+
private:
unsigned Gen;
/// Each OpenCL kernel has nine implicit parameters that are stored in the
/// first nine dwords of a Vertex Buffer. These implicit parameters are
/// lowered to load instructions which retrieve the values from the Vertex
/// Buffer.
- SDValue LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
- SDLoc DL, unsigned DwordOffset) const;
+ SDValue LowerImplicitParameter(SelectionDAG &DAG, EVT VT, const SDLoc &DL,
+ unsigned DwordOffset) const;
void lowerImplicitParameter(MachineInstr *MI, MachineBasicBlock &BB,
MachineRegisterInfo & MRI, unsigned dword_offset) const;
SDValue OptimizeSwizzle(SDValue BuildVector, SDValue Swz[], SelectionDAG &DAG,
- SDLoc DL) const;
+ const SDLoc &DL) const;
SDValue vectorToVerticalVector(SelectionDAG &DAG, SDValue Vector) const;
+ SDValue lowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
+ SelectionDAG &DAG) const override;
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue lowerPrivateTruncStore(StoreSDNode *Store, SelectionDAG &DAG) const;
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue lowerPrivateExtLoad(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const;
@@ -74,6 +88,13 @@ private:
void getStackAddress(unsigned StackWidth, unsigned ElemIdx,
unsigned &Channel, unsigned &PtrIncr) const;
bool isZero(SDValue Op) const;
+ bool isHWTrueValue(SDValue Op) const;
+ bool isHWFalseValue(SDValue Op) const;
+
+ bool FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src,
+ SDValue &Neg, SDValue &Abs, SDValue &Sel, SDValue &Imm,
+ SelectionDAG &DAG) const;
+
SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
};
diff --git a/lib/Target/AMDGPU/R600InstrInfo.cpp b/lib/Target/AMDGPU/R600InstrInfo.cpp
index 8b6eea17130b..1c5f7ec1b6ef 100644
--- a/lib/Target/AMDGPU/R600InstrInfo.cpp
+++ b/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -28,26 +28,17 @@ using namespace llvm;
#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenDFAPacketizer.inc"
-R600InstrInfo::R600InstrInfo(const AMDGPUSubtarget &st)
- : AMDGPUInstrInfo(st), RI() {}
-
-const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
- return RI;
-}
-
-bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
- return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
-}
+R600InstrInfo::R600InstrInfo(const R600Subtarget &ST)
+ : AMDGPUInstrInfo(ST), RI(), ST(ST) {}
bool R600InstrInfo::isVector(const MachineInstr &MI) const {
return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}
-void
-R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
+void R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
unsigned VectorComponents = 0;
if ((AMDGPU::R600_Reg128RegClass.contains(DestReg) ||
AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&
@@ -91,10 +82,9 @@ bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
}
bool R600InstrInfo::isMov(unsigned Opcode) const {
-
-
switch(Opcode) {
- default: return false;
+ default:
+ return false;
case AMDGPU::MOV:
case AMDGPU::MOV_IMM_F32:
case AMDGPU::MOV_IMM_I32:
@@ -102,17 +92,6 @@ bool R600InstrInfo::isMov(unsigned Opcode) const {
}
}
-// Some instructions act as place holders to emulate operations that the GPU
-// hardware does automatically. This function can be used to check if
-// an opcode falls into this category.
-bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
- switch (Opcode) {
- default: return false;
- case AMDGPU::RETURN:
- return true;
- }
-}
-
bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
return false;
}
@@ -150,20 +129,16 @@ bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
(TargetFlags & R600_InstFlag::LDS_1A2D));
}
-bool R600InstrInfo::isLDSNoRetInstr(unsigned Opcode) const {
- return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) == -1;
-}
-
bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
}
-bool R600InstrInfo::canBeConsideredALU(const MachineInstr *MI) const {
- if (isALUInstr(MI->getOpcode()))
+bool R600InstrInfo::canBeConsideredALU(const MachineInstr &MI) const {
+ if (isALUInstr(MI.getOpcode()))
return true;
- if (isVector(*MI) || isCubeOp(MI->getOpcode()))
+ if (isVector(MI) || isCubeOp(MI.getOpcode()))
return true;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
case AMDGPU::PRED_X:
case AMDGPU::INTERP_PAIR_XY:
case AMDGPU::INTERP_PAIR_ZW:
@@ -182,16 +157,16 @@ bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
}
-bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
- return isTransOnly(MI->getOpcode());
+bool R600InstrInfo::isTransOnly(const MachineInstr &MI) const {
+ return isTransOnly(MI.getOpcode());
}
bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
}
-bool R600InstrInfo::isVectorOnly(const MachineInstr *MI) const {
- return isVectorOnly(MI->getOpcode());
+bool R600InstrInfo::isVectorOnly(const MachineInstr &MI) const {
+ return isVectorOnly(MI.getOpcode());
}
bool R600InstrInfo::isExport(unsigned Opcode) const {
@@ -202,23 +177,21 @@ bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
return ST.hasVertexCache() && IS_VTX(get(Opcode));
}
-bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
- const MachineFunction *MF = MI->getParent()->getParent();
- const R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
- return MFI->getShaderType() != ShaderType::COMPUTE &&
- usesVertexCache(MI->getOpcode());
+bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const {
+ const MachineFunction *MF = MI.getParent()->getParent();
+ return !AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
+ usesVertexCache(MI.getOpcode());
}
bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}
-bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
- const MachineFunction *MF = MI->getParent()->getParent();
- const R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
- return (MFI->getShaderType() == ShaderType::COMPUTE &&
- usesVertexCache(MI->getOpcode())) ||
- usesTextureCache(MI->getOpcode());
+bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const {
+ const MachineFunction *MF = MI.getParent()->getParent();
+ return (AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
+ usesVertexCache(MI.getOpcode())) ||
+ usesTextureCache(MI.getOpcode());
}
bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
@@ -231,20 +204,21 @@ bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
}
}
-bool R600InstrInfo::usesAddressRegister(MachineInstr *MI) const {
- return MI->findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
+bool R600InstrInfo::usesAddressRegister(MachineInstr &MI) const {
+ return MI.findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
}
-bool R600InstrInfo::definesAddressRegister(MachineInstr *MI) const {
- return MI->findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
+bool R600InstrInfo::definesAddressRegister(MachineInstr &MI) const {
+ return MI.findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
}
-bool R600InstrInfo::readsLDSSrcReg(const MachineInstr *MI) const {
- if (!isALUInstr(MI->getOpcode())) {
+bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const {
+ if (!isALUInstr(MI.getOpcode())) {
return false;
}
- for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
- E = MI->operands_end(); I != E; ++I) {
+ for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
+ E = MI.operands_end();
+ I != E; ++I) {
if (!I->isReg() || !I->isUse() ||
TargetRegisterInfo::isVirtualRegister(I->getReg()))
continue;
@@ -255,17 +229,6 @@ bool R600InstrInfo::readsLDSSrcReg(const MachineInstr *MI) const {
return false;
}
-int R600InstrInfo::getSrcIdx(unsigned Opcode, unsigned SrcNum) const {
- static const unsigned OpTable[] = {
- AMDGPU::OpName::src0,
- AMDGPU::OpName::src1,
- AMDGPU::OpName::src2
- };
-
- assert (SrcNum < 3);
- return getOperandIdx(Opcode, OpTable[SrcNum]);
-}
-
int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
static const unsigned SrcSelTable[][2] = {
{AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
@@ -290,10 +253,10 @@ int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
}
SmallVector<std::pair<MachineOperand *, int64_t>, 3>
-R600InstrInfo::getSrcs(MachineInstr *MI) const {
+R600InstrInfo::getSrcs(MachineInstr &MI) const {
SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;
- if (MI->getOpcode() == AMDGPU::DOT_4) {
+ if (MI.getOpcode() == AMDGPU::DOT_4) {
static const unsigned OpTable[8][2] = {
{AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
{AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
@@ -306,13 +269,13 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const {
};
for (unsigned j = 0; j < 8; j++) {
- MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
- OpTable[j][0]));
+ MachineOperand &MO =
+ MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][0]));
unsigned Reg = MO.getReg();
if (Reg == AMDGPU::ALU_CONST) {
- unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
- OpTable[j][1])).getImm();
- Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
+ MachineOperand &Sel =
+ MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
+ Result.push_back(std::make_pair(&MO, Sel.getImm()));
continue;
}
@@ -327,30 +290,33 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const {
};
for (unsigned j = 0; j < 3; j++) {
- int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
+ int SrcIdx = getOperandIdx(MI.getOpcode(), OpTable[j][0]);
if (SrcIdx < 0)
break;
- MachineOperand &MO = MI->getOperand(SrcIdx);
- unsigned Reg = MI->getOperand(SrcIdx).getReg();
+ MachineOperand &MO = MI.getOperand(SrcIdx);
+ unsigned Reg = MO.getReg();
if (Reg == AMDGPU::ALU_CONST) {
- unsigned Sel = MI->getOperand(
- getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
- Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
+ MachineOperand &Sel =
+ MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
+ Result.push_back(std::make_pair(&MO, Sel.getImm()));
continue;
}
if (Reg == AMDGPU::ALU_LITERAL_X) {
- unsigned Imm = MI->getOperand(
- getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm();
- Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
- continue;
+ MachineOperand &Operand =
+ MI.getOperand(getOperandIdx(MI.getOpcode(), AMDGPU::OpName::literal));
+ if (Operand.isImm()) {
+ Result.push_back(std::make_pair(&MO, Operand.getImm()));
+ continue;
+ }
+ assert(Operand.isGlobal());
}
- Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, 0));
+ Result.push_back(std::make_pair(&MO, 0));
}
return Result;
}
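getSrcs pairs each source operand with a disambiguating integer: the constant-buffer select for ALU_CONST, the literal payload for ALU_LITERAL_X, and 0 for a plain register. A compact restatement with hypothetical names:

#include <cstdint>

enum SrcRegKind { AluConst, AluLiteralX, PlainReg };

int64_t srcTag(SrcRegKind Kind, int64_t Sel, int64_t Literal) {
  switch (Kind) {
  case AluConst:
    return Sel;     // constant-buffer select
  case AluLiteralX:
    return Literal; // inline literal payload
  case PlainReg:
    return 0;       // tag unused
  }
  return 0;
}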
-std::vector<std::pair<int, unsigned> >
-R600InstrInfo::ExtractSrcs(MachineInstr *MI,
+std::vector<std::pair<int, unsigned>>
+R600InstrInfo::ExtractSrcs(MachineInstr &MI,
const DenseMap<unsigned, unsigned> &PV,
unsigned &ConstCount) const {
ConstCount = 0;
@@ -360,13 +326,13 @@ R600InstrInfo::ExtractSrcs(MachineInstr *MI,
unsigned i = 0;
for (unsigned n = Srcs.size(); i < n; ++i) {
unsigned Reg = Srcs[i].first->getReg();
- unsigned Index = RI.getEncodingValue(Reg) & 0xff;
+ int Index = RI.getEncodingValue(Reg) & 0xff;
if (Reg == AMDGPU::OQAP) {
- Result.push_back(std::pair<int, unsigned>(Index, 0));
+ Result.push_back(std::make_pair(Index, 0U));
}
if (PV.find(Reg) != PV.end()) {
      // 255 is used to tell it's a PS/PV reg.
- Result.push_back(std::pair<int, unsigned>(255, 0));
+ Result.push_back(std::make_pair(255, 0U));
continue;
}
if (Index > 127) {
@@ -375,7 +341,7 @@ R600InstrInfo::ExtractSrcs(MachineInstr *MI,
continue;
}
unsigned Chan = RI.getHWRegChan(Reg);
- Result.push_back(std::pair<int, unsigned>(Index, Chan));
+ Result.push_back(std::make_pair(Index, Chan));
}
for (; i < 3; ++i)
Result.push_back(DummyPair);
@@ -411,8 +377,7 @@ Swizzle(std::vector<std::pair<int, unsigned> > Src,
return Src;
}
-static unsigned
-getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
+static unsigned getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
switch (Swz) {
case R600InstrInfo::ALU_VEC_012_SCL_210: {
unsigned Cycles[3] = { 2, 1, 0};
@@ -432,7 +397,6 @@ getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
}
default:
llvm_unreachable("Wrong Swizzle for Trans Slot");
- return 0;
}
}
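getTransSwizzle is a pure table lookup: each trans-slot bank swizzle fixes the cycle on which each of the three operands is read. A sketch of the one row visible in this hunk (the elided rows follow the same shape):

// ALU_VEC_012_SCL_210: operand 0 reads on cycle 2, operand 1 on cycle 1,
// operand 2 on cycle 0.
unsigned transCycle012Scl210(unsigned Op) {
  static const unsigned Cycles[3] = {2, 1, 0};
  return Cycles[Op]; // Op must be in [0, 2]
}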
@@ -557,7 +521,7 @@ R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
unsigned ConstCount;
BankSwizzle TransBS = ALU_VEC_012_SCL_210;
for (unsigned i = 0, e = IG.size(); i < e; ++i) {
- IGSrcs.push_back(ExtractSrcs(IG[i], PV, ConstCount));
+ IGSrcs.push_back(ExtractSrcs(*IG[i], PV, ConstCount));
unsigned Op = getOperandIdx(IG[i]->getOpcode(),
AMDGPU::OpName::bank_swizzle);
ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
@@ -624,14 +588,13 @@ R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
std::vector<unsigned> Consts;
SmallSet<int64_t, 4> Literals;
for (unsigned i = 0, n = MIs.size(); i < n; i++) {
- MachineInstr *MI = MIs[i];
- if (!isALUInstr(MI->getOpcode()))
+ MachineInstr &MI = *MIs[i];
+ if (!isALUInstr(MI.getOpcode()))
continue;
ArrayRef<std::pair<MachineOperand *, int64_t>> Srcs = getSrcs(MI);
- for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
- std::pair<MachineOperand *, unsigned> Src = Srcs[j];
+    for (const auto &Src : Srcs) {
if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
Literals.insert(Src.second);
if (Literals.size() > 4)
@@ -652,7 +615,7 @@ R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
DFAPacketizer *
R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const {
const InstrItineraryData *II = STI.getInstrItineraryData();
- return static_cast<const AMDGPUSubtarget &>(STI).createDFAPacketizer(II);
+ return static_cast<const R600Subtarget &>(STI).createDFAPacketizer(II);
}
static bool
@@ -670,9 +633,9 @@ findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) {
while (I != MBB.begin()) {
--I;
- MachineInstr *MI = I;
- if (isPredicateSetter(MI->getOpcode()))
- return MI;
+ MachineInstr &MI = *I;
+ if (isPredicateSetter(MI.getOpcode()))
+ return &MI;
}
return nullptr;
@@ -688,12 +651,11 @@ static bool isBranch(unsigned Opcode) {
Opcode == AMDGPU::BRANCH_COND_f32;
}
-bool
-R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const {
+bool R600InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
// Most of the following comes from the ARM implementation of AnalyzeBranch
// If the block has no terminators, it just falls into the block after it.
@@ -716,21 +678,21 @@ R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
I->removeFromParent();
I = PriorI;
}
- MachineInstr *LastInst = I;
+ MachineInstr &LastInst = *I;
// If there is only one terminator instruction, process it.
- unsigned LastOpc = LastInst->getOpcode();
+ unsigned LastOpc = LastInst.getOpcode();
if (I == MBB.begin() ||
!isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
if (LastOpc == AMDGPU::JUMP) {
- TBB = LastInst->getOperand(0).getMBB();
+ TBB = LastInst.getOperand(0).getMBB();
return false;
} else if (LastOpc == AMDGPU::JUMP_COND) {
- MachineInstr *predSet = I;
+ auto predSet = I;
while (!isPredicateSetter(predSet->getOpcode())) {
predSet = --I;
}
- TBB = LastInst->getOperand(0).getMBB();
+ TBB = LastInst.getOperand(0).getMBB();
Cond.push_back(predSet->getOperand(1));
Cond.push_back(predSet->getOperand(2));
Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
@@ -740,17 +702,17 @@ R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
}
// Get the instruction before it if it is a terminator.
- MachineInstr *SecondLastInst = I;
- unsigned SecondLastOpc = SecondLastInst->getOpcode();
+ MachineInstr &SecondLastInst = *I;
+ unsigned SecondLastOpc = SecondLastInst.getOpcode();
// If the block ends with a B and a Bcc, handle it.
if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
- MachineInstr *predSet = --I;
+ auto predSet = --I;
while (!isPredicateSetter(predSet->getOpcode())) {
predSet = --I;
}
- TBB = SecondLastInst->getOperand(0).getMBB();
- FBB = LastInst->getOperand(0).getMBB();
+ TBB = SecondLastInst.getOperand(0).getMBB();
+ FBB = LastInst.getOperand(0).getMBB();
Cond.push_back(predSet->getOperand(1));
Cond.push_back(predSet->getOperand(2));
Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
@@ -772,12 +734,11 @@ MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
return MBB.end();
}
-unsigned
-R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const {
+unsigned R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const {
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
if (!FBB) {
@@ -787,7 +748,7 @@ R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
} else {
MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate!");
- addFlag(PredSet, 0, MO_FLAG_PUSH);
+ addFlag(*PredSet, 0, MO_FLAG_PUSH);
PredSet->getOperand(2).setImm(Cond[1].getImm());
BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
@@ -803,7 +764,7 @@ R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
} else {
MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate!");
- addFlag(PredSet, 0, MO_FLAG_PUSH);
+ addFlag(*PredSet, 0, MO_FLAG_PUSH);
PredSet->getOperand(2).setImm(Cond[1].getImm());
BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
.addMBB(TBB)
@@ -835,7 +796,7 @@ R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return 0;
case AMDGPU::JUMP_COND: {
MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
- clearFlag(predSet, 0, MO_FLAG_PUSH);
+ clearFlag(*predSet, 0, MO_FLAG_PUSH);
I->eraseFromParent();
MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
if (CfAlu == MBB.end())
@@ -860,7 +821,7 @@ R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return 1;
case AMDGPU::JUMP_COND: {
MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
- clearFlag(predSet, 0, MO_FLAG_PUSH);
+ clearFlag(*predSet, 0, MO_FLAG_PUSH);
I->eraseFromParent();
MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
if (CfAlu == MBB.end())
@@ -876,13 +837,12 @@ R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return 2;
}
-bool
-R600InstrInfo::isPredicated(const MachineInstr *MI) const {
- int idx = MI->findFirstPredOperandIdx();
+bool R600InstrInfo::isPredicated(const MachineInstr &MI) const {
+ int idx = MI.findFirstPredOperandIdx();
if (idx < 0)
return false;
- unsigned Reg = MI->getOperand(idx).getReg();
+ unsigned Reg = MI.getOperand(idx).getReg();
switch (Reg) {
default: return false;
case AMDGPU::PRED_SEL_ONE:
@@ -892,25 +852,22 @@ R600InstrInfo::isPredicated(const MachineInstr *MI) const {
}
}
-bool
-R600InstrInfo::isPredicable(MachineInstr *MI) const {
+bool R600InstrInfo::isPredicable(MachineInstr &MI) const {
// XXX: KILL* instructions can be predicated, but they must be the last
// instruction in a clause, so this means any instructions after them cannot
// be predicated. Until we have proper support for instruction clauses in the
// backend, we will mark KILL* instructions as unpredicable.
- if (MI->getOpcode() == AMDGPU::KILLGT) {
+ if (MI.getOpcode() == AMDGPU::KILLGT) {
return false;
- } else if (MI->getOpcode() == AMDGPU::CF_ALU) {
+ } else if (MI.getOpcode() == AMDGPU::CF_ALU) {
    // If the clause starts in the middle of the MBB, the MBB contains more
    // than a single clause; we are unable to predicate several clauses.
- if (MI->getParent()->begin() != MachineBasicBlock::iterator(MI))
+ if (MI.getParent()->begin() != MachineBasicBlock::iterator(MI))
return false;
// TODO: We don't support KC merging atm
- if (MI->getOperand(3).getImm() != 0 || MI->getOperand(4).getImm() != 0)
- return false;
- return true;
- } else if (isVector(*MI)) {
+ return MI.getOperand(3).getImm() == 0 && MI.getOperand(4).getImm() == 0;
+ } else if (isVector(MI)) {
return false;
} else {
return AMDGPUInstrInfo::isPredicable(MI);
@@ -986,48 +943,39 @@ R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) con
return false;
}
-bool
-R600InstrInfo::DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const {
- return isPredicateSetter(MI->getOpcode());
+bool R600InstrInfo::DefinesPredicate(MachineInstr &MI,
+ std::vector<MachineOperand> &Pred) const {
+ return isPredicateSetter(MI.getOpcode());
}
-bool
-R600InstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
- ArrayRef<MachineOperand> Pred2) const {
- return false;
-}
-
-
-bool
-R600InstrInfo::PredicateInstruction(MachineInstr *MI,
- ArrayRef<MachineOperand> Pred) const {
- int PIdx = MI->findFirstPredOperandIdx();
+bool R600InstrInfo::PredicateInstruction(MachineInstr &MI,
+ ArrayRef<MachineOperand> Pred) const {
+ int PIdx = MI.findFirstPredOperandIdx();
- if (MI->getOpcode() == AMDGPU::CF_ALU) {
- MI->getOperand(8).setImm(0);
+ if (MI.getOpcode() == AMDGPU::CF_ALU) {
+ MI.getOperand(8).setImm(0);
return true;
}
- if (MI->getOpcode() == AMDGPU::DOT_4) {
- MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_X))
+ if (MI.getOpcode() == AMDGPU::DOT_4) {
+ MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_X))
.setReg(Pred[2].getReg());
- MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_Y))
+ MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Y))
.setReg(Pred[2].getReg());
- MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_Z))
+ MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Z))
.setReg(Pred[2].getReg());
- MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_W))
+ MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_W))
.setReg(Pred[2].getReg());
- MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
+ MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
return true;
}
if (PIdx != -1) {
- MachineOperand &PMO = MI->getOperand(PIdx);
+ MachineOperand &PMO = MI.getOperand(PIdx);
PMO.setReg(Pred[2].getReg());
- MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
+ MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
return true;
}
@@ -1035,45 +983,94 @@ R600InstrInfo::PredicateInstruction(MachineInstr *MI,
return false;
}
-unsigned int R600InstrInfo::getPredicationCost(const MachineInstr *) const {
+unsigned int R600InstrInfo::getPredicationCost(const MachineInstr &) const {
return 2;
}
unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
- const MachineInstr *MI,
+ const MachineInstr &,
unsigned *PredCost) const {
if (PredCost)
*PredCost = 2;
return 2;
}
-bool R600InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
+unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
+ unsigned Channel) const {
+ assert(Channel == 0);
+ return RegIndex;
+}
- switch(MI->getOpcode()) {
- default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
+bool R600InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ default: {
+ MachineBasicBlock *MBB = MI.getParent();
+ int OffsetOpIdx =
+ AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::addr);
+ // addr is a custom operand with multiple MI operands, and only the
+ // first MI operand is given a name.
+ int RegOpIdx = OffsetOpIdx + 1;
+ int ChanOpIdx =
+ AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::chan);
+ if (isRegisterLoad(MI)) {
+ int DstOpIdx =
+ AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
+ unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
+ unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
+ unsigned Address = calculateIndirectAddress(RegIndex, Channel);
+ unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
+ if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
+ buildMovInstr(MBB, MI, MI.getOperand(DstOpIdx).getReg(),
+ getIndirectAddrRegClass()->getRegister(Address));
+ } else {
+ buildIndirectRead(MBB, MI, MI.getOperand(DstOpIdx).getReg(), Address,
+ OffsetReg);
+ }
+ } else if (isRegisterStore(MI)) {
+ int ValOpIdx =
+ AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::val);
+ unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
+ unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
+ unsigned Address = calculateIndirectAddress(RegIndex, Channel);
+ unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
+ if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
+ buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
+ MI.getOperand(ValOpIdx).getReg());
+ } else {
+ buildIndirectWrite(MBB, MI, MI.getOperand(ValOpIdx).getReg(),
+ calculateIndirectAddress(RegIndex, Channel),
+ OffsetReg);
+ }
+ } else {
+ return false;
+ }
+
+ MBB->erase(MI);
+ return true;
+ }
case AMDGPU::R600_EXTRACT_ELT_V2:
case AMDGPU::R600_EXTRACT_ELT_V4:
- buildIndirectRead(MI->getParent(), MI, MI->getOperand(0).getReg(),
- RI.getHWRegIndex(MI->getOperand(1).getReg()), // Address
- MI->getOperand(2).getReg(),
- RI.getHWRegChan(MI->getOperand(1).getReg()));
+ buildIndirectRead(MI.getParent(), MI, MI.getOperand(0).getReg(),
+ RI.getHWRegIndex(MI.getOperand(1).getReg()), // Address
+ MI.getOperand(2).getReg(),
+ RI.getHWRegChan(MI.getOperand(1).getReg()));
break;
case AMDGPU::R600_INSERT_ELT_V2:
case AMDGPU::R600_INSERT_ELT_V4:
- buildIndirectWrite(MI->getParent(), MI, MI->getOperand(2).getReg(), // Value
- RI.getHWRegIndex(MI->getOperand(1).getReg()), // Address
- MI->getOperand(3).getReg(), // Offset
- RI.getHWRegChan(MI->getOperand(1).getReg())); // Channel
+ buildIndirectWrite(MI.getParent(), MI, MI.getOperand(2).getReg(), // Value
+ RI.getHWRegIndex(MI.getOperand(1).getReg()), // Address
+ MI.getOperand(3).getReg(), // Offset
+ RI.getHWRegChan(MI.getOperand(1).getReg())); // Channel
break;
}
- MI->eraseFromParent();
+ MI.eraseFromParent();
return true;
}
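The new default case dispatches register-load/store pseudos on whether the offset register is INDIRECT_BASE_ADDR. Its control flow, boiled down (a sketch; the commented calls stand for the helpers used above):

enum class PseudoKind { RegisterLoad, RegisterStore, Other };

// Returns true when the pseudo was expanded and erased.
bool expandRegisterPseudo(PseudoKind Kind, bool OffsetIsIndirectBase) {
  switch (Kind) {
  case PseudoKind::RegisterLoad:
    // OffsetIsIndirectBase ? buildMovInstr(...) : buildIndirectRead(...)
    return true;
  case PseudoKind::RegisterStore:
    // OffsetIsIndirectBase ? buildMovInstr(...) : buildIndirectWrite(...)
    return true;
  case PseudoKind::Other:
    return false; // not a register load/store pseudo; nothing to expand
  }
  return false;
}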
void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
const MachineFunction &MF) const {
- const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering *>(
- MF.getSubtarget().getFrameLowering());
+ const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
+ const R600FrameLowering *TFL = ST.getFrameLowering();
unsigned StackWidth = TFL->getStackWidth(MF);
int End = getIndirectIndexEnd(MF);
@@ -1091,13 +1088,6 @@ void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
}
}
-unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
- unsigned Channel) const {
- // XXX: Remove when we support a stack width > 2
- assert(Channel == 0);
- return RegIndex;
-}
-
const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
return &AMDGPU::R600_TReg32_XRegClass;
}
@@ -1124,13 +1114,13 @@ MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
}
MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
AMDGPU::AR_X, OffsetReg);
- setImmOperand(MOVA, AMDGPU::OpName::write, 0);
+ setImmOperand(*MOVA, AMDGPU::OpName::write, 0);
MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
AddrReg, ValueReg)
.addReg(AMDGPU::AR_X,
RegState::Implicit | RegState::Kill);
- setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
+ setImmOperand(*Mov, AMDGPU::OpName::dst_rel, 1);
return Mov;
}
@@ -1157,17 +1147,74 @@ MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
AMDGPU::AR_X,
OffsetReg);
- setImmOperand(MOVA, AMDGPU::OpName::write, 0);
+ setImmOperand(*MOVA, AMDGPU::OpName::write, 0);
MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
ValueReg,
AddrReg)
.addReg(AMDGPU::AR_X,
RegState::Implicit | RegState::Kill);
- setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);
+ setImmOperand(*Mov, AMDGPU::OpName::src0_rel, 1);
return Mov;
}
+int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ int Offset = -1;
+
+ if (MFI->getNumObjects() == 0) {
+ return -1;
+ }
+
+ if (MRI.livein_empty()) {
+ return 0;
+ }
+
+ const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
+ for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
+ LE = MRI.livein_end();
+ LI != LE; ++LI) {
+ unsigned Reg = LI->first;
+ if (TargetRegisterInfo::isVirtualRegister(Reg) ||
+ !IndirectRC->contains(Reg))
+ continue;
+
+ unsigned RegIndex;
+ unsigned RegEnd;
+ for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
+ ++RegIndex) {
+ if (IndirectRC->getRegister(RegIndex) == Reg)
+ break;
+ }
+ Offset = std::max(Offset, (int)RegIndex);
+ }
+
+ return Offset + 1;
+}
+
+int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
+ int Offset = 0;
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+
+  // Variable-sized objects are not supported.
+ if (MFI->hasVarSizedObjects()) {
+ return -1;
+ }
+
+ if (MFI->getNumObjects() == 0) {
+ return -1;
+ }
+
+ const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
+ const R600FrameLowering *TFL = ST.getFrameLowering();
+
+ unsigned IgnoredFrameReg;
+ Offset = TFL->getFrameIndexReference(MF, -1, IgnoredFrameReg);
+
+ return getIndirectIndexBegin(MF) + Offset;
+}
+
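reserveIndirectRegisters, earlier in this file, walks the inclusive window these two queries define. A toy rendering of that window, with hypothetical names:

#include <vector>

// Every indirect-addressable register index in [Begin, End] gets reserved;
// a negative bound means indirect addressing is unused.
std::vector<unsigned> indirectWindow(int Begin, int End) {
  std::vector<unsigned> Indices;
  if (Begin < 0 || End < 0)
    return Indices;
  for (int I = Begin; I <= End; ++I)
    Indices.push_back(static_cast<unsigned>(I));
  return Indices;
}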
unsigned R600InstrInfo::getMaxAlusPerClause() const {
return 115;
}
@@ -1256,7 +1303,7 @@ MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
const {
assert (MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
unsigned Opcode;
- if (ST.getGeneration() <= AMDGPUSubtarget::R700)
+ if (ST.getGeneration() <= R600Subtarget::R700)
Opcode = AMDGPU::DOT4_r600;
else
Opcode = AMDGPU::DOT4_eg;
@@ -1293,7 +1340,7 @@ MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
MachineOperand &MO = MI->getOperand(
getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
assert (MO.isImm());
- setImmOperand(MIB, Operands[i], MO.getImm());
+ setImmOperand(*MIB, Operands[i], MO.getImm());
}
MIB->getOperand(20).setImm(0);
return MIB;
@@ -1305,7 +1352,7 @@ MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
uint64_t Imm) const {
MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
AMDGPU::ALU_LITERAL_X);
- setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
+ setImmOperand(*MovImm, AMDGPU::OpName::literal, Imm);
return MovImm;
}
@@ -1323,25 +1370,21 @@ int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
return AMDGPU::getNamedOperandIdx(Opcode, Op);
}
-void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
+void R600InstrInfo::setImmOperand(MachineInstr &MI, unsigned Op,
int64_t Imm) const {
- int Idx = getOperandIdx(*MI, Op);
+ int Idx = getOperandIdx(MI, Op);
assert(Idx != -1 && "Operand not supported for this instruction.");
- assert(MI->getOperand(Idx).isImm());
- MI->getOperand(Idx).setImm(Imm);
+ assert(MI.getOperand(Idx).isImm());
+ MI.getOperand(Idx).setImm(Imm);
}
//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//
-bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
- return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
-}
-
-MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
+MachineOperand &R600InstrInfo::getFlagOp(MachineInstr &MI, unsigned SrcIdx,
unsigned Flag) const {
- unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
+ unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
int FlagIndex = 0;
if (Flag != 0) {
// If we pass something other than the default value of Flag to this
@@ -1351,20 +1394,26 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
switch (Flag) {
case MO_FLAG_CLAMP:
- FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
+ FlagIndex = getOperandIdx(MI, AMDGPU::OpName::clamp);
break;
case MO_FLAG_MASK:
- FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
+ FlagIndex = getOperandIdx(MI, AMDGPU::OpName::write);
break;
case MO_FLAG_NOT_LAST:
case MO_FLAG_LAST:
- FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
+ FlagIndex = getOperandIdx(MI, AMDGPU::OpName::last);
break;
case MO_FLAG_NEG:
switch (SrcIdx) {
- case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
- case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
- case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
+ case 0:
+ FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src0_neg);
+ break;
+ case 1:
+ FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src1_neg);
+ break;
+ case 2:
+ FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src2_neg);
+ break;
}
break;
@@ -1373,8 +1422,12 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
"instructions.");
(void)IsOP3;
switch (SrcIdx) {
- case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
- case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
+ case 0:
+ FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src0_abs);
+ break;
+ case 1:
+ FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src1_abs);
+ break;
}
break;
@@ -1389,14 +1442,14 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
"Instruction flags not supported for this instruction");
}
- MachineOperand &FlagOp = MI->getOperand(FlagIndex);
+ MachineOperand &FlagOp = MI.getOperand(FlagIndex);
assert(FlagOp.isImm());
return FlagOp;
}
-void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
+void R600InstrInfo::addFlag(MachineInstr &MI, unsigned Operand,
unsigned Flag) const {
- unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
+ unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
if (Flag == 0) {
return;
}
@@ -1415,9 +1468,9 @@ void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
}
}
-void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
+void R600InstrInfo::clearFlag(MachineInstr &MI, unsigned Operand,
unsigned Flag) const {
- unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
+ unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
if (HAS_NATIVE_OPERANDS(TargetFlags)) {
MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
FlagOp.setImm(0);
@@ -1428,3 +1481,11 @@ void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
FlagOp.setImm(InstFlags);
}
}
+
+bool R600InstrInfo::isRegisterStore(const MachineInstr &MI) const {
+ return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
+}
+
+bool R600InstrInfo::isRegisterLoad(const MachineInstr &MI) const {
+ return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
+}
diff --git a/lib/Target/AMDGPU/R600InstrInfo.h b/lib/Target/AMDGPU/R600InstrInfo.h
index e7251c31107b..feaca98def44 100644
--- a/lib/Target/AMDGPU/R600InstrInfo.h
+++ b/lib/Target/AMDGPU/R600InstrInfo.h
@@ -12,30 +12,28 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_R600INSTRINFO_H
-#define LLVM_LIB_TARGET_R600_R600INSTRINFO_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_R600INSTRINFO_H
+#define LLVM_LIB_TARGET_AMDGPU_R600INSTRINFO_H
#include "AMDGPUInstrInfo.h"
-#include "R600Defines.h"
#include "R600RegisterInfo.h"
-#include <map>
namespace llvm {
-
- class AMDGPUTargetMachine;
- class DFAPacketizer;
- class ScheduleDAG;
- class MachineFunction;
- class MachineInstr;
- class MachineInstrBuilder;
-
- class R600InstrInfo : public AMDGPUInstrInfo {
- private:
+class AMDGPUTargetMachine;
+class DFAPacketizer;
+class MachineFunction;
+class MachineInstr;
+class MachineInstrBuilder;
+class R600Subtarget;
+
+class R600InstrInfo final : public AMDGPUInstrInfo {
+private:
const R600RegisterInfo RI;
+ const R600Subtarget &ST;
- std::vector<std::pair<int, unsigned> >
- ExtractSrcs(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PV, unsigned &ConstCount) const;
-
+ std::vector<std::pair<int, unsigned>>
+ ExtractSrcs(MachineInstr &MI, const DenseMap<unsigned, unsigned> &PV,
+ unsigned &ConstCount) const;
MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I,
@@ -44,11 +42,11 @@ namespace llvm {
unsigned AddrChan) const;
MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- unsigned ValueReg, unsigned Address,
- unsigned OffsetReg,
- unsigned AddrChan) const;
- public:
+ MachineBasicBlock::iterator I,
+ unsigned ValueReg, unsigned Address,
+ unsigned OffsetReg,
+ unsigned AddrChan) const;
+public:
enum BankSwizzle {
ALU_VEC_012_SCL_210 = 0,
ALU_VEC_021_SCL_122,
@@ -58,18 +56,18 @@ namespace llvm {
ALU_VEC_210
};
- explicit R600InstrInfo(const AMDGPUSubtarget &st);
+ explicit R600InstrInfo(const R600Subtarget &);
- const R600RegisterInfo &getRegisterInfo() const override;
- void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
+ const R600RegisterInfo &getRegisterInfo() const {
+ return RI;
+ }
+
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) const override;
- bool isTrig(const MachineInstr &MI) const;
- bool isPlaceHolderOpcode(unsigned opcode) const;
bool isReductionOp(unsigned opcode) const;
bool isCubeOp(unsigned opcode) const;
@@ -77,32 +75,28 @@ namespace llvm {
bool isALUInstr(unsigned Opcode) const;
bool hasInstrModifiers(unsigned Opcode) const;
bool isLDSInstr(unsigned Opcode) const;
- bool isLDSNoRetInstr(unsigned Opcode) const;
bool isLDSRetInstr(unsigned Opcode) const;
/// \returns true if this \p Opcode represents an ALU instruction or an
  /// instruction that will be lowered in the ExpandSpecialInstrs pass.
- bool canBeConsideredALU(const MachineInstr *MI) const;
+ bool canBeConsideredALU(const MachineInstr &MI) const;
bool isTransOnly(unsigned Opcode) const;
- bool isTransOnly(const MachineInstr *MI) const;
+ bool isTransOnly(const MachineInstr &MI) const;
bool isVectorOnly(unsigned Opcode) const;
- bool isVectorOnly(const MachineInstr *MI) const;
+ bool isVectorOnly(const MachineInstr &MI) const;
bool isExport(unsigned Opcode) const;
bool usesVertexCache(unsigned Opcode) const;
- bool usesVertexCache(const MachineInstr *MI) const;
+ bool usesVertexCache(const MachineInstr &MI) const;
bool usesTextureCache(unsigned Opcode) const;
- bool usesTextureCache(const MachineInstr *MI) const;
+ bool usesTextureCache(const MachineInstr &MI) const;
bool mustBeLastInClause(unsigned Opcode) const;
- bool usesAddressRegister(MachineInstr *MI) const;
- bool definesAddressRegister(MachineInstr *MI) const;
- bool readsLDSSrcReg(const MachineInstr *MI) const;
+ bool usesAddressRegister(MachineInstr &MI) const;
+ bool definesAddressRegister(MachineInstr &MI) const;
+ bool readsLDSSrcReg(const MachineInstr &MI) const;
- /// \returns The operand index for the given source number. Legal values
- /// for SrcNum are 0, 1, and 2.
- int getSrcIdx(unsigned Opcode, unsigned SrcNum) const;
  /// \returns The operand index for the Sel operand, given an index to one
/// of the instruction's src operands.
int getSelIdx(unsigned Opcode, unsigned SrcIdx) const;
@@ -113,7 +107,7 @@ namespace llvm {
  /// If the register is ALU_LITERAL, the second member is the immediate.
  /// Otherwise, the second member's value is undefined.
SmallVector<std::pair<MachineOperand *, int64_t>, 3>
- getSrcs(MachineInstr *MI) const;
+ getSrcs(MachineInstr &MI) const;
unsigned isLegalUpTo(
const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
@@ -152,89 +146,107 @@ namespace llvm {
/// instruction slots within an instruction group.
bool isVector(const MachineInstr &MI) const;
- bool isMov(unsigned Opcode) const override;
+ bool isMov(unsigned Opcode) const;
DFAPacketizer *
CreateTargetScheduleState(const TargetSubtargetInfo &) const override;
- bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+ bool ReverseBranchCondition(
+ SmallVectorImpl<MachineOperand> &Cond) const override;
- bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond, bool AllowModify) const override;
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const override;
+ const DebugLoc &DL) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
- bool isPredicated(const MachineInstr *MI) const override;
+ bool isPredicated(const MachineInstr &MI) const override;
- bool isPredicable(MachineInstr *MI) const override;
+ bool isPredicable(MachineInstr &MI) const override;
- bool
- isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCyles,
- BranchProbability Probability) const override;
+ bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCyles,
+ BranchProbability Probability) const override;
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCyles,
unsigned ExtraPredCycles,
                             BranchProbability Probability) const override;
- bool
- isProfitableToIfCvt(MachineBasicBlock &TMBB,
- unsigned NumTCycles, unsigned ExtraTCycles,
- MachineBasicBlock &FMBB,
- unsigned NumFCycles, unsigned ExtraFCycles,
- BranchProbability Probability) const override;
-
- bool DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const override;
+ bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumTCycles, unsigned ExtraTCycles,
+ MachineBasicBlock &FMBB,
+ unsigned NumFCycles, unsigned ExtraFCycles,
+ BranchProbability Probability) const override;
- bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
- ArrayRef<MachineOperand> Pred2) const override;
+ bool DefinesPredicate(MachineInstr &MI,
+ std::vector<MachineOperand> &Pred) const override;
bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
- MachineBasicBlock &FMBB) const override;
+ MachineBasicBlock &FMBB) const override;
- bool PredicateInstruction(MachineInstr *MI,
+ bool PredicateInstruction(MachineInstr &MI,
ArrayRef<MachineOperand> Pred) const override;
- unsigned int getPredicationCost(const MachineInstr *) const override;
+ unsigned int getPredicationCost(const MachineInstr &) const override;
unsigned int getInstrLatency(const InstrItineraryData *ItinData,
- const MachineInstr *MI,
+ const MachineInstr &MI,
unsigned *PredCost = nullptr) const override;
- int getInstrLatency(const InstrItineraryData *ItinData,
- SDNode *Node) const override { return 1;}
-
- bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
+ bool expandPostRAPseudo(MachineInstr &MI) const override;
  /// \brief Reserve the registers that may be accessed using indirect addressing.
void reserveIndirectRegisters(BitVector &Reserved,
const MachineFunction &MF) const;
- unsigned calculateIndirectAddress(unsigned RegIndex,
- unsigned Channel) const override;
+ /// Calculate the "Indirect Address" for the given \p RegIndex and
+ /// \p Channel
+ ///
+ /// We model indirect addressing using a virtual address space that can be
+ /// accessed with loads and stores. The "Indirect Address" is the memory
+ /// address in this virtual address space that maps to the given \p RegIndex
+ /// and \p Channel.
+ unsigned calculateIndirectAddress(unsigned RegIndex, unsigned Channel) const;
+
+ /// \returns The register class to be used for loading and storing values
+ /// from an "Indirect Address".
+ const TargetRegisterClass *getIndirectAddrRegClass() const;
+
+ /// \returns the smallest register index that will be accessed by an indirect
+ /// read or write, or -1 if indirect addressing is not used by this program.
+ int getIndirectIndexBegin(const MachineFunction &MF) const;
- const TargetRegisterClass *getIndirectAddrRegClass() const override;
+ /// \returns the largest register index that will be accessed by an indirect
+ /// read or write, or -1 if indirect addressing is not used by this program.
+ int getIndirectIndexEnd(const MachineFunction &MF) const;
+ /// \brief Build instruction(s) for an indirect register write.
+ ///
+ /// \returns The instruction that performs the indirect register write
MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- unsigned ValueReg, unsigned Address,
- unsigned OffsetReg) const override;
+ MachineBasicBlock::iterator I,
+ unsigned ValueReg, unsigned Address,
+ unsigned OffsetReg) const;
+ /// \brief Build instruction(s) for an indirect register read.
+ ///
+ /// \returns The instruction that performs the indirect register read
MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I,
unsigned ValueReg, unsigned Address,
- unsigned OffsetReg) const override;
+ unsigned OffsetReg) const;
unsigned getMaxAlusPerClause() const;
- ///buildDefaultInstruction - This function returns a MachineInstr with
- /// all the instruction modifiers initialized to their default values.
- /// You can use this function to avoid manually specifying each instruction
- /// modifier operand when building a new instruction.
+ /// buildDefaultInstruction - This function returns a MachineInstr with all
+ /// the instruction modifiers initialized to their default values. You can
+ /// use this function to avoid manually specifying each instruction modifier
+ /// operand when building a new instruction.
///
/// \returns a MachineInstr with all the instruction modifiers initialized
/// to their default values.
@@ -251,13 +263,13 @@ namespace llvm {
unsigned DstReg) const;
MachineInstr *buildMovImm(MachineBasicBlock &BB,
- MachineBasicBlock::iterator I,
- unsigned DstReg,
- uint64_t Imm) const;
+ MachineBasicBlock::iterator I,
+ unsigned DstReg,
+ uint64_t Imm) const;
MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I,
- unsigned DstReg, unsigned SrcReg) const override;
+ unsigned DstReg, unsigned SrcReg) const;
/// \brief Get the index of Op in the MachineInstr.
///
@@ -270,13 +282,10 @@ namespace llvm {
int getOperandIdx(unsigned Opcode, unsigned Op) const;
/// \brief Helper function for setting instruction flag values.
- void setImmOperand(MachineInstr *MI, unsigned Op, int64_t Imm) const;
-
- /// \returns true if this instruction has an operand for storing target flags.
- bool hasFlagOperand(const MachineInstr &MI) const;
+ void setImmOperand(MachineInstr &MI, unsigned Op, int64_t Imm) const;
///\brief Add one of the MO_FLAG* flags to the specified \p Operand.
- void addFlag(MachineInstr *MI, unsigned Operand, unsigned Flag) const;
+ void addFlag(MachineInstr &MI, unsigned Operand, unsigned Flag) const;
///\brief Determine if the specified \p Flag is set on this \p Operand.
bool isFlagSet(const MachineInstr &MI, unsigned Operand, unsigned Flag) const;
@@ -285,11 +294,15 @@ namespace llvm {
/// \param Flag The flag being set.
///
/// \returns the operand containing the flags for this instruction.
- MachineOperand &getFlagOp(MachineInstr *MI, unsigned SrcIdx = 0,
+ MachineOperand &getFlagOp(MachineInstr &MI, unsigned SrcIdx = 0,
unsigned Flag = 0) const;
/// \brief Clear the specified flag on the instruction.
- void clearFlag(MachineInstr *MI, unsigned Operand, unsigned Flag) const;
+ void clearFlag(MachineInstr &MI, unsigned Operand, unsigned Flag) const;
+
+ // Helper functions that check the opcode for status information
+ bool isRegisterStore(const MachineInstr &MI) const;
+ bool isRegisterLoad(const MachineInstr &MI) const;
};
namespace AMDGPU {
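
NOTE: the comments added above model indirect addressing as loads and stores
into a flat virtual address space keyed by (RegIndex, Channel). A minimal
sketch of such a mapping, assuming four channels per R600 GPR (which matches
the sub0..sub3 channel handling elsewhere in this patch but is not spelled
out here), would be:

    // Illustrative only: flatten a (RegIndex, Channel) pair into the
    // virtual "Indirect Address" space, assuming 4 channels per register.
    unsigned calculateIndirectAddressSketch(unsigned RegIndex, unsigned Channel) {
      assert(Channel < 4 && "R600 vector registers expose 4 channels");
      return RegIndex * 4 + Channel;
    }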
diff --git a/lib/Target/AMDGPU/R600Instructions.td b/lib/Target/AMDGPU/R600Instructions.td
index 33ef6a4e19ea..b6b576d95278 100644
--- a/lib/Target/AMDGPU/R600Instructions.td
+++ b/lib/Target/AMDGPU/R600Instructions.td
@@ -15,7 +15,7 @@
include "R600Intrinsics.td"
include "R600InstrFormats.td"
-class InstR600ISA <dag outs, dag ins, string asm, list<dag> pattern> :
+class InstR600ISA <dag outs, dag ins, string asm, list<dag> pattern = []> :
InstR600 <outs, ins, asm, pattern, NullALU> {
let Namespace = "AMDGPU";
@@ -160,7 +160,8 @@ class R600_2OP <bits<11> inst, string opName, list<dag> pattern,
let Inst{63-32} = Word1;
}
-class R600_2OP_Helper <bits<11> inst, string opName, SDPatternOperator node,
+class R600_2OP_Helper <bits<11> inst, string opName,
+ SDPatternOperator node = null_frag,
InstrItinClass itin = AnyALU> :
R600_2OP <inst, opName,
[(set R600_Reg32:$dst, (node R600_Reg32:$src0,
@@ -283,7 +284,7 @@ class EG_CF_RAT <bits <8> cfinst, bits <6> ratinst, bits<4> ratid, bits<4> mask,
}
class VTX_READ <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
- : InstR600ISA <outs, (ins MEMxi:$src_gpr), name, pattern>,
+ : InstR600ISA <outs, (ins MEMxi:$src_gpr), !strconcat(" ", name), pattern>,
VTX_WORD1_GPR {
// Static fields
@@ -328,18 +329,44 @@ class VTX_READ <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
class LoadParamFrag <PatFrag load_type> : PatFrag <
(ops node:$ptr), (load_type node:$ptr),
- [{ return isConstantLoad(dyn_cast<LoadSDNode>(N), 0); }]
+ [{ return isConstantLoad(cast<LoadSDNode>(N), 0) ||
+ (cast<LoadSDNode>(N)->getAddressSpace() == AMDGPUAS::PARAM_I_ADDRESS); }]
>;
def load_param : LoadParamFrag<load>;
def load_param_exti8 : LoadParamFrag<az_extloadi8>;
def load_param_exti16 : LoadParamFrag<az_extloadi16>;
-def isR600 : Predicate<"Subtarget->getGeneration() <= AMDGPUSubtarget::R700">;
+class LoadVtxId1 <PatFrag load> : PatFrag <
+ (ops node:$ptr), (load node:$ptr), [{
+ const MemSDNode *LD = cast<MemSDNode>(N);
+ return LD->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
+ (LD->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
+ !isa<GlobalValue>(GetUnderlyingObject(
+ LD->getMemOperand()->getValue(), CurDAG->getDataLayout())));
+}]>;
+
+def vtx_id1_az_extloadi8 : LoadVtxId1 <az_extloadi8>;
+def vtx_id1_az_extloadi16 : LoadVtxId1 <az_extloadi16>;
+def vtx_id1_load : LoadVtxId1 <load>;
+
+class LoadVtxId2 <PatFrag load> : PatFrag <
+ (ops node:$ptr), (load node:$ptr), [{
+ const MemSDNode *LD = cast<MemSDNode>(N);
+ return LD->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
+ isa<GlobalValue>(GetUnderlyingObject(
+ LD->getMemOperand()->getValue(), CurDAG->getDataLayout()));
+}]>;
+
+def vtx_id2_az_extloadi8 : LoadVtxId2 <az_extloadi8>;
+def vtx_id2_az_extloadi16 : LoadVtxId2 <az_extloadi16>;
+def vtx_id2_load : LoadVtxId2 <load>;
+
+def isR600 : Predicate<"Subtarget->getGeneration() <= R600Subtarget::R700">;
def isR600toCayman
: Predicate<
- "Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS">;
+ "Subtarget->getGeneration() <= R600Subtarget::NORTHERN_ISLANDS">;
//===----------------------------------------------------------------------===//
// R600 SDNodes
@@ -407,8 +434,7 @@ def : Pat<(TEXTURE_FETCH (i32 TextureOp), vt:$SRC_GPR,
def INTERP_VEC_LOAD : AMDGPUShaderInst <
(outs R600_Reg128:$dst),
(ins i32imm:$src0),
- "INTERP_LOAD $src0 : $dst",
- [(set R600_Reg128:$dst, (int_R600_interp_const imm:$src0))]>;
+ "INTERP_LOAD $src0 : $dst">;
def INTERP_XY : R600_2OP <0xD6, "INTERP_XY", []> {
let bank_swizzle = 5;
@@ -474,28 +500,6 @@ class ExportBufWord1 {
}
multiclass ExportPattern<Instruction ExportInst, bits<8> cf_inst> {
- def : Pat<(int_R600_store_pixel_depth R600_Reg32:$reg),
- (ExportInst
- (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), $reg, sub0),
- 0, 61, 0, 7, 7, 7, cf_inst, 0)
- >;
-
- def : Pat<(int_R600_store_pixel_stencil R600_Reg32:$reg),
- (ExportInst
- (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), $reg, sub0),
- 0, 61, 7, 0, 7, 7, cf_inst, 0)
- >;
-
- def : Pat<(int_R600_store_dummy (i32 imm:$type)),
- (ExportInst
- (v4f32 (IMPLICIT_DEF)), imm:$type, 0, 7, 7, 7, 7, cf_inst, 0)
- >;
-
- def : Pat<(int_R600_store_dummy 1),
- (ExportInst
- (v4f32 (IMPLICIT_DEF)), 1, 60, 7, 7, 7, 7, cf_inst, 0)
- >;
-
def : Pat<(EXPORT (v4f32 R600_Reg128:$src), (i32 imm:$base), (i32 imm:$type),
(i32 imm:$swz_x), (i32 imm:$swz_y), (i32 imm:$swz_z), (i32 imm:$swz_w)),
(ExportInst R600_Reg128:$src, imm:$type, imm:$base,
@@ -507,22 +511,22 @@ multiclass ExportPattern<Instruction ExportInst, bits<8> cf_inst> {
multiclass SteamOutputExportPattern<Instruction ExportInst,
bits<8> buf0inst, bits<8> buf1inst, bits<8> buf2inst, bits<8> buf3inst> {
// Stream0
- def : Pat<(int_R600_store_stream_output (v4f32 R600_Reg128:$src),
+ def : Pat<(int_r600_store_stream_output (v4f32 R600_Reg128:$src),
(i32 imm:$arraybase), (i32 0), (i32 imm:$mask)),
(ExportInst R600_Reg128:$src, 0, imm:$arraybase,
4095, imm:$mask, buf0inst, 0)>;
// Stream1
- def : Pat<(int_R600_store_stream_output (v4f32 R600_Reg128:$src),
+ def : Pat<(int_r600_store_stream_output (v4f32 R600_Reg128:$src),
(i32 imm:$arraybase), (i32 1), (i32 imm:$mask)),
(ExportInst $src, 0, imm:$arraybase,
4095, imm:$mask, buf1inst, 0)>;
// Stream2
- def : Pat<(int_R600_store_stream_output (v4f32 R600_Reg128:$src),
+ def : Pat<(int_r600_store_stream_output (v4f32 R600_Reg128:$src),
(i32 imm:$arraybase), (i32 2), (i32 imm:$mask)),
(ExportInst $src, 0, imm:$arraybase,
4095, imm:$mask, buf2inst, 0)>;
// Stream3
- def : Pat<(int_R600_store_stream_output (v4f32 R600_Reg128:$src),
+ def : Pat<(int_r600_store_stream_output (v4f32 R600_Reg128:$src),
(i32 imm:$arraybase), (i32 3), (i32 imm:$mask)),
(ExportInst $src, 0, imm:$arraybase,
4095, imm:$mask, buf3inst, 0)>;
@@ -678,7 +682,7 @@ let Predicates = [isR600toCayman] in {
def ADD : R600_2OP_Helper <0x0, "ADD", fadd>;
// Non-IEEE MUL: 0 * anything = 0
-def MUL : R600_2OP_Helper <0x1, "MUL NON-IEEE", int_AMDGPU_mul>;
+def MUL : R600_2OP_Helper <0x1, "MUL NON-IEEE">;
def MUL_IEEE : R600_2OP_Helper <0x2, "MUL_IEEE", fmul>;
// TODO: Do these actually match the regular fmin/fmax behavior?
def MAX : R600_2OP_Helper <0x3, "MAX", AMDGPUfmax_legacy>;
@@ -733,6 +737,7 @@ def SETNE_DX10 : R600_2OP <
[(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_UNE_NE))]
>;
+// FIXME: Need combine for AMDGPUfract
def FRACT : R600_1OP_Helper <0x10, "FRACT", AMDGPUfract>;
def TRUNC : R600_1OP_Helper <0x11, "TRUNC", ftrunc>;
def CEIL : R600_1OP_Helper <0x12, "CEIL", fceil>;
@@ -758,6 +763,13 @@ def : Pat <
(MOV_IMM_I32 imm:$val)
>;
+def MOV_IMM_GLOBAL_ADDR : MOV_IMM<iPTR, i32imm>;
+def : Pat <
+ (AMDGPUconstdata_ptr tglobaladdr:$addr),
+ (MOV_IMM_GLOBAL_ADDR tglobaladdr:$addr)
+>;
+
def MOV_IMM_F32 : MOV_IMM<f32, f32imm>;
def : Pat <
(fpimm:$val),
@@ -851,7 +863,7 @@ class R600_TEX <bits<11> inst, string opName> :
i32imm:$RESOURCE_ID, i32imm:$SAMPLER_ID,
CT:$COORD_TYPE_X, CT:$COORD_TYPE_Y, CT:$COORD_TYPE_Z,
CT:$COORD_TYPE_W),
- !strconcat(opName,
+ !strconcat(" ", opName,
" $DST_GPR.$DST_SEL_X$DST_SEL_Y$DST_SEL_Z$DST_SEL_W, "
"$SRC_GPR.$srcx$srcy$srcz$srcw "
"RID:$RESOURCE_ID SID:$SAMPLER_ID "
@@ -1099,14 +1111,13 @@ class RECIP_UINT_Common <bits<11> inst> : R600_1OP_Helper <
// Clamped to maximum.
class RECIPSQRT_CLAMPED_Common <bits<11> inst> : R600_1OP_Helper <
- inst, "RECIPSQRT_CLAMPED", AMDGPUrsq_clamped
+ inst, "RECIPSQRT_CLAMPED", AMDGPUrsq_clamp
> {
let Itinerary = TransALU;
}
class RECIPSQRT_IEEE_Common <bits<11> inst> : R600_1OP_Helper <
- inst, "RECIPSQRT_IEEE", AMDGPUrsq_legacy
-> {
+ inst, "RECIPSQRT_IEEE", AMDGPUrsq> {
let Itinerary = TransALU;
}
@@ -1135,11 +1146,6 @@ def FNEG_R600 : FNEG<R600_Reg32>;
// FIXME: Should be predicated on unsafe fp math.
multiclass DIV_Common <InstR600 recip_ieee> {
def : Pat<
- (int_AMDGPU_div f32:$src0, f32:$src1),
- (MUL_IEEE $src0, (recip_ieee $src1))
->;
-
-def : Pat<
(fdiv f32:$src0, f32:$src1),
(MUL_IEEE $src0, (recip_ieee $src1))
>;
@@ -1147,12 +1153,6 @@ def : Pat<
def : RcpPat<recip_ieee, f32>;
}
-class TGSI_LIT_Z_Common <InstR600 mul_lit, InstR600 log_clamped, InstR600 exp_ieee>
- : Pat <
- (int_TGSI_lit_z f32:$src_x, f32:$src_y, f32:$src_w),
- (exp_ieee (mul_lit (log_clamped (MAX $src_y, (f32 ZERO))), $src_w, $src_x))
->;
-
//===----------------------------------------------------------------------===//
// R600 / R700 Instructions
//===----------------------------------------------------------------------===//
@@ -1191,7 +1191,6 @@ let Predicates = [isR600] in {
defm DIV_r600 : DIV_Common<RECIP_IEEE_r600>;
def : POW_Common <LOG_IEEE_r600, EXP_IEEE_r600, MUL>;
- def TGSI_LIT_Z_r600 : TGSI_LIT_Z_Common<MUL_LIT_r600, LOG_CLAMPED_r600, EXP_IEEE_r600>;
def : Pat<(fsqrt f32:$src), (MUL $src, (RECIPSQRT_CLAMPED_r600 $src))>;
def : RsqPat<RECIPSQRT_IEEE_r600, f32>;
@@ -1332,9 +1331,7 @@ def TXD: InstR600 <
(outs R600_Reg128:$dst),
(ins R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2,
i32imm:$resourceId, i32imm:$samplerId, i32imm:$textureTarget),
- "TXD $dst, $src0, $src1, $src2, $resourceId, $samplerId, $textureTarget",
- [(set v4f32:$dst, (int_AMDGPU_txd v4f32:$src0, v4f32:$src1, v4f32:$src2,
- imm:$resourceId, imm:$samplerId, imm:$textureTarget))],
+ "TXD $dst, $src0, $src1, $src2, $resourceId, $samplerId, $textureTarget", [],
NullALU > {
let TEXInst = 1;
}
@@ -1344,10 +1341,7 @@ def TXD_SHADOW: InstR600 <
(ins R600_Reg128:$src0, R600_Reg128:$src1, R600_Reg128:$src2,
i32imm:$resourceId, i32imm:$samplerId, i32imm:$textureTarget),
"TXD_SHADOW $dst, $src0, $src1, $src2, $resourceId, $samplerId, $textureTarget",
- [(set v4f32:$dst, (int_AMDGPU_txd v4f32:$src0, v4f32:$src1, v4f32:$src2,
- imm:$resourceId, imm:$samplerId, TEX_SHADOW:$textureTarget))],
- NullALU
-> {
+ [], NullALU> {
let TEXInst = 1;
}
} // End isPseudo = 1
@@ -1426,8 +1420,7 @@ def TEX_VTX_CONSTBUF :
}
def TEX_VTX_TEXBUF:
- InstR600ISA <(outs R600_Reg128:$dst), (ins MEMxi:$ptr, i32imm:$BUFFER_ID), "TEX_VTX_EXPLICIT_READ $dst, $ptr",
- [(set v4f32:$dst, (int_R600_load_texbuf ADDRGA_VAR_OFFSET:$ptr, imm:$BUFFER_ID))]>,
+ InstR600ISA <(outs R600_Reg128:$dst), (ins MEMxi:$ptr, i32imm:$BUFFER_ID), "TEX_VTX_EXPLICIT_READ $dst, $ptr">,
VTX_WORD1_GPR, VTX_WORD0_eg {
let VC_INST = 0;
@@ -1542,8 +1535,9 @@ let isTerminator = 1, usesCustomInserter = 1, isBranch = 1, isBarrier = 1 in {
//===---------------------------------------------------------------------===//
let isTerminator = 1, isReturn = 1, hasCtrlDep = 1,
usesCustomInserter = 1 in {
- def RETURN : ILFormat<(outs), (ins variable_ops),
- "RETURN", [(IL_retflag)]>;
+ def RETURN : ILFormat<(outs), (ins variable_ops),
+ "RETURN", [(AMDGPUendpgm)]
+ >;
}
//===----------------------------------------------------------------------===//
@@ -1729,12 +1723,6 @@ def : DwordAddrPat <i32, R600_Reg32>;
} // End isR600toCayman Predicate
-let Predicates = [isR600] in {
-// Intrinsic patterns
-defm : Expand24IBitOps<MULLO_INT_r600, ADD_INT>;
-defm : Expand24UBitOps<MULLO_UINT_r600, ADD_INT>;
-} // End isR600
-
def getLDSNoRetOp : InstrMapping {
let FilterClass = "R600_LDS_1A1D";
let RowFields = ["BaseOp"];
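
NOTE: the LoadVtxId1/LoadVtxId2 fragments above split VTX loads by address
space and by whether the load can be traced back to a GlobalValue. Restated
as plain C++ (the function name is illustrative; the checks themselves come
from the PatFrag bodies), the buffer-id-1 predicate is:

    // Global loads, plus constant loads whose underlying object is not a
    // known GlobalValue, select the id-1 VTX path; known globals go to id 2.
    bool selectsVtxBufferId1(const MemSDNode *LD, const SelectionDAG &DAG) {
      unsigned AS = LD->getAddressSpace();
      if (AS == AMDGPUAS::GLOBAL_ADDRESS)
        return true;
      return AS == AMDGPUAS::CONSTANT_ADDRESS &&
             !isa<GlobalValue>(GetUnderlyingObject(
                 LD->getMemOperand()->getValue(), DAG.getDataLayout()));
    }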
diff --git a/lib/Target/AMDGPU/R600Intrinsics.td b/lib/Target/AMDGPU/R600Intrinsics.td
index 9681747006d9..a5310e9fd6d0 100644
--- a/lib/Target/AMDGPU/R600Intrinsics.td
+++ b/lib/Target/AMDGPU/R600Intrinsics.td
@@ -11,65 +11,57 @@
//
//===----------------------------------------------------------------------===//
-let TargetPrefix = "R600", isTarget = 1 in {
- class TextureIntrinsicFloatInput :
- Intrinsic<[llvm_v4f32_ty], [
- llvm_v4f32_ty, // Coord
- llvm_i32_ty, // offset_x
- llvm_i32_ty, // offset_y,
- llvm_i32_ty, // offset_z,
- llvm_i32_ty, // resource_id
- llvm_i32_ty, // samplerid
- llvm_i32_ty, // coord_type_x
- llvm_i32_ty, // coord_type_y
- llvm_i32_ty, // coord_type_z
- llvm_i32_ty // coord_type_w
- ], [IntrNoMem]>;
- class TextureIntrinsicInt32Input :
- Intrinsic<[llvm_v4i32_ty], [
- llvm_v4i32_ty, // Coord
- llvm_i32_ty, // offset_x
- llvm_i32_ty, // offset_y,
- llvm_i32_ty, // offset_z,
- llvm_i32_ty, // resource_id
- llvm_i32_ty, // samplerid
- llvm_i32_ty, // coord_type_x
- llvm_i32_ty, // coord_type_y
- llvm_i32_ty, // coord_type_z
- llvm_i32_ty // coord_type_w
- ], [IntrNoMem]>;
+class TextureIntrinsicFloatInput : Intrinsic<[llvm_v4f32_ty], [
+ llvm_v4f32_ty, // Coord
+ llvm_i32_ty, // offset_x
+ llvm_i32_ty, // offset_y,
+ llvm_i32_ty, // offset_z,
+ llvm_i32_ty, // resource_id
+ llvm_i32_ty, // samplerid
+ llvm_i32_ty, // coord_type_x
+ llvm_i32_ty, // coord_type_y
+ llvm_i32_ty, // coord_type_z
+ llvm_i32_ty], // coord_type_w
+ [IntrNoMem]
+>;
- def int_R600_load_input :
- Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
- def int_R600_interp_input :
- Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_R600_interp_const :
- Intrinsic<[llvm_v4f32_ty], [llvm_i32_ty], [IntrNoMem]>;
-def int_R600_interp_xy :
- Intrinsic<[llvm_v2f32_ty], [llvm_i32_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-def int_R600_interp_zw :
- Intrinsic<[llvm_v2f32_ty], [llvm_i32_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
- def int_R600_load_texbuf :
- Intrinsic<[llvm_v4f32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_R600_tex : TextureIntrinsicFloatInput;
- def int_R600_texc : TextureIntrinsicFloatInput;
- def int_R600_txl : TextureIntrinsicFloatInput;
- def int_R600_txlc : TextureIntrinsicFloatInput;
- def int_R600_txb : TextureIntrinsicFloatInput;
- def int_R600_txbc : TextureIntrinsicFloatInput;
- def int_R600_txf : TextureIntrinsicInt32Input;
- def int_R600_ldptr : TextureIntrinsicInt32Input;
- def int_R600_txq : TextureIntrinsicInt32Input;
- def int_R600_ddx : TextureIntrinsicFloatInput;
- def int_R600_ddy : TextureIntrinsicFloatInput;
- def int_R600_store_swizzle :
- Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], []>;
- def int_R600_store_stream_output :
- Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
- def int_R600_store_pixel_depth :
- Intrinsic<[], [llvm_float_ty], []>;
- def int_R600_store_pixel_stencil :
- Intrinsic<[], [llvm_float_ty], []>;
- def int_R600_store_dummy :
- Intrinsic<[], [llvm_i32_ty], []>;
-}
+class TextureIntrinsicInt32Input : Intrinsic<[llvm_v4i32_ty], [
+ llvm_v4i32_ty, // Coord
+ llvm_i32_ty, // offset_x
+ llvm_i32_ty, // offset_y,
+ llvm_i32_ty, // offset_z,
+ llvm_i32_ty, // resource_id
+ llvm_i32_ty, // samplerid
+ llvm_i32_ty, // coord_type_x
+ llvm_i32_ty, // coord_type_y
+ llvm_i32_ty, // coord_type_z
+ llvm_i32_ty], // coord_type_w
+ [IntrNoMem]
+>;
+
+let TargetPrefix = "r600", isTarget = 1 in {
+
+def int_r600_store_swizzle :
+ Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], []
+>;
+
+def int_r600_store_stream_output : Intrinsic<
+ [], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []
+>;
+
+def int_r600_tex : TextureIntrinsicFloatInput;
+def int_r600_texc : TextureIntrinsicFloatInput;
+def int_r600_txl : TextureIntrinsicFloatInput;
+def int_r600_txlc : TextureIntrinsicFloatInput;
+def int_r600_txb : TextureIntrinsicFloatInput;
+def int_r600_txbc : TextureIntrinsicFloatInput;
+def int_r600_txf : TextureIntrinsicInt32Input;
+def int_r600_txq : TextureIntrinsicInt32Input;
+def int_r600_ddx : TextureIntrinsicFloatInput;
+def int_r600_ddy : TextureIntrinsicFloatInput;
+
+def int_r600_dot4 : Intrinsic<[llvm_float_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]
+>;
+
+} // End TargetPrefix = "r600", isTarget = 1
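
NOTE: with TargetPrefix switched from "R600" to "r600", the surviving
intrinsics are reached through lowercase names (IR calls such as
@llvm.r600.store.swizzle). A minimal C++ lookup, assuming the enum values
TableGen derives from the definitions above:

    // Fetch the module-level declaration for r600.store.swizzle.
    Function *getStoreSwizzleDecl(Module &M) {
      return Intrinsic::getDeclaration(&M, Intrinsic::r600_store_swizzle);
    }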
diff --git a/lib/Target/AMDGPU/R600MachineFunctionInfo.h b/lib/Target/AMDGPU/R600MachineFunctionInfo.h
index 263561edd30d..04a4436ebe03 100644
--- a/lib/Target/AMDGPU/R600MachineFunctionInfo.h
+++ b/lib/Target/AMDGPU/R600MachineFunctionInfo.h
@@ -10,17 +10,16 @@
/// \file
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_R600MACHINEFUNCTIONINFO_H
-#define LLVM_LIB_TARGET_R600_R600MACHINEFUNCTIONINFO_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_R600MACHINEFUNCTIONINFO_H
+#define LLVM_LIB_TARGET_AMDGPU_R600MACHINEFUNCTIONINFO_H
#include "AMDGPUMachineFunction.h"
-#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include <vector>
namespace llvm {
-class R600MachineFunctionInfo : public AMDGPUMachineFunction {
+class R600MachineFunctionInfo final : public AMDGPUMachineFunction {
void anchor() override;
public:
R600MachineFunctionInfo(const MachineFunction &MF);
diff --git a/lib/Target/AMDGPU/R600MachineScheduler.cpp b/lib/Target/AMDGPU/R600MachineScheduler.cpp
index bcde5fb50dac..db18e5bd1afa 100644
--- a/lib/Target/AMDGPU/R600MachineScheduler.cpp
+++ b/lib/Target/AMDGPU/R600MachineScheduler.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "R600MachineScheduler.h"
+#include "R600InstrInfo.h"
#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Pass.h"
@@ -26,7 +27,7 @@ using namespace llvm;
void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
assert(dag->hasVRegLiveness() && "R600SchedStrategy needs vreg liveness");
DAG = static_cast<ScheduleDAGMILive*>(dag);
- const AMDGPUSubtarget &ST = DAG->MF.getSubtarget<AMDGPUSubtarget>();
+ const R600Subtarget &ST = DAG->MF.getSubtarget<R600Subtarget>();
TII = static_cast<const R600InstrInfo*>(DAG->TII);
TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
VLIW5 = !ST.hasCaymanISA();
@@ -48,8 +49,7 @@ void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc,
QSrc.clear();
}
-static
-unsigned getWFCountLimitedByGPR(unsigned GPRCount) {
+static unsigned getWFCountLimitedByGPR(unsigned GPRCount) {
assert (GPRCount && "GPRCount cannot be 0");
return 248 / GPRCount;
}
@@ -222,75 +222,74 @@ bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
MachineInstr *MI = SU->getInstr();
- if (TII->isTransOnly(MI))
+ if (TII->isTransOnly(*MI))
return AluTrans;
- switch (MI->getOpcode()) {
- case AMDGPU::PRED_X:
- return AluPredX;
- case AMDGPU::INTERP_PAIR_XY:
- case AMDGPU::INTERP_PAIR_ZW:
- case AMDGPU::INTERP_VEC_LOAD:
- case AMDGPU::DOT_4:
- return AluT_XYZW;
- case AMDGPU::COPY:
- if (MI->getOperand(1).isUndef()) {
- // MI will become a KILL, don't considers it in scheduling
- return AluDiscarded;
- }
- default:
- break;
- }
-
- // Does the instruction take a whole IG ?
- // XXX: Is it possible to add a helper function in R600InstrInfo that can
- // be used here and in R600PacketizerList::isSoloInstruction() ?
- if(TII->isVector(*MI) ||
- TII->isCubeOp(MI->getOpcode()) ||
- TII->isReductionOp(MI->getOpcode()) ||
- MI->getOpcode() == AMDGPU::GROUP_BARRIER) {
- return AluT_XYZW;
+ switch (MI->getOpcode()) {
+ case AMDGPU::PRED_X:
+ return AluPredX;
+ case AMDGPU::INTERP_PAIR_XY:
+ case AMDGPU::INTERP_PAIR_ZW:
+ case AMDGPU::INTERP_VEC_LOAD:
+ case AMDGPU::DOT_4:
+ return AluT_XYZW;
+ case AMDGPU::COPY:
+ if (MI->getOperand(1).isUndef()) {
+ // MI will become a KILL, don't consider it in scheduling
+ return AluDiscarded;
}
+ default:
+ break;
+ }
- if (TII->isLDSInstr(MI->getOpcode())) {
- return AluT_X;
- }
+ // Does the instruction take a whole IG?
+ // XXX: Is it possible to add a helper function in R600InstrInfo that can
+ // be used here and in R600PacketizerList::isSoloInstruction()?
+ if (TII->isVector(*MI) ||
+ TII->isCubeOp(MI->getOpcode()) ||
+ TII->isReductionOp(MI->getOpcode()) ||
+ MI->getOpcode() == AMDGPU::GROUP_BARRIER) {
+ return AluT_XYZW;
+ }
- // Is the result already assigned to a channel ?
- unsigned DestSubReg = MI->getOperand(0).getSubReg();
- switch (DestSubReg) {
- case AMDGPU::sub0:
- return AluT_X;
- case AMDGPU::sub1:
- return AluT_Y;
- case AMDGPU::sub2:
- return AluT_Z;
- case AMDGPU::sub3:
- return AluT_W;
- default:
- break;
- }
+ if (TII->isLDSInstr(MI->getOpcode())) {
+ return AluT_X;
+ }
- // Is the result already member of a X/Y/Z/W class ?
- unsigned DestReg = MI->getOperand(0).getReg();
- if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_XRegClass) ||
- regBelongsToClass(DestReg, &AMDGPU::R600_AddrRegClass))
- return AluT_X;
- if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_YRegClass))
- return AluT_Y;
- if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass))
- return AluT_Z;
- if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_WRegClass))
- return AluT_W;
- if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
- return AluT_XYZW;
-
- // LDS src registers cannot be used in the Trans slot.
- if (TII->readsLDSSrcReg(MI))
- return AluT_XYZW;
-
- return AluAny;
+ // Is the result already assigned to a channel?
+ unsigned DestSubReg = MI->getOperand(0).getSubReg();
+ switch (DestSubReg) {
+ case AMDGPU::sub0:
+ return AluT_X;
+ case AMDGPU::sub1:
+ return AluT_Y;
+ case AMDGPU::sub2:
+ return AluT_Z;
+ case AMDGPU::sub3:
+ return AluT_W;
+ default:
+ break;
+ }
+ // Is the result already a member of an X/Y/Z/W class?
+ unsigned DestReg = MI->getOperand(0).getReg();
+ if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_XRegClass) ||
+ regBelongsToClass(DestReg, &AMDGPU::R600_AddrRegClass))
+ return AluT_X;
+ if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_YRegClass))
+ return AluT_Y;
+ if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass))
+ return AluT_Z;
+ if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_WRegClass))
+ return AluT_W;
+ if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
+ return AluT_XYZW;
+
+ // LDS src registers cannot be used in the Trans slot.
+ if (TII->readsLDSSrcReg(*MI))
+ return AluT_XYZW;
+
+ return AluAny;
}
int R600SchedStrategy::getInstKind(SUnit* SU) {
@@ -324,9 +323,8 @@ SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q, bool AnyALU) {
It != E; ++It) {
SUnit *SU = *It;
InstructionsGroupCandidate.push_back(SU->getInstr());
- if (TII->fitsConstReadLimitations(InstructionsGroupCandidate)
- && (!AnyALU || !TII->isVectorOnly(SU->getInstr()))
- ) {
+ if (TII->fitsConstReadLimitations(InstructionsGroupCandidate) &&
+ (!AnyALU || !TII->isVectorOnly(*SU->getInstr()))) {
InstructionsGroupCandidate.pop_back();
Q.erase((It + 1).base());
return SU;
@@ -350,7 +348,7 @@ void R600SchedStrategy::PrepareNextSlot() {
DEBUG(dbgs() << "New Slot\n");
assert (OccupedSlotsMask && "Slot wasn't filled");
OccupedSlotsMask = 0;
-// if (HwGen == AMDGPUSubtarget::NORTHERN_ISLANDS)
+// if (HwGen == R600Subtarget::NORTHERN_ISLANDS)
// OccupedSlotsMask |= 16;
InstructionsGroupCandidate.clear();
LoadAlu();
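
NOTE: getWFCountLimitedByGPR() above caps occupancy by dividing 248 (taken
from the code; treating it as the allocatable GPR pool is an assumption) by
the per-wavefront GPR count. Illustrative values:

    getWFCountLimitedByGPR(10);   // 248 / 10 == 24 wavefronts
    getWFCountLimitedByGPR(31);   // 248 / 31 == 8 wavefronts
    getWFCountLimitedByGPR(124);  // 248 / 124 == 2 wavefronts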
diff --git a/lib/Target/AMDGPU/R600MachineScheduler.h b/lib/Target/AMDGPU/R600MachineScheduler.h
index fc5b95c28e71..16d5d939708c 100644
--- a/lib/Target/AMDGPU/R600MachineScheduler.h
+++ b/lib/Target/AMDGPU/R600MachineScheduler.h
@@ -12,20 +12,19 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_R600MACHINESCHEDULER_H
-#define LLVM_LIB_TARGET_R600_R600MACHINESCHEDULER_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_R600MACHINESCHEDULER_H
+#define LLVM_LIB_TARGET_AMDGPU_R600MACHINESCHEDULER_H
-#include "R600InstrInfo.h"
-#include "llvm/ADT/PriorityQueue.h"
#include "llvm/CodeGen/MachineScheduler.h"
-#include "llvm/Support/Debug.h"
using namespace llvm;
namespace llvm {
-class R600SchedStrategy : public MachineSchedStrategy {
+class R600InstrInfo;
+struct R600RegisterInfo;
+class R600SchedStrategy final : public MachineSchedStrategy {
const ScheduleDAGMILive *DAG;
const R600InstrInfo *TII;
const R600RegisterInfo *TRI;
diff --git a/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
index 5efb3b9fc20e..ecae27d2233d 100644
--- a/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
+++ b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
@@ -29,6 +29,7 @@
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
+#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/MachineDominators.h"
@@ -210,9 +211,9 @@ MachineInstr *R600VectorRegMerger::RebuildVector(
(void)Tmp;
SrcVec = DstReg;
}
- Pos = BuildMI(MBB, Pos, DL, TII->get(AMDGPU::COPY), Reg)
- .addReg(SrcVec);
- DEBUG(dbgs() << " ->"; Pos->dump(););
+ MachineInstr *NewMI =
+ BuildMI(MBB, Pos, DL, TII->get(AMDGPU::COPY), Reg).addReg(SrcVec);
+ DEBUG(dbgs() << " ->"; NewMI->dump(););
DEBUG(dbgs() << " Updating Swizzle:\n");
for (MachineRegisterInfo::use_instr_iterator It = MRI->use_instr_begin(Reg),
@@ -224,11 +225,11 @@ MachineInstr *R600VectorRegMerger::RebuildVector(
RSI->Instr->eraseFromParent();
// Update RSI
- RSI->Instr = Pos;
+ RSI->Instr = NewMI;
RSI->RegToChan = UpdatedRegToChan;
RSI->UndefReg = UpdatedUndef;
- return Pos;
+ return NewMI;
}
void R600VectorRegMerger::RemoveMI(MachineInstr *MI) {
@@ -314,8 +315,13 @@ void R600VectorRegMerger::trackRSI(const RegSeqInfo &RSI) {
}
bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
- TII = static_cast<const R600InstrInfo *>(Fn.getSubtarget().getInstrInfo());
- MRI = &(Fn.getRegInfo());
+ if (skipFunction(*Fn.getFunction()))
+ return false;
+
+ const R600Subtarget &ST = Fn.getSubtarget<R600Subtarget>();
+ TII = ST.getInstrInfo();
+ MRI = &Fn.getRegInfo();
+
for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
MBB != MBBe; ++MBB) {
MachineBasicBlock *MB = &*MBB;
@@ -325,10 +331,10 @@ bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
for (MachineBasicBlock::iterator MII = MB->begin(), MIIE = MB->end();
MII != MIIE; ++MII) {
- MachineInstr *MI = MII;
- if (MI->getOpcode() != AMDGPU::REG_SEQUENCE) {
- if (TII->get(MI->getOpcode()).TSFlags & R600_InstFlag::TEX_INST) {
- unsigned Reg = MI->getOperand(1).getReg();
+ MachineInstr &MI = *MII;
+ if (MI.getOpcode() != AMDGPU::REG_SEQUENCE) {
+ if (TII->get(MI.getOpcode()).TSFlags & R600_InstFlag::TEX_INST) {
+ unsigned Reg = MI.getOperand(1).getReg();
for (MachineRegisterInfo::def_instr_iterator
It = MRI->def_instr_begin(Reg), E = MRI->def_instr_end();
It != E; ++It) {
@@ -338,17 +344,17 @@ bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
continue;
}
-
- RegSeqInfo RSI(*MRI, MI);
+ RegSeqInfo RSI(*MRI, &MI);
    // Are all uses of MI swizzeable?
- unsigned Reg = MI->getOperand(0).getReg();
+ unsigned Reg = MI.getOperand(0).getReg();
if (!areAllUsesSwizzeable(Reg))
continue;
- DEBUG (dbgs() << "Trying to optimize ";
- MI->dump();
- );
+ DEBUG({
+ dbgs() << "Trying to optimize ";
+ MI.dump();
+ });
RegSeqInfo CandidateRSI;
std::vector<std::pair<unsigned, unsigned> > RemapChan;
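
NOTE: the RebuildVector() hunk above is not just reindentation. The old code
reassigned the iterator Pos to the result of BuildMI and then cached and
returned it through implicit iterator/pointer conversions; the new code
names the created COPY explicitly, in line with the MachineInstr-reference
cleanup throughout this import:

    // Capture the instruction BuildMI creates instead of round-tripping it
    // through the insertion iterator.
    MachineInstr *NewMI =
        BuildMI(MBB, Pos, DL, TII->get(AMDGPU::COPY), Reg).addReg(SrcVec);
    RSI->Instr = NewMI;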
diff --git a/lib/Target/AMDGPU/R600Packetizer.cpp b/lib/Target/AMDGPU/R600Packetizer.cpp
index 21269613a305..c84866469ae8 100644
--- a/lib/Target/AMDGPU/R600Packetizer.cpp
+++ b/lib/Target/AMDGPU/R600Packetizer.cpp
@@ -56,15 +56,14 @@ public:
char R600Packetizer::ID = 0;
class R600PacketizerList : public VLIWPacketizerList {
-
private:
const R600InstrInfo *TII;
const R600RegisterInfo &TRI;
bool VLIW5;
bool ConsideredInstUsesAlreadyWrittenVectorElement;
- unsigned getSlot(const MachineInstr *MI) const {
- return TRI.getHWRegChan(MI->getOperand(0).getReg());
+ unsigned getSlot(const MachineInstr &MI) const {
+ return TRI.getHWRegChan(MI.getOperand(0).getReg());
}
/// \returns register to PV chan mapping for bundle/single instructions that
@@ -81,11 +80,11 @@ private:
int LastDstChan = -1;
do {
bool isTrans = false;
- int BISlot = getSlot(&*BI);
+ int BISlot = getSlot(*BI);
if (LastDstChan >= BISlot)
isTrans = true;
LastDstChan = BISlot;
- if (TII->isPredicated(&*BI))
+ if (TII->isPredicated(*BI))
continue;
int OperandIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::write);
if (OperandIdx > -1 && BI->getOperand(OperandIdx).getImm() == 0)
@@ -95,7 +94,7 @@ private:
continue;
}
unsigned Dst = BI->getOperand(DstIdx).getReg();
- if (isTrans || TII->isTransOnly(&*BI)) {
+ if (isTrans || TII->isTransOnly(*BI)) {
Result[Dst] = AMDGPU::PS;
continue;
}
@@ -129,7 +128,7 @@ private:
return Result;
}
- void substitutePV(MachineInstr *MI, const DenseMap<unsigned, unsigned> &PVs)
+ void substitutePV(MachineInstr &MI, const DenseMap<unsigned, unsigned> &PVs)
const {
unsigned Ops[] = {
AMDGPU::OpName::src0,
@@ -137,23 +136,23 @@ private:
AMDGPU::OpName::src2
};
for (unsigned i = 0; i < 3; i++) {
- int OperandIdx = TII->getOperandIdx(MI->getOpcode(), Ops[i]);
+ int OperandIdx = TII->getOperandIdx(MI.getOpcode(), Ops[i]);
if (OperandIdx < 0)
continue;
- unsigned Src = MI->getOperand(OperandIdx).getReg();
+ unsigned Src = MI.getOperand(OperandIdx).getReg();
const DenseMap<unsigned, unsigned>::const_iterator It = PVs.find(Src);
if (It != PVs.end())
- MI->getOperand(OperandIdx).setReg(It->second);
+ MI.getOperand(OperandIdx).setReg(It->second);
}
}
public:
// Ctor.
- R600PacketizerList(MachineFunction &MF, MachineLoopInfo &MLI)
+ R600PacketizerList(MachineFunction &MF, const R600Subtarget &ST,
+ MachineLoopInfo &MLI)
: VLIWPacketizerList(MF, MLI, nullptr),
- TII(static_cast<const R600InstrInfo *>(
- MF.getSubtarget().getInstrInfo())),
+ TII(ST.getInstrInfo()),
TRI(TII->getRegisterInfo()) {
- VLIW5 = !MF.getSubtarget<AMDGPUSubtarget>().hasCaymanISA();
+ VLIW5 = !ST.hasCaymanISA();
}
// initPacketizerState - initialize some internal flags.
@@ -162,32 +161,30 @@ public:
}
// ignorePseudoInstruction - Ignore bundling of pseudo instructions.
- bool ignorePseudoInstruction(const MachineInstr *MI,
+ bool ignorePseudoInstruction(const MachineInstr &MI,
const MachineBasicBlock *MBB) override {
return false;
}
// isSoloInstruction - return true if instruction MI can not be packetized
// with any other instruction, which means that MI itself is a packet.
- bool isSoloInstruction(const MachineInstr *MI) override {
- if (TII->isVector(*MI))
+ bool isSoloInstruction(const MachineInstr &MI) override {
+ if (TII->isVector(MI))
return true;
- if (!TII->isALUInstr(MI->getOpcode()))
+ if (!TII->isALUInstr(MI.getOpcode()))
return true;
- if (MI->getOpcode() == AMDGPU::GROUP_BARRIER)
+ if (MI.getOpcode() == AMDGPU::GROUP_BARRIER)
return true;
// XXX: This can be removed once the packetizer properly handles all the
// LDS instruction group restrictions.
- if (TII->isLDSInstr(MI->getOpcode()))
- return true;
- return false;
+ return TII->isLDSInstr(MI.getOpcode());
}
// isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
// together.
bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) override {
MachineInstr *MII = SUI->getInstr(), *MIJ = SUJ->getInstr();
- if (getSlot(MII) == getSlot(MIJ))
+ if (getSlot(*MII) == getSlot(*MIJ))
ConsideredInstUsesAlreadyWrittenVectorElement = true;
    // Do MII and MIJ share the same pred_sel?
int OpI = TII->getOperandIdx(MII->getOpcode(), AMDGPU::OpName::pred_sel),
@@ -210,14 +207,12 @@ public:
}
}
- bool ARDef = TII->definesAddressRegister(MII) ||
- TII->definesAddressRegister(MIJ);
- bool ARUse = TII->usesAddressRegister(MII) ||
- TII->usesAddressRegister(MIJ);
- if (ARDef && ARUse)
- return false;
+ bool ARDef =
+ TII->definesAddressRegister(*MII) || TII->definesAddressRegister(*MIJ);
+ bool ARUse =
+ TII->usesAddressRegister(*MII) || TII->usesAddressRegister(*MIJ);
- return true;
+ return !ARDef || !ARUse;
}
  // isLegalToPruneDependencies - Is it legal to prune dependence between SUI
@@ -231,7 +226,7 @@ public:
MI->getOperand(LastOp).setImm(Bit);
}
- bool isBundlableWithCurrentPMI(MachineInstr *MI,
+ bool isBundlableWithCurrentPMI(MachineInstr &MI,
const DenseMap<unsigned, unsigned> &PV,
std::vector<R600InstrInfo::BankSwizzle> &BS,
bool &isTransSlot) {
@@ -240,11 +235,14 @@ public:
// Is the dst reg sequence legal ?
if (!isTransSlot && !CurrentPacketMIs.empty()) {
- if (getSlot(MI) <= getSlot(CurrentPacketMIs.back())) {
- if (ConsideredInstUsesAlreadyWrittenVectorElement &&
+ if (getSlot(MI) <= getSlot(*CurrentPacketMIs.back())) {
+ if (ConsideredInstUsesAlreadyWrittenVectorElement &&
!TII->isVectorOnly(MI) && VLIW5) {
isTransSlot = true;
- DEBUG(dbgs() << "Considering as Trans Inst :"; MI->dump(););
+ DEBUG({
+ dbgs() << "Considering as Trans Inst :";
+ MI.dump();
+ });
}
else
return false;
@@ -252,18 +250,18 @@ public:
}
// Are the Constants limitations met ?
- CurrentPacketMIs.push_back(MI);
+ CurrentPacketMIs.push_back(&MI);
if (!TII->fitsConstReadLimitations(CurrentPacketMIs)) {
- DEBUG(
+ DEBUG({
dbgs() << "Couldn't pack :\n";
- MI->dump();
+ MI.dump();
dbgs() << "with the following packets :\n";
for (unsigned i = 0, e = CurrentPacketMIs.size() - 1; i < e; i++) {
CurrentPacketMIs[i]->dump();
dbgs() << "\n";
}
dbgs() << "because of Consts read limitations\n";
- );
+ });
CurrentPacketMIs.pop_back();
return false;
}
@@ -271,16 +269,16 @@ public:
// Is there a BankSwizzle set that meet Read Port limitations ?
if (!TII->fitsReadPortLimitations(CurrentPacketMIs,
PV, BS, isTransSlot)) {
- DEBUG(
+ DEBUG({
dbgs() << "Couldn't pack :\n";
- MI->dump();
+ MI.dump();
dbgs() << "with the following packets :\n";
for (unsigned i = 0, e = CurrentPacketMIs.size() - 1; i < e; i++) {
CurrentPacketMIs[i]->dump();
dbgs() << "\n";
}
dbgs() << "because of Read port limitations\n";
- );
+ });
CurrentPacketMIs.pop_back();
return false;
}
@@ -293,9 +291,9 @@ public:
return true;
}
- MachineBasicBlock::iterator addToPacket(MachineInstr *MI) override {
+ MachineBasicBlock::iterator addToPacket(MachineInstr &MI) override {
MachineBasicBlock::iterator FirstInBundle =
- CurrentPacketMIs.empty() ? MI : CurrentPacketMIs.front();
+ CurrentPacketMIs.empty() ? &MI : CurrentPacketMIs.front();
const DenseMap<unsigned, unsigned> &PV =
getPreviousVector(FirstInBundle);
std::vector<R600InstrInfo::BankSwizzle> BS;
@@ -308,9 +306,9 @@ public:
AMDGPU::OpName::bank_swizzle);
MI->getOperand(Op).setImm(BS[i]);
}
- unsigned Op = TII->getOperandIdx(MI->getOpcode(),
- AMDGPU::OpName::bank_swizzle);
- MI->getOperand(Op).setImm(BS.back());
+ unsigned Op =
+ TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::bank_swizzle);
+ MI.getOperand(Op).setImm(BS.back());
if (!CurrentPacketMIs.empty())
setIsLastBit(CurrentPacketMIs.back(), 0);
substitutePV(MI, PV);
@@ -320,7 +318,7 @@ public:
}
return It;
}
- endPacket(MI->getParent(), MI);
+ endPacket(MI.getParent(), MI);
if (TII->isTransOnly(MI))
return MI;
return VLIWPacketizerList::addToPacket(MI);
@@ -328,15 +326,20 @@ public:
};
bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) {
- const TargetInstrInfo *TII = Fn.getSubtarget().getInstrInfo();
+ const R600Subtarget &ST = Fn.getSubtarget<R600Subtarget>();
+ const R600InstrInfo *TII = ST.getInstrInfo();
+
MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
// Instantiate the packetizer.
- R600PacketizerList Packetizer(Fn, MLI);
+ R600PacketizerList Packetizer(Fn, ST, MLI);
// DFA state table should not be empty.
assert(Packetizer.getResourceTracker() && "Empty DFA table!");
+ if (Packetizer.getResourceTracker()->getInstrItins()->isEmpty())
+ return false;
+
//
// Loop over all basic blocks and remove KILL pseudo-instructions
// These instructions confuse the dependence analysis. Consider:
@@ -375,7 +378,7 @@ bool R600Packetizer::runOnMachineFunction(MachineFunction &Fn) {
// instruction stream until we find the nearest boundary.
MachineBasicBlock::iterator I = RegionEnd;
for(;I != MBB->begin(); --I, --RemainingCount) {
- if (TII->isSchedulingBoundary(&*std::prev(I), &*MBB, Fn))
+ if (TII->isSchedulingBoundary(*std::prev(I), &*MBB, Fn))
break;
}
I = MBB->begin();
diff --git a/lib/Target/AMDGPU/R600RegisterInfo.cpp b/lib/Target/AMDGPU/R600RegisterInfo.cpp
index fb0359cfc651..dfdc602b80cd 100644
--- a/lib/Target/AMDGPU/R600RegisterInfo.cpp
+++ b/lib/Target/AMDGPU/R600RegisterInfo.cpp
@@ -28,8 +28,8 @@ R600RegisterInfo::R600RegisterInfo() : AMDGPURegisterInfo() {
BitVector R600RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
- const R600InstrInfo *TII =
- static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
+ const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
+ const R600InstrInfo *TII = ST.getInstrInfo();
Reserved.set(AMDGPU::ZERO);
Reserved.set(AMDGPU::HALF);
@@ -89,3 +89,10 @@ bool R600RegisterInfo::isPhysRegLiveAcrossClauses(unsigned Reg) const {
return true;
}
}
+
+void R600RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
+ int SPAdj,
+ unsigned FIOperandNum,
+ RegScavenger *RS) const {
+ llvm_unreachable("Subroutines not supported yet");
+}
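
NOTE: the eliminateFrameIndex() override added above traps unconditionally.
R600 supports neither subroutine calls nor a stack, so no frame index should
survive to this point; the override appears to exist to satisfy the
TargetRegisterInfo interface now that the base class no longer provides it
(an inference from this diff, not something it states).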
diff --git a/lib/Target/AMDGPU/R600RegisterInfo.h b/lib/Target/AMDGPU/R600RegisterInfo.h
index 4f8a129ce4a6..9dfb3106c6cc 100644
--- a/lib/Target/AMDGPU/R600RegisterInfo.h
+++ b/lib/Target/AMDGPU/R600RegisterInfo.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_R600REGISTERINFO_H
-#define LLVM_LIB_TARGET_R600_R600REGISTERINFO_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_R600REGISTERINFO_H
+#define LLVM_LIB_TARGET_AMDGPU_R600REGISTERINFO_H
#include "AMDGPURegisterInfo.h"
@@ -21,7 +21,7 @@ namespace llvm {
class AMDGPUSubtarget;
-struct R600RegisterInfo : public AMDGPURegisterInfo {
+struct R600RegisterInfo final : public AMDGPURegisterInfo {
RegClassWeight RCW;
R600RegisterInfo();
@@ -31,7 +31,7 @@ struct R600RegisterInfo : public AMDGPURegisterInfo {
/// \brief get the HW encoding for a register's channel.
unsigned getHWRegChan(unsigned reg) const;
- unsigned getHWRegIndex(unsigned Reg) const override;
+ unsigned getHWRegIndex(unsigned Reg) const;
/// \brief get the register class of the specified type to use in the
/// CFGStructurizer
@@ -40,8 +40,13 @@ struct R600RegisterInfo : public AMDGPURegisterInfo {
const RegClassWeight &
getRegClassWeight(const TargetRegisterClass *RC) const override;
- // \returns true if \p Reg can be defined in one ALU caluse and used in another.
+ // \returns true if \p Reg can be defined in one ALU clause and used in
+ // another.
bool isPhysRegLiveAcrossClauses(unsigned Reg) const;
+
+ void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
+ unsigned FIOperandNum,
+ RegScavenger *RS = nullptr) const override;
};
} // End namespace llvm
diff --git a/lib/Target/AMDGPU/R600Schedule.td b/lib/Target/AMDGPU/R600Schedule.td
index df62bf85c0ad..70fb46c1a7d6 100644
--- a/lib/Target/AMDGPU/R600Schedule.td
+++ b/lib/Target/AMDGPU/R600Schedule.td
@@ -9,7 +9,7 @@
//
// R600 has a VLIW architecture. On pre-cayman cards there are 5 instruction
// slots ALU.X, ALU.Y, ALU.Z, ALU.W, and TRANS. For cayman cards, the TRANS
-// slot has been removed.
+// slot has been removed.
//
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/AMDGPU/R600TextureIntrinsicsReplacer.cpp b/lib/Target/AMDGPU/R600TextureIntrinsicsReplacer.cpp
deleted file mode 100644
index 2fc7b02f673f..000000000000
--- a/lib/Target/AMDGPU/R600TextureIntrinsicsReplacer.cpp
+++ /dev/null
@@ -1,303 +0,0 @@
-//===-- R600TextureIntrinsicsReplacer.cpp ---------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// This pass translates tgsi-like texture intrinsics into R600 texture
-/// closer to hardware intrinsics.
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Passes.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/GlobalValue.h"
-#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/InstVisitor.h"
-
-using namespace llvm;
-
-namespace {
-class R600TextureIntrinsicsReplacer :
- public FunctionPass, public InstVisitor<R600TextureIntrinsicsReplacer> {
- static char ID;
-
- Module *Mod;
- Type *FloatType;
- Type *Int32Type;
- Type *V4f32Type;
- Type *V4i32Type;
- FunctionType *TexSign;
- FunctionType *TexQSign;
-
- void getAdjustmentFromTextureTarget(unsigned TextureType, bool hasLOD,
- unsigned SrcSelect[4], unsigned CT[4],
- bool &useShadowVariant) {
- enum TextureTypes {
- TEXTURE_1D = 1,
- TEXTURE_2D,
- TEXTURE_3D,
- TEXTURE_CUBE,
- TEXTURE_RECT,
- TEXTURE_SHADOW1D,
- TEXTURE_SHADOW2D,
- TEXTURE_SHADOWRECT,
- TEXTURE_1D_ARRAY,
- TEXTURE_2D_ARRAY,
- TEXTURE_SHADOW1D_ARRAY,
- TEXTURE_SHADOW2D_ARRAY,
- TEXTURE_SHADOWCUBE,
- TEXTURE_2D_MSAA,
- TEXTURE_2D_ARRAY_MSAA,
- TEXTURE_CUBE_ARRAY,
- TEXTURE_SHADOWCUBE_ARRAY
- };
-
- switch (TextureType) {
- case 0:
- useShadowVariant = false;
- return;
- case TEXTURE_RECT:
- case TEXTURE_1D:
- case TEXTURE_2D:
- case TEXTURE_3D:
- case TEXTURE_CUBE:
- case TEXTURE_1D_ARRAY:
- case TEXTURE_2D_ARRAY:
- case TEXTURE_CUBE_ARRAY:
- case TEXTURE_2D_MSAA:
- case TEXTURE_2D_ARRAY_MSAA:
- useShadowVariant = false;
- break;
- case TEXTURE_SHADOW1D:
- case TEXTURE_SHADOW2D:
- case TEXTURE_SHADOWRECT:
- case TEXTURE_SHADOW1D_ARRAY:
- case TEXTURE_SHADOW2D_ARRAY:
- case TEXTURE_SHADOWCUBE:
- case TEXTURE_SHADOWCUBE_ARRAY:
- useShadowVariant = true;
- break;
- default:
- llvm_unreachable("Unknow Texture Type");
- }
-
- if (TextureType == TEXTURE_RECT ||
- TextureType == TEXTURE_SHADOWRECT) {
- CT[0] = 0;
- CT[1] = 0;
- }
-
- if (TextureType == TEXTURE_CUBE_ARRAY ||
- TextureType == TEXTURE_SHADOWCUBE_ARRAY)
- CT[2] = 0;
-
- if (TextureType == TEXTURE_1D_ARRAY ||
- TextureType == TEXTURE_SHADOW1D_ARRAY) {
- if (hasLOD && useShadowVariant) {
- CT[1] = 0;
- } else {
- CT[2] = 0;
- SrcSelect[2] = 1;
- }
- } else if (TextureType == TEXTURE_2D_ARRAY ||
- TextureType == TEXTURE_SHADOW2D_ARRAY) {
- CT[2] = 0;
- }
-
- if ((TextureType == TEXTURE_SHADOW1D ||
- TextureType == TEXTURE_SHADOW2D ||
- TextureType == TEXTURE_SHADOWRECT ||
- TextureType == TEXTURE_SHADOW1D_ARRAY) &&
- !(hasLOD && useShadowVariant))
- SrcSelect[3] = 2;
- }
-
- void ReplaceCallInst(CallInst &I, FunctionType *FT, const char *Name,
- unsigned SrcSelect[4], Value *Offset[3], Value *Resource,
- Value *Sampler, unsigned CT[4], Value *Coord) {
- IRBuilder<> Builder(&I);
- Constant *Mask[] = {
- ConstantInt::get(Int32Type, SrcSelect[0]),
- ConstantInt::get(Int32Type, SrcSelect[1]),
- ConstantInt::get(Int32Type, SrcSelect[2]),
- ConstantInt::get(Int32Type, SrcSelect[3])
- };
- Value *SwizzleMask = ConstantVector::get(Mask);
- Value *SwizzledCoord =
- Builder.CreateShuffleVector(Coord, Coord, SwizzleMask);
-
- Value *Args[] = {
- SwizzledCoord,
- Offset[0],
- Offset[1],
- Offset[2],
- Resource,
- Sampler,
- ConstantInt::get(Int32Type, CT[0]),
- ConstantInt::get(Int32Type, CT[1]),
- ConstantInt::get(Int32Type, CT[2]),
- ConstantInt::get(Int32Type, CT[3])
- };
-
- Function *F = Mod->getFunction(Name);
- if (!F) {
- F = Function::Create(FT, GlobalValue::ExternalLinkage, Name, Mod);
- F->addFnAttr(Attribute::ReadNone);
- }
- I.replaceAllUsesWith(Builder.CreateCall(F, Args));
- I.eraseFromParent();
- }
-
- void ReplaceTexIntrinsic(CallInst &I, bool hasLOD, FunctionType *FT,
- const char *VanillaInt,
- const char *ShadowInt) {
- Value *Coord = I.getArgOperand(0);
- Value *ResourceId = I.getArgOperand(1);
- Value *SamplerId = I.getArgOperand(2);
-
- unsigned TextureType =
- cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
-
- unsigned SrcSelect[4] = { 0, 1, 2, 3 };
- unsigned CT[4] = {1, 1, 1, 1};
- Value *Offset[3] = {
- ConstantInt::get(Int32Type, 0),
- ConstantInt::get(Int32Type, 0),
- ConstantInt::get(Int32Type, 0)
- };
- bool useShadowVariant;
-
- getAdjustmentFromTextureTarget(TextureType, hasLOD, SrcSelect, CT,
- useShadowVariant);
-
- ReplaceCallInst(I, FT, useShadowVariant?ShadowInt:VanillaInt, SrcSelect,
- Offset, ResourceId, SamplerId, CT, Coord);
- }
-
- void ReplaceTXF(CallInst &I) {
- Value *Coord = I.getArgOperand(0);
- Value *ResourceId = I.getArgOperand(4);
- Value *SamplerId = I.getArgOperand(5);
-
- unsigned TextureType =
- cast<ConstantInt>(I.getArgOperand(6))->getZExtValue();
-
- unsigned SrcSelect[4] = { 0, 1, 2, 3 };
- unsigned CT[4] = {1, 1, 1, 1};
- Value *Offset[3] = {
- I.getArgOperand(1),
- I.getArgOperand(2),
- I.getArgOperand(3),
- };
- bool useShadowVariant;
-
- getAdjustmentFromTextureTarget(TextureType, false, SrcSelect, CT,
- useShadowVariant);
-
- ReplaceCallInst(I, TexQSign, "llvm.R600.txf", SrcSelect,
- Offset, ResourceId, SamplerId, CT, Coord);
- }
-
-public:
- R600TextureIntrinsicsReplacer():
- FunctionPass(ID) {
- }
-
- bool doInitialization(Module &M) override {
- LLVMContext &Ctx = M.getContext();
- Mod = &M;
- FloatType = Type::getFloatTy(Ctx);
- Int32Type = Type::getInt32Ty(Ctx);
- V4f32Type = VectorType::get(FloatType, 4);
- V4i32Type = VectorType::get(Int32Type, 4);
- Type *ArgsType[] = {
- V4f32Type,
- Int32Type,
- Int32Type,
- Int32Type,
- Int32Type,
- Int32Type,
- Int32Type,
- Int32Type,
- Int32Type,
- Int32Type,
- };
- TexSign = FunctionType::get(V4f32Type, ArgsType, /*isVarArg=*/false);
- Type *ArgsQType[] = {
- V4i32Type,
- Int32Type,
- Int32Type,
- Int32Type,
- Int32Type,
- Int32Type,
- Int32Type,
- Int32Type,
- Int32Type,
- Int32Type,
- };
- TexQSign = FunctionType::get(V4f32Type, ArgsQType, /*isVarArg=*/false);
- return false;
- }
-
- bool runOnFunction(Function &F) override {
- visit(F);
- return false;
- }
-
- const char *getPassName() const override {
- return "R600 Texture Intrinsics Replacer";
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- }
-
- void visitCallInst(CallInst &I) {
- if (!I.getCalledFunction())
- return;
-
- StringRef Name = I.getCalledFunction()->getName();
- if (Name == "llvm.AMDGPU.tex") {
- ReplaceTexIntrinsic(I, false, TexSign, "llvm.R600.tex", "llvm.R600.texc");
- return;
- }
- if (Name == "llvm.AMDGPU.txl") {
- ReplaceTexIntrinsic(I, true, TexSign, "llvm.R600.txl", "llvm.R600.txlc");
- return;
- }
- if (Name == "llvm.AMDGPU.txb") {
- ReplaceTexIntrinsic(I, true, TexSign, "llvm.R600.txb", "llvm.R600.txbc");
- return;
- }
- if (Name == "llvm.AMDGPU.txf") {
- ReplaceTXF(I);
- return;
- }
- if (Name == "llvm.AMDGPU.txq") {
- ReplaceTexIntrinsic(I, false, TexQSign, "llvm.R600.txq", "llvm.R600.txq");
- return;
- }
- if (Name == "llvm.AMDGPU.ddx") {
- ReplaceTexIntrinsic(I, false, TexSign, "llvm.R600.ddx", "llvm.R600.ddx");
- return;
- }
- if (Name == "llvm.AMDGPU.ddy") {
- ReplaceTexIntrinsic(I, false, TexSign, "llvm.R600.ddy", "llvm.R600.ddy");
- return;
- }
- }
-
-};
-
-char R600TextureIntrinsicsReplacer::ID = 0;
-
-}
-
-FunctionPass *llvm::createR600TextureIntrinsicsReplacer() {
- return new R600TextureIntrinsicsReplacer();
-}
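
NOTE: R600TextureIntrinsicsReplacer.cpp is deleted outright. The pass
rewrote llvm.AMDGPU.tex-style calls into their llvm.R600.* equivalents at IR
level; with the r600-prefixed texture intrinsics now defined directly in
R600Intrinsics.td, frontends are expected to emit the final form themselves
(for example, calling @llvm.r600.texc directly for a shadow sampler), so the
IR-to-IR rewrite step has no remaining purpose.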
diff --git a/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp b/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
index fa4d24a2f25a..5f182c5304c6 100644
--- a/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
+++ b/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
@@ -14,6 +14,7 @@
#include "AMDGPU.h"
#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/Analysis/DivergenceAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
@@ -34,17 +35,16 @@ typedef std::pair<BasicBlock *, Value *> StackEntry;
typedef SmallVector<StackEntry, 16> StackVector;
// Intrinsic names the control flow is annotated with
-static const char *const IfIntrinsic = "llvm.SI.if";
-static const char *const ElseIntrinsic = "llvm.SI.else";
-static const char *const BreakIntrinsic = "llvm.SI.break";
-static const char *const IfBreakIntrinsic = "llvm.SI.if.break";
-static const char *const ElseBreakIntrinsic = "llvm.SI.else.break";
-static const char *const LoopIntrinsic = "llvm.SI.loop";
-static const char *const EndCfIntrinsic = "llvm.SI.end.cf";
+static const char *const IfIntrinsic = "llvm.amdgcn.if";
+static const char *const ElseIntrinsic = "llvm.amdgcn.else";
+static const char *const BreakIntrinsic = "llvm.amdgcn.break";
+static const char *const IfBreakIntrinsic = "llvm.amdgcn.if.break";
+static const char *const ElseBreakIntrinsic = "llvm.amdgcn.else.break";
+static const char *const LoopIntrinsic = "llvm.amdgcn.loop";
+static const char *const EndCfIntrinsic = "llvm.amdgcn.end.cf";
class SIAnnotateControlFlow : public FunctionPass {
-
- static char ID;
+ DivergenceAnalysis *DA;
Type *Boolean;
Type *Void;
@@ -69,6 +69,8 @@ class SIAnnotateControlFlow : public FunctionPass {
LoopInfo *LI;
+ bool isUniform(BranchInst *T);
+
bool isTopOfStack(BasicBlock *BB);
Value *popSaved();
@@ -83,13 +85,16 @@ class SIAnnotateControlFlow : public FunctionPass {
void insertElse(BranchInst *Term);
- Value *handleLoopCondition(Value *Cond, PHINode *Broken, llvm::Loop *L);
+ Value *handleLoopCondition(Value *Cond, PHINode *Broken,
+ llvm::Loop *L, BranchInst *Term);
void handleLoop(BranchInst *Term);
void closeControlFlow(BasicBlock *BB);
public:
+ static char ID;
+
SIAnnotateControlFlow():
FunctionPass(ID) { }
@@ -104,6 +109,7 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<LoopInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<DivergenceAnalysis>();
AU.addPreserved<DominatorTreeWrapperPass>();
FunctionPass::getAnalysisUsage(AU);
}
@@ -112,6 +118,12 @@ public:
} // end anonymous namespace
+INITIALIZE_PASS_BEGIN(SIAnnotateControlFlow, DEBUG_TYPE,
+ "Annotate SI Control Flow", false, false)
+INITIALIZE_PASS_DEPENDENCY(DivergenceAnalysis)
+INITIALIZE_PASS_END(SIAnnotateControlFlow, DEBUG_TYPE,
+ "Annotate SI Control Flow", false, false)
+
char SIAnnotateControlFlow::ID = 0;
/// \brief Initialize all the types and constants used in the pass
@@ -152,6 +164,13 @@ bool SIAnnotateControlFlow::doInitialization(Module &M) {
return false;
}
+/// \brief Is the branch condition uniform or did the StructurizeCFG pass
+/// consider it as such?
+bool SIAnnotateControlFlow::isUniform(BranchInst *T) {
+ return DA->isUniform(T->getCondition()) ||
+ T->getMetadata("structurizecfg.uniform") != nullptr;
+}
+
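+// Example of how a producer pass could tag a branch for the metadata check
+// above (the tagging site is hypothetical; only the metadata's presence is
+// tested, its payload is ignored):
+//   MDNode *Empty = MDNode::get(Term->getContext(), None);
+//   Term->setMetadata("structurizecfg.uniform", Empty);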
/// \brief Is BB the last block saved on the stack?
bool SIAnnotateControlFlow::isTopOfStack(BasicBlock *BB) {
return !Stack.empty() && Stack.back().first == BB;
@@ -194,6 +213,9 @@ void SIAnnotateControlFlow::eraseIfUnused(PHINode *Phi) {
/// \brief Open a new "If" block
void SIAnnotateControlFlow::openIf(BranchInst *Term) {
+ if (isUniform(Term)) {
+ return;
+ }
Value *Ret = CallInst::Create(If, Term->getCondition(), "", Term);
Term->setCondition(ExtractValueInst::Create(Ret, 0, "", Term));
push(Term->getSuccessor(1), ExtractValueInst::Create(Ret, 1, "", Term));
@@ -201,6 +223,9 @@ void SIAnnotateControlFlow::openIf(BranchInst *Term) {
/// \brief Close the last "If" block and open a new "Else" block
void SIAnnotateControlFlow::insertElse(BranchInst *Term) {
+ if (isUniform(Term)) {
+ return;
+ }
Value *Ret = CallInst::Create(Else, popSaved(), "", Term);
Term->setCondition(ExtractValueInst::Create(Ret, 0, "", Term));
push(Term->getSuccessor(1), ExtractValueInst::Create(Ret, 1, "", Term));
@@ -208,7 +233,7 @@ void SIAnnotateControlFlow::insertElse(BranchInst *Term) {
/// \brief Recursively handle the condition leading to a loop
Value *SIAnnotateControlFlow::handleLoopCondition(Value *Cond, PHINode *Broken,
- llvm::Loop *L) {
+ llvm::Loop *L, BranchInst *Term) {
// Only search through PHI nodes which are inside the loop. If we try this
// with PHI nodes that are outside of the loop, we end up inserting new PHI
@@ -232,7 +257,7 @@ Value *SIAnnotateControlFlow::handleLoopCondition(Value *Cond, PHINode *Broken,
}
Phi->setIncomingValue(i, BoolFalse);
- Value *PhiArg = handleLoopCondition(Incoming, Broken, L);
+ Value *PhiArg = handleLoopCondition(Incoming, Broken, L, Term);
NewPhi->addIncoming(PhiArg, From);
}
@@ -246,7 +271,23 @@ Value *SIAnnotateControlFlow::handleLoopCondition(Value *Cond, PHINode *Broken,
BasicBlock *From = Phi->getIncomingBlock(i);
if (From == IDom) {
+ // We're in the following situation:
+ // IDom/From
+ // | \
+ // | If-block
+ // | /
+ // Parent
+ // where we want to break out of the loop if the If-block is not taken.
+ // Due to the depth-first traversal, there should be an end.cf
+ // intrinsic in Parent, and we insert an else.break before it.
+ //
+ // Note that the end.cf need not be the first non-phi instruction
+ // of parent, particularly when we're dealing with a multi-level
+ // break, but it should occur within a group of intrinsic calls
+ // at the beginning of the block.
CallInst *OldEnd = dyn_cast<CallInst>(Parent->getFirstInsertionPt());
+ while (OldEnd && OldEnd->getCalledFunction() != EndCf)
+ OldEnd = dyn_cast<CallInst>(OldEnd->getNextNode());
if (OldEnd && OldEnd->getCalledFunction() == EndCf) {
Value *Args[] = { OldEnd->getArgOperand(0), NewPhi };
Ret = CallInst::Create(ElseBreak, Args, "", OldEnd);
@@ -271,14 +312,23 @@ Value *SIAnnotateControlFlow::handleLoopCondition(Value *Cond, PHINode *Broken,
Value *Args[] = { Cond, Broken };
return CallInst::Create(IfBreak, Args, "", Insert);
+ // Insert IfBreak before TERM for constant COND.
+ } else if (isa<ConstantInt>(Cond)) {
+ Value *Args[] = { Cond, Broken };
+ return CallInst::Create(IfBreak, Args, "", Term);
+
} else {
llvm_unreachable("Unhandled loop condition!");
}
- return 0;
+ return nullptr;
}
/// \brief Handle a back edge (loop)
void SIAnnotateControlFlow::handleLoop(BranchInst *Term) {
+ if (isUniform(Term)) {
+ return;
+ }
+
BasicBlock *BB = Term->getParent();
llvm::Loop *L = LI->getLoopFor(BB);
BasicBlock *Target = Term->getSuccessor(1);
@@ -286,7 +336,7 @@ void SIAnnotateControlFlow::handleLoop(BranchInst *Term) {
Value *Cond = Term->getCondition();
Term->setCondition(BoolTrue);
- Value *Arg = handleLoopCondition(Cond, Broken, L);
+ Value *Arg = handleLoopCondition(Cond, Broken, L, Term);
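+  // The constant-true condition set above is a placeholder: the real exit
+  // condition has been folded into the Broken mask, which the
+  // llvm.amdgcn.loop intrinsic tests so the wave only leaves the loop once
+  // every lane is done.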
for (pred_iterator PI = pred_begin(Target), PE = pred_end(Target);
PI != PE; ++PI) {
@@ -300,6 +350,8 @@ void SIAnnotateControlFlow::handleLoop(BranchInst *Term) {
void SIAnnotateControlFlow::closeControlFlow(BasicBlock *BB) {
llvm::Loop *L = LI->getLoopFor(BB);
+ assert(Stack.back().first == BB);
+
if (L && L->getHeader() == BB) {
// We can't insert an EndCF call into a loop header, because it will
// get executed on every iteration of the loop, when it should be
@@ -315,14 +367,18 @@ void SIAnnotateControlFlow::closeControlFlow(BasicBlock *BB) {
BB = llvm::SplitBlockPredecessors(BB, Preds, "endcf.split", DT, LI, false);
}
- CallInst::Create(EndCf, popSaved(), "", &*BB->getFirstInsertionPt());
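+  // Only emit end.cf for a real saved exec value; an undef placeholder on
+  // the stack means there is nothing to restore here.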
+ Value *Exec = popSaved();
+ if (!isa<UndefValue>(Exec))
+ CallInst::Create(EndCf, Exec, "", &*BB->getFirstInsertionPt());
}
/// \brief Annotate the control flow with intrinsics so the backend can
/// recognize if/then/else and loops.
bool SIAnnotateControlFlow::runOnFunction(Function &F) {
+
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+ DA = &getAnalysis<DivergenceAnalysis>();
for (df_iterator<BasicBlock *> I = df_begin(&F.getEntryBlock()),
E = df_end(&F.getEntryBlock()); I != E; ++I) {
@@ -332,12 +388,14 @@ bool SIAnnotateControlFlow::runOnFunction(Function &F) {
if (!Term || Term->isUnconditional()) {
if (isTopOfStack(*I))
closeControlFlow(*I);
+
continue;
}
if (I.nodeVisited(Term->getSuccessor(1))) {
if (isTopOfStack(*I))
closeControlFlow(*I);
+
handleLoop(Term);
continue;
}
diff --git a/lib/Target/AMDGPU/SIDebuggerInsertNops.cpp b/lib/Target/AMDGPU/SIDebuggerInsertNops.cpp
new file mode 100644
index 000000000000..65ceff3930ac
--- /dev/null
+++ b/lib/Target/AMDGPU/SIDebuggerInsertNops.cpp
@@ -0,0 +1,96 @@
+//===--- SIDebuggerInsertNops.cpp - Inserts nops for debugger usage -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// \brief Inserts one nop instruction for each high level source statement for
+/// debugger usage.
+///
+/// Tools, such as a debugger, need to pause execution based on user input
+/// (e.g., a breakpoint). In order to do this, one nop instruction is inserted
+/// before the first ISA instruction of each high level source statement.
+/// Further, the debugger may replace nop instructions with trap instructions
+/// based on user input.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SIInstrInfo.h"
+#include "AMDGPUSubtarget.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "si-debugger-insert-nops"
+#define PASS_NAME "SI Debugger Insert Nops"
+
+namespace {
+
+class SIDebuggerInsertNops : public MachineFunctionPass {
+public:
+ static char ID;
+
+ SIDebuggerInsertNops() : MachineFunctionPass(ID) { }
+ const char *getPassName() const override { return PASS_NAME; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+
+} // anonymous namespace
+
+INITIALIZE_PASS(SIDebuggerInsertNops, DEBUG_TYPE, PASS_NAME, false, false)
+
+char SIDebuggerInsertNops::ID = 0;
+char &llvm::SIDebuggerInsertNopsID = SIDebuggerInsertNops::ID;
+
+FunctionPass *llvm::createSIDebuggerInsertNopsPass() {
+ return new SIDebuggerInsertNops();
+}
+
+bool SIDebuggerInsertNops::runOnMachineFunction(MachineFunction &MF) {
+  // Skip this pass if the "amdgpu-debugger-insert-nops" attribute was not
+  // specified.
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+ if (!ST.debuggerInsertNops())
+ return false;
+
+ // Skip machine functions without debug info.
+ if (!MF.getMMI().hasDebugInfo())
+ return false;
+
+ // Target instruction info.
+ const SIInstrInfo *TII = ST.getInstrInfo();
+
+ // Set containing line numbers that have nop inserted.
+ DenseSet<unsigned> NopInserted;
+
+ for (auto &MBB : MF) {
+ for (auto MI = MBB.begin(); MI != MBB.end(); ++MI) {
+ // Skip DBG_VALUE instructions and instructions without location.
+ if (MI->isDebugValue() || !MI->getDebugLoc())
+ continue;
+
+ // Insert nop instruction if line number does not have nop inserted.
+ auto DL = MI->getDebugLoc();
+ if (NopInserted.find(DL.getLine()) == NopInserted.end()) {
+ BuildMI(MBB, *MI, DL, TII->get(AMDGPU::S_NOP))
+ .addImm(0);
+ NopInserted.insert(DL.getLine());
+ }
+ }
+ }
+
+ return true;
+}
diff --git a/lib/Target/AMDGPU/SIDefines.h b/lib/Target/AMDGPU/SIDefines.h
index aa1e352ed748..54efdc0a0466 100644
--- a/lib/Target/AMDGPU/SIDefines.h
+++ b/lib/Target/AMDGPU/SIDefines.h
@@ -10,8 +10,8 @@
#include "llvm/MC/MCInstrDesc.h"
-#ifndef LLVM_LIB_TARGET_R600_SIDEFINES_H
-#define LLVM_LIB_TARGET_R600_SIDEFINES_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_SIDEFINES_H
+#define LLVM_LIB_TARGET_AMDGPU_SIDEFINES_H
namespace SIInstrFlags {
// This needs to be kept in sync with the field bits in InstSI.
@@ -29,16 +29,19 @@ enum {
VOP2 = 1 << 11,
VOP3 = 1 << 12,
VOPC = 1 << 13,
+ SDWA = 1 << 14,
+ DPP = 1 << 15,
- MUBUF = 1 << 14,
- MTBUF = 1 << 15,
- SMRD = 1 << 16,
- DS = 1 << 17,
- MIMG = 1 << 18,
- FLAT = 1 << 19,
- WQM = 1 << 20,
- VGPRSpill = 1 << 21,
- VOPAsmPrefer32Bit = 1 << 22
+ MUBUF = 1 << 16,
+ MTBUF = 1 << 17,
+ SMRD = 1 << 18,
+ DS = 1 << 19,
+ MIMG = 1 << 20,
+ FLAT = 1 << 21,
+ WQM = 1 << 22,
+ VGPRSpill = 1 << 23,
+ VOPAsmPrefer32Bit = 1 << 24,
+ Gather4 = 1 << 25
};
}
@@ -46,9 +49,14 @@ namespace llvm {
namespace AMDGPU {
enum OperandType {
/// Operand with register or 32-bit immediate
- OPERAND_REG_IMM32 = llvm::MCOI::OPERAND_FIRST_TARGET,
+ OPERAND_REG_IMM32 = MCOI::OPERAND_FIRST_TARGET,
/// Operand with register or inline constant
- OPERAND_REG_INLINE_C
+ OPERAND_REG_INLINE_C,
+
+ /// Operand with 32-bit immediate that uses the constant bus. The standard
+ /// OPERAND_IMMEDIATE should be used for special immediates such as source
+ /// modifiers.
+ OPERAND_KIMM32
};
}
}
@@ -77,10 +85,13 @@ namespace SIInstrFlags {
};
}
+// Input operand modifiers bit-masks
+// NEG and SEXT share same bit-mask because they can't be set simultaneously.
namespace SISrcMods {
enum {
- NEG = 1 << 0,
- ABS = 1 << 1
+ NEG = 1 << 0, // Floating-point negate modifier
+ ABS = 1 << 1, // Floating-point absolute modifier
+ SEXT = 1 << 0 // Integer sign-extend modifier
};
}
@@ -93,6 +104,109 @@ namespace SIOutMods {
};
}
+namespace llvm {
+namespace AMDGPU {
+namespace EncValues { // Encoding values of enum9/8/7 operands
+
+enum {
+ SGPR_MIN = 0,
+ SGPR_MAX = 101,
+ TTMP_MIN = 112,
+ TTMP_MAX = 123,
+ INLINE_INTEGER_C_MIN = 128,
+  INLINE_INTEGER_C_POSITIVE_MAX = 192, // encodes +64
+ INLINE_INTEGER_C_MAX = 208,
+ INLINE_FLOATING_C_MIN = 240,
+ INLINE_FLOATING_C_MAX = 248,
+ LITERAL_CONST = 255,
+ VGPR_MIN = 256,
+ VGPR_MAX = 511
+};
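+
+// Encodings below LITERAL_CONST select scalar sources (SGPRs, trap temporary
+// registers and inline constants); LITERAL_CONST itself means a 32-bit
+// literal follows the instruction word, and VGPR_MIN..VGPR_MAX select VGPR
+// number (encoding - 256).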
+
+} // namespace EncValues
+} // namespace AMDGPU
+} // namespace llvm
+
+namespace llvm {
+namespace AMDGPU {
+namespace SendMsg { // Encoding of SIMM16 used in s_sendmsg* insns.
+
+enum Id { // Message ID, width(4) [3:0].
+ ID_UNKNOWN_ = -1,
+ ID_INTERRUPT = 1,
+ ID_GS,
+ ID_GS_DONE,
+ ID_SYSMSG = 15,
+ ID_GAPS_LAST_, // Indicate that sequence has gaps.
+ ID_GAPS_FIRST_ = ID_INTERRUPT,
+ ID_SHIFT_ = 0,
+ ID_WIDTH_ = 4,
+ ID_MASK_ = (((1 << ID_WIDTH_) - 1) << ID_SHIFT_)
+};
+
+enum Op { // Both GS and SYS operation IDs.
+ OP_UNKNOWN_ = -1,
+ OP_SHIFT_ = 4,
+ // width(2) [5:4]
+ OP_GS_NOP = 0,
+ OP_GS_CUT,
+ OP_GS_EMIT,
+ OP_GS_EMIT_CUT,
+ OP_GS_LAST_,
+ OP_GS_FIRST_ = OP_GS_NOP,
+ OP_GS_WIDTH_ = 2,
+ OP_GS_MASK_ = (((1 << OP_GS_WIDTH_) - 1) << OP_SHIFT_),
+ // width(3) [6:4]
+ OP_SYS_ECC_ERR_INTERRUPT = 1,
+ OP_SYS_REG_RD,
+ OP_SYS_HOST_TRAP_ACK,
+ OP_SYS_TTRACE_PC,
+ OP_SYS_LAST_,
+ OP_SYS_FIRST_ = OP_SYS_ECC_ERR_INTERRUPT,
+ OP_SYS_WIDTH_ = 3,
+ OP_SYS_MASK_ = (((1 << OP_SYS_WIDTH_) - 1) << OP_SHIFT_)
+};
+
+enum StreamId { // Stream ID, (2) [9:8].
+ STREAM_ID_DEFAULT_ = 0,
+ STREAM_ID_LAST_ = 4,
+ STREAM_ID_FIRST_ = STREAM_ID_DEFAULT_,
+ STREAM_ID_SHIFT_ = 8,
+ STREAM_ID_WIDTH_= 2,
+ STREAM_ID_MASK_ = (((1 << STREAM_ID_WIDTH_) - 1) << STREAM_ID_SHIFT_)
+};
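+
+// Example: a GS_EMIT message on stream 1 is encoded as
+//   SIMM16 = ID_GS | (OP_GS_EMIT << OP_SHIFT_) | (1 << STREAM_ID_SHIFT_)
+//          = 2 | (2 << 4) | (1 << 8) = 0x122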
+
+} // namespace SendMsg
+
+namespace Hwreg { // Encoding of SIMM16 used in s_setreg/getreg* insns.
+
+enum Id { // HwRegCode, (6) [5:0]
+ ID_UNKNOWN_ = -1,
+ ID_SYMBOLIC_FIRST_ = 1, // There are corresponding symbolic names defined.
+ ID_SYMBOLIC_LAST_ = 8,
+ ID_SHIFT_ = 0,
+ ID_WIDTH_ = 6,
+ ID_MASK_ = (((1 << ID_WIDTH_) - 1) << ID_SHIFT_)
+};
+
+enum Offset { // Offset, (5) [10:6]
+ OFFSET_DEFAULT_ = 0,
+ OFFSET_SHIFT_ = 6,
+ OFFSET_WIDTH_ = 5,
+ OFFSET_MASK_ = (((1 << OFFSET_WIDTH_) - 1) << OFFSET_SHIFT_)
+};
+
+enum WidthMinusOne { // WidthMinusOne, (5) [15:11]
+ WIDTH_M1_DEFAULT_ = 31,
+ WIDTH_M1_SHIFT_ = 11,
+ WIDTH_M1_WIDTH_ = 5,
+ WIDTH_M1_MASK_ = (((1 << WIDTH_M1_WIDTH_) - 1) << WIDTH_M1_SHIFT_)
+};
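+
+// Example: reading an entire 32-bit register uses the defaults:
+//   SIMM16 = Id | (OFFSET_DEFAULT_ << OFFSET_SHIFT_)
+//              | (WIDTH_M1_DEFAULT_ << WIDTH_M1_SHIFT_)
+// i.e. offset 0 and width-minus-one 31.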
+
+} // namespace Hwreg
+} // namespace AMDGPU
+} // namespace llvm
+
#define R_00B028_SPI_SHADER_PGM_RSRC1_PS 0x00B028
#define R_00B02C_SPI_SHADER_PGM_RSRC2_PS 0x00B02C
#define S_00B02C_EXTRA_LDS_SIZE(x) (((x) & 0xFF) << 8)
@@ -134,7 +248,7 @@ namespace SIOutMods {
#define C_00B84C_LDS_SIZE 0xFF007FFF
#define S_00B84C_EXCP_EN(x) (((x) & 0x7F) << 24)
#define G_00B84C_EXCP_EN(x) (((x) >> 24) & 0x7F)
-#define C_00B84C_EXCP_EN
+#define C_00B84C_EXCP_EN
#define R_0286CC_SPI_PS_INPUT_ENA 0x0286CC
#define R_0286D0_SPI_PS_INPUT_ADDR 0x0286D0
@@ -194,5 +308,7 @@ namespace SIOutMods {
#define R_0286E8_SPI_TMPRING_SIZE 0x0286E8
#define S_0286E8_WAVESIZE(x) (((x) & 0x1FFF) << 12)
+#define R_SPILLED_SGPRS 0x4
+#define R_SPILLED_VGPRS 0x8
#endif
diff --git a/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index f59d9948f98e..9e0086b79087 100644
--- a/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -77,7 +77,7 @@
using namespace llvm;
-#define DEBUG_TYPE "sgpr-copies"
+#define DEBUG_TYPE "si-fix-sgpr-copies"
namespace {
@@ -237,11 +237,10 @@ static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
}
bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
MachineRegisterInfo &MRI = MF.getRegInfo();
- const SIRegisterInfo *TRI =
- static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
- const SIInstrInfo *TII =
- static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ const SIRegisterInfo *TRI = ST.getRegisterInfo();
+ const SIInstrInfo *TII = ST.getInstrInfo();
SmallVector<MachineInstr *, 16> Worklist;
diff --git a/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp b/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp
deleted file mode 100644
index 8bda283f0fca..000000000000
--- a/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp
+++ /dev/null
@@ -1,219 +0,0 @@
-//===-- SIFixSGPRLiveRanges.cpp - Fix SGPR live ranges ----------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file SALU instructions ignore the execution mask, so we need to modify the
-/// live ranges of the registers they define in some cases.
-///
-/// The main case we need to handle is when a def is used in one side of a
-/// branch and not another. For example:
-///
-/// %def
-/// IF
-/// ...
-/// ...
-/// ELSE
-/// %use
-/// ...
-/// ENDIF
-///
-/// Here we need the register allocator to avoid assigning any of the defs
-/// inside of the IF to the same register as %def. In traditional live
-/// interval analysis %def is not live inside the IF branch, however, since
-/// SALU instructions inside of IF will be executed even if the branch is not
-/// taken, there is the chance that one of the instructions will overwrite the
-/// value of %def, so the use in ELSE will see the wrong value.
-///
-/// The strategy we use for solving this is to add an extra use after the ENDIF:
-///
-/// %def
-/// IF
-/// ...
-/// ...
-/// ELSE
-/// %use
-/// ...
-/// ENDIF
-/// %use
-///
-/// Adding this use will make the def live throughout the IF branch, which is
-/// what we want.
-
-#include "AMDGPU.h"
-#include "SIInstrInfo.h"
-#include "SIRegisterInfo.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/LiveVariables.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachinePostDominators.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetMachine.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "si-fix-sgpr-live-ranges"
-
-namespace {
-
-class SIFixSGPRLiveRanges : public MachineFunctionPass {
-public:
- static char ID;
-
-public:
- SIFixSGPRLiveRanges() : MachineFunctionPass(ID) {
- initializeSIFixSGPRLiveRangesPass(*PassRegistry::getPassRegistry());
- }
-
- bool runOnMachineFunction(MachineFunction &MF) override;
-
- const char *getPassName() const override {
- return "SI Fix SGPR live ranges";
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<LiveVariables>();
- AU.addPreserved<LiveVariables>();
-
- AU.addRequired<MachinePostDominatorTree>();
- AU.addPreserved<MachinePostDominatorTree>();
- AU.setPreservesCFG();
-
- MachineFunctionPass::getAnalysisUsage(AU);
- }
-};
-
-} // End anonymous namespace.
-
-INITIALIZE_PASS_BEGIN(SIFixSGPRLiveRanges, DEBUG_TYPE,
- "SI Fix SGPR Live Ranges", false, false)
-INITIALIZE_PASS_DEPENDENCY(LiveVariables)
-INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
-INITIALIZE_PASS_END(SIFixSGPRLiveRanges, DEBUG_TYPE,
- "SI Fix SGPR Live Ranges", false, false)
-
-char SIFixSGPRLiveRanges::ID = 0;
-
-char &llvm::SIFixSGPRLiveRangesID = SIFixSGPRLiveRanges::ID;
-
-FunctionPass *llvm::createSIFixSGPRLiveRangesPass() {
- return new SIFixSGPRLiveRanges();
-}
-
-bool SIFixSGPRLiveRanges::runOnMachineFunction(MachineFunction &MF) {
- MachineRegisterInfo &MRI = MF.getRegInfo();
- const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
- const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
- MF.getSubtarget().getRegisterInfo());
- bool MadeChange = false;
-
- MachinePostDominatorTree *PDT = &getAnalysis<MachinePostDominatorTree>();
- SmallVector<unsigned, 16> SGPRLiveRanges;
-
- LiveVariables *LV = &getAnalysis<LiveVariables>();
- MachineBasicBlock *Entry = &MF.front();
-
- // Use a depth first order so that in SSA, we encounter all defs before
- // uses. Once the defs of the block have been found, attempt to insert
- // SGPR_USE instructions in successor blocks if required.
- for (MachineBasicBlock *MBB : depth_first(Entry)) {
- for (const MachineInstr &MI : *MBB) {
- for (const MachineOperand &MO : MI.defs()) {
- // We should never see a live out def of a physical register, so we also
- // do not need to worry about implicit_defs().
- unsigned Def = MO.getReg();
- if (TargetRegisterInfo::isVirtualRegister(Def)) {
- if (TRI->isSGPRClass(MRI.getRegClass(Def))) {
- // Only consider defs that are live outs. We don't care about def /
- // use within the same block.
-
- // LiveVariables does not consider registers that are only used in a
-          // phi in a successor block as live out, unlike LiveIntervals.
- //
- // This is OK because SIFixSGPRCopies replaced any SGPR phis with
- // VGPRs.
- if (LV->isLiveOut(Def, *MBB))
- SGPRLiveRanges.push_back(Def);
- }
- }
- }
- }
-
- if (MBB->succ_size() < 2)
- continue;
-
- // We have structured control flow, so the number of successors should be
- // two.
- assert(MBB->succ_size() == 2);
- MachineBasicBlock *SuccA = *MBB->succ_begin();
- MachineBasicBlock *SuccB = *(++MBB->succ_begin());
- MachineBasicBlock *NCD = PDT->findNearestCommonDominator(SuccA, SuccB);
-
- if (!NCD)
- continue;
-
- MachineBasicBlock::iterator NCDTerm = NCD->getFirstTerminator();
-
- if (NCDTerm != NCD->end() && NCDTerm->getOpcode() == AMDGPU::SI_ELSE) {
- assert(NCD->succ_size() == 2);
- // We want to make sure we insert the Use after the ENDIF, not after
- // the ELSE.
- NCD = PDT->findNearestCommonDominator(*NCD->succ_begin(),
- *(++NCD->succ_begin()));
- }
-
- for (unsigned Reg : SGPRLiveRanges) {
- // FIXME: We could be smarter here. If the register is Live-In to one
- // block, but the other doesn't have any SGPR defs, then there won't be a
- // conflict. Also, if the branch condition is uniform then there will be
- // no conflict.
- bool LiveInToA = LV->isLiveIn(Reg, *SuccA);
- bool LiveInToB = LV->isLiveIn(Reg, *SuccB);
-
- if (!LiveInToA && !LiveInToB) {
- DEBUG(dbgs() << PrintReg(Reg, TRI, 0)
- << " is live into neither successor\n");
- continue;
- }
-
- if (LiveInToA && LiveInToB) {
- DEBUG(dbgs() << PrintReg(Reg, TRI, 0)
- << " is live into both successors\n");
- continue;
- }
-
- // This interval is live in to one successor, but not the other, so
- // we need to update its range so it is live in to both.
- DEBUG(dbgs() << "Possible SGPR conflict detected for "
- << PrintReg(Reg, TRI, 0)
- << " BB#" << SuccA->getNumber()
- << ", BB#" << SuccB->getNumber()
- << " with NCD = BB#" << NCD->getNumber() << '\n');
-
- assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
- "Not expecting to extend live range of physreg");
-
- // FIXME: Need to figure out how to update LiveRange here so this pass
- // will be able to preserve LiveInterval analysis.
- MachineInstr *NCDSGPRUse =
- BuildMI(*NCD, NCD->getFirstNonPHI(), DebugLoc(),
- TII->get(AMDGPU::SGPR_USE))
- .addReg(Reg, RegState::Implicit);
-
- MadeChange = true;
- LV->HandleVirtRegUse(Reg, NCD, NCDSGPRUse);
-
- DEBUG(NCDSGPRUse->dump());
- }
- }
-
- return MadeChange;
-}
diff --git a/lib/Target/AMDGPU/SIFoldOperands.cpp b/lib/Target/AMDGPU/SIFoldOperands.cpp
index 6230d1e28b74..4ecc0fcc6232 100644
--- a/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -13,12 +13,9 @@
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
@@ -44,8 +41,6 @@ public:
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<MachineDominatorTree>();
- AU.addPreserved<MachineDominatorTree>();
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -76,11 +71,8 @@ struct FoldCandidate {
} // End anonymous namespace.
-INITIALIZE_PASS_BEGIN(SIFoldOperands, DEBUG_TYPE,
- "SI Fold Operands", false, false)
-INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
-INITIALIZE_PASS_END(SIFoldOperands, DEBUG_TYPE,
- "SI Fold Operands", false, false)
+INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
+ "SI Fold Operands", false, false)
char SIFoldOperands::ID = 0;
@@ -140,7 +132,7 @@ static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
MachineInstr *MI, unsigned OpNo,
MachineOperand *OpToFold,
const SIInstrInfo *TII) {
- if (!TII->isOperandLegal(MI, OpNo, OpToFold)) {
+ if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
// Special case for v_mac_f32_e64 if we are trying to fold into src2
unsigned Opc = MI->getOpcode();
@@ -167,7 +159,7 @@ static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
// see if this makes it possible to fold.
unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
- bool CanCommute = TII->findCommutedOpIndices(MI, CommuteIdx0, CommuteIdx1);
+ bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);
if (CanCommute) {
if (CommuteIdx0 == OpNo)
@@ -185,10 +177,10 @@ static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
return false;
if (!CanCommute ||
- !TII->commuteInstruction(MI, false, CommuteIdx0, CommuteIdx1))
+ !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
return false;
- if (!TII->isOperandLegal(MI, OpNo, OpToFold))
+ if (!TII->isOperandLegal(*MI, OpNo, OpToFold))
return false;
}
@@ -301,9 +293,13 @@ static void foldOperand(MachineOperand &OpToFold, MachineInstr *UseMI,
}
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
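+  // Folding is an optimization rather than a correctness requirement, so
+  // honor optnone and opt-bisect via skipFunction().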
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+
MachineRegisterInfo &MRI = MF.getRegInfo();
- const SIInstrInfo *TII =
- static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ const SIInstrInfo *TII = ST.getInstrInfo();
const SIRegisterInfo &TRI = TII->getRegisterInfo();
for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
diff --git a/lib/Target/AMDGPU/SIFrameLowering.cpp b/lib/Target/AMDGPU/SIFrameLowering.cpp
index 7d20509c464d..03b11f0fd38d 100644
--- a/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -11,6 +11,8 @@
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
+#include "AMDGPUSubtarget.h"
+
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -21,24 +23,13 @@ using namespace llvm;
static bool hasOnlySGPRSpills(const SIMachineFunctionInfo *FuncInfo,
const MachineFrameInfo *FrameInfo) {
- if (!FuncInfo->hasSpilledSGPRs())
- return false;
-
- if (FuncInfo->hasSpilledVGPRs())
- return false;
-
- for (int I = FrameInfo->getObjectIndexBegin(),
- E = FrameInfo->getObjectIndexEnd(); I != E; ++I) {
- if (!FrameInfo->isSpillSlotObjectIndex(I))
- return false;
- }
-
- return true;
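+  // hasNonSpillStackObjects is recorded during argument lowering, replacing
+  // the per-frame-index scan above.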
+ return FuncInfo->hasSpilledSGPRs() &&
+ (!FuncInfo->hasSpilledVGPRs() && !FuncInfo->hasNonSpillStackObjects());
}
static ArrayRef<MCPhysReg> getAllSGPR128() {
- return makeArrayRef(AMDGPU::SReg_128RegClass.begin(),
- AMDGPU::SReg_128RegClass.getNumRegs());
+ return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
+ AMDGPU::SGPR_128RegClass.getNumRegs());
}
static ArrayRef<MCPhysReg> getAllSGPRs() {
@@ -48,6 +39,12 @@ static ArrayRef<MCPhysReg> getAllSGPRs() {
void SIFrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
+  // Emit the debugger prologue if the "amdgpu-debugger-emit-prologue"
+  // attribute was specified.
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+ if (ST.debuggerEmitPrologue())
+ emitDebuggerPrologue(MF, MBB);
+
if (!MF.getFrameInfo()->hasStackObjects())
return;
@@ -63,10 +60,10 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
if (hasOnlySGPRSpills(MFI, MF.getFrameInfo()))
return;
- const SIInstrInfo *TII =
- static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ const SIInstrInfo *TII = ST.getInstrInfo();
const SIRegisterInfo *TRI = &TII->getRegisterInfo();
- const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ MachineBasicBlock::iterator I = MBB.begin();
// We need to insert initialization of the scratch resource descriptor.
unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
@@ -84,6 +81,46 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
}
+ if (MFI->hasFlatScratchInit()) {
+ // We don't need this if we only have spills since there is no user facing
+ // scratch.
+
+ // TODO: If we know we don't have flat instructions earlier, we can omit
+ // this from the input registers.
+ //
+ // TODO: We only need to know if we access scratch space through a flat
+ // pointer. Because we only detect if flat instructions are used at all,
+ // this will be used more often than necessary on VI.
+
+ // Debug location must be unknown since the first debug location is used to
+ // determine the end of the prologue.
+ DebugLoc DL;
+
+ unsigned FlatScratchInitReg
+ = TRI->getPreloadedValue(MF, SIRegisterInfo::FLAT_SCRATCH_INIT);
+
+ MRI.addLiveIn(FlatScratchInitReg);
+ MBB.addLiveIn(FlatScratchInitReg);
+
+ // Copy the size in bytes.
+ unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::FLAT_SCR_LO)
+ .addReg(FlatScrInitHi, RegState::Kill);
+
+ unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
+
+ // Add wave offset in bytes to private base offset.
+ // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
+ .addReg(FlatScrInitLo)
+ .addReg(ScratchWaveOffsetReg);
+
+ // Convert offset to 256-byte units.
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
+ .addReg(FlatScrInitLo, RegState::Kill)
+ .addImm(8);
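+    // (8 == log2(256), matching the 256-byte units noted above.)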
+ }
+
// If we reserved the original input registers, we don't need to copy to the
// reserved registers.
if (ScratchRsrcReg == PreloadedPrivateBufferReg) {
@@ -96,7 +133,6 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
// We added live-ins during argument lowering, but since they were not used
// they were deleted. We're adding the uses now, so add them back.
- MachineRegisterInfo &MRI = MF.getRegInfo();
MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
@@ -137,15 +173,28 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
if (ScratchWaveOffsetReg == TRI->reservedPrivateSegmentWaveByteOffsetReg(MF)) {
MachineRegisterInfo &MRI = MF.getRegInfo();
- // Skip the last 2 elements because the last one is reserved for VCC, and
- // this is the 2nd to last element already.
unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();
- for (MCPhysReg Reg : getAllSGPRs().drop_back(6).slice(NumPreloaded)) {
+
+  // We need to drop registers from the end of the list that we cannot use
+  // for the scratch wave offset.
+  // + 2 because s102 and s103 do not exist on VI.
+  // + 2 for vcc
+  // + 2 for xnack_mask
+  // + 2 for flat_scratch
+  // + 4 for registers reserved for scratch resource register
+  // + 1 for register reserved for scratch wave offset. (By excluding this
+  //     register from the list to consider, it means that when this
+  //     register is being used for the scratch wave offset and there
+  //     are no other free SGPRs, then the value will stay in this register.)
+ // ----
+ // 13
+ for (MCPhysReg Reg : getAllSGPRs().drop_back(13).slice(NumPreloaded)) {
// Pick the first unallocated SGPR. Be careful not to pick an alias of the
// scratch descriptor, since we haven’t added its uses yet.
if (!MRI.isPhysRegUsed(Reg)) {
- assert(MRI.isAllocatable(Reg) &&
- !TRI->isSubRegisterEq(ScratchRsrcReg, Reg));
+ if (!MRI.isAllocatable(Reg) ||
+ TRI->isSubRegisterEq(ScratchRsrcReg, Reg))
+ continue;
MRI.replaceRegWith(ScratchWaveOffsetReg, Reg);
ScratchWaveOffsetReg = Reg;
@@ -160,7 +209,6 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
assert(!TRI->isSubRegister(ScratchRsrcReg, ScratchWaveOffsetReg));
const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
- MachineBasicBlock::iterator I = MBB.begin();
DebugLoc DL;
if (PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
@@ -223,6 +271,11 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
}
}
+void SIFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+
+}
+
void SIFrameLowering::processFunctionBeforeFrameFinalized(
MachineFunction &MF,
RegScavenger *RS) const {
@@ -243,3 +296,44 @@ void SIFrameLowering::processFunctionBeforeFrameFinalized(
RS->addScavengingFrameIndex(ScavengeFI);
}
}
+
+void SIFrameLowering::emitDebuggerPrologue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+ const SIInstrInfo *TII = ST.getInstrInfo();
+ const SIRegisterInfo *TRI = &TII->getRegisterInfo();
+ const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+
+ MachineBasicBlock::iterator I = MBB.begin();
+ DebugLoc DL;
+
+ // For each dimension:
+ for (unsigned i = 0; i < 3; ++i) {
+ // Get work group ID SGPR, and make it live-in again.
+ unsigned WorkGroupIDSGPR = MFI->getWorkGroupIDSGPR(i);
+ MF.getRegInfo().addLiveIn(WorkGroupIDSGPR);
+ MBB.addLiveIn(WorkGroupIDSGPR);
+
+ // Since SGPRs are spilled into VGPRs, copy work group ID SGPR to VGPR in
+ // order to spill it to scratch.
+ unsigned WorkGroupIDVGPR =
+ MF.getRegInfo().createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), WorkGroupIDVGPR)
+ .addReg(WorkGroupIDSGPR);
+
+ // Spill work group ID.
+ int WorkGroupIDObjectIdx = MFI->getDebuggerWorkGroupIDStackObjectIndex(i);
+ TII->storeRegToStackSlot(MBB, I, WorkGroupIDVGPR, false,
+ WorkGroupIDObjectIdx, &AMDGPU::VGPR_32RegClass, TRI);
+
+ // Get work item ID VGPR, and make it live-in again.
+ unsigned WorkItemIDVGPR = MFI->getWorkItemIDVGPR(i);
+ MF.getRegInfo().addLiveIn(WorkItemIDVGPR);
+ MBB.addLiveIn(WorkItemIDVGPR);
+
+ // Spill work item ID.
+ int WorkItemIDObjectIdx = MFI->getDebuggerWorkItemIDStackObjectIndex(i);
+ TII->storeRegToStackSlot(MBB, I, WorkItemIDVGPR, false,
+ WorkItemIDObjectIdx, &AMDGPU::VGPR_32RegClass, TRI);
+ }
+}
diff --git a/lib/Target/AMDGPU/SIFrameLowering.h b/lib/Target/AMDGPU/SIFrameLowering.h
index a9152fd8b2aa..37417d098f31 100644
--- a/lib/Target/AMDGPU/SIFrameLowering.h
+++ b/lib/Target/AMDGPU/SIFrameLowering.h
@@ -23,10 +23,16 @@ public:
void emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const override;
+ void emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const override;
void processFunctionBeforeFrameFinalized(
MachineFunction &MF,
RegScavenger *RS = nullptr) const override;
+
+private:
+ /// \brief Emits debugger prologue.
+ void emitDebuggerPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const;
};
}
diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index 544867513d9c..51241cf0a432 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -18,33 +18,46 @@
#include <cmath>
#endif
-#include "SIISelLowering.h"
#include "AMDGPU.h"
-#include "AMDGPUDiagnosticInfoUnsupported.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
+#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
-#include "llvm/ADT/SmallString.h"
using namespace llvm;
-SITargetLowering::SITargetLowering(TargetMachine &TM,
- const AMDGPUSubtarget &STI)
+// -amdgpu-fast-fdiv - Command line option to enable faster 2.5 ulp fdiv.
+static cl::opt<bool> EnableAMDGPUFastFDIV(
+ "amdgpu-fast-fdiv",
+ cl::desc("Enable faster 2.5 ulp fdiv"),
+ cl::init(false));
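+// Off by default; enable with -amdgpu-fast-fdiv on llc, or through -mllvm
+// when invoking the compiler driver.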
+
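+// Shader calling conventions use the helper below to claim an SGPR for the
+// scratch wave byte offset when none is preloaded; see LowerFormalArguments.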
+static unsigned findFirstFreeSGPR(CCState &CCInfo) {
+ unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
+ for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
+ if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
+ return AMDGPU::SGPR0 + Reg;
+ }
+ }
+ llvm_unreachable("Cannot allocate sgpr");
+}
+
+SITargetLowering::SITargetLowering(const TargetMachine &TM,
+ const SISubtarget &STI)
: AMDGPUTargetLowering(TM, STI) {
addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
- addRegisterClass(MVT::v32i8, &AMDGPU::SReg_256RegClass);
- addRegisterClass(MVT::v64i8, &AMDGPU::SReg_512RegClass);
-
addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
@@ -66,34 +79,25 @@ SITargetLowering::SITargetLowering(TargetMachine &TM,
computeRegisterProperties(STI.getRegisterInfo());
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);
-
- setOperationAction(ISD::ADD, MVT::i32, Legal);
- setOperationAction(ISD::ADDC, MVT::i32, Legal);
- setOperationAction(ISD::ADDE, MVT::i32, Legal);
- setOperationAction(ISD::SUBC, MVT::i32, Legal);
- setOperationAction(ISD::SUBE, MVT::i32, Legal);
-
- setOperationAction(ISD::FSIN, MVT::f32, Custom);
- setOperationAction(ISD::FCOS, MVT::f32, Custom);
-
- setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
- setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
-
// We need to custom lower vector stores from local memory
+ setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
+ setOperationAction(ISD::LOAD, MVT::i1, Custom);
+ setOperationAction(ISD::STORE, MVT::v2i32, Custom);
+ setOperationAction(ISD::STORE, MVT::v4i32, Custom);
setOperationAction(ISD::STORE, MVT::v8i32, Custom);
setOperationAction(ISD::STORE, MVT::v16i32, Custom);
-
setOperationAction(ISD::STORE, MVT::i1, Custom);
- setOperationAction(ISD::STORE, MVT::v4i32, Custom);
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+ setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
+ setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
+ setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);
+
+ setOperationAction(ISD::SELECT, MVT::i1, Promote);
setOperationAction(ISD::SELECT, MVT::i64, Custom);
setOperationAction(ISD::SELECT, MVT::f64, Promote);
AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);
@@ -102,109 +106,39 @@ SITargetLowering::SITargetLowering(TargetMachine &TM,
setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
+ setOperationAction(ISD::SETCC, MVT::i1, Promote);
setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
- setOperationAction(ISD::BSWAP, MVT::i32, Legal);
- setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
+ setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
+ setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
-
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
-
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
-
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);
- setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
- setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v16i8, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
- setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
setOperationAction(ISD::BRCOND, MVT::Other, Custom);
-
- for (MVT VT : MVT::integer_valuetypes()) {
- if (VT == MVT::i64)
- continue;
-
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
-
- setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
- setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
- setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);
-
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
- }
-
- for (MVT VT : MVT::integer_vector_valuetypes()) {
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i16, Expand);
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v16i16, Expand);
- }
-
- for (MVT VT : MVT::fp_valuetypes())
- setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
-
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
-
- setTruncStoreAction(MVT::i64, MVT::i32, Expand);
- setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
- setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
- setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
-
-
- setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);
-
- setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
- setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);
-
- setOperationAction(ISD::LOAD, MVT::i1, Custom);
-
- setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
- AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);
-
- setOperationAction(ISD::STORE, MVT::v2i64, Promote);
- AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);
-
- setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);
-
- setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
- setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
- setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
-
- // These should use UDIVREM, so set them to expand
- setOperationAction(ISD::UDIV, MVT::i64, Expand);
- setOperationAction(ISD::UREM, MVT::i64, Expand);
-
- setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
- setOperationAction(ISD::SELECT, MVT::i1, Promote);
-
- setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
-
-
- setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i1, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i64, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f64, Expand);
// We only support LOAD/STORE and vector manipulation ops for vectors
// with > 4 elements.
for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32, MVT::v2i64, MVT::v2f64}) {
for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
- switch(Op) {
+ switch (Op) {
case ISD::LOAD:
case ISD::STORE:
case ISD::BUILD_VECTOR:
@@ -241,13 +175,46 @@ SITargetLowering::SITargetLowering(TargetMachine &TM,
AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
}
- if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);
+
+ // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
+ // and output demarshalling
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
+
+  // We can't return success/failure, only the old value,
+  // so let LLVM add the comparison.
+ setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);
+
+ if (getSubtarget()->hasFlatAddressSpace()) {
+ setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
+ setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
+ }
+
+ setOperationAction(ISD::BSWAP, MVT::i32, Legal);
+ setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
+
+ // On SI this is s_memtime and s_memrealtime on VI.
+ setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
+ setOperationAction(ISD::TRAP, MVT::Other, Custom);
+
+ setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
+ setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
+
+ if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) {
setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
setOperationAction(ISD::FCEIL, MVT::f64, Legal);
setOperationAction(ISD::FRINT, MVT::f64, Legal);
}
setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
+
+ setOperationAction(ISD::FSIN, MVT::f32, Custom);
+ setOperationAction(ISD::FCOS, MVT::f32, Custom);
setOperationAction(ISD::FDIV, MVT::f32, Custom);
setOperationAction(ISD::FDIV, MVT::f64, Custom);
@@ -263,6 +230,7 @@ SITargetLowering::SITargetLowering(TargetMachine &TM,
setTargetDAGCombine(ISD::AND);
setTargetDAGCombine(ISD::OR);
setTargetDAGCombine(ISD::UINT_TO_FP);
+ setTargetDAGCombine(ISD::FCANONICALIZE);
// All memory operations. Some folding on the pointer operand is done to help
// matching the constant offsets in the addressing modes.
@@ -287,10 +255,33 @@ SITargetLowering::SITargetLowering(TargetMachine &TM,
setSchedulingPreference(Sched::RegPressure);
}
+const SISubtarget *SITargetLowering::getSubtarget() const {
+ return static_cast<const SISubtarget *>(Subtarget);
+}
+
//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//
+bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
+ const CallInst &CI,
+ unsigned IntrID) const {
+ switch (IntrID) {
+ case Intrinsic::amdgcn_atomic_inc:
+ case Intrinsic::amdgcn_atomic_dec:
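+    // These atomics both read and write memory through their pointer
+    // operand, so describe them to the DAG as chained intrinsics with
+    // readMem and writeMem set.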
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::getVT(CI.getType());
+ Info.ptrVal = CI.getOperand(0);
+ Info.align = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = true;
+ return true;
+ default:
+ return false;
+ }
+}
+
bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &,
EVT) const {
// SI has some legal vector types, but no legal vector operations. Say no
@@ -348,7 +339,7 @@ bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
switch (AS) {
case AMDGPUAS::GLOBAL_ADDRESS: {
- if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
     // Assume that we will use FLAT for all global memory accesses
// on VI.
// FIXME: This assumption is currently wrong. On VI we still use
@@ -376,16 +367,16 @@ bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
if (DL.getTypeStoreSize(Ty) < 4)
return isLegalMUBUFAddressingMode(AM);
- if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
+ if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
// SMRD instructions have an 8-bit, dword offset on SI.
if (!isUInt<8>(AM.BaseOffs / 4))
return false;
- } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) {
+ } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
// On CI+, this can also be a 32-bit literal constant offset. If it fits
// in 8-bits, it can use a smaller encoding.
if (!isUInt<32>(AM.BaseOffs / 4))
return false;
- } else if (Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ } else if (Subtarget->getGeneration() == SISubtarget::VOLCANIC_ISLANDS) {
// On VI, these use the SMEM format and the offset is 20-bit in bytes.
if (!isUInt<20>(AM.BaseOffs))
return false;
@@ -402,7 +393,6 @@ bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
}
case AMDGPUAS::PRIVATE_ADDRESS:
- case AMDGPUAS::UNKNOWN_ADDRESS_SPACE:
return isLegalMUBUFAddressingMode(AM);
case AMDGPUAS::LOCAL_ADDRESS:
@@ -423,6 +413,12 @@ bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
return false;
}
case AMDGPUAS::FLAT_ADDRESS:
+ case AMDGPUAS::UNKNOWN_ADDRESS_SPACE:
+ // For an unknown address space, this usually means that this is for some
+ // reason being used for pure arithmetic, and not based on some addressing
+ // computation. We don't have instructions that compute pointers with any
+ // addressing modes, so treat them as having no offset like flat
+ // instructions.
return isLegalFlatAddressingMode(AM);
default:
@@ -442,24 +438,30 @@ bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
if (!VT.isSimple() || VT == MVT::Other)
return false;
- // TODO - CI+ supports unaligned memory accesses, but this requires driver
- // support.
-
- // XXX - The only mention I see of this in the ISA manual is for LDS direct
- // reads the "byte address and must be dword aligned". Is it also true for the
- // normal loads and stores?
- if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS) {
+ if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
+ AddrSpace == AMDGPUAS::REGION_ADDRESS) {
// ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
// aligned, 8 byte access in a single operation using ds_read2/write2_b32
// with adjacent offsets.
bool AlignedBy4 = (Align % 4 == 0);
if (IsFast)
*IsFast = AlignedBy4;
+
return AlignedBy4;
}
+ if (Subtarget->hasUnalignedBufferAccess()) {
+    // If we have a uniform constant load, it still requires using a slow
+ // buffer instruction if unaligned.
+ if (IsFast) {
+ *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS) ?
+ (Align % 4 == 0) : true;
+ }
+
+ return true;
+ }
+
// Smaller than dword value must be aligned.
- // FIXME: This should be allowed on CI+
if (VT.bitsLT(MVT::i32))
return false;
@@ -500,21 +502,22 @@ static bool isFlatGlobalAddrSpace(unsigned AS) {
bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
unsigned DestAS) const {
- return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
+ return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
}
-
bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
const MemSDNode *MemNode = cast<MemSDNode>(N);
const Value *Ptr = MemNode->getMemOperand()->getValue();
// UndefValue means this is a load of a kernel input. These are uniform.
- // Sometimes LDS instructions have constant pointers
- if (isa<UndefValue>(Ptr) || isa<Argument>(Ptr) || isa<Constant>(Ptr) ||
- isa<GlobalValue>(Ptr))
+ // Sometimes LDS instructions have constant pointers.
+ // If Ptr is null, then that means this mem operand contains a
+ // PseudoSourceValue like GOT.
+ if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
+ isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
return true;
- const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
+ const Instruction *I = dyn_cast<Instruction>(Ptr);
return I && I->getMetadata("amdgpu.uniform");
}
@@ -528,29 +531,42 @@ SITargetLowering::getPreferredVectorAction(EVT VT) const {
bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const {
- const SIInstrInfo *TII =
- static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
+ const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
return TII->isInlineConstant(Imm);
}
-SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
- SDLoc SL, SDValue Chain,
- unsigned Offset, bool Signed) const {
+bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
+
+ // SimplifySetCC uses this function to determine whether or not it should
+ // create setcc with i1 operands. We don't have instructions for i1 setcc.
+ if (VT == MVT::i1 && Op == ISD::SETCC)
+ return false;
+
+ return TargetLowering::isTypeDesirableForOp(Op, VT);
+}
+
+SDValue SITargetLowering::LowerParameterPtr(SelectionDAG &DAG,
+ const SDLoc &SL, SDValue Chain,
+ unsigned Offset) const {
const DataLayout &DL = DAG.getDataLayout();
MachineFunction &MF = DAG.getMachineFunction();
- const SIRegisterInfo *TRI =
- static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
+ const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
unsigned InputPtrReg = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
- Type *Ty = VT.getTypeForEVT(*DAG.getContext());
-
MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
- PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
MRI.getLiveInVirtReg(InputPtrReg), PtrVT);
- SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
- DAG.getConstant(Offset, SL, PtrVT));
+ return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
+ DAG.getConstant(Offset, SL, PtrVT));
+}
+SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
+ const SDLoc &SL, SDValue Chain,
+ unsigned Offset, bool Signed) const {
+ const DataLayout &DL = DAG.getDataLayout();
+ Type *Ty = VT.getTypeForEVT(*DAG.getContext());
+ MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
+ PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
SDValue PtrOffset = DAG.getUNDEF(PtrVT);
MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
@@ -560,34 +576,35 @@ SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
if (MemVT.isFloatingPoint())
ExtTy = ISD::EXTLOAD;
- return DAG.getLoad(ISD::UNINDEXED, ExtTy,
- VT, SL, Chain, Ptr, PtrOffset, PtrInfo, MemVT,
- false, // isVolatile
- true, // isNonTemporal
- true, // isInvariant
- Align); // Alignment
+ SDValue Ptr = LowerParameterPtr(DAG, SL, Chain, Offset);
+ return DAG.getLoad(ISD::UNINDEXED, ExtTy, VT, SL, Chain, Ptr, PtrOffset,
+ PtrInfo, MemVT, Align, MachineMemOperand::MONonTemporal |
+ MachineMemOperand::MOInvariant);
}
SDValue SITargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
- const SIRegisterInfo *TRI =
- static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+ const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
MachineFunction &MF = DAG.getMachineFunction();
FunctionType *FType = MF.getFunction()->getFunctionType();
SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
- const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
- if (Subtarget->isAmdHsaOS() && Info->getShaderType() != ShaderType::COMPUTE) {
+ if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
const Function *Fn = MF.getFunction();
- DiagnosticInfoUnsupported NoGraphicsHSA(*Fn, "non-compute shaders with HSA");
+ DiagnosticInfoUnsupported NoGraphicsHSA(
+ *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
DAG.getContext()->diagnose(NoGraphicsHSA);
- return SDValue();
+ return DAG.getEntryNode();
}
- // FIXME: We currently assume all calling conventions are kernels.
+  // Create stack objects that are used for emitting the debugger prologue
+  // if the "amdgpu-debugger-emit-prologue" attribute was specified.
+ if (ST.debuggerEmitPrologue())
+ createDebuggerPrologueStackObjects(MF);
SmallVector<ISD::InputArg, 16> Splits;
BitVector Skipped(Ins.size());
@@ -596,7 +613,7 @@ SDValue SITargetLowering::LowerFormalArguments(
const ISD::InputArg &Arg = Ins[i];
// First check if it's a PS input addr
- if (Info->getShaderType() == ShaderType::PIXEL && !Arg.Flags.isInReg() &&
+ if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
!Arg.Flags.isByVal() && PSInputNum <= 15) {
if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
@@ -613,25 +630,26 @@ SDValue SITargetLowering::LowerFormalArguments(
++PSInputNum;
}
- // Second split vertices into their elements
- if (Info->getShaderType() != ShaderType::COMPUTE && Arg.VT.isVector()) {
- ISD::InputArg NewArg = Arg;
- NewArg.Flags.setSplit();
- NewArg.VT = Arg.VT.getVectorElementType();
-
- // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
- // three or five element vertex only needs three or five registers,
- // NOT four or eight.
- Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
- unsigned NumElements = ParamType->getVectorNumElements();
-
- for (unsigned j = 0; j != NumElements; ++j) {
- Splits.push_back(NewArg);
- NewArg.PartOffset += NewArg.VT.getStoreSize();
+ if (AMDGPU::isShader(CallConv)) {
+ // Second split vertices into their elements
+ if (Arg.VT.isVector()) {
+ ISD::InputArg NewArg = Arg;
+ NewArg.Flags.setSplit();
+ NewArg.VT = Arg.VT.getVectorElementType();
+
+ // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
+ // three or five element vertex only needs three or five registers,
+ // NOT four or eight.
+ Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
+ unsigned NumElements = ParamType->getVectorNumElements();
+
+ for (unsigned j = 0; j != NumElements; ++j) {
+ Splits.push_back(NewArg);
+ NewArg.PartOffset += NewArg.VT.getStoreSize();
+ }
+ } else {
+ Splits.push_back(Arg);
}
-
- } else if (Info->getShaderType() != ShaderType::COMPUTE) {
- Splits.push_back(Arg);
}
}
@@ -651,19 +669,27 @@ SDValue SITargetLowering::LowerFormalArguments(
// - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
// - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
// enabled too.
- if (Info->getShaderType() == ShaderType::PIXEL &&
+ if (CallConv == CallingConv::AMDGPU_PS &&
((Info->getPSInputAddr() & 0x7F) == 0 ||
- ((Info->getPSInputAddr() & 0xF) == 0 &&
- Info->isPSInputAllocated(11)))) {
+ ((Info->getPSInputAddr() & 0xF) == 0 && Info->isPSInputAllocated(11)))) {
CCInfo.AllocateReg(AMDGPU::VGPR0);
CCInfo.AllocateReg(AMDGPU::VGPR1);
Info->markPSInputAllocated(0);
Info->PSInputEna |= 1;
}
- if (Info->getShaderType() == ShaderType::COMPUTE) {
+ if (!AMDGPU::isShader(CallConv)) {
getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins,
Splits);
+
+ assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
+ } else {
+ assert(!Info->hasPrivateSegmentBuffer() && !Info->hasDispatchPtr() &&
+ !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
+ !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
+ !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
+ !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
+ !Info->hasWorkItemIDZ());
}
// FIXME: How should these inputs interact with inreg / custom SGPR inputs?
@@ -679,12 +705,24 @@ SDValue SITargetLowering::LowerFormalArguments(
CCInfo.AllocateReg(DispatchPtrReg);
}
+ if (Info->hasQueuePtr()) {
+ unsigned QueuePtrReg = Info->addQueuePtr(*TRI);
+ MF.addLiveIn(QueuePtrReg, &AMDGPU::SReg_64RegClass);
+ CCInfo.AllocateReg(QueuePtrReg);
+ }
+
if (Info->hasKernargSegmentPtr()) {
unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI);
MF.addLiveIn(InputPtrReg, &AMDGPU::SReg_64RegClass);
CCInfo.AllocateReg(InputPtrReg);
}
+ if (Info->hasFlatScratchInit()) {
+ unsigned FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
+ MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SReg_64RegClass);
+ CCInfo.AllocateReg(FlatScratchInitReg);
+ }
+
AnalyzeFormalArguments(CCInfo, Splits);
SmallVector<SDValue, 16> Chains;
@@ -713,7 +751,7 @@ SDValue SITargetLowering::LowerFormalArguments(
auto *ParamTy =
dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
- if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
+ if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
// On SI local pointers are just offsets into LDS, so they are always
// less than 16-bits. On CI and newer they could potentially be
@@ -765,7 +803,7 @@ SDValue SITargetLowering::LowerFormalArguments(
NumElements = Arg.VT.getVectorNumElements() - NumElements;
Regs.append(NumElements, DAG.getUNDEF(VT));
- InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, DL, Arg.VT, Regs));
+ InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
continue;
}
@@ -780,8 +818,7 @@ SDValue SITargetLowering::LowerFormalArguments(
unsigned Reg = Info->addWorkGroupIDX();
MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
CCInfo.AllocateReg(Reg);
- } else
- llvm_unreachable("work group id x is always enabled");
+ }
if (Info->hasWorkGroupIDY()) {
unsigned Reg = Info->addWorkGroupIDY();
@@ -803,8 +840,13 @@ SDValue SITargetLowering::LowerFormalArguments(
if (Info->hasPrivateSegmentWaveByteOffset()) {
// Scratch wave offset passed in system SGPR.
- unsigned PrivateSegmentWaveByteOffsetReg
- = Info->addPrivateSegmentWaveByteOffset();
+ unsigned PrivateSegmentWaveByteOffsetReg;
+
+ if (AMDGPU::isShader(CallConv)) {
+ PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
+ Info->setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
+ } else
+ PrivateSegmentWaveByteOffsetReg = Info->addPrivateSegmentWaveByteOffset();
MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
@@ -812,8 +854,11 @@ SDValue SITargetLowering::LowerFormalArguments(
// Now that we've figured out where the scratch register inputs are, see if
// should reserve the arguments and use them directly.
-
bool HasStackObjects = MF.getFrameInfo()->hasStackObjects();
+ // Record that we know we have non-spill stack objects so we don't need to
+ // check all stack objects later.
+ if (HasStackObjects)
+ Info->setHasNonSpillStackObjects(true);
if (ST.isAmdHsaOS()) {
// TODO: Assume we will spill without optimizations.
@@ -866,8 +911,7 @@ SDValue SITargetLowering::LowerFormalArguments(
unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X);
MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
CCInfo.AllocateReg(Reg);
- } else
- llvm_unreachable("workitem id x should always be enabled");
+ }
if (Info->hasWorkItemIDY()) {
unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y);
@@ -887,16 +931,16 @@ SDValue SITargetLowering::LowerFormalArguments(
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}
-SDValue SITargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const {
+SDValue
+SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &DL, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
- if (Info->getShaderType() == ShaderType::COMPUTE)
+ if (!AMDGPU::isShader(CallConv))
return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
OutVals, DL, DAG);
@@ -975,17 +1019,131 @@ SDValue SITargetLowering::LowerReturn(SDValue Chain,
if (Flag.getNode())
RetOps.push_back(Flag);
- return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, RetOps);
+ unsigned Opc = Info->returnsVoid() ? AMDGPUISD::ENDPGM : AMDGPUISD::RETURN;
+ return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}
-MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
- MachineInstr * MI, MachineBasicBlock * BB) const {
-
- switch (MI->getOpcode()) {
+unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
+ SelectionDAG &DAG) const {
+ unsigned Reg = StringSwitch<unsigned>(RegName)
+ .Case("m0", AMDGPU::M0)
+ .Case("exec", AMDGPU::EXEC)
+ .Case("exec_lo", AMDGPU::EXEC_LO)
+ .Case("exec_hi", AMDGPU::EXEC_HI)
+ .Case("flat_scratch", AMDGPU::FLAT_SCR)
+ .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
+ .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
+ .Default(AMDGPU::NoRegister);
+
+ if (Reg == AMDGPU::NoRegister) {
+ report_fatal_error(Twine("invalid register name \""
+ + StringRef(RegName) + "\"."));
+  }
+
+ if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
+ Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
+ report_fatal_error(Twine("invalid register \""
+ + StringRef(RegName) + "\" for subtarget."));
+ }
+
+ switch (Reg) {
+ case AMDGPU::M0:
+ case AMDGPU::EXEC_LO:
+ case AMDGPU::EXEC_HI:
+ case AMDGPU::FLAT_SCR_LO:
+ case AMDGPU::FLAT_SCR_HI:
+ if (VT.getSizeInBits() == 32)
+ return Reg;
+ break;
+ case AMDGPU::EXEC:
+ case AMDGPU::FLAT_SCR:
+ if (VT.getSizeInBits() == 64)
+ return Reg;
+ break;
default:
- return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
+ llvm_unreachable("missing register type checking");
+ }
+
+ report_fatal_error(Twine("invalid type for register \""
+ + StringRef(RegName) + "\"."));
+}
+
+// If kill is not the last instruction, split the block so kill is always a
+// proper terminator.
+MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
+ MachineBasicBlock *BB) const {
+ const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
+
+ MachineBasicBlock::iterator SplitPoint(&MI);
+ ++SplitPoint;
+
+ if (SplitPoint == BB->end()) {
+ // Don't bother with a new block.
+ MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR));
+ return BB;
+ }
+
+ MachineFunction *MF = BB->getParent();
+ MachineBasicBlock *SplitBB
+ = MF->CreateMachineBasicBlock(BB->getBasicBlock());
+
+ // Fix the block phi references to point to the new block for the defs in the
+ // second piece of the block.
+ for (MachineBasicBlock *Succ : BB->successors()) {
+ for (MachineInstr &MI : *Succ) {
+ if (!MI.isPHI())
+ break;
+
+ for (unsigned I = 2, E = MI.getNumOperands(); I != E; I += 2) {
+ MachineOperand &FromBB = MI.getOperand(I);
+ if (BB == FromBB.getMBB()) {
+ FromBB.setMBB(SplitBB);
+ break;
+ }
+ }
+ }
+ }
+
+ MF->insert(++MachineFunction::iterator(BB), SplitBB);
+ SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());
+
+ SplitBB->transferSuccessors(BB);
+ BB->addSuccessor(SplitBB);
+
+ MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR));
+ return SplitBB;
+}
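The PHI fixup loop above depends on the machine-level PHI operand layout: operand 0 is the def, followed by (value, block) pairs, so the block operands sit at indices 2, 4, 6, and so on. A small stand-alone model of the indices it visits (operand names are made up):

#include <cassert>
#include <string>
#include <vector>

int main() {
  // Model of "%def = PHI %v0, %bb0, %v1, %bb1".
  std::vector<std::string> Ops = {"def", "v0", "bb0", "v1", "bb1"};
  for (unsigned I = 2, E = Ops.size(); I != E; I += 2)
    assert(Ops[I].substr(0, 2) == "bb"); // only block operands are visited
}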
+
+MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
+ MachineInstr &MI, MachineBasicBlock *BB) const {
+ switch (MI.getOpcode()) {
+ case AMDGPU::SI_INIT_M0: {
+ const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
+ BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
+ TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
+ .addOperand(MI.getOperand(0));
+ MI.eraseFromParent();
+ break;
+ }
case AMDGPU::BRANCH:
return BB;
+ case AMDGPU::GET_GROUPSTATICSIZE: {
+ const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
+
+ MachineFunction *MF = BB->getParent();
+ SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
+ DebugLoc DL = MI.getDebugLoc();
+ BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOVK_I32))
+ .addOperand(MI.getOperand(0))
+ .addImm(MFI->LDSSize);
+ MI.eraseFromParent();
+ return BB;
+ }
+ case AMDGPU::SI_KILL:
+ return splitKillBlock(MI, BB);
+ default:
+ return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
}
return BB;
}
@@ -1072,6 +1230,7 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
return LowerTrig(Op, DAG);
case ISD::SELECT: return LowerSELECT(Op, DAG);
case ISD::FDIV: return LowerFDIV(Op, DAG);
+ case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
case ISD::STORE: return LowerSTORE(Op, DAG);
case ISD::GlobalAddress: {
MachineFunction &MF = DAG.getMachineFunction();
@@ -1079,7 +1238,10 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
return LowerGlobalAddress(MFI, Op, DAG);
}
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+ case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
+ case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
+ case ISD::TRAP: return lowerTRAP(Op, DAG);
}
return SDValue();
}
@@ -1106,25 +1268,78 @@ SDValue SITargetLowering::LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const {
FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Op);
unsigned FrameIndex = FINode->getIndex();
- // A FrameIndex node represents a 32-bit offset into scratch memory. If
- // the high bit of a frame index offset were to be set, this would mean
- // that it represented an offset of ~2GB * 64 = ~128GB from the start of the
- // scratch buffer, with 64 being the number of threads per wave.
+ // A FrameIndex node represents a 32-bit offset into scratch memory. If the
+ // high bit of a frame index offset were to be set, this would mean that it
+ // represented an offset of ~2GB * 64 = ~128GB from the start of the scratch
+ // buffer, with 64 being the number of threads per wave.
//
- // If we know the machine uses less than 128GB of scratch, then we can
- // amrk the high bit of the FrameIndex node as known zero,
- // which is important, because it means in most situations we can
- // prove that values derived from FrameIndex nodes are non-negative.
- // This enables us to take advantage of more addressing modes when
- // accessing scratch buffers, since for scratch reads/writes, the register
- // offset must always be positive.
+ // The maximum private allocation for the entire GPU is 4G, and we are
+ // concerned with the largest the index could ever be for an individual
+ // workitem. This will occur with the minimum dispatch size. If a program
+ // requires more, the dispatch size will be reduced.
+ //
+ // With this limit, we can mark the high bit of the FrameIndex node as known
+ // zero, which is important, because it means in most situations we can prove
+ // that values derived from FrameIndex nodes are non-negative. This enables us
+ // to take advantage of more addressing modes when accessing scratch buffers,
+ // since for scratch reads/writes, the register offset must always be
+ // positive.
- SDValue TFI = DAG.getTargetFrameIndex(FrameIndex, MVT::i32);
- if (Subtarget->enableHugeScratchBuffer())
- return TFI;
+ uint64_t MaxGPUAlloc = UINT64_C(4) * 1024 * 1024 * 1024;
+ // XXX - It is unclear if partial dispatch works. Assume it works at half wave
+ // granularity. It is probably a full wave.
+ uint64_t MinGranularity = 32;
+
+ unsigned KnownBits = Log2_64(MaxGPUAlloc / MinGranularity);
+ EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), KnownBits);
+
+ SDValue TFI = DAG.getTargetFrameIndex(FrameIndex, MVT::i32);
return DAG.getNode(ISD::AssertZext, SL, MVT::i32, TFI,
- DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), 31)));
+ DAG.getValueType(ExtVT));
+}
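The AssertZext width above follows from plain arithmetic; a host-side sketch using the 4 GiB / 32-workitem figures from the comment (not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t MaxGPUAlloc = UINT64_C(4) * 1024 * 1024 * 1024; // 2^32 bytes
  uint64_t MinGranularity = 32;                            // 2^5 workitems
  // Log2_64(2^32 / 2^5) == 27, so a per-workitem frame index fits in 27
  // bits and the AssertZext above marks bits 27..31 as known zero.
  unsigned KnownBits = 0;
  for (uint64_t V = MaxGPUAlloc / MinGranularity; V > 1; V >>= 1)
    ++KnownBits;
  assert(KnownBits == 27);
}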
+
+bool SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
+ if (Intr->getOpcode() != ISD::INTRINSIC_W_CHAIN)
+ return false;
+
+ switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
+ default: return false;
+ case AMDGPUIntrinsic::amdgcn_if:
+ case AMDGPUIntrinsic::amdgcn_else:
+ case AMDGPUIntrinsic::amdgcn_break:
+ case AMDGPUIntrinsic::amdgcn_if_break:
+ case AMDGPUIntrinsic::amdgcn_else_break:
+ case AMDGPUIntrinsic::amdgcn_loop:
+ case AMDGPUIntrinsic::amdgcn_end_cf:
+ return true;
+ }
+}
+
+void SITargetLowering::createDebuggerPrologueStackObjects(
+ MachineFunction &MF) const {
+ // Create stack objects that are used for emitting debugger prologue.
+ //
+ // Debugger prologue writes work group IDs and work item IDs to scratch memory
+ // at fixed location in the following format:
+ // offset 0: work group ID x
+ // offset 4: work group ID y
+ // offset 8: work group ID z
+ // offset 16: work item ID x
+ // offset 20: work item ID y
+ // offset 24: work item ID z
+ SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
+ int ObjectIdx = 0;
+
+ // For each dimension:
+ for (unsigned i = 0; i < 3; ++i) {
+ // Create fixed stack object for work group ID.
+ ObjectIdx = MF.getFrameInfo()->CreateFixedObject(4, i * 4, true);
+ Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
+ // Create fixed stack object for work item ID.
+ ObjectIdx = MF.getFrameInfo()->CreateFixedObject(4, i * 4 + 16, true);
+ Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
+ }
}
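For reference, the fixed scratch layout the loop above produces, restated as stand-alone helpers (function names are ours; only the i * 4 and i * 4 + 16 arithmetic is taken from the code, which leaves a 4-byte gap at offset 12):

// Hypothetical helpers mirroring the offset arithmetic above.
constexpr unsigned debuggerWorkGroupIDOffset(unsigned Dim) { return Dim * 4; }
constexpr unsigned debuggerWorkItemIDOffset(unsigned Dim) { return Dim * 4 + 16; }

static_assert(debuggerWorkGroupIDOffset(2) == 8, "work group ID z");
static_assert(debuggerWorkItemIDOffset(0) == 16, "work item ID x");
static_assert(debuggerWorkItemIDOffset(2) == 24, "work item ID z");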
/// This transforms the control flow intrinsics to get the branch destination as
@@ -1137,13 +1352,11 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
SDNode *Intr = BRCOND.getOperand(1).getNode();
SDValue Target = BRCOND.getOperand(2);
SDNode *BR = nullptr;
+ SDNode *SetCC = nullptr;
if (Intr->getOpcode() == ISD::SETCC) {
// As long as we negate the condition everything is fine
- SDNode *SetCC = Intr;
- assert(SetCC->getConstantOperandVal(1) == 1);
- assert(cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
- ISD::SETNE);
+ SetCC = Intr;
Intr = SetCC->getOperand(0).getNode();
} else {
@@ -1152,7 +1365,15 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
Target = BR->getOperand(1);
}
- assert(Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN);
+ if (!isCFIntrinsic(Intr)) {
+ // This is a uniform branch so we don't need to legalize.
+ return BRCOND;
+ }
+
+ assert(!SetCC ||
+ (SetCC->getConstantOperandVal(1) == 1 &&
+ cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
+ ISD::SETNE));
// Build the result and
ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());
@@ -1204,37 +1425,185 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
return Chain;
}
+SDValue SITargetLowering::getSegmentAperture(unsigned AS,
+ SelectionDAG &DAG) const {
+ SDLoc SL;
+ MachineFunction &MF = DAG.getMachineFunction();
+ SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
+ unsigned UserSGPR = Info->getQueuePtrUserSGPR();
+ assert(UserSGPR != AMDGPU::NoRegister);
+
+ SDValue QueuePtr = CreateLiveInRegister(
+ DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
+
+ // Offset into amd_queue_t for group_segment_aperture_base_hi /
+ // private_segment_aperture_base_hi.
+ uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
+
+ SDValue Ptr = DAG.getNode(ISD::ADD, SL, MVT::i64, QueuePtr,
+ DAG.getConstant(StructOffset, SL, MVT::i64));
+
+ // TODO: Use custom target PseudoSourceValue.
+ // TODO: We should use the value from the IR intrinsic call, but it might
+ // not be available, and it is unclear how we would get it.
+ Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
+ AMDGPUAS::CONSTANT_ADDRESS));
+
+ MachinePointerInfo PtrInfo(V, StructOffset);
+ return DAG.getLoad(MVT::i32, SL, QueuePtr.getValue(1), Ptr, PtrInfo,
+ MinAlign(64, StructOffset),
+ MachineMemOperand::MOInvariant);
+}
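A struct-level sketch of what the generated load reads; the amd_queue_t field names below are hypothetical stand-ins, only the 0x40/0x44 offsets come from the code above:

#include <cstddef>
#include <cstdint>

struct AmdQueueSketch {
  char Pad[0x40];                        // leading amd_queue_t fields, elided
  uint32_t GroupSegmentApertureBaseHi;   // 0x40: LOCAL_ADDRESS aperture
  uint32_t PrivateSegmentApertureBaseHi; // 0x44: PRIVATE_ADDRESS aperture
};

static_assert(offsetof(AmdQueueSketch, GroupSegmentApertureBaseHi) == 0x40, "");
static_assert(offsetof(AmdQueueSketch, PrivateSegmentApertureBaseHi) == 0x44, "");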
+
+SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc SL(Op);
+ const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
+
+ SDValue Src = ASC->getOperand(0);
+
+ // FIXME: Really support non-0 null pointers.
+ SDValue SegmentNullPtr = DAG.getConstant(-1, SL, MVT::i32);
+ SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
+
+ // flat -> local/private
+ if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
+ if (ASC->getDestAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
+ ASC->getDestAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
+ SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
+ SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
+
+ return DAG.getNode(ISD::SELECT, SL, MVT::i32,
+ NonNull, Ptr, SegmentNullPtr);
+ }
+ }
+
+ // local/private -> flat
+ if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
+ if (ASC->getSrcAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
+ ASC->getSrcAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
+ SDValue NonNull
+ = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);
+
+ SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), DAG);
+ SDValue CvtPtr
+ = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
+
+ return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
+ DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
+ FlatNullPtr);
+ }
+ }
+
+ // global <-> flat are no-ops and never emitted.
+
+ const MachineFunction &MF = DAG.getMachineFunction();
+ DiagnosticInfoUnsupported InvalidAddrSpaceCast(
+ *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
+ DAG.getContext()->diagnose(InvalidAddrSpaceCast);
+
+ return DAG.getUNDEF(ASC->getValueType(0));
+}
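A minimal host-side sketch of the segment-to-flat arithmetic selected above, assuming a given 32-bit aperture value; the all-ones segment null comes from the FIXME above:

#include <cassert>
#include <cstdint>

uint64_t segmentToFlat(uint32_t SegPtr, uint32_t ApertureHi) {
  if (SegPtr == UINT32_C(0xFFFFFFFF)) // segment null pointer (-1)
    return 0;                         // flat null pointer
  // build_vector {Src, Aperture} bitcast to i64: Src is the low half.
  return (uint64_t(ApertureHi) << 32) | SegPtr;
}

int main() {
  assert(segmentToFlat(0xFFFFFFFF, 0x1000) == 0);
  assert(segmentToFlat(0x100, 0x1000) == UINT64_C(0x0000100000000100));
}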
+
+static bool shouldEmitGOTReloc(const GlobalValue *GV,
+ const TargetMachine &TM) {
+ return GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
+ !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
+}
+
+bool
+SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
+ // We can fold offsets for anything that doesn't require a GOT relocation.
+ return GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
+ !shouldEmitGOTReloc(GA->getGlobal(), getTargetMachine());
+}
+
+static SDValue buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
+ SDLoc DL, unsigned Offset, EVT PtrVT,
+ unsigned GAFlags = SIInstrInfo::MO_NONE) {
+ // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
+ // lowered to the following code sequence:
+ // s_getpc_b64 s[0:1]
+ // s_add_u32 s0, s0, $symbol
+ // s_addc_u32 s1, s1, 0
+ //
+ // s_getpc_b64 returns the address of the s_add_u32 instruction and then
+ // a fixup or relocation is emitted to replace $symbol with a literal
+ // constant, which is a pc-relative offset from the encoding of the $symbol
+ // operand to the global variable.
+ //
+ // What we want here is an offset from the value returned by s_getpc
+ // (which is the address of the s_add_u32 instruction) to the global
+ // variable, but since the encoding of $symbol starts 4 bytes after the start
+ // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
+ // small. This requires us to add 4 to the global variable offset in order to
+ // compute the correct address.
+ SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
+ GAFlags);
+ return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, GA);
+}
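The +4 in the offset can be sanity-checked with plain arithmetic; the addresses here are made up:

#include <cassert>
#include <cstdint>

int main() {
  int64_t GetPC = 0x1000; // s_getpc_b64 result: address of the s_add_u32
  int64_t GV    = 0x2000; // the global variable
  // The pc-relative fixup is resolved against the $symbol encoding, which
  // sits at GetPC + 4; the node above therefore carries Offset + 4.
  int64_t Literal = (GV + 4) - (GetPC + 4);
  assert(GetPC + Literal == GV);
}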
+
SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
SDValue Op,
SelectionDAG &DAG) const {
GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
- if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
+ if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS &&
+ GSD->getAddressSpace() != AMDGPUAS::GLOBAL_ADDRESS)
return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
SDLoc DL(GSD);
const GlobalValue *GV = GSD->getGlobal();
- MVT PtrVT = getPointerTy(DAG.getDataLayout(), GSD->getAddressSpace());
+ EVT PtrVT = Op.getValueType();
+
+ if (!shouldEmitGOTReloc(GV, getTargetMachine()))
+ return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
+
+ SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
+ SIInstrInfo::MO_GOTPCREL);
+
+ Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
+ PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
+ const DataLayout &DataLayout = DAG.getDataLayout();
+ unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
+ // FIXME: Use a PseudoSourceValue once those can be assigned an address space.
+ MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
- SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32);
- return DAG.getNode(AMDGPUISD::CONST_DATA_PTR, DL, PtrVT, GA);
+ return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
+ MachineMemOperand::MOInvariant);
}
-SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, SDLoc DL,
- SDValue V) const {
+SDValue SITargetLowering::lowerTRAP(SDValue Op,
+ SelectionDAG &DAG) const {
+ const MachineFunction &MF = DAG.getMachineFunction();
+ DiagnosticInfoUnsupported NoTrap(*MF.getFunction(),
+ "trap handler not supported",
+ Op.getDebugLoc(),
+ DS_Warning);
+ DAG.getContext()->diagnose(NoTrap);
+
+ // Emit s_endpgm.
+
+ // FIXME: This should really be selected to s_trap, but that requires
+ // setting up the trap handler for it to do anything.
+ return DAG.getNode(AMDGPUISD::ENDPGM, SDLoc(Op), MVT::Other,
+ Op.getOperand(0));
+}
+
+SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
+ const SDLoc &DL, SDValue V) const {
+ // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
+ // the destination register.
+ //
// We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
// so we will end up with redundant moves to m0.
//
- // We can't use S_MOV_B32, because there is no way to specify m0 as the
- // destination register.
- //
- // We have to use them both. Machine cse will combine all the S_MOV_B32
- // instructions and the register coalescer eliminate the extra copies.
- SDNode *M0 = DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, V.getValueType(), V);
- return DAG.getCopyToReg(Chain, DL, DAG.getRegister(AMDGPU::M0, MVT::i32),
- SDValue(M0, 0), SDValue()); // Glue
- // A Null SDValue creates
- // a glue result.
+ // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
+
+ // A Null SDValue creates a glue result.
+ SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
+ V, Chain);
+ return SDValue(M0, 0);
}
SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
@@ -1249,12 +1618,27 @@ SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
DAG.getValueType(VT));
}
+static SDValue emitNonHSAIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) {
+ DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
+ "non-hsa intrinsic with hsa target",
+ DL.getDebugLoc());
+ DAG.getContext()->diagnose(BadIntrin);
+ return DAG.getUNDEF(VT);
+}
+
+static SDValue emitRemovedIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) {
+ DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
+ "intrinsic not supported on subtarget",
+ DL.getDebugLoc());
+ DAG.getContext()->diagnose(BadIntrin);
+ return DAG.getUNDEF(VT);
+}
+
SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
auto MFI = MF.getInfo<SIMachineFunctionInfo>();
- const SIRegisterInfo *TRI =
- static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
+ const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
EVT VT = Op.getValueType();
SDLoc DL(Op);
@@ -1264,62 +1648,134 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
switch (IntrinsicID) {
case Intrinsic::amdgcn_dispatch_ptr:
+ case Intrinsic::amdgcn_queue_ptr: {
if (!Subtarget->isAmdHsaOS()) {
- DiagnosticInfoUnsupported BadIntrin(*MF.getFunction(),
- "hsa intrinsic without hsa target");
+ DiagnosticInfoUnsupported BadIntrin(
+ *MF.getFunction(), "unsupported hsa intrinsic without hsa target",
+ DL.getDebugLoc());
DAG.getContext()->diagnose(BadIntrin);
return DAG.getUNDEF(VT);
}
+ auto Reg = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
+ SIRegisterInfo::DISPATCH_PTR : SIRegisterInfo::QUEUE_PTR;
return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass,
- TRI->getPreloadedValue(MF, SIRegisterInfo::DISPATCH_PTR), VT);
-
+ TRI->getPreloadedValue(MF, Reg), VT);
+ }
+ case Intrinsic::amdgcn_implicitarg_ptr: {
+ unsigned offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
+ return LowerParameterPtr(DAG, DL, DAG.getEntryNode(), offset);
+ }
+ case Intrinsic::amdgcn_kernarg_segment_ptr: {
+ unsigned Reg
+ = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
+ return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
+ }
+ case Intrinsic::amdgcn_rcp:
+ return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
+ case Intrinsic::amdgcn_rsq:
+ case AMDGPUIntrinsic::AMDGPU_rsq: // Legacy name
+ return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
+ case Intrinsic::amdgcn_rsq_legacy: {
+ if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
+ return emitRemovedIntrinsicError(DAG, DL, VT);
+
+ return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
+ }
+ case Intrinsic::amdgcn_rsq_clamp: {
+ if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
+ return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
+
+ Type *Type = VT.getTypeForEVT(*DAG.getContext());
+ APFloat Max = APFloat::getLargest(Type->getFltSemantics());
+ APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
+
+ SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
+ SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
+ DAG.getConstantFP(Max, DL, VT));
+ return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
+ DAG.getConstantFP(Min, DL, VT));
+ }
case Intrinsic::r600_read_ngroups_x:
+ if (Subtarget->isAmdHsaOS())
+ return emitNonHSAIntrinsicError(DAG, DL, VT);
+
return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
SI::KernelInputOffsets::NGROUPS_X, false);
case Intrinsic::r600_read_ngroups_y:
+ if (Subtarget->isAmdHsaOS())
+ return emitNonHSAIntrinsicError(DAG, DL, VT);
+
return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
SI::KernelInputOffsets::NGROUPS_Y, false);
case Intrinsic::r600_read_ngroups_z:
+ if (Subtarget->isAmdHsaOS())
+ return emitNonHSAIntrinsicError(DAG, DL, VT);
+
return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
SI::KernelInputOffsets::NGROUPS_Z, false);
case Intrinsic::r600_read_global_size_x:
+ if (Subtarget->isAmdHsaOS())
+ return emitNonHSAIntrinsicError(DAG, DL, VT);
+
return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
SI::KernelInputOffsets::GLOBAL_SIZE_X, false);
case Intrinsic::r600_read_global_size_y:
+ if (Subtarget->isAmdHsaOS())
+ return emitNonHSAIntrinsicError(DAG, DL, VT);
+
return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
SI::KernelInputOffsets::GLOBAL_SIZE_Y, false);
case Intrinsic::r600_read_global_size_z:
+ if (Subtarget->isAmdHsaOS())
+ return emitNonHSAIntrinsicError(DAG, DL, VT);
+
return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
SI::KernelInputOffsets::GLOBAL_SIZE_Z, false);
case Intrinsic::r600_read_local_size_x:
+ if (Subtarget->isAmdHsaOS())
+ return emitNonHSAIntrinsicError(DAG, DL, VT);
+
return lowerImplicitZextParam(DAG, Op, MVT::i16,
SI::KernelInputOffsets::LOCAL_SIZE_X);
case Intrinsic::r600_read_local_size_y:
+ if (Subtarget->isAmdHsaOS())
+ return emitNonHSAIntrinsicError(DAG, DL, VT);
+
return lowerImplicitZextParam(DAG, Op, MVT::i16,
SI::KernelInputOffsets::LOCAL_SIZE_Y);
case Intrinsic::r600_read_local_size_z:
+ if (Subtarget->isAmdHsaOS())
+ return emitNonHSAIntrinsicError(DAG, DL, VT);
+
return lowerImplicitZextParam(DAG, Op, MVT::i16,
SI::KernelInputOffsets::LOCAL_SIZE_Z);
- case Intrinsic::AMDGPU_read_workdim:
+ case Intrinsic::amdgcn_read_workdim:
+ case AMDGPUIntrinsic::AMDGPU_read_workdim: // Legacy name.
// Really only 2 bits.
return lowerImplicitZextParam(DAG, Op, MVT::i8,
getImplicitParameterOffset(MFI, GRID_DIM));
+ case Intrinsic::amdgcn_workgroup_id_x:
case Intrinsic::r600_read_tgid_x:
return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_X), VT);
+ case Intrinsic::amdgcn_workgroup_id_y:
case Intrinsic::r600_read_tgid_y:
return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Y), VT);
+ case Intrinsic::amdgcn_workgroup_id_z:
case Intrinsic::r600_read_tgid_z:
return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Z), VT);
+ case Intrinsic::amdgcn_workitem_id_x:
case Intrinsic::r600_read_tidig_x:
return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X), VT);
+ case Intrinsic::amdgcn_workitem_id_y:
case Intrinsic::r600_read_tidig_y:
return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y), VT);
+ case Intrinsic::amdgcn_workitem_id_z:
case Intrinsic::r600_read_tidig_z:
return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z), VT);
@@ -1336,24 +1792,12 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
Op->getVTList(), Ops, VT, MMO);
}
- case AMDGPUIntrinsic::SI_sample:
- return LowerSampleIntrinsic(AMDGPUISD::SAMPLE, Op, DAG);
- case AMDGPUIntrinsic::SI_sampleb:
- return LowerSampleIntrinsic(AMDGPUISD::SAMPLEB, Op, DAG);
- case AMDGPUIntrinsic::SI_sampled:
- return LowerSampleIntrinsic(AMDGPUISD::SAMPLED, Op, DAG);
- case AMDGPUIntrinsic::SI_samplel:
- return LowerSampleIntrinsic(AMDGPUISD::SAMPLEL, Op, DAG);
case AMDGPUIntrinsic::SI_vs_load_input:
return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT,
Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
- case AMDGPUIntrinsic::AMDGPU_fract:
- case AMDGPUIntrinsic::AMDIL_fraction: // Legacy name.
- return DAG.getNode(ISD::FSUB, DL, VT, Op.getOperand(1),
- DAG.getNode(ISD::FFLOOR, DL, VT, Op.getOperand(1)));
case AMDGPUIntrinsic::SI_fs_constant: {
SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3));
SDValue Glue = M0.getValue(1);
@@ -1393,11 +1837,93 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
Glue);
}
+ case Intrinsic::amdgcn_sin:
+ return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
+
+ case Intrinsic::amdgcn_cos:
+ return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
+
+ case Intrinsic::amdgcn_log_clamp: {
+ if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
+ return SDValue();
+
+ DiagnosticInfoUnsupported BadIntrin(
+ *MF.getFunction(), "intrinsic not supported on subtarget",
+ DL.getDebugLoc());
+ DAG.getContext()->diagnose(BadIntrin);
+ return DAG.getUNDEF(VT);
+ }
+ case Intrinsic::amdgcn_ldexp:
+ return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
+ Op.getOperand(1), Op.getOperand(2));
+
+ case Intrinsic::amdgcn_fract:
+ return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
+
+ case Intrinsic::amdgcn_class:
+ return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
+ Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::amdgcn_div_fmas:
+ return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
+ Op.getOperand(4));
+
+ case Intrinsic::amdgcn_div_fixup:
+ return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+
+ case Intrinsic::amdgcn_trig_preop:
+ return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
+ Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::amdgcn_div_scale: {
+ // 3rd parameter required to be a constant.
+ const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
+ if (!Param)
+ return DAG.getUNDEF(VT);
+
+ // Translate to the operands expected by the machine instruction. The
+ // first parameter must be the same as the first instruction.
+ SDValue Numerator = Op.getOperand(1);
+ SDValue Denominator = Op.getOperand(2);
+
+ // Note this order is opposite of the machine instruction's operations,
+ // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
+ // intrinsic has the numerator as the first operand to match a normal
+ // division operation.
+
+ SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
+
+ return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
+ Denominator, Numerator);
+ }
default:
return AMDGPUTargetLowering::LowerOperation(Op, DAG);
}
}
+SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
+ SelectionDAG &DAG) const {
+ unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ switch (IntrID) {
+ case Intrinsic::amdgcn_atomic_inc:
+ case Intrinsic::amdgcn_atomic_dec: {
+ MemSDNode *M = cast<MemSDNode>(Op);
+ unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ?
+ AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC;
+ SDValue Ops[] = {
+ M->getOperand(0), // Chain
+ M->getOperand(2), // Ptr
+ M->getOperand(3) // Value
+ };
+
+ return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
+ M->getMemoryVT(), M->getMemOperand());
+ }
+ default:
+ return SDValue();
+ }
+}
+
SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
@@ -1439,6 +1965,14 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL,
Op->getVTList(), Ops, VT, MMO);
}
+ case AMDGPUIntrinsic::AMDGPU_kill: {
+ if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Op.getOperand(2))) {
+ if (!K->isNegative())
+ return Chain;
+ }
+
+ return Op;
+ }
default:
return SDValue();
}
@@ -1447,48 +1981,92 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
LoadSDNode *Load = cast<LoadSDNode>(Op);
+ ISD::LoadExtType ExtType = Load->getExtensionType();
+ EVT MemVT = Load->getMemoryVT();
- if (Op.getValueType().isVector()) {
- assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
- "Custom lowering for non-i32 vectors hasn't been implemented.");
- unsigned NumElements = Op.getValueType().getVectorNumElements();
- assert(NumElements != 2 && "v2 loads are supported for all address spaces.");
+ if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
+ assert(MemVT == MVT::i1 && "Only i1 non-extloads expected");
+ // FIXME: Copied from PPC
+ // First, load into 32 bits, then truncate to 1 bit.
- switch (Load->getAddressSpace()) {
- default: break;
- case AMDGPUAS::CONSTANT_ADDRESS:
- if (isMemOpUniform(Load))
- break;
- // Non-uniform loads will be selected to MUBUF instructions, so they
- // have the same legalization requires ments as global and private
- // loads.
- //
- // Fall-through
- case AMDGPUAS::GLOBAL_ADDRESS:
- case AMDGPUAS::PRIVATE_ADDRESS:
- if (NumElements >= 8)
- return SplitVectorLoad(Op, DAG);
-
- // v4 loads are supported for private and global memory.
- if (NumElements <= 4)
- break;
- // fall-through
- case AMDGPUAS::LOCAL_ADDRESS:
- // If properly aligned, if we split we might be able to use ds_read_b64.
+ SDValue Chain = Load->getChain();
+ SDValue BasePtr = Load->getBasePtr();
+ MachineMemOperand *MMO = Load->getMemOperand();
+
+ SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
+ BasePtr, MVT::i8, MMO);
+
+ SDValue Ops[] = {
+ DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
+ NewLD.getValue(1)
+ };
+
+ return DAG.getMergeValues(Ops, DL);
+ }
+
+ if (!MemVT.isVector())
+ return SDValue();
+
+ assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
+ "Custom lowering for non-i32 vectors hasn't been implemented.");
+
+ unsigned AS = Load->getAddressSpace();
+ if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
+ AS, Load->getAlignment())) {
+ SDValue Ops[2];
+ std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
+ return DAG.getMergeValues(Ops, DL);
+ }
+
+ unsigned NumElements = MemVT.getVectorNumElements();
+ switch (AS) {
+ case AMDGPUAS::CONSTANT_ADDRESS:
+ if (isMemOpUniform(Load))
+ return SDValue();
+ // Non-uniform loads will be selected to MUBUF instructions, so they
+ // have the same legalization requirements as global and private
+ // loads.
+ //
+ // Fall-through
+ case AMDGPUAS::GLOBAL_ADDRESS:
+ case AMDGPUAS::FLAT_ADDRESS:
+ if (NumElements > 4)
+ return SplitVectorLoad(Op, DAG);
+ // v4 loads are supported for private and global memory.
+ return SDValue();
+ case AMDGPUAS::PRIVATE_ADDRESS: {
+ // Depending on the setting of the private_element_size field in the
+ // resource descriptor, we can only make private accesses up to a certain
+ // size.
+ switch (Subtarget->getMaxPrivateElementSize()) {
+ case 4:
+ return scalarizeVectorLoad(Load, DAG);
+ case 8:
+ if (NumElements > 2)
+ return SplitVectorLoad(Op, DAG);
+ return SDValue();
+ case 16:
+ // Same as global/flat
+ if (NumElements > 4)
return SplitVectorLoad(Op, DAG);
+ return SDValue();
+ default:
+ llvm_unreachable("unsupported private_element_size");
}
}
+ case AMDGPUAS::LOCAL_ADDRESS: {
+ if (NumElements > 2)
+ return SplitVectorLoad(Op, DAG);
- return AMDGPUTargetLowering::LowerLOAD(Op, DAG);
-}
+ if (NumElements == 2)
+ return SDValue();
-SDValue SITargetLowering::LowerSampleIntrinsic(unsigned Opcode,
- const SDValue &Op,
- SelectionDAG &DAG) const {
- return DAG.getNode(Opcode, SDLoc(Op), Op.getValueType(), Op.getOperand(1),
- Op.getOperand(2),
- Op.getOperand(3),
- Op.getOperand(4));
+ // If properly aligned, splitting may let us use ds_read_b64.
+ return SplitVectorLoad(Op, DAG);
+ }
+ default:
+ return SDValue();
+ }
}
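The private-address cases above reduce to a small decision table keyed on max_private_element_size; a stand-alone restatement (the enum and function names are ours):

#include <cassert>

enum class Action { Legal, Split, Scalarize };

Action classifyPrivateVectorLoad(unsigned MaxPrivateElementSize,
                                 unsigned NumElements) {
  switch (MaxPrivateElementSize) {
  case 4:  return Action::Scalarize;                               // one dword at a time
  case 8:  return NumElements > 2 ? Action::Split : Action::Legal;
  case 16: return NumElements > 4 ? Action::Split : Action::Legal; // same as global/flat
  default: assert(0 && "unsupported private_element_size");
           return Action::Split;
  }
}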
SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
@@ -1514,7 +2092,7 @@ SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
- SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i32, Lo, Hi);
+ SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res);
}
@@ -1547,7 +2125,9 @@ SDValue SITargetLowering::LowerFastFDIV(SDValue Op, SelectionDAG &DAG) const {
}
}
- if (Unsafe) {
+ const SDNodeFlags *Flags = Op->getFlags();
+
+ if (Unsafe || Flags->hasAllowReciprocal()) {
// Turn into multiply by the reciprocal.
// x / y -> x * (1.0 / y)
SDNodeFlags Flags;
@@ -1560,45 +2140,71 @@ SDValue SITargetLowering::LowerFastFDIV(SDValue Op, SelectionDAG &DAG) const {
}
SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
- SDValue FastLowered = LowerFastFDIV(Op, DAG);
- if (FastLowered.getNode())
+ if (SDValue FastLowered = LowerFastFDIV(Op, DAG))
return FastLowered;
- // This uses v_rcp_f32 which does not handle denormals. Let this hit a
- // selection error for now rather than do something incorrect.
- if (Subtarget->hasFP32Denormals())
- return SDValue();
-
SDLoc SL(Op);
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
- SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
+ // Faster 2.5 ULP fdiv when using the -amdgpu-fast-fdiv flag.
+ if (EnableAMDGPUFastFDIV) {
+ // This does not support denormals.
+ SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
+
+ const APFloat K0Val(BitsToFloat(0x6f800000));
+ const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
+
+ const APFloat K1Val(BitsToFloat(0x2f800000));
+ const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
+
+ const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
+
+ EVT SetCCVT =
+ getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
+
+ SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
+
+ SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
+
+ // TODO: Should this propagate fast-math-flags?
- const APFloat K0Val(BitsToFloat(0x6f800000));
- const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
+ r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
- const APFloat K1Val(BitsToFloat(0x2f800000));
- const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
+ // rcp does not support denormals.
+ SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
+ SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
+
+ return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
+ }
+
+ // Generates more precise fpdiv32.
const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
- EVT SetCCVT =
- getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
+ SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
- SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
+ SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, RHS, RHS, LHS);
+ SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, LHS, RHS, LHS);
- SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
+ // Denominator is scaled to not be denormal, so using rcp is ok.
+ SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, DenominatorScaled);
- // TODO: Should this propagate fast-math-flags?
+ SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, DenominatorScaled);
- r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
+ SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f32, NegDivScale0, ApproxRcp, One);
+ SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, ApproxRcp);
- SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
+ SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, NumeratorScaled, Fma1);
- SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
+ SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, NumeratorScaled);
+ SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul);
+ SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, NumeratorScaled);
- return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
+ SDValue Scale = NumeratorScaled.getValue(1);
+ SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, Fma4, Fma1, Fma3, Scale);
+
+ return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
}
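The non-fast path above is a div_scale/fma refinement sequence; here is a host float sketch of its data flow, with plain std::fma standing in for v_div_scale/v_div_fmas/v_div_fixup and the scaling and denormal-fixup steps omitted. In the fast path, K0 = 0x6f800000 is 2^96 and K1 = 0x2f800000 is 2^-32, so an oversized denominator is pre-scaled out of v_rcp_f32's range and the 2^-32 factor is reapplied to the quotient.

#include <cassert>
#include <cmath>

float fdiv32Sketch(float LHS, float RHS) {
  float Rcp  = 1.0f / RHS;                // ApproxRcp (v_rcp_f32)
  float Fma0 = std::fma(-RHS, Rcp, 1.0f); // residual of the reciprocal
  float Fma1 = std::fma(Fma0, Rcp, Rcp);  // refined reciprocal
  float Mul  = LHS * Fma1;                // first quotient estimate
  float Fma2 = std::fma(-RHS, Mul, LHS);  // quotient residual
  float Fma3 = std::fma(Fma2, Fma1, Mul); // refined quotient
  float Fma4 = std::fma(-RHS, Fma3, LHS); // final residual
  return std::fma(Fma4, Fma1, Fma3);      // ~DIV_FMAS; DIV_FIXUP omitted
}

int main() {
  assert(std::fabs(fdiv32Sketch(1.0f, 3.0f) - 1.0f / 3.0f) < 1e-7f);
}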
SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
@@ -1635,7 +2241,7 @@ SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
SDValue Scale;
- if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) {
+ if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
// Workaround a hardware bug on SI where the condition output from div_scale
// is not usable.
@@ -1685,26 +2291,57 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
StoreSDNode *Store = cast<StoreSDNode>(Op);
EVT VT = Store->getMemoryVT();
- // These stores are legal.
- if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
- if (VT.isVector() && VT.getVectorNumElements() > 4)
- return ScalarizeVectorStore(Op, DAG);
- return SDValue();
+ if (VT == MVT::i1) {
+ return DAG.getTruncStore(Store->getChain(), DL,
+ DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
+ Store->getBasePtr(), MVT::i1, Store->getMemOperand());
}
- SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
- if (Ret.getNode())
- return Ret;
+ assert(VT.isVector() &&
+ Store->getValue().getValueType().getScalarType() == MVT::i32);
- if (VT.isVector() && VT.getVectorNumElements() >= 8)
+ unsigned AS = Store->getAddressSpace();
+ if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
+ AS, Store->getAlignment())) {
+ return expandUnalignedStore(Store, DAG);
+ }
+
+ unsigned NumElements = VT.getVectorNumElements();
+ switch (AS) {
+ case AMDGPUAS::GLOBAL_ADDRESS:
+ case AMDGPUAS::FLAT_ADDRESS:
+ if (NumElements > 4)
+ return SplitVectorStore(Op, DAG);
+ return SDValue();
+ case AMDGPUAS::PRIVATE_ADDRESS: {
+ switch (Subtarget->getMaxPrivateElementSize()) {
+ case 4:
+ return scalarizeVectorStore(Store, DAG);
+ case 8:
+ if (NumElements > 2)
+ return SplitVectorStore(Op, DAG);
+ return SDValue();
+ case 16:
+ if (NumElements > 4)
+ return SplitVectorStore(Op, DAG);
+ return SDValue();
+ default:
+ llvm_unreachable("unsupported private_element_size");
+ }
+ }
+ case AMDGPUAS::LOCAL_ADDRESS: {
+ if (NumElements > 2)
return SplitVectorStore(Op, DAG);
- if (VT == MVT::i1)
- return DAG.getTruncStore(Store->getChain(), DL,
- DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
- Store->getBasePtr(), MVT::i1, Store->getMemOperand());
+ if (NumElements == 2)
+ return Op;
- return SDValue();
+ // If properly aligned, splitting may let us use ds_write_b64.
+ return SplitVectorStore(Op, DAG);
+ }
+ default:
+ llvm_unreachable("unhandled address space");
+ }
}
SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
@@ -1727,6 +2364,33 @@ SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
}
}
+SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
+ AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
+ assert(AtomicNode->isCompareAndSwap());
+ unsigned AS = AtomicNode->getAddressSpace();
+
+ // No custom lowering required for local address space
+ if (!isFlatGlobalAddrSpace(AS))
+ return Op;
+
+ // Non-local address space requires custom lowering for atomic compare and
+ // swap: the compare value and the new value are packed into a v2i32 (or a
+ // v2i64 for the _X2 variants).
+ SDLoc DL(Op);
+ SDValue ChainIn = Op.getOperand(0);
+ SDValue Addr = Op.getOperand(1);
+ SDValue Old = Op.getOperand(2);
+ SDValue New = Op.getOperand(3);
+ EVT VT = Op.getValueType();
+ MVT SimpleVT = VT.getSimpleVT();
+ MVT VecType = MVT::getVectorVT(SimpleVT, 2);
+
+ SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
+ SDValue Ops[] = { ChainIn, Addr, NewOld };
+
+ return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
+ Ops, VT, AtomicNode->getMemOperand());
+}
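A host model of the packing above: the cmpswap data operand is a two-element vector holding the new value first and the compare value second, matching the build_vector operand order (the struct and function names are ours):

#include <cassert>
#include <cstdint>

struct CmpSwapData { uint32_t New; uint32_t Old; }; // v2i32; _X2 uses v2i64

uint32_t cmpSwapModel(uint32_t &Mem, CmpSwapData D) {
  uint32_t Prev = Mem;
  if (Mem == D.Old)
    Mem = D.New;
  return Prev; // the atomic returns the original memory value
}

int main() {
  uint32_t Mem = 7;
  assert(cmpSwapModel(Mem, {9, 7}) == 7 && Mem == 9); // compare hit
  assert(cmpSwapModel(Mem, {5, 7}) == 9 && Mem == 9); // compare miss
}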
+
//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//
@@ -1756,88 +2420,13 @@ SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
}
}
- // We are primarily trying to catch operations on illegal vector types
- // before they are expanded.
- // For scalars, we can use the more flexible method of checking masked bits
- // after legalization.
- if (!DCI.isBeforeLegalize() ||
- !SrcVT.isVector() ||
- SrcVT.getVectorElementType() != MVT::i8) {
- return SDValue();
- }
-
- assert(DCI.isBeforeLegalize() && "Unexpected legal type");
-
- // Weird sized vectors are a pain to handle, but we know 3 is really the same
- // size as 4.
- unsigned NElts = SrcVT.getVectorNumElements();
- if (!SrcVT.isSimple() && NElts != 3)
- return SDValue();
-
- // Handle v4i8 -> v4f32 extload. Replace the v4i8 with a legal i32 load to
- // prevent a mess from expanding to v4i32 and repacking.
- if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
- EVT LoadVT = getEquivalentMemType(*DAG.getContext(), SrcVT);
- EVT RegVT = getEquivalentLoadRegType(*DAG.getContext(), SrcVT);
- EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32, NElts);
- LoadSDNode *Load = cast<LoadSDNode>(Src);
-
- unsigned AS = Load->getAddressSpace();
- unsigned Align = Load->getAlignment();
- Type *Ty = LoadVT.getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
-
- // Don't try to replace the load if we have to expand it due to alignment
- // problems. Otherwise we will end up scalarizing the load, and trying to
- // repack into the vector for no real reason.
- if (Align < ABIAlignment &&
- !allowsMisalignedMemoryAccesses(LoadVT, AS, Align, nullptr)) {
- return SDValue();
- }
-
- SDValue NewLoad = DAG.getExtLoad(ISD::ZEXTLOAD, DL, RegVT,
- Load->getChain(),
- Load->getBasePtr(),
- LoadVT,
- Load->getMemOperand());
-
- // Make sure successors of the original load stay after it by updating
- // them to use the new Chain.
- DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), NewLoad.getValue(1));
-
- SmallVector<SDValue, 4> Elts;
- if (RegVT.isVector())
- DAG.ExtractVectorElements(NewLoad, Elts);
- else
- Elts.push_back(NewLoad);
-
- SmallVector<SDValue, 4> Ops;
-
- unsigned EltIdx = 0;
- for (SDValue Elt : Elts) {
- unsigned ComponentsInElt = std::min(4u, NElts - 4 * EltIdx);
- for (unsigned I = 0; I < ComponentsInElt; ++I) {
- unsigned Opc = AMDGPUISD::CVT_F32_UBYTE0 + I;
- SDValue Cvt = DAG.getNode(Opc, DL, MVT::f32, Elt);
- DCI.AddToWorklist(Cvt.getNode());
- Ops.push_back(Cvt);
- }
-
- ++EltIdx;
- }
-
- assert(Ops.size() == NElts);
-
- return DAG.getNode(ISD::BUILD_VECTOR, DL, FloatVT, Ops);
- }
-
return SDValue();
}
/// \brief Return true if the given offset Size in bytes can be folded into
/// the immediate offsets of a memory instruction for the given address space.
static bool canFoldOffset(unsigned OffsetSize, unsigned AS,
- const AMDGPUSubtarget &STI) {
+ const SISubtarget &STI) {
switch (AS) {
case AMDGPUAS::GLOBAL_ADDRESS: {
// MUBUF instructions have a 12-bit offset in bytes.
@@ -1846,7 +2435,7 @@ static bool canFoldOffset(unsigned OffsetSize, unsigned AS,
case AMDGPUAS::CONSTANT_ADDRESS: {
// SMRD instructions have an 8-bit offset in dwords on SI and
// a 20-bit offset in bytes on VI.
- if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
+ if (STI.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
return isUInt<20>(OffsetSize);
else
return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
@@ -1897,7 +2486,7 @@ SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
// If the resulting offset is too large, we can't fold it into the addressing
// mode offset.
APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
- if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *Subtarget))
+ if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *getSubtarget()))
return SDValue();
SelectionDAG &DAG = DCI.DAG;
@@ -1915,6 +2504,9 @@ SDValue SITargetLowering::performAndCombine(SDNode *N,
if (DCI.isBeforeLegalize())
return SDValue();
+ if (SDValue Base = AMDGPUTargetLowering::performAndCombine(N, DCI))
+ return Base;
+
SelectionDAG &DAG = DCI.DAG;
// (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
@@ -1970,6 +2562,36 @@ SDValue SITargetLowering::performOrCombine(SDNode *N,
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
+ EVT VT = N->getValueType(0);
+ if (VT == MVT::i64) {
+ // TODO: This could be a generic combine with a predicate for extracting the
+ // high half of an integer being free.
+
+ // (or i64:x, (zero_extend i32:y)) ->
+ // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
+ if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
+ RHS.getOpcode() != ISD::ZERO_EXTEND)
+ std::swap(LHS, RHS);
+
+ if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
+ SDValue ExtSrc = RHS.getOperand(0);
+ EVT SrcVT = ExtSrc.getValueType();
+ if (SrcVT == MVT::i32) {
+ SDLoc SL(N);
+ SDValue LowLHS, HiBits;
+ std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
+ SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
+
+ DCI.AddToWorklist(LowOr.getNode());
+ DCI.AddToWorklist(HiBits.getNode());
+
+ SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
+ LowOr, HiBits);
+ return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
+ }
+ }
+ }
+
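The i64 OR rewrite above only has to touch the low 32 bits; checked on the host:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t X = UINT64_C(0x123456789abcdef0);
  uint32_t Y = 0x0f0f0f0f;
  uint64_t Lo = uint32_t(X) | Y; // LowOr
  uint64_t Hi = X >> 32;         // HiBits, passed through unchanged
  assert(((Hi << 32) | Lo) == (X | uint64_t(Y)));
}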
// or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
@@ -2005,9 +2627,52 @@ SDValue SITargetLowering::performClassCombine(SDNode *N,
return DAG.getConstant(0, SDLoc(N), MVT::i1);
}
+ if (N->getOperand(0).isUndef())
+ return DAG.getUNDEF(MVT::i1);
+
return SDValue();
}
+// Constant fold canonicalize.
+SDValue SITargetLowering::performFCanonicalizeCombine(
+ SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
+ if (!CFP)
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ const APFloat &C = CFP->getValueAPF();
+
+ // Flush denormals to 0 if not enabled.
+ if (C.isDenormal()) {
+ EVT VT = N->getValueType(0);
+ if (VT == MVT::f32 && !Subtarget->hasFP32Denormals())
+ return DAG.getConstantFP(0.0, SDLoc(N), VT);
+
+ if (VT == MVT::f64 && !Subtarget->hasFP64Denormals())
+ return DAG.getConstantFP(0.0, SDLoc(N), VT);
+ }
+
+ if (C.isNaN()) {
+ EVT VT = N->getValueType(0);
+ APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
+ if (C.isSignaling()) {
+ // Quiet a signaling NaN.
+ return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
+ }
+
+ // Make sure it is the canonical NaN bitpattern.
+ //
+ // TODO: Can we use -1 as the canonical NaN value since it's an inline
+ // immediate?
+ if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
+ return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
+ }
+
+ return SDValue(CFP, 0);
+}
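A host approximation of the constant folds above, for the f32 case with denormals disabled; std::fpclassify and quiet_NaN stand in for the APFloat queries:

#include <cmath>
#include <limits>

float canonicalizeConstSketch(float C) {
  if (std::fpclassify(C) == FP_SUBNORMAL)
    return 0.0f; // flush denormals when the mode does not support them
  if (std::isnan(C))
    return std::numeric_limits<float>::quiet_NaN(); // canonical quiet NaN
  return C;
}

int main() { return canonicalizeConstSketch(1.5f) == 1.5f ? 0 : 1; }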
+
static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
switch (Opc) {
case ISD::FMAXNUM:
@@ -2027,8 +2692,64 @@ static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
}
}
-SDValue SITargetLowering::performMin3Max3Combine(SDNode *N,
- DAGCombinerInfo &DCI) const {
+static SDValue performIntMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
+ SDValue Op0, SDValue Op1, bool Signed) {
+ ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
+ if (!K1)
+ return SDValue();
+
+ ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
+ if (!K0)
+ return SDValue();
+
+ if (Signed) {
+ if (K0->getAPIntValue().sge(K1->getAPIntValue()))
+ return SDValue();
+ } else {
+ if (K0->getAPIntValue().uge(K1->getAPIntValue()))
+ return SDValue();
+ }
+
+ EVT VT = K0->getValueType(0);
+ return DAG.getNode(Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3, SL, VT,
+ Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
+}
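A worked instance of the integer combine above: an unsigned clamp umin(umax(x, K0), K1) with K0 < K1 is exactly the median of {x, K0, K1}, which is what v_med3_u32 computes (the constants here are arbitrary):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <initializer_list>

uint32_t med3(uint32_t A, uint32_t B, uint32_t C) {
  return std::max(std::min(A, B), std::min(std::max(A, B), C));
}

int main() {
  const uint32_t K0 = 16, K1 = 235; // K0 < K1, as the combine requires
  for (uint32_t X : {0u, 16u, 100u, 235u, 4000000000u})
    assert(med3(X, K0, K1) == std::min(std::max(X, K0), K1));
}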
+
+static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
+ if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions())
+ return true;
+
+ return DAG.isKnownNeverNaN(Op);
+}
+
+static SDValue performFPMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
+ SDValue Op0, SDValue Op1) {
+ ConstantFPSDNode *K1 = dyn_cast<ConstantFPSDNode>(Op1);
+ if (!K1)
+ return SDValue();
+
+ ConstantFPSDNode *K0 = dyn_cast<ConstantFPSDNode>(Op0.getOperand(1));
+ if (!K0)
+ return SDValue();
+
+ // Ordered >= (although NaN inputs should have folded away by now).
+ APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
+ if (Cmp == APFloat::cmpGreaterThan)
+ return SDValue();
+
+ // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
+ // signaling NaN gives a quiet NaN. The quiet NaN input to the min would then
+ // give the other result, which is different from med3 with a NaN input.
+ SDValue Var = Op0.getOperand(0);
+ if (!isKnownNeverSNan(DAG, Var))
+ return SDValue();
+
+ return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
+ Var, SDValue(K0, 0), SDValue(K1, 0));
+}
+
+SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
unsigned Opc = N->getOpcode();
@@ -2038,26 +2759,51 @@ SDValue SITargetLowering::performMin3Max3Combine(SDNode *N,
// Only do this if the inner op has one use since this will just increase
// register pressure for no benefit.
- // max(max(a, b), c)
- if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
- SDLoc DL(N);
- return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
- DL,
- N->getValueType(0),
- Op0.getOperand(0),
- Op0.getOperand(1),
- Op1);
+ if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY) {
+ // max(max(a, b), c) -> max3(a, b, c)
+ // min(min(a, b), c) -> min3(a, b, c)
+ if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
+ SDLoc DL(N);
+ return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
+ DL,
+ N->getValueType(0),
+ Op0.getOperand(0),
+ Op0.getOperand(1),
+ Op1);
+ }
+
+ // Try commuted.
+ // max(a, max(b, c)) -> max3(a, b, c)
+ // min(a, min(b, c)) -> min3(a, b, c)
+ if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
+ SDLoc DL(N);
+ return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
+ DL,
+ N->getValueType(0),
+ Op0,
+ Op1.getOperand(0),
+ Op1.getOperand(1));
+ }
}
- // max(a, max(b, c))
- if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
- SDLoc DL(N);
- return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
- DL,
- N->getValueType(0),
- Op0,
- Op1.getOperand(0),
- Op1.getOperand(1));
+ // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
+ if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
+ if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
+ return Med3;
+ }
+
+ if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
+ if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
+ return Med3;
+ }
+
+ // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
+ if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
+ (Opc == AMDGPUISD::FMIN_LEGACY &&
+ Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
+ N->getValueType(0) == MVT::f32 && Op0.hasOneUse()) {
+ if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
+ return Res;
}
return SDValue();
@@ -2104,16 +2850,18 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
case ISD::SETCC:
return performSetCCCombine(N, DCI);
- case ISD::FMAXNUM: // TODO: What about fmax_legacy?
+ case ISD::FMAXNUM:
case ISD::FMINNUM:
case ISD::SMAX:
case ISD::SMIN:
case ISD::UMAX:
- case ISD::UMIN: {
+ case ISD::UMIN:
+ case AMDGPUISD::FMIN_LEGACY:
+ case AMDGPUISD::FMAX_LEGACY: {
if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
N->getValueType(0) != MVT::f64 &&
getTargetMachine().getOptLevel() > CodeGenOpt::None)
- return performMin3Max3Combine(N, DCI);
+ return performMinMaxCombine(N, DCI);
break;
}
@@ -2122,8 +2870,23 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
case AMDGPUISD::CVT_F32_UBYTE2:
case AMDGPUISD::CVT_F32_UBYTE3: {
unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
-
SDValue Src = N->getOperand(0);
+
+ // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
+ if (Src.getOpcode() == ISD::SRL) {
+ // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
+ // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
+ // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
+
+ if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src.getOperand(1))) {
+ unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
+ if (SrcOffset < 32 && SrcOffset % 8 == 0) {
+ return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, DL,
+ MVT::f32, Src.getOperand(0));
+ }
+ }
+ }
+
APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
APInt KnownZero, KnownOne;
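The srl fold above is plain byte-index arithmetic: byte N of (x srl S) is byte N + S/8 of x, provided S is a multiple of 8 and the combined index stays inside the 32-bit source. A quick standalone check of that algebra (not LLVM code):

#include <cassert>
#include <cstdint>

static uint8_t byteOf(uint32_t X, unsigned N) { return (X >> (8 * N)) & 0xFF; }

int main() {
  const uint32_t X = 0xAABBCCDDu;
  for (unsigned N = 0; N < 4; ++N)
    for (unsigned S = 0; S < 32; S += 8)
      if (N + S / 8 < 4) // combined offset must stay within 32 bits
        assert(byteOf(X >> S, N) == byteOf(X, N + S / 8));
  return 0;
}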
@@ -2238,7 +3001,9 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
case ISD::ATOMIC_LOAD_MIN:
case ISD::ATOMIC_LOAD_MAX:
case ISD::ATOMIC_LOAD_UMIN:
- case ISD::ATOMIC_LOAD_UMAX: { // TODO: Target mem intrinsics.
+ case ISD::ATOMIC_LOAD_UMAX:
+ case AMDGPUISD::ATOMIC_INC:
+ case AMDGPUISD::ATOMIC_DEC: { // TODO: Target mem intrinsics.
if (DCI.isBeforeLegalize())
break;
@@ -2264,6 +3029,19 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
return performOrCombine(N, DCI);
case AMDGPUISD::FP_CLASS:
return performClassCombine(N, DCI);
+ case ISD::FCANONICALIZE:
+ return performFCanonicalizeCombine(N, DCI);
+ case AMDGPUISD::FRACT:
+ case AMDGPUISD::RCP:
+ case AMDGPUISD::RSQ:
+ case AMDGPUISD::RSQ_LEGACY:
+ case AMDGPUISD::RSQ_CLAMP:
+ case AMDGPUISD::LDEXP: {
+ SDValue Src = N->getOperand(0);
+ if (Src.isUndef())
+ return Src;
+ break;
+ }
}
return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}
@@ -2273,9 +3051,7 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
/// Returns -1 if it isn't an immediate, 0 if it's an inline immediate,
/// and the immediate value if it's a literal immediate
int32_t SITargetLowering::analyzeImmediate(const SDNode *N) const {
-
- const SIInstrInfo *TII =
- static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
+ const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
if (const ConstantSDNode *Node = dyn_cast<ConstantSDNode>(N)) {
if (TII->isInlineConstant(Node->getAPIntValue()))
@@ -2314,7 +3090,8 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
SelectionDAG &DAG) const {
SDNode *Users[4] = { };
unsigned Lane = 0;
- unsigned OldDmask = Node->getConstantOperandVal(0);
+ unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3;
+ unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
unsigned NewDmask = 0;
// Try to figure out the used register components
@@ -2354,8 +3131,9 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
// Adjust the writemask in the node
std::vector<SDValue> Ops;
+ Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
- Ops.insert(Ops.end(), Node->op_begin() + 1, Node->op_end());
+ Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops);
// If we only got one lane, replace it with a copy
@@ -2421,14 +3199,15 @@ void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
/// \brief Fold the instructions after selecting them.
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
SelectionDAG &DAG) const {
- const SIInstrInfo *TII =
- static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
+ const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
+ unsigned Opcode = Node->getMachineOpcode();
- if (TII->isMIMG(Node->getMachineOpcode()))
+ if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
+ !TII->isGather4(Opcode))
adjustWritemask(Node, DAG);
- if (Node->getMachineOpcode() == AMDGPU::INSERT_SUBREG ||
- Node->getMachineOpcode() == AMDGPU::REG_SEQUENCE) {
+ if (Opcode == AMDGPU::INSERT_SUBREG ||
+ Opcode == AMDGPU::REG_SEQUENCE) {
legalizeTargetIndependentNode(Node, DAG);
return Node;
}
@@ -2437,22 +3216,22 @@ SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
/// \brief Assign the register class depending on the number of
/// bits set in the writemask
-void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
+void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
SDNode *Node) const {
- const SIInstrInfo *TII =
- static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
+ const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
- MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
- if (TII->isVOP3(MI->getOpcode())) {
+ if (TII->isVOP3(MI.getOpcode())) {
// Make sure constant bus requirements are respected.
TII->legalizeOperandsVOP3(MRI, MI);
return;
}
- if (TII->isMIMG(*MI)) {
- unsigned VReg = MI->getOperand(0).getReg();
- unsigned Writemask = MI->getOperand(1).getImm();
+ if (TII->isMIMG(MI)) {
+ unsigned VReg = MI.getOperand(0).getReg();
+ unsigned DmaskIdx = MI.getNumOperands() == 12 ? 3 : 4;
+ unsigned Writemask = MI.getOperand(DmaskIdx).getImm();
unsigned BitsSet = 0;
for (unsigned i = 0; i < 4; ++i)
BitsSet += Writemask & (1 << i) ? 1 : 0;
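The loop above is a population count over the 4-bit dmask; each enabled component needs one 32-bit lane, which the following switch maps to VReg_32/64/96/128. A rough illustration outside LLVM:

#include <bitset>
#include <cstdio>

int main() {
  for (unsigned Dmask = 1; Dmask < 16; ++Dmask) {
    // Same computation as the BitsSet loop above, via std::bitset.
    unsigned BitsSet = unsigned(std::bitset<4>(Dmask).count());
    std::printf("dmask=0x%X -> %u component(s) -> VReg_%u\n", Dmask, BitsSet,
                32 * BitsSet);
  }
  return 0;
}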
@@ -2465,34 +3244,58 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
case 3: RC = &AMDGPU::VReg_96RegClass; break;
}
- unsigned NewOpcode = TII->getMaskedMIMGOp(MI->getOpcode(), BitsSet);
- MI->setDesc(TII->get(NewOpcode));
+ unsigned NewOpcode = TII->getMaskedMIMGOp(MI.getOpcode(), BitsSet);
+ MI.setDesc(TII->get(NewOpcode));
MRI.setRegClass(VReg, RC);
return;
}
// Replace unused atomics with the no return version.
- int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI->getOpcode());
+ int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
if (NoRetAtomicOp != -1) {
if (!Node->hasAnyUseOfValue(0)) {
- MI->setDesc(TII->get(NoRetAtomicOp));
- MI->RemoveOperand(0);
+ MI.setDesc(TII->get(NoRetAtomicOp));
+ MI.RemoveOperand(0);
+ return;
}
+ // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
+ // instruction, because the return type of these instructions is a vec2 of
+ // the memory type, so it can be tied to the input operand.
+ // This means these instructions always have a use, so we need to add a
+ // special case to check if the atomic has only one extract_subreg use,
+ // which itself has no uses.
+ if ((Node->hasNUsesOfValue(1, 0) &&
+ Node->use_begin()->isMachineOpcode() &&
+ Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
+ !Node->use_begin()->hasAnyUseOfValue(0))) {
+ unsigned Def = MI.getOperand(0).getReg();
+
+ // Change this into a noret atomic.
+ MI.setDesc(TII->get(NoRetAtomicOp));
+ MI.RemoveOperand(0);
+
+ // If we only remove the def operand from the atomic instruction, the
+ // extract_subreg will be left with a use of a vreg without a def.
+ // So we need to insert an implicit_def to avoid machine verifier
+ // errors.
+ BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
+ TII->get(AMDGPU::IMPLICIT_DEF), Def);
+ }
return;
}
}
-static SDValue buildSMovImm32(SelectionDAG &DAG, SDLoc DL, uint64_t Val) {
+static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
+ uint64_t Val) {
SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
}
MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
- SDLoc DL,
+ const SDLoc &DL,
SDValue Ptr) const {
- const SIInstrInfo *TII =
- static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
+ const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
// Build the half of the subregister with the constants before building the
// full 128-bit register. If we are building multiple resource descriptors,
@@ -2524,10 +3327,8 @@ MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
/// of the resource descriptor) to create an offset, which is added to
/// the resource pointer.
-MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG,
- SDLoc DL,
- SDValue Ptr,
- uint32_t RsrcDword1,
+MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
+ SDValue Ptr, uint32_t RsrcDword1,
uint64_t RsrcDword2And3) const {
SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
diff --git a/lib/Target/AMDGPU/SIISelLowering.h b/lib/Target/AMDGPU/SIISelLowering.h
index f01b2c0d09f3..8e055eea58c2 100644
--- a/lib/Target/AMDGPU/SIISelLowering.h
+++ b/lib/Target/AMDGPU/SIISelLowering.h
@@ -12,26 +12,26 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_SIISELLOWERING_H
-#define LLVM_LIB_TARGET_R600_SIISELLOWERING_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_SIISELLOWERING_H
+#define LLVM_LIB_TARGET_AMDGPU_SIISELLOWERING_H
#include "AMDGPUISelLowering.h"
#include "SIInstrInfo.h"
namespace llvm {
-class SITargetLowering : public AMDGPUTargetLowering {
- SDValue LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT, SDLoc DL,
+class SITargetLowering final : public AMDGPUTargetLowering {
+ SDValue LowerParameterPtr(SelectionDAG &DAG, const SDLoc &SL, SDValue Chain,
+ unsigned Offset) const;
+ SDValue LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL,
SDValue Chain, unsigned Offset, bool Signed) const;
- SDValue LowerSampleIntrinsic(unsigned Opcode, const SDValue &Op,
- SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
SelectionDAG &DAG) const override;
-
SDValue lowerImplicitZextParam(SelectionDAG &DAG, SDValue Op,
MVT VT, unsigned Offset) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
@@ -43,8 +43,13 @@ class SITargetLowering : public AMDGPUTargetLowering {
SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, bool Signed) const;
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
+ SDValue getSegmentAperture(unsigned AS, SelectionDAG &DAG) const;
+ SDValue lowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerTRAP(SDValue Op, SelectionDAG &DAG) const;
+
void adjustWritemask(MachineSDNode *&N, SelectionDAG &DAG) const;
SDValue performUCharToFloatCombine(SDNode *N,
@@ -55,14 +60,25 @@ class SITargetLowering : public AMDGPUTargetLowering {
SDValue performAndCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performOrCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performClassCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue performFCanonicalizeCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+
+ SDValue performMinMaxCombine(SDNode *N, DAGCombinerInfo &DCI) const;
- SDValue performMin3Max3Combine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performSetCCCombine(SDNode *N, DAGCombinerInfo &DCI) const;
bool isLegalFlatAddressingMode(const AddrMode &AM) const;
bool isLegalMUBUFAddressingMode(const AddrMode &AM) const;
+
+ bool isCFIntrinsic(const SDNode *Intr) const;
+
+ void createDebuggerPrologueStackObjects(MachineFunction &MF) const;
public:
- SITargetLowering(TargetMachine &tm, const AMDGPUSubtarget &STI);
+ SITargetLowering(const TargetMachine &tm, const SISubtarget &STI);
+
+ const SISubtarget *getSubtarget() const;
+
+ bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
+ unsigned IntrinsicID) const override;
bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
EVT /*VT*/) const override;
@@ -89,21 +105,30 @@ public:
bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const override;
+ bool isTypeDesirableForOp(unsigned Op, EVT VT) const override;
+
+ bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
+
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc DL, SelectionDAG &DAG,
+ const SDLoc &DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const override;
- SDValue LowerReturn(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const override;
+ const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
+ SelectionDAG &DAG) const override;
+
+ unsigned getRegisterByName(const char* RegName, EVT VT,
+ SelectionDAG &DAG) const override;
+
+ MachineBasicBlock *splitKillBlock(MachineInstr &MI,
+ MachineBasicBlock *BB) const;
- MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr * MI,
- MachineBasicBlock * BB) const override;
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *BB) const override;
bool enableAggressiveFMAFusion(EVT VT) const override;
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
EVT VT) const override;
@@ -112,7 +137,7 @@ public:
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
- void AdjustInstrPostInstrSelection(MachineInstr *MI,
+ void AdjustInstrPostInstrSelection(MachineInstr &MI,
SDNode *Node) const override;
int32_t analyzeImmediate(const SDNode *N) const;
@@ -120,17 +145,16 @@ public:
unsigned Reg, EVT VT) const override;
void legalizeTargetIndependentNode(SDNode *Node, SelectionDAG &DAG) const;
- MachineSDNode *wrapAddr64Rsrc(SelectionDAG &DAG, SDLoc DL, SDValue Ptr) const;
- MachineSDNode *buildRSRC(SelectionDAG &DAG,
- SDLoc DL,
- SDValue Ptr,
- uint32_t RsrcDword1,
- uint64_t RsrcDword2And3) const;
+ MachineSDNode *wrapAddr64Rsrc(SelectionDAG &DAG, const SDLoc &DL,
+ SDValue Ptr) const;
+ MachineSDNode *buildRSRC(SelectionDAG &DAG, const SDLoc &DL, SDValue Ptr,
+ uint32_t RsrcDword1, uint64_t RsrcDword2And3) const;
std::pair<unsigned, const TargetRegisterClass *>
getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint, MVT VT) const override;
ConstraintType getConstraintType(StringRef Constraint) const override;
- SDValue copyToM0(SelectionDAG &DAG, SDValue Chain, SDLoc DL, SDValue V) const;
+ SDValue copyToM0(SelectionDAG &DAG, SDValue Chain, const SDLoc &DL,
+ SDValue V) const;
};
} // End namespace llvm
diff --git a/lib/Target/AMDGPU/SIInsertWaits.cpp b/lib/Target/AMDGPU/SIInsertWaits.cpp
index 94e614750d2f..d24588d6c143 100644
--- a/lib/Target/AMDGPU/SIInsertWaits.cpp
+++ b/lib/Target/AMDGPU/SIInsertWaits.cpp
@@ -26,6 +26,8 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#define DEBUG_TYPE "si-insert-waits"
+
using namespace llvm;
namespace {
@@ -53,7 +55,7 @@ typedef std::pair<unsigned, unsigned> RegInterval;
class SIInsertWaits : public MachineFunctionPass {
private:
- static char ID;
+ const SISubtarget *ST;
const SIInstrInfo *TII;
const SIRegisterInfo *TRI;
const MachineRegisterInfo *MRI;
@@ -67,6 +69,10 @@ private:
/// \brief Counter values we have already waited on.
Counters WaitedOn;
+ /// \brief Counter values that we must wait on before the next counter
+ /// increase.
+ Counters DelayedWaitOn;
+
/// \brief Counter values for last instruction issued.
Counters LastIssued;
@@ -87,6 +93,9 @@ private:
/// \brief Whether the machine function returns void
bool ReturnsVoid;
+ /// Whether the VCCZ bit is possibly corrupt
+ bool VCCZCorrupt;
+
/// \brief Get increment/decrement amount for this instruction.
Counters getHwCounts(MachineInstr &MI);
@@ -99,13 +108,17 @@ private:
/// \brief Handle an instruction's async components
void pushInstruction(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I);
+ MachineBasicBlock::iterator I,
+ const Counters& Increment);
/// \brief Insert the actual wait instruction
bool insertWait(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const Counters &Counts);
+ /// \brief Handle existing wait instructions (from intrinsics)
+ void handleExistingWait(MachineBasicBlock::iterator I);
+
/// \brief Do we need def2def checks?
bool unorderedDefines(MachineInstr &MI);
@@ -115,12 +128,20 @@ private:
/// \brief Insert S_NOP between an instruction writing M0 and S_SENDMSG.
void handleSendMsg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I);
+ /// Return true if there are LGKM instructions that haven't been waited on
+ /// yet.
+ bool hasOutstandingLGKM() const;
+
public:
- SIInsertWaits(TargetMachine &tm) :
+ static char ID;
+
+ SIInsertWaits() :
MachineFunctionPass(ID),
+ ST(nullptr),
TII(nullptr),
TRI(nullptr),
- ExpInstrTypesSeen(0) { }
+ ExpInstrTypesSeen(0),
+ VCCZCorrupt(false) { }
bool runOnMachineFunction(MachineFunction &MF) override;
@@ -136,13 +157,28 @@ public:
} // End anonymous namespace
+INITIALIZE_PASS_BEGIN(SIInsertWaits, DEBUG_TYPE,
+ "SI Insert Waits", false, false)
+INITIALIZE_PASS_END(SIInsertWaits, DEBUG_TYPE,
+ "SI Insert Waits", false, false)
+
char SIInsertWaits::ID = 0;
-const Counters SIInsertWaits::WaitCounts = { { 15, 7, 7 } };
+char &llvm::SIInsertWaitsID = SIInsertWaits::ID;
+
+FunctionPass *llvm::createSIInsertWaitsPass() {
+ return new SIInsertWaits();
+}
+
+const Counters SIInsertWaits::WaitCounts = { { 15, 7, 15 } };
const Counters SIInsertWaits::ZeroCounts = { { 0, 0, 0 } };
-FunctionPass *llvm::createSIInsertWaits(TargetMachine &tm) {
- return new SIInsertWaits(tm);
+static bool readsVCCZ(unsigned Opcode) {
+ return Opcode == AMDGPU::S_CBRANCH_VCCNZ || Opcode == AMDGPU::S_CBRANCH_VCCZ;
+}
+
+bool SIInsertWaits::hasOutstandingLGKM() const {
+ return WaitedOn.Named.LGKM != LastIssued.Named.LGKM;
}
Counters SIInsertWaits::getHwCounts(MachineInstr &MI) {
@@ -205,24 +241,23 @@ bool SIInsertWaits::isOpRelevant(MachineOperand &Op) {
return false;
// Check if this operand is the value being stored.
- // Special case for DS instructions, since the address
+ // Special case for DS/FLAT instructions, since the address
// operand comes before the value operand and it may have
// multiple data operands.
- if (TII->isDS(MI)) {
+ if (TII->isDS(MI) || TII->isFLAT(MI)) {
MachineOperand *Data = TII->getNamedOperand(MI, AMDGPU::OpName::data);
if (Data && Op.isIdenticalTo(*Data))
return true;
+ }
+ if (TII->isDS(MI)) {
MachineOperand *Data0 = TII->getNamedOperand(MI, AMDGPU::OpName::data0);
if (Data0 && Op.isIdenticalTo(*Data0))
return true;
MachineOperand *Data1 = TII->getNamedOperand(MI, AMDGPU::OpName::data1);
- if (Data1 && Op.isIdenticalTo(*Data1))
- return true;
-
- return false;
+ return Data1 && Op.isIdenticalTo(*Data1);
}
// NOTE: This assumes that the value operand is before the
@@ -250,10 +285,10 @@ RegInterval SIInsertWaits::getRegInterval(const TargetRegisterClass *RC,
}
void SIInsertWaits::pushInstruction(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) {
+ MachineBasicBlock::iterator I,
+ const Counters &Increment) {
// Get the hardware counter increments and sum them up
- Counters Increment = getHwCounts(*I);
Counters Limit = ZeroCounts;
unsigned Sum = 0;
@@ -270,8 +305,7 @@ void SIInsertWaits::pushInstruction(MachineBasicBlock &MBB,
return;
}
- if (MBB.getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() >=
- AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ if (ST->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
// Any occurrence of consecutive VMEM or SMEM instructions forms a VMEM
// or SMEM clause, respectively.
//
@@ -281,8 +315,7 @@ void SIInsertWaits::pushInstruction(MachineBasicBlock &MBB,
// and destination registers don't overlap, e.g. this is illegal:
// r0 = load r2
// r2 = load r0
- if ((LastOpcodeType == SMEM && TII->isSMRD(*I)) ||
- (LastOpcodeType == VMEM && Increment.Named.VM)) {
+ if (LastOpcodeType == VMEM && Increment.Named.VM) {
// Insert a NOP to break the clause.
BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP))
.addImm(0);
@@ -379,7 +412,7 @@ bool SIInsertWaits::insertWait(MachineBasicBlock &MBB,
BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT))
.addImm((Counts.Named.VM & 0xF) |
((Counts.Named.EXP & 0x7) << 4) |
- ((Counts.Named.LGKM & 0x7) << 8));
+ ((Counts.Named.LGKM & 0xF) << 8));
LastOpcodeType = OTHER;
LastInstWritesM0 = false;
@@ -393,16 +426,38 @@ static void increaseCounters(Counters &Dst, const Counters &Src) {
Dst.Array[i] = std::max(Dst.Array[i], Src.Array[i]);
}
+/// \brief Check whether any of the counters is non-zero.
+static bool countersNonZero(const Counters &Counter) {
+ for (unsigned i = 0; i < 3; ++i)
+ if (Counter.Array[i])
+ return true;
+ return false;
+}
+
+void SIInsertWaits::handleExistingWait(MachineBasicBlock::iterator I) {
+ assert(I->getOpcode() == AMDGPU::S_WAITCNT);
+
+ unsigned Imm = I->getOperand(0).getImm();
+ Counters Counts, WaitOn;
+
+ Counts.Named.VM = Imm & 0xF;
+ Counts.Named.EXP = (Imm >> 4) & 0x7;
+ Counts.Named.LGKM = (Imm >> 8) & 0xF;
+
+ for (unsigned i = 0; i < 3; ++i) {
+ if (Counts.Array[i] <= LastIssued.Array[i])
+ WaitOn.Array[i] = LastIssued.Array[i] - Counts.Array[i];
+ else
+ WaitOn.Array[i] = 0;
+ }
+
+ increaseCounters(DelayedWaitOn, WaitOn);
+}
+
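handleExistingWait above decodes the S_WAITCNT immediate as VM in bits [3:0], EXP in [6:4], and LGKM in [11:8], matching the widened 0xF LGKM mask now used when emitting waits. A hedged round-trip sketch of that layout, with field widths taken from the masks in this pass rather than from an ISA manual:

#include <cassert>

struct WaitCnt { unsigned VM, EXP, LGKM; };

static unsigned encodeWaitCnt(WaitCnt W) {
  return (W.VM & 0xF) | ((W.EXP & 0x7) << 4) | ((W.LGKM & 0xF) << 8);
}

static WaitCnt decodeWaitCnt(unsigned Imm) {
  return { Imm & 0xFu, (Imm >> 4) & 0x7u, (Imm >> 8) & 0xFu };
}

int main() {
  const WaitCnt W = {3, 2, 7};
  const WaitCnt R = decodeWaitCnt(encodeWaitCnt(W));
  assert(R.VM == 3 && R.EXP == 2 && R.LGKM == 7);
  return 0;
}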
Counters SIInsertWaits::handleOperands(MachineInstr &MI) {
Counters Result = ZeroCounts;
- // S_SENDMSG implicitly waits for all outstanding LGKM transfers to finish,
- // but we also want to wait for any other outstanding transfers before
- // signalling other hardware blocks
- if (MI.getOpcode() == AMDGPU::S_SENDMSG)
- return LastIssued;
-
// For each register affected by this instruction increase the result
// sequence.
//
@@ -432,8 +487,7 @@ Counters SIInsertWaits::handleOperands(MachineInstr &MI) {
void SIInsertWaits::handleSendMsg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) {
- if (MBB.getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() <
- AMDGPUSubtarget::VOLCANIC_ISLANDS)
+ if (ST->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
return;
// There must be "S_NOP 0" between an instruction writing M0 and S_SENDMSG.
@@ -460,13 +514,13 @@ void SIInsertWaits::handleSendMsg(MachineBasicBlock &MBB,
bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
bool Changes = false;
- TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
- TRI =
- static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
-
+ ST = &MF.getSubtarget<SISubtarget>();
+ TII = ST->getInstrInfo();
+ TRI = &TII->getRegisterInfo();
MRI = &MF.getRegInfo();
WaitedOn = ZeroCounts;
+ DelayedWaitOn = ZeroCounts;
LastIssued = ZeroCounts;
LastOpcodeType = OTHER;
LastInstWritesM0 = false;
@@ -475,6 +529,8 @@ bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
memset(&UsedRegs, 0, sizeof(UsedRegs));
memset(&DefinedRegs, 0, sizeof(DefinedRegs));
+ SmallVector<MachineInstr *, 4> RemoveMI;
+
for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
BI != BE; ++BI) {
@@ -482,27 +538,81 @@ bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E; ++I) {
+ if (ST->getGeneration() <= SISubtarget::SEA_ISLANDS) {
+ // There is a hardware bug on CI/SI where an SMRD instruction may corrupt
+ // the vccz bit, so when we detect that an instruction may read from a
+ // corrupt vccz bit, we need to:
+ // 1. Insert s_waitcnt lgkm(0) to wait for all outstanding SMRD
+ //    operations to complete.
+ // 2. Restore the correct value of vccz by writing the current value
+ // of vcc back to vcc.
+
+ if (TII->isSMRD(I->getOpcode())) {
+ VCCZCorrupt = true;
+ } else if (!hasOutstandingLGKM() && I->modifiesRegister(AMDGPU::VCC, TRI)) {
+ // FIXME: We only care about SMRD instructions here, not LDS or GDS.
+ // Whenever we store a value in vcc, the correct value of vccz is
+ // restored.
+ VCCZCorrupt = false;
+ }
+
+ // Check if we need to apply the bug work-around
+ if (readsVCCZ(I->getOpcode()) && VCCZCorrupt) {
+ DEBUG(dbgs() << "Inserting vccz bug work-around before: " << *I << '\n');
+
+ // Wait on everything, not just LGKM. vccz reads usually come from
+ // terminators, and we always wait on everything at the end of the
+ // block, so if we only wait on LGKM here, we might end up with
+ // another s_waitcnt inserted right after this if there are non-LGKM
+ // instructions still outstanding.
+ insertWait(MBB, I, LastIssued);
+
+ // Restore the vccz bit. Any time a value is written to vcc, the vcc
+ // bit is updated, so we can restore the bit by reading the value of
+ // vcc and then writing it back to the register.
+ BuildMI(MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
+ AMDGPU::VCC)
+ .addReg(AMDGPU::VCC);
+ }
+ }
+
+ // Record pre-existing, explicitly requested waits
+ if (I->getOpcode() == AMDGPU::S_WAITCNT) {
+ handleExistingWait(*I);
+ RemoveMI.push_back(&*I);
+ continue;
+ }
+
+ Counters Required;
+
// Wait for everything before a barrier.
- if (I->getOpcode() == AMDGPU::S_BARRIER)
- Changes |= insertWait(MBB, I, LastIssued);
+ //
+ // S_SENDMSG implicitly waits for all outstanding LGKM transfers to finish,
+ // but we also want to wait for any other outstanding transfers before
+ // signalling other hardware blocks
+ if (I->getOpcode() == AMDGPU::S_BARRIER ||
+ I->getOpcode() == AMDGPU::S_SENDMSG)
+ Required = LastIssued;
else
- Changes |= insertWait(MBB, I, handleOperands(*I));
+ Required = handleOperands(*I);
+
+ Counters Increment = getHwCounts(*I);
- pushInstruction(MBB, I);
+ if (countersNonZero(Required) || countersNonZero(Increment))
+ increaseCounters(Required, DelayedWaitOn);
+
+ Changes |= insertWait(MBB, I, Required);
+
+ pushInstruction(MBB, I, Increment);
handleSendMsg(MBB, I);
}
// Wait for everything at the end of the MBB
Changes |= insertWait(MBB, MBB.getFirstTerminator(), LastIssued);
-
- // Functions returning something shouldn't contain S_ENDPGM, because other
- // bytecode will be appended after it.
- if (!ReturnsVoid) {
- MachineBasicBlock::iterator I = MBB.getFirstTerminator();
- if (I != MBB.end() && I->getOpcode() == AMDGPU::S_ENDPGM)
- I->eraseFromParent();
- }
}
+ for (MachineInstr *I : RemoveMI)
+ I->eraseFromParent();
+
return Changes;
}
diff --git a/lib/Target/AMDGPU/SIInstrFormats.td b/lib/Target/AMDGPU/SIInstrFormats.td
index 0e883f64caa3..2f63d4ed13b3 100644
--- a/lib/Target/AMDGPU/SIInstrFormats.td
+++ b/lib/Target/AMDGPU/SIInstrFormats.td
@@ -11,8 +11,9 @@
//
//===----------------------------------------------------------------------===//
-class InstSI <dag outs, dag ins, string asm, list<dag> pattern> :
- AMDGPUInst<outs, ins, asm, pattern>, PredicateControl {
+class InstSI <dag outs, dag ins, string asm = "",
+ list<dag> pattern = []> :
+ AMDGPUInst<outs, ins, asm, pattern>, PredicateControl {
field bits<1> VM_CNT = 0;
field bits<1> EXP_CNT = 0;
@@ -31,6 +32,8 @@ class InstSI <dag outs, dag ins, string asm, list<dag> pattern> :
field bits<1> VOP2 = 0;
field bits<1> VOP3 = 0;
field bits<1> VOPC = 0;
+ field bits<1> SDWA = 0;
+ field bits<1> DPP = 0;
field bits<1> MUBUF = 0;
field bits<1> MTBUF = 0;
@@ -45,6 +48,8 @@ class InstSI <dag outs, dag ins, string asm, list<dag> pattern> :
// is unable to infer the encoding from the operands.
field bits<1> VOPAsmPrefer32Bit = 0;
+ field bits<1> Gather4 = 0;
+
// These need to be kept in sync with the enum in SIInstrFlags.
let TSFlags{0} = VM_CNT;
let TSFlags{1} = EXP_CNT;
@@ -63,18 +68,33 @@ class InstSI <dag outs, dag ins, string asm, list<dag> pattern> :
let TSFlags{11} = VOP2;
let TSFlags{12} = VOP3;
let TSFlags{13} = VOPC;
-
- let TSFlags{14} = MUBUF;
- let TSFlags{15} = MTBUF;
- let TSFlags{16} = SMRD;
- let TSFlags{17} = DS;
- let TSFlags{18} = MIMG;
- let TSFlags{19} = FLAT;
- let TSFlags{20} = WQM;
- let TSFlags{21} = VGPRSpill;
- let TSFlags{22} = VOPAsmPrefer32Bit;
+ let TSFlags{14} = SDWA;
+ let TSFlags{15} = DPP;
+
+ let TSFlags{16} = MUBUF;
+ let TSFlags{17} = MTBUF;
+ let TSFlags{18} = SMRD;
+ let TSFlags{19} = DS;
+ let TSFlags{20} = MIMG;
+ let TSFlags{21} = FLAT;
+ let TSFlags{22} = WQM;
+ let TSFlags{23} = VGPRSpill;
+ let TSFlags{24} = VOPAsmPrefer32Bit;
+ let TSFlags{25} = Gather4;
let SchedRW = [Write32Bit];
+
+ field bits<1> DisableSIDecoder = 0;
+ field bits<1> DisableVIDecoder = 0;
+ field bits<1> DisableDecoder = 0;
+
+ let isAsmParserOnly = !if(!eq(DisableDecoder{0}, {0}), 0, 1);
+}
+
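The renumbered TSFlags assignments above shift every field past bit 13 up by two to make room for the new SDWA and DPP bits; on the C++ side each field becomes a one-bit test against the instruction's TSFlags. A sketch with illustrative constants (the real enumerators live in SIDefines.h, which this diff does not show):

#include <cassert>
#include <cstdint>

// Bit positions mirror the TableGen lets above; the names are hypothetical.
enum : uint64_t {
  FlagSDWA    = 1ull << 14,
  FlagDPP     = 1ull << 15,
  FlagMIMG    = 1ull << 20,
  FlagGather4 = 1ull << 25
};

static bool isGather4(uint64_t TSFlags) { return TSFlags & FlagGather4; }

int main() {
  const uint64_t Flags = FlagMIMG | FlagGather4; // e.g. an image gather op
  assert(isGather4(Flags));
  assert(!(Flags & FlagSDWA));
  return 0;
}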
+class PseudoInstSI<dag outs, dag ins, list<dag> pattern = []>
+ : InstSI<outs, ins, "", pattern> {
+ let isPseudo = 1;
+ let isCodeGenOnly = 1;
}
class Enc32 {
@@ -123,8 +143,10 @@ class VOP2Common <dag outs, dag ins, string asm, list<dag> pattern> :
let Size = 4;
}
-class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern> :
- VOPAnyCommon <outs, ins, asm, pattern> {
+class VOP3Common <dag outs, dag ins, string asm = "",
+ list<dag> pattern = [], bit HasMods = 0,
+ bit VOP3Only = 0> :
+ VOPAnyCommon <outs, ins, asm, pattern> {
// Using complex patterns gives VOP3 patterns a very high complexity rating,
// but standalone patterns are almost always preferred, so we need to adjust the
@@ -135,7 +157,11 @@ class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern> :
let VOP3 = 1;
let VALU = 1;
- let AsmMatchConverter = "cvtVOP3";
+ let AsmMatchConverter =
+ !if(!eq(VOP3Only,1),
+ "cvtVOP3",
+ !if(!eq(HasMods,1), "cvtVOP3_2_mod", ""));
+
let isCodeGenOnly = 0;
int Size = 8;
@@ -154,9 +180,9 @@ class VOP3Common <dag outs, dag ins, string asm, list<dag> pattern> :
class SOP1e <bits<8> op> : Enc32 {
bits<7> sdst;
- bits<8> ssrc0;
+ bits<8> src0;
- let Inst{7-0} = ssrc0;
+ let Inst{7-0} = src0;
let Inst{15-8} = op;
let Inst{22-16} = sdst;
let Inst{31-23} = 0x17d; //encoding;
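The ssrc0 -> src0 rename above only changes the operand's record name; the SOP1 bit layout is unchanged: src0 in [7:0], opcode in [15:8], sdst in [22:16], and the fixed 0x17d tag in [31:23]. A hypothetical standalone encoder (not LLVM code) exercising that layout:

#include <cassert>
#include <cstdint>

static uint32_t encodeSOP1(uint32_t Op, uint32_t Src0, uint32_t SDst) {
  return (Src0 & 0xFF) | ((Op & 0xFF) << 8) | ((SDst & 0x7F) << 16) |
         (0x17Du << 23); // fixed SOP1 encoding tag
}

int main() {
  const uint32_t Inst = encodeSOP1(/*Op=*/3, /*Src0=*/0x42, /*SDst=*/5);
  assert((Inst & 0xFF) == 0x42);
  assert(((Inst >> 8) & 0xFF) == 3);
  assert(((Inst >> 16) & 0x7F) == 5);
  assert(((Inst >> 23) & 0x1FF) == 0x17D);
  return 0;
}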
@@ -164,22 +190,22 @@ class SOP1e <bits<8> op> : Enc32 {
class SOP2e <bits<7> op> : Enc32 {
bits<7> sdst;
- bits<8> ssrc0;
- bits<8> ssrc1;
+ bits<8> src0;
+ bits<8> src1;
- let Inst{7-0} = ssrc0;
- let Inst{15-8} = ssrc1;
+ let Inst{7-0} = src0;
+ let Inst{15-8} = src1;
let Inst{22-16} = sdst;
let Inst{29-23} = op;
let Inst{31-30} = 0x2; // encoding
}
class SOPCe <bits<7> op> : Enc32 {
- bits<8> ssrc0;
- bits<8> ssrc1;
+ bits<8> src0;
+ bits<8> src1;
- let Inst{7-0} = ssrc0;
- let Inst{15-8} = ssrc1;
+ let Inst{7-0} = src0;
+ let Inst{15-8} = src1;
let Inst{22-16} = op;
let Inst{31-23} = 0x17e;
}
@@ -218,9 +244,7 @@ class SOPPe <bits<7> op> : Enc32 {
class SMRDe <bits<5> op, bits<1> imm> : Enc32 {
bits<7> sdst;
bits<7> sbase;
- bits<8> offset;
- let Inst{7-0} = offset;
let Inst{8} = imm;
let Inst{14-9} = sbase{6-1};
let Inst{21-15} = sdst;
@@ -228,6 +252,18 @@ class SMRDe <bits<5> op, bits<1> imm> : Enc32 {
let Inst{31-27} = 0x18; //encoding
}
+class SMRD_IMMe <bits<5> op> : SMRDe<op, 1> {
+ bits<8> offset;
+ let Inst{7-0} = offset;
+}
+
+class SMRD_SOFFe <bits<5> op> : SMRDe<op, 0> {
+ bits<8> soff;
+ let Inst{7-0} = soff;
+}
+
class SMRD_IMMe_ci <bits<5> op> : Enc64 {
bits<7> sdst;
bits<7> sbase;
@@ -348,19 +384,18 @@ class VOP2_MADKe <bits<6> op> : Enc64 {
bits<8> vdst;
bits<9> src0;
- bits<8> vsrc1;
- bits<32> src2;
+ bits<8> src1;
+ bits<32> imm;
let Inst{8-0} = src0;
- let Inst{16-9} = vsrc1;
+ let Inst{16-9} = src1;
let Inst{24-17} = vdst;
let Inst{30-25} = op;
let Inst{31} = 0x0; // encoding
- let Inst{63-32} = src2;
+ let Inst{63-32} = imm;
}
-class VOP3e <bits<9> op> : Enc64 {
- bits<8> vdst;
+class VOP3a <bits<9> op> : Enc64 {
bits<2> src0_modifiers;
bits<9> src0;
bits<2> src1_modifiers;
@@ -370,7 +405,6 @@ class VOP3e <bits<9> op> : Enc64 {
bits<1> clamp;
bits<2> omod;
- let Inst{7-0} = vdst;
let Inst{8} = src0_modifiers{1};
let Inst{9} = src1_modifiers{1};
let Inst{10} = src2_modifiers{1};
@@ -386,6 +420,20 @@ class VOP3e <bits<9> op> : Enc64 {
let Inst{63} = src2_modifiers{0};
}
+class VOP3e <bits<9> op> : VOP3a <op> {
+ bits<8> vdst;
+
+ let Inst{7-0} = vdst;
+}
+
+// Encoding used for VOPC instructions encoded as VOP3
+// Differs from VOP3e by destination name (sdst) as VOPC doesn't have vector dst
+class VOP3ce <bits<9> op> : VOP3a <op> {
+ bits<8> sdst;
+
+ let Inst{7-0} = sdst;
+}
+
class VOP3be <bits<9> op> : Enc64 {
bits<8> vdst;
bits<2> src0_modifiers;
@@ -412,10 +460,10 @@ class VOP3be <bits<9> op> : Enc64 {
class VOPCe <bits<8> op> : Enc32 {
bits<9> src0;
- bits<8> vsrc1;
+ bits<8> src1;
let Inst{8-0} = src0;
- let Inst{16-9} = vsrc1;
+ let Inst{16-9} = src1;
let Inst{24-17} = op;
let Inst{31-25} = 0x3e;
}
@@ -675,17 +723,17 @@ class FLAT <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
let UseNamedOperandTable = 1;
let hasSideEffects = 0;
- let AsmMatchConverter = "cvtFlat";
let SchedRW = [WriteVMEM];
}
-class MIMG <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
- InstSI <outs, ins, asm, pattern>, MIMGe <op> {
+class MIMG <dag outs, dag ins, string asm, list<dag> pattern> :
+ InstSI <outs, ins, asm, pattern> {
let VM_CNT = 1;
let EXP_CNT = 1;
let MIMG = 1;
let Uses = [EXEC];
+ let UseNamedOperandTable = 1;
let hasSideEffects = 0; // XXX ????
}
diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp
index 1e10d25e8fb7..d171e21c8a4f 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -12,14 +12,15 @@
//
//===----------------------------------------------------------------------===//
-
#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
+#include "GCNHazardRecognizer.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/IR/Function.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/MC/MCInstrDesc.h"
@@ -27,8 +28,8 @@
using namespace llvm;
-SIInstrInfo::SIInstrInfo(const AMDGPUSubtarget &st)
- : AMDGPUInstrInfo(st), RI() {}
+SIInstrInfo::SIInstrInfo(const SISubtarget &ST)
+ : AMDGPUInstrInfo(ST), RI(), ST(ST) {}
//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
@@ -74,12 +75,12 @@ static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}
-bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
+bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
AliasAnalysis *AA) const {
// TODO: The generic check fails for VALU instructions that should be
// rematerializable due to implicit reads of exec. We really want all of the
// generic logic for this except for this one check.
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
case AMDGPU::V_MOV_B32_e32:
case AMDGPU::V_MOV_B32_e64:
case AMDGPU::V_MOV_B64_PSEUDO:
@@ -201,18 +202,18 @@ static bool isStride64(unsigned Opc) {
}
}
-bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
- unsigned &Offset,
+bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
+ int64_t &Offset,
const TargetRegisterInfo *TRI) const {
- unsigned Opc = LdSt->getOpcode();
+ unsigned Opc = LdSt.getOpcode();
- if (isDS(*LdSt)) {
- const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
- AMDGPU::OpName::offset);
+ if (isDS(LdSt)) {
+ const MachineOperand *OffsetImm =
+ getNamedOperand(LdSt, AMDGPU::OpName::offset);
if (OffsetImm) {
// Normal, single offset LDS instruction.
- const MachineOperand *AddrReg = getNamedOperand(*LdSt,
- AMDGPU::OpName::addr);
+ const MachineOperand *AddrReg =
+ getNamedOperand(LdSt, AMDGPU::OpName::addr);
BaseReg = AddrReg->getReg();
Offset = OffsetImm->getImm();
@@ -222,10 +223,10 @@ bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
// The 2 offset instructions use offset0 and offset1 instead. We can treat
// these as a load with a single offset if the 2 offsets are consecutive. We
// will use this for some partially aligned loads.
- const MachineOperand *Offset0Imm = getNamedOperand(*LdSt,
- AMDGPU::OpName::offset0);
- const MachineOperand *Offset1Imm = getNamedOperand(*LdSt,
- AMDGPU::OpName::offset1);
+ const MachineOperand *Offset0Imm =
+ getNamedOperand(LdSt, AMDGPU::OpName::offset0);
+ const MachineOperand *Offset1Imm =
+ getNamedOperand(LdSt, AMDGPU::OpName::offset1);
uint8_t Offset0 = Offset0Imm->getImm();
uint8_t Offset1 = Offset1Imm->getImm();
@@ -235,19 +236,19 @@ bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
// to bytes of the individual reads.
unsigned EltSize;
- if (LdSt->mayLoad())
- EltSize = getOpRegClass(*LdSt, 0)->getSize() / 2;
+ if (LdSt.mayLoad())
+ EltSize = getOpRegClass(LdSt, 0)->getSize() / 2;
else {
- assert(LdSt->mayStore());
+ assert(LdSt.mayStore());
int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
- EltSize = getOpRegClass(*LdSt, Data0Idx)->getSize();
+ EltSize = getOpRegClass(LdSt, Data0Idx)->getSize();
}
if (isStride64(Opc))
EltSize *= 64;
- const MachineOperand *AddrReg = getNamedOperand(*LdSt,
- AMDGPU::OpName::addr);
+ const MachineOperand *AddrReg =
+ getNamedOperand(LdSt, AMDGPU::OpName::addr);
BaseReg = AddrReg->getReg();
Offset = EltSize * Offset0;
return true;
@@ -256,63 +257,91 @@ bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
return false;
}
- if (isMUBUF(*LdSt) || isMTBUF(*LdSt)) {
+ if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::soffset) != -1)
return false;
- const MachineOperand *AddrReg = getNamedOperand(*LdSt,
- AMDGPU::OpName::vaddr);
+ const MachineOperand *AddrReg =
+ getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
if (!AddrReg)
return false;
- const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
- AMDGPU::OpName::offset);
+ const MachineOperand *OffsetImm =
+ getNamedOperand(LdSt, AMDGPU::OpName::offset);
BaseReg = AddrReg->getReg();
Offset = OffsetImm->getImm();
return true;
}
- if (isSMRD(*LdSt)) {
- const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
- AMDGPU::OpName::offset);
+ if (isSMRD(LdSt)) {
+ const MachineOperand *OffsetImm =
+ getNamedOperand(LdSt, AMDGPU::OpName::offset);
if (!OffsetImm)
return false;
- const MachineOperand *SBaseReg = getNamedOperand(*LdSt,
- AMDGPU::OpName::sbase);
+ const MachineOperand *SBaseReg =
+ getNamedOperand(LdSt, AMDGPU::OpName::sbase);
BaseReg = SBaseReg->getReg();
Offset = OffsetImm->getImm();
return true;
}
+ if (isFLAT(LdSt)) {
+ const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::addr);
+ BaseReg = AddrReg->getReg();
+ Offset = 0;
+ return true;
+ }
+
return false;
}
-bool SIInstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
- MachineInstr *SecondLdSt,
- unsigned NumLoads) const {
- // TODO: This needs finer tuning
- if (NumLoads > 4)
+bool SIInstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
+ MachineInstr &SecondLdSt,
+ unsigned NumLoads) const {
+ const MachineOperand *FirstDst = nullptr;
+ const MachineOperand *SecondDst = nullptr;
+
+ if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
+ FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
+ SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
+ }
+
+ if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) {
+ FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst);
+ SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst);
+ }
+
+ if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) ||
+ (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt))) {
+ FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata);
+ SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata);
+ }
+
+ if (!FirstDst || !SecondDst)
return false;
- if (isDS(*FirstLdSt) && isDS(*SecondLdSt))
- return true;
+ // Try to limit clustering based on the total number of bytes loaded
+ // rather than the number of instructions. This is done to help reduce
+ // register pressure. The method used is somewhat inexact, though,
+ // because it assumes that all loads in the cluster will load the
+ // same number of bytes as FirstLdSt.
- if (isSMRD(*FirstLdSt) && isSMRD(*SecondLdSt))
- return true;
+ // The unit of this value is bytes.
+ // FIXME: This needs finer tuning.
+ unsigned LoadClusterThreshold = 16;
- if ((isMUBUF(*FirstLdSt) || isMTBUF(*FirstLdSt)) &&
- (isMUBUF(*SecondLdSt) || isMTBUF(*SecondLdSt)))
- return true;
+ const MachineRegisterInfo &MRI =
+ FirstLdSt.getParent()->getParent()->getRegInfo();
+ const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());
- return false;
+ return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold;
}
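The new heuristic above bounds a cluster by total bytes loaded rather than by a flat instruction count: NumLoads times the size of FirstLdSt's result class must stay within the 16-byte threshold. A back-of-envelope sketch with assumed result sizes:

#include <cstdio>

int main() {
  const unsigned LoadClusterThreshold = 16; // bytes, as in the pass above
  const unsigned ResultBytes[] = {4, 8, 16}; // e.g. dword, dwordx2, dwordx4
  for (unsigned Size : ResultBytes)
    std::printf("%2u-byte results -> cluster up to %u loads\n", Size,
                LoadClusterThreshold / Size);
  return 0;
}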
-void
-SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
+void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
// If we are trying to copy to or from SCC, there is a bug somewhere else in
// the backend. While it may be theoretically possible to do this, it should
@@ -361,7 +390,6 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
unsigned Opcode;
ArrayRef<int16_t> SubIndices;
- bool Forward;
if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
@@ -445,10 +473,7 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
llvm_unreachable("Can't copy register!");
}
- if (RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg))
- Forward = true;
- else
- Forward = false;
+ bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
unsigned SubIdx;
@@ -463,10 +488,12 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
Builder.addReg(RI.getSubReg(SrcReg, SubIdx));
if (Idx == SubIndices.size() - 1)
- Builder.addReg(SrcReg, RegState::Kill | RegState::Implicit);
+ Builder.addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit);
if (Idx == 0)
Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
+
+ Builder.addReg(SrcReg, RegState::Implicit);
}
}
@@ -525,6 +552,8 @@ static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
return AMDGPU::SI_SPILL_V32_SAVE;
case 8:
return AMDGPU::SI_SPILL_V64_SAVE;
+ case 12:
+ return AMDGPU::SI_SPILL_V96_SAVE;
case 16:
return AMDGPU::SI_SPILL_V128_SAVE;
case 32:
@@ -558,19 +587,25 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
if (RI.isSGPRClass(RC)) {
MFI->setHasSpilledSGPRs();
+ if (TargetRegisterInfo::isVirtualRegister(SrcReg) && RC->getSize() == 4) {
+ // m0 may not be allowed for readlane.
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
+ }
+
// We are only allowed to create one new instruction when spilling
// registers, so we need to use pseudo instruction for spilling
// SGPRs.
unsigned Opcode = getSGPRSpillSaveOpcode(RC->getSize());
BuildMI(MBB, MI, DL, get(Opcode))
- .addReg(SrcReg) // src
+ .addReg(SrcReg, getKillRegState(isKill)) // src
.addFrameIndex(FrameIndex) // frame_idx
.addMemOperand(MMO);
return;
}
- if (!ST.isVGPRSpillingEnabled(MFI)) {
+ if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
LLVMContext &Ctx = MF->getFunction()->getContext();
Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
" spill register");
@@ -585,10 +620,11 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
unsigned Opcode = getVGPRSpillSaveOpcode(RC->getSize());
MFI->setHasSpilledVGPRs();
BuildMI(MBB, MI, DL, get(Opcode))
- .addReg(SrcReg) // src
+ .addReg(SrcReg, getKillRegState(isKill)) // src
.addFrameIndex(FrameIndex) // frame_idx
.addReg(MFI->getScratchRSrcReg()) // scratch_rsrc
.addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
+ .addImm(0) // offset
.addMemOperand(MMO);
}
@@ -615,6 +651,8 @@ static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
return AMDGPU::SI_SPILL_V32_RESTORE;
case 8:
return AMDGPU::SI_SPILL_V64_RESTORE;
+ case 12:
+ return AMDGPU::SI_SPILL_V96_RESTORE;
case 16:
return AMDGPU::SI_SPILL_V128_RESTORE;
case 32:
@@ -648,6 +686,13 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
// FIXME: Maybe this should not include a memoperand because it will be
// lowered to non-memory instructions.
unsigned Opcode = getSGPRSpillRestoreOpcode(RC->getSize());
+
+ if (TargetRegisterInfo::isVirtualRegister(DestReg) && RC->getSize() == 4) {
+ // m0 may not be allowed for readlane.
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
+ }
+
BuildMI(MBB, MI, DL, get(Opcode), DestReg)
.addFrameIndex(FrameIndex) // frame_idx
.addMemOperand(MMO);
@@ -655,7 +700,7 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
return;
}
- if (!ST.isVGPRSpillingEnabled(MFI)) {
+ if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
LLVMContext &Ctx = MF->getFunction()->getContext();
Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
" restore register");
@@ -671,20 +716,18 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
.addFrameIndex(FrameIndex) // frame_idx
.addReg(MFI->getScratchRSrcReg()) // scratch_rsrc
.addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
+ .addImm(0) // offset
.addMemOperand(MMO);
}
/// \param @Offset Offset in bytes of the FrameIndex being spilled
-unsigned SIInstrInfo::calculateLDSSpillAddress(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- RegScavenger *RS, unsigned TmpReg,
- unsigned FrameOffset,
- unsigned Size) const {
+unsigned SIInstrInfo::calculateLDSSpillAddress(
+ MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg,
+ unsigned FrameOffset, unsigned Size) const {
MachineFunction *MF = MBB.getParent();
SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
- const AMDGPUSubtarget &ST = MF->getSubtarget<AMDGPUSubtarget>();
- const SIRegisterInfo *TRI =
- static_cast<const SIRegisterInfo*>(ST.getRegisterInfo());
+ const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
+ const SIRegisterInfo *TRI = ST.getRegisterInfo();
DebugLoc DL = MBB.findDebugLoc(MI);
unsigned WorkGroupSize = MFI->getMaximumWorkGroupSize(*MF);
unsigned WavefrontSize = ST.getWavefrontSize();
@@ -699,8 +742,7 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(MachineBasicBlock &MBB,
if (TIDReg == AMDGPU::NoRegister)
return TIDReg;
-
- if (MFI->getShaderType() == ShaderType::COMPUTE &&
+ if (!AMDGPU::isShader(MF->getFunction()->getCallingConv()) &&
WorkGroupSize > WavefrontSize) {
unsigned TIDIGXReg
@@ -716,7 +758,7 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(MachineBasicBlock &MBB,
Entry.addLiveIn(Reg);
}
- RS->enterBasicBlock(&Entry);
+ RS->enterBasicBlock(Entry);
// FIXME: Can we scavenge an SReg_64 and access the subregs?
unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
@@ -773,8 +815,10 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(MachineBasicBlock &MBB,
return TmpReg;
}
-void SIInstrInfo::insertWaitStates(MachineBasicBlock::iterator MI,
+void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
int Count) const {
+ DebugLoc DL = MBB.findDebugLoc(MI);
while (Count > 0) {
int Arg;
if (Count >= 8)
@@ -782,76 +826,87 @@ void SIInstrInfo::insertWaitStates(MachineBasicBlock::iterator MI,
else
Arg = Count - 1;
Count -= 8;
- BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(AMDGPU::S_NOP))
+ BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
.addImm(Arg);
}
}
-bool SIInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
- MachineBasicBlock &MBB = *MI->getParent();
+void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const {
+ insertWaitStates(MBB, MI, 1);
+}
+
+unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ default: return 1; // FIXME: Do wait states equal cycles?
+
+ case AMDGPU::S_NOP:
+ return MI.getOperand(0).getImm() + 1;
+ }
+}
+
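Taken together, insertWaitStates and getNumWaitStates agree on the S_NOP encoding: an immediate of N yields N + 1 wait states, so covering Count states costs ceil(Count / 8) NOPs. A tiny model of that arithmetic:

#include <cassert>

// Mirrors the emission loop above: each S_NOP covers at most 8 wait states.
static unsigned nopsFor(int Count) {
  unsigned Nops = 0;
  while (Count > 0) {
    Count -= 8;
    ++Nops;
  }
  return Nops;
}

int main() {
  assert(nopsFor(1) == 1);
  assert(nopsFor(8) == 1);
  assert(nopsFor(9) == 2);
  assert(nopsFor(16) == 2);
  return 0;
}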
+bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
+ MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = MBB.findDebugLoc(MI);
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
- case AMDGPU::SGPR_USE:
- // This is just a placeholder for register allocation.
- MI->eraseFromParent();
- break;
-
case AMDGPU::V_MOV_B64_PSEUDO: {
- unsigned Dst = MI->getOperand(0).getReg();
+ unsigned Dst = MI.getOperand(0).getReg();
unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
- const MachineOperand &SrcOp = MI->getOperand(1);
+ const MachineOperand &SrcOp = MI.getOperand(1);
// FIXME: Will this work for 64-bit floating point immediates?
assert(!SrcOp.isFPImm());
if (SrcOp.isImm()) {
APInt Imm(64, SrcOp.getImm());
BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
- .addImm(Imm.getLoBits(32).getZExtValue())
- .addReg(Dst, RegState::Implicit);
+ .addImm(Imm.getLoBits(32).getZExtValue())
+ .addReg(Dst, RegState::Implicit | RegState::Define);
BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
- .addImm(Imm.getHiBits(32).getZExtValue())
- .addReg(Dst, RegState::Implicit);
+ .addImm(Imm.getHiBits(32).getZExtValue())
+ .addReg(Dst, RegState::Implicit | RegState::Define);
} else {
assert(SrcOp.isReg());
BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
- .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
- .addReg(Dst, RegState::Implicit);
+ .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
+ .addReg(Dst, RegState::Implicit | RegState::Define);
BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
- .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
- .addReg(Dst, RegState::Implicit);
+ .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
+ .addReg(Dst, RegState::Implicit | RegState::Define);
}
- MI->eraseFromParent();
+ MI.eraseFromParent();
break;
}
case AMDGPU::V_CNDMASK_B64_PSEUDO: {
- unsigned Dst = MI->getOperand(0).getReg();
+ unsigned Dst = MI.getOperand(0).getReg();
unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
- unsigned Src0 = MI->getOperand(1).getReg();
- unsigned Src1 = MI->getOperand(2).getReg();
- const MachineOperand &SrcCond = MI->getOperand(3);
+ unsigned Src0 = MI.getOperand(1).getReg();
+ unsigned Src1 = MI.getOperand(2).getReg();
+ const MachineOperand &SrcCond = MI.getOperand(3);
BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
- .addReg(RI.getSubReg(Src0, AMDGPU::sub0))
- .addReg(RI.getSubReg(Src1, AMDGPU::sub0))
- .addOperand(SrcCond);
+ .addReg(RI.getSubReg(Src0, AMDGPU::sub0))
+ .addReg(RI.getSubReg(Src1, AMDGPU::sub0))
+ .addReg(SrcCond.getReg())
+ .addReg(Dst, RegState::Implicit | RegState::Define);
BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
- .addReg(RI.getSubReg(Src0, AMDGPU::sub1))
- .addReg(RI.getSubReg(Src1, AMDGPU::sub1))
- .addOperand(SrcCond);
- MI->eraseFromParent();
+ .addReg(RI.getSubReg(Src0, AMDGPU::sub1))
+ .addReg(RI.getSubReg(Src1, AMDGPU::sub1))
+ .addReg(SrcCond.getReg(), getKillRegState(SrcCond.isKill()))
+ .addReg(Dst, RegState::Implicit | RegState::Define);
+ MI.eraseFromParent();
break;
}
- case AMDGPU::SI_CONSTDATA_PTR: {
- const SIRegisterInfo *TRI =
- static_cast<const SIRegisterInfo *>(ST.getRegisterInfo());
+ case AMDGPU::SI_PC_ADD_REL_OFFSET: {
+ const SIRegisterInfo *TRI
+ = static_cast<const SIRegisterInfo *>(ST.getRegisterInfo());
MachineFunction &MF = *MBB.getParent();
- unsigned Reg = MI->getOperand(0).getReg();
+ unsigned Reg = MI.getOperand(0).getReg();
unsigned RegLo = TRI->getSubReg(Reg, AMDGPU::sub0);
unsigned RegHi = TRI->getSubReg(Reg, AMDGPU::sub1);
@@ -863,15 +918,15 @@ bool SIInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
// Add 32-bit offset from this instruction to the start of the
// constant data.
Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
- .addReg(RegLo)
- .addOperand(MI->getOperand(1)));
+ .addReg(RegLo)
+ .addOperand(MI.getOperand(1)));
Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
.addReg(RegHi)
.addImm(0));
llvm::finalizeBundle(MBB, Bundler.begin());
- MI->eraseFromParent();
+ MI.eraseFromParent();
break;
}
}
@@ -885,22 +940,21 @@ bool SIInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
/// non-commutable pair of operand indices OpIdx0 and OpIdx1.
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands; a null pointer is returned in such cases.
-MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr *MI,
- bool NewMI,
+MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx0,
unsigned OpIdx1) const {
- int CommutedOpcode = commuteOpcode(*MI);
+ int CommutedOpcode = commuteOpcode(MI);
if (CommutedOpcode == -1)
return nullptr;
- int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
- AMDGPU::OpName::src0);
- MachineOperand &Src0 = MI->getOperand(Src0Idx);
+ int Src0Idx =
+ AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
+ MachineOperand &Src0 = MI.getOperand(Src0Idx);
if (!Src0.isReg())
return nullptr;
- int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
- AMDGPU::OpName::src1);
+ int Src1Idx =
+ AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
if ((OpIdx0 != static_cast<unsigned>(Src0Idx) ||
OpIdx1 != static_cast<unsigned>(Src1Idx)) &&
@@ -908,33 +962,32 @@ MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr *MI,
OpIdx1 != static_cast<unsigned>(Src0Idx)))
return nullptr;
- MachineOperand &Src1 = MI->getOperand(Src1Idx);
+ MachineOperand &Src1 = MI.getOperand(Src1Idx);
-
- if (isVOP2(*MI)) {
- const MCInstrDesc &InstrDesc = MI->getDesc();
- // For VOP2 instructions, any operand type is valid to use for src0. Make
- // sure we can use the src1 as src0.
+ if (isVOP2(MI) || isVOPC(MI)) {
+ const MCInstrDesc &InstrDesc = MI.getDesc();
+ // For VOP2 and VOPC instructions, any operand type is valid to use for
+ // src0. Make sure we can use src0 as src1.
//
// We could be stricter here and only allow commuting if there is a reason
// to do so. i.e. if both operands are VGPRs there is no real benefit,
// although MachineCSE attempts to find matches by commuting.
- const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
if (!isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0))
return nullptr;
}
+ MachineInstr *CommutedMI = &MI;
if (!Src1.isReg()) {
// Allow commuting instructions with Imm operands.
- if (NewMI || !Src1.isImm() ||
- (!isVOP2(*MI) && !isVOP3(*MI))) {
+ if (NewMI || !Src1.isImm() || (!isVOP2(MI) && !isVOP3(MI))) {
return nullptr;
}
// Be sure to copy the source modifiers to the right place.
- if (MachineOperand *Src0Mods
- = getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {
- MachineOperand *Src1Mods
- = getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers);
+ if (MachineOperand *Src0Mods =
+ getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)) {
+ MachineOperand *Src1Mods =
+ getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
int Src0ModsVal = Src0Mods->getImm();
if (!Src1Mods && Src0ModsVal != 0)
@@ -959,26 +1012,26 @@ MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr *MI,
Src1.ChangeToRegister(Reg, false);
Src1.setSubReg(SubReg);
} else {
- MI = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx0, OpIdx1);
+ CommutedMI =
+ TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx0, OpIdx1);
}
- if (MI)
- MI->setDesc(get(CommutedOpcode));
+ if (CommutedMI)
+ CommutedMI->setDesc(get(CommutedOpcode));
- return MI;
+ return CommutedMI;
}
// This needs to be implemented because the source modifiers may be inserted
// between the true commutable operands, and the base
// TargetInstrInfo::commuteInstruction uses it.
-bool SIInstrInfo::findCommutedOpIndices(MachineInstr *MI,
- unsigned &SrcOpIdx0,
+bool SIInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx0,
unsigned &SrcOpIdx1) const {
- const MCInstrDesc &MCID = MI->getDesc();
+ const MCInstrDesc &MCID = MI.getDesc();
if (!MCID.isCommutable())
return false;
- unsigned Opc = MI->getOpcode();
+ unsigned Opc = MI.getOpcode();
int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
if (Src0Idx == -1)
return false;
@@ -986,24 +1039,24 @@ bool SIInstrInfo::findCommutedOpIndices(MachineInstr *MI,
// FIXME: Workaround TargetInstrInfo::commuteInstruction asserting on
// immediate. Also, immediate src0 operand is not handled in
// SIInstrInfo::commuteInstruction();
- if (!MI->getOperand(Src0Idx).isReg())
+ if (!MI.getOperand(Src0Idx).isReg())
return false;
int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
if (Src1Idx == -1)
return false;
- MachineOperand &Src1 = MI->getOperand(Src1Idx);
+ MachineOperand &Src1 = MI.getOperand(Src1Idx);
if (Src1.isImm()) {
// SIInstrInfo::commuteInstruction() does support commuting the immediate
// operand src1 in 2 and 3 operand instructions.
- if (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))
+ if (!isVOP2(MI.getOpcode()) && !isVOP3(MI.getOpcode()))
return false;
} else if (Src1.isReg()) {
// If any source modifiers are set, the generic instruction commuting won't
// understand how to copy the source modifiers.
- if (hasModifiersSet(*MI, AMDGPU::OpName::src0_modifiers) ||
- hasModifiersSet(*MI, AMDGPU::OpName::src1_modifiers))
+ if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
+ hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers))
return false;
} else
return false;
@@ -1011,23 +1064,135 @@ bool SIInstrInfo::findCommutedOpIndices(MachineInstr *MI,
return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
}
-MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- unsigned DstReg,
- unsigned SrcReg) const {
- return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
- DstReg) .addReg(SrcReg);
+unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
+ switch (Cond) {
+ case SIInstrInfo::SCC_TRUE:
+ return AMDGPU::S_CBRANCH_SCC1;
+ case SIInstrInfo::SCC_FALSE:
+ return AMDGPU::S_CBRANCH_SCC0;
+ case SIInstrInfo::VCCNZ:
+ return AMDGPU::S_CBRANCH_VCCNZ;
+ case SIInstrInfo::VCCZ:
+ return AMDGPU::S_CBRANCH_VCCZ;
+ case SIInstrInfo::EXECNZ:
+ return AMDGPU::S_CBRANCH_EXECNZ;
+ case SIInstrInfo::EXECZ:
+ return AMDGPU::S_CBRANCH_EXECZ;
+ default:
+ llvm_unreachable("invalid branch predicate");
+ }
+}
+
+SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
+ switch (Opcode) {
+ case AMDGPU::S_CBRANCH_SCC0:
+ return SCC_FALSE;
+ case AMDGPU::S_CBRANCH_SCC1:
+ return SCC_TRUE;
+ case AMDGPU::S_CBRANCH_VCCNZ:
+ return VCCNZ;
+ case AMDGPU::S_CBRANCH_VCCZ:
+ return VCCZ;
+ case AMDGPU::S_CBRANCH_EXECNZ:
+ return EXECNZ;
+ case AMDGPU::S_CBRANCH_EXECZ:
+ return EXECZ;
+ default:
+ return INVALID_BR;
+ }
}
-bool SIInstrInfo::isMov(unsigned Opcode) const {
- switch(Opcode) {
- default: return false;
- case AMDGPU::S_MOV_B32:
- case AMDGPU::S_MOV_B64:
- case AMDGPU::V_MOV_B32_e32:
- case AMDGPU::V_MOV_B32_e64:
+bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
+ MachineBasicBlock::iterator I = MBB.getFirstTerminator();
+
+ if (I == MBB.end())
+ return false;
+
+ if (I->getOpcode() == AMDGPU::S_BRANCH) {
+ // Unconditional branch.
+ TBB = I->getOperand(0).getMBB();
+ return false;
+ }
+
+ BranchPredicate Pred = getBranchPredicate(I->getOpcode());
+ if (Pred == INVALID_BR)
return true;
+
+ MachineBasicBlock *CondBB = I->getOperand(0).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(Pred));
+
+ ++I;
+
+ if (I == MBB.end()) {
+ // Conditional branch followed by fall-through.
+ TBB = CondBB;
+ return false;
+ }
+
+ if (I->getOpcode() == AMDGPU::S_BRANCH) {
+ TBB = CondBB;
+ FBB = I->getOperand(0).getMBB();
+ return false;
+ }
+
+ return true;
+}
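The new analyzeBranch follows the TargetInstrInfo convention: returning false means the terminators were analyzed, true means give up. It accepts exactly three shapes: a lone S_BRANCH, a conditional branch falling through, and a conditional branch followed by S_BRANCH. A toy, self-contained model of that shape matching (stand-in opcodes and integer block ids, not the real MIR types):

#include <cassert>
#include <optional>
#include <vector>

enum Op { S_BRANCH, S_CBRANCH, OTHER };           // stand-ins, not AMDGPU enums
struct Term { Op Opc; int Target; };
struct Result { std::optional<int> TBB, FBB; bool HasCond = false; };

// Returns nullopt where the real analyzeBranch returns true (cannot analyze).
static std::optional<Result> analyze(const std::vector<Term> &T) {
  if (T.empty()) return Result{};                       // fall-through only
  if (T[0].Opc == S_BRANCH) return Result{T[0].Target, {}, false};
  if (T[0].Opc != S_CBRANCH) return std::nullopt;       // INVALID_BR case
  if (T.size() == 1) return Result{T[0].Target, {}, true};
  if (T[1].Opc == S_BRANCH) return Result{T[0].Target, T[1].Target, true};
  return std::nullopt;
}

int main() {
  auto R = analyze({{S_CBRANCH, 1}, {S_BRANCH, 2}});
  assert(R && *R->TBB == 1 && *R->FBB == 2 && R->HasCond);
}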
+
+unsigned SIInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator I = MBB.getFirstTerminator();
+
+ unsigned Count = 0;
+ while (I != MBB.end()) {
+ MachineBasicBlock::iterator Next = std::next(I);
+ I->eraseFromParent();
+ ++Count;
+ I = Next;
+ }
+
+ return Count;
+}
+
+unsigned SIInstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const {
+
+ if (!FBB && Cond.empty()) {
+ BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
+ .addMBB(TBB);
+ return 1;
+ }
+
+ assert(TBB && Cond[0].isImm());
+
+ unsigned Opcode
+ = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
+
+ if (!FBB) {
+ BuildMI(&MBB, DL, get(Opcode))
+ .addMBB(TBB);
+ return 1;
}
+
+ assert(TBB && FBB);
+
+ BuildMI(&MBB, DL, get(Opcode))
+ .addMBB(TBB);
+ BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
+ .addMBB(FBB);
+
+ return 2;
+}
+
+bool SIInstrInfo::ReverseBranchCondition(
+ SmallVectorImpl<MachineOperand> &Cond) const {
+ assert(Cond.size() == 1);
+ Cond[0].setImm(-Cond[0].getImm());
+ return false;
}
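ReverseBranchCondition can get away with negating the stored immediate only if each predicate and its inverse are encoded as +k and -k. A sketch of such an encoding; the concrete values below are an assumption for illustration and may not match the in-tree enum:

#include <cassert>

// Assumed layout: each predicate and its inverse are +k / -k around 0.
enum BranchPredicate {
  INVALID_BR = 0,
  SCC_TRUE = 1,  SCC_FALSE = -1,
  VCCNZ = 2,     VCCZ = -2,
  EXECNZ = 3,    EXECZ = -3,
};

int main() {
  // Cond[0].setImm(-Cond[0].getImm()) flips TRUE<->FALSE and NZ<->Z.
  assert(-SCC_TRUE == SCC_FALSE && -VCCZ == VCCNZ && -EXECNZ == EXECZ);
}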
static void removeModOperands(MachineInstr &MI) {
@@ -1044,81 +1209,76 @@ static void removeModOperands(MachineInstr &MI) {
MI.RemoveOperand(Src0ModIdx);
}
-bool SIInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
+// TODO: Maybe this should be removed and we should custom fold everything in
+// SIFoldOperands?
+bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
unsigned Reg, MachineRegisterInfo *MRI) const {
if (!MRI->hasOneNonDBGUse(Reg))
return false;
- unsigned Opc = UseMI->getOpcode();
+ unsigned Opc = UseMI.getOpcode();
if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64) {
// Don't fold if we are using source modifiers. The new VOP2 instructions
// don't have them.
- if (hasModifiersSet(*UseMI, AMDGPU::OpName::src0_modifiers) ||
- hasModifiersSet(*UseMI, AMDGPU::OpName::src1_modifiers) ||
- hasModifiersSet(*UseMI, AMDGPU::OpName::src2_modifiers)) {
+ if (hasModifiersSet(UseMI, AMDGPU::OpName::src0_modifiers) ||
+ hasModifiersSet(UseMI, AMDGPU::OpName::src1_modifiers) ||
+ hasModifiersSet(UseMI, AMDGPU::OpName::src2_modifiers)) {
return false;
}
- MachineOperand *Src0 = getNamedOperand(*UseMI, AMDGPU::OpName::src0);
- MachineOperand *Src1 = getNamedOperand(*UseMI, AMDGPU::OpName::src1);
- MachineOperand *Src2 = getNamedOperand(*UseMI, AMDGPU::OpName::src2);
+ const MachineOperand &ImmOp = DefMI.getOperand(1);
+
+ // If this is a free constant, there's no reason to do this.
+ // TODO: We could fold this here instead of letting SIFoldOperands do it
+ // later.
+ if (isInlineConstant(ImmOp, 4))
+ return false;
+
+ MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
+ MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
+ MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);
// Multiplied part is the constant: Use v_madmk_f32
// We should only expect these to be on src0 due to canonicalizations.
if (Src0->isReg() && Src0->getReg() == Reg) {
- if (!Src1->isReg() ||
- (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
+ if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
return false;
- if (!Src2->isReg() ||
- (Src2->isReg() && RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))))
+ if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
return false;
- // We need to do some weird looking operand shuffling since the madmk
- // operands are out of the normal expected order with the multiplied
- // constant as the last operand.
- //
- // v_mad_f32 src0, src1, src2 -> v_madmk_f32 src0 * src2K + src1
- // src0 -> src2 K
- // src1 -> src0
- // src2 -> src1
+ // We need to swap operands 0 and 1 since madmk constant is at operand 1.
- const int64_t Imm = DefMI->getOperand(1).getImm();
+ const int64_t Imm = DefMI.getOperand(1).getImm();
// FIXME: This would be a lot easier if we could return a new instruction
// instead of having to modify in place.
// Remove these first since they are at the end.
- UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
- AMDGPU::OpName::omod));
- UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
- AMDGPU::OpName::clamp));
+ UseMI.RemoveOperand(
+ AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
+ UseMI.RemoveOperand(
+ AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
unsigned Src1Reg = Src1->getReg();
unsigned Src1SubReg = Src1->getSubReg();
- unsigned Src2Reg = Src2->getReg();
- unsigned Src2SubReg = Src2->getSubReg();
Src0->setReg(Src1Reg);
Src0->setSubReg(Src1SubReg);
Src0->setIsKill(Src1->isKill());
- Src1->setReg(Src2Reg);
- Src1->setSubReg(Src2SubReg);
- Src1->setIsKill(Src2->isKill());
-
if (Opc == AMDGPU::V_MAC_F32_e64) {
- UseMI->untieRegOperand(
- AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
+ UseMI.untieRegOperand(
+ AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
}
- Src2->ChangeToImmediate(Imm);
+ Src1->ChangeToImmediate(Imm);
- removeModOperands(*UseMI);
- UseMI->setDesc(get(AMDGPU::V_MADMK_F32));
+ removeModOperands(UseMI);
+ UseMI.setDesc(get(AMDGPU::V_MADMK_F32));
bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
if (DeleteDef)
- DefMI->eraseFromParent();
+ DefMI.eraseFromParent();
return true;
}
@@ -1131,36 +1291,35 @@ bool SIInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
(Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))
return false;
- if (!Src1->isReg() ||
- (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
+ if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
return false;
- const int64_t Imm = DefMI->getOperand(1).getImm();
+ const int64_t Imm = DefMI.getOperand(1).getImm();
// FIXME: This would be a lot easier if we could return a new instruction
// instead of having to modify in place.
// Remove these first since they are at the end.
- UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
- AMDGPU::OpName::omod));
- UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
- AMDGPU::OpName::clamp));
+ UseMI.RemoveOperand(
+ AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
+ UseMI.RemoveOperand(
+ AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
if (Opc == AMDGPU::V_MAC_F32_e64) {
- UseMI->untieRegOperand(
- AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
+ UseMI.untieRegOperand(
+ AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
}
// ChangeToImmediate adds Src2 back to the instruction.
Src2->ChangeToImmediate(Imm);
// These come before src2.
- removeModOperands(*UseMI);
- UseMI->setDesc(get(AMDGPU::V_MADAK_F32));
+ removeModOperands(UseMI);
+ UseMI.setDesc(get(AMDGPU::V_MADAK_F32));
bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
if (DeleteDef)
- DefMI->eraseFromParent();
+ DefMI.eraseFromParent();
return true;
}
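Both folds above rewrite v_mad_f32 (d = s0 * s1 + s2) into a two-source form carrying a literal K: by our reading of the operand shuffles, v_madmk_f32 computes d = s0 * K + s1 and v_madak_f32 computes d = s0 * s1 + K. A small arithmetic check of that equivalence (semantics assumed from the code, not quoted from the ISA manual):

#include <cassert>

static float mad(float S0, float S1, float S2) { return S0 * S1 + S2; }
static float madmk(float S0, float K, float S1) { return S0 * K + S1; }
static float madak(float S0, float S1, float K) { return S0 * S1 + K; }

int main() {
  const float K = 2.0f, X = 3.0f, Y = 1.0f;
  assert(mad(K, X, Y) == madmk(X, K, Y)); // constant was multiplied: madmk
  assert(mad(X, Y, K) == madak(X, Y, K)); // constant was added: madak
}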
@@ -1177,17 +1336,20 @@ static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
return LowOffset + LowWidth <= HighOffset;
}
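Only the last line of offsetsDoNotOverlap is visible in this hunk; a self-contained reconstruction with a sanity check (the Low/High selection is inferred from the visible return statement and may differ cosmetically from the in-tree helper):

#include <algorithm>
#include <cassert>

static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
                                int WidthB, int OffsetB) {
  int LowOffset = std::min(OffsetA, OffsetB);
  int HighOffset = std::max(OffsetA, OffsetB);
  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  // Disjoint when the lower access ends at or before the higher one begins.
  return LowOffset + LowWidth <= HighOffset;
}

int main() {
  assert(offsetsDoNotOverlap(4, 0, 4, 4));  // [0,4) vs [4,8): disjoint
  assert(!offsetsDoNotOverlap(8, 0, 4, 4)); // [0,8) vs [4,8): overlap
}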
-bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr *MIa,
- MachineInstr *MIb) const {
- unsigned BaseReg0, Offset0;
- unsigned BaseReg1, Offset1;
+bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
+ MachineInstr &MIb) const {
+ unsigned BaseReg0, BaseReg1;
+ int64_t Offset0, Offset1;
if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
- assert(MIa->hasOneMemOperand() && MIb->hasOneMemOperand() &&
- "read2 / write2 not expected here yet");
- unsigned Width0 = (*MIa->memoperands_begin())->getSize();
- unsigned Width1 = (*MIb->memoperands_begin())->getSize();
+
+ if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
+ // FIXME: Handle ds_read2 / ds_write2.
+ return false;
+ }
+ unsigned Width0 = (*MIa.memoperands_begin())->getSize();
+ unsigned Width1 = (*MIb.memoperands_begin())->getSize();
if (BaseReg0 == BaseReg1 &&
offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
return true;
@@ -1197,19 +1359,19 @@ bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr *MIa,
return false;
}
-bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
- MachineInstr *MIb,
+bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa,
+ MachineInstr &MIb,
AliasAnalysis *AA) const {
- assert(MIa && (MIa->mayLoad() || MIa->mayStore()) &&
+ assert((MIa.mayLoad() || MIa.mayStore()) &&
"MIa must load from or modify a memory location");
- assert(MIb && (MIb->mayLoad() || MIb->mayStore()) &&
+ assert((MIb.mayLoad() || MIb.mayStore()) &&
"MIb must load from or modify a memory location");
- if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects())
+ if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
return false;
// XXX - Can we relax this between address spaces?
- if (MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
+ if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
return false;
// TODO: Should we check the address space from the MachineMemOperand? That
@@ -1217,29 +1379,29 @@ bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
// underlying address space, even if it was lowered to a different one,
// e.g. private accesses lowered to use MUBUF instructions on a scratch
// buffer.
- if (isDS(*MIa)) {
- if (isDS(*MIb))
+ if (isDS(MIa)) {
+ if (isDS(MIb))
return checkInstOffsetsDoNotOverlap(MIa, MIb);
- return !isFLAT(*MIb);
+ return !isFLAT(MIb);
}
- if (isMUBUF(*MIa) || isMTBUF(*MIa)) {
- if (isMUBUF(*MIb) || isMTBUF(*MIb))
+ if (isMUBUF(MIa) || isMTBUF(MIa)) {
+ if (isMUBUF(MIb) || isMTBUF(MIb))
return checkInstOffsetsDoNotOverlap(MIa, MIb);
- return !isFLAT(*MIb) && !isSMRD(*MIb);
+ return !isFLAT(MIb) && !isSMRD(MIb);
}
- if (isSMRD(*MIa)) {
- if (isSMRD(*MIb))
+ if (isSMRD(MIa)) {
+ if (isSMRD(MIb))
return checkInstOffsetsDoNotOverlap(MIa, MIb);
- return !isFLAT(*MIb) && !isMUBUF(*MIa) && !isMTBUF(*MIa);
+ return !isFLAT(MIb) && !isMUBUF(MIa) && !isMTBUF(MIa);
}
- if (isFLAT(*MIa)) {
- if (isFLAT(*MIb))
+ if (isFLAT(MIa)) {
+ if (isFLAT(MIb))
return checkInstOffsetsDoNotOverlap(MIa, MIb);
return false;
@@ -1249,35 +1411,49 @@ bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
}
MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
- MachineBasicBlock::iterator &MI,
- LiveVariables *LV) const {
-
- switch (MI->getOpcode()) {
- default: return nullptr;
- case AMDGPU::V_MAC_F32_e64: break;
- case AMDGPU::V_MAC_F32_e32: {
- const MachineOperand *Src0 = getNamedOperand(*MI, AMDGPU::OpName::src0);
- if (Src0->isImm() && !isInlineConstant(*Src0, 4))
- return nullptr;
- break;
- }
+ MachineInstr &MI,
+ LiveVariables *LV) const {
+
+ switch (MI.getOpcode()) {
+ default:
+ return nullptr;
+ case AMDGPU::V_MAC_F32_e64:
+ break;
+ case AMDGPU::V_MAC_F32_e32: {
+ const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
+ if (Src0->isImm() && !isInlineConstant(*Src0, 4))
+ return nullptr;
+ break;
+ }
}
- const MachineOperand *Dst = getNamedOperand(*MI, AMDGPU::OpName::dst);
- const MachineOperand *Src0 = getNamedOperand(*MI, AMDGPU::OpName::src0);
- const MachineOperand *Src1 = getNamedOperand(*MI, AMDGPU::OpName::src1);
- const MachineOperand *Src2 = getNamedOperand(*MI, AMDGPU::OpName::src2);
+ const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
+ const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
+ const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
+ const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
- return BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_MAD_F32))
- .addOperand(*Dst)
- .addImm(0) // Src0 mods
- .addOperand(*Src0)
- .addImm(0) // Src1 mods
- .addOperand(*Src1)
- .addImm(0) // Src mods
- .addOperand(*Src2)
- .addImm(0) // clamp
- .addImm(0); // omod
+ return BuildMI(*MBB, MI, MI.getDebugLoc(), get(AMDGPU::V_MAD_F32))
+ .addOperand(*Dst)
+ .addImm(0) // Src0 mods
+ .addOperand(*Src0)
+ .addImm(0) // Src1 mods
+ .addOperand(*Src1)
+ .addImm(0) // Src mods
+ .addOperand(*Src2)
+ .addImm(0) // clamp
+ .addImm(0); // omod
+}
+
+bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const {
+ // XXX - Do we want the SP check in the base implementation?
+
+ // Target-independent instructions do not have an implicit-use of EXEC, even
+ // when they operate on VGPRs. Treating EXEC modifications as scheduling
+ // boundaries prevents incorrect movements of such instructions.
+ return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) ||
+ MI.modifiesRegister(AMDGPU::EXEC, &RI);
}
bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
@@ -1355,9 +1531,9 @@ static bool compareMachineOp(const MachineOperand &Op0,
}
}
-bool SIInstrInfo::isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
- const MachineOperand &MO) const {
- const MCOperandInfo &OpInfo = get(MI->getOpcode()).OpInfo[OpNo];
+bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
+ const MachineOperand &MO) const {
+ const MCOperandInfo &OpInfo = get(MI.getOpcode()).OpInfo[OpNo];
assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
@@ -1418,14 +1594,10 @@ bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
return true;
// SGPRs use the constant bus
- if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
- (!MO.isImplicit() &&
- (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
- AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
- return true;
- }
-
- return false;
+ return (MO.getReg() == AMDGPU::VCC || MO.getReg() == AMDGPU::M0 ||
+ (!MO.isImplicit() &&
+ (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
+ AMDGPU::SGPR_64RegClass.contains(MO.getReg()))));
}
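usesConstantBus feeds the one-constant-bus rule checked in the verifier below: a VALU instruction may source at most one value (one distinct SGPR, or one literal) over the constant bus. A toy model of that counting with simplified operand records (the field names are invented for the sketch):

#include <cassert>
#include <optional>
#include <string>
#include <vector>

struct Src { bool UsesBus; std::string Reg; }; // Reg empty for literals

static bool legalVALU(const std::vector<Src> &Srcs) {
  std::optional<std::string> BusReg;
  unsigned Count = 0;
  for (const Src &S : Srcs) {
    if (!S.UsesBus)
      continue;
    if (!S.Reg.empty() && BusReg && *BusReg == S.Reg)
      continue;                                // re-reading one SGPR is one use
    if (!S.Reg.empty())
      BusReg = S.Reg;
    ++Count;
  }
  return Count <= 1;
}

int main() {
  assert(legalVALU({{true, "s0"}, {true, "s0"}}));  // same SGPR twice: OK
  assert(!legalVALU({{true, "s0"}, {true, "s1"}})); // two distinct SGPRs: illegal
  assert(legalVALU({{false, "v0"}, {true, ""}}));   // VGPR plus one literal: OK
}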
static unsigned findImplicitSGPRRead(const MachineInstr &MI) {
@@ -1448,10 +1620,33 @@ static unsigned findImplicitSGPRRead(const MachineInstr &MI) {
return AMDGPU::NoRegister;
}
-bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
+static bool shouldReadExec(const MachineInstr &MI) {
+ if (SIInstrInfo::isVALU(MI)) {
+ switch (MI.getOpcode()) {
+ case AMDGPU::V_READLANE_B32:
+ case AMDGPU::V_READLANE_B32_si:
+ case AMDGPU::V_READLANE_B32_vi:
+ case AMDGPU::V_WRITELANE_B32:
+ case AMDGPU::V_WRITELANE_B32_si:
+ case AMDGPU::V_WRITELANE_B32_vi:
+ return false;
+ }
+
+ return true;
+ }
+
+ if (SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
+ SIInstrInfo::isSALU(MI) ||
+ SIInstrInfo::isSMRD(MI))
+ return false;
+
+ return true;
+}
+
+bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const {
- uint16_t Opcode = MI->getOpcode();
- const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ uint16_t Opcode = MI.getOpcode();
+ const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
@@ -1459,14 +1654,14 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
// Make sure the number of operands is correct.
const MCInstrDesc &Desc = get(Opcode);
if (!Desc.isVariadic() &&
- Desc.getNumOperands() != MI->getNumExplicitOperands()) {
- ErrInfo = "Instruction has wrong number of operands.";
- return false;
+ Desc.getNumOperands() != MI.getNumExplicitOperands()) {
+ ErrInfo = "Instruction has wrong number of operands.";
+ return false;
}
// Make sure the register classes are correct.
for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
- if (MI->getOperand(i).isFPImm()) {
+ if (MI.getOperand(i).isFPImm()) {
ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
"all fp values to integers.";
return false;
@@ -1476,7 +1671,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
switch (Desc.OpInfo[i].OperandType) {
case MCOI::OPERAND_REGISTER:
- if (MI->getOperand(i).isImm()) {
+ if (MI.getOperand(i).isImm()) {
ErrInfo = "Illegal immediate value for operand.";
return false;
}
@@ -1484,17 +1679,18 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
case AMDGPU::OPERAND_REG_IMM32:
break;
case AMDGPU::OPERAND_REG_INLINE_C:
- if (isLiteralConstant(MI->getOperand(i),
+ if (isLiteralConstant(MI.getOperand(i),
RI.getRegClass(RegClass)->getSize())) {
ErrInfo = "Illegal immediate value for operand.";
return false;
}
break;
case MCOI::OPERAND_IMMEDIATE:
+ case AMDGPU::OPERAND_KIMM32:
// Check if this operand is an immediate.
// FrameIndex operands will be replaced by immediates, so they are
// allowed.
- if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFI()) {
+ if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
ErrInfo = "Expected immediate, but got non-immediate";
return false;
}
@@ -1503,12 +1699,13 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
continue;
}
- if (!MI->getOperand(i).isReg())
+ if (!MI.getOperand(i).isReg())
continue;
if (RegClass != -1) {
- unsigned Reg = MI->getOperand(i).getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg))
+ unsigned Reg = MI.getOperand(i).getReg();
+ if (Reg == AMDGPU::NoRegister ||
+ TargetRegisterInfo::isVirtualRegister(Reg))
continue;
const TargetRegisterClass *RC = RI.getRegClass(RegClass);
@@ -1519,23 +1716,26 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
}
}
-
// Verify VOP*
- if (isVOP1(*MI) || isVOP2(*MI) || isVOP3(*MI) || isVOPC(*MI)) {
+ if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI)) {
// Only look at the true operands. Only a real operand can use the constant
// bus, and we don't want to check pseudo-operands like the source modifier
// flags.
const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
unsigned ConstantBusCount = 0;
- unsigned SGPRUsed = findImplicitSGPRRead(*MI);
+
+ if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1)
+ ++ConstantBusCount;
+
+ unsigned SGPRUsed = findImplicitSGPRRead(MI);
if (SGPRUsed != AMDGPU::NoRegister)
++ConstantBusCount;
for (int OpIdx : OpIndices) {
if (OpIdx == -1)
break;
- const MachineOperand &MO = MI->getOperand(OpIdx);
+ const MachineOperand &MO = MI.getOperand(OpIdx);
if (usesConstantBus(MRI, MO, getOpSize(Opcode, OpIdx))) {
if (MO.isReg()) {
if (MO.getReg() != SGPRUsed)
@@ -1555,9 +1755,9 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
// Verify misc. restrictions on specific instructions.
if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
- const MachineOperand &Src0 = MI->getOperand(Src0Idx);
- const MachineOperand &Src1 = MI->getOperand(Src1Idx);
- const MachineOperand &Src2 = MI->getOperand(Src2Idx);
+ const MachineOperand &Src0 = MI.getOperand(Src0Idx);
+ const MachineOperand &Src1 = MI.getOperand(Src1Idx);
+ const MachineOperand &Src2 = MI.getOperand(Src2Idx);
if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
if (!compareMachineOp(Src0, Src1) &&
!compareMachineOp(Src0, Src2)) {
@@ -1569,9 +1769,8 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
// Make sure we aren't losing exec uses in the td files. This mostly requires
// being careful when using let Uses to add other use registers.
- if (!isGenericOpcode(Opcode) && !isSALU(Opcode) && !isSMRD(Opcode)) {
- const MachineOperand *Exec = MI->findRegisterUseOperand(AMDGPU::EXEC);
- if (!Exec || !Exec->isImplicit()) {
+ if (shouldReadExec(MI)) {
+ if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
ErrInfo = "VALU instruction does not implicitly read exec mask";
return false;
}
@@ -1624,22 +1823,18 @@ unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
- case AMDGPU::S_LOAD_DWORD_IMM:
- case AMDGPU::S_LOAD_DWORD_SGPR:
- case AMDGPU::S_LOAD_DWORD_IMM_ci:
- return AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
- case AMDGPU::S_LOAD_DWORDX2_IMM:
- case AMDGPU::S_LOAD_DWORDX2_SGPR:
- case AMDGPU::S_LOAD_DWORDX2_IMM_ci:
- return AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
- case AMDGPU::S_LOAD_DWORDX4_IMM:
- case AMDGPU::S_LOAD_DWORDX4_SGPR:
- case AMDGPU::S_LOAD_DWORDX4_IMM_ci:
- return AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
+ case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32;
+ case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32;
+ case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32;
+ case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32;
+ case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32;
+ case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32;
case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
+ case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
+ case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
}
}
@@ -1676,12 +1871,12 @@ bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
}
}
-void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
+void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
MachineBasicBlock::iterator I = MI;
- MachineBasicBlock *MBB = MI->getParent();
- MachineOperand &MO = MI->getOperand(OpIdx);
+ MachineBasicBlock *MBB = MI.getParent();
+ MachineOperand &MO = MI.getOperand(OpIdx);
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
- unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
+ unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
const TargetRegisterClass *RC = RI.getRegClass(RCID);
unsigned Opcode = AMDGPU::V_MOV_B32_e32;
if (MO.isReg())
@@ -1689,7 +1884,6 @@ void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
else if (RI.isSGPRClass(RC))
Opcode = AMDGPU::S_MOV_B32;
-
const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
VRC = &AMDGPU::VReg_64RegClass;
@@ -1698,8 +1892,7 @@ void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
unsigned Reg = MRI.createVirtualRegister(VRC);
DebugLoc DL = MBB->findDebugLoc(I);
- BuildMI(*MI->getParent(), I, DL, get(Opcode), Reg)
- .addOperand(MO);
+ BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).addOperand(MO);
MO.ChangeToRegister(Reg, false);
}
@@ -1758,11 +1951,11 @@ MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
}
// Change the order of operands from (0, 1, 2) to (0, 2, 1)
-void SIInstrInfo::swapOperands(MachineBasicBlock::iterator Inst) const {
- assert(Inst->getNumExplicitOperands() == 3);
- MachineOperand Op1 = Inst->getOperand(1);
- Inst->RemoveOperand(1);
- Inst->addOperand(Op1);
+void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
+ assert(Inst.getNumExplicitOperands() == 3);
+ MachineOperand Op1 = Inst.getOperand(1);
+ Inst.RemoveOperand(1);
+ Inst.addOperand(Op1);
}
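The remove-then-append trick in swapOperands is easy to misread; a toy check that it really turns (0, 1, 2) into (0, 2, 1):

#include <cassert>
#include <vector>

int main() {
  std::vector<int> Ops = {0, 1, 2};
  int Op1 = Ops[1];
  Ops.erase(Ops.begin() + 1);                  // Inst.RemoveOperand(1)
  Ops.push_back(Op1);                          // Inst.addOperand(Op1)
  assert((Ops == std::vector<int>{0, 2, 1}));  // operands 1 and 2 swapped
}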
bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
@@ -1804,26 +1997,32 @@ bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
return true;
}
-bool SIInstrInfo::isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
+bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
const MachineOperand *MO) const {
- const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
- const MCInstrDesc &InstDesc = get(MI->getOpcode());
+ const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+ const MCInstrDesc &InstDesc = MI.getDesc();
const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
const TargetRegisterClass *DefinedRC =
OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
if (!MO)
- MO = &MI->getOperand(OpIdx);
+ MO = &MI.getOperand(OpIdx);
+
+ if (isVALU(MI) && usesConstantBus(MRI, *MO, DefinedRC->getSize())) {
+
+ RegSubRegPair SGPRUsed;
+ if (MO->isReg())
+ SGPRUsed = RegSubRegPair(MO->getReg(), MO->getSubReg());
- if (isVALU(*MI) &&
- usesConstantBus(MRI, *MO, DefinedRC->getSize())) {
- unsigned SGPRUsed =
- MO->isReg() ? MO->getReg() : (unsigned)AMDGPU::NoRegister;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
if (i == OpIdx)
continue;
- const MachineOperand &Op = MI->getOperand(i);
- if (Op.isReg() && Op.getReg() != SGPRUsed &&
- usesConstantBus(MRI, Op, getOpSize(*MI, i))) {
+ const MachineOperand &Op = MI.getOperand(i);
+ if (Op.isReg()) {
+ if ((Op.getReg() != SGPRUsed.Reg || Op.getSubReg() != SGPRUsed.SubReg) &&
+ usesConstantBus(MRI, Op, getOpSize(MI, i))) {
+ return false;
+ }
+ } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) {
return false;
}
}
@@ -1834,7 +2033,6 @@ bool SIInstrInfo::isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
return isLegalRegOperand(MRI, OpInfo, *MO);
}
-
// Handle non-register types that are treated like immediates.
assert(MO->isImm() || MO->isTargetIndex() || MO->isFI());
@@ -1847,12 +2045,12 @@ bool SIInstrInfo::isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
}
void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
- MachineInstr *MI) const {
- unsigned Opc = MI->getOpcode();
+ MachineInstr &MI) const {
+ unsigned Opc = MI.getOpcode();
const MCInstrDesc &InstrDesc = get(Opc);
int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
- MachineOperand &Src1 = MI->getOperand(Src1Idx);
+ MachineOperand &Src1 = MI.getOperand(Src1Idx);
// If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32
// we need to only have one constant bus use.
@@ -1860,10 +2058,10 @@ void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
// Note we do not need to worry about literal constants here. They are
// disabled for the operand type for instructions because they will always
// violate the one constant bus use rule.
- bool HasImplicitSGPR = findImplicitSGPRRead(*MI) != AMDGPU::NoRegister;
+ bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister;
if (HasImplicitSGPR) {
int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
- MachineOperand &Src0 = MI->getOperand(Src0Idx);
+ MachineOperand &Src0 = MI.getOperand(Src0Idx);
if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg()))
legalizeOpWithMove(MI, Src0Idx);
@@ -1878,13 +2076,13 @@ void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
// commute if it is possible. We only want to commute here if it improves
// legality. This can be called a fairly large number of times so don't waste
// compile time pointlessly swapping and checking legality again.
- if (HasImplicitSGPR || !MI->isCommutable()) {
+ if (HasImplicitSGPR || !MI.isCommutable()) {
legalizeOpWithMove(MI, Src1Idx);
return;
}
int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
- MachineOperand &Src0 = MI->getOperand(Src0Idx);
+ MachineOperand &Src0 = MI.getOperand(Src0Idx);
// If src0 can be used as src1, commuting will make the operands legal.
// Otherwise we have to give up and insert a move.
@@ -1897,13 +2095,13 @@ void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
return;
}
- int CommutedOpc = commuteOpcode(*MI);
+ int CommutedOpc = commuteOpcode(MI);
if (CommutedOpc == -1) {
legalizeOpWithMove(MI, Src1Idx);
return;
}
- MI->setDesc(get(CommutedOpc));
+ MI.setDesc(get(CommutedOpc));
unsigned Src0Reg = Src0.getReg();
unsigned Src0SubReg = Src0.getSubReg();
@@ -1925,10 +2123,9 @@ void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
// operand, and since literal constants are not allowed and should never be
// seen, we only need to worry about inserting copies if we use multiple SGPR
// operands.
-void SIInstrInfo::legalizeOperandsVOP3(
- MachineRegisterInfo &MRI,
- MachineInstr *MI) const {
- unsigned Opc = MI->getOpcode();
+void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
+ MachineInstr &MI) const {
+ unsigned Opc = MI.getOpcode();
int VOP3Idx[3] = {
AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
@@ -1943,7 +2140,7 @@ void SIInstrInfo::legalizeOperandsVOP3(
int Idx = VOP3Idx[i];
if (Idx == -1)
break;
- MachineOperand &MO = MI->getOperand(Idx);
+ MachineOperand &MO = MI.getOperand(Idx);
// We should never see a VOP3 instruction with an illegal immediate operand.
if (!MO.isReg())
@@ -1964,32 +2161,78 @@ void SIInstrInfo::legalizeOperandsVOP3(
}
}
-void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
- MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
+ MachineRegisterInfo &MRI) const {
+ const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
+ const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
+ unsigned DstReg = MRI.createVirtualRegister(SRC);
+ unsigned SubRegs = VRC->getSize() / 4;
+
+ SmallVector<unsigned, 8> SRegs;
+ for (unsigned i = 0; i < SubRegs; ++i) {
+ unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+ BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
+ get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
+ .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
+ SRegs.push_back(SGPR);
+ }
+
+ MachineInstrBuilder MIB =
+ BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
+ get(AMDGPU::REG_SEQUENCE), DstReg);
+ for (unsigned i = 0; i < SubRegs; ++i) {
+ MIB.addReg(SRegs[i]);
+ MIB.addImm(RI.getSubRegFromChannel(i));
+ }
+ return DstReg;
+}
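readlaneVGPRToSGPR copies a wide VGPR value one 32-bit channel at a time, since V_READFIRSTLANE_B32 handles a single dword, and stitches the results back together with REG_SEQUENCE. A toy model of the channel walk (plain arrays stand in for register classes):

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  std::vector<uint32_t> Vgpr = {0xA, 0xB, 0xC, 0xD}; // VRC->getSize() / 4 == 4
  std::vector<uint32_t> Sgprs;
  for (uint32_t Dword : Vgpr)       // one V_READFIRSTLANE_B32 per channel
    Sgprs.push_back(Dword);
  assert(Sgprs == Vgpr);            // REG_SEQUENCE preserves sub0..subN order
}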
+
+void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
+ MachineInstr &MI) const {
+
+ // If the pointer is stored in VGPRs, then we need to move it to
+ // SGPRs using v_readfirstlane. This is safe because we only select
+ // loads with uniform pointers to SMRD instructions, so we know the
+ // pointer value is uniform.
+ MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
+ if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
+ unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
+ SBase->setReg(SGPR);
+ }
+}
+
+void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
+ MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
// Legalize VOP2
- if (isVOP2(*MI)) {
+ if (isVOP2(MI) || isVOPC(MI)) {
legalizeOperandsVOP2(MRI, MI);
return;
}
// Legalize VOP3
- if (isVOP3(*MI)) {
+ if (isVOP3(MI)) {
legalizeOperandsVOP3(MRI, MI);
return;
}
+ // Legalize SMRD
+ if (isSMRD(MI)) {
+ legalizeOperandsSMRD(MRI, MI);
+ return;
+ }
+
// Legalize REG_SEQUENCE and PHI
// The register class of the operands must be the same type as the register
// class of the output.
- if (MI->getOpcode() == AMDGPU::PHI) {
+ if (MI.getOpcode() == AMDGPU::PHI) {
const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
- for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
- if (!MI->getOperand(i).isReg() ||
- !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
+ for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
+ if (!MI.getOperand(i).isReg() ||
+ !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
continue;
const TargetRegisterClass *OpRC =
- MRI.getRegClass(MI->getOperand(i).getReg());
+ MRI.getRegClass(MI.getOperand(i).getReg());
if (RI.hasVGPRs(OpRC)) {
VRC = OpRC;
} else {
@@ -2000,7 +2243,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
// If any of the operands are VGPR registers, then they all must be VGPRs;
// otherwise we will create illegal VGPR->SGPR copies when legalizing
// them.
- if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
+ if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
if (!VRC) {
assert(SRC);
VRC = RI.getEquivalentVGPRClass(SRC);
@@ -2011,18 +2254,18 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
}
// Update all the operands so they have the same type.
- for (unsigned I = 1, E = MI->getNumOperands(); I != E; I += 2) {
- MachineOperand &Op = MI->getOperand(I);
+ for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
+ MachineOperand &Op = MI.getOperand(I);
if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
continue;
unsigned DstReg = MRI.createVirtualRegister(RC);
// MI is a PHI instruction.
- MachineBasicBlock *InsertBB = MI->getOperand(I + 1).getMBB();
+ MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
- BuildMI(*InsertBB, Insert, MI->getDebugLoc(), get(AMDGPU::COPY), DstReg)
- .addOperand(Op);
+ BuildMI(*InsertBB, Insert, MI.getDebugLoc(), get(AMDGPU::COPY), DstReg)
+ .addOperand(Op);
Op.setReg(DstReg);
}
}
@@ -2030,15 +2273,15 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
// REG_SEQUENCE doesn't really require operand legalization, but if one has a
// VGPR dest type and SGPR sources, insert copies so all operands are
// VGPRs. This seems to help operand folding / the register coalescer.
- if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
- MachineBasicBlock *MBB = MI->getParent();
- const TargetRegisterClass *DstRC = getOpRegClass(*MI, 0);
+ if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
+ MachineBasicBlock *MBB = MI.getParent();
+ const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
if (RI.hasVGPRs(DstRC)) {
// Update all the operands so they are VGPR register classes. These may
// not be the same register class because REG_SEQUENCE supports mixing
// subregister index types e.g. sub0_sub1 + sub2 + sub3
- for (unsigned I = 1, E = MI->getNumOperands(); I != E; I += 2) {
- MachineOperand &Op = MI->getOperand(I);
+ for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
+ MachineOperand &Op = MI.getOperand(I);
if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
continue;
@@ -2049,8 +2292,8 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
unsigned DstReg = MRI.createVirtualRegister(VRC);
- BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), DstReg)
- .addOperand(Op);
+ BuildMI(*MBB, MI, MI.getDebugLoc(), get(AMDGPU::COPY), DstReg)
+ .addOperand(Op);
Op.setReg(DstReg);
Op.setIsKill();
@@ -2062,17 +2305,33 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
// Legalize INSERT_SUBREG
// src0 must have the same register class as dst
- if (MI->getOpcode() == AMDGPU::INSERT_SUBREG) {
- unsigned Dst = MI->getOperand(0).getReg();
- unsigned Src0 = MI->getOperand(1).getReg();
+ if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
+ unsigned Dst = MI.getOperand(0).getReg();
+ unsigned Src0 = MI.getOperand(1).getReg();
const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
if (DstRC != Src0RC) {
- MachineBasicBlock &MBB = *MI->getParent();
+ MachineBasicBlock &MBB = *MI.getParent();
unsigned NewSrc0 = MRI.createVirtualRegister(DstRC);
- BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), NewSrc0)
- .addReg(Src0);
- MI->getOperand(1).setReg(NewSrc0);
+ BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::COPY), NewSrc0)
+ .addReg(Src0);
+ MI.getOperand(1).setReg(NewSrc0);
+ }
+ return;
+ }
+
+ // Legalize MIMG
+ if (isMIMG(MI)) {
+ MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
+ if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
+ unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI);
+ SRsrc->setReg(SGPR);
+ }
+
+ MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
+ if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) {
+ unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI);
+ SSamp->setReg(SGPR);
}
return;
}
@@ -2081,11 +2340,11 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
// FIXME: If we start using the non-addr64 instructions for compute, we
// may need to legalize them here.
int SRsrcIdx =
- AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::srsrc);
+ AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
if (SRsrcIdx != -1) {
// We have an MUBUF instruction
- MachineOperand *SRsrc = &MI->getOperand(SRsrcIdx);
- unsigned SRsrcRC = get(MI->getOpcode()).OpInfo[SRsrcIdx].RegClass;
+ MachineOperand *SRsrc = &MI.getOperand(SRsrcIdx);
+ unsigned SRsrcRC = get(MI.getOpcode()).OpInfo[SRsrcIdx].RegClass;
if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()),
RI.getRegClass(SRsrcRC))) {
// The operands are legal.
@@ -2093,7 +2352,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
return;
}
- MachineBasicBlock &MBB = *MI->getParent();
+ MachineBasicBlock &MBB = *MI.getParent();
// Extract the ptr from the resource descriptor.
unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc,
@@ -2107,30 +2366,27 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
// Zero64 = 0
- BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64),
- Zero64)
- .addImm(0);
+ BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B64), Zero64)
+ .addImm(0);
// SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
- BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
- SRsrcFormatLo)
- .addImm(RsrcDataFormat & 0xFFFFFFFF);
+ BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
+ .addImm(RsrcDataFormat & 0xFFFFFFFF);
// SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
- BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
- SRsrcFormatHi)
- .addImm(RsrcDataFormat >> 32);
+ BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
+ .addImm(RsrcDataFormat >> 32);
// NewSRsrc = {Zero64, SRsrcFormat}
- BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc)
- .addReg(Zero64)
- .addImm(AMDGPU::sub0_sub1)
- .addReg(SRsrcFormatLo)
- .addImm(AMDGPU::sub2)
- .addReg(SRsrcFormatHi)
- .addImm(AMDGPU::sub3);
-
- MachineOperand *VAddr = getNamedOperand(*MI, AMDGPU::OpName::vaddr);
+ BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc)
+ .addReg(Zero64)
+ .addImm(AMDGPU::sub0_sub1)
+ .addReg(SRsrcFormatLo)
+ .addImm(AMDGPU::sub2)
+ .addReg(SRsrcFormatHi)
+ .addImm(AMDGPU::sub3);
+
+ MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
if (VAddr) {
// This is already an ADDR64 instruction so we need to add the pointer
@@ -2139,7 +2395,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
// NewVaddrLo = SRsrcPtr:sub0 + VAddr:sub0
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
.addReg(SRsrcPtr, 0, AMDGPU::sub0)
.addReg(VAddr->getReg(), 0, AMDGPU::sub0);
@@ -2150,82 +2406,82 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
.addReg(VAddr->getReg(), 0, AMDGPU::sub1);
// NewVaddr = {NewVaddrHi, NewVaddrLo}
- BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
- .addReg(NewVAddrLo)
- .addImm(AMDGPU::sub0)
- .addReg(NewVAddrHi)
- .addImm(AMDGPU::sub1);
+ BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
+ .addReg(NewVAddrLo)
+ .addImm(AMDGPU::sub0)
+ .addReg(NewVAddrHi)
+ .addImm(AMDGPU::sub1);
} else {
// This instruction is the _OFFSET variant, so we need to convert it to
// ADDR64.
- assert(MBB.getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration()
- < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
+ assert(MBB.getParent()->getSubtarget<SISubtarget>().getGeneration()
+ < SISubtarget::VOLCANIC_ISLANDS &&
"FIXME: Need to emit flat atomics here");
- MachineOperand *VData = getNamedOperand(*MI, AMDGPU::OpName::vdata);
- MachineOperand *Offset = getNamedOperand(*MI, AMDGPU::OpName::offset);
- MachineOperand *SOffset = getNamedOperand(*MI, AMDGPU::OpName::soffset);
- unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI->getOpcode());
+ MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
+ MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
+ MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
+ unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
// Atomics with return have an additional tied operand and are
// missing some of the special bits.
- MachineOperand *VDataIn = getNamedOperand(*MI, AMDGPU::OpName::vdata_in);
+ MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
MachineInstr *Addr64;
if (!VDataIn) {
// Regular buffer load / store.
- MachineInstrBuilder MIB
- = BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
- .addOperand(*VData)
- .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
- // This will be replaced later
- // with the new value of vaddr.
- .addOperand(*SRsrc)
- .addOperand(*SOffset)
- .addOperand(*Offset);
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
+ .addOperand(*VData)
+ .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
+ // This will be replaced later
+ // with the new value of vaddr.
+ .addOperand(*SRsrc)
+ .addOperand(*SOffset)
+ .addOperand(*Offset);
// Atomics do not have this operand.
- if (const MachineOperand *GLC
- = getNamedOperand(*MI, AMDGPU::OpName::glc)) {
+ if (const MachineOperand *GLC =
+ getNamedOperand(MI, AMDGPU::OpName::glc)) {
MIB.addImm(GLC->getImm());
}
- MIB.addImm(getNamedImmOperand(*MI, AMDGPU::OpName::slc));
+ MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));
- if (const MachineOperand *TFE
- = getNamedOperand(*MI, AMDGPU::OpName::tfe)) {
+ if (const MachineOperand *TFE =
+ getNamedOperand(MI, AMDGPU::OpName::tfe)) {
MIB.addImm(TFE->getImm());
}
- MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
Addr64 = MIB;
} else {
// Atomics with return.
- Addr64 = BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
- .addOperand(*VData)
- .addOperand(*VDataIn)
- .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
- // This will be replaced later
- // with the new value of vaddr.
- .addOperand(*SRsrc)
- .addOperand(*SOffset)
- .addOperand(*Offset)
- .addImm(getNamedImmOperand(*MI, AMDGPU::OpName::slc))
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
+ .addOperand(*VData)
+ .addOperand(*VDataIn)
+ .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
+ // This will be replaced later
+ // with the new value of vaddr.
+ .addOperand(*SRsrc)
+ .addOperand(*SOffset)
+ .addOperand(*Offset)
+ .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
+ .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
}
- MI->removeFromParent();
- MI = Addr64;
+ MI.removeFromParent();
// NewVaddr = {NewVaddrHi, NewVaddrLo}
- BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
- .addReg(SRsrcPtr, 0, AMDGPU::sub0)
- .addImm(AMDGPU::sub0)
- .addReg(SRsrcPtr, 0, AMDGPU::sub1)
- .addImm(AMDGPU::sub1);
-
- VAddr = getNamedOperand(*MI, AMDGPU::OpName::vaddr);
- SRsrc = getNamedOperand(*MI, AMDGPU::OpName::srsrc);
+ BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
+ NewVAddr)
+ .addReg(SRsrcPtr, 0, AMDGPU::sub0)
+ .addImm(AMDGPU::sub0)
+ .addReg(SRsrcPtr, 0, AMDGPU::sub1)
+ .addImm(AMDGPU::sub1);
+
+ VAddr = getNamedOperand(*Addr64, AMDGPU::OpName::vaddr);
+ SRsrc = getNamedOperand(*Addr64, AMDGPU::OpName::srsrc);
}
// Update the instruction to use NewVaddr
@@ -2235,300 +2491,85 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
}
}
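The MUBUF path above manufactures a fresh 128-bit resource descriptor: a zeroed 64-bit base (the pointer travels through VAddr instead) with the default data format in the top two dwords. A sketch of that layout; the word order is our inference from the sub0_sub1/sub2/sub3 indices, not a quote from the hardware docs:

#include <cassert>
#include <cstdint>

struct BufferRsrc {     // 128 bits: sub0_sub1, sub2, sub3
  uint64_t Base;        // Zero64 above: addressing goes through VAddr
  uint32_t FormatLo;    // RsrcDataFormat & 0xFFFFFFFF
  uint32_t FormatHi;    // RsrcDataFormat >> 32
};

int main() {
  const uint64_t RsrcDataFormat = 0xF00000000ull; // illustrative value only
  BufferRsrc R = {0, uint32_t(RsrcDataFormat), uint32_t(RsrcDataFormat >> 32)};
  assert(R.Base == 0 && R.FormatLo == 0 && R.FormatHi == 0xF);
}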
-void SIInstrInfo::splitSMRD(MachineInstr *MI,
- const TargetRegisterClass *HalfRC,
- unsigned HalfImmOp, unsigned HalfSGPROp,
- MachineInstr *&Lo, MachineInstr *&Hi) const {
-
- DebugLoc DL = MI->getDebugLoc();
- MachineBasicBlock *MBB = MI->getParent();
- MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
- unsigned RegLo = MRI.createVirtualRegister(HalfRC);
- unsigned RegHi = MRI.createVirtualRegister(HalfRC);
- unsigned HalfSize = HalfRC->getSize();
- const MachineOperand *OffOp =
- getNamedOperand(*MI, AMDGPU::OpName::offset);
- const MachineOperand *SBase = getNamedOperand(*MI, AMDGPU::OpName::sbase);
-
- // The SMRD has an 8-bit offset in dwords on SI and a 20-bit offset in bytes
- // on VI.
-
- bool IsKill = SBase->isKill();
- if (OffOp) {
- bool isVI =
- MBB->getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() >=
- AMDGPUSubtarget::VOLCANIC_ISLANDS;
- unsigned OffScale = isVI ? 1 : 4;
- // Handle the _IMM variant
- unsigned LoOffset = OffOp->getImm() * OffScale;
- unsigned HiOffset = LoOffset + HalfSize;
- Lo = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegLo)
- // Use addReg instead of addOperand
- // to make sure kill flag is cleared.
- .addReg(SBase->getReg(), 0, SBase->getSubReg())
- .addImm(LoOffset / OffScale);
-
- if (!isUInt<20>(HiOffset) || (!isVI && !isUInt<8>(HiOffset / OffScale))) {
- unsigned OffsetSGPR =
- MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32), OffsetSGPR)
- .addImm(HiOffset); // The offset in register is in bytes.
- Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegHi)
- .addReg(SBase->getReg(), getKillRegState(IsKill),
- SBase->getSubReg())
- .addReg(OffsetSGPR);
- } else {
- Hi = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegHi)
- .addReg(SBase->getReg(), getKillRegState(IsKill),
- SBase->getSubReg())
- .addImm(HiOffset / OffScale);
- }
- } else {
- // Handle the _SGPR variant
- MachineOperand *SOff = getNamedOperand(*MI, AMDGPU::OpName::soff);
- Lo = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegLo)
- .addReg(SBase->getReg(), 0, SBase->getSubReg())
- .addOperand(*SOff);
- unsigned OffsetSGPR = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
- BuildMI(*MBB, MI, DL, get(AMDGPU::S_ADD_I32), OffsetSGPR)
- .addReg(SOff->getReg(), 0, SOff->getSubReg())
- .addImm(HalfSize);
- Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegHi)
- .addReg(SBase->getReg(), getKillRegState(IsKill),
- SBase->getSubReg())
- .addReg(OffsetSGPR);
- }
-
- unsigned SubLo, SubHi;
- const TargetRegisterClass *NewDstRC;
- switch (HalfSize) {
- case 4:
- SubLo = AMDGPU::sub0;
- SubHi = AMDGPU::sub1;
- NewDstRC = &AMDGPU::VReg_64RegClass;
- break;
- case 8:
- SubLo = AMDGPU::sub0_sub1;
- SubHi = AMDGPU::sub2_sub3;
- NewDstRC = &AMDGPU::VReg_128RegClass;
- break;
- case 16:
- SubLo = AMDGPU::sub0_sub1_sub2_sub3;
- SubHi = AMDGPU::sub4_sub5_sub6_sub7;
- NewDstRC = &AMDGPU::VReg_256RegClass;
- break;
- case 32:
- SubLo = AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
- SubHi = AMDGPU::sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15;
- NewDstRC = &AMDGPU::VReg_512RegClass;
- break;
- default:
- llvm_unreachable("Unhandled HalfSize");
- }
-
- unsigned OldDst = MI->getOperand(0).getReg();
- unsigned NewDst = MRI.createVirtualRegister(NewDstRC);
-
- MRI.replaceRegWith(OldDst, NewDst);
-
- BuildMI(*MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), NewDst)
- .addReg(RegLo)
- .addImm(SubLo)
- .addReg(RegHi)
- .addImm(SubHi);
-}
-
-void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI,
- MachineRegisterInfo &MRI,
- SmallVectorImpl<MachineInstr *> &Worklist) const {
- MachineBasicBlock *MBB = MI->getParent();
- int DstIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
- assert(DstIdx != -1);
- unsigned DstRCID = get(MI->getOpcode()).OpInfo[DstIdx].RegClass;
- switch(RI.getRegClass(DstRCID)->getSize()) {
- case 4:
- case 8:
- case 16: {
- unsigned NewOpcode = getVALUOp(*MI);
- unsigned RegOffset;
- unsigned ImmOffset;
-
- if (MI->getOperand(2).isReg()) {
- RegOffset = MI->getOperand(2).getReg();
- ImmOffset = 0;
- } else {
- assert(MI->getOperand(2).isImm());
- // SMRD instructions take a dword offsets on SI and byte offset on VI
- // and MUBUF instructions always take a byte offset.
- ImmOffset = MI->getOperand(2).getImm();
- if (MBB->getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() <=
- AMDGPUSubtarget::SEA_ISLANDS)
- ImmOffset <<= 2;
- RegOffset = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
-
- if (isUInt<12>(ImmOffset)) {
- BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
- RegOffset)
- .addImm(0);
- } else {
- BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
- RegOffset)
- .addImm(ImmOffset);
- ImmOffset = 0;
- }
- }
-
- unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
- unsigned DWord0 = RegOffset;
- unsigned DWord1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
- unsigned DWord2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
- unsigned DWord3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
- uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
-
- BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord1)
- .addImm(0);
- BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord2)
- .addImm(RsrcDataFormat & 0xFFFFFFFF);
- BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord3)
- .addImm(RsrcDataFormat >> 32);
- BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), SRsrc)
- .addReg(DWord0)
- .addImm(AMDGPU::sub0)
- .addReg(DWord1)
- .addImm(AMDGPU::sub1)
- .addReg(DWord2)
- .addImm(AMDGPU::sub2)
- .addReg(DWord3)
- .addImm(AMDGPU::sub3);
-
- const MCInstrDesc &NewInstDesc = get(NewOpcode);
- const TargetRegisterClass *NewDstRC
- = RI.getRegClass(NewInstDesc.OpInfo[0].RegClass);
- unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
- unsigned DstReg = MI->getOperand(0).getReg();
- MRI.replaceRegWith(DstReg, NewDstReg);
-
- MachineInstr *NewInst =
- BuildMI(*MBB, MI, MI->getDebugLoc(), NewInstDesc, NewDstReg)
- .addOperand(MI->getOperand(1)) // sbase
- .addReg(SRsrc)
- .addImm(0)
- .addImm(ImmOffset)
- .addImm(0) // glc
- .addImm(0) // slc
- .addImm(0) // tfe
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
- MI->eraseFromParent();
-
- legalizeOperands(NewInst);
- addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
- break;
- }
- case 32: {
- MachineInstr *Lo, *Hi;
- splitSMRD(MI, &AMDGPU::SReg_128RegClass, AMDGPU::S_LOAD_DWORDX4_IMM,
- AMDGPU::S_LOAD_DWORDX4_SGPR, Lo, Hi);
- MI->eraseFromParent();
- moveSMRDToVALU(Lo, MRI, Worklist);
- moveSMRDToVALU(Hi, MRI, Worklist);
- break;
- }
-
- case 64: {
- MachineInstr *Lo, *Hi;
- splitSMRD(MI, &AMDGPU::SReg_256RegClass, AMDGPU::S_LOAD_DWORDX8_IMM,
- AMDGPU::S_LOAD_DWORDX8_SGPR, Lo, Hi);
- MI->eraseFromParent();
- moveSMRDToVALU(Lo, MRI, Worklist);
- moveSMRDToVALU(Hi, MRI, Worklist);
- break;
- }
- }
-}
-
void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
SmallVector<MachineInstr *, 128> Worklist;
Worklist.push_back(&TopInst);
while (!Worklist.empty()) {
- MachineInstr *Inst = Worklist.pop_back_val();
- MachineBasicBlock *MBB = Inst->getParent();
+ MachineInstr &Inst = *Worklist.pop_back_val();
+ MachineBasicBlock *MBB = Inst.getParent();
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
- unsigned Opcode = Inst->getOpcode();
- unsigned NewOpcode = getVALUOp(*Inst);
+ unsigned Opcode = Inst.getOpcode();
+ unsigned NewOpcode = getVALUOp(Inst);
// Handle some special cases
switch (Opcode) {
default:
- if (isSMRD(*Inst)) {
- moveSMRDToVALU(Inst, MRI, Worklist);
- continue;
- }
break;
case AMDGPU::S_AND_B64:
splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64);
- Inst->eraseFromParent();
+ Inst.eraseFromParent();
continue;
case AMDGPU::S_OR_B64:
splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64);
- Inst->eraseFromParent();
+ Inst.eraseFromParent();
continue;
case AMDGPU::S_XOR_B64:
splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64);
- Inst->eraseFromParent();
+ Inst.eraseFromParent();
continue;
case AMDGPU::S_NOT_B64:
splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32);
- Inst->eraseFromParent();
+ Inst.eraseFromParent();
continue;
case AMDGPU::S_BCNT1_I32_B64:
splitScalar64BitBCNT(Worklist, Inst);
- Inst->eraseFromParent();
+ Inst.eraseFromParent();
continue;
case AMDGPU::S_BFE_I64: {
splitScalar64BitBFE(Worklist, Inst);
- Inst->eraseFromParent();
+ Inst.eraseFromParent();
continue;
}
case AMDGPU::S_LSHL_B32:
- if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
swapOperands(Inst);
}
break;
case AMDGPU::S_ASHR_I32:
- if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
swapOperands(Inst);
}
break;
case AMDGPU::S_LSHR_B32:
- if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
swapOperands(Inst);
}
break;
case AMDGPU::S_LSHL_B64:
- if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
NewOpcode = AMDGPU::V_LSHLREV_B64;
swapOperands(Inst);
}
break;
case AMDGPU::S_ASHR_I64:
- if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
NewOpcode = AMDGPU::V_ASHRREV_I64;
swapOperands(Inst);
}
break;
case AMDGPU::S_LSHR_B64:
- if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
NewOpcode = AMDGPU::V_LSHRREV_B64;
swapOperands(Inst);
}
@@ -2536,9 +2577,18 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
case AMDGPU::S_ABS_I32:
lowerScalarAbs(Worklist, Inst);
- Inst->eraseFromParent();
+ Inst.eraseFromParent();
continue;
+ case AMDGPU::S_CBRANCH_SCC0:
+ case AMDGPU::S_CBRANCH_SCC1:
+ // Clear unused bits of vcc
+ BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64),
+ AMDGPU::VCC)
+ .addReg(AMDGPU::EXEC)
+ .addReg(AMDGPU::VCC);
+ break;
+
case AMDGPU::S_BFE_U64:
case AMDGPU::S_BFM_B64:
llvm_unreachable("Moving this op to VALU not implemented");
@@ -2553,34 +2603,36 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
// Use the new VALU Opcode.
const MCInstrDesc &NewDesc = get(NewOpcode);
- Inst->setDesc(NewDesc);
+ Inst.setDesc(NewDesc);
// Remove any references to SCC. Vector instructions can't read from it, and
// we're just about to add the implicit use / defs of VCC, and we don't want
// both.
- for (unsigned i = Inst->getNumOperands() - 1; i > 0; --i) {
- MachineOperand &Op = Inst->getOperand(i);
- if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
- Inst->RemoveOperand(i);
+ for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
+ MachineOperand &Op = Inst.getOperand(i);
+ if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
+ Inst.RemoveOperand(i);
+ addSCCDefUsersToVALUWorklist(Inst, Worklist);
+ }
}
if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
// We are converting these to a BFE, so we need to add the missing
// operands for the size and offset.
unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
- Inst->addOperand(MachineOperand::CreateImm(0));
- Inst->addOperand(MachineOperand::CreateImm(Size));
+ Inst.addOperand(MachineOperand::CreateImm(0));
+ Inst.addOperand(MachineOperand::CreateImm(Size));
} else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
// The VALU version adds the second operand to the result, so insert an
// extra 0 operand.
- Inst->addOperand(MachineOperand::CreateImm(0));
+ Inst.addOperand(MachineOperand::CreateImm(0));
}
- Inst->addImplicitDefUseOperands(*Inst->getParent()->getParent());
+ Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
- const MachineOperand &OffsetWidthOp = Inst->getOperand(2);
+ const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
// If we need to move this to VGPRs, we need to unpack the second operand
// back into the 2 separate ones for bit offset and width.
assert(OffsetWidthOp.isImm() &&
@@ -2589,50 +2641,41 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
- Inst->RemoveOperand(2); // Remove old immediate.
- Inst->addOperand(MachineOperand::CreateImm(Offset));
- Inst->addOperand(MachineOperand::CreateImm(BitWidth));
+ Inst.RemoveOperand(2); // Remove old immediate.
+ Inst.addOperand(MachineOperand::CreateImm(Offset));
+ Inst.addOperand(MachineOperand::CreateImm(BitWidth));
}
- // Update the destination register class.
- const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(*Inst);
- if (!NewDstRC)
- continue;
+ bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
+ unsigned NewDstReg = AMDGPU::NoRegister;
+ if (HasDst) {
+ // Update the destination register class.
+ const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
+ if (!NewDstRC)
+ continue;
- unsigned DstReg = Inst->getOperand(0).getReg();
- unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
- MRI.replaceRegWith(DstReg, NewDstReg);
+ unsigned DstReg = Inst.getOperand(0).getReg();
+ NewDstReg = MRI.createVirtualRegister(NewDstRC);
+ MRI.replaceRegWith(DstReg, NewDstReg);
+ }
// Legalize the operands
legalizeOperands(Inst);
- addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
+ if (HasDst)
+ addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
}
}
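
For context on the unpacking above: the scalar S_BFE immediate packs both fields into one 32-bit operand, the bit offset in bits [5:0] and the field width in bits [22:16], which is why the conversion splits it back into two operands before switching to the VALU form. A minimal standalone sketch of that packing and unpacking (plain C++; packBFEImm is an illustrative helper, not LLVM API):

    #include <cassert>
    #include <cstdint>

    // Pack an S_BFE-style immediate: bit offset in [5:0], field width in [22:16].
    static uint32_t packBFEImm(uint32_t Offset, uint32_t Width) {
      return (Offset & 0x3f) | ((Width & 0x7f) << 16);
    }

    int main() {
      // S_SEXT_I32_I8 is rewritten as a BFE with offset 0 and width 8.
      uint32_t Imm = packBFEImm(0, 8);
      uint32_t Offset = Imm & 0x3f;               // extract bits [5:0]
      uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // extract bits [22:16]
      assert(Offset == 0 && BitWidth == 8);
      (void)Offset; (void)BitWidth;
      return 0;
    }
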
-//===----------------------------------------------------------------------===//
-// Indirect addressing callbacks
-//===----------------------------------------------------------------------===//
-
-unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
- unsigned Channel) const {
- assert(Channel == 0);
- return RegIndex;
-}
-
-const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
- return &AMDGPU::VGPR_32RegClass;
-}
-
void SIInstrInfo::lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist,
- MachineInstr *Inst) const {
- MachineBasicBlock &MBB = *Inst->getParent();
+ MachineInstr &Inst) const {
+ MachineBasicBlock &MBB = *Inst.getParent();
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
MachineBasicBlock::iterator MII = Inst;
- DebugLoc DL = Inst->getDebugLoc();
+ DebugLoc DL = Inst.getDebugLoc();
- MachineOperand &Dest = Inst->getOperand(0);
- MachineOperand &Src = Inst->getOperand(1);
+ MachineOperand &Dest = Inst.getOperand(0);
+ MachineOperand &Src = Inst.getOperand(1);
unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
@@ -2649,15 +2692,14 @@ void SIInstrInfo::lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist,
}
void SIInstrInfo::splitScalar64BitUnaryOp(
- SmallVectorImpl<MachineInstr *> &Worklist,
- MachineInstr *Inst,
- unsigned Opcode) const {
- MachineBasicBlock &MBB = *Inst->getParent();
+ SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst,
+ unsigned Opcode) const {
+ MachineBasicBlock &MBB = *Inst.getParent();
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
- MachineOperand &Dest = Inst->getOperand(0);
- MachineOperand &Src0 = Inst->getOperand(1);
- DebugLoc DL = Inst->getDebugLoc();
+ MachineOperand &Dest = Inst.getOperand(0);
+ MachineOperand &Src0 = Inst.getOperand(1);
+ DebugLoc DL = Inst.getDebugLoc();
MachineBasicBlock::iterator MII = Inst;
@@ -2703,16 +2745,15 @@ void SIInstrInfo::splitScalar64BitUnaryOp(
}
void SIInstrInfo::splitScalar64BitBinaryOp(
- SmallVectorImpl<MachineInstr *> &Worklist,
- MachineInstr *Inst,
- unsigned Opcode) const {
- MachineBasicBlock &MBB = *Inst->getParent();
+ SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst,
+ unsigned Opcode) const {
+ MachineBasicBlock &MBB = *Inst.getParent();
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
- MachineOperand &Dest = Inst->getOperand(0);
- MachineOperand &Src0 = Inst->getOperand(1);
- MachineOperand &Src1 = Inst->getOperand(2);
- DebugLoc DL = Inst->getDebugLoc();
+ MachineOperand &Dest = Inst.getOperand(0);
+ MachineOperand &Src0 = Inst.getOperand(1);
+ MachineOperand &Src1 = Inst.getOperand(2);
+ DebugLoc DL = Inst.getDebugLoc();
MachineBasicBlock::iterator MII = Inst;
@@ -2738,9 +2779,9 @@ void SIInstrInfo::splitScalar64BitBinaryOp(
const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
- MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
- .addOperand(SrcReg0Sub0)
- .addOperand(SrcReg1Sub0);
+ MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
+ .addOperand(SrcReg0Sub0)
+ .addOperand(SrcReg1Sub0);
MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
AMDGPU::sub1, Src0SubRC);
@@ -2748,9 +2789,9 @@ void SIInstrInfo::splitScalar64BitBinaryOp(
AMDGPU::sub1, Src1SubRC);
unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
- MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
- .addOperand(SrcReg0Sub1)
- .addOperand(SrcReg1Sub1);
+ MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
+ .addOperand(SrcReg0Sub1)
+ .addOperand(SrcReg1Sub1);
unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
@@ -2770,16 +2811,16 @@ void SIInstrInfo::splitScalar64BitBinaryOp(
addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}
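
The split above is sound because a bitwise 64-bit op acts on each 32-bit half independently: the two half-ops write sub0 and sub1, and REG_SEQUENCE reassembles the full register. A standalone check of that decomposition (plain C++, illustrative only):

    #include <cassert>
    #include <cstdint>

    // Emulate splitScalar64BitBinaryOp for a bitwise op: apply the 32-bit op
    // to sub0 and sub1 separately, then recombine as REG_SEQUENCE would.
    static uint64_t splitApply(uint64_t A, uint64_t B,
                               uint32_t (*Op)(uint32_t, uint32_t)) {
      uint32_t Lo = Op(uint32_t(A), uint32_t(B));             // LoHalf on sub0
      uint32_t Hi = Op(uint32_t(A >> 32), uint32_t(B >> 32)); // HiHalf on sub1
      return (uint64_t(Hi) << 32) | Lo;                       // REG_SEQUENCE
    }

    int main() {
      uint64_t A = 0x123456789abcdef0ULL, B = 0x0ff0f00fdeadbeefULL;
      assert(splitApply(A, B, [](uint32_t X, uint32_t Y) -> uint32_t { return X & Y; }) == (A & B));
      assert(splitApply(A, B, [](uint32_t X, uint32_t Y) -> uint32_t { return X | Y; }) == (A | B));
      assert(splitApply(A, B, [](uint32_t X, uint32_t Y) -> uint32_t { return X ^ Y; }) == (A ^ B));
      return 0;
    }
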
-void SIInstrInfo::splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist,
- MachineInstr *Inst) const {
- MachineBasicBlock &MBB = *Inst->getParent();
+void SIInstrInfo::splitScalar64BitBCNT(
+ SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst) const {
+ MachineBasicBlock &MBB = *Inst.getParent();
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
MachineBasicBlock::iterator MII = Inst;
- DebugLoc DL = Inst->getDebugLoc();
+ DebugLoc DL = Inst.getDebugLoc();
- MachineOperand &Dest = Inst->getOperand(0);
- MachineOperand &Src = Inst->getOperand(1);
+ MachineOperand &Dest = Inst.getOperand(0);
+ MachineOperand &Src = Inst.getOperand(1);
const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
const TargetRegisterClass *SrcRC = Src.isReg() ?
@@ -2812,24 +2853,22 @@ void SIInstrInfo::splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist
}
void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
- MachineInstr *Inst) const {
- MachineBasicBlock &MBB = *Inst->getParent();
+ MachineInstr &Inst) const {
+ MachineBasicBlock &MBB = *Inst.getParent();
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
MachineBasicBlock::iterator MII = Inst;
- DebugLoc DL = Inst->getDebugLoc();
+ DebugLoc DL = Inst.getDebugLoc();
- MachineOperand &Dest = Inst->getOperand(0);
- uint32_t Imm = Inst->getOperand(2).getImm();
+ MachineOperand &Dest = Inst.getOperand(0);
+ uint32_t Imm = Inst.getOperand(2).getImm();
uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
(void) Offset;
// Only sext_inreg cases handled.
- assert(Inst->getOpcode() == AMDGPU::S_BFE_I64 &&
- BitWidth <= 32 &&
- Offset == 0 &&
- "Not implemented");
+ assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
+ Offset == 0 && "Not implemented");
if (BitWidth < 32) {
unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
@@ -2837,9 +2876,9 @@ void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
- .addReg(Inst->getOperand(1).getReg(), 0, AMDGPU::sub0)
- .addImm(0)
- .addImm(BitWidth);
+ .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
+ .addImm(0)
+ .addImm(BitWidth);
BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
.addImm(31)
@@ -2856,7 +2895,7 @@ void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
return;
}
- MachineOperand &Src = Inst->getOperand(1);
+ MachineOperand &Src = Inst.getOperand(1);
unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
@@ -2887,6 +2926,22 @@ void SIInstrInfo::addUsersToMoveToVALUWorklist(
}
}
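
The BitWidth < 32 path of splitScalar64BitBFE above lowers a 64-bit sext_inreg using only 32-bit ops: V_BFE_I32 sign-extends the low field, and V_ASHRREV_I32 by 31 replicates its sign bit into the high half. A standalone check of that decomposition (plain C++; assumes arithmetic right shift on signed int, which both halves rely on):

    #include <cassert>
    #include <cstdint>

    // Model of the BitWidth < 32 path: BFE_I32 sign-extends the low field,
    // then an arithmetic shift by 31 fills the high 32 bits with the sign.
    static uint64_t sextInReg64(uint64_t Src, unsigned BitWidth) {
      assert(BitWidth > 0 && BitWidth < 32);
      int32_t Lo = int32_t(uint32_t(Src) << (32 - BitWidth)) >> (32 - BitWidth); // V_BFE_I32
      int32_t Hi = Lo >> 31;                                                    // V_ASHRREV_I32
      return (uint64_t(uint32_t(Hi)) << 32) | uint32_t(Lo);
    }

    int main() {
      assert(sextInReg64(0x00000000000000f0ULL, 8) == 0xfffffffffffffff0ULL);
      assert(sextInReg64(0x0000000000000070ULL, 8) == 0x0000000000000070ULL);
      return 0;
    }
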
+void SIInstrInfo::addSCCDefUsersToVALUWorklist(
+ MachineInstr &SCCDefInst, SmallVectorImpl<MachineInstr *> &Worklist) const {
+ // This assumes that all the users of SCC are in the same block
+ // as the SCC def.
+ for (MachineInstr &MI :
+ llvm::make_range(MachineBasicBlock::iterator(SCCDefInst),
+ SCCDefInst.getParent()->end())) {
+ // Exit if we find another SCC def.
+ if (MI.findRegisterDefOperandIdx(AMDGPU::SCC) != -1)
+ return;
+
+ if (MI.findRegisterUseOperandIdx(AMDGPU::SCC) != -1)
+ Worklist.push_back(&MI);
+ }
+}
+
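
The scan in addSCCDefUsersToVALUWorklist is a plain forward walk over the rest of the block: by the time it runs, moveToVALU has already stripped the SCC operand from the converted instruction, so every later instruction that reads SCC belongs on the worklist, until another SCC def ends the live range. A standalone model of that walk (plain C++; Inst is a hypothetical stand-in for MachineInstr, not LLVM API):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Inst {               // hypothetical stand-in for MachineInstr
      bool DefsSCC, UsesSCC;
    };

    // Collect indices of SCC readers reached by the def at DefIdx; the next
    // redefinition of SCC kills the old value and stops the scan.
    static std::vector<std::size_t> sccUsers(const std::vector<Inst> &Block,
                                             std::size_t DefIdx) {
      std::vector<std::size_t> Users;
      for (std::size_t I = DefIdx + 1; I < Block.size(); ++I) {
        if (Block[I].DefsSCC)
          break;
        if (Block[I].UsesSCC)
          Users.push_back(I);
      }
      return Users;
    }

    int main() {
      //                      def           use            def           use
      std::vector<Inst> B = {{true, false}, {false, true}, {true, false}, {false, true}};
      std::vector<std::size_t> U = sccUsers(B, 0);
      assert(U.size() == 1 && U[0] == 1); // the use after the second def is not ours
      return 0;
    }
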
const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
const MachineInstr &Inst) const {
const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
@@ -2912,9 +2967,9 @@ const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
}
// Find the one SGPR operand we are allowed to use.
-unsigned SIInstrInfo::findUsedSGPR(const MachineInstr *MI,
+unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
int OpIndices[3]) const {
- const MCInstrDesc &Desc = MI->getDesc();
+ const MCInstrDesc &Desc = MI.getDesc();
// Find the one SGPR operand we are allowed to use.
//
@@ -2925,19 +2980,19 @@ unsigned SIInstrInfo::findUsedSGPR(const MachineInstr *MI,
//
// If the operand's class is an SGPR, we can never move it.
- unsigned SGPRReg = findImplicitSGPRRead(*MI);
+ unsigned SGPRReg = findImplicitSGPRRead(MI);
if (SGPRReg != AMDGPU::NoRegister)
return SGPRReg;
unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
- const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
for (unsigned i = 0; i < 3; ++i) {
int Idx = OpIndices[i];
if (Idx == -1)
break;
- const MachineOperand &MO = MI->getOperand(Idx);
+ const MachineOperand &MO = MI.getOperand(Idx);
if (!MO.isReg())
continue;
@@ -2981,70 +3036,6 @@ unsigned SIInstrInfo::findUsedSGPR(const MachineInstr *MI,
return SGPRReg;
}
-MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
- MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- unsigned ValueReg,
- unsigned Address, unsigned OffsetReg) const {
- const DebugLoc &DL = MBB->findDebugLoc(I);
- unsigned IndirectBaseReg = AMDGPU::VGPR_32RegClass.getRegister(
- getIndirectIndexBegin(*MBB->getParent()));
-
- return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
- .addReg(IndirectBaseReg, RegState::Define)
- .addOperand(I->getOperand(0))
- .addReg(IndirectBaseReg)
- .addReg(OffsetReg)
- .addImm(0)
- .addReg(ValueReg);
-}
-
-MachineInstrBuilder SIInstrInfo::buildIndirectRead(
- MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- unsigned ValueReg,
- unsigned Address, unsigned OffsetReg) const {
- const DebugLoc &DL = MBB->findDebugLoc(I);
- unsigned IndirectBaseReg = AMDGPU::VGPR_32RegClass.getRegister(
- getIndirectIndexBegin(*MBB->getParent()));
-
- return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC_V1))
- .addOperand(I->getOperand(0))
- .addOperand(I->getOperand(1))
- .addReg(IndirectBaseReg)
- .addReg(OffsetReg)
- .addImm(0);
-
-}
-
-void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
- const MachineFunction &MF) const {
- int End = getIndirectIndexEnd(MF);
- int Begin = getIndirectIndexBegin(MF);
-
- if (End == -1)
- return;
-
-
- for (int Index = Begin; Index <= End; ++Index)
- Reserved.set(AMDGPU::VGPR_32RegClass.getRegister(Index));
-
- for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
- Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));
-
- for (int Index = std::max(0, Begin - 2); Index <= End; ++Index)
- Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));
-
- for (int Index = std::max(0, Begin - 3); Index <= End; ++Index)
- Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));
-
- for (int Index = std::max(0, Begin - 7); Index <= End; ++Index)
- Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));
-
- for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
- Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
-}
-
MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
unsigned OperandName) const {
int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
@@ -3059,9 +3050,9 @@ uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
if (ST.isAmdHsaOS()) {
RsrcDataFormat |= (1ULL << 56);
- if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
- // Set MTYPE = 2
- RsrcDataFormat |= (2ULL << 59);
+ if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
+ // Set MTYPE = 2
+ RsrcDataFormat |= (2ULL << 59);
}
return RsrcDataFormat;
@@ -3072,22 +3063,103 @@ uint64_t SIInstrInfo::getScratchRsrcWords23() const {
AMDGPU::RSRC_TID_ENABLE |
0xffffffff; // Size
+ uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1;
+
+ Rsrc23 |= (EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT) |
+ // IndexStride = 64
+ (UINT64_C(3) << AMDGPU::RSRC_INDEX_STRIDE_SHIFT);
+
// If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
// Clear them unless we want a huge stride.
- if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
+ if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
return Rsrc23;
}
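
The element-size field above is encoded as log2(bytes) - 1, so the supported private element sizes 2/4/8/16 map to codes 0..3, and index-stride code 3 selects a 64-lane stride; on VI the stride bits alias DATA_FORMAT, hence the clear. A standalone sketch of the words 2-3 computation (plain C++; the constants mirror the RSRC_* shifts this patch adds to SIInstrInfo.h, everything else is illustrative):

    #include <cassert>
    #include <cstdint>

    constexpr uint64_t RSRC_DATA_FORMAT = 0xf00000000000ULL;       // bits [47:44]
    constexpr unsigned RSRC_ELEMENT_SIZE_SHIFT = 32 + 19;          // bit 51
    constexpr unsigned RSRC_INDEX_STRIDE_SHIFT = 32 + 21;          // bit 53
    constexpr uint64_t RSRC_TID_ENABLE = UINT64_C(1) << (32 + 23); // bit 55

    static uint64_t scratchRsrcWords23(unsigned MaxPrivateElementSize,
                                       bool IsVIOrLater) {
      uint64_t Rsrc23 = RSRC_DATA_FORMAT | RSRC_TID_ENABLE | 0xffffffff; // Size
      // Element size is encoded as log2(bytes) - 1, so 2/4/8/16 map to 0..3.
      uint64_t Log2 = 0;
      for (unsigned V = MaxPrivateElementSize; V > 1; V >>= 1)
        ++Log2;
      Rsrc23 |= ((Log2 - 1) << RSRC_ELEMENT_SIZE_SHIFT) |
                (UINT64_C(3) << RSRC_INDEX_STRIDE_SHIFT); // stride code 3 = 64
      if (IsVIOrLater) // with TID_ENABLE, DATA_FORMAT holds stride bits; clear
        Rsrc23 &= ~RSRC_DATA_FORMAT;
      return Rsrc23;
    }

    int main() {
      assert(scratchRsrcWords23(4, false) == 0x00e8f000ffffffffULL);
      assert(scratchRsrcWords23(4, true)  == 0x00e80000ffffffffULL);
      return 0;
    }
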
-bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr *MI) const {
- unsigned Opc = MI->getOpcode();
+bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
+ unsigned Opc = MI.getOpcode();
return isSMRD(Opc);
}
-bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr *MI) const {
- unsigned Opc = MI->getOpcode();
+bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr &MI) const {
+ unsigned Opc = MI.getOpcode();
return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc);
}
+
+unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
+ unsigned Opc = MI.getOpcode();
+ const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
+ unsigned DescSize = Desc.getSize();
+
+ // If we have a definitive size, we can use it. Otherwise we need to inspect
+ // the operands to know the size.
+ if (DescSize == 8 || DescSize == 4)
+ return DescSize;
+
+ assert(DescSize == 0);
+
+ // 4-byte instructions may have a 32-bit literal encoded after them. Check
+ // operands that could ever be literals.
+ if (isVALU(MI) || isSALU(MI)) {
+ int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
+ if (Src0Idx == -1)
+ return 4; // No operands.
+
+ if (isLiteralConstant(MI.getOperand(Src0Idx), getOpSize(MI, Src0Idx)))
+ return 8;
+
+ int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
+ if (Src1Idx == -1)
+ return 4;
+
+ if (isLiteralConstant(MI.getOperand(Src1Idx), getOpSize(MI, Src1Idx)))
+ return 8;
+
+ return 4;
+ }
+
+ switch (Opc) {
+ case TargetOpcode::IMPLICIT_DEF:
+ case TargetOpcode::KILL:
+ case TargetOpcode::DBG_VALUE:
+ case TargetOpcode::BUNDLE:
+ case TargetOpcode::EH_LABEL:
+ return 0;
+ case TargetOpcode::INLINEASM: {
+ const MachineFunction *MF = MI.getParent()->getParent();
+ const char *AsmStr = MI.getOperand(0).getSymbolName();
+ return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
+ }
+ default:
+ llvm_unreachable("unable to find instruction size");
+ }
+}
+
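
The rule implemented above, spelled out: descriptor sizes 4 and 8 are definitive, while a size-0 VALU/SALU descriptor denotes a 4-byte encoding that grows to 8 bytes when a source operand is a 32-bit literal, since the literal is emitted as an extra dword after the instruction word. A toy model of that decision (plain C++, illustrative only):

    #include <cassert>

    // Toy model: DescSize 4/8 is definitive; 0 means "inspect the sources".
    static unsigned instSizeInBytes(unsigned DescSize, bool Src0IsLiteral,
                                    bool Src1IsLiteral) {
      if (DescSize == 4 || DescSize == 8)
        return DescSize;
      // A 32-bit literal is an extra dword appended to the 4-byte encoding.
      return (Src0IsLiteral || Src1IsLiteral) ? 8 : 4;
    }

    int main() {
      assert(instSizeInBytes(0, false, false) == 4); // e.g. all-register VALU op
      assert(instSizeInBytes(0, true, false) == 8);  // src0 is a 32-bit literal
      assert(instSizeInBytes(4, false, false) == 4); // descriptor size is final
      return 0;
    }
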
+ArrayRef<std::pair<int, const char *>>
+SIInstrInfo::getSerializableTargetIndices() const {
+ static const std::pair<int, const char *> TargetIndices[] = {
+ {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
+ {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
+ {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
+ {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
+ {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
+ return makeArrayRef(TargetIndices);
+}
+
+/// This is used by the post-RA scheduler (PostRASchedulerList.cpp). The
+/// post-RA version of misched uses CreateTargetMIHazardRecognizer.
+ScheduleHazardRecognizer *
+SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
+ const ScheduleDAG *DAG) const {
+ return new GCNHazardRecognizer(DAG->MF);
+}
+
+/// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
+/// pass.
+ScheduleHazardRecognizer *
+SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
+ return new GCNHazardRecognizer(MF);
+}
diff --git a/lib/Target/AMDGPU/SIInstrInfo.h b/lib/Target/AMDGPU/SIInstrInfo.h
index cce1ae725611..227b817227c2 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/lib/Target/AMDGPU/SIInstrInfo.h
@@ -13,8 +13,8 @@
//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_SIINSTRINFO_H
-#define LLVM_LIB_TARGET_R600_SIINSTRINFO_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_SIINSTRINFO_H
+#define LLVM_LIB_TARGET_AMDGPU_SIINSTRINFO_H
#include "AMDGPUInstrInfo.h"
#include "SIDefines.h"
@@ -22,9 +22,24 @@
namespace llvm {
-class SIInstrInfo : public AMDGPUInstrInfo {
+class SIInstrInfo final : public AMDGPUInstrInfo {
private:
const SIRegisterInfo RI;
+ const SISubtarget &ST;
+
+ // The inverse predicate should have the negative value.
+ enum BranchPredicate {
+ INVALID_BR = 0,
+ SCC_TRUE = 1,
+ SCC_FALSE = -1,
+ VCCNZ = 2,
+ VCCZ = -2,
+ EXECNZ = -3,
+ EXECZ = 3
+ };
+
+ static unsigned getBranchOpcode(BranchPredicate Cond);
+ static BranchPredicate getBranchPredicate(unsigned Opcode);
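
The enum above is laid out so that reversing a branch condition is a single negation: SCC_TRUE (1) flips to SCC_FALSE (-1), VCCNZ (2) to VCCZ (-2), and so on, which is presumably what ReverseBranchCondition exploits. A sketch of that invariant (plain C++; invertPredicate is a hypothetical helper, not part of this patch):

    #include <cassert>

    enum BranchPredicate {
      INVALID_BR = 0,
      SCC_TRUE = 1, SCC_FALSE = -1,
      VCCNZ = 2,    VCCZ = -2,
      EXECNZ = -3,  EXECZ = 3
    };

    // Hypothetical helper: the inverse predicate is just the negated value.
    static BranchPredicate invertPredicate(BranchPredicate Cond) {
      assert(Cond != INVALID_BR);
      return static_cast<BranchPredicate>(-Cond);
    }

    int main() {
      assert(invertPredicate(SCC_TRUE) == SCC_FALSE);
      assert(invertPredicate(VCCZ) == VCCNZ);
      assert(invertPredicate(EXECZ) == EXECNZ);
      return 0;
    }
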
unsigned buildExtractSubReg(MachineBasicBlock::iterator MI,
MachineRegisterInfo &MRI,
@@ -39,87 +54,89 @@ private:
unsigned SubIdx,
const TargetRegisterClass *SubRC) const;
- void swapOperands(MachineBasicBlock::iterator Inst) const;
+ void swapOperands(MachineInstr &Inst) const;
void lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist,
- MachineInstr *Inst) const;
+ MachineInstr &Inst) const;
void splitScalar64BitUnaryOp(SmallVectorImpl<MachineInstr *> &Worklist,
- MachineInstr *Inst, unsigned Opcode) const;
+ MachineInstr &Inst, unsigned Opcode) const;
void splitScalar64BitBinaryOp(SmallVectorImpl<MachineInstr *> &Worklist,
- MachineInstr *Inst, unsigned Opcode) const;
+ MachineInstr &Inst, unsigned Opcode) const;
void splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist,
- MachineInstr *Inst) const;
+ MachineInstr &Inst) const;
void splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
- MachineInstr *Inst) const;
+ MachineInstr &Inst) const;
void addUsersToMoveToVALUWorklist(
unsigned Reg, MachineRegisterInfo &MRI,
SmallVectorImpl<MachineInstr *> &Worklist) const;
+ void
+ addSCCDefUsersToVALUWorklist(MachineInstr &SCCDefInst,
+ SmallVectorImpl<MachineInstr *> &Worklist) const;
+
const TargetRegisterClass *
getDestEquivalentVGPRClass(const MachineInstr &Inst) const;
- bool checkInstOffsetsDoNotOverlap(MachineInstr *MIa,
- MachineInstr *MIb) const;
+ bool checkInstOffsetsDoNotOverlap(MachineInstr &MIa, MachineInstr &MIb) const;
- unsigned findUsedSGPR(const MachineInstr *MI, int OpIndices[3]) const;
+ unsigned findUsedSGPR(const MachineInstr &MI, int OpIndices[3]) const;
protected:
- MachineInstr *commuteInstructionImpl(MachineInstr *MI,
- bool NewMI,
+ MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx0,
unsigned OpIdx1) const override;
public:
- explicit SIInstrInfo(const AMDGPUSubtarget &st);
- const SIRegisterInfo &getRegisterInfo() const override {
+ enum TargetOperandFlags {
+ MO_NONE = 0,
+ MO_GOTPCREL = 1
+ };
+
+ explicit SIInstrInfo(const SISubtarget &);
+
+ const SIRegisterInfo &getRegisterInfo() const {
return RI;
}
- bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
+ bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
AliasAnalysis *AA) const override;
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
int64_t &Offset1,
int64_t &Offset2) const override;
- bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
- unsigned &Offset,
+ bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
+ int64_t &Offset,
const TargetRegisterInfo *TRI) const final;
- bool shouldClusterLoads(MachineInstr *FirstLdSt,
- MachineInstr *SecondLdSt,
- unsigned NumLoads) const final;
+ bool shouldClusterMemOps(MachineInstr &FirstLdSt, MachineInstr &SecondLdSt,
+ unsigned NumLoads) const final;
- void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
- unsigned calculateLDSSpillAddress(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- RegScavenger *RS,
- unsigned TmpReg,
- unsigned Offset,
- unsigned Size) const;
+ unsigned calculateLDSSpillAddress(MachineBasicBlock &MBB, MachineInstr &MI,
+ RegScavenger *RS, unsigned TmpReg,
+ unsigned Offset, unsigned Size) const;
void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned SrcReg, bool isKill, int FrameIndex,
+ MachineBasicBlock::iterator MI, unsigned SrcReg,
+ bool isKill, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
+ MachineBasicBlock::iterator MI, unsigned DestReg,
+ int FrameIndex, const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
- bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
+ bool expandPostRAPseudo(MachineInstr &MI) const override;
// \brief Returns an opcode that can be used to move a value to a \p DstRC
// register. If there is no hardware instruction that can store to \p
@@ -129,28 +146,40 @@ public:
LLVM_READONLY
int commuteOpcode(const MachineInstr &MI) const;
- bool findCommutedOpIndices(MachineInstr *MI,
- unsigned &SrcOpIdx1,
+ bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const override;
- bool areMemAccessesTriviallyDisjoint(
- MachineInstr *MIa, MachineInstr *MIb,
- AliasAnalysis *AA = nullptr) const override;
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const override;
+
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const override;
- MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- unsigned DstReg, unsigned SrcReg) const override;
- bool isMov(unsigned Opcode) const override;
+ bool ReverseBranchCondition(
+ SmallVectorImpl<MachineOperand> &Cond) const override;
- bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
- unsigned Reg, MachineRegisterInfo *MRI) const final;
+ bool
+ areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+ AliasAnalysis *AA = nullptr) const override;
+
+ bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, unsigned Reg,
+ MachineRegisterInfo *MRI) const final;
unsigned getMachineCSELookAheadLimit() const override { return 500; }
MachineInstr *convertToThreeAddress(MachineFunction::iterator &MBB,
- MachineBasicBlock::iterator &MI,
+ MachineInstr &MI,
LiveVariables *LV) const override;
+ bool isSchedulingBoundary(const MachineInstr &MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const override;
+
static bool isSALU(const MachineInstr &MI) {
return MI.getDesc().TSFlags & SIInstrFlags::SALU;
}
@@ -167,6 +196,14 @@ public:
return get(Opcode).TSFlags & SIInstrFlags::VALU;
}
+ static bool isVMEM(const MachineInstr &MI) {
+ return isMUBUF(MI) || isMTBUF(MI) || isMIMG(MI);
+ }
+
+ bool isVMEM(uint16_t Opcode) const {
+ return isMUBUF(Opcode) || isMTBUF(Opcode) || isMIMG(Opcode);
+ }
+
static bool isSOP1(const MachineInstr &MI) {
return MI.getDesc().TSFlags & SIInstrFlags::SOP1;
}
@@ -279,6 +316,14 @@ public:
return get(Opcode).TSFlags & SIInstrFlags::MIMG;
}
+ static bool isGather4(const MachineInstr &MI) {
+ return MI.getDesc().TSFlags & SIInstrFlags::Gather4;
+ }
+
+ bool isGather4(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::Gather4;
+ }
+
static bool isFLAT(const MachineInstr &MI) {
return MI.getDesc().TSFlags & SIInstrFlags::FLAT;
}
@@ -303,11 +348,35 @@ public:
return get(Opcode).TSFlags & SIInstrFlags::VGPRSpill;
}
+ static bool isDPP(const MachineInstr &MI) {
+ return MI.getDesc().TSFlags & SIInstrFlags::DPP;
+ }
+
+ bool isDPP(uint16_t Opcode) const {
+ return get(Opcode).TSFlags & SIInstrFlags::DPP;
+ }
+
+ static bool isScalarUnit(const MachineInstr &MI) {
+ return MI.getDesc().TSFlags & (SIInstrFlags::SALU | SIInstrFlags::SMRD);
+ }
+
+ static bool usesVM_CNT(const MachineInstr &MI) {
+ return MI.getDesc().TSFlags & SIInstrFlags::VM_CNT;
+ }
+
+ bool isVGPRCopy(const MachineInstr &MI) const {
+ assert(MI.isCopy());
+ unsigned Dest = MI.getOperand(0).getReg();
+ const MachineFunction &MF = *MI.getParent()->getParent();
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ return !RI.isSGPRReg(MRI, Dest);
+ }
+
bool isInlineConstant(const APInt &Imm) const;
bool isInlineConstant(const MachineOperand &MO, unsigned OpSize) const;
bool isLiteralConstant(const MachineOperand &MO, unsigned OpSize) const;
- bool isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
+ bool isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
const MachineOperand &MO) const;
/// \brief Return true if this 64-bit VALU instruction has a 32-bit encoding.
@@ -326,7 +395,7 @@ public:
bool hasModifiersSet(const MachineInstr &MI,
unsigned OpName) const;
- bool verifyInstruction(const MachineInstr *MI,
+ bool verifyInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const override;
static unsigned getVALUOp(const MachineInstr &MI);
@@ -374,11 +443,11 @@ public:
///
/// If the operand being legalized is a register, then a COPY will be used
/// instead of MOV.
- void legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const;
+ void legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const;
/// \brief Check if \p MO is a legal operand if it was the \p OpIdx Operand
/// for \p MI.
- bool isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
+ bool isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
const MachineOperand *MO = nullptr) const;
/// \brief Check if \p MO would be a valid operand for the given operand
@@ -396,52 +465,38 @@ public:
/// \brief Legalize operands in \p MI by either commuting it or inserting a
/// copy of src1.
- void legalizeOperandsVOP2(MachineRegisterInfo &MRI, MachineInstr *MI) const;
+ void legalizeOperandsVOP2(MachineRegisterInfo &MRI, MachineInstr &MI) const;
/// \brief Fix operands in \p MI to satisfy constant bus requirements.
- void legalizeOperandsVOP3(MachineRegisterInfo &MRI, MachineInstr *MI) const;
+ void legalizeOperandsVOP3(MachineRegisterInfo &MRI, MachineInstr &MI) const;
- /// \brief Legalize all operands in this instruction. This function may
- /// create new instruction and insert them before \p MI.
- void legalizeOperands(MachineInstr *MI) const;
+ /// Copy a value from a VGPR (\p SrcReg) to SGPR. This function can only
+ /// be used when it is known that the value in SrcReg is the same across all
+ /// threads in the wave.
+ /// \returns The SGPR register that \p SrcReg was copied to.
+ unsigned readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
+ MachineRegisterInfo &MRI) const;
- /// \brief Split an SMRD instruction into two smaller loads of half the
- // size storing the results in \p Lo and \p Hi.
- void splitSMRD(MachineInstr *MI, const TargetRegisterClass *HalfRC,
- unsigned HalfImmOp, unsigned HalfSGPROp,
- MachineInstr *&Lo, MachineInstr *&Hi) const;
+ void legalizeOperandsSMRD(MachineRegisterInfo &MRI, MachineInstr &MI) const;
- void moveSMRDToVALU(MachineInstr *MI, MachineRegisterInfo &MRI,
- SmallVectorImpl<MachineInstr *> &Worklist) const;
+ /// \brief Legalize all operands in this instruction. This function may
+ /// create new instructions and insert them before \p MI.
+ void legalizeOperands(MachineInstr &MI) const;
/// \brief Replace this instruction's opcode with the equivalent VALU
/// opcode. This function will also move the users of \p MI to the
/// VALU if necessary.
void moveToVALU(MachineInstr &MI) const;
- unsigned calculateIndirectAddress(unsigned RegIndex,
- unsigned Channel) const override;
-
- const TargetRegisterClass *getIndirectAddrRegClass() const override;
+ void insertWaitStates(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ int Count) const;
- MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- unsigned ValueReg,
- unsigned Address,
- unsigned OffsetReg) const override;
+ void insertNoop(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override;
- MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator I,
- unsigned ValueReg,
- unsigned Address,
- unsigned OffsetReg) const override;
- void reserveIndirectRegisters(BitVector &Reserved,
- const MachineFunction &MF) const;
-
- void LoadM0(MachineInstr *MoveRel, MachineBasicBlock::iterator I,
- unsigned SavReg, unsigned IndexReg) const;
-
- void insertWaitStates(MachineBasicBlock::iterator MI, int Count) const;
+ /// \brief Return the number of wait states that result from executing this
+ /// instruction.
+ unsigned getNumWaitStates(const MachineInstr &MI) const;
/// \brief Returns the operand named \p Op. If \p MI does not have an
/// operand named \c Op, this function returns nullptr.
@@ -463,8 +518,26 @@ public:
uint64_t getDefaultRsrcDataFormat() const;
uint64_t getScratchRsrcWords23() const;
- bool isLowLatencyInstruction(const MachineInstr *MI) const;
- bool isHighLatencyInstruction(const MachineInstr *MI) const;
+ bool isLowLatencyInstruction(const MachineInstr &MI) const;
+ bool isHighLatencyInstruction(const MachineInstr &MI) const;
+
+ /// \brief Return the descriptor of the target-specific machine instruction
+ /// that corresponds to the specified pseudo or native opcode.
+ const MCInstrDesc &getMCOpcodeFromPseudo(unsigned Opcode) const {
+ return get(pseudoToMCOpcode(Opcode));
+ }
+
+ unsigned getInstSizeInBytes(const MachineInstr &MI) const;
+
+ ArrayRef<std::pair<int, const char *>>
+ getSerializableTargetIndices() const override;
+
+ ScheduleHazardRecognizer *
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
+ const ScheduleDAG *DAG) const override;
+
+ ScheduleHazardRecognizer *
+ CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const override;
};
namespace AMDGPU {
@@ -490,8 +563,9 @@ namespace AMDGPU {
int getAtomicNoRetOp(uint16_t Opcode);
const uint64_t RSRC_DATA_FORMAT = 0xf00000000000LL;
- const uint64_t RSRC_TID_ENABLE = 1LL << 55;
-
+ const uint64_t RSRC_ELEMENT_SIZE_SHIFT = (32 + 19);
+ const uint64_t RSRC_INDEX_STRIDE_SHIFT = (32 + 21);
+ const uint64_t RSRC_TID_ENABLE = UINT64_C(1) << (32 + 23);
} // End namespace AMDGPU
namespace SI {
diff --git a/lib/Target/AMDGPU/SIInstrInfo.td b/lib/Target/AMDGPU/SIInstrInfo.td
index 8735277149a6..253cc32b27e4 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/lib/Target/AMDGPU/SIInstrInfo.td
@@ -7,9 +7,9 @@
//
//===----------------------------------------------------------------------===//
def isCI : Predicate<"Subtarget->getGeneration() "
- ">= AMDGPUSubtarget::SEA_ISLANDS">;
+ ">= SISubtarget::SEA_ISLANDS">;
def isCIOnly : Predicate<"Subtarget->getGeneration() =="
- "AMDGPUSubtarget::SEA_ISLANDS">,
+ "SISubtarget::SEA_ISLANDS">,
AssemblerPredicate <"FeatureSeaIslands">;
def DisableInst : Predicate <"false">, AssemblerPredicate<"FeatureDisable">;
@@ -69,6 +69,11 @@ class sopk <bits<5> si, bits<5> vi = si> {
field bits<5> VI = vi;
}
+class dsop <bits<8> si, bits<8> vi = si> {
+ field bits<8> SI = si;
+ field bits<8> VI = vi;
+}
+
// Specify an SMRD opcode for SI and SMEM opcode for VI
// FIXME: This should really be bits<5> si, Tablegen crashes if
@@ -78,9 +83,9 @@ class smrd<bits<8> si, bits<8> vi = si> {
field bits<8> VI = vi;
}
-// Execpt for the NONE field, this must be kept in sync with the SISubtarget enum
-// in AMDGPUInstrInfo.cpp
-def SISubtarget {
+// Except for the NONE field, this must be kept in sync with the
+// SIEncodingFamily enum in AMDGPUInstrInfo.cpp
+def SIEncodingFamily {
int NONE = -1;
int SI = 0;
int VI = 1;
@@ -95,6 +100,14 @@ def SIload_constant : SDNode<"AMDGPUISD::LOAD_CONSTANT",
[SDNPMayLoad, SDNPMemOperand]
>;
+def SIatomic_inc : SDNode<"AMDGPUISD::ATOMIC_INC", SDTAtomic2,
+ [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
+>;
+
+def SIatomic_dec : SDNode<"AMDGPUISD::ATOMIC_DEC", SDTAtomic2,
+ [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
+>;
+
def SItbuffer_store : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT",
SDTypeProfile<0, 13,
[SDTCisVT<0, v4i32>, // rsrc(SGPR)
@@ -120,7 +133,7 @@ def SIload_input : SDNode<"AMDGPUISD::LOAD_INPUT",
>;
class SDSample<string opcode> : SDNode <opcode,
- SDTypeProfile<1, 4, [SDTCisVT<0, v4f32>, SDTCisVT<2, v32i8>,
+ SDTypeProfile<1, 4, [SDTCisVT<0, v4f32>, SDTCisVT<2, v8i32>,
SDTCisVT<3, v4i32>, SDTCisVT<4, i32>]>
>;
@@ -129,9 +142,8 @@ def SIsampleb : SDSample<"AMDGPUISD::SAMPLEB">;
def SIsampled : SDSample<"AMDGPUISD::SAMPLED">;
def SIsamplel : SDSample<"AMDGPUISD::SAMPLEL">;
-def SIconstdata_ptr : SDNode<
- "AMDGPUISD::CONST_DATA_PTR", SDTypeProfile <1, 1, [SDTCisVT<0, i64>,
- SDTCisVT<0, i64>]>
+def SIpc_add_rel_offset : SDNode<"AMDGPUISD::PC_ADD_REL_OFFSET",
+ SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisSameAs<0,1>]>
>;
//===----------------------------------------------------------------------===//
@@ -140,12 +152,14 @@ def SIconstdata_ptr : SDNode<
class flat_ld <SDPatternOperator ld> : PatFrag<(ops node:$ptr),
(ld node:$ptr), [{
- return isFlatLoad(dyn_cast<LoadSDNode>(N)) ||
- isGlobalLoad(dyn_cast<LoadSDNode>(N)) ||
- isConstantLoad(cast<LoadSDNode>(N), -1);
+ const MemSDNode *LD = cast<MemSDNode>(N);
+ return LD->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
+ LD->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
+ LD->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;
}]>;
def flat_load : flat_ld <load>;
+def atomic_flat_load : flat_ld<atomic_load>;
def flat_az_extloadi8 : flat_ld <az_extloadi8>;
def flat_sextloadi8 : flat_ld <sextloadi8>;
def flat_az_extloadi16 : flat_ld <az_extloadi16>;
@@ -153,26 +167,50 @@ def flat_sextloadi16 : flat_ld <sextloadi16>;
class flat_st <SDPatternOperator st> : PatFrag<(ops node:$val, node:$ptr),
(st node:$val, node:$ptr), [{
- return isFlatStore(dyn_cast<StoreSDNode>(N)) ||
- isGlobalStore(dyn_cast<StoreSDNode>(N));
+ const MemSDNode *ST = cast<MemSDNode>(N);
+ return ST->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
+ ST->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;
def flat_store: flat_st <store>;
+def atomic_flat_store: flat_st <atomic_store>;
def flat_truncstorei8 : flat_st <truncstorei8>;
def flat_truncstorei16 : flat_st <truncstorei16>;
+class MubufLoad <SDPatternOperator op> : PatFrag <
+ (ops node:$ptr), (op node:$ptr), [{
-def mubuf_load : PatFrag <(ops node:$ptr), (load node:$ptr), [{
- return isGlobalLoad(cast<LoadSDNode>(N)) ||
- isConstantLoad(cast<LoadSDNode>(N), -1);
+ const MemSDNode *LD = cast<MemSDNode>(N);
+ return LD->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
+ LD->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;
}]>;
+def mubuf_load : MubufLoad <load>;
+def mubuf_az_extloadi8 : MubufLoad <az_extloadi8>;
+def mubuf_sextloadi8 : MubufLoad <sextloadi8>;
+def mubuf_az_extloadi16 : MubufLoad <az_extloadi16>;
+def mubuf_sextloadi16 : MubufLoad <sextloadi16>;
+
+def mubuf_load_atomic : MubufLoad <atomic_load>;
+
def smrd_load : PatFrag <(ops node:$ptr), (load node:$ptr), [{
- return isConstantLoad(cast<LoadSDNode>(N), -1) &&
- static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpUniform(N);
+ auto Ld = cast<LoadSDNode>(N);
+ return Ld->getAlignment() >= 4 &&
+ Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
+ static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpUniform(N);
}]>;
//===----------------------------------------------------------------------===//
+// PatFrags for global memory operations
+//===----------------------------------------------------------------------===//
+
+def atomic_inc_global : global_binary_atomic_op<SIatomic_inc>;
+def atomic_dec_global : global_binary_atomic_op<SIatomic_dec>;
+
+def atomic_inc_flat : flat_binary_atomic_op<SIatomic_inc>;
+def atomic_dec_flat : flat_binary_atomic_op<SIatomic_dec>;
+
+//===----------------------------------------------------------------------===//
// SDNodes and PatFrag for local loads and stores to enable s_mov_b32 m0, -1
// to be glued to the memory instructions.
//===----------------------------------------------------------------------===//
@@ -182,7 +220,7 @@ def SIld_local : SDNode <"ISD::LOAD", SDTLoad,
>;
def si_ld_local : PatFrag <(ops node:$ptr), (SIld_local node:$ptr), [{
- return isLocalLoad(cast<LoadSDNode>(N));
+ return cast<LoadSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;
def si_load_local : PatFrag <(ops node:$ptr), (si_ld_local node:$ptr), [{
@@ -219,7 +257,7 @@ def SIst_local : SDNode <"ISD::STORE", SDTStore,
def si_st_local : PatFrag <
(ops node:$val, node:$ptr), (SIst_local node:$val, node:$ptr), [{
- return isLocalStore(cast<StoreSDNode>(N));
+ return cast<StoreSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;
def si_store_local : PatFrag <
@@ -247,9 +285,34 @@ def si_truncstore_local_i16 : PatFrag <
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
-multiclass SIAtomicM0Glue2 <string op_name> {
+def si_setcc_uniform : PatFrag <
+ (ops node:$lhs, node:$rhs, node:$cond),
+ (setcc node:$lhs, node:$rhs, node:$cond), [{
+ for (SDNode *Use : N->uses()) {
+ if (Use->isMachineOpcode() || Use->getOpcode() != ISD::CopyToReg)
+ return false;
+
+ unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
+ if (Reg != AMDGPU::SCC)
+ return false;
+ }
+ return true;
+}]>;
+
+def si_uniform_br : PatFrag <
+ (ops node:$cond, node:$bb), (brcond node:$cond, node:$bb), [{
+ return isUniformBr(N);
+}]>;
+
+def si_uniform_br_scc : PatFrag <
+ (ops node:$cond, node:$bb), (si_uniform_br node:$cond, node:$bb), [{
+ return isCBranchSCC(N);
+}]>;
+
+multiclass SIAtomicM0Glue2 <string op_name, bit is_amdgpu = 0> {
- def _glue : SDNode <"ISD::ATOMIC_"#op_name, SDTAtomic2,
+ def _glue : SDNode <
+ !if(is_amdgpu, "AMDGPUISD", "ISD")#"::ATOMIC_"#op_name, SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
>;
@@ -257,11 +320,13 @@ multiclass SIAtomicM0Glue2 <string op_name> {
}
defm si_atomic_load_add : SIAtomicM0Glue2 <"LOAD_ADD">;
+defm si_atomic_load_sub : SIAtomicM0Glue2 <"LOAD_SUB">;
+defm si_atomic_inc : SIAtomicM0Glue2 <"INC", 1>;
+defm si_atomic_dec : SIAtomicM0Glue2 <"DEC", 1>;
defm si_atomic_load_and : SIAtomicM0Glue2 <"LOAD_AND">;
defm si_atomic_load_min : SIAtomicM0Glue2 <"LOAD_MIN">;
defm si_atomic_load_max : SIAtomicM0Glue2 <"LOAD_MAX">;
defm si_atomic_load_or : SIAtomicM0Glue2 <"LOAD_OR">;
-defm si_atomic_load_sub : SIAtomicM0Glue2 <"LOAD_SUB">;
defm si_atomic_load_xor : SIAtomicM0Glue2 <"LOAD_XOR">;
defm si_atomic_load_umin : SIAtomicM0Glue2 <"LOAD_UMIN">;
defm si_atomic_load_umax : SIAtomicM0Glue2 <"LOAD_UMAX">;
@@ -347,6 +412,10 @@ def IMM16bit : PatLeaf <(imm),
[{return isUInt<16>(N->getZExtValue());}]
>;
+def SIMM16bit : PatLeaf <(imm),
+ [{return isInt<16>(N->getSExtValue());}]
+>;
+
def IMM20bit : PatLeaf <(imm),
[{return isUInt<20>(N->getZExtValue());}]
>;
@@ -369,7 +438,7 @@ class InlineFPImm <ValueType vt> : PatLeaf <(vt fpimm), [{
}]>;
class SGPRImm <dag frag> : PatLeaf<frag, [{
- if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) {
+ if (Subtarget->getGeneration() < SISubtarget::SOUTHERN_ISLANDS) {
return false;
}
const SIRegisterInfo *SIRI =
@@ -402,188 +471,133 @@ def sopp_brtarget : Operand<OtherVT> {
let ParserMatchClass = SoppBrTarget;
}
-def const_ga : Operand<iPTR>;
-
-include "SIInstrFormats.td"
-include "VIInstrFormats.td"
+def si_ga : Operand<iPTR>;
-def MubufOffsetMatchClass : AsmOperandClass {
- let Name = "MubufOffset";
- let ParserMethod = "parseMubufOptionalOps";
- let RenderMethod = "addImmOperands";
+def InterpSlot : Operand<i32> {
+ let PrintMethod = "printInterpSlot";
}
-class DSOffsetBaseMatchClass <string parser> : AsmOperandClass {
- let Name = "DSOffset"#parser;
- let ParserMethod = parser;
+def SendMsgMatchClass : AsmOperandClass {
+ let Name = "SendMsg";
+ let PredicateMethod = "isSendMsg";
+ let ParserMethod = "parseSendMsgOp";
let RenderMethod = "addImmOperands";
- let PredicateMethod = "isDSOffset";
}
-def DSOffsetMatchClass : DSOffsetBaseMatchClass <"parseDSOptionalOps">;
-def DSOffsetGDSMatchClass : DSOffsetBaseMatchClass <"parseDSOffsetOptional">;
-
-def DSOffset01MatchClass : AsmOperandClass {
- let Name = "DSOffset1";
- let ParserMethod = "parseDSOff01OptionalOps";
- let RenderMethod = "addImmOperands";
- let PredicateMethod = "isDSOffset01";
+def SendMsgImm : Operand<i32> {
+ let PrintMethod = "printSendMsg";
+ let ParserMatchClass = SendMsgMatchClass;
}
-class GDSBaseMatchClass <string parser> : AsmOperandClass {
- let Name = "GDS"#parser;
- let PredicateMethod = "isImm";
- let ParserMethod = parser;
+def SWaitMatchClass : AsmOperandClass {
+ let Name = "SWaitCnt";
let RenderMethod = "addImmOperands";
+ let ParserMethod = "parseSWaitCntOps";
}
-def GDSMatchClass : GDSBaseMatchClass <"parseDSOptionalOps">;
-def GDS01MatchClass : GDSBaseMatchClass <"parseDSOff01OptionalOps">;
-
-class GLCBaseMatchClass <string parser> : AsmOperandClass {
- let Name = "GLC"#parser;
- let PredicateMethod = "isImm";
- let ParserMethod = parser;
- let RenderMethod = "addImmOperands";
+def WAIT_FLAG : Operand <i32> {
+ let ParserMatchClass = SWaitMatchClass;
+ let PrintMethod = "printWaitFlag";
}
-def GLCMubufMatchClass : GLCBaseMatchClass <"parseMubufOptionalOps">;
-def GLCFlatMatchClass : GLCBaseMatchClass <"parseFlatOptionalOps">;
+include "SIInstrFormats.td"
+include "VIInstrFormats.td"
-class SLCBaseMatchClass <string parser> : AsmOperandClass {
- let Name = "SLC"#parser;
- let PredicateMethod = "isImm";
- let ParserMethod = parser;
+class NamedMatchClass<string CName, bit Optional = 1> : AsmOperandClass {
+ let Name = "Imm"#CName;
+ let PredicateMethod = "is"#CName;
+ let ParserMethod = !if(Optional, "parseOptionalOperand", "parse"#CName);
let RenderMethod = "addImmOperands";
+ let IsOptional = Optional;
+ let DefaultMethod = !if(Optional, "default"#CName, ?);
}
-def SLCMubufMatchClass : SLCBaseMatchClass <"parseMubufOptionalOps">;
-def SLCFlatMatchClass : SLCBaseMatchClass <"parseFlatOptionalOps">;
-def SLCFlatAtomicMatchClass : SLCBaseMatchClass <"parseFlatAtomicOptionalOps">;
-
-class TFEBaseMatchClass <string parser> : AsmOperandClass {
- let Name = "TFE"#parser;
- let PredicateMethod = "isImm";
- let ParserMethod = parser;
- let RenderMethod = "addImmOperands";
+class NamedOperandBit<string Name, AsmOperandClass MatchClass> : Operand<i1> {
+ let PrintMethod = "print"#Name;
+ let ParserMatchClass = MatchClass;
}
-def TFEMubufMatchClass : TFEBaseMatchClass <"parseMubufOptionalOps">;
-def TFEFlatMatchClass : TFEBaseMatchClass <"parseFlatOptionalOps">;
-def TFEFlatAtomicMatchClass : TFEBaseMatchClass <"parseFlatAtomicOptionalOps">;
-
-def OModMatchClass : AsmOperandClass {
- let Name = "OMod";
- let PredicateMethod = "isImm";
- let ParserMethod = "parseVOP3OptionalOps";
- let RenderMethod = "addImmOperands";
+class NamedOperandU8<string Name, AsmOperandClass MatchClass> : Operand<i8> {
+ let PrintMethod = "print"#Name;
+ let ParserMatchClass = MatchClass;
}
-def ClampMatchClass : AsmOperandClass {
- let Name = "Clamp";
- let PredicateMethod = "isImm";
- let ParserMethod = "parseVOP3OptionalOps";
- let RenderMethod = "addImmOperands";
+class NamedOperandU16<string Name, AsmOperandClass MatchClass> : Operand<i16> {
+ let PrintMethod = "print"#Name;
+ let ParserMatchClass = MatchClass;
}
-class SMRDOffsetBaseMatchClass <string predicate> : AsmOperandClass {
- let Name = "SMRDOffset"#predicate;
- let PredicateMethod = predicate;
- let RenderMethod = "addImmOperands";
+class NamedOperandU32<string Name, AsmOperandClass MatchClass> : Operand<i32> {
+ let PrintMethod = "print"#Name;
+ let ParserMatchClass = MatchClass;
}
-def SMRDOffsetMatchClass : SMRDOffsetBaseMatchClass <"isSMRDOffset">;
-def SMRDLiteralOffsetMatchClass : SMRDOffsetBaseMatchClass <
- "isSMRDLiteralOffset"
->;
-
let OperandType = "OPERAND_IMMEDIATE" in {
-def offen : Operand<i1> {
- let PrintMethod = "printOffen";
-}
-def idxen : Operand<i1> {
- let PrintMethod = "printIdxen";
-}
-def addr64 : Operand<i1> {
- let PrintMethod = "printAddr64";
-}
-def mbuf_offset : Operand<i16> {
- let PrintMethod = "printMBUFOffset";
- let ParserMatchClass = MubufOffsetMatchClass;
-}
-class ds_offset_base <AsmOperandClass mc> : Operand<i16> {
- let PrintMethod = "printDSOffset";
- let ParserMatchClass = mc;
-}
-def ds_offset : ds_offset_base <DSOffsetMatchClass>;
-def ds_offset_gds : ds_offset_base <DSOffsetGDSMatchClass>;
+def offen : NamedOperandBit<"Offen", NamedMatchClass<"Offen">>;
+def idxen : NamedOperandBit<"Idxen", NamedMatchClass<"Idxen">>;
+def addr64 : NamedOperandBit<"Addr64", NamedMatchClass<"Addr64">>;
-def ds_offset0 : Operand<i8> {
- let PrintMethod = "printDSOffset0";
- let ParserMatchClass = DSOffset01MatchClass;
-}
-def ds_offset1 : Operand<i8> {
- let PrintMethod = "printDSOffset1";
- let ParserMatchClass = DSOffset01MatchClass;
-}
-class gds_base <AsmOperandClass mc> : Operand <i1> {
- let PrintMethod = "printGDS";
- let ParserMatchClass = mc;
-}
-def gds : gds_base <GDSMatchClass>;
+def offset : NamedOperandU16<"Offset", NamedMatchClass<"Offset">>;
+def offset0 : NamedOperandU8<"Offset0", NamedMatchClass<"Offset0">>;
+def offset1 : NamedOperandU8<"Offset1", NamedMatchClass<"Offset1">>;
-def gds01 : gds_base <GDS01MatchClass>;
+def gds : NamedOperandBit<"GDS", NamedMatchClass<"GDS">>;
-class glc_base <AsmOperandClass mc> : Operand <i1> {
- let PrintMethod = "printGLC";
- let ParserMatchClass = mc;
-}
+def omod : NamedOperandU32<"OModSI", NamedMatchClass<"OModSI">>;
+def clampmod : NamedOperandBit<"ClampSI", NamedMatchClass<"ClampSI">>;
-def glc : glc_base <GLCMubufMatchClass>;
-def glc_flat : glc_base <GLCFlatMatchClass>;
+def smrd_offset : NamedOperandU32<"SMRDOffset", NamedMatchClass<"SMRDOffset">>;
+def smrd_literal_offset : NamedOperandU32<"SMRDLiteralOffset", NamedMatchClass<"SMRDLiteralOffset">>;
-class slc_base <AsmOperandClass mc> : Operand <i1> {
- let PrintMethod = "printSLC";
- let ParserMatchClass = mc;
-}
+def glc : NamedOperandBit<"GLC", NamedMatchClass<"GLC">>;
+def slc : NamedOperandBit<"SLC", NamedMatchClass<"SLC">>;
+def tfe : NamedOperandBit<"TFE", NamedMatchClass<"TFE">>;
+def unorm : NamedOperandBit<"UNorm", NamedMatchClass<"UNorm">>;
+def da : NamedOperandBit<"DA", NamedMatchClass<"DA">>;
+def r128 : NamedOperandBit<"R128", NamedMatchClass<"R128">>;
+def lwe : NamedOperandBit<"LWE", NamedMatchClass<"LWE">>;
-def slc : slc_base <SLCMubufMatchClass>;
-def slc_flat : slc_base <SLCFlatMatchClass>;
-def slc_flat_atomic : slc_base <SLCFlatAtomicMatchClass>;
+def dmask : NamedOperandU16<"DMask", NamedMatchClass<"DMask">>;
-class tfe_base <AsmOperandClass mc> : Operand <i1> {
- let PrintMethod = "printTFE";
- let ParserMatchClass = mc;
-}
+def dpp_ctrl : NamedOperandU32<"DPPCtrl", NamedMatchClass<"DPPCtrl", 0>>;
+def row_mask : NamedOperandU32<"RowMask", NamedMatchClass<"RowMask">>;
+def bank_mask : NamedOperandU32<"BankMask", NamedMatchClass<"BankMask">>;
+def bound_ctrl : NamedOperandBit<"BoundCtrl", NamedMatchClass<"BoundCtrl">>;
-def tfe : tfe_base <TFEMubufMatchClass>;
-def tfe_flat : tfe_base <TFEFlatMatchClass>;
-def tfe_flat_atomic : tfe_base <TFEFlatAtomicMatchClass>;
+def dst_sel : NamedOperandU32<"SDWADstSel", NamedMatchClass<"SDWADstSel">>;
+def src0_sel : NamedOperandU32<"SDWASrc0Sel", NamedMatchClass<"SDWASrc0Sel">>;
+def src1_sel : NamedOperandU32<"SDWASrc1Sel", NamedMatchClass<"SDWASrc1Sel">>;
+def dst_unused : NamedOperandU32<"SDWADstUnused", NamedMatchClass<"SDWADstUnused">>;
-def omod : Operand <i32> {
- let PrintMethod = "printOModSI";
- let ParserMatchClass = OModMatchClass;
-}
+def hwreg : NamedOperandU16<"Hwreg", NamedMatchClass<"Hwreg", 0>>;
+
+} // End OperandType = "OPERAND_IMMEDIATE"
-def ClampMod : Operand <i1> {
- let PrintMethod = "printClampSI";
- let ParserMatchClass = ClampMatchClass;
-}
-def smrd_offset : Operand <i32> {
- let PrintMethod = "printU32ImmOperand";
- let ParserMatchClass = SMRDOffsetMatchClass;
+def VOPDstS64 : VOPDstOperand <SReg_64>;
+
+def FPInputModsMatchClass : AsmOperandClass {
+ let Name = "RegOrImmWithFPInputMods";
+ let ParserMethod = "parseRegOrImmWithFPInputMods";
+ let PredicateMethod = "isRegOrImmWithInputMods";
}
-def smrd_literal_offset : Operand <i32> {
- let PrintMethod = "printU32ImmOperand";
- let ParserMatchClass = SMRDLiteralOffsetMatchClass;
+def FPInputMods : Operand <i32> {
+ let PrintMethod = "printOperandAndFPInputMods";
+ let ParserMatchClass = FPInputModsMatchClass;
}
-} // End OperandType = "OPERAND_IMMEDIATE"
+def IntInputModsMatchClass : AsmOperandClass {
+ let Name = "RegOrImmWithIntInputMods";
+ let ParserMethod = "parseRegOrImmWithIntInputMods";
+ let PredicateMethod = "isRegOrImmWithInputMods";
+}
-def VOPDstS64 : VOPDstOperand <SReg_64>;
+def IntInputMods: Operand <i32> {
+ let PrintMethod = "printOperandAndIntInputMods";
+ let ParserMatchClass = IntInputModsMatchClass;
+}
//===----------------------------------------------------------------------===//
// Complex patterns
@@ -595,9 +609,13 @@ def DS64Bit4ByteAligned : ComplexPattern<i32, 3, "SelectDS64Bit4ByteAligned">;
def MUBUFAddr32 : ComplexPattern<i64, 9, "SelectMUBUFAddr32">;
def MUBUFAddr64 : ComplexPattern<i64, 7, "SelectMUBUFAddr64">;
def MUBUFAddr64Atomic : ComplexPattern<i64, 5, "SelectMUBUFAddr64">;
+def FLATAtomic : ComplexPattern<i64, 3, "SelectFlat">;
def MUBUFScratch : ComplexPattern<i64, 4, "SelectMUBUFScratch">;
def MUBUFOffset : ComplexPattern<i64, 6, "SelectMUBUFOffset">;
+def MUBUFOffsetNoGLC : ComplexPattern<i64, 3, "SelectMUBUFOffset">;
def MUBUFOffsetAtomic : ComplexPattern<i64, 4, "SelectMUBUFOffset">;
+def MUBUFIntrinsicOffset : ComplexPattern<i32, 2, "SelectMUBUFIntrinsicOffset">;
+def MUBUFIntrinsicVOffset : ComplexPattern<i32, 3, "SelectMUBUFIntrinsicVOffset">;
def SMRDImm : ComplexPattern<i64, 2, "SelectSMRDImm">;
def SMRDImm32 : ComplexPattern<i64, 2, "SelectSMRDImm32">;
@@ -606,6 +624,8 @@ def SMRDBufferImm : ComplexPattern<i32, 1, "SelectSMRDBufferImm">;
def SMRDBufferImm32 : ComplexPattern<i32, 1, "SelectSMRDBufferImm32">;
def SMRDBufferSgpr : ComplexPattern<i32, 1, "SelectSMRDBufferSgpr">;
+def MOVRELOffset : ComplexPattern<i32, 2, "SelectMOVRELOffset">;
+
def VOP3Mods0 : ComplexPattern<untyped, 4, "SelectVOP3Mods0">;
def VOP3NoMods0 : ComplexPattern<untyped, 4, "SelectVOP3NoMods0">;
def VOP3Mods0Clamp : ComplexPattern<untyped, 3, "SelectVOP3Mods0Clamp">;
@@ -670,17 +690,24 @@ class EXPCommon : InstSI<
let EXP_CNT = 1;
let Uses = [EXEC];
+ let SchedRW = [WriteExport];
}
multiclass EXP_m {
let isPseudo = 1, isCodeGenOnly = 1 in {
- def "" : EXPCommon, SIMCInstr <"exp", SISubtarget.NONE> ;
+ def "" : EXPCommon, SIMCInstr <"exp", SIEncodingFamily.NONE> ;
}
- def _si : EXPCommon, SIMCInstr <"exp", SISubtarget.SI>, EXPe;
+ def _si : EXPCommon, SIMCInstr <"exp", SIEncodingFamily.SI>, EXPe {
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
+ }
- def _vi : EXPCommon, SIMCInstr <"exp", SISubtarget.VI>, EXPe_vi;
+ def _vi : EXPCommon, SIMCInstr <"exp", SIEncodingFamily.VI>, EXPe_vi {
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
+ }
}
//===----------------------------------------------------------------------===//
@@ -689,7 +716,7 @@ multiclass EXP_m {
class SOP1_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
SOP1 <outs, ins, "", pattern>,
- SIMCInstr<opName, SISubtarget.NONE> {
+ SIMCInstr<opName, SIEncodingFamily.NONE> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -697,17 +724,21 @@ class SOP1_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
class SOP1_Real_si <sop1 op, string opName, dag outs, dag ins, string asm> :
SOP1 <outs, ins, asm, []>,
SOP1e <op.SI>,
- SIMCInstr<opName, SISubtarget.SI> {
+ SIMCInstr<opName, SIEncodingFamily.SI> {
let isCodeGenOnly = 0;
let AssemblerPredicates = [isSICI];
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
}
class SOP1_Real_vi <sop1 op, string opName, dag outs, dag ins, string asm> :
SOP1 <outs, ins, asm, []>,
SOP1e <op.VI>,
- SIMCInstr<opName, SISubtarget.VI> {
+ SIMCInstr<opName, SIEncodingFamily.VI> {
let isCodeGenOnly = 0;
let AssemblerPredicates = [isVI];
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
}
multiclass SOP1_m <sop1 op, string opName, dag outs, dag ins, string asm,
@@ -722,27 +753,27 @@ multiclass SOP1_m <sop1 op, string opName, dag outs, dag ins, string asm,
}
multiclass SOP1_32 <sop1 op, string opName, list<dag> pattern> : SOP1_m <
- op, opName, (outs SReg_32:$dst), (ins SSrc_32:$src0),
- opName#" $dst, $src0", pattern
+ op, opName, (outs SReg_32:$sdst), (ins SSrc_32:$src0),
+ opName#" $sdst, $src0", pattern
>;
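// For illustration, an instantiation along the lines of
//   defm S_MOV_B32 : SOP1_32 <sop1<0x03, 0x00>, "s_mov_b32", []>;
// (opcode values shown are illustrative) now produces the assembly string
// "s_mov_b32 $sdst, $src0"; the $dst -> $sdst rename ties the printed
// operand to the sdst encoding field that the new SICI/VI decoders expect.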
multiclass SOP1_64 <sop1 op, string opName, list<dag> pattern> : SOP1_m <
- op, opName, (outs SReg_64:$dst), (ins SSrc_64:$src0),
- opName#" $dst, $src0", pattern
+ op, opName, (outs SReg_64:$sdst), (ins SSrc_64:$src0),
+ opName#" $sdst, $src0", pattern
>;
// no input, 64-bit output.
multiclass SOP1_64_0 <sop1 op, string opName, list<dag> pattern> {
- def "" : SOP1_Pseudo <opName, (outs SReg_64:$dst), (ins), pattern>;
+ def "" : SOP1_Pseudo <opName, (outs SReg_64:$sdst), (ins), pattern>;
- def _si : SOP1_Real_si <op, opName, (outs SReg_64:$dst), (ins),
- opName#" $dst"> {
- let ssrc0 = 0;
+ def _si : SOP1_Real_si <op, opName, (outs SReg_64:$sdst), (ins),
+ opName#" $sdst"> {
+ let src0 = 0;
}
- def _vi : SOP1_Real_vi <op, opName, (outs SReg_64:$dst), (ins),
- opName#" $dst"> {
- let ssrc0 = 0;
+ def _vi : SOP1_Real_vi <op, opName, (outs SReg_64:$sdst), (ins),
+ opName#" $sdst"> {
+ let src0 = 0;
}
}
@@ -763,13 +794,19 @@ multiclass SOP1_1 <sop1 op, string opName, list<dag> pattern> {
// 64-bit input, 32-bit output.
multiclass SOP1_32_64 <sop1 op, string opName, list<dag> pattern> : SOP1_m <
- op, opName, (outs SReg_32:$dst), (ins SSrc_64:$src0),
- opName#" $dst, $src0", pattern
+ op, opName, (outs SReg_32:$sdst), (ins SSrc_64:$src0),
+ opName#" $sdst, $src0", pattern
+>;
+
+// 32-bit input, 64-bit output.
+multiclass SOP1_64_32 <sop1 op, string opName, list<dag> pattern> : SOP1_m <
+ op, opName, (outs SReg_64:$sdst), (ins SSrc_32:$src0),
+ opName#" $sdst, $src0", pattern
>;
class SOP2_Pseudo<string opName, dag outs, dag ins, list<dag> pattern> :
SOP2<outs, ins, "", pattern>,
- SIMCInstr<opName, SISubtarget.NONE> {
+ SIMCInstr<opName, SIEncodingFamily.NONE> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let Size = 4;
@@ -784,15 +821,19 @@ class SOP2_Pseudo<string opName, dag outs, dag ins, list<dag> pattern> :
class SOP2_Real_si<sop2 op, string opName, dag outs, dag ins, string asm> :
SOP2<outs, ins, asm, []>,
SOP2e<op.SI>,
- SIMCInstr<opName, SISubtarget.SI> {
+ SIMCInstr<opName, SIEncodingFamily.SI> {
let AssemblerPredicates = [isSICI];
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
}
class SOP2_Real_vi<sop2 op, string opName, dag outs, dag ins, string asm> :
SOP2<outs, ins, asm, []>,
SOP2e<op.VI>,
- SIMCInstr<opName, SISubtarget.VI> {
+ SIMCInstr<opName, SIEncodingFamily.VI> {
let AssemblerPredicates = [isVI];
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
}
multiclass SOP2_m <sop2 op, string opName, dag outs, dag ins, string asm,
@@ -807,36 +848,49 @@ multiclass SOP2_m <sop2 op, string opName, dag outs, dag ins, string asm,
}
multiclass SOP2_32 <sop2 op, string opName, list<dag> pattern> : SOP2_m <
- op, opName, (outs SReg_32:$dst), (ins SSrc_32:$src0, SSrc_32:$src1),
- opName#" $dst, $src0, $src1", pattern
+ op, opName, (outs SReg_32:$sdst), (ins SSrc_32:$src0, SSrc_32:$src1),
+ opName#" $sdst, $src0, $src1", pattern
>;
multiclass SOP2_64 <sop2 op, string opName, list<dag> pattern> : SOP2_m <
- op, opName, (outs SReg_64:$dst), (ins SSrc_64:$src0, SSrc_64:$src1),
- opName#" $dst, $src0, $src1", pattern
+ op, opName, (outs SReg_64:$sdst), (ins SSrc_64:$src0, SSrc_64:$src1),
+ opName#" $sdst, $src0, $src1", pattern
>;
multiclass SOP2_64_32 <sop2 op, string opName, list<dag> pattern> : SOP2_m <
- op, opName, (outs SReg_64:$dst), (ins SSrc_64:$src0, SSrc_32:$src1),
- opName#" $dst, $src0, $src1", pattern
+ op, opName, (outs SReg_64:$sdst), (ins SSrc_64:$src0, SSrc_32:$src1),
+ opName#" $sdst, $src0, $src1", pattern
>;
-class SOPC_Helper <bits<7> op, RegisterOperand rc, ValueType vt,
- string opName, PatLeaf cond> : SOPC <
- op, (outs), (ins rc:$src0, rc:$src1),
- opName#" $src0, $src1", []> {
+multiclass SOP2_64_32_32 <sop2 op, string opName, list<dag> pattern> : SOP2_m <
+ op, opName, (outs SReg_64:$sdst), (ins SSrc_32:$src0, SSrc_32:$src1),
+ opName#" $sdst, $src0, $src1", pattern
+>;
+
+class SOPC_Base <bits<7> op, RegisterOperand rc0, RegisterOperand rc1,
+ string opName, list<dag> pattern = []> : SOPC <
+ op, (outs), (ins rc0:$src0, rc1:$src1),
+ opName#" $src0, $src1", pattern > {
let Defs = [SCC];
}
+class SOPC_Helper <bits<7> op, RegisterOperand rc, ValueType vt,
+ string opName, PatLeaf cond> : SOPC_Base <
+ op, rc, rc, opName,
+ [(set SCC, (si_setcc_uniform vt:$src0, vt:$src1, cond))] > {
+}
-class SOPC_32<bits<7> op, string opName, PatLeaf cond = COND_NULL>
+class SOPC_CMP_32<bits<7> op, string opName, PatLeaf cond = COND_NULL>
: SOPC_Helper<op, SSrc_32, i32, opName, cond>;
-class SOPC_64<bits<7> op, string opName, PatLeaf cond = COND_NULL>
- : SOPC_Helper<op, SSrc_64, i64, opName, cond>;
+class SOPC_32<bits<7> op, string opName, list<dag> pattern = []>
+ : SOPC_Base<op, SSrc_32, SSrc_32, opName, pattern>;
+
+class SOPC_64_32<bits<7> op, string opName, list<dag> pattern = []>
+ : SOPC_Base<op, SSrc_64, SSrc_32, opName, pattern>;
class SOPK_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
SOPK <outs, ins, "", pattern>,
- SIMCInstr<opName, SISubtarget.NONE> {
+ SIMCInstr<opName, SIEncodingFamily.NONE> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -844,16 +898,20 @@ class SOPK_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
class SOPK_Real_si <sopk op, string opName, dag outs, dag ins, string asm> :
SOPK <outs, ins, asm, []>,
SOPKe <op.SI>,
- SIMCInstr<opName, SISubtarget.SI> {
+ SIMCInstr<opName, SIEncodingFamily.SI> {
let AssemblerPredicates = [isSICI];
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
let isCodeGenOnly = 0;
}
class SOPK_Real_vi <sopk op, string opName, dag outs, dag ins, string asm> :
SOPK <outs, ins, asm, []>,
SOPKe <op.VI>,
- SIMCInstr<opName, SISubtarget.VI> {
+ SIMCInstr<opName, SIEncodingFamily.VI> {
let AssemblerPredicates = [isVI];
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
let isCodeGenOnly = 0;
}
@@ -868,14 +926,14 @@ multiclass SOPK_m <sopk op, string opName, dag outs, dag ins, string opAsm,
}
multiclass SOPK_32 <sopk op, string opName, list<dag> pattern> {
- def "" : SOPK_Pseudo <opName, (outs SReg_32:$dst), (ins u16imm:$src0),
+ def "" : SOPK_Pseudo <opName, (outs SReg_32:$sdst), (ins u16imm:$simm16),
pattern>;
- def _si : SOPK_Real_si <op, opName, (outs SReg_32:$dst), (ins u16imm:$src0),
- opName#" $dst, $src0">;
+ def _si : SOPK_Real_si <op, opName, (outs SReg_32:$sdst), (ins u16imm:$simm16),
+ opName#" $sdst, $simm16">;
- def _vi : SOPK_Real_vi <op, opName, (outs SReg_32:$dst), (ins u16imm:$src0),
- opName#" $dst, $src0">;
+ def _vi : SOPK_Real_vi <op, opName, (outs SReg_32:$sdst), (ins u16imm:$simm16),
+ opName#" $sdst, $simm16">;
}
multiclass SOPK_SCC <sopk op, string opName, list<dag> pattern> {
@@ -908,15 +966,19 @@ multiclass SOPK_IMM32 <sopk op, string opName, dag outs, dag ins,
def _si : SOPK <outs, ins, asm, []>,
SOPK64e <op.SI>,
- SIMCInstr<opName, SISubtarget.SI> {
+ SIMCInstr<opName, SIEncodingFamily.SI> {
let AssemblerPredicates = [isSICI];
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
let isCodeGenOnly = 0;
}
def _vi : SOPK <outs, ins, asm, []>,
SOPK64e <op.VI>,
- SIMCInstr<opName, SISubtarget.VI> {
+ SIMCInstr<opName, SIEncodingFamily.VI> {
let AssemblerPredicates = [isVI];
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
let isCodeGenOnly = 0;
}
}
@@ -926,86 +988,145 @@ multiclass SOPK_IMM32 <sopk op, string opName, dag outs, dag ins,
class SMRD_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
SMRD <outs, ins, "", pattern>,
- SIMCInstr<opName, SISubtarget.NONE> {
+ SIMCInstr<opName, SIEncodingFamily.NONE> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
-class SMRD_Real_si <bits<5> op, string opName, bit imm, dag outs, dag ins,
- string asm> :
+class SMRD_IMM_Real_si <bits<5> op, string opName, dag outs, dag ins,
+ string asm> :
+ SMRD <outs, ins, asm, []>,
+ SMRD_IMMe <op>,
+ SIMCInstr<opName, SIEncodingFamily.SI> {
+ let AssemblerPredicates = [isSICI];
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
+}
+
+class SMRD_SOFF_Real_si <bits<5> op, string opName, dag outs, dag ins,
+ string asm> :
SMRD <outs, ins, asm, []>,
- SMRDe <op, imm>,
- SIMCInstr<opName, SISubtarget.SI> {
+ SMRD_SOFFe <op>,
+ SIMCInstr<opName, SIEncodingFamily.SI> {
let AssemblerPredicates = [isSICI];
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
+}
+
+
+class SMRD_IMM_Real_vi <bits<8> op, string opName, dag outs, dag ins,
+ string asm, list<dag> pattern = []> :
+ SMRD <outs, ins, asm, pattern>,
+ SMEM_IMMe_vi <op>,
+ SIMCInstr<opName, SIEncodingFamily.VI> {
+ let AssemblerPredicates = [isVI];
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
}
-class SMRD_Real_vi <bits<8> op, string opName, bit imm, dag outs, dag ins,
- string asm, list<dag> pattern = []> :
+class SMRD_SOFF_Real_vi <bits<8> op, string opName, dag outs, dag ins,
+ string asm, list<dag> pattern = []> :
SMRD <outs, ins, asm, pattern>,
- SMEMe_vi <op, imm>,
- SIMCInstr<opName, SISubtarget.VI> {
+ SMEM_SOFFe_vi <op>,
+ SIMCInstr<opName, SIEncodingFamily.VI> {
let AssemblerPredicates = [isVI];
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
}
-multiclass SMRD_m <smrd op, string opName, bit imm, dag outs, dag ins,
+
+multiclass SMRD_IMM_m <smrd op, string opName, dag outs, dag ins,
string asm, list<dag> pattern> {
def "" : SMRD_Pseudo <opName, outs, ins, pattern>;
- def _si : SMRD_Real_si <op.SI, opName, imm, outs, ins, asm>;
+ def _si : SMRD_IMM_Real_si <op.SI, opName, outs, ins, asm>;
// glc is only applicable to scalar stores, which are not yet
// implemented.
let glc = 0 in {
- def _vi : SMRD_Real_vi <op.VI, opName, imm, outs, ins, asm>;
+ def _vi : SMRD_IMM_Real_vi <op.VI, opName, outs, ins, asm>;
}
}
-multiclass SMRD_Inval <smrd op, string opName,
- SDPatternOperator node> {
- let hasSideEffects = 1, mayStore = 1 in {
- def "" : SMRD_Pseudo <opName, (outs), (ins), [(node)]>;
+multiclass SMRD_SOFF_m <smrd op, string opName, dag outs, dag ins,
+ string asm, list<dag> pattern> {
- let sbase = 0, offset = 0 in {
- let sdst = 0 in {
- def _si : SMRD_Real_si <op.SI, opName, 0, (outs), (ins), opName>;
- }
+ def "" : SMRD_Pseudo <opName, outs, ins, pattern>;
+
+ def _si : SMRD_SOFF_Real_si <op.SI, opName, outs, ins, asm>;
+
+ // glc is only applicable to scalar stores, which are not yet
+ // implemented.
+ let glc = 0 in {
+ def _vi : SMRD_SOFF_Real_vi <op.VI, opName, outs, ins, asm>;
+ }
+}
+
+multiclass SMRD_Special <smrd op, string opName, dag outs,
+ int sdst_ = ?,
+ string opStr = "",
+ list<dag> pattern = []> {
+ let hasSideEffects = 1 in {
+ def "" : SMRD_Pseudo <opName, outs, (ins), pattern>;
+
+ let sbase = 0, soff = 0, sdst = sdst_ in {
+ def _si : SMRD_SOFF_Real_si <op.SI, opName, outs, (ins), opName#opStr>;
- let glc = 0, sdata = 0 in {
- def _vi : SMRD_Real_vi <op.VI, opName, 0, (outs), (ins), opName>;
+ let glc = 0 in {
+ def _vi : SMRD_SOFF_Real_vi <op.VI, opName, outs, (ins), opName#opStr>;
}
}
}
}
+multiclass SMRD_Inval <smrd op, string opName,
+ SDPatternOperator node> {
+ let mayStore = 1 in {
+ defm : SMRD_Special<op, opName, (outs), 0, "", [(node)]>;
+ }
+}
+
class SMEM_Inval <bits<8> op, string opName, SDPatternOperator node> :
- SMRD_Real_vi<op, opName, 0, (outs), (ins), opName, [(node)]> {
+ SMRD_SOFF_Real_vi<op, opName, (outs), (ins), opName, [(node)]> {
let hasSideEffects = 1;
let mayStore = 1;
let sbase = 0;
- let sdata = 0;
+ let sdst = 0;
+ let glc = 0;
+ let soff = 0;
+}
+
+class SMEM_Ret <bits<8> op, string opName, SDPatternOperator node> :
+ SMRD_SOFF_Real_vi<op, opName, (outs SReg_64:$sdst), (ins),
+ opName#" $sdst", [(set i64:$sdst, (node))]> {
+ let hasSideEffects = 1;
+ let mayStore = ?;
+ let mayLoad = ?;
+ let sbase = 0;
let glc = 0;
- let offset = 0;
+ let soff = 0;
}
multiclass SMRD_Helper <smrd op, string opName, RegisterClass baseClass,
RegisterClass dstClass> {
- defm _IMM : SMRD_m <
- op, opName#"_IMM", 1, (outs dstClass:$dst),
+ defm _IMM : SMRD_IMM_m <
+ op, opName#"_IMM", (outs dstClass:$sdst),
(ins baseClass:$sbase, smrd_offset:$offset),
- opName#" $dst, $sbase, $offset", []
+ opName#" $sdst, $sbase, $offset", []
>;
def _IMM_ci : SMRD <
- (outs dstClass:$dst), (ins baseClass:$sbase, smrd_literal_offset:$offset),
- opName#" $dst, $sbase, $offset", []>, SMRD_IMMe_ci <op.SI> {
+ (outs dstClass:$sdst), (ins baseClass:$sbase, smrd_literal_offset:$offset),
+ opName#" $sdst, $sbase, $offset", []>, SMRD_IMMe_ci <op.SI> {
let AssemblerPredicates = [isCIOnly];
+ let DecoderNamespace = "CI";
}
- defm _SGPR : SMRD_m <
- op, opName#"_SGPR", 0, (outs dstClass:$dst),
+ defm _SGPR : SMRD_SOFF_m <
+ op, opName#"_SGPR", (outs dstClass:$sdst),
(ins baseClass:$sbase, SReg_32:$soff),
- opName#" $dst, $sbase, $soff", []
+ opName#" $sdst, $sbase, $soff", []
>;
}
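// For illustration, a scalar load defined roughly as
//   defm S_LOAD_DWORD : SMRD_Helper <smrd<0x00>, "s_load_dword", SReg_64, SGPR_32>;
// (operands illustrative) expands through this helper into _IMM, _IMM_ci
// and _SGPR variants, each carrying its own encoding class and decoder
// namespace for SI/CI/VI.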
@@ -1013,20 +1134,6 @@ multiclass SMRD_Helper <smrd op, string opName, RegisterClass baseClass,
// Vector ALU classes
//===----------------------------------------------------------------------===//
-// This must always be right before the operand being input modified.
-def InputMods : OperandWithDefaultOps <i32, (ops (i32 0))> {
- let PrintMethod = "printOperandAndMods";
-}
-
-def InputModsMatchClass : AsmOperandClass {
- let Name = "RegWithInputMods";
-}
-
-def InputModsNoDefault : Operand <i32> {
- let PrintMethod = "printOperandAndMods";
- let ParserMatchClass = InputModsMatchClass;
-}
-
class getNumSrcArgs<ValueType Src0, ValueType Src1, ValueType Src2> {
int ret =
!if (!eq(Src0.Value, untyped.Value), 0,
@@ -1050,12 +1157,12 @@ class getVOPSrc0ForVT<ValueType VT> {
RegisterOperand ret = !if(!eq(VT.Size, 64), VSrc_64, VSrc_32);
}
-// Returns the register class to use for source 1 of VOP[12C] for the
-// given VT.
-class getVOPSrc1ForVT<ValueType VT> {
+// Returns the vreg register class to use for a source operand of the given VT.
+class getVregSrcForVT<ValueType VT> {
RegisterClass ret = !if(!eq(VT.Size, 64), VReg_64, VGPR_32);
}
+
// Returns the register class to use for sources of VOP3 instructions for the
// given VT.
class getVOP3SrcForVT<ValueType VT> {
@@ -1072,8 +1179,10 @@ class getVOP3SrcForVT<ValueType VT> {
// Returns 1 if the source arguments have modifiers, 0 if they do not.
// XXX - do f16 instructions?
class hasModifiers<ValueType SrcVT> {
- bit ret = !if(!eq(SrcVT.Value, f32.Value), 1,
- !if(!eq(SrcVT.Value, f64.Value), 1, 0));
+ bit ret =
+ !if(!eq(SrcVT.Value, f32.Value), 1,
+ !if(!eq(SrcVT.Value, f64.Value), 1,
+ 0));
}
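// For example, hasModifiers<f32>.ret and hasModifiers<f64>.ret both
// evaluate to 1, while an integer type such as i32 falls through both
// !if checks and yields 0, so integer VOP instructions are defined
// without $src0_modifiers/$clamp/$omod operands.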
// Returns the input arguments for VOP[12C] instructions for the given SrcVT.
@@ -1089,11 +1198,15 @@ class getIns64 <RegisterOperand Src0RC, RegisterOperand Src1RC,
bit HasModifiers> {
dag ret =
+ !if (!eq(NumSrcArgs, 0),
+ // VOP1 without input operands (V_NOP, V_CLREXCP)
+ (ins),
+ /* else */
!if (!eq(NumSrcArgs, 1),
!if (!eq(HasModifiers, 1),
// VOP1 with modifiers
- (ins InputModsNoDefault:$src0_modifiers, Src0RC:$src0,
- ClampMod:$clamp, omod:$omod)
+ (ins FPInputMods:$src0_modifiers, Src0RC:$src0,
+ clampmod:$clamp, omod:$omod)
/* else */,
// VOP1 without modifiers
(ins Src0RC:$src0)
@@ -1101,9 +1214,9 @@ class getIns64 <RegisterOperand Src0RC, RegisterOperand Src1RC,
!if (!eq(NumSrcArgs, 2),
!if (!eq(HasModifiers, 1),
// VOP 2 with modifiers
- (ins InputModsNoDefault:$src0_modifiers, Src0RC:$src0,
- InputModsNoDefault:$src1_modifiers, Src1RC:$src1,
- ClampMod:$clamp, omod:$omod)
+ (ins FPInputMods:$src0_modifiers, Src0RC:$src0,
+ FPInputMods:$src1_modifiers, Src1RC:$src1,
+ clampmod:$clamp, omod:$omod)
/* else */,
// VOP2 without modifiers
(ins Src0RC:$src0, Src1RC:$src1)
@@ -1111,21 +1224,109 @@ class getIns64 <RegisterOperand Src0RC, RegisterOperand Src1RC,
/* NumSrcArgs == 3 */,
!if (!eq(HasModifiers, 1),
// VOP3 with modifiers
- (ins InputModsNoDefault:$src0_modifiers, Src0RC:$src0,
- InputModsNoDefault:$src1_modifiers, Src1RC:$src1,
- InputModsNoDefault:$src2_modifiers, Src2RC:$src2,
- ClampMod:$clamp, omod:$omod)
+ (ins FPInputMods:$src0_modifiers, Src0RC:$src0,
+ FPInputMods:$src1_modifiers, Src1RC:$src1,
+ FPInputMods:$src2_modifiers, Src2RC:$src2,
+ clampmod:$clamp, omod:$omod)
/* else */,
// VOP3 without modifiers
(ins Src0RC:$src0, Src1RC:$src1, Src2RC:$src2)
- /* endif */ )));
+ /* endif */ ))));
+}
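// Worked example: a two-source FP profile (NumSrcArgs = 2, HasModifiers = 1,
// as with f32 sources) takes the middle branch and returns
//   (ins FPInputMods:$src0_modifiers, Src0RC:$src0,
//        FPInputMods:$src1_modifiers, Src1RC:$src1,
//        clampmod:$clamp, omod:$omod)
// while the same shape with HasModifiers = 0 gets the bare
//   (ins Src0RC:$src0, Src1RC:$src1)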
+
+class getInsDPP <RegisterClass Src0RC, RegisterClass Src1RC, int NumSrcArgs,
+ bit HasModifiers> {
+
+ dag ret = !if (!eq(NumSrcArgs, 0),
+ // VOP1 without input operands (V_NOP)
+ (ins dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
+ bank_mask:$bank_mask, bound_ctrl:$bound_ctrl),
+ !if (!eq(NumSrcArgs, 1),
+ !if (!eq(HasModifiers, 1),
+ // VOP1_DPP with modifiers
+ (ins FPInputMods:$src0_modifiers, Src0RC:$src0,
+ dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
+ bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)
+ /* else */,
+ // VOP1_DPP without modifiers
+ (ins Src0RC:$src0, dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
+ bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)
+ /* endif */)
+ /* NumSrcArgs == 2 */,
+ !if (!eq(HasModifiers, 1),
+ // VOP2_DPP with modifiers
+ (ins FPInputMods:$src0_modifiers, Src0RC:$src0,
+ FPInputMods:$src1_modifiers, Src1RC:$src1,
+ dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
+ bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)
+ /* else */,
+ // VOP2_DPP without modifiers
+ (ins Src0RC:$src0, Src1RC:$src1, dpp_ctrl:$dpp_ctrl,
+ row_mask:$row_mask, bank_mask:$bank_mask,
+ bound_ctrl:$bound_ctrl)
+ /* endif */)));
+}
+
+class getInsSDWA <RegisterClass Src0RC, RegisterClass Src1RC, int NumSrcArgs,
+ bit HasFloatModifiers, ValueType DstVT> {
+
+ dag ret = !if(!eq(NumSrcArgs, 0),
+ // VOP1 without input operands (V_NOP)
+ (ins),
+ !if(!eq(NumSrcArgs, 1),
+ !if(HasFloatModifiers,
+ // VOP1_SDWA with float modifiers
+ (ins FPInputMods:$src0_fmodifiers, Src0RC:$src0,
+ clampmod:$clamp, dst_sel:$dst_sel, dst_unused:$dst_unused,
+ src0_sel:$src0_sel)
+ /* else */,
+ // VOP1_SDWA with sext modifier
+ (ins IntInputMods:$src0_imodifiers, Src0RC:$src0,
+ clampmod:$clamp, dst_sel:$dst_sel, dst_unused:$dst_unused,
+ src0_sel:$src0_sel)
+ /* endif */)
+ /* NumSrcArgs == 2 */,
+ !if(HasFloatModifiers,
+ !if(!eq(DstVT.Size, 1),
+ // VOPC_SDWA with float modifiers
+ (ins FPInputMods:$src0_fmodifiers, Src0RC:$src0,
+ FPInputMods:$src1_fmodifiers, Src1RC:$src1,
+ clampmod:$clamp, src0_sel:$src0_sel, src1_sel:$src1_sel),
+ // VOP2_SDWA or VOPC_SDWA with float modifiers
+ (ins FPInputMods:$src0_fmodifiers, Src0RC:$src0,
+ FPInputMods:$src1_fmodifiers, Src1RC:$src1,
+ clampmod:$clamp, dst_sel:$dst_sel, dst_unused:$dst_unused,
+ src0_sel:$src0_sel, src1_sel:$src1_sel)
+ ),
+ /* else */
+ !if(!eq(DstVT.Size, 1),
+ // VOPC_SDWA with sext modifiers
+ (ins IntInputMods:$src0_imodifiers, Src0RC:$src0,
+ IntInputMods:$src1_imodifiers, Src1RC:$src1,
+ clampmod:$clamp, src0_sel:$src0_sel, src1_sel:$src1_sel),
+ // VOP2_SDWA or VOPC_SDWA with sext modifier
+ (ins IntInputMods:$src0_imodifiers, Src0RC:$src0,
+ IntInputMods:$src1_imodifiers, Src1RC:$src1,
+ clampmod:$clamp, dst_sel:$dst_sel, dst_unused:$dst_unused,
+ src0_sel:$src0_sel, src1_sel:$src1_sel)
+ )
+ /* endif */)));
+}
+
+// Outs for DPP and SDWA
+class getOutsExt <bit HasDst, ValueType DstVT, RegisterOperand DstRCDPP> {
+ dag ret = !if(HasDst,
+ !if(!eq(DstVT.Size, 1),
+ (outs), // no dst for VOPC; the "vcc" token is used as dst in SDWA VOPC instructions
+ (outs DstRCDPP:$vdst)),
+ (outs)); // V_NOP
}
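// For example: HasDst = 0 (V_NOP) gives (outs); HasDst = 1 with DstVT = i1
// (VOPC) also gives (outs) because the result lands in the implicit vcc;
// any other destination type gives (outs DstRCDPP:$vdst).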
// Returns the assembly string for the inputs and outputs of a VOP[12C]
// instruction. This does not add the _e32 suffix, so it can be reused
// by getAsm64.
-class getAsm32 <bit HasDst, int NumSrcArgs> {
- string dst = "$dst";
+class getAsm32 <bit HasDst, int NumSrcArgs, ValueType DstVT = i32> {
+ string dst = !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"); // use $sdst for VOPC
string src0 = ", $src0";
string src1 = ", $src1";
string src2 = ", $src2";
@@ -1137,7 +1338,8 @@ class getAsm32 <bit HasDst, int NumSrcArgs> {
// Returns the assembly string for the inputs and outputs of a VOP3
// instruction.
-class getAsm64 <bit HasDst, int NumSrcArgs, bit HasModifiers> {
+class getAsm64 <bit HasDst, int NumSrcArgs, bit HasModifiers, ValueType DstVT = i32> {
+ string dst = !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"); // use $sdst for VOPC
string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
string src1 = !if(!eq(NumSrcArgs, 1), "",
!if(!eq(NumSrcArgs, 2), " $src1_modifiers",
@@ -1145,8 +1347,71 @@ class getAsm64 <bit HasDst, int NumSrcArgs, bit HasModifiers> {
string src2 = !if(!eq(NumSrcArgs, 3), " $src2_modifiers", "");
string ret =
!if(!eq(HasModifiers, 0),
- getAsm32<HasDst, NumSrcArgs>.ret,
- "$dst, "#src0#src1#src2#"$clamp"#"$omod");
+ getAsm32<HasDst, NumSrcArgs, DstVT>.ret,
+ dst#", "#src0#src1#src2#"$clamp"#"$omod");
+}
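// Worked example: with HasDst = 1, NumSrcArgs = 2, HasModifiers = 1 and
// DstVT = f32 this evaluates to
//   "$vdst, $src0_modifiers, $src1_modifiers$clamp$omod"
// and with HasModifiers = 0 it falls back to getAsm32's
//   "$vdst, $src0, $src1"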
+
+class getAsmDPP <bit HasDst, int NumSrcArgs, bit HasModifiers, ValueType DstVT = i32> {
+ string dst = !if(HasDst,
+ !if(!eq(DstVT.Size, 1),
+ "$sdst",
+ "$vdst"),
+ ""); // use $sdst for VOPC
+ string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
+ string src1 = !if(!eq(NumSrcArgs, 1), "",
+ !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
+ " $src1_modifiers,"));
+ string args = !if(!eq(HasModifiers, 0),
+ getAsm32<0, NumSrcArgs, DstVT>.ret,
+ ", "#src0#src1);
+ string ret = dst#args#" $dpp_ctrl$row_mask$bank_mask$bound_ctrl";
+}
+
+class getAsmSDWA <bit HasDst, int NumSrcArgs, bit HasFloatModifiers,
+ ValueType DstVT = i32> {
+ string dst = !if(HasDst,
+ !if(!eq(DstVT.Size, 1),
+ " vcc", // use vcc token as dst for VOPC instructioins
+ "$vdst"),
+ "");
+ string src0 = !if(HasFloatModifiers, "$src0_fmodifiers", "$src0_imodifiers");
+ string src1 = !if(HasFloatModifiers, "$src1_fmodifiers", "$src1_imodifiers");
+ string args = !if(!eq(NumSrcArgs, 0),
+ "",
+ !if(!eq(NumSrcArgs, 1),
+ ", "#src0#"$clamp",
+ ", "#src0#", "#src1#"$clamp"
+ )
+ );
+ string sdwa = !if(!eq(NumSrcArgs, 0),
+ "",
+ !if(!eq(NumSrcArgs, 1),
+ " $dst_sel $dst_unused $src0_sel",
+ !if(!eq(DstVT.Size, 1),
+ " $src0_sel $src1_sel", // No dst_sel and dst_unused for VOPC
+ " $dst_sel $dst_unused $src0_sel $src1_sel"
+ )
+ )
+ );
+ string ret = dst#args#sdwa;
+}
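// Worked example: a two-source FP op with a VGPR destination yields
//   "$vdst, $src0_fmodifiers, $src1_fmodifiers$clamp $dst_sel $dst_unused $src0_sel $src1_sel"
// while a VOPC op (DstVT = i1) prints the leading " vcc" token and drops
// $dst_sel/$dst_unused, matching the AsmSDWA override in
// VOPC_Class_Profile below.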
+
+// Checks whether an instruction with these operand types supports DPP and SDWA.
+class getHasExt <int NumSrcArgs, ValueType DstVT = i32, ValueType Src0VT = i32,
+ ValueType Src1VT = i32> {
+ bit ret = !if(!eq(NumSrcArgs, 3),
+ 0, // NumSrcArgs == 3 - No DPP or SDWA for VOP3
+ !if(!eq(DstVT.Size, 64),
+ 0, // 64-bit dst - No DPP or SDWA for 64-bit operands
+ !if(!eq(Src0VT.Size, 64),
+ 0, // 64-bit src0
+ !if(!eq(Src1VT.Size, 64),
+ 0, // 64-bit src1
+ 1
+ )
+ )
+ )
+ );
}
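// For example, getHasExt<2, f32, f32, f32>.ret is 1, so 32-bit VOP1/VOP2
// instructions grow _dpp and _sdwa variants below, while NumSrcArgs == 3
// or any 64-bit dst/src type yields 0.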
class VOPProfile <list<ValueType> _ArgVT> {
@@ -1158,30 +1423,48 @@ class VOPProfile <list<ValueType> _ArgVT> {
field ValueType Src1VT = ArgVT[2];
field ValueType Src2VT = ArgVT[3];
field RegisterOperand DstRC = getVALUDstForVT<DstVT>.ret;
+ field RegisterOperand DstRCDPP = getVALUDstForVT<DstVT>.ret;
+ field RegisterOperand DstRCSDWA = getVALUDstForVT<DstVT>.ret;
field RegisterOperand Src0RC32 = getVOPSrc0ForVT<Src0VT>.ret;
- field RegisterClass Src1RC32 = getVOPSrc1ForVT<Src1VT>.ret;
+ field RegisterClass Src1RC32 = getVregSrcForVT<Src1VT>.ret;
field RegisterOperand Src0RC64 = getVOP3SrcForVT<Src0VT>.ret;
field RegisterOperand Src1RC64 = getVOP3SrcForVT<Src1VT>.ret;
field RegisterOperand Src2RC64 = getVOP3SrcForVT<Src2VT>.ret;
+ field RegisterClass Src0DPP = getVregSrcForVT<Src0VT>.ret;
+ field RegisterClass Src1DPP = getVregSrcForVT<Src1VT>.ret;
+ field RegisterClass Src0SDWA = getVregSrcForVT<Src0VT>.ret;
+ field RegisterClass Src1SDWA = getVregSrcForVT<Src1VT>.ret;
field bit HasDst = !if(!eq(DstVT.Value, untyped.Value), 0, 1);
field bit HasDst32 = HasDst;
field int NumSrcArgs = getNumSrcArgs<Src0VT, Src1VT, Src2VT>.ret;
field bit HasModifiers = hasModifiers<Src0VT>.ret;
- field dag Outs = !if(HasDst,(outs DstRC:$dst),(outs));
+ field bit HasExt = getHasExt<NumSrcArgs, DstVT, Src0VT, Src1VT>.ret;
+
+ field dag Outs = !if(HasDst,(outs DstRC:$vdst),(outs));
// VOP3b instructions are a special case with a second explicit
// output. This is manually overridden for them.
field dag Outs32 = Outs;
field dag Outs64 = Outs;
+ field dag OutsDPP = getOutsExt<HasDst, DstVT, DstRCDPP>.ret;
+ field dag OutsSDWA = getOutsExt<HasDst, DstVT, DstRCDPP>.ret;
field dag Ins32 = getIns32<Src0RC32, Src1RC32, NumSrcArgs>.ret;
field dag Ins64 = getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
HasModifiers>.ret;
+ field dag InsDPP = getInsDPP<Src0DPP, Src1DPP, NumSrcArgs, HasModifiers>.ret;
+ field dag InsSDWA = getInsSDWA<Src0SDWA, Src1SDWA, NumSrcArgs, HasModifiers, DstVT>.ret;
+
+ field string Asm32 = getAsm32<HasDst, NumSrcArgs, DstVT>.ret;
+ field string Asm64 = getAsm64<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret;
+ field string AsmDPP = getAsmDPP<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret;
+ field string AsmSDWA = getAsmSDWA<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret;
+}
- field string Asm32 = getAsm32<HasDst, NumSrcArgs>.ret;
- field string Asm64 = getAsm64<HasDst, NumSrcArgs, HasModifiers>.ret;
+class VOP_NO_EXT <VOPProfile p> : VOPProfile <p.ArgVT> {
+ let HasExt = 0;
}
// FIXME: I think these F16/I16 profiles will need to use f16/i16 types in order
@@ -1194,6 +1477,9 @@ def VOP_F16_F16_F16 : VOPProfile <[f16, f16, f16, untyped]>;
def VOP_F16_F16_I16 : VOPProfile <[f16, f16, i32, untyped]>;
def VOP_I16_I16_I16 : VOPProfile <[i32, i32, i32, untyped]>;
+def VOP_I16_I16_I16_I16 : VOPProfile <[i32, i32, i32, i32, untyped]>;
+def VOP_F16_F16_F16_F16 : VOPProfile <[f16, f16, f16, f16, untyped]>;
+
def VOP_NONE : VOPProfile <[untyped, untyped, untyped, untyped]>;
def VOP_F32_F32 : VOPProfile <[f32, f32, untyped, untyped]>;
@@ -1216,10 +1502,10 @@ def VOP_I32_I32_I32 : VOPProfile <[i32, i32, i32, untyped]>;
// Write out to vcc or arbitrary SGPR.
def VOP2b_I32_I1_I32_I32 : VOPProfile<[i32, i32, i32, untyped]> {
- let Asm32 = "$dst, vcc, $src0, $src1";
- let Asm64 = "$dst, $sdst, $src0, $src1";
- let Outs32 = (outs DstRC:$dst);
- let Outs64 = (outs DstRC:$dst, SReg_64:$sdst);
+ let Asm32 = "$vdst, vcc, $src0, $src1";
+ let Asm64 = "$vdst, $sdst, $src0, $src1";
+ let Outs32 = (outs DstRC:$vdst);
+ let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
}
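// For illustration: the carry-out instructions (e.g. v_add_i32) use this
// profile, so the e32 form prints "$vdst, vcc, $src0, $src1" with the carry
// implicitly in VCC, while the e64 form names the carry register through
// the second $sdst output.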
// Write out to vcc or arbitrary SGPR and read in from vcc or
@@ -1231,10 +1517,23 @@ def VOP2b_I32_I1_I32_I32_I1 : VOPProfile<[i32, i32, i32, i1]> {
// restriction. SGPRs are still allowed because it should
// technically be possible to use VCC again as src0.
let Src0RC32 = VCSrc_32;
- let Asm32 = "$dst, vcc, $src0, $src1, vcc";
- let Asm64 = "$dst, $sdst, $src0, $src1, $src2";
- let Outs32 = (outs DstRC:$dst);
- let Outs64 = (outs DstRC:$dst, SReg_64:$sdst);
+ let Asm32 = "$vdst, vcc, $src0, $src1, vcc";
+ let Asm64 = "$vdst, $sdst, $src0, $src1, $src2";
+ let Outs32 = (outs DstRC:$vdst);
+ let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
+
+ // Suppress src2 implied by type since the 32-bit encoding uses an
+ // implicit VCC use.
+ let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1);
+}
+
+// Read in from vcc or arbitrary SGPR.
+def VOP2e_I32_I32_I32_I1 : VOPProfile<[i32, i32, i32, i1]> {
+ let Src0RC32 = VCSrc_32; // See comment in def VOP2b_I32_I1_I32_I32_I1 above.
+ let Asm32 = "$vdst, $src0, $src1, vcc";
+ let Asm64 = "$vdst, $src0, $src1, $src2";
+ let Outs32 = (outs DstRC:$vdst);
+ let Outs64 = (outs DstRC:$vdst);
// Suppress src2 implied by type since the 32-bit encoding uses an
// implicit VCC use.
@@ -1263,11 +1562,17 @@ class VOPC_Profile<ValueType vt0, ValueType vt1 = vt0> : VOPProfile <[i1, vt0, v
let Asm32 = "vcc, $src0, $src1";
// The destination for 32-bit encoding is implicit.
let HasDst32 = 0;
+ let Outs64 = (outs DstRC:$sdst);
}
class VOPC_Class_Profile<ValueType vt> : VOPC_Profile<vt, i32> {
- let Ins64 = (ins InputModsNoDefault:$src0_modifiers, Src0RC64:$src0, Src1RC64:$src1);
- let Asm64 = "$dst, $src0_modifiers, $src1";
+ let Ins64 = (ins FPInputMods:$src0_modifiers, Src0RC64:$src0, Src1RC64:$src1);
+ let Asm64 = "$sdst, $src0_modifiers, $src1";
+ let InsSDWA = (ins FPInputMods:$src0_fmodifiers, Src0RC64:$src0,
+ IntInputMods:$src1_imodifiers, Src1RC64:$src1,
+ clampmod:$clamp, src0_sel:$src0_sel, src1_sel:$src1_sel);
+ let AsmSDWA = " vcc, $src0_fmodifiers, $src1_imodifiers$clamp $src0_sel $src1_sel";
+
}
def VOPC_I1_F32_F32 : VOPC_Profile<f32>;
@@ -1281,28 +1586,42 @@ def VOPC_I1_F64_I32 : VOPC_Class_Profile<f64>;
def VOP_I64_I64_I32 : VOPProfile <[i64, i64, i32, untyped]>;
def VOP_I64_I32_I64 : VOPProfile <[i64, i32, i64, untyped]>;
def VOP_I64_I64_I64 : VOPProfile <[i64, i64, i64, untyped]>;
-def VOP_CNDMASK : VOPProfile <[i32, i32, i32, untyped]> {
- let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1);
- let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, SSrc_64:$src2);
- let Asm64 = "$dst, $src0, $src1, $src2";
-}
def VOP_F32_F32_F32_F32 : VOPProfile <[f32, f32, f32, f32]>;
-def VOP_MADK : VOPProfile <[f32, f32, f32, f32]> {
- field dag Ins = (ins VCSrc_32:$src0, VGPR_32:$vsrc1, u32imm:$src2);
- field string Asm = "$dst, $src0, $vsrc1, $src2";
+def VOP_MADAK : VOPProfile <[f32, f32, f32, f32]> {
+ field dag Ins32 = (ins VCSrc_32:$src0, VGPR_32:$src1, u32kimm:$imm);
+ field string Asm32 = "$vdst, $src0, $src1, $imm";
+ field bit HasExt = 0;
+}
+def VOP_MADMK : VOPProfile <[f32, f32, f32, f32]> {
+ field dag Ins32 = (ins VCSrc_32:$src0, u32kimm:$imm, VGPR_32:$src1);
+ field string Asm32 = "$vdst, $src0, $imm, $src1";
+ field bit HasExt = 0;
}
def VOP_MAC : VOPProfile <[f32, f32, f32, f32]> {
let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1, VGPR_32:$src2);
let Ins64 = getIns64<Src0RC64, Src1RC64, RegisterOperand<VGPR_32>, 3,
HasModifiers>.ret;
- let Asm32 = getAsm32<1, 2>.ret;
- let Asm64 = getAsm64<1, 2, HasModifiers>.ret;
+ let InsDPP = (ins FPInputMods:$src0_modifiers, Src0RC32:$src0,
+ FPInputMods:$src1_modifiers, Src1RC32:$src1,
+ VGPR_32:$src2, // stub argument
+ dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
+ bank_mask:$bank_mask, bound_ctrl:$bound_ctrl);
+ let InsSDWA = (ins FPInputMods:$src0_fmodifiers, Src0RC32:$src0,
+ FPInputMods:$src1_fmodifiers, Src1RC32:$src1,
+ VGPR_32:$src2, // stub argument
+ clampmod:$clamp, dst_sel:$dst_sel, dst_unused:$dst_unused,
+ src0_sel:$src0_sel, src1_sel:$src1_sel);
+ let Asm32 = getAsm32<1, 2, f32>.ret;
+ let Asm64 = getAsm64<1, 2, HasModifiers, f32>.ret;
+ let AsmDPP = getAsmDPP<1, 2, HasModifiers, f32>.ret;
+ let AsmSDWA = getAsmSDWA<1, 2, HasModifiers, f32>.ret;
}
def VOP_F64_F64_F64_F64 : VOPProfile <[f64, f64, f64, f64]>;
def VOP_I32_I32_I32_I32 : VOPProfile <[i32, i32, i32, i32]>;
def VOP_I64_I32_I32_I64 : VOPProfile <[i64, i32, i32, i64]>;
+// This class is used only with VOPC instructions. Use $sdst for the output operand.
class SIInstAlias <string asm, Instruction inst, VOPProfile p> :
InstAlias <asm, (inst)>, PredicateControl {
@@ -1313,13 +1632,13 @@ class SIInstAlias <string asm, Instruction inst, VOPProfile p> :
!if (p.HasDst32,
!if (!eq(p.NumSrcArgs, 0),
// 1 dst, 0 src
- (inst p.DstRC:$dst),
+ (inst p.DstRC:$sdst),
!if (!eq(p.NumSrcArgs, 1),
// 1 dst, 1 src
- (inst p.DstRC:$dst, p.Src0RC32:$src0),
+ (inst p.DstRC:$sdst, p.Src0RC32:$src0),
!if (!eq(p.NumSrcArgs, 2),
// 1 dst, 2 src
- (inst p.DstRC:$dst, p.Src0RC32:$src0, p.Src1RC32:$src1),
+ (inst p.DstRC:$sdst, p.Src0RC32:$src0, p.Src1RC32:$src1),
// else - unreachable
(inst)))),
// else
@@ -1368,7 +1687,7 @@ class AtomicNoRet <string noRetOp, bit isRet> {
class VOP1_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
VOP1Common <outs, ins, "", pattern>,
VOP <opName>,
- SIMCInstr <opName#"_e32", SISubtarget.NONE>,
+ SIMCInstr <opName#"_e32", SIEncodingFamily.NONE>,
MnemonicAlias<opName#"_e32", opName> {
let isPseudo = 1;
let isCodeGenOnly = 1;
@@ -1379,14 +1698,18 @@ class VOP1_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
class VOP1_Real_si <string opName, vop1 op, dag outs, dag ins, string asm> :
VOP1<op.SI, outs, ins, asm, []>,
- SIMCInstr <opName#"_e32", SISubtarget.SI> {
+ SIMCInstr <opName#"_e32", SIEncodingFamily.SI> {
let AssemblerPredicate = SIAssemblerPredicate;
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
}
class VOP1_Real_vi <string opName, vop1 op, dag outs, dag ins, string asm> :
VOP1<op.VI, outs, ins, asm, []>,
- SIMCInstr <opName#"_e32", SISubtarget.VI> {
+ SIMCInstr <opName#"_e32", SIEncodingFamily.VI> {
let AssemblerPredicates = [isVI];
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
}
multiclass VOP1_m <vop1 op, string opName, VOPProfile p, list<dag> pattern,
@@ -1399,6 +1722,49 @@ multiclass VOP1_m <vop1 op, string opName, VOPProfile p, list<dag> pattern,
}
+class VOP1_DPP <vop1 op, string opName, VOPProfile p> :
+ VOP1_DPPe <op.VI>,
+ VOP_DPP <p.OutsDPP, p.InsDPP, opName#p.AsmDPP, [], p.HasModifiers> {
+ let AssemblerPredicates = !if(p.HasExt, [isVI], [DisableInst]);
+ let DecoderNamespace = "DPP";
+ let DisableDecoder = DisableVIDecoder;
+ let src0_modifiers = !if(p.HasModifiers, ?, 0);
+ let src1_modifiers = 0;
+}
+
+class SDWADisableFields <VOPProfile p> {
+ bits<8> src0 = !if(!eq(p.NumSrcArgs, 0), 0, ?);
+ bits<3> src0_sel = !if(!eq(p.NumSrcArgs, 0), 6, ?);
+ bits<2> src0_fmodifiers = !if(!eq(p.NumSrcArgs, 0),
+ 0,
+ !if(p.HasModifiers, ?, 0));
+ bits<1> src0_imodifiers = !if(!eq(p.NumSrcArgs, 0),
+ 0,
+ !if(p.HasModifiers, 0, ?));
+ bits<3> src1_sel = !if(!eq(p.NumSrcArgs, 0), 6,
+ !if(!eq(p.NumSrcArgs, 1), 6,
+ ?));
+ bits<2> src1_fmodifiers = !if(!eq(p.NumSrcArgs, 0), 0,
+ !if(!eq(p.NumSrcArgs, 1), 0,
+ !if(p.HasModifiers, ?, 0)));
+ bits<1> src1_imodifiers = !if(!eq(p.NumSrcArgs, 0), 0,
+ !if(!eq(p.NumSrcArgs, 1), 0,
+ !if(p.HasModifiers, 0, ?)));
+ bits<3> dst_sel = !if(p.HasDst, ?, 6);
+ bits<2> dst_unused = !if(p.HasDst, ?, 2);
+ bits<1> clamp = !if(!eq(p.NumSrcArgs, 0), 0, ?);
+}
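// For example, a source-less op (NumSrcArgs = 0) ties src0_sel and src1_sel
// to 6 (the DWORD selection) and zeroes the modifier bits, so only the
// trivial SDWA form can be encoded; a ? leaves the field user-settable.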
+
+class VOP1_SDWA <vop1 op, string opName, VOPProfile p> :
+ VOP1_SDWAe <op.VI>,
+ VOP_SDWA <p.OutsSDWA, p.InsSDWA, opName#p.AsmSDWA, [], p.HasModifiers>,
+ SDWADisableFields <p> {
+ let AsmMatchConverter = "cvtSdwaVOP1";
+ let AssemblerPredicates = !if(p.HasExt, [isVI], [DisableInst]);
+ let DecoderNamespace = "SDWA";
+ let DisableDecoder = DisableVIDecoder;
+}
+
multiclass VOP1SI_m <vop1 op, string opName, VOPProfile p, list<dag> pattern,
string asm = opName#p.Asm32> {
@@ -1410,7 +1776,7 @@ multiclass VOP1SI_m <vop1 op, string opName, VOPProfile p, list<dag> pattern,
class VOP2_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
VOP2Common <outs, ins, "", pattern>,
VOP <opName>,
- SIMCInstr<opName#"_e32", SISubtarget.NONE>,
+ SIMCInstr<opName#"_e32", SIEncodingFamily.NONE>,
MnemonicAlias<opName#"_e32", opName> {
let isPseudo = 1;
let isCodeGenOnly = 1;
@@ -1418,14 +1784,18 @@ class VOP2_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
class VOP2_Real_si <string opName, vop2 op, dag outs, dag ins, string asm> :
VOP2 <op.SI, outs, ins, opName#asm, []>,
- SIMCInstr <opName#"_e32", SISubtarget.SI> {
+ SIMCInstr <opName#"_e32", SIEncodingFamily.SI> {
let AssemblerPredicates = [isSICI];
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
}
class VOP2_Real_vi <string opName, vop2 op, dag outs, dag ins, string asm> :
VOP2 <op.VI, outs, ins, opName#asm, []>,
- SIMCInstr <opName#"_e32", SISubtarget.VI> {
+ SIMCInstr <opName#"_e32", SIEncodingFamily.VI> {
let AssemblerPredicates = [isVI];
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
}
multiclass VOP2SI_m <vop2 op, string opName, VOPProfile p, list<dag> pattern,
@@ -1449,6 +1819,26 @@ multiclass VOP2_m <vop2 op, string opName, VOPProfile p, list <dag> pattern,
}
+class VOP2_DPP <vop2 op, string opName, VOPProfile p> :
+ VOP2_DPPe <op.VI>,
+ VOP_DPP <p.OutsDPP, p.InsDPP, opName#p.AsmDPP, [], p.HasModifiers> {
+ let AssemblerPredicates = !if(p.HasExt, [isVI], [DisableInst]);
+ let DecoderNamespace = "DPP";
+ let DisableDecoder = DisableVIDecoder;
+ let src0_modifiers = !if(p.HasModifiers, ?, 0);
+ let src1_modifiers = !if(p.HasModifiers, ?, 0);
+}
+
+class VOP2_SDWA <vop2 op, string opName, VOPProfile p> :
+ VOP2_SDWAe <op.VI>,
+ VOP_SDWA <p.OutsSDWA, p.InsSDWA, opName#p.AsmSDWA, [], p.HasModifiers>,
+ SDWADisableFields <p> {
+ let AsmMatchConverter = "cvtSdwaVOP2";
+ let AssemblerPredicates = !if(p.HasExt, [isVI], [DisableInst]);
+ let DecoderNamespace = "SDWA";
+ let DisableDecoder = DisableVIDecoder;
+}
+
class VOP3DisableFields <bit HasSrc1, bit HasSrc2, bit HasModifiers> {
bits<2> src0_modifiers = !if(HasModifiers, ?, 0);
@@ -1471,10 +1861,11 @@ class VOP3DisableModFields <bit HasSrc0Mods,
bits<1> clamp = !if(HasOutputMods, ?, 0);
}
-class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
- VOP3Common <outs, ins, "", pattern>,
+class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName,
+ bit HasMods = 0, bit VOP3Only = 0> :
+ VOP3Common <outs, ins, "", pattern, HasMods, VOP3Only>,
VOP <opName>,
- SIMCInstr<opName#"_e64", SISubtarget.NONE>,
+ SIMCInstr<opName#"_e64", SIEncodingFamily.NONE>,
MnemonicAlias<opName#"_e64", opName> {
let isPseudo = 1;
let isCodeGenOnly = 1;
@@ -1483,44 +1874,96 @@ class VOP3_Pseudo <dag outs, dag ins, list<dag> pattern, string opName> :
field bit src0;
}
-class VOP3_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
- VOP3Common <outs, ins, asm, []>,
+class VOP3_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName,
+ bit HasMods = 0, bit VOP3Only = 0> :
+ VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
VOP3e <op>,
- SIMCInstr<opName#"_e64", SISubtarget.SI> {
+ SIMCInstr<opName#"_e64", SIEncodingFamily.SI> {
let AssemblerPredicates = [isSICI];
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
}
-class VOP3_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName> :
- VOP3Common <outs, ins, asm, []>,
+class VOP3_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName,
+ bit HasMods = 0, bit VOP3Only = 0> :
+ VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
VOP3e_vi <op>,
- SIMCInstr <opName#"_e64", SISubtarget.VI> {
+ SIMCInstr <opName#"_e64", SIEncodingFamily.VI> {
+ let AssemblerPredicates = [isVI];
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
+}
+
+class VOP3_C_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName,
+ bit HasMods = 0, bit VOP3Only = 0> :
+ VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
+ VOP3ce <op>,
+ SIMCInstr<opName#"_e64", SIEncodingFamily.SI> {
+ let AssemblerPredicates = [isSICI];
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
+}
+
+class VOP3_C_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName,
+ bit HasMods = 0, bit VOP3Only = 0> :
+ VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
+ VOP3ce_vi <op>,
+ SIMCInstr <opName#"_e64", SIEncodingFamily.VI> {
let AssemblerPredicates = [isVI];
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
}
-class VOP3b_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
- VOP3Common <outs, ins, asm, []>,
+class VOP3b_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName,
+ bit HasMods = 0, bit VOP3Only = 0> :
+ VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
VOP3be <op>,
- SIMCInstr<opName#"_e64", SISubtarget.SI> {
+ SIMCInstr<opName#"_e64", SIEncodingFamily.SI> {
let AssemblerPredicates = [isSICI];
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
}
-class VOP3b_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName> :
- VOP3Common <outs, ins, asm, []>,
+class VOP3b_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName,
+ bit HasMods = 0, bit VOP3Only = 0> :
+ VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
VOP3be_vi <op>,
- SIMCInstr <opName#"_e64", SISubtarget.VI> {
+ SIMCInstr <opName#"_e64", SIEncodingFamily.VI> {
+ let AssemblerPredicates = [isVI];
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
+}
+
+class VOP3e_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName,
+ bit HasMods = 0, bit VOP3Only = 0> :
+ VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
+ VOP3e <op>,
+ SIMCInstr<opName#"_e64", SIEncodingFamily.SI> {
+ let AssemblerPredicates = [isSICI];
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
+}
+
+class VOP3e_Real_vi <bits<10> op, dag outs, dag ins, string asm, string opName,
+ bit HasMods = 0, bit VOP3Only = 0> :
+ VOP3Common <outs, ins, asm, [], HasMods, VOP3Only>,
+ VOP3e_vi <op>,
+ SIMCInstr <opName#"_e64", SIEncodingFamily.VI> {
let AssemblerPredicates = [isVI];
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
}
multiclass VOP3_m <vop op, dag outs, dag ins, string asm, list<dag> pattern,
- string opName, int NumSrcArgs, bit HasMods = 1> {
+ string opName, int NumSrcArgs, bit HasMods = 1, bit VOP3Only = 0> {
def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
- def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+ def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods, VOP3Only>,
VOP3DisableFields<!if(!eq(NumSrcArgs, 1), 0, 1),
!if(!eq(NumSrcArgs, 2), 0, 1),
HasMods>;
- def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+ def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods, VOP3Only>,
VOP3DisableFields<!if(!eq(NumSrcArgs, 1), 0, 1),
!if(!eq(NumSrcArgs, 2), 0, 1),
HasMods>;
@@ -1529,21 +1972,21 @@ multiclass VOP3_m <vop op, dag outs, dag ins, string asm, list<dag> pattern,
multiclass VOP3_1_m <vop op, dag outs, dag ins, string asm,
list<dag> pattern, string opName, bit HasMods = 1> {
- def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>;
- def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+ def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<0, 0, HasMods>;
- def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+ def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<0, 0, HasMods>;
}
multiclass VOP3SI_1_m <vop op, dag outs, dag ins, string asm,
list<dag> pattern, string opName, bit HasMods = 1> {
- def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>;
- def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+ def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<0, 0, HasMods>;
// No VI instruction. This class is for SI only.
}
@@ -1552,13 +1995,13 @@ multiclass VOP3_2_m <vop op, dag outs, dag ins, string asm,
list<dag> pattern, string opName, string revOp,
bit HasMods = 1> {
- def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>,
VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
- def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+ def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<1, 0, HasMods>;
- def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+ def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<1, 0, HasMods>;
}
@@ -1566,10 +2009,10 @@ multiclass VOP3SI_2_m <vop op, dag outs, dag ins, string asm,
list<dag> pattern, string opName, string revOp,
bit HasMods = 1> {
- def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>,
VOP2_REV<revOp#"_e64", !eq(revOp, opName)>;
- def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+ def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<1, 0, HasMods>;
// No VI instruction. This class is for SI only.
@@ -1579,13 +2022,26 @@ multiclass VOP3SI_2_m <vop op, dag outs, dag ins, string asm,
// instead of an implicit VCC as in the VOP2b format.
multiclass VOP3b_2_3_m <vop op, dag outs, dag ins, string asm,
list<dag> pattern, string opName, string revOp,
- bit HasMods = 1, bit useSrc2Input = 0> {
- def "" : VOP3_Pseudo <outs, ins, pattern, opName>;
+ bit HasMods = 1, bit useSrc2Input = 0, bit VOP3Only = 0> {
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods, VOP3Only>;
+
+ def _si : VOP3b_Real_si <op.SI3, outs, ins, asm, opName, HasMods, VOP3Only>,
+ VOP3DisableFields<1, useSrc2Input, HasMods>;
+
+ def _vi : VOP3b_Real_vi <op.VI3, outs, ins, asm, opName, HasMods, VOP3Only>,
+ VOP3DisableFields<1, useSrc2Input, HasMods>;
+}
- def _si : VOP3b_Real_si <op.SI3, outs, ins, asm, opName>,
+// Same as VOP3b_2_3_m but no 2nd destination (sdst), e.g. v_cndmask_b32.
+multiclass VOP3e_2_3_m <vop op, dag outs, dag ins, string asm,
+ list<dag> pattern, string opName, string revOp,
+ bit HasMods = 1, bit useSrc2Input = 0, bit VOP3Only = 0> {
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods, VOP3Only>;
+
+ def _si : VOP3e_Real_si <op.SI3, outs, ins, asm, opName, HasMods, VOP3Only>,
VOP3DisableFields<1, useSrc2Input, HasMods>;
- def _vi : VOP3b_Real_vi <op.VI3, outs, ins, asm, opName>,
+ def _vi : VOP3e_Real_vi <op.VI3, outs, ins, asm, opName, HasMods, VOP3Only>,
VOP3DisableFields<1, useSrc2Input, HasMods>;
}
@@ -1594,19 +2050,19 @@ multiclass VOP3_C_m <vop op, dag outs, dag ins, string asm,
bit HasMods, bit defExec,
string revOp, list<SchedReadWrite> sched> {
- def "" : VOP3_Pseudo <outs, ins, pattern, opName>,
+ def "" : VOP3_Pseudo <outs, ins, pattern, opName, HasMods>,
VOP2_REV<revOp#"_e64", !eq(revOp, opName)> {
let Defs = !if(defExec, [EXEC], []);
let SchedRW = sched;
}
- def _si : VOP3_Real_si <op.SI3, outs, ins, asm, opName>,
+ def _si : VOP3_C_Real_si <op.SI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<1, 0, HasMods> {
let Defs = !if(defExec, [EXEC], []);
let SchedRW = sched;
}
- def _vi : VOP3_Real_vi <op.VI3, outs, ins, asm, opName>,
+ def _vi : VOP3_C_Real_vi <op.VI3, outs, ins, asm, opName, HasMods>,
VOP3DisableFields<1, 0, HasMods> {
let Defs = !if(defExec, [EXEC], []);
let SchedRW = sched;
@@ -1618,19 +2074,23 @@ multiclass VOP2SI_3VI_m <vop3 op, string opName, dag outs, dag ins,
string asm, list<dag> pattern = []> {
let isPseudo = 1, isCodeGenOnly = 1 in {
def "" : VOPAnyCommon <outs, ins, "", pattern>,
- SIMCInstr<opName, SISubtarget.NONE>;
+ SIMCInstr<opName, SIEncodingFamily.NONE>;
}
def _si : VOP2 <op.SI3{5-0}, outs, ins, asm, []>,
- SIMCInstr <opName, SISubtarget.SI> {
+ SIMCInstr <opName, SIEncodingFamily.SI> {
let AssemblerPredicates = [isSICI];
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
}
def _vi : VOP3Common <outs, ins, asm, []>,
VOP3e_vi <op.VI3>,
VOP3DisableFields <1, 0, 0>,
- SIMCInstr <opName, SISubtarget.VI> {
+ SIMCInstr <opName, SIEncodingFamily.VI> {
let AssemblerPredicates = [isVI];
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
}
}
@@ -1641,15 +2101,19 @@ multiclass VOP1_Helper <vop1 op, string opName, VOPProfile p, list<dag> pat32,
defm _e64 : VOP3_1_m <op, p.Outs, p.Ins64, opName#p.Asm64, pat64, opName,
p.HasModifiers>;
+
+ def _dpp : VOP1_DPP <op, opName, p>;
+
+ def _sdwa : VOP1_SDWA <op, opName, p>;
}
multiclass VOP1Inst <vop1 op, string opName, VOPProfile P,
SDPatternOperator node = null_frag> : VOP1_Helper <
op, opName, P, [],
!if(P.HasModifiers,
- [(set P.DstVT:$dst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
+ [(set P.DstVT:$vdst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
i32:$src0_modifiers, i1:$clamp, i32:$omod))))],
- [(set P.DstVT:$dst, (node P.Src0VT:$src0))])
+ [(set P.DstVT:$vdst, (node P.Src0VT:$src0))])
>;
multiclass VOP1InstSI <vop1 op, string opName, VOPProfile P,
@@ -1659,9 +2123,9 @@ multiclass VOP1InstSI <vop1 op, string opName, VOPProfile P,
defm _e64 : VOP3SI_1_m <op, P.Outs, P.Ins64, opName#P.Asm64,
!if(P.HasModifiers,
- [(set P.DstVT:$dst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
+ [(set P.DstVT:$vdst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
i32:$src0_modifiers, i1:$clamp, i32:$omod))))],
- [(set P.DstVT:$dst, (node P.Src0VT:$src0))]),
+ [(set P.DstVT:$vdst, (node P.Src0VT:$src0))]),
opName, P.HasModifiers>;
}
@@ -1672,6 +2136,10 @@ multiclass VOP2_Helper <vop2 op, string opName, VOPProfile p, list<dag> pat32,
defm _e64 : VOP3_2_m <op, p.Outs, p.Ins64, opName#p.Asm64, pat64, opName,
revOp, p.HasModifiers>;
+
+ def _dpp : VOP2_DPP <op, opName, p>;
+
+ def _sdwa : VOP2_SDWA <op, opName, p>;
}
multiclass VOP2Inst <vop2 op, string opName, VOPProfile P,
@@ -1679,11 +2147,11 @@ multiclass VOP2Inst <vop2 op, string opName, VOPProfile P,
string revOp = opName> : VOP2_Helper <
op, opName, P, [],
!if(P.HasModifiers,
- [(set P.DstVT:$dst,
+ [(set P.DstVT:$vdst,
(node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
i1:$clamp, i32:$omod)),
(P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
- [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
+ [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
revOp
>;
@@ -1695,14 +2163,41 @@ multiclass VOP2InstSI <vop2 op, string opName, VOPProfile P,
defm _e64 : VOP3SI_2_m <op, P.Outs, P.Ins64, opName#P.Asm64,
!if(P.HasModifiers,
- [(set P.DstVT:$dst,
+ [(set P.DstVT:$vdst,
(node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
i1:$clamp, i32:$omod)),
(P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
- [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
+ [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
opName, revOp, P.HasModifiers>;
}
+multiclass VOP2e_Helper <vop2 op, string opName, VOPProfile p,
+ list<dag> pat32, list<dag> pat64,
+ string revOp, bit useSGPRInput> {
+
+ let SchedRW = [Write32Bit] in {
+ let Uses = !if(useSGPRInput, [VCC, EXEC], [EXEC]) in {
+ defm _e32 : VOP2_m <op, opName, p, pat32, revOp>;
+ }
+
+ defm _e64 : VOP3e_2_3_m <op, p.Outs64, p.Ins64, opName#p.Asm64, pat64,
+ opName, revOp, p.HasModifiers, useSGPRInput>;
+ }
+}
+
+multiclass VOP2eInst <vop2 op, string opName, VOPProfile P,
+ SDPatternOperator node = null_frag,
+ string revOp = opName> : VOP2e_Helper <
+ op, opName, P, [],
+ !if(P.HasModifiers,
+ [(set P.DstVT:$vdst,
+ (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
+ i1:$clamp, i32:$omod)),
+ (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
+ [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
+ revOp, !eq(P.NumSrcArgs, 3)
+>;
+
multiclass VOP2b_Helper <vop2 op, string opName, VOPProfile p,
list<dag> pat32, list<dag> pat64,
string revOp, bit useSGPRInput> {
@@ -1722,11 +2217,11 @@ multiclass VOP2bInst <vop2 op, string opName, VOPProfile P,
string revOp = opName> : VOP2b_Helper <
op, opName, P, [],
!if(P.HasModifiers,
- [(set P.DstVT:$dst,
+ [(set P.DstVT:$vdst,
(node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
i1:$clamp, i32:$omod)),
(P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
- [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
+ [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
revOp, !eq(P.NumSrcArgs, 3)
>;
@@ -1746,31 +2241,35 @@ multiclass VOP2_VI3_Inst <vop23 op, string opName, VOPProfile P,
: VOP2_VI3_Helper <
op, opName, P, [],
!if(P.HasModifiers,
- [(set P.DstVT:$dst,
+ [(set P.DstVT:$vdst,
(node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
i1:$clamp, i32:$omod)),
(P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
- [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
+ [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1))]),
revOp
>;
-multiclass VOP2MADK <vop2 op, string opName, list<dag> pattern = []> {
+multiclass VOP2MADK <vop2 op, string opName, VOPProfile P, list<dag> pattern = []> {
- def "" : VOP2_Pseudo <VOP_MADK.Outs, VOP_MADK.Ins, pattern, opName>;
+ def "" : VOP2_Pseudo <P.Outs, P.Ins32, pattern, opName>;
let isCodeGenOnly = 0 in {
- def _si : VOP2Common <VOP_MADK.Outs, VOP_MADK.Ins,
- !strconcat(opName, VOP_MADK.Asm), []>,
- SIMCInstr <opName#"_e32", SISubtarget.SI>,
+ def _si : VOP2Common <P.Outs, P.Ins32,
+ !strconcat(opName, P.Asm32), []>,
+ SIMCInstr <opName#"_e32", SIEncodingFamily.SI>,
VOP2_MADKe <op.SI> {
let AssemblerPredicates = [isSICI];
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
}
- def _vi : VOP2Common <VOP_MADK.Outs, VOP_MADK.Ins,
- !strconcat(opName, VOP_MADK.Asm), []>,
- SIMCInstr <opName#"_e32", SISubtarget.VI>,
+ def _vi : VOP2Common <P.Outs, P.Ins32,
+ !strconcat(opName, P.Asm32), []>,
+ SIMCInstr <opName#"_e32", SIEncodingFamily.VI>,
VOP2_MADKe <op.VI> {
let AssemblerPredicates = [isVI];
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
}
} // End isCodeGenOnly = 0
}
@@ -1778,37 +2277,55 @@ let isCodeGenOnly = 0 in {
class VOPC_Pseudo <dag ins, list<dag> pattern, string opName> :
VOPCCommon <ins, "", pattern>,
VOP <opName>,
- SIMCInstr<opName#"_e32", SISubtarget.NONE> {
+ SIMCInstr<opName#"_e32", SIEncodingFamily.NONE> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
+class VOPC_SDWA <vopc op, string opName, bit DefExec, VOPProfile p> :
+ VOPC_SDWAe <op.VI>,
+ VOP_SDWA <p.OutsSDWA, p.InsSDWA, opName#p.AsmSDWA, [], p.HasModifiers>,
+ SDWADisableFields <p> {
+ let Defs = !if(DefExec, [VCC, EXEC], [VCC]);
+ let hasSideEffects = DefExec;
+ let AsmMatchConverter = "cvtSdwaVOPC";
+ let AssemblerPredicates = !if(p.HasExt, [isVI], [DisableInst]);
+ let DecoderNamespace = "SDWA";
+ let DisableDecoder = DisableVIDecoder;
+}
+
multiclass VOPC_m <vopc op, dag ins, string op_asm, list<dag> pattern,
string opName, bit DefExec, VOPProfile p,
list<SchedReadWrite> sched,
string revOpName = "", string asm = opName#"_e32 "#op_asm,
string alias_asm = opName#" "#op_asm> {
- def "" : VOPC_Pseudo <ins, pattern, opName> {
+ def "" : VOPC_Pseudo <ins, pattern, opName>,
+ VOP2_REV<revOpName#"_e32", !eq(revOpName, opName)> {
let Defs = !if(DefExec, [VCC, EXEC], [VCC]);
let SchedRW = sched;
+ let isConvergent = DefExec;
}
let AssemblerPredicates = [isSICI] in {
def _si : VOPC<op.SI, ins, asm, []>,
- SIMCInstr <opName#"_e32", SISubtarget.SI> {
+ SIMCInstr <opName#"_e32", SIEncodingFamily.SI> {
let Defs = !if(DefExec, [VCC, EXEC], [VCC]);
- let hasSideEffects = DefExec;
+ let isConvergent = DefExec;
let SchedRW = sched;
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
}
} // End AssemblerPredicates = [isSICI]
let AssemblerPredicates = [isVI] in {
def _vi : VOPC<op.VI, ins, asm, []>,
- SIMCInstr <opName#"_e32", SISubtarget.VI> {
+ SIMCInstr <opName#"_e32", SIEncodingFamily.VI> {
let Defs = !if(DefExec, [VCC, EXEC], [VCC]);
- let hasSideEffects = DefExec;
+ let isConvergent = DefExec;
let SchedRW = sched;
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
}
} // End AssemblerPredicates = [isVI]
@@ -1819,10 +2336,13 @@ multiclass VOPC_m <vopc op, dag ins, string op_asm, list<dag> pattern,
multiclass VOPC_Helper <vopc op, string opName, list<dag> pat32,
list<dag> pat64, bit DefExec, string revOp,
VOPProfile p, list<SchedReadWrite> sched> {
- defm _e32 : VOPC_m <op, p.Ins32, p.Asm32, pat32, opName, DefExec, p, sched>;
+ defm _e32 : VOPC_m <op, p.Ins32, p.Asm32, pat32, opName, DefExec, p, sched,
+ revOp>;
- defm _e64 : VOP3_C_m <op, (outs VOPDstS64:$dst), p.Ins64, opName#p.Asm64, pat64,
+ defm _e64 : VOP3_C_m <op, (outs VOPDstS64:$sdst), p.Ins64, opName#p.Asm64, pat64,
opName, p.HasModifiers, DefExec, revOp, sched>;
+
+ def _sdwa : VOPC_SDWA <op, opName, DefExec, p>;
}
// Special case for class instructions which only have modifiers on
@@ -1832,9 +2352,14 @@ multiclass VOPC_Class_Helper <vopc op, string opName, list<dag> pat32,
VOPProfile p, list<SchedReadWrite> sched> {
defm _e32 : VOPC_m <op, p.Ins32, p.Asm32, pat32, opName, DefExec, p, sched>;
- defm _e64 : VOP3_C_m <op, (outs VOPDstS64:$dst), p.Ins64, opName#p.Asm64, pat64,
+ defm _e64 : VOP3_C_m <op, (outs VOPDstS64:$sdst), p.Ins64, opName#p.Asm64, pat64,
opName, p.HasModifiers, DefExec, revOp, sched>,
VOP3DisableModFields<1, 0, 0>;
+
+ def _sdwa : VOPC_SDWA <op, opName, DefExec, p> {
+ let src1_fmodifiers = 0;
+ let src1_imodifiers = ?;
+ }
}
multiclass VOPCInst <vopc op, string opName,
@@ -1845,12 +2370,12 @@ multiclass VOPCInst <vopc op, string opName,
VOPC_Helper <
op, opName, [],
!if(P.HasModifiers,
- [(set i1:$dst,
+ [(set i1:$sdst,
(setcc (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
i1:$clamp, i32:$omod)),
(P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
cond))],
- [(set i1:$dst, (setcc P.Src0VT:$src0, P.Src1VT:$src1, cond))]),
+ [(set i1:$sdst, (setcc P.Src0VT:$src0, P.Src1VT:$src1, cond))]),
DefExec, revOp, P, sched
>;
@@ -1859,9 +2384,9 @@ multiclass VOPCClassInst <vopc op, string opName, VOPProfile P,
list<SchedReadWrite> sched> : VOPC_Class_Helper <
op, opName, [],
!if(P.HasModifiers,
- [(set i1:$dst,
+ [(set i1:$sdst,
(AMDGPUfp_class (P.Src0VT (VOP3Mods0Clamp0OMod P.Src0VT:$src0, i32:$src0_modifiers)), P.Src1VT:$src1))],
- [(set i1:$dst, (AMDGPUfp_class P.Src0VT:$src0, P.Src1VT:$src1))]),
+ [(set i1:$sdst, (AMDGPUfp_class P.Src0VT:$src0, P.Src1VT:$src1))]),
DefExec, opName, P, sched
>;
@@ -1897,10 +2422,6 @@ multiclass VOPCX_I32 <vopc op, string opName, string revOp = opName> :
multiclass VOPCX_I64 <vopc op, string opName, string revOp = opName> :
VOPCX <op, opName, VOPC_I1_I64_I64, COND_NULL, [Write64Bit], revOp>;
-multiclass VOP3_Helper <vop3 op, string opName, dag outs, dag ins, string asm,
- list<dag> pat, int NumSrcArgs, bit HasMods> : VOP3_m <
- op, outs, ins, opName#" "#asm, pat, opName, NumSrcArgs, HasMods
->;
multiclass VOPC_CLASS_F32 <vopc op, string opName> :
VOPCClassInst <op, opName, VOPC_I1_F32_I32, 0, [Write32Bit]>;
@@ -1914,32 +2435,40 @@ multiclass VOPC_CLASS_F64 <vopc op, string opName> :
multiclass VOPCX_CLASS_F64 <vopc op, string opName> :
VOPCClassInst <op, opName, VOPC_I1_F64_I32, 1, [WriteDoubleAdd]>;
+
+multiclass VOP3_Helper <vop3 op, string opName, dag outs, dag ins, string asm,
+ list<dag> pat, int NumSrcArgs, bit HasMods,
+ bit VOP3Only = 0> : VOP3_m <
+ op, outs, ins, opName#" "#asm, pat, opName, NumSrcArgs, HasMods, VOP3Only
+>;
+
multiclass VOP3Inst <vop3 op, string opName, VOPProfile P,
- SDPatternOperator node = null_frag> : VOP3_Helper <
- op, opName, (outs P.DstRC.RegClass:$dst), P.Ins64, P.Asm64,
+ SDPatternOperator node = null_frag, bit VOP3Only = 0> :
+ VOP3_Helper <
+ op, opName, (outs P.DstRC.RegClass:$vdst), P.Ins64, P.Asm64,
!if(!eq(P.NumSrcArgs, 3),
!if(P.HasModifiers,
- [(set P.DstVT:$dst,
+ [(set P.DstVT:$vdst,
(node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
i1:$clamp, i32:$omod)),
(P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
(P.Src2VT (VOP3Mods P.Src2VT:$src2, i32:$src2_modifiers))))],
- [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1,
+ [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1,
P.Src2VT:$src2))]),
!if(!eq(P.NumSrcArgs, 2),
!if(P.HasModifiers,
- [(set P.DstVT:$dst,
+ [(set P.DstVT:$vdst,
(node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
i1:$clamp, i32:$omod)),
(P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
- [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))])
+ [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1))])
/* P.NumSrcArgs == 1 */,
!if(P.HasModifiers,
- [(set P.DstVT:$dst,
+ [(set P.DstVT:$vdst,
(node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
i1:$clamp, i32:$omod))))],
- [(set P.DstVT:$dst, (node P.Src0VT:$src0))]))),
- P.NumSrcArgs, P.HasModifiers
+ [(set P.DstVT:$vdst, (node P.Src0VT:$src0))]))),
+ P.NumSrcArgs, P.HasModifiers, VOP3Only
>;
// Special case for v_div_fmas_{f32|f64}, since it seems to be the
@@ -1948,14 +2477,14 @@ multiclass VOP3_VCC_Inst <vop3 op, string opName,
VOPProfile P,
SDPatternOperator node = null_frag> : VOP3_Helper <
op, opName,
- (outs P.DstRC.RegClass:$dst),
- (ins InputModsNoDefault:$src0_modifiers, P.Src0RC64:$src0,
- InputModsNoDefault:$src1_modifiers, P.Src1RC64:$src1,
- InputModsNoDefault:$src2_modifiers, P.Src2RC64:$src2,
- ClampMod:$clamp,
+ (outs P.DstRC.RegClass:$vdst),
+ (ins FPInputMods:$src0_modifiers, P.Src0RC64:$src0,
+ FPInputMods:$src1_modifiers, P.Src1RC64:$src1,
+ FPInputMods:$src2_modifiers, P.Src2RC64:$src2,
+ clampmod:$clamp,
omod:$omod),
- "$dst, $src0_modifiers, $src1_modifiers, $src2_modifiers"#"$clamp"#"$omod",
- [(set P.DstVT:$dst,
+ "$vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers"#"$clamp"#"$omod",
+ [(set P.DstVT:$vdst,
(node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers,
i1:$clamp, i32:$omod)),
(P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)),
@@ -1964,11 +2493,11 @@ multiclass VOP3_VCC_Inst <vop3 op, string opName,
3, 1
>;
-multiclass VOP3bInst <vop op, string opName, VOPProfile P, list<dag> pattern = []> :
+multiclass VOP3bInst <vop op, string opName, VOPProfile P, list<dag> pattern = [], bit VOP3Only = 0> :
VOP3b_2_3_m <
op, P.Outs64, P.Ins64,
opName#" "#P.Asm64, pattern,
- opName, "", 1, 1
+ opName, "", 1, 1, VOP3Only
>;
class Vop3ModPat<Instruction Inst, VOPProfile P, SDPatternOperator node> : Pat<
@@ -1987,7 +2516,7 @@ class Vop3ModPat<Instruction Inst, VOPProfile P, SDPatternOperator node> : Pat<
class VINTRP_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
VINTRPCommon <outs, ins, "", pattern>,
- SIMCInstr<opName, SISubtarget.NONE> {
+ SIMCInstr<opName, SIEncodingFamily.NONE> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -1996,13 +2525,21 @@ class VINTRP_Real_si <bits <2> op, string opName, dag outs, dag ins,
string asm> :
VINTRPCommon <outs, ins, asm, []>,
VINTRPe <op>,
- SIMCInstr<opName, SISubtarget.SI>;
+ SIMCInstr<opName, SIEncodingFamily.SI> {
+ let AssemblerPredicate = SIAssemblerPredicate;
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
+}
class VINTRP_Real_vi <bits <2> op, string opName, dag outs, dag ins,
string asm> :
VINTRPCommon <outs, ins, asm, []>,
VINTRPe_vi <op>,
- SIMCInstr<opName, SISubtarget.VI>;
+ SIMCInstr<opName, SIEncodingFamily.VI> {
+ let AssemblerPredicate = VIAssemblerPredicate;
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
+}
multiclass VINTRP_m <bits <2> op, dag outs, dag ins, string asm,
list<dag> pattern = []> {
@@ -2019,7 +2556,7 @@ multiclass VINTRP_m <bits <2> op, dag outs, dag ins, string asm,
class DS_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
DS <outs, ins, "", pattern>,
- SIMCInstr <opName, SISubtarget.NONE> {
+ SIMCInstr <opName, SIEncodingFamily.NONE> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -2027,14 +2564,22 @@ class DS_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
class DS_Real_si <bits<8> op, string opName, dag outs, dag ins, string asm> :
DS <outs, ins, asm, []>,
DSe <op>,
- SIMCInstr <opName, SISubtarget.SI> {
+ SIMCInstr <opName, SIEncodingFamily.SI> {
let isCodeGenOnly = 0;
+ let AssemblerPredicates = [isSICI];
+ let DecoderNamespace="SICI";
+ let DisableDecoder = DisableSIDecoder;
}
class DS_Real_vi <bits<8> op, string opName, dag outs, dag ins, string asm> :
DS <outs, ins, asm, []>,
DSe_vi <op>,
- SIMCInstr <opName, SISubtarget.VI>;
+ SIMCInstr <opName, SIEncodingFamily.VI> {
+ let isCodeGenOnly = 0;
+ let AssemblerPredicates = [isVI];
+ let DecoderNamespace="VI";
+ let DisableDecoder = DisableVIDecoder;
+}
class DS_Off16_Real_si <bits<8> op, string opName, dag outs, dag ins, string asm> :
DS_Real_si <op,opName, outs, ins, asm> {
@@ -2043,7 +2588,6 @@ class DS_Off16_Real_si <bits<8> op, string opName, dag outs, dag ins, string asm
bits<16> offset;
let offset0 = offset{7-0};
let offset1 = offset{15-8};
- let isCodeGenOnly = 0;
}
class DS_Off16_Real_vi <bits<8> op, string opName, dag outs, dag ins, string asm> :
@@ -2055,9 +2599,24 @@ class DS_Off16_Real_vi <bits<8> op, string opName, dag outs, dag ins, string asm
let offset1 = offset{15-8};
}
+multiclass DS_1A_RET_ <dsop op, string opName, RegisterClass rc,
+ dag outs = (outs rc:$vdst),
+ dag ins = (ins VGPR_32:$addr, offset:$offset, gds:$gds),
+ string asm = opName#" $vdst, $addr"#"$offset$gds"> {
+
+ def "" : DS_Pseudo <opName, outs, ins, []>;
+
+ let data0 = 0, data1 = 0 in {
+ def _si : DS_Off16_Real_si <op.SI, opName, outs, ins, asm>;
+ def _vi : DS_Off16_Real_vi <op.VI, opName, outs, ins, asm>;
+ }
+}
+
+// TODO: DS_1A_RET should be inheritable from DS_1A_RET_, but it's not working
+// for some reason (see the sketch below). In fact, we can remove this class
+// entirely if we use dsop everywhere.
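+// A minimal sketch (an untested assumption, not part of this patch) of what
+// that inheritance could look like once the issue is resolved:
+//
+//   multiclass DS_1A_RET <dsop op, string opName, RegisterClass rc> :
+//     DS_1A_RET_ <op, opName, rc>;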
multiclass DS_1A_RET <bits<8> op, string opName, RegisterClass rc,
dag outs = (outs rc:$vdst),
- dag ins = (ins VGPR_32:$addr, ds_offset:$offset, gds:$gds),
+ dag ins = (ins VGPR_32:$addr, offset:$offset, gds:$gds),
string asm = opName#" $vdst, $addr"#"$offset$gds"> {
def "" : DS_Pseudo <opName, outs, ins, []>;
@@ -2070,8 +2629,8 @@ multiclass DS_1A_RET <bits<8> op, string opName, RegisterClass rc,
multiclass DS_1A_Off8_RET <bits<8> op, string opName, RegisterClass rc,
dag outs = (outs rc:$vdst),
- dag ins = (ins VGPR_32:$addr, ds_offset0:$offset0, ds_offset1:$offset1,
- gds01:$gds),
+ dag ins = (ins VGPR_32:$addr, offset0:$offset0, offset1:$offset1,
+ gds:$gds),
string asm = opName#" $vdst, $addr"#"$offset0"#"$offset1$gds"> {
def "" : DS_Pseudo <opName, outs, ins, []>;
@@ -2084,7 +2643,7 @@ multiclass DS_1A_Off8_RET <bits<8> op, string opName, RegisterClass rc,
multiclass DS_1A1D_NORET <bits<8> op, string opName, RegisterClass rc,
dag outs = (outs),
- dag ins = (ins VGPR_32:$addr, rc:$data0, ds_offset:$offset, gds:$gds),
+ dag ins = (ins VGPR_32:$addr, rc:$data0, offset:$offset, gds:$gds),
string asm = opName#" $addr, $data0"#"$offset$gds"> {
def "" : DS_Pseudo <opName, outs, ins, []>,
@@ -2096,11 +2655,25 @@ multiclass DS_1A1D_NORET <bits<8> op, string opName, RegisterClass rc,
}
}
-multiclass DS_1A1D_Off8_NORET <bits<8> op, string opName, RegisterClass rc,
+multiclass DS_1A_Off8_NORET <bits<8> op, string opName,
+ dag outs = (outs),
+ dag ins = (ins VGPR_32:$addr,
+ offset0:$offset0, offset1:$offset1, gds:$gds),
+ string asm = opName#" $addr $offset0"#"$offset1$gds"> {
+
+ def "" : DS_Pseudo <opName, outs, ins, []>;
+
+ let data0 = 0, data1 = 0, vdst = 0, AsmMatchConverter = "cvtDSOffset01" in {
+ def _si : DS_Real_si <op, opName, outs, ins, asm>;
+ def _vi : DS_Real_vi <op, opName, outs, ins, asm>;
+ }
+}
+
+multiclass DS_1A2D_Off8_NORET <bits<8> op, string opName, RegisterClass rc,
dag outs = (outs),
dag ins = (ins VGPR_32:$addr, rc:$data0, rc:$data1,
- ds_offset0:$offset0, ds_offset1:$offset1, gds01:$gds),
- string asm = opName#" $addr, $data0, $data1"#"$offset0"#"$offset1"#"$gds"> {
+ offset0:$offset0, offset1:$offset1, gds:$gds),
+ string asm = opName#" $addr, $data0, $data1$offset0$offset1$gds"> {
def "" : DS_Pseudo <opName, outs, ins, []>;
@@ -2113,7 +2686,7 @@ multiclass DS_1A1D_Off8_NORET <bits<8> op, string opName, RegisterClass rc,
multiclass DS_1A1D_RET <bits<8> op, string opName, RegisterClass rc,
string noRetOp = "",
dag outs = (outs rc:$vdst),
- dag ins = (ins VGPR_32:$addr, rc:$data0, ds_offset:$offset, gds:$gds),
+ dag ins = (ins VGPR_32:$addr, rc:$data0, offset:$offset, gds:$gds),
string asm = opName#" $vdst, $addr, $data0"#"$offset$gds"> {
let hasPostISelHook = 1 in {
@@ -2127,6 +2700,23 @@ multiclass DS_1A1D_RET <bits<8> op, string opName, RegisterClass rc,
}
}
+multiclass DS_1A1D_PERMUTE <bits<8> op, string opName, RegisterClass rc,
+ SDPatternOperator node = null_frag,
+ dag outs = (outs rc:$vdst),
+ dag ins = (ins VGPR_32:$addr, rc:$data0, offset:$offset),
+ string asm = opName#" $vdst, $addr, $data0"#"$offset"> {
+
+ let mayLoad = 0, mayStore = 0, isConvergent = 1 in {
+ def "" : DS_Pseudo <opName, outs, ins,
+ [(set i32:$vdst,
+ (node (DS1Addr1Offset i32:$addr, i16:$offset), i32:$data0))]>;
+
+ let data1 = 0, gds = 0 in {
+ def "_vi" : DS_Off16_Real_vi <op, opName, outs, ins, asm>;
+ }
+ }
+}
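+// Usage sketch (assuming the amdgcn lane-shuffle intrinsics route through
+// this multiclass): ds_permute_b32 moves data between lanes without touching
+// LDS memory, hence mayLoad = 0 and mayStore = 0 above, e.g.
+//   %v = call i32 @llvm.amdgcn.ds.permute(i32 %addr, i32 %data)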
+
multiclass DS_1A2D_RET_m <bits<8> op, string opName, RegisterClass rc,
string noRetOp = "", dag ins,
dag outs = (outs rc:$vdst),
@@ -2145,14 +2735,14 @@ multiclass DS_1A2D_RET <bits<8> op, string asm, RegisterClass rc,
string noRetOp = "", RegisterClass src = rc> :
DS_1A2D_RET_m <op, asm, rc, noRetOp,
(ins VGPR_32:$addr, src:$data0, src:$data1,
- ds_offset:$offset, gds:$gds)
+ offset:$offset, gds:$gds)
>;
multiclass DS_1A2D_NORET <bits<8> op, string opName, RegisterClass rc,
string noRetOp = opName,
dag outs = (outs),
dag ins = (ins VGPR_32:$addr, rc:$data0, rc:$data1,
- ds_offset:$offset, gds:$gds),
+ offset:$offset, gds:$gds),
string asm = opName#" $addr, $data0, $data1"#"$offset"#"$gds"> {
def "" : DS_Pseudo <opName, outs, ins, []>,
@@ -2166,7 +2756,7 @@ multiclass DS_1A2D_NORET <bits<8> op, string opName, RegisterClass rc,
multiclass DS_0A_RET <bits<8> op, string opName,
dag outs = (outs VGPR_32:$vdst),
- dag ins = (ins ds_offset:$offset, gds:$gds),
+ dag ins = (ins offset:$offset, gds:$gds),
string asm = opName#" $vdst"#"$offset"#"$gds"> {
let mayLoad = 1, mayStore = 1 in {
@@ -2181,7 +2771,7 @@ multiclass DS_0A_RET <bits<8> op, string opName,
multiclass DS_1A_RET_GDS <bits<8> op, string opName,
dag outs = (outs VGPR_32:$vdst),
- dag ins = (ins VGPR_32:$addr, ds_offset_gds:$offset),
+ dag ins = (ins VGPR_32:$addr, offset:$offset),
string asm = opName#" $vdst, $addr"#"$offset gds"> {
def "" : DS_Pseudo <opName, outs, ins, []>;
@@ -2207,7 +2797,7 @@ multiclass DS_1A_GDS <bits<8> op, string opName,
multiclass DS_1A <bits<8> op, string opName,
dag outs = (outs),
- dag ins = (ins VGPR_32:$addr, ds_offset:$offset, gds:$gds),
+ dag ins = (ins VGPR_32:$addr, offset:$offset, gds:$gds),
string asm = opName#" $addr"#"$offset"#"$gds"> {
let mayLoad = 1, mayStore = 1 in {
@@ -2226,7 +2816,7 @@ multiclass DS_1A <bits<8> op, string opName,
class MTBUF_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
MTBUF <outs, ins, "", pattern>,
- SIMCInstr<opName, SISubtarget.NONE> {
+ SIMCInstr<opName, SIEncodingFamily.NONE> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -2235,12 +2825,18 @@ class MTBUF_Real_si <bits<3> op, string opName, dag outs, dag ins,
string asm> :
MTBUF <outs, ins, asm, []>,
MTBUFe <op>,
- SIMCInstr<opName, SISubtarget.SI>;
+ SIMCInstr<opName, SIEncodingFamily.SI> {
+ let DecoderNamespace="SICI";
+ let DisableDecoder = DisableSIDecoder;
+}
class MTBUF_Real_vi <bits<4> op, string opName, dag outs, dag ins, string asm> :
MTBUF <outs, ins, asm, []>,
MTBUFe_vi <op>,
- SIMCInstr <opName, SISubtarget.VI>;
+ SIMCInstr <opName, SIEncodingFamily.VI> {
+ let DecoderNamespace="VI";
+ let DisableDecoder = DisableVIDecoder;
+}
multiclass MTBUF_m <bits<3> op, string opName, dag outs, dag ins, string asm,
list<dag> pattern> {
@@ -2311,7 +2907,7 @@ class MUBUFAddr64Table <bit is_addr64, string suffix = ""> {
class MUBUF_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
MUBUF <outs, ins, "", pattern>,
- SIMCInstr<opName, SISubtarget.NONE> {
+ SIMCInstr<opName, SIEncodingFamily.NONE> {
let isPseudo = 1;
let isCodeGenOnly = 1;
@@ -2329,16 +2925,22 @@ class MUBUF_Real_si <mubuf op, string opName, dag outs, dag ins,
string asm> :
MUBUF <outs, ins, asm, []>,
MUBUFe <op.SI>,
- SIMCInstr<opName, SISubtarget.SI> {
+ SIMCInstr<opName, SIEncodingFamily.SI> {
let lds = 0;
+ let AssemblerPredicate = SIAssemblerPredicate;
+ let DecoderNamespace="SICI";
+ let DisableDecoder = DisableSIDecoder;
}
class MUBUF_Real_vi <mubuf op, string opName, dag outs, dag ins,
string asm> :
MUBUF <outs, ins, asm, []>,
MUBUFe_vi <op.VI>,
- SIMCInstr<opName, SISubtarget.VI> {
+ SIMCInstr<opName, SIEncodingFamily.VI> {
let lds = 0;
+ let AssemblerPredicate = VIAssemblerPredicate;
+ let DecoderNamespace="VI";
+ let DisableDecoder = DisableVIDecoder;
}
multiclass MUBUF_m <mubuf op, string opName, dag outs, dag ins, string asm,
@@ -2399,38 +3001,82 @@ multiclass MUBUFAtomicAddr64_m <mubuf op, string opName, dag outs, dag ins,
// for VI appropriately.
}
+multiclass MUBUFAtomicOther_m <mubuf op, string opName, dag outs, dag ins,
+ string asm, list<dag> pattern, bit is_return> {
+
+ def "" : MUBUF_Pseudo <opName, outs, ins, pattern>,
+ AtomicNoRet<opName, is_return>;
+
+ let tfe = 0 in {
+ let addr64 = 0 in {
+ def _si : MUBUF_Real_si <op, opName, outs, ins, asm>;
+ }
+
+ def _vi : MUBUF_Real_vi <op, opName, outs, ins, asm>;
+ }
+}
+
multiclass MUBUF_Atomic <mubuf op, string name, RegisterClass rc,
ValueType vt, SDPatternOperator atomic> {
- let mayStore = 1, mayLoad = 1, hasPostISelHook = 1 in {
+ let mayStore = 1, mayLoad = 1, hasPostISelHook = 1, hasSideEffects = 1 in {
// No return variants
- let glc = 0 in {
+ let glc = 0, AsmMatchConverter = "cvtMubufAtomic" in {
defm _ADDR64 : MUBUFAtomicAddr64_m <
op, name#"_addr64", (outs),
(ins rc:$vdata, VReg_64:$vaddr, SReg_128:$srsrc,
- SCSrc_32:$soffset, mbuf_offset:$offset, slc:$slc),
- name#" $vdata, $vaddr, $srsrc, $soffset addr64"#"$offset"#"$slc", [], 0
+ SCSrc_32:$soffset, offset:$offset, slc:$slc),
+ name#" $vdata, $vaddr, $srsrc, $soffset addr64$offset$slc", [], 0
>;
defm _OFFSET : MUBUFAtomicOffset_m <
op, name#"_offset", (outs),
- (ins rc:$vdata, SReg_128:$srsrc, SCSrc_32:$soffset, mbuf_offset:$offset,
+ (ins rc:$vdata, SReg_128:$srsrc, SCSrc_32:$soffset, offset:$offset,
slc:$slc),
- name#" $vdata, $srsrc, $soffset"#"$offset"#"$slc", [], 0
+ name#" $vdata, off, $srsrc, $soffset$offset$slc", [], 0
>;
+
+ let offen = 1, idxen = 0 in {
+ defm _OFFEN : MUBUFAtomicOther_m <
+ op, name#"_offen", (outs),
+ (ins rc:$vdata, VGPR_32:$vaddr, SReg_128:$srsrc, SCSrc_32:$soffset,
+ offset:$offset, slc:$slc),
+ name#" $vdata, $vaddr, $srsrc, $soffset offen$offset$slc", [], 0
+ >;
+ }
+
+ let offen = 0, idxen = 1 in {
+ defm _IDXEN : MUBUFAtomicOther_m <
+ op, name#"_idxen", (outs),
+ (ins rc:$vdata, VGPR_32:$vaddr, SReg_128:$srsrc, SCSrc_32:$soffset,
+ offset:$offset, slc:$slc),
+ name#" $vdata, $vaddr, $srsrc, $soffset idxen$offset$slc", [], 0
+ >;
+ }
+
+ let offen = 1, idxen = 1 in {
+ defm _BOTHEN : MUBUFAtomicOther_m <
+ op, name#"_bothen", (outs),
+ (ins rc:$vdata, VReg_64:$vaddr, SReg_128:$srsrc, SCSrc_32:$soffset,
+ offset:$offset, slc:$slc),
+ name#" $vdata, $vaddr, $srsrc, $soffset idxen offen$offset$slc",
+ [], 0
+ >;
+ }
} // glc = 0
// Variant that return values
let glc = 1, Constraints = "$vdata = $vdata_in",
+ AsmMatchConverter = "cvtMubufAtomicReturn",
DisableEncoding = "$vdata_in" in {
defm _RTN_ADDR64 : MUBUFAtomicAddr64_m <
op, name#"_rtn_addr64", (outs rc:$vdata),
(ins rc:$vdata_in, VReg_64:$vaddr, SReg_128:$srsrc,
- SCSrc_32:$soffset, mbuf_offset:$offset, slc:$slc),
- name#" $vdata, $vaddr, $srsrc, $soffset addr64"#"$offset"#" glc"#"$slc",
+ SCSrc_32:$soffset, offset:$offset, slc:$slc),
+ name#" $vdata, $vaddr, $srsrc, $soffset addr64$offset glc$slc",
[(set vt:$vdata,
(atomic (MUBUFAddr64Atomic v4i32:$srsrc, i64:$vaddr, i32:$soffset,
i16:$offset, i1:$slc), vt:$vdata_in))], 1
@@ -2439,13 +3085,42 @@ multiclass MUBUF_Atomic <mubuf op, string name, RegisterClass rc,
defm _RTN_OFFSET : MUBUFAtomicOffset_m <
op, name#"_rtn_offset", (outs rc:$vdata),
(ins rc:$vdata_in, SReg_128:$srsrc, SCSrc_32:$soffset,
- mbuf_offset:$offset, slc:$slc),
- name#" $vdata, $srsrc, $soffset"#"$offset"#" glc$slc",
+ offset:$offset, slc:$slc),
+ name#" $vdata, off, $srsrc, $soffset$offset glc$slc",
[(set vt:$vdata,
(atomic (MUBUFOffsetAtomic v4i32:$srsrc, i32:$soffset, i16:$offset,
i1:$slc), vt:$vdata_in))], 1
>;
+ let offen = 1, idxen = 0 in {
+ defm _RTN_OFFEN : MUBUFAtomicOther_m <
+ op, name#"_rtn_offen", (outs rc:$vdata),
+ (ins rc:$vdata_in, VGPR_32:$vaddr, SReg_128:$srsrc, SCSrc_32:$soffset,
+ offset:$offset, slc:$slc),
+ name#" $vdata, $vaddr, $srsrc, $soffset offen$offset glc$slc",
+ [], 1
+ >;
+ }
+
+ let offen = 0, idxen = 1 in {
+ defm _RTN_IDXEN : MUBUFAtomicOther_m <
+ op, name#"_rtn_idxen", (outs rc:$vdata),
+ (ins rc:$vdata_in, VGPR_32:$vaddr, SReg_128:$srsrc, SCSrc_32:$soffset,
+ offset:$offset, slc:$slc),
+ name#" $vdata, $vaddr, $srsrc, $soffset idxen$offset glc$slc",
+ [], 1
+ >;
+ }
+
+ let offen = 1, idxen = 1 in {
+ defm _RTN_BOTHEN : MUBUFAtomicOther_m <
+ op, name#"_rtn_bothen", (outs rc:$vdata),
+ (ins rc:$vdata_in, VReg_64:$vaddr, SReg_128:$srsrc, SCSrc_32:$soffset,
+ offset:$offset, slc:$slc),
+ name#" $vdata, $vaddr, $srsrc, $soffset idxen offen$offset glc$slc",
+ [], 1
+ >;
+ }
} // glc = 1
} // mayStore = 1, mayLoad = 1, hasPostISelHook = 1, hasSideEffects = 1
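// A sketch of the generated variants (names follow the defm suffixes above):
// BUFFER_ATOMIC_ADD_OFFSET (glc = 0) performs the atomic and discards the old
// value, while BUFFER_ATOMIC_ADD_RTN_OFFSET (glc = 1) returns it in $vdata,
// with $vdata_in tied to the same register by the Constraints above.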
@@ -2461,8 +3136,8 @@ multiclass MUBUF_Load_Helper <mubuf op, string name, RegisterClass regClass,
let offen = 0, idxen = 0, vaddr = 0 in {
defm _OFFSET : MUBUF_m <op, name#"_offset", (outs regClass:$vdata),
(ins SReg_128:$srsrc, SCSrc_32:$soffset,
- mbuf_offset:$offset, glc:$glc, slc:$slc, tfe:$tfe),
- name#" $vdata, $srsrc, $soffset"#"$offset"#"$glc"#"$slc"#"$tfe",
+ offset:$offset, glc:$glc, slc:$slc, tfe:$tfe),
+ name#" $vdata, off, $srsrc, $soffset$offset$glc$slc$tfe",
[(set load_vt:$vdata, (ld (MUBUFOffset v4i32:$srsrc,
i32:$soffset, i16:$offset,
i1:$glc, i1:$slc, i1:$tfe)))]>;
@@ -2471,33 +3146,32 @@ multiclass MUBUF_Load_Helper <mubuf op, string name, RegisterClass regClass,
let offen = 1, idxen = 0 in {
defm _OFFEN : MUBUF_m <op, name#"_offen", (outs regClass:$vdata),
(ins VGPR_32:$vaddr, SReg_128:$srsrc,
- SCSrc_32:$soffset, mbuf_offset:$offset, glc:$glc, slc:$slc,
+ SCSrc_32:$soffset, offset:$offset, glc:$glc, slc:$slc,
tfe:$tfe),
- name#" $vdata, $vaddr, $srsrc, $soffset offen"#"$offset"#"$glc"#"$slc"#"$tfe", []>;
+ name#" $vdata, $vaddr, $srsrc, $soffset offen$offset$glc$slc$tfe", []>;
}
let offen = 0, idxen = 1 in {
defm _IDXEN : MUBUF_m <op, name#"_idxen", (outs regClass:$vdata),
(ins VGPR_32:$vaddr, SReg_128:$srsrc,
- SCSrc_32:$soffset, mbuf_offset:$offset, glc:$glc,
+ SCSrc_32:$soffset, offset:$offset, glc:$glc,
slc:$slc, tfe:$tfe),
- name#" $vdata, $vaddr, $srsrc, $soffset idxen"#"$offset"#"$glc"#"$slc"#"$tfe", []>;
+ name#" $vdata, $vaddr, $srsrc, $soffset idxen$offset$glc$slc$tfe", []>;
}
let offen = 1, idxen = 1 in {
defm _BOTHEN : MUBUF_m <op, name#"_bothen", (outs regClass:$vdata),
(ins VReg_64:$vaddr, SReg_128:$srsrc, SCSrc_32:$soffset,
- mbuf_offset:$offset, glc:$glc, slc:$slc, tfe:$tfe),
- name#" $vdata, $vaddr, $srsrc, $soffset idxen offen"#"$offset"#"$glc"#"$slc"#"$tfe", []>;
+ offset:$offset, glc:$glc, slc:$slc, tfe:$tfe),
+ name#" $vdata, $vaddr, $srsrc, $soffset idxen offen$offset$glc$slc$tfe", []>;
}
let offen = 0, idxen = 0 in {
defm _ADDR64 : MUBUFAddr64_m <op, name#"_addr64", (outs regClass:$vdata),
(ins VReg_64:$vaddr, SReg_128:$srsrc,
- SCSrc_32:$soffset, mbuf_offset:$offset,
+ SCSrc_32:$soffset, offset:$offset,
glc:$glc, slc:$slc, tfe:$tfe),
- name#" $vdata, $vaddr, $srsrc, $soffset addr64"#"$offset"#
- "$glc"#"$slc"#"$tfe",
+ name#" $vdata, $vaddr, $srsrc, $soffset addr64$offset$glc$slc$tfe",
[(set load_vt:$vdata, (ld (MUBUFAddr64 v4i32:$srsrc,
i64:$vaddr, i32:$soffset,
i16:$offset, i1:$glc, i1:$slc,
@@ -2509,18 +3183,11 @@ multiclass MUBUF_Load_Helper <mubuf op, string name, RegisterClass regClass,
multiclass MUBUF_Store_Helper <mubuf op, string name, RegisterClass vdataClass,
ValueType store_vt = i32, SDPatternOperator st = null_frag> {
let mayLoad = 0, mayStore = 1 in {
- defm : MUBUF_m <op, name, (outs),
- (ins vdataClass:$vdata, VGPR_32:$vaddr, SReg_128:$srsrc, SCSrc_32:$soffset,
- mbuf_offset:$offset, offen:$offen, idxen:$idxen, glc:$glc, slc:$slc,
- tfe:$tfe),
- name#" $vdata, $vaddr, $srsrc, $soffset"#"$offen"#"$idxen"#"$offset"#
- "$glc"#"$slc"#"$tfe", []>;
-
let offen = 0, idxen = 0, vaddr = 0 in {
defm _OFFSET : MUBUF_m <op, name#"_offset",(outs),
(ins vdataClass:$vdata, SReg_128:$srsrc, SCSrc_32:$soffset,
- mbuf_offset:$offset, glc:$glc, slc:$slc, tfe:$tfe),
- name#" $vdata, $srsrc, $soffset"#"$offset"#"$glc"#"$slc"#"$tfe",
+ offset:$offset, glc:$glc, slc:$slc, tfe:$tfe),
+ name#" $vdata, off, $srsrc, $soffset$offset$glc$slc$tfe",
[(st store_vt:$vdata, (MUBUFOffset v4i32:$srsrc, i32:$soffset,
i16:$offset, i1:$glc, i1:$slc, i1:$tfe))]>;
} // offen = 0, idxen = 0, vaddr = 0
@@ -2528,35 +3195,35 @@ multiclass MUBUF_Store_Helper <mubuf op, string name, RegisterClass vdataClass,
let offen = 1, idxen = 0 in {
defm _OFFEN : MUBUF_m <op, name#"_offen", (outs),
(ins vdataClass:$vdata, VGPR_32:$vaddr, SReg_128:$srsrc,
- SCSrc_32:$soffset, mbuf_offset:$offset, glc:$glc,
+ SCSrc_32:$soffset, offset:$offset, glc:$glc,
slc:$slc, tfe:$tfe),
- name#" $vdata, $vaddr, $srsrc, $soffset offen"#"$offset"#
- "$glc"#"$slc"#"$tfe", []>;
+ name#" $vdata, $vaddr, $srsrc, $soffset offen"#
+ "$offset$glc$slc$tfe", []>;
} // end offen = 1, idxen = 0
let offen = 0, idxen = 1 in {
defm _IDXEN : MUBUF_m <op, name#"_idxen", (outs),
(ins vdataClass:$vdata, VGPR_32:$vaddr, SReg_128:$srsrc,
- SCSrc_32:$soffset, mbuf_offset:$offset, glc:$glc,
+ SCSrc_32:$soffset, offset:$offset, glc:$glc,
slc:$slc, tfe:$tfe),
- name#" $vdata, $vaddr, $srsrc, $soffset idxen"#"$offset"#"$glc"#"$slc"#"$tfe", []>;
+ name#" $vdata, $vaddr, $srsrc, $soffset idxen$offset$glc$slc$tfe", []>;
}
let offen = 1, idxen = 1 in {
defm _BOTHEN : MUBUF_m <op, name#"_bothen", (outs),
(ins vdataClass:$vdata, VReg_64:$vaddr, SReg_128:$srsrc, SCSrc_32:$soffset,
- mbuf_offset:$offset, glc:$glc, slc:$slc, tfe:$tfe),
- name#" $vdata, $vaddr, $srsrc, $soffset idxen offen"#"$offset"#"$glc"#"$slc"#"$tfe", []>;
+ offset:$offset, glc:$glc, slc:$slc, tfe:$tfe),
+ name#" $vdata, $vaddr, $srsrc, $soffset idxen offen$offset$glc$slc$tfe", []>;
}
let offen = 0, idxen = 0 in {
defm _ADDR64 : MUBUFAddr64_m <op, name#"_addr64", (outs),
(ins vdataClass:$vdata, VReg_64:$vaddr, SReg_128:$srsrc,
SCSrc_32:$soffset,
- mbuf_offset:$offset, glc:$glc, slc:$slc,
+ offset:$offset, glc:$glc, slc:$slc,
tfe:$tfe),
name#" $vdata, $vaddr, $srsrc, $soffset addr64"#
- "$offset"#"$glc"#"$slc"#"$tfe",
+ "$offset$glc$slc$tfe",
[(st store_vt:$vdata,
(MUBUFAddr64 v4i32:$srsrc, i64:$vaddr,
i32:$soffset, i16:$offset,
@@ -2593,21 +3260,24 @@ class flat <bits<7> ci, bits<7> vi = ci> {
class FLAT_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
FLAT <0, outs, ins, "", pattern>,
- SIMCInstr<opName, SISubtarget.NONE> {
+ SIMCInstr<opName, SIEncodingFamily.NONE> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
class FLAT_Real_ci <bits<7> op, string opName, dag outs, dag ins, string asm> :
FLAT <op, outs, ins, asm, []>,
- SIMCInstr<opName, SISubtarget.SI> {
+ SIMCInstr<opName, SIEncodingFamily.SI> {
let AssemblerPredicate = isCIOnly;
+ let DecoderNamespace="CI";
}
class FLAT_Real_vi <bits<7> op, string opName, dag outs, dag ins, string asm> :
FLAT <op, outs, ins, asm, []>,
- SIMCInstr<opName, SISubtarget.VI> {
+ SIMCInstr<opName, SIEncodingFamily.VI> {
let AssemblerPredicate = VIAssemblerPredicate;
+ let DecoderNamespace="VI";
+ let DisableDecoder = DisableVIDecoder;
}
multiclass FLAT_AtomicRet_m <flat op, dag outs, dag ins, string asm,
@@ -2623,8 +3293,8 @@ multiclass FLAT_AtomicRet_m <flat op, dag outs, dag ins, string asm,
multiclass FLAT_Load_Helper <flat op, string asm_name,
RegisterClass regClass,
dag outs = (outs regClass:$vdst),
- dag ins = (ins VReg_64:$addr, glc_flat:$glc, slc_flat:$slc, tfe_flat:$tfe),
- string asm = asm_name#" $vdst, $addr"#"$glc"#"$slc"#"$tfe"> {
+ dag ins = (ins VReg_64:$addr, glc:$glc, slc:$slc, tfe:$tfe),
+ string asm = asm_name#" $vdst, $addr$glc$slc$tfe"> {
let data = 0, mayLoad = 1 in {
@@ -2639,9 +3309,9 @@ multiclass FLAT_Load_Helper <flat op, string asm_name,
multiclass FLAT_Store_Helper <flat op, string asm_name,
RegisterClass vdataClass,
dag outs = (outs),
- dag ins = (ins vdataClass:$data, VReg_64:$addr, glc_flat:$glc,
- slc_flat:$slc, tfe_flat:$tfe),
- string asm = asm_name#" $data, $addr"#"$glc"#"$slc"#"$tfe"> {
+ dag ins = (ins VReg_64:$addr, vdataClass:$data, glc:$glc,
+ slc:$slc, tfe:$tfe),
+ string asm = asm_name#" $addr, $data$glc$slc$tfe"> {
let mayLoad = 0, mayStore = 1, vdst = 0 in {
@@ -2654,32 +3324,36 @@ multiclass FLAT_Store_Helper <flat op, string asm_name,
}
multiclass FLAT_ATOMIC <flat op, string asm_name, RegisterClass vdst_rc,
+ ValueType vt, SDPatternOperator atomic = null_frag,
+ ValueType data_vt = vt,
RegisterClass data_rc = vdst_rc,
- dag outs_noret = (outs),
string asm_noret = asm_name#" $addr, $data"#"$slc"#"$tfe"> {
let mayLoad = 1, mayStore = 1, glc = 0, vdst = 0 in {
- def "" : FLAT_Pseudo <NAME, outs_noret,
+ def "" : FLAT_Pseudo <NAME, (outs),
(ins VReg_64:$addr, data_rc:$data,
- slc_flat_atomic:$slc, tfe_flat_atomic:$tfe), []>,
+ slc:$slc, tfe:$tfe), []>,
AtomicNoRet <NAME, 0>;
- def _ci : FLAT_Real_ci <op.CI, NAME, outs_noret,
+ def _ci : FLAT_Real_ci <op.CI, NAME, (outs),
(ins VReg_64:$addr, data_rc:$data,
- slc_flat_atomic:$slc, tfe_flat_atomic:$tfe),
+ slc:$slc, tfe:$tfe),
asm_noret>;
- def _vi : FLAT_Real_vi <op.VI, NAME, outs_noret,
+ def _vi : FLAT_Real_vi <op.VI, NAME, (outs),
(ins VReg_64:$addr, data_rc:$data,
- slc_flat_atomic:$slc, tfe_flat_atomic:$tfe),
+ slc:$slc, tfe:$tfe),
asm_noret>;
}
let glc = 1, hasPostISelHook = 1 in {
- defm _RTN : FLAT_AtomicRet_m <op, (outs vdst_rc:$vdst),
- (ins VReg_64:$addr, data_rc:$data, slc_flat_atomic:$slc,
- tfe_flat_atomic:$tfe),
- asm_name#" $vdst, $addr, $data glc"#"$slc"#"$tfe", []>;
+ defm _RTN : FLAT_AtomicRet_m <
+ op, (outs vdst_rc:$vdst),
+ (ins VReg_64:$addr, data_rc:$data, slc:$slc, tfe:$tfe),
+ asm_name#" $vdst, $addr, $data glc$slc$tfe",
+ [(set vt:$vdst,
+ (atomic (FLATAtomic i64:$addr, i1:$slc, i1:$tfe), data_vt:$data))]
+ >;
}
}
@@ -2688,27 +3362,39 @@ class MIMG_Mask <string op, int channels> {
int Channels = channels;
}
+class mimg <bits<7> si, bits<7> vi = si> {
+ field bits<7> SI = si;
+ field bits<7> VI = vi;
+}
+
+class MIMG_Helper <dag outs, dag ins, string asm,
+                   string dns = ""> : MIMG <outs, ins, asm, []> {
+ let mayLoad = 1;
+ let mayStore = 0;
+ let hasPostISelHook = 1;
+ let DecoderNamespace = dns;
+  let isAsmParserOnly = !if(!eq(dns, ""), 1, 0);
+ let AsmMatchConverter = "cvtMIMG";
+}
+
class MIMG_NoSampler_Helper <bits<7> op, string asm,
RegisterClass dst_rc,
- RegisterClass src_rc> : MIMG <
- op,
+ RegisterClass addr_rc,
+                             string dns = ""> : MIMG_Helper <
(outs dst_rc:$vdata),
- (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
- i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr,
- SReg_256:$srsrc),
- asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
- #" $tfe, $lwe, $slc, $vaddr, $srsrc",
- []> {
+ (ins addr_rc:$vaddr, SReg_256:$srsrc,
+ dmask:$dmask, unorm:$unorm, glc:$glc, slc:$slc,
+ r128:$r128, tfe:$tfe, lwe:$lwe, da:$da),
+ asm#" $vdata, $vaddr, $srsrc$dmask$unorm$glc$slc$r128$tfe$lwe$da",
+ dns>, MIMGe<op> {
let ssamp = 0;
- let mayLoad = 1;
- let mayStore = 0;
- let hasPostISelHook = 1;
}
multiclass MIMG_NoSampler_Src_Helper <bits<7> op, string asm,
RegisterClass dst_rc,
int channels> {
- def _V1 : MIMG_NoSampler_Helper <op, asm, dst_rc, VGPR_32>,
+ def _V1 : MIMG_NoSampler_Helper <op, asm, dst_rc, VGPR_32,
+ !if(!eq(channels, 1), "AMDGPU", "")>,
MIMG_Mask<asm#"_V1", channels>;
def _V2 : MIMG_NoSampler_Helper <op, asm, dst_rc, VReg_64>,
MIMG_Mask<asm#"_V2", channels>;
@@ -2723,27 +3409,116 @@ multiclass MIMG_NoSampler <bits<7> op, string asm> {
defm _V4 : MIMG_NoSampler_Src_Helper <op, asm, VReg_128, 4>;
}
+class MIMG_Store_Helper <bits<7> op, string asm,
+ RegisterClass data_rc,
+ RegisterClass addr_rc> : MIMG_Helper <
+ (outs),
+ (ins data_rc:$vdata, addr_rc:$vaddr, SReg_256:$srsrc,
+ dmask:$dmask, unorm:$unorm, glc:$glc, slc:$slc,
+ r128:$r128, tfe:$tfe, lwe:$lwe, da:$da),
+ asm#" $vdata, $vaddr, $srsrc$dmask$unorm$glc$slc$r128$tfe$lwe$da"
+ >, MIMGe<op> {
+ let ssamp = 0;
+ let mayLoad = 1; // TableGen requires this for matching with the intrinsics
+ let mayStore = 1;
+ let hasSideEffects = 1;
+ let hasPostISelHook = 0;
+}
+
+multiclass MIMG_Store_Addr_Helper <bits<7> op, string asm,
+ RegisterClass data_rc,
+ int channels> {
+ def _V1 : MIMG_Store_Helper <op, asm, data_rc, VGPR_32>,
+ MIMG_Mask<asm#"_V1", channels>;
+ def _V2 : MIMG_Store_Helper <op, asm, data_rc, VReg_64>,
+ MIMG_Mask<asm#"_V2", channels>;
+ def _V4 : MIMG_Store_Helper <op, asm, data_rc, VReg_128>,
+ MIMG_Mask<asm#"_V4", channels>;
+}
+
+multiclass MIMG_Store <bits<7> op, string asm> {
+ defm _V1 : MIMG_Store_Addr_Helper <op, asm, VGPR_32, 1>;
+ defm _V2 : MIMG_Store_Addr_Helper <op, asm, VReg_64, 2>;
+ defm _V3 : MIMG_Store_Addr_Helper <op, asm, VReg_96, 3>;
+ defm _V4 : MIMG_Store_Addr_Helper <op, asm, VReg_128, 4>;
+}
+
+class MIMG_Atomic_Helper <string asm, RegisterClass data_rc,
+ RegisterClass addr_rc> : MIMG_Helper <
+ (outs data_rc:$vdst),
+ (ins data_rc:$vdata, addr_rc:$vaddr, SReg_256:$srsrc,
+ dmask:$dmask, unorm:$unorm, glc:$glc, slc:$slc,
+ r128:$r128, tfe:$tfe, lwe:$lwe, da:$da),
+ asm#" $vdst, $vaddr, $srsrc$dmask$unorm$glc$slc$r128$tfe$lwe$da"
+ > {
+ let mayStore = 1;
+ let hasSideEffects = 1;
+ let hasPostISelHook = 0;
+ let Constraints = "$vdst = $vdata";
+ let AsmMatchConverter = "cvtMIMGAtomic";
+}
+
+class MIMG_Atomic_Real_si<mimg op, string name, string asm,
+ RegisterClass data_rc, RegisterClass addr_rc> :
+ MIMG_Atomic_Helper<asm, data_rc, addr_rc>,
+ SIMCInstr<name, SIEncodingFamily.SI>,
+ MIMGe<op.SI> {
+ let isCodeGenOnly = 0;
+ let AssemblerPredicates = [isSICI];
+ let DecoderNamespace = "SICI";
+ let DisableDecoder = DisableSIDecoder;
+}
+
+class MIMG_Atomic_Real_vi<mimg op, string name, string asm,
+ RegisterClass data_rc, RegisterClass addr_rc> :
+ MIMG_Atomic_Helper<asm, data_rc, addr_rc>,
+ SIMCInstr<name, SIEncodingFamily.VI>,
+ MIMGe<op.VI> {
+ let isCodeGenOnly = 0;
+ let AssemblerPredicates = [isVI];
+ let DecoderNamespace = "VI";
+ let DisableDecoder = DisableVIDecoder;
+}
+
+multiclass MIMG_Atomic_Helper_m <mimg op, string name, string asm,
+ RegisterClass data_rc, RegisterClass addr_rc> {
+ let isPseudo = 1, isCodeGenOnly = 1 in {
+ def "" : MIMG_Atomic_Helper<asm, data_rc, addr_rc>,
+ SIMCInstr<name, SIEncodingFamily.NONE>;
+ }
+
+ let ssamp = 0 in {
+ def _si : MIMG_Atomic_Real_si<op, name, asm, data_rc, addr_rc>;
+
+ def _vi : MIMG_Atomic_Real_vi<op, name, asm, data_rc, addr_rc>;
+ }
+}
+
+multiclass MIMG_Atomic <mimg op, string asm, RegisterClass data_rc = VGPR_32> {
+ defm _V1 : MIMG_Atomic_Helper_m <op, asm # "_V1", asm, data_rc, VGPR_32>;
+ defm _V2 : MIMG_Atomic_Helper_m <op, asm # "_V2", asm, data_rc, VReg_64>;
+  defm _V4 : MIMG_Atomic_Helper_m <op, asm # "_V4", asm, data_rc, VReg_128>;
+}
+
class MIMG_Sampler_Helper <bits<7> op, string asm,
RegisterClass dst_rc,
- RegisterClass src_rc, int wqm> : MIMG <
- op,
+ RegisterClass src_rc,
+ int wqm,
+                           string dns = ""> : MIMG_Helper <
(outs dst_rc:$vdata),
- (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
- i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr,
- SReg_256:$srsrc, SReg_128:$ssamp),
- asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
- #" $tfe, $lwe, $slc, $vaddr, $srsrc, $ssamp",
- []> {
- let mayLoad = 1;
- let mayStore = 0;
- let hasPostISelHook = 1;
+ (ins src_rc:$vaddr, SReg_256:$srsrc, SReg_128:$ssamp,
+ dmask:$dmask, unorm:$unorm, glc:$glc, slc:$slc,
+ r128:$r128, tfe:$tfe, lwe:$lwe, da:$da),
+ asm#" $vdata, $vaddr, $srsrc, $ssamp$dmask$unorm$glc$slc$r128$tfe$lwe$da",
+ dns>, MIMGe<op> {
let WQM = wqm;
}
multiclass MIMG_Sampler_Src_Helper <bits<7> op, string asm,
RegisterClass dst_rc,
int channels, int wqm> {
- def _V1 : MIMG_Sampler_Helper <op, asm, dst_rc, VGPR_32, wqm>,
+ def _V1 : MIMG_Sampler_Helper <op, asm, dst_rc, VGPR_32, wqm,
+ !if(!eq(channels, 1), "AMDGPU", "")>,
MIMG_Mask<asm#"_V1", channels>;
def _V2 : MIMG_Sampler_Helper <op, asm, dst_rc, VReg_64, wqm>,
MIMG_Mask<asm#"_V2", channels>;
@@ -2755,31 +3530,24 @@ multiclass MIMG_Sampler_Src_Helper <bits<7> op, string asm,
MIMG_Mask<asm#"_V16", channels>;
}
-multiclass MIMG_Sampler <bits<7> op, string asm> {
- defm _V1 : MIMG_Sampler_Src_Helper<op, asm, VGPR_32, 1, 0>;
- defm _V2 : MIMG_Sampler_Src_Helper<op, asm, VReg_64, 2, 0>;
- defm _V3 : MIMG_Sampler_Src_Helper<op, asm, VReg_96, 3, 0>;
- defm _V4 : MIMG_Sampler_Src_Helper<op, asm, VReg_128, 4, 0>;
+multiclass MIMG_Sampler <bits<7> op, string asm, int wqm=0> {
+ defm _V1 : MIMG_Sampler_Src_Helper<op, asm, VGPR_32, 1, wqm>;
+ defm _V2 : MIMG_Sampler_Src_Helper<op, asm, VReg_64, 2, wqm>;
+ defm _V3 : MIMG_Sampler_Src_Helper<op, asm, VReg_96, 3, wqm>;
+ defm _V4 : MIMG_Sampler_Src_Helper<op, asm, VReg_128, 4, wqm>;
}
-multiclass MIMG_Sampler_WQM <bits<7> op, string asm> {
- defm _V1 : MIMG_Sampler_Src_Helper<op, asm, VGPR_32, 1, 1>;
- defm _V2 : MIMG_Sampler_Src_Helper<op, asm, VReg_64, 2, 1>;
- defm _V3 : MIMG_Sampler_Src_Helper<op, asm, VReg_96, 3, 1>;
- defm _V4 : MIMG_Sampler_Src_Helper<op, asm, VReg_128, 4, 1>;
-}
+multiclass MIMG_Sampler_WQM <bits<7> op, string asm> : MIMG_Sampler<op, asm, 1>;
class MIMG_Gather_Helper <bits<7> op, string asm,
RegisterClass dst_rc,
RegisterClass src_rc, int wqm> : MIMG <
- op,
(outs dst_rc:$vdata),
- (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128,
- i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr,
- SReg_256:$srsrc, SReg_128:$ssamp),
- asm#" $vdata, $dmask, $unorm, $glc, $da, $r128,"
- #" $tfe, $lwe, $slc, $vaddr, $srsrc, $ssamp",
- []> {
+ (ins src_rc:$vaddr, SReg_256:$srsrc, SReg_128:$ssamp,
+ dmask:$dmask, unorm:$unorm, glc:$glc, slc:$slc,
+ r128:$r128, tfe:$tfe, lwe:$lwe, da:$da),
+ asm#" $vdata, $vaddr, $srsrc, $ssamp$dmask$unorm$glc$slc$r128$tfe$lwe$da",
+ []>, MIMGe<op> {
let mayLoad = 1;
let mayStore = 0;
@@ -2789,10 +3557,12 @@ class MIMG_Gather_Helper <bits<7> op, string asm,
// 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
// (red,red,red,red) etc.) The ISA document doesn't mention
// this.
- // Therefore, disable all code which updates DMASK by setting these two:
- let MIMG = 0;
+ // Therefore, disable all code which updates DMASK by setting this:
+ let Gather4 = 1;
let hasPostISelHook = 0;
let WQM = wqm;
+
+  let isAsmParserOnly = 1; // TODO: fix this later
}
multiclass MIMG_Gather_Src_Helper <bits<7> op, string asm,
@@ -2810,19 +3580,14 @@ multiclass MIMG_Gather_Src_Helper <bits<7> op, string asm,
MIMG_Mask<asm#"_V16", channels>;
}
-multiclass MIMG_Gather <bits<7> op, string asm> {
- defm _V1 : MIMG_Gather_Src_Helper<op, asm, VGPR_32, 1, 0>;
- defm _V2 : MIMG_Gather_Src_Helper<op, asm, VReg_64, 2, 0>;
- defm _V3 : MIMG_Gather_Src_Helper<op, asm, VReg_96, 3, 0>;
- defm _V4 : MIMG_Gather_Src_Helper<op, asm, VReg_128, 4, 0>;
+multiclass MIMG_Gather <bits<7> op, string asm, int wqm=0> {
+ defm _V1 : MIMG_Gather_Src_Helper<op, asm, VGPR_32, 1, wqm>;
+ defm _V2 : MIMG_Gather_Src_Helper<op, asm, VReg_64, 2, wqm>;
+ defm _V3 : MIMG_Gather_Src_Helper<op, asm, VReg_96, 3, wqm>;
+ defm _V4 : MIMG_Gather_Src_Helper<op, asm, VReg_128, 4, wqm>;
}
-multiclass MIMG_Gather_WQM <bits<7> op, string asm> {
- defm _V1 : MIMG_Gather_Src_Helper<op, asm, VGPR_32, 1, 1>;
- defm _V2 : MIMG_Gather_Src_Helper<op, asm, VReg_64, 2, 1>;
- defm _V3 : MIMG_Gather_Src_Helper<op, asm, VReg_96, 3, 1>;
- defm _V4 : MIMG_Gather_Src_Helper<op, asm, VReg_128, 4, 1>;
-}
+multiclass MIMG_Gather_WQM <bits<7> op, string asm> : MIMG_Gather<op, asm, 1>;
//===----------------------------------------------------------------------===//
// Vector instruction mappings
@@ -2894,8 +3659,9 @@ def getMCOpcodeGen : InstrMapping {
let FilterClass = "SIMCInstr";
let RowFields = ["PseudoInstr"];
let ColFields = ["Subtarget"];
- let KeyCol = [!cast<string>(SISubtarget.NONE)];
- let ValueCols = [[!cast<string>(SISubtarget.SI)],[!cast<string>(SISubtarget.VI)]];
+ let KeyCol = [!cast<string>(SIEncodingFamily.NONE)];
+ let ValueCols = [[!cast<string>(SIEncodingFamily.SI)],
+ [!cast<string>(SIEncodingFamily.VI)]];
}
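// TableGen emits this mapping as a generated lookup, roughly
// getMCOpcodeGen(PseudoOpcode, Subtarget): rows are keyed by the shared
// PseudoInstr string, the NONE column is the pseudo, and the SI/VI columns
// give the real encodings substituted at MC lowering time.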
def getAddr64Inst : InstrMapping {
diff --git a/lib/Target/AMDGPU/SIInstructions.td b/lib/Target/AMDGPU/SIInstructions.td
index 89692ab71f4d..6427db87cd6f 100644
--- a/lib/Target/AMDGPU/SIInstructions.td
+++ b/lib/Target/AMDGPU/SIInstructions.td
@@ -18,35 +18,17 @@ int P20 = 1;
}
def INTERP : InterpSlots;
-def InterpSlot : Operand<i32> {
- let PrintMethod = "printInterpSlot";
-}
-
-def SendMsgImm : Operand<i32> {
- let PrintMethod = "printSendMsg";
-}
-
def isGCN : Predicate<"Subtarget->getGeneration() "
- ">= AMDGPUSubtarget::SOUTHERN_ISLANDS">,
+ ">= SISubtarget::SOUTHERN_ISLANDS">,
AssemblerPredicate<"FeatureGCN">;
def isSI : Predicate<"Subtarget->getGeneration() "
- "== AMDGPUSubtarget::SOUTHERN_ISLANDS">,
+ "== SISubtarget::SOUTHERN_ISLANDS">,
AssemblerPredicate<"FeatureSouthernIslands">;
def has16BankLDS : Predicate<"Subtarget->getLDSBankCount() == 16">;
def has32BankLDS : Predicate<"Subtarget->getLDSBankCount() == 32">;
-def SWaitMatchClass : AsmOperandClass {
- let Name = "SWaitCnt";
- let RenderMethod = "addImmOperands";
- let ParserMethod = "parseSWaitCntOps";
-}
-
-def WAIT_FLAG : InstFlag<"printWaitFlag"> {
- let ParserMatchClass = SWaitMatchClass;
-}
-
let SubtargetPredicate = isGCN in {
//===----------------------------------------------------------------------===//
@@ -59,17 +41,17 @@ defm EXP : EXP_m;
// SMRD Instructions
//===----------------------------------------------------------------------===//
-// We are using the SGPR_32 and not the SReg_32 register class for 32-bit
-// SMRD instructions, because the SGPR_32 register class does not include M0
+// We are using the SReg_32_XM0 and not the SReg_32 register class for 32-bit
+// SMRD instructions, because the SReg_32_XM0 register class does not include M0
// and writing to M0 from an SMRD instruction will hang the GPU.
-defm S_LOAD_DWORD : SMRD_Helper <smrd<0x00>, "s_load_dword", SReg_64, SGPR_32>;
+defm S_LOAD_DWORD : SMRD_Helper <smrd<0x00>, "s_load_dword", SReg_64, SReg_32_XM0>;
defm S_LOAD_DWORDX2 : SMRD_Helper <smrd<0x01>, "s_load_dwordx2", SReg_64, SReg_64>;
defm S_LOAD_DWORDX4 : SMRD_Helper <smrd<0x02>, "s_load_dwordx4", SReg_64, SReg_128>;
defm S_LOAD_DWORDX8 : SMRD_Helper <smrd<0x03>, "s_load_dwordx8", SReg_64, SReg_256>;
defm S_LOAD_DWORDX16 : SMRD_Helper <smrd<0x04>, "s_load_dwordx16", SReg_64, SReg_512>;
defm S_BUFFER_LOAD_DWORD : SMRD_Helper <
- smrd<0x08>, "s_buffer_load_dword", SReg_128, SGPR_32
+ smrd<0x08>, "s_buffer_load_dword", SReg_128, SReg_32_XM0
>;
defm S_BUFFER_LOAD_DWORDX2 : SMRD_Helper <
@@ -88,7 +70,15 @@ defm S_BUFFER_LOAD_DWORDX16 : SMRD_Helper <
smrd<0x0c>, "s_buffer_load_dwordx16", SReg_128, SReg_512
>;
-//def S_MEMTIME : SMRD_ <0x0000001e, "s_memtime", []>;
+let mayStore = ? in {
+// FIXME: mayStore = ? is a workaround for a tablegen bug that infers
+// different mayStore flags for the instruction pattern vs. a standalone
+// Pat. Each considers the other contradictory.
+
+defm S_MEMTIME : SMRD_Special <smrd<0x1e, 0x24>, "s_memtime",
+ (outs SReg_64:$sdst), ?, " $sdst", [(set i64:$sdst, (int_amdgcn_s_memtime))]
+>;
+}
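+// Illustration of the workaround above (hypothetical, not in this patch):
+// with an explicit mayStore, a separate anonymous pattern such as
+//   def : Pat <(int_amdgcn_s_memtime), (S_MEMTIME)>;
+// would be inferred with a conflicting mayStore and rejected; leaving the
+// flag as ? sidesteps the inference.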
defm S_DCACHE_INV : SMRD_Inval <smrd<0x1f, 0x20>, "s_dcache_inv",
int_amdgcn_s_dcache_inv>;
@@ -101,7 +91,7 @@ let isMoveImm = 1 in {
let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
defm S_MOV_B32 : SOP1_32 <sop1<0x03, 0x00>, "s_mov_b32", []>;
defm S_MOV_B64 : SOP1_64 <sop1<0x04, 0x01>, "s_mov_b64", []>;
- } // let isRematerializeable = 1
+  } // End isReMaterializable = 1
let Uses = [SCC] in {
defm S_CMOV_B32 : SOP1_32 <sop1<0x05, 0x02>, "s_cmov_b32", []>;
@@ -111,11 +101,11 @@ let isMoveImm = 1 in {
let Defs = [SCC] in {
defm S_NOT_B32 : SOP1_32 <sop1<0x07, 0x04>, "s_not_b32",
- [(set i32:$dst, (not i32:$src0))]
+ [(set i32:$sdst, (not i32:$src0))]
>;
defm S_NOT_B64 : SOP1_64 <sop1<0x08, 0x05>, "s_not_b64",
- [(set i64:$dst, (not i64:$src0))]
+ [(set i64:$sdst, (not i64:$src0))]
>;
defm S_WQM_B32 : SOP1_32 <sop1<0x09, 0x06>, "s_wqm_b32", []>;
defm S_WQM_B64 : SOP1_64 <sop1<0x0a, 0x07>, "s_wqm_b64", []>;
@@ -123,7 +113,7 @@ let Defs = [SCC] in {
defm S_BREV_B32 : SOP1_32 <sop1<0x0b, 0x08>, "s_brev_b32",
- [(set i32:$dst, (bitreverse i32:$src0))]
+ [(set i32:$sdst, (bitreverse i32:$src0))]
>;
defm S_BREV_B64 : SOP1_64 <sop1<0x0c, 0x09>, "s_brev_b64", []>;
@@ -131,7 +121,7 @@ let Defs = [SCC] in {
defm S_BCNT0_I32_B32 : SOP1_32 <sop1<0x0d, 0x0a>, "s_bcnt0_i32_b32", []>;
defm S_BCNT0_I32_B64 : SOP1_32_64 <sop1<0x0e, 0x0b>, "s_bcnt0_i32_b64", []>;
defm S_BCNT1_I32_B32 : SOP1_32 <sop1<0x0f, 0x0c>, "s_bcnt1_i32_b32",
- [(set i32:$dst, (ctpop i32:$src0))]
+ [(set i32:$sdst, (ctpop i32:$src0))]
>;
defm S_BCNT1_I32_B64 : SOP1_32_64 <sop1<0x10, 0x0d>, "s_bcnt1_i32_b64", []>;
} // End Defs = [SCC]
@@ -139,34 +129,34 @@ let Defs = [SCC] in {
defm S_FF0_I32_B32 : SOP1_32 <sop1<0x11, 0x0e>, "s_ff0_i32_b32", []>;
defm S_FF0_I32_B64 : SOP1_32_64 <sop1<0x12, 0x0f>, "s_ff0_i32_b64", []>;
defm S_FF1_I32_B32 : SOP1_32 <sop1<0x13, 0x10>, "s_ff1_i32_b32",
- [(set i32:$dst, (cttz_zero_undef i32:$src0))]
+ [(set i32:$sdst, (cttz_zero_undef i32:$src0))]
>;
defm S_FF1_I32_B64 : SOP1_32_64 <sop1<0x14, 0x11>, "s_ff1_i32_b64", []>;
defm S_FLBIT_I32_B32 : SOP1_32 <sop1<0x15, 0x12>, "s_flbit_i32_b32",
- [(set i32:$dst, (AMDGPUffbh_u32 i32:$src0))]
+ [(set i32:$sdst, (AMDGPUffbh_u32 i32:$src0))]
>;
defm S_FLBIT_I32_B64 : SOP1_32_64 <sop1<0x16, 0x13>, "s_flbit_i32_b64", []>;
defm S_FLBIT_I32 : SOP1_32 <sop1<0x17, 0x14>, "s_flbit_i32",
- [(set i32:$dst, (int_AMDGPU_flbit_i32 i32:$src0))]
+ [(set i32:$sdst, (int_AMDGPU_flbit_i32 i32:$src0))]
>;
defm S_FLBIT_I32_I64 : SOP1_32_64 <sop1<0x18, 0x15>, "s_flbit_i32_i64", []>;
defm S_SEXT_I32_I8 : SOP1_32 <sop1<0x19, 0x16>, "s_sext_i32_i8",
- [(set i32:$dst, (sext_inreg i32:$src0, i8))]
+ [(set i32:$sdst, (sext_inreg i32:$src0, i8))]
>;
defm S_SEXT_I32_I16 : SOP1_32 <sop1<0x1a, 0x17>, "s_sext_i32_i16",
- [(set i32:$dst, (sext_inreg i32:$src0, i16))]
+ [(set i32:$sdst, (sext_inreg i32:$src0, i16))]
>;
defm S_BITSET0_B32 : SOP1_32 <sop1<0x1b, 0x18>, "s_bitset0_b32", []>;
-defm S_BITSET0_B64 : SOP1_64 <sop1<0x1c, 0x19>, "s_bitset0_b64", []>;
+defm S_BITSET0_B64 : SOP1_64_32 <sop1<0x1c, 0x19>, "s_bitset0_b64", []>;
defm S_BITSET1_B32 : SOP1_32 <sop1<0x1d, 0x1a>, "s_bitset1_b32", []>;
-defm S_BITSET1_B64 : SOP1_64 <sop1<0x1e, 0x1b>, "s_bitset1_b64", []>;
+defm S_BITSET1_B64 : SOP1_64_32 <sop1<0x1e, 0x1b>, "s_bitset1_b64", []>;
defm S_GETPC_B64 : SOP1_64_0 <sop1<0x1f, 0x1c>, "s_getpc_b64", []>;
-defm S_SETPC_B64 : SOP1_64 <sop1<0x20, 0x1d>, "s_setpc_b64", []>;
+defm S_SETPC_B64 : SOP1_1 <sop1<0x20, 0x1d>, "s_setpc_b64", []>;
defm S_SWAPPC_B64 : SOP1_64 <sop1<0x21, 0x1e>, "s_swappc_b64", []>;
-defm S_RFE_B64 : SOP1_64 <sop1<0x22, 0x1f>, "s_rfe_b64", []>;
+defm S_RFE_B64 : SOP1_1 <sop1<0x22, 0x1f>, "s_rfe_b64", []>;
let hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC] in {
@@ -206,36 +196,36 @@ let Defs = [SCC] in { // Carry out goes to SCC
let isCommutable = 1 in {
defm S_ADD_U32 : SOP2_32 <sop2<0x00>, "s_add_u32", []>;
defm S_ADD_I32 : SOP2_32 <sop2<0x02>, "s_add_i32",
- [(set i32:$dst, (add SSrc_32:$src0, SSrc_32:$src1))]
+ [(set i32:$sdst, (add SSrc_32:$src0, SSrc_32:$src1))]
>;
} // End isCommutable = 1
defm S_SUB_U32 : SOP2_32 <sop2<0x01>, "s_sub_u32", []>;
defm S_SUB_I32 : SOP2_32 <sop2<0x03>, "s_sub_i32",
- [(set i32:$dst, (sub SSrc_32:$src0, SSrc_32:$src1))]
+ [(set i32:$sdst, (sub SSrc_32:$src0, SSrc_32:$src1))]
>;
let Uses = [SCC] in { // Carry in comes from SCC
let isCommutable = 1 in {
defm S_ADDC_U32 : SOP2_32 <sop2<0x04>, "s_addc_u32",
- [(set i32:$dst, (adde (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
+ [(set i32:$sdst, (adde (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
} // End isCommutable = 1
defm S_SUBB_U32 : SOP2_32 <sop2<0x05>, "s_subb_u32",
- [(set i32:$dst, (sube (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
+ [(set i32:$sdst, (sube (i32 SSrc_32:$src0), (i32 SSrc_32:$src1)))]>;
} // End Uses = [SCC]
defm S_MIN_I32 : SOP2_32 <sop2<0x06>, "s_min_i32",
- [(set i32:$dst, (smin i32:$src0, i32:$src1))]
+ [(set i32:$sdst, (smin i32:$src0, i32:$src1))]
>;
defm S_MIN_U32 : SOP2_32 <sop2<0x07>, "s_min_u32",
- [(set i32:$dst, (umin i32:$src0, i32:$src1))]
+ [(set i32:$sdst, (umin i32:$src0, i32:$src1))]
>;
defm S_MAX_I32 : SOP2_32 <sop2<0x08>, "s_max_i32",
- [(set i32:$dst, (smax i32:$src0, i32:$src1))]
+ [(set i32:$sdst, (smax i32:$src0, i32:$src1))]
>;
defm S_MAX_U32 : SOP2_32 <sop2<0x09>, "s_max_u32",
- [(set i32:$dst, (umax i32:$src0, i32:$src1))]
+ [(set i32:$sdst, (umax i32:$src0, i32:$src1))]
>;
} // End Defs = [SCC]
@@ -247,27 +237,27 @@ let Uses = [SCC] in {
let Defs = [SCC] in {
defm S_AND_B32 : SOP2_32 <sop2<0x0e, 0x0c>, "s_and_b32",
- [(set i32:$dst, (and i32:$src0, i32:$src1))]
+ [(set i32:$sdst, (and i32:$src0, i32:$src1))]
>;
defm S_AND_B64 : SOP2_64 <sop2<0x0f, 0x0d>, "s_and_b64",
- [(set i64:$dst, (and i64:$src0, i64:$src1))]
+ [(set i64:$sdst, (and i64:$src0, i64:$src1))]
>;
defm S_OR_B32 : SOP2_32 <sop2<0x10, 0x0e>, "s_or_b32",
- [(set i32:$dst, (or i32:$src0, i32:$src1))]
+ [(set i32:$sdst, (or i32:$src0, i32:$src1))]
>;
defm S_OR_B64 : SOP2_64 <sop2<0x11, 0x0f>, "s_or_b64",
- [(set i64:$dst, (or i64:$src0, i64:$src1))]
+ [(set i64:$sdst, (or i64:$src0, i64:$src1))]
>;
defm S_XOR_B32 : SOP2_32 <sop2<0x12, 0x10>, "s_xor_b32",
- [(set i32:$dst, (xor i32:$src0, i32:$src1))]
+ [(set i32:$sdst, (xor i32:$src0, i32:$src1))]
>;
defm S_XOR_B64 : SOP2_64 <sop2<0x13, 0x11>, "s_xor_b64",
- [(set i64:$dst, (xor i64:$src0, i64:$src1))]
+ [(set i64:$sdst, (xor i64:$src0, i64:$src1))]
>;
defm S_ANDN2_B32 : SOP2_32 <sop2<0x14, 0x12>, "s_andn2_b32", []>;
defm S_ANDN2_B64 : SOP2_64 <sop2<0x15, 0x13>, "s_andn2_b64", []>;
@@ -286,30 +276,30 @@ let AddedComplexity = 1 in {
let Defs = [SCC] in {
defm S_LSHL_B32 : SOP2_32 <sop2<0x1e, 0x1c>, "s_lshl_b32",
- [(set i32:$dst, (shl i32:$src0, i32:$src1))]
+ [(set i32:$sdst, (shl i32:$src0, i32:$src1))]
>;
defm S_LSHL_B64 : SOP2_64_32 <sop2<0x1f, 0x1d>, "s_lshl_b64",
- [(set i64:$dst, (shl i64:$src0, i32:$src1))]
+ [(set i64:$sdst, (shl i64:$src0, i32:$src1))]
>;
defm S_LSHR_B32 : SOP2_32 <sop2<0x20, 0x1e>, "s_lshr_b32",
- [(set i32:$dst, (srl i32:$src0, i32:$src1))]
+ [(set i32:$sdst, (srl i32:$src0, i32:$src1))]
>;
defm S_LSHR_B64 : SOP2_64_32 <sop2<0x21, 0x1f>, "s_lshr_b64",
- [(set i64:$dst, (srl i64:$src0, i32:$src1))]
+ [(set i64:$sdst, (srl i64:$src0, i32:$src1))]
>;
defm S_ASHR_I32 : SOP2_32 <sop2<0x22, 0x20>, "s_ashr_i32",
- [(set i32:$dst, (sra i32:$src0, i32:$src1))]
+ [(set i32:$sdst, (sra i32:$src0, i32:$src1))]
>;
defm S_ASHR_I64 : SOP2_64_32 <sop2<0x23, 0x21>, "s_ashr_i64",
- [(set i64:$dst, (sra i64:$src0, i32:$src1))]
+ [(set i64:$sdst, (sra i64:$src0, i32:$src1))]
>;
} // End Defs = [SCC]
defm S_BFM_B32 : SOP2_32 <sop2<0x24, 0x22>, "s_bfm_b32",
- [(set i32:$dst, (AMDGPUbfm i32:$src0, i32:$src1))]>;
-defm S_BFM_B64 : SOP2_64 <sop2<0x25, 0x23>, "s_bfm_b64", []>;
+ [(set i32:$sdst, (AMDGPUbfm i32:$src0, i32:$src1))]>;
+defm S_BFM_B64 : SOP2_64_32_32 <sop2<0x25, 0x23>, "s_bfm_b64", []>;
defm S_MUL_I32 : SOP2_32 <sop2<0x26, 0x24>, "s_mul_i32",
- [(set i32:$dst, (mul i32:$src0, i32:$src1))]
+ [(set i32:$sdst, (mul i32:$src0, i32:$src1))]
>;
} // End AddedComplexity = 1
@@ -317,7 +307,7 @@ defm S_MUL_I32 : SOP2_32 <sop2<0x26, 0x24>, "s_mul_i32",
let Defs = [SCC] in {
defm S_BFE_U32 : SOP2_32 <sop2<0x27, 0x25>, "s_bfe_u32", []>;
defm S_BFE_I32 : SOP2_32 <sop2<0x28, 0x26>, "s_bfe_i32", []>;
-defm S_BFE_U64 : SOP2_64 <sop2<0x29, 0x27>, "s_bfe_u64", []>;
+defm S_BFE_U64 : SOP2_64_32 <sop2<0x29, 0x27>, "s_bfe_u64", []>;
defm S_BFE_I64 : SOP2_64_32 <sop2<0x2a, 0x28>, "s_bfe_i64", []>;
} // End Defs = [SCC]
@@ -336,23 +326,23 @@ defm S_ABSDIFF_I32 : SOP2_32 <sop2<0x2c, 0x2a>, "s_absdiff_i32", []>;
// SOPC Instructions
//===----------------------------------------------------------------------===//
-def S_CMP_EQ_I32 : SOPC_32 <0x00000000, "s_cmp_eq_i32">;
-def S_CMP_LG_I32 : SOPC_32 <0x00000001, "s_cmp_lg_i32">;
-def S_CMP_GT_I32 : SOPC_32 <0x00000002, "s_cmp_gt_i32">;
-def S_CMP_GE_I32 : SOPC_32 <0x00000003, "s_cmp_ge_i32">;
-def S_CMP_LT_I32 : SOPC_32 <0x00000004, "s_cmp_lt_i32">;
-def S_CMP_LE_I32 : SOPC_32 <0x00000005, "s_cmp_le_i32">;
-def S_CMP_EQ_U32 : SOPC_32 <0x00000006, "s_cmp_eq_u32">;
-def S_CMP_LG_U32 : SOPC_32 <0x00000007, "s_cmp_lg_u32">;
-def S_CMP_GT_U32 : SOPC_32 <0x00000008, "s_cmp_gt_u32">;
-def S_CMP_GE_U32 : SOPC_32 <0x00000009, "s_cmp_ge_u32">;
-def S_CMP_LT_U32 : SOPC_32 <0x0000000a, "s_cmp_lt_u32">;
-def S_CMP_LE_U32 : SOPC_32 <0x0000000b, "s_cmp_le_u32">;
-////def S_BITCMP0_B32 : SOPC_BITCMP0 <0x0000000c, "s_bitcmp0_b32", []>;
-////def S_BITCMP1_B32 : SOPC_BITCMP1 <0x0000000d, "s_bitcmp1_b32", []>;
-////def S_BITCMP0_B64 : SOPC_BITCMP0 <0x0000000e, "s_bitcmp0_b64", []>;
-////def S_BITCMP1_B64 : SOPC_BITCMP1 <0x0000000f, "s_bitcmp1_b64", []>;
-//def S_SETVSKIP : SOPC_ <0x00000010, "s_setvskip", []>;
+def S_CMP_EQ_I32 : SOPC_CMP_32 <0x00000000, "s_cmp_eq_i32", COND_EQ>;
+def S_CMP_LG_I32 : SOPC_CMP_32 <0x00000001, "s_cmp_lg_i32", COND_NE>;
+def S_CMP_GT_I32 : SOPC_CMP_32 <0x00000002, "s_cmp_gt_i32", COND_SGT>;
+def S_CMP_GE_I32 : SOPC_CMP_32 <0x00000003, "s_cmp_ge_i32", COND_SGE>;
+def S_CMP_LT_I32 : SOPC_CMP_32 <0x00000004, "s_cmp_lt_i32", COND_SLT>;
+def S_CMP_LE_I32 : SOPC_CMP_32 <0x00000005, "s_cmp_le_i32", COND_SLE>;
+def S_CMP_EQ_U32 : SOPC_CMP_32 <0x00000006, "s_cmp_eq_u32", COND_EQ>;
+def S_CMP_LG_U32 : SOPC_CMP_32 <0x00000007, "s_cmp_lg_u32", COND_NE>;
+def S_CMP_GT_U32 : SOPC_CMP_32 <0x00000008, "s_cmp_gt_u32", COND_UGT>;
+def S_CMP_GE_U32 : SOPC_CMP_32 <0x00000009, "s_cmp_ge_u32", COND_UGE>;
+def S_CMP_LT_U32 : SOPC_CMP_32 <0x0000000a, "s_cmp_lt_u32", COND_ULT>;
+def S_CMP_LE_U32 : SOPC_CMP_32 <0x0000000b, "s_cmp_le_u32", COND_ULE>;
+def S_BITCMP0_B32 : SOPC_32 <0x0000000c, "s_bitcmp0_b32">;
+def S_BITCMP1_B32 : SOPC_32 <0x0000000d, "s_bitcmp1_b32">;
+def S_BITCMP0_B64 : SOPC_64_32 <0x0000000e, "s_bitcmp0_b64">;
+def S_BITCMP1_B64 : SOPC_64_32 <0x0000000f, "s_bitcmp1_b64">;
+def S_SETVSKIP : SOPC_32 <0x00000010, "s_setvskip">;
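+// With condition codes attached, a uniform 32-bit compare can (per the
+// SOPC_CMP_32 patterns, presumably of the form
+//   (set SCC, (si_setcc_uniform i32:$src0, i32:$src1, COND_NE))
+// for S_CMP_LG_U32) now be selected directly instead of going through
+// VALU compares.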
//===----------------------------------------------------------------------===//
// SOPK Instructions
@@ -408,16 +398,23 @@ defm S_CBRANCH_I_FORK : SOPK_m <
sopk<0x11, 0x10>, "s_cbranch_i_fork", (outs),
(ins SReg_64:$sdst, u16imm:$simm16), " $sdst, $simm16"
>;
-defm S_GETREG_B32 : SOPK_32 <sopk<0x12, 0x11>, "s_getreg_b32", []>;
+
+let mayLoad = 1 in {
+defm S_GETREG_B32 : SOPK_m <
+ sopk<0x12, 0x11>, "s_getreg_b32", (outs SReg_32:$sdst),
+ (ins hwreg:$simm16), " $sdst, $simm16"
+>;
+}
+
defm S_SETREG_B32 : SOPK_m <
sopk<0x13, 0x12>, "s_setreg_b32", (outs),
- (ins SReg_32:$sdst, u16imm:$simm16), " $sdst, $simm16"
+ (ins SReg_32:$sdst, hwreg:$simm16), " $simm16, $sdst"
>;
// FIXME: Not on SI?
//defm S_GETREG_REGRD_B32 : SOPK_32 <sopk<0x14, 0x13>, "s_getreg_regrd_b32", []>;
defm S_SETREG_IMM32_B32 : SOPK_IMM32 <
sopk<0x15, 0x14>, "s_setreg_imm32_b32", (outs),
- (ins i32imm:$imm, u16imm:$simm16), " $imm, $simm16"
+ (ins i32imm:$imm, hwreg:$simm16), " $simm16, $imm"
>;
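// Assembly sketch (hwreg operand syntax assumed from the AMD documentation;
// note the register/immediate now prints after the hwreg descriptor):
//   s_setreg_b32 hwreg(HW_REG_MODE, 0, 4), s0
//   s_getreg_b32 s2, hwreg(HW_REG_TRAPSTS)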
//===----------------------------------------------------------------------===//
@@ -429,10 +426,11 @@ def S_NOP : SOPP <0x00000000, (ins i16imm:$simm16), "s_nop $simm16">;
let isTerminator = 1 in {
def S_ENDPGM : SOPP <0x00000001, (ins), "s_endpgm",
- [(IL_retflag)]> {
+ [(AMDGPUendpgm)]> {
let simm16 = 0;
let isBarrier = 1;
let hasCtrlDep = 1;
+ let hasSideEffects = 1;
}
let isBranch = 1 in {
@@ -449,7 +447,8 @@ def S_CBRANCH_SCC0 : SOPP <
>;
def S_CBRANCH_SCC1 : SOPP <
0x00000005, (ins sopp_brtarget:$simm16),
- "s_cbranch_scc1 $simm16"
+ "s_cbranch_scc1 $simm16",
+ [(si_uniform_br_scc SCC, bb:$simm16)]
>;
} // End Uses = [SCC]
@@ -481,7 +480,7 @@ def S_CBRANCH_EXECNZ : SOPP <
let hasSideEffects = 1 in {
def S_BARRIER : SOPP <0x0000000a, (ins), "s_barrier",
- [(int_AMDGPU_barrier_local)]
+ [(int_amdgcn_s_barrier)]
> {
let SchedRW = [WriteBarrier];
let simm16 = 0;
@@ -490,18 +489,31 @@ def S_BARRIER : SOPP <0x0000000a, (ins), "s_barrier",
let isConvergent = 1;
}
+let mayLoad = 1, mayStore = 1, hasSideEffects = 1 in
def S_WAITCNT : SOPP <0x0000000c, (ins WAIT_FLAG:$simm16), "s_waitcnt $simm16">;
def S_SETHALT : SOPP <0x0000000d, (ins i16imm:$simm16), "s_sethalt $simm16">;
-def S_SLEEP : SOPP <0x0000000e, (ins i16imm:$simm16), "s_sleep $simm16">;
-def S_SETPRIO : SOPP <0x0000000f, (ins i16imm:$sim16), "s_setprio $sim16">;
+
+// On SI the documentation says sleep for approximately 64 * (low 3 bits of
+// SIMM16), consistent with the reported maximum of 448 (= 64 * 7). On VI the
+// maximum reported is 960 cycles, so 960 / 64 = 15 max; is the maximum
+// really 15 on VI?
+def S_SLEEP : SOPP <0x0000000e, (ins i32imm:$simm16),
+ "s_sleep $simm16", [(int_amdgcn_s_sleep SIMM16bit:$simm16)]> {
+ let hasSideEffects = 1;
+ let mayLoad = 1;
+ let mayStore = 1;
+}
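+// Usage sketch: the operand is the raw SIMM16 field, so on SI
+//   s_sleep 7
+// sleeps for roughly 64 * 7 = 448 cycles (per the reading above).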
+
+def S_SETPRIO : SOPP <0x0000000f, (ins i16imm:$simm16), "s_setprio $simm16">;
let Uses = [EXEC, M0] in {
+ // FIXME: Should this be mayLoad+mayStore?
def S_SENDMSG : SOPP <0x00000010, (ins SendMsgImm:$simm16), "s_sendmsg $simm16",
[(AMDGPUsendmsg (i32 imm:$simm16))]
>;
} // End Uses = [EXEC, M0]
-def S_SENDMSGHALT : SOPP <0x00000011, (ins i16imm:$simm16), "s_sendmsghalt $simm16">;
+def S_SENDMSGHALT : SOPP <0x00000011, (ins SendMsgImm:$simm16), "s_sendmsghalt $simm16">;
def S_TRAP : SOPP <0x00000012, (ins i16imm:$simm16), "s_trap $simm16">;
def S_ICACHE_INV : SOPP <0x00000013, (ins), "s_icache_inv"> {
let simm16 = 0;
@@ -770,8 +782,8 @@ defm DS_XOR_B32 : DS_1A1D_NORET <0xb, "ds_xor_b32", VGPR_32>;
defm DS_MSKOR_B32 : DS_1A2D_NORET <0xc, "ds_mskor_b32", VGPR_32>;
let mayLoad = 0 in {
defm DS_WRITE_B32 : DS_1A1D_NORET <0xd, "ds_write_b32", VGPR_32>;
-defm DS_WRITE2_B32 : DS_1A1D_Off8_NORET <0xe, "ds_write2_b32", VGPR_32>;
-defm DS_WRITE2ST64_B32 : DS_1A1D_Off8_NORET <0xf, "ds_write2st64_b32", VGPR_32>;
+defm DS_WRITE2_B32 : DS_1A2D_Off8_NORET <0xe, "ds_write2_b32", VGPR_32>;
+defm DS_WRITE2ST64_B32 : DS_1A2D_Off8_NORET <0xf, "ds_write2st64_b32", VGPR_32>;
}
defm DS_CMPST_B32 : DS_1A2D_NORET <0x10, "ds_cmpst_b32", VGPR_32>;
defm DS_CMPST_F32 : DS_1A2D_NORET <0x11, "ds_cmpst_f32", VGPR_32>;
@@ -811,7 +823,11 @@ defm DS_CMPST_RTN_B32 : DS_1A2D_RET <0x30, "ds_cmpst_rtn_b32", VGPR_32, "ds_cmps
defm DS_CMPST_RTN_F32 : DS_1A2D_RET <0x31, "ds_cmpst_rtn_f32", VGPR_32, "ds_cmpst_f32">;
defm DS_MIN_RTN_F32 : DS_1A2D_RET <0x32, "ds_min_rtn_f32", VGPR_32, "ds_min_f32">;
defm DS_MAX_RTN_F32 : DS_1A2D_RET <0x33, "ds_max_rtn_f32", VGPR_32, "ds_max_f32">;
-defm DS_SWIZZLE_B32 : DS_1A_RET <0x35, "ds_swizzle_b32", VGPR_32>;
+
+let Uses = [EXEC], mayLoad = 0, mayStore = 0, isConvergent = 1 in {
+defm DS_SWIZZLE_B32 : DS_1A_RET_ <dsop<0x35, 0x3d>, "ds_swizzle_b32", VGPR_32>;
+}
+
let mayStore = 0 in {
defm DS_READ_B32 : DS_1A_RET <0x36, "ds_read_b32", VGPR_32>;
defm DS_READ2_B32 : DS_1A_Off8_RET <0x37, "ds_read2_b32", VReg_64>;
@@ -839,8 +855,8 @@ defm DS_XOR_B64 : DS_1A1D_NORET <0x4b, "ds_xor_b64", VReg_64>;
defm DS_MSKOR_B64 : DS_1A2D_NORET <0x4c, "ds_mskor_b64", VReg_64>;
let mayLoad = 0 in {
defm DS_WRITE_B64 : DS_1A1D_NORET <0x4d, "ds_write_b64", VReg_64>;
-defm DS_WRITE2_B64 : DS_1A1D_Off8_NORET <0x4E, "ds_write2_b64", VReg_64>;
-defm DS_WRITE2ST64_B64 : DS_1A1D_Off8_NORET <0x4f, "ds_write2st64_b64", VReg_64>;
+defm DS_WRITE2_B64 : DS_1A2D_Off8_NORET <0x4E, "ds_write2_b64", VReg_64>;
+defm DS_WRITE2ST64_B64 : DS_1A2D_Off8_NORET <0x4f, "ds_write2st64_b64", VReg_64>;
}
defm DS_CMPST_B64 : DS_1A2D_NORET <0x50, "ds_cmpst_b64", VReg_64>;
defm DS_CMPST_F64 : DS_1A2D_NORET <0x51, "ds_cmpst_f64", VReg_64>;
@@ -886,7 +902,7 @@ defm DS_MAX_SRC2_U32 : DS_1A <0x88, "ds_max_src2_u32">;
defm DS_AND_SRC2_B32 : DS_1A <0x89, "ds_and_src_b32">;
defm DS_OR_SRC2_B32 : DS_1A <0x8a, "ds_or_src2_b32">;
defm DS_XOR_SRC2_B32 : DS_1A <0x8b, "ds_xor_src2_b32">;
-defm DS_WRITE_SRC2_B32 : DS_1A <0x8c, "ds_write_src2_b32">;
+defm DS_WRITE_SRC2_B32 : DS_1A_Off8_NORET <0x8d, "ds_write_src2_b32">;
defm DS_MIN_SRC2_F32 : DS_1A <0x92, "ds_min_src2_f32">;
defm DS_MAX_SRC2_F32 : DS_1A <0x93, "ds_max_src2_f32">;
@@ -903,7 +919,7 @@ defm DS_MAX_SRC2_U64 : DS_1A <0xc8, "ds_max_src2_u64">;
defm DS_AND_SRC2_B64 : DS_1A <0xc9, "ds_and_src2_b64">;
defm DS_OR_SRC2_B64 : DS_1A <0xca, "ds_or_src2_b64">;
defm DS_XOR_SRC2_B64 : DS_1A <0xcb, "ds_xor_src2_b64">;
-defm DS_WRITE_SRC2_B64 : DS_1A <0xcc, "ds_write_src2_b64">;
+defm DS_WRITE_SRC2_B64 : DS_1A_Off8_NORET <0xcd, "ds_write_src2_b64">;
defm DS_MIN_SRC2_F64 : DS_1A <0xd2, "ds_min_src2_f64">;
defm DS_MAX_SRC2_F64 : DS_1A <0xd3, "ds_max_src2_f64">;
@@ -937,16 +953,16 @@ defm BUFFER_STORE_FORMAT_XYZW : MUBUF_Store_Helper <
mubuf<0x07>, "buffer_store_format_xyzw", VReg_128
>;
defm BUFFER_LOAD_UBYTE : MUBUF_Load_Helper <
- mubuf<0x08, 0x10>, "buffer_load_ubyte", VGPR_32, i32, az_extloadi8_global
+ mubuf<0x08, 0x10>, "buffer_load_ubyte", VGPR_32, i32, mubuf_az_extloadi8
>;
defm BUFFER_LOAD_SBYTE : MUBUF_Load_Helper <
- mubuf<0x09, 0x11>, "buffer_load_sbyte", VGPR_32, i32, sextloadi8_global
+ mubuf<0x09, 0x11>, "buffer_load_sbyte", VGPR_32, i32, mubuf_sextloadi8
>;
defm BUFFER_LOAD_USHORT : MUBUF_Load_Helper <
- mubuf<0x0a, 0x12>, "buffer_load_ushort", VGPR_32, i32, az_extloadi16_global
+ mubuf<0x0a, 0x12>, "buffer_load_ushort", VGPR_32, i32, mubuf_az_extloadi16
>;
defm BUFFER_LOAD_SSHORT : MUBUF_Load_Helper <
- mubuf<0x0b, 0x13>, "buffer_load_sshort", VGPR_32, i32, sextloadi16_global
+ mubuf<0x0b, 0x13>, "buffer_load_sshort", VGPR_32, i32, mubuf_sextloadi16
>;
defm BUFFER_LOAD_DWORD : MUBUF_Load_Helper <
mubuf<0x0c, 0x14>, "buffer_load_dword", VGPR_32, i32, mubuf_load
@@ -981,7 +997,9 @@ defm BUFFER_STORE_DWORDX4 : MUBUF_Store_Helper <
defm BUFFER_ATOMIC_SWAP : MUBUF_Atomic <
mubuf<0x30, 0x40>, "buffer_atomic_swap", VGPR_32, i32, atomic_swap_global
>;
-//def BUFFER_ATOMIC_CMPSWAP : MUBUF_ <mubuf<0x31, 0x41>, "buffer_atomic_cmpswap", []>;
+defm BUFFER_ATOMIC_CMPSWAP : MUBUF_Atomic <
+ mubuf<0x31, 0x41>, "buffer_atomic_cmpswap", VReg_64, v2i32, null_frag
+>;
defm BUFFER_ATOMIC_ADD : MUBUF_Atomic <
mubuf<0x32, 0x42>, "buffer_atomic_add", VGPR_32, i32, atomic_add_global
>;
@@ -1010,30 +1028,61 @@ defm BUFFER_ATOMIC_OR : MUBUF_Atomic <
defm BUFFER_ATOMIC_XOR : MUBUF_Atomic <
mubuf<0x3b, 0x4a>, "buffer_atomic_xor", VGPR_32, i32, atomic_xor_global
>;
-//def BUFFER_ATOMIC_INC : MUBUF_ <mubuf<0x3c, 0x4b>, "buffer_atomic_inc", []>;
-//def BUFFER_ATOMIC_DEC : MUBUF_ <mubuf<0x3d, 0x4c>, "buffer_atomic_dec", []>;
-//def BUFFER_ATOMIC_FCMPSWAP : MUBUF_ <mubuf<0x3e>, "buffer_atomic_fcmpswap", []>; // isn't on VI
-//def BUFFER_ATOMIC_FMIN : MUBUF_ <mubuf<0x3f>, "buffer_atomic_fmin", []>; // isn't on VI
-//def BUFFER_ATOMIC_FMAX : MUBUF_ <mubuf<0x40>, "buffer_atomic_fmax", []>; // isn't on VI
-//def BUFFER_ATOMIC_SWAP_X2 : MUBUF_X2 <mubuf<0x50, 0x60>, "buffer_atomic_swap_x2", []>;
-//def BUFFER_ATOMIC_CMPSWAP_X2 : MUBUF_X2 <mubuf<0x51, 0x61>, "buffer_atomic_cmpswap_x2", []>;
-//def BUFFER_ATOMIC_ADD_X2 : MUBUF_X2 <mubuf<0x52, 0x62>, "buffer_atomic_add_x2", []>;
-//def BUFFER_ATOMIC_SUB_X2 : MUBUF_X2 <mubuf<0x53, 0x63>, "buffer_atomic_sub_x2", []>;
-//def BUFFER_ATOMIC_RSUB_X2 : MUBUF_X2 <mubuf<0x54>, "buffer_atomic_rsub_x2", []>; // isn't on CI & VI
-//def BUFFER_ATOMIC_SMIN_X2 : MUBUF_X2 <mubuf<0x55, 0x64>, "buffer_atomic_smin_x2", []>;
-//def BUFFER_ATOMIC_UMIN_X2 : MUBUF_X2 <mubuf<0x56, 0x65>, "buffer_atomic_umin_x2", []>;
-//def BUFFER_ATOMIC_SMAX_X2 : MUBUF_X2 <mubuf<0x57, 0x66>, "buffer_atomic_smax_x2", []>;
-//def BUFFER_ATOMIC_UMAX_X2 : MUBUF_X2 <mubuf<0x58, 0x67>, "buffer_atomic_umax_x2", []>;
-//def BUFFER_ATOMIC_AND_X2 : MUBUF_X2 <mubuf<0x59, 0x68>, "buffer_atomic_and_x2", []>;
-//def BUFFER_ATOMIC_OR_X2 : MUBUF_X2 <mubuf<0x5a, 0x69>, "buffer_atomic_or_x2", []>;
-//def BUFFER_ATOMIC_XOR_X2 : MUBUF_X2 <mubuf<0x5b, 0x6a>, "buffer_atomic_xor_x2", []>;
-//def BUFFER_ATOMIC_INC_X2 : MUBUF_X2 <mubuf<0x5c, 0x6b>, "buffer_atomic_inc_x2", []>;
-//def BUFFER_ATOMIC_DEC_X2 : MUBUF_X2 <mubuf<0x5d, 0x6c>, "buffer_atomic_dec_x2", []>;
+defm BUFFER_ATOMIC_INC : MUBUF_Atomic <
+ mubuf<0x3c, 0x4b>, "buffer_atomic_inc", VGPR_32, i32, atomic_inc_global
+>;
+defm BUFFER_ATOMIC_DEC : MUBUF_Atomic <
+ mubuf<0x3d, 0x4c>, "buffer_atomic_dec", VGPR_32, i32, atomic_dec_global
+>;
+
+//def BUFFER_ATOMIC_FCMPSWAP : MUBUF_Atomic <mubuf<0x3e>, "buffer_atomic_fcmpswap", []>; // isn't on VI
+//def BUFFER_ATOMIC_FMIN : MUBUF_Atomic <mubuf<0x3f>, "buffer_atomic_fmin", []>; // isn't on VI
+//def BUFFER_ATOMIC_FMAX : MUBUF_Atomic <mubuf<0x40>, "buffer_atomic_fmax", []>; // isn't on VI
+defm BUFFER_ATOMIC_SWAP_X2 : MUBUF_Atomic <
+ mubuf<0x50, 0x60>, "buffer_atomic_swap_x2", VReg_64, i64, atomic_swap_global
+>;
+defm BUFFER_ATOMIC_CMPSWAP_X2 : MUBUF_Atomic <
+ mubuf<0x51, 0x61>, "buffer_atomic_cmpswap_x2", VReg_128, v2i64, null_frag
+>;
+defm BUFFER_ATOMIC_ADD_X2 : MUBUF_Atomic <
+ mubuf<0x52, 0x62>, "buffer_atomic_add_x2", VReg_64, i64, atomic_add_global
+>;
+defm BUFFER_ATOMIC_SUB_X2 : MUBUF_Atomic <
+ mubuf<0x53, 0x63>, "buffer_atomic_sub_x2", VReg_64, i64, atomic_sub_global
+>;
+//defm BUFFER_ATOMIC_RSUB_X2 : MUBUF_Atomic <mubuf<0x54>, "buffer_atomic_rsub_x2", []>; // isn't on CI & VI
+defm BUFFER_ATOMIC_SMIN_X2 : MUBUF_Atomic <
+ mubuf<0x55, 0x64>, "buffer_atomic_smin_x2", VReg_64, i64, atomic_min_global
+>;
+defm BUFFER_ATOMIC_UMIN_X2 : MUBUF_Atomic <
+ mubuf<0x56, 0x65>, "buffer_atomic_umin_x2", VReg_64, i64, atomic_umin_global
+>;
+defm BUFFER_ATOMIC_SMAX_X2 : MUBUF_Atomic <
+ mubuf<0x57, 0x66>, "buffer_atomic_smax_x2", VReg_64, i64, atomic_max_global
+>;
+defm BUFFER_ATOMIC_UMAX_X2 : MUBUF_Atomic <
+ mubuf<0x58, 0x67>, "buffer_atomic_umax_x2", VReg_64, i64, atomic_umax_global
+>;
+defm BUFFER_ATOMIC_AND_X2 : MUBUF_Atomic <
+ mubuf<0x59, 0x68>, "buffer_atomic_and_x2", VReg_64, i64, atomic_and_global
+>;
+defm BUFFER_ATOMIC_OR_X2 : MUBUF_Atomic <
+ mubuf<0x5a, 0x69>, "buffer_atomic_or_x2", VReg_64, i64, atomic_or_global
+>;
+defm BUFFER_ATOMIC_XOR_X2 : MUBUF_Atomic <
+ mubuf<0x5b, 0x6a>, "buffer_atomic_xor_x2", VReg_64, i64, atomic_xor_global
+>;
+defm BUFFER_ATOMIC_INC_X2 : MUBUF_Atomic <
+ mubuf<0x5c, 0x6b>, "buffer_atomic_inc_x2", VReg_64, i64, atomic_inc_global
+>;
+defm BUFFER_ATOMIC_DEC_X2 : MUBUF_Atomic <
+ mubuf<0x5d, 0x6c>, "buffer_atomic_dec_x2", VReg_64, i64, atomic_dec_global
+>;
//def BUFFER_ATOMIC_FCMPSWAP_X2 : MUBUF_X2 <mubuf<0x5e>, "buffer_atomic_fcmpswap_x2", []>; // isn't on VI
//def BUFFER_ATOMIC_FMIN_X2 : MUBUF_X2 <mubuf<0x5f>, "buffer_atomic_fmin_x2", []>; // isn't on VI
//def BUFFER_ATOMIC_FMAX_X2 : MUBUF_X2 <mubuf<0x60>, "buffer_atomic_fmax_x2", []>; // isn't on VI
-let SubtargetPredicate = isSI in {
+let SubtargetPredicate = isSI, DisableVIDecoder = 1 in {
defm BUFFER_WBINVL1_SC : MUBUF_Invalidate <mubuf<0x70>, "buffer_wbinvl1_sc", int_amdgcn_buffer_wbinvl1_sc>; // isn't on CI & VI
}
@@ -1062,28 +1111,28 @@ defm IMAGE_LOAD_MIP : MIMG_NoSampler <0x00000001, "image_load_mip">;
//def IMAGE_LOAD_PCK_SGN : MIMG_NoPattern_ <"image_load_pck_sgn", 0x00000003>;
//def IMAGE_LOAD_MIP_PCK : MIMG_NoPattern_ <"image_load_mip_pck", 0x00000004>;
//def IMAGE_LOAD_MIP_PCK_SGN : MIMG_NoPattern_ <"image_load_mip_pck_sgn", 0x00000005>;
-//def IMAGE_STORE : MIMG_NoPattern_ <"image_store", 0x00000008>;
-//def IMAGE_STORE_MIP : MIMG_NoPattern_ <"image_store_mip", 0x00000009>;
+defm IMAGE_STORE : MIMG_Store <0x00000008, "image_store">;
+defm IMAGE_STORE_MIP : MIMG_Store <0x00000009, "image_store_mip">;
//def IMAGE_STORE_PCK : MIMG_NoPattern_ <"image_store_pck", 0x0000000a>;
//def IMAGE_STORE_MIP_PCK : MIMG_NoPattern_ <"image_store_mip_pck", 0x0000000b>;
defm IMAGE_GET_RESINFO : MIMG_NoSampler <0x0000000e, "image_get_resinfo">;
-//def IMAGE_ATOMIC_SWAP : MIMG_NoPattern_ <"image_atomic_swap", 0x0000000f>;
-//def IMAGE_ATOMIC_CMPSWAP : MIMG_NoPattern_ <"image_atomic_cmpswap", 0x00000010>;
-//def IMAGE_ATOMIC_ADD : MIMG_NoPattern_ <"image_atomic_add", 0x00000011>;
-//def IMAGE_ATOMIC_SUB : MIMG_NoPattern_ <"image_atomic_sub", 0x00000012>;
-//def IMAGE_ATOMIC_RSUB : MIMG_NoPattern_ <"image_atomic_rsub", 0x00000013>;
-//def IMAGE_ATOMIC_SMIN : MIMG_NoPattern_ <"image_atomic_smin", 0x00000014>;
-//def IMAGE_ATOMIC_UMIN : MIMG_NoPattern_ <"image_atomic_umin", 0x00000015>;
-//def IMAGE_ATOMIC_SMAX : MIMG_NoPattern_ <"image_atomic_smax", 0x00000016>;
-//def IMAGE_ATOMIC_UMAX : MIMG_NoPattern_ <"image_atomic_umax", 0x00000017>;
-//def IMAGE_ATOMIC_AND : MIMG_NoPattern_ <"image_atomic_and", 0x00000018>;
-//def IMAGE_ATOMIC_OR : MIMG_NoPattern_ <"image_atomic_or", 0x00000019>;
-//def IMAGE_ATOMIC_XOR : MIMG_NoPattern_ <"image_atomic_xor", 0x0000001a>;
-//def IMAGE_ATOMIC_INC : MIMG_NoPattern_ <"image_atomic_inc", 0x0000001b>;
-//def IMAGE_ATOMIC_DEC : MIMG_NoPattern_ <"image_atomic_dec", 0x0000001c>;
-//def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"image_atomic_fcmpswap", 0x0000001d>;
-//def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"image_atomic_fmin", 0x0000001e>;
-//def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"image_atomic_fmax", 0x0000001f>;
+defm IMAGE_ATOMIC_SWAP : MIMG_Atomic <mimg<0x0f, 0x10>, "image_atomic_swap">;
+defm IMAGE_ATOMIC_CMPSWAP : MIMG_Atomic <mimg<0x10, 0x11>, "image_atomic_cmpswap", VReg_64>;
+defm IMAGE_ATOMIC_ADD : MIMG_Atomic <mimg<0x11, 0x12>, "image_atomic_add">;
+defm IMAGE_ATOMIC_SUB : MIMG_Atomic <mimg<0x12, 0x13>, "image_atomic_sub">;
+//def IMAGE_ATOMIC_RSUB : MIMG_NoPattern_ <"image_atomic_rsub", 0x00000013>; -- not on VI
+defm IMAGE_ATOMIC_SMIN : MIMG_Atomic <mimg<0x14>, "image_atomic_smin">;
+defm IMAGE_ATOMIC_UMIN : MIMG_Atomic <mimg<0x15>, "image_atomic_umin">;
+defm IMAGE_ATOMIC_SMAX : MIMG_Atomic <mimg<0x16>, "image_atomic_smax">;
+defm IMAGE_ATOMIC_UMAX : MIMG_Atomic <mimg<0x17>, "image_atomic_umax">;
+defm IMAGE_ATOMIC_AND : MIMG_Atomic <mimg<0x18>, "image_atomic_and">;
+defm IMAGE_ATOMIC_OR : MIMG_Atomic <mimg<0x19>, "image_atomic_or">;
+defm IMAGE_ATOMIC_XOR : MIMG_Atomic <mimg<0x1a>, "image_atomic_xor">;
+defm IMAGE_ATOMIC_INC : MIMG_Atomic <mimg<0x1b>, "image_atomic_inc">;
+defm IMAGE_ATOMIC_DEC : MIMG_Atomic <mimg<0x1c>, "image_atomic_dec">;
+//def IMAGE_ATOMIC_FCMPSWAP : MIMG_NoPattern_ <"image_atomic_fcmpswap", 0x0000001d>; -- not on VI
+//def IMAGE_ATOMIC_FMIN : MIMG_NoPattern_ <"image_atomic_fmin", 0x0000001e>; -- not on VI
+//def IMAGE_ATOMIC_FMAX : MIMG_NoPattern_ <"image_atomic_fmax", 0x0000001f>; -- not on VI
defm IMAGE_SAMPLE : MIMG_Sampler_WQM <0x00000020, "image_sample">;
defm IMAGE_SAMPLE_CL : MIMG_Sampler_WQM <0x00000021, "image_sample_cl">;
defm IMAGE_SAMPLE_D : MIMG_Sampler <0x00000022, "image_sample_d">;
@@ -1171,10 +1220,12 @@ let Uses = [EXEC] in {
def V_READFIRSTLANE_B32 : VOP1 <
0x00000002,
(outs SReg_32:$vdst),
- (ins VGPR_32:$src0),
+ (ins VS_32:$src0),
"v_readfirstlane_b32 $vdst, $src0",
[]
->;
+> {
+ let isConvergent = 1;
+}
}
@@ -1234,7 +1285,7 @@ defm V_CVT_F64_U32 : VOP1Inst <vop1<0x16>, "v_cvt_f64_u32",
VOP_F64_I32, uint_to_fp
>;
-} // let SchedRW = [WriteQuarterRate32]
+} // End SchedRW = [WriteQuarterRate32]
defm V_FRACT_F32 : VOP1Inst <vop1<0x20, 0x1b>, "v_fract_f32",
VOP_F32_F32, AMDGPUfract
@@ -1270,7 +1321,7 @@ defm V_RSQ_F32 : VOP1Inst <vop1<0x2e, 0x24>, "v_rsq_f32",
VOP_F32_F32, AMDGPUrsq
>;
-} //let SchedRW = [WriteQuarterRate32]
+} // End SchedRW = [WriteQuarterRate32]
let SchedRW = [WriteDouble] in {
@@ -1281,7 +1332,7 @@ defm V_RSQ_F64 : VOP1Inst <vop1<0x31, 0x26>, "v_rsq_f64",
VOP_F64_F64, AMDGPUrsq
>;
-} // let SchedRW = [WriteDouble];
+} // End SchedRW = [WriteDouble]
defm V_SQRT_F32 : VOP1Inst <vop1<0x33, 0x27>, "v_sqrt_f32",
VOP_F32_F32, fsqrt
@@ -1312,34 +1363,34 @@ defm V_FFBH_U32 : VOP1Inst <vop1<0x39, 0x2d>, "v_ffbh_u32", VOP_I32_I32>;
defm V_FFBL_B32 : VOP1Inst <vop1<0x3a, 0x2e>, "v_ffbl_b32", VOP_I32_I32>;
defm V_FFBH_I32 : VOP1Inst <vop1<0x3b, 0x2f>, "v_ffbh_i32", VOP_I32_I32>;
defm V_FREXP_EXP_I32_F64 : VOP1Inst <vop1<0x3c,0x30>, "v_frexp_exp_i32_f64",
- VOP_I32_F64
+ VOP_I32_F64, int_amdgcn_frexp_exp
>;
let SchedRW = [WriteDoubleAdd] in {
defm V_FREXP_MANT_F64 : VOP1Inst <vop1<0x3d, 0x31>, "v_frexp_mant_f64",
- VOP_F64_F64
+ VOP_F64_F64, int_amdgcn_frexp_mant
>;
defm V_FRACT_F64 : VOP1Inst <vop1<0x3e, 0x32>, "v_fract_f64",
- VOP_F64_F64
+ VOP_F64_F64, AMDGPUfract
>;
} // End SchedRW = [WriteDoubleAdd]
defm V_FREXP_EXP_I32_F32 : VOP1Inst <vop1<0x3f, 0x33>, "v_frexp_exp_i32_f32",
- VOP_I32_F32
+ VOP_I32_F32, int_amdgcn_frexp_exp
>;
defm V_FREXP_MANT_F32 : VOP1Inst <vop1<0x40, 0x34>, "v_frexp_mant_f32",
- VOP_F32_F32
+ VOP_F32_F32, int_amdgcn_frexp_mant
>;
let vdst = 0, src0 = 0, VOPAsmPrefer32Bit = 1 in {
-defm V_CLREXCP : VOP1Inst <vop1<0x41,0x35>, "v_clrexcp", VOP_NONE>;
+defm V_CLREXCP : VOP1Inst <vop1<0x41,0x35>, "v_clrexcp", VOP_NO_EXT<VOP_NONE>>;
}
let Uses = [M0, EXEC] in {
-defm V_MOVRELD_B32 : VOP1Inst <vop1<0x42, 0x36>, "v_movreld_b32", VOP_I32_I32>;
-defm V_MOVRELS_B32 : VOP1Inst <vop1<0x43, 0x37>, "v_movrels_b32", VOP_I32_I32>;
-defm V_MOVRELSD_B32 : VOP1Inst <vop1<0x44, 0x38>, "v_movrelsd_b32", VOP_I32_I32>;
+defm V_MOVRELD_B32 : VOP1Inst <vop1<0x42, 0x36>, "v_movreld_b32", VOP_NO_EXT<VOP_I32_I32>>;
+defm V_MOVRELS_B32 : VOP1Inst <vop1<0x43, 0x37>, "v_movrels_b32", VOP_NO_EXT<VOP_I32_I32>>;
+defm V_MOVRELSD_B32 : VOP1Inst <vop1<0x44, 0x38>, "v_movrelsd_b32", VOP_NO_EXT<VOP_I32_I32>>;
} // End Uses = [M0, EXEC]
// These instructions only exist on SI and CI
@@ -1348,11 +1399,12 @@ let SubtargetPredicate = isSICI in {
let SchedRW = [WriteQuarterRate32] in {
defm V_MOV_FED_B32 : VOP1InstSI <vop1<0x9>, "v_mov_fed_b32", VOP_I32_I32>;
-defm V_LOG_CLAMP_F32 : VOP1InstSI <vop1<0x26>, "v_log_clamp_f32", VOP_F32_F32>;
+defm V_LOG_CLAMP_F32 : VOP1InstSI <vop1<0x26>, "v_log_clamp_f32",
+ VOP_F32_F32, int_amdgcn_log_clamp>;
defm V_RCP_CLAMP_F32 : VOP1InstSI <vop1<0x28>, "v_rcp_clamp_f32", VOP_F32_F32>;
defm V_RCP_LEGACY_F32 : VOP1InstSI <vop1<0x29>, "v_rcp_legacy_f32", VOP_F32_F32>;
defm V_RSQ_CLAMP_F32 : VOP1InstSI <vop1<0x2c>, "v_rsq_clamp_f32",
- VOP_F32_F32, AMDGPUrsq_clamped
+ VOP_F32_F32, AMDGPUrsq_clamp
>;
defm V_RSQ_LEGACY_F32 : VOP1InstSI <vop1<0x2d>, "v_rsq_legacy_f32",
VOP_F32_F32, AMDGPUrsq_legacy
@@ -1364,7 +1416,7 @@ let SchedRW = [WriteDouble] in {
defm V_RCP_CLAMP_F64 : VOP1InstSI <vop1<0x30>, "v_rcp_clamp_f64", VOP_F64_F64>;
defm V_RSQ_CLAMP_F64 : VOP1InstSI <vop1<0x32>, "v_rsq_clamp_f64",
- VOP_F64_F64, AMDGPUrsq_clamped
+ VOP_F64_F64, AMDGPUrsq_clamp
>;
} // End SchedRW = [WriteDouble]
@@ -1394,11 +1446,11 @@ defm V_INTERP_P1_F32 : V_INTERP_P1_F32_m;
} // End OtherPredicates = [has32BankLDS]
-let OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $dst" in {
+let OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $dst", isAsmParserOnly = 1 in {
defm V_INTERP_P1_F32_16bank : V_INTERP_P1_F32_m;
-} // End OtherPredicates = [has32BankLDS], Constraints = "@earlyclobber $dst"
+} // End OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $dst", isAsmParserOnly = 1
let DisableEncoding = "$src0", Constraints = "$src0 = $dst" in {
@@ -1426,15 +1478,9 @@ defm V_INTERP_MOV_F32 : VINTRP_m <
// VOP2 Instructions
//===----------------------------------------------------------------------===//
-multiclass V_CNDMASK <vop2 op, string name> {
- defm _e32 : VOP2_m <op, name, VOP_CNDMASK, [], name>;
-
- defm _e64 : VOP3_m <
- op, VOP_CNDMASK.Outs, VOP_CNDMASK.Ins64,
- name#!cast<string>(VOP_CNDMASK.Asm64), [], name, 3>;
-}
-
-defm V_CNDMASK_B32 : V_CNDMASK<vop2<0x0>, "v_cndmask_b32">;
+defm V_CNDMASK_B32 : VOP2eInst <vop2<0x0, 0x0>, "v_cndmask_b32",
+ VOP2e_I32_I32_I32_I1
+>;
let isCommutable = 1 in {
defm V_ADD_F32 : VOP2Inst <vop2<0x3, 0x1>, "v_add_f32",
@@ -1450,7 +1496,7 @@ defm V_SUBREV_F32 : VOP2Inst <vop2<0x5, 0x3>, "v_subrev_f32",
let isCommutable = 1 in {
defm V_MUL_LEGACY_F32 : VOP2Inst <vop2<0x7, 0x4>, "v_mul_legacy_f32",
- VOP_F32_F32_F32, int_AMDGPU_mul
+ VOP_F32_F32_F32
>;
defm V_MUL_F32 : VOP2Inst <vop2<0x8, 0x5>, "v_mul_f32",
@@ -1501,16 +1547,16 @@ defm V_AND_B32 : VOP2Inst <vop2<0x1b, 0x13>, "v_and_b32", VOP_I32_I32_I32>;
defm V_OR_B32 : VOP2Inst <vop2<0x1c, 0x14>, "v_or_b32", VOP_I32_I32_I32>;
defm V_XOR_B32 : VOP2Inst <vop2<0x1d, 0x15>, "v_xor_b32", VOP_I32_I32_I32>;
-let Constraints = "$dst = $src2", DisableEncoding="$src2",
+let Constraints = "$vdst = $src2", DisableEncoding="$src2",
isConvertibleToThreeAddress = 1 in {
defm V_MAC_F32 : VOP2Inst <vop2<0x1f, 0x16>, "v_mac_f32", VOP_MAC>;
}
} // End isCommutable = 1
-defm V_MADMK_F32 : VOP2MADK <vop2<0x20, 0x17>, "v_madmk_f32">;
+defm V_MADMK_F32 : VOP2MADK <vop2<0x20, 0x17>, "v_madmk_f32", VOP_MADMK>;
let isCommutable = 1 in {
-defm V_MADAK_F32 : VOP2MADK <vop2<0x21, 0x18>, "v_madak_f32">;
+defm V_MADAK_F32 : VOP2MADK <vop2<0x21, 0x18>, "v_madak_f32", VOP_MADAK>;
} // End isCommutable = 1
let isCommutable = 1 in {
@@ -1540,11 +1586,14 @@ defm V_SUBBREV_U32 : VOP2bInst <vop2<0x2a, 0x1e>, "v_subbrev_u32",
} // End isCommutable = 1
+// These are special and do not read the exec mask.
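+// For example, "v_readlane_b32 s0, v0, 0" copies lane 0 of v0 into s0
+// even if lane 0 is inactive, which is why these defs get an empty
+// Uses list rather than Uses = [EXEC].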
+let isConvergent = 1, Uses = []<Register> in {
+
defm V_READLANE_B32 : VOP2SI_3VI_m <
vop3 <0x001, 0x289>,
"v_readlane_b32",
(outs SReg_32:$vdst),
- (ins VGPR_32:$src0, SCSrc_32:$src1),
+ (ins VS_32:$src0, SCSrc_32:$src1),
"v_readlane_b32 $vdst, $src0, $src1"
>;
@@ -1556,6 +1605,8 @@ defm V_WRITELANE_B32 : VOP2SI_3VI_m <
"v_writelane_b32 $vdst, $src0, $src1"
>;
+} // End isConvergent = 1, Uses = []<Register>
+
// These instructions only exist on SI and CI
let SubtargetPredicate = isSICI in {
@@ -1636,16 +1687,16 @@ defm V_MAD_U32_U24 : VOP3Inst <vop3<0x143, 0x1c3>, "v_mad_u32_u24",
} // End isCommutable = 1
defm V_CUBEID_F32 : VOP3Inst <vop3<0x144, 0x1c4>, "v_cubeid_f32",
- VOP_F32_F32_F32_F32
+ VOP_F32_F32_F32_F32, int_amdgcn_cubeid
>;
defm V_CUBESC_F32 : VOP3Inst <vop3<0x145, 0x1c5>, "v_cubesc_f32",
- VOP_F32_F32_F32_F32
+ VOP_F32_F32_F32_F32, int_amdgcn_cubesc
>;
defm V_CUBETC_F32 : VOP3Inst <vop3<0x146, 0x1c6>, "v_cubetc_f32",
- VOP_F32_F32_F32_F32
+ VOP_F32_F32_F32_F32, int_amdgcn_cubetc
>;
defm V_CUBEMA_F32 : VOP3Inst <vop3<0x147, 0x1c7>, "v_cubema_f32",
- VOP_F32_F32_F32_F32
+ VOP_F32_F32_F32_F32, int_amdgcn_cubema
>;
defm V_BFE_U32 : VOP3Inst <vop3<0x148, 0x1c8>, "v_bfe_u32",
@@ -1666,6 +1717,10 @@ defm V_FMA_F32 : VOP3Inst <vop3<0x14b, 0x1cb>, "v_fma_f32",
defm V_FMA_F64 : VOP3Inst <vop3<0x14c, 0x1cc>, "v_fma_f64",
VOP_F64_F64_F64_F64, fma
>;
+
+defm V_LERP_U8 : VOP3Inst <vop3<0x14d, 0x1cd>, "v_lerp_u8",
+ VOP_I32_I32_I32_I32, int_amdgcn_lerp
+>;
} // End isCommutable = 1
//def V_LERP_U8 : VOP3_U8 <0x0000014d, "v_lerp_u8", []>;
@@ -1695,13 +1750,13 @@ defm V_MAX3_U32 : VOP3Inst <vop3<0x156, 0x1d5>, "v_max3_u32",
VOP_I32_I32_I32_I32, AMDGPUumax3
>;
defm V_MED3_F32 : VOP3Inst <vop3<0x157, 0x1d6>, "v_med3_f32",
- VOP_F32_F32_F32_F32
+ VOP_F32_F32_F32_F32, AMDGPUfmed3
>;
defm V_MED3_I32 : VOP3Inst <vop3<0x158, 0x1d7>, "v_med3_i32",
- VOP_I32_I32_I32_I32
+ VOP_I32_I32_I32_I32, AMDGPUsmed3
>;
defm V_MED3_U32 : VOP3Inst <vop3<0x159, 0x1d8>, "v_med3_u32",
- VOP_I32_I32_I32_I32
+ VOP_I32_I32_I32_I32, AMDGPUumed3
>;
//def V_SAD_U8 : VOP3_U8 <0x0000015a, "v_sad_u8", []>;
@@ -1710,7 +1765,7 @@ defm V_MED3_U32 : VOP3Inst <vop3<0x159, 0x1d8>, "v_med3_u32",
defm V_SAD_U32 : VOP3Inst <vop3<0x15d, 0x1dc>, "v_sad_u32",
VOP_I32_I32_I32_I32
>;
-////def V_CVT_PK_U8_F32 : VOP3_U8 <0x0000015e, "v_cvt_pk_u8_f32", []>;
+//def V_CVT_PK_U8_F32 : VOP3_U8 <0x0000015e, "v_cvt_pk_u8_f32", []>;
defm V_DIV_FIXUP_F32 : VOP3Inst <
vop3<0x15f, 0x1de>, "v_div_fixup_f32", VOP_F32_F32_F32_F32, AMDGPUdiv_fixup
>;
@@ -1727,26 +1782,26 @@ let SchedRW = [WriteDoubleAdd] in {
let isCommutable = 1 in {
defm V_ADD_F64 : VOP3Inst <vop3<0x164, 0x280>, "v_add_f64",
- VOP_F64_F64_F64, fadd
+ VOP_F64_F64_F64, fadd, 1
>;
defm V_MUL_F64 : VOP3Inst <vop3<0x165, 0x281>, "v_mul_f64",
- VOP_F64_F64_F64, fmul
+ VOP_F64_F64_F64, fmul, 1
>;
defm V_MIN_F64 : VOP3Inst <vop3<0x166, 0x282>, "v_min_f64",
- VOP_F64_F64_F64, fminnum
+ VOP_F64_F64_F64, fminnum, 1
>;
defm V_MAX_F64 : VOP3Inst <vop3<0x167, 0x283>, "v_max_f64",
- VOP_F64_F64_F64, fmaxnum
+ VOP_F64_F64_F64, fmaxnum, 1
>;
-} // isCommutable = 1
+} // End isCommutable = 1
defm V_LDEXP_F64 : VOP3Inst <vop3<0x168, 0x284>, "v_ldexp_f64",
- VOP_F64_F64_I32, AMDGPUldexp
+ VOP_F64_F64_I32, AMDGPUldexp, 1
>;
-} // let SchedRW = [WriteDoubleAdd]
+} // End SchedRW = [WriteDoubleAdd]
let isCommutable = 1, SchedRW = [WriteQuarterRate32] in {
@@ -1754,30 +1809,33 @@ defm V_MUL_LO_U32 : VOP3Inst <vop3<0x169, 0x285>, "v_mul_lo_u32",
VOP_I32_I32_I32
>;
defm V_MUL_HI_U32 : VOP3Inst <vop3<0x16a, 0x286>, "v_mul_hi_u32",
- VOP_I32_I32_I32
+ VOP_I32_I32_I32, mulhu
>;
+let DisableVIDecoder = 1 in { // removed from VI as it is identical to V_MUL_LO_U32
defm V_MUL_LO_I32 : VOP3Inst <vop3<0x16b, 0x285>, "v_mul_lo_i32",
VOP_I32_I32_I32
>;
+}
+
defm V_MUL_HI_I32 : VOP3Inst <vop3<0x16c, 0x287>, "v_mul_hi_i32",
- VOP_I32_I32_I32
+ VOP_I32_I32_I32, mulhs
>;
-} // isCommutable = 1, SchedRW = [WriteQuarterRate32]
+} // End isCommutable = 1, SchedRW = [WriteQuarterRate32]
let SchedRW = [WriteFloatFMA, WriteSALU] in {
defm V_DIV_SCALE_F32 : VOP3bInst <vop3<0x16d, 0x1e0>, "v_div_scale_f32",
- VOP3b_F32_I1_F32_F32_F32
+ VOP3b_F32_I1_F32_F32_F32, [], 1
>;
}
let SchedRW = [WriteDouble, WriteSALU] in {
// Double precision division pre-scale.
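// As a sketch of the expected use: the f64 divide expansion presumably
// scales the operands with v_div_scale_f64, iterates with
// v_div_fmas_f64, and corrects the result with v_div_fixup_f64.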
defm V_DIV_SCALE_F64 : VOP3bInst <vop3<0x16e, 0x1e1>, "v_div_scale_f64",
- VOP3b_F64_I1_F64_F64_F64
+ VOP3b_F64_I1_F64_F64_F64, [], 1
>;
-} // let SchedRW = [WriteDouble]
+} // End SchedRW = [WriteDouble, WriteSALU]
let isCommutable = 1, Uses = [VCC, EXEC] in {
@@ -1814,7 +1872,7 @@ defm V_TRIG_PREOP_F64 : VOP3Inst <
vop3<0x174, 0x292>, "v_trig_preop_f64", VOP_F64_F64_I32, AMDGPUtrig_preop
>;
-} // let SchedRW = [WriteDouble]
+} // End SchedRW = [WriteDouble]
// These instructions only exist on SI and CI
let SubtargetPredicate = isSICI in {
@@ -1828,7 +1886,7 @@ defm V_MULLIT_F32 : VOP3Inst <vop3<0x150>, "v_mullit_f32",
} // End SubtargetPredicate = isSICI
-let SubtargetPredicate = isVI in {
+let SubtargetPredicate = isVI, DisableSIDecoder = 1 in {
defm V_LSHLREV_B64 : VOP3Inst <vop3<0, 0x28f>, "v_lshlrev_b64",
VOP_I64_I32_I64
@@ -1845,113 +1903,145 @@ defm V_ASHRREV_I64 : VOP3Inst <vop3<0, 0x291>, "v_ashrrev_i64",
//===----------------------------------------------------------------------===//
// Pseudo Instructions
//===----------------------------------------------------------------------===//
-let isCodeGenOnly = 1, isPseudo = 1 in {
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in {
// For use in patterns
-def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$dst),
- (ins VSrc_64:$src0, VSrc_64:$src1, SSrc_64:$src2), "", []
->;
+def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$vdst),
+ (ins VSrc_64:$src0, VSrc_64:$src1, SSrc_64:$src2), "", []> {
+ let isPseudo = 1;
+ let isCodeGenOnly = 1;
+}
-let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in {
// 64-bit vector move instruction. This is mainly used by the SIFoldOperands
// pass to enable folding of inline immediates.
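// The pseudo is expected to be expanded after register allocation,
// presumably into a pair of 32-bit moves for the two halves of the
// 64-bit value.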
-def V_MOV_B64_PSEUDO : InstSI <(outs VReg_64:$dst), (ins VSrc_64:$src0), "", []>;
-} // end let hasSideEffects = 0, mayLoad = 0, mayStore = 0
-
-let hasSideEffects = 1, SALU = 1 in {
-def SGPR_USE : InstSI <(outs),(ins), "", []>;
+def V_MOV_B64_PSEUDO : PseudoInstSI <(outs VReg_64:$vdst), (ins VSrc_64:$src0)> {
+ let VALU = 1;
}
+} // End hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC]
+
+let usesCustomInserter = 1, SALU = 1 in {
+def GET_GROUPSTATICSIZE : PseudoInstSI <(outs SReg_32:$sdst), (ins),
+ [(set SReg_32:$sdst, (int_amdgcn_groupstaticsize))]>;
+} // End usesCustomInserter = 1, SALU = 1
// SI pseudo instructions. These are used by the CFG structurizer pass
// and should be lowered to ISA instructions prior to codegen.
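// For example, SI_IF should become an exec-mask sequence roughly like
// s_and_saveexec_b64 followed by s_xor_b64 and a branch over the
// masked-off region.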
-let mayLoad = 1, mayStore = 1, hasSideEffects = 1 in {
-let Uses = [EXEC], Defs = [EXEC] in {
+let hasSideEffects = 1 in {
+
+// Dummy terminator instruction to use after control flow instructions
+// are replaced with exec mask operations.
+def SI_MASK_BRANCH : PseudoInstSI <
+ (outs), (ins brtarget:$target, SReg_64:$dst)> {
+ let isBranch = 1;
+ let isTerminator = 1;
+ let isBarrier = 1;
+ let SALU = 1;
+}
+
+let Uses = [EXEC], Defs = [EXEC, SCC] in {
let isBranch = 1, isTerminator = 1 in {
-def SI_IF: InstSI <
- (outs SReg_64:$dst),
- (ins SReg_64:$vcc, brtarget:$target),
- "",
- [(set i64:$dst, (int_SI_if i1:$vcc, bb:$target))]
->;
+def SI_IF: PseudoInstSI <
+ (outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target),
+ [(set i64:$dst, (int_amdgcn_if i1:$vcc, bb:$target))]> {
+ let Constraints = "";
+}
-def SI_ELSE : InstSI <
- (outs SReg_64:$dst),
- (ins SReg_64:$src, brtarget:$target),
- "",
- [(set i64:$dst, (int_SI_else i64:$src, bb:$target))]
-> {
+def SI_ELSE : PseudoInstSI <
+ (outs SReg_64:$dst), (ins SReg_64:$src, brtarget:$target),
+ [(set i64:$dst, (int_amdgcn_else i64:$src, bb:$target))]> {
let Constraints = "$src = $dst";
}
-def SI_LOOP : InstSI <
- (outs),
- (ins SReg_64:$saved, brtarget:$target),
- "si_loop $saved, $target",
- [(int_SI_loop i64:$saved, bb:$target)]
+def SI_LOOP : PseudoInstSI <
+ (outs), (ins SReg_64:$saved, brtarget:$target),
+ [(int_amdgcn_loop i64:$saved, bb:$target)]
>;
-} // end isBranch = 1, isTerminator = 1
+} // End isBranch = 1, isTerminator = 1
-def SI_BREAK : InstSI <
- (outs SReg_64:$dst),
- (ins SReg_64:$src),
- "si_else $dst, $src",
- [(set i64:$dst, (int_SI_break i64:$src))]
+
+def SI_BREAK : PseudoInstSI <
+ (outs SReg_64:$dst), (ins SReg_64:$src),
+ [(set i64:$dst, (int_amdgcn_break i64:$src))]
>;
-def SI_IF_BREAK : InstSI <
- (outs SReg_64:$dst),
- (ins SReg_64:$vcc, SReg_64:$src),
- "si_if_break $dst, $vcc, $src",
- [(set i64:$dst, (int_SI_if_break i1:$vcc, i64:$src))]
+def SI_IF_BREAK : PseudoInstSI <
+ (outs SReg_64:$dst), (ins SReg_64:$vcc, SReg_64:$src),
+ [(set i64:$dst, (int_amdgcn_if_break i1:$vcc, i64:$src))]
>;
-def SI_ELSE_BREAK : InstSI <
- (outs SReg_64:$dst),
- (ins SReg_64:$src0, SReg_64:$src1),
- "si_else_break $dst, $src0, $src1",
- [(set i64:$dst, (int_SI_else_break i64:$src0, i64:$src1))]
+def SI_ELSE_BREAK : PseudoInstSI <
+ (outs SReg_64:$dst), (ins SReg_64:$src0, SReg_64:$src1),
+ [(set i64:$dst, (int_amdgcn_else_break i64:$src0, i64:$src1))]
>;
-def SI_END_CF : InstSI <
- (outs),
- (ins SReg_64:$saved),
- "si_end_cf $saved",
- [(int_SI_end_cf i64:$saved)]
+def SI_END_CF : PseudoInstSI <
+ (outs), (ins SReg_64:$saved),
+ [(int_amdgcn_end_cf i64:$saved)]
>;
-} // End Uses = [EXEC], Defs = [EXEC]
+} // End Uses = [EXEC], Defs = [EXEC, SCC]
let Uses = [EXEC], Defs = [EXEC,VCC] in {
-def SI_KILL : InstSI <
- (outs),
- (ins VSrc_32:$src),
- "si_kill $src",
- [(int_AMDGPU_kill f32:$src)]
->;
+def SI_KILL : PseudoInstSI <
+ (outs), (ins VSrc_32:$src),
+ [(int_AMDGPU_kill f32:$src)]> {
+ let isConvergent = 1;
+ let usesCustomInserter = 1;
+}
+
+def SI_KILL_TERMINATOR : PseudoInstSI <
+ (outs), (ins VSrc_32:$src)> {
+ let isTerminator = 1;
+}
+
} // End Uses = [EXEC], Defs = [EXEC,VCC]
-} // end mayLoad = 1, mayStore = 1, hasSideEffects = 1
+} // End hasSideEffects = 1
-let Uses = [EXEC], Defs = [EXEC,VCC,M0] in {
+def SI_PS_LIVE : PseudoInstSI <
+ (outs SReg_64:$dst), (ins),
+ [(set i1:$dst, (int_amdgcn_ps_live))]> {
+ let SALU = 1;
+}
-class SI_INDIRECT_SRC<RegisterClass rc> : InstSI <
- (outs VGPR_32:$dst, SReg_64:$temp),
- (ins rc:$src, VSrc_32:$idx, i32imm:$off),
- "si_indirect_src $dst, $temp, $src, $idx, $off",
- []
->;
+// Used as an isel pseudo to directly emit initialization with an
+// s_mov_b32 rather than a copy of another initialized
+// register. MachineCSE skips copies, and we don't want to have to
+// fold operands before it runs.
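+// The custom inserter is expected to emit "s_mov_b32 m0, $src"
+// directly, leaving no intermediate copy for MachineCSE to skip.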
+def SI_INIT_M0 : PseudoInstSI <(outs), (ins SSrc_32:$src)> {
+ let Defs = [M0];
+ let usesCustomInserter = 1;
+ let isAsCheapAsAMove = 1;
+ let SALU = 1;
+ let isReMaterializable = 1;
+}
-class SI_INDIRECT_DST<RegisterClass rc> : InstSI <
- (outs rc:$dst, SReg_64:$temp),
- (ins unknown:$src, VSrc_32:$idx, i32imm:$off, VGPR_32:$val),
- "si_indirect_dst $dst, $temp, $src, $idx, $off, $val",
- []
-> {
- let Constraints = "$src = $dst";
+def SI_RETURN : PseudoInstSI <
+ (outs), (ins variable_ops), [(AMDGPUreturn)]> {
+ let isTerminator = 1;
+ let isBarrier = 1;
+ let isReturn = 1;
+ let hasSideEffects = 1;
+ let SALU = 1;
+ let hasNoSchedulingInfo = 1;
+}
+
+let Uses = [EXEC], Defs = [EXEC, VCC, M0],
+ UseNamedOperandTable = 1 in {
+
+class SI_INDIRECT_SRC<RegisterClass rc> : PseudoInstSI <
+ (outs VGPR_32:$vdst, SReg_64:$sdst),
+ (ins rc:$src, VS_32:$idx, i32imm:$offset)>;
+
+class SI_INDIRECT_DST<RegisterClass rc> : PseudoInstSI <
+ (outs rc:$vdst, SReg_64:$sdst),
+ (ins unknown:$src, VS_32:$idx, i32imm:$offset, VGPR_32:$val)> {
+ let Constraints = "$src = $vdst";
}
// TODO: We can support indirect SGPR access.
@@ -1967,25 +2057,20 @@ def SI_INDIRECT_DST_V4 : SI_INDIRECT_DST<VReg_128>;
def SI_INDIRECT_DST_V8 : SI_INDIRECT_DST<VReg_256>;
def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>;
-} // Uses = [EXEC,VCC,M0], Defs = [EXEC,VCC,M0]
+} // End Uses = [EXEC], Defs = [EXEC, VCC, M0], UseNamedOperandTable = 1
multiclass SI_SPILL_SGPR <RegisterClass sgpr_class> {
-
let UseNamedOperandTable = 1, Uses = [EXEC] in {
- def _SAVE : InstSI <
+ def _SAVE : PseudoInstSI <
(outs),
- (ins sgpr_class:$src, i32imm:$frame_idx),
- "", []
- > {
+ (ins sgpr_class:$src, i32imm:$frame_idx)> {
let mayStore = 1;
let mayLoad = 0;
}
- def _RESTORE : InstSI <
+ def _RESTORE : PseudoInstSI <
(outs sgpr_class:$dst),
- (ins i32imm:$frame_idx),
- "", []
- > {
+ (ins i32imm:$frame_idx)> {
let mayStore = 0;
let mayLoad = 1;
}
@@ -1993,9 +2078,9 @@ multiclass SI_SPILL_SGPR <RegisterClass sgpr_class> {
}
// It's unclear whether you can use M0 as the output of v_readlane_b32
-// instructions, so use SGPR_32 register class for spills to prevent
+// instructions, so use SReg_32_XM0 register class for spills to prevent
// this from happening.
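// A restore would be expected to look like "v_readlane_b32 s4, v0, 0";
// excluding M0 from the class avoids ever requesting
// "v_readlane_b32 m0, v0, 0".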
-defm SI_SPILL_S32 : SI_SPILL_SGPR <SGPR_32>;
+defm SI_SPILL_S32 : SI_SPILL_SGPR <SReg_32_XM0>;
defm SI_SPILL_S64 : SI_SPILL_SGPR <SReg_64>;
defm SI_SPILL_S128 : SI_SPILL_SGPR <SReg_128>;
defm SI_SPILL_S256 : SI_SPILL_SGPR <SReg_256>;
@@ -2003,21 +2088,18 @@ defm SI_SPILL_S512 : SI_SPILL_SGPR <SReg_512>;
multiclass SI_SPILL_VGPR <RegisterClass vgpr_class> {
let UseNamedOperandTable = 1, VGPRSpill = 1, Uses = [EXEC] in {
- def _SAVE : InstSI <
+ def _SAVE : PseudoInstSI <
(outs),
(ins vgpr_class:$src, i32imm:$frame_idx, SReg_128:$scratch_rsrc,
- SReg_32:$scratch_offset),
- "", []
- > {
+ SReg_32:$scratch_offset, i32imm:$offset)> {
let mayStore = 1;
let mayLoad = 0;
}
- def _RESTORE : InstSI <
+ def _RESTORE : PseudoInstSI <
(outs vgpr_class:$dst),
- (ins i32imm:$frame_idx, SReg_128:$scratch_rsrc, SReg_32:$scratch_offset),
- "", []
- > {
+ (ins i32imm:$frame_idx, SReg_128:$scratch_rsrc, SReg_32:$scratch_offset,
+ i32imm:$offset)> {
let mayStore = 0;
let mayLoad = 1;
}
@@ -2033,29 +2115,19 @@ defm SI_SPILL_V512 : SI_SPILL_VGPR <VReg_512>;
let Defs = [SCC] in {
-def SI_CONSTDATA_PTR : InstSI <
+def SI_PC_ADD_REL_OFFSET : PseudoInstSI <
(outs SReg_64:$dst),
- (ins const_ga:$ptr),
- "", [(set SReg_64:$dst, (i64 (SIconstdata_ptr (tglobaladdr:$ptr))))]
-> {
+ (ins si_ga:$ptr),
+ [(set SReg_64:$dst, (i64 (SIpc_add_rel_offset (tglobaladdr:$ptr))))]> {
let SALU = 1;
}
} // End Defs = [SCC]
-} // end IsCodeGenOnly, isPseudo
-
-} // end SubtargetPredicate = isGCN
+} // End SubtargetPredicate = isGCN
let Predicates = [isGCN] in {
-def : Pat<
- (int_AMDGPU_cndlt f32:$src0, f32:$src1, f32:$src2),
- (V_CNDMASK_B32_e64 $src2, $src1,
- (V_CMP_GT_F32_e64 SRCMODS.NONE, 0, SRCMODS.NONE, $src0,
- DSTCLAMP.NONE, DSTOMOD.NONE))
->;
-
def : Pat <
(int_AMDGPU_kilp),
(SI_KILL 0xbf800000)
@@ -2067,7 +2139,6 @@ def : Pat<
(BUFFER_LOAD_FORMAT_XYZW_IDXEN $buf_idx_vgpr, $tlst, 0, imm:$attr_offset, 0, 0, 0)
>;
-/* int_SI_export */
def : Pat <
(int_SI_export imm:$en, imm:$vm, imm:$done, imm:$tgt, imm:$compr,
f32:$src0, f32:$src1, f32:$src2, f32:$src3),
@@ -2076,6 +2147,217 @@ def : Pat <
>;
//===----------------------------------------------------------------------===//
+// buffer_load/store_format patterns
+//===----------------------------------------------------------------------===//
+
+multiclass MUBUF_LoadIntrinsicPat<SDPatternOperator name, ValueType vt,
+ string opcode> {
+ def : Pat<
+ (vt (name v4i32:$rsrc, 0,
+ (MUBUFIntrinsicOffset i32:$soffset, i16:$offset),
+ imm:$glc, imm:$slc)),
+ (!cast<MUBUF>(opcode # _OFFSET) $rsrc, $soffset, (as_i16imm $offset),
+ (as_i1imm $glc), (as_i1imm $slc), 0)
+ >;
+
+ def : Pat<
+ (vt (name v4i32:$rsrc, i32:$vindex,
+ (MUBUFIntrinsicOffset i32:$soffset, i16:$offset),
+ imm:$glc, imm:$slc)),
+ (!cast<MUBUF>(opcode # _IDXEN) $vindex, $rsrc, $soffset, (as_i16imm $offset),
+ (as_i1imm $glc), (as_i1imm $slc), 0)
+ >;
+
+ def : Pat<
+ (vt (name v4i32:$rsrc, 0,
+ (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset),
+ imm:$glc, imm:$slc)),
+ (!cast<MUBUF>(opcode # _OFFEN) $voffset, $rsrc, $soffset, (as_i16imm $offset),
+ (as_i1imm $glc), (as_i1imm $slc), 0)
+ >;
+
+ def : Pat<
+ (vt (name v4i32:$rsrc, i32:$vindex,
+ (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset),
+ imm:$glc, imm:$slc)),
+ (!cast<MUBUF>(opcode # _BOTHEN)
+ (REG_SEQUENCE VReg_64, $vindex, sub0, $voffset, sub1),
+ $rsrc, $soffset, (as_i16imm $offset),
+ (as_i1imm $glc), (as_i1imm $slc), 0)
+ >;
+}
+
+defm : MUBUF_LoadIntrinsicPat<int_amdgcn_buffer_load_format, f32, "BUFFER_LOAD_FORMAT_X">;
+defm : MUBUF_LoadIntrinsicPat<int_amdgcn_buffer_load_format, v2f32, "BUFFER_LOAD_FORMAT_XY">;
+defm : MUBUF_LoadIntrinsicPat<int_amdgcn_buffer_load_format, v4f32, "BUFFER_LOAD_FORMAT_XYZW">;
+defm : MUBUF_LoadIntrinsicPat<int_amdgcn_buffer_load, f32, "BUFFER_LOAD_DWORD">;
+defm : MUBUF_LoadIntrinsicPat<int_amdgcn_buffer_load, v2f32, "BUFFER_LOAD_DWORDX2">;
+defm : MUBUF_LoadIntrinsicPat<int_amdgcn_buffer_load, v4f32, "BUFFER_LOAD_DWORDX4">;
+
+multiclass MUBUF_StoreIntrinsicPat<SDPatternOperator name, ValueType vt,
+ string opcode> {
+ def : Pat<
+ (name vt:$vdata, v4i32:$rsrc, 0,
+ (MUBUFIntrinsicOffset i32:$soffset, i16:$offset),
+ imm:$glc, imm:$slc),
+ (!cast<MUBUF>(opcode # _OFFSET) $vdata, $rsrc, $soffset, (as_i16imm $offset),
+ (as_i1imm $glc), (as_i1imm $slc), 0)
+ >;
+
+ def : Pat<
+ (name vt:$vdata, v4i32:$rsrc, i32:$vindex,
+ (MUBUFIntrinsicOffset i32:$soffset, i16:$offset),
+ imm:$glc, imm:$slc),
+ (!cast<MUBUF>(opcode # _IDXEN) $vdata, $vindex, $rsrc, $soffset,
+ (as_i16imm $offset), (as_i1imm $glc),
+ (as_i1imm $slc), 0)
+ >;
+
+ def : Pat<
+ (name vt:$vdata, v4i32:$rsrc, 0,
+ (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset),
+ imm:$glc, imm:$slc),
+ (!cast<MUBUF>(opcode # _OFFEN) $vdata, $voffset, $rsrc, $soffset,
+ (as_i16imm $offset), (as_i1imm $glc),
+ (as_i1imm $slc), 0)
+ >;
+
+ def : Pat<
+ (name vt:$vdata, v4i32:$rsrc, i32:$vindex,
+ (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset),
+ imm:$glc, imm:$slc),
+ (!cast<MUBUF>(opcode # _BOTHEN)
+ $vdata,
+ (REG_SEQUENCE VReg_64, $vindex, sub0, $voffset, sub1),
+ $rsrc, $soffset, (as_i16imm $offset),
+ (as_i1imm $glc), (as_i1imm $slc), 0)
+ >;
+}
+
+defm : MUBUF_StoreIntrinsicPat<int_amdgcn_buffer_store_format, f32, "BUFFER_STORE_FORMAT_X">;
+defm : MUBUF_StoreIntrinsicPat<int_amdgcn_buffer_store_format, v2f32, "BUFFER_STORE_FORMAT_XY">;
+defm : MUBUF_StoreIntrinsicPat<int_amdgcn_buffer_store_format, v4f32, "BUFFER_STORE_FORMAT_XYZW">;
+defm : MUBUF_StoreIntrinsicPat<int_amdgcn_buffer_store, f32, "BUFFER_STORE_DWORD">;
+defm : MUBUF_StoreIntrinsicPat<int_amdgcn_buffer_store, v2f32, "BUFFER_STORE_DWORDX2">;
+defm : MUBUF_StoreIntrinsicPat<int_amdgcn_buffer_store, v4f32, "BUFFER_STORE_DWORDX4">;
+
+//===----------------------------------------------------------------------===//
+// buffer_atomic patterns
+//===----------------------------------------------------------------------===//
+multiclass BufferAtomicPatterns<SDPatternOperator name, string opcode> {
+ def : Pat<
+ (name i32:$vdata_in, v4i32:$rsrc, 0,
+ (MUBUFIntrinsicOffset i32:$soffset, i16:$offset),
+ imm:$slc),
+ (!cast<MUBUF>(opcode # _RTN_OFFSET) $vdata_in, $rsrc, $soffset,
+ (as_i16imm $offset), (as_i1imm $slc))
+ >;
+
+ def : Pat<
+ (name i32:$vdata_in, v4i32:$rsrc, i32:$vindex,
+ (MUBUFIntrinsicOffset i32:$soffset, i16:$offset),
+ imm:$slc),
+ (!cast<MUBUF>(opcode # _RTN_IDXEN) $vdata_in, $vindex, $rsrc, $soffset,
+ (as_i16imm $offset), (as_i1imm $slc))
+ >;
+
+ def : Pat<
+ (name i32:$vdata_in, v4i32:$rsrc, 0,
+ (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset),
+ imm:$slc),
+ (!cast<MUBUF>(opcode # _RTN_OFFEN) $vdata_in, $voffset, $rsrc, $soffset,
+ (as_i16imm $offset), (as_i1imm $slc))
+ >;
+
+ def : Pat<
+ (name i32:$vdata_in, v4i32:$rsrc, i32:$vindex,
+ (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset),
+ imm:$slc),
+ (!cast<MUBUF>(opcode # _RTN_BOTHEN)
+ $vdata_in,
+ (REG_SEQUENCE VReg_64, $vindex, sub0, $voffset, sub1),
+ $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $slc))
+ >;
+}
+
+defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_swap, "BUFFER_ATOMIC_SWAP">;
+defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_add, "BUFFER_ATOMIC_ADD">;
+defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_sub, "BUFFER_ATOMIC_SUB">;
+defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_smin, "BUFFER_ATOMIC_SMIN">;
+defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_umin, "BUFFER_ATOMIC_UMIN">;
+defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_smax, "BUFFER_ATOMIC_SMAX">;
+defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_umax, "BUFFER_ATOMIC_UMAX">;
+defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_and, "BUFFER_ATOMIC_AND">;
+defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_or, "BUFFER_ATOMIC_OR">;
+defm : BufferAtomicPatterns<int_amdgcn_buffer_atomic_xor, "BUFFER_ATOMIC_XOR">;
+
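+// The cmpswap patterns below pack $data and $cmp into a 64-bit pair and
+// take sub0 of the result, on the assumption that the instruction
+// returns the original memory value in the low dword.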
+def : Pat<
+ (int_amdgcn_buffer_atomic_cmpswap
+ i32:$data, i32:$cmp, v4i32:$rsrc, 0,
+ (MUBUFIntrinsicOffset i32:$soffset, i16:$offset),
+ imm:$slc),
+ (EXTRACT_SUBREG
+ (BUFFER_ATOMIC_CMPSWAP_RTN_OFFSET
+ (REG_SEQUENCE VReg_64, $data, sub0, $cmp, sub1),
+ $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $slc)),
+ sub0)
+>;
+
+def : Pat<
+ (int_amdgcn_buffer_atomic_cmpswap
+ i32:$data, i32:$cmp, v4i32:$rsrc, i32:$vindex,
+ (MUBUFIntrinsicOffset i32:$soffset, i16:$offset),
+ imm:$slc),
+ (EXTRACT_SUBREG
+ (BUFFER_ATOMIC_CMPSWAP_RTN_IDXEN
+ (REG_SEQUENCE VReg_64, $data, sub0, $cmp, sub1),
+ $vindex, $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $slc)),
+ sub0)
+>;
+
+def : Pat<
+ (int_amdgcn_buffer_atomic_cmpswap
+ i32:$data, i32:$cmp, v4i32:$rsrc, 0,
+ (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset),
+ imm:$slc),
+ (EXTRACT_SUBREG
+ (BUFFER_ATOMIC_CMPSWAP_RTN_OFFEN
+ (REG_SEQUENCE VReg_64, $data, sub0, $cmp, sub1),
+ $voffset, $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $slc)),
+ sub0)
+>;
+
+def : Pat<
+ (int_amdgcn_buffer_atomic_cmpswap
+ i32:$data, i32:$cmp, v4i32:$rsrc, i32:$vindex,
+ (MUBUFIntrinsicVOffset i32:$soffset, i16:$offset, i32:$voffset),
+ imm:$slc),
+ (EXTRACT_SUBREG
+ (BUFFER_ATOMIC_CMPSWAP_RTN_BOTHEN
+ (REG_SEQUENCE VReg_64, $data, sub0, $cmp, sub1),
+ (REG_SEQUENCE VReg_64, $vindex, sub0, $voffset, sub1),
+ $rsrc, $soffset, (as_i16imm $offset), (as_i1imm $slc)),
+ sub0)
+>;
+
+
+//===----------------------------------------------------------------------===//
+// S_GETREG_B32 Intrinsic Pattern.
+//===----------------------------------------------------------------------===//
+def : Pat <
+ (int_amdgcn_s_getreg imm:$simm16),
+ (S_GETREG_B32 (as_i16imm $simm16))
+>;
+
+//===----------------------------------------------------------------------===//
+// DS_SWIZZLE Intrinsic Pattern.
+//===----------------------------------------------------------------------===//
+def : Pat <
+ (int_amdgcn_ds_swizzle i32:$src, imm:$offset16),
+ (DS_SWIZZLE_B32 $src, (as_i16imm $offset16), (i1 0))
+>;
+
+//===----------------------------------------------------------------------===//
// SMRD Patterns
//===----------------------------------------------------------------------===//
@@ -2109,7 +2391,6 @@ let AddedComplexity = 100 in {
defm : SMRD_Pattern <"S_LOAD_DWORD", i32>;
defm : SMRD_Pattern <"S_LOAD_DWORDX2", v2i32>;
defm : SMRD_Pattern <"S_LOAD_DWORDX4", v4i32>;
-defm : SMRD_Pattern <"S_LOAD_DWORDX8", v32i8>;
defm : SMRD_Pattern <"S_LOAD_DWORDX8", v8i32>;
defm : SMRD_Pattern <"S_LOAD_DWORDX16", v16i32>;
@@ -2143,7 +2424,7 @@ def : Pat <
def : Pat <
(i64 (ctpop i64:$src)),
(i64 (REG_SEQUENCE SReg_64,
- (S_BCNT1_I32_B64 $src), sub0,
+ (i32 (COPY_TO_REGCLASS (S_BCNT1_I32_B64 $src), SReg_32)), sub0,
(S_MOV_B32 0), sub1))
>;
@@ -2168,8 +2449,8 @@ def : Pat <
//===----------------------------------------------------------------------===//
def : Pat <
- (int_AMDGPU_barrier_global),
- (S_BARRIER)
+ (int_amdgcn_s_waitcnt i32:$simm16),
+ (S_WAITCNT (as_i16imm $simm16))
>;
//===----------------------------------------------------------------------===//
@@ -2184,7 +2465,22 @@ let Predicates = [UnsafeFPMath] in {
def : RsqPat<V_RSQ_F32_e32, f32>;
def : RsqPat<V_RSQ_F64_e32, f64>;
-}
+
+// Convert (x - floor(x)) to fract(x)
+def : Pat <
+ (f32 (fsub (f32 (VOP3Mods f32:$x, i32:$mods)),
+ (f32 (ffloor (f32 (VOP3Mods f32:$x, i32:$mods)))))),
+ (V_FRACT_F32_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
+>;
+
+// Convert (x + (-floor(x))) to fract(x)
+def : Pat <
+ (f64 (fadd (f64 (VOP3Mods f64:$x, i32:$mods)),
+ (f64 (fneg (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))))))),
+ (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
+>;
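+// These rewrites are presumably only safe under unsafe-fp-math because
+// v_fract and the explicit floor expansion can disagree on corner cases
+// such as infinities and NaNs.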
+
+} // End Predicates = [UnsafeFPMath]
//===----------------------------------------------------------------------===//
// VOP2 Patterns
@@ -2217,9 +2513,9 @@ def : Pat <
class SampleRawPattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat <
(name vt:$addr, v8i32:$rsrc, v4i32:$sampler, i32:$dmask, i32:$unorm,
i32:$r128, i32:$da, i32:$glc, i32:$slc, i32:$tfe, i32:$lwe),
- (opcode (as_i32imm $dmask), (as_i1imm $unorm), (as_i1imm $glc), (as_i1imm $da),
- (as_i1imm $r128), (as_i1imm $tfe), (as_i1imm $lwe), (as_i1imm $slc),
- $addr, $rsrc, $sampler)
+ (opcode $addr, $rsrc, $sampler,
+ (as_i32imm $dmask), (as_i1imm $unorm), (as_i1imm $glc), (as_i1imm $slc),
+ (as_i1imm $r128), (as_i1imm $tfe), (as_i1imm $lwe), (as_i1imm $da))
>;
multiclass SampleRawPatterns<SDPatternOperator name, string opcode> {
@@ -2232,11 +2528,11 @@ multiclass SampleRawPatterns<SDPatternOperator name, string opcode> {
// Image only
class ImagePattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat <
- (name vt:$addr, v8i32:$rsrc, i32:$dmask, i32:$unorm,
- i32:$r128, i32:$da, i32:$glc, i32:$slc, i32:$tfe, i32:$lwe),
- (opcode (as_i32imm $dmask), (as_i1imm $unorm), (as_i1imm $glc), (as_i1imm $da),
- (as_i1imm $r128), (as_i1imm $tfe), (as_i1imm $lwe), (as_i1imm $slc),
- $addr, $rsrc)
+ (name vt:$addr, v8i32:$rsrc, imm:$dmask, imm:$unorm,
+ imm:$r128, imm:$da, imm:$glc, imm:$slc, imm:$tfe, imm:$lwe),
+ (opcode $addr, $rsrc,
+ (as_i32imm $dmask), (as_i1imm $unorm), (as_i1imm $glc), (as_i1imm $slc),
+ (as_i1imm $r128), (as_i1imm $tfe), (as_i1imm $lwe), (as_i1imm $da))
>;
multiclass ImagePatterns<SDPatternOperator name, string opcode> {
@@ -2245,6 +2541,54 @@ multiclass ImagePatterns<SDPatternOperator name, string opcode> {
def : ImagePattern<name, !cast<MIMG>(opcode # _V4_V4), v4i32>;
}
+class ImageLoadPattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat <
+ (name vt:$addr, v8i32:$rsrc, imm:$dmask, imm:$r128, imm:$da, imm:$glc,
+ imm:$slc),
+ (opcode $addr, $rsrc,
+ (as_i32imm $dmask), 1, (as_i1imm $glc), (as_i1imm $slc),
+ (as_i1imm $r128), 0, 0, (as_i1imm $da))
+>;
+
+multiclass ImageLoadPatterns<SDPatternOperator name, string opcode> {
+ def : ImageLoadPattern<name, !cast<MIMG>(opcode # _V4_V1), i32>;
+ def : ImageLoadPattern<name, !cast<MIMG>(opcode # _V4_V2), v2i32>;
+ def : ImageLoadPattern<name, !cast<MIMG>(opcode # _V4_V4), v4i32>;
+}
+
+class ImageStorePattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat <
+ (name v4f32:$data, vt:$addr, v8i32:$rsrc, i32:$dmask, imm:$r128, imm:$da,
+ imm:$glc, imm:$slc),
+ (opcode $data, $addr, $rsrc,
+ (as_i32imm $dmask), 1, (as_i1imm $glc), (as_i1imm $slc),
+ (as_i1imm $r128), 0, 0, (as_i1imm $da))
+>;
+
+multiclass ImageStorePatterns<SDPatternOperator name, string opcode> {
+ def : ImageStorePattern<name, !cast<MIMG>(opcode # _V4_V1), i32>;
+ def : ImageStorePattern<name, !cast<MIMG>(opcode # _V4_V2), v2i32>;
+ def : ImageStorePattern<name, !cast<MIMG>(opcode # _V4_V4), v4i32>;
+}
+
+class ImageAtomicPattern<SDPatternOperator name, MIMG opcode, ValueType vt> : Pat <
+ (name i32:$vdata, vt:$addr, v8i32:$rsrc, imm:$r128, imm:$da, imm:$slc),
+ (opcode $vdata, $addr, $rsrc, 1, 1, 1, (as_i1imm $slc), (as_i1imm $r128), 0, 0, (as_i1imm $da))
+>;
+
+multiclass ImageAtomicPatterns<SDPatternOperator name, string opcode> {
+ def : ImageAtomicPattern<name, !cast<MIMG>(opcode # _V1), i32>;
+ def : ImageAtomicPattern<name, !cast<MIMG>(opcode # _V2), v2i32>;
+ def : ImageAtomicPattern<name, !cast<MIMG>(opcode # _V4), v4i32>;
+}
+
+class ImageAtomicCmpSwapPattern<MIMG opcode, ValueType vt> : Pat <
+ (int_amdgcn_image_atomic_cmpswap i32:$vsrc, i32:$vcmp, vt:$addr, v8i32:$rsrc,
+ imm:$r128, imm:$da, imm:$slc),
+ (EXTRACT_SUBREG
+ (opcode (REG_SEQUENCE VReg_64, $vsrc, sub0, $vcmp, sub1),
+ $addr, $rsrc, 3, 1, 1, (as_i1imm $slc), (as_i1imm $r128), 0, 0, (as_i1imm $da)),
+ sub0)
+>;
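+// As with the buffer cmpswap patterns above, $vsrc and $vcmp are packed
+// into a 64-bit pair and the original memory value is assumed to come
+// back in sub0.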
+
// Basic sample
defm : SampleRawPatterns<int_SI_image_sample, "IMAGE_SAMPLE">;
defm : SampleRawPatterns<int_SI_image_sample_cl, "IMAGE_SAMPLE_CL">;
@@ -2341,38 +2685,57 @@ def : SampleRawPattern<int_SI_getlod, IMAGE_GET_LOD_V4_V4, v4i32>;
def : ImagePattern<int_SI_getresinfo, IMAGE_GET_RESINFO_V4_V1, i32>;
defm : ImagePatterns<int_SI_image_load, "IMAGE_LOAD">;
defm : ImagePatterns<int_SI_image_load_mip, "IMAGE_LOAD_MIP">;
+defm : ImageLoadPatterns<int_amdgcn_image_load, "IMAGE_LOAD">;
+defm : ImageLoadPatterns<int_amdgcn_image_load_mip, "IMAGE_LOAD_MIP">;
+defm : ImageStorePatterns<int_amdgcn_image_store, "IMAGE_STORE">;
+defm : ImageStorePatterns<int_amdgcn_image_store_mip, "IMAGE_STORE_MIP">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_swap, "IMAGE_ATOMIC_SWAP">;
+def : ImageAtomicCmpSwapPattern<IMAGE_ATOMIC_CMPSWAP_V1, i32>;
+def : ImageAtomicCmpSwapPattern<IMAGE_ATOMIC_CMPSWAP_V2, v2i32>;
+def : ImageAtomicCmpSwapPattern<IMAGE_ATOMIC_CMPSWAP_V4, v4i32>;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_add, "IMAGE_ATOMIC_ADD">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_sub, "IMAGE_ATOMIC_SUB">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_smin, "IMAGE_ATOMIC_SMIN">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_umin, "IMAGE_ATOMIC_UMIN">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_smax, "IMAGE_ATOMIC_SMAX">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_umax, "IMAGE_ATOMIC_UMAX">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_and, "IMAGE_ATOMIC_AND">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_or, "IMAGE_ATOMIC_OR">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_xor, "IMAGE_ATOMIC_XOR">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_inc, "IMAGE_ATOMIC_INC">;
+defm : ImageAtomicPatterns<int_amdgcn_image_atomic_dec, "IMAGE_ATOMIC_DEC">;
/* SIsample for simple 1D texture lookup */
def : Pat <
- (SIsample i32:$addr, v32i8:$rsrc, v4i32:$sampler, imm),
- (IMAGE_SAMPLE_V4_V1 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
+ (SIsample i32:$addr, v8i32:$rsrc, v4i32:$sampler, imm),
+ (IMAGE_SAMPLE_V4_V1 $addr, $rsrc, $sampler, 0xf, 0, 0, 0, 0, 0, 0, 0)
>;
class SamplePattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, imm),
- (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
+ (name vt:$addr, v8i32:$rsrc, v4i32:$sampler, imm),
+ (opcode $addr, $rsrc, $sampler, 0xf, 0, 0, 0, 0, 0, 0, 0)
>;
class SampleRectPattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_RECT),
- (opcode 0xf, 1, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
+ (name vt:$addr, v8i32:$rsrc, v4i32:$sampler, TEX_RECT),
+ (opcode $addr, $rsrc, $sampler, 0xf, 1, 0, 0, 0, 0, 0, 0)
>;
class SampleArrayPattern<SDNode name, MIMG opcode, ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_ARRAY),
- (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc, $sampler)
+ (name vt:$addr, v8i32:$rsrc, v4i32:$sampler, TEX_ARRAY),
+ (opcode $addr, $rsrc, $sampler, 0xf, 0, 0, 0, 0, 0, 0, 1)
>;
class SampleShadowPattern<SDNode name, MIMG opcode,
ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_SHADOW),
- (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc, $sampler)
+ (name vt:$addr, v8i32:$rsrc, v4i32:$sampler, TEX_SHADOW),
+ (opcode $addr, $rsrc, $sampler, 0xf, 0, 0, 0, 0, 0, 0, 0)
>;
class SampleShadowArrayPattern<SDNode name, MIMG opcode,
ValueType vt> : Pat <
- (name vt:$addr, v32i8:$rsrc, v4i32:$sampler, TEX_SHADOW_ARRAY),
- (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc, $sampler)
+ (name vt:$addr, v8i32:$rsrc, v4i32:$sampler, TEX_SHADOW_ARRAY),
+ (opcode $addr, $rsrc, $sampler, 0xf, 0, 0, 0, 0, 0, 0, 1)
>;
/* SIsample* for texture lookups consuming more address parameters */
@@ -2422,68 +2785,10 @@ defm : SamplePatterns<IMAGE_SAMPLE_V4_V16, IMAGE_SAMPLE_C_V4_V16,
IMAGE_SAMPLE_D_V4_V16, IMAGE_SAMPLE_C_D_V4_V16,
v16i32>;
-/* int_SI_imageload for texture fetches consuming varying address parameters */
-class ImageLoadPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
- (name addr_type:$addr, v32i8:$rsrc, imm),
- (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc)
->;
-
-class ImageLoadArrayPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
- (name addr_type:$addr, v32i8:$rsrc, TEX_ARRAY),
- (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc)
->;
-
-class ImageLoadMSAAPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
- (name addr_type:$addr, v32i8:$rsrc, TEX_MSAA),
- (opcode 0xf, 0, 0, 0, 0, 0, 0, 0, $addr, $rsrc)
->;
-
-class ImageLoadArrayMSAAPattern<Intrinsic name, MIMG opcode, ValueType addr_type> : Pat <
- (name addr_type:$addr, v32i8:$rsrc, TEX_ARRAY_MSAA),
- (opcode 0xf, 0, 0, 1, 0, 0, 0, 0, $addr, $rsrc)
->;
-
-multiclass ImageLoadPatterns<MIMG opcode, ValueType addr_type> {
- def : ImageLoadPattern <int_SI_imageload, opcode, addr_type>;
- def : ImageLoadArrayPattern <int_SI_imageload, opcode, addr_type>;
-}
-
-multiclass ImageLoadMSAAPatterns<MIMG opcode, ValueType addr_type> {
- def : ImageLoadMSAAPattern <int_SI_imageload, opcode, addr_type>;
- def : ImageLoadArrayMSAAPattern <int_SI_imageload, opcode, addr_type>;
-}
-
-defm : ImageLoadPatterns<IMAGE_LOAD_MIP_V4_V2, v2i32>;
-defm : ImageLoadPatterns<IMAGE_LOAD_MIP_V4_V4, v4i32>;
-
-defm : ImageLoadMSAAPatterns<IMAGE_LOAD_V4_V2, v2i32>;
-defm : ImageLoadMSAAPatterns<IMAGE_LOAD_V4_V4, v4i32>;
-
-/* Image resource information */
-def : Pat <
- (int_SI_resinfo i32:$mipid, v32i8:$rsrc, imm),
- (IMAGE_GET_RESINFO_V4_V1 0xf, 0, 0, 0, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
->;
-
-def : Pat <
- (int_SI_resinfo i32:$mipid, v32i8:$rsrc, TEX_ARRAY),
- (IMAGE_GET_RESINFO_V4_V1 0xf, 0, 0, 1, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
->;
-
-def : Pat <
- (int_SI_resinfo i32:$mipid, v32i8:$rsrc, TEX_ARRAY_MSAA),
- (IMAGE_GET_RESINFO_V4_V1 0xf, 0, 0, 1, 0, 0, 0, 0, (V_MOV_B32_e32 $mipid), $rsrc)
->;
-
/********** ============================================ **********/
/********** Extraction, Insertion, Building and Casting **********/
/********** ============================================ **********/
-//def : Extract_Element<i64, v2i64, 0, sub0_sub1>;
-//def : Extract_Element<i64, v2i64, 1, sub2_sub3>;
-//def : Extract_Element<f64, v2f64, 0, sub0_sub1>;
-//def : Extract_Element<f64, v2f64, 1, sub2_sub3>;
-
foreach Index = 0-2 in {
def Extract_Element_v2i32_#Index : Extract_Element <
i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
@@ -2548,50 +2853,47 @@ foreach Index = 0-15 in {
>;
}
-def : BitConvert <i32, f32, SReg_32>;
+// FIXME: Why do only some of these type combinations exist for SReg
+// and VReg?
+// 32-bit bitcast
def : BitConvert <i32, f32, VGPR_32>;
-
-def : BitConvert <f32, i32, SReg_32>;
def : BitConvert <f32, i32, VGPR_32>;
+def : BitConvert <i32, f32, SReg_32>;
+def : BitConvert <f32, i32, SReg_32>;
+// 64-bit bitcast
def : BitConvert <i64, f64, VReg_64>;
-
def : BitConvert <f64, i64, VReg_64>;
-
-def : BitConvert <v2f32, v2i32, VReg_64>;
def : BitConvert <v2i32, v2f32, VReg_64>;
-def : BitConvert <v2i32, i64, VReg_64>;
+def : BitConvert <v2f32, v2i32, VReg_64>;
def : BitConvert <i64, v2i32, VReg_64>;
-def : BitConvert <v2f32, i64, VReg_64>;
+def : BitConvert <v2i32, i64, VReg_64>;
def : BitConvert <i64, v2f32, VReg_64>;
-def : BitConvert <v2f32, f64, VReg_64>;
-def : BitConvert <v2i32, f64, VReg_64>;
+def : BitConvert <v2f32, i64, VReg_64>;
def : BitConvert <f64, v2f32, VReg_64>;
+def : BitConvert <v2f32, f64, VReg_64>;
def : BitConvert <f64, v2i32, VReg_64>;
-def : BitConvert <v4f32, v4i32, VReg_128>;
+def : BitConvert <v2i32, f64, VReg_64>;
def : BitConvert <v4i32, v4f32, VReg_128>;
+def : BitConvert <v4f32, v4i32, VReg_128>;
-
+// 128-bit bitcast
def : BitConvert <v2i64, v4i32, SReg_128>;
def : BitConvert <v4i32, v2i64, SReg_128>;
-
def : BitConvert <v2f64, v4f32, VReg_128>;
def : BitConvert <v2f64, v4i32, VReg_128>;
def : BitConvert <v4f32, v2f64, VReg_128>;
def : BitConvert <v4i32, v2f64, VReg_128>;
+def : BitConvert <v2i64, v2f64, VReg_128>;
+def : BitConvert <v2f64, v2i64, VReg_128>;
-
-
-
-def : BitConvert <v8f32, v8i32, SReg_256>;
+// 256-bit bitcast
def : BitConvert <v8i32, v8f32, SReg_256>;
-def : BitConvert <v8i32, v32i8, SReg_256>;
-def : BitConvert <v32i8, v8i32, SReg_256>;
-def : BitConvert <v8i32, v32i8, VReg_256>;
+def : BitConvert <v8f32, v8i32, SReg_256>;
def : BitConvert <v8i32, v8f32, VReg_256>;
def : BitConvert <v8f32, v8i32, VReg_256>;
-def : BitConvert <v32i8, v8i32, VReg_256>;
+// 512-bit bitcast
def : BitConvert <v16i32, v16f32, VReg_512>;
def : BitConvert <v16f32, v16i32, VReg_512>;
@@ -2613,7 +2915,7 @@ def : Pat <
def : Pat <
(fneg (fabs f32:$src)),
- (S_OR_B32 $src, 0x80000000) /* Set sign bit */
+ (S_OR_B32 $src, 0x80000000) // Set sign bit
>;
// FIXME: Should use S_OR_B32
@@ -2703,15 +3005,9 @@ def : Pat <
/********** Intrinsic Patterns **********/
/********** ================== **********/
-/* llvm.AMDGPU.pow */
def : POW_Common <V_LOG_F32_e32, V_EXP_F32_e32, V_MUL_LEGACY_F32_e32>;
def : Pat <
- (int_AMDGPU_div f32:$src0, f32:$src1),
- (V_MUL_LEGACY_F32_e32 $src0, (V_RCP_LEGACY_F32_e32 $src1))
->;
-
-def : Pat <
(int_AMDGPU_cube v4f32:$src),
(REG_SEQUENCE VReg_128,
(V_CUBETC_F32 0 /* src0_modifiers */, (EXTRACT_SUBREG $src, sub0),
@@ -2745,7 +3041,7 @@ class Ext32Pat <SDNode ext> : Pat <
def : Ext32Pat <zext>;
def : Ext32Pat <anyext>;
-// Offset in an 32Bit VGPR
+// Offset in a 32-bit VGPR
def : Pat <
(SIload_constant v4i32:$sbase, i32:$voff),
(BUFFER_LOAD_DWORD_OFFEN $voff, $sbase, 0, 0, 0, 0, 0)
@@ -2759,12 +3055,6 @@ def : Pat <
(V_RCP_IFLAG_F32_e32 (V_CVT_F32_U32_e32 $src0))))
>;
-def : Pat <
- (int_SI_tid),
- (V_MBCNT_HI_U32_B32_e64 0xffffffff,
- (V_MBCNT_LO_U32_B32_e64 0xffffffff, 0))
->;
-
//===----------------------------------------------------------------------===//
// VOP3 Patterns
//===----------------------------------------------------------------------===//
@@ -2772,16 +3062,6 @@ def : Pat <
def : IMad24Pat<V_MAD_I32_I24>;
def : UMad24Pat<V_MAD_U32_U24>;
-def : Pat <
- (mulhu i32:$src0, i32:$src1),
- (V_MUL_HI_U32 $src0, $src1)
->;
-
-def : Pat <
- (mulhs i32:$src0, i32:$src1),
- (V_MUL_HI_I32 $src0, $src1)
->;
-
defm : BFIPatterns <V_BFI_B32, S_MOV_B32, SReg_64>;
def : ROTRPattern <V_ALIGNBIT_B32>;
@@ -2839,19 +3119,6 @@ class DSAtomicRetPat<DS inst, ValueType vt, PatFrag frag> : Pat <
(inst $ptr, $value, (as_i16imm $offset), (i1 0))
>;
-// Special case of DSAtomicRetPat for add / sub 1 -> inc / dec
-//
-// We need to use something for the data0, so we set a register to
-// -1. For the non-rtn variants, the manual says it does
-// DS[A] = (DS[A] >= D0) ? 0 : DS[A] + 1, and setting D0 to uint_max
-// will always do the increment so I'm assuming it's the same.
-class DSAtomicIncRetPat<DS inst, ValueType vt,
- Instruction LoadImm, PatFrag frag> : Pat <
- (frag (DS1Addr1Offset i32:$ptr, i32:$offset), (vt 1)),
- (inst $ptr, (LoadImm (vt -1)), (as_i16imm $offset), (i1 0))
->;
-
-
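A minimal C++ model of the hardware rule quoted in the removed comment above
(DS[A] = (DS[A] >= D0) ? 0 : DS[A] + 1); the names are illustrative, not LLVM
APIs, but it shows why D0 = uint_max made the old lowering equivalent to a
plain atomic add of 1:

    #include <cstdint>

    // Model of the DS_INC rule from the manual, as quoted above:
    //   DS[A] = (DS[A] >= D0) ? 0 : DS[A] + 1
    static uint32_t dsIncSemantics(uint32_t Mem, uint32_t D0) {
      return (Mem >= D0) ? 0 : Mem + 1;
    }

    // With D0 == UINT32_MAX the comparison only fires at the wrap point, so
    // the result equals Mem + 1 modulo 2^32 for every input:
    //   dsIncSemantics(5, UINT32_MAX)          == 6
    //   dsIncSemantics(UINT32_MAX, UINT32_MAX) == 0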
class DSAtomicCmpXChg <DS inst, ValueType vt, PatFrag frag> : Pat <
(frag (DS1Addr1Offset i32:$ptr, i32:$offset), vt:$cmp, vt:$swap),
(inst $ptr, $cmp, $swap, (as_i16imm $offset), (i1 0))
@@ -2859,14 +3126,11 @@ class DSAtomicCmpXChg <DS inst, ValueType vt, PatFrag frag> : Pat <
// 32-bit atomics.
-def : DSAtomicIncRetPat<DS_INC_RTN_U32, i32,
- V_MOV_B32_e32, si_atomic_load_add_local>;
-def : DSAtomicIncRetPat<DS_DEC_RTN_U32, i32,
- V_MOV_B32_e32, si_atomic_load_sub_local>;
-
def : DSAtomicRetPat<DS_WRXCHG_RTN_B32, i32, si_atomic_swap_local>;
def : DSAtomicRetPat<DS_ADD_RTN_U32, i32, si_atomic_load_add_local>;
def : DSAtomicRetPat<DS_SUB_RTN_U32, i32, si_atomic_load_sub_local>;
+def : DSAtomicRetPat<DS_INC_RTN_U32, i32, si_atomic_inc_local>;
+def : DSAtomicRetPat<DS_DEC_RTN_U32, i32, si_atomic_dec_local>;
def : DSAtomicRetPat<DS_AND_RTN_B32, i32, si_atomic_load_and_local>;
def : DSAtomicRetPat<DS_OR_RTN_B32, i32, si_atomic_load_or_local>;
def : DSAtomicRetPat<DS_XOR_RTN_B32, i32, si_atomic_load_xor_local>;
@@ -2874,18 +3138,14 @@ def : DSAtomicRetPat<DS_MIN_RTN_I32, i32, si_atomic_load_min_local>;
def : DSAtomicRetPat<DS_MAX_RTN_I32, i32, si_atomic_load_max_local>;
def : DSAtomicRetPat<DS_MIN_RTN_U32, i32, si_atomic_load_umin_local>;
def : DSAtomicRetPat<DS_MAX_RTN_U32, i32, si_atomic_load_umax_local>;
-
def : DSAtomicCmpXChg<DS_CMPST_RTN_B32, i32, si_atomic_cmp_swap_32_local>;
// 64-bit atomics.
-def : DSAtomicIncRetPat<DS_INC_RTN_U64, i64,
- V_MOV_B64_PSEUDO, si_atomic_load_add_local>;
-def : DSAtomicIncRetPat<DS_DEC_RTN_U64, i64,
- V_MOV_B64_PSEUDO, si_atomic_load_sub_local>;
-
def : DSAtomicRetPat<DS_WRXCHG_RTN_B64, i64, si_atomic_swap_local>;
def : DSAtomicRetPat<DS_ADD_RTN_U64, i64, si_atomic_load_add_local>;
def : DSAtomicRetPat<DS_SUB_RTN_U64, i64, si_atomic_load_sub_local>;
+def : DSAtomicRetPat<DS_INC_RTN_U64, i64, si_atomic_inc_local>;
+def : DSAtomicRetPat<DS_DEC_RTN_U64, i64, si_atomic_dec_local>;
def : DSAtomicRetPat<DS_AND_RTN_B64, i64, si_atomic_load_and_local>;
def : DSAtomicRetPat<DS_OR_RTN_B64, i64, si_atomic_load_or_local>;
def : DSAtomicRetPat<DS_XOR_RTN_B64, i64, si_atomic_load_xor_local>;
@@ -2901,20 +3161,35 @@ def : DSAtomicCmpXChg<DS_CMPST_RTN_B64, i64, si_atomic_cmp_swap_64_local>;
// MUBUF Patterns
//===----------------------------------------------------------------------===//
-multiclass MUBUFLoad_Pattern <MUBUF Instr_ADDR64, ValueType vt,
- PatFrag constant_ld> {
- def : Pat <
+class MUBUFLoad_Pattern <MUBUF Instr_ADDR64, ValueType vt,
+ PatFrag constant_ld> : Pat <
(vt (constant_ld (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i32:$soffset,
i16:$offset, i1:$glc, i1:$slc, i1:$tfe))),
(Instr_ADDR64 $vaddr, $srsrc, $soffset, $offset, $glc, $slc, $tfe)
>;
+
+multiclass MUBUFLoad_Atomic_Pattern <MUBUF Instr_ADDR64, MUBUF Instr_OFFSET,
+ ValueType vt, PatFrag atomic_ld> {
+ def : Pat <
+ (vt (atomic_ld (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i32:$soffset,
+ i16:$offset, i1:$slc))),
+ (Instr_ADDR64 $vaddr, $srsrc, $soffset, $offset, 1, $slc, 0)
+ >;
+
+ def : Pat <
+ (vt (atomic_ld (MUBUFOffsetNoGLC v4i32:$rsrc, i32:$soffset, i16:$offset))),
+ (Instr_OFFSET $rsrc, $soffset, (as_i16imm $offset), 1, 0, 0)
+ >;
}
let Predicates = [isSICI] in {
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_SBYTE_ADDR64, i32, sextloadi8_constant>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_UBYTE_ADDR64, i32, az_extloadi8_constant>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_SSHORT_ADDR64, i32, sextloadi16_constant>;
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_USHORT_ADDR64, i32, az_extloadi16_constant>;
+def : MUBUFLoad_Pattern <BUFFER_LOAD_SBYTE_ADDR64, i32, sextloadi8_constant>;
+def : MUBUFLoad_Pattern <BUFFER_LOAD_UBYTE_ADDR64, i32, az_extloadi8_constant>;
+def : MUBUFLoad_Pattern <BUFFER_LOAD_SSHORT_ADDR64, i32, sextloadi16_constant>;
+def : MUBUFLoad_Pattern <BUFFER_LOAD_USHORT_ADDR64, i32, az_extloadi16_constant>;
+
+defm : MUBUFLoad_Atomic_Pattern <BUFFER_LOAD_DWORD_ADDR64, BUFFER_LOAD_DWORD_OFFSET, i32, mubuf_load_atomic>;
+defm : MUBUFLoad_Atomic_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, BUFFER_LOAD_DWORDX2_OFFSET, i64, mubuf_load_atomic>;
} // End Predicates = [isSICI]
class MUBUFScratchLoadPat <MUBUF Instr, ValueType vt, PatFrag ld> : Pat <
@@ -2975,6 +3250,25 @@ defm : MUBUF_Load_Dword <v2i32, BUFFER_LOAD_DWORDX2_OFFSET, BUFFER_LOAD_DWORDX2_
defm : MUBUF_Load_Dword <v4i32, BUFFER_LOAD_DWORDX4_OFFSET, BUFFER_LOAD_DWORDX4_OFFEN,
BUFFER_LOAD_DWORDX4_IDXEN, BUFFER_LOAD_DWORDX4_BOTHEN>;
+multiclass MUBUFStore_Atomic_Pattern <MUBUF Instr_ADDR64, MUBUF Instr_OFFSET,
+ ValueType vt, PatFrag atomic_st> {
+  // Store follows atomic op convention, so the address comes first.
+ def : Pat <
+ (atomic_st (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i32:$soffset,
+ i16:$offset, i1:$slc), vt:$val),
+ (Instr_ADDR64 $val, $vaddr, $srsrc, $soffset, $offset, 1, $slc, 0)
+ >;
+
+ def : Pat <
+ (atomic_st (MUBUFOffsetNoGLC v4i32:$rsrc, i32:$soffset, i16:$offset), vt:$val),
+ (Instr_OFFSET $val, $rsrc, $soffset, (as_i16imm $offset), 1, 0, 0)
+ >;
+}
+let Predicates = [isSICI] in {
+defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_DWORD_ADDR64, BUFFER_STORE_DWORD_OFFSET, i32, global_store_atomic>;
+defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_DWORDX2_ADDR64, BUFFER_STORE_DWORDX2_OFFSET, i64, global_store_atomic>;
+} // End Predicates = [isSICI]
+
class MUBUFScratchStorePat <MUBUF Instr, ValueType vt, PatFrag st> : Pat <
(st vt:$value, (MUBUFScratch v4i32:$srsrc, i32:$vaddr, i32:$soffset,
u16imm:$offset)),
@@ -2987,22 +3281,6 @@ def : MUBUFScratchStorePat <BUFFER_STORE_DWORD_OFFEN, i32, store_private>;
def : MUBUFScratchStorePat <BUFFER_STORE_DWORDX2_OFFEN, v2i32, store_private>;
def : MUBUFScratchStorePat <BUFFER_STORE_DWORDX4_OFFEN, v4i32, store_private>;
-/*
-class MUBUFStore_Pattern <MUBUF Instr, ValueType vt, PatFrag st> : Pat <
- (st vt:$value, (MUBUFScratch v4i32:$srsrc, i64:$vaddr, u16imm:$offset)),
- (Instr $value, $srsrc, $vaddr, $offset)
->;
-
-let Predicates = [isSICI] in {
-def : MUBUFStore_Pattern <BUFFER_STORE_BYTE_ADDR64, i32, truncstorei8_private>;
-def : MUBUFStore_Pattern <BUFFER_STORE_SHORT_ADDR64, i32, truncstorei16_private>;
-def : MUBUFStore_Pattern <BUFFER_STORE_DWORD_ADDR64, i32, store_private>;
-def : MUBUFStore_Pattern <BUFFER_STORE_DWORDX2_ADDR64, v2i32, store_private>;
-def : MUBUFStore_Pattern <BUFFER_STORE_DWORDX4_ADDR64, v4i32, store_private>;
-} // End Predicates = [isSICI]
-
-*/
-
//===----------------------------------------------------------------------===//
// MTBUF Patterns
//===----------------------------------------------------------------------===//
@@ -3029,29 +3307,16 @@ def : MTBUF_StoreResource <v4i32, 4, TBUFFER_STORE_FORMAT_XYZW>;
/********** ====================== **********/
multiclass SI_INDIRECT_Pattern <ValueType vt, ValueType eltvt, string VecSize> {
-
- // 1. Extract with offset
+ // Extract with offset
def : Pat<
- (eltvt (extractelt vt:$vec, (add i32:$idx, imm:$off))),
- (!cast<Instruction>("SI_INDIRECT_SRC_"#VecSize) $vec, $idx, imm:$off)
+ (eltvt (extractelt vt:$src, (MOVRELOffset i32:$idx, (i32 imm:$offset)))),
+ (!cast<Instruction>("SI_INDIRECT_SRC_"#VecSize) $src, $idx, imm:$offset)
>;
- // 2. Extract without offset
+ // Insert with offset
def : Pat<
- (eltvt (extractelt vt:$vec, i32:$idx)),
- (!cast<Instruction>("SI_INDIRECT_SRC_"#VecSize) $vec, $idx, 0)
- >;
-
- // 3. Insert with offset
- def : Pat<
- (insertelt vt:$vec, eltvt:$val, (add i32:$idx, imm:$off)),
- (!cast<Instruction>("SI_INDIRECT_DST_"#VecSize) $vec, $idx, imm:$off, $val)
- >;
-
- // 4. Insert without offset
- def : Pat<
- (insertelt vt:$vec, eltvt:$val, i32:$idx),
- (!cast<Instruction>("SI_INDIRECT_DST_"#VecSize) $vec, $idx, 0, $val)
+ (insertelt vt:$src, eltvt:$val, (MOVRELOffset i32:$idx, (i32 imm:$offset))),
+ (!cast<Instruction>("SI_INDIRECT_DST_"#VecSize) $src, $idx, imm:$offset, $val)
>;
}
@@ -3111,10 +3376,12 @@ def : ZExt_i64_i32_Pat<anyext>;
def : ZExt_i64_i1_Pat<zext>;
def : ZExt_i64_i1_Pat<anyext>;
+// FIXME: We need to use COPY_TO_REGCLASS to work around the fact that
+// REG_SEQUENCE patterns don't support instructions with multiple outputs.
def : Pat <
(i64 (sext i32:$src)),
(REG_SEQUENCE SReg_64, $src, sub0,
- (S_ASHR_I32 $src, 31), sub1)
+ (i32 (COPY_TO_REGCLASS (S_ASHR_I32 $src, 31), SReg_32_XM0)), sub1)
>;
def : Pat <
@@ -3214,6 +3481,23 @@ defm : BFMPatterns <i32, S_BFM_B32, S_MOV_B32>;
def : BFEPattern <V_BFE_U32, S_MOV_B32>;
+let Predicates = [isSICI] in {
+def : Pat <
+ (i64 (readcyclecounter)),
+ (S_MEMTIME)
+>;
+}
+
+def : Pat<
+ (fcanonicalize f32:$src),
+ (V_MUL_F32_e64 0, CONST.FP32_ONE, 0, $src, 0, 0)
+>;
+
+def : Pat<
+ (fcanonicalize f64:$src),
+ (V_MUL_F64 0, CONST.FP64_ONE, 0, $src, 0, 0)
+>;
+
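A note on the two patterns above: the multiply by 1.0 lets the FP pipeline
hand back the value in canonical form (signaling NaNs quieted, denormals
handled per the current mode). A conceptual C++ sketch of the intent -- not
the TableGen lowering itself, which pins the multiply to V_MUL_F32/V_MUL_F64
with the inline 1.0 constants named above:

    // A compiler could fold these multiplies away; the hardware instruction
    // selected by the patterns above performs them for real.
    static float canonicalizeF32(float X) { return X * 1.0f; }
    static double canonicalizeF64(double X) { return X * 1.0; }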
//===----------------------------------------------------------------------===//
// Fract Patterns
//===----------------------------------------------------------------------===//
@@ -3226,21 +3510,6 @@ let Predicates = [isSI] in {
// The workaround for the V_FRACT bug is:
// fract(x) = isnan(x) ? x : min(V_FRACT(x), 0.99999999999999999)
-// Convert (x + (-floor(x)) to fract(x)
-def : Pat <
- (f64 (fadd (f64 (VOP3Mods f64:$x, i32:$mods)),
- (f64 (fneg (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))))))),
- (V_CNDMASK_B64_PSEUDO
- (V_MIN_F64
- SRCMODS.NONE,
- (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE),
- SRCMODS.NONE,
- (V_MOV_B64_PSEUDO 0x3fefffffffffffff),
- DSTCLAMP.NONE, DSTOMOD.NONE),
- $x,
- (V_CMP_CLASS_F64_e64 SRCMODS.NONE, $x, 3/*NaN*/))
->;
-
// Convert floor(x) to (x - fract(x))
def : Pat <
(f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))),
@@ -3268,6 +3537,9 @@ def : Pat <
def : SHA256MaPattern <V_BFI_B32, V_XOR_B32_e64>;
+def : IntMed3Pat<V_MED3_I32, smax, smax_oneuse, smin_oneuse>;
+def : IntMed3Pat<V_MED3_U32, umax, umax_oneuse, umin_oneuse>;
+
//============================================================================//
// Assembler aliases
//============================================================================//
diff --git a/lib/Target/AMDGPU/SIIntrinsics.td b/lib/Target/AMDGPU/SIIntrinsics.td
index 027a0a2f5167..a9b7c39096e7 100644
--- a/lib/Target/AMDGPU/SIIntrinsics.td
+++ b/lib/Target/AMDGPU/SIIntrinsics.td
@@ -13,8 +13,6 @@
let TargetPrefix = "SI", isTarget = 1 in {
-
- def int_SI_tid : Intrinsic <[llvm_i32_ty], [], [IntrNoMem]>;
def int_SI_packf16 : Intrinsic <[llvm_i32_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
def int_SI_export : Intrinsic <[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], []>;
def int_SI_load_const : Intrinsic <[llvm_float_ty], [llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
@@ -50,9 +48,9 @@ let TargetPrefix = "SI", isTarget = 1 in {
llvm_i32_ty, // glc(imm)
llvm_i32_ty, // slc(imm)
llvm_i32_ty], // tfe(imm)
- [IntrReadArgMem]>;
+ [IntrReadMem, IntrArgMemOnly]>;
- def int_SI_sendmsg : Intrinsic <[], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_SI_sendmsg : Intrinsic <[], [llvm_i32_ty, llvm_i32_ty], []>;
// Fully-flexible SAMPLE instruction.
class SampleRaw : Intrinsic <
@@ -172,28 +170,20 @@ let TargetPrefix = "SI", isTarget = 1 in {
def int_SI_image_load_mip : Image;
def int_SI_getresinfo : Image;
- // Deprecated image and sample intrinsics.
- class Sample : Intrinsic <[llvm_v4f32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
-
- def int_SI_sample : Sample;
- def int_SI_sampleb : Sample;
- def int_SI_sampled : Sample;
- def int_SI_samplel : Sample;
- def int_SI_imageload : Intrinsic <[llvm_v4i32_ty], [llvm_anyvector_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_SI_resinfo : Intrinsic <[llvm_v4i32_ty], [llvm_i32_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
-
/* Interpolation Intrinsics */
def int_SI_fs_constant : Intrinsic <[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_SI_fs_interp : Intrinsic <[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_v2i32_ty], [IntrNoMem]>;
+} // End TargetPrefix = "SI", isTarget = 1
+let TargetPrefix = "amdgcn", isTarget = 1 in {
/* Control flow Intrinsics */
- def int_SI_if : Intrinsic<[llvm_i64_ty], [llvm_i1_ty, llvm_empty_ty], []>;
- def int_SI_else : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_empty_ty], []>;
- def int_SI_break : Intrinsic<[llvm_i64_ty], [llvm_i64_ty], []>;
- def int_SI_if_break : Intrinsic<[llvm_i64_ty], [llvm_i1_ty, llvm_i64_ty], []>;
- def int_SI_else_break : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], []>;
- def int_SI_loop : Intrinsic<[], [llvm_i64_ty, llvm_empty_ty], []>;
- def int_SI_end_cf : Intrinsic<[], [llvm_i64_ty], []>;
+ def int_amdgcn_if : Intrinsic<[llvm_i64_ty], [llvm_i1_ty, llvm_empty_ty], []>;
+ def int_amdgcn_else : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_empty_ty], []>;
+ def int_amdgcn_break : Intrinsic<[llvm_i64_ty], [llvm_i64_ty], []>;
+ def int_amdgcn_if_break : Intrinsic<[llvm_i64_ty], [llvm_i1_ty, llvm_i64_ty], []>;
+ def int_amdgcn_else_break : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], []>;
+ def int_amdgcn_loop : Intrinsic<[], [llvm_i64_ty, llvm_empty_ty], []>;
+ def int_amdgcn_end_cf : Intrinsic<[], [llvm_i64_ty], []>;
}
diff --git a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index 1bdb1f0ee9f9..9e972a569a0f 100644
--- a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -26,7 +26,7 @@
//
// - Live interval recomputing seems inefficient. This currently only matches
// one pair, and recomputes live intervals and moves on to the next pair. It
-// would be better to compute a list of all merges that need to occur
+// would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
// cluster of loads have offsets that are too large to fit in the 8-bit
@@ -36,6 +36,7 @@
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
+#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
@@ -61,7 +62,6 @@ private:
MachineRegisterInfo *MRI;
LiveIntervals *LIS;
-
static bool offsetsCanBeCombined(unsigned Offset0,
unsigned Offset1,
unsigned EltSize);
@@ -69,10 +69,6 @@ private:
MachineBasicBlock::iterator findMatchingDSInst(MachineBasicBlock::iterator I,
unsigned EltSize);
- void updateRegDefsUses(unsigned SrcReg,
- unsigned DstReg,
- unsigned SubIdx);
-
MachineBasicBlock::iterator mergeRead2Pair(
MachineBasicBlock::iterator I,
MachineBasicBlock::iterator Paired,
@@ -193,17 +189,6 @@ SILoadStoreOptimizer::findMatchingDSInst(MachineBasicBlock::iterator I,
return E;
}
-void SILoadStoreOptimizer::updateRegDefsUses(unsigned SrcReg,
- unsigned DstReg,
- unsigned SubIdx) {
- for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(SrcReg),
- E = MRI->reg_end(); I != E; ) {
- MachineOperand &O = *I;
- ++I;
- O.substVirtReg(DstReg, SubIdx, *TRI);
- }
-}
-
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
MachineBasicBlock::iterator I,
MachineBasicBlock::iterator Paired,
@@ -268,19 +253,19 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
.addOperand(*Dest1)
.addReg(DestReg, RegState::Kill, SubRegIdx1);
- LIS->InsertMachineInstrInMaps(Read2);
+ LIS->InsertMachineInstrInMaps(*Read2);
  // repairIntervalsInRange() doesn't handle physical registers, so we have
// to update the M0 range manually.
- SlotIndex PairedIndex = LIS->getInstructionIndex(Paired);
+ SlotIndex PairedIndex = LIS->getInstructionIndex(*Paired);
LiveRange &M0Range = LIS->getRegUnit(*MCRegUnitIterator(AMDGPU::M0, TRI));
LiveRange::Segment *M0Segment = M0Range.getSegmentContaining(PairedIndex);
bool UpdateM0Range = M0Segment->end == PairedIndex.getRegSlot();
// The new write to the original destination register is now the copy. Steal
// the old SlotIndex.
- LIS->ReplaceMachineInstrInMaps(I, Copy0);
- LIS->ReplaceMachineInstrInMaps(Paired, Copy1);
+ LIS->ReplaceMachineInstrInMaps(*I, *Copy0);
+ LIS->ReplaceMachineInstrInMaps(*Paired, *Copy1);
I->eraseFromParent();
Paired->eraseFromParent();
@@ -291,7 +276,7 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
LIS->createAndComputeVirtRegInterval(DestReg);
if (UpdateM0Range) {
- SlotIndex Read2Index = LIS->getInstructionIndex(Read2);
+ SlotIndex Read2Index = LIS->getInstructionIndex(*Read2);
M0Segment->end = Read2Index.getRegSlot();
}
@@ -340,7 +325,7 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  // repairIntervalsInRange() doesn't handle physical registers, so we have
// to update the M0 range manually.
- SlotIndex PairedIndex = LIS->getInstructionIndex(Paired);
+ SlotIndex PairedIndex = LIS->getInstructionIndex(*Paired);
LiveRange &M0Range = LIS->getRegUnit(*MCRegUnitIterator(AMDGPU::M0, TRI));
LiveRange::Segment *M0Segment = M0Range.getSegmentContaining(PairedIndex);
bool UpdateM0Range = M0Segment->end == PairedIndex.getRegSlot();
@@ -359,8 +344,8 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
// XXX - How do we express subregisters here?
unsigned OrigRegs[] = { Data0->getReg(), Data1->getReg(), Addr->getReg() };
- LIS->RemoveMachineInstrFromMaps(I);
- LIS->RemoveMachineInstrFromMaps(Paired);
+ LIS->RemoveMachineInstrFromMaps(*I);
+ LIS->RemoveMachineInstrFromMaps(*Paired);
I->eraseFromParent();
Paired->eraseFromParent();
@@ -368,7 +353,7 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
LIS->repairIntervalsInRange(MBB, Write2, Write2, OrigRegs);
if (UpdateM0Range) {
- SlotIndex Write2Index = LIS->getInstructionIndex(Write2);
+ SlotIndex Write2Index = LIS->getInstructionIndex(*Write2);
M0Segment->end = Write2Index.getRegSlot();
}
@@ -423,9 +408,16 @@ bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
}
bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
- const TargetSubtargetInfo &STM = MF.getSubtarget();
- TRI = static_cast<const SIRegisterInfo *>(STM.getRegisterInfo());
- TII = static_cast<const SIInstrInfo *>(STM.getInstrInfo());
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
+ const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
+ if (!STM.loadStoreOptEnabled())
+ return false;
+
+ TII = STM.getInstrInfo();
+ TRI = &TII->getRegisterInfo();
+
MRI = &MF.getRegInfo();
LIS = &getAnalysis<LiveIntervals>();
diff --git a/lib/Target/AMDGPU/SILowerControlFlow.cpp b/lib/Target/AMDGPU/SILowerControlFlow.cpp
index 126f6245dfc0..ee1d5dae70b7 100644
--- a/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -52,6 +52,7 @@
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -61,24 +62,24 @@
using namespace llvm;
-namespace {
+#define DEBUG_TYPE "si-lower-control-flow"
-class SILowerControlFlowPass : public MachineFunctionPass {
+namespace {
+class SILowerControlFlow : public MachineFunctionPass {
private:
static const unsigned SkipThreshold = 12;
- static char ID;
const SIRegisterInfo *TRI;
const SIInstrInfo *TII;
bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);
void Skip(MachineInstr &From, MachineOperand &To);
- void SkipIfDead(MachineInstr &MI);
+ bool skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB);
void If(MachineInstr &MI);
- void Else(MachineInstr &MI);
+ void Else(MachineInstr &MI, bool ExecModified);
void Break(MachineInstr &MI);
void IfBreak(MachineInstr &MI);
void ElseBreak(MachineInstr &MI);
@@ -88,56 +89,118 @@ private:
void Kill(MachineInstr &MI);
void Branch(MachineInstr &MI);
- void LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
- void computeIndirectRegAndOffset(unsigned VecReg, unsigned &Reg, int &Offset);
- void IndirectSrc(MachineInstr &MI);
- void IndirectDst(MachineInstr &MI);
+ MachineBasicBlock *insertSkipBlock(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const;
+
+ std::pair<MachineBasicBlock *, MachineBasicBlock *>
+ splitBlock(MachineBasicBlock &MBB, MachineBasicBlock::iterator I);
+
+ void splitLoadM0BlockLiveIns(LivePhysRegs &RemainderLiveRegs,
+ const MachineRegisterInfo &MRI,
+ const MachineInstr &MI,
+ MachineBasicBlock &LoopBB,
+ MachineBasicBlock &RemainderBB,
+ unsigned SaveReg,
+ const MachineOperand &IdxReg);
+
+ void emitLoadM0FromVGPRLoop(MachineBasicBlock &LoopBB, DebugLoc DL,
+ MachineInstr *MovRel,
+ const MachineOperand &IdxReg,
+ int Offset);
+
+ bool loadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
+ std::pair<unsigned, int> computeIndirectRegAndOffset(unsigned VecReg,
+ int Offset) const;
+ bool indirectSrc(MachineInstr &MI);
+ bool indirectDst(MachineInstr &MI);
public:
- SILowerControlFlowPass(TargetMachine &tm) :
+ static char ID;
+
+ SILowerControlFlow() :
MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }
bool runOnMachineFunction(MachineFunction &MF) override;
const char *getPassName() const override {
- return "SI Lower control flow instructions";
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.setPreservesCFG();
- MachineFunctionPass::getAnalysisUsage(AU);
+ return "SI Lower control flow pseudo instructions";
}
};
} // End anonymous namespace
-char SILowerControlFlowPass::ID = 0;
+char SILowerControlFlow::ID = 0;
+
+INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
+ "SI lower control flow", false, false)
-FunctionPass *llvm::createSILowerControlFlowPass(TargetMachine &tm) {
- return new SILowerControlFlowPass(tm);
+char &llvm::SILowerControlFlowPassID = SILowerControlFlow::ID;
+
+
+FunctionPass *llvm::createSILowerControlFlowPass() {
+ return new SILowerControlFlow();
}
-bool SILowerControlFlowPass::shouldSkip(MachineBasicBlock *From,
- MachineBasicBlock *To) {
+static bool opcodeEmitsNoInsts(unsigned Opc) {
+ switch (Opc) {
+ case TargetOpcode::IMPLICIT_DEF:
+ case TargetOpcode::KILL:
+ case TargetOpcode::BUNDLE:
+ case TargetOpcode::CFI_INSTRUCTION:
+ case TargetOpcode::EH_LABEL:
+ case TargetOpcode::GC_LABEL:
+ case TargetOpcode::DBG_VALUE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool SILowerControlFlow::shouldSkip(MachineBasicBlock *From,
+ MachineBasicBlock *To) {
+ if (From->succ_empty())
+ return false;
unsigned NumInstr = 0;
+ MachineFunction *MF = From->getParent();
- for (MachineBasicBlock *MBB = From; MBB != To && !MBB->succ_empty();
- MBB = *MBB->succ_begin()) {
+ for (MachineFunction::iterator MBBI(From), ToI(To), End = MF->end();
+ MBBI != End && MBBI != ToI; ++MBBI) {
+ MachineBasicBlock &MBB = *MBBI;
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
+ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
NumInstr < SkipThreshold && I != E; ++I) {
+ if (opcodeEmitsNoInsts(I->getOpcode()))
+ continue;
+
+ // When a uniform loop is inside non-uniform control flow, the branch
+ // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
+      // when EXEC = 0. We should skip the loop lest it become infinite.
+ if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||
+ I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)
+ return true;
+
+ if (I->isInlineAsm()) {
+ const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
+ const char *AsmStr = I->getOperand(0).getSymbolName();
+
+      // The inline-asm length estimate is in bytes, assuming each statement
+      // uses the longest possible instruction encoding.
+ uint64_t MaxAsmSize = TII->getInlineAsmLength(AsmStr, *MAI);
+ NumInstr += MaxAsmSize / MAI->getMaxInstLength();
+ } else {
+ ++NumInstr;
+ }
- if (I->isBundle() || !I->isBundled())
- if (++NumInstr >= SkipThreshold)
- return true;
+ if (NumInstr >= SkipThreshold)
+ return true;
}
}
return false;
}
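The inline-asm branch above recovers an instruction count from a byte
estimate: getInlineAsmLength() already assumes every statement takes the
longest encoding, so dividing by that encoding length undoes the assumption.
A standalone model, with the 8-byte maximum length assumed purely for the
example (the pass queries MAI->getMaxInstLength() instead):

    #include <cstdint>

    static uint64_t countInlineAsmInsts(uint64_t MaxAsmSizeBytes) {
      const uint64_t MaxInstLength = 8; // assumed for illustration
      return MaxAsmSizeBytes / MaxInstLength; // e.g. 40 bytes -> 5 insts
    }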
-void SILowerControlFlowPass::Skip(MachineInstr &From, MachineOperand &To) {
+void SILowerControlFlow::Skip(MachineInstr &From, MachineOperand &To) {
if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
return;
@@ -147,40 +210,44 @@ void SILowerControlFlowPass::Skip(MachineInstr &From, MachineOperand &To) {
.addOperand(To);
}
-void SILowerControlFlowPass::SkipIfDead(MachineInstr &MI) {
-
+bool SILowerControlFlow::skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB) {
MachineBasicBlock &MBB = *MI.getParent();
- DebugLoc DL = MI.getDebugLoc();
+ MachineFunction *MF = MBB.getParent();
- if (MBB.getParent()->getInfo<SIMachineFunctionInfo>()->getShaderType() !=
- ShaderType::PIXEL ||
+ if (MF->getFunction()->getCallingConv() != CallingConv::AMDGPU_PS ||
!shouldSkip(&MBB, &MBB.getParent()->back()))
- return;
+ return false;
+
+ MachineBasicBlock *SkipBB = insertSkipBlock(MBB, MI.getIterator());
+ MBB.addSuccessor(SkipBB);
- MachineBasicBlock::iterator Insert = &MI;
- ++Insert;
+ const DebugLoc &DL = MI.getDebugLoc();
// If the exec mask is non-zero, skip the next two instructions
- BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
- .addImm(3);
+ BuildMI(&MBB, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
+ .addMBB(&NextBB);
+
+ MachineBasicBlock::iterator Insert = SkipBB->begin();
// Exec mask is zero: Export to NULL target...
- BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
- .addImm(0)
- .addImm(0x09) // V_008DFC_SQ_EXP_NULL
- .addImm(0)
- .addImm(1)
- .addImm(1)
- .addReg(AMDGPU::VGPR0)
- .addReg(AMDGPU::VGPR0)
- .addReg(AMDGPU::VGPR0)
- .addReg(AMDGPU::VGPR0);
-
- // ... and terminate wavefront
- BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
+ BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::EXP))
+ .addImm(0)
+ .addImm(0x09) // V_008DFC_SQ_EXP_NULL
+ .addImm(0)
+ .addImm(1)
+ .addImm(1)
+ .addReg(AMDGPU::VGPR0, RegState::Undef)
+ .addReg(AMDGPU::VGPR0, RegState::Undef)
+ .addReg(AMDGPU::VGPR0, RegState::Undef)
+ .addReg(AMDGPU::VGPR0, RegState::Undef);
+
+ // ... and terminate wavefront.
+ BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
+
+ return true;
}
-void SILowerControlFlowPass::If(MachineInstr &MI) {
+void SILowerControlFlow::If(MachineInstr &MI) {
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = MI.getDebugLoc();
unsigned Reg = MI.getOperand(0).getReg();
@@ -195,10 +262,15 @@ void SILowerControlFlowPass::If(MachineInstr &MI) {
Skip(MI, MI.getOperand(2));
+ // Insert a pseudo terminator to help keep the verifier happy.
+ BuildMI(MBB, &MI, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
+ .addOperand(MI.getOperand(2))
+ .addReg(Reg);
+
MI.eraseFromParent();
}
-void SILowerControlFlowPass::Else(MachineInstr &MI) {
+void SILowerControlFlow::Else(MachineInstr &MI, bool ExecModified) {
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = MI.getDebugLoc();
unsigned Dst = MI.getOperand(0).getReg();
@@ -208,22 +280,36 @@ void SILowerControlFlowPass::Else(MachineInstr &MI) {
TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
.addReg(Src); // Saved EXEC
+ if (ExecModified) {
+ // Adjust the saved exec to account for the modifications during the flow
+ // block that contains the ELSE. This can happen when WQM mode is switched
+ // off.
+ BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64), Dst)
+ .addReg(AMDGPU::EXEC)
+ .addReg(Dst);
+ }
+
BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
.addReg(AMDGPU::EXEC)
.addReg(Dst);
Skip(MI, MI.getOperand(2));
+ // Insert a pseudo terminator to help keep the verifier happy.
+ BuildMI(MBB, &MI, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
+ .addOperand(MI.getOperand(2))
+ .addReg(Dst);
+
MI.eraseFromParent();
}
-void SILowerControlFlowPass::Break(MachineInstr &MI) {
+void SILowerControlFlow::Break(MachineInstr &MI) {
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = MI.getDebugLoc();
unsigned Dst = MI.getOperand(0).getReg();
unsigned Src = MI.getOperand(1).getReg();
-
+
BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
.addReg(AMDGPU::EXEC)
.addReg(Src);
@@ -231,14 +317,14 @@ void SILowerControlFlowPass::Break(MachineInstr &MI) {
MI.eraseFromParent();
}
-void SILowerControlFlowPass::IfBreak(MachineInstr &MI) {
+void SILowerControlFlow::IfBreak(MachineInstr &MI) {
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = MI.getDebugLoc();
unsigned Dst = MI.getOperand(0).getReg();
unsigned Vcc = MI.getOperand(1).getReg();
unsigned Src = MI.getOperand(2).getReg();
-
+
BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
.addReg(Vcc)
.addReg(Src);
@@ -246,14 +332,14 @@ void SILowerControlFlowPass::IfBreak(MachineInstr &MI) {
MI.eraseFromParent();
}
-void SILowerControlFlowPass::ElseBreak(MachineInstr &MI) {
+void SILowerControlFlow::ElseBreak(MachineInstr &MI) {
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = MI.getDebugLoc();
unsigned Dst = MI.getOperand(0).getReg();
unsigned Saved = MI.getOperand(1).getReg();
unsigned Src = MI.getOperand(2).getReg();
-
+
BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
.addReg(Saved)
.addReg(Src);
@@ -261,7 +347,7 @@ void SILowerControlFlowPass::ElseBreak(MachineInstr &MI) {
MI.eraseFromParent();
}
-void SILowerControlFlowPass::Loop(MachineInstr &MI) {
+void SILowerControlFlow::Loop(MachineInstr &MI) {
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = MI.getDebugLoc();
unsigned Src = MI.getOperand(0).getReg();
@@ -276,7 +362,7 @@ void SILowerControlFlowPass::Loop(MachineInstr &MI) {
MI.eraseFromParent();
}
-void SILowerControlFlowPass::EndCf(MachineInstr &MI) {
+void SILowerControlFlow::EndCf(MachineInstr &MI) {
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = MI.getDebugLoc();
unsigned Reg = MI.getOperand(0).getReg();
@@ -289,24 +375,24 @@ void SILowerControlFlowPass::EndCf(MachineInstr &MI) {
MI.eraseFromParent();
}
-void SILowerControlFlowPass::Branch(MachineInstr &MI) {
- if (MI.getOperand(0).getMBB() == MI.getParent()->getNextNode())
+void SILowerControlFlow::Branch(MachineInstr &MI) {
+ MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
+ if (MBB == MI.getParent()->getNextNode())
MI.eraseFromParent();
// If these aren't equal, this is probably an infinite loop.
}
-void SILowerControlFlowPass::Kill(MachineInstr &MI) {
+void SILowerControlFlow::Kill(MachineInstr &MI) {
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = MI.getDebugLoc();
const MachineOperand &Op = MI.getOperand(0);
#ifndef NDEBUG
- const SIMachineFunctionInfo *MFI
- = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
+ CallingConv::ID CallConv = MBB.getParent()->getFunction()->getCallingConv();
// Kill is only allowed in pixel / geometry shaders.
- assert(MFI->getShaderType() == ShaderType::PIXEL ||
- MFI->getShaderType() == ShaderType::GEOMETRY);
+ assert(CallConv == CallingConv::AMDGPU_PS ||
+ CallConv == CallingConv::AMDGPU_GS);
#endif
// Clear this thread from the exec mask if the operand is negative
@@ -325,94 +411,209 @@ void SILowerControlFlowPass::Kill(MachineInstr &MI) {
MI.eraseFromParent();
}
-void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset) {
+// All currently live registers must remain so in the remainder block.
+void SILowerControlFlow::splitLoadM0BlockLiveIns(LivePhysRegs &RemainderLiveRegs,
+ const MachineRegisterInfo &MRI,
+ const MachineInstr &MI,
+ MachineBasicBlock &LoopBB,
+ MachineBasicBlock &RemainderBB,
+ unsigned SaveReg,
+ const MachineOperand &IdxReg) {
+ // Add reg defined in loop body.
+ RemainderLiveRegs.addReg(SaveReg);
+
+ if (const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val)) {
+ if (!Val->isUndef()) {
+ RemainderLiveRegs.addReg(Val->getReg());
+ LoopBB.addLiveIn(Val->getReg());
+ }
+ }
+
+ for (unsigned Reg : RemainderLiveRegs) {
+ if (MRI.isAllocatable(Reg))
+ RemainderBB.addLiveIn(Reg);
+ }
+
+ const MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src);
+ if (!Src->isUndef())
+ LoopBB.addLiveIn(Src->getReg());
+
+ if (!IdxReg.isUndef())
+ LoopBB.addLiveIn(IdxReg.getReg());
+ LoopBB.sortUniqueLiveIns();
+}
+
+void SILowerControlFlow::emitLoadM0FromVGPRLoop(MachineBasicBlock &LoopBB,
+ DebugLoc DL,
+ MachineInstr *MovRel,
+ const MachineOperand &IdxReg,
+ int Offset) {
+ MachineBasicBlock::iterator I = LoopBB.begin();
+
+ // Read the next variant into VCC (lower 32 bits) <- also loop target
+ BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), AMDGPU::VCC_LO)
+ .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
+
+ // Move index from VCC into M0
+ BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
+ .addReg(AMDGPU::VCC_LO);
+
+ // Compare the just read M0 value to all possible Idx values
+ BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32))
+ .addReg(AMDGPU::M0)
+ .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));
+
+ // Update EXEC, save the original EXEC value to VCC
+ BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
+ .addReg(AMDGPU::VCC);
+
+ if (Offset != 0) {
+ BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
+ .addReg(AMDGPU::M0)
+ .addImm(Offset);
+ }
+
+ // Do the actual move
+ LoopBB.insert(I, MovRel);
+
+ // Update EXEC, switch all done bits to 0 and all todo bits to 1
+ BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
+ .addReg(AMDGPU::EXEC)
+ .addReg(AMDGPU::VCC);
+
+ // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
+ BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
+ .addMBB(&LoopBB);
+}
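emitLoadM0FromVGPRLoop() above builds the classic "waterfall" loop: pick one
active lane's index, enable exactly the lanes that share it, run the MovRel
for them, then loop until every originally active lane has been serviced. A
scalar C++ model of that control flow; firstActiveLane() and cmpEq() are
illustrative stand-ins for the hardware behavior, not real APIs:

    #include <cstdint>

    static int firstActiveLane(uint64_t Exec) {
      return __builtin_ctzll(Exec); // GCC/Clang builtin, for the sketch only
    }

    static uint64_t cmpEq(const uint32_t Idx[64], uint32_t V) {
      uint64_t Mask = 0;
      for (int L = 0; L < 64; ++L)
        if (Idx[L] == V)
          Mask |= (1ull << L);
      return Mask;
    }

    static void waterfallModel(uint64_t Exec, const uint32_t Idx[64],
                               int Offset) {
      do {
        uint32_t M0 = Idx[firstActiveLane(Exec)]; // V_READFIRSTLANE + S_MOV
        uint64_t VCC = cmpEq(Idx, M0) & Exec;     // V_CMP_EQ_U32_e32
        uint64_t Saved = Exec;                    // S_AND_SAVEEXEC_B64 saves
        Exec &= VCC;                              // ...and masks EXEC
        M0 += Offset;                             // optional S_ADD_I32
        (void)M0; // the MovRel runs here for exactly the enabled lanes
        Exec = Saved ^ Exec;                      // S_XOR_B64 clears done lanes
      } while (Exec != 0);                        // S_CBRANCH_EXECNZ
    }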
+
+MachineBasicBlock *SILowerControlFlow::insertSkipBlock(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const {
+ MachineFunction *MF = MBB.getParent();
+
+ MachineBasicBlock *SkipBB = MF->CreateMachineBasicBlock();
+ MachineFunction::iterator MBBI(MBB);
+ ++MBBI;
+
+ MF->insert(MBBI, SkipBB);
+
+ return SkipBB;
+}
+
+std::pair<MachineBasicBlock *, MachineBasicBlock *>
+SILowerControlFlow::splitBlock(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) {
+ MachineFunction *MF = MBB.getParent();
+ // To insert the loop we need to split the block. Move everything after this
+ // point to a new block, and insert a new empty block between the two.
+ MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
+ MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
+ MachineFunction::iterator MBBI(MBB);
+ ++MBBI;
+
+ MF->insert(MBBI, LoopBB);
+ MF->insert(MBBI, RemainderBB);
+
+ // Move the rest of the block into a new block.
+ RemainderBB->transferSuccessors(&MBB);
+ RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
+
+ MBB.addSuccessor(LoopBB);
+
+ return std::make_pair(LoopBB, RemainderBB);
+}
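In CFG terms, splitBlock() turns one block into a chain of three; right after
the call the edges look like this (loadM0 adds LoopBB's own edges afterwards,
as seen below):

    before:  MBB --> old successors
    after:   MBB --> LoopBB        RemainderBB --> old successors
             (loadM0 then adds LoopBB --> LoopBB and LoopBB --> RemainderBB)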
+
+// Returns true if a new block was inserted.
+bool SILowerControlFlow::loadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset) {
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = MI.getDebugLoc();
- MachineBasicBlock::iterator I = MI;
+ MachineBasicBlock::iterator I(&MI);
- unsigned Save = MI.getOperand(1).getReg();
- unsigned Idx = MI.getOperand(3).getReg();
+ const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
- if (AMDGPU::SReg_32RegClass.contains(Idx)) {
- if (Offset) {
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
- .addReg(Idx)
- .addImm(Offset);
+ if (AMDGPU::SReg_32RegClass.contains(Idx->getReg())) {
+ if (Offset != 0) {
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
+ .addReg(Idx->getReg(), getUndefRegState(Idx->isUndef()))
+ .addImm(Offset);
} else {
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
- .addReg(Idx);
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
+ .addReg(Idx->getReg(), getUndefRegState(Idx->isUndef()));
}
+
MBB.insert(I, MovRel);
- } else {
+ MI.eraseFromParent();
+ return false;
+ }
- assert(AMDGPU::SReg_64RegClass.contains(Save));
- assert(AMDGPU::VGPR_32RegClass.contains(Idx));
+ MachineOperand *SaveOp = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
+ SaveOp->setIsDead(false);
+ unsigned Save = SaveOp->getReg();
- // Save the EXEC mask
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
- .addReg(AMDGPU::EXEC);
+ // Reading from a VGPR requires looping over all workitems in the wavefront.
+ assert(AMDGPU::SReg_64RegClass.contains(Save) &&
+ AMDGPU::VGPR_32RegClass.contains(Idx->getReg()));
- // Read the next variant into VCC (lower 32 bits) <- also loop target
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
- AMDGPU::VCC_LO)
- .addReg(Idx);
+ // Save the EXEC mask
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), Save)
+ .addReg(AMDGPU::EXEC);
- // Move index from VCC into M0
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
- .addReg(AMDGPU::VCC_LO);
+ LivePhysRegs RemainderLiveRegs(TRI);
- // Compare the just read M0 value to all possible Idx values
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32))
- .addReg(AMDGPU::M0)
- .addReg(Idx);
+ RemainderLiveRegs.addLiveOuts(MBB);
- // Update EXEC, save the original EXEC value to VCC
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
- .addReg(AMDGPU::VCC);
+ MachineBasicBlock *LoopBB;
+ MachineBasicBlock *RemainderBB;
- if (Offset) {
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
- .addReg(AMDGPU::M0)
- .addImm(Offset);
- }
- // Do the actual move
- MBB.insert(I, MovRel);
+ std::tie(LoopBB, RemainderBB) = splitBlock(MBB, I);
- // Update EXEC, switch all done bits to 0 and all todo bits to 1
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
- .addReg(AMDGPU::EXEC)
- .addReg(AMDGPU::VCC);
+ for (const MachineInstr &Inst : reverse(*RemainderBB))
+ RemainderLiveRegs.stepBackward(Inst);
- // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
- .addImm(-7);
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ LoopBB->addSuccessor(RemainderBB);
+ LoopBB->addSuccessor(LoopBB);
- // Restore EXEC
- BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
- .addReg(Save);
+ splitLoadM0BlockLiveIns(RemainderLiveRegs, MRI, MI, *LoopBB,
+ *RemainderBB, Save, *Idx);
+
+ emitLoadM0FromVGPRLoop(*LoopBB, DL, MovRel, *Idx, Offset);
+
+ MachineBasicBlock::iterator First = RemainderBB->begin();
+ BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
+ .addReg(Save);
- }
MI.eraseFromParent();
+ return true;
}
-/// \param @VecReg The register which holds element zero of the vector
-/// being addressed into.
-/// \param[out] @Reg The base register to use in the indirect addressing instruction.
-/// \param[in,out] @Offset As an input, this is the constant offset part of the
-// indirect Index. e.g. v0 = v[VecReg + Offset]
-// As an output, this is a constant value that needs
-// to be added to the value stored in M0.
-void SILowerControlFlowPass::computeIndirectRegAndOffset(unsigned VecReg,
- unsigned &Reg,
- int &Offset) {
+/// \param @VecReg The register which holds element zero of the vector being
+/// addressed into.
+///
+/// \param[in] @Offset The constant offset part of the indirect index, e.g.
+/// v0 = v[VecReg + Offset].
+///
+/// \returns The base register to use in the indirect addressing instruction,
+/// and a constant value that still needs to be added to the value stored
+/// in M0.
+std::pair<unsigned, int>
+SILowerControlFlow::computeIndirectRegAndOffset(unsigned VecReg, int Offset) const {
unsigned SubReg = TRI->getSubReg(VecReg, AMDGPU::sub0);
if (!SubReg)
SubReg = VecReg;
+ const TargetRegisterClass *SuperRC = TRI->getPhysRegClass(VecReg);
const TargetRegisterClass *RC = TRI->getPhysRegClass(SubReg);
- int RegIdx = TRI->getHWRegIndex(SubReg) + Offset;
+ int NumElts = SuperRC->getSize() / RC->getSize();
+
+ int BaseRegIdx = TRI->getHWRegIndex(SubReg);
+
+ // Skip out of bounds offsets, or else we would end up using an undefined
+ // register.
+ if (Offset >= NumElts)
+ return std::make_pair(RC->getRegister(BaseRegIdx), Offset);
+ int RegIdx = BaseRegIdx + Offset;
if (RegIdx < 0) {
Offset = RegIdx;
RegIdx = 0;
@@ -420,77 +621,102 @@ void SILowerControlFlowPass::computeIndirectRegAndOffset(unsigned VecReg,
Offset = 0;
}
- Reg = RC->getRegister(RegIdx);
+ unsigned Reg = RC->getRegister(RegIdx);
+ return std::make_pair(Reg, Offset);
}
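A worked example of the split above, modeled as a standalone C++ helper; the
register is represented by its index within the class, and the base index and
element count are assumptions for illustration:

    #include <utility>

    static std::pair<int, int> regAndOffset(int BaseRegIdx, int NumElts,
                                            int Offset) {
      if (Offset >= NumElts)      // out of bounds: leave the offset for M0
        return {BaseRegIdx, Offset};
      int RegIdx = BaseRegIdx + Offset;
      if (RegIdx < 0) {
        Offset = RegIdx;          // carry the negative remainder in M0
        RegIdx = 0;
      } else {
        Offset = 0;               // fully folded into the register choice
      }
      return {RegIdx, Offset};
    }

    // e.g. with base index 8 and a 4-element vector:
    //   regAndOffset(8, 4, 2) == {10, 0}  (element register used directly)
    //   regAndOffset(8, 4, 5) == {8, 5}   (out of bounds, offset kept for M0)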
-void SILowerControlFlowPass::IndirectSrc(MachineInstr &MI) {
-
+// Return true if a new block was inserted.
+bool SILowerControlFlow::indirectSrc(MachineInstr &MI) {
MachineBasicBlock &MBB = *MI.getParent();
- DebugLoc DL = MI.getDebugLoc();
+ const DebugLoc &DL = MI.getDebugLoc();
unsigned Dst = MI.getOperand(0).getReg();
- unsigned Vec = MI.getOperand(2).getReg();
- int Off = MI.getOperand(4).getImm();
+ const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
+ int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
unsigned Reg;
- computeIndirectRegAndOffset(Vec, Reg, Off);
+ std::tie(Reg, Offset) = computeIndirectRegAndOffset(SrcVec->getReg(), Offset);
+
+ const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
+ if (Idx->getReg() == AMDGPU::NoRegister) {
+ // Only had a constant offset, copy the register directly.
+ BuildMI(MBB, MI.getIterator(), DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
+ .addReg(Reg, getUndefRegState(SrcVec->isUndef()));
+ MI.eraseFromParent();
+ return false;
+ }
MachineInstr *MovRel =
BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
- .addReg(Reg)
- .addReg(Vec, RegState::Implicit);
+ .addReg(Reg, getUndefRegState(SrcVec->isUndef()))
+ .addReg(SrcVec->getReg(), RegState::Implicit);
- LoadM0(MI, MovRel, Off);
+ return loadM0(MI, MovRel, Offset);
}
-void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {
-
+// Return true if a new block was inserted.
+bool SILowerControlFlow::indirectDst(MachineInstr &MI) {
MachineBasicBlock &MBB = *MI.getParent();
- DebugLoc DL = MI.getDebugLoc();
+ const DebugLoc &DL = MI.getDebugLoc();
unsigned Dst = MI.getOperand(0).getReg();
- int Off = MI.getOperand(4).getImm();
- unsigned Val = MI.getOperand(5).getReg();
+ int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
unsigned Reg;
- computeIndirectRegAndOffset(Dst, Reg, Off);
+ const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
+ std::tie(Reg, Offset) = computeIndirectRegAndOffset(Dst, Offset);
- MachineInstr *MovRel =
- BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
- .addReg(Reg, RegState::Define)
- .addReg(Val)
- .addReg(Dst, RegState::Implicit);
+ MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
+ if (Idx->getReg() == AMDGPU::NoRegister) {
+ // Only had a constant offset, copy the register directly.
+ BuildMI(MBB, MI.getIterator(), DL, TII->get(AMDGPU::V_MOV_B32_e32), Reg)
+ .addOperand(*Val);
+ MI.eraseFromParent();
+ return false;
+ }
+
+ MachineInstr *MovRel =
+ BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32), Reg)
+ .addReg(Val->getReg(), getUndefRegState(Val->isUndef()))
+ .addReg(Dst, RegState::Implicit);
- LoadM0(MI, MovRel, Off);
+ return loadM0(MI, MovRel, Offset);
}
-bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
- TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
- TRI =
- static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+ TII = ST.getInstrInfo();
+ TRI = &TII->getRegisterInfo();
+
SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
bool HaveKill = false;
- bool NeedWQM = false;
bool NeedFlat = false;
unsigned Depth = 0;
- for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
- BI != BE; ++BI) {
+ MachineFunction::iterator NextBB;
+ for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
+ BI != BE; BI = NextBB) {
+ NextBB = std::next(BI);
MachineBasicBlock &MBB = *BI;
+
+ MachineBasicBlock *EmptyMBBAtEnd = nullptr;
MachineBasicBlock::iterator I, Next;
+ bool ExecModified = false;
+
for (I = MBB.begin(); I != MBB.end(); I = Next) {
Next = std::next(I);
MachineInstr &MI = *I;
- if (TII->isWQM(MI) || TII->isDS(MI))
- NeedWQM = true;
// Flat uses m0 in case it needs to access LDS.
if (TII->isFLAT(MI))
NeedFlat = true;
+ if (I->modifiesRegister(AMDGPU::EXEC, TRI))
+ ExecModified = true;
+
switch (MI.getOpcode()) {
default: break;
case AMDGPU::SI_IF:
@@ -499,7 +725,7 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
break;
case AMDGPU::SI_ELSE:
- Else(MI);
+ Else(MI, ExecModified);
break;
case AMDGPU::SI_BREAK:
@@ -521,16 +747,20 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
case AMDGPU::SI_END_CF:
if (--Depth == 0 && HaveKill) {
- SkipIfDead(MI);
HaveKill = false;
+ // TODO: Insert skip if exec is 0?
}
+
EndCf(MI);
break;
- case AMDGPU::SI_KILL:
- if (Depth == 0)
- SkipIfDead(MI);
- else
+ case AMDGPU::SI_KILL_TERMINATOR:
+ if (Depth == 0) {
+ if (skipIfDead(MI, *NextBB)) {
+ NextBB = std::next(BI);
+ BE = MF.end();
+ }
+ } else
HaveKill = true;
Kill(MI);
break;
@@ -544,7 +774,15 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
case AMDGPU::SI_INDIRECT_SRC_V4:
case AMDGPU::SI_INDIRECT_SRC_V8:
case AMDGPU::SI_INDIRECT_SRC_V16:
- IndirectSrc(MI);
+ if (indirectSrc(MI)) {
+        // The block was split at this point. We can safely skip from the
+        // inserted middle block to the following block, which contains the
+        // rest of this block's instructions.
+ NextBB = std::next(BI);
+ BE = MF.end();
+ Next = MBB.end();
+ }
+
break;
case AMDGPU::SI_INDIRECT_DST_V1:
@@ -552,55 +790,46 @@ bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
case AMDGPU::SI_INDIRECT_DST_V4:
case AMDGPU::SI_INDIRECT_DST_V8:
case AMDGPU::SI_INDIRECT_DST_V16:
- IndirectDst(MI);
+ if (indirectDst(MI)) {
+        // The block was split at this point. We can safely skip from the
+        // inserted middle block to the following block, which contains the
+        // rest of this block's instructions.
+ NextBB = std::next(BI);
+ BE = MF.end();
+ Next = MBB.end();
+ }
+
break;
+
+ case AMDGPU::SI_RETURN: {
+ assert(!MF.getInfo<SIMachineFunctionInfo>()->returnsVoid());
+
+ // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
+ // because external bytecode will be appended at the end.
+ if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
+ // SI_RETURN is not the last instruction. Add an empty block at
+ // the end and jump there.
+ if (!EmptyMBBAtEnd) {
+ EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
+ MF.insert(MF.end(), EmptyMBBAtEnd);
+ }
+
+ MBB.addSuccessor(EmptyMBBAtEnd);
+ BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
+ .addMBB(EmptyMBBAtEnd);
+ I->eraseFromParent();
+ }
+ break;
+ }
}
}
}
- if (NeedWQM && MFI->getShaderType() == ShaderType::PIXEL) {
- MachineBasicBlock &MBB = MF.front();
- BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
- AMDGPU::EXEC).addReg(AMDGPU::EXEC);
- }
-
- // FIXME: This seems inappropriate to do here.
if (NeedFlat && MFI->IsKernel) {
- // Insert the prologue initializing the SGPRs pointing to the scratch space
- // for flat accesses.
- const MachineFrameInfo *FrameInfo = MF.getFrameInfo();
-
// TODO: What to use with function calls?
-
- // FIXME: This is reporting stack size that is used in a scratch buffer
- // rather than registers as well.
- uint64_t StackSizeBytes = FrameInfo->getStackSize();
-
- int IndirectBegin
- = static_cast<const AMDGPUInstrInfo*>(TII)->getIndirectIndexBegin(MF);
- // Convert register index to 256-byte unit.
- uint64_t StackOffset = IndirectBegin < 0 ? 0 : (4 * IndirectBegin / 256);
-
- assert((StackSizeBytes < 0xffff) && StackOffset < 0xffff &&
- "Stack limits should be smaller than 16-bits");
-
- // Initialize the flat scratch register pair.
- // TODO: Can we use one s_mov_b64 here?
-
- // Offset is in units of 256-bytes.
- MachineBasicBlock &MBB = MF.front();
- DebugLoc NoDL;
- MachineBasicBlock::iterator Start = MBB.getFirstNonPHI();
- const MCInstrDesc &SMovK = TII->get(AMDGPU::S_MOVK_I32);
-
- assert(isInt<16>(StackOffset) && isInt<16>(StackSizeBytes));
-
- BuildMI(MBB, Start, NoDL, SMovK, AMDGPU::FLAT_SCR_LO)
- .addImm(StackOffset);
-
- // Documentation says size is "per-thread scratch size in bytes"
- BuildMI(MBB, Start, NoDL, SMovK, AMDGPU::FLAT_SCR_HI)
- .addImm(StackSizeBytes);
+    // We will need to initialize the flat scratch register pair.
+    MFI->setHasFlatInstructions(true);
}
return true;
diff --git a/lib/Target/AMDGPU/SILowerI1Copies.cpp b/lib/Target/AMDGPU/SILowerI1Copies.cpp
index a2fa5fd93aad..dc1d20ddb274 100644
--- a/lib/Target/AMDGPU/SILowerI1Copies.cpp
+++ b/lib/Target/AMDGPU/SILowerI1Copies.cpp
@@ -18,7 +18,6 @@
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -47,8 +46,6 @@ public:
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<MachineDominatorTree>();
- AU.addPreserved<MachineDominatorTree>();
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -56,11 +53,8 @@ public:
} // End anonymous namespace.
-INITIALIZE_PASS_BEGIN(SILowerI1Copies, DEBUG_TYPE,
- "SI Lower i1 Copies", false, false)
-INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
-INITIALIZE_PASS_END(SILowerI1Copies, DEBUG_TYPE,
- "SI Lower i1 Copies", false, false)
+INITIALIZE_PASS(SILowerI1Copies, DEBUG_TYPE,
+ "SI Lower i1 Copies", false, false)
char SILowerI1Copies::ID = 0;
@@ -72,9 +66,10 @@ FunctionPass *llvm::createSILowerI1CopiesPass() {
bool SILowerI1Copies::runOnMachineFunction(MachineFunction &MF) {
MachineRegisterInfo &MRI = MF.getRegInfo();
- const SIInstrInfo *TII =
- static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+ const SIInstrInfo *TII = ST.getInstrInfo();
+ const TargetRegisterInfo *TRI = &TII->getRegisterInfo();
+
std::vector<unsigned> I1Defs;
for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
diff --git a/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index 49677fc2b0a3..4d12a1ef9a93 100644
--- a/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -1,19 +1,17 @@
-//===-- SIMachineFunctionInfo.cpp - SI Machine Function Info -------===//
+//===-- SIMachineFunctionInfo.cpp -------- SI Machine Function Info -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
-/// \file
//===----------------------------------------------------------------------===//
-
#include "SIMachineFunctionInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
@@ -22,6 +20,11 @@
using namespace llvm;
+static cl::opt<bool> EnableSpillSGPRToVGPR(
+ "amdgpu-spill-sgpr-to-vgpr",
+ cl::desc("Enable spilling VGPRs to SGPRs"),
+ cl::ReallyHidden,
+ cl::init(true));
// Pin the vtable to this file.
void SIMachineFunctionInfo::anchor() {}
@@ -48,12 +51,20 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
PrivateSegmentWaveByteOffsetSystemSGPR(AMDGPU::NoRegister),
PSInputAddr(0),
ReturnsVoid(true),
+ MaximumWorkGroupSize(0),
+ DebuggerReservedVGPRCount(0),
+ DebuggerWorkGroupIDStackObjectIndices({{0, 0, 0}}),
+ DebuggerWorkItemIDStackObjectIndices({{0, 0, 0}}),
LDSWaveSpillSize(0),
PSInputEna(0),
NumUserSGPRs(0),
NumSystemSGPRs(0),
HasSpilledSGPRs(false),
HasSpilledVGPRs(false),
+ HasNonSpillStackObjects(false),
+ HasFlatInstructions(false),
+ NumSpilledSGPRs(0),
+ NumSpilledVGPRs(0),
PrivateSegmentBuffer(false),
DispatchPtr(false),
QueuePtr(false),
@@ -63,37 +74,45 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
GridWorkgroupCountX(false),
GridWorkgroupCountY(false),
GridWorkgroupCountZ(false),
- WorkGroupIDX(true),
+ WorkGroupIDX(false),
WorkGroupIDY(false),
WorkGroupIDZ(false),
WorkGroupInfo(false),
PrivateSegmentWaveByteOffset(false),
- WorkItemIDX(true),
+ WorkItemIDX(false),
WorkItemIDY(false),
WorkItemIDZ(false) {
- const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
const Function *F = MF.getFunction();
PSInputAddr = AMDGPU::getInitialPSInputAddr(*F);
const MachineFrameInfo *FrameInfo = MF.getFrameInfo();
- if (getShaderType() == ShaderType::COMPUTE)
+ if (!AMDGPU::isShader(F->getCallingConv())) {
KernargSegmentPtr = true;
+ WorkGroupIDX = true;
+ WorkItemIDX = true;
+ }
- if (F->hasFnAttribute("amdgpu-work-group-id-y"))
+ if (F->hasFnAttribute("amdgpu-work-group-id-y") || ST.debuggerEmitPrologue())
WorkGroupIDY = true;
- if (F->hasFnAttribute("amdgpu-work-group-id-z"))
+ if (F->hasFnAttribute("amdgpu-work-group-id-z") || ST.debuggerEmitPrologue())
WorkGroupIDZ = true;
- if (F->hasFnAttribute("amdgpu-work-item-id-y"))
+ if (F->hasFnAttribute("amdgpu-work-item-id-y") || ST.debuggerEmitPrologue())
WorkItemIDY = true;
- if (F->hasFnAttribute("amdgpu-work-item-id-z"))
+ if (F->hasFnAttribute("amdgpu-work-item-id-z") || ST.debuggerEmitPrologue())
WorkItemIDZ = true;
- bool MaySpill = ST.isVGPRSpillingEnabled(this);
+ // X, XY, and XYZ are the only supported combinations, so make sure Y is
+ // enabled if Z is.
+ if (WorkItemIDZ)
+ WorkItemIDY = true;
+
+ bool MaySpill = ST.isVGPRSpillingEnabled(*F);
bool HasStackObjects = FrameInfo->hasStackObjects();
if (HasStackObjects || MaySpill)
@@ -105,12 +124,25 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
if (F->hasFnAttribute("amdgpu-dispatch-ptr"))
DispatchPtr = true;
+
+ if (F->hasFnAttribute("amdgpu-queue-ptr"))
+ QueuePtr = true;
}
- // X, XY, and XYZ are the only supported combinations, so make sure Y is
- // enabled if Z is.
- if (WorkItemIDZ)
- WorkItemIDY = true;
+ // We don't need to worry about accessing spills with flat instructions.
+ // TODO: On VI where we must use flat for global, we should be able to omit
+ // this if it is never used for generic access.
+ if (HasStackObjects && ST.getGeneration() >= SISubtarget::SEA_ISLANDS &&
+ ST.isAmdHsaOS())
+ FlatScratchInit = true;
+
+ if (AMDGPU::isCompute(F->getCallingConv()))
+ MaximumWorkGroupSize = AMDGPU::getMaximumWorkGroupSize(*F);
+ else
+ MaximumWorkGroupSize = ST.getWavefrontSize();
+
+ if (ST.debuggerReserveRegs())
+ DebuggerReservedVGPRCount = 4;
}
unsigned SIMachineFunctionInfo::addPrivateSegmentBuffer(
@@ -142,13 +174,24 @@ unsigned SIMachineFunctionInfo::addKernargSegmentPtr(const SIRegisterInfo &TRI)
return KernargSegmentPtrUserSGPR;
}
-SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg(
+unsigned SIMachineFunctionInfo::addFlatScratchInit(const SIRegisterInfo &TRI) {
+ FlatScratchInitUserSGPR = TRI.getMatchingSuperReg(
+ getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
+ NumUserSGPRs += 2;
+ return FlatScratchInitUserSGPR;
+}
+
+SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg(
MachineFunction *MF,
unsigned FrameIndex,
unsigned SubIdx) {
- const MachineFrameInfo *FrameInfo = MF->getFrameInfo();
- const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
- MF->getSubtarget<AMDGPUSubtarget>().getRegisterInfo());
+ if (!EnableSpillSGPRToVGPR)
+ return SpilledReg();
+
+ const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
+ const SIRegisterInfo *TRI = ST.getRegisterInfo();
+
+ MachineFrameInfo *FrameInfo = MF->getFrameInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
int64_t Offset = FrameInfo->getObjectOffset(FrameIndex);
Offset += SubIdx * 4;
@@ -157,19 +200,14 @@ SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg(
unsigned Lane = (Offset / 4) % 64;
struct SpilledReg Spill;
+ Spill.Lane = Lane;
if (!LaneVGPRs.count(LaneVGPRIdx)) {
unsigned LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass);
- if (LaneVGPR == AMDGPU::NoRegister) {
- LLVMContext &Ctx = MF->getFunction()->getContext();
- Ctx.emitError("Ran out of VGPRs for spilling SGPR");
-
- // When compiling from inside Mesa, the compilation continues.
- // Select an arbitrary register to avoid triggering assertions
- // during subsequent passes.
- LaneVGPR = AMDGPU::VGPR0;
- }
+ if (LaneVGPR == AMDGPU::NoRegister)
+ // We have no VGPRs left for spilling SGPRs.
+ return Spill;
LaneVGPRs[LaneVGPRIdx] = LaneVGPR;
@@ -182,14 +220,10 @@ SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg(
}
Spill.VGPR = LaneVGPRs[LaneVGPRIdx];
- Spill.Lane = Lane;
return Spill;
}
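For reference, the lane bookkeeping above packs 64 four-byte spill slots into each lane VGPR, so a byte offset maps to (VGPR index, lane) = (Offset / 256, (Offset / 4) % 64). A standalone sketch of that mapping (plain C++ with illustrative names, not part of the patch; it assumes the 4-byte slot and 64-lane figures used by getSpilledReg above):

#include <cassert>

struct SpillSlot {
  unsigned LaneVGPRIdx; // which lane VGPR holds the slot
  unsigned Lane;        // which of its 64 lanes
};

// Assumes 4-byte slots and 64 lanes per VGPR, as in getSpilledReg.
static SpillSlot mapSpillOffset(unsigned Offset, unsigned SubIdx) {
  Offset += SubIdx * 4; // each sub-register occupies one 4-byte slot
  return {Offset / (4 * 64), (Offset / 4) % 64};
}

int main() {
  assert(mapSpillOffset(252, 0).Lane == 63);       // last lane of VGPR 0
  assert(mapSpillOffset(256, 0).LaneVGPRIdx == 1); // wraps to the next VGPR
  return 0;
}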
unsigned SIMachineFunctionInfo::getMaximumWorkGroupSize(
const MachineFunction &MF) const {
- const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
- // FIXME: We should get this information from kernel attributes if it
- // is available.
- return getShaderType() == ShaderType::COMPUTE ? 256 : ST.getWavefrontSize();
+ return MaximumWorkGroupSize;
}
diff --git a/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/lib/Target/AMDGPU/SIMachineFunctionInfo.h
index 846ee5de057d..f5bd6366c717 100644
--- a/lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ b/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -11,12 +11,12 @@
//
//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_SIMACHINEFUNCTIONINFO_H
-#define LLVM_LIB_TARGET_R600_SIMACHINEFUNCTIONINFO_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_SIMACHINEFUNCTIONINFO_H
+#define LLVM_LIB_TARGET_AMDGPU_SIMACHINEFUNCTIONINFO_H
#include "AMDGPUMachineFunction.h"
#include "SIRegisterInfo.h"
+#include <array>
#include <map>
namespace llvm {
@@ -25,7 +25,7 @@ class MachineRegisterInfo;
/// This class keeps track of the SPI_SP_INPUT_ADDR config register, which
/// tells the hardware which interpolation parameters to load.
-class SIMachineFunctionInfo : public AMDGPUMachineFunction {
+class SIMachineFunctionInfo final : public AMDGPUMachineFunction {
// FIXME: This should be removed and getPreloadedValue moved here.
friend struct SIRegisterInfo;
void anchor() override;
@@ -61,6 +61,15 @@ class SIMachineFunctionInfo : public AMDGPUMachineFunction {
unsigned PSInputAddr;
bool ReturnsVoid;
+ unsigned MaximumWorkGroupSize;
+
+ // Number of reserved VGPRs for debugger usage.
+ unsigned DebuggerReservedVGPRCount;
+ // Stack object indices for work group IDs.
+ std::array<int, 3> DebuggerWorkGroupIDStackObjectIndices;
+ // Stack object indices for work item IDs.
+ std::array<int, 3> DebuggerWorkItemIDStackObjectIndices;
+
public:
// FIXME: Make private
unsigned LDSWaveSpillSize;
@@ -73,6 +82,11 @@ public:
private:
bool HasSpilledSGPRs;
bool HasSpilledVGPRs;
+ bool HasNonSpillStackObjects;
+ bool HasFlatInstructions;
+
+ unsigned NumSpilledSGPRs;
+ unsigned NumSpilledVGPRs;
// Feature bits required for inputs passed in user SGPRs.
bool PrivateSegmentBuffer : 1;
@@ -96,7 +110,6 @@ private:
bool WorkItemIDY : 1;
bool WorkItemIDZ : 1;
-
MCPhysReg getNextUserSGPR() const {
assert(NumSystemSGPRs == 0 && "System SGPRs must be added after user SGPRs");
return AMDGPU::SGPR0 + NumUserSGPRs;
@@ -111,8 +124,9 @@ public:
unsigned VGPR;
int Lane;
SpilledReg(unsigned R, int L) : VGPR (R), Lane (L) { }
- SpilledReg() : VGPR(0), Lane(-1) { }
+ SpilledReg() : VGPR(AMDGPU::NoRegister), Lane(-1) { }
bool hasLane() { return Lane != -1;}
+ bool hasReg() { return VGPR != AMDGPU::NoRegister;}
};
// SIMachineFunctionInfo definition
@@ -129,6 +143,7 @@ public:
unsigned addDispatchPtr(const SIRegisterInfo &TRI);
unsigned addQueuePtr(const SIRegisterInfo &TRI);
unsigned addKernargSegmentPtr(const SIRegisterInfo &TRI);
+ unsigned addFlatScratchInit(const SIRegisterInfo &TRI);
// Add system SGPRs.
unsigned addWorkGroupIDX() {
@@ -161,6 +176,10 @@ public:
return PrivateSegmentWaveByteOffsetSystemSGPR;
}
+ void setPrivateSegmentWaveByteOffset(unsigned Reg) {
+ PrivateSegmentWaveByteOffsetSystemSGPR = Reg;
+ }
+
bool hasPrivateSegmentBuffer() const {
return PrivateSegmentBuffer;
}
@@ -261,6 +280,10 @@ public:
ScratchWaveOffsetReg = Reg;
}
+ unsigned getQueuePtrUserSGPR() const {
+ return QueuePtrUserSGPR;
+ }
+
bool hasSpilledSGPRs() const {
return HasSpilledSGPRs;
}
@@ -277,6 +300,38 @@ public:
HasSpilledVGPRs = Spill;
}
+ bool hasNonSpillStackObjects() const {
+ return HasNonSpillStackObjects;
+ }
+
+ void setHasNonSpillStackObjects(bool StackObject = true) {
+ HasNonSpillStackObjects = StackObject;
+ }
+
+ bool hasFlatInstructions() const {
+ return HasFlatInstructions;
+ }
+
+ void setHasFlatInstructions(bool UseFlat = true) {
+ HasFlatInstructions = UseFlat;
+ }
+
+ unsigned getNumSpilledSGPRs() const {
+ return NumSpilledSGPRs;
+ }
+
+ unsigned getNumSpilledVGPRs() const {
+ return NumSpilledVGPRs;
+ }
+
+ void addToSpilledSGPRs(unsigned num) {
+ NumSpilledSGPRs += num;
+ }
+
+ void addToSpilledVGPRs(unsigned num) {
+ NumSpilledVGPRs += num;
+ }
+
unsigned getPSInputAddr() const {
return PSInputAddr;
}
@@ -297,10 +352,70 @@ public:
ReturnsVoid = Value;
}
+ /// \returns Number of reserved VGPRs for debugger usage.
+ unsigned getDebuggerReservedVGPRCount() const {
+ return DebuggerReservedVGPRCount;
+ }
+
+ /// \returns Stack object index for \p Dim's work group ID.
+ int getDebuggerWorkGroupIDStackObjectIndex(unsigned Dim) const {
+ assert(Dim < 3);
+ return DebuggerWorkGroupIDStackObjectIndices[Dim];
+ }
+
+ /// \brief Sets stack object index for \p Dim's work group ID to \p ObjectIdx.
+ void setDebuggerWorkGroupIDStackObjectIndex(unsigned Dim, int ObjectIdx) {
+ assert(Dim < 3);
+ DebuggerWorkGroupIDStackObjectIndices[Dim] = ObjectIdx;
+ }
+
+ /// \returns Stack object index for \p Dim's work item ID.
+ int getDebuggerWorkItemIDStackObjectIndex(unsigned Dim) const {
+ assert(Dim < 3);
+ return DebuggerWorkItemIDStackObjectIndices[Dim];
+ }
+
+ /// \brief Sets stack object index for \p Dim's work item ID to \p ObjectIdx.
+ void setDebuggerWorkItemIDStackObjectIndex(unsigned Dim, int ObjectIdx) {
+ assert(Dim < 3);
+ DebuggerWorkItemIDStackObjectIndices[Dim] = ObjectIdx;
+ }
+
+ /// \returns SGPR used for \p Dim's work group ID.
+ unsigned getWorkGroupIDSGPR(unsigned Dim) const {
+ switch (Dim) {
+ case 0:
+ assert(hasWorkGroupIDX());
+ return WorkGroupIDXSystemSGPR;
+ case 1:
+ assert(hasWorkGroupIDY());
+ return WorkGroupIDYSystemSGPR;
+ case 2:
+ assert(hasWorkGroupIDZ());
+ return WorkGroupIDZSystemSGPR;
+ }
+ llvm_unreachable("unexpected dimension");
+ }
+
+  /// \returns VGPR used for \p Dim's work item ID.
+ unsigned getWorkItemIDVGPR(unsigned Dim) const {
+ switch (Dim) {
+ case 0:
+ assert(hasWorkItemIDX());
+ return AMDGPU::VGPR0;
+ case 1:
+ assert(hasWorkItemIDY());
+ return AMDGPU::VGPR1;
+ case 2:
+ assert(hasWorkItemIDZ());
+ return AMDGPU::VGPR2;
+ }
+ llvm_unreachable("unexpected dimension");
+ }
+
unsigned getMaximumWorkGroupSize(const MachineFunction &MF) const;
};
} // End namespace llvm
-
#endif
diff --git a/lib/Target/AMDGPU/SIMachineScheduler.cpp b/lib/Target/AMDGPU/SIMachineScheduler.cpp
index 1cfa98430020..7125b411c603 100644
--- a/lib/Target/AMDGPU/SIMachineScheduler.cpp
+++ b/lib/Target/AMDGPU/SIMachineScheduler.cpp
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
+#include "AMDGPU.h"
#include "SIMachineScheduler.h"
-#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -295,7 +295,7 @@ static bool isDefBetween(unsigned Reg,
const MachineInstr* MI = &*UI;
if (MI->isDebugValue())
continue;
- SlotIndex InstSlot = LIS->getInstructionIndex(MI).getRegSlot();
+ SlotIndex InstSlot = LIS->getInstructionIndex(*MI).getRegSlot();
if (InstSlot >= First && InstSlot <= Last)
return true;
}
@@ -327,9 +327,9 @@ void SIScheduleBlock::initRegPressure(MachineBasicBlock::iterator BeginBlock,
BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);
  // Do not track physical registers, because the tracker mishandles them.
- for (unsigned Reg : RPTracker.getPressure().LiveInRegs) {
- if (TargetRegisterInfo::isVirtualRegister(Reg))
- LiveInRegs.insert(Reg);
+ for (const auto &RegMaskPair : RPTracker.getPressure().LiveInRegs) {
+ if (TargetRegisterInfo::isVirtualRegister(RegMaskPair.RegUnit))
+ LiveInRegs.insert(RegMaskPair.RegUnit);
}
LiveOutRegs.clear();
  // There are several possibilities to distinguish:
@@ -354,11 +354,12 @@ void SIScheduleBlock::initRegPressure(MachineBasicBlock::iterator BeginBlock,
  // The RPTracker's LiveOutRegs has 1, 3, (some correct or incorrect) 4, 5, 7
  // Comparing to LiveInRegs is not sufficient to differentiate 4 vs 5, 7
  // The use of findDefBetween removes the case 4.
- for (unsigned Reg : RPTracker.getPressure().LiveOutRegs) {
+ for (const auto &RegMaskPair : RPTracker.getPressure().LiveOutRegs) {
+ unsigned Reg = RegMaskPair.RegUnit;
if (TargetRegisterInfo::isVirtualRegister(Reg) &&
- isDefBetween(Reg, LIS->getInstructionIndex(BeginBlock).getRegSlot(),
- LIS->getInstructionIndex(EndBlock).getRegSlot(),
- MRI, LIS)) {
+ isDefBetween(Reg, LIS->getInstructionIndex(*BeginBlock).getRegSlot(),
+ LIS->getInstructionIndex(*EndBlock).getRegSlot(), MRI,
+ LIS)) {
LiveOutRegs.insert(Reg);
}
}
@@ -463,6 +464,9 @@ void SIScheduleBlock::releaseSuccessors(SUnit *SU, bool InOrOutBlock) {
for (SDep& Succ : SU->Succs) {
SUnit *SuccSU = Succ.getSUnit();
+ if (SuccSU->NodeNum >= DAG->SUnits.size())
+ continue;
+
if (BC->isSUInBlock(SuccSU, ID) != InOrOutBlock)
continue;
@@ -521,12 +525,9 @@ void SIScheduleBlock::addPred(SIScheduleBlock *Pred) {
}
Preds.push_back(Pred);
-#ifndef NDEBUG
- for (SIScheduleBlock* S : Succs) {
- if (PredID == S->getID())
- assert(!"Loop in the Block Graph!\n");
- }
-#endif
+ assert(none_of(Succs,
+ [=](SIScheduleBlock *S) { return PredID == S->getID(); }) &&
+ "Loop in the Block Graph!");
}
void SIScheduleBlock::addSucc(SIScheduleBlock *Succ) {
@@ -540,12 +541,9 @@ void SIScheduleBlock::addSucc(SIScheduleBlock *Succ) {
if (Succ->isHighLatencyBlock())
++NumHighLatencySuccessors;
Succs.push_back(Succ);
-#ifndef NDEBUG
- for (SIScheduleBlock* P : Preds) {
- if (SuccID == P->getID())
- assert("Loop in the Block Graph!\n");
- }
-#endif
+ assert(none_of(Preds,
+ [=](SIScheduleBlock *P) { return SuccID == P->getID(); }) &&
+ "Loop in the Block Graph!");
}
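Both hunks above fold hand-rolled NDEBUG loops into assert(none_of(...)). Worth noting: the removed addSucc variant passed a string literal straight to assert, which is always truthy and therefore never fired; the new form actually evaluates the predicate. A standalone sketch of the same pattern using std::none_of (llvm::none_of is the range-based equivalent):

#include <algorithm>
#include <cassert>
#include <vector>

struct Block {
  int ID;
  int getID() const { return ID; }
};

int main() {
  std::vector<Block *> Succs;
  Block A{1}, B{2};
  Succs.push_back(&A);
  Succs.push_back(&B);
  int PredID = 3;
  // Fires only if some successor already carries PredID, i.e. a cycle.
  assert(std::none_of(Succs.begin(), Succs.end(),
                      [=](Block *S) { return PredID == S->getID(); }) &&
         "Loop in the Block Graph!");
  return 0;
}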
#ifndef NDEBUG
@@ -712,8 +710,8 @@ void SIScheduleBlockCreator::colorComputeReservedDependencies() {
// Traverse TopDown, and give different colors to SUs depending
// on which combination of High Latencies they depend on.
- for (unsigned i = 0, e = DAGSize; i != e; ++i) {
- SUnit *SU = &DAG->SUnits[DAG->TopDownIndex2SU[i]];
+ for (unsigned SUNum : DAG->TopDownIndex2SU) {
+ SUnit *SU = &DAG->SUnits[SUNum];
std::set<unsigned> SUColors;
// Already given.
@@ -754,8 +752,8 @@ void SIScheduleBlockCreator::colorComputeReservedDependencies() {
// Same as before, but BottomUp.
- for (unsigned i = 0, e = DAGSize; i != e; ++i) {
- SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
+ for (unsigned SUNum : DAG->BottomUpIndex2SU) {
+ SUnit *SU = &DAG->SUnits[SUNum];
std::set<unsigned> SUColors;
// Already given.
@@ -826,8 +824,8 @@ void SIScheduleBlockCreator::colorEndsAccordingToDependencies() {
unsigned DAGSize = DAG->SUnits.size();
std::vector<int> PendingColoring = CurrentColoring;
- for (unsigned i = 0, e = DAGSize; i != e; ++i) {
- SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
+ for (unsigned SUNum : DAG->BottomUpIndex2SU) {
+ SUnit *SU = &DAG->SUnits[SUNum];
std::set<unsigned> SUColors;
std::set<unsigned> SUColorsPending;
@@ -893,8 +891,8 @@ void SIScheduleBlockCreator::colorForceConsecutiveOrderInGroup() {
void SIScheduleBlockCreator::colorMergeConstantLoadsNextGroup() {
unsigned DAGSize = DAG->SUnits.size();
- for (unsigned i = 0, e = DAGSize; i != e; ++i) {
- SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
+ for (unsigned SUNum : DAG->BottomUpIndex2SU) {
+ SUnit *SU = &DAG->SUnits[SUNum];
std::set<unsigned> SUColors;
if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
@@ -919,8 +917,8 @@ void SIScheduleBlockCreator::colorMergeConstantLoadsNextGroup() {
void SIScheduleBlockCreator::colorMergeIfPossibleNextGroup() {
unsigned DAGSize = DAG->SUnits.size();
- for (unsigned i = 0, e = DAGSize; i != e; ++i) {
- SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
+ for (unsigned SUNum : DAG->BottomUpIndex2SU) {
+ SUnit *SU = &DAG->SUnits[SUNum];
std::set<unsigned> SUColors;
if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
@@ -940,8 +938,8 @@ void SIScheduleBlockCreator::colorMergeIfPossibleNextGroup() {
void SIScheduleBlockCreator::colorMergeIfPossibleNextGroupOnlyForReserved() {
unsigned DAGSize = DAG->SUnits.size();
- for (unsigned i = 0, e = DAGSize; i != e; ++i) {
- SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
+ for (unsigned SUNum : DAG->BottomUpIndex2SU) {
+ SUnit *SU = &DAG->SUnits[SUNum];
std::set<unsigned> SUColors;
if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
@@ -962,8 +960,8 @@ void SIScheduleBlockCreator::colorMergeIfPossibleSmallGroupsToNextGroup() {
unsigned DAGSize = DAG->SUnits.size();
std::map<unsigned, unsigned> ColorCount;
- for (unsigned i = 0, e = DAGSize; i != e; ++i) {
- SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
+ for (unsigned SUNum : DAG->BottomUpIndex2SU) {
+ SUnit *SU = &DAG->SUnits[SUNum];
unsigned color = CurrentColoring[SU->NodeNum];
std::map<unsigned, unsigned>::iterator Pos = ColorCount.find(color);
if (Pos != ColorCount.end()) {
@@ -973,8 +971,8 @@ void SIScheduleBlockCreator::colorMergeIfPossibleSmallGroupsToNextGroup() {
}
}
- for (unsigned i = 0, e = DAGSize; i != e; ++i) {
- SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
+ for (unsigned SUNum : DAG->BottomUpIndex2SU) {
+ SUnit *SU = &DAG->SUnits[SUNum];
unsigned color = CurrentColoring[SU->NodeNum];
std::set<unsigned> SUColors;
@@ -1006,8 +1004,8 @@ void SIScheduleBlockCreator::regroupNoUserInstructions() {
unsigned DAGSize = DAG->SUnits.size();
int GroupID = NextNonReservedID++;
- for (unsigned i = 0, e = DAGSize; i != e; ++i) {
- SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
+ for (unsigned SUNum : DAG->BottomUpIndex2SU) {
+ SUnit *SU = &DAG->SUnits[SUNum];
bool hasSuccessor = false;
if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
@@ -1223,7 +1221,7 @@ void SIScheduleBlockCreator::scheduleInsideBlocks() {
  // is the most CPU-intensive operation of the scheduler.
  // It would gain a lot if there were a way to recompute the
// LiveIntervals for the entire scheduling region.
- DAG->getLIS()->handleMove(MI, /*UpdateFlags=*/true);
+ DAG->getLIS()->handleMove(*MI, /*UpdateFlags=*/true);
PosNew.push_back(CurrentTopFastSched);
}
}
@@ -1249,7 +1247,7 @@ void SIScheduleBlockCreator::scheduleInsideBlocks() {
DAG->getBB()->splice(POld, DAG->getBB(), PNew);
// Update LiveIntervals.
- DAG->getLIS()->handleMove(POld, /*UpdateFlags=*/true);
+ DAG->getLIS()->handleMove(*POld, /*UpdateFlags=*/true);
}
}
@@ -1675,70 +1673,10 @@ ScheduleDAGInstrs *llvm::createSIMachineScheduler(MachineSchedContext *C) {
// Does a topological sort over the SUs.
// Both TopDown and BottomUp
void SIScheduleDAGMI::topologicalSort() {
- std::vector<int> TopDownSU2Index;
- unsigned DAGSize = SUnits.size();
- std::vector<SUnit*> WorkList;
-
- DEBUG(dbgs() << "Topological Sort\n");
- WorkList.reserve(DAGSize);
-
- TopDownIndex2SU.resize(DAGSize);
- TopDownSU2Index.resize(DAGSize);
- BottomUpIndex2SU.resize(DAGSize);
-
- WorkList.push_back(&getExitSU());
- for (unsigned i = 0, e = DAGSize; i != e; ++i) {
- SUnit *SU = &SUnits[i];
- int NodeNum = SU->NodeNum;
- unsigned Degree = SU->Succs.size();
- TopDownSU2Index[NodeNum] = Degree;
- if (Degree == 0) {
- assert(SU->Succs.empty() && "SUnit should have no successors");
- WorkList.push_back(SU);
- }
- }
-
- int Id = DAGSize;
- while (!WorkList.empty()) {
- SUnit *SU = WorkList.back();
- WorkList.pop_back();
- if (SU->NodeNum < DAGSize) {
- TopDownSU2Index[SU->NodeNum] = --Id;
- TopDownIndex2SU[Id] = SU->NodeNum;
- }
- for (SDep& Pred : SU->Preds) {
- SUnit *SU = Pred.getSUnit();
- if (SU->NodeNum < DAGSize && !--TopDownSU2Index[SU->NodeNum])
- WorkList.push_back(SU);
- }
- }
-
- BottomUpIndex2SU = std::vector<int>(TopDownIndex2SU.rbegin(),
- TopDownIndex2SU.rend());
+ Topo.InitDAGTopologicalSorting();
-#ifndef NDEBUG
- // Check correctness of the ordering
- for (unsigned i = 0, e = DAGSize; i != e; ++i) {
- SUnit *SU = &SUnits[i];
- for (SDep& Pred : SU->Preds) {
- if (Pred.getSUnit()->NodeNum >= DAGSize)
- continue;
- assert(TopDownSU2Index[SU->NodeNum] >
- TopDownSU2Index[Pred.getSUnit()->NodeNum] &&
- "Wrong Top Down topological sorting");
- }
- }
- for (unsigned i = 0, e = DAGSize; i != e; ++i) {
- SUnit *SU = &SUnits[i];
- for (SDep& Succ : SU->Succs) {
- if (Succ.getSUnit()->NodeNum >= DAGSize)
- continue;
- assert(TopDownSU2Index[SU->NodeNum] <
- TopDownSU2Index[Succ.getSUnit()->NodeNum] &&
- "Wrong Bottom Up topological sorting");
- }
- }
-#endif
+ TopDownIndex2SU = std::vector<int>(Topo.begin(), Topo.end());
+ BottomUpIndex2SU = std::vector<int>(Topo.rbegin(), Topo.rend());
}
// Move low latencies further from their user without
@@ -1759,7 +1697,7 @@ void SIScheduleDAGMI::moveLowLatencies() {
for (SDep& PredDep : SU->Preds) {
SUnit *Pred = PredDep.getSUnit();
- if (SITII->isLowLatencyInstruction(Pred->getInstr())) {
+ if (SITII->isLowLatencyInstruction(*Pred->getInstr())) {
IsLowLatencyUser = true;
}
if (Pred->NodeNum >= DAGSize)
@@ -1769,7 +1707,7 @@ void SIScheduleDAGMI::moveLowLatencies() {
MinPos = PredPos + 1;
}
- if (SITII->isLowLatencyInstruction(SU->getInstr())) {
+ if (SITII->isLowLatencyInstruction(*SU->getInstr())) {
unsigned BestPos = LastLowLatencyUser + 1;
if ((int)BestPos <= LastLowLatencyPos)
BestPos = LastLowLatencyPos + 1;
@@ -1794,7 +1732,7 @@ void SIScheduleDAGMI::moveLowLatencies() {
bool CopyForLowLat = false;
for (SDep& SuccDep : SU->Succs) {
SUnit *Succ = SuccDep.getSUnit();
- if (SITII->isLowLatencyInstruction(Succ->getInstr())) {
+ if (SITII->isLowLatencyInstruction(*Succ->getInstr())) {
CopyForLowLat = true;
}
}
@@ -1855,7 +1793,6 @@ void SIScheduleDAGMI::schedule()
SU.dumpAll(this)
);
- Topo.InitDAGTopologicalSorting();
topologicalSort();
findRootsAndBiasEdges(TopRoots, BotRoots);
// We reuse several ScheduleDAGMI and ScheduleDAGMILive
@@ -1878,20 +1815,21 @@ void SIScheduleDAGMI::schedule()
for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
SUnit *SU = &SUnits[i];
- unsigned BaseLatReg, OffLatReg;
- if (SITII->isLowLatencyInstruction(SU->getInstr())) {
+ unsigned BaseLatReg;
+ int64_t OffLatReg;
+ if (SITII->isLowLatencyInstruction(*SU->getInstr())) {
IsLowLatencySU[i] = 1;
- if (SITII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseLatReg,
- OffLatReg, TRI))
+ if (SITII->getMemOpBaseRegImmOfs(*SU->getInstr(), BaseLatReg, OffLatReg,
+ TRI))
LowLatencyOffset[i] = OffLatReg;
- } else if (SITII->isHighLatencyInstruction(SU->getInstr()))
+ } else if (SITII->isHighLatencyInstruction(*SU->getInstr()))
IsHighLatencySU[i] = 1;
}
SIScheduler Scheduler(this);
Best = Scheduler.scheduleVariant(SISchedulerBlockCreatorVariant::LatenciesAlone,
SISchedulerBlockSchedulerVariant::BlockLatencyRegUsage);
-#if 0 // To enable when handleMove fix lands
+
// if VGPR usage is extremely high, try other good performing variants
// which could lead to lower VGPR usage
if (Best.MaxVGPRUsage > 180) {
@@ -1930,7 +1868,7 @@ void SIScheduleDAGMI::schedule()
Best = Temp;
}
}
-#endif
+
ScheduledSUnits = Best.SUs;
ScheduledSUnitsInv.resize(SUnits.size());
diff --git a/lib/Target/AMDGPU/SIMachineScheduler.h b/lib/Target/AMDGPU/SIMachineScheduler.h
index b270136811c6..117aed497cc2 100644
--- a/lib/Target/AMDGPU/SIMachineScheduler.h
+++ b/lib/Target/AMDGPU/SIMachineScheduler.h
@@ -418,7 +418,7 @@ public:
SISchedulerBlockSchedulerVariant ScheduleVariant);
};
-class SIScheduleDAGMI : public ScheduleDAGMILive {
+class SIScheduleDAGMI final : public ScheduleDAGMILive {
const SIInstrInfo *SITII;
const SIRegisterInfo *SITRI;
@@ -441,7 +441,7 @@ public:
// To init Block's RPTracker.
void initRPTracker(RegPressureTracker &RPTracker) {
- RPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin);
+ RPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin, false, false);
}
MachineBasicBlock *getBB() { return BB; }
@@ -460,8 +460,10 @@ public:
unsigned &VgprUsage,
unsigned &SgprUsage);
std::set<unsigned> getInRegs() {
- std::set<unsigned> InRegs (RPTracker.getPressure().LiveInRegs.begin(),
- RPTracker.getPressure().LiveInRegs.end());
+ std::set<unsigned> InRegs;
+ for (const auto &RegMaskPair : RPTracker.getPressure().LiveInRegs) {
+ InRegs.insert(RegMaskPair.RegUnit);
+ }
return InRegs;
};
diff --git a/lib/Target/AMDGPU/SIRegisterInfo.cpp b/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 025ed2b5b76b..0dd88ee45c58 100644
--- a/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -15,6 +15,7 @@
#include "SIRegisterInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
+#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
@@ -23,7 +24,75 @@
using namespace llvm;
-SIRegisterInfo::SIRegisterInfo() : AMDGPURegisterInfo() {
+static unsigned getMaxWaveCountPerSIMD(const MachineFunction &MF) {
+ const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+ unsigned SIMDPerCU = 4;
+
+ unsigned MaxInvocationsPerWave = SIMDPerCU * ST.getWavefrontSize();
+ return alignTo(MFI.getMaximumWorkGroupSize(MF), MaxInvocationsPerWave) /
+ MaxInvocationsPerWave;
+}
+
+static unsigned getMaxWorkGroupSGPRCount(const MachineFunction &MF) {
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+ unsigned MaxWaveCountPerSIMD = getMaxWaveCountPerSIMD(MF);
+
+ unsigned TotalSGPRCountPerSIMD, AddressableSGPRCount, SGPRUsageAlignment;
+ unsigned ReservedSGPRCount;
+
+ if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
+ TotalSGPRCountPerSIMD = 800;
+ AddressableSGPRCount = 102;
+ SGPRUsageAlignment = 16;
+ ReservedSGPRCount = 6; // VCC, FLAT_SCRATCH, XNACK
+ } else {
+ TotalSGPRCountPerSIMD = 512;
+ AddressableSGPRCount = 104;
+ SGPRUsageAlignment = 8;
+ ReservedSGPRCount = 2; // VCC
+ }
+
+ unsigned MaxSGPRCount = (TotalSGPRCountPerSIMD / MaxWaveCountPerSIMD);
+ MaxSGPRCount = alignDown(MaxSGPRCount, SGPRUsageAlignment);
+
+ if (ST.hasSGPRInitBug())
+ MaxSGPRCount = SISubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG;
+
+ return std::min(MaxSGPRCount - ReservedSGPRCount, AddressableSGPRCount);
+}
+
+static unsigned getMaxWorkGroupVGPRCount(const MachineFunction &MF) {
+ unsigned MaxWaveCountPerSIMD = getMaxWaveCountPerSIMD(MF);
+ unsigned TotalVGPRCountPerSIMD = 256;
+ unsigned VGPRUsageAlignment = 4;
+
+ return alignDown(TotalVGPRCountPerSIMD / MaxWaveCountPerSIMD,
+ VGPRUsageAlignment);
+}
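To make the occupancy arithmetic concrete: a standalone recomputation for a VI-class configuration (wavefront size 64, 4 SIMDs per CU), assuming a 256-thread work group, the old hard-coded compute default seen in the SIMachineFunctionInfo hunk earlier. The per-generation constants mirror the two helpers above; the work group size is just an example (alignUp/alignDn are local stand-ins for llvm::alignTo/alignDown):

#include <algorithm>
#include <cstdio>

static unsigned alignUp(unsigned V, unsigned A) { return (V + A - 1) / A * A; }
static unsigned alignDn(unsigned V, unsigned A) { return V / A * A; }

int main() {
  unsigned WavefrontSize = 64, SIMDPerCU = 4, WorkGroupSize = 256;
  unsigned MaxInvocationsPerWave = SIMDPerCU * WavefrontSize;           // 256
  unsigned WavesPerSIMD =
      alignUp(WorkGroupSize, MaxInvocationsPerWave) / MaxInvocationsPerWave; // 1
  unsigned MaxSGPRs = alignDn(800 / WavesPerSIMD, 16);                  // 800
  unsigned SGPRs = std::min(MaxSGPRs - 6, 102u);                        // 102
  unsigned VGPRs = alignDn(256 / WavesPerSIMD, 4);                      // 256
  std::printf("waves/SIMD=%u SGPRs=%u VGPRs=%u\n", WavesPerSIMD, SGPRs, VGPRs);
  return 0;
}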
+
+static bool hasPressureSet(const int *PSets, unsigned PSetID) {
+ for (unsigned i = 0; PSets[i] != -1; ++i) {
+ if (PSets[i] == (int)PSetID)
+ return true;
+ }
+ return false;
+}
+
+void SIRegisterInfo::classifyPressureSet(unsigned PSetID, unsigned Reg,
+ BitVector &PressureSets) const {
+ for (MCRegUnitIterator U(Reg, this); U.isValid(); ++U) {
+ const int *PSets = getRegUnitPressureSets(*U);
+ if (hasPressureSet(PSets, PSetID)) {
+ PressureSets.set(PSetID);
+ break;
+ }
+ }
+}
+
+SIRegisterInfo::SIRegisterInfo() : AMDGPURegisterInfo(),
+ SGPRPressureSets(getNumRegPressureSets()),
+ VGPRPressureSets(getNumRegPressureSets()) {
unsigned NumRegPressureSets = getNumRegPressureSets();
SGPR32SetID = NumRegPressureSets;
@@ -33,6 +102,9 @@ SIRegisterInfo::SIRegisterInfo() : AMDGPURegisterInfo() {
SGPR32SetID = i;
else if (strncmp("VGPR_32", getRegPressureSetName(i), 7) == 0)
VGPR32SetID = i;
+
+ classifyPressureSet(i, AMDGPU::SGPR0, SGPRPressureSets);
+ classifyPressureSet(i, AMDGPU::VGPR0, VGPRPressureSets);
}
assert(SGPR32SetID < NumRegPressureSets &&
VGPR32SetID < NumRegPressureSets);
@@ -47,38 +119,27 @@ void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved, unsigned Reg) co
unsigned SIRegisterInfo::reservedPrivateSegmentBufferReg(
const MachineFunction &MF) const {
- const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
- if (ST.hasSGPRInitBug()) {
- // Leave space for flat_scr, xnack_mask, vcc, and alignment
- unsigned BaseIdx = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 8 - 4;
- unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
- return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
- }
-
- if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
- // 96/97 need to be reserved for flat_scr, 98/99 for xnack_mask, and
- // 100/101 for vcc. This is the next sgpr128 down.
- return AMDGPU::SGPR92_SGPR93_SGPR94_SGPR95;
- }
-
- return AMDGPU::SGPR96_SGPR97_SGPR98_SGPR99;
+ unsigned BaseIdx = alignDown(getMaxWorkGroupSGPRCount(MF), 4) - 4;
+ unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
+ return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
}
unsigned SIRegisterInfo::reservedPrivateSegmentWaveByteOffsetReg(
const MachineFunction &MF) const {
- const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
- if (ST.hasSGPRInitBug()) {
- unsigned Idx = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 6 - 1;
- return AMDGPU::SGPR_32RegClass.getRegister(Idx);
- }
-
- if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
- // Next register before reservations for flat_scr, xnack_mask, vcc,
- // and scratch resource.
- return AMDGPU::SGPR91;
+ unsigned RegCount = getMaxWorkGroupSGPRCount(MF);
+ unsigned Reg;
+
+  // Try to place it in a hole after PrivateSegmentBufferReg.
+ if (RegCount & 3) {
+    // We cannot put the segment buffer in (Idx - 4) ... (Idx - 1) due to
+    // alignment constraints, so we have a hole where we can put the wave offset.
+ Reg = RegCount - 1;
+ } else {
+ // We can put the segment buffer in (Idx - 4) ... (Idx - 1) and put the
+ // wave offset before it.
+ Reg = RegCount - 5;
}
-
- return AMDGPU::SGPR95;
+ return AMDGPU::SGPR_32RegClass.getRegister(Reg);
}
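A quick sanity check of this placement (standalone, using the VI figure of 102 addressable SGPRs from the helpers above): the 128-bit segment buffer lands at alignDown(102, 4) - 4 = 96, i.e. SGPR96..SGPR99, and since 102 & 3 != 0 there is a hole above it, so the wave byte offset takes SGPR101:

#include <cassert>

static unsigned alignDown(unsigned V, unsigned A) { return V / A * A; }

int main() {
  unsigned RegCount = 102; // max work group SGPR count on VI (example)
  unsigned BufferBase = alignDown(RegCount, 4) - 4; // SGPR96..SGPR99
  unsigned WaveOffset = (RegCount & 3) ? RegCount - 1  // hole above the buffer
                                       : RegCount - 5; // slot below the buffer
  assert(BufferBase == 96 && WaveOffset == 101);
  return 0;
}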
BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
@@ -90,35 +151,30 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
reserveRegisterTuples(Reserved, AMDGPU::EXEC);
reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);
- // Reserve the last 2 registers so we will always have at least 2 more that
- // will physically contain VCC.
- reserveRegisterTuples(Reserved, AMDGPU::SGPR102_SGPR103);
-
- const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
-
- if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
- // SI/CI have 104 SGPRs. VI has 102. We need to shift down the reservation
- // for VCC/XNACK_MASK/FLAT_SCR.
- //
- // TODO The SGPRs that alias to XNACK_MASK could be used as general purpose
- // SGPRs when the XNACK feature is not used. This is currently not done
- // because the code that counts SGPRs cannot account for such holes.
- reserveRegisterTuples(Reserved, AMDGPU::SGPR96_SGPR97);
- reserveRegisterTuples(Reserved, AMDGPU::SGPR98_SGPR99);
- reserveRegisterTuples(Reserved, AMDGPU::SGPR100_SGPR101);
+ // Reserve Trap Handler registers - support is not implemented in Codegen.
+ reserveRegisterTuples(Reserved, AMDGPU::TBA);
+ reserveRegisterTuples(Reserved, AMDGPU::TMA);
+ reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
+ reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
+ reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
+ reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
+ reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
+ reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);
+
+ unsigned MaxWorkGroupSGPRCount = getMaxWorkGroupSGPRCount(MF);
+ unsigned MaxWorkGroupVGPRCount = getMaxWorkGroupVGPRCount(MF);
+
+ unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
+ unsigned NumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
+ for (unsigned i = MaxWorkGroupSGPRCount; i < NumSGPRs; ++i) {
+ unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
+ reserveRegisterTuples(Reserved, Reg);
}
- // Tonga and Iceland can only allocate a fixed number of SGPRs due
- // to a hw bug.
- if (ST.hasSGPRInitBug()) {
- unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
- // Reserve some SGPRs for FLAT_SCRATCH, XNACK_MASK, and VCC (6 SGPRs).
- unsigned Limit = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 6;
- for (unsigned i = Limit; i < NumSGPRs; ++i) {
- unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
- reserveRegisterTuples(Reserved, Reg);
- }
+ for (unsigned i = MaxWorkGroupVGPRCount; i < NumVGPRs; ++i) {
+ unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
+ reserveRegisterTuples(Reserved, Reg);
}
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
@@ -138,48 +194,182 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
assert(!isSubRegister(ScratchRSrcReg, ScratchWaveOffsetReg));
}
+  // Reserve registers for debugger usage if the "amdgpu-debugger-reserve-regs"
+  // attribute was specified.
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+ if (ST.debuggerReserveRegs()) {
+ unsigned ReservedVGPRFirst =
+ MaxWorkGroupVGPRCount - MFI->getDebuggerReservedVGPRCount();
+ for (unsigned i = ReservedVGPRFirst; i < MaxWorkGroupVGPRCount; ++i) {
+ unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
+ reserveRegisterTuples(Reserved, Reg);
+ }
+ }
+
return Reserved;
}
unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
unsigned Idx) const {
- const AMDGPUSubtarget &STI = MF.getSubtarget<AMDGPUSubtarget>();
+ const SISubtarget &STI = MF.getSubtarget<SISubtarget>();
// FIXME: We should adjust the max number of waves based on LDS size.
- unsigned SGPRLimit = getNumSGPRsAllowed(STI.getGeneration(),
- STI.getMaxWavesPerCU());
+ unsigned SGPRLimit = getNumSGPRsAllowed(STI, STI.getMaxWavesPerCU());
unsigned VGPRLimit = getNumVGPRsAllowed(STI.getMaxWavesPerCU());
unsigned VSLimit = SGPRLimit + VGPRLimit;
- for (regclass_iterator I = regclass_begin(), E = regclass_end();
- I != E; ++I) {
- const TargetRegisterClass *RC = *I;
+ if (SGPRPressureSets.test(Idx) && VGPRPressureSets.test(Idx)) {
+ // FIXME: This is a hack. We should never be considering the pressure of
+ // these since no virtual register should ever have this class.
+ return VSLimit;
+ }
- unsigned NumSubRegs = std::max((int)RC->getSize() / 4, 1);
- unsigned Limit;
+ if (SGPRPressureSets.test(Idx))
+ return SGPRLimit;
- if (isPseudoRegClass(RC)) {
- // FIXME: This is a hack. We should never be considering the pressure of
- // these since no virtual register should ever have this class.
- Limit = VSLimit;
- } else if (isSGPRClass(RC)) {
- Limit = SGPRLimit / NumSubRegs;
- } else {
- Limit = VGPRLimit / NumSubRegs;
- }
+ return VGPRLimit;
+}
+
+bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
+ return Fn.getFrameInfo()->hasStackObjects();
+}
+
+bool
+SIRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const {
+ return MF.getFrameInfo()->hasStackObjects();
+}
+
+bool SIRegisterInfo::requiresVirtualBaseRegisters(
+ const MachineFunction &) const {
+ // There are no special dedicated stack or frame pointers.
+ return true;
+}
+
+bool SIRegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
+ // This helps catch bugs as verifier errors.
+ return true;
+}
+
+int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
+ int Idx) const {
+ if (!SIInstrInfo::isMUBUF(*MI))
+ return 0;
+
+ assert(Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::vaddr) &&
+ "Should never see frame index on non-address operand");
+
+ int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
+ AMDGPU::OpName::offset);
+ return MI->getOperand(OffIdx).getImm();
+}
+
+bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
+ return MI->mayLoadOrStore();
+}
- const int *Sets = getRegClassPressureSets(RC);
- assert(Sets);
- for (unsigned i = 0; Sets[i] != -1; ++i) {
- if (Sets[i] == (int)Idx)
- return Limit;
+void SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
+ unsigned BaseReg,
+ int FrameIdx,
+ int64_t Offset) const {
+ MachineBasicBlock::iterator Ins = MBB->begin();
+ DebugLoc DL; // Defaults to "unknown"
+
+ if (Ins != MBB->end())
+ DL = Ins->getDebugLoc();
+
+ MachineFunction *MF = MBB->getParent();
+ const SISubtarget &Subtarget = MF->getSubtarget<SISubtarget>();
+ const SIInstrInfo *TII = Subtarget.getInstrInfo();
+
+ if (Offset == 0) {
+ BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), BaseReg)
+ .addFrameIndex(FrameIdx);
+ return;
+ }
+
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ unsigned UnusedCarry = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+ BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
+ .addImm(Offset);
+ BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_ADD_I32_e64), BaseReg)
+ .addReg(UnusedCarry, RegState::Define | RegState::Dead)
+ .addReg(OffsetReg, RegState::Kill)
+ .addFrameIndex(FrameIdx);
+}
+
+void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
+ int64_t Offset) const {
+
+ MachineBasicBlock *MBB = MI.getParent();
+ MachineFunction *MF = MBB->getParent();
+ const SISubtarget &Subtarget = MF->getSubtarget<SISubtarget>();
+ const SIInstrInfo *TII = Subtarget.getInstrInfo();
+
+#ifndef NDEBUG
+ // FIXME: Is it possible to be storing a frame index to itself?
+ bool SeenFI = false;
+ for (const MachineOperand &MO: MI.operands()) {
+ if (MO.isFI()) {
+ if (SeenFI)
+ llvm_unreachable("should not see multiple frame indices");
+
+ SeenFI = true;
}
}
- return 256;
+#endif
+
+ MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
+ assert(FIOp && FIOp->isFI() && "frame index must be address operand");
+
+ assert(TII->isMUBUF(MI));
+
+ MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
+ int64_t NewOffset = OffsetOp->getImm() + Offset;
+ if (isUInt<12>(NewOffset)) {
+ // If we have a legal offset, fold it directly into the instruction.
+ FIOp->ChangeToRegister(BaseReg, false);
+ OffsetOp->setImm(NewOffset);
+ return;
+ }
+
+ // The offset is not legal, so we must insert an add of the offset.
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ unsigned NewReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ DebugLoc DL = MI.getDebugLoc();
+
+ assert(Offset != 0 && "Non-zero offset expected");
+
+ unsigned UnusedCarry = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+  // If the instruction already had an immediate offset, only the requested
+  // new offset is added here, because the original immediate is left in place.
+ BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
+ .addImm(Offset);
+ BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ADD_I32_e64), NewReg)
+ .addReg(UnusedCarry, RegState::Define | RegState::Dead)
+ .addReg(OffsetReg, RegState::Kill)
+ .addReg(BaseReg);
+
+ FIOp->ChangeToRegister(NewReg, false);
}
-bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
- return Fn.getFrameInfo()->hasStackObjects();
+bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
+ unsigned BaseReg,
+ int64_t Offset) const {
+ return SIInstrInfo::isMUBUF(*MI) && isUInt<12>(Offset);
+}
+
+const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
+ const MachineFunction &MF, unsigned Kind) const {
+ // This is inaccurate. It depends on the instruction and address space. The
+ // only place where we should hit this is for dealing with frame indexes /
+ // private accesses, so this is correct in that case.
+ return &AMDGPU::VGPR_32RegClass;
}
static unsigned getNumSubRegsForSpillOp(unsigned Op) {
@@ -219,32 +409,48 @@ static unsigned getNumSubRegsForSpillOp(unsigned Op) {
void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
unsigned LoadStoreOp,
- unsigned Value,
+ const MachineOperand *SrcDst,
unsigned ScratchRsrcReg,
unsigned ScratchOffset,
int64_t Offset,
RegScavenger *RS) const {
+ unsigned Value = SrcDst->getReg();
+ bool IsKill = SrcDst->isKill();
MachineBasicBlock *MBB = MI->getParent();
- const MachineFunction *MF = MI->getParent()->getParent();
- const SIInstrInfo *TII =
- static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
- LLVMContext &Ctx = MF->getFunction()->getContext();
+ MachineFunction *MF = MI->getParent()->getParent();
+ const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
+ const SIInstrInfo *TII = ST.getInstrInfo();
+
DebugLoc DL = MI->getDebugLoc();
- bool IsLoad = TII->get(LoadStoreOp).mayLoad();
+ bool IsStore = MI->mayStore();
bool RanOutOfSGPRs = false;
bool Scavenged = false;
unsigned SOffset = ScratchOffset;
+ unsigned OriginalImmOffset = Offset;
unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
unsigned Size = NumSubRegs * 4;
if (!isUInt<12>(Offset + Size)) {
- SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
+ SOffset = AMDGPU::NoRegister;
+
+ // We don't have access to the register scavenger if this function is called
+ // during PEI::scavengeFrameVirtualRegs().
+ if (RS)
+ SOffset = RS->FindUnusedReg(&AMDGPU::SGPR_32RegClass);
+
if (SOffset == AMDGPU::NoRegister) {
+      // There are no free SGPRs, and we are in the process of spilling
+      // VGPRs too. Since we need a VGPR in order to spill SGPRs (this is true
+      // on SI/CI, and on VI it is true until we implement spilling using
+      // scalar stores), we have no way to free up an SGPR. Our solution here
+      // is to add the offset directly to the ScratchOffset register, and then
+      // subtract the offset after the spill to return ScratchOffset to its
+      // original value.
RanOutOfSGPRs = true;
- SOffset = AMDGPU::SGPR0;
+ SOffset = ScratchOffset;
} else {
Scavenged = true;
}
@@ -254,40 +460,48 @@ void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
Offset = 0;
}
- if (RanOutOfSGPRs)
- Ctx.emitError("Ran out of SGPRs for spilling VGPRS");
-
for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
unsigned SubReg = NumSubRegs > 1 ?
getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
Value;
unsigned SOffsetRegState = 0;
- if (i + 1 == e && Scavenged)
- SOffsetRegState |= RegState::Kill;
+ unsigned SrcDstRegState = getDefRegState(!IsStore);
+ if (i + 1 == e) {
+ SOffsetRegState |= getKillRegState(Scavenged);
+ // The last implicit use carries the "Kill" flag.
+ SrcDstRegState |= getKillRegState(IsKill);
+ }
BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
- .addReg(SubReg, getDefRegState(IsLoad))
+ .addReg(SubReg, getDefRegState(!IsStore))
.addReg(ScratchRsrcReg)
.addReg(SOffset, SOffsetRegState)
.addImm(Offset)
.addImm(0) // glc
.addImm(0) // slc
.addImm(0) // tfe
- .addReg(Value, RegState::Implicit | getDefRegState(IsLoad))
+ .addReg(Value, RegState::Implicit | SrcDstRegState)
.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
}
+ if (RanOutOfSGPRs) {
+ // Subtract the offset we added to the ScratchOffset register.
+ BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScratchOffset)
+ .addReg(ScratchOffset)
+ .addImm(OriginalImmOffset);
+ }
}
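The RanOutOfSGPRs path works because the oversized immediate is folded into ScratchOffset itself before the memory operations and undone by the trailing S_SUB_U32, so the register reads the same before and after the spill sequence. A minimal standalone model (plain integers stand in for the SGPR; values are arbitrary examples):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t ScratchOffset = 0x100000; // wave scratch base; example value
  uint32_t Saved = ScratchOffset;
  uint32_t BigOffset = 0x2000;       // does not fit the 12-bit immediate

  ScratchOffset += BigOffset;        // S_ADD_U32 folds it into the base
  // ... buffer loads/stores issued against the adjusted base ...
  ScratchOffset -= BigOffset;        // trailing S_SUB_U32 restores it

  assert(ScratchOffset == Saved);    // transparent to later users
  return 0;
}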
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS) const {
MachineFunction *MF = MI->getParent()->getParent();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
MachineBasicBlock *MBB = MI->getParent();
SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
MachineFrameInfo *FrameInfo = MF->getFrameInfo();
- const SIInstrInfo *TII =
- static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
+ const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
+ const SIInstrInfo *TII = ST.getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
MachineOperand &FIOp = MI->getOperand(FIOperandNum);
@@ -301,24 +515,65 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
case AMDGPU::SI_SPILL_S64_SAVE:
case AMDGPU::SI_SPILL_S32_SAVE: {
unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
+ unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ unsigned SuperReg = MI->getOperand(0).getReg();
+ bool IsKill = MI->getOperand(0).isKill();
+ // SubReg carries the "Kill" flag when SubReg == SuperReg.
+ unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill);
for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
- unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
+ unsigned SubReg = getPhysRegSubReg(SuperReg,
&AMDGPU::SGPR_32RegClass, i);
+
struct SIMachineFunctionInfo::SpilledReg Spill =
MFI->getSpilledReg(MF, Index, i);
- BuildMI(*MBB, MI, DL,
- TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
- Spill.VGPR)
- .addReg(SubReg)
- .addImm(Spill.Lane);
-
- // FIXME: Since this spills to another register instead of an actual
- // frame index, we should delete the frame index when all references to
- // it are fixed.
+ if (Spill.hasReg()) {
+ BuildMI(*MBB, MI, DL,
+ TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
+ Spill.VGPR)
+ .addReg(SubReg, getKillRegState(IsKill))
+ .addImm(Spill.Lane);
+
+ // FIXME: Since this spills to another register instead of an actual
+ // frame index, we should delete the frame index when all references to
+ // it are fixed.
+ } else {
+ // Spill SGPR to a frame index.
+        // FIXME: We should use S_STORE_DWORD here for VI.
+ MachineInstrBuilder Mov
+ = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
+ .addReg(SubReg, SubKillState);
+
+ // There could be undef components of a spilled super register.
+ // TODO: Can we detect this and skip the spill?
+ if (NumSubRegs > 1) {
+ // The last implicit use of the SuperReg carries the "Kill" flag.
+ unsigned SuperKillState = 0;
+ if (i + 1 == e)
+ SuperKillState |= getKillRegState(IsKill);
+ Mov.addReg(SuperReg, RegState::Implicit | SuperKillState);
+ }
+
+ unsigned Size = FrameInfo->getObjectSize(Index);
+ unsigned Align = FrameInfo->getObjectAlignment(Index);
+ MachinePointerInfo PtrInfo
+ = MachinePointerInfo::getFixedStack(*MF, Index);
+ MachineMemOperand *MMO
+ = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
+ Size, Align);
+ BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_SAVE))
+ .addReg(TmpReg, RegState::Kill) // src
+ .addFrameIndex(Index) // frame_idx
+ .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc
+ .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
+ .addImm(i * 4) // offset
+ .addMemOperand(MMO);
+ }
}
MI->eraseFromParent();
+ MFI->addToSpilledSGPRs(NumSubRegs);
break;
}
@@ -329,6 +584,7 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
case AMDGPU::SI_SPILL_S64_RESTORE:
case AMDGPU::SI_SPILL_S32_RESTORE: {
unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
+ unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
@@ -336,28 +592,37 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
struct SIMachineFunctionInfo::SpilledReg Spill =
MFI->getSpilledReg(MF, Index, i);
- BuildMI(*MBB, MI, DL,
- TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
- SubReg)
- .addReg(Spill.VGPR)
- .addImm(Spill.Lane)
- .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
- }
-
- // TODO: only do this when it is needed
- switch (MF->getSubtarget<AMDGPUSubtarget>().getGeneration()) {
- case AMDGPUSubtarget::SOUTHERN_ISLANDS:
- // "VALU writes SGPR" -> "SMRD reads that SGPR" needs 4 wait states
- // ("S_NOP 3") on SI
- TII->insertWaitStates(MI, 4);
- break;
- case AMDGPUSubtarget::SEA_ISLANDS:
- break;
- default: // VOLCANIC_ISLANDS and later
- // "VALU writes SGPR -> VMEM reads that SGPR" needs 5 wait states
- // ("S_NOP 4") on VI and later. This also applies to VALUs which write
- // VCC, but we're unlikely to see VMEM use VCC.
- TII->insertWaitStates(MI, 5);
+ if (Spill.hasReg()) {
+ BuildMI(*MBB, MI, DL,
+ TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
+ SubReg)
+ .addReg(Spill.VGPR)
+ .addImm(Spill.Lane)
+ .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
+ } else {
+ // Restore SGPR from a stack slot.
+ // FIXME: We should use S_LOAD_DWORD here for VI.
+
+ unsigned Align = FrameInfo->getObjectAlignment(Index);
+ unsigned Size = FrameInfo->getObjectSize(Index);
+
+ MachinePointerInfo PtrInfo
+ = MachinePointerInfo::getFixedStack(*MF, Index);
+
+ MachineMemOperand *MMO = MF->getMachineMemOperand(
+ PtrInfo, MachineMemOperand::MOLoad, Size, Align);
+
+ BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_RESTORE), TmpReg)
+ .addFrameIndex(Index) // frame_idx
+ .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc
+ .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
+ .addImm(i * 4) // offset
+ .addMemOperand(MMO);
+ BuildMI(*MBB, MI, DL,
+ TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg)
+ .addReg(TmpReg, RegState::Kill)
+ .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
+ }
}
MI->eraseFromParent();
@@ -372,11 +637,13 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
case AMDGPU::SI_SPILL_V64_SAVE:
case AMDGPU::SI_SPILL_V32_SAVE:
buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
- TII->getNamedOperand(*MI, AMDGPU::OpName::src)->getReg(),
+ TII->getNamedOperand(*MI, AMDGPU::OpName::src),
TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
- FrameInfo->getObjectOffset(Index), RS);
+ FrameInfo->getObjectOffset(Index) +
+ TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(), RS);
MI->eraseFromParent();
+ MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
break;
case AMDGPU::SI_SPILL_V32_RESTORE:
case AMDGPU::SI_SPILL_V64_RESTORE:
@@ -385,10 +652,11 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
case AMDGPU::SI_SPILL_V256_RESTORE:
case AMDGPU::SI_SPILL_V512_RESTORE: {
buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
- TII->getNamedOperand(*MI, AMDGPU::OpName::dst)->getReg(),
+ TII->getNamedOperand(*MI, AMDGPU::OpName::dst),
TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
- FrameInfo->getObjectOffset(Index), RS);
+ FrameInfo->getObjectOffset(Index) +
+ TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(), RS);
MI->eraseFromParent();
break;
}
@@ -396,8 +664,8 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
default: {
int64_t Offset = FrameInfo->getObjectOffset(Index);
FIOp.ChangeToImmediate(Offset);
- if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
- unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, SPAdj);
+ if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
+ unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
BuildMI(*MBB, MI, MI->getDebugLoc(),
TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
.addImm(Offset);
@@ -407,10 +675,6 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
}
}
-unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
- return getEncodingValue(Reg) & 0xff;
-}
-
// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
@@ -427,7 +691,8 @@ const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
&AMDGPU::VReg_256RegClass,
&AMDGPU::SReg_256RegClass,
&AMDGPU::VReg_512RegClass,
- &AMDGPU::SReg_512RegClass
+ &AMDGPU::SReg_512RegClass,
+ &AMDGPU::SCC_CLASSRegClass,
};
for (const TargetRegisterClass *BaseClass : BaseClasses) {
@@ -442,6 +707,8 @@ const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
switch (RC->getSize()) {
+ case 0: return false;
+ case 1: return false;
case 4:
return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
case 8:
@@ -479,6 +746,24 @@ const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
}
}
+const TargetRegisterClass *SIRegisterInfo::getEquivalentSGPRClass(
+ const TargetRegisterClass *VRC) const {
+ switch (VRC->getSize()) {
+ case 4:
+ return &AMDGPU::SGPR_32RegClass;
+ case 8:
+ return &AMDGPU::SReg_64RegClass;
+ case 16:
+ return &AMDGPU::SReg_128RegClass;
+ case 32:
+ return &AMDGPU::SReg_256RegClass;
+ case 64:
+ return &AMDGPU::SReg_512RegClass;
+ default:
+ llvm_unreachable("Invalid register class size");
+ }
+}
+
const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
const TargetRegisterClass *RC, unsigned SubIdx) const {
if (SubIdx == AMDGPU::NoSubRegister)
@@ -552,7 +837,21 @@ unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
switch(Channel) {
case 0: return AMDGPU::VCC_LO;
case 1: return AMDGPU::VCC_HI;
- default: llvm_unreachable("Invalid SubIdx for VCC");
+ default: llvm_unreachable("Invalid SubIdx for VCC"); break;
+ }
+
+ case AMDGPU::TBA:
+ switch(Channel) {
+ case 0: return AMDGPU::TBA_LO;
+ case 1: return AMDGPU::TBA_HI;
+ default: llvm_unreachable("Invalid SubIdx for TBA"); break;
+ }
+
+ case AMDGPU::TMA:
+ switch(Channel) {
+ case 0: return AMDGPU::TMA_LO;
+ case 1: return AMDGPU::TMA_HI;
+ default: llvm_unreachable("Invalid SubIdx for TMA"); break;
}
case AMDGPU::FLAT_SCR:
@@ -610,7 +909,7 @@ unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
enum PreloadedValue Value) const {
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
- const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
(void)ST;
switch (Value) {
case SIRegisterInfo::WORKGROUP_ID_X:
@@ -631,11 +930,17 @@ unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
case SIRegisterInfo::KERNARG_SEGMENT_PTR:
assert(MFI->hasKernargSegmentPtr());
return MFI->KernargSegmentPtrUserSGPR;
+ case SIRegisterInfo::DISPATCH_ID:
+ llvm_unreachable("unimplemented");
+ case SIRegisterInfo::FLAT_SCRATCH_INIT:
+ assert(MFI->hasFlatScratchInit());
+ return MFI->FlatScratchInitUserSGPR;
case SIRegisterInfo::DISPATCH_PTR:
assert(MFI->hasDispatchPtr());
return MFI->DispatchPtrUserSGPR;
case SIRegisterInfo::QUEUE_PTR:
- llvm_unreachable("not implemented");
+ assert(MFI->hasQueuePtr());
+ return MFI->QueuePtrUserSGPR;
case SIRegisterInfo::WORKITEM_ID_X:
assert(MFI->hasWorkItemIDX());
return AMDGPU::VGPR0;
@@ -675,9 +980,9 @@ unsigned SIRegisterInfo::getNumVGPRsAllowed(unsigned WaveCount) const {
}
}
-unsigned SIRegisterInfo::getNumSGPRsAllowed(AMDGPUSubtarget::Generation gen,
+unsigned SIRegisterInfo::getNumSGPRsAllowed(const SISubtarget &ST,
unsigned WaveCount) const {
- if (gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
switch (WaveCount) {
case 10: return 80;
case 9: return 80;
@@ -696,3 +1001,14 @@ unsigned SIRegisterInfo::getNumSGPRsAllowed(AMDGPUSubtarget::Generation gen,
}
}
}
+
+bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
+ unsigned Reg) const {
+ const TargetRegisterClass *RC;
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ RC = MRI.getRegClass(Reg);
+ else
+ RC = getPhysRegClass(Reg);
+
+ return hasVGPRs(RC);
+}
diff --git a/lib/Target/AMDGPU/SIRegisterInfo.h b/lib/Target/AMDGPU/SIRegisterInfo.h
index 9410e2049cba..6e97b1b910a9 100644
--- a/lib/Target/AMDGPU/SIRegisterInfo.h
+++ b/lib/Target/AMDGPU/SIRegisterInfo.h
@@ -12,23 +12,27 @@
//
//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_SIREGISTERINFO_H
-#define LLVM_LIB_TARGET_R600_SIREGISTERINFO_H
+#ifndef LLVM_LIB_TARGET_AMDGPU_SIREGISTERINFO_H
+#define LLVM_LIB_TARGET_AMDGPU_SIREGISTERINFO_H
#include "AMDGPURegisterInfo.h"
-#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/Debug.h"
namespace llvm {
-struct SIRegisterInfo : public AMDGPURegisterInfo {
+class SISubtarget;
+class MachineRegisterInfo;
+
+struct SIRegisterInfo final : public AMDGPURegisterInfo {
private:
unsigned SGPR32SetID;
unsigned VGPR32SetID;
+ BitVector SGPRPressureSets;
+ BitVector VGPRPressureSets;
void reserveRegisterTuples(BitVector &, unsigned Reg) const;
+ void classifyPressureSet(unsigned PSetID, unsigned Reg,
+ BitVector &PressureSets) const;
public:
SIRegisterInfo();
@@ -47,13 +51,39 @@ public:
unsigned getRegPressureSetLimit(const MachineFunction &MF,
unsigned Idx) const override;
+
bool requiresRegisterScavenging(const MachineFunction &Fn) const override;
+
+ bool requiresFrameIndexScavenging(const MachineFunction &MF) const override;
+ bool requiresVirtualBaseRegisters(const MachineFunction &Fn) const override;
+ bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override;
+
+ int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
+ int Idx) const override;
+
+ bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override;
+
+ void materializeFrameBaseRegister(MachineBasicBlock *MBB,
+ unsigned BaseReg, int FrameIdx,
+ int64_t Offset) const override;
+
+ void resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
+ int64_t Offset) const override;
+
+ bool isFrameOffsetLegal(const MachineInstr *MI, unsigned BaseReg,
+ int64_t Offset) const override;
+
+ const TargetRegisterClass *getPointerRegClass(
+ const MachineFunction &MF, unsigned Kind = 0) const override;
+
void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
unsigned FIOperandNum,
RegScavenger *RS) const override;
- unsigned getHWRegIndex(unsigned Reg) const override;
+ unsigned getHWRegIndex(unsigned Reg) const {
+ return getEncodingValue(Reg) & 0xff;
+ }
/// \brief Return the 'base' register class for this register.
/// e.g. SGPR0 => SReg_32, VGPR => VGPR_32 SGPR0_SGPR1 -> SReg_32, etc.
@@ -70,9 +100,12 @@ public:
}
bool isSGPRReg(const MachineRegisterInfo &MRI, unsigned Reg) const {
+ const TargetRegisterClass *RC;
if (TargetRegisterInfo::isVirtualRegister(Reg))
- return isSGPRClass(MRI.getRegClass(Reg));
- return getPhysRegClass(Reg);
+ RC = MRI.getRegClass(Reg);
+ else
+ RC = getPhysRegClass(Reg);
+ return isSGPRClass(RC);
}
/// \returns true if this class contains VGPR registers.
@@ -89,6 +122,10 @@ public:
const TargetRegisterClass *getEquivalentVGPRClass(
const TargetRegisterClass *SRC) const;
+  /// \returns An SGPR reg class with the same width as \p VRC
+ const TargetRegisterClass *getEquivalentSGPRClass(
+ const TargetRegisterClass *VRC) const;
+
/// \returns The register class that is used for a sub-register of \p RC for
/// the given \p SubIdx. If \p SubIdx equals NoSubRegister, \p RC will
/// be returned.
@@ -117,10 +154,12 @@ public:
enum PreloadedValue {
// SGPRS:
- PRIVATE_SEGMENT_BUFFER = 0,
+ PRIVATE_SEGMENT_BUFFER = 0,
DISPATCH_PTR = 1,
QUEUE_PTR = 2,
KERNARG_SEGMENT_PTR = 3,
+ DISPATCH_ID = 4,
+ FLAT_SCRATCH_INIT = 5,
WORKGROUP_ID_X = 10,
WORKGROUP_ID_Y = 11,
WORKGROUP_ID_Z = 12,
@@ -143,8 +182,7 @@ public:
/// \brief Give the maximum number of SGPRs that can be used by \p WaveCount
/// concurrent waves.
- unsigned getNumSGPRsAllowed(AMDGPUSubtarget::Generation gen,
- unsigned WaveCount) const;
+ unsigned getNumSGPRsAllowed(const SISubtarget &ST, unsigned WaveCount) const;
unsigned findUnusedRegister(const MachineRegisterInfo &MRI,
const TargetRegisterClass *RC) const;
@@ -152,11 +190,14 @@ public:
unsigned getSGPR32PressureSet() const { return SGPR32SetID; };
unsigned getVGPR32PressureSet() const { return VGPR32SetID; };
+ bool isVGPR(const MachineRegisterInfo &MRI, unsigned Reg) const;
+
private:
void buildScratchLoadStore(MachineBasicBlock::iterator MI,
- unsigned LoadStoreOp, unsigned Value,
+ unsigned LoadStoreOp, const MachineOperand *SrcDst,
unsigned ScratchRsrcReg, unsigned ScratchOffset,
- int64_t Offset, RegScavenger *RS) const;
+ int64_t Offset,
+ RegScavenger *RS) const;
};
} // End namespace llvm
diff --git a/lib/Target/AMDGPU/SIRegisterInfo.td b/lib/Target/AMDGPU/SIRegisterInfo.td
index bfaf93709d8c..c427874d467a 100644
--- a/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -44,6 +44,40 @@ def EXEC : RegisterWithSubRegs<"EXEC", [EXEC_LO, EXEC_HI]>,
def SCC : SIReg<"scc", 253>;
def M0 : SIReg <"m0", 124>;
+// Trap handler registers
+def TBA_LO : SIReg<"tba_lo", 108>;
+def TBA_HI : SIReg<"tba_hi", 109>;
+
+def TBA : RegisterWithSubRegs<"tba", [TBA_LO, TBA_HI]>,
+ DwarfRegAlias<TBA_LO> {
+ let Namespace = "AMDGPU";
+ let SubRegIndices = [sub0, sub1];
+ let HWEncoding = 108;
+}
+
+def TMA_LO : SIReg<"tma_lo", 110>;
+def TMA_HI : SIReg<"tma_hi", 111>;
+
+def TMA : RegisterWithSubRegs<"tma", [TMA_LO, TMA_HI]>,
+ DwarfRegAlias<TMA_LO> {
+ let Namespace = "AMDGPU";
+ let SubRegIndices = [sub0, sub1];
+ let HWEncoding = 110;
+}
+
+def TTMP0 : SIReg <"ttmp0", 112>;
+def TTMP1 : SIReg <"ttmp1", 113>;
+def TTMP2 : SIReg <"ttmp2", 114>;
+def TTMP3 : SIReg <"ttmp3", 115>;
+def TTMP4 : SIReg <"ttmp4", 116>;
+def TTMP5 : SIReg <"ttmp5", 117>;
+def TTMP6 : SIReg <"ttmp6", 118>;
+def TTMP7 : SIReg <"ttmp7", 119>;
+def TTMP8 : SIReg <"ttmp8", 120>;
+def TTMP9 : SIReg <"ttmp9", 121>;
+def TTMP10 : SIReg <"ttmp10", 122>;
+def TTMP11 : SIReg <"ttmp11", 123>;
+
multiclass FLAT_SCR_LOHI_m <string n, bits<16> ci_e, bits<16> vi_e> {
def _ci : SIReg<n, ci_e>;
def _vi : SIReg<n, vi_e>;
@@ -81,11 +115,18 @@ foreach Index = 0-255 in {
// Groupings using register classes and tuples
//===----------------------------------------------------------------------===//
+def SCC_CLASS : RegisterClass<"AMDGPU", [i1], 1, (add SCC)> {
+ // A negative CopyCost marks SCC as prohibitively expensive to copy.
+ let CopyCost = -1;
+ let isAllocatable = 0;
+}
+
// TODO: Do we need to set DwarfRegAlias on register tuples?
// SGPR 32-bit registers
def SGPR_32 : RegisterClass<"AMDGPU", [i32, f32], 32,
- (add (sequence "SGPR%u", 0, 103))>;
+ (add (sequence "SGPR%u", 0, 103))> {
+ let AllocationPriority = 1;
+}
// SGPR 64-bit registers
def SGPR_64Regs : RegisterTuples<[sub0, sub1],
@@ -93,7 +134,7 @@ def SGPR_64Regs : RegisterTuples<[sub0, sub1],
(add (decimate (shl SGPR_32, 1), 2))]>;
// SGPR 128-bit registers
-def SGPR_128 : RegisterTuples<[sub0, sub1, sub2, sub3],
+def SGPR_128Regs : RegisterTuples<[sub0, sub1, sub2, sub3],
[(add (decimate SGPR_32, 4)),
(add (decimate (shl SGPR_32, 1), 4)),
(add (decimate (shl SGPR_32, 2), 4)),
@@ -130,9 +171,29 @@ def SGPR_512 : RegisterTuples<[sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7,
(add (decimate (shl SGPR_32, 14), 4)),
(add (decimate (shl SGPR_32, 15), 4))]>;
+// Trap handler TMP 32-bit registers
+def TTMP_32 : RegisterClass<"AMDGPU", [i32, f32], 32,
+ (add (sequence "TTMP%u", 0, 11))> {
+ let isAllocatable = 0;
+}
+
+// Trap handler TMP 64-bit registers
+def TTMP_64Regs : RegisterTuples<[sub0, sub1],
+ [(add (decimate TTMP_32, 2)),
+ (add (decimate (shl TTMP_32, 1), 2))]>;
+
+// Trap handler TMP 128-bit registers
+def TTMP_128Regs : RegisterTuples<[sub0, sub1, sub2, sub3],
+ [(add (decimate TTMP_32, 4)),
+ (add (decimate (shl TTMP_32, 1), 4)),
+ (add (decimate (shl TTMP_32, 2), 4)),
+ (add (decimate (shl TTMP_32, 3), 4))]>;
+
// VGPR 32-bit registers
def VGPR_32 : RegisterClass<"AMDGPU", [i32, f32], 32,
- (add (sequence "VGPR%u", 0, 255))>;
+ (add (sequence "VGPR%u", 0, 255))> {
+ let AllocationPriority = 1;
+}
// VGPR 64-bit registers
def VGPR_64 : RegisterTuples<[sub0, sub1],
@@ -192,36 +253,67 @@ class RegImmMatcher<string name> : AsmOperandClass {
let RenderMethod = "addRegOrImmOperands";
}
+// Subset of SReg_32 without M0 for SMRD instructions and the like.
+// See comments in SIInstructions.td for more info.
+def SReg_32_XM0 : RegisterClass<"AMDGPU", [i32, f32], 32,
+ (add SGPR_32, VCC_LO, VCC_HI, EXEC_LO, EXEC_HI, FLAT_SCR_LO, FLAT_SCR_HI,
+ TTMP_32, TMA_LO, TMA_HI, TBA_LO, TBA_HI)> {
+ let AllocationPriority = 1;
+}
+
// Register class for all scalar registers (SGPRs + Special Registers)
def SReg_32 : RegisterClass<"AMDGPU", [i32, f32], 32,
- (add SGPR_32, M0, VCC_LO, VCC_HI, EXEC_LO, EXEC_HI, FLAT_SCR_LO, FLAT_SCR_HI)
->;
+ (add SReg_32_XM0, M0)> {
+ let AllocationPriority = 1;
+}
+
+def SGPR_64 : RegisterClass<"AMDGPU", [v2i32, i64, f64], 32, (add SGPR_64Regs)> {
+ let AllocationPriority = 2;
+}
-def SGPR_64 : RegisterClass<"AMDGPU", [v2i32, i64, f64], 32, (add SGPR_64Regs)>;
+def TTMP_64 : RegisterClass<"AMDGPU", [v2i32, i64, f64], 32, (add TTMP_64Regs)> {
+ let isAllocatable = 0;
+}
def SReg_64 : RegisterClass<"AMDGPU", [v2i32, i64, f64, i1], 32,
- (add SGPR_64, VCC, EXEC, FLAT_SCR)
->;
+ (add SGPR_64, VCC, EXEC, FLAT_SCR, TTMP_64, TBA, TMA)> {
+ let AllocationPriority = 2;
+}
-def SReg_128 : RegisterClass<"AMDGPU", [v4i32, v16i8, v2i64], 32, (add SGPR_128)> {
- // Requires 2 s_mov_b64 to copy
- let CopyCost = 2;
+// Requires 2 s_mov_b64 to copy
+let CopyCost = 2 in {
+
+def SGPR_128 : RegisterClass<"AMDGPU", [v4i32, v16i8, v2i64], 32, (add SGPR_128Regs)> {
+ let AllocationPriority = 4;
+}
+
+def TTMP_128 : RegisterClass<"AMDGPU", [v4i32, v16i8, v2i64], 32, (add TTMP_128Regs)> {
+ let isAllocatable = 0;
+}
+
+def SReg_128 : RegisterClass<"AMDGPU", [v4i32, v16i8, v2i64], 32, (add SGPR_128, TTMP_128)> {
+ let AllocationPriority = 4;
}
-def SReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 32, (add SGPR_256)> {
+} // End CopyCost = 2
+
+def SReg_256 : RegisterClass<"AMDGPU", [v8i32, v8f32], 32, (add SGPR_256)> {
// Requires 4 s_mov_b64 to copy
let CopyCost = 4;
+ let AllocationPriority = 5;
}
def SReg_512 : RegisterClass<"AMDGPU", [v64i8, v16i32], 32, (add SGPR_512)> {
// Requires 8 s_mov_b64 to copy
let CopyCost = 8;
+ let AllocationPriority = 6;
}
// Register class for all vector registers (VGPRs + Interpolation Registers)
def VReg_64 : RegisterClass<"AMDGPU", [i64, f64, v2i32, v2f32], 32, (add VGPR_64)> {
// Requires 2 v_mov_b32 to copy
let CopyCost = 2;
+ let AllocationPriority = 2;
}
def VReg_96 : RegisterClass<"AMDGPU", [untyped], 32, (add VGPR_96)> {
@@ -229,19 +321,23 @@ def VReg_96 : RegisterClass<"AMDGPU", [untyped], 32, (add VGPR_96)> {
// Requires 3 v_mov_b32 to copy
let CopyCost = 3;
+ let AllocationPriority = 3;
}
def VReg_128 : RegisterClass<"AMDGPU", [v4i32, v4f32, v2i64, v2f64], 32, (add VGPR_128)> {
// Requires 4 v_mov_b32 to copy
let CopyCost = 4;
+ let AllocationPriority = 4;
}
-def VReg_256 : RegisterClass<"AMDGPU", [v32i8, v8i32, v8f32], 32, (add VGPR_256)> {
+def VReg_256 : RegisterClass<"AMDGPU", [v8i32, v8f32], 32, (add VGPR_256)> {
let CopyCost = 8;
+ let AllocationPriority = 5;
}
def VReg_512 : RegisterClass<"AMDGPU", [v16i32, v16f32], 32, (add VGPR_512)> {
let CopyCost = 16;
+ let AllocationPriority = 6;
}
def VReg_1 : RegisterClass<"AMDGPU", [i1], 32, (add VGPR_32)> {
diff --git a/lib/Target/AMDGPU/SISchedule.td b/lib/Target/AMDGPU/SISchedule.td
index cd77e519abb2..ed19217226b8 100644
--- a/lib/Target/AMDGPU/SISchedule.td
+++ b/lib/Target/AMDGPU/SISchedule.td
@@ -11,6 +11,12 @@
//
//===----------------------------------------------------------------------===//
+def : PredicateProlog<[{
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo*>(SchedModel->getInstrInfo());
+ (void)TII; // Silence unused-variable warnings in models that do not use TII.
+}]>;
+
def WriteBranch : SchedWrite;
def WriteExport : SchedWrite;
def WriteLDS : SchedWrite;
@@ -39,20 +45,33 @@ def Write64Bit : SchedWrite;
// instructions and have VALU rates, but write to the SALU (i.e. VOPC
// instructions)
-def SIFullSpeedModel : SchedMachineModel;
-def SIQuarterSpeedModel : SchedMachineModel;
+class SISchedMachineModel : SchedMachineModel {
+ let CompleteModel = 0;
+ let IssueWidth = 1;
+ let PostRAScheduler = 1;
+}
-// BufferSize = 0 means the processors are in-order.
-let BufferSize = 0 in {
+def SIFullSpeedModel : SISchedMachineModel;
+def SIQuarterSpeedModel : SISchedMachineModel;
// XXX: Are the resource counts correct?
-def HWBranch : ProcResource<1>;
-def HWExport : ProcResource<7>; // Taken from S_WAITCNT
-def HWLGKM : ProcResource<31>; // Taken from S_WAITCNT
-def HWSALU : ProcResource<1>;
-def HWVMEM : ProcResource<15>; // Taken from S_WAITCNT
-def HWVALU : ProcResource<1>;
-
+def HWBranch : ProcResource<1> {
+ let BufferSize = 1;
+}
+def HWExport : ProcResource<1> {
+ let BufferSize = 7; // Taken from S_WAITCNT
+}
+def HWLGKM : ProcResource<1> {
+ let BufferSize = 31; // Taken from S_WAITCNT
+}
+def HWSALU : ProcResource<1> {
+ let BufferSize = 1;
+}
+def HWVMEM : ProcResource<1> {
+ let BufferSize = 15; // Taken from S_WAITCNT
+}
+def HWVALU : ProcResource<1> {
+ let BufferSize = 1;
}
class HWWriteRes<SchedWrite write, list<ProcResourceKind> resources,
@@ -70,12 +89,12 @@ class HWVALUWriteRes<SchedWrite write, int latency> :
// The latency values are 1 / (operations / cycle) / 4.
multiclass SICommonWriteRes {
- def : HWWriteRes<WriteBranch, [HWBranch], 100>; // XXX: Guessed ???
- def : HWWriteRes<WriteExport, [HWExport], 100>; // XXX: Guessed ???
- def : HWWriteRes<WriteLDS, [HWLGKM], 32>; // 2 - 64
- def : HWWriteRes<WriteSALU, [HWSALU], 1>;
- def : HWWriteRes<WriteSMEM, [HWLGKM], 10>; // XXX: Guessed ???
- def : HWWriteRes<WriteVMEM, [HWVMEM], 450>; // 300 - 600
+ def : HWWriteRes<WriteBranch, [HWBranch], 8>;
+ def : HWWriteRes<WriteExport, [HWExport], 4>;
+ def : HWWriteRes<WriteLDS, [HWLGKM], 5>; // Can be between 2 and 64
+ def : HWWriteRes<WriteSALU, [HWSALU], 1>;
+ def : HWWriteRes<WriteSMEM, [HWLGKM], 5>;
+ def : HWWriteRes<WriteVMEM, [HWVMEM], 80>;
def : HWWriteRes<WriteBarrier, [HWBranch], 500>; // XXX: Guessed ???
def : HWVALUWriteRes<Write32Bit, 1>;
@@ -83,6 +102,12 @@ multiclass SICommonWriteRes {
def : HWVALUWriteRes<WriteQuarterRate32, 4>;
}
+def PredIsVGPR32Copy : SchedPredicate<[{TII->isVGPRCopy(*MI) && TII->getOpSize(*MI, 0) <= 32}]>;
+def PredIsVGPR64Copy : SchedPredicate<[{TII->isVGPRCopy(*MI) && TII->getOpSize(*MI, 0) > 32}]>;
+def WriteCopy : SchedWriteVariant<[
+ SchedVar<PredIsVGPR32Copy, [Write32Bit]>,
+ SchedVar<PredIsVGPR64Copy, [Write64Bit]>,
+ SchedVar<NoSchedPred, [WriteSALU]>]>;
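+
+// Model a COPY as a VALU write (narrow or wide, depending on the size of the
+// destination operand) when it copies VGPRs, and as an SALU write otherwise.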
let SchedModel = SIFullSpeedModel in {
@@ -92,6 +117,8 @@ def : HWVALUWriteRes<WriteFloatFMA, 1>;
def : HWVALUWriteRes<WriteDouble, 4>;
def : HWVALUWriteRes<WriteDoubleAdd, 2>;
+def : InstRW<[WriteCopy], (instrs COPY)>;
+
} // End SchedModel = SIFullSpeedModel
let SchedModel = SIQuarterSpeedModel in {
@@ -102,4 +129,6 @@ def : HWVALUWriteRes<WriteFloatFMA, 16>;
def : HWVALUWriteRes<WriteDouble, 16>;
def : HWVALUWriteRes<WriteDoubleAdd, 8>;
+def : InstRW<[WriteCopy], (instrs COPY)>;
+
} // End SchedModel = SIQuarterSpeedModel
diff --git a/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 4f0913fe62f2..6cba55300a8c 100644
--- a/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -31,10 +31,6 @@ STATISTIC(NumInstructionsShrunk,
STATISTIC(NumLiteralConstantsFolded,
"Number of literal constants folded into 32-bit instructions.");
-namespace llvm {
- void initializeSIShrinkInstructionsPass(PassRegistry&);
-}
-
using namespace llvm;
namespace {
@@ -61,10 +57,8 @@ public:
} // End anonymous namespace.
-INITIALIZE_PASS_BEGIN(SIShrinkInstructions, DEBUG_TYPE,
- "SI Lower il Copies", false, false)
-INITIALIZE_PASS_END(SIShrinkInstructions, DEBUG_TYPE,
- "SI Lower il Copies", false, false)
+INITIALIZE_PASS(SIShrinkInstructions, DEBUG_TYPE,
+ "SI Shrink Instructions", false, false)
char SIShrinkInstructions::ID = 0;
@@ -125,10 +119,7 @@ static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
return false;
- if (TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
- return false;
-
- return true;
+ return !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp);
}
/// \brief This function checks \p MI for operands defined by a move immediate
@@ -181,31 +172,37 @@ static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
}
// We have failed to fold src0, so commute the instruction and try again.
- if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(&MI))
+ if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(MI))
foldImmediates(MI, TII, MRI, false);
}
// Copy MachineOperand with all flags except setting it as implicit.
-static MachineOperand copyRegOperandAsImplicit(const MachineOperand &Orig) {
- assert(!Orig.isImplicit());
- return MachineOperand::CreateReg(Orig.getReg(),
- Orig.isDef(),
- true,
- Orig.isKill(),
- Orig.isDead(),
- Orig.isUndef(),
- Orig.isEarlyClobber(),
- Orig.getSubReg(),
- Orig.isDebug(),
- Orig.isInternalRead());
+static void copyFlagsToImplicitVCC(MachineInstr &MI,
+ const MachineOperand &Orig) {
+
+ for (MachineOperand &Use : MI.implicit_operands()) {
+ if (Use.getReg() == AMDGPU::VCC) {
+ Use.setIsUndef(Orig.isUndef());
+ Use.setIsKill(Orig.isKill());
+ return;
+ }
+ }
+}
+
+// An operand fits the SOPK immediate field when it is a signed 16-bit value
+// that is not already a free inline constant, e.g. 0x1234 qualifies while 64
+// (an inline constant) does not.
+static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
+  return isInt<16>(Src.getImm()) && !TII->isInlineConstant(Src, 4);
+}
bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
MachineRegisterInfo &MRI = MF.getRegInfo();
- const SIInstrInfo *TII =
- static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+ const SIInstrInfo *TII = ST.getInstrInfo();
const SIRegisterInfo &TRI = TII->getRegisterInfo();
+
std::vector<unsigned> I1Defs;
for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
@@ -217,14 +214,94 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
Next = std::next(I);
MachineInstr &MI = *I;
+ if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
+ // If this has a literal constant source that is the same as the
+ // reversed bits of an inline immediate, replace with a bitreverse of
+ // that constant. This saves 4 bytes in the common case of materializing
+ // sign bits.
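+ //
+ // For example, v_mov_b32 v0, 0x80000000 (a 4-byte literal) becomes
+ // v_bfrev_b32 v0, 1, because reverseBits(0x80000000) == 1 is an inline
+ // immediate.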
+
+ // Test if we are after regalloc. We only want to do this after all other
+ // optimizations have run, because this transformation would confuse them.
+ // XXX - not exactly a check for post-regalloc run.
+ MachineOperand &Src = MI.getOperand(1);
+ if (Src.isImm() &&
+ TargetRegisterInfo::isPhysicalRegister(MI.getOperand(0).getReg())) {
+ int64_t Imm = Src.getImm();
+ if (isInt<32>(Imm) && !TII->isInlineConstant(Src, 4)) {
+ int32_t ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Imm));
+ if (ReverseImm >= -16 && ReverseImm <= 64) {
+ MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
+ Src.setImm(ReverseImm);
+ continue;
+ }
+ }
+ }
+ }
+
+ // Combine adjacent s_nops, using the immediate operand to encode how long
+ // to wait.
+ //
+ // s_nop N
+ // s_nop M
+ // =>
+ // s_nop (N + M)
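+ //
+ // e.g. s_nop 0 (wait 1 cycle) followed by s_nop 2 (wait 3 cycles) becomes
+ // s_nop 3 (wait 4 cycles).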
+ if (MI.getOpcode() == AMDGPU::S_NOP &&
+ Next != MBB.end() &&
+ (*Next).getOpcode() == AMDGPU::S_NOP) {
+
+ MachineInstr &NextMI = *Next;
+ // The instruction encodes the amount to wait with an offset of 1,
+ // i.e. 0 is wait 1 cycle. Convert both to cycles and then convert back
+ // after adding.
+ uint8_t Nop0 = MI.getOperand(0).getImm() + 1;
+ uint8_t Nop1 = NextMI.getOperand(0).getImm() + 1;
+
+ // Make sure we don't overflow the bounds.
+ if (Nop0 + Nop1 <= 8) {
+ NextMI.getOperand(0).setImm(Nop0 + Nop1 - 1);
+ MI.eraseFromParent();
+ }
+
+ continue;
+ }
+
+ // FIXME: We also need to consider movs of constant operands since
+ // immediate operands are not folded if they have more than one use, and
+ // the operand folding pass cannot know whether the immediate will be free,
+ // since it cannot tell whether the src == dest constraint will end up
+ // being satisfied.
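+ //
+ // For example, with Dest == Src0:
+ //   s_add_i32 s0, s0, 0x4321  =>  s_addk_i32 s0, 0x4321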
+ if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
+ MI.getOpcode() == AMDGPU::S_MUL_I32) {
+ const MachineOperand &Dest = MI.getOperand(0);
+ const MachineOperand &Src0 = MI.getOperand(1);
+ const MachineOperand &Src1 = MI.getOperand(2);
+
+ // FIXME: This could work better if hints worked with subregisters. If
+ // we have a vector add of a constant, we usually don't get the correct
+ // allocation due to the subregister usage.
+ if (TargetRegisterInfo::isVirtualRegister(Dest.getReg()) &&
+ Src0.isReg()) {
+ MRI.setRegAllocationHint(Dest.getReg(), 0, Src0.getReg());
+ continue;
+ }
+
+ if (Src0.isReg() && Src0.getReg() == Dest.getReg()) {
+ if (Src1.isImm() && isKImmOperand(TII, Src1)) {
+ unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
+ AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;
+
+ MI.setDesc(TII->get(Opc));
+ MI.tieOperands(0, 1);
+ }
+ }
+ }
+
// Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
const MachineOperand &Src = MI.getOperand(1);
- if (Src.isImm()) {
- if (isInt<16>(Src.getImm()) && !TII->isInlineConstant(Src, 4))
- MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
- }
+ if (Src.isImm() && isKImmOperand(TII, Src))
+ MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
continue;
}
@@ -235,7 +312,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
if (!canShrink(MI, TII, TRI, MRI)) {
// Try commuting the instruction and see if that enables us to shrink
// it.
- if (!MI.isCommutable() || !TII->commuteInstruction(&MI) ||
+ if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
!canShrink(MI, TII, TRI, MRI))
continue;
}
@@ -287,9 +364,9 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
MachineInstrBuilder Inst32 =
BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));
- // Add the dst operand if the 32-bit encoding also has an explicit $dst.
+ // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
// For VOPC instructions, this is replaced by an implicit def of vcc.
- int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::dst);
+ int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
if (Op32DstIdx != -1) {
// dst
Inst32.addOperand(MI.getOperand(0));
@@ -314,10 +391,9 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
Inst32.addOperand(*Src2);
} else {
// In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
- // replaced with an implicit read of vcc.
- assert(Src2->getReg() == AMDGPU::VCC &&
- "Unexpected missing register operand");
- Inst32.addOperand(copyRegOperandAsImplicit(*Src2));
+ // replaced with an implicit read of vcc. This was already added
+ // during the initial BuildMI, so find it to preserve the flags.
+ copyFlagsToImplicitVCC(*Inst32, *Src2);
}
}
diff --git a/lib/Target/AMDGPU/SITypeRewriter.cpp b/lib/Target/AMDGPU/SITypeRewriter.cpp
index d36c5d29b127..facc0c7df1dc 100644
--- a/lib/Target/AMDGPU/SITypeRewriter.cpp
+++ b/lib/Target/AMDGPU/SITypeRewriter.cpp
@@ -62,7 +62,7 @@ bool SITypeRewriter::doInitialization(Module &M) {
}
bool SITypeRewriter::runOnFunction(Function &F) {
- if (AMDGPU::getShaderType(F) == ShaderType::COMPUTE)
+ if (!AMDGPU::isShader(F.getCallingConv()))
return false;
visit(F);
diff --git a/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/lib/Target/AMDGPU/SIWholeQuadMode.cpp
new file mode 100644
index 000000000000..c1a237ea5f51
--- /dev/null
+++ b/lib/Target/AMDGPU/SIWholeQuadMode.cpp
@@ -0,0 +1,509 @@
+//===-- SIWholeQuadMode.cpp - enter and suspend whole quad mode -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// \brief This pass adds instructions to enable whole quad mode for pixel
+/// shaders.
+///
+/// Whole quad mode is required for derivative computations, but it interferes
+/// with shader side effects (stores and atomics). This pass is run on the
+/// scheduled machine IR but before register coalescing, so that machine SSA is
+/// available for analysis. It ensures that WQM is enabled when necessary, but
+/// disabled around stores and atomics.
+///
+/// When necessary, this pass creates a function prolog
+///
+/// S_MOV_B64 LiveMask, EXEC
+/// S_WQM_B64 EXEC, EXEC
+///
+/// to enter WQM at the top of the function and surrounds blocks of Exact
+/// instructions by
+///
+/// S_AND_SAVEEXEC_B64 Tmp, LiveMask
+/// ...
+/// S_MOV_B64 EXEC, Tmp
+///
+/// In order to avoid excessive switching during sequences of Exact
+/// instructions, the pass first analyzes which instructions must be run in WQM
+/// (aka which instructions produce values that lead to derivative
+/// computations).
+///
+/// Basic blocks are always exited in WQM as long as some successor needs WQM.
+///
+/// There is room for improvement given better control flow analysis:
+///
+/// (1) at the top level (outside of control flow statements, and as long as
+/// kill hasn't been used), one SGPR can be saved by recovering WQM from
+/// the LiveMask (this is implemented for the entry block).
+///
+/// (2) when entire regions (e.g. if-else blocks or entire loops) only
+/// consist of exact and don't-care instructions, the switch only has to
+/// be done at the entry and exit points rather than potentially in each
+/// block of the region.
+///
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "AMDGPUSubtarget.h"
+#include "SIInstrInfo.h"
+#include "SIMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "si-wqm"
+
+namespace {
+
+enum {
+ StateWQM = 0x1,
+ StateExact = 0x2,
+};
+
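+// Needs/InNeeds/OutNeeds in the structs below are bitmasks of the states
+// above: what an instruction or block itself requires, what must hold when
+// it is entered, and what the code that follows it requires.
+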
+struct InstrInfo {
+ char Needs = 0;
+ char OutNeeds = 0;
+};
+
+struct BlockInfo {
+ char Needs = 0;
+ char InNeeds = 0;
+ char OutNeeds = 0;
+};
+
+struct WorkItem {
+ MachineBasicBlock *MBB = nullptr;
+ MachineInstr *MI = nullptr;
+
+ WorkItem() {}
+ WorkItem(MachineBasicBlock *MBB) : MBB(MBB) {}
+ WorkItem(MachineInstr *MI) : MI(MI) {}
+};
+
+class SIWholeQuadMode : public MachineFunctionPass {
+private:
+ const SIInstrInfo *TII;
+ const SIRegisterInfo *TRI;
+ MachineRegisterInfo *MRI;
+
+ DenseMap<const MachineInstr *, InstrInfo> Instructions;
+ DenseMap<MachineBasicBlock *, BlockInfo> Blocks;
+ SmallVector<const MachineInstr *, 2> ExecExports;
+ SmallVector<MachineInstr *, 1> LiveMaskQueries;
+
+ char scanInstructions(MachineFunction &MF, std::vector<WorkItem> &Worklist);
+ void propagateInstruction(MachineInstr &MI, std::vector<WorkItem> &Worklist);
+ void propagateBlock(MachineBasicBlock &MBB, std::vector<WorkItem> &Worklist);
+ char analyzeFunction(MachineFunction &MF);
+
+ void toExact(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
+ unsigned SaveWQM, unsigned LiveMaskReg);
+ void toWQM(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
+ unsigned SavedWQM);
+ void processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg, bool isEntry);
+
+ void lowerLiveMaskQueries(unsigned LiveMaskReg);
+
+public:
+ static char ID;
+
+ SIWholeQuadMode() :
+ MachineFunctionPass(ID) { }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "SI Whole Quad Mode";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+};
+
+} // End anonymous namespace
+
+char SIWholeQuadMode::ID = 0;
+
+INITIALIZE_PASS(SIWholeQuadMode, DEBUG_TYPE,
+ "SI Whole Quad Mode", false, false)
+
+char &llvm::SIWholeQuadModeID = SIWholeQuadMode::ID;
+
+FunctionPass *llvm::createSIWholeQuadModePass() {
+ return new SIWholeQuadMode;
+}
+
+// Scan instructions to determine which ones require an Exact execmask and
+// which ones seed WQM requirements.
+char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
+ std::vector<WorkItem> &Worklist) {
+ char GlobalFlags = 0;
+ bool WQMOutputs = MF.getFunction()->hasFnAttribute("amdgpu-ps-wqm-outputs");
+
+ for (auto BI = MF.begin(), BE = MF.end(); BI != BE; ++BI) {
+ MachineBasicBlock &MBB = *BI;
+
+ for (auto II = MBB.begin(), IE = MBB.end(); II != IE; ++II) {
+ MachineInstr &MI = *II;
+ unsigned Opcode = MI.getOpcode();
+ char Flags = 0;
+
+ if (TII->isWQM(Opcode) || TII->isDS(Opcode)) {
+ Flags = StateWQM;
+ } else if (MI.mayStore() && TII->usesVM_CNT(MI)) {
+ Flags = StateExact;
+ } else {
+ // Handle export instructions with the exec mask valid flag set
+ if (Opcode == AMDGPU::EXP) {
+ if (MI.getOperand(4).getImm() != 0)
+ ExecExports.push_back(&MI);
+ } else if (Opcode == AMDGPU::SI_PS_LIVE) {
+ LiveMaskQueries.push_back(&MI);
+ } else if (WQMOutputs) {
+ // The function is in machine SSA form, which means that physical
+ // VGPRs correspond to shader inputs and outputs. Inputs are
+ // only used, outputs are only defined.
+ for (const MachineOperand &MO : MI.defs()) {
+ if (!MO.isReg())
+ continue;
+
+ unsigned Reg = MO.getReg();
+
+ if (!TRI->isVirtualRegister(Reg) &&
+ TRI->hasVGPRs(TRI->getPhysRegClass(Reg))) {
+ Flags = StateWQM;
+ break;
+ }
+ }
+ }
+
+ if (!Flags)
+ continue;
+ }
+
+ Instructions[&MI].Needs = Flags;
+ Worklist.push_back(&MI);
+ GlobalFlags |= Flags;
+ }
+
+ if (WQMOutputs && MBB.succ_empty()) {
+ // This is a prolog shader. Make sure we go back to exact mode at the end.
+ Blocks[&MBB].OutNeeds = StateExact;
+ Worklist.push_back(&MBB);
+ GlobalFlags |= StateExact;
+ }
+ }
+
+ return GlobalFlags;
+}
+
+void SIWholeQuadMode::propagateInstruction(MachineInstr &MI,
+ std::vector<WorkItem>& Worklist) {
+ MachineBasicBlock *MBB = MI.getParent();
+ // Take a copy: insertions below may grow the map and invalidate references.
+ InstrInfo II = Instructions[&MI];
+ BlockInfo &BI = Blocks[MBB];
+
+ // Control flow-type instructions that are followed by WQM computations
+ // must themselves be in WQM.
+ if ((II.OutNeeds & StateWQM) && !(II.Needs & StateWQM) && MI.isTerminator()) {
+ Instructions[&MI].Needs = StateWQM;
+ II.Needs = StateWQM;
+ }
+
+ // Propagate to block level
+ BI.Needs |= II.Needs;
+ if ((BI.InNeeds | II.Needs) != BI.InNeeds) {
+ BI.InNeeds |= II.Needs;
+ Worklist.push_back(MBB);
+ }
+
+ // Propagate backwards within block
+ if (MachineInstr *PrevMI = MI.getPrevNode()) {
+ char InNeeds = II.Needs | II.OutNeeds;
+ if (!PrevMI->isPHI()) {
+ InstrInfo &PrevII = Instructions[PrevMI];
+ if ((PrevII.OutNeeds | InNeeds) != PrevII.OutNeeds) {
+ PrevII.OutNeeds |= InNeeds;
+ Worklist.push_back(PrevMI);
+ }
+ }
+ }
+
+ // Propagate WQM flag to instruction inputs
+ assert(II.Needs != (StateWQM | StateExact));
+ if (II.Needs != StateWQM)
+ return;
+
+ for (const MachineOperand &Use : MI.uses()) {
+ if (!Use.isReg() || !Use.isUse())
+ continue;
+
+ // At this point, physical registers appear as inputs or outputs
+ // and following them makes no sense (and would in fact be incorrect
+ // when the same VGPR is used as both an output and an input that leads
+ // to a NeedsWQM instruction).
+ //
+ // Note: VCC appears e.g. in 64-bit addition with carry - theoretically we
+ // would have to trace this, but in practice it only happens for 64-bit
+ // computations like pointers, where both dwords are followed already anyway.
+ if (!TargetRegisterInfo::isVirtualRegister(Use.getReg()))
+ continue;
+
+ for (MachineInstr &DefMI : MRI->def_instructions(Use.getReg())) {
+ InstrInfo &DefII = Instructions[&DefMI];
+
+ // Obviously skip if DefMI is already flagged as NeedWQM.
+ //
+ // The instruction might also be flagged as NeedExact. This happens when
+ // the result of an atomic is used in a WQM computation. In this case,
+ // the atomic must not run for helper pixels and the WQM result is
+ // undefined.
+ if (DefII.Needs != 0)
+ continue;
+
+ DefII.Needs = StateWQM;
+ Worklist.push_back(&DefMI);
+ }
+ }
+}
+
+void SIWholeQuadMode::propagateBlock(MachineBasicBlock &MBB,
+ std::vector<WorkItem>& Worklist) {
+ BlockInfo BI = Blocks[&MBB]; // Make a copy to prevent dangling references.
+
+ // Propagate through instructions
+ if (!MBB.empty()) {
+ MachineInstr *LastMI = &*MBB.rbegin();
+ InstrInfo &LastII = Instructions[LastMI];
+ if ((LastII.OutNeeds | BI.OutNeeds) != LastII.OutNeeds) {
+ LastII.OutNeeds |= BI.OutNeeds;
+ Worklist.push_back(LastMI);
+ }
+ }
+
+ // Predecessor blocks must provide for our WQM/Exact needs.
+ for (MachineBasicBlock *Pred : MBB.predecessors()) {
+ BlockInfo &PredBI = Blocks[Pred];
+ if ((PredBI.OutNeeds | BI.InNeeds) == PredBI.OutNeeds)
+ continue;
+
+ PredBI.OutNeeds |= BI.InNeeds;
+ PredBI.InNeeds |= BI.InNeeds;
+ Worklist.push_back(Pred);
+ }
+
+ // All successors must be prepared to accept the same set of WQM/Exact data.
+ for (MachineBasicBlock *Succ : MBB.successors()) {
+ BlockInfo &SuccBI = Blocks[Succ];
+ if ((SuccBI.InNeeds | BI.OutNeeds) == SuccBI.InNeeds)
+ continue;
+
+ SuccBI.InNeeds |= BI.OutNeeds;
+ Worklist.push_back(Succ);
+ }
+}
+
+char SIWholeQuadMode::analyzeFunction(MachineFunction &MF) {
+ std::vector<WorkItem> Worklist;
+ char GlobalFlags = scanInstructions(MF, Worklist);
+
+ while (!Worklist.empty()) {
+ WorkItem WI = Worklist.back();
+ Worklist.pop_back();
+
+ if (WI.MI)
+ propagateInstruction(*WI.MI, Worklist);
+ else
+ propagateBlock(*WI.MBB, Worklist);
+ }
+
+ return GlobalFlags;
+}
+
+void SIWholeQuadMode::toExact(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator Before,
+ unsigned SaveWQM, unsigned LiveMaskReg) {
+ if (SaveWQM) {
+ BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_AND_SAVEEXEC_B64),
+ SaveWQM)
+ .addReg(LiveMaskReg);
+ } else {
+ BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_AND_B64),
+ AMDGPU::EXEC)
+ .addReg(AMDGPU::EXEC)
+ .addReg(LiveMaskReg);
+ }
+}
+
+void SIWholeQuadMode::toWQM(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator Before,
+ unsigned SavedWQM) {
+ if (SavedWQM) {
+ BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::EXEC)
+ .addReg(SavedWQM);
+ } else {
+ BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
+ AMDGPU::EXEC)
+ .addReg(AMDGPU::EXEC);
+ }
+}
+
+void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg,
+ bool isEntry) {
+ auto BII = Blocks.find(&MBB);
+ if (BII == Blocks.end())
+ return;
+
+ const BlockInfo &BI = BII->second;
+
+ if (!(BI.InNeeds & StateWQM))
+ return;
+
+ // This is a non-entry block that is WQM throughout, so no need to do
+ // anything.
+ if (!isEntry && !(BI.Needs & StateExact) && BI.OutNeeds != StateExact)
+ return;
+
+ unsigned SavedWQMReg = 0;
+ bool WQMFromExec = isEntry;
+ char State = isEntry ? StateExact : StateWQM;
+
+ auto II = MBB.getFirstNonPHI(), IE = MBB.end();
+ while (II != IE) {
+ MachineInstr &MI = *II;
+ ++II;
+
+ // Skip instructions that are not affected by EXEC
+ if (TII->isScalarUnit(MI) && !MI.isTerminator())
+ continue;
+
+ // Generic instructions such as COPY will either disappear by register
+ // coalescing or be lowered to SALU or VALU instructions.
+ if (TargetInstrInfo::isGenericOpcode(MI.getOpcode())) {
+ if (MI.getNumExplicitOperands() >= 1) {
+ const MachineOperand &Op = MI.getOperand(0);
+ if (Op.isReg()) {
+ if (TRI->isSGPRReg(*MRI, Op.getReg())) {
+ // SGPR instructions are not affected by EXEC
+ continue;
+ }
+ }
+ }
+ }
+
+ char Needs = 0;
+ char OutNeeds = 0;
+ auto InstrInfoIt = Instructions.find(&MI);
+ if (InstrInfoIt != Instructions.end()) {
+ Needs = InstrInfoIt->second.Needs;
+ OutNeeds = InstrInfoIt->second.OutNeeds;
+
+ // Make sure to switch to Exact mode before the end of the block when
+ // Exact and only Exact is needed further downstream.
+ if (OutNeeds == StateExact && MI.isTerminator()) {
+ assert(Needs == 0);
+ Needs = StateExact;
+ }
+ }
+
+ // State switching
+ if (Needs && State != Needs) {
+ if (Needs == StateExact) {
+ assert(!SavedWQMReg);
+
+ if (!WQMFromExec && (OutNeeds & StateWQM))
+ SavedWQMReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
+
+ toExact(MBB, &MI, SavedWQMReg, LiveMaskReg);
+ } else {
+ assert(WQMFromExec == (SavedWQMReg == 0));
+ toWQM(MBB, &MI, SavedWQMReg);
+ SavedWQMReg = 0;
+ }
+
+ State = Needs;
+ }
+ }
+
+ if ((BI.OutNeeds & StateWQM) && State != StateWQM) {
+ assert(WQMFromExec == (SavedWQMReg == 0));
+ toWQM(MBB, MBB.end(), SavedWQMReg);
+ } else if (BI.OutNeeds == StateExact && State != StateExact) {
+ toExact(MBB, MBB.end(), 0, LiveMaskReg);
+ }
+}
+
+void SIWholeQuadMode::lowerLiveMaskQueries(unsigned LiveMaskReg) {
+ for (MachineInstr *MI : LiveMaskQueries) {
+ const DebugLoc &DL = MI->getDebugLoc();
+ unsigned Dest = MI->getOperand(0).getReg();
+ BuildMI(*MI->getParent(), MI, DL, TII->get(AMDGPU::COPY), Dest)
+ .addReg(LiveMaskReg);
+ MI->eraseFromParent();
+ }
+}
+
+bool SIWholeQuadMode::runOnMachineFunction(MachineFunction &MF) {
+ if (MF.getFunction()->getCallingConv() != CallingConv::AMDGPU_PS)
+ return false;
+
+ Instructions.clear();
+ Blocks.clear();
+ ExecExports.clear();
+ LiveMaskQueries.clear();
+
+ const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+
+ TII = ST.getInstrInfo();
+ TRI = &TII->getRegisterInfo();
+ MRI = &MF.getRegInfo();
+
+ char GlobalFlags = analyzeFunction(MF);
+ // If WQM is never required anywhere, the live mask is simply EXEC.
+ if (!(GlobalFlags & StateWQM)) {
+ lowerLiveMaskQueries(AMDGPU::EXEC);
+ return !LiveMaskQueries.empty();
+ }
+
+ // Store a copy of the original live mask when required
+ unsigned LiveMaskReg = 0;
+ {
+ MachineBasicBlock &Entry = MF.front();
+ MachineBasicBlock::iterator EntryMI = Entry.getFirstNonPHI();
+
+ if (GlobalFlags & StateExact || !LiveMaskQueries.empty()) {
+ LiveMaskReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::COPY), LiveMaskReg)
+ .addReg(AMDGPU::EXEC);
+ }
+
+ if (GlobalFlags == StateWQM) {
+ // For a shader that needs only WQM, we can just set it once.
+ BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
+ AMDGPU::EXEC)
+ .addReg(AMDGPU::EXEC);
+
+ lowerLiveMaskQueries(LiveMaskReg);
+ // EntryMI may become invalid here
+ return true;
+ }
+ }
+
+ lowerLiveMaskQueries(LiveMaskReg);
+
+ // Handle the general case
+ for (auto BII : Blocks)
+ processBlock(*BII.first, LiveMaskReg, BII.first == &*MF.begin());
+
+ return true;
+}
diff --git a/lib/Target/AMDGPU/TargetInfo/Makefile b/lib/Target/AMDGPU/TargetInfo/Makefile
deleted file mode 100644
index 1b232871bd62..000000000000
--- a/lib/Target/AMDGPU/TargetInfo/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/AMDGPU/TargetInfo/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAMDGPUInfo
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp b/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp
new file mode 100644
index 000000000000..b6868de6a74e
--- /dev/null
+++ b/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp
@@ -0,0 +1,69 @@
+//===-- AMDGPUAsmUtils.cpp - AsmParser/InstPrinter common -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "AMDGPUAsmUtils.h"
+
+namespace llvm {
+namespace AMDGPU {
+namespace SendMsg {
+
+// This must be in sync with llvm::AMDGPU::SendMsg::Id enum members, see SIDefines.h.
+const char* const IdSymbolic[] = {
+ nullptr,
+ "MSG_INTERRUPT",
+ "MSG_GS",
+ "MSG_GS_DONE",
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ "MSG_SYSMSG"
+};
+
+// These two must be in sync with llvm::AMDGPU::SendMsg::Op enum members, see SIDefines.h.
+const char* const OpSysSymbolic[] = {
+ nullptr,
+ "SYSMSG_OP_ECC_ERR_INTERRUPT",
+ "SYSMSG_OP_REG_RD",
+ "SYSMSG_OP_HOST_TRAP_ACK",
+ "SYSMSG_OP_TTRACE_PC"
+};
+
+const char* const OpGsSymbolic[] = {
+ "GS_OP_NOP",
+ "GS_OP_CUT",
+ "GS_OP_EMIT",
+ "GS_OP_EMIT_CUT"
+};
+
+} // namespace SendMsg
+
+namespace Hwreg {
+
+// This must be in sync with llvm::AMDGPU::Hwreg::ID_SYMBOLIC_FIRST_/LAST_, see SIDefines.h.
+const char* const IdSymbolic[] = {
+ nullptr,
+ "HW_REG_MODE",
+ "HW_REG_STATUS",
+ "HW_REG_TRAPSTS",
+ "HW_REG_HW_ID",
+ "HW_REG_GPR_ALLOC",
+ "HW_REG_LDS_ALLOC",
+ "HW_REG_IB_STS"
+};
+
+} // namespace Hwreg
+} // namespace AMDGPU
+} // namespace llvm
diff --git a/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.h b/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.h
new file mode 100644
index 000000000000..b2dc2c0e364c
--- /dev/null
+++ b/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.h
@@ -0,0 +1,31 @@
+//===-- AMDGPUAsmUtils.h - AsmParser/InstPrinter common ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPUASMUTILS_H
+#define LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPUASMUTILS_H
+
+namespace llvm {
+namespace AMDGPU {
+namespace SendMsg { // Symbolic names for the sendmsg(...) syntax.
+
+extern const char* const IdSymbolic[];
+extern const char* const OpSysSymbolic[];
+extern const char* const OpGsSymbolic[];
+
+} // namespace SendMsg
+
+namespace Hwreg { // Symbolic names for the hwreg(...) syntax.
+
+extern const char* const IdSymbolic[];
+
+} // namespace Hwreg
+} // namespace AMDGPU
+} // namespace llvm
+
+#endif
diff --git a/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 1f5deaef9d3b..c6f9142c0aa5 100644
--- a/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -109,29 +109,45 @@ bool isReadOnlySegment(const GlobalValue *GV) {
return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;
}
-static unsigned getIntegerAttribute(const Function &F, const char *Name,
- unsigned Default) {
+int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
Attribute A = F.getFnAttribute(Name);
- unsigned Result = Default;
+ int Result = Default;
if (A.isStringAttribute()) {
StringRef Str = A.getValueAsString();
if (Str.getAsInteger(0, Result)) {
LLVMContext &Ctx = F.getContext();
- Ctx.emitError("can't parse shader type");
+ Ctx.emitError("can't parse integer attribute " + Name);
}
}
+
return Result;
}
-unsigned getShaderType(const Function &F) {
- return getIntegerAttribute(F, "ShaderType", ShaderType::COMPUTE);
+unsigned getMaximumWorkGroupSize(const Function &F) {
+ return getIntegerAttribute(F, "amdgpu-max-work-group-size", 256);
}
unsigned getInitialPSInputAddr(const Function &F) {
return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}
+bool isShader(CallingConv::ID cc) {
+ switch(cc) {
+ case CallingConv::AMDGPU_VS:
+ case CallingConv::AMDGPU_GS:
+ case CallingConv::AMDGPU_PS:
+ case CallingConv::AMDGPU_CS:
+ return true;
+ default:
+ return false;
+ }
+}
+
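+// Note that AMDGPU_CS is both a shader and a compute calling convention:
+// everything that is not a graphics shader counts as compute.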
+bool isCompute(CallingConv::ID cc) {
+ return !isShader(cc) || cc == CallingConv::AMDGPU_CS;
+}
+
bool isSI(const MCSubtargetInfo &STI) {
return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
}
diff --git a/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index 57cbe1b58f98..995a9041fb36 100644
--- a/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -11,6 +11,7 @@
#define LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPUBASEINFO_H
#include "AMDKernelCodeT.h"
+#include "llvm/IR/CallingConv.h"
namespace llvm {
@@ -44,9 +45,13 @@ bool isGroupSegment(const GlobalValue *GV);
bool isGlobalSegment(const GlobalValue *GV);
bool isReadOnlySegment(const GlobalValue *GV);
-unsigned getShaderType(const Function &F);
+int getIntegerAttribute(const Function &F, StringRef Name, int Default);
+
+unsigned getMaximumWorkGroupSize(const Function &F);
unsigned getInitialPSInputAddr(const Function &F);
+bool isShader(CallingConv::ID cc);
+bool isCompute(CallingConv::ID cc);
bool isSI(const MCSubtargetInfo &STI);
bool isCI(const MCSubtargetInfo &STI);
diff --git a/lib/Target/AMDGPU/Utils/AMDKernelCodeTInfo.h b/lib/Target/AMDGPU/Utils/AMDKernelCodeTInfo.h
new file mode 100644
index 000000000000..3a5ff60601d0
--- /dev/null
+++ b/lib/Target/AMDGPU/Utils/AMDKernelCodeTInfo.h
@@ -0,0 +1,165 @@
+//===--------------------- AMDKernelCodeTInfo.h ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file - specifies tables for amd_kernel_code_t structure parsing/printing
+//
+//===----------------------------------------------------------------------===//
+
+#define QNAME(name) amd_kernel_code_t::name
+#define FLD_T(name) decltype(QNAME(name)), &QNAME(name)
+
+#define FIELD2(sname, name) \
+ RECORD(sname, printField<FLD_T(name)>, parseField<FLD_T(name)>)
+
+#define FIELD(name) FIELD2(name, name)
+
+
+#define PRINTCODEPROP(name) \
+ printBitField<FLD_T(code_properties),\
+ AMD_CODE_PROPERTY_##name##_SHIFT,\
+ AMD_CODE_PROPERTY_##name##_WIDTH>
+
+#define PARSECODEPROP(name) \
+ parseBitField<FLD_T(code_properties),\
+ AMD_CODE_PROPERTY_##name##_SHIFT,\
+ AMD_CODE_PROPERTY_##name##_WIDTH>
+
+#define CODEPROP(name, shift) \
+ RECORD(name, PRINTCODEPROP(shift), PARSECODEPROP(shift))
+
+// These have to be lambdas because GetMacro/SetMacro are macro-name
+// parameters and cannot be passed as ordinary function arguments.
+#define PRINTCOMP(GetMacro, Shift) \
+[](StringRef Name, const amd_kernel_code_t &C, raw_ostream &OS) { \
+ printName(OS, Name) << \
+ (int)GetMacro(C.compute_pgm_resource_registers >> Shift); \
+}
+#define PARSECOMP(SetMacro, Shift) \
+[](amd_kernel_code_t &C, MCAsmParser &MCParser, raw_ostream &Err) { \
+ int64_t Value = 0; \
+ if (!expectAbsExpression(MCParser, Value, Err)) \
+ return false; \
+ C.compute_pgm_resource_registers |= SetMacro(Value) << Shift; \
+ return true; \
+}
+
+#define COMPPGM(name, GetMacro, SetMacro, Shift) \
+ RECORD(name, PRINTCOMP(GetMacro, Shift), PARSECOMP(SetMacro, Shift))
+
+#define COMPPGM1(name, AccMacro) \
+ COMPPGM(compute_pgm_rsrc1_##name, \
+ G_00B848_##AccMacro, S_00B848_##AccMacro, 0)
+
+#define COMPPGM2(name, AccMacro) \
+ COMPPGM(compute_pgm_rsrc2_##name, \
+ G_00B84C_##AccMacro, S_00B84C_##AccMacro, 32)
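+
+// For example, COMPPGM1(vgprs, VGPRS) defines a record named
+// compute_pgm_rsrc1_vgprs that prints via G_00B848_VGPRS and parses via
+// S_00B848_VGPRS at bit offset 0 of compute_pgm_resource_registers.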
+
+///////////////////////////////////////////////////////////////////////////////
+// Beginning of the table
+// Define RECORD(name, print, parse) in your code to get field definitions
+// and include this file
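+//
+// For example, AMDKernelCodeTUtils.cpp builds its table of field names with:
+//
+//   #define RECORD(name, print, parse) #name
+//   #include "AMDKernelCodeTInfo.h"
+//   #undef RECORD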
+
+FIELD2(kernel_code_version_major, amd_kernel_code_version_major),
+FIELD2(kernel_code_version_minor, amd_kernel_code_version_minor),
+FIELD2(machine_kind, amd_machine_kind),
+FIELD2(machine_version_major, amd_machine_version_major),
+FIELD2(machine_version_minor, amd_machine_version_minor),
+FIELD2(machine_version_stepping, amd_machine_version_stepping),
+FIELD(kernel_code_entry_byte_offset),
+FIELD(kernel_code_prefetch_byte_size),
+FIELD(max_scratch_backing_memory_byte_size),
+FIELD(compute_pgm_resource_registers),
+FIELD(workitem_private_segment_byte_size),
+FIELD(workgroup_group_segment_byte_size),
+FIELD(gds_segment_byte_size),
+FIELD(kernarg_segment_byte_size),
+FIELD(workgroup_fbarrier_count),
+FIELD(wavefront_sgpr_count),
+FIELD(workitem_vgpr_count),
+FIELD(reserved_vgpr_first),
+FIELD(reserved_vgpr_count),
+FIELD(reserved_sgpr_first),
+FIELD(reserved_sgpr_count),
+FIELD(debug_wavefront_private_segment_offset_sgpr),
+FIELD(debug_private_segment_buffer_sgpr),
+FIELD(kernarg_segment_alignment),
+FIELD(group_segment_alignment),
+FIELD(private_segment_alignment),
+FIELD(wavefront_size),
+FIELD(call_convention),
+FIELD(runtime_loader_kernel_symbol),
+
+COMPPGM1(vgprs, VGPRS),
+COMPPGM1(sgprs, SGPRS),
+COMPPGM1(priority, PRIORITY),
+COMPPGM1(float_mode, FLOAT_MODE),
+COMPPGM1(priv, PRIV),
+COMPPGM1(dx10_clamp, DX10_CLAMP),
+COMPPGM1(debug_mode, DEBUG_MODE),
+COMPPGM1(ieee_mode, IEEE_MODE),
+COMPPGM2(scratch_en, SCRATCH_EN),
+COMPPGM2(user_sgpr, USER_SGPR),
+COMPPGM2(tgid_x_en, TGID_X_EN),
+COMPPGM2(tgid_y_en, TGID_Y_EN),
+COMPPGM2(tgid_z_en, TGID_Z_EN),
+COMPPGM2(tg_size_en, TG_SIZE_EN),
+COMPPGM2(tidig_comp_cnt, TIDIG_COMP_CNT),
+COMPPGM2(excp_en_msb, EXCP_EN_MSB),
+COMPPGM2(lds_size, LDS_SIZE),
+COMPPGM2(excp_en, EXCP_EN),
+
+CODEPROP(enable_sgpr_private_segment_buffer,
+ ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER),
+CODEPROP(enable_sgpr_dispatch_ptr,
+ ENABLE_SGPR_DISPATCH_PTR),
+CODEPROP(enable_sgpr_queue_ptr,
+ ENABLE_SGPR_QUEUE_PTR),
+CODEPROP(enable_sgpr_kernarg_segment_ptr,
+ ENABLE_SGPR_KERNARG_SEGMENT_PTR),
+CODEPROP(enable_sgpr_dispatch_id,
+ ENABLE_SGPR_DISPATCH_ID),
+CODEPROP(enable_sgpr_flat_scratch_init,
+ ENABLE_SGPR_FLAT_SCRATCH_INIT),
+CODEPROP(enable_sgpr_private_segment_size,
+ ENABLE_SGPR_PRIVATE_SEGMENT_SIZE),
+CODEPROP(enable_sgpr_grid_workgroup_count_x,
+ ENABLE_SGPR_GRID_WORKGROUP_COUNT_X),
+CODEPROP(enable_sgpr_grid_workgroup_count_y,
+ ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y),
+CODEPROP(enable_sgpr_grid_workgroup_count_z,
+ ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z),
+CODEPROP(enable_ordered_append_gds,
+ ENABLE_ORDERED_APPEND_GDS),
+CODEPROP(private_element_size,
+ PRIVATE_ELEMENT_SIZE),
+CODEPROP(is_ptr64,
+ IS_PTR64),
+CODEPROP(is_dynamic_callstack,
+ IS_DYNAMIC_CALLSTACK),
+CODEPROP(is_debug_enabled,
+ IS_DEBUG_SUPPORTED),
+CODEPROP(is_xnack_enabled,
+ IS_XNACK_SUPPORTED)
+
+// end of the table
+///////////////////////////////////////////////////////////////////////////////
+
+#undef QNAME
+#undef FLD_T
+#undef FIELD2
+#undef FIELD
+#undef PRINTCODEPROP
+#undef PARSECODEPROP
+#undef CODEPROP
+#undef PRINTCOMP
+#undef PARSECOMP
+#undef COMPPGM
+#undef COMPPGM1
+#undef COMPPGM2
diff --git a/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp b/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp
new file mode 100644
index 000000000000..f64973afa44f
--- /dev/null
+++ b/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp
@@ -0,0 +1,166 @@
+//===------------------- AMDKernelCodeTUtils.cpp --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file - utility functions to parse/print amd_kernel_code_t structure
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDKernelCodeTUtils.h"
+#include "SIDefines.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+static ArrayRef<StringRef> get_amd_kernel_code_t_FldNames() {
+ static StringRef const Table[] = {
+ "", // not found placeholder
+#define RECORD(name, print, parse) #name
+#include "AMDKernelCodeTInfo.h"
+#undef RECORD
+ };
+ return makeArrayRef(Table);
+}
+
+static StringMap<int> createIndexMap(const ArrayRef<StringRef> &a) {
+ StringMap<int> map;
+ for (auto Name : a)
+ map.insert(std::make_pair(Name, map.size()));
+ return map;
+}
+
+static int get_amd_kernel_code_t_FieldIndex(StringRef name) {
+  static const auto map = createIndexMap(get_amd_kernel_code_t_FldNames());
+  // lookup() returns 0 (the "" placeholder) for unknown names, so this
+  // returns -1 if the field is not found.
+  return map.lookup(name) - 1;
+}
+
+static StringRef get_amd_kernel_code_t_FieldName(int index) {
+ return get_amd_kernel_code_t_FldNames()[index + 1];
+}
+
+
+// Field printing
+
+static raw_ostream &printName(raw_ostream &OS, StringRef Name) {
+ return OS << Name << " = ";
+}
+
+template <typename T, T amd_kernel_code_t::*ptr>
+static void printField(StringRef Name, const amd_kernel_code_t &C,
+ raw_ostream &OS) {
+ printName(OS, Name) << (int)(C.*ptr);
+}
+
+template <typename T, T amd_kernel_code_t::*ptr, int shift, int width = 1>
+static void printBitField(StringRef Name, const amd_kernel_code_t &c,
+ raw_ostream &OS) {
+ const auto Mask = (static_cast<T>(1) << width) - 1;
+ printName(OS, Name) << (int)((c.*ptr >> shift) & Mask);
+}
+
+typedef void(*PrintFx)(StringRef,
+ const amd_kernel_code_t &,
+ raw_ostream &);
+
+static ArrayRef<PrintFx> getPrinterTable() {
+ static const PrintFx Table[] = {
+#define RECORD(name, print, parse) print
+#include "AMDKernelCodeTInfo.h"
+#undef RECORD
+ };
+ return makeArrayRef(Table);
+}
+
+void llvm::printAmdKernelCodeField(const amd_kernel_code_t &C,
+ int FldIndex,
+ raw_ostream &OS) {
+ auto Printer = getPrinterTable()[FldIndex];
+ if (Printer)
+ Printer(get_amd_kernel_code_t_FieldName(FldIndex), C, OS);
+}
+
+void llvm::dumpAmdKernelCode(const amd_kernel_code_t *C,
+ raw_ostream &OS,
+ const char *tab) {
+ const int Size = getPrinterTable().size();
+ for (int i = 0; i < Size; ++i) {
+ OS << tab;
+ printAmdKernelCodeField(*C, i, OS);
+ OS << '\n';
+ }
+}
+
+
+// Field parsing
+
+static bool expectAbsExpression(MCAsmParser &MCParser, int64_t &Value, raw_ostream& Err) {
+
+ if (MCParser.getLexer().isNot(AsmToken::Equal)) {
+ Err << "expected '='";
+ return false;
+ }
+ MCParser.getLexer().Lex();
+
+ if (MCParser.parseAbsoluteExpression(Value)) {
+ Err << "integer absolute expression expected";
+ return false;
+ }
+ return true;
+}
+
+template <typename T, T amd_kernel_code_t::*ptr>
+static bool parseField(amd_kernel_code_t &C, MCAsmParser &MCParser,
+ raw_ostream &Err) {
+ int64_t Value = 0;
+ if (!expectAbsExpression(MCParser, Value, Err))
+ return false;
+ C.*ptr = (T)Value;
+ return true;
+}
+
+template <typename T, T amd_kernel_code_t::*ptr, int shift, int width = 1>
+static bool parseBitField(amd_kernel_code_t &C, MCAsmParser &MCParser,
+ raw_ostream &Err) {
+ int64_t Value = 0;
+ if (!expectAbsExpression(MCParser, Value, Err))
+ return false;
+ const uint64_t Mask = ((UINT64_C(1) << width) - 1) << shift;
+ C.*ptr &= (T)~Mask;
+ C.*ptr |= (T)((Value << shift) & Mask);
+ return true;
+}
+
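+// For example, with shift = 2 and width = 3 the mask above is 0b111 << 2
+// (0x1c): parsing the value 5 stores 0b101 into bits [4:2], and printing
+// reads those same three bits back.
+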
+typedef bool(*ParseFx)(amd_kernel_code_t &,
+ MCAsmParser &MCParser,
+ raw_ostream &Err);
+
+static ArrayRef<ParseFx> getParserTable() {
+ static const ParseFx Table[] = {
+#define RECORD(name, print, parse) parse
+#include "AMDKernelCodeTInfo.h"
+#undef RECORD
+ };
+ return makeArrayRef(Table);
+}
+
+bool llvm::parseAmdKernelCodeField(StringRef ID,
+ MCAsmParser &MCParser,
+ amd_kernel_code_t &C,
+ raw_ostream &Err) {
+ const int Idx = get_amd_kernel_code_t_FieldIndex(ID);
+ if (Idx < 0) {
+ Err << "unexpected amd_kernel_code_t field name " << ID;
+ return false;
+ }
+ auto Parser = getParserTable()[Idx];
+ return Parser ? Parser(C, MCParser, Err) : false;
+}
diff --git a/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.h b/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.h
new file mode 100644
index 000000000000..d9edca7a82ac
--- /dev/null
+++ b/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.h
@@ -0,0 +1,39 @@
+//===- AMDKernelCodeTUtils.h - helpers for amd_kernel_code_t ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file AMDKernelCodeTUtils.h
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AMDGPU_UTILS_AMDKERNELCODETUTILS_H
+#define LLVM_LIB_TARGET_AMDGPU_UTILS_AMDKERNELCODETUTILS_H
+
+#include "AMDKernelCodeT.h"
+
+namespace llvm {
+
+class MCAsmLexer;
+class MCAsmParser;
+class raw_ostream;
+class StringRef;
+
+void printAmdKernelCodeField(const amd_kernel_code_t &C,
+ int FldIndex,
+ raw_ostream &OS);
+
+void dumpAmdKernelCode(const amd_kernel_code_t *C,
+ raw_ostream &OS,
+ const char *tab);
+
+bool parseAmdKernelCodeField(StringRef ID,
+ MCAsmParser &Parser,
+ amd_kernel_code_t &C,
+ raw_ostream &Err);
+
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_AMDGPU_UTILS_AMDKERNELCODETUTILS_H
diff --git a/lib/Target/AMDGPU/Utils/CMakeLists.txt b/lib/Target/AMDGPU/Utils/CMakeLists.txt
index 2c07aeab7dd3..01b80ebe8d3d 100644
--- a/lib/Target/AMDGPU/Utils/CMakeLists.txt
+++ b/lib/Target/AMDGPU/Utils/CMakeLists.txt
@@ -1,3 +1,5 @@
add_llvm_library(LLVMAMDGPUUtils
AMDGPUBaseInfo.cpp
+ AMDKernelCodeTUtils.cpp
+ AMDGPUAsmUtils.cpp
)
diff --git a/lib/Target/AMDGPU/Utils/Makefile b/lib/Target/AMDGPU/Utils/Makefile
deleted file mode 100644
index 1019e726d50e..000000000000
--- a/lib/Target/AMDGPU/Utils/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/AMDGPU/Utils/Makefile --------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAMDGPUUtils
-
-# Hack: we need to include 'main' AMDGPU target directory to grab private
-# headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AMDGPU/VIInstrFormats.td b/lib/Target/AMDGPU/VIInstrFormats.td
index d8738f992630..912ed5329bfe 100644
--- a/lib/Target/AMDGPU/VIInstrFormats.td
+++ b/lib/Target/AMDGPU/VIInstrFormats.td
@@ -91,21 +91,28 @@ class MTBUFe_vi <bits<4> op> : Enc64 {
class SMEMe_vi <bits<8> op, bit imm> : Enc64 {
bits<7> sbase;
- bits<7> sdata;
+ bits<7> sdst;
bits<1> glc;
- bits<20> offset;
let Inst{5-0} = sbase{6-1};
- let Inst{12-6} = sdata;
+ let Inst{12-6} = sdst;
let Inst{16} = glc;
let Inst{17} = imm;
let Inst{25-18} = op;
let Inst{31-26} = 0x30; //encoding
+}
+
+class SMEM_IMMe_vi <bits<8> op> : SMEMe_vi<op, 1> {
+ bits<20> offset;
let Inst{51-32} = offset;
}
-class VOP3e_vi <bits<10> op> : Enc64 {
- bits<8> vdst;
+class SMEM_SOFFe_vi <bits<8> op> : SMEMe_vi<op, 0> {
+ bits<20> soff;
+ let Inst{51-32} = soff;
+}
+
+class VOP3a_vi <bits<10> op> : Enc64 {
bits<2> src0_modifiers;
bits<9> src0;
bits<2> src1_modifiers;
@@ -115,7 +122,6 @@ class VOP3e_vi <bits<10> op> : Enc64 {
bits<1> clamp;
bits<2> omod;
- let Inst{7-0} = vdst;
let Inst{8} = src0_modifiers{1};
let Inst{9} = src1_modifiers{1};
let Inst{10} = src2_modifiers{1};
@@ -131,6 +137,20 @@ class VOP3e_vi <bits<10> op> : Enc64 {
let Inst{63} = src2_modifiers{0};
}
+class VOP3e_vi <bits<10> op> : VOP3a_vi <op> {
+ bits<8> vdst;
+
+ let Inst{7-0} = vdst;
+}
+
+// Encoding used for VOPC instructions encoded as VOP3
+// Differs from VOP3e by destination name (sdst) as VOPC doesn't have vector dst
+class VOP3ce_vi <bits<10> op> : VOP3a_vi <op> {
+ bits<8> sdst;
+
+ let Inst{7-0} = sdst;
+}
+
class VOP3be_vi <bits<10> op> : Enc64 {
bits<8> vdst;
bits<2> src0_modifiers;
@@ -157,6 +177,117 @@ class VOP3be_vi <bits<10> op> : Enc64 {
let Inst{63} = src2_modifiers{0};
}
+class VOP_DPP <dag outs, dag ins, string asm, list<dag> pattern, bit HasMods = 0> :
+ VOPAnyCommon <outs, ins, asm, pattern> {
+ let DPP = 1;
+ let Size = 8;
+
+ let AsmMatchConverter = !if(!eq(HasMods,1), "cvtDPP", "");
+}
+
+class VOP_DPPe : Enc64 {
+ bits<2> src0_modifiers;
+ bits<8> src0;
+ bits<2> src1_modifiers;
+ bits<9> dpp_ctrl;
+ bits<1> bound_ctrl;
+ bits<4> bank_mask;
+ bits<4> row_mask;
+
+ let Inst{39-32} = src0;
+ let Inst{48-40} = dpp_ctrl;
+ let Inst{51} = bound_ctrl;
+ let Inst{52} = src0_modifiers{0}; // src0_neg
+ let Inst{53} = src0_modifiers{1}; // src0_abs
+ let Inst{54} = src1_modifiers{0}; // src1_neg
+ let Inst{55} = src1_modifiers{1}; // src1_abs
+ let Inst{59-56} = bank_mask;
+ let Inst{63-60} = row_mask;
+}
+
+class VOP1_DPPe <bits<8> op> : VOP_DPPe {
+ bits<8> vdst;
+
+ let Inst{8-0} = 0xfa; // dpp
+ let Inst{16-9} = op;
+ let Inst{24-17} = vdst;
+ let Inst{31-25} = 0x3f; //encoding
+}
+
+class VOP2_DPPe <bits<6> op> : VOP_DPPe {
+ bits<8> vdst;
+ bits<8> src1;
+
+ let Inst{8-0} = 0xfa; //dpp
+ let Inst{16-9} = src1;
+ let Inst{24-17} = vdst;
+ let Inst{30-25} = op;
+ let Inst{31} = 0x0; //encoding
+}
+
+class VOP_SDWA <dag outs, dag ins, string asm, list<dag> pattern, bit HasMods = 0> :
+ VOPAnyCommon <outs, ins, asm, pattern> {
+ let SDWA = 1;
+ let Size = 8;
+}
+
+class VOP_SDWAe : Enc64 {
+ bits<8> src0;
+ bits<3> src0_sel;
+ bits<2> src0_fmodifiers; // {abs,neg}
+ bits<1> src0_imodifiers; // sext
+ bits<3> src1_sel;
+ bits<2> src1_fmodifiers;
+ bits<1> src1_imodifiers;
+ bits<3> dst_sel;
+ bits<2> dst_unused;
+ bits<1> clamp;
+
+ let Inst{39-32} = src0;
+ let Inst{42-40} = dst_sel;
+ let Inst{44-43} = dst_unused;
+ let Inst{45} = clamp;
+ let Inst{50-48} = src0_sel;
+ let Inst{53-52} = src0_fmodifiers;
+ let Inst{51} = src0_imodifiers;
+ let Inst{58-56} = src1_sel;
+ let Inst{61-60} = src1_fmodifiers;
+ let Inst{59} = src1_imodifiers;
+}
+
+class VOP1_SDWAe <bits<8> op> : VOP_SDWAe {
+ bits<8> vdst;
+
+ let Inst{8-0} = 0xf9; // sdwa
+ let Inst{16-9} = op;
+ let Inst{24-17} = vdst;
+ let Inst{31-25} = 0x3f; // encoding
+}
+
+class VOP2_SDWAe <bits<6> op> : VOP_SDWAe {
+ bits<8> vdst;
+ bits<8> src1;
+
+ let Inst{8-0} = 0xf9; // sdwa
+ let Inst{16-9} = src1;
+ let Inst{24-17} = vdst;
+ let Inst{30-25} = op;
+ let Inst{31} = 0x0; // encoding
+}
+
+class VOPC_SDWAe <bits<8> op> : VOP_SDWAe {
+ bits<8> src1;
+
+ let Inst{8-0} = 0xf9; // sdwa
+ let Inst{16-9} = src1;
+ let Inst{24-17} = op;
+ let Inst{31-25} = 0x3e; // encoding
+
+ // VOPC disallows dst_sel and dst_unused as they have no effect on destination
+ let Inst{42-40} = 0x6;
+ let Inst{44-43} = 0x2;
+}
+
class EXPe_vi : EXPe {
let Inst{31-26} = 0x31; //encoding
}
diff --git a/lib/Target/AMDGPU/VIInstructions.td b/lib/Target/AMDGPU/VIInstructions.td
index 1a7801c92bd7..5c490ab900f2 100644
--- a/lib/Target/AMDGPU/VIInstructions.td
+++ b/lib/Target/AMDGPU/VIInstructions.td
@@ -11,6 +11,8 @@
let SIAssemblerPredicate = DisableInst, SubtargetPredicate = isVI in {
+let DisableSIDecoder = 1 in {
+
//===----------------------------------------------------------------------===//
// VOP1 Instructions
//===----------------------------------------------------------------------===//
@@ -52,9 +54,9 @@ defm V_SUBREV_F16 : VOP2Inst <vop2<0, 0x21>, "v_subrev_f16", VOP_F16_F16_F16,
defm V_MUL_F16 : VOP2Inst <vop2<0, 0x22>, "v_mul_f16", VOP_F16_F16_F16>;
defm V_MAC_F16 : VOP2Inst <vop2<0, 0x23>, "v_mac_f16", VOP_F16_F16_F16>;
} // End isCommutable = 1
-defm V_MADMK_F16 : VOP2MADK <vop2<0,0x24>, "v_madmk_f16">;
+defm V_MADMK_F16 : VOP2MADK <vop2<0,0x24>, "v_madmk_f16", VOP_MADMK>;
let isCommutable = 1 in {
-defm V_MADAK_F16 : VOP2MADK <vop2<0,0x25>, "v_madak_f16">;
+defm V_MADAK_F16 : VOP2MADK <vop2<0,0x25>, "v_madak_f16", VOP_MADAK>;
defm V_ADD_U16 : VOP2Inst <vop2<0,0x26>, "v_add_u16", VOP_I16_I16_I16>;
defm V_SUB_U16 : VOP2Inst <vop2<0,0x27>, "v_sub_u16" , VOP_I16_I16_I16>;
defm V_SUBREV_U16 : VOP2Inst <vop2<0,0x28>, "v_subrev_u16", VOP_I16_I16_I16>;
@@ -73,6 +75,16 @@ defm V_MIN_I16 : VOP2Inst <vop2<0,0x32>, "v_min_i16", VOP_I16_I16_I16>;
} // End isCommutable = 1
defm V_LDEXP_F16 : VOP2Inst <vop2<0,0x33>, "v_ldexp_f16", VOP_F16_F16_I16>;
+//===----------------------------------------------------------------------===//
+// VOP3 Instructions
+//===----------------------------------------------------------------------===//
+let isCommutable = 1 in {
+ defm V_MAD_F16 : VOP3Inst <vop3<0, 0x1ea>, "v_mad_f16", VOP_F16_F16_F16_F16>;
+ defm V_MAD_U16 : VOP3Inst <vop3<0, 0x1eb>, "v_mad_u16", VOP_I16_I16_I16_I16>;
+ defm V_MAD_I16 : VOP3Inst <vop3<0, 0x1ec>, "v_mad_i16", VOP_I16_I16_I16_I16>;
+}
+} // let DisableSIDecoder = 1
+
// Aliases to simplify matching of floating-point instructions that
// are VOP2 on SI and VOP3 on VI.
@@ -99,6 +111,9 @@ def S_DCACHE_WB : SMEM_Inval <0x21,
def S_DCACHE_WB_VOL : SMEM_Inval <0x23,
"s_dcache_wb_vol", int_amdgcn_s_dcache_wb_vol>;
+def S_MEMREALTIME : SMEM_Ret<0x25,
+ "s_memrealtime", int_amdgcn_s_memrealtime>;
+
} // End SIAssemblerPredicate = DisableInst, SubtargetPredicate = isVI
let Predicates = [isVI] in {
@@ -109,4 +124,35 @@ def : Pat <
(S_BUFFER_LOAD_DWORD_IMM $sbase, (as_i32imm $offset))
>;
+//===----------------------------------------------------------------------===//
+// DPP Patterns
+//===----------------------------------------------------------------------===//
+
+def : Pat <
+ (int_amdgcn_mov_dpp i32:$src, imm:$dpp_ctrl, imm:$row_mask, imm:$bank_mask,
+ imm:$bound_ctrl),
+ (V_MOV_B32_dpp $src, (as_i32imm $dpp_ctrl), (as_i32imm $row_mask),
+ (as_i32imm $bank_mask), (as_i1imm $bound_ctrl))
+>;
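
On the consumer side, a hedged sketch: assuming Clang's matching __builtin_amdgcn_mov_dpp builtin (which feeds this intrinsic; availability on this exact release is an assumption, and it only compiles for an AMDGPU target with DPP), a row-shift-left-by-one swizzle would be requested as:

// 0x101 encodes row_shl:1 in dpp_ctrl; 0xf row/bank masks enable all lanes.
// Argument order matches the intrinsic in the pattern above.
int rowShl1(int V) {
  return __builtin_amdgcn_mov_dpp(V, /*dpp_ctrl=*/0x101, /*row_mask=*/0xf,
                                  /*bank_mask=*/0xf, /*bound_ctrl=*/false);
}
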
+
+//===----------------------------------------------------------------------===//
+// Misc Patterns
+//===----------------------------------------------------------------------===//
+
+def : Pat <
+ (i64 (readcyclecounter)),
+ (S_MEMREALTIME)
+>;
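
For context, llvm.readcyclecounter is reachable from C/C++ via Clang's __builtin_readcyclecounter, so with the pattern above a plain builtin call selects to s_memrealtime on VI (usage sketch, not from the patch):

// Reads the free-running counter; on VI this now selects s_memrealtime.
unsigned long long readTimer() {
  return __builtin_readcyclecounter();
}
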
+
+//===----------------------------------------------------------------------===//
+// DS_PERMUTE/DS_BPERMUTE Instructions.
+//===----------------------------------------------------------------------===//
+
+let Uses = [EXEC] in {
+defm DS_PERMUTE_B32 : DS_1A1D_PERMUTE <0x3e, "ds_permute_b32", VGPR_32,
+ int_amdgcn_ds_permute>;
+defm DS_BPERMUTE_B32 : DS_1A1D_PERMUTE <0x3f, "ds_bpermute_b32", VGPR_32,
+ int_amdgcn_ds_bpermute>;
+}
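
As a hedged behavioral model (based on AMD's GCN3 ISA description, not on anything in this patch): ds_bpermute_b32 is a "backward" permute in which each active lane fetches src from the lane named by its byte address, which is why these definitions must read EXEC.

#include <cstdint>

// Rough scalar model of ds_bpermute_b32 over a 64-lane wavefront; inactive
// lanes are skipped, matching the Uses = [EXEC] constraint above.
void dsBPermuteB32(uint32_t Dst[64], const uint32_t Addr[64],
                   const uint32_t Src[64], uint64_t Exec) {
  for (unsigned Lane = 0; Lane < 64; ++Lane) {
    if (!((Exec >> Lane) & 1))
      continue;
    unsigned SrcLane = (Addr[Lane] / 4) % 64; // byte address selects a lane
    Dst[Lane] = Src[SrcLane];
  }
}
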
+
} // End Predicates = [isVI]
diff --git a/lib/Target/ARM/A15SDOptimizer.cpp b/lib/Target/ARM/A15SDOptimizer.cpp
index 7a1865ce5fd6..9228cc2d7a9c 100644
--- a/lib/Target/ARM/A15SDOptimizer.cpp
+++ b/lib/Target/ARM/A15SDOptimizer.cpp
@@ -68,34 +68,31 @@ namespace {
//
unsigned createDupLane(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
- DebugLoc DL,
- unsigned Reg, unsigned Lane,
- bool QPR=false);
+ const DebugLoc &DL, unsigned Reg, unsigned Lane,
+ bool QPR = false);
unsigned createExtractSubreg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
- DebugLoc DL,
- unsigned DReg, unsigned Lane,
- const TargetRegisterClass *TRC);
+ const DebugLoc &DL, unsigned DReg,
+ unsigned Lane, const TargetRegisterClass *TRC);
unsigned createVExt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
- DebugLoc DL,
- unsigned Ssub0, unsigned Ssub1);
+ const DebugLoc &DL, unsigned Ssub0, unsigned Ssub1);
unsigned createRegSequence(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
- DebugLoc DL,
- unsigned Reg1, unsigned Reg2);
+ const DebugLoc &DL, unsigned Reg1,
+ unsigned Reg2);
unsigned createInsertSubreg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
- DebugLoc DL, unsigned DReg, unsigned Lane,
- unsigned ToInsert);
+ const DebugLoc &DL, unsigned DReg,
+ unsigned Lane, unsigned ToInsert);
unsigned createImplicitDef(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
- DebugLoc DL);
+ const DebugLoc &DL);
//
// Various property checkers
@@ -426,11 +423,10 @@ SmallVector<unsigned, 8> A15SDOptimizer::getReadDPRs(MachineInstr *MI) {
}
// Creates a DPR register from an SPR one by using a VDUP.
-unsigned
-A15SDOptimizer::createDupLane(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertBefore,
- DebugLoc DL,
- unsigned Reg, unsigned Lane, bool QPR) {
+unsigned A15SDOptimizer::createDupLane(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator InsertBefore,
+ const DebugLoc &DL, unsigned Reg,
+ unsigned Lane, bool QPR) {
unsigned Out = MRI->createVirtualRegister(QPR ? &ARM::QPRRegClass :
&ARM::DPRRegClass);
AddDefaultPred(BuildMI(MBB,
@@ -445,12 +441,10 @@ A15SDOptimizer::createDupLane(MachineBasicBlock &MBB,
}
// Creates an SPR register from a DPR by copying the value in lane 0.
-unsigned
-A15SDOptimizer::createExtractSubreg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertBefore,
- DebugLoc DL,
- unsigned DReg, unsigned Lane,
- const TargetRegisterClass *TRC) {
+unsigned A15SDOptimizer::createExtractSubreg(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
+ const DebugLoc &DL, unsigned DReg, unsigned Lane,
+ const TargetRegisterClass *TRC) {
unsigned Out = MRI->createVirtualRegister(TRC);
BuildMI(MBB,
InsertBefore,
@@ -462,11 +456,9 @@ A15SDOptimizer::createExtractSubreg(MachineBasicBlock &MBB,
}
// Takes two SPR registers and creates a DPR by using a REG_SEQUENCE.
-unsigned
-A15SDOptimizer::createRegSequence(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertBefore,
- DebugLoc DL,
- unsigned Reg1, unsigned Reg2) {
+unsigned A15SDOptimizer::createRegSequence(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
+ const DebugLoc &DL, unsigned Reg1, unsigned Reg2) {
unsigned Out = MRI->createVirtualRegister(&ARM::QPRRegClass);
BuildMI(MBB,
InsertBefore,
@@ -481,11 +473,10 @@ A15SDOptimizer::createRegSequence(MachineBasicBlock &MBB,
// Takes two DPR registers that have previously been VDUPed (Ssub0 and Ssub1)
// and merges them into one DPR register.
-unsigned
-A15SDOptimizer::createVExt(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertBefore,
- DebugLoc DL,
- unsigned Ssub0, unsigned Ssub1) {
+unsigned A15SDOptimizer::createVExt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator InsertBefore,
+ const DebugLoc &DL, unsigned Ssub0,
+ unsigned Ssub1) {
unsigned Out = MRI->createVirtualRegister(&ARM::DPRRegClass);
AddDefaultPred(BuildMI(MBB,
InsertBefore,
@@ -497,11 +488,9 @@ A15SDOptimizer::createVExt(MachineBasicBlock &MBB,
return Out;
}
-unsigned
-A15SDOptimizer::createInsertSubreg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertBefore,
- DebugLoc DL, unsigned DReg, unsigned Lane,
- unsigned ToInsert) {
+unsigned A15SDOptimizer::createInsertSubreg(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
+ const DebugLoc &DL, unsigned DReg, unsigned Lane, unsigned ToInsert) {
unsigned Out = MRI->createVirtualRegister(&ARM::DPR_VFP2RegClass);
BuildMI(MBB,
InsertBefore,
@@ -517,7 +506,7 @@ A15SDOptimizer::createInsertSubreg(MachineBasicBlock &MBB,
unsigned
A15SDOptimizer::createImplicitDef(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
- DebugLoc DL) {
+ const DebugLoc &DL) {
unsigned Out = MRI->createVirtualRegister(&ARM::DPRRegClass);
BuildMI(MBB,
InsertBefore,
@@ -681,6 +670,9 @@ bool A15SDOptimizer::runOnInstruction(MachineInstr *MI) {
}
bool A15SDOptimizer::runOnMachineFunction(MachineFunction &Fn) {
+ if (skipFunction(*Fn.getFunction()))
+ return false;
+
const ARMSubtarget &STI = Fn.getSubtarget<ARMSubtarget>();
// Since the A15SDOptimizer pass can insert VDUP instructions, it can only be
// enabled when NEON is available.
@@ -701,7 +693,7 @@ bool A15SDOptimizer::runOnMachineFunction(MachineFunction &Fn) {
for (MachineBasicBlock::iterator MI = MFI->begin(), ME = MFI->end();
MI != ME;) {
- Modified |= runOnInstruction(MI++);
+ Modified |= runOnInstruction(&*MI++);
}
}
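
A side note on the `&*MI++` change above (an illustrative, standalone example, not LLVM code): once list iterators no longer convert implicitly to pointers, dereference-then-address-of while post-incrementing is the idiomatic way to hand a raw pointer to the callee.

#include <list>

// Minimal stand-in: visit each element through a pointer-taking callback,
// advancing the iterator in the same expression, as in runOnInstruction(&*MI++).
int visitAll(std::list<int> &L, int (*Visit)(int *)) {
  int Acc = 0;
  for (auto It = L.begin(), End = L.end(); It != End;)
    Acc += Visit(&*It++); // take the element's address, then advance
  return Acc;
}
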
diff --git a/lib/Target/ARM/ARM.h b/lib/Target/ARM/ARM.h
index cd7540e52410..690ff86a0c86 100644
--- a/lib/Target/ARM/ARM.h
+++ b/lib/Target/ARM/ARM.h
@@ -27,6 +27,7 @@ class FunctionPass;
class ImmutablePass;
class MachineInstr;
class MCInst;
+class PassRegistry;
class TargetLowering;
class TargetMachine;
@@ -45,6 +46,9 @@ FunctionPass *createThumb2SizeReductionPass(
void LowerARMMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
ARMAsmPrinter &AP);
+void initializeARMLoadStoreOptPass(PassRegistry &);
+void initializeARMPreAllocLoadStoreOptPass(PassRegistry &);
+
} // end namespace llvm
#endif
diff --git a/lib/Target/ARM/ARM.td b/lib/Target/ARM/ARM.td
index c171656b48ab..ef626b66a1e7 100644
--- a/lib/Target/ARM/ARM.td
+++ b/lib/Target/ARM/ARM.td
@@ -76,6 +76,11 @@ def FeatureT2XtPk : SubtargetFeature<"t2xtpk", "HasT2ExtractPack", "true",
"Enable Thumb2 extract and pack instructions">;
def FeatureDB : SubtargetFeature<"db", "HasDataBarrier", "true",
"Has data barrier (dmb / dsb) instructions">;
+def FeatureV7Clrex : SubtargetFeature<"v7clrex", "HasV7Clrex", "true",
+ "Has v7 clrex instruction">;
+def FeatureAcquireRelease : SubtargetFeature<"acquire-release",
+ "HasAcquireRelease", "true",
+ "Has v8 acquire/release (lda/ldaex etc) instructions">;
def FeatureSlowFPBrcc : SubtargetFeature<"slow-fp-brcc", "SlowFPBrcc", "true",
"FP compare + branch is slow">;
def FeatureVFPOnlySP : SubtargetFeature<"fp-only-sp", "FPOnlySP", "true",
@@ -84,17 +89,98 @@ def FeaturePerfMon : SubtargetFeature<"perfmon", "HasPerfMon", "true",
"Enable support for Performance Monitor extensions">;
def FeatureTrustZone : SubtargetFeature<"trustzone", "HasTrustZone", "true",
"Enable support for TrustZone security extensions">;
+def Feature8MSecExt : SubtargetFeature<"8msecext", "Has8MSecExt", "true",
+ "Enable support for ARMv8-M Security Extensions">;
def FeatureCrypto : SubtargetFeature<"crypto", "HasCrypto", "true",
"Enable support for Cryptography extensions",
[FeatureNEON]>;
def FeatureCRC : SubtargetFeature<"crc", "HasCRC", "true",
"Enable support for CRC instructions">;
+// Not to be confused with FeatureHasRetAddrStack (return address stack)
+def FeatureRAS : SubtargetFeature<"ras", "HasRAS", "true",
+ "Enable Reliability, Availability and Serviceability extensions">;
+
// Cyclone has preferred instructions for zeroing VFP registers, which can
// execute in 0 cycles.
def FeatureZCZeroing : SubtargetFeature<"zcz", "HasZeroCycleZeroing", "true",
"Has zero-cycle zeroing instructions">;
+// Whether or not it may be profitable to unpredicate certain instructions
+// during if conversion.
+def FeatureProfUnpredicate : SubtargetFeature<"prof-unpr",
+ "IsProfitableToUnpredicate",
+ "true",
+ "Is profitable to unpredicate">;
+
+// Some targets (e.g. Swift) have microcoded VGETLNi32.
+def FeatureSlowVGETLNi32 : SubtargetFeature<"slow-vgetlni32",
+ "HasSlowVGETLNi32", "true",
+ "Has slow VGETLNi32 - prefer VMOV">;
+
+// Some targets (e.g. Swift) have microcoded VDUP32.
+def FeatureSlowVDUP32 : SubtargetFeature<"slow-vdup32", "HasSlowVDUP32", "true",
+ "Has slow VDUP32 - prefer VMOV">;
+
+// Some targets (e.g. Cortex-A9) prefer VMOVSR to VMOVDRR even when using NEON
+// for scalar FP, as this allows more effective execution domain optimization.
+def FeaturePreferVMOVSR : SubtargetFeature<"prefer-vmovsr", "PreferVMOVSR",
+ "true", "Prefer VMOVSR">;
+
+// Swift has ISHST barriers compatible with Atomic Release semantics but weaker
+// than ISH
+def FeaturePrefISHSTBarrier : SubtargetFeature<"prefer-ishst", "PreferISHST",
+ "true", "Prefer ISHST barriers">;
+
+// Some targets (e.g. Cortex-A9) have muxed AGU and NEON/FPU.
+def FeatureMuxedUnits : SubtargetFeature<"muxed-units", "HasMuxedUnits", "true",
+ "Has muxed AGU and NEON/FPU">;
+
+// On some targets, a VLDM/VSTM starting with an odd register number needs more
+// microops than the equivalent single VLDRS instructions.
+def FeatureSlowOddRegister : SubtargetFeature<"slow-odd-reg", "SlowOddRegister",
+ "true", "VLDM/VSTM starting with an odd register is slow">;
+
+// Some targets have a renaming dependency when loading into D subregisters.
+def FeatureSlowLoadDSubreg : SubtargetFeature<"slow-load-D-subreg",
+ "SlowLoadDSubregister", "true",
+ "Loading into D subregs is slow">;
+// Some targets (e.g. Cortex-A15) never want VMOVS to be widened to VMOVD.
+def FeatureDontWidenVMOVS : SubtargetFeature<"dont-widen-vmovs",
+ "DontWidenVMOVS", "true",
+ "Don't widen VMOVS to VMOVD">;
+
+// Whether or not it is profitable to expand VFP/NEON MLA/MLS instructions.
+def FeatureExpandMLx : SubtargetFeature<"expand-fp-mlx", "ExpandMLx", "true",
+ "Expand VFP/NEON MLA/MLS instructions">;
+
+// Some targets have special RAW hazards for VFP/NEON VMLA/VMLS.
+def FeatureHasVMLxHazards : SubtargetFeature<"vmlx-hazards", "HasVMLxHazards",
+ "true", "Has VMLx hazards">;
+
+// Some targets (e.g. Cortex-A9) want to convert VMOVRS, VMOVSR and VMOVS from
+// VFP to NEON, as an execution domain optimization.
+def FeatureNEONForFPMovs : SubtargetFeature<"neon-fpmovs", "UseNEONForFPMovs",
+ "true", "Convert VMOVSR, VMOVRS, VMOVS to NEON">;
+
+// Some processors benefit from using NEON instructions for scalar
+// single-precision FP operations. This affects instruction selection and should
+// only be enabled if the handling of denormals is not important.
+def FeatureNEONForFP : SubtargetFeature<"neonfp", "UseNEONForSinglePrecisionFP",
+ "true",
+ "Use NEON for single precision FP">;
+
+// On some processors, VLDn instructions that access unaligned data take one
+// extra cycle. Take that into account when computing operand latencies.
+def FeatureCheckVLDnAlign : SubtargetFeature<"vldn-align", "CheckVLDnAlign",
+ "true",
+ "Check for VLDn unaligned access">;
+
+// Some processors have a nonpipelined VFP coprocessor.
+def FeatureNonpipelinedVFP : SubtargetFeature<"nonpipelined-vfp",
+ "NonpipelinedVFP", "true",
+ "VFP instructions are not pipelined">;
+
// Some processors have FP multiply-accumulate instructions that don't
// play nicely with other VFP / NEON instructions, and it's generally better
// to just not use them.
@@ -106,12 +192,6 @@ def FeatureVMLxForwarding : SubtargetFeature<"vmlx-forwarding",
"HasVMLxForwarding", "true",
"Has multiplier accumulator forwarding">;
-// Some processors benefit from using NEON instructions for scalar
-// single-precision FP operations.
-def FeatureNEONForFP : SubtargetFeature<"neonfp", "UseNEONForSinglePrecisionFP",
- "true",
- "Use NEON for single precision FP">;
-
// Disable 32-bit to 16-bit narrowing for experimentation.
def FeaturePref32BitThumb : SubtargetFeature<"32bit", "Pref32BitThumb", "true",
"Prefer 32-bit Thumb instrs">;
@@ -130,7 +210,7 @@ def FeatureAvoidMOVsShOp : SubtargetFeature<"avoid-movs-shop",
// Some processors perform return stack prediction. CodeGen should avoid issuing
// "normal" call instructions to callees which do not return.
-def FeatureHasRAS : SubtargetFeature<"ras", "HasRAS", "true",
+def FeatureHasRetAddrStack : SubtargetFeature<"ret-addr-stack", "HasRetAddrStack", "true",
"Has return address stack">;
/// DSP extension.
@@ -200,24 +280,31 @@ def HasV6Ops : SubtargetFeature<"v6", "HasV6Ops", "true",
def HasV6MOps : SubtargetFeature<"v6m", "HasV6MOps", "true",
"Support ARM v6M instructions",
[HasV6Ops]>;
+def HasV8MBaselineOps : SubtargetFeature<"v8m", "HasV8MBaselineOps", "true",
+ "Support ARM v8M Baseline instructions",
+ [HasV6MOps]>;
def HasV6KOps : SubtargetFeature<"v6k", "HasV6KOps", "true",
"Support ARM v6k instructions",
[HasV6Ops]>;
def HasV6T2Ops : SubtargetFeature<"v6t2", "HasV6T2Ops", "true",
"Support ARM v6t2 instructions",
- [HasV6MOps, HasV6KOps, FeatureThumb2]>;
+ [HasV8MBaselineOps, HasV6KOps, FeatureThumb2]>;
def HasV7Ops : SubtargetFeature<"v7", "HasV7Ops", "true",
"Support ARM v7 instructions",
- [HasV6T2Ops, FeaturePerfMon]>;
+ [HasV6T2Ops, FeaturePerfMon,
+ FeatureV7Clrex]>;
def HasV8Ops : SubtargetFeature<"v8", "HasV8Ops", "true",
"Support ARM v8 instructions",
- [HasV7Ops]>;
+ [HasV7Ops, FeatureAcquireRelease]>;
def HasV8_1aOps : SubtargetFeature<"v8.1a", "HasV8_1aOps", "true",
"Support ARM v8.1a instructions",
[HasV8Ops]>;
def HasV8_2aOps : SubtargetFeature<"v8.2a", "HasV8_2aOps", "true",
"Support ARM v8.2a instructions",
[HasV8_1aOps]>;
+def HasV8MMainlineOps : SubtargetFeature<"v8m.main", "HasV8MMainlineOps", "true",
+ "Support ARM v8M Mainline instructions",
+ [HasV7Ops]>;
//===----------------------------------------------------------------------===//
@@ -238,6 +325,8 @@ def ProcA15 : SubtargetFeature<"a15", "ARMProcFamily", "CortexA15",
"Cortex-A15 ARM processors", []>;
def ProcA17 : SubtargetFeature<"a17", "ARMProcFamily", "CortexA17",
"Cortex-A17 ARM processors", []>;
+def ProcA32 : SubtargetFeature<"a32", "ARMProcFamily", "CortexA32",
+ "Cortex-A32 ARM processors", []>;
def ProcA35 : SubtargetFeature<"a35", "ARMProcFamily", "CortexA35",
"Cortex-A35 ARM processors", []>;
def ProcA53 : SubtargetFeature<"a53", "ARMProcFamily", "CortexA53",
@@ -246,6 +335,8 @@ def ProcA57 : SubtargetFeature<"a57", "ARMProcFamily", "CortexA57",
"Cortex-A57 ARM processors", []>;
def ProcA72 : SubtargetFeature<"a72", "ARMProcFamily", "CortexA72",
"Cortex-A72 ARM processors", []>;
+def ProcA73 : SubtargetFeature<"a73", "ARMProcFamily", "CortexA73",
+ "Cortex-A73 ARM processors", []>;
def ProcKrait : SubtargetFeature<"krait", "ARMProcFamily", "Krait",
"Qualcomm ARM processors", []>;
@@ -256,12 +347,14 @@ def ProcExynosM1 : SubtargetFeature<"exynosm1", "ARMProcFamily", "ExynosM1",
"Samsung Exynos-M1 processors", []>;
def ProcR4 : SubtargetFeature<"r4", "ARMProcFamily", "CortexR4",
- "Cortex-R4 ARM processors", []>;
+ "Cortex-R4 ARM processors", []>;
def ProcR5 : SubtargetFeature<"r5", "ARMProcFamily", "CortexR5",
"Cortex-R5 ARM processors", []>;
def ProcR7 : SubtargetFeature<"r7", "ARMProcFamily", "CortexR7",
"Cortex-R7 ARM processors", []>;
+def ProcM3 : SubtargetFeature<"m3", "ARMProcFamily", "CortexM3",
+ "Cortex-M3 ARM processors", []>;
//===----------------------------------------------------------------------===//
// ARM schedules.
@@ -374,7 +467,27 @@ def ARMv82a : Architecture<"armv8.2-a", "ARMv82a", [HasV8_2aOps,
FeatureMP,
FeatureVirtualization,
FeatureCrypto,
- FeatureCRC]>;
+ FeatureCRC,
+ FeatureRAS]>;
+
+def ARMv8mBaseline : Architecture<"armv8-m.base", "ARMv8mBaseline",
+ [HasV8MBaselineOps,
+ FeatureNoARM,
+ FeatureDB,
+ FeatureHWDiv,
+ FeatureV7Clrex,
+ Feature8MSecExt,
+ FeatureAcquireRelease,
+ FeatureMClass]>;
+
+def ARMv8mMainline : Architecture<"armv8-m.main", "ARMv8mMainline",
+ [HasV8MMainlineOps,
+ FeatureNoARM,
+ FeatureDB,
+ FeatureHWDiv,
+ Feature8MSecExt,
+ FeatureAcquireRelease,
+ FeatureMClass]>;
// Aliases
def IWMMXT : Architecture<"iwmmxt", "ARMv5te", [ARMv5te]>;
@@ -452,7 +565,7 @@ def : Processor<"arm1156t2f-s", ARMV6Itineraries, [ARMv6t2,
// FIXME: A5 currently has the same Schedule model as A8
def : ProcessorModel<"cortex-a5", CortexA8Model, [ARMv7a, ProcA5,
- FeatureHasRAS,
+ FeatureHasRetAddrStack,
FeatureTrustZone,
FeatureSlowFPBrcc,
FeatureHasSlowFPVMLx,
@@ -462,9 +575,10 @@ def : ProcessorModel<"cortex-a5", CortexA8Model, [ARMv7a, ProcA5,
FeatureVFP4]>;
def : ProcessorModel<"cortex-a7", CortexA8Model, [ARMv7a, ProcA7,
- FeatureHasRAS,
+ FeatureHasRetAddrStack,
FeatureTrustZone,
FeatureSlowFPBrcc,
+ FeatureHasVMLxHazards,
FeatureHasSlowFPVMLx,
FeatureVMLxForwarding,
FeatureT2XtPk,
@@ -475,25 +589,33 @@ def : ProcessorModel<"cortex-a7", CortexA8Model, [ARMv7a, ProcA7,
FeatureVirtualization]>;
def : ProcessorModel<"cortex-a8", CortexA8Model, [ARMv7a, ProcA8,
- FeatureHasRAS,
+ FeatureHasRetAddrStack,
+ FeatureNonpipelinedVFP,
FeatureTrustZone,
FeatureSlowFPBrcc,
+ FeatureHasVMLxHazards,
FeatureHasSlowFPVMLx,
FeatureVMLxForwarding,
FeatureT2XtPk]>;
def : ProcessorModel<"cortex-a9", CortexA9Model, [ARMv7a, ProcA9,
- FeatureHasRAS,
+ FeatureHasRetAddrStack,
FeatureTrustZone,
+ FeatureHasVMLxHazards,
FeatureVMLxForwarding,
FeatureT2XtPk,
FeatureFP16,
FeatureAvoidPartialCPSR,
+ FeatureExpandMLx,
+ FeaturePreferVMOVSR,
+ FeatureMuxedUnits,
+ FeatureNEONForFPMovs,
+ FeatureCheckVLDnAlign,
FeatureMP]>;
// FIXME: A12 currently has the same Schedule model as A9
def : ProcessorModel<"cortex-a12", CortexA9Model, [ARMv7a, ProcA12,
- FeatureHasRAS,
+ FeatureHasRetAddrStack,
FeatureTrustZone,
FeatureVMLxForwarding,
FeatureT2XtPk,
@@ -506,11 +628,14 @@ def : ProcessorModel<"cortex-a12", CortexA9Model, [ARMv7a, ProcA12,
// FIXME: A15 currently has the same Schedule model as A9.
def : ProcessorModel<"cortex-a15", CortexA9Model, [ARMv7a, ProcA15,
- FeatureHasRAS,
+ FeatureDontWidenVMOVS,
+ FeatureHasRetAddrStack,
+ FeatureMuxedUnits,
FeatureTrustZone,
FeatureT2XtPk,
FeatureVFP4,
FeatureMP,
+ FeatureCheckVLDnAlign,
FeatureHWDiv,
FeatureHWDivARM,
FeatureAvoidPartialCPSR,
@@ -518,7 +643,7 @@ def : ProcessorModel<"cortex-a15", CortexA9Model, [ARMv7a, ProcA15,
// FIXME: A17 currently has the same Schedule model as A9
def : ProcessorModel<"cortex-a17", CortexA9Model, [ARMv7a, ProcA17,
- FeatureHasRAS,
+ FeatureHasRetAddrStack,
FeatureTrustZone,
FeatureMP,
FeatureVMLxForwarding,
@@ -533,7 +658,9 @@ def : ProcessorModel<"cortex-a17", CortexA9Model, [ARMv7a, ProcA17,
// FIXME: Krait currently has the same features as A9, plus VFP4 and hardware
// division features.
def : ProcessorModel<"krait", CortexA9Model, [ARMv7a, ProcKrait,
- FeatureHasRAS,
+ FeatureHasRetAddrStack,
+ FeatureMuxedUnits,
+ FeatureCheckVLDnAlign,
FeatureVMLxForwarding,
FeatureT2XtPk,
FeatureFP16,
@@ -543,7 +670,7 @@ def : ProcessorModel<"krait", CortexA9Model, [ARMv7a, ProcKrait,
FeatureHWDivARM]>;
def : ProcessorModel<"swift", SwiftModel, [ARMv7a, ProcSwift,
- FeatureHasRAS,
+ FeatureHasRetAddrStack,
FeatureNEONForFP,
FeatureT2XtPk,
FeatureVFP4,
@@ -552,17 +679,24 @@ def : ProcessorModel<"swift", SwiftModel, [ARMv7a, ProcSwift,
FeatureHWDivARM,
FeatureAvoidPartialCPSR,
FeatureAvoidMOVsShOp,
- FeatureHasSlowFPVMLx]>;
+ FeatureHasSlowFPVMLx,
+ FeatureHasVMLxHazards,
+ FeatureProfUnpredicate,
+ FeaturePrefISHSTBarrier,
+ FeatureSlowOddRegister,
+ FeatureSlowLoadDSubreg,
+ FeatureSlowVGETLNi32,
+ FeatureSlowVDUP32]>;
// FIXME: R4 currently has the same ProcessorModel as A8.
def : ProcessorModel<"cortex-r4", CortexA8Model, [ARMv7r, ProcR4,
- FeatureHasRAS,
+ FeatureHasRetAddrStack,
FeatureAvoidPartialCPSR,
FeatureT2XtPk]>;
// FIXME: R4F currently has the same ProcessorModel as A8.
def : ProcessorModel<"cortex-r4f", CortexA8Model, [ARMv7r, ProcR4,
- FeatureHasRAS,
+ FeatureHasRetAddrStack,
FeatureSlowFPBrcc,
FeatureHasSlowFPVMLx,
FeatureVFP3,
@@ -572,7 +706,7 @@ def : ProcessorModel<"cortex-r4f", CortexA8Model, [ARMv7r, ProcR4,
// FIXME: R5 currently has the same ProcessorModel as A8.
def : ProcessorModel<"cortex-r5", CortexA8Model, [ARMv7r, ProcR5,
- FeatureHasRAS,
+ FeatureHasRetAddrStack,
FeatureVFP3,
FeatureD16,
FeatureSlowFPBrcc,
@@ -583,9 +717,20 @@ def : ProcessorModel<"cortex-r5", CortexA8Model, [ARMv7r, ProcR5,
// FIXME: R7 currently has the same ProcessorModel as A8 and is modelled as R5.
def : ProcessorModel<"cortex-r7", CortexA8Model, [ARMv7r, ProcR7,
- FeatureHasRAS,
+ FeatureHasRetAddrStack,
+ FeatureVFP3,
+ FeatureD16,
+ FeatureFP16,
+ FeatureMP,
+ FeatureSlowFPBrcc,
+ FeatureHWDivARM,
+ FeatureHasSlowFPVMLx,
+ FeatureAvoidPartialCPSR,
+ FeatureT2XtPk]>;
+
+def : ProcessorModel<"cortex-r8", CortexA8Model, [ARMv7r,
+ FeatureHasRetAddrStack,
FeatureVFP3,
- FeatureVFPOnlySP,
FeatureD16,
FeatureFP16,
FeatureMP,
@@ -595,8 +740,8 @@ def : ProcessorModel<"cortex-r7", CortexA8Model, [ARMv7r, ProcR7,
FeatureAvoidPartialCPSR,
FeatureT2XtPk]>;
-def : ProcNoItin<"cortex-m3", [ARMv7m]>;
-def : ProcNoItin<"sc300", [ARMv7m]>;
+def : ProcNoItin<"cortex-m3", [ARMv7m, ProcM3]>;
+def : ProcNoItin<"sc300", [ARMv7m, ProcM3]>;
def : ProcNoItin<"cortex-m4", [ARMv7em,
FeatureVFP4,
@@ -607,6 +752,12 @@ def : ProcNoItin<"cortex-m7", [ARMv7em,
FeatureFPARMv8,
FeatureD16]>;
+def : ProcNoItin<"cortex-a32", [ARMv8a,
+ FeatureHWDiv,
+ FeatureHWDivARM,
+ FeatureT2XtPk,
+ FeatureCrypto,
+ FeatureCRC]>;
def : ProcNoItin<"cortex-a35", [ARMv8a, ProcA35,
FeatureHWDiv,
@@ -636,9 +787,16 @@ def : ProcNoItin<"cortex-a72", [ARMv8a, ProcA72,
FeatureCrypto,
FeatureCRC]>;
+def : ProcNoItin<"cortex-a73", [ARMv8a, ProcA73,
+ FeatureHWDiv,
+ FeatureHWDivARM,
+ FeatureT2XtPk,
+ FeatureCrypto,
+ FeatureCRC]>;
+
// Cyclone is very similar to Swift
def : ProcessorModel<"cyclone", SwiftModel, [ARMv8a, ProcSwift,
- FeatureHasRAS,
+ FeatureHasRetAddrStack,
FeatureNEONForFP,
FeatureT2XtPk,
FeatureVFP4,
diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp
index 206db9619a2f..04863a7ecf8f 100644
--- a/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -43,12 +43,11 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ARMBuildAttributes.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/COFF.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
@@ -213,8 +212,6 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
GetARMGVSymbol(GV, TF)->print(O, MAI);
printOffset(MO.getOffset(), O);
- if (TF == ARMII::MO_PLT)
- O << "(PLT)";
break;
}
case MachineOperand::MO_ConstantPoolIndex:
@@ -516,9 +513,10 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
OutStreamer->AddBlankLine();
}
- Stubs = MMIMacho.GetHiddenGVStubList();
+ Stubs = MMIMacho.GetThreadLocalGVStubList();
if (!Stubs.empty()) {
- OutStreamer->SwitchSection(TLOFMacho.getNonLazySymbolPointerSection());
+ // Switch with ".non_lazy_symbol_pointer" directive.
+ OutStreamer->SwitchSection(TLOFMacho.getThreadLocalPointerSection());
EmitAlignment(2);
for (auto &Stub : Stubs)
@@ -536,18 +534,48 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
OutStreamer->EmitAssemblerFlag(MCAF_SubsectionsViaSymbols);
}
+ if (TT.isOSBinFormatCOFF()) {
+ const auto &TLOF =
+ static_cast<const TargetLoweringObjectFileCOFF &>(getObjFileLowering());
+
+ std::string Flags;
+ raw_string_ostream OS(Flags);
+
+ for (const auto &Function : M)
+ TLOF.emitLinkerFlagsForGlobal(OS, &Function, *Mang);
+ for (const auto &Global : M.globals())
+ TLOF.emitLinkerFlagsForGlobal(OS, &Global, *Mang);
+ for (const auto &Alias : M.aliases())
+ TLOF.emitLinkerFlagsForGlobal(OS, &Alias, *Mang);
+
+ OS.flush();
+
+ // Output collected flags
+ if (!Flags.empty()) {
+ OutStreamer->SwitchSection(TLOF.getDrectveSection());
+ OutStreamer->EmitBytes(Flags);
+ }
+ }
+
// The last attribute to be emitted is ABI_optimization_goals
MCTargetStreamer &TS = *OutStreamer->getTargetStreamer();
ARMTargetStreamer &ATS = static_cast<ARMTargetStreamer &>(TS);
if (OptimizationGoals > 0 &&
- (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI()))
+ (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
+ Subtarget->isTargetMuslAEABI()))
ATS.emitAttribute(ARMBuildAttrs::ABI_optimization_goals, OptimizationGoals);
OptimizationGoals = -1;
ATS.finishAttributeSection();
}
+static bool isV8M(const ARMSubtarget *Subtarget) {
+ // Note that v8M Baseline is a subset of v6T2!
+ return (Subtarget->hasV8MBaselineOps() && !Subtarget->hasV6T2Ops()) ||
+ Subtarget->hasV8MMainlineOps();
+}
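
A standalone restatement of the predicate's intent (toy struct, hypothetical names): because HasV6T2Ops implies HasV8MBaselineOps in the feature graph added to ARM.td above, the !hasV6T2Ops test is what keeps ordinary v6T2-and-up cores from classifying as v8-M.

// Toy model of the feature bits involved in isV8M.
struct Feats { bool V8MBaseline, V8MMainline, V6T2; };

bool isV8MModel(const Feats &F) {
  // v6T2 (and everything above it) subsumes v8-M Baseline, so Baseline-only
  // cores are exactly those with the Baseline bit set and the v6T2 bit clear.
  return (F.V8MBaseline && !F.V6T2) || F.V8MMainline;
}
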
+
//===----------------------------------------------------------------------===//
// Helper routines for EmitStartOfAsmFile() and EmitEndOfAsmFile()
// FIXME:
@@ -561,13 +589,17 @@ static ARMBuildAttrs::CPUArch getArchForCPU(StringRef CPU,
return ARMBuildAttrs::v5TEJ;
if (Subtarget->hasV8Ops())
- return ARMBuildAttrs::v8;
+ return ARMBuildAttrs::v8_A;
+ else if (Subtarget->hasV8MMainlineOps())
+ return ARMBuildAttrs::v8_M_Main;
else if (Subtarget->hasV7Ops()) {
if (Subtarget->isMClass() && Subtarget->hasDSP())
return ARMBuildAttrs::v7E_M;
return ARMBuildAttrs::v7;
} else if (Subtarget->hasV6T2Ops())
return ARMBuildAttrs::v6T2;
+ else if (Subtarget->hasV8MBaselineOps())
+ return ARMBuildAttrs::v8_M_Base;
else if (Subtarget->hasV6MOps())
return ARMBuildAttrs::v6S_M;
else if (Subtarget->hasV6Ops())
@@ -609,9 +641,9 @@ void ARMAsmPrinter::emitAttributes() {
static_cast<const ARMBaseTargetMachine &>(TM);
const ARMSubtarget STI(TT, CPU, ArchFS, ATM, ATM.isLittleEndian());
- std::string CPUString = STI.getCPUString();
+ const std::string &CPUString = STI.getCPUString();
- if (CPUString.find("generic") != 0) { //CPUString doesn't start with "generic"
+ if (!StringRef(CPUString).startswith("generic")) {
// FIXME: remove krait check when GNU tools support krait cpu
if (STI.isKrait()) {
ATS.emitTextAttribute(ARMBuildAttrs::CPU_name, "cortex-a9");
@@ -627,7 +659,7 @@ void ARMAsmPrinter::emitAttributes() {
// Tag_CPU_arch_profile must have the default value of 0 when "Architecture
// profile is not applicable (e.g. pre v7, or cross-profile code)".
- if (STI.hasV7Ops()) {
+ if (STI.hasV7Ops() || isV8M(&STI)) {
if (STI.isAClass()) {
ATS.emitAttribute(ARMBuildAttrs::CPU_arch_profile,
ARMBuildAttrs::ApplicationProfile);
@@ -643,7 +675,10 @@ void ARMAsmPrinter::emitAttributes() {
ATS.emitAttribute(ARMBuildAttrs::ARM_ISA_use,
STI.hasARMOps() ? ARMBuildAttrs::Allowed
: ARMBuildAttrs::Not_Allowed);
- if (STI.isThumb1Only()) {
+ if (isV8M(&STI)) {
+ ATS.emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
+ ARMBuildAttrs::AllowThumbDerived);
+ } else if (STI.isThumb1Only()) {
ATS.emitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::Allowed);
} else if (STI.hasThumb2()) {
ATS.emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
@@ -690,7 +725,7 @@ void ARMAsmPrinter::emitAttributes() {
ATS.emitFPU(ARM::FK_VFPV2);
}
- if (TM.getRelocationModel() == Reloc::PIC_) {
+ if (isPositionIndependent()) {
// PIC specific attributes.
ATS.emitAttribute(ARMBuildAttrs::ABI_PCS_RW_data,
ARMBuildAttrs::AddressRWPCRel);
@@ -794,6 +829,9 @@ void ARMAsmPrinter::emitAttributes() {
if (STI.hasDivideInARMMode() && !STI.hasV8Ops())
ATS.emitAttribute(ARMBuildAttrs::DIV_use, ARMBuildAttrs::AllowDIVExt);
+ if (STI.hasDSP() && isV8M(&STI))
+ ATS.emitAttribute(ARMBuildAttrs::DSP_extension, ARMBuildAttrs::Allowed);
+
if (MMI) {
if (const Module *SourceModule = MMI->getModule()) {
// ABI_PCS_wchar_t to indicate wchar_t width
@@ -853,11 +891,18 @@ static MCSymbol *getPICLabel(const char *Prefix, unsigned FunctionNumber,
static MCSymbolRefExpr::VariantKind
getModifierVariantKind(ARMCP::ARMCPModifier Modifier) {
switch (Modifier) {
- case ARMCP::no_modifier: return MCSymbolRefExpr::VK_None;
- case ARMCP::TLSGD: return MCSymbolRefExpr::VK_TLSGD;
- case ARMCP::TPOFF: return MCSymbolRefExpr::VK_TPOFF;
- case ARMCP::GOTTPOFF: return MCSymbolRefExpr::VK_GOTTPOFF;
- case ARMCP::GOT_PREL: return MCSymbolRefExpr::VK_ARM_GOT_PREL;
+ case ARMCP::no_modifier:
+ return MCSymbolRefExpr::VK_None;
+ case ARMCP::TLSGD:
+ return MCSymbolRefExpr::VK_TLSGD;
+ case ARMCP::TPOFF:
+ return MCSymbolRefExpr::VK_TPOFF;
+ case ARMCP::GOTTPOFF:
+ return MCSymbolRefExpr::VK_GOTTPOFF;
+ case ARMCP::GOT_PREL:
+ return MCSymbolRefExpr::VK_ARM_GOT_PREL;
+ case ARMCP::SECREL:
+ return MCSymbolRefExpr::VK_SECREL;
}
llvm_unreachable("Invalid ARMCPModifier!");
}
@@ -865,8 +910,8 @@ getModifierVariantKind(ARMCP::ARMCPModifier Modifier) {
MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV,
unsigned char TargetFlags) {
if (Subtarget->isTargetMachO()) {
- bool IsIndirect = (TargetFlags & ARMII::MO_NONLAZY) &&
- Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel());
+ bool IsIndirect =
+ (TargetFlags & ARMII::MO_NONLAZY) && Subtarget->isGVIndirectSymbol(GV);
if (!IsIndirect)
return getSymbol(GV);
@@ -876,8 +921,9 @@ MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV,
MachineModuleInfoMachO &MMIMachO =
MMI->getObjFileInfo<MachineModuleInfoMachO>();
MachineModuleInfoImpl::StubValueTy &StubSym =
- GV->hasHiddenVisibility() ? MMIMachO.getHiddenGVStubEntry(MCSym)
- : MMIMachO.getGVStubEntry(MCSym);
+ GV->isThreadLocal() ? MMIMachO.getThreadLocalGVStubEntry(MCSym)
+ : MMIMachO.getGVStubEntry(MCSym);
+
if (!StubSym.getPointer())
StubSym = MachineModuleInfoImpl::StubValueTy(getSymbol(GV),
!GV->hasInternalLinkage());
@@ -991,7 +1037,7 @@ void ARMAsmPrinter::EmitJumpTableAddrs(const MachineInstr *MI) {
// .word (LBB1 - LJTI_0_0)
const MCExpr *Expr = MCSymbolRefExpr::create(MBB->getSymbol(), OutContext);
- if (TM.getRelocationModel() == Reloc::PIC_)
+ if (isPositionIndependent())
Expr = MCBinaryExpr::createSub(Expr, MCSymbolRefExpr::create(JTISymbol,
OutContext),
OutContext);
@@ -1227,6 +1273,8 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
const DataLayout &DL = getDataLayout();
+ MCTargetStreamer &TS = *OutStreamer->getTargetStreamer();
+ ARMTargetStreamer &ATS = static_cast<ARMTargetStreamer &>(TS);
// If we just ended a constant pool, mark it as such.
if (InConstantPool && MI->getOpcode() != ARM::CONSTPOOL_ENTRY) {
@@ -1643,29 +1691,26 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// Non-Darwin binutils don't yet support the "trap" mnemonic.
// FIXME: Remove this special case when they do.
if (!Subtarget->isTargetMachO()) {
- //.long 0xe7ffdefe @ trap
uint32_t Val = 0xe7ffdefeUL;
OutStreamer->AddComment("trap");
- OutStreamer->EmitIntValue(Val, 4);
+ ATS.emitInst(Val);
return;
}
break;
}
case ARM::TRAPNaCl: {
- //.long 0xe7fedef0 @ trap
uint32_t Val = 0xe7fedef0UL;
OutStreamer->AddComment("trap");
- OutStreamer->EmitIntValue(Val, 4);
+ ATS.emitInst(Val);
return;
}
case ARM::tTRAP: {
// Non-Darwin binutils don't yet support the "trap" mnemonic.
// FIXME: Remove this special case when they do.
if (!Subtarget->isTargetMachO()) {
- //.short 57086 @ trap
uint16_t Val = 0xdefe;
OutStreamer->AddComment("trap");
- OutStreamer->EmitIntValue(Val, 2);
+ ATS.emitInst(Val, 'n');
return;
}
break;
@@ -1845,6 +1890,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// bx $scratch
unsigned SrcReg = MI->getOperand(0).getReg();
unsigned ScratchReg = MI->getOperand(1).getReg();
+
EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tLDRi)
.addReg(ScratchReg)
.addReg(SrcReg)
@@ -1885,6 +1931,36 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addReg(0));
return;
}
+ case ARM::tInt_WIN_eh_sjlj_longjmp: {
+ // ldr.w r11, [$src, #0]
+ // ldr.w sp, [$src, #8]
+ // ldr.w pc, [$src, #4]
+
+ unsigned SrcReg = MI->getOperand(0).getReg();
+
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::t2LDRi12)
+ .addReg(ARM::R11)
+ .addReg(SrcReg)
+ .addImm(0)
+ // Predicate
+ .addImm(ARMCC::AL)
+ .addReg(0));
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::t2LDRi12)
+ .addReg(ARM::SP)
+ .addReg(SrcReg)
+ .addImm(8)
+ // Predicate
+ .addImm(ARMCC::AL)
+ .addReg(0));
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::t2LDRi12)
+ .addReg(ARM::PC)
+ .addReg(SrcReg)
+ .addImm(4)
+ // Predicate
+ .addImm(ARMCC::AL)
+ .addReg(0));
+ return;
+ }
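
Worth noting as a cross-reference (nothing new here): the sequence is three 4-byte Thumb2 wide loads, which matches the 12-byte size this patch adds to GetInstSizeInBytes for this pseudo later in the diff.

// Size bookkeeping: three t2LDRi12 wide loads at 4 bytes each.
constexpr unsigned WinSjLjLongjmpBytes = 3 * 4; // == the "return 12" below
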
}
MCInst TmpInst;
diff --git a/lib/Target/ARM/ARMAsmPrinter.h b/lib/Target/ARM/ARMAsmPrinter.h
index ed7be2de51ca..97f5ca0ecbc2 100644
--- a/lib/Target/ARM/ARMAsmPrinter.h
+++ b/lib/Target/ARM/ARMAsmPrinter.h
@@ -95,6 +95,7 @@ public:
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp);
private:
+
// Helpers for EmitStartOfAsmFile() and EmitEndOfAsmFile()
void emitAttributes();
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 49f328852667..693f16499717 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -51,15 +51,6 @@ static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
cl::desc("Enable ARM 2-addr to 3-addr conv"));
-static cl::opt<bool>
-WidenVMOVS("widen-vmovs", cl::Hidden, cl::init(true),
- cl::desc("Widen ARM vmovs to vmovd when possible"));
-
-static cl::opt<unsigned>
-SwiftPartialUpdateClearance("swift-partial-update-clearance",
- cl::Hidden, cl::init(12),
- cl::desc("Clearance before partial register updates"));
-
/// ARM_MLxEntry - Record information about MLA / MLS instructions.
struct ARM_MLxEntry {
uint16_t MLxOpc; // MLA / MLS opcode
@@ -124,18 +115,15 @@ CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
}
-MachineInstr *
-ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const {
+MachineInstr *ARMBaseInstrInfo::convertToThreeAddress(
+ MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const {
// FIXME: Thumb2 support.
if (!EnableARM3Addr)
return nullptr;
- MachineInstr *MI = MBBI;
- MachineFunction &MF = *MI->getParent()->getParent();
- uint64_t TSFlags = MI->getDesc().TSFlags;
+ MachineFunction &MF = *MI.getParent()->getParent();
+ uint64_t TSFlags = MI.getDesc().TSFlags;
bool isPre = false;
switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
default: return nullptr;
@@ -148,24 +136,24 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
// Try splitting an indexed load/store to an un-indexed one plus an add/sub
// operation.
- unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
+ unsigned MemOpc = getUnindexedOpcode(MI.getOpcode());
if (MemOpc == 0)
return nullptr;
MachineInstr *UpdateMI = nullptr;
MachineInstr *MemMI = nullptr;
unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
- const MCInstrDesc &MCID = MI->getDesc();
+ const MCInstrDesc &MCID = MI.getDesc();
unsigned NumOps = MCID.getNumOperands();
- bool isLoad = !MI->mayStore();
- const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
- const MachineOperand &Base = MI->getOperand(2);
- const MachineOperand &Offset = MI->getOperand(NumOps-3);
+ bool isLoad = !MI.mayStore();
+ const MachineOperand &WB = isLoad ? MI.getOperand(1) : MI.getOperand(0);
+ const MachineOperand &Base = MI.getOperand(2);
+ const MachineOperand &Offset = MI.getOperand(NumOps - 3);
unsigned WBReg = WB.getReg();
unsigned BaseReg = Base.getReg();
unsigned OffReg = Offset.getReg();
- unsigned OffImm = MI->getOperand(NumOps-2).getImm();
- ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
+ unsigned OffImm = MI.getOperand(NumOps - 2).getImm();
+ ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI.getOperand(NumOps - 1).getImm();
switch (AddrMode) {
default: llvm_unreachable("Unknown indexed op!");
case ARMII::AddrMode2: {
@@ -176,22 +164,33 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
// Can't encode it in a so_imm operand. This transformation will
// add more than 1 instruction. Abandon!
return nullptr;
- UpdateMI = BuildMI(MF, MI->getDebugLoc(),
+ UpdateMI = BuildMI(MF, MI.getDebugLoc(),
get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
- .addReg(BaseReg).addImm(Amt)
- .addImm(Pred).addReg(0).addReg(0);
+ .addReg(BaseReg)
+ .addImm(Amt)
+ .addImm(Pred)
+ .addReg(0)
+ .addReg(0);
} else if (Amt != 0) {
ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
- UpdateMI = BuildMI(MF, MI->getDebugLoc(),
+ UpdateMI = BuildMI(MF, MI.getDebugLoc(),
get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
- .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
- .addImm(Pred).addReg(0).addReg(0);
+ .addReg(BaseReg)
+ .addReg(OffReg)
+ .addReg(0)
+ .addImm(SOOpc)
+ .addImm(Pred)
+ .addReg(0)
+ .addReg(0);
} else
- UpdateMI = BuildMI(MF, MI->getDebugLoc(),
+ UpdateMI = BuildMI(MF, MI.getDebugLoc(),
get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
- .addReg(BaseReg).addReg(OffReg)
- .addImm(Pred).addReg(0).addReg(0);
+ .addReg(BaseReg)
+ .addReg(OffReg)
+ .addImm(Pred)
+ .addReg(0)
+ .addReg(0);
break;
}
case ARMII::AddrMode3 : {
@@ -199,15 +198,21 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
unsigned Amt = ARM_AM::getAM3Offset(OffImm);
if (OffReg == 0)
// Immediate is 8 bits. It's guaranteed to fit in a so_imm operand.
- UpdateMI = BuildMI(MF, MI->getDebugLoc(),
+ UpdateMI = BuildMI(MF, MI.getDebugLoc(),
get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
- .addReg(BaseReg).addImm(Amt)
- .addImm(Pred).addReg(0).addReg(0);
+ .addReg(BaseReg)
+ .addImm(Amt)
+ .addImm(Pred)
+ .addReg(0)
+ .addReg(0);
else
- UpdateMI = BuildMI(MF, MI->getDebugLoc(),
+ UpdateMI = BuildMI(MF, MI.getDebugLoc(),
get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
- .addReg(BaseReg).addReg(OffReg)
- .addImm(Pred).addReg(0).addReg(0);
+ .addReg(BaseReg)
+ .addReg(OffReg)
+ .addImm(Pred)
+ .addReg(0)
+ .addReg(0);
break;
}
}
@@ -215,24 +220,34 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
std::vector<MachineInstr*> NewMIs;
if (isPre) {
if (isLoad)
- MemMI = BuildMI(MF, MI->getDebugLoc(),
- get(MemOpc), MI->getOperand(0).getReg())
- .addReg(WBReg).addImm(0).addImm(Pred);
+ MemMI =
+ BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
+ .addReg(WBReg)
+ .addImm(0)
+ .addImm(Pred);
else
- MemMI = BuildMI(MF, MI->getDebugLoc(),
- get(MemOpc)).addReg(MI->getOperand(1).getReg())
- .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
+ MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
+ .addReg(MI.getOperand(1).getReg())
+ .addReg(WBReg)
+ .addReg(0)
+ .addImm(0)
+ .addImm(Pred);
NewMIs.push_back(MemMI);
NewMIs.push_back(UpdateMI);
} else {
if (isLoad)
- MemMI = BuildMI(MF, MI->getDebugLoc(),
- get(MemOpc), MI->getOperand(0).getReg())
- .addReg(BaseReg).addImm(0).addImm(Pred);
+ MemMI =
+ BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
+ .addReg(BaseReg)
+ .addImm(0)
+ .addImm(Pred);
else
- MemMI = BuildMI(MF, MI->getDebugLoc(),
- get(MemOpc)).addReg(MI->getOperand(1).getReg())
- .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
+ MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
+ .addReg(MI.getOperand(1).getReg())
+ .addReg(BaseReg)
+ .addReg(0)
+ .addImm(0)
+ .addImm(Pred);
if (WB.isDead())
UpdateMI->getOperand(0).setIsDead();
NewMIs.push_back(UpdateMI);
@@ -241,8 +256,8 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
// Transfer LiveVariables states, kill / dead info.
if (LV) {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
unsigned Reg = MO.getReg();
@@ -250,7 +265,7 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (MO.isDef()) {
MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
if (MO.isDead())
- LV->addVirtualRegisterDead(Reg, NewMI);
+ LV->addVirtualRegisterDead(Reg, *NewMI);
}
if (MO.isUse() && MO.isKill()) {
for (unsigned j = 0; j < 2; ++j) {
@@ -258,7 +273,7 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineInstr *NewMI = NewMIs[j];
if (!NewMI->readsRegister(Reg))
continue;
- LV->addVirtualRegisterKilled(Reg, NewMI);
+ LV->addVirtualRegisterKilled(Reg, *NewMI);
if (VI.removeKill(MI))
VI.Kills.push_back(NewMI);
break;
@@ -268,17 +283,18 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
}
}
+ MachineBasicBlock::iterator MBBI = MI.getIterator();
MFI->insert(MBBI, NewMIs[1]);
MFI->insert(MBBI, NewMIs[0]);
return NewMIs[0];
}
// Branch analysis.
-bool
-ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const {
+bool ARMBaseInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
TBB = nullptr;
FBB = nullptr;
@@ -289,7 +305,7 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
// Walk backwards from the end of the basic block until the branch is
// analyzed or we give up.
- while (isPredicated(I) || I->isTerminator() || I->isDebugValue()) {
+ while (isPredicated(*I) || I->isTerminator() || I->isDebugValue()) {
// Flag to be raised on unanalyzable instructions. This is useful in cases
// where we want to clean up on the end of the basic block before we bail
@@ -322,7 +338,7 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
Cond.push_back(I->getOperand(2));
} else if (I->isReturn()) {
// Returns can't be analyzed, but we should run cleanup.
- CantAnalyze = !isPredicated(I);
+ CantAnalyze = !isPredicated(*I);
} else {
// We encountered some other unrecognized terminator. Bail out immediately.
return true;
@@ -330,7 +346,7 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
// Cleanup code - to be run for unpredicated unconditional branches and
// returns.
- if (!isPredicated(I) &&
+ if (!isPredicated(*I) &&
(isUncondBranchOpcode(I->getOpcode()) ||
isIndirectBranchOpcode(I->getOpcode()) ||
isJumpTableBranchOpcode(I->getOpcode()) ||
@@ -344,9 +360,9 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
if (AllowModify) {
MachineBasicBlock::iterator DI = std::next(I);
while (DI != MBB.end()) {
- MachineInstr *InstToDelete = DI;
+ MachineInstr &InstToDelete = *DI;
++DI;
- InstToDelete->eraseFromParent();
+ InstToDelete.eraseFromParent();
}
}
}
@@ -390,11 +406,11 @@ unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return 2;
}
-unsigned
-ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const {
+unsigned ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const {
ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
int BOpc = !AFI->isThumbFunction()
? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
@@ -438,10 +454,10 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
return false;
}
-bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
- if (MI->isBundle()) {
- MachineBasicBlock::const_instr_iterator I = MI->getIterator();
- MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
+bool ARMBaseInstrInfo::isPredicated(const MachineInstr &MI) const {
+ if (MI.isBundle()) {
+ MachineBasicBlock::const_instr_iterator I = MI.getIterator();
+ MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
while (++I != E && I->isInsideBundle()) {
int PIdx = I->findFirstPredOperandIdx();
if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
@@ -450,26 +466,26 @@ bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
return false;
}
- int PIdx = MI->findFirstPredOperandIdx();
- return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
+ int PIdx = MI.findFirstPredOperandIdx();
+ return PIdx != -1 && MI.getOperand(PIdx).getImm() != ARMCC::AL;
}
-bool ARMBaseInstrInfo::
-PredicateInstruction(MachineInstr *MI, ArrayRef<MachineOperand> Pred) const {
- unsigned Opc = MI->getOpcode();
+bool ARMBaseInstrInfo::PredicateInstruction(
+ MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
+ unsigned Opc = MI.getOpcode();
if (isUncondBranchOpcode(Opc)) {
- MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
- MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+ MI.setDesc(get(getMatchingCondBranchOpcode(Opc)));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
.addImm(Pred[0].getImm())
.addReg(Pred[1].getReg());
return true;
}
- int PIdx = MI->findFirstPredOperandIdx();
+ int PIdx = MI.findFirstPredOperandIdx();
if (PIdx != -1) {
- MachineOperand &PMO = MI->getOperand(PIdx);
+ MachineOperand &PMO = MI.getOperand(PIdx);
PMO.setImm(Pred[0].getImm());
- MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
+ MI.getOperand(PIdx+1).setReg(Pred[1].getReg());
return true;
}
return false;
@@ -501,11 +517,11 @@ bool ARMBaseInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
}
}
-bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const {
+bool ARMBaseInstrInfo::DefinesPredicate(
+ MachineInstr &MI, std::vector<MachineOperand> &Pred) const {
bool Found = false;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI.getOperand(i);
if ((MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) ||
(MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)) {
Pred.push_back(MO);
@@ -555,21 +571,21 @@ static bool isEligibleForITBlock(const MachineInstr *MI) {
/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
-bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
- if (!MI->isPredicable())
+bool ARMBaseInstrInfo::isPredicable(MachineInstr &MI) const {
+ if (!MI.isPredicable())
return false;
- if (!isEligibleForITBlock(MI))
+ if (!isEligibleForITBlock(&MI))
return false;
ARMFunctionInfo *AFI =
- MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
+ MI.getParent()->getParent()->getInfo<ARMFunctionInfo>();
if (AFI->isThumb2Function()) {
if (getSubtarget().restrictIT())
- return isV8EligibleForIT(MI);
+ return isV8EligibleForIT(&MI);
} else { // non-Thumb
- if ((MI->getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON)
+ if ((MI.getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON)
return false;
}
@@ -594,19 +610,19 @@ template <> bool IsCPSRDead<MachineInstr>(MachineInstr *MI) {
/// GetInstSize - Return the size of the specified MachineInstr.
///
-unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
- const MachineBasicBlock &MBB = *MI->getParent();
+unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr &MI) const {
+ const MachineBasicBlock &MBB = *MI.getParent();
const MachineFunction *MF = MBB.getParent();
const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
- const MCInstrDesc &MCID = MI->getDesc();
+ const MCInstrDesc &MCID = MI.getDesc();
if (MCID.getSize())
return MCID.getSize();
// If this machine instr is an inline asm, measure it.
- if (MI->getOpcode() == ARM::INLINEASM)
- return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
- unsigned Opc = MI->getOpcode();
+ if (MI.getOpcode() == ARM::INLINEASM)
+ return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
+ unsigned Opc = MI.getOpcode();
switch (Opc) {
default:
// pseudo-instruction sizes are zero.
@@ -628,11 +644,13 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
case ARM::JUMPTABLE_TBH:
// If this machine instr is a constant pool entry, its size is recorded as
// operand #2.
- return MI->getOperand(2).getImm();
+ return MI.getOperand(2).getImm();
case ARM::Int_eh_sjlj_longjmp:
return 16;
case ARM::tInt_eh_sjlj_longjmp:
return 10;
+ case ARM::tInt_WIN_eh_sjlj_longjmp:
+ return 12;
case ARM::Int_eh_sjlj_setjmp:
case ARM::Int_eh_sjlj_setjmp_nofp:
return 20;
@@ -641,17 +659,17 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
case ARM::t2Int_eh_sjlj_setjmp_nofp:
return 12;
case ARM::SPACE:
- return MI->getOperand(1).getImm();
+ return MI.getOperand(1).getImm();
}
}
-unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr *MI) const {
+unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
unsigned Size = 0;
- MachineBasicBlock::const_instr_iterator I = MI->getIterator();
- MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
+ MachineBasicBlock::const_instr_iterator I = MI.getIterator();
+ MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
while (++I != E && I->isInsideBundle()) {
assert(!I->isBundle() && "No nested bundle!");
- Size += GetInstSizeInBytes(&*I);
+ Size += GetInstSizeInBytes(*I);
}
return Size;
}
@@ -700,9 +718,9 @@ void ARMBaseInstrInfo::copyToCPSR(MachineBasicBlock &MBB,
}
void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
bool GPRDest = ARM::GPRRegClass.contains(DestReg);
bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
@@ -976,20 +994,17 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
}
}
-unsigned
-ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- switch (MI->getOpcode()) {
+unsigned ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const {
+ switch (MI.getOpcode()) {
default: break;
case ARM::STRrs:
case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isReg() &&
- MI->getOperand(3).isImm() &&
- MI->getOperand(2).getReg() == 0 &&
- MI->getOperand(3).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
+ if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
+ MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
+ MI.getOperand(3).getImm() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
}
break;
case ARM::STRi12:
@@ -997,27 +1012,24 @@ ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
case ARM::tSTRspi:
case ARM::VSTRD:
case ARM::VSTRS:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
+ if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
+ MI.getOperand(2).getImm() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
}
break;
case ARM::VST1q64:
case ARM::VST1d64TPseudo:
case ARM::VST1d64QPseudo:
- if (MI->getOperand(0).isFI() &&
- MI->getOperand(2).getSubReg() == 0) {
- FrameIndex = MI->getOperand(0).getIndex();
- return MI->getOperand(2).getReg();
+ if (MI.getOperand(0).isFI() && MI.getOperand(2).getSubReg() == 0) {
+ FrameIndex = MI.getOperand(0).getIndex();
+ return MI.getOperand(2).getReg();
}
break;
case ARM::VSTMQIA:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(0).getSubReg() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
+ if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
}
break;
}
@@ -1025,10 +1037,10 @@ ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
return 0;
}
-unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
+unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
const MachineMemOperand *Dummy;
- return MI->mayStore() && hasStoreToStackSlot(MI, Dummy, FrameIndex);
+ return MI.mayStore() && hasStoreToStackSlot(MI, Dummy, FrameIndex);
}
void ARMBaseInstrInfo::
@@ -1164,20 +1176,17 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
}
}
-unsigned
-ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- switch (MI->getOpcode()) {
+unsigned ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const {
+ switch (MI.getOpcode()) {
default: break;
case ARM::LDRrs:
case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isReg() &&
- MI->getOperand(3).isImm() &&
- MI->getOperand(2).getReg() == 0 &&
- MI->getOperand(3).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
+ if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
+ MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
+ MI.getOperand(3).getImm() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
}
break;
case ARM::LDRi12:
@@ -1185,27 +1194,24 @@ ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
case ARM::tLDRspi:
case ARM::VLDRD:
case ARM::VLDRS:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
+ if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
+ MI.getOperand(2).getImm() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
}
break;
case ARM::VLD1q64:
case ARM::VLD1d64TPseudo:
case ARM::VLD1d64QPseudo:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(0).getSubReg() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
+ if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
}
break;
case ARM::VLDMQIA:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(0).getSubReg() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
+ if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
}
break;
}
@@ -1213,20 +1219,19 @@ ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
return 0;
}
-unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
- int &FrameIndex) const {
+unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
+ int &FrameIndex) const {
const MachineMemOperand *Dummy;
- return MI->mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex);
+ return MI.mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex);
}
 /// \brief Expands MEMCPY to either LDMIA/STMIA or LDMIA_UPD/STMIA_UPD
/// depending on whether the result is used.
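 /// For example (illustrative; the actual registers come from the pseudo's
 /// operands), a four-word copy whose base-update results are unused may
 /// lower to:
 ///   LDMIA r1, {r4, r5, r6, r7}
 ///   STMIA r0, {r4, r5, r6, r7}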
-void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MBBI) const {
+void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
bool isThumb1 = Subtarget.isThumb1Only();
bool isThumb2 = Subtarget.isThumb2();
const ARMBaseInstrInfo *TII = Subtarget.getInstrInfo();
- MachineInstr *MI = MBBI;
DebugLoc dl = MI->getDebugLoc();
MachineBasicBlock *BB = MI->getParent();
@@ -1269,24 +1274,20 @@ void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MBBI) const {
STM.addReg(Reg, RegState::Kill);
}
- BB->erase(MBBI);
+ BB->erase(MI);
}
-bool
-ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
- MachineFunction &MF = *MI->getParent()->getParent();
- Reloc::Model RM = MF.getTarget().getRelocationModel();
-
- if (MI->getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
+bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
+ if (MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
assert(getSubtarget().getTargetTriple().isOSBinFormatMachO() &&
"LOAD_STACK_GUARD currently supported only for MachO.");
- expandLoadStackGuard(MI, RM);
- MI->getParent()->erase(MI);
+ expandLoadStackGuard(MI);
+ MI.getParent()->erase(MI);
return true;
}
- if (MI->getOpcode() == ARM::MEMCPY) {
+ if (MI.getOpcode() == ARM::MEMCPY) {
expandMEMCPY(MI);
return true;
}
@@ -1295,14 +1296,13 @@ ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
// copyPhysReg() calls. Look for VMOVS instructions that can legally be
// widened to VMOVD. We prefer the VMOVD when possible because it may be
// changed into a VORR that can go down the NEON pipeline.
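   // For example (illustrative), an even-lane copy such as
   //   %S0 = COPY %S2
   // can be widened to
   //   %D0 = VMOVD %D1, pred:14, pred:%noreg, implicit %S2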
- if (!WidenVMOVS || !MI->isCopy() || Subtarget.isCortexA15() ||
- Subtarget.isFPOnlySP())
+ if (!MI.isCopy() || Subtarget.dontWidenVMOVS() || Subtarget.isFPOnlySP())
return false;
// Look for a copy between even S-registers. That is where we keep floats
// when using NEON v2f32 instructions for f32 arithmetic.
- unsigned DstRegS = MI->getOperand(0).getReg();
- unsigned SrcRegS = MI->getOperand(1).getReg();
+ unsigned DstRegS = MI.getOperand(0).getReg();
+ unsigned SrcRegS = MI.getOperand(1).getReg();
if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
return false;
@@ -1317,44 +1317,44 @@ ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
// We want to widen this into a DstRegD = VMOVD SrcRegD copy. This is only
// legal if the COPY already defines the full DstRegD, and it isn't a
// sub-register insertion.
- if (!MI->definesRegister(DstRegD, TRI) || MI->readsRegister(DstRegD, TRI))
+ if (!MI.definesRegister(DstRegD, TRI) || MI.readsRegister(DstRegD, TRI))
return false;
// A dead copy shouldn't show up here, but reject it just in case.
- if (MI->getOperand(0).isDead())
+ if (MI.getOperand(0).isDead())
return false;
// All clear, widen the COPY.
- DEBUG(dbgs() << "widening: " << *MI);
- MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
+ DEBUG(dbgs() << "widening: " << MI);
+ MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
// Get rid of the old <imp-def> of DstRegD. Leave it if it defines a Q-reg
// or some other super-register.
- int ImpDefIdx = MI->findRegisterDefOperandIdx(DstRegD);
+ int ImpDefIdx = MI.findRegisterDefOperandIdx(DstRegD);
if (ImpDefIdx != -1)
- MI->RemoveOperand(ImpDefIdx);
+ MI.RemoveOperand(ImpDefIdx);
// Change the opcode and operands.
- MI->setDesc(get(ARM::VMOVD));
- MI->getOperand(0).setReg(DstRegD);
- MI->getOperand(1).setReg(SrcRegD);
+ MI.setDesc(get(ARM::VMOVD));
+ MI.getOperand(0).setReg(DstRegD);
+ MI.getOperand(1).setReg(SrcRegD);
AddDefaultPred(MIB);
// We are now reading SrcRegD instead of SrcRegS. This may upset the
// register scavenger and machine verifier, so we need to indicate that we
// are reading an undefined value from SrcRegD, but a proper value from
// SrcRegS.
- MI->getOperand(1).setIsUndef();
+ MI.getOperand(1).setIsUndef();
MIB.addReg(SrcRegS, RegState::Implicit);
// SrcRegD may actually contain an unrelated value in the ssub_1
// sub-register. Don't kill it. Only kill the ssub_0 sub-register.
- if (MI->getOperand(1).isKill()) {
- MI->getOperand(1).setIsKill(false);
- MI->addRegisterKilled(SrcRegS, TRI, true);
+ if (MI.getOperand(1).isKill()) {
+ MI.getOperand(1).setIsKill(false);
+ MI.addRegisterKilled(SrcRegS, TRI, true);
}
- DEBUG(dbgs() << "replaced by: " << *MI);
+ DEBUG(dbgs() << "replaced by: " << MI);
return true;
}
@@ -1403,54 +1403,54 @@ static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
return PCLabelId;
}
-void ARMBaseInstrInfo::
-reMaterialize(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SubIdx,
- const MachineInstr *Orig,
- const TargetRegisterInfo &TRI) const {
- unsigned Opcode = Orig->getOpcode();
+void ARMBaseInstrInfo::reMaterialize(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DestReg, unsigned SubIdx,
+ const MachineInstr &Orig,
+ const TargetRegisterInfo &TRI) const {
+ unsigned Opcode = Orig.getOpcode();
switch (Opcode) {
default: {
- MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
- MI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
+ MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
+ MI->substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
MBB.insert(I, MI);
break;
}
case ARM::tLDRpci_pic:
case ARM::t2LDRpci_pic: {
MachineFunction &MF = *MBB.getParent();
- unsigned CPI = Orig->getOperand(1).getIndex();
+ unsigned CPI = Orig.getOperand(1).getIndex();
unsigned PCLabelId = duplicateCPV(MF, CPI);
- MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
- DestReg)
- .addConstantPoolIndex(CPI).addImm(PCLabelId);
- MIB->setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg)
+ .addConstantPoolIndex(CPI)
+ .addImm(PCLabelId);
+ MIB->setMemRefs(Orig.memoperands_begin(), Orig.memoperands_end());
break;
}
}
}
-MachineInstr *
-ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const {
+MachineInstr *ARMBaseInstrInfo::duplicate(MachineInstr &Orig,
+ MachineFunction &MF) const {
MachineInstr *MI = TargetInstrInfo::duplicate(Orig, MF);
- switch(Orig->getOpcode()) {
+ switch (Orig.getOpcode()) {
case ARM::tLDRpci_pic:
case ARM::t2LDRpci_pic: {
- unsigned CPI = Orig->getOperand(1).getIndex();
+ unsigned CPI = Orig.getOperand(1).getIndex();
unsigned PCLabelId = duplicateCPV(MF, CPI);
- Orig->getOperand(1).setIndex(CPI);
- Orig->getOperand(2).setImm(PCLabelId);
+ Orig.getOperand(1).setIndex(CPI);
+ Orig.getOperand(2).setImm(PCLabelId);
break;
}
}
return MI;
}
-bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
- const MachineInstr *MI1,
+bool ARMBaseInstrInfo::produceSameValue(const MachineInstr &MI0,
+ const MachineInstr &MI1,
const MachineRegisterInfo *MRI) const {
- unsigned Opcode = MI0->getOpcode();
+ unsigned Opcode = MI0.getOpcode();
if (Opcode == ARM::t2LDRpci ||
Opcode == ARM::t2LDRpci_pic ||
Opcode == ARM::tLDRpci ||
@@ -1461,13 +1461,13 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
Opcode == ARM::MOV_ga_pcrel ||
Opcode == ARM::MOV_ga_pcrel_ldr ||
Opcode == ARM::t2MOV_ga_pcrel) {
- if (MI1->getOpcode() != Opcode)
+ if (MI1.getOpcode() != Opcode)
return false;
- if (MI0->getNumOperands() != MI1->getNumOperands())
+ if (MI0.getNumOperands() != MI1.getNumOperands())
return false;
- const MachineOperand &MO0 = MI0->getOperand(1);
- const MachineOperand &MO1 = MI1->getOperand(1);
+ const MachineOperand &MO0 = MI0.getOperand(1);
+ const MachineOperand &MO1 = MI1.getOperand(1);
if (MO0.getOffset() != MO1.getOffset())
return false;
@@ -1480,7 +1480,7 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
// Ignore the PC labels.
return MO0.getGlobal() == MO1.getGlobal();
- const MachineFunction *MF = MI0->getParent()->getParent();
+ const MachineFunction *MF = MI0.getParent()->getParent();
const MachineConstantPool *MCP = MF->getConstantPool();
int CPI0 = MO0.getIndex();
int CPI1 = MO1.getIndex();
@@ -1499,13 +1499,13 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
}
return false;
} else if (Opcode == ARM::PICLDR) {
- if (MI1->getOpcode() != Opcode)
+ if (MI1.getOpcode() != Opcode)
return false;
- if (MI0->getNumOperands() != MI1->getNumOperands())
+ if (MI0.getNumOperands() != MI1.getNumOperands())
return false;
- unsigned Addr0 = MI0->getOperand(1).getReg();
- unsigned Addr1 = MI1->getOperand(1).getReg();
+ unsigned Addr0 = MI0.getOperand(1).getReg();
+ unsigned Addr1 = MI1.getOperand(1).getReg();
if (Addr0 != Addr1) {
if (!MRI ||
!TargetRegisterInfo::isVirtualRegister(Addr0) ||
@@ -1517,21 +1517,21 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
MachineInstr *Def1 = MRI->getVRegDef(Addr1);
     // Check if the loaded values, e.g. a constantpool of a global address, are
     // the same.
- if (!produceSameValue(Def0, Def1, MRI))
+ if (!produceSameValue(*Def0, *Def1, MRI))
return false;
}
- for (unsigned i = 3, e = MI0->getNumOperands(); i != e; ++i) {
+ for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) {
// %vreg12<def> = PICLDR %vreg11, 0, pred:14, pred:%noreg
- const MachineOperand &MO0 = MI0->getOperand(i);
- const MachineOperand &MO1 = MI1->getOperand(i);
+ const MachineOperand &MO0 = MI0.getOperand(i);
+ const MachineOperand &MO1 = MI1.getOperand(i);
if (!MO0.isIdenticalTo(MO1))
return false;
}
return true;
}
- return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
+ return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
@@ -1653,7 +1653,7 @@ bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
return true;
}
-bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
+bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const {
// Debug info is never a scheduling boundary. It's necessary to be explicit
@@ -1662,11 +1662,11 @@ bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
// considered a scheduling hazard, which is wrong. It should be the actual
// instruction preceding the dbg_value instruction(s), just like it is
// when debug info is not present.
- if (MI->isDebugValue())
+ if (MI.isDebugValue())
return false;
// Terminators and labels can't be scheduled around.
- if (MI->isTerminator() || MI->isPosition())
+ if (MI.isTerminator() || MI.isPosition())
return true;
// Treat the start of the IT block as a scheduling boundary, but schedule
@@ -1690,7 +1690,7 @@ bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
// Calls don't actually change the stack pointer, even if they have imp-defs.
// No ARM calling conventions change the stack pointer. (X86 calling
// conventions sometimes do).
- if (!MI->isCall() && MI->definesRegister(ARM::SP))
+ if (!MI.isCall() && MI.definesRegister(ARM::SP))
return true;
return false;
@@ -1718,7 +1718,7 @@ isProfitableToIfCvt(MachineBasicBlock &MBB,
CmpMI->getOpcode() == ARM::t2CMPri) {
unsigned Reg = CmpMI->getOperand(0).getReg();
unsigned PredReg = 0;
- ARMCC::CondCodes P = getInstrPredicate(CmpMI, PredReg);
+ ARMCC::CondCodes P = getInstrPredicate(*CmpMI, PredReg);
if (P == ARMCC::AL && CmpMI->getOperand(1).getImm() == 0 &&
isARMLowRegister(Reg))
return false;
@@ -1765,24 +1765,24 @@ isProfitableToIfCvt(MachineBasicBlock &TMBB,
bool
ARMBaseInstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
MachineBasicBlock &FMBB) const {
- // Reduce false anti-dependencies to let Swift's out-of-order execution
+ // Reduce false anti-dependencies to let the target's out-of-order execution
// engine do its thing.
- return Subtarget.isSwift();
+ return Subtarget.isProfitableToUnpredicate();
}
/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
-ARMCC::CondCodes
-llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
- int PIdx = MI->findFirstPredOperandIdx();
+ARMCC::CondCodes llvm::getInstrPredicate(const MachineInstr &MI,
+ unsigned &PredReg) {
+ int PIdx = MI.findFirstPredOperandIdx();
if (PIdx == -1) {
PredReg = 0;
return ARMCC::AL;
}
- PredReg = MI->getOperand(PIdx+1).getReg();
- return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
+ PredReg = MI.getOperand(PIdx+1).getReg();
+ return (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
}
@@ -1797,11 +1797,11 @@ unsigned llvm::getMatchingCondBranchOpcode(unsigned Opc) {
llvm_unreachable("Unknown unconditional branch opcode!");
}
-MachineInstr *ARMBaseInstrInfo::commuteInstructionImpl(MachineInstr *MI,
+MachineInstr *ARMBaseInstrInfo::commuteInstructionImpl(MachineInstr &MI,
bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const {
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
case ARM::MOVCCr:
case ARM::t2MOVCCr: {
// MOVCC can be commuted by inverting the condition.
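     // For example (illustrative):
     //   %dst = MOVCCr %a, %b, pred:eq  <->  %dst = MOVCCr %b, %a, pred:ne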
@@ -1810,13 +1810,14 @@ MachineInstr *ARMBaseInstrInfo::commuteInstructionImpl(MachineInstr *MI,
// MOVCC AL can't be inverted. Shouldn't happen.
if (CC == ARMCC::AL || PredReg != ARM::CPSR)
return nullptr;
- MI = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
- if (!MI)
+ MachineInstr *CommutedMI =
+ TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
+ if (!CommutedMI)
return nullptr;
// After swapping the MOVCC operands, also invert the condition.
- MI->getOperand(MI->findFirstPredOperandIdx())
- .setImm(ARMCC::getOppositeCondition(CC));
- return MI;
+ CommutedMI->getOperand(CommutedMI->findFirstPredOperandIdx())
+ .setImm(ARMCC::getOppositeCondition(CC));
+ return CommutedMI;
}
}
return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
@@ -1860,11 +1861,11 @@ static MachineInstr *canFoldIntoMOVCC(unsigned Reg,
return MI;
}
-bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr *MI,
+bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr &MI,
SmallVectorImpl<MachineOperand> &Cond,
unsigned &TrueOp, unsigned &FalseOp,
bool &Optimizable) const {
- assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) &&
+ assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
"Unknown select instruction");
// MOVCC operands:
// 0: Def.
@@ -1874,38 +1875,38 @@ bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr *MI,
// 4: CPSR use.
TrueOp = 1;
FalseOp = 2;
- Cond.push_back(MI->getOperand(3));
- Cond.push_back(MI->getOperand(4));
+ Cond.push_back(MI.getOperand(3));
+ Cond.push_back(MI.getOperand(4));
// We can always fold a def.
Optimizable = true;
return false;
}
MachineInstr *
-ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
+ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI,
SmallPtrSetImpl<MachineInstr *> &SeenMIs,
bool PreferFalse) const {
- assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) &&
+ assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
"Unknown select instruction");
- MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
- MachineInstr *DefMI = canFoldIntoMOVCC(MI->getOperand(2).getReg(), MRI, this);
+ MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+ MachineInstr *DefMI = canFoldIntoMOVCC(MI.getOperand(2).getReg(), MRI, this);
bool Invert = !DefMI;
if (!DefMI)
- DefMI = canFoldIntoMOVCC(MI->getOperand(1).getReg(), MRI, this);
+ DefMI = canFoldIntoMOVCC(MI.getOperand(1).getReg(), MRI, this);
if (!DefMI)
return nullptr;
// Find new register class to use.
- MachineOperand FalseReg = MI->getOperand(Invert ? 2 : 1);
- unsigned DestReg = MI->getOperand(0).getReg();
+ MachineOperand FalseReg = MI.getOperand(Invert ? 2 : 1);
+ unsigned DestReg = MI.getOperand(0).getReg();
const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
if (!MRI.constrainRegClass(DestReg, PreviousClass))
return nullptr;
// Create a new predicated version of DefMI.
// Rfalse is the first use.
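   // e.g. (illustrative) folding the def of one MOVCC input:
   //   %t = ADDri %a, 1
   //   %dst = MOVCCr %f, %t, pred
   // roughly becomes a predicated %dst = ADDri %a, 1, pred, with %f as Rfalse.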
- MachineInstrBuilder NewMI = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
- DefMI->getDesc(), DestReg);
+ MachineInstrBuilder NewMI =
+ BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), DefMI->getDesc(), DestReg);
// Copy all the DefMI operands, excluding its (null) predicate.
const MCInstrDesc &DefDesc = DefMI->getDesc();
@@ -1913,12 +1914,12 @@ ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
NewMI.addOperand(DefMI->getOperand(i));
- unsigned CondCode = MI->getOperand(3).getImm();
+ unsigned CondCode = MI.getOperand(3).getImm();
if (Invert)
NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode)));
else
NewMI.addImm(CondCode);
- NewMI.addOperand(MI->getOperand(4));
+ NewMI.addOperand(MI.getOperand(4));
// DefMI is not the -S version that sets CPSR, so add an optional %noreg.
if (NewMI->hasOptionalDef())
@@ -1940,7 +1941,7 @@ ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
   // DefMI would be invalid when transferred inside the loop. Checking for a
// loop is expensive, but at least remove kill flags if they are in different
// BBs.
- if (DefMI->getParent() != MI->getParent())
+ if (DefMI->getParent() != MI.getParent())
NewMI->clearKillInfo();
// The caller will erase MI, but not DefMI.
@@ -1994,10 +1995,12 @@ unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
}
void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI, DebugLoc dl,
- unsigned DestReg, unsigned BaseReg, int NumBytes,
- ARMCC::CondCodes Pred, unsigned PredReg,
- const ARMBaseInstrInfo &TII, unsigned MIFlags) {
+ MachineBasicBlock::iterator &MBBI,
+ const DebugLoc &dl, unsigned DestReg,
+ unsigned BaseReg, int NumBytes,
+ ARMCC::CondCodes Pred, unsigned PredReg,
+ const ARMBaseInstrInfo &TII,
+ unsigned MIFlags) {
if (NumBytes == 0 && DestReg != BaseReg) {
BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), DestReg)
.addReg(BaseReg, RegState::Kill)
@@ -2281,30 +2284,30 @@ bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
/// in SrcReg and SrcReg2 if having two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
-bool ARMBaseInstrInfo::
-analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
- int &CmpMask, int &CmpValue) const {
- switch (MI->getOpcode()) {
+bool ARMBaseInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &CmpMask,
+ int &CmpValue) const {
+ switch (MI.getOpcode()) {
default: break;
case ARM::CMPri:
case ARM::t2CMPri:
- SrcReg = MI->getOperand(0).getReg();
+ SrcReg = MI.getOperand(0).getReg();
SrcReg2 = 0;
CmpMask = ~0;
- CmpValue = MI->getOperand(1).getImm();
+ CmpValue = MI.getOperand(1).getImm();
return true;
case ARM::CMPrr:
case ARM::t2CMPrr:
- SrcReg = MI->getOperand(0).getReg();
- SrcReg2 = MI->getOperand(1).getReg();
+ SrcReg = MI.getOperand(0).getReg();
+ SrcReg2 = MI.getOperand(1).getReg();
CmpMask = ~0;
CmpValue = 0;
return true;
case ARM::TSTri:
case ARM::t2TSTri:
- SrcReg = MI->getOperand(0).getReg();
+ SrcReg = MI.getOperand(0).getReg();
SrcReg2 = 0;
- CmpMask = MI->getOperand(1).getImm();
+ CmpMask = MI.getOperand(1).getImm();
CmpValue = 0;
return true;
}
@@ -2385,25 +2388,25 @@ inline static bool isRedundantFlagInstr(MachineInstr *CmpI, unsigned SrcReg,
/// E.g. SUBrr(r1,r2) and CMPrr(r1,r2). We also handle the case where two
/// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the
/// condition code of instructions which use the flags.
-bool ARMBaseInstrInfo::
-optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
- int CmpMask, int CmpValue,
- const MachineRegisterInfo *MRI) const {
+bool ARMBaseInstrInfo::optimizeCompareInstr(
+ MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
+ int CmpValue, const MachineRegisterInfo *MRI) const {
// Get the unique definition of SrcReg.
MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
if (!MI) return false;
// Masked compares sometimes use the same register as the corresponding 'and'.
if (CmpMask != ~0) {
- if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(MI)) {
+ if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(*MI)) {
MI = nullptr;
for (MachineRegisterInfo::use_instr_iterator
UI = MRI->use_instr_begin(SrcReg), UE = MRI->use_instr_end();
UI != UE; ++UI) {
- if (UI->getParent() != CmpInstr->getParent()) continue;
+ if (UI->getParent() != CmpInstr.getParent())
+ continue;
MachineInstr *PotentialAND = &*UI;
if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) ||
- isPredicated(PotentialAND))
+ isPredicated(*PotentialAND))
continue;
MI = PotentialAND;
break;
@@ -2414,7 +2417,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// Get ready to iterate backward from CmpInstr.
MachineBasicBlock::iterator I = CmpInstr, E = MI,
- B = CmpInstr->getParent()->begin();
+ B = CmpInstr.getParent()->begin();
// Early exit if CmpInstr is at the beginning of the BB.
if (I == B) return false;
@@ -2427,13 +2430,13 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
if (SrcReg2 != 0)
// MI is not a candidate for CMPrr.
MI = nullptr;
- else if (MI->getParent() != CmpInstr->getParent() || CmpValue != 0) {
+ else if (MI->getParent() != CmpInstr.getParent() || CmpValue != 0) {
// Conservatively refuse to convert an instruction which isn't in the same
// BB as the comparison.
// For CMPri w/ CmpValue != 0, a Sub may still be a candidate.
// Thus we cannot return here.
- if (CmpInstr->getOpcode() == ARM::CMPri ||
- CmpInstr->getOpcode() == ARM::t2CMPri)
+ if (CmpInstr.getOpcode() == ARM::CMPri ||
+ CmpInstr.getOpcode() == ARM::t2CMPri)
MI = nullptr;
else
return false;
@@ -2453,7 +2456,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
return false;
// Check whether CmpInstr can be made redundant by the current instruction.
- if (isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpValue, &*I)) {
+ if (isRedundantFlagInstr(&CmpInstr, SrcReg, SrcReg2, CmpValue, &*I)) {
Sub = &*I;
break;
}
@@ -2471,7 +2474,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
if (!MI) MI = Sub;
// We can't use a predicated instruction - it doesn't always write the flags.
- if (isPredicated(MI))
+ if (isPredicated(*MI))
return false;
switch (MI->getOpcode()) {
@@ -2519,7 +2522,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
OperandsToUpdate;
bool isSafe = false;
I = CmpInstr;
- E = CmpInstr->getParent()->end();
+ E = CmpInstr.getParent()->end();
while (!isSafe && ++I != E) {
const MachineInstr &Instr = *I;
for (unsigned IO = 0, EO = Instr.getNumOperands();
@@ -2608,7 +2611,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
   // If CPSR is neither killed nor re-defined, we should check whether it is
// live-out. If it is live-out, do not optimize.
if (!isSafe) {
- MachineBasicBlock *MBB = CmpInstr->getParent();
+ MachineBasicBlock *MBB = CmpInstr.getParent();
for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
SE = MBB->succ_end(); SI != SE; ++SI)
if ((*SI)->isLiveIn(ARM::CPSR))
@@ -2618,8 +2621,8 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// Toggle the optional operand to CPSR.
MI->getOperand(5).setReg(ARM::CPSR);
MI->getOperand(5).setIsDef(true);
- assert(!isPredicated(MI) && "Can't use flags from predicated instruction");
- CmpInstr->eraseFromParent();
+ assert(!isPredicated(*MI) && "Can't use flags from predicated instruction");
+ CmpInstr.eraseFromParent();
// Modify the condition code of operands in OperandsToUpdate.
// Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
@@ -2633,42 +2636,42 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
return false;
}
-bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
- MachineInstr *DefMI, unsigned Reg,
+bool ARMBaseInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
+ unsigned Reg,
MachineRegisterInfo *MRI) const {
// Fold large immediates into add, sub, or, xor.
- unsigned DefOpc = DefMI->getOpcode();
+ unsigned DefOpc = DefMI.getOpcode();
if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm)
return false;
- if (!DefMI->getOperand(1).isImm())
+ if (!DefMI.getOperand(1).isImm())
// Could be t2MOVi32imm <ga:xx>
return false;
if (!MRI->hasOneNonDBGUse(Reg))
return false;
- const MCInstrDesc &DefMCID = DefMI->getDesc();
+ const MCInstrDesc &DefMCID = DefMI.getDesc();
if (DefMCID.hasOptionalDef()) {
unsigned NumOps = DefMCID.getNumOperands();
- const MachineOperand &MO = DefMI->getOperand(NumOps-1);
+ const MachineOperand &MO = DefMI.getOperand(NumOps - 1);
if (MO.getReg() == ARM::CPSR && !MO.isDead())
// If DefMI defines CPSR and it is not dead, it's obviously not safe
// to delete DefMI.
return false;
}
- const MCInstrDesc &UseMCID = UseMI->getDesc();
+ const MCInstrDesc &UseMCID = UseMI.getDesc();
if (UseMCID.hasOptionalDef()) {
unsigned NumOps = UseMCID.getNumOperands();
- if (UseMI->getOperand(NumOps-1).getReg() == ARM::CPSR)
+ if (UseMI.getOperand(NumOps - 1).getReg() == ARM::CPSR)
// If the instruction sets the flag, do not attempt this optimization
// since it may change the semantics of the code.
return false;
}
- unsigned UseOpc = UseMI->getOpcode();
+ unsigned UseOpc = UseMI.getOpcode();
unsigned NewUseOpc = 0;
- uint32_t ImmVal = (uint32_t)DefMI->getOperand(1).getImm();
+ uint32_t ImmVal = (uint32_t)DefMI.getOperand(1).getImm();
uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
bool Commute = false;
switch (UseOpc) {
@@ -2681,17 +2684,27 @@ bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
case ARM::t2ADDrr:
case ARM::t2ORRrr:
case ARM::t2EORrr: {
- Commute = UseMI->getOperand(2).getReg() != Reg;
+ Commute = UseMI.getOperand(2).getReg() != Reg;
switch (UseOpc) {
default: break;
+ case ARM::ADDrr:
case ARM::SUBrr: {
- if (Commute)
+ if (UseOpc == ARM::SUBrr && Commute)
+ return false;
+
+ // ADD/SUB are special because they're essentially the same operation, so
+ // we can handle a larger range of immediates.
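+      // For example (illustrative), ImmVal = 0xF000F splits into the SO
+      // immediates 0xF and 0xF0000, so ADDrr folds to two ADDri instructions;
+      // when only -ImmVal splits this way, ADD and SUB swap roles instead.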
+ if (ARM_AM::isSOImmTwoPartVal(ImmVal))
+ NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;
+ else if (ARM_AM::isSOImmTwoPartVal(-ImmVal)) {
+ ImmVal = -ImmVal;
+ NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;
+ } else
return false;
- ImmVal = -ImmVal;
- NewUseOpc = ARM::SUBri;
- // Fallthrough
+ SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
+ SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
+ break;
}
- case ARM::ADDrr:
case ARM::ORRrr:
case ARM::EORrr: {
if (!ARM_AM::isSOImmTwoPartVal(ImmVal))
@@ -2700,20 +2713,29 @@ bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
switch (UseOpc) {
default: break;
- case ARM::ADDrr: NewUseOpc = ARM::ADDri; break;
case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
case ARM::EORrr: NewUseOpc = ARM::EORri; break;
}
break;
}
+ case ARM::t2ADDrr:
case ARM::t2SUBrr: {
- if (Commute)
+ if (UseOpc == ARM::t2SUBrr && Commute)
return false;
- ImmVal = -ImmVal;
- NewUseOpc = ARM::t2SUBri;
- // Fallthrough
+
+ // ADD/SUB are special because they're essentially the same operation, so
+ // we can handle a larger range of immediates.
+ if (ARM_AM::isT2SOImmTwoPartVal(ImmVal))
+ NewUseOpc = UseOpc == ARM::t2ADDrr ? ARM::t2ADDri : ARM::t2SUBri;
+ else if (ARM_AM::isT2SOImmTwoPartVal(-ImmVal)) {
+ ImmVal = -ImmVal;
+ NewUseOpc = UseOpc == ARM::t2ADDrr ? ARM::t2SUBri : ARM::t2ADDri;
+ } else
+ return false;
+ SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
+ SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
+ break;
}
- case ARM::t2ADDrr:
case ARM::t2ORRrr:
case ARM::t2EORrr: {
if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal))
@@ -2722,7 +2744,6 @@ bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
switch (UseOpc) {
default: break;
- case ARM::t2ADDrr: NewUseOpc = ARM::t2ADDri; break;
case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;
}
@@ -2733,27 +2754,27 @@ bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
}
unsigned OpIdx = Commute ? 2 : 1;
- unsigned Reg1 = UseMI->getOperand(OpIdx).getReg();
- bool isKill = UseMI->getOperand(OpIdx).isKill();
+ unsigned Reg1 = UseMI.getOperand(OpIdx).getReg();
+ bool isKill = UseMI.getOperand(OpIdx).isKill();
unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg));
- AddDefaultCC(AddDefaultPred(BuildMI(*UseMI->getParent(),
- UseMI, UseMI->getDebugLoc(),
- get(NewUseOpc), NewReg)
- .addReg(Reg1, getKillRegState(isKill))
- .addImm(SOImmValV1)));
- UseMI->setDesc(get(NewUseOpc));
- UseMI->getOperand(1).setReg(NewReg);
- UseMI->getOperand(1).setIsKill();
- UseMI->getOperand(2).ChangeToImmediate(SOImmValV2);
- DefMI->eraseFromParent();
+ AddDefaultCC(
+ AddDefaultPred(BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
+ get(NewUseOpc), NewReg)
+ .addReg(Reg1, getKillRegState(isKill))
+ .addImm(SOImmValV1)));
+ UseMI.setDesc(get(NewUseOpc));
+ UseMI.getOperand(1).setReg(NewReg);
+ UseMI.getOperand(1).setIsKill();
+ UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
+ DefMI.eraseFromParent();
return true;
}
static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
- const MachineInstr *MI) {
- switch (MI->getOpcode()) {
+ const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
default: {
- const MCInstrDesc &Desc = MI->getDesc();
+ const MCInstrDesc &Desc = MI.getDesc();
int UOps = ItinData->getNumMicroOps(Desc.getSchedClass());
assert(UOps >= 0 && "bad # UOps");
return UOps;
@@ -2763,7 +2784,7 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
case ARM::LDRBrs:
case ARM::STRrs:
case ARM::STRBrs: {
- unsigned ShOpVal = MI->getOperand(3).getImm();
+ unsigned ShOpVal = MI.getOperand(3).getImm();
bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
if (!isSub &&
@@ -2776,10 +2797,10 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
case ARM::LDRH:
case ARM::STRH: {
- if (!MI->getOperand(2).getReg())
+ if (!MI.getOperand(2).getReg())
return 1;
- unsigned ShOpVal = MI->getOperand(3).getImm();
+ unsigned ShOpVal = MI.getOperand(3).getImm();
bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
if (!isSub &&
@@ -2792,22 +2813,22 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
case ARM::LDRSB:
case ARM::LDRSH:
- return (ARM_AM::getAM3Op(MI->getOperand(3).getImm()) == ARM_AM::sub) ? 3:2;
+ return (ARM_AM::getAM3Op(MI.getOperand(3).getImm()) == ARM_AM::sub) ? 3 : 2;
case ARM::LDRSB_POST:
case ARM::LDRSH_POST: {
- unsigned Rt = MI->getOperand(0).getReg();
- unsigned Rm = MI->getOperand(3).getReg();
+ unsigned Rt = MI.getOperand(0).getReg();
+ unsigned Rm = MI.getOperand(3).getReg();
return (Rt == Rm) ? 4 : 3;
}
case ARM::LDR_PRE_REG:
case ARM::LDRB_PRE_REG: {
- unsigned Rt = MI->getOperand(0).getReg();
- unsigned Rm = MI->getOperand(3).getReg();
+ unsigned Rt = MI.getOperand(0).getReg();
+ unsigned Rm = MI.getOperand(3).getReg();
if (Rt == Rm)
return 3;
- unsigned ShOpVal = MI->getOperand(4).getImm();
+ unsigned ShOpVal = MI.getOperand(4).getImm();
bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
if (!isSub &&
@@ -2820,7 +2841,7 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
case ARM::STR_PRE_REG:
case ARM::STRB_PRE_REG: {
- unsigned ShOpVal = MI->getOperand(4).getImm();
+ unsigned ShOpVal = MI.getOperand(4).getImm();
bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
if (!isSub &&
@@ -2833,21 +2854,20 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
case ARM::LDRH_PRE:
case ARM::STRH_PRE: {
- unsigned Rt = MI->getOperand(0).getReg();
- unsigned Rm = MI->getOperand(3).getReg();
+ unsigned Rt = MI.getOperand(0).getReg();
+ unsigned Rm = MI.getOperand(3).getReg();
if (!Rm)
return 2;
if (Rt == Rm)
return 3;
- return (ARM_AM::getAM3Op(MI->getOperand(4).getImm()) == ARM_AM::sub)
- ? 3 : 2;
+ return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 3 : 2;
}
case ARM::LDR_POST_REG:
case ARM::LDRB_POST_REG:
case ARM::LDRH_POST: {
- unsigned Rt = MI->getOperand(0).getReg();
- unsigned Rm = MI->getOperand(3).getReg();
+ unsigned Rt = MI.getOperand(0).getReg();
+ unsigned Rm = MI.getOperand(3).getReg();
return (Rt == Rm) ? 3 : 2;
}
@@ -2866,13 +2886,13 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
case ARM::LDRSB_PRE:
case ARM::LDRSH_PRE: {
- unsigned Rm = MI->getOperand(3).getReg();
+ unsigned Rm = MI.getOperand(3).getReg();
if (Rm == 0)
return 3;
- unsigned Rt = MI->getOperand(0).getReg();
+ unsigned Rt = MI.getOperand(0).getReg();
if (Rt == Rm)
return 4;
- unsigned ShOpVal = MI->getOperand(4).getImm();
+ unsigned ShOpVal = MI.getOperand(4).getImm();
bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
if (!isSub &&
@@ -2884,18 +2904,20 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
}
case ARM::LDRD: {
- unsigned Rt = MI->getOperand(0).getReg();
- unsigned Rn = MI->getOperand(2).getReg();
- unsigned Rm = MI->getOperand(3).getReg();
+ unsigned Rt = MI.getOperand(0).getReg();
+ unsigned Rn = MI.getOperand(2).getReg();
+ unsigned Rm = MI.getOperand(3).getReg();
if (Rm)
- return (ARM_AM::getAM3Op(MI->getOperand(4).getImm()) == ARM_AM::sub) ?4:3;
+ return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4
+ : 3;
return (Rt == Rn) ? 3 : 2;
}
case ARM::STRD: {
- unsigned Rm = MI->getOperand(3).getReg();
+ unsigned Rm = MI.getOperand(3).getReg();
if (Rm)
- return (ARM_AM::getAM3Op(MI->getOperand(4).getImm()) == ARM_AM::sub) ?4:3;
+ return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4
+ : 3;
return 2;
}
@@ -2908,24 +2930,26 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
return 4;
case ARM::LDRD_PRE: {
- unsigned Rt = MI->getOperand(0).getReg();
- unsigned Rn = MI->getOperand(3).getReg();
- unsigned Rm = MI->getOperand(4).getReg();
+ unsigned Rt = MI.getOperand(0).getReg();
+ unsigned Rn = MI.getOperand(3).getReg();
+ unsigned Rm = MI.getOperand(4).getReg();
if (Rm)
- return (ARM_AM::getAM3Op(MI->getOperand(5).getImm()) == ARM_AM::sub) ?5:4;
+ return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5
+ : 4;
return (Rt == Rn) ? 4 : 3;
}
case ARM::t2LDRD_PRE: {
- unsigned Rt = MI->getOperand(0).getReg();
- unsigned Rn = MI->getOperand(3).getReg();
+ unsigned Rt = MI.getOperand(0).getReg();
+ unsigned Rn = MI.getOperand(3).getReg();
return (Rt == Rn) ? 4 : 3;
}
case ARM::STRD_PRE: {
- unsigned Rm = MI->getOperand(4).getReg();
+ unsigned Rm = MI.getOperand(4).getReg();
if (Rm)
- return (ARM_AM::getAM3Op(MI->getOperand(5).getImm()) == ARM_AM::sub) ?5:4;
+ return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5
+ : 4;
return 3;
}
@@ -2953,8 +2977,8 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
return 2;
case ARM::t2LDRDi8: {
- unsigned Rt = MI->getOperand(0).getReg();
- unsigned Rn = MI->getOperand(2).getReg();
+ unsigned Rt = MI.getOperand(0).getReg();
+ unsigned Rn = MI.getOperand(2).getReg();
return (Rt == Rn) ? 3 : 2;
}
@@ -2994,22 +3018,61 @@ static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
// sizes during MC lowering. That target hook should be local to MC lowering
// because we can't ensure that it is aware of other MI forms. Doing this will
// ensure that MachineMemOperands are correctly propagated through all passes.
-unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr *MI) const {
+unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr &MI) const {
unsigned Size = 0;
- for (MachineInstr::mmo_iterator I = MI->memoperands_begin(),
- E = MI->memoperands_end(); I != E; ++I) {
+ for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
+ E = MI.memoperands_end();
+ I != E; ++I) {
Size += (*I)->getSize();
}
return Size / 4;
}
-unsigned
-ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
- const MachineInstr *MI) const {
+static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc,
+ unsigned NumRegs) {
+ unsigned UOps = 1 + NumRegs; // 1 for address computation.
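+  // e.g. (illustrative) LDMIA_UPD with 4 registers: 1 (address) + 4 (loads)
+  // + 1 (writeback) = 6 microops; tPOP_RET with 4: 1 + 4 + 2 = 7.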
+ switch (Opc) {
+ default:
+ break;
+ case ARM::VLDMDIA_UPD:
+ case ARM::VLDMDDB_UPD:
+ case ARM::VLDMSIA_UPD:
+ case ARM::VLDMSDB_UPD:
+ case ARM::VSTMDIA_UPD:
+ case ARM::VSTMDDB_UPD:
+ case ARM::VSTMSIA_UPD:
+ case ARM::VSTMSDB_UPD:
+ case ARM::LDMIA_UPD:
+ case ARM::LDMDA_UPD:
+ case ARM::LDMDB_UPD:
+ case ARM::LDMIB_UPD:
+ case ARM::STMIA_UPD:
+ case ARM::STMDA_UPD:
+ case ARM::STMDB_UPD:
+ case ARM::STMIB_UPD:
+ case ARM::tLDMIA_UPD:
+ case ARM::tSTMIA_UPD:
+ case ARM::t2LDMIA_UPD:
+ case ARM::t2LDMDB_UPD:
+ case ARM::t2STMIA_UPD:
+ case ARM::t2STMDB_UPD:
+ ++UOps; // One for base register writeback.
+ break;
+ case ARM::LDMIA_RET:
+ case ARM::tPOP_RET:
+ case ARM::t2LDMIA_RET:
+ UOps += 2; // One for base reg wb, one for write to pc.
+ break;
+ }
+ return UOps;
+}
+
+unsigned ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
+ const MachineInstr &MI) const {
if (!ItinData || ItinData->isEmpty())
return 1;
- const MCInstrDesc &Desc = MI->getDesc();
+ const MCInstrDesc &Desc = MI.getDesc();
unsigned Class = Desc.getSchedClass();
int ItinUOps = ItinData->getNumMicroOps(Class);
if (ItinUOps >= 0) {
@@ -3019,7 +3082,7 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
return ItinUOps;
}
- unsigned Opc = MI->getOpcode();
+ unsigned Opc = MI.getOpcode();
switch (Opc) {
default:
llvm_unreachable("Unexpected multi-uops instruction!");
@@ -3049,7 +3112,7 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
case ARM::VSTMSIA:
case ARM::VSTMSIA_UPD:
case ARM::VSTMSDB_UPD: {
- unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands();
+ unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands();
return (NumRegs / 2) + (NumRegs % 2) + 1;
}
@@ -3085,66 +3148,36 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
case ARM::t2STMDB:
case ARM::t2STMIA_UPD:
case ARM::t2STMDB_UPD: {
- unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1;
- if (Subtarget.isSwift()) {
- int UOps = 1 + NumRegs; // One for address computation, one for each ld / st.
- switch (Opc) {
- default: break;
- case ARM::VLDMDIA_UPD:
- case ARM::VLDMDDB_UPD:
- case ARM::VLDMSIA_UPD:
- case ARM::VLDMSDB_UPD:
- case ARM::VSTMDIA_UPD:
- case ARM::VSTMDDB_UPD:
- case ARM::VSTMSIA_UPD:
- case ARM::VSTMSDB_UPD:
- case ARM::LDMIA_UPD:
- case ARM::LDMDA_UPD:
- case ARM::LDMDB_UPD:
- case ARM::LDMIB_UPD:
- case ARM::STMIA_UPD:
- case ARM::STMDA_UPD:
- case ARM::STMDB_UPD:
- case ARM::STMIB_UPD:
- case ARM::tLDMIA_UPD:
- case ARM::tSTMIA_UPD:
- case ARM::t2LDMIA_UPD:
- case ARM::t2LDMDB_UPD:
- case ARM::t2STMIA_UPD:
- case ARM::t2STMDB_UPD:
- ++UOps; // One for base register writeback.
- break;
- case ARM::LDMIA_RET:
- case ARM::tPOP_RET:
- case ARM::t2LDMIA_RET:
- UOps += 2; // One for base reg wb, one for write to pc.
- break;
- }
- return UOps;
- } else if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
+ unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands() + 1;
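+    // The timing kinds fold the old per-CPU checks into one subtarget hook;
+    // roughly, SingleIssuePlusExtras matched Swift, DoubleIssue matched
+    // A8/A7, and DoubleIssueCheckUnalignedAccess matched A9-like cores.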
+ switch (Subtarget.getLdStMultipleTiming()) {
+ case ARMSubtarget::SingleIssuePlusExtras:
+ return getNumMicroOpsSingleIssuePlusExtras(Opc, NumRegs);
+ case ARMSubtarget::SingleIssue:
+ // Assume the worst.
+ return NumRegs;
+ case ARMSubtarget::DoubleIssue: {
if (NumRegs < 4)
return 2;
// 4 registers would be issued: 2, 2.
// 5 registers would be issued: 2, 2, 1.
- int A8UOps = (NumRegs / 2);
+ unsigned UOps = (NumRegs / 2);
if (NumRegs % 2)
- ++A8UOps;
- return A8UOps;
- } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
- int A9UOps = (NumRegs / 2);
+ ++UOps;
+ return UOps;
+ }
+ case ARMSubtarget::DoubleIssueCheckUnalignedAccess: {
+ unsigned UOps = (NumRegs / 2);
    // If there is an odd number of registers or it's not 64-bit aligned,
// then it takes an extra AGU (Address Generation Unit) cycle.
- if ((NumRegs % 2) ||
- !MI->hasOneMemOperand() ||
- (*MI->memoperands_begin())->getAlignment() < 8)
- ++A9UOps;
- return A9UOps;
- } else {
- // Assume the worst.
- return NumRegs;
+ if ((NumRegs % 2) || !MI.hasOneMemOperand() ||
+ (*MI.memoperands_begin())->getAlignment() < 8)
+ ++UOps;
+ return UOps;
+ }
}
}
}
+ llvm_unreachable("Didn't find the number of microops");
}
int
@@ -3428,13 +3461,13 @@ static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
}
static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
- const MachineInstr *MI, unsigned Reg,
+ const MachineInstr &MI, unsigned Reg,
unsigned &UseIdx, unsigned &Dist) {
Dist = 0;
- MachineBasicBlock::const_instr_iterator II = ++MI->getIterator();
+ MachineBasicBlock::const_instr_iterator II = ++MI.getIterator();
assert(II->isInsideBundle() && "Empty bundle?");
- MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
+ MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
// FIXME: This doesn't properly handle multiple uses.
int Idx = -1;
@@ -3460,17 +3493,17 @@ static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
/// itinerary based on the def opcode and alignment. The caller will ensure that
/// adjusted latency is at least one cycle.
static int adjustDefLatency(const ARMSubtarget &Subtarget,
- const MachineInstr *DefMI,
- const MCInstrDesc *DefMCID, unsigned DefAlign) {
+ const MachineInstr &DefMI,
+ const MCInstrDesc &DefMCID, unsigned DefAlign) {
int Adjust = 0;
if (Subtarget.isCortexA8() || Subtarget.isLikeA9() || Subtarget.isCortexA7()) {
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
// variants are one cycle cheaper.
- switch (DefMCID->getOpcode()) {
+ switch (DefMCID.getOpcode()) {
default: break;
case ARM::LDRrs:
case ARM::LDRBrs: {
- unsigned ShOpVal = DefMI->getOperand(3).getImm();
+ unsigned ShOpVal = DefMI.getOperand(3).getImm();
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
if (ShImm == 0 ||
(ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
@@ -3482,7 +3515,7 @@ static int adjustDefLatency(const ARMSubtarget &Subtarget,
case ARM::t2LDRHs:
case ARM::t2LDRSHs: {
// Thumb2 mode: lsl only.
- unsigned ShAmt = DefMI->getOperand(3).getImm();
+ unsigned ShAmt = DefMI.getOperand(3).getImm();
if (ShAmt == 0 || ShAmt == 2)
--Adjust;
break;
@@ -3491,11 +3524,11 @@ static int adjustDefLatency(const ARMSubtarget &Subtarget,
} else if (Subtarget.isSwift()) {
// FIXME: Properly handle all of the latency adjustments for address
// writeback.
- switch (DefMCID->getOpcode()) {
+ switch (DefMCID.getOpcode()) {
default: break;
case ARM::LDRrs:
case ARM::LDRBrs: {
- unsigned ShOpVal = DefMI->getOperand(3).getImm();
+ unsigned ShOpVal = DefMI.getOperand(3).getImm();
bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
if (!isSub &&
@@ -3513,7 +3546,7 @@ static int adjustDefLatency(const ARMSubtarget &Subtarget,
case ARM::t2LDRHs:
case ARM::t2LDRSHs: {
// Thumb2 mode: lsl only.
- unsigned ShAmt = DefMI->getOperand(3).getImm();
+ unsigned ShAmt = DefMI.getOperand(3).getImm();
if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
Adjust -= 2;
break;
@@ -3521,8 +3554,8 @@ static int adjustDefLatency(const ARMSubtarget &Subtarget,
}
}
- if (DefAlign < 8 && Subtarget.isLikeA9()) {
- switch (DefMCID->getOpcode()) {
+ if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {
+ switch (DefMCID.getOpcode()) {
default: break;
case ARM::VLD1q8:
case ARM::VLD1q16:
@@ -3637,53 +3670,55 @@ static int adjustDefLatency(const ARMSubtarget &Subtarget,
return Adjust;
}
-
-
-int
-ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI,
- unsigned UseIdx) const {
+int ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr &DefMI,
+ unsigned DefIdx,
+ const MachineInstr &UseMI,
+ unsigned UseIdx) const {
// No operand latency. The caller may fall back to getInstrLatency.
if (!ItinData || ItinData->isEmpty())
return -1;
- const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
+ const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
unsigned Reg = DefMO.getReg();
- const MCInstrDesc *DefMCID = &DefMI->getDesc();
- const MCInstrDesc *UseMCID = &UseMI->getDesc();
+ const MachineInstr *ResolvedDefMI = &DefMI;
unsigned DefAdj = 0;
- if (DefMI->isBundle()) {
- DefMI = getBundledDefMI(&getRegisterInfo(), DefMI, Reg, DefIdx, DefAdj);
- DefMCID = &DefMI->getDesc();
- }
- if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
- DefMI->isRegSequence() || DefMI->isImplicitDef()) {
+ if (DefMI.isBundle())
+ ResolvedDefMI =
+ getBundledDefMI(&getRegisterInfo(), &DefMI, Reg, DefIdx, DefAdj);
+ if (ResolvedDefMI->isCopyLike() || ResolvedDefMI->isInsertSubreg() ||
+ ResolvedDefMI->isRegSequence() || ResolvedDefMI->isImplicitDef()) {
return 1;
}
+ const MachineInstr *ResolvedUseMI = &UseMI;
unsigned UseAdj = 0;
- if (UseMI->isBundle()) {
- unsigned NewUseIdx;
- const MachineInstr *NewUseMI = getBundledUseMI(&getRegisterInfo(), UseMI,
- Reg, NewUseIdx, UseAdj);
- if (!NewUseMI)
+ if (UseMI.isBundle()) {
+ ResolvedUseMI =
+ getBundledUseMI(&getRegisterInfo(), UseMI, Reg, UseIdx, UseAdj);
+ if (!ResolvedUseMI)
return -1;
-
- UseMI = NewUseMI;
- UseIdx = NewUseIdx;
- UseMCID = &UseMI->getDesc();
}
+ return getOperandLatencyImpl(
+ ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->getDesc(), DefAdj, DefMO,
+ Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->getDesc(), UseAdj);
+}
+
+int ARMBaseInstrInfo::getOperandLatencyImpl(
+ const InstrItineraryData *ItinData, const MachineInstr &DefMI,
+ unsigned DefIdx, const MCInstrDesc &DefMCID, unsigned DefAdj,
+ const MachineOperand &DefMO, unsigned Reg, const MachineInstr &UseMI,
+ unsigned UseIdx, const MCInstrDesc &UseMCID, unsigned UseAdj) const {
if (Reg == ARM::CPSR) {
- if (DefMI->getOpcode() == ARM::FMSTAT) {
+ if (DefMI.getOpcode() == ARM::FMSTAT) {
// fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
return Subtarget.isLikeA9() ? 1 : 20;
}
// CPSR set and branch can be paired in the same cycle.
- if (UseMI->isBranch())
+ if (UseMI.isBranch())
return 0;
// Otherwise it takes the instruction latency (generally one).
@@ -3694,7 +3729,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
// incur a code size penalty (not able to use the CPSR setting 16-bit
// instructions).
if (Latency > 0 && Subtarget.isThumb2()) {
- const MachineFunction *MF = DefMI->getParent()->getParent();
+ const MachineFunction *MF = DefMI.getParent()->getParent();
// FIXME: Use Function::optForSize().
if (MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
--Latency;
@@ -3702,17 +3737,19 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
return Latency;
}
- if (DefMO.isImplicit() || UseMI->getOperand(UseIdx).isImplicit())
+ if (DefMO.isImplicit() || UseMI.getOperand(UseIdx).isImplicit())
return -1;
- unsigned DefAlign = DefMI->hasOneMemOperand()
- ? (*DefMI->memoperands_begin())->getAlignment() : 0;
- unsigned UseAlign = UseMI->hasOneMemOperand()
- ? (*UseMI->memoperands_begin())->getAlignment() : 0;
+ unsigned DefAlign = DefMI.hasOneMemOperand()
+ ? (*DefMI.memoperands_begin())->getAlignment()
+ : 0;
+ unsigned UseAlign = UseMI.hasOneMemOperand()
+ ? (*UseMI.memoperands_begin())->getAlignment()
+ : 0;
// Get the itinerary's latency if possible, and handle variable_ops.
- int Latency = getOperandLatency(ItinData, *DefMCID, DefIdx, DefAlign,
- *UseMCID, UseIdx, UseAlign);
+ int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, UseMCID,
+ UseIdx, UseAlign);
// Unable to find operand latency. The caller may resort to getInstrLatency.
if (Latency < 0)
return Latency;
@@ -3746,10 +3783,9 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
if (!UseNode->isMachineOpcode()) {
int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
- if (Subtarget.isLikeA9() || Subtarget.isSwift())
- return Latency <= 2 ? 1 : Latency - 1;
- else
- return Latency <= 3 ? 1 : Latency - 2;
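+    // Adj == 1 reproduces the old A9/Swift case above and Adj == 2 the old
+    // default; the subtarget now supplies the adjustment directly.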
+ int Adj = Subtarget.getPreISelOperandLatencyAdjustment();
+ int Threshold = 1 + Adj;
+ return Latency <= Threshold ? 1 : Latency - Adj;
}
const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
@@ -3820,7 +3856,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
}
}
- if (DefAlign < 8 && Subtarget.isLikeA9())
+ if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
switch (DefMCID.getOpcode()) {
default: break;
case ARM::VLD1q8:
@@ -3946,15 +3982,15 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
return Latency;
}
-unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr *MI) const {
- if (MI->isCopyLike() || MI->isInsertSubreg() ||
- MI->isRegSequence() || MI->isImplicitDef())
+unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr &MI) const {
+ if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() ||
+ MI.isImplicitDef())
return 0;
- if (MI->isBundle())
+ if (MI.isBundle())
return 0;
- const MCInstrDesc &MCID = MI->getDesc();
+ const MCInstrDesc &MCID = MI.getDesc();
if (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR)) {
// When predicated, CPSR is an additional source operand for CPSR updating
@@ -3965,26 +4001,26 @@ unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr *MI) const {
}
unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
- const MachineInstr *MI,
+ const MachineInstr &MI,
unsigned *PredCost) const {
- if (MI->isCopyLike() || MI->isInsertSubreg() ||
- MI->isRegSequence() || MI->isImplicitDef())
+ if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() ||
+ MI.isImplicitDef())
return 1;
   // An instruction scheduler typically runs on unbundled instructions;
   // however, other passes may query the latency of a bundled instruction.
- if (MI->isBundle()) {
+ if (MI.isBundle()) {
unsigned Latency = 0;
- MachineBasicBlock::const_instr_iterator I = MI->getIterator();
- MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
+ MachineBasicBlock::const_instr_iterator I = MI.getIterator();
+ MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
while (++I != E && I->isInsideBundle()) {
if (I->getOpcode() != ARM::t2IT)
- Latency += getInstrLatency(ItinData, &*I, PredCost);
+ Latency += getInstrLatency(ItinData, *I, PredCost);
}
return Latency;
}
- const MCInstrDesc &MCID = MI->getDesc();
+ const MCInstrDesc &MCID = MI.getDesc();
if (PredCost && (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR))) {
// When predicated, CPSR is an additional source operand for CPSR updating
// instructions, this apparently increases their latencies.
@@ -3993,7 +4029,7 @@ unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
// Be sure to call getStageLatency for an empty itinerary in case it has a
// valid MinLatency property.
if (!ItinData)
- return MI->mayLoad() ? 3 : 1;
+ return MI.mayLoad() ? 3 : 1;
unsigned Class = MCID.getSchedClass();
@@ -4005,9 +4041,9 @@ unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
unsigned Latency = ItinData->getStageLatency(Class);
// Adjust for dynamic def-side opcode variants not captured by the itinerary.
- unsigned DefAlign = MI->hasOneMemOperand()
- ? (*MI->memoperands_begin())->getAlignment() : 0;
- int Adj = adjustDefLatency(Subtarget, MI, &MCID, DefAlign);
+ unsigned DefAlign =
+ MI.hasOneMemOperand() ? (*MI.memoperands_begin())->getAlignment() : 0;
+ int Adj = adjustDefLatency(Subtarget, MI, MCID, DefAlign);
if (Adj >= 0 || (int)Latency > -Adj) {
return Latency + Adj;
}
@@ -4032,46 +4068,46 @@ int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
}
}
-bool ARMBaseInstrInfo::
-hasHighOperandLatency(const TargetSchedModel &SchedModel,
- const MachineRegisterInfo *MRI,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI, unsigned UseIdx) const {
- unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
- unsigned UDomain = UseMI->getDesc().TSFlags & ARMII::DomainMask;
- if (Subtarget.isCortexA8() &&
+bool ARMBaseInstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
+ const MachineRegisterInfo *MRI,
+ const MachineInstr &DefMI,
+ unsigned DefIdx,
+ const MachineInstr &UseMI,
+ unsigned UseIdx) const {
+ unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask;
+ unsigned UDomain = UseMI.getDesc().TSFlags & ARMII::DomainMask;
+ if (Subtarget.nonpipelinedVFP() &&
(DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP))
- // CortexA8 VFP instructions are not pipelined.
return true;
   // Hoist VFP / NEON instructions with a latency of 4 or higher.
- unsigned Latency
- = SchedModel.computeOperandLatency(DefMI, DefIdx, UseMI, UseIdx);
+ unsigned Latency =
+ SchedModel.computeOperandLatency(&DefMI, DefIdx, &UseMI, UseIdx);
if (Latency <= 3)
return false;
return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON;
}
-bool ARMBaseInstrInfo::
-hasLowDefLatency(const TargetSchedModel &SchedModel,
- const MachineInstr *DefMI, unsigned DefIdx) const {
+bool ARMBaseInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
+ const MachineInstr &DefMI,
+ unsigned DefIdx) const {
const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
if (!ItinData || ItinData->isEmpty())
return false;
- unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
+ unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask;
if (DDomain == ARMII::DomainGeneral) {
- unsigned DefClass = DefMI->getDesc().getSchedClass();
+ unsigned DefClass = DefMI.getDesc().getSchedClass();
int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
return (DefCycle != -1 && DefCycle <= 2);
}
return false;
}
-bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr *MI,
+bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const {
- if (convertAddSubFlagsOpcode(MI->getOpcode())) {
+ if (convertAddSubFlagsOpcode(MI.getOpcode())) {
ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG";
return false;
}
@@ -4082,8 +4118,7 @@ bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr *MI,
// sequence is needed for other targets.
void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI,
unsigned LoadImmOpc,
- unsigned LoadOpc,
- Reloc::Model RM) const {
+ unsigned LoadOpc) const {
MachineBasicBlock &MBB = *MI->getParent();
DebugLoc DL = MI->getDebugLoc();
unsigned Reg = MI->getOperand(0).getReg();
@@ -4094,12 +4129,12 @@ void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI,
BuildMI(MBB, MI, DL, get(LoadImmOpc), Reg)
.addGlobalAddress(GV, 0, ARMII::MO_NONLAZY);
- if (Subtarget.GVIsIndirectSymbol(GV, RM)) {
+ if (Subtarget.isGVIndirectSymbol(GV)) {
MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
MIB.addReg(Reg, RegState::Kill).addImm(0);
- unsigned Flag = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
+ auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
- MachinePointerInfo::getGOT(*MBB.getParent()), Flag, 4, 4);
+ MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, 4);
MIB.addMemOperand(MMO);
AddDefaultPred(MIB);
}
@@ -4146,24 +4181,24 @@ enum ARMExeDomain {
// Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h
//
std::pair<uint16_t, uint16_t>
-ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const {
+ARMBaseInstrInfo::getExecutionDomain(const MachineInstr &MI) const {
// If we don't have access to NEON instructions then we won't be able
// to swizzle anything to the NEON domain. Check to make sure.
if (Subtarget.hasNEON()) {
// VMOVD, VMOVRS and VMOVSR are VFP instructions, but can be changed to NEON
// if they are not predicated.
- if (MI->getOpcode() == ARM::VMOVD && !isPredicated(MI))
+ if (MI.getOpcode() == ARM::VMOVD && !isPredicated(MI))
return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON));
// CortexA9 is particularly picky about mixing the two and wants these
// converted.
- if (Subtarget.isCortexA9() && !isPredicated(MI) &&
- (MI->getOpcode() == ARM::VMOVRS || MI->getOpcode() == ARM::VMOVSR ||
- MI->getOpcode() == ARM::VMOVS))
+ if (Subtarget.useNEONForFPMovs() && !isPredicated(MI) &&
+ (MI.getOpcode() == ARM::VMOVRS || MI.getOpcode() == ARM::VMOVSR ||
+ MI.getOpcode() == ARM::VMOVS))
return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON));
}
// No other instructions can be swizzled, so just determine their domain.
- unsigned Domain = MI->getDesc().TSFlags & ARMII::DomainMask;
+ unsigned Domain = MI.getDesc().TSFlags & ARMII::DomainMask;
if (Domain & ARMII::DomainNEON)
return std::make_pair(ExeNEON, 0);
@@ -4210,12 +4245,11 @@ static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI,
/// (including the case where the DPR itself is defined), it should not.
///
static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI,
- MachineInstr *MI,
- unsigned DReg, unsigned Lane,
- unsigned &ImplicitSReg) {
+ MachineInstr &MI, unsigned DReg,
+ unsigned Lane, unsigned &ImplicitSReg) {
// If the DPR is defined or used already, the other SPR lane will be chained
// correctly, so there is nothing to be done.
- if (MI->definesRegister(DReg, TRI) || MI->readsRegister(DReg, TRI)) {
+ if (MI.definesRegister(DReg, TRI) || MI.readsRegister(DReg, TRI)) {
ImplicitSReg = 0;
return true;
}
@@ -4224,7 +4258,7 @@ static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI,
ImplicitSReg = TRI->getSubReg(DReg,
(Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
MachineBasicBlock::LivenessQueryResult LQR =
- MI->getParent()->computeRegisterLiveness(TRI, ImplicitSReg, MI);
+ MI.getParent()->computeRegisterLiveness(TRI, ImplicitSReg, MI);
if (LQR == MachineBasicBlock::LQR_Live)
return true;
@@ -4237,106 +4271,105 @@ static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI,
return true;
}
-void
-ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
+void ARMBaseInstrInfo::setExecutionDomain(MachineInstr &MI,
+ unsigned Domain) const {
unsigned DstReg, SrcReg, DReg;
unsigned Lane;
- MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
+ MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
const TargetRegisterInfo *TRI = &getRegisterInfo();
- switch (MI->getOpcode()) {
- default:
- llvm_unreachable("cannot handle opcode!");
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("cannot handle opcode!");
+ break;
+ case ARM::VMOVD:
+ if (Domain != ExeNEON)
break;
- case ARM::VMOVD:
- if (Domain != ExeNEON)
- break;
- // Zap the predicate operands.
- assert(!isPredicated(MI) && "Cannot predicate a VORRd");
+ // Zap the predicate operands.
+ assert(!isPredicated(MI) && "Cannot predicate a VORRd");
- // Make sure we've got NEON instructions.
- assert(Subtarget.hasNEON() && "VORRd requires NEON");
+ // Make sure we've got NEON instructions.
+ assert(Subtarget.hasNEON() && "VORRd requires NEON");
- // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits)
- DstReg = MI->getOperand(0).getReg();
- SrcReg = MI->getOperand(1).getReg();
+ // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits)
+ DstReg = MI.getOperand(0).getReg();
+ SrcReg = MI.getOperand(1).getReg();
- for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
- MI->RemoveOperand(i-1);
+ for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
+ MI.RemoveOperand(i - 1);
- // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits)
- MI->setDesc(get(ARM::VORRd));
- AddDefaultPred(MIB.addReg(DstReg, RegState::Define)
- .addReg(SrcReg)
- .addReg(SrcReg));
+ // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits)
+ MI.setDesc(get(ARM::VORRd));
+ AddDefaultPred(
+ MIB.addReg(DstReg, RegState::Define).addReg(SrcReg).addReg(SrcReg));
+ break;
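In assembly terms, the VMOVD case above turns a VFP-domain register move into the equivalent NEON-domain logical move. A sketch with illustrative registers:

  // Before (VFP domain):   vmov.f64 d0, d1
  // After  (NEON domain):  vorr     d0, d1, d1
  // VORR with both sources set to the same register copies it bit-for-bit,
  // so only the execution domain changes, not the value.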
+ case ARM::VMOVRS:
+ if (Domain != ExeNEON)
break;
- case ARM::VMOVRS:
- if (Domain != ExeNEON)
- break;
- assert(!isPredicated(MI) && "Cannot predicate a VGETLN");
+ assert(!isPredicated(MI) && "Cannot predicate a VGETLN");
- // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits)
- DstReg = MI->getOperand(0).getReg();
- SrcReg = MI->getOperand(1).getReg();
+ // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits)
+ DstReg = MI.getOperand(0).getReg();
+ SrcReg = MI.getOperand(1).getReg();
- for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
- MI->RemoveOperand(i-1);
+ for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
+ MI.RemoveOperand(i - 1);
- DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane);
+ DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane);
- // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps)
- // Note that DSrc has been widened and the other lane may be undef, which
- // contaminates the entire register.
- MI->setDesc(get(ARM::VGETLNi32));
- AddDefaultPred(MIB.addReg(DstReg, RegState::Define)
- .addReg(DReg, RegState::Undef)
- .addImm(Lane));
+ // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps)
+ // Note that DSrc has been widened and the other lane may be undef, which
+ // contaminates the entire register.
+ MI.setDesc(get(ARM::VGETLNi32));
+ AddDefaultPred(MIB.addReg(DstReg, RegState::Define)
+ .addReg(DReg, RegState::Undef)
+ .addImm(Lane));
- // The old source should be an implicit use, otherwise we might think it
- // was dead before here.
- MIB.addReg(SrcReg, RegState::Implicit);
+ // The old source should be an implicit use, otherwise we might think it
+ // was dead before here.
+ MIB.addReg(SrcReg, RegState::Implicit);
+ break;
+ case ARM::VMOVSR: {
+ if (Domain != ExeNEON)
break;
- case ARM::VMOVSR: {
- if (Domain != ExeNEON)
- break;
- assert(!isPredicated(MI) && "Cannot predicate a VSETLN");
+ assert(!isPredicated(MI) && "Cannot predicate a VSETLN");
- // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits)
- DstReg = MI->getOperand(0).getReg();
- SrcReg = MI->getOperand(1).getReg();
+ // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits)
+ DstReg = MI.getOperand(0).getReg();
+ SrcReg = MI.getOperand(1).getReg();
- DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane);
+ DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane);
- unsigned ImplicitSReg;
- if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg))
- break;
+ unsigned ImplicitSReg;
+ if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg))
+ break;
- for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
- MI->RemoveOperand(i-1);
+ for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
+ MI.RemoveOperand(i - 1);
- // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps)
- // Again DDst may be undefined at the beginning of this instruction.
- MI->setDesc(get(ARM::VSETLNi32));
- MIB.addReg(DReg, RegState::Define)
- .addReg(DReg, getUndefRegState(!MI->readsRegister(DReg, TRI)))
- .addReg(SrcReg)
- .addImm(Lane);
- AddDefaultPred(MIB);
+ // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps)
+ // Again DDst may be undefined at the beginning of this instruction.
+ MI.setDesc(get(ARM::VSETLNi32));
+ MIB.addReg(DReg, RegState::Define)
+ .addReg(DReg, getUndefRegState(!MI.readsRegister(DReg, TRI)))
+ .addReg(SrcReg)
+ .addImm(Lane);
+ AddDefaultPred(MIB);
- // The narrower destination must be marked as set to keep previous chains
- // in place.
- MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
- if (ImplicitSReg != 0)
- MIB.addReg(ImplicitSReg, RegState::Implicit);
- break;
+ // The narrower destination must be marked as set to keep previous chains
+ // in place.
+ MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
+ if (ImplicitSReg != 0)
+ MIB.addReg(ImplicitSReg, RegState::Implicit);
+ break;
}
case ARM::VMOVS: {
if (Domain != ExeNEON)
break;
// Source instruction is %SDst = VMOVS %SSrc, 14, %noreg (; implicits)
- DstReg = MI->getOperand(0).getReg();
- SrcReg = MI->getOperand(1).getReg();
+ DstReg = MI.getOperand(0).getReg();
+ SrcReg = MI.getOperand(1).getReg();
unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
DDst = getCorrespondingDRegAndLane(TRI, DstReg, DstLane);
@@ -4346,16 +4379,16 @@ ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
if (!getImplicitSPRUseForDPRUse(TRI, MI, DSrc, SrcLane, ImplicitSReg))
break;
- for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
- MI->RemoveOperand(i-1);
+ for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
+ MI.RemoveOperand(i - 1);
if (DSrc == DDst) {
// Destination can be:
// %DDst = VDUPLN32d %DDst, Lane, 14, %noreg (; implicits)
- MI->setDesc(get(ARM::VDUPLN32d));
+ MI.setDesc(get(ARM::VDUPLN32d));
MIB.addReg(DDst, RegState::Define)
- .addReg(DDst, getUndefRegState(!MI->readsRegister(DDst, TRI)))
- .addImm(SrcLane);
+ .addReg(DDst, getUndefRegState(!MI.readsRegister(DDst, TRI)))
+ .addImm(SrcLane);
AddDefaultPred(MIB);
    // Neither the source nor the destination is naturally represented any
@@ -4380,18 +4413,18 @@ ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
// Pattern of the MachineInstrs is:
// %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (;implicits)
MachineInstrBuilder NewMIB;
- NewMIB = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
- get(ARM::VEXTd32), DDst);
+ NewMIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::VEXTd32),
+ DDst);
// On the first instruction, both DSrc and DDst may be <undef> if present.
// Specifically when the original instruction didn't have them as an
// <imp-use>.
unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
- bool CurUndef = !MI->readsRegister(CurReg, TRI);
+ bool CurUndef = !MI.readsRegister(CurReg, TRI);
NewMIB.addReg(CurReg, getUndefRegState(CurUndef));
CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
- CurUndef = !MI->readsRegister(CurReg, TRI);
+ CurUndef = !MI.readsRegister(CurReg, TRI);
NewMIB.addReg(CurReg, getUndefRegState(CurUndef));
NewMIB.addImm(1);
@@ -4400,17 +4433,17 @@ ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
if (SrcLane == DstLane)
NewMIB.addReg(SrcReg, RegState::Implicit);
- MI->setDesc(get(ARM::VEXTd32));
+ MI.setDesc(get(ARM::VEXTd32));
MIB.addReg(DDst, RegState::Define);
// On the second instruction, DDst has definitely been defined above, so
// it is not <undef>. DSrc, if present, can be <undef> as above.
CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
- CurUndef = CurReg == DSrc && !MI->readsRegister(CurReg, TRI);
+ CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
MIB.addReg(CurReg, getUndefRegState(CurUndef));
CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
- CurUndef = CurReg == DSrc && !MI->readsRegister(CurReg, TRI);
+ CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
MIB.addReg(CurReg, getUndefRegState(CurUndef));
MIB.addImm(1);
@@ -4446,24 +4479,23 @@ ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
// VLD1DUPd32 - Writes all D-regs, no partial reg update, 2 uops.
//
// FCONSTD can be used as a dependency-breaking instruction.
-unsigned ARMBaseInstrInfo::
-getPartialRegUpdateClearance(const MachineInstr *MI,
- unsigned OpNum,
- const TargetRegisterInfo *TRI) const {
- if (!SwiftPartialUpdateClearance ||
- !(Subtarget.isSwift() || Subtarget.isCortexA15()))
+unsigned ARMBaseInstrInfo::getPartialRegUpdateClearance(
+ const MachineInstr &MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {
+ auto PartialUpdateClearance = Subtarget.getPartialUpdateClearance();
+ if (!PartialUpdateClearance)
return 0;
assert(TRI && "Need TRI instance");
- const MachineOperand &MO = MI->getOperand(OpNum);
+ const MachineOperand &MO = MI.getOperand(OpNum);
if (MO.readsReg())
return 0;
unsigned Reg = MO.getReg();
int UseOp = -1;
- switch(MI->getOpcode()) {
- // Normal instructions writing only an S-register.
+ switch (MI.getOpcode()) {
+ // Normal instructions writing only an S-register.
case ARM::VLDRS:
case ARM::FCONSTS:
case ARM::VMOVSR:
@@ -4472,7 +4504,7 @@ getPartialRegUpdateClearance(const MachineInstr *MI,
case ARM::VMOVv2i32:
case ARM::VMOVv2f32:
case ARM::VMOVv1i64:
- UseOp = MI->findRegisterUseOperandIdx(Reg, false, TRI);
+ UseOp = MI.findRegisterUseOperandIdx(Reg, false, TRI);
break;
// Explicitly reads the dependency.
@@ -4485,37 +4517,35 @@ getPartialRegUpdateClearance(const MachineInstr *MI,
// If this instruction actually reads a value from Reg, there is no unwanted
// dependency.
- if (UseOp != -1 && MI->getOperand(UseOp).readsReg())
+ if (UseOp != -1 && MI.getOperand(UseOp).readsReg())
return 0;
// We must be able to clobber the whole D-reg.
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
// Virtual register must be a foo:ssub_0<def,undef> operand.
- if (!MO.getSubReg() || MI->readsVirtualRegister(Reg))
+ if (!MO.getSubReg() || MI.readsVirtualRegister(Reg))
return 0;
} else if (ARM::SPRRegClass.contains(Reg)) {
// Physical register: MI must define the full D-reg.
unsigned DReg = TRI->getMatchingSuperReg(Reg, ARM::ssub_0,
&ARM::DPRRegClass);
- if (!DReg || !MI->definesRegister(DReg, TRI))
+ if (!DReg || !MI.definesRegister(DReg, TRI))
return 0;
}
// MI has an unwanted D-register dependency.
  // Avoid defs in the previous N instructions.
- return SwiftPartialUpdateClearance;
+ return PartialUpdateClearance;
}
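The hazard this clearance models: an instruction that writes only an S-register partially updates the containing D-register, and so carries a false dependency on any older writer of that D-register. A sketch with illustrative registers:

  // vadd.f64 d0, d2, d3   ; older writer of the full d0
  // vldr     s0, [r0]     ; writes only half of d0 -> false dependency on d0
  //
  // If fewer than PartialUpdateClearance instructions separate the two, the
  // pass below inserts a cheap full-width def of d0 to cut the dependency.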
// Break a partial register dependency after getPartialRegUpdateClearance
// returned non-zero.
-void ARMBaseInstrInfo::
-breakPartialRegDependency(MachineBasicBlock::iterator MI,
- unsigned OpNum,
- const TargetRegisterInfo *TRI) const {
- assert(MI && OpNum < MI->getDesc().getNumDefs() && "OpNum is not a def");
+void ARMBaseInstrInfo::breakPartialRegDependency(
+ MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
+ assert(OpNum < MI.getDesc().getNumDefs() && "OpNum is not a def");
assert(TRI && "Need TRI instance");
- const MachineOperand &MO = MI->getOperand(OpNum);
+ const MachineOperand &MO = MI.getOperand(OpNum);
unsigned Reg = MO.getReg();
assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
"Can't break virtual register dependencies.");
@@ -4528,7 +4558,7 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI,
}
assert(ARM::DPRRegClass.contains(DReg) && "Can only break D-reg deps");
- assert(MI->definesRegister(DReg, TRI) && "MI doesn't clobber full D-reg");
+ assert(MI.definesRegister(DReg, TRI) && "MI doesn't clobber full D-reg");
// FIXME: In some cases, VLDRS can be changed to a VLD1DUPd32 which defines
// the full D-register by loading the same value to both lanes. The
@@ -4538,9 +4568,10 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI,
// Insert the dependency-breaking FCONSTD before MI.
// 96 is the encoding of 0.5, but the actual value doesn't matter here.
- AddDefaultPred(BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
- get(ARM::FCONSTD), DReg).addImm(96));
- MI->addRegisterKilled(DReg, TRI, true);
+ AddDefaultPred(
+ BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::FCONSTD), DReg)
+ .addImm(96));
+ MI.addRegisterKilled(DReg, TRI, true);
}
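For reference, 96 decodes to 0.5 under the VFP modified-immediate rule (the ARM ARM's VFPExpandImm). A minimal, self-contained sketch of that decoding for doubles; illustrative code, not an LLVM API:

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  // Expand an 8-bit VFP modified immediate to a double (VFPExpandImm, N=64).
  static double vfpExpandImm8(uint8_t Imm8) {
    uint64_t Sign = (Imm8 >> 7) & 1;
    uint64_t B = (Imm8 >> 6) & 1;
    // Exponent is NOT(b) : b replicated 8 times : imm8<5:4>  (11 bits).
    uint64_t Exp =
        ((B ^ 1) << 10) | ((B ? 0xFFull : 0ull) << 2) | ((Imm8 >> 4) & 3);
    // Fraction is imm8<3:0> followed by 48 zero bits.
    uint64_t Frac = (uint64_t)(Imm8 & 0xF) << 48;
    uint64_t Bits = (Sign << 63) | (Exp << 52) | Frac;
    double D;
    std::memcpy(&D, &Bits, sizeof(D));
    return D;
  }

  int main() { std::printf("%g\n", vfpExpandImm8(96)); } // prints 0.5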
bool ARMBaseInstrInfo::hasNOP() const {
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.h b/lib/Target/ARM/ARMBaseInstrInfo.h
index d80c49494c77..52b0ff17dea2 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -36,8 +36,7 @@ protected:
explicit ARMBaseInstrInfo(const ARMSubtarget &STI);
void expandLoadStackGuardBase(MachineBasicBlock::iterator MI,
- unsigned LoadImmOpc, unsigned LoadOpc,
- Reloc::Model RM) const;
+ unsigned LoadImmOpc, unsigned LoadOpc) const;
/// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
/// and \p DefIdx.
@@ -93,8 +92,7 @@ protected:
/// non-commutable pair of operand indices OpIdx1 and OpIdx2.
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands, null pointer is returned in such cases.
- MachineInstr *commuteInstructionImpl(MachineInstr *MI,
- bool NewMI,
+ MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const override;
@@ -107,7 +105,7 @@ public:
virtual unsigned getUnindexedOpcode(unsigned Opc) const =0;
MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
+ MachineInstr &MI,
LiveVariables *LV) const override;
virtual const ARMBaseRegisterInfo &getRegisterInfo() const = 0;
@@ -122,49 +120,49 @@ public:
const ScheduleDAG *DAG) const override;
// Branch analysis.
- bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify = false) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const override;
+ const DebugLoc &DL) const override;
bool
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
// Predication support.
- bool isPredicated(const MachineInstr *MI) const override;
+ bool isPredicated(const MachineInstr &MI) const override;
- ARMCC::CondCodes getPredicate(const MachineInstr *MI) const {
- int PIdx = MI->findFirstPredOperandIdx();
- return PIdx != -1 ? (ARMCC::CondCodes)MI->getOperand(PIdx).getImm()
+ ARMCC::CondCodes getPredicate(const MachineInstr &MI) const {
+ int PIdx = MI.findFirstPredOperandIdx();
+ return PIdx != -1 ? (ARMCC::CondCodes)MI.getOperand(PIdx).getImm()
: ARMCC::AL;
}
- bool PredicateInstruction(MachineInstr *MI,
- ArrayRef<MachineOperand> Pred) const override;
+ bool PredicateInstruction(MachineInstr &MI,
+ ArrayRef<MachineOperand> Pred) const override;
bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
ArrayRef<MachineOperand> Pred2) const override;
- bool DefinesPredicate(MachineInstr *MI,
+ bool DefinesPredicate(MachineInstr &MI,
std::vector<MachineOperand> &Pred) const override;
- bool isPredicable(MachineInstr *MI) const override;
+ bool isPredicable(MachineInstr &MI) const override;
/// GetInstSize - Returns the size of the specified MachineInstr.
///
- virtual unsigned GetInstSizeInBytes(const MachineInstr* MI) const;
+ virtual unsigned GetInstSizeInBytes(const MachineInstr &MI) const;
- unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
- unsigned isStoreToStackSlot(const MachineInstr *MI,
+ unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
- unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
+ unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const override;
- unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
+ unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const override;
void copyToCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
@@ -175,7 +173,7 @@ public:
const ARMSubtarget &Subtarget) const;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -190,21 +188,21 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
- bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
+ bool expandPostRAPseudo(MachineInstr &MI) const override;
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
- const MachineInstr *Orig,
+ const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const override;
- MachineInstr *duplicate(MachineInstr *Orig,
+ MachineInstr *duplicate(MachineInstr &Orig,
MachineFunction &MF) const override;
const MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
unsigned SubIdx, unsigned State,
const TargetRegisterInfo *TRI) const;
- bool produceSameValue(const MachineInstr *MI0, const MachineInstr *MI1,
+ bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1,
const MachineRegisterInfo *MRI) const override;
/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
@@ -227,7 +225,7 @@ public:
int64_t Offset1, int64_t Offset2,
unsigned NumLoads) const override;
- bool isSchedulingBoundary(const MachineInstr *MI,
+ bool isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const override;
@@ -252,7 +250,7 @@ public:
/// in SrcReg and SrcReg2 if having two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
- bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+ bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const override;
@@ -260,30 +258,29 @@ public:
/// that we can remove a "comparison with zero"; Remove a redundant CMP
/// instruction if the flags can be updated in the same way by an earlier
/// instruction such as SUB.
- bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
+ bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int CmpMask, int CmpValue,
const MachineRegisterInfo *MRI) const override;
- bool analyzeSelect(const MachineInstr *MI,
- SmallVectorImpl<MachineOperand> &Cond,
- unsigned &TrueOp, unsigned &FalseOp,
- bool &Optimizable) const override;
+ bool analyzeSelect(const MachineInstr &MI,
+ SmallVectorImpl<MachineOperand> &Cond, unsigned &TrueOp,
+ unsigned &FalseOp, bool &Optimizable) const override;
- MachineInstr *optimizeSelect(MachineInstr *MI,
+ MachineInstr *optimizeSelect(MachineInstr &MI,
SmallPtrSetImpl<MachineInstr *> &SeenMIs,
bool) const override;
/// FoldImmediate - 'Reg' is known to be defined by a move immediate
/// instruction, try to fold the immediate into the use instruction.
- bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
- unsigned Reg, MachineRegisterInfo *MRI) const override;
+ bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, unsigned Reg,
+ MachineRegisterInfo *MRI) const override;
unsigned getNumMicroOps(const InstrItineraryData *ItinData,
- const MachineInstr *MI) const override;
+ const MachineInstr &MI) const override;
int getOperandLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI,
+ const MachineInstr &DefMI, unsigned DefIdx,
+ const MachineInstr &UseMI,
unsigned UseIdx) const override;
int getOperandLatency(const InstrItineraryData *ItinData,
SDNode *DefNode, unsigned DefIdx,
@@ -291,19 +288,20 @@ public:
/// VFP/NEON execution domains.
std::pair<uint16_t, uint16_t>
- getExecutionDomain(const MachineInstr *MI) const override;
- void setExecutionDomain(MachineInstr *MI, unsigned Domain) const override;
+ getExecutionDomain(const MachineInstr &MI) const override;
+ void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override;
- unsigned getPartialRegUpdateClearance(const MachineInstr*, unsigned,
- const TargetRegisterInfo*) const override;
- void breakPartialRegDependency(MachineBasicBlock::iterator, unsigned,
+ unsigned
+ getPartialRegUpdateClearance(const MachineInstr &, unsigned,
+ const TargetRegisterInfo *) const override;
+ void breakPartialRegDependency(MachineInstr &, unsigned,
const TargetRegisterInfo *TRI) const override;
/// Get the number of addresses by LDM or VLDM or zero for unknown.
- unsigned getNumLDMAddresses(const MachineInstr *MI) const;
+ unsigned getNumLDMAddresses(const MachineInstr &MI) const;
private:
- unsigned getInstBundleLength(const MachineInstr *MI) const;
+ unsigned getInstBundleLength(const MachineInstr &MI) const;
int getVLDMDefCycle(const InstrItineraryData *ItinData,
const MCInstrDesc &DefMCID,
@@ -327,10 +325,17 @@ private:
const MCInstrDesc &UseMCID,
unsigned UseIdx, unsigned UseAlign) const;
- unsigned getPredicationCost(const MachineInstr *MI) const override;
+ int getOperandLatencyImpl(const InstrItineraryData *ItinData,
+ const MachineInstr &DefMI, unsigned DefIdx,
+ const MCInstrDesc &DefMCID, unsigned DefAdj,
+ const MachineOperand &DefMO, unsigned Reg,
+ const MachineInstr &UseMI, unsigned UseIdx,
+ const MCInstrDesc &UseMCID, unsigned UseAdj) const;
+
+ unsigned getPredicationCost(const MachineInstr &MI) const override;
unsigned getInstrLatency(const InstrItineraryData *ItinData,
- const MachineInstr *MI,
+ const MachineInstr &MI,
unsigned *PredCost = nullptr) const override;
int getInstrLatency(const InstrItineraryData *ItinData,
@@ -338,19 +343,18 @@ private:
bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
const MachineRegisterInfo *MRI,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI,
+ const MachineInstr &DefMI, unsigned DefIdx,
+ const MachineInstr &UseMI,
unsigned UseIdx) const override;
bool hasLowDefLatency(const TargetSchedModel &SchedModel,
- const MachineInstr *DefMI,
+ const MachineInstr &DefMI,
unsigned DefIdx) const override;
/// verifyInstruction - Perform target specific instruction verification.
- bool verifyInstruction(const MachineInstr *MI,
+ bool verifyInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const override;
- virtual void expandLoadStackGuard(MachineBasicBlock::iterator MI,
- Reloc::Model RM) const = 0;
+ virtual void expandLoadStackGuard(MachineBasicBlock::iterator MI) const = 0;
void expandMEMCPY(MachineBasicBlock::iterator) const;
@@ -447,7 +451,7 @@ static inline bool isPushOpcode(int Opc) {
/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
-ARMCC::CondCodes getInstrPredicate(const MachineInstr *MI, unsigned &PredReg);
+ARMCC::CondCodes getInstrPredicate(const MachineInstr &MI, unsigned &PredReg);
unsigned getMatchingCondBranchOpcode(unsigned Opc);
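Call sites migrate from pointers to references accordingly; the typical shape, mirroring the uses in ARMConstantIslandPass.cpp below:

  // unsigned PredReg = 0;
  // ARMCC::CondCodes Pred = getInstrPredicate(*Br.MI, PredReg);
  // if (Pred == ARMCC::AL)
  //   ...; // unpredicated (or predicated on AL)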
@@ -466,21 +470,24 @@ unsigned convertAddSubFlagsOpcode(unsigned OldOpc);
/// instructions to materialize a destreg = basereg + immediate in ARM / Thumb2
/// code.
void emitARMRegPlusImmediate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI, DebugLoc dl,
- unsigned DestReg, unsigned BaseReg, int NumBytes,
+ MachineBasicBlock::iterator &MBBI,
+ const DebugLoc &dl, unsigned DestReg,
+ unsigned BaseReg, int NumBytes,
ARMCC::CondCodes Pred, unsigned PredReg,
const ARMBaseInstrInfo &TII, unsigned MIFlags = 0);
void emitT2RegPlusImmediate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI, DebugLoc dl,
- unsigned DestReg, unsigned BaseReg, int NumBytes,
+ MachineBasicBlock::iterator &MBBI,
+ const DebugLoc &dl, unsigned DestReg,
+ unsigned BaseReg, int NumBytes,
ARMCC::CondCodes Pred, unsigned PredReg,
const ARMBaseInstrInfo &TII, unsigned MIFlags = 0);
void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI, DebugLoc dl,
- unsigned DestReg, unsigned BaseReg,
- int NumBytes, const TargetInstrInfo &TII,
- const ARMBaseRegisterInfo& MRI,
+ MachineBasicBlock::iterator &MBBI,
+ const DebugLoc &dl, unsigned DestReg,
+ unsigned BaseReg, int NumBytes,
+ const TargetInstrInfo &TII,
+ const ARMBaseRegisterInfo &MRI,
unsigned MIFlags = 0);
/// Tries to add registers to the reglist of a given base-updating
diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index a5207705fc69..aa968efc37d4 100644
--- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -49,12 +49,9 @@ ARMBaseRegisterInfo::ARMBaseRegisterInfo()
: ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC), BasePtr(ARM::R6) {}
static unsigned getFramePointerReg(const ARMSubtarget &STI) {
- if (STI.isTargetMachO()) {
- if (STI.isTargetDarwin() || STI.isThumb1Only())
- return ARM::R7;
- else
- return ARM::R11;
- } else if (STI.isTargetWindows())
+ if (STI.isTargetMachO())
+ return ARM::R7;
+ else if (STI.isTargetWindows())
return ARM::R11;
else // ARM EABI
return STI.isThumb() ? ARM::R7 : ARM::R11;
@@ -63,8 +60,11 @@ static unsigned getFramePointerReg(const ARMSubtarget &STI) {
const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
+ bool UseSplitPush = STI.splitFramePushPop();
const MCPhysReg *RegList =
- STI.isTargetDarwin() ? CSR_iOS_SaveList : CSR_AAPCS_SaveList;
+ STI.isTargetDarwin()
+ ? CSR_iOS_SaveList
+ : (UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList);
const Function *F = MF->getFunction();
if (F->getCallingConv() == CallingConv::GHC) {
@@ -75,7 +75,7 @@ ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
if (STI.isMClass()) {
// M-class CPUs have hardware which saves the registers needed to allow a
// function conforming to the AAPCS to function as a handler.
- return CSR_AAPCS_SaveList;
+ return UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList;
} else if (F->getFnAttribute("interrupt").getValueAsString() == "FIQ") {
// Fast interrupt mode gives the handler a private copy of R8-R14, so less
// need to be saved to restore user-mode state.
@@ -87,6 +87,10 @@ ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
}
}
+ if (STI.isTargetDarwin() && STI.getTargetLowering()->supportSwiftError() &&
+ F->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
+ return CSR_iOS_SwiftError_SaveList;
+
if (STI.isTargetDarwin() && F->getCallingConv() == CallingConv::CXX_FAST_TLS)
return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
? CSR_iOS_CXX_TLS_PE_SaveList
@@ -110,6 +114,11 @@ ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
return CSR_NoRegs_RegMask;
+
+ if (STI.isTargetDarwin() && STI.getTargetLowering()->supportSwiftError() &&
+ MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
+ return CSR_iOS_SwiftError_RegMask;
+
if (STI.isTargetDarwin() && CC == CallingConv::CXX_FAST_TLS)
return CSR_iOS_CXX_TLS_RegMask;
return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
@@ -167,9 +176,8 @@ getReservedRegs(const MachineFunction &MF) const {
Reserved.set(ARM::R9);
// Reserve D16-D31 if the subtarget doesn't support them.
if (!STI.hasVFP3() || STI.hasD16()) {
- assert(ARM::D31 == ARM::D16 + 15);
- for (unsigned i = 0; i != 16; ++i)
- Reserved.set(ARM::D16 + i);
+ static_assert(ARM::D31 == ARM::D16 + 15, "Register list not consecutive!");
+ Reserved.set(ARM::D16, ARM::D31 + 1);
}
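BitVector::set(I, E) fills the half-open range [I, E), which is why the call above needs ARM::D31 + 1 to cover D31 itself. A minimal illustration, assuming the LLVM ADT headers:

  #include "llvm/ADT/BitVector.h"

  void example() {
    llvm::BitVector Reserved(256);
    Reserved.set(16, 32); // sets bits 16..31 inclusive; bit 32 stays clear
  }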
const TargetRegisterClass *RC = &ARM::GPRPairRegClass;
for(TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); I!=E; ++I)
@@ -400,13 +408,10 @@ ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
-void ARMBaseRegisterInfo::
-emitLoadConstPool(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- DebugLoc dl,
- unsigned DestReg, unsigned SubIdx, int Val,
- ARMCC::CondCodes Pred,
- unsigned PredReg, unsigned MIFlags) const {
+void ARMBaseRegisterInfo::emitLoadConstPool(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ const DebugLoc &dl, unsigned DestReg, unsigned SubIdx, int Val,
+ ARMCC::CondCodes Pred, unsigned PredReg, unsigned MIFlags) const {
MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
MachineConstantPool *ConstantPool = MF.getConstantPool();
diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.h b/lib/Target/ARM/ARMBaseRegisterInfo.h
index 6a9a45a65687..1eee94857e05 100644
--- a/lib/Target/ARM/ARMBaseRegisterInfo.h
+++ b/lib/Target/ARM/ARMBaseRegisterInfo.h
@@ -166,12 +166,12 @@ public:
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
- virtual void emitLoadConstPool(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- DebugLoc dl, unsigned DestReg, unsigned SubIdx,
- int Val, ARMCC::CondCodes Pred = ARMCC::AL,
- unsigned PredReg = 0,
- unsigned MIFlags = MachineInstr::NoFlags)const;
+ virtual void
+ emitLoadConstPool(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ const DebugLoc &dl, unsigned DestReg, unsigned SubIdx,
+ int Val, ARMCC::CondCodes Pred = ARMCC::AL,
+ unsigned PredReg = 0,
+ unsigned MIFlags = MachineInstr::NoFlags) const;
/// Code Generation virtual methods...
bool requiresRegisterScavenging(const MachineFunction &MF) const override;
diff --git a/lib/Target/ARM/ARMCallingConv.h b/lib/Target/ARM/ARMCallingConv.h
index a731d00883a1..71b819362404 100644
--- a/lib/Target/ARM/ARMCallingConv.h
+++ b/lib/Target/ARM/ARMCallingConv.h
@@ -211,7 +211,7 @@ static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned &ValNo, MVT &ValVT,
// First consume all registers that would give an unaligned object. Whether
// we go on stack or in regs, no-one will be using them in future.
- unsigned RegAlign = RoundUpToAlignment(Align, 4) / 4;
+ unsigned RegAlign = alignTo(Align, 4) / 4;
while (RegIdx % RegAlign != 0 && RegIdx < RegList.size())
State.AllocateReg(RegList[RegIdx++]);
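alignTo rounds its first argument up to the nearest multiple of the second, so RegAlign is the aggregate's alignment measured in 4-byte registers. A sketch of the arithmetic with hypothetical values:

  #include <cstdint>

  // What alignTo computes (the renamed RoundUpToAlignment).
  static uint64_t roundUp(uint64_t Value, uint64_t Align) {
    return (Value + Align - 1) / Align * Align;
  }

  // For an 8-byte-aligned aggregate: roundUp(8, 4) / 4 == 2, so the loop
  // above burns registers until RegIdx is even (r0/r2-style pairing).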
diff --git a/lib/Target/ARM/ARMCallingConv.td b/lib/Target/ARM/ARMCallingConv.td
index 847ef87c1b26..edb69581b9d3 100644
--- a/lib/Target/ARM/ARMCallingConv.td
+++ b/lib/Target/ARM/ARMCallingConv.td
@@ -23,6 +23,12 @@ def CC_ARM_APCS : CallingConv<[
CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
+ // Pass SwiftSelf in a callee saved register.
+ CCIfSwiftSelf<CCIfType<[i32], CCAssignToReg<[R10]>>>,
+
+ // A SwiftError is passed in R6.
+ CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R6]>>>,
+
// Handle all vector types as either f64 or v2f64.
CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
@@ -42,6 +48,12 @@ def RetCC_ARM_APCS : CallingConv<[
CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
CCIfType<[f32], CCBitConvertToType<i32>>,
+ // Pass SwiftSelf in a callee saved register.
+ CCIfSwiftSelf<CCIfType<[i32], CCAssignToReg<[R10]>>>,
+
+ // A SwiftError is returned in R6.
+ CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R6]>>>,
+
// Handle all vector types as either f64 or v2f64.
CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
@@ -151,6 +163,12 @@ def CC_ARM_AAPCS : CallingConv<[
CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
+ // Pass SwiftSelf in a callee saved register.
+ CCIfSwiftSelf<CCIfType<[i32], CCAssignToReg<[R10]>>>,
+
+ // A SwiftError is passed in R6.
+ CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R6]>>>,
+
CCIfType<[f64, v2f64], CCCustom<"CC_ARM_AAPCS_Custom_f64">>,
CCIfType<[f32], CCBitConvertToType<i32>>,
CCDelegateTo<CC_ARM_AAPCS_Common>
@@ -161,6 +179,12 @@ def RetCC_ARM_AAPCS : CallingConv<[
CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
+ // Pass SwiftSelf in a callee saved register.
+ CCIfSwiftSelf<CCIfType<[i32], CCAssignToReg<[R10]>>>,
+
+ // A SwiftError is returned in R6.
+ CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R6]>>>,
+
CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_AAPCS_Custom_f64">>,
CCIfType<[f32], CCBitConvertToType<i32>>,
CCDelegateTo<RetCC_ARM_AAPCS_Common>
@@ -179,6 +203,12 @@ def CC_ARM_AAPCS_VFP : CallingConv<[
CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
+ // Pass SwiftSelf in a callee saved register.
+ CCIfSwiftSelf<CCIfType<[i32], CCAssignToReg<[R10]>>>,
+
+ // A SwiftError is passed in R6.
+ CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R6]>>>,
+
// HFAs are passed in a contiguous block of registers, or on the stack
CCIfConsecutiveRegs<CCCustom<"CC_ARM_AAPCS_Custom_Aggregate">>,
@@ -194,6 +224,12 @@ def RetCC_ARM_AAPCS_VFP : CallingConv<[
CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
+ // Pass SwiftSelf in a callee saved register.
+ CCIfSwiftSelf<CCIfType<[i32], CCAssignToReg<[R10]>>>,
+
+ // A SwiftError is returned in R6.
+ CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R6]>>>,
+
CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>,
CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7, S8,
@@ -210,6 +246,14 @@ def CSR_NoRegs : CalleeSavedRegs<(add)>;
def CSR_AAPCS : CalleeSavedRegs<(add LR, R11, R10, R9, R8, R7, R6, R5, R4,
(sequence "D%u", 15, 8))>;
+// The order of callee-saved registers needs to match the order we actually push
+// them in FrameLowering, because this order is what's used by
+// PrologEpilogInserter to allocate frame index slots. So when R7 is the frame
+// pointer, we use this AAPCS alternative.
+def CSR_AAPCS_SplitPush : CalleeSavedRegs<(add LR, R7, R6, R5, R4,
+ R11, R10, R9, R8,
+ (sequence "D%u", 15, 8))>;
+
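The list above mirrors a prologue that pushes GPRs in two batches when R7 is the frame pointer; a sketch of the corresponding instruction sequence (illustrative, the actual registers depend on which CSRs the function clobbers):

  // push  {r4, r5, r6, r7, lr}   ; spill area 1: LR, R7, R6, R5, R4
  // push  {r8, r9, r10, r11}     ; spill area 2: R11, R10, R9, R8
  // vpush {d8-d15}               ; FP callee-saves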
// Constructors and destructors return 'this' in the ARM C++ ABI; since 'this'
// and the pointer return value are both passed in R0 in these cases, this can
// be partially modelled by treating R0 as a callee-saved register
@@ -222,6 +266,9 @@ def CSR_AAPCS_ThisReturn : CalleeSavedRegs<(add LR, R11, R10, R9, R8, R7, R6,
// Also save R7-R4 first to match the stack frame fixed spill areas.
def CSR_iOS : CalleeSavedRegs<(add LR, R7, R6, R5, R4, (sub CSR_AAPCS, R9))>;
+// R6 is used to pass swifterror, so remove it from the CSR set.
+def CSR_iOS_SwiftError : CalleeSavedRegs<(sub CSR_iOS, R6)>;
+
def CSR_iOS_ThisReturn : CalleeSavedRegs<(add LR, R7, R6, R5, R4,
(sub CSR_AAPCS_ThisReturn, R9))>;
@@ -235,10 +282,11 @@ def CSR_iOS_CXX_TLS : CalleeSavedRegs<(add CSR_iOS, (sequence "R%u", 12, 1),
(sequence "D%u", 31, 0))>;
// CSRs that are handled by prologue, epilogue.
-def CSR_iOS_CXX_TLS_PE : CalleeSavedRegs<(add LR)>;
+def CSR_iOS_CXX_TLS_PE : CalleeSavedRegs<(add LR, R12, R11, R7, R5, R4)>;
// CSRs that are handled explicitly via copies.
-def CSR_iOS_CXX_TLS_ViaCopy : CalleeSavedRegs<(sub CSR_iOS_CXX_TLS, LR)>;
+def CSR_iOS_CXX_TLS_ViaCopy : CalleeSavedRegs<(sub CSR_iOS_CXX_TLS,
+ CSR_iOS_CXX_TLS_PE)>;
// The "interrupt" attribute is used to generate code that is acceptable in
// exception-handlers of various kinds. It makes us use a different return
diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp
index 55c1684028c2..8511f67dccd5 100644
--- a/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -53,6 +53,11 @@ static cl::opt<bool>
AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true),
cl::desc("Adjust basic block layout to better use TB[BH]"));
+static cl::opt<unsigned>
+CPMaxIteration("arm-constant-island-max-iteration", cl::Hidden, cl::init(30),
+               cl::desc("Maximum number of iterations for the pass to converge"));
+
+
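Being a cl::opt, the limit can be tuned from the llc command line even though cl::Hidden keeps it out of plain -help output; for example, with a hypothetical input file:

  llc -mtriple=armv7-- -arm-constant-island-max-iteration=50 big.ll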
/// UnknownPadding - Return the worst case padding that could result from
/// unknown offset bits. This does not include alignment padding caused by
/// known offset bits.
@@ -274,6 +279,11 @@ namespace {
bool runOnMachineFunction(MachineFunction &MF) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return "ARM constant island placement and branch shortening pass";
}
@@ -293,10 +303,10 @@ namespace {
unsigned getCombinedIndex(const MachineInstr *CPEMI);
int findInRangeCPEntry(CPUser& U, unsigned UserOffset);
bool findAvailableWater(CPUser&U, unsigned UserOffset,
- water_iterator &WaterIter);
+ water_iterator &WaterIter, bool CloserWater);
void createNewWater(unsigned CPUserIndex, unsigned UserOffset,
MachineBasicBlock *&NewMBB);
- bool handleConstantPoolUser(unsigned CPUserIndex);
+ bool handleConstantPoolUser(unsigned CPUserIndex, bool CloserWater);
void removeDeadCPEMI(MachineInstr *CPEMI);
bool removeUnusedCPEntries();
bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
@@ -456,8 +466,11 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
bool CPChange = false;
for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
- CPChange |= handleConstantPoolUser(i);
- if (CPChange && ++NoCPIters > 30)
+ // For most inputs the pass converges in no more than 5 iterations.
+ // If it has not converged after 10, the input likely has a huge basic
+ // block or many constant-pool entries; in that case, fall back to the
+ // closer-water heuristic.
+ CPChange |= handleConstantPoolUser(i, NoCPIters >= CPMaxIteration / 2);
+ if (CPChange && ++NoCPIters > CPMaxIteration)
report_fatal_error("Constant Island pass failed to converge!");
DEBUG(dumpBBs());
@@ -478,10 +491,18 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
MadeChange = true;
}
- // Shrink 32-bit Thumb2 branch, load, and store instructions.
+ // Shrink 32-bit Thumb2 load and store instructions.
if (isThumb2 && !STI->prefers32BitThumb())
MadeChange |= optimizeThumb2Instructions();
+ // Shrink 32-bit branch instructions.
+ if (isThumb && STI->hasV8MBaselineOps())
+ MadeChange |= optimizeThumb2Branches();
+
+ // Optimize jump tables using TBB / TBH.
+ if (isThumb2)
+ MadeChange |= optimizeThumb2JumpTables();
+
// After a while, this might be made debug-only, but it is not expensive.
verify();
@@ -654,7 +675,7 @@ bool ARMConstantIslands::BBHasFallthrough(MachineBasicBlock *MBB) {
// have an unconditional branch for whatever reason.
MachineBasicBlock *TBB, *FBB;
SmallVector<MachineOperand, 4> Cond;
- bool TooDifficult = TII->AnalyzeBranch(*MBB, TBB, FBB, Cond);
+ bool TooDifficult = TII->analyzeBranch(*MBB, TBB, FBB, Cond);
return TooDifficult || FBB == nullptr;
}
@@ -701,14 +722,10 @@ unsigned ARMConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
/// information about the sizes of each block and the locations of all
/// the jump tables.
void ARMConstantIslands::scanFunctionJumpTables() {
- for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
- MBBI != E; ++MBBI) {
- MachineBasicBlock &MBB = *MBBI;
-
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I)
- if (I->isBranch() && I->getOpcode() == ARM::t2BR_JT)
- T2JumpTables.push_back(I);
+ for (MachineBasicBlock &MBB : *MF) {
+ for (MachineInstr &I : MBB)
+ if (I.isBranch() && I.getOpcode() == ARM::t2BR_JT)
+ T2JumpTables.push_back(&I);
}
}
@@ -735,22 +752,18 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
adjustBBOffsetsAfter(&MF->front());
// Now go back through the instructions and build up our data structures.
- for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
- MBBI != E; ++MBBI) {
- MachineBasicBlock &MBB = *MBBI;
-
+ for (MachineBasicBlock &MBB : *MF) {
// If this block doesn't fall through into the next MBB, then this is
  // 'water' where a constant pool island could be placed.
if (!BBHasFallthrough(&MBB))
WaterList.push_back(&MBB);
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I) {
- if (I->isDebugValue())
+ for (MachineInstr &I : MBB) {
+ if (I.isDebugValue())
continue;
- unsigned Opc = I->getOpcode();
- if (I->isBranch()) {
+ unsigned Opc = I.getOpcode();
+ if (I.isBranch()) {
bool isCond = false;
unsigned Bits = 0;
unsigned Scale = 1;
@@ -759,7 +772,7 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
default:
continue; // Ignore other JT branches
case ARM::t2BR_JT:
- T2JumpTables.push_back(I);
+ T2JumpTables.push_back(&I);
continue; // Does not get an entry in ImmBranches
case ARM::Bcc:
isCond = true;
@@ -793,11 +806,11 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
// Record this immediate branch.
unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
- ImmBranches.push_back(ImmBranch(I, MaxOffs, isCond, UOpc));
+ ImmBranches.push_back(ImmBranch(&I, MaxOffs, isCond, UOpc));
}
if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
- PushPopMIs.push_back(I);
+ PushPopMIs.push_back(&I);
if (Opc == ARM::CONSTPOOL_ENTRY || Opc == ARM::JUMPTABLE_ADDRS ||
Opc == ARM::JUMPTABLE_INSTS || Opc == ARM::JUMPTABLE_TBB ||
@@ -805,8 +818,8 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
continue;
// Scan the instructions for constant pool operands.
- for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
- if (I->getOperand(op).isCPI() || I->getOperand(op).isJTI()) {
+ for (unsigned op = 0, e = I.getNumOperands(); op != e; ++op)
+ if (I.getOperand(op).isCPI() || I.getOperand(op).isJTI()) {
// We found one. The addressing mode tells us the max displacement
// from the PC that this instruction permits.
@@ -865,15 +878,15 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
}
// Remember that this is a user of a CP entry.
- unsigned CPI = I->getOperand(op).getIndex();
- if (I->getOperand(op).isJTI()) {
+ unsigned CPI = I.getOperand(op).getIndex();
+ if (I.getOperand(op).isJTI()) {
JumpTableUserIndices.insert(std::make_pair(CPI, CPUsers.size()));
CPI = JumpTableEntryIndices[CPI];
}
MachineInstr *CPEMI = CPEMIs[CPI];
unsigned MaxOffs = ((1 << Bits)-1) * Scale;
- CPUsers.push_back(CPUser(I, CPEMI, MaxOffs, NegOk, IsSoImm));
+ CPUsers.push_back(CPUser(&I, CPEMI, MaxOffs, NegOk, IsSoImm));
// Increment corresponding CPEntry reference count.
CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
@@ -896,15 +909,14 @@ void ARMConstantIslands::computeBlockSize(MachineBasicBlock *MBB) {
BBI.Unalign = 0;
BBI.PostAlign = 0;
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
- ++I) {
+ for (MachineInstr &I : *MBB) {
BBI.Size += TII->GetInstSizeInBytes(I);
// For inline asm, GetInstSizeInBytes returns a conservative estimate.
// The actual size may be smaller, but still a multiple of the instr size.
- if (I->isInlineAsm())
+ if (I.isInlineAsm())
BBI.Unalign = isThumb ? 1 : 2;
// Also consider instructions that may be shrunk later.
- else if (isThumb && mayOptimizeThumb2Instruction(I))
+ else if (isThumb && mayOptimizeThumb2Instruction(&I))
BBI.Unalign = 1;
}
@@ -929,7 +941,7 @@ unsigned ARMConstantIslands::getOffsetOf(MachineInstr *MI) const {
// Sum instructions before MI in MBB.
for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
assert(I != MBB->end() && "Didn't find MI in its own basic block?");
- Offset += TII->GetInstSizeInBytes(I);
+ Offset += TII->GetInstSizeInBytes(*I);
}
return Offset;
}
@@ -1108,7 +1120,7 @@ bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
Growth = CPEEnd - NextBlockOffset;
// Compute the padding that would go at the end of the CPE to align the next
// block.
- Growth += OffsetToAlignment(CPEEnd, 1u << NextBlockAlignment);
+ Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment);
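The widening of the literal matters because a shift is performed in the type of its left operand; a minimal illustration:

  #include <cstdint>

  uint64_t pow2(unsigned Log2) {
    // 1u << Log2 is a 32-bit shift: it cannot represent 2^32 and is
    // undefined behavior once Log2 >= 32. The 64-bit literal is safe for
    // any Log2 up to 63.
    return 1ULL << Log2;
  }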
// If the CPE is to be inserted before the instruction, that will raise
// the offset of the instruction. Also account for unknown alignment padding
@@ -1285,11 +1297,27 @@ static inline unsigned getUnconditionalBrDisp(int Opc) {
/// move to a lower address, so search backward from the end of the list and
/// prefer the first water that is in range.
bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
- water_iterator &WaterIter) {
+ water_iterator &WaterIter,
+ bool CloserWater) {
if (WaterList.empty())
return false;
unsigned BestGrowth = ~0u;
+ // The nearest water without splitting the UserBB is right after it.
+ // If the distance is still large (we have a big BB), then we need to
+ // split the BB if we don't converge after a certain number of
+ // iterations. This helps the following
+ // situation to converge:
+ // BB0:
+ // Big BB
+ // BB1:
+ // Constant Pool
+ // When a CP access is out of range, BB0 may be used as water. However,
+ // inserting islands between BB0 and BB1 makes other accesses out of range.
+ MachineBasicBlock *UserBB = U.MI->getParent();
+ unsigned MinNoSplitDisp =
+ BBInfo[UserBB->getNumber()].postOffset(getCPELogAlign(U.CPEMI));
+ if (CloserWater && MinNoSplitDisp > U.getMaxDisp() / 2)
+ return false;
for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
--IP) {
MachineBasicBlock* WaterBB = *IP;
@@ -1301,6 +1329,8 @@ bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
// should be relatively uncommon and when it does happen, we want to be
// sure to take advantage of it for all the CPEs near that block, so that
// we don't insert more branches than necessary.
+ // When CloserWater is true, we try to find the lowest address after (or
+ // equal to) user MI's BB, regardless of padding growth.
unsigned Growth;
if (isWaterInRange(UserOffset, WaterBB, U, Growth) &&
(WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
@@ -1312,8 +1342,11 @@ bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
DEBUG(dbgs() << "Found water after BB#" << WaterBB->getNumber()
<< " Growth=" << Growth << '\n');
- // Keep looking unless it is perfect.
- if (BestGrowth == 0)
+ if (CloserWater && WaterBB == U.MI->getParent())
+ return true;
+ // Keep looking unless it is perfect and we're not looking for the lowest
+ // possible address.
+ if (!CloserWater && BestGrowth == 0)
return true;
}
if (IP == B)
@@ -1416,7 +1449,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
// iterates at least once.
BaseInsertOffset =
std::max(UserBBI.postOffset() - UPad - 8,
- UserOffset + TII->GetInstSizeInBytes(UserMI) + 1);
+ UserOffset + TII->GetInstSizeInBytes(*UserMI) + 1);
DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
}
unsigned EndInsertOffset = BaseInsertOffset + 4 + UPad +
@@ -1426,11 +1459,11 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
unsigned CPUIndex = CPUserIndex+1;
unsigned NumCPUsers = CPUsers.size();
MachineInstr *LastIT = nullptr;
- for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
+ for (unsigned Offset = UserOffset + TII->GetInstSizeInBytes(*UserMI);
Offset < BaseInsertOffset;
- Offset += TII->GetInstSizeInBytes(MI), MI = std::next(MI)) {
+ Offset += TII->GetInstSizeInBytes(*MI), MI = std::next(MI)) {
assert(MI != UserMBB->end() && "Fell off end of block");
- if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
+ if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == &*MI) {
CPUser &U = CPUsers[CPUIndex];
if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
        // Shift insertion point by one unit of alignment so it is within reach.
@@ -1447,7 +1480,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
// Remember the last IT instruction.
if (MI->getOpcode() == ARM::t2IT)
- LastIT = MI;
+ LastIT = &*MI;
}
--MI;
@@ -1455,23 +1488,24 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
// Avoid splitting an IT block.
if (LastIT) {
unsigned PredReg = 0;
- ARMCC::CondCodes CC = getITInstrPredicate(MI, PredReg);
+ ARMCC::CondCodes CC = getITInstrPredicate(*MI, PredReg);
if (CC != ARMCC::AL)
MI = LastIT;
}
// We really must not split an IT block.
DEBUG(unsigned PredReg;
- assert(!isThumb || getITInstrPredicate(MI, PredReg) == ARMCC::AL));
+ assert(!isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL));
- NewMBB = splitBlockBeforeInstr(MI);
+ NewMBB = splitBlockBeforeInstr(&*MI);
}
/// handleConstantPoolUser - Analyze the specified user, checking to see if it
/// is out-of-range. If so, pick up the constant pool value and move it some
/// place in-range. Return true if we changed any addresses (thus must run
/// another pass of branch lengthening), false otherwise.
-bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
+bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
+ bool CloserWater) {
CPUser &U = CPUsers[CPUserIndex];
MachineInstr *UserMI = U.MI;
MachineInstr *CPEMI = U.CPEMI;
@@ -1494,7 +1528,7 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock();
MachineBasicBlock *NewMBB;
water_iterator IP;
- if (findAvailableWater(U, UserOffset, IP)) {
+ if (findAvailableWater(U, UserOffset, IP, CloserWater)) {
DEBUG(dbgs() << "Found water in range\n");
MachineBasicBlock *WaterBB = *IP;
@@ -1584,7 +1618,7 @@ void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
CPEBB->setAlignment(0);
} else
// Entries are sorted by descending alignment, so realign from the front.
- CPEBB->setAlignment(getCPELogAlign(CPEBB->begin()));
+ CPEBB->setAlignment(getCPELogAlign(&*CPEBB->begin()));
adjustBBOffsetsAfter(CPEBB);
// An island has only one predecessor BB and one successor BB. Check if
@@ -1728,7 +1762,7 @@ ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
splitBlockBeforeInstr(MI);
// No need for the branch to the next block. We're adding an unconditional
// branch to the destination.
- int delta = TII->GetInstSizeInBytes(&MBB->back());
+ int delta = TII->GetInstSizeInBytes(MBB->back());
BBInfo[MBB->getNumber()].Size -= delta;
MBB->back().eraseFromParent();
// BBInfo[SplitBB].Offset is wrong temporarily, fixed below
@@ -1744,18 +1778,18 @@ ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
.addMBB(NextBB).addImm(CC).addReg(CCReg);
Br.MI = &MBB->back();
- BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
+ BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(MBB->back());
if (isThumb)
BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB)
.addImm(ARMCC::AL).addReg(0);
else
BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
- BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
+ BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(MBB->back());
unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
// Remove the old conditional branch. It may or may not still be in MBB.
- BBInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(MI);
+ BBInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(*MI);
MI->eraseFromParent();
adjustBBOffsetsAfter(MBB);
return true;
@@ -1852,8 +1886,6 @@ bool ARMConstantIslands::optimizeThumb2Instructions() {
}
}
- MadeChange |= optimizeThumb2Branches();
- MadeChange |= optimizeThumb2JumpTables();
return MadeChange;
}
@@ -1910,7 +1942,7 @@ bool ARMConstantIslands::optimizeThumb2Branches() {
NewOpc = 0;
unsigned PredReg = 0;
- ARMCC::CondCodes Pred = getInstrPredicate(Br.MI, PredReg);
+ ARMCC::CondCodes Pred = getInstrPredicate(*Br.MI, PredReg);
if (Pred == ARMCC::EQ)
NewOpc = ARM::tCBZ;
else if (Pred == ARMCC::NE)
@@ -1928,7 +1960,7 @@ bool ARMConstantIslands::optimizeThumb2Branches() {
--CmpMI;
if (CmpMI->getOpcode() == ARM::tCMPi8) {
unsigned Reg = CmpMI->getOperand(0).getReg();
- Pred = getInstrPredicate(CmpMI, PredReg);
+ Pred = getInstrPredicate(*CmpMI, PredReg);
if (Pred == ARMCC::AL &&
CmpMI->getOperand(1).getImm() == 0 &&
isARMLowRegister(Reg)) {
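The rewrite being guarded here folds an unpredicated compare-with-zero into the branch; a sketch with an illustrative register:

  // Before:  cmp r3, #0       ; tCMPi8, unpredicated
  //          beq .Ltarget     ; conditional branch on EQ
  // After:   cbz r3, .Ltarget ; tCBZ: low register only, short forward range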
@@ -2170,8 +2202,8 @@ bool ARMConstantIslands::optimizeThumb2JumpTables() {
}
}
- unsigned NewSize = TII->GetInstSizeInBytes(NewJTMI);
- unsigned OrigSize = TII->GetInstSizeInBytes(MI);
+ unsigned NewSize = TII->GetInstSizeInBytes(*NewJTMI);
+ unsigned OrigSize = TII->GetInstSizeInBytes(*MI);
MI->eraseFromParent();
int Delta = OrigSize - NewSize + DeadSize;
@@ -2240,13 +2272,13 @@ adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
MachineFunction::iterator OldPrior = std::prev(BBi);
// If the block terminator isn't analyzable, don't try to move the block
- bool B = TII->AnalyzeBranch(*BB, TBB, FBB, Cond);
+ bool B = TII->analyzeBranch(*BB, TBB, FBB, Cond);
// If the block ends in an unconditional branch, move it. The prior block
// has to have an analyzable terminator for us to move this one. Be paranoid
// and make sure we're not trying to move the entry block of the function.
- if (!B && Cond.empty() && BB != MF->begin() &&
- !TII->AnalyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
+ if (!B && Cond.empty() && BB != &MF->front() &&
+ !TII->analyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
BB->moveAfter(JTBB);
OldPrior->updateTerminator();
BB->updateTerminator();
diff --git a/lib/Target/ARM/ARMConstantPoolValue.cpp b/lib/Target/ARM/ARMConstantPoolValue.cpp
index c9849b2605ea..c0db001cb6f1 100644
--- a/lib/Target/ARM/ARMConstantPoolValue.cpp
+++ b/lib/Target/ARM/ARMConstantPoolValue.cpp
@@ -50,11 +50,18 @@ const char *ARMConstantPoolValue::getModifierText() const {
switch (Modifier) {
// FIXME: Are these case sensitive? It'd be nice to lower-case all the
// strings if that's legal.
- case ARMCP::no_modifier: return "none";
- case ARMCP::TLSGD: return "tlsgd";
- case ARMCP::GOT_PREL: return "GOT_PREL";
- case ARMCP::GOTTPOFF: return "gottpoff";
- case ARMCP::TPOFF: return "tpoff";
+ case ARMCP::no_modifier:
+ return "none";
+ case ARMCP::TLSGD:
+ return "tlsgd";
+ case ARMCP::GOT_PREL:
+ return "GOT_PREL";
+ case ARMCP::GOTTPOFF:
+ return "gottpoff";
+ case ARMCP::TPOFF:
+ return "tpoff";
+ case ARMCP::SECREL:
+ return "secrel32";
}
llvm_unreachable("Unknown modifier!");
}
@@ -74,9 +81,9 @@ bool
ARMConstantPoolValue::hasSameValue(ARMConstantPoolValue *ACPV) {
if (ACPV->Kind == Kind &&
ACPV->PCAdjust == PCAdjust &&
- ACPV->Modifier == Modifier) {
- if (ACPV->LabelId == LabelId)
- return true;
+ ACPV->Modifier == Modifier &&
+ ACPV->LabelId == LabelId &&
+ ACPV->AddCurrentAddress == AddCurrentAddress) {
// Two PC relative constpool entries containing the same GV address or
// external symbols. FIXME: What about blockaddress?
if (Kind == ARMCP::CPValue || Kind == ARMCP::CPExtSymbol)
@@ -85,7 +92,7 @@ ARMConstantPoolValue::hasSameValue(ARMConstantPoolValue *ACPV) {
return false;
}
-void ARMConstantPoolValue::dump() const {
+LLVM_DUMP_METHOD void ARMConstantPoolValue::dump() const {
errs() << " " << *this;
}
diff --git a/lib/Target/ARM/ARMConstantPoolValue.h b/lib/Target/ARM/ARMConstantPoolValue.h
index 6b18a4e52878..c07331d71dad 100644
--- a/lib/Target/ARM/ARMConstantPoolValue.h
+++ b/lib/Target/ARM/ARMConstantPoolValue.h
@@ -37,11 +37,12 @@ namespace ARMCP {
};
enum ARMCPModifier {
- no_modifier,
- TLSGD,
- GOT_PREL,
- GOTTPOFF,
- TPOFF
+    no_modifier, ///< None
+    TLSGD,       ///< Thread Local Storage (General Dynamic Mode)
+    GOT_PREL,    ///< Global Offset Table, PC Relative
+    GOTTPOFF,    ///< Global Offset Table, Thread Pointer Offset
+    TPOFF,       ///< Thread Pointer Offset
+    SECREL,      ///< Section Relative (Windows TLS)
};
}
diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 56f3498e1204..56f5728ecfb8 100644
--- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -20,6 +20,7 @@
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -50,6 +51,11 @@ namespace {
bool runOnMachineFunction(MachineFunction &Fn) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return "ARM pseudo instruction expansion pass";
}
@@ -58,7 +64,8 @@ namespace {
void TransferImpOps(MachineInstr &OldMI,
MachineInstrBuilder &UseMI, MachineInstrBuilder &DefMI);
bool ExpandMI(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI);
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI);
bool ExpandMBB(MachineBasicBlock &MBB);
void ExpandVLD(MachineBasicBlock::iterator &MBBI);
void ExpandVST(MachineBasicBlock::iterator &MBBI);
@@ -67,6 +74,14 @@ namespace {
unsigned Opc, bool IsExt);
void ExpandMOV32BitImm(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI);
+ bool ExpandCMP_SWAP(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, unsigned LdrexOp,
+ unsigned StrexOp, unsigned UxtOp,
+ MachineBasicBlock::iterator &NextMBBI);
+
+ bool ExpandCMP_SWAP_64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI);
};
char ARMExpandPseudo::ID = 0;
}
@@ -651,7 +666,7 @@ void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB,
MachineInstr &MI = *MBBI;
unsigned Opcode = MI.getOpcode();
unsigned PredReg = 0;
- ARMCC::CondCodes Pred = getInstrPredicate(&MI, PredReg);
+ ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
unsigned DstReg = MI.getOperand(0).getReg();
bool DstIsDead = MI.getOperand(0).isDead();
bool isCC = Opcode == ARM::MOVCCi32imm || Opcode == ARM::t2MOVCCi32imm;
@@ -737,8 +752,242 @@ void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB,
MI.eraseFromParent();
}
+static void addPostLoopLiveIns(MachineBasicBlock *MBB, LivePhysRegs &LiveRegs) {
+ for (auto I = LiveRegs.begin(); I != LiveRegs.end(); ++I)
+ MBB->addLiveIn(*I);
+}
+
+/// Expand a CMP_SWAP pseudo-inst to an ldrex/strex loop as simply as
+/// possible. This is only used at -O0, so the efficiency of the generated
+/// code is not a concern.
+bool ARMExpandPseudo::ExpandCMP_SWAP(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned LdrexOp, unsigned StrexOp,
+ unsigned UxtOp,
+ MachineBasicBlock::iterator &NextMBBI) {
+ bool IsThumb = STI->isThumb();
+ MachineInstr &MI = *MBBI;
+ DebugLoc DL = MI.getDebugLoc();
+ MachineOperand &Dest = MI.getOperand(0);
+ unsigned StatusReg = MI.getOperand(1).getReg();
+ MachineOperand &Addr = MI.getOperand(2);
+ MachineOperand &Desired = MI.getOperand(3);
+ MachineOperand &New = MI.getOperand(4);
+
+ LivePhysRegs LiveRegs(&TII->getRegisterInfo());
+ LiveRegs.addLiveOuts(MBB);
+ for (auto I = std::prev(MBB.end()); I != MBBI; --I)
+ LiveRegs.stepBackward(*I);
+
+ MachineFunction *MF = MBB.getParent();
+ auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+ MF->insert(++MBB.getIterator(), LoadCmpBB);
+ MF->insert(++LoadCmpBB->getIterator(), StoreBB);
+ MF->insert(++StoreBB->getIterator(), DoneBB);
+
+ if (UxtOp) {
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, DL, TII->get(UxtOp), Desired.getReg())
+ .addReg(Desired.getReg(), RegState::Kill);
+ if (!IsThumb)
+ MIB.addImm(0);
+ AddDefaultPred(MIB);
+ }
+
+ // .Lloadcmp:
+ // ldrex rDest, [rAddr]
+ // cmp rDest, rDesired
+ // bne .Ldone
+ LoadCmpBB->addLiveIn(Addr.getReg());
+ LoadCmpBB->addLiveIn(Dest.getReg());
+ LoadCmpBB->addLiveIn(Desired.getReg());
+ addPostLoopLiveIns(LoadCmpBB, LiveRegs);
+
+ MachineInstrBuilder MIB;
+ MIB = BuildMI(LoadCmpBB, DL, TII->get(LdrexOp), Dest.getReg());
+ MIB.addReg(Addr.getReg());
+ if (LdrexOp == ARM::t2LDREX)
+ MIB.addImm(0); // Only the 32-bit Thumb ldrex allows an immediate offset.
+ AddDefaultPred(MIB);
+
+ unsigned CMPrr = IsThumb ? ARM::tCMPhir : ARM::CMPrr;
+ AddDefaultPred(BuildMI(LoadCmpBB, DL, TII->get(CMPrr))
+ .addReg(Dest.getReg(), getKillRegState(Dest.isDead()))
+ .addOperand(Desired));
+ unsigned Bcc = IsThumb ? ARM::tBcc : ARM::Bcc;
+ BuildMI(LoadCmpBB, DL, TII->get(Bcc))
+ .addMBB(DoneBB)
+ .addImm(ARMCC::NE)
+ .addReg(ARM::CPSR, RegState::Kill);
+ LoadCmpBB->addSuccessor(DoneBB);
+ LoadCmpBB->addSuccessor(StoreBB);
+
+ // .Lstore:
+ // strex rStatus, rNew, [rAddr]
+ // cmp rStatus, #0
+ // bne .Lloadcmp
+ StoreBB->addLiveIn(Addr.getReg());
+ StoreBB->addLiveIn(New.getReg());
+ addPostLoopLiveIns(StoreBB, LiveRegs);
+
+ MIB = BuildMI(StoreBB, DL, TII->get(StrexOp), StatusReg);
+ MIB.addOperand(New);
+ MIB.addOperand(Addr);
+ if (StrexOp == ARM::t2STREX)
+ MIB.addImm(0); // Only the 32-bit Thumb strex allows an immediate offset.
+ AddDefaultPred(MIB);
+
+ unsigned CMPri = IsThumb ? ARM::t2CMPri : ARM::CMPri;
+ AddDefaultPred(BuildMI(StoreBB, DL, TII->get(CMPri))
+ .addReg(StatusReg, RegState::Kill)
+ .addImm(0));
+ BuildMI(StoreBB, DL, TII->get(Bcc))
+ .addMBB(LoadCmpBB)
+ .addImm(ARMCC::NE)
+ .addReg(ARM::CPSR, RegState::Kill);
+ StoreBB->addSuccessor(LoadCmpBB);
+ StoreBB->addSuccessor(DoneBB);
+
+ DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
+ DoneBB->transferSuccessors(&MBB);
+ addPostLoopLiveIns(DoneBB, LiveRegs);
+
+ MBB.addSuccessor(LoadCmpBB);
+
+ NextMBBI = MBB.end();
+ MI.eraseFromParent();
+ return true;
+}
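A minimal model of the loop this expansion emits, written against Clang's ARM exclusive-access builtins purely for illustration (the pass itself builds the three machine basic blocks directly):

    #include <cstdint>
    // Semantics of the emitted .Lloadcmp/.Lstore/.Ldone blocks for the 32-bit
    // case: Dest receives the loaded value; the return value reflects whether
    // the swap happened.
    static bool cmpSwap32(uint32_t *Addr, uint32_t Desired, uint32_t New,
                          uint32_t &Dest) {
      do {
        Dest = __builtin_arm_ldrex(Addr);       // .Lloadcmp: exclusive load
        if (Dest != Desired)                    // cmp rDest, rDesired
          return false;                         // bne .Ldone
      } while (__builtin_arm_strex(New, Addr)); // .Lstore: 0 = success, 1 = retry
      return true;
    }

This compiles only when targeting ARM; on other hosts treat it as pseudocode.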
+
+/// ARM's ldrexd/strexd take a consecutive register pair (represented as a
+/// single GPRPair register); Thumb's take two separate registers, so we need
+/// to extract the subregs from the pair.
+static void addExclusiveRegPair(MachineInstrBuilder &MIB, MachineOperand &Reg,
+ unsigned Flags, bool IsThumb,
+ const TargetRegisterInfo *TRI) {
+ if (IsThumb) {
+ unsigned RegLo = TRI->getSubReg(Reg.getReg(), ARM::gsub_0);
+ unsigned RegHi = TRI->getSubReg(Reg.getReg(), ARM::gsub_1);
+ MIB.addReg(RegLo, Flags | getKillRegState(Reg.isDead()));
+ MIB.addReg(RegHi, Flags | getKillRegState(Reg.isDead()));
+ } else
+ MIB.addReg(Reg.getReg(), Flags | getKillRegState(Reg.isDead()));
+}
+
+/// Expand a 64-bit CMP_SWAP to an ldrexd/strexd loop.
+bool ARMExpandPseudo::ExpandCMP_SWAP_64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI) {
+ bool IsThumb = STI->isThumb();
+ MachineInstr &MI = *MBBI;
+ DebugLoc DL = MI.getDebugLoc();
+ MachineOperand &Dest = MI.getOperand(0);
+ unsigned StatusReg = MI.getOperand(1).getReg();
+ MachineOperand &Addr = MI.getOperand(2);
+ MachineOperand &Desired = MI.getOperand(3);
+ MachineOperand &New = MI.getOperand(4);
+
+ unsigned DestLo = TRI->getSubReg(Dest.getReg(), ARM::gsub_0);
+ unsigned DestHi = TRI->getSubReg(Dest.getReg(), ARM::gsub_1);
+ unsigned DesiredLo = TRI->getSubReg(Desired.getReg(), ARM::gsub_0);
+ unsigned DesiredHi = TRI->getSubReg(Desired.getReg(), ARM::gsub_1);
+
+ LivePhysRegs LiveRegs(&TII->getRegisterInfo());
+ LiveRegs.addLiveOuts(MBB);
+ for (auto I = std::prev(MBB.end()); I != MBBI; --I)
+ LiveRegs.stepBackward(*I);
+
+ MachineFunction *MF = MBB.getParent();
+ auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+ MF->insert(++MBB.getIterator(), LoadCmpBB);
+ MF->insert(++LoadCmpBB->getIterator(), StoreBB);
+ MF->insert(++StoreBB->getIterator(), DoneBB);
+
+ // .Lloadcmp:
+ // ldrexd rDestLo, rDestHi, [rAddr]
+ // cmp rDestLo, rDesiredLo
+ // sbcs rStatus<dead>, rDestHi, rDesiredHi
+ // bne .Ldone
+ LoadCmpBB->addLiveIn(Addr.getReg());
+ LoadCmpBB->addLiveIn(Dest.getReg());
+ LoadCmpBB->addLiveIn(Desired.getReg());
+ addPostLoopLiveIns(LoadCmpBB, LiveRegs);
+
+ unsigned LDREXD = IsThumb ? ARM::t2LDREXD : ARM::LDREXD;
+ MachineInstrBuilder MIB;
+ MIB = BuildMI(LoadCmpBB, DL, TII->get(LDREXD));
+ addExclusiveRegPair(MIB, Dest, RegState::Define, IsThumb, TRI);
+ MIB.addReg(Addr.getReg());
+ AddDefaultPred(MIB);
+
+ unsigned CMPrr = IsThumb ? ARM::tCMPhir : ARM::CMPrr;
+ AddDefaultPred(BuildMI(LoadCmpBB, DL, TII->get(CMPrr))
+ .addReg(DestLo, getKillRegState(Dest.isDead()))
+ .addReg(DesiredLo, getKillRegState(Desired.isDead())));
+
+ unsigned SBCrr = IsThumb ? ARM::t2SBCrr : ARM::SBCrr;
+ MIB = BuildMI(LoadCmpBB, DL, TII->get(SBCrr))
+ .addReg(StatusReg, RegState::Define | RegState::Dead)
+ .addReg(DestHi, getKillRegState(Dest.isDead()))
+ .addReg(DesiredHi, getKillRegState(Desired.isDead()));
+ AddDefaultPred(MIB);
+ MIB.addReg(ARM::CPSR, RegState::Kill);
+
+ unsigned Bcc = IsThumb ? ARM::tBcc : ARM::Bcc;
+ BuildMI(LoadCmpBB, DL, TII->get(Bcc))
+ .addMBB(DoneBB)
+ .addImm(ARMCC::NE)
+ .addReg(ARM::CPSR, RegState::Kill);
+ LoadCmpBB->addSuccessor(DoneBB);
+ LoadCmpBB->addSuccessor(StoreBB);
+
+ // .Lstore:
+ // strexd rStatus, rNewLo, rNewHi, [rAddr]
+ // cmp rStatus, #0
+ // bne .Lloadcmp
+ StoreBB->addLiveIn(Addr.getReg());
+ StoreBB->addLiveIn(New.getReg());
+ addPostLoopLiveIns(StoreBB, LiveRegs);
+
+ unsigned STREXD = IsThumb ? ARM::t2STREXD : ARM::STREXD;
+ MIB = BuildMI(StoreBB, DL, TII->get(STREXD), StatusReg);
+ addExclusiveRegPair(MIB, New, 0, IsThumb, TRI);
+ MIB.addOperand(Addr);
+ AddDefaultPred(MIB);
+
+ unsigned CMPri = IsThumb ? ARM::t2CMPri : ARM::CMPri;
+ AddDefaultPred(BuildMI(StoreBB, DL, TII->get(CMPri))
+ .addReg(StatusReg, RegState::Kill)
+ .addImm(0));
+ BuildMI(StoreBB, DL, TII->get(Bcc))
+ .addMBB(LoadCmpBB)
+ .addImm(ARMCC::NE)
+ .addReg(ARM::CPSR, RegState::Kill);
+ StoreBB->addSuccessor(LoadCmpBB);
+ StoreBB->addSuccessor(DoneBB);
+
+ DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
+ DoneBB->transferSuccessors(&MBB);
+ addPostLoopLiveIns(DoneBB, LiveRegs);
+
+ MBB.addSuccessor(LoadCmpBB);
+
+ NextMBBI = MBB.end();
+ MI.eraseFromParent();
+ return true;
+}
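Both expansions recompute liveness at the expansion point rather than trusting kill flags, which is why LivePhysRegs.h is now included. A sketch of the pattern, using the same LivePhysRegs calls as the code above:

    #include <iterator>
    #include "llvm/CodeGen/LivePhysRegs.h"
    #include "llvm/CodeGen/MachineBasicBlock.h"
    // Collect the registers live just after MBBI so the freshly created
    // LoadCmpBB/StoreBB/DoneBB blocks can be given accurate live-in lists.
    static void computeLiveAt(llvm::MachineBasicBlock &MBB,
                              llvm::MachineBasicBlock::iterator MBBI,
                              llvm::LivePhysRegs &LiveRegs) {
      LiveRegs.addLiveOuts(MBB);       // start from the block's live-out set
      for (auto I = std::prev(MBB.end()); I != MBBI; --I)
        LiveRegs.stepBackward(*I);     // drop defs, add uses, walking upwards
    }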
+
bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI) {
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI) {
MachineInstr &MI = *MBBI;
unsigned Opcode = MI.getOpcode();
switch (Opcode) {
@@ -784,7 +1033,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
.addReg(JumpTarget.getReg(), RegState::Kill);
}
- MachineInstr *NewMI = std::prev(MBBI);
+ auto NewMI = std::prev(MBBI);
for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
NewMI->addOperand(MBBI->getOperand(i));
@@ -1375,6 +1624,30 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
case ARM::VTBL4Pseudo: ExpandVTBL(MBBI, ARM::VTBL4, false); return true;
case ARM::VTBX3Pseudo: ExpandVTBL(MBBI, ARM::VTBX3, true); return true;
case ARM::VTBX4Pseudo: ExpandVTBL(MBBI, ARM::VTBX4, true); return true;
+
+ case ARM::CMP_SWAP_8:
+ if (STI->isThumb())
+ return ExpandCMP_SWAP(MBB, MBBI, ARM::t2LDREXB, ARM::t2STREXB,
+ ARM::tUXTB, NextMBBI);
+ else
+ return ExpandCMP_SWAP(MBB, MBBI, ARM::LDREXB, ARM::STREXB,
+ ARM::UXTB, NextMBBI);
+ case ARM::CMP_SWAP_16:
+ if (STI->isThumb())
+ return ExpandCMP_SWAP(MBB, MBBI, ARM::t2LDREXH, ARM::t2STREXH,
+ ARM::tUXTH, NextMBBI);
+ else
+ return ExpandCMP_SWAP(MBB, MBBI, ARM::LDREXH, ARM::STREXH,
+ ARM::UXTH, NextMBBI);
+ case ARM::CMP_SWAP_32:
+ if (STI->isThumb())
+ return ExpandCMP_SWAP(MBB, MBBI, ARM::t2LDREX, ARM::t2STREX, 0,
+ NextMBBI);
+ else
+ return ExpandCMP_SWAP(MBB, MBBI, ARM::LDREX, ARM::STREX, 0, NextMBBI);
+
+ case ARM::CMP_SWAP_64:
+ return ExpandCMP_SWAP_64(MBB, MBBI, NextMBBI);
}
}
@@ -1384,7 +1657,7 @@ bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
while (MBBI != E) {
MachineBasicBlock::iterator NMBBI = std::next(MBBI);
- Modified |= ExpandMI(MBB, MBBI);
+ Modified |= ExpandMI(MBB, MBBI, NMBBI);
MBBI = NMBBI;
}
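ExpandMI now reports a resume iterator because the CMP_SWAP expansions splice the tail of the current block into DoneBB; a next-iterator cached before the call would afterwards belong to a different block. A container-level analog of the hazard (names are hypothetical):

    #include <iterator>
    #include <list>
    int main() {
      std::list<int> MBB{1, 2, 3}, DoneBB;
      auto Next = std::next(MBB.begin());          // cached before expansion
      DoneBB.splice(DoneBB.end(), MBB, Next, MBB.end());
      // Next is still valid but now walks DoneBB; continuing the loop over
      // MBB with it would be wrong, so the expansion re-anchors it:
      Next = MBB.end();
      return Next == MBB.end() ? 0 : 1;
    }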
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index ff2fcfa349dc..13724da5d4f7 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -22,7 +22,6 @@
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -41,7 +40,6 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
@@ -110,11 +108,6 @@ class ARMFastISel final : public FastISel {
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill);
- unsigned fastEmitInst_rrr(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- unsigned Op0, bool Op0IsKill,
- unsigned Op1, bool Op1IsKill,
- unsigned Op2, bool Op2IsKill);
unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
@@ -164,6 +157,7 @@ class ARMFastISel final : public FastISel {
// Utility routines.
private:
+ bool isPositionIndependent() const;
bool isTypeLegal(Type *Ty, MVT &VT);
bool isLoadTypeLegal(Type *Ty, MVT &VT);
bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
@@ -215,7 +209,7 @@ class ARMFastISel final : public FastISel {
const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
void AddLoadStoreOperands(MVT VT, Address &Addr,
const MachineInstrBuilder &MIB,
- unsigned Flags, bool useAM3);
+ MachineMemOperand::Flags Flags, bool useAM3);
};
} // end anonymous namespace
@@ -331,38 +325,6 @@ unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
return ResultReg;
}
-unsigned ARMFastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
- const TargetRegisterClass *RC,
- unsigned Op0, bool Op0IsKill,
- unsigned Op1, bool Op1IsKill,
- unsigned Op2, bool Op2IsKill) {
- unsigned ResultReg = createResultReg(RC);
- const MCInstrDesc &II = TII.get(MachineInstOpcode);
-
- // Make sure the input operands are sufficiently constrained to be legal
- // for this instruction.
- Op0 = constrainOperandRegClass(II, Op0, 1);
- Op1 = constrainOperandRegClass(II, Op1, 2);
- Op2 = constrainOperandRegClass(II, Op1, 3);
-
- if (II.getNumDefs() >= 1) {
- AddOptionalDefs(
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill)
- .addReg(Op1, Op1IsKill * RegState::Kill)
- .addReg(Op2, Op2IsKill * RegState::Kill));
- } else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
- .addReg(Op0, Op0IsKill * RegState::Kill)
- .addReg(Op1, Op1IsKill * RegState::Kill)
- .addReg(Op2, Op2IsKill * RegState::Kill));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(TargetOpcode::COPY), ResultReg)
- .addReg(II.ImplicitDefs[0]));
- }
- return ResultReg;
-}
-
unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
@@ -576,12 +538,15 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
return ResultReg;
}
+bool ARMFastISel::isPositionIndependent() const {
+ return TLI.isPositionIndependent();
+}
+
unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
// For now 32-bit only.
if (VT != MVT::i32 || GV->isThreadLocal()) return 0;
- Reloc::Model RelocM = TM.getRelocationModel();
- bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM);
+ bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
: &ARM::GPRRegClass;
unsigned DestReg = createResultReg(RC);
@@ -591,23 +556,20 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
bool IsThreadLocal = GVar && GVar->isThreadLocal();
if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0;
+ bool IsPositionIndependent = isPositionIndependent();
// Use movw+movt when possible, it avoids constant pool entries.
// Non-darwin targets only support static movt relocations in FastISel.
if (Subtarget->useMovt(*FuncInfo.MF) &&
- (Subtarget->isTargetMachO() || RelocM == Reloc::Static)) {
+ (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
unsigned Opc;
unsigned char TF = 0;
if (Subtarget->isTargetMachO())
TF = ARMII::MO_NONLAZY;
- switch (RelocM) {
- case Reloc::PIC_:
+ if (IsPositionIndependent)
Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
- break;
- default:
+ else
Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
- break;
- }
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
} else {
@@ -618,12 +580,11 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
Align = DL.getTypeAllocSize(GV->getType());
}
- if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_)
+ if (Subtarget->isTargetELF() && IsPositionIndependent)
return ARMLowerPICELF(GV, Align, VT);
// Grab index.
- unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
- (Subtarget->isThumb() ? 4 : 8);
+ unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
unsigned Id = AFI->createPICLabelUId();
ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
ARMCP::CPValue,
@@ -633,10 +594,10 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
// Load value.
MachineInstrBuilder MIB;
if (isThumb2) {
- unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
+ unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
DestReg).addConstantPoolIndex(Idx);
- if (RelocM == Reloc::PIC_)
+ if (IsPositionIndependent)
MIB.addImm(Id);
AddOptionalDefs(MIB);
} else {
@@ -648,7 +609,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
.addImm(0);
AddOptionalDefs(MIB);
- if (RelocM == Reloc::PIC_) {
+ if (IsPositionIndependent) {
unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
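The PCAdj value above encodes the architectural PC bias: a PC-relative reader observes the instruction address plus 8 in ARM state and plus 4 in Thumb state, and only position-independent entries need the adjustment. A self-checking restatement of that rule:

    #include <cassert>
    static unsigned pcAdjust(bool PIC, bool IsThumb) {
      // Non-PIC constant pool entries hold absolute addresses and need no
      // adjustment; PIC entries must account for the pipeline bias.
      return PIC ? (IsThumb ? 4 : 8) : 0;
    }
    int main() {
      assert(pcAdjust(true, true) == 4);  // Thumb PIC
      assert(pcAdjust(true, false) == 8); // ARM PIC
      assert(pcAdjust(false, true) == 0); // static
    }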
@@ -912,7 +873,8 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
const MachineInstrBuilder &MIB,
- unsigned Flags, bool useAM3) {
+ MachineMemOperand::Flags Flags,
+ bool useAM3) {
// addrmode5 output depends on the selection dag addressing dividing the
// offset by 4 that it then later multiplies. Do this here as well.
if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64)
@@ -931,7 +893,7 @@ void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
// ARM halfword load/stores and signed byte loads need an additional
// operand.
if (useAM3) {
- signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
+ int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
MIB.addReg(0);
MIB.addImm(Imm);
} else {
@@ -945,7 +907,7 @@ void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
// ARM halfword load/stores and signed byte loads need an additional
// operand.
if (useAM3) {
- signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
+ int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
MIB.addReg(0);
MIB.addImm(Imm);
} else {
@@ -1062,6 +1024,21 @@ bool ARMFastISel::SelectLoad(const Instruction *I) {
if (cast<LoadInst>(I)->isAtomic())
return false;
+ const Value *SV = I->getOperand(0);
+ if (TLI.supportSwiftError()) {
+ // Swifterror values can come from either a function parameter with
+ // swifterror attribute or an alloca with swifterror attribute.
+ if (const Argument *Arg = dyn_cast<Argument>(SV)) {
+ if (Arg->hasSwiftErrorAttr())
+ return false;
+ }
+
+ if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
+ if (Alloca->isSwiftError())
+ return false;
+ }
+ }
+
// Verify we have a legal type before going any further.
MVT VT;
if (!isLoadTypeLegal(I->getType(), VT))
@@ -1177,6 +1154,21 @@ bool ARMFastISel::SelectStore(const Instruction *I) {
if (cast<StoreInst>(I)->isAtomic())
return false;
+ const Value *PtrV = I->getOperand(1);
+ if (TLI.supportSwiftError()) {
+ // Swifterror values can come from either a function parameter with
+ // swifterror attribute or an alloca with swifterror attribute.
+ if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
+ if (Arg->hasSwiftErrorAttr())
+ return false;
+ }
+
+ if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
+ if (Alloca->isSwiftError())
+ return false;
+ }
+ }
+
// Verify we have a legal type before going any further.
MVT VT;
if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
@@ -1726,6 +1718,13 @@ bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
if (!isTypeLegal(Ty, VT))
return false;
+ // Many ABIs do not provide a libcall for standalone remainder, so we need to
+ // use divrem (see the RTABI 4.3.1). Since FastISel can't handle non-double
+ // multi-reg returns, we'll have to bail out.
+ if (!TLI.hasStandaloneRem(VT)) {
+ return false;
+ }
+
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == MVT::i8)
LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
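The bail-out above exists because the RTABI defines only fused divide/remainder helpers. A hedged illustration of the convention; the little-endian uint64_t view of the {r0, r1} return is an assumption made for the example:

    #include <cstdint>
    // __aeabi_uidivmod returns the quotient in r0 and the remainder in r1.
    // Under little-endian AAPCS a uint64_t return occupies r1:r0, so the
    // remainder is the high word. FastISel's libcall path cannot model a
    // two-register return like this, hence the fallback to SelectionDAG.
    extern "C" uint64_t __aeabi_uidivmod(uint32_t Num, uint32_t Den);
    static uint32_t uremViaDivMod(uint32_t Num, uint32_t Den) {
      return (uint32_t)(__aeabi_uidivmod(Num, Den) >> 32);
    }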
@@ -1847,6 +1846,7 @@ CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
}
// Fallthrough
case CallingConv::C:
+ case CallingConv::CXX_FAST_TLS:
// Use target triple & subtarget features to do actual dispatch.
if (Subtarget->isAAPCS_ABI()) {
if (Subtarget->hasVFP2() &&
@@ -1858,6 +1858,7 @@ CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
}
case CallingConv::ARM_AAPCS_VFP:
+ case CallingConv::Swift:
if (!isVarArg)
return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
// Fall through to soft float variant, variadic functions don't
@@ -2083,6 +2084,10 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
if (!FuncInfo.CanLowerReturn)
return false;
+ if (TLI.supportSwiftError() &&
+ F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
+ return false;
+
if (TLI.supportSplitCSR(FuncInfo.MF))
return false;
@@ -2295,8 +2300,7 @@ bool ARMFastISel::SelectCall(const Instruction *I,
// TODO: Avoid some calling conventions?
- PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
- FunctionType *FTy = cast<FunctionType>(PT->getElementType());
+ FunctionType *FTy = CS.getFunctionType();
bool isVarArg = FTy->isVarArg();
// Handle *simple* calls for now.
@@ -2345,6 +2349,8 @@ bool ARMFastISel::SelectCall(const Instruction *I,
// FIXME: Only handle *easy* calls for now.
if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
+ CS.paramHasAttr(AttrInd, Attribute::SwiftSelf) ||
+ CS.paramHasAttr(AttrInd, Attribute::SwiftError) ||
CS.paramHasAttr(AttrInd, Attribute::Nest) ||
CS.paramHasAttr(AttrInd, Attribute::ByVal))
return false;
@@ -2394,22 +2400,15 @@ bool ARMFastISel::SelectCall(const Instruction *I,
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
DbgLoc, TII.get(CallOpc));
- unsigned char OpFlags = 0;
-
- // Add MO_PLT for global address or external symbol in the PIC relocation
- // model.
- if (Subtarget->isTargetELF() && TM.getRelocationModel() == Reloc::PIC_)
- OpFlags = ARMII::MO_PLT;
-
// ARM calls don't take a predicate, but tBL / tBLX do.
if(isThumb2)
AddDefaultPred(MIB);
if (UseReg)
MIB.addReg(CalleeReg);
else if (!IntrMemName)
- MIB.addGlobalAddress(GV, 0, OpFlags);
+ MIB.addGlobalAddress(GV, 0, 0);
else
- MIB.addExternalSymbol(IntrMemName, OpFlags);
+ MIB.addExternalSymbol(IntrMemName, 0);
// Add implicit physical register uses to the call.
for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
@@ -2942,8 +2941,7 @@ bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
unsigned Align, MVT VT) {
- bool UseGOT_PREL =
- !(GV->hasHiddenVisibility() || GV->hasLocalLinkage());
+ bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
LLVMContext *Context = &MF->getFunction()->getContext();
unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
@@ -3006,6 +3004,7 @@ bool ARMFastISel::fastLowerArguments() {
case CallingConv::ARM_AAPCS_VFP:
case CallingConv::ARM_AAPCS:
case CallingConv::ARM_APCS:
+ case CallingConv::Swift:
break;
}
@@ -3019,6 +3018,8 @@ bool ARMFastISel::fastLowerArguments() {
if (F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
+ F->getAttributes().hasAttribute(Idx, Attribute::SwiftSelf) ||
+ F->getAttributes().hasAttribute(Idx, Attribute::SwiftError) ||
F->getAttributes().hasAttribute(Idx, Attribute::ByVal))
return false;
diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp
index c5990bb7d1fb..e8c9f610ea64 100644
--- a/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/lib/Target/ARM/ARMFrameLowering.cpp
@@ -98,35 +98,32 @@ ARMFrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
}
-static bool isCSRestore(MachineInstr *MI,
- const ARMBaseInstrInfo &TII,
+static bool isCSRestore(MachineInstr &MI, const ARMBaseInstrInfo &TII,
const MCPhysReg *CSRegs) {
// Integer spill area is handled with "pop".
- if (isPopOpcode(MI->getOpcode())) {
+ if (isPopOpcode(MI.getOpcode())) {
// The first two operands are predicates. The last two are
// imp-def and imp-use of SP. Check everything in between.
- for (int i = 5, e = MI->getNumOperands(); i != e; ++i)
- if (!isCalleeSavedRegister(MI->getOperand(i).getReg(), CSRegs))
+ for (int i = 5, e = MI.getNumOperands(); i != e; ++i)
+ if (!isCalleeSavedRegister(MI.getOperand(i).getReg(), CSRegs))
return false;
return true;
}
- if ((MI->getOpcode() == ARM::LDR_POST_IMM ||
- MI->getOpcode() == ARM::LDR_POST_REG ||
- MI->getOpcode() == ARM::t2LDR_POST) &&
- isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs) &&
- MI->getOperand(1).getReg() == ARM::SP)
+ if ((MI.getOpcode() == ARM::LDR_POST_IMM ||
+ MI.getOpcode() == ARM::LDR_POST_REG ||
+ MI.getOpcode() == ARM::t2LDR_POST) &&
+ isCalleeSavedRegister(MI.getOperand(0).getReg(), CSRegs) &&
+ MI.getOperand(1).getReg() == ARM::SP)
return true;
return false;
}
-static void emitRegPlusImmediate(bool isARM, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI, DebugLoc dl,
- const ARMBaseInstrInfo &TII, unsigned DestReg,
- unsigned SrcReg, int NumBytes,
- unsigned MIFlags = MachineInstr::NoFlags,
- ARMCC::CondCodes Pred = ARMCC::AL,
- unsigned PredReg = 0) {
+static void emitRegPlusImmediate(
+ bool isARM, MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ const DebugLoc &dl, const ARMBaseInstrInfo &TII, unsigned DestReg,
+ unsigned SrcReg, int NumBytes, unsigned MIFlags = MachineInstr::NoFlags,
+ ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
if (isARM)
emitARMRegPlusImmediate(MBB, MBBI, dl, DestReg, SrcReg, NumBytes,
Pred, PredReg, TII, MIFlags);
@@ -136,7 +133,7 @@ static void emitRegPlusImmediate(bool isARM, MachineBasicBlock &MBB,
}
static void emitSPUpdate(bool isARM, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI, DebugLoc dl,
+ MachineBasicBlock::iterator &MBBI, const DebugLoc &dl,
const ARMBaseInstrInfo &TII, int NumBytes,
unsigned MIFlags = MachineInstr::NoFlags,
ARMCC::CondCodes Pred = ARMCC::AL,
@@ -145,9 +142,9 @@ static void emitSPUpdate(bool isARM, MachineBasicBlock &MBB,
MIFlags, Pred, PredReg);
}
-static int sizeOfSPAdjustment(const MachineInstr *MI) {
+static int sizeOfSPAdjustment(const MachineInstr &MI) {
int RegSize;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
case ARM::VSTMDDB_UPD:
RegSize = 8;
break;
@@ -165,7 +162,7 @@ static int sizeOfSPAdjustment(const MachineInstr *MI) {
int count = 0;
// ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
// pred) so the list starts at 4.
- for (int i = MI->getNumOperands() - 1; i >= 4; --i)
+ for (int i = MI.getNumOperands() - 1; i >= 4; --i)
count += RegSize;
return count;
}
@@ -206,7 +203,8 @@ struct StackAdjustingInsts {
}
void emitDefCFAOffsets(MachineModuleInfo &MMI, MachineBasicBlock &MBB,
- DebugLoc dl, const ARMBaseInstrInfo &TII, bool HasFP) {
+ const DebugLoc &dl, const ARMBaseInstrInfo &TII,
+ bool HasFP) {
unsigned CFAOffset = 0;
for (auto &Info : Insts) {
if (HasFP && !Info.BeforeFPSet)
@@ -235,7 +233,7 @@ static void emitAligningInstructions(MachineFunction &MF, ARMFunctionInfo *AFI,
const TargetInstrInfo &TII,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL, const unsigned Reg,
+ const DebugLoc &DL, const unsigned Reg,
const unsigned Alignment,
const bool MustBeSingleInstruction) {
const ARMSubtarget &AST =
@@ -355,7 +353,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
case ARM::R10:
case ARM::R11:
case ARM::R12:
- if (STI.isTargetDarwin()) {
+ if (STI.splitFramePushPop()) {
GPRCS2Size += 4;
break;
}
@@ -416,7 +414,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
// .cfi_offset operations will reflect that.
if (DPRGapSize) {
assert(DPRGapSize == 4 && "unexpected alignment requirements for DPRs");
- if (tryFoldSPUpdateIntoPushPop(STI, MF, LastPush, DPRGapSize))
+ if (tryFoldSPUpdateIntoPushPop(STI, MF, &*LastPush, DPRGapSize))
DefCFAOffsetCandidates.addExtraBytes(LastPush, DPRGapSize);
else {
emitSPUpdate(isARM, MBB, MBBI, dl, TII, -DPRGapSize,
@@ -430,7 +428,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
// Since vpush register list cannot have gaps, there may be multiple vpush
// instructions in the prologue.
while (MBBI->getOpcode() == ARM::VSTMDDB_UPD) {
- DefCFAOffsetCandidates.addInst(MBBI, sizeOfSPAdjustment(MBBI));
+ DefCFAOffsetCandidates.addInst(MBBI, sizeOfSPAdjustment(*MBBI));
LastPush = MBBI++;
}
}
@@ -485,7 +483,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr),
ARM::SP)
- .addReg(ARM::SP, RegState::Define)
+ .addReg(ARM::SP, RegState::Kill)
.addReg(ARM::R4, RegState::Kill)
.setMIFlags(MachineInstr::FrameSetup)));
NumBytes = 0;
@@ -494,7 +492,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
if (NumBytes) {
// Adjust SP after all the callee-save spills.
if (AFI->getNumAlignedDPRCS2Regs() == 0 &&
- tryFoldSPUpdateIntoPushPop(STI, MF, LastPush, NumBytes))
+ tryFoldSPUpdateIntoPushPop(STI, MF, &*LastPush, NumBytes))
DefCFAOffsetCandidates.addExtraBytes(LastPush, NumBytes);
else {
emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes,
@@ -522,7 +520,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
// that push.
if (HasFP) {
MachineBasicBlock::iterator AfterPush = std::next(GPRCS1Push);
- unsigned PushSize = sizeOfSPAdjustment(GPRCS1Push);
+ unsigned PushSize = sizeOfSPAdjustment(*GPRCS1Push);
emitRegPlusImmediate(!AFI->isThumbFunction(), MBB, AfterPush,
dl, TII, FramePtr, ARM::SP,
PushSize + FramePtrOffsetInPush,
@@ -559,7 +557,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
case ARM::R10:
case ARM::R11:
case ARM::R12:
- if (STI.isTargetDarwin())
+ if (STI.splitFramePushPop())
break;
// fallthrough
case ARM::R0:
@@ -592,7 +590,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
case ARM::R10:
case ARM::R11:
case ARM::R12:
- if (STI.isTargetDarwin()) {
+ if (STI.splitFramePushPop()) {
unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
unsigned Offset = MFI->getObjectOffset(FI);
unsigned CFIIndex = MMI.addFrameInst(
@@ -727,8 +725,8 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
if (MBBI != MBB.begin()) {
do {
--MBBI;
- } while (MBBI != MBB.begin() && isCSRestore(MBBI, TII, CSRegs));
- if (!isCSRestore(MBBI, TII, CSRegs))
+ } while (MBBI != MBB.begin() && isCSRestore(*MBBI, TII, CSRegs));
+ if (!isCSRestore(*MBBI, TII, CSRegs))
++MBBI;
}
@@ -774,8 +772,8 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
.addReg(FramePtr));
}
} else if (NumBytes &&
- !tryFoldSPUpdateIntoPushPop(STI, MF, MBBI, NumBytes))
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
+ !tryFoldSPUpdateIntoPushPop(STI, MF, &*MBBI, NumBytes))
+ emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
// Increment past our save areas.
if (AFI->getDPRCalleeSavedAreaSize()) {
@@ -904,33 +902,27 @@ void ARMFrameLowering::emitPushInst(MachineBasicBlock &MBB,
unsigned LastReg = 0;
for (; i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
- if (!(Func)(Reg, STI.isTargetDarwin())) continue;
+ if (!(Func)(Reg, STI.splitFramePushPop())) continue;
// D-registers in the aligned area DPRCS2 are NOT spilled here.
if (Reg >= ARM::D8 && Reg < ARM::D8 + NumAlignedDPRCS2Regs)
continue;
- // Add the callee-saved register as live-in unless it's LR and
- // @llvm.returnaddress is called. If LR is returned for
- // @llvm.returnaddress then it's already added to the function and
- // entry block live-in sets.
- bool isKill = true;
- if (Reg == ARM::LR) {
- if (MF.getFrameInfo()->isReturnAddressTaken() &&
- MF.getRegInfo().isLiveIn(Reg))
- isKill = false;
- }
-
- if (isKill)
+ bool isLiveIn = MF.getRegInfo().isLiveIn(Reg);
+ if (!isLiveIn)
MBB.addLiveIn(Reg);
-
// If NoGap is true, push consecutive registers and then leave the rest
// for other instructions. e.g.
// vpush {d8, d10, d11} -> vpush {d8}, vpush {d10, d11}
if (NoGap && LastReg && LastReg != Reg-1)
break;
LastReg = Reg;
- Regs.push_back(std::make_pair(Reg, isKill));
+ // Do not set a kill flag on values that are also marked as live-in. This
+ // happens with the @llvm.returnaddress intrinsic and with arguments
+ // passed in callee-saved registers.
+ // Omitting the kill flags is conservatively correct even if the live-in
+ // is not used after all.
+ Regs.push_back(std::make_pair(Reg, /*isKill=*/!isLiveIn));
}
if (Regs.empty())
@@ -991,7 +983,7 @@ void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
bool DeleteRet = false;
for (; i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
- if (!(Func)(Reg, STI.isTargetDarwin())) continue;
+ if (!(Func)(Reg, STI.splitFramePushPop())) continue;
// The aligned reloads from area DPRCS2 are not inserted here.
if (Reg >= ARM::D8 && Reg < ARM::D8 + NumAlignedDPRCS2Regs)
@@ -1027,7 +1019,7 @@ void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
for (unsigned i = 0, e = Regs.size(); i < e; ++i)
MIB.addReg(Regs[i], getDefRegState(true));
if (DeleteRet && MI != MBB.end()) {
- MIB.copyImplicitOps(&*MI);
+ MIB.copyImplicitOps(*MI);
MI->eraseFromParent();
}
MI = MIB;
@@ -1367,7 +1359,7 @@ static unsigned GetFunctionSizeInBytes(const MachineFunction &MF,
unsigned FnSize = 0;
for (auto &MBB : MF) {
for (auto &MI : MBB)
- FnSize += TII.GetInstSizeInBytes(&MI);
+ FnSize += TII.GetInstSizeInBytes(MI);
}
return FnSize;
}
@@ -1485,6 +1477,7 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
bool CS1Spilled = false;
bool LRSpilled = false;
unsigned NumGPRSpills = 0;
+ unsigned NumFPRSpills = 0;
SmallVector<unsigned, 4> UnspilledCS1GPRs;
SmallVector<unsigned, 4> UnspilledCS2GPRs;
const ARMBaseRegisterInfo *RegInfo = static_cast<const ARMBaseRegisterInfo *>(
@@ -1539,13 +1532,22 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
CanEliminateFrame = false;
}
- if (!ARM::GPRRegClass.contains(Reg))
+ if (!ARM::GPRRegClass.contains(Reg)) {
+ if (Spilled) {
+ if (ARM::SPRRegClass.contains(Reg))
+ NumFPRSpills++;
+ else if (ARM::DPRRegClass.contains(Reg))
+ NumFPRSpills += 2;
+ else if (ARM::QPRRegClass.contains(Reg))
+ NumFPRSpills += 4;
+ }
continue;
+ }
if (Spilled) {
NumGPRSpills++;
- if (!STI.isTargetDarwin()) {
+ if (!STI.splitFramePushPop()) {
if (Reg == ARM::LR)
LRSpilled = true;
CS1Spilled = true;
@@ -1567,7 +1569,7 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
break;
}
} else {
- if (!STI.isTargetDarwin()) {
+ if (!STI.splitFramePushPop()) {
UnspilledCS1GPRs.push_back(Reg);
continue;
}
@@ -1613,12 +1615,21 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
// FIXME: We could add logic to be more precise about negative offsets
// and which instructions will need a scratch register for them. Is it
// worth the effort and added fragility?
- bool BigStack = (RS && (MFI->estimateStackSize(MF) +
- ((hasFP(MF) && AFI->hasStackFrame()) ? 4 : 0) >=
- estimateRSStackSizeLimit(MF, this))) ||
+ unsigned EstimatedStackSize =
+ MFI->estimateStackSize(MF) + 4 * (NumGPRSpills + NumFPRSpills);
+ if (hasFP(MF)) {
+ if (AFI->hasStackFrame())
+ EstimatedStackSize += 4;
+ } else {
+ // If FP is not used, SP will be used to access arguments, so include the
+ // size of the arguments in the estimate.
+ EstimatedStackSize += MF.getInfo<ARMFunctionInfo>()->getArgumentStackSize();
+ }
+ EstimatedStackSize += 16; // For possible padding.
+
+ bool BigStack = EstimatedStackSize >= estimateRSStackSizeLimit(MF, this) ||
MFI->hasVarSizedObjects() ||
(MFI->adjustsStack() && !canSimplifyCallFramePseudos(MF));
-
bool ExtraCSSpill = false;
if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF)) {
AFI->setHasStackFrame(true);
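A worked instance of the new estimate, under assumed counts: 40 bytes of locals, five GPR spills, and two D-register spills (each D register counts as two 4-byte words), in a function that has both a frame pointer and a stack frame:

    #include <cassert>
    int main() {
      unsigned NumGPRSpills = 5;
      unsigned NumFPRSpills = 2 * 2;         // two DPRs -> four words
      unsigned EstimatedStackSize = 40 + 4 * (NumGPRSpills + NumFPRSpills);
      EstimatedStackSize += 4;               // hasFP && hasStackFrame
      EstimatedStackSize += 16;              // possible padding
      assert(EstimatedStackSize == 96);      // compared to the scavenging limit
      return 0;
    }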
@@ -1712,6 +1723,7 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
} else if (!AFI->isThumb1OnlyFunction()) {
// note: Thumb1 functions spill to R12, not the stack. Reserve a slot
// closest to SP or frame pointer.
+ assert(RS && "Register scavenging not provided");
const TargetRegisterClass *RC = &ARM::GPRRegClass;
RS->addScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
RC->getAlignment(),
@@ -1726,19 +1738,18 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
}
}
-
-void ARMFrameLowering::
-eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
+MachineBasicBlock::iterator ARMFrameLowering::eliminateCallFramePseudoInstr(
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
const ARMBaseInstrInfo &TII =
*static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
if (!hasReservedCallFrame(MF)) {
// If we have alloca, convert as follows:
// ADJCALLSTACKDOWN -> sub, sp, sp, amount
// ADJCALLSTACKUP -> add, sp, sp, amount
- MachineInstr *Old = I;
- DebugLoc dl = Old->getDebugLoc();
- unsigned Amount = Old->getOperand(0).getImm();
+ MachineInstr &Old = *I;
+ DebugLoc dl = Old.getDebugLoc();
+ unsigned Amount = Old.getOperand(0).getImm();
if (Amount != 0) {
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
@@ -1751,25 +1762,26 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
bool isARM = !AFI->isThumbFunction();
// Replace the pseudo instruction with a new instruction...
- unsigned Opc = Old->getOpcode();
- int PIdx = Old->findFirstPredOperandIdx();
- ARMCC::CondCodes Pred = (PIdx == -1)
- ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
+ unsigned Opc = Old.getOpcode();
+ int PIdx = Old.findFirstPredOperandIdx();
+ ARMCC::CondCodes Pred =
+ (PIdx == -1) ? ARMCC::AL
+ : (ARMCC::CondCodes)Old.getOperand(PIdx).getImm();
if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
// Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
- unsigned PredReg = Old->getOperand(2).getReg();
+ unsigned PredReg = Old.getOperand(2).getReg();
emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, MachineInstr::NoFlags,
Pred, PredReg);
} else {
// Note: PredReg is operand 3 for ADJCALLSTACKUP.
- unsigned PredReg = Old->getOperand(3).getReg();
+ unsigned PredReg = Old.getOperand(3).getReg();
assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
emitSPUpdate(isARM, MBB, I, dl, TII, Amount, MachineInstr::NoFlags,
Pred, PredReg);
}
}
}
- MBB.erase(I);
+ return MBB.erase(I);
}
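Returning the result of erase lets callers continue iterating without recomputing a successor, matching the standard container idiom sketched here with a plain std::list:

    #include <list>
    int main() {
      std::list<int> Insts{1, 2, 3};
      for (auto I = Insts.begin(); I != Insts.end();) {
        if (*I == 2)
          I = Insts.erase(I); // erase yields the iterator after the removal
        else
          ++I;
      }
      return Insts.size() == 2 ? 0 : 1;
    }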
/// Get the minimum constant for ARM that is greater than or equal to the
@@ -2162,7 +2174,7 @@ void ARMFrameLowering::adjustForSegmentedStacks(
PrevStackMBB->addSuccessor(McrMBB);
-#ifdef XDEBUG
+#ifdef EXPENSIVE_CHECKS
MF.verify();
#endif
}
diff --git a/lib/Target/ARM/ARMFrameLowering.h b/lib/Target/ARM/ARMFrameLowering.h
index 66f4dfb6ef52..21cd78da395c 100644
--- a/lib/Target/ARM/ARMFrameLowering.h
+++ b/lib/Target/ARM/ARMFrameLowering.h
@@ -74,7 +74,7 @@ public:
bool(*Func)(unsigned, bool),
unsigned NumAlignedDPRCS2Regs) const;
- void
+ MachineBasicBlock::iterator
eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const override;
diff --git a/lib/Target/ARM/ARMHazardRecognizer.cpp b/lib/Target/ARM/ARMHazardRecognizer.cpp
index 0157c0a35286..0d904ecb6296 100644
--- a/lib/Target/ARM/ARMHazardRecognizer.cpp
+++ b/lib/Target/ARM/ARMHazardRecognizer.cpp
@@ -50,8 +50,7 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
// Skip over one non-VFP / NEON instruction.
if (!LastMI->isBarrier() &&
- // On A9, AGU and NEON/FPU are muxed.
- !(TII.getSubtarget().isLikeA9() && LastMI->mayLoadOrStore()) &&
+ !(TII.getSubtarget().hasMuxedUnits() && LastMI->mayLoadOrStore()) &&
(LastMCID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) {
MachineBasicBlock::iterator I = LastMI;
if (I != LastMI->getParent()->begin()) {
diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 6e7edbf9fb15..20db3d39bcae 100644
--- a/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -29,7 +29,6 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetLowering.h"
@@ -44,11 +43,6 @@ DisableShifterOp("disable-shifter-op", cl::Hidden,
cl::desc("Disable isel of shifter-op"),
cl::init(false));
-static cl::opt<bool>
-CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
- cl::desc("Check fp vmla / vmls hazard at isel time"),
- cl::init(true));
-
//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
@@ -84,12 +78,11 @@ public:
/// getI32Imm - Return a target constant of type i32 with the specified
/// value.
- inline SDValue getI32Imm(unsigned Imm, SDLoc dl) {
+ inline SDValue getI32Imm(unsigned Imm, const SDLoc &dl) {
return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
}
- SDNode *Select(SDNode *N) override;
-
+ void Select(SDNode *N) override;
bool hasNoVMLxHazardUse(SDNode *N) const;
bool isShifterOpProfitable(const SDValue &Shift,
@@ -200,57 +193,61 @@ public:
#include "ARMGenDAGISel.inc"
private:
- /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
- /// ARM.
- SDNode *SelectARMIndexedLoad(SDNode *N);
- SDNode *SelectT2IndexedLoad(SDNode *N);
+ /// Indexed (pre/post inc/dec) load matching code for ARM.
+ bool tryARMIndexedLoad(SDNode *N);
+ bool tryT1IndexedLoad(SDNode *N);
+ bool tryT2IndexedLoad(SDNode *N);
/// SelectVLD - Select NEON load intrinsics. NumVecs should be
/// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
/// loads of D registers and even subregs and odd subregs of Q registers.
/// For NumVecs <= 2, QOpcodes1 is not used.
- SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
- const uint16_t *DOpcodes,
- const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);
+ void SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
+ const uint16_t *DOpcodes, const uint16_t *QOpcodes0,
+ const uint16_t *QOpcodes1);
/// SelectVST - Select NEON store intrinsics. NumVecs should
/// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
/// stores of D registers and even subregs and odd subregs of Q registers.
/// For NumVecs <= 2, QOpcodes1 is not used.
- SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
- const uint16_t *DOpcodes,
- const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);
+ void SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
+ const uint16_t *DOpcodes, const uint16_t *QOpcodes0,
+ const uint16_t *QOpcodes1);
/// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
/// be 2, 3 or 4. The opcode arrays specify the instructions used for
/// load/store of D registers and Q registers.
- SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
- bool isUpdating, unsigned NumVecs,
- const uint16_t *DOpcodes, const uint16_t *QOpcodes);
+ void SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
+ unsigned NumVecs, const uint16_t *DOpcodes,
+ const uint16_t *QOpcodes);
/// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
/// should be 2, 3 or 4. The opcode array specifies the instructions used
/// for loading D registers. (Q registers are not supported.)
- SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
- const uint16_t *Opcodes);
+ void SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
+ const uint16_t *Opcodes);
/// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2,
/// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be
/// generated to force the table registers to be consecutive.
- SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);
+ void SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);
- /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
- SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);
+ /// Try to select SBFX/UBFX instructions for ARM.
+ bool tryV6T2BitfieldExtractOp(SDNode *N, bool isSigned);
// Select special operations if node forms integer ABS pattern
- SDNode *SelectABSOp(SDNode *N);
+ bool tryABSOp(SDNode *N);
+
+ bool tryReadRegister(SDNode *N);
+ bool tryWriteRegister(SDNode *N);
- SDNode *SelectReadRegister(SDNode *N);
- SDNode *SelectWriteRegister(SDNode *N);
+ bool tryInlineAsm(SDNode *N);
- SDNode *SelectInlineAsm(SDNode *N);
+ void SelectConcatVector(SDNode *N);
- SDNode *SelectConcatVector(SDNode *N);
+ bool trySMLAWSMULW(SDNode *N);
+
+ void SelectCMP_SWAP(SDNode *N);
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
@@ -269,7 +266,7 @@ private:
SDNode *createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
// Get the alignment operand for a NEON VLD or VST instruction.
- SDValue GetVLDSTAlign(SDValue Align, SDLoc dl, unsigned NumVecs,
+ SDValue GetVLDSTAlign(SDValue Align, const SDLoc &dl, unsigned NumVecs,
bool is64BitVector);
/// Returns the number of instructions required to materialize the given
@@ -426,11 +423,7 @@ bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
if (OptLevel == CodeGenOpt::None)
return true;
- if (!CheckVMLxHazard)
- return true;
-
- if (!Subtarget->isCortexA7() && !Subtarget->isCortexA8() &&
- !Subtarget->isCortexA9() && !Subtarget->isSwift())
+ if (!Subtarget->hasVMLxHazards())
return true;
if (!N->hasOneUse())
@@ -484,6 +477,7 @@ unsigned ARMDAGToDAGISel::ConstantMaterializationCost(unsigned Val) const {
if (Subtarget->isThumb()) {
if (Val <= 255) return 1; // MOV
if (Subtarget->hasV6T2Ops() && Val <= 0xffff) return 1; // MOVW
+ if (Val <= 510) return 2; // MOV + ADDi8
if (~Val <= 255) return 2; // MOV + MVN
if (ARM_AM::isThumbImmShiftedVal(Val)) return 2; // MOV + LSL
} else {
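The new Thumb1 case covers constants up to 510, which split into a full 8-bit MOV plus an 8-bit ADD; for example 300 = 255 + 45 (the destination register below is illustrative):

    #include <cassert>
    int main() {
      unsigned Val = 300;             // > 255, <= 510
      unsigned MovImm = 255;          // movs r0, #255
      unsigned AddImm = Val - MovImm; // adds r0, #45
      assert(AddImm <= 255 && MovImm + AddImm == Val);
    }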
@@ -548,11 +542,9 @@ bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
unsigned PowerOfTwo = 0;
SDValue NewMulConst;
if (canExtractShiftFromMul(N, 31, PowerOfTwo, NewMulConst)) {
- BaseReg = SDValue(Select(CurDAG->getNode(ISD::MUL, SDLoc(N), MVT::i32,
- N.getOperand(0), NewMulConst)
- .getNode()),
- 0);
+ HandleSDNode Handle(N);
replaceDAGValue(N.getOperand(1), NewMulConst);
+ BaseReg = Handle.getValue();
Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ARM_AM::lsl,
PowerOfTwo),
SDLoc(N), MVT::i32);
@@ -623,6 +615,7 @@ bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
if (N.getOpcode() == ARMISD::Wrapper &&
N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
+ N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
Base = N.getOperand(0);
} else
@@ -803,6 +796,7 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
FI, TLI->getPointerTy(CurDAG->getDataLayout()));
} else if (N.getOpcode() == ARMISD::Wrapper &&
N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
+ N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
Base = N.getOperand(0);
}
@@ -1070,6 +1064,7 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
FI, TLI->getPointerTy(CurDAG->getDataLayout()));
} else if (N.getOpcode() == ARMISD::Wrapper &&
N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
+ N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
Base = N.getOperand(0);
}
@@ -1190,6 +1185,7 @@ ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
return false; // We want to select register offset instead
} else if (N.getOpcode() == ARMISD::Wrapper &&
N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
+ N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
Base = N.getOperand(0);
} else {
@@ -1297,6 +1293,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
if (N.getOpcode() == ARMISD::Wrapper &&
N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress &&
+ N.getOperand(0).getOpcode() != ISD::TargetExternalSymbol &&
N.getOperand(0).getOpcode() != ISD::TargetGlobalTLSAddress) {
Base = N.getOperand(0);
if (Base.getOpcode() == ISD::TargetConstantPool)
@@ -1468,15 +1465,15 @@ bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
//===--------------------------------------------------------------------===//
/// getAL - Returns a ARMCC::AL immediate node.
-static inline SDValue getAL(SelectionDAG *CurDAG, SDLoc dl) {
+static inline SDValue getAL(SelectionDAG *CurDAG, const SDLoc &dl) {
return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, dl, MVT::i32);
}
-SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
+bool ARMDAGToDAGISel::tryARMIndexedLoad(SDNode *N) {
LoadSDNode *LD = cast<LoadSDNode>(N);
ISD::MemIndexedMode AM = LD->getAddressingMode();
if (AM == ISD::UNINDEXED)
- return nullptr;
+ return false;
EVT LoadedVT = LD->getMemoryVT();
SDValue Offset, AMOpc;
@@ -1530,26 +1527,53 @@ SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
SDValue Base = LD->getBasePtr();
SDValue Ops[]= { Base, AMOpc, getAL(CurDAG, SDLoc(N)),
CurDAG->getRegister(0, MVT::i32), Chain };
- return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
- MVT::i32, MVT::Other, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
+ MVT::i32, MVT::Other, Ops));
+ return true;
} else {
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG, SDLoc(N)),
CurDAG->getRegister(0, MVT::i32), Chain };
- return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
- MVT::i32, MVT::Other, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
+ MVT::i32, MVT::Other, Ops));
+ return true;
}
}
- return nullptr;
+ return false;
+}
+
+bool ARMDAGToDAGISel::tryT1IndexedLoad(SDNode *N) {
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ EVT LoadedVT = LD->getMemoryVT();
+ ISD::MemIndexedMode AM = LD->getAddressingMode();
+ if (AM == ISD::UNINDEXED || LD->getExtensionType() != ISD::NON_EXTLOAD ||
+ AM != ISD::POST_INC || LoadedVT.getSimpleVT().SimpleTy != MVT::i32)
+ return false;
+
+ auto *COffs = dyn_cast<ConstantSDNode>(LD->getOffset());
+ if (!COffs || COffs->getZExtValue() != 4)
+ return false;
+
+ // A T1 post-indexed load is just a single register LDM: LDM r0!, {r1}.
+ // However, the LDM encoding is not what the rest of ISel expects a
+ // post-inc load to look like, so we use a pseudo here and switch it to a
+ // tLDMIA_UPD after ISel.
+ SDValue Chain = LD->getChain();
+ SDValue Base = LD->getBasePtr();
+ SDValue Ops[]= { Base, getAL(CurDAG, SDLoc(N)),
+ CurDAG->getRegister(0, MVT::i32), Chain };
+ ReplaceNode(N, CurDAG->getMachineNode(ARM::tLDR_postidx, SDLoc(N), MVT::i32,
+ MVT::i32, MVT::Other, Ops));
+ return true;
}
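The tLDR_postidx pseudo stands in for Thumb1's missing post-indexed LDR: a single-register writeback LDM performs the same load-then-advance-by-4. The C-level semantics being matched (the offset must be exactly 4, as checked above):

    #include <cstdint>
    // ldm r0!, {r1}  ==  load through P, then advance P by one word
    static uint32_t loadPostInc(uint32_t *&P) {
      uint32_t V = *P; // ldm Rn!, {Rt}: the load
      P += 1;          // base writeback: Rn += 4
      return V;
    }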
-SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
+bool ARMDAGToDAGISel::tryT2IndexedLoad(SDNode *N) {
LoadSDNode *LD = cast<LoadSDNode>(N);
ISD::MemIndexedMode AM = LD->getAddressingMode();
if (AM == ISD::UNINDEXED)
- return nullptr;
+ return false;
EVT LoadedVT = LD->getMemoryVT();
bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
@@ -1576,7 +1600,7 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
break;
default:
- return nullptr;
+ return false;
}
Match = true;
}
@@ -1586,11 +1610,12 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
SDValue Base = LD->getBasePtr();
SDValue Ops[]= { Base, Offset, getAL(CurDAG, SDLoc(N)),
CurDAG->getRegister(0, MVT::i32), Chain };
- return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
- MVT::Other, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
+ MVT::Other, Ops));
+ return true;
}
- return nullptr;
+ return false;
}
/// \brief Form a GPRPair pseudo register from a pair of GPR regs.
@@ -1685,7 +1710,7 @@ SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
/// of a NEON VLD or VST instruction. The supported values depend on the
/// number of registers being loaded.
-SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, SDLoc dl,
+SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, const SDLoc &dl,
unsigned NumVecs, bool is64BitVector) {
unsigned NumRegs = NumVecs;
if (!is64BitVector && NumVecs < 3)
@@ -1806,17 +1831,17 @@ static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
return Opc; // If not one we handle, return it unchanged.
}
-SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
- const uint16_t *DOpcodes,
- const uint16_t *QOpcodes0,
- const uint16_t *QOpcodes1) {
+void ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
+ const uint16_t *DOpcodes,
+ const uint16_t *QOpcodes0,
+ const uint16_t *QOpcodes1) {
assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
SDLoc dl(N);
SDValue MemAddr, Align;
unsigned AddrOpIdx = isUpdating ? 1 : 2;
if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
- return nullptr;
+ return;
SDValue Chain = N->getOperand(0);
EVT VT = N->getValueType(0);
@@ -1922,13 +1947,16 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);
- if (NumVecs == 1)
- return VLd;
+ if (NumVecs == 1) {
+ ReplaceNode(N, VLd);
+ return;
+ }
// Extract out the subregisters.
SDValue SuperReg = SDValue(VLd, 0);
- assert(ARM::dsub_7 == ARM::dsub_0+7 &&
- ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
+ static_assert(ARM::dsub_7 == ARM::dsub_0 + 7 &&
+ ARM::qsub_3 == ARM::qsub_0 + 3,
+ "Unexpected subreg numbering");
unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
ReplaceUses(SDValue(N, Vec),
@@ -1936,13 +1964,13 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
if (isUpdating)
ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
- return nullptr;
+ CurDAG->RemoveDeadNode(N);
}
-SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
- const uint16_t *DOpcodes,
- const uint16_t *QOpcodes0,
- const uint16_t *QOpcodes1) {
+void ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
+ const uint16_t *DOpcodes,
+ const uint16_t *QOpcodes0,
+ const uint16_t *QOpcodes1) {
assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
SDLoc dl(N);
@@ -1950,7 +1978,7 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
unsigned AddrOpIdx = isUpdating ? 1 : 2;
unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
- return nullptr;
+ return;
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
@@ -2042,7 +2070,8 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
// Transfer memoperands.
cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);
- return VSt;
+ ReplaceNode(N, VSt);
+ return;
}
// Otherwise, quad registers are stored with two separate instructions,
@@ -2083,13 +2112,13 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
Ops);
cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1);
- return VStB;
+ ReplaceNode(N, VStB);
}
-SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
- bool isUpdating, unsigned NumVecs,
- const uint16_t *DOpcodes,
- const uint16_t *QOpcodes) {
+void ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
+ unsigned NumVecs,
+ const uint16_t *DOpcodes,
+ const uint16_t *QOpcodes) {
assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
SDLoc dl(N);
@@ -2097,7 +2126,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
unsigned AddrOpIdx = isUpdating ? 1 : 2;
unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
- return nullptr;
+ return;
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
@@ -2188,13 +2217,16 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
QOpcodes[OpcodeIndex]);
SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
- if (!IsLoad)
- return VLdLn;
+ if (!IsLoad) {
+ ReplaceNode(N, VLdLn);
+ return;
+ }
// Extract the subregisters.
SuperReg = SDValue(VLdLn, 0);
- assert(ARM::dsub_7 == ARM::dsub_0+7 &&
- ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
+ static_assert(ARM::dsub_7 == ARM::dsub_0 + 7 &&
+ ARM::qsub_3 == ARM::qsub_0 + 3,
+ "Unexpected subreg numbering");
unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
ReplaceUses(SDValue(N, Vec),
@@ -2202,18 +2234,17 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
if (isUpdating)
ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
- return nullptr;
+ CurDAG->RemoveDeadNode(N);
}
-SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
- unsigned NumVecs,
- const uint16_t *Opcodes) {
+void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
+ const uint16_t *Opcodes) {
assert(NumVecs >=2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
SDLoc dl(N);
SDValue MemAddr, Align;
if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
- return nullptr;
+ return;
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
@@ -2277,7 +2308,7 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
SuperReg = SDValue(VLdDup, 0);
// Extract the subregisters.
- assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
+ static_assert(ARM::dsub_7 == ARM::dsub_0 + 7, "Unexpected subreg numbering");
unsigned SubIdx = ARM::dsub_0;
for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
ReplaceUses(SDValue(N, Vec),
@@ -2285,11 +2316,11 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
if (isUpdating)
ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
- return nullptr;
+ CurDAG->RemoveDeadNode(N);
}
-SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
- unsigned Opc) {
+void ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
+ unsigned Opc) {
assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
SDLoc dl(N);
EVT VT = N->getValueType(0);
@@ -2318,13 +2349,12 @@ SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
Ops.push_back(getAL(CurDAG, dl)); // predicate
Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
- return CurDAG->getMachineNode(Opc, dl, VT, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
}
-SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
- bool isSigned) {
+bool ARMDAGToDAGISel::tryV6T2BitfieldExtractOp(SDNode *N, bool isSigned) {
if (!Subtarget->hasV6T2Ops())
- return nullptr;
+ return false;
unsigned Opc = isSigned
? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
@@ -2338,7 +2368,7 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
// The immediate is a mask of the low bits iff imm & (imm+1) == 0
if (And_imm & (And_imm + 1))
- return nullptr;
+ return false;
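+ // Worked example of the check above (illustrative): 0x00FF & 0x0100 == 0,
+ // so 0x00FF is a low-bit mask; 0x00F0 & 0x00F1 == 0x00F0 != 0, so it is not.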
unsigned Srl_imm = 0;
if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
@@ -2358,7 +2388,8 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
SDValue Ops[] = { N->getOperand(0).getOperand(0),
CurDAG->getTargetConstant(LSB, dl, MVT::i32),
getAL(CurDAG, dl), Reg0, Reg0 };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
+ return true;
}
// ARM models shift instructions as MOVsi with shifter operand.
@@ -2368,17 +2399,19 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
MVT::i32);
SDValue Ops[] = { N->getOperand(0).getOperand(0), ShOpc,
getAL(CurDAG, dl), Reg0, Reg0 };
- return CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops);
+ return true;
}
SDValue Ops[] = { N->getOperand(0).getOperand(0),
CurDAG->getTargetConstant(LSB, dl, MVT::i32),
CurDAG->getTargetConstant(Width, dl, MVT::i32),
getAL(CurDAG, dl), Reg0 };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
+ return true;
}
}
- return nullptr;
+ return false;
}
// Otherwise, we're looking for a shift of a shift
@@ -2392,13 +2425,35 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
unsigned Width = 32 - Srl_imm - 1;
int LSB = Srl_imm - Shl_imm;
if (LSB < 0)
- return nullptr;
+ return false;
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { N->getOperand(0).getOperand(0),
CurDAG->getTargetConstant(LSB, dl, MVT::i32),
CurDAG->getTargetConstant(Width, dl, MVT::i32),
getAL(CurDAG, dl), Reg0 };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
+ return true;
+ }
+ }
+
+ // Alternatively, we may be looking for a shift of an AND with a mask operand.
+ if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_imm) &&
+ isShiftedMask_32(And_imm)) {
+ unsigned Srl_imm = 0;
+ unsigned LSB = countTrailingZeros(And_imm);
+ // The shift amount must equal the LSB of the AND mask.
+ if (isInt32Immediate(N->getOperand(1), Srl_imm) && Srl_imm == LSB) {
+ assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
+ unsigned MSB = 31 - countLeadingZeros(And_imm);
+ // Note: The width operand is encoded as width-1.
+ unsigned Width = MSB - LSB;
+ SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
+ SDValue Ops[] = { N->getOperand(0).getOperand(0),
+ CurDAG->getTargetConstant(Srl_imm, dl, MVT::i32),
+ CurDAG->getTargetConstant(Width, dl, MVT::i32),
+ getAL(CurDAG, dl), Reg0 };
+ CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
+ return true;
}
}
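+ // Illustrative sketch of the case above, assuming UBFX Rd, Rn, #lsb, #width
+ // semantics: for
+ //   unsigned f(unsigned x) { return (x & 0x0FF0) >> 4; }
+ // And_imm = 0x0FF0 is a shifted mask (LSB 4, MSB 11) and the shift amount
+ // matches the LSB, so this selects UBFX x, #4, #8 (Width operand 7, since
+ // the width is encoded as width-1).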
@@ -2407,20 +2462,21 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
unsigned LSB = 0;
if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL, LSB) &&
!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRA, LSB))
- return nullptr;
+ return false;
if (LSB + Width > 32)
- return nullptr;
+ return false;
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { N->getOperand(0).getOperand(0),
CurDAG->getTargetConstant(LSB, dl, MVT::i32),
CurDAG->getTargetConstant(Width - 1, dl, MVT::i32),
getAL(CurDAG, dl), Reg0 };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
+ return true;
}
- return nullptr;
+ return false;
}
/// Target-specific DAG combining for ISD::XOR.
@@ -2433,16 +2489,16 @@ SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
/// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
/// ARM instruction selection detects the latter and matches it to
/// ARM::ABS or ARM::t2ABS machine node.
-SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){
+bool ARMDAGToDAGISel::tryABSOp(SDNode *N){
SDValue XORSrc0 = N->getOperand(0);
SDValue XORSrc1 = N->getOperand(1);
EVT VT = N->getValueType(0);
if (Subtarget->isThumb1Only())
- return nullptr;
+ return false;
if (XORSrc0.getOpcode() != ISD::ADD || XORSrc1.getOpcode() != ISD::SRA)
- return nullptr;
+ return false;
SDValue ADDSrc0 = XORSrc0.getOperand(0);
SDValue ADDSrc1 = XORSrc0.getOperand(1);
@@ -2456,57 +2512,214 @@ SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){
XType.isInteger() && SRAConstant != nullptr &&
Size == SRAConstant->getZExtValue()) {
unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
- return CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0);
+ CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0);
+ return true;
+ }
+
+ return false;
+}
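+// Worked example of the identity matched above: with Y = X >> 31 (arithmetic
+// shift), abs(X) == (X + Y) ^ Y. For X = -5: Y = -1, X + Y = -6, and
+// -6 ^ -1 == 5; for X >= 0, Y = 0 and X passes through unchanged.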
+
+static bool SearchSignedMulShort(SDValue SignExt, unsigned *Opc, SDValue &Src1,
+ bool Accumulate) {
+ // For SM*WB, we need some form of sext.
+ // For SM*WT, we need to search for (sra X, 16).
+ // Src1 then gets set to X.
+ if ((SignExt.getOpcode() == ISD::SIGN_EXTEND ||
+ SignExt.getOpcode() == ISD::SIGN_EXTEND_INREG ||
+ SignExt.getOpcode() == ISD::AssertSext) &&
+ SignExt.getValueType() == MVT::i32) {
+
+ *Opc = Accumulate ? ARM::SMLAWB : ARM::SMULWB;
+ Src1 = SignExt.getOperand(0);
+ return true;
}
- return nullptr;
+ if (SignExt.getOpcode() != ISD::SRA)
+ return false;
+
+ ConstantSDNode *SRASrc1 = dyn_cast<ConstantSDNode>(SignExt.getOperand(1));
+ if (!SRASrc1 || SRASrc1->getZExtValue() != 16)
+ return false;
+
+ SDValue Op0 = SignExt.getOperand(0);
+
+ // The sign-extend operand for SM*WB could be generated by an SHL/SRA pair.
+ if (Op0.getOpcode() == ISD::SHL) {
+ SDValue SHL = Op0;
+ ConstantSDNode *SHLSrc1 = dyn_cast<ConstantSDNode>(SHL.getOperand(1));
+ if (!SHLSrc1 || SHLSrc1->getZExtValue() != 16)
+ return false;
+
+ *Opc = Accumulate ? ARM::SMLAWB : ARM::SMULWB;
+ Src1 = Op0.getOperand(0);
+ return true;
+ }
+ *Opc = Accumulate ? ARM::SMLAWT : ARM::SMULWT;
+ Src1 = SignExt.getOperand(0);
+ return true;
}
-SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
+static bool SearchSignedMulLong(SDValue OR, unsigned *Opc, SDValue &Src0,
+ SDValue &Src1, bool Accumulate) {
+ // First we look for:
+ // (add (or (srl ?, 16), (shl ?, 16)))
+ if (OR.getOpcode() != ISD::OR)
+ return false;
+
+ SDValue SRL = OR.getOperand(0);
+ SDValue SHL = OR.getOperand(1);
+
+ if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) {
+ SRL = OR.getOperand(1);
+ SHL = OR.getOperand(0);
+ if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL)
+ return false;
+ }
+
+ ConstantSDNode *SRLSrc1 = dyn_cast<ConstantSDNode>(SRL.getOperand(1));
+ ConstantSDNode *SHLSrc1 = dyn_cast<ConstantSDNode>(SHL.getOperand(1));
+ if (!SRLSrc1 || !SHLSrc1 || SRLSrc1->getZExtValue() != 16 ||
+ SHLSrc1->getZExtValue() != 16)
+ return false;
+
+ // The first operands to the shifts need to be the two results from the
+ // same smul_lohi node.
+ if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) ||
+ SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI)
+ return false;
+
+ SDNode *SMULLOHI = SRL.getOperand(0).getNode();
+ if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) ||
+ SHL.getOperand(0) != SDValue(SMULLOHI, 1))
+ return false;
+
+ // Now we have:
+ // (add (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16)))
+ // For SMLAW[B|T], smul_lohi takes a 32-bit and a 16-bit argument.
+ // For SMLAWB the 16-bit value will be sign-extended somehow.
+ // For SMLAWT only the SRA is required.
+
+ // Check both sides of SMUL_LOHI
+ if (SearchSignedMulShort(SMULLOHI->getOperand(0), Opc, Src1, Accumulate)) {
+ Src0 = SMULLOHI->getOperand(1);
+ } else if (SearchSignedMulShort(SMULLOHI->getOperand(1), Opc, Src1,
+ Accumulate)) {
+ Src0 = SMULLOHI->getOperand(0);
+ } else {
+ return false;
+ }
+ return true;
+}
+
+bool ARMDAGToDAGISel::trySMLAWSMULW(SDNode *N) {
+ SDLoc dl(N);
+ SDValue Src0 = N->getOperand(0);
+ SDValue Src1 = N->getOperand(1);
+ SDValue A, B;
+ unsigned Opc = 0;
+
+ if (N->getOpcode() == ISD::ADD) {
+ if (Src0.getOpcode() != ISD::OR && Src1.getOpcode() != ISD::OR)
+ return false;
+
+ SDValue Acc;
+ if (SearchSignedMulLong(Src0, &Opc, A, B, true)) {
+ Acc = Src1;
+ } else if (SearchSignedMulLong(Src1, &Opc, A, B, true)) {
+ Acc = Src0;
+ } else {
+ return false;
+ }
+ if (Opc == 0)
+ return false;
+
+ SDValue Ops[] = { A, B, Acc, getAL(CurDAG, dl),
+ CurDAG->getRegister(0, MVT::i32) };
+ CurDAG->SelectNodeTo(N, Opc, MVT::i32, MVT::Other, Ops);
+ return true;
+ } else if (N->getOpcode() == ISD::OR &&
+ SearchSignedMulLong(SDValue(N, 0), &Opc, A, B, false)) {
+ if (Opc == 0)
+ return false;
+
+ SDValue Ops[] = { A, B, getAL(CurDAG, dl),
+ CurDAG->getRegister(0, MVT::i32)};
+ CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
+ return true;
+ }
+ return false;
+}
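+// A rough, illustrative sketch of C source that can produce the DAG shapes
+// searched for above, assuming the usual SMULWB semantics
+// Rd = (Rn * sext16(Rm)) >> 16:
+//
+//   int smulwb(int a, short b) {
+//     return (int)(((long long)a * b) >> 16);        // -> SMULWB
+//   }
+//   int smlawb(int a, short b, int acc) {
+//     return (int)(((long long)a * b) >> 16) + acc;  // -> SMLAWB
+//   }
+//
+// On 32-bit ARM the i64 multiply legalizes to smul_lohi, and the >> 16
+// becomes the (or (srl lo, 16), (shl hi, 16)) pattern matched here.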
+
+/// We have special pseudo-instructions for these atomic compare-and-swap
+/// operations.
+void ARMDAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
+ unsigned Opcode;
+ EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();
+ if (MemTy == MVT::i8)
+ Opcode = ARM::CMP_SWAP_8;
+ else if (MemTy == MVT::i16)
+ Opcode = ARM::CMP_SWAP_16;
+ else if (MemTy == MVT::i32)
+ Opcode = ARM::CMP_SWAP_32;
+ else
+ llvm_unreachable("Unknown AtomicCmpSwap type");
+
+ SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
+ N->getOperand(0)};
+ SDNode *CmpSwap = CurDAG->getMachineNode(
+ Opcode, SDLoc(N),
+ CurDAG->getVTList(MVT::i32, MVT::i32, MVT::Other), Ops);
+
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
+ cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);
+
+ ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
+ ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
+ CurDAG->RemoveDeadNode(N);
+}
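+// Rough sketch of what reaches this selector: an IR cmpxchg such as
+//
+//   %r = cmpxchg i32* %p, i32 %old, i32 %new seq_cst seq_cst
+//
+// becomes an ATOMIC_CMP_SWAP node and is matched to CMP_SWAP_32 here; the
+// pseudo is expanded into an LDREX/STREX loop after register allocation.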
+
+void ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
// The only time a CONCAT_VECTORS operation can have legal types is when
// two 64-bit vectors are concatenated to a 128-bit vector.
EVT VT = N->getValueType(0);
if (!VT.is128BitVector() || N->getNumOperands() != 2)
llvm_unreachable("unexpected CONCAT_VECTORS");
- return createDRegPairNode(VT, N->getOperand(0), N->getOperand(1));
+ ReplaceNode(N, createDRegPairNode(VT, N->getOperand(0), N->getOperand(1)));
}
-SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
+void ARMDAGToDAGISel::Select(SDNode *N) {
SDLoc dl(N);
if (N->isMachineOpcode()) {
N->setNodeId(-1);
- return nullptr; // Already selected.
+ return; // Already selected.
}
switch (N->getOpcode()) {
default: break;
- case ISD::WRITE_REGISTER: {
- SDNode *ResNode = SelectWriteRegister(N);
- if (ResNode)
- return ResNode;
+ case ISD::ADD:
+ case ISD::OR:
+ if (trySMLAWSMULW(N))
+ return;
break;
- }
- case ISD::READ_REGISTER: {
- SDNode *ResNode = SelectReadRegister(N);
- if (ResNode)
- return ResNode;
+ case ISD::WRITE_REGISTER:
+ if (tryWriteRegister(N))
+ return;
break;
- }
- case ISD::INLINEASM: {
- SDNode *ResNode = SelectInlineAsm(N);
- if (ResNode)
- return ResNode;
+ case ISD::READ_REGISTER:
+ if (tryReadRegister(N))
+ return;
break;
- }
- case ISD::XOR: {
+ case ISD::INLINEASM:
+ if (tryInlineAsm(N))
+ return;
+ break;
+ case ISD::XOR:
// Select special operations if XOR node forms integer ABS pattern
- SDNode *ResNode = SelectABSOp(N);
- if (ResNode)
- return ResNode;
+ if (tryABSOp(N))
+ return;
// Other cases are autogenerated.
break;
- }
case ISD::Constant: {
unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
// If we can't materialize the constant we need to use a literal pool
@@ -2530,11 +2743,11 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
CurDAG->getRegister(0, MVT::i32),
CurDAG->getEntryNode()
};
- ResNode=CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
- Ops);
+ ResNode = CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
+ Ops);
}
- ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
- return nullptr;
+ ReplaceNode(N, ResNode);
+ return;
}
// Other cases are autogenerated.
@@ -2551,25 +2764,27 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
MachineFrameInfo *MFI = MF->getFrameInfo();
if (MFI->getObjectAlignment(FI) < 4)
MFI->setObjectAlignment(FI, 4);
- return CurDAG->SelectNodeTo(N, ARM::tADDframe, MVT::i32, TFI,
- CurDAG->getTargetConstant(0, dl, MVT::i32));
+ CurDAG->SelectNodeTo(N, ARM::tADDframe, MVT::i32, TFI,
+ CurDAG->getTargetConstant(0, dl, MVT::i32));
+ return;
} else {
unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
ARM::t2ADDri : ARM::ADDri);
SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, dl, MVT::i32),
getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
+ return;
}
}
case ISD::SRL:
- if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
- return I;
+ if (tryV6T2BitfieldExtractOp(N, false))
+ return;
break;
case ISD::SIGN_EXTEND_INREG:
case ISD::SRA:
- if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
- return I;
+ if (tryV6T2BitfieldExtractOp(N, true))
+ return;
break;
case ISD::MUL:
if (Subtarget->isThumb1Only())
@@ -2587,11 +2802,13 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
if (Subtarget->isThumb()) {
SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG, dl), Reg0, Reg0 };
- return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops);
+ return;
} else {
SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG, dl), Reg0,
Reg0 };
- return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops);
+ return;
}
}
if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
@@ -2604,19 +2821,63 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
if (Subtarget->isThumb()) {
SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG, dl), Reg0, Reg0 };
- return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops);
+ return;
} else {
SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG, dl), Reg0,
Reg0 };
- return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops);
+ return;
}
}
}
break;
case ISD::AND: {
// Check for unsigned bitfield extract
- if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
- return I;
+ if (tryV6T2BitfieldExtractOp(N, false))
+ return;
+
+ // If an immediate is used in an AND node, it is possible that the immediate
+ // can be more optimally materialized when negated. If this is the case we
+ // can negate the immediate and use a BIC instead.
+ auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (N1C && N1C->hasOneUse() && Subtarget->isThumb()) {
+ uint32_t Imm = (uint32_t) N1C->getZExtValue();
+
+ // In Thumb2 mode, an AND can take a 12-bit immediate. If this
+ // immediate can be negated and fit in the immediate operand of
+ // a t2BIC, don't do any manual transform here as this can be
+ // handled by the generic ISel machinery.
+ bool PreferImmediateEncoding =
+ Subtarget->hasThumb2() && (is_t2_so_imm(Imm) || is_t2_so_imm_not(Imm));
+ if (!PreferImmediateEncoding &&
+ ConstantMaterializationCost(Imm) >
+ ConstantMaterializationCost(~Imm)) {
+ // The current immediate costs more to materialize than a negated
+ // immediate, so negate the immediate and use a BIC.
+ SDValue NewImm =
+ CurDAG->getConstant(~N1C->getZExtValue(), dl, MVT::i32);
+ // If the new constant didn't exist before, reposition it in the topological
+ // ordering so it is just before N. Otherwise, don't touch its location.
+ if (NewImm->getNodeId() == -1)
+ CurDAG->RepositionNode(N->getIterator(), NewImm.getNode());
+
+ if (!Subtarget->hasThumb2()) {
+ SDValue Ops[] = {CurDAG->getRegister(ARM::CPSR, MVT::i32),
+ N->getOperand(0), NewImm, getAL(CurDAG, dl),
+ CurDAG->getRegister(0, MVT::i32)};
+ ReplaceNode(N, CurDAG->getMachineNode(ARM::tBIC, dl, MVT::i32, Ops));
+ return;
+ } else {
+ SDValue Ops[] = {N->getOperand(0), NewImm, getAL(CurDAG, dl),
+ CurDAG->getRegister(0, MVT::i32),
+ CurDAG->getRegister(0, MVT::i32)};
+ ReplaceNode(N,
+ CurDAG->getMachineNode(ARM::t2BICrr, dl, MVT::i32, Ops));
+ return;
+ }
+ }
+ }
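+ // Illustrative example of the transform above: in Thumb1, the mask in
+ //   x & 0xFFFFFF00
+ // takes two instructions to materialize (MOVS #0xFF; MVNS), while its
+ // complement 0xFF takes one, so the AND is selected as
+ //   MOVS rC, #0xFF ; BICS rX, rC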
// (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits
// of c1 are 0xffff, and lower 16-bits of c2 are 0. That is, the top 16-bits
@@ -2632,7 +2893,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
if (!Opc)
break;
SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
+ N1C = dyn_cast<ConstantSDNode>(N1);
if (!N1C)
break;
if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
@@ -2649,29 +2910,34 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
dl, MVT::i32);
SDValue Ops[] = { N0.getOperand(0), Imm16,
getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(Opc, dl, VT, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
+ return;
}
}
break;
}
case ARMISD::VMOVRRD:
- return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
- N->getOperand(0), getAL(CurDAG, dl),
- CurDAG->getRegister(0, MVT::i32));
+ ReplaceNode(N, CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
+ N->getOperand(0), getAL(CurDAG, dl),
+ CurDAG->getRegister(0, MVT::i32)));
+ return;
case ISD::UMUL_LOHI: {
if (Subtarget->isThumb1Only())
break;
if (Subtarget->isThumb()) {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops);
+ ReplaceNode(
+ N, CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops));
+ return;
} else {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
- ARM::UMULL : ARM::UMULLv5,
- dl, MVT::i32, MVT::i32, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(
+ Subtarget->hasV6Ops() ? ARM::UMULL : ARM::UMULLv5, dl,
+ MVT::i32, MVT::i32, Ops));
+ return;
}
}
case ISD::SMUL_LOHI: {
@@ -2680,30 +2946,76 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
if (Subtarget->isThumb()) {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops);
+ ReplaceNode(
+ N, CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops));
+ return;
} else {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
- ARM::SMULL : ARM::SMULLv5,
- dl, MVT::i32, MVT::i32, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(
+ Subtarget->hasV6Ops() ? ARM::SMULL : ARM::SMULLv5, dl,
+ MVT::i32, MVT::i32, Ops));
+ return;
}
}
+ case ARMISD::UMAAL: {
+ unsigned Opc = Subtarget->isThumb() ? ARM::t2UMAAL : ARM::UMAAL;
+ SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
+ N->getOperand(2), N->getOperand(3),
+ getAL(CurDAG, dl),
+ CurDAG->getRegister(0, MVT::i32) };
+ ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, MVT::i32, MVT::i32, Ops));
+ return;
+ }
case ARMISD::UMLAL:{
+ // UMAAL is similar to UMLAL but it adds two 32-bit values to the
+ // 64-bit multiplication result.
+ if (Subtarget->hasV6Ops() && N->getOperand(2).getOpcode() == ARMISD::ADDC &&
+ N->getOperand(3).getOpcode() == ARMISD::ADDE) {
+
+ SDValue Addc = N->getOperand(2);
+ SDValue Adde = N->getOperand(3);
+
+ if (Adde.getOperand(2).getNode() == Addc.getNode()) {
+
+ ConstantSDNode *Op0 = dyn_cast<ConstantSDNode>(Adde.getOperand(0));
+ ConstantSDNode *Op1 = dyn_cast<ConstantSDNode>(Adde.getOperand(1));
+
+ if (Op0 && Op1 && Op0->getZExtValue() == 0 && Op1->getZExtValue() == 0) {
+ // Select UMAAL instead: UMAAL RdLo, RdHi, Rn, Rm
+ // RdLo = one operand to be added, lower 32-bits of res
+ // RdHi = other operand to be added, upper 32-bits of res
+ // Rn = first multiply operand
+ // Rm = second multiply operand
+ SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
+ Addc.getOperand(0), Addc.getOperand(1),
+ getAL(CurDAG, dl),
+ CurDAG->getRegister(0, MVT::i32) };
+ unsigned opc = Subtarget->isThumb() ? ARM::t2UMAAL : ARM::UMAAL;
+ CurDAG->SelectNodeTo(N, opc, MVT::i32, MVT::i32, Ops);
+ return;
+ }
+ }
+ }
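+ // For reference, a sketch of the UMAAL semantics assumed above:
+ //   RdHi:RdLo = Rn * Rm + RdLo + RdHi
+ // i.e. the C idiom
+ //   uint64_t r = (uint64_t)a * (uint64_t)b + c + d;  // c, d: uint32_t
+ // where the two 32-bit addends are the ADDC/ADDE pair matched above.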
+
if (Subtarget->isThumb()) {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
N->getOperand(3), getAL(CurDAG, dl),
CurDAG->getRegister(0, MVT::i32)};
- return CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops);
+ ReplaceNode(
+ N, CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops));
+ return;
}else{
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
N->getOperand(3), getAL(CurDAG, dl),
CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
- ARM::UMLAL : ARM::UMLALv5,
- dl, MVT::i32, MVT::i32, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(
+ Subtarget->hasV6Ops() ? ARM::UMLAL : ARM::UMLALv5, dl,
+ MVT::i32, MVT::i32, Ops));
+ return;
}
}
case ARMISD::SMLAL:{
@@ -2711,25 +3023,29 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
N->getOperand(3), getAL(CurDAG, dl),
CurDAG->getRegister(0, MVT::i32)};
- return CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops);
+ ReplaceNode(
+ N, CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops));
+ return;
}else{
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
N->getOperand(3), getAL(CurDAG, dl),
CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
- ARM::SMLAL : ARM::SMLALv5,
- dl, MVT::i32, MVT::i32, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(
+ Subtarget->hasV6Ops() ? ARM::SMLAL : ARM::SMLALv5, dl,
+ MVT::i32, MVT::i32, Ops));
+ return;
}
}
case ISD::LOAD: {
- SDNode *ResNode = nullptr;
- if (Subtarget->isThumb() && Subtarget->hasThumb2())
- ResNode = SelectT2IndexedLoad(N);
- else
- ResNode = SelectARMIndexedLoad(N);
- if (ResNode)
- return ResNode;
+ if (Subtarget->isThumb() && Subtarget->hasThumb2()) {
+ if (tryT2IndexedLoad(N))
+ return;
+ } else if (Subtarget->isThumb()) {
+ if (tryT1IndexedLoad(N))
+ return;
+ } else if (tryARMIndexedLoad(N))
+ return;
// Other cases are autogenerated.
break;
}
@@ -2770,13 +3086,14 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
}
ReplaceUses(SDValue(N, 0),
SDValue(Chain.getNode(), Chain.getResNo()));
- return nullptr;
+ CurDAG->RemoveDeadNode(N);
+ return;
}
case ARMISD::VZIP: {
unsigned Opc = 0;
EVT VT = N->getValueType(0);
switch (VT.getSimpleVT().SimpleTy) {
- default: return nullptr;
+ default: return;
case MVT::v8i8: Opc = ARM::VZIPd8; break;
case MVT::v4i16: Opc = ARM::VZIPd16; break;
case MVT::v2f32:
@@ -2790,13 +3107,14 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue Pred = getAL(CurDAG, dl);
SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
- return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, VT, Ops));
+ return;
}
case ARMISD::VUZP: {
unsigned Opc = 0;
EVT VT = N->getValueType(0);
switch (VT.getSimpleVT().SimpleTy) {
- default: return nullptr;
+ default: return;
case MVT::v8i8: Opc = ARM::VUZPd8; break;
case MVT::v4i16: Opc = ARM::VUZPd16; break;
case MVT::v2f32:
@@ -2810,13 +3128,14 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue Pred = getAL(CurDAG, dl);
SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
- return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, VT, Ops));
+ return;
}
case ARMISD::VTRN: {
unsigned Opc = 0;
EVT VT = N->getValueType(0);
switch (VT.getSimpleVT().SimpleTy) {
- default: return nullptr;
+ default: return;
case MVT::v8i8: Opc = ARM::VTRNd8; break;
case MVT::v4i16: Opc = ARM::VTRNd16; break;
case MVT::v2f32:
@@ -2829,7 +3148,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue Pred = getAL(CurDAG, dl);
SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
- return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, VT, Ops));
+ return;
}
case ARMISD::BUILD_VECTOR: {
EVT VecVT = N->getValueType(0);
@@ -2837,55 +3157,68 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
unsigned NumElts = VecVT.getVectorNumElements();
if (EltVT == MVT::f64) {
assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
- return createDRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
+ ReplaceNode(
+ N, createDRegPairNode(VecVT, N->getOperand(0), N->getOperand(1)));
+ return;
}
assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
- if (NumElts == 2)
- return createSRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
+ if (NumElts == 2) {
+ ReplaceNode(
+ N, createSRegPairNode(VecVT, N->getOperand(0), N->getOperand(1)));
+ return;
+ }
assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
- return createQuadSRegsNode(VecVT, N->getOperand(0), N->getOperand(1),
- N->getOperand(2), N->getOperand(3));
+ ReplaceNode(N,
+ createQuadSRegsNode(VecVT, N->getOperand(0), N->getOperand(1),
+ N->getOperand(2), N->getOperand(3)));
+ return;
}
case ARMISD::VLD2DUP: {
static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
ARM::VLD2DUPd32 };
- return SelectVLDDup(N, false, 2, Opcodes);
+ SelectVLDDup(N, false, 2, Opcodes);
+ return;
}
case ARMISD::VLD3DUP: {
static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
ARM::VLD3DUPd16Pseudo,
ARM::VLD3DUPd32Pseudo };
- return SelectVLDDup(N, false, 3, Opcodes);
+ SelectVLDDup(N, false, 3, Opcodes);
+ return;
}
case ARMISD::VLD4DUP: {
static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
ARM::VLD4DUPd16Pseudo,
ARM::VLD4DUPd32Pseudo };
- return SelectVLDDup(N, false, 4, Opcodes);
+ SelectVLDDup(N, false, 4, Opcodes);
+ return;
}
case ARMISD::VLD2DUP_UPD: {
static const uint16_t Opcodes[] = { ARM::VLD2DUPd8wb_fixed,
ARM::VLD2DUPd16wb_fixed,
ARM::VLD2DUPd32wb_fixed };
- return SelectVLDDup(N, true, 2, Opcodes);
+ SelectVLDDup(N, true, 2, Opcodes);
+ return;
}
case ARMISD::VLD3DUP_UPD: {
static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
ARM::VLD3DUPd16Pseudo_UPD,
ARM::VLD3DUPd32Pseudo_UPD };
- return SelectVLDDup(N, true, 3, Opcodes);
+ SelectVLDDup(N, true, 3, Opcodes);
+ return;
}
case ARMISD::VLD4DUP_UPD: {
static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
ARM::VLD4DUPd16Pseudo_UPD,
ARM::VLD4DUPd32Pseudo_UPD };
- return SelectVLDDup(N, true, 4, Opcodes);
+ SelectVLDDup(N, true, 4, Opcodes);
+ return;
}
case ARMISD::VLD1_UPD: {
@@ -2897,7 +3230,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VLD1q16wb_fixed,
ARM::VLD1q32wb_fixed,
ARM::VLD1q64wb_fixed };
- return SelectVLD(N, true, 1, DOpcodes, QOpcodes, nullptr);
+ SelectVLD(N, true, 1, DOpcodes, QOpcodes, nullptr);
+ return;
}
case ARMISD::VLD2_UPD: {
@@ -2908,7 +3242,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
static const uint16_t QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed,
ARM::VLD2q16PseudoWB_fixed,
ARM::VLD2q32PseudoWB_fixed };
- return SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr);
+ SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr);
+ return;
}
case ARMISD::VLD3_UPD: {
@@ -2922,7 +3257,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
ARM::VLD3q16oddPseudo_UPD,
ARM::VLD3q32oddPseudo_UPD };
- return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
+ SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
+ return;
}
case ARMISD::VLD4_UPD: {
@@ -2936,7 +3272,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
ARM::VLD4q16oddPseudo_UPD,
ARM::VLD4q32oddPseudo_UPD };
- return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
+ SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
+ return;
}
case ARMISD::VLD2LN_UPD: {
@@ -2945,7 +3282,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VLD2LNd32Pseudo_UPD };
static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
ARM::VLD2LNq32Pseudo_UPD };
- return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
+ SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
+ return;
}
case ARMISD::VLD3LN_UPD: {
@@ -2954,7 +3292,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VLD3LNd32Pseudo_UPD };
static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
ARM::VLD3LNq32Pseudo_UPD };
- return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
+ SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
+ return;
}
case ARMISD::VLD4LN_UPD: {
@@ -2963,7 +3302,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VLD4LNd32Pseudo_UPD };
static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
ARM::VLD4LNq32Pseudo_UPD };
- return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
+ SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
+ return;
}
case ARMISD::VST1_UPD: {
@@ -2975,7 +3315,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VST1q16wb_fixed,
ARM::VST1q32wb_fixed,
ARM::VST1q64wb_fixed };
- return SelectVST(N, true, 1, DOpcodes, QOpcodes, nullptr);
+ SelectVST(N, true, 1, DOpcodes, QOpcodes, nullptr);
+ return;
}
case ARMISD::VST2_UPD: {
@@ -2986,7 +3327,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
static const uint16_t QOpcodes[] = { ARM::VST2q8PseudoWB_fixed,
ARM::VST2q16PseudoWB_fixed,
ARM::VST2q32PseudoWB_fixed };
- return SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr);
+ SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr);
+ return;
}
case ARMISD::VST3_UPD: {
@@ -3000,7 +3342,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
ARM::VST3q16oddPseudo_UPD,
ARM::VST3q32oddPseudo_UPD };
- return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
+ SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
+ return;
}
case ARMISD::VST4_UPD: {
@@ -3014,7 +3357,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
ARM::VST4q16oddPseudo_UPD,
ARM::VST4q32oddPseudo_UPD };
- return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
+ SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
+ return;
}
case ARMISD::VST2LN_UPD: {
@@ -3023,7 +3367,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VST2LNd32Pseudo_UPD };
static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
ARM::VST2LNq32Pseudo_UPD };
- return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
+ SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
+ return;
}
case ARMISD::VST3LN_UPD: {
@@ -3032,7 +3377,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VST3LNd32Pseudo_UPD };
static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
ARM::VST3LNq32Pseudo_UPD };
- return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
+ SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
+ return;
}
case ARMISD::VST4LN_UPD: {
@@ -3041,7 +3387,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VST4LNd32Pseudo_UPD };
static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
ARM::VST4LNq32Pseudo_UPD };
- return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
+ SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
+ return;
}
case ISD::INTRINSIC_VOID:
@@ -3051,12 +3398,44 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
default:
break;
+ case Intrinsic::arm_mrrc:
+ case Intrinsic::arm_mrrc2: {
+ SDLoc dl(N);
+ SDValue Chain = N->getOperand(0);
+ unsigned Opc;
+
+ if (Subtarget->isThumb())
+ Opc = (IntNo == Intrinsic::arm_mrrc ? ARM::t2MRRC : ARM::t2MRRC2);
+ else
+ Opc = (IntNo == Intrinsic::arm_mrrc ? ARM::MRRC : ARM::MRRC2);
+
+ SmallVector<SDValue, 5> Ops;
+ Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(), dl)); /* coproc */
+ Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(3))->getZExtValue(), dl)); /* opc */
+ Ops.push_back(getI32Imm(cast<ConstantSDNode>(N->getOperand(4))->getZExtValue(), dl)); /* CRm */
+
+ // The mrrc2 instruction in ARM doesn't allow predicates; the top 4 bits
+ // of the encoded instruction are always '1111'. Assembly language does
+ // allow specifying AL as a predicate to mrrc2, but it makes no difference
+ // to the encoded instruction.
+ if (Opc != ARM::MRRC2) {
+ Ops.push_back(getAL(CurDAG, dl));
+ Ops.push_back(CurDAG->getRegister(0, MVT::i32));
+ }
+
+ Ops.push_back(Chain);
+
+ // Writes to two registers.
+ const EVT RetType[] = {MVT::i32, MVT::i32, MVT::Other};
+
+ ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, RetType, Ops));
+ return;
+ }
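+ // A sketch of a use that can reach here (ACLE-style builtin; treat the
+ // exact name and signature as an assumption):
+ //   uint64_t v = __builtin_arm_mrrc(15, 0, 14); // p15, opc1 0, CRm c14
+ // which lowers to @llvm.arm.mrrc and is matched to MRRC/t2MRRC above.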
case Intrinsic::arm_ldaexd:
case Intrinsic::arm_ldrexd: {
SDLoc dl(N);
SDValue Chain = N->getOperand(0);
SDValue MemAddr = N->getOperand(2);
- bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
+ bool isThumb = Subtarget->isThumb() && Subtarget->hasV8MBaselineOps();
bool IsAcquire = IntNo == Intrinsic::arm_ldaexd;
unsigned NewOpc = isThumb ? (IsAcquire ? ARM::t2LDAEXD : ARM::t2LDREXD)
@@ -3072,11 +3451,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ResTys.push_back(MVT::Other);
// Place arguments in the right order.
- SmallVector<SDValue, 7> Ops;
- Ops.push_back(MemAddr);
- Ops.push_back(getAL(CurDAG, dl));
- Ops.push_back(CurDAG->getRegister(0, MVT::i32));
- Ops.push_back(Chain);
+ SDValue Ops[] = {MemAddr, getAL(CurDAG, dl),
+ CurDAG->getRegister(0, MVT::i32), Chain};
SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
// Transfer memoperands.
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
@@ -3112,7 +3488,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ReplaceUses(SDValue(N, 1), Result);
}
ReplaceUses(SDValue(N, 2), OutChain);
- return nullptr;
+ CurDAG->RemoveDeadNode(N);
+ return;
}
case Intrinsic::arm_stlexd:
case Intrinsic::arm_strexd: {
@@ -3150,7 +3527,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
- return St;
+ ReplaceNode(N, St);
+ return;
}
case Intrinsic::arm_neon_vld1: {
@@ -3158,7 +3536,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VLD1d32, ARM::VLD1d64 };
static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
ARM::VLD1q32, ARM::VLD1q64};
- return SelectVLD(N, false, 1, DOpcodes, QOpcodes, nullptr);
+ SelectVLD(N, false, 1, DOpcodes, QOpcodes, nullptr);
+ return;
}
case Intrinsic::arm_neon_vld2: {
@@ -3166,7 +3545,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VLD2d32, ARM::VLD1q64 };
static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
ARM::VLD2q32Pseudo };
- return SelectVLD(N, false, 2, DOpcodes, QOpcodes, nullptr);
+ SelectVLD(N, false, 2, DOpcodes, QOpcodes, nullptr);
+ return;
}
case Intrinsic::arm_neon_vld3: {
@@ -3180,7 +3560,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo,
ARM::VLD3q16oddPseudo,
ARM::VLD3q32oddPseudo };
- return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
+ SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
+ return;
}
case Intrinsic::arm_neon_vld4: {
@@ -3194,7 +3575,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo,
ARM::VLD4q16oddPseudo,
ARM::VLD4q32oddPseudo };
- return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
+ SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
+ return;
}
case Intrinsic::arm_neon_vld2lane: {
@@ -3203,7 +3585,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VLD2LNd32Pseudo };
static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo,
ARM::VLD2LNq32Pseudo };
- return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
+ SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
+ return;
}
case Intrinsic::arm_neon_vld3lane: {
@@ -3212,7 +3595,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VLD3LNd32Pseudo };
static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo,
ARM::VLD3LNq32Pseudo };
- return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
+ SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
+ return;
}
case Intrinsic::arm_neon_vld4lane: {
@@ -3221,7 +3605,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VLD4LNd32Pseudo };
static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo,
ARM::VLD4LNq32Pseudo };
- return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
+ SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
+ return;
}
case Intrinsic::arm_neon_vst1: {
@@ -3229,15 +3614,17 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VST1d32, ARM::VST1d64 };
static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
ARM::VST1q32, ARM::VST1q64 };
- return SelectVST(N, false, 1, DOpcodes, QOpcodes, nullptr);
+ SelectVST(N, false, 1, DOpcodes, QOpcodes, nullptr);
+ return;
}
case Intrinsic::arm_neon_vst2: {
static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
ARM::VST2d32, ARM::VST1q64 };
- static uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
- ARM::VST2q32Pseudo };
- return SelectVST(N, false, 2, DOpcodes, QOpcodes, nullptr);
+ static const uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
+ ARM::VST2q32Pseudo };
+ SelectVST(N, false, 2, DOpcodes, QOpcodes, nullptr);
+ return;
}
case Intrinsic::arm_neon_vst3: {
@@ -3251,7 +3638,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo,
ARM::VST3q16oddPseudo,
ARM::VST3q32oddPseudo };
- return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
+ SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
+ return;
}
case Intrinsic::arm_neon_vst4: {
@@ -3265,7 +3653,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo,
ARM::VST4q16oddPseudo,
ARM::VST4q32oddPseudo };
- return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
+ SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
+ return;
}
case Intrinsic::arm_neon_vst2lane: {
@@ -3274,7 +3663,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VST2LNd32Pseudo };
static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo,
ARM::VST2LNq32Pseudo };
- return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
+ SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
+ return;
}
case Intrinsic::arm_neon_vst3lane: {
@@ -3283,7 +3673,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VST3LNd32Pseudo };
static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo,
ARM::VST3LNq32Pseudo };
- return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
+ SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
+ return;
}
case Intrinsic::arm_neon_vst4lane: {
@@ -3292,7 +3683,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
ARM::VST4LNd32Pseudo };
static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo,
ARM::VST4LNq32Pseudo };
- return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
+ SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
+ return;
}
}
break;
@@ -3305,18 +3697,24 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
break;
case Intrinsic::arm_neon_vtbl2:
- return SelectVTBL(N, false, 2, ARM::VTBL2);
+ SelectVTBL(N, false, 2, ARM::VTBL2);
+ return;
case Intrinsic::arm_neon_vtbl3:
- return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
+ SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
+ return;
case Intrinsic::arm_neon_vtbl4:
- return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
+ SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
+ return;
case Intrinsic::arm_neon_vtbx2:
- return SelectVTBL(N, true, 2, ARM::VTBX2);
+ SelectVTBL(N, true, 2, ARM::VTBX2);
+ return;
case Intrinsic::arm_neon_vtbx3:
- return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
+ SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
+ return;
case Intrinsic::arm_neon_vtbx4:
- return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
+ SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
+ return;
}
break;
}
@@ -3324,13 +3722,11 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case ARMISD::VTBL1: {
SDLoc dl(N);
EVT VT = N->getValueType(0);
- SmallVector<SDValue, 6> Ops;
-
- Ops.push_back(N->getOperand(0));
- Ops.push_back(N->getOperand(1));
- Ops.push_back(getAL(CurDAG, dl)); // Predicate
- Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
- return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops);
+ SDValue Ops[] = {N->getOperand(0), N->getOperand(1),
+ getAL(CurDAG, dl), // Predicate
+ CurDAG->getRegister(0, MVT::i32)}; // Predicate Register
+ ReplaceNode(N, CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops));
+ return;
}
case ARMISD::VTBL2: {
SDLoc dl(N);
@@ -3341,19 +3737,22 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue V1 = N->getOperand(1);
SDValue RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
- SmallVector<SDValue, 6> Ops;
- Ops.push_back(RegSeq);
- Ops.push_back(N->getOperand(2));
- Ops.push_back(getAL(CurDAG, dl)); // Predicate
- Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
- return CurDAG->getMachineNode(ARM::VTBL2, dl, VT, Ops);
+ SDValue Ops[] = {RegSeq, N->getOperand(2), getAL(CurDAG, dl), // Predicate
+ CurDAG->getRegister(0, MVT::i32)}; // Predicate Register
+ ReplaceNode(N, CurDAG->getMachineNode(ARM::VTBL2, dl, VT, Ops));
+ return;
}
case ISD::CONCAT_VECTORS:
- return SelectConcatVector(N);
+ SelectConcatVector(N);
+ return;
+
+ case ISD::ATOMIC_CMP_SWAP:
+ SelectCMP_SWAP(N);
+ return;
}
- return SelectCode(N);
+ SelectCode(N);
}
// Inspect a register string of the form
@@ -3362,8 +3761,9 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
// and obtain the integer operands from them, adding these operands to the
// provided vector.
static void getIntOperandsFromRegisterString(StringRef RegString,
- SelectionDAG *CurDAG, SDLoc DL,
- std::vector<SDValue>& Ops) {
+ SelectionDAG *CurDAG,
+ const SDLoc &DL,
+ std::vector<SDValue> &Ops) {
SmallVector<StringRef, 5> Fields;
RegString.split(Fields, ':');
@@ -3444,6 +3844,9 @@ static inline int getMClassRegisterSYSmValueMask(StringRef RegString) {
.Case("basepri_max", 0x12)
.Case("faultmask", 0x13)
.Case("control", 0x14)
+ .Case("msplim", 0x0a)
+ .Case("psplim", 0x0b)
+ .Case("sp", 0x18)
.Default(-1);
}
@@ -3473,11 +3876,27 @@ static int getMClassRegisterMask(StringRef Reg, StringRef Flags, bool IsRead,
if (!Subtarget->hasV7Ops() && SYSmvalue >= 0x11 && SYSmvalue <= 0x13)
return -1;
+ if (Subtarget->has8MSecExt() && Flags.lower() == "ns") {
+ Flags = "";
+ SYSmvalue |= 0x80;
+ }
+
+ if (!Subtarget->has8MSecExt() &&
+ (SYSmvalue == 0xa || SYSmvalue == 0xb || SYSmvalue > 0x14))
+ return -1;
+
+ if (!Subtarget->hasV8MMainlineOps() &&
+ (SYSmvalue == 0x8a || SYSmvalue == 0x8b || SYSmvalue == 0x91 ||
+ SYSmvalue == 0x93))
+ return -1;
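+ // Illustrative values under these rules: with the 8-M security extension,
+ // "msp_ns" maps to 0x08 | 0x80 = 0x88 (MSP's SYSm value sits in the table's
+ // unshown entries); without it, "msplim" (0x0a) is rejected here.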
+
// If it was a read then we won't be expecting flags and so at this point
// we can return the mask.
if (IsRead) {
- assert (Flags.empty() && "Unexpected flags for reading M class register.");
- return SYSmvalue;
+ if (Flags.empty())
+ return SYSmvalue;
+ else
+ return -1;
}
// We know we are now handling a write, so we need to get the mask for the flags.
@@ -3563,7 +3982,7 @@ static int getARClassRegisterMask(StringRef Reg, StringRef Flags) {
// Lower the read_register intrinsic to ARM specific DAG nodes
// using the supplied metadata string to select the instruction node to use
// and the registers/masks to construct as operands for the node.
-SDNode *ARMDAGToDAGISel::SelectReadRegister(SDNode *N){
+bool ARMDAGToDAGISel::tryReadRegister(SDNode *N){
const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
bool IsThumb2 = Subtarget->isThumb2();
@@ -3592,7 +4011,8 @@ SDNode *ARMDAGToDAGISel::SelectReadRegister(SDNode *N){
Ops.push_back(getAL(CurDAG, DL));
Ops.push_back(CurDAG->getRegister(0, MVT::i32));
Ops.push_back(N->getOperand(0));
- return CurDAG->getMachineNode(Opcode, DL, ResTypes, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(Opcode, DL, ResTypes, Ops));
+ return true;
}
std::string SpecialReg = RegString->getString().lower();
@@ -3602,8 +4022,10 @@ SDNode *ARMDAGToDAGISel::SelectReadRegister(SDNode *N){
Ops = { CurDAG->getTargetConstant(BankedReg, DL, MVT::i32),
getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
N->getOperand(0) };
- return CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRSbanked : ARM::MRSbanked,
- DL, MVT::i32, MVT::Other, Ops);
+ ReplaceNode(
+ N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRSbanked : ARM::MRSbanked,
+ DL, MVT::i32, MVT::Other, Ops));
+ return true;
}
// The VFP registers are read by creating SelectionDAG nodes with opcodes
@@ -3623,27 +4045,37 @@ SDNode *ARMDAGToDAGISel::SelectReadRegister(SDNode *N){
// If an opcode was found then we can lower the read to a VFP instruction.
if (Opcode) {
if (!Subtarget->hasVFP2())
- return nullptr;
+ return false;
if (Opcode == ARM::VMRS_MVFR2 && !Subtarget->hasFPARMv8())
- return nullptr;
+ return false;
Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
N->getOperand(0) };
- return CurDAG->getMachineNode(Opcode, DL, MVT::i32, MVT::Other, Ops);
+ ReplaceNode(N,
+ CurDAG->getMachineNode(Opcode, DL, MVT::i32, MVT::Other, Ops));
+ return true;
}
// If the target is M Class then we need to validate that the register string
// is an acceptable value, so check that a mask can be constructed from the
// string.
if (Subtarget->isMClass()) {
- int SYSmValue = getMClassRegisterMask(SpecialReg, "", true, Subtarget);
+ StringRef Flags = "", Reg = SpecialReg;
+ if (Reg.endswith("_ns")) {
+ Flags = "ns";
+ Reg = Reg.drop_back(3);
+ }
+
+ int SYSmValue = getMClassRegisterMask(Reg, Flags, true, Subtarget);
if (SYSmValue == -1)
- return nullptr;
+ return false;
SDValue Ops[] = { CurDAG->getTargetConstant(SYSmValue, DL, MVT::i32),
getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
N->getOperand(0) };
- return CurDAG->getMachineNode(ARM::t2MRS_M, DL, MVT::i32, MVT::Other, Ops);
+ ReplaceNode(
+ N, CurDAG->getMachineNode(ARM::t2MRS_M, DL, MVT::i32, MVT::Other, Ops));
+ return true;
}
// Here we know the target is not M Class so we need to check if it is one
@@ -3651,24 +4083,27 @@ SDNode *ARMDAGToDAGISel::SelectReadRegister(SDNode *N){
if (SpecialReg == "apsr" || SpecialReg == "cpsr") {
Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
N->getOperand(0) };
- return CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRS_AR : ARM::MRS, DL,
- MVT::i32, MVT::Other, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRS_AR : ARM::MRS,
+ DL, MVT::i32, MVT::Other, Ops));
+ return true;
}
if (SpecialReg == "spsr") {
Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
N->getOperand(0) };
- return CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRSsys_AR : ARM::MRSsys,
- DL, MVT::i32, MVT::Other, Ops);
+ ReplaceNode(
+ N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRSsys_AR : ARM::MRSsys, DL,
+ MVT::i32, MVT::Other, Ops));
+ return true;
}
- return nullptr;
+ return false;
}
// Lower the write_register intrinsic to ARM specific DAG nodes
// using the supplied metadata string to select the instruction node to use
// and the registers/masks to use in the nodes
-SDNode *ARMDAGToDAGISel::SelectWriteRegister(SDNode *N){
+bool ARMDAGToDAGISel::tryWriteRegister(SDNode *N){
const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
bool IsThumb2 = Subtarget->isThumb2();
@@ -3698,7 +4133,8 @@ SDNode *ARMDAGToDAGISel::SelectWriteRegister(SDNode *N){
Ops.push_back(CurDAG->getRegister(0, MVT::i32));
Ops.push_back(N->getOperand(0));
- return CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
+ return true;
}
std::string SpecialReg = RegString->getString().lower();
@@ -3707,8 +4143,10 @@ SDNode *ARMDAGToDAGISel::SelectWriteRegister(SDNode *N){
Ops = { CurDAG->getTargetConstant(BankedReg, DL, MVT::i32), N->getOperand(2),
getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
N->getOperand(0) };
- return CurDAG->getMachineNode(IsThumb2 ? ARM::t2MSRbanked : ARM::MSRbanked,
- DL, MVT::Other, Ops);
+ ReplaceNode(
+ N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MSRbanked : ARM::MSRbanked,
+ DL, MVT::Other, Ops));
+ return true;
}
// The VFP registers are written to by creating SelectionDAG nodes with
@@ -3724,16 +4162,17 @@ SDNode *ARMDAGToDAGISel::SelectWriteRegister(SDNode *N){
if (Opcode) {
if (!Subtarget->hasVFP2())
- return nullptr;
+ return false;
Ops = { N->getOperand(2), getAL(CurDAG, DL),
CurDAG->getRegister(0, MVT::i32), N->getOperand(0) };
- return CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
+ return true;
}
- SmallVector<StringRef, 5> Fields;
- StringRef(SpecialReg).split(Fields, '_', 1, false);
- std::string Reg = Fields[0].str();
- StringRef Flags = Fields.size() == 2 ? Fields[1] : "";
+ std::pair<StringRef, StringRef> Fields;
+ Fields = StringRef(SpecialReg).rsplit('_');
+ std::string Reg = Fields.first.str();
+ StringRef Flags = Fields.second;
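The move from a front-anchored split (MaxSplit = 1 on the first '_') to rsplit('_') matters because the flag is the trailing component of the name: splitting on the last underscore keeps any interior underscore with the register, mirroring the endswith("_ns")/drop_back handling added to the read path above. A plain-C++17 stand-in for llvm::StringRef::rsplit (which yields the whole string and an empty remainder when the separator is absent) shows the semantics:

#include <cstdio>
#include <string_view>
#include <utility>

// Stand-in for llvm::StringRef::rsplit(char); names here are illustrative.
static std::pair<std::string_view, std::string_view>
rsplit(std::string_view S, char Sep) {
  size_t Pos = S.rfind(Sep);
  if (Pos == std::string_view::npos)
    return {S, {}}; // no separator: (whole string, "")
  return {S.substr(0, Pos), S.substr(Pos + 1)};
}

int main() {
  for (std::string_view Name : {"primask", "faultmask_ns", "basepri_max_ns"}) {
    auto [Reg, Flags] = rsplit(Name, '_');
    std::printf("%s -> reg=\"%.*s\" flags=\"%.*s\"\n", Name.data(),
                (int)Reg.size(), Reg.data(), (int)Flags.size(), Flags.data());
  }
}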
// If the target was M Class then we need to validate the special register value
// and retrieve the mask for use in the instruction node.
@@ -3745,12 +4184,13 @@ SDNode *ARMDAGToDAGISel::SelectWriteRegister(SDNode *N){
}
int SYSmValue = getMClassRegisterMask(Reg, Flags, false, Subtarget);
if (SYSmValue == -1)
- return nullptr;
+ return false;
SDValue Ops[] = { CurDAG->getTargetConstant(SYSmValue, DL, MVT::i32),
N->getOperand(2), getAL(CurDAG, DL),
CurDAG->getRegister(0, MVT::i32), N->getOperand(0) };
- return CurDAG->getMachineNode(ARM::t2MSR_M, DL, MVT::Other, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(ARM::t2MSR_M, DL, MVT::Other, Ops));
+ return true;
}
// We then check to see if a valid mask can be constructed for one of the
@@ -3761,14 +4201,15 @@ SDNode *ARMDAGToDAGISel::SelectWriteRegister(SDNode *N){
Ops = { CurDAG->getTargetConstant(Mask, DL, MVT::i32), N->getOperand(2),
getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
N->getOperand(0) };
- return CurDAG->getMachineNode(IsThumb2 ? ARM::t2MSR_AR : ARM::MSR,
- DL, MVT::Other, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(IsThumb2 ? ARM::t2MSR_AR : ARM::MSR,
+ DL, MVT::Other, Ops));
+ return true;
}
- return nullptr;
+ return false;
}
-SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){
+bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){
std::vector<SDValue> AsmNodeOperands;
unsigned Flag, Kind;
bool Changed = false;
@@ -3823,6 +4264,17 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){
if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx))
IsTiedToChangedOp = OpChanged[DefIdx];
+ // Memory operands to inline asm in the SelectionDAG are modeled with two
+ // operands: a constant of value InlineAsm::Kind_Mem followed by the input
+ // operand. If we get here and we have a Kind_Mem, skip the next operand (so
+ // it doesn't get misinterpreted), and continue. We do this here because
+ // it's important to update the OpChanged array correctly before moving on.
+ if (Kind == InlineAsm::Kind_Mem) {
+ SDValue op = N->getOperand(++i);
+ AsmNodeOperands.push_back(op);
+ continue;
+ }
+
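A toy model of the operand walk this block protects (invented kinds and payloads, not the real InlineAsm flag encoding): every flag word is followed by its operand, and a memory-kind flag means the next operand must be forwarded untouched instead of being inspected as a register candidate:

#include <cstdio>
#include <utility>
#include <vector>

enum class Kind { RegUse, RegDef, Mem }; // stand-ins for InlineAsm::Kind_*

int main() {
  // (flag, operand) stream, mimicking the AsmNodeOperands layout.
  std::vector<std::pair<Kind, int>> Ops = {
      {Kind::RegUse, 101}, {Kind::Mem, 202}, {Kind::RegDef, 303}};
  for (auto [K, Operand] : Ops) {
    if (K == Kind::Mem) { // copy the memory operand through and move on
      std::printf("mem operand %d forwarded verbatim\n", Operand);
      continue;
    }
    std::printf("register operand %d eligible for rewriting\n", Operand);
  }
}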
if (Kind != InlineAsm::Kind_RegUse && Kind != InlineAsm::Kind_RegDef
&& Kind != InlineAsm::Kind_RegDefEarlyClobber)
continue;
@@ -3912,12 +4364,13 @@ SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){
if (Glue.getNode())
AsmNodeOperands.push_back(Glue);
if (!Changed)
- return nullptr;
+ return false;
SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
CurDAG->getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
New->setNodeId(-1);
- return New.getNode();
+ ReplaceNode(N, New.getNode());
+ return true;
}
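The mechanical change running through all of these ISel hunks is the same interface migration: selectors no longer return the replacement node (or nullptr for "no match") and leave the substitution to the caller; they return a bool and commit the replacement themselves through ReplaceNode. A minimal toy model of the two calling conventions, with nothing borrowed from real SelectionDAG code:

#include <cstdio>

struct Node { int Id; };
static Node Pool[2] = {{0}, {1}};

static void ReplaceNode(Node *Old, Node *New) {
  std::printf("replace node %d with node %d\n", Old->Id, New->Id);
}

// Old contract: hand back the replacement, or nullptr; the caller
// performs the substitution.
static Node *SelectOld(Node *N) { return N->Id == 0 ? &Pool[1] : nullptr; }

// New contract: report success, and do the replacement internally.
static bool tryNew(Node *N) {
  if (N->Id != 0)
    return false;
  ReplaceNode(N, &Pool[1]);
  return true;
}

int main() {
  if (Node *Rep = SelectOld(&Pool[0]))
    ReplaceNode(&Pool[0], Rep); // substitution done at the call site
  if (!tryNew(&Pool[0]))        // substitution already done inside
    std::printf("no pattern matched\n");
}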
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 978e99cf511e..d6e7caf98a80 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -65,6 +65,13 @@ ARMInterworking("arm-interworking", cl::Hidden,
cl::desc("Enable / disable ARM interworking (for debugging only)"),
cl::init(true));
+// Disabled for causing self-hosting failures once returned-attribute inference
+// was enabled.
+static cl::opt<bool>
+EnableThisRetForwarding("arm-this-return-forwarding", cl::Hidden,
+ cl::desc("Directly forward this return"),
+ cl::init(false));
+
namespace {
class ARMCCState : public CCState {
public:
@@ -240,7 +247,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
// Set the correct calling convention for ARMv7k WatchOS. It's just
// AAPCS_VFP for functions as simple as libcalls.
- if (Subtarget->isTargetWatchOS()) {
+ if (Subtarget->isTargetWatchABI()) {
for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i)
setLibcallCallingConv((RTLIB::Libcall)i, CallingConv::ARM_AAPCS_VFP);
}
@@ -254,7 +261,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
// RTLIB
if (Subtarget->isAAPCS_ABI() &&
(Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
- Subtarget->isTargetAndroid())) {
+ Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
static const struct {
const RTLIB::Libcall Op;
const char * const Name;
@@ -390,10 +397,6 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
{ RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
{ RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
{ RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
- { RTLIB::SDIV_I32, "__rt_sdiv", CallingConv::ARM_AAPCS_VFP },
- { RTLIB::UDIV_I32, "__rt_udiv", CallingConv::ARM_AAPCS_VFP },
- { RTLIB::SDIV_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS_VFP },
- { RTLIB::UDIV_I64, "__rt_udiv64", CallingConv::ARM_AAPCS_VFP },
};
for (const auto &LC : LibraryCalls) {
@@ -410,17 +413,19 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
}
- // The half <-> float conversion functions are always soft-float, but are
- // needed for some targets which use a hard-float calling convention by
- // default.
- if (Subtarget->isAAPCS_ABI()) {
- setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
- setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
- } else {
- setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
- setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
- setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
+ // The half <-> float conversion functions are always soft-float on
+ // non-watchos platforms, but are needed for some targets which use a
+ // hard-float calling convention by default.
+ if (!Subtarget->isTargetWatchABI()) {
+ if (Subtarget->isAAPCS_ABI()) {
+ setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
+ } else {
+ setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
+ setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
+ setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
+ }
}
// In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
@@ -581,6 +586,11 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
+ setOperationAction(ISD::CTPOP, MVT::v1i64, Expand);
+ setOperationAction(ISD::CTPOP, MVT::v2i64, Expand);
+
+ setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
+ setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);
// NEON does not have single instruction CTTZ for vectors.
setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
@@ -712,6 +722,10 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setIndexedStoreAction(im, MVT::i16, Legal);
setIndexedStoreAction(im, MVT::i32, Legal);
}
+ } else {
+ // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
+ setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
+ setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
}
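A small illustration of the pattern this legalization admits: with POST_INC legal for i32, the marked load can be selected on Thumb-1 as a single 16-bit "ldm r0!, {r3}", loading the value and advancing the base register by 4 in one instruction (register choices are illustrative):

#include <cstdio>

static int sum(const int *P, int N) {
  int S = 0;
  while (N--)
    S += *P++; // post-increment i32 load: an LDM-with-writeback candidate
  return S;
}

int main() {
  int A[4] = {1, 2, 3, 4};
  std::printf("%d\n", sum(A, 4)); // 10
}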
setOperationAction(ISD::SADDO, MVT::i32, Custom);
@@ -758,10 +772,6 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
setOperationAction(ISD::CTLZ, MVT::i32, Expand);
- // These just redirect to CTTZ and CTLZ on ARM.
- setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i32 , Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF , MVT::i32 , Expand);
-
// @llvm.readcyclecounter requires the Performance Monitors extension.
// Default to the 0 expansion on unsupported platforms.
// FIXME: Technically there are older ARM CPUs that have
@@ -773,19 +783,30 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
if (!Subtarget->hasV6Ops())
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
- if (!(Subtarget->hasDivide() && Subtarget->isThumb2()) &&
- !(Subtarget->hasDivideInARMMode() && !Subtarget->isThumb())) {
+ bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivide()
+ : Subtarget->hasDivideInARMMode();
+ if (!hasDivide) {
// These are expanded into libcalls if the cpu doesn't have HW divider.
setOperationAction(ISD::SDIV, MVT::i32, LibCall);
setOperationAction(ISD::UDIV, MVT::i32, LibCall);
}
+ if (Subtarget->isTargetWindows() && !Subtarget->hasDivide()) {
+ setOperationAction(ISD::SDIV, MVT::i32, Custom);
+ setOperationAction(ISD::UDIV, MVT::i32, Custom);
+
+ setOperationAction(ISD::SDIV, MVT::i64, Custom);
+ setOperationAction(ISD::UDIV, MVT::i64, Custom);
+ }
+
setOperationAction(ISD::SREM, MVT::i32, Expand);
setOperationAction(ISD::UREM, MVT::i32, Expand);
// Register based DivRem for AEABI (RTABI 4.2)
- if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid()) {
+ if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
+ Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI()) {
setOperationAction(ISD::SREM, MVT::i64, Custom);
setOperationAction(ISD::UREM, MVT::i64, Custom);
+ HasStandaloneRem = false;
setLibcallName(RTLIB::SDIVREM_I8, "__aeabi_idivmod");
setLibcallName(RTLIB::SDIVREM_I16, "__aeabi_idivmod");
@@ -807,6 +828,8 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
+ setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
+ setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
} else {
setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
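The __aeabi_*divmod routines named above pay off because the AEABI run-time ABI hands back quotient and remainder from a single call (for __aeabi_idivmod, quotient in r0 and remainder in r1), which is also why HasStandaloneRem is cleared: there is no remainder-only entry point worth preferring. A sketch of that paired-result convention, with the libcall body stubbed out in plain C++:

#include <cstdio>

struct DivMod { int Quot; int Rem; }; // models the r0/r1 result pair

// Stand-in for __aeabi_idivmod; the real routine lives in the runtime.
static DivMod aeabi_idivmod(int Num, int Den) {
  return {Num / Den, Num % Den};
}

int main() {
  DivMod R = aeabi_idivmod(29, 4);
  std::printf("quot=%d rem=%d\n", R.Quot, R.Rem); // quot=7 rem=1
}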
@@ -833,21 +856,21 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
// ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
- // the default expansion. If we are targeting a single threaded system,
- // then set them all for expand so we can lower them later into their
- // non-atomic form.
- if (TM.Options.ThreadModel == ThreadModel::Single)
- setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
- else if (Subtarget->hasAnyDataBarrier() && !Subtarget->isThumb1Only()) {
+ // the default expansion.
+ InsertFencesForAtomic = false;
+ if (Subtarget->hasAnyDataBarrier() &&
+ (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
// ATOMIC_FENCE needs custom lowering; the others should have been expanded
// to ldrex/strex loops already.
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+ if (!Subtarget->isThumb() || !Subtarget->isMClass())
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
// On v8, we have particularly efficient implementations of atomic fences
// if they can be combined with nearby atomic loads and stores.
- if (!Subtarget->hasV8Ops()) {
+ if (!Subtarget->hasV8Ops() || getTargetMachine().getOptLevel() == 0) {
// Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
- setInsertFencesForAtomic(true);
+ InsertFencesForAtomic = true;
}
} else {
// If there's anything we can use as a barrier, go through custom lowering
@@ -909,6 +932,10 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
+ // Thumb-1 cannot currently select ARMISD::SUBE.
+ if (!Subtarget->isThumb1Only())
+ setOperationAction(ISD::SETCCE, MVT::i32, Custom);
+
setOperationAction(ISD::BRCOND, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, MVT::i32, Custom);
setOperationAction(ISD::BR_CC, MVT::f32, Custom);
@@ -956,7 +983,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
if (Subtarget->hasSinCos()) {
setLibcallName(RTLIB::SINCOS_F32, "sincosf");
setLibcallName(RTLIB::SINCOS_F64, "sincos");
- if (Subtarget->isTargetWatchOS()) {
+ if (Subtarget->isTargetWatchABI()) {
setLibcallCallingConv(RTLIB::SINCOS_F32, CallingConv::ARM_AAPCS_VFP);
setLibcallCallingConv(RTLIB::SINCOS_F64, CallingConv::ARM_AAPCS_VFP);
}
@@ -1039,7 +1066,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setMinStackArgumentAlignment(4);
// Prefer likely predicted branches to selects on out-of-order cores.
- PredictableSelectIsExpensive = Subtarget->isLikeA9();
+ PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}
@@ -1106,7 +1133,6 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::CALL: return "ARMISD::CALL";
case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
- case ARMISD::tCALL: return "ARMISD::tCALL";
case ARMISD::BRCOND: return "ARMISD::BRCOND";
case ARMISD::BR_JT: return "ARMISD::BR_JT";
case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
@@ -1123,6 +1149,8 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::CMOV: return "ARMISD::CMOV";
+ case ARMISD::SSAT: return "ARMISD::SSAT";
+
case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
case ARMISD::RRX: return "ARMISD::RRX";
@@ -1199,6 +1227,7 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::VTBL2: return "ARMISD::VTBL2";
case ARMISD::VMULLs: return "ARMISD::VMULLs";
case ARMISD::VMULLu: return "ARMISD::VMULLu";
+ case ARMISD::UMAAL: return "ARMISD::UMAAL";
case ARMISD::UMLAL: return "ARMISD::UMLAL";
case ARMISD::SMLAL: return "ARMISD::SMLAL";
case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
@@ -1373,7 +1402,10 @@ ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
case CallingConv::ARM_APCS:
case CallingConv::GHC:
return CC;
+ case CallingConv::PreserveMost:
+ return CallingConv::PreserveMost;
case CallingConv::ARM_AAPCS_VFP:
+ case CallingConv::Swift:
return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
case CallingConv::C:
if (!Subtarget->isAAPCS_ABI())
@@ -1415,18 +1447,18 @@ CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
case CallingConv::GHC:
return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
+ case CallingConv::PreserveMost:
+ return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
}
}
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
-SDValue
-ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals,
- bool isThisReturn, SDValue ThisVal) const {
+SDValue ARMTargetLowering::LowerCallResult(
+ SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
+ SDValue ThisVal) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
@@ -1442,7 +1474,7 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// Pass 'this' value directly from the argument to return value, to avoid
// reg unit interference
- if (i == 0 && isThisReturn) {
+ if (i == 0 && isThisReturn && EnableThisRetForwarding) {
assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
"unexpected return calling convention register assignment");
InVals.push_back(ThisVal);
@@ -1506,23 +1538,21 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
}
/// LowerMemOpCallTo - Store the argument to the stack.
-SDValue
-ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
- SDValue StackPtr, SDValue Arg,
- SDLoc dl, SelectionDAG &DAG,
- const CCValAssign &VA,
- ISD::ArgFlagsTy Flags) const {
+SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
+ SDValue Arg, const SDLoc &dl,
+ SelectionDAG &DAG,
+ const CCValAssign &VA,
+ ISD::ArgFlagsTy Flags) const {
unsigned LocMemOffset = VA.getLocMemOffset();
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
StackPtr, PtrOff);
return DAG.getStore(
Chain, dl, Arg, PtrOff,
- MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset),
- false, false, 0);
+ MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
}
-void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
+void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
SDValue Chain, SDValue &Arg,
RegsToPassVector &RegsToPass,
CCValAssign &VA, CCValAssign &NextVA,
@@ -1704,7 +1734,6 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
MachinePointerInfo(),
- false, false, false,
DAG.InferPtrAlignment(AddArg));
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(j, Load));
@@ -1780,20 +1809,27 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
// node so that legalize doesn't hack it.
bool isDirect = false;
- bool isARMFunc = false;
+
+ const TargetMachine &TM = getTargetMachine();
+ const Module *Mod = MF.getFunction()->getParent();
+ const GlobalValue *GV = nullptr;
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
+ GV = G->getGlobal();
+ bool isStub =
+ !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO();
+
+ bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
bool isLocalARMFunc = false;
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
auto PtrVt = getPointerTy(DAG.getDataLayout());
if (Subtarget->genLongCalls()) {
- assert((Subtarget->isTargetWindows() ||
- getTargetMachine().getRelocationModel() == Reloc::Static) &&
- "long-calls with non-static relocation model!");
+ assert((!isPositionIndependent() || Subtarget->isTargetWindows()) &&
+ "long-calls codegen is not position independent!");
// Handle a global address or an external symbol. If it's not one of
// those, the target's already in a register, so we don't need to do
// anything extra.
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- const GlobalValue *GV = G->getGlobal();
+ if (isa<GlobalAddressSDNode>(Callee)) {
// Create a constant pool entry for the callee address
unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
ARMConstantPoolValue *CPV =
@@ -1804,8 +1840,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
Callee = DAG.getLoad(
PtrVt, dl, DAG.getEntryNode(), CPAddr,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
- false, false, 0);
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
} else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
const char *Sym = S->getSymbol();
@@ -1819,54 +1854,55 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
Callee = DAG.getLoad(
PtrVt, dl, DAG.getEntryNode(), CPAddr,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
- false, false, 0);
- }
- } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- const GlobalValue *GV = G->getGlobal();
- isDirect = true;
- bool isDef = GV->isStrongDefinitionForLinker();
- bool isStub = (!isDef && Subtarget->isTargetMachO()) &&
- getTargetMachine().getRelocationModel() != Reloc::Static;
- isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
- // ARM call to a local ARM function is predicable.
- isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
- // tBX takes a register source operand.
- if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
- assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
- Callee = DAG.getNode(
- ARMISD::WrapperPIC, dl, PtrVt,
- DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY));
- Callee = DAG.getLoad(PtrVt, dl, DAG.getEntryNode(), Callee,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, false, true, 0);
- } else if (Subtarget->isTargetCOFF()) {
- assert(Subtarget->isTargetWindows() &&
- "Windows is the only supported COFF target");
- unsigned TargetFlags = GV->hasDLLImportStorageClass()
- ? ARMII::MO_DLLIMPORT
- : ARMII::MO_NO_FLAG;
- Callee =
- DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*Offset=*/0, TargetFlags);
- if (GV->hasDLLImportStorageClass())
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
+ }
+ } else if (isa<GlobalAddressSDNode>(Callee)) {
+ // If we're optimizing for minimum size and the function is called three or
+ // more times in this block, we can improve codesize by calling indirectly
+ // as BLXr has a 16-bit encoding.
+ auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
+ auto *BB = CLI.CS->getParent();
+ bool PreferIndirect =
+ Subtarget->isThumb() && MF.getFunction()->optForMinSize() &&
+ std::count_if(GV->user_begin(), GV->user_end(), [&BB](const User *U) {
+ return isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB;
+ }) > 2;
+
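Concretely, the count_if asks for at least three calls to the same callee within one block: on Thumb a direct BL is a 32-bit encoding while BLX-register is 16-bit, so once the callee's address is materialized in a register the third call is roughly where indirection starts winning on size. A block shaped like the one the heuristic targets:

#include <cstdio>

static void Callee(int X) { std::printf("%d\n", X); }

static void Caller() {
  Callee(1); // three calls in one block: at -Oz on Thumb, load the address
  Callee(2); // of Callee once, then make each call a 16-bit BLX
  Callee(3);
}

int main() { Caller(); }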
+ if (!PreferIndirect) {
+ isDirect = true;
+ bool isDef = GV->isStrongDefinitionForLinker();
+
+ // ARM call to a local ARM function is predicable.
+ isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
+ // tBX takes a register source operand.
+ if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
+ assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
+ Callee = DAG.getNode(
+ ARMISD::WrapperPIC, dl, PtrVt,
+ DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY));
Callee =
- DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
- DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
+ DAG.getLoad(PtrVt, dl, DAG.getEntryNode(), Callee,
MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, false, false, 0);
- } else {
- // On ELF targets for PIC code, direct calls should go through the PLT
- unsigned OpFlags = 0;
- if (Subtarget->isTargetELF() &&
- getTargetMachine().getRelocationModel() == Reloc::PIC_)
- OpFlags = ARMII::MO_PLT;
- Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, OpFlags);
+ /* Alignment = */ 0, MachineMemOperand::MOInvariant);
+ } else if (Subtarget->isTargetCOFF()) {
+ assert(Subtarget->isTargetWindows() &&
+ "Windows is the only supported COFF target");
+ unsigned TargetFlags = GV->hasDLLImportStorageClass()
+ ? ARMII::MO_DLLIMPORT
+ : ARMII::MO_NO_FLAG;
+ Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*Offset=*/0,
+ TargetFlags);
+ if (GV->hasDLLImportStorageClass())
+ Callee =
+ DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
+ DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
+ } else {
+ Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0);
+ }
}
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
isDirect = true;
- bool isStub = Subtarget->isTargetMachO() &&
- getTargetMachine().getRelocationModel() != Reloc::Static;
- isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
// tBX takes a register source operand.
const char *Sym = S->getSymbol();
if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
@@ -1878,17 +1914,11 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
Callee = DAG.getLoad(
PtrVt, dl, DAG.getEntryNode(), CPAddr,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
- false, false, 0);
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel);
} else {
- unsigned OpFlags = 0;
- // On ELF targets for PIC code, direct calls should go through the PLT
- if (Subtarget->isTargetELF() &&
- getTargetMachine().getRelocationModel() == Reloc::PIC_)
- OpFlags = ARMII::MO_PLT;
- Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, OpFlags);
+ Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0);
}
}
@@ -1898,11 +1928,11 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
CallOpc = ARMISD::CALL_NOLINK;
else
- CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
+ CallOpc = ARMISD::CALL;
} else {
if (!isDirect && !Subtarget->hasV5TOps())
CallOpc = ARMISD::CALL_NOLINK;
- else if (doesNotRet && isDirect && Subtarget->hasRAS() &&
+ else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
// Emit regular call when code size is the priority
!MF.getFunction()->optForMinSize())
// "mov lr, pc; b _foo" to avoid confusing the RSP
@@ -2042,7 +2072,7 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
if (!Def)
return false;
if (!Flags.isByVal()) {
- if (!TII->isLoadFromStackSlot(Def, FI))
+ if (!TII->isLoadFromStackSlot(*Def, FI))
return false;
} else {
return false;
@@ -2082,9 +2112,9 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const {
- const Function *CallerF = DAG.getMachineFunction().getFunction();
+ MachineFunction &MF = DAG.getMachineFunction();
+ const Function *CallerF = MF.getFunction();
CallingConv::ID CallerCC = CallerF->getCallingConv();
- bool CCMatch = CallerCC == CalleeCC;
assert(Subtarget->supportsTailCall());
@@ -2122,41 +2152,25 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
return false;
}
- // If the calling conventions do not match, then we'd better make sure the
- // results are returned in the same way as what the caller expects.
- if (!CCMatch) {
- SmallVector<CCValAssign, 16> RVLocs1;
- ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
- *DAG.getContext(), Call);
- CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));
-
- SmallVector<CCValAssign, 16> RVLocs2;
- ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
- *DAG.getContext(), Call);
- CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));
-
- if (RVLocs1.size() != RVLocs2.size())
+ // Check that the call results are passed in the same way.
+ LLVMContext &C = *DAG.getContext();
+ if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
+ CCAssignFnForNode(CalleeCC, true, isVarArg),
+ CCAssignFnForNode(CallerCC, true, isVarArg)))
+ return false;
+ // The callee has to preserve all registers the caller needs to preserve.
+ const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
+ const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
+ if (CalleeCC != CallerCC) {
+ const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
+ if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
return false;
- for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
- if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
- return false;
- if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
- return false;
- if (RVLocs1[i].isRegLoc()) {
- if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
- return false;
- } else {
- if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
- return false;
- }
- }
}
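The regmask check encodes the obvious safety condition: a tail call never returns to the caller, so every register the caller's convention promises to keep preserved must also be preserved by the tail callee's convention. A toy version with invented bitmasks (the real code compares TargetRegisterInfo register masks):

#include <cstdint>
#include <cstdio>

static bool preservesAtLeast(uint32_t CallerPreserved,
                             uint32_t CalleePreserved) {
  // Every register preserved under the caller's CC must stay preserved.
  return (CallerPreserved & ~CalleePreserved) == 0;
}

int main() {
  const uint32_t AAPCS = 0x0FF0; // pretend: r4-r11 preserved
  const uint32_t GHC = 0x0000;   // pretend: nothing preserved
  std::printf("AAPCS callee: %d\n", preservesAtLeast(AAPCS, AAPCS)); // 1
  std::printf("GHC callee:   %d\n", preservesAtLeast(AAPCS, GHC));   // 0
}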
// If Caller's vararg or byval argument has been split between registers and
// stack, do not perform tail call, since part of the argument is in caller's
// local frame.
- const ARMFunctionInfo *AFI_Caller = DAG.getMachineFunction().
- getInfo<ARMFunctionInfo>();
+ const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
if (AFI_Caller->getArgRegsSaveSize())
return false;
@@ -2166,13 +2180,10 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// Check if stack adjustment is needed. For now, do not do this if any
// argument is passed on the stack.
SmallVector<CCValAssign, 16> ArgLocs;
- ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
- *DAG.getContext(), Call);
+ ARMCCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C, Call);
CCInfo.AnalyzeCallOperands(Outs,
CCAssignFnForNode(CalleeCC, false, isVarArg));
if (CCInfo.getNextStackOffset()) {
- MachineFunction &MF = DAG.getMachineFunction();
-
// Check if the arguments are already laid out in the right way as
// the caller's fixed stack objects.
MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -2209,6 +2220,10 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
}
}
}
+
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
+ return false;
}
return true;
@@ -2226,7 +2241,7 @@ ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
}
static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
- SDLoc DL, SelectionDAG &DAG) {
+ const SDLoc &DL, SelectionDAG &DAG) {
const MachineFunction &MF = DAG.getMachineFunction();
const Function *F = MF.getFunction();
@@ -2259,11 +2274,11 @@ static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
}
SDValue
-ARMTargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
+ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const {
+ const SDLoc &dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to a location.
SmallVector<CCValAssign, 16> RVLocs;
@@ -2521,9 +2536,9 @@ SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
SDLoc DL(Op);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
- Reloc::Model RelocM = getTargetMachine().getRelocationModel();
SDValue CPAddr;
- if (RelocM == Reloc::Static) {
+ bool IsPositionIndependent = isPositionIndependent();
+ if (!IsPositionIndependent) {
CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
} else {
unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
@@ -2534,11 +2549,10 @@ SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
}
CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
- SDValue Result =
- DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
- false, false, false, 0);
- if (RelocM == Reloc::Static)
+ SDValue Result = DAG.getLoad(
+ PtrVT, DL, DAG.getEntryNode(), CPAddr,
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
+ if (!IsPositionIndependent)
return Result;
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
@@ -2584,7 +2598,8 @@ ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
SDValue FuncTLVGet =
DAG.getLoad(MVT::i32, DL, Chain, DescAddr,
MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, true, true, 4);
+ /* Alignment = */ 4, MachineMemOperand::MONonTemporal |
+ MachineMemOperand::MOInvariant);
Chain = FuncTLVGet.getValue(1);
MachineFunction &F = DAG.getMachineFunction();
@@ -2610,6 +2625,61 @@ ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1));
}
+SDValue
+ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
+
+ SDValue Chain = DAG.getEntryNode();
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
+ SDLoc DL(Op);
+
+ // Load the current TEB (thread environment block)
+ SDValue Ops[] = {Chain,
+ DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
+ DAG.getConstant(15, DL, MVT::i32),
+ DAG.getConstant(0, DL, MVT::i32),
+ DAG.getConstant(13, DL, MVT::i32),
+ DAG.getConstant(0, DL, MVT::i32),
+ DAG.getConstant(2, DL, MVT::i32)};
+ SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
+ DAG.getVTList(MVT::i32, MVT::Other), Ops);
+
+ SDValue TEB = CurrentTEB.getValue(0);
+ Chain = CurrentTEB.getValue(1);
+
+ // Load the ThreadLocalStoragePointer from the TEB
+ // A pointer to the TLS array is located at offset 0x2c from the TEB.
+ SDValue TLSArray =
+ DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL));
+ TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
+
+ // The pointer to the thread's TLS data area lives at offset TLSIndex * 4
+ // within the TLSArray.
+
+ // Load the TLS index from the C runtime
+ SDValue TLSIndex =
+ DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG);
+ TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex);
+ TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo());
+
+ SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
+ DAG.getConstant(2, DL, MVT::i32));
+ SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
+ DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
+ MachinePointerInfo());
+
+ // Get the offset of the start of the .tls section (section base)
+ const auto *GA = cast<GlobalAddressSDNode>(Op);
+ auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL);
+ SDValue Offset = DAG.getLoad(
+ PtrVT, DL, Chain, DAG.getNode(ARMISD::Wrapper, DL, MVT::i32,
+ DAG.getTargetConstantPool(CPV, PtrVT, 4)),
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
+
+ return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset);
+}
+
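Flattened into pointer arithmetic, the lowering computes &var = TEB.ThreadLocalStoragePointer[_tls_index] + secrel(var), where the TEB itself comes from "mrc p15, #0, rX, c13, c0, #2" (the 15/0/13/0/2 constants passed to the intrinsic above). A self-contained mock of that walk over a fabricated TEB; the 0x2c offset matches 32-bit Windows, everything else is invented test data:

#include <cstdint>
#include <cstdio>

// Mock of the one TEB field the lowering reads; on the real 32-bit target
// ThreadLocalStoragePointer sits at byte offset 0x2c.
struct FakeTeb {
  uint8_t Pad[0x2c];
  uint32_t **ThreadLocalStoragePointer;
};

int main() {
  uint32_t TlsBlock[4] = {0, 0, 42, 0}; // module TLS data; var at offset 8
  uint32_t *TlsArray[1] = {TlsBlock};   // one loaded module
  FakeTeb Teb = {};
  Teb.ThreadLocalStoragePointer = TlsArray;

  uint32_t TlsIndex = 0;     // the CRT's _tls_index for this module
  uint32_t SecRelOffset = 8; // the SECREL constant-pool entry's value

  uint8_t *ModuleTls =
      reinterpret_cast<uint8_t *>(Teb.ThreadLocalStoragePointer[TlsIndex]);
  uint32_t Value = *reinterpret_cast<uint32_t *>(ModuleTls + SecRelOffset);
  std::printf("tls value = %u\n", Value); // 42
}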
// Lower ISD::GlobalTLSAddress using the "general dynamic" model
SDValue
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
@@ -2625,10 +2695,9 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
- Argument =
- DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
- false, false, false, 0);
+ Argument = DAG.getLoad(
+ PtrVT, dl, DAG.getEntryNode(), Argument,
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
SDValue Chain = Argument.getValue(1);
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
@@ -2645,8 +2714,7 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl).setChain(Chain)
.setCallee(CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
- DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args),
- 0);
+ DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args));
std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
return CallResult.first;
@@ -2680,8 +2748,7 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
Offset = DAG.getLoad(
PtrVT, dl, Chain, Offset,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
- false, false, 0);
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
Chain = Offset.getValue(1);
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
@@ -2689,8 +2756,7 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
Offset = DAG.getLoad(
PtrVT, dl, Chain, Offset,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
- false, false, 0);
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
} else {
// local exec model
assert(model == TLSModel::LocalExec);
@@ -2700,8 +2766,7 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
Offset = DAG.getLoad(
PtrVT, dl, Chain, Offset,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
- false, false, 0);
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
}
// The address of the thread local variable is the add of the thread
@@ -2714,6 +2779,9 @@ ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
if (Subtarget->isTargetDarwin())
return LowerGlobalTLSAddressDarwin(Op, DAG);
+ if (Subtarget->isTargetWindows())
+ return LowerGlobalTLSAddressWindows(Op, DAG);
+
// TODO: implement the "local dynamic" model
assert(Subtarget->isTargetELF() && "Only ELF implemented here");
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
@@ -2738,9 +2806,9 @@ SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDLoc dl(Op);
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
- bool UseGOT_PREL =
- !(GV->hasHiddenVisibility() || GV->hasLocalLinkage());
+ const TargetMachine &TM = getTargetMachine();
+ if (isPositionIndependent()) {
+ bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
@@ -2756,15 +2824,14 @@ SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
SDValue Result = DAG.getLoad(
PtrVT, dl, DAG.getEntryNode(), CPAddr,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
- false, false, 0);
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
SDValue Chain = Result.getValue(1);
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
if (UseGOT_PREL)
- Result = DAG.getLoad(PtrVT, dl, Chain, Result,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, false, false, 0);
+ Result =
+ DAG.getLoad(PtrVT, dl, Chain, Result,
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
return Result;
}
@@ -2781,8 +2848,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
return DAG.getLoad(
PtrVT, dl, DAG.getEntryNode(), CPAddr,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
- false, false, 0);
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
}
}
@@ -2791,7 +2857,6 @@ SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDLoc dl(Op);
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- Reloc::Model RelocM = getTargetMachine().getRelocationModel();
if (Subtarget->useMovt(DAG.getMachineFunction()))
++NumMovwMovt;
@@ -2799,15 +2864,14 @@ SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
// FIXME: Once remat is capable of dealing with instructions with register
// operands, expand this into multiple nodes
unsigned Wrapper =
- RelocM == Reloc::PIC_ ? ARMISD::WrapperPIC : ARMISD::Wrapper;
+ isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper;
SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
- if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
+ if (Subtarget->isGVIndirectSymbol(GV))
Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, false, false, 0);
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
return Result;
}
@@ -2833,8 +2897,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
TargetFlags));
if (GV->hasDLLImportStorageClass())
Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, false, false, 0);
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
return Result;
}
@@ -2873,7 +2936,7 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
"RBIT intrinsic must have i32 type!");
return DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, Op.getOperand(1));
}
- case Intrinsic::arm_thread_pointer: {
+ case Intrinsic::thread_pointer: {
EVT PtrVT = getPointerTy(DAG.getDataLayout());
return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
}
@@ -2882,10 +2945,9 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
- Reloc::Model RelocM = getTargetMachine().getRelocationModel();
SDValue CPAddr;
- unsigned PCAdj = (RelocM != Reloc::PIC_)
- ? 0 : (Subtarget->isThumb() ? 4 : 8);
+ bool IsPositionIndependent = isPositionIndependent();
+ unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
ARMConstantPoolValue *CPV =
ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
ARMCP::CPLSDA, PCAdj);
@@ -2893,10 +2955,9 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
SDValue Result = DAG.getLoad(
PtrVT, dl, DAG.getEntryNode(), CPAddr,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
- false, false, 0);
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
- if (RelocM == Reloc::PIC_) {
+ if (IsPositionIndependent) {
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
}
@@ -2962,7 +3023,8 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
if (Subtarget->isMClass()) {
// Only a full system barrier exists in the M-class architectures.
Domain = ARM_MB::SY;
- } else if (Subtarget->isSwift() && Ord == Release) {
+ } else if (Subtarget->preferISHSTBarriers() &&
+ Ord == AtomicOrdering::Release) {
// Swift happens to implement ISHST barriers in a way that's compatible with
// Release semantics but weaker than ISH so we'd be fools not to use
// it. Beware: other processors probably don't!
@@ -3012,13 +3074,14 @@ static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
- MachinePointerInfo(SV), false, false, 0);
+ MachinePointerInfo(SV));
}
-SDValue
-ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
- SDValue &Root, SelectionDAG &DAG,
- SDLoc dl) const {
+SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
+ CCValAssign &NextVA,
+ SDValue &Root,
+ SelectionDAG &DAG,
+ const SDLoc &dl) const {
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
@@ -3041,8 +3104,7 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
ArgValue2 = DAG.getLoad(
MVT::i32, dl, Root, FIN,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false,
- false, false, 0);
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
} else {
Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
@@ -3060,13 +3122,11 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
// these values; otherwise, this reassembles a (byval) structure that
// was split between registers and memory.
// Return: The frame index registers were stored into.
-int
-ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
- SDLoc dl, SDValue &Chain,
- const Value *OrigArg,
- unsigned InRegsParamRecordIdx,
- int ArgOffset,
- unsigned ArgSize) const {
+int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
+ const SDLoc &dl, SDValue &Chain,
+ const Value *OrigArg,
+ unsigned InRegsParamRecordIdx,
+ int ArgOffset, unsigned ArgSize) const {
// Currently, two use cases are possible:
// Case #1. Non-var-args function, and we meet first byval parameter.
// Setup first unallocated register as first byval register;
@@ -3104,9 +3164,8 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
unsigned VReg = MF.addLiveIn(Reg, RC);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
- SDValue Store =
- DAG.getStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo(OrigArg, 4 * i), false, false, 0);
+ SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
+ MachinePointerInfo(OrigArg, 4 * i));
MemOps.push_back(Store);
FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT));
}
@@ -3117,17 +3176,16 @@ ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
}
// Set up the stack frame that the va_list pointer will start from.
-void
-ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
- SDLoc dl, SDValue &Chain,
- unsigned ArgOffset,
- unsigned TotalArgRegsSaveSize,
- bool ForceMutable) const {
+void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
+ const SDLoc &dl, SDValue &Chain,
+ unsigned ArgOffset,
+ unsigned TotalArgRegsSaveSize,
+ bool ForceMutable) const {
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
// Try to store any remaining integer argument regs
- // to their spots on the stack so that they may be loaded by deferencing
+ // to their spots on the stack so that they may be loaded by dereferencing
// the result of va_next.
// If there are no regs to be stored, just point the address after the last
// argument passed via stack.
@@ -3137,14 +3195,10 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
AFI->setVarArgsFrameIndex(FrameIndex);
}
-SDValue
-ARMTargetLowering::LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals)
- const {
+SDValue ARMTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -3226,10 +3280,9 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
if (VA.isMemLoc()) {
int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
- ArgValue2 = DAG.getLoad(
- MVT::f64, dl, Chain, FIN,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
- false, false, false, 0);
+ ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
+ MachinePointerInfo::getFixedStack(
+ DAG.getMachineFunction(), FI));
} else {
ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
Chain, DAG, dl);
@@ -3322,10 +3375,9 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
// Create load nodes to retrieve arguments from the stack.
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
- InVals.push_back(DAG.getLoad(
- VA.getValVT(), dl, Chain, FIN,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
- false, false, false, 0));
+ InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
+ MachinePointerInfo::getFixedStack(
+ DAG.getMachineFunction(), FI)));
}
lastInsIndex = index;
}
@@ -3369,10 +3421,9 @@ static bool isFloatingPointZero(SDValue Op) {
/// Returns an appropriate ARM CMP (cmp) and corresponding condition code for
/// the given operands.
-SDValue
-ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
- SDValue &ARMcc, SelectionDAG &DAG,
- SDLoc dl) const {
+SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
+ SDValue &ARMcc, SelectionDAG &DAG,
+ const SDLoc &dl) const {
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
unsigned C = RHSC->getZExtValue();
if (!isLegalICmpImmediate(C)) {
@@ -3428,9 +3479,8 @@ ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
}
/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
-SDValue
-ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
- SDLoc dl) const {
+SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
+ SelectionDAG &DAG, const SDLoc &dl) const {
assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64);
SDValue Cmp;
if (!isFloatingPointZero(RHS))
@@ -3647,7 +3697,7 @@ static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
}
}
-SDValue ARMTargetLowering::getCMOV(SDLoc dl, EVT VT, SDValue FalseVal,
+SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal,
SDValue TrueVal, SDValue ARMcc, SDValue CCR,
SDValue Cmp, SelectionDAG &DAG) const {
if (Subtarget->isFPOnlySP() && VT == MVT::f64) {
@@ -3673,14 +3723,149 @@ SDValue ARMTargetLowering::getCMOV(SDLoc dl, EVT VT, SDValue FalseVal,
}
}
+static bool isGTorGE(ISD::CondCode CC) {
+ return CC == ISD::SETGT || CC == ISD::SETGE;
+}
+
+static bool isLTorLE(ISD::CondCode CC) {
+ return CC == ISD::SETLT || CC == ISD::SETLE;
+}
+
+// See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating.
+// All of these conditions (and their <= and >= counterparts) will do:
+// x < k ? k : x
+// x > k ? x : k
+// k < x ? x : k
+// k > x ? k : x
+static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
+ const SDValue TrueVal, const SDValue FalseVal,
+ const ISD::CondCode CC, const SDValue K) {
+ return (isGTorGE(CC) &&
+ ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
+ (isLTorLE(CC) &&
+ ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
+}
+
+// Similar to isLowerSaturate(), but checks for upper-saturating conditions.
+static bool isUpperSaturate(const SDValue LHS, const SDValue RHS,
+ const SDValue TrueVal, const SDValue FalseVal,
+ const ISD::CondCode CC, const SDValue K) {
+ return (isGTorGE(CC) &&
+ ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) ||
+ (isLTorLE(CC) &&
+ ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal)));
+}
+
+// Check if two chained conditionals could be converted into SSAT.
+//
+// SSAT can replace a set of two conditional selectors that bound a number to an
+// interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples:
+//
+// x < -k ? -k : (x > k ? k : x)
+// x < -k ? -k : (x < k ? x : k)
+// x > -k ? (x > k ? k : x) : -k
+// x < k ? (x < -k ? -k : x) : k
+// etc.
+//
+// It returns true if the conversion can be done, false otherwise.
+// Additionally, the variable is returned in parameter V and the constant in K.
+static bool isSaturatingConditional(const SDValue &Op, SDValue &V,
+ uint64_t &K) {
+
+ SDValue LHS1 = Op.getOperand(0);
+ SDValue RHS1 = Op.getOperand(1);
+ SDValue TrueVal1 = Op.getOperand(2);
+ SDValue FalseVal1 = Op.getOperand(3);
+ ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get();
+
+ const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
+ if (Op2.getOpcode() != ISD::SELECT_CC)
+ return false;
+
+ SDValue LHS2 = Op2.getOperand(0);
+ SDValue RHS2 = Op2.getOperand(1);
+ SDValue TrueVal2 = Op2.getOperand(2);
+ SDValue FalseVal2 = Op2.getOperand(3);
+ ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get();
+
+ // Find out which are the constants and which are the variables
+ // in each conditional
+ SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1)
+ ? &RHS1
+ : NULL;
+ SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2)
+ ? &RHS2
+ : NULL;
+ SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2;
+ SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1;
+ SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2;
+ SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2;
+
+ // We must detect cases where the original operations worked with 16- or
+ // 8-bit values. In such cases, V2Tmp != V2 because the comparison operations
+ // must work with sign-extended values but the select operations return
+ // the original non-extended value.
+ SDValue V2TmpReg = V2Tmp;
+ if (V2Tmp->getOpcode() == ISD::SIGN_EXTEND_INREG)
+ V2TmpReg = V2Tmp->getOperand(0);
+
+ // Check that the registers and the constants have the correct values
+ // in both conditionals
+ if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp ||
+ V2TmpReg != V2)
+ return false;
+
+ // Figure out which conditional is saturating the lower/upper bound.
+ const SDValue *LowerCheckOp =
+ isLowerSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
+ ? &Op
+ : isLowerSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2) ? &Op2
+ : NULL;
+ const SDValue *UpperCheckOp =
+ isUpperSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
+ ? &Op
+ : isUpperSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2) ? &Op2
+ : NULL;
+
+ if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp)
+ return false;
+
+ // Check that the constant in the lower-bound check is
+ // the opposite of the constant in the upper-bound check
+ // in 1's complement.
+ int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue();
+ int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue();
+ int64_t PosVal = std::max(Val1, Val2);
+
+ if (((Val1 > Val2 && UpperCheckOp == &Op) ||
+ (Val1 < Val2 && UpperCheckOp == &Op2)) &&
+ Val1 == ~Val2 && isPowerOf2_64(PosVal + 1)) {
+
+ V = V2;
+ K = (uint64_t)PosVal; // At this point, PosVal is guaranteed to be positive
+ return true;
+ }
+
+ return false;
+}
+
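A worked instance of the pattern being matched: with k = 127 the nested selects clamp to [~k, k] = [-128, 127], which is exactly 8-bit signed saturation, so on ARM the two selects can collapse into a single "ssat r0, #8, r0". The node built below carries countTrailingOnes(127) == 7, from which the 8-bit saturation width follows:

#include <cstdio>

static int clamp8(int X) {
  // x < ~k ? ~k : (x > k ? k : x) with k = 127, ~k = -128: SSAT-shaped.
  return X < -128 ? -128 : (X > 127 ? 127 : X);
}

int main() {
  std::printf("%d %d %d\n", clamp8(-1000), clamp8(42), clamp8(1000));
  // prints: -128 42 127
}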
SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
+
EVT VT = Op.getValueType();
+ SDLoc dl(Op);
+
+ // Try to convert two saturating conditional selects into a single SSAT
+ SDValue SatValue;
+ uint64_t SatConstant;
+ if (isSaturatingConditional(Op, SatValue, SatConstant))
+ return DAG.getNode(ARMISD::SSAT, dl, VT, SatValue,
+ DAG.getConstant(countTrailingOnes(SatConstant), dl, VT));
+
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
SDValue TrueVal = Op.getOperand(2);
SDValue FalseVal = Op.getOperand(3);
- SDLoc dl(Op);
if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
@@ -3781,10 +3966,9 @@ static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
return DAG.getConstant(0, SDLoc(Op), MVT::i32);
if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
- return DAG.getLoad(MVT::i32, SDLoc(Op),
- Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
- Ld->isVolatile(), Ld->isNonTemporal(),
- Ld->isInvariant(), Ld->getAlignment());
+ return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(),
+ Ld->getPointerInfo(), Ld->getAlignment(),
+ Ld->getMemOperand()->getFlags());
llvm_unreachable("Unknown VFP cmp argument!");
}
@@ -3801,21 +3985,17 @@ static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
SDValue Ptr = Ld->getBasePtr();
- RetVal1 = DAG.getLoad(MVT::i32, dl,
- Ld->getChain(), Ptr,
- Ld->getPointerInfo(),
- Ld->isVolatile(), Ld->isNonTemporal(),
- Ld->isInvariant(), Ld->getAlignment());
+ RetVal1 =
+ DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
+ Ld->getAlignment(), Ld->getMemOperand()->getFlags());
EVT PtrType = Ptr.getValueType();
unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
SDValue NewPtr = DAG.getNode(ISD::ADD, dl,
PtrType, Ptr, DAG.getConstant(4, dl, PtrType));
- RetVal2 = DAG.getLoad(MVT::i32, dl,
- Ld->getChain(), NewPtr,
- Ld->getPointerInfo().getWithOffset(4),
- Ld->isVolatile(), Ld->isNonTemporal(),
- Ld->isInvariant(), NewAlign);
+ RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr,
+ Ld->getPointerInfo().getWithOffset(4), NewAlign,
+ Ld->getMemOperand()->getFlags());
return;
}
@@ -3908,8 +4088,7 @@ SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
if (getTargetMachine().Options.UnsafeFPMath &&
(CC == ISD::SETEQ || CC == ISD::SETOEQ ||
CC == ISD::SETNE || CC == ISD::SETUNE)) {
- SDValue Result = OptimizeVFPBrcond(Op, DAG);
- if (Result.getNode())
+ if (SDValue Result = OptimizeVFPBrcond(Op, DAG))
return Result;
}
@@ -3950,19 +4129,17 @@ SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
Addr, Op.getOperand(2), JTI);
}
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
+ if (isPositionIndependent()) {
Addr =
DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
- MachinePointerInfo::getJumpTable(DAG.getMachineFunction()),
- false, false, false, 0);
+ MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
Chain = Addr.getValue(1);
Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
} else {
Addr =
DAG.getLoad(PTy, dl, Chain, Addr,
- MachinePointerInfo::getJumpTable(DAG.getMachineFunction()),
- false, false, false, 0);
+ MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
Chain = Addr.getValue(1);
return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
}
@@ -4156,7 +4333,7 @@ SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
return DAG.getLoad(VT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
- MachinePointerInfo(), false, false, false, 0);
+ MachinePointerInfo());
}
// Return LR, which contains the return address. Mark it an implicit live-in.
@@ -4178,8 +4355,7 @@ SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
while (Depth--)
FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
- MachinePointerInfo(),
- false, false, false, 0);
+ MachinePointerInfo());
return FrameAddr;
}
@@ -4322,7 +4498,7 @@ static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
/// not support i64 elements, so sometimes the zero vectors will need to be
/// explicitly constructed. Regardless, use a canonical VMOV to create the
/// zero vector.
-static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, SDLoc dl) {
+static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
assert(VT.isVector() && "Expected a vector type");
// The canonical modified immediate encoding of a zero vector is....0!
SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
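getZeroVector is one of many helpers in this import whose SDLoc parameter changes from by-value to const reference; later hunks make the matching MachineInstr* to MachineInstr& switch. The shape of the change in miniature, with a hypothetical Loc type standing in for SDLoc:

struct Loc { int line; };

static int byValue(Loc l)           { return l.line; }  // old: copies the Loc
static int byConstRef(const Loc &l) { return l.line; }  // new: borrows it

int main() {
  Loc l{7};
  return byValue(l) - byConstRef(l);  // same result, one copy fewer
}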
@@ -4826,12 +5002,36 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
return Result;
}
+static SDValue LowerSETCCE(SDValue Op, SelectionDAG &DAG) {
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ SDValue Carry = Op.getOperand(2);
+ SDValue Cond = Op.getOperand(3);
+ SDLoc DL(Op);
+
+ assert(LHS.getSimpleValueType().isInteger() && "SETCCE is integer only.");
+
+ assert(Carry.getOpcode() != ISD::CARRY_FALSE);
+ SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
+ SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry);
+
+ SDValue FVal = DAG.getConstant(0, DL, MVT::i32);
+ SDValue TVal = DAG.getConstant(1, DL, MVT::i32);
+ SDValue ARMcc = DAG.getConstant(
+ IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32);
+ SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
+ SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR,
+ Cmp.getValue(1), SDValue());
+ return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc,
+ CCR, Chain.getValue(1));
+}
+
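LowerSETCCE folds an incoming borrow into the comparison: SUBE subtracts with the carry operand, and a CMOV over the constants 0 and 1 turns the resulting flags into a value. A scalar model of the multi-word unsigned compare this enables (plain C++, not DAG code; the sign of the widened difference stands in for the flag the CMOV reads):

#include <cassert>
#include <cstdint>

static bool ult64_via_setcce(uint64_t a, uint64_t b) {
  uint32_t alo = (uint32_t)a, ahi = (uint32_t)(a >> 32);
  uint32_t blo = (uint32_t)b, bhi = (uint32_t)(b >> 32);
  unsigned borrow = alo < blo;                 // low words: SUBC's borrow out
  uint64_t hi = (uint64_t)ahi - bhi - borrow;  // high words: SUBE
  return (hi >> 63) & 1;                       // borrow out means a < b
}

int main() {
  assert(ult64_via_setcce(1, 2));
  assert(!ult64_via_setcce(0x100000000ULL, 0xFFFFFFFFULL));
  assert(ult64_via_setcce(0xFFFFFFFFULL, 0x100000000ULL));
}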
/// isNEONModifiedImm - Check if the specified splat value corresponds to a
/// valid vector constant for a NEON instruction with a "modified immediate"
/// operand (e.g., VMOV). If so, return the encoded value.
static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
unsigned SplatBitSize, SelectionDAG &DAG,
- SDLoc dl, EVT &VT, bool is128Bits,
+ const SDLoc &dl, EVT &VT, bool is128Bits,
NEONModImmType type) {
unsigned OpCmode, Imm;
@@ -4979,7 +5179,7 @@ SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
return SDValue();
// Try splatting with a VMOV.f32...
- APFloat FPVal = CFP->getValueAPF();
+ const APFloat &FPVal = CFP->getValueAPF();
int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);
if (ImmVal != -1) {
@@ -5421,7 +5621,7 @@ static bool isReverseMask(ArrayRef<int> M, EVT VT) {
// instruction, return an SDValue of such a constant (will become a MOV
// instruction). Otherwise return null.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
- const ARMSubtarget *ST, SDLoc dl) {
+ const ARMSubtarget *ST, const SDLoc &dl) {
uint64_t Val;
if (!isa<ConstantSDNode>(N))
return SDValue();
@@ -5502,7 +5702,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
SDValue Value;
for (unsigned i = 0; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
- if (V.getOpcode() == ISD::UNDEF)
+ if (V.isUndef())
continue;
if (i > 0)
isOnlyLowElement = false;
@@ -5585,7 +5785,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
Op.getOperand(i)));
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
- SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);
+ SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
Val = LowerBUILD_VECTOR(Val, DAG, ST);
if (Val.getNode())
return DAG.getNode(ISD::BITCAST, dl, VT, Val);
@@ -5635,7 +5835,7 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
SDValue Vec = DAG.getUNDEF(VT);
for (unsigned i = 0 ; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
- if (V.getOpcode() == ISD::UNDEF)
+ if (V.isUndef())
continue;
SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
@@ -5681,7 +5881,7 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
SmallVector<ShuffleSourceInfo, 2> Sources;
for (unsigned i = 0; i < NumElts; ++i) {
SDValue V = Op.getOperand(i);
- if (V.getOpcode() == ISD::UNDEF)
+ if (V.isUndef())
continue;
else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
// A shuffle can only come from building a vector from various
@@ -5808,7 +6008,7 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
int BitsPerShuffleLane = ShuffleVT.getVectorElementType().getSizeInBits();
for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
SDValue Entry = Op.getOperand(i);
- if (Entry.getOpcode() == ISD::UNDEF)
+ if (Entry.isUndef())
continue;
auto Src = std::find(Sources.begin(), Sources.end(), Entry.getOperand(0));
@@ -5845,7 +6045,7 @@ SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
ShuffleOps[i] = Sources[i].ShuffleVec;
SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
- ShuffleOps[1], &Mask[0]);
+ ShuffleOps[1], Mask);
return DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
}
@@ -5895,7 +6095,7 @@ ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
SDValue RHS, SelectionDAG &DAG,
- SDLoc dl) {
+ const SDLoc &dl) {
unsigned OpNum = (PFEntry >> 26) & 0x0F;
unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
@@ -5982,12 +6182,12 @@ static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32));
- if (V2.getNode()->getOpcode() == ISD::UNDEF)
+ if (V2.getNode()->isUndef())
return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, VTBLMask));
+ DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, VTBLMask));
+ DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
}
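LowerVECTOR_SHUFFLEv8i8 drives the shuffle through NEON's table lookups: the mask becomes a vector of byte indices into V1 (VTBL1) or the V1/V2 pair (VTBL2), and out-of-range indices produce zero. A scalar model of the one-register case:

#include <cassert>
#include <cstdint>

// Stand-alone model (not LLVM code) of VTBL1's per-byte lookup.
static void vtbl1(const uint8_t tbl[8], const uint8_t idx[8], uint8_t out[8]) {
  for (int i = 0; i < 8; ++i)
    out[i] = idx[i] < 8 ? tbl[idx[i]] : 0;  // out-of-range lanes read as 0
}

int main() {
  uint8_t t[8] = {10, 11, 12, 13, 14, 15, 16, 17};
  uint8_t m[8] = {7, 6, 5, 4, 3, 2, 1, 0};  // a v8i8 reverse mask
  uint8_t r[8];
  vtbl1(t, m, r);
  assert(r[0] == 17 && r[7] == 10);
}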
static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
@@ -6024,7 +6224,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
unsigned EltSize = VT.getVectorElementType().getSizeInBits();
if (EltSize <= 32) {
- if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
+ if (SVN->isSplat()) {
int Lane = SVN->getSplatIndex();
// If this is undef splat, generate it via "just" vdup, if possible.
if (Lane == -1) Lane = 0;
@@ -6040,7 +6240,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
!isa<ConstantSDNode>(V1.getOperand(0))) {
bool IsScalarToVector = true;
for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
- if (V1.getOperand(i).getOpcode() != ISD::UNDEF) {
+ if (!V1.getOperand(i).isUndef()) {
IsScalarToVector = false;
break;
}
@@ -6067,8 +6267,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
if (isVREVMask(ShuffleMask, VT, 16))
return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
- if (V2->getOpcode() == ISD::UNDEF &&
- isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
+ if (V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
DAG.getConstant(Imm, dl, MVT::i32));
}
@@ -6103,8 +6302,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
// ->
// concat(VZIP(v1, v2):0, :1)
//
- if (V1->getOpcode() == ISD::CONCAT_VECTORS &&
- V2->getOpcode() == ISD::UNDEF) {
+ if (V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) {
SDValue SubV1 = V1->getOperand(0);
SDValue SubV2 = V1->getOperand(1);
EVT SubVT = SubV1.getValueType();
@@ -6175,11 +6373,9 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
if ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT))
return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG);
- if (VT == MVT::v8i8) {
- SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG);
- if (NewOp.getNode())
+ if (VT == MVT::v8i8)
+ if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG))
return NewOp;
- }
return SDValue();
}
@@ -6218,11 +6414,11 @@ static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
SDValue Val = DAG.getUNDEF(MVT::v2f64);
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
- if (Op0.getOpcode() != ISD::UNDEF)
+ if (!Op0.isUndef())
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
DAG.getIntPtrConstant(0, dl));
- if (Op1.getOpcode() != ISD::UNDEF)
+ if (!Op1.isUndef())
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
DAG.getIntPtrConstant(1, dl));
@@ -6351,17 +6547,16 @@ static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
// The load already has the right type.
if (ExtendedTy == LD->getMemoryVT())
return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
- LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(),
- LD->isNonTemporal(), LD->isInvariant(),
- LD->getAlignment());
+ LD->getBasePtr(), LD->getPointerInfo(),
+ LD->getAlignment(), LD->getMemOperand()->getFlags());
// We need to create a zextload/sextload. We cannot just create a load
// followed by a zext/zext node because LowerMUL is also run during normal
// operation legalization where we can't create illegal types.
return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
- LD->getMemoryVT(), LD->isVolatile(), LD->isInvariant(),
- LD->isNonTemporal(), LD->getAlignment());
+ LD->getMemoryVT(), LD->getAlignment(),
+ LD->getMemOperand()->getFlags());
}
/// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
@@ -6387,8 +6582,9 @@ static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
- return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), MVT::v2i32,
- BVN->getOperand(LowElt), BVN->getOperand(LowElt+2));
+ return DAG.getBuildVector(
+ MVT::v2i32, SDLoc(N),
+ {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)});
}
// Construct a new BUILD_VECTOR with elements truncated to half the size.
assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
@@ -6405,8 +6601,7 @@ static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
// The values are implicitly truncated so sext vs. zext doesn't matter.
Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl,
- MVT::getVectorVT(TruncVT, NumElts), Ops);
+ return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
}
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
@@ -6506,8 +6701,8 @@ static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
}
-static SDValue
-LowerSDIV_v4i8(SDValue X, SDValue Y, SDLoc dl, SelectionDAG &DAG) {
+static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
+ SelectionDAG &DAG) {
// TODO: Should this propagate fast-math-flags?
// Convert to float
@@ -6528,8 +6723,7 @@ LowerSDIV_v4i8(SDValue X, SDValue Y, SDLoc dl, SelectionDAG &DAG) {
// float4 result = as_float4(as_int4(xf*recip) + 0xb000);
X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
- Y = DAG.getConstant(0xb000, dl, MVT::i32);
- Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y);
+ Y = DAG.getConstant(0xb000, dl, MVT::v4i32);
X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
// Convert back to short.
@@ -6538,8 +6732,8 @@ LowerSDIV_v4i8(SDValue X, SDValue Y, SDLoc dl, SelectionDAG &DAG) {
return X;
}
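The SDIV/UDIV lowerings in this area divide narrow lanes by round-tripping through f32: estimate the reciprocal, refine it with a Newton-Raphson step, multiply, then bias the product's raw bits (the 0xb000 and 0x89 constants, now built directly as v4i32 splats) so the final truncation rounds the right way. The refinement step in isolation, with a deliberately crude seed standing in for VRECPE:

#include <cassert>

// x_{n+1} = x_n * (2 - y * x_n) converges quadratically to 1/y.
static float recipNewton(float y, float est) {
  return est * (2.0f - y * est);
}

int main() {
  float y = 7.0f;
  float est = 0.125f;            // crude 1/8 seed, as a VRECPE stand-in
  est = recipNewton(y, est);
  est = recipNewton(y, est);
  assert(est > 0.1428f && est < 0.1429f);  // ~1/7 after two steps
}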
-static SDValue
-LowerSDIV_v4i16(SDValue N0, SDValue N1, SDLoc dl, SelectionDAG &DAG) {
+static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl,
+ SelectionDAG &DAG) {
// TODO: Should this propagate fast-math-flags?
SDValue N2;
@@ -6567,8 +6761,7 @@ LowerSDIV_v4i16(SDValue N0, SDValue N1, SDLoc dl, SelectionDAG &DAG) {
// float4 result = as_float4(as_int4(xf*recip) + 0x89);
N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
- N1 = DAG.getConstant(0x89, dl, MVT::i32);
- N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
+ N1 = DAG.getConstant(0x89, dl, MVT::v4i32);
N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
// Convert back to integer and return.
@@ -6679,8 +6872,7 @@ static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
// float4 result = as_float4(as_int4(xf*recip) + 2);
N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
- N1 = DAG.getConstant(2, dl, MVT::i32);
- N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
+ N1 = DAG.getConstant(2, dl, MVT::v4i32);
N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
// Convert back to integer and return.
@@ -6766,21 +6958,21 @@ SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl)
.setChain(DAG.getEntryNode())
- .setCallee(CC, RetTy, Callee, std::move(Args), 0)
+ .setCallee(CC, RetTy, Callee, std::move(Args))
.setDiscardResult(ShouldUseSRet);
std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
if (!ShouldUseSRet)
return CallResult.first;
- SDValue LoadSin = DAG.getLoad(ArgVT, dl, CallResult.second, SRet,
- MachinePointerInfo(), false, false, false, 0);
+ SDValue LoadSin =
+ DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo());
// Address of cos field.
SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet,
DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl));
- SDValue LoadCos = DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add,
- MachinePointerInfo(), false, false, false, 0);
+ SDValue LoadCos =
+ DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo());
SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
return DAG.getNode(ISD::MERGE_VALUES, dl, Tys,
@@ -6819,7 +7011,7 @@ SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG,
CLI.setDebugLoc(dl)
.setChain(Chain)
.setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()),
- ES, std::move(Args), 0);
+ ES, std::move(Args));
return LowerCallTo(CLI).first;
}
@@ -6867,13 +7059,13 @@ void ARMTargetLowering::ExpandDIV_Windows(
}
static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
- // Monotonic load/store is legal for all targets
- if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
- return Op;
+ if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
+ // Acquire/Release load/store is not legal for targets without a dmb or
+ // equivalent available.
+ return SDValue();
- // Acquire/Release load/store is not legal for targets without a
- // dmb or equivalent available.
- return SDValue();
+ // Monotonic load/store is legal for all targets.
+ return Op;
}
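The rewritten predicate asks whether the requested ordering is stronger than monotonic; only monotonic (C++ relaxed) accesses stay legal on targets with no DMB or equivalent. The distinction at the C++ level:

#include <atomic>

int main() {
  std::atomic<int> v{0};
  v.store(1, std::memory_order_relaxed);  // "monotonic": a plain store works
  v.store(2, std::memory_order_release);  // needs barrier-backed lowering
  return v.load(std::memory_order_acquire) == 2 ? 0 : 1;
}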
static void ReplaceREADCYCLECOUNTER(SDNode *N,
@@ -6899,6 +7091,46 @@ static void ReplaceREADCYCLECOUNTER(SDNode *N,
Results.push_back(Cycles32.getValue(1));
}
+static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
+ SDLoc dl(V.getNode());
+ SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32);
+ SDValue VHi = DAG.getAnyExtOrTrunc(
+ DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)),
+ dl, MVT::i32);
+ SDValue RegClass =
+ DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
+ SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32);
+ SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32);
+ const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
+ return SDValue(
+ DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
+}
+
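createGPRPairNode splits the i64 into 32-bit halves and assembles them into an untyped GPRPair via REG_SEQUENCE, gsub_0 taking the low word and gsub_1 the high word. The scalar equivalent of the split:

#include <cassert>
#include <cstdint>

struct GPRPair { uint32_t lo, hi; };  // hypothetical stand-in for the pair

static GPRPair toPair(uint64_t v) {
  return {(uint32_t)v, (uint32_t)(v >> 32)};  // gsub_0 = lo, gsub_1 = hi
}

int main() {
  GPRPair p = toPair(0x1122334455667788ULL);
  assert(p.lo == 0x55667788u && p.hi == 0x11223344u);
}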
+static void ReplaceCMP_SWAP_64Results(SDNode *N,
+                                      SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) {
+ assert(N->getValueType(0) == MVT::i64 &&
+ "AtomicCmpSwap on types less than 64 should be legal");
+ SDValue Ops[] = {N->getOperand(1),
+ createGPRPairNode(DAG, N->getOperand(2)),
+ createGPRPairNode(DAG, N->getOperand(3)),
+ N->getOperand(0)};
+ SDNode *CmpSwap = DAG.getMachineNode(
+ ARM::CMP_SWAP_64, SDLoc(N),
+ DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops);
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineSDNode::mmo_iterator MemOp = MF.allocateMemRefsArray(1);
+ MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
+ cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);
+
+ Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_0, SDLoc(N), MVT::i32,
+ SDValue(CmpSwap, 0)));
+ Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_1, SDLoc(N), MVT::i32,
+ SDValue(CmpSwap, 0)));
+ Results.push_back(SDValue(CmpSwap, 2));
+}
+
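ReplaceCMP_SWAP_64Results custom-expands a 64-bit atomic compare-and-swap whose operands must sit in even/odd register pairs (the pseudo is later expanded to an exclusive-load/store loop). Functionally the node computes what this standard-library snippet does:

#include <atomic>
#include <cassert>
#include <cstdint>

int main() {
  std::atomic<uint64_t> mem{0x0123456789abcdefULL};
  uint64_t expected = 0x0123456789abcdefULL;
  bool ok = mem.compare_exchange_strong(expected, 42);  // the CMP_SWAP step
  assert(ok && mem.load() == 42);
}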
SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: llvm_unreachable("Don't know how to custom lower this!");
@@ -6948,6 +7180,7 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget);
case ISD::SETCC: return LowerVSETCC(Op, DAG);
+ case ISD::SETCCE: return LowerSETCCE(Op, DAG);
case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget);
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
@@ -6956,8 +7189,14 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
case ISD::MUL: return LowerMUL(Op, DAG);
- case ISD::SDIV: return LowerSDIV(Op, DAG);
- case ISD::UDIV: return LowerUDIV(Op, DAG);
+ case ISD::SDIV:
+ if (Subtarget->isTargetWindows())
+ return LowerDIV_Windows(Op, DAG, /* Signed */ true);
+ return LowerSDIV(Op, DAG);
+ case ISD::UDIV:
+ if (Subtarget->isTargetWindows())
+ return LowerDIV_Windows(Op, DAG, /* Signed */ false);
+ return LowerUDIV(Op, DAG);
case ISD::ADDC:
case ISD::ADDE:
case ISD::SUBC:
@@ -7005,6 +7244,13 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
case ISD::UREM:
Res = LowerREM(N, DAG);
break;
+ case ISD::SDIVREM:
+ case ISD::UDIVREM:
+ Res = LowerDivRem(SDValue(N, 0), DAG);
+ assert(Res.getNumOperands() == 2 && "DivRem needs two values");
+ Results.push_back(Res.getValue(0));
+ Results.push_back(Res.getValue(1));
+ return;
case ISD::READCYCLECOUNTER:
ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget);
return;
@@ -7013,6 +7259,9 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows");
return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV,
Results);
+ case ISD::ATOMIC_CMP_SWAP:
+ ReplaceCMP_SWAP_64Results(N, Results, DAG);
+ return;
}
if (Res.getNode())
Results.push_back(Res);
@@ -7024,11 +7273,12 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
/// registers the function context.
-void ARMTargetLowering::
-SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB,
- MachineBasicBlock *DispatchBB, int FI) const {
+void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
+ MachineBasicBlock *MBB,
+ MachineBasicBlock *DispatchBB,
+ int FI) const {
const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- DebugLoc dl = MI->getDebugLoc();
+ DebugLoc dl = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo *MRI = &MF->getRegInfo();
MachineConstantPool *MCP = MF->getConstantPool();
@@ -7139,10 +7389,10 @@ SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB,
}
}
-void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr *MI,
+void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
MachineBasicBlock *MBB) const {
const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- DebugLoc dl = MI->getDebugLoc();
+ DebugLoc dl = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo *MRI = &MF->getRegInfo();
MachineFrameInfo *MFI = MF->getFrameInfo();
@@ -7182,7 +7432,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr *MI,
// Get an ordered list of the machine basic blocks for the jump table.
std::vector<MachineBasicBlock*> LPadList;
- SmallPtrSet<MachineBasicBlock*, 64> InvokeBBs;
+ SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs;
LPadList.reserve(CallSiteNumToLPad.size());
for (unsigned I = 1; I <= MaxCSNum; ++I) {
SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
@@ -7200,7 +7450,6 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr *MI,
MachineJumpTableInfo *JTI =
MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
unsigned MJTI = JTI->createJumpTableIndex(LPadList);
- Reloc::Model RelocM = getTargetMachine().getRelocationModel();
// Create the MBBs for the dispatch code.
@@ -7244,6 +7493,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr *MI,
// registers being marked as clobbered.
MIB.addRegMask(RI.getNoPreservedMask());
+ bool IsPositionIndependent = isPositionIndependent();
unsigned NumLPads = LPadList.size();
if (Subtarget->isThumb2()) {
unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
@@ -7357,7 +7607,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr *MI,
.addMemOperand(JTMMOLd));
unsigned NewVReg6 = NewVReg5;
- if (RelocM == Reloc::PIC_) {
+ if (IsPositionIndependent) {
NewVReg6 = MRI->createVirtualRegister(TRC);
AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6)
.addReg(ARM::CPSR, RegState::Define)
@@ -7440,7 +7690,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr *MI,
.addImm(0)
.addMemOperand(JTMMOLd));
- if (RelocM == Reloc::PIC_) {
+ if (IsPositionIndependent) {
BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
.addReg(NewVReg5, RegState::Kill)
.addReg(NewVReg4)
@@ -7524,7 +7774,7 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr *MI,
(*I)->setIsEHPad(false);
// The instruction is gone now.
- MI->eraseFromParent();
+ MI.eraseFromParent();
}
static
@@ -7576,8 +7826,8 @@ static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
/// Emit a post-increment load operation with given size. The instructions
/// will be added to BB at Pos.
-static void emitPostLd(MachineBasicBlock *BB, MachineInstr *Pos,
- const TargetInstrInfo *TII, DebugLoc dl,
+static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
+ const TargetInstrInfo *TII, const DebugLoc &dl,
unsigned LdSize, unsigned Data, unsigned AddrIn,
unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
@@ -7608,8 +7858,8 @@ static void emitPostLd(MachineBasicBlock *BB, MachineInstr *Pos,
/// Emit a post-increment store operation with given size. The instructions
/// will be added to BB at Pos.
-static void emitPostSt(MachineBasicBlock *BB, MachineInstr *Pos,
- const TargetInstrInfo *TII, DebugLoc dl,
+static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
+ const TargetInstrInfo *TII, const DebugLoc &dl,
unsigned StSize, unsigned Data, unsigned AddrIn,
unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
@@ -7637,7 +7887,7 @@ static void emitPostSt(MachineBasicBlock *BB, MachineInstr *Pos,
}
MachineBasicBlock *
-ARMTargetLowering::EmitStructByval(MachineInstr *MI,
+ARMTargetLowering::EmitStructByval(MachineInstr &MI,
MachineBasicBlock *BB) const {
// This pseudo instruction has 3 operands: dst, src, size
// We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
@@ -7646,11 +7896,11 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI,
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineFunction::iterator It = ++BB->getIterator();
- unsigned dest = MI->getOperand(0).getReg();
- unsigned src = MI->getOperand(1).getReg();
- unsigned SizeVal = MI->getOperand(2).getImm();
- unsigned Align = MI->getOperand(3).getImm();
- DebugLoc dl = MI->getDebugLoc();
+ unsigned dest = MI.getOperand(0).getReg();
+ unsigned src = MI.getOperand(1).getReg();
+ unsigned SizeVal = MI.getOperand(2).getImm();
+ unsigned Align = MI.getOperand(3).getImm();
+ DebugLoc dl = MI.getDebugLoc();
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
@@ -7722,7 +7972,7 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI,
srcIn = srcOut;
destIn = destOut;
}
- MI->eraseFromParent(); // The instruction is gone now.
+ MI.eraseFromParent(); // The instruction is gone now.
return BB;
}
@@ -7848,7 +8098,7 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI,
// Add epilogue to handle BytesLeft.
BB = exitMBB;
- MachineInstr *StartOfExit = exitMBB->begin();
+ auto StartOfExit = exitMBB->begin();
// [scratch, srcOut] = LDRB_POST(srcLoop, 1)
// [destOut] = STRB_POST(scratch, destLoop, 1)
@@ -7866,16 +8116,16 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI,
destIn = destOut;
}
- MI->eraseFromParent(); // The instruction is gone now.
+ MI.eraseFromParent(); // The instruction is gone now.
return BB;
}
MachineBasicBlock *
-ARMTargetLowering::EmitLowered__chkstk(MachineInstr *MI,
+ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
MachineBasicBlock *MBB) const {
const TargetMachine &TM = getTargetMachine();
const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
assert(Subtarget->isTargetWindows() &&
"__chkstk is only supported on Windows");
@@ -7930,24 +8180,26 @@ ARMTargetLowering::EmitLowered__chkstk(MachineInstr *MI,
AddDefaultCC(AddDefaultPred(BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr),
ARM::SP)
- .addReg(ARM::SP).addReg(ARM::R4)));
+ .addReg(ARM::SP, RegState::Kill)
+ .addReg(ARM::R4, RegState::Kill)
+ .setMIFlags(MachineInstr::FrameSetup)));
- MI->eraseFromParent();
+ MI.eraseFromParent();
return MBB;
}
MachineBasicBlock *
-ARMTargetLowering::EmitLowered__dbzchk(MachineInstr *MI,
+ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI,
MachineBasicBlock *MBB) const {
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
const TargetInstrInfo *TII = Subtarget->getInstrInfo();
MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock();
- MF->push_back(ContBB);
+ MF->insert(++MBB->getIterator(), ContBB);
ContBB->splice(ContBB->begin(), MBB,
std::next(MachineBasicBlock::iterator(MI)), MBB->end());
- MBB->addSuccessor(ContBB);
+ ContBB->transferSuccessorsAndUpdatePHIs(MBB);
MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
MF->push_back(TrapBB);
@@ -7955,74 +8207,89 @@ ARMTargetLowering::EmitLowered__dbzchk(MachineInstr *MI,
MBB->addSuccessor(TrapBB);
BuildMI(*MBB, MI, DL, TII->get(ARM::tCBZ))
- .addReg(MI->getOperand(0).getReg())
+ .addReg(MI.getOperand(0).getReg())
.addMBB(TrapBB);
+ AddDefaultPred(BuildMI(*MBB, MI, DL, TII->get(ARM::t2B)).addMBB(ContBB));
+ MBB->addSuccessor(ContBB);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return ContBB;
}
MachineBasicBlock *
-ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- DebugLoc dl = MI->getDebugLoc();
+ DebugLoc dl = MI.getDebugLoc();
bool isThumb2 = Subtarget->isThumb2();
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: {
- MI->dump();
+ MI.dump();
llvm_unreachable("Unexpected instr type to insert");
}
+
+ // Thumb1 post-indexed loads are really just single-register LDMs.
+ case ARM::tLDR_postidx: {
+ BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD))
+ .addOperand(MI.getOperand(1)) // Rn_wb
+ .addOperand(MI.getOperand(2)) // Rn
+ .addOperand(MI.getOperand(3)) // PredImm
+ .addOperand(MI.getOperand(4)) // PredReg
+ .addOperand(MI.getOperand(0)); // Rt
+ MI.eraseFromParent();
+ return BB;
+ }
+
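Thumb1 has no post-indexed LDR, so the pseudo is rewritten as a single-register tLDMIA_UPD whose load-then-writeback behaviour is exactly a post-increment of 4. In C terms:

#include <cassert>
#include <cstdint>

// Model of "ldmia r1!, {r0}": load Rt, then write Rn + 4 back to Rn.
static uint32_t postIncLoad(const uint32_t *&p) {
  uint32_t v = *p;
  p += 1;
  return v;
}

int main() {
  uint32_t buf[2] = {5, 6};
  const uint32_t *p = buf;
  assert(postIncLoad(p) == 5 && *p == 6);
}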
// The Thumb2 pre-indexed stores have the same MI operands, they just
// define them differently in the .td files from the isel patterns, so
// they need pseudos.
case ARM::t2STR_preidx:
- MI->setDesc(TII->get(ARM::t2STR_PRE));
+ MI.setDesc(TII->get(ARM::t2STR_PRE));
return BB;
case ARM::t2STRB_preidx:
- MI->setDesc(TII->get(ARM::t2STRB_PRE));
+ MI.setDesc(TII->get(ARM::t2STRB_PRE));
return BB;
case ARM::t2STRH_preidx:
- MI->setDesc(TII->get(ARM::t2STRH_PRE));
+ MI.setDesc(TII->get(ARM::t2STRH_PRE));
return BB;
case ARM::STRi_preidx:
case ARM::STRBi_preidx: {
- unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ?
- ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM;
+ unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
+ : ARM::STRB_PRE_IMM;
// Decode the offset.
- unsigned Offset = MI->getOperand(4).getImm();
+ unsigned Offset = MI.getOperand(4).getImm();
bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub;
Offset = ARM_AM::getAM2Offset(Offset);
if (isSub)
Offset = -Offset;
- MachineMemOperand *MMO = *MI->memoperands_begin();
+ MachineMemOperand *MMO = *MI.memoperands_begin();
BuildMI(*BB, MI, dl, TII->get(NewOpc))
- .addOperand(MI->getOperand(0)) // Rn_wb
- .addOperand(MI->getOperand(1)) // Rt
- .addOperand(MI->getOperand(2)) // Rn
- .addImm(Offset) // offset (skip GPR==zero_reg)
- .addOperand(MI->getOperand(5)) // pred
- .addOperand(MI->getOperand(6))
- .addMemOperand(MMO);
- MI->eraseFromParent();
+ .addOperand(MI.getOperand(0)) // Rn_wb
+ .addOperand(MI.getOperand(1)) // Rt
+ .addOperand(MI.getOperand(2)) // Rn
+ .addImm(Offset) // offset (skip GPR==zero_reg)
+ .addOperand(MI.getOperand(5)) // pred
+ .addOperand(MI.getOperand(6))
+ .addMemOperand(MMO);
+ MI.eraseFromParent();
return BB;
}
case ARM::STRr_preidx:
case ARM::STRBr_preidx:
case ARM::STRH_preidx: {
unsigned NewOpc;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("unexpected opcode!");
case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
}
MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc));
- for (unsigned i = 0; i < MI->getNumOperands(); ++i)
- MIB.addOperand(MI->getOperand(i));
- MI->eraseFromParent();
+ for (unsigned i = 0; i < MI.getNumOperands(); ++i)
+ MIB.addOperand(MI.getOperand(i));
+ MI.eraseFromParent();
return BB;
}
@@ -8055,8 +8322,10 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
- BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
- .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
+ BuildMI(BB, dl, TII->get(ARM::tBcc))
+ .addMBB(sinkMBB)
+ .addImm(MI.getOperand(3).getImm())
+ .addReg(MI.getOperand(4).getReg());
// copy0MBB:
// %FalseValue = ...
@@ -8070,12 +8339,13 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = sinkMBB;
- BuildMI(*BB, BB->begin(), dl,
- TII->get(ARM::PHI), MI->getOperand(0).getReg())
- .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
- .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
+ BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg())
+ .addReg(MI.getOperand(1).getReg())
+ .addMBB(copy0MBB)
+ .addReg(MI.getOperand(2).getReg())
+ .addMBB(thisMBB);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -8086,10 +8356,10 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Compare both parts that make up the double comparison separately for
// equality.
- bool RHSisZero = MI->getOpcode() == ARM::BCCZi64;
+ bool RHSisZero = MI.getOpcode() == ARM::BCCZi64;
- unsigned LHS1 = MI->getOperand(1).getReg();
- unsigned LHS2 = MI->getOperand(2).getReg();
+ unsigned LHS1 = MI.getOperand(1).getReg();
+ unsigned LHS2 = MI.getOperand(2).getReg();
if (RHSisZero) {
AddDefaultPred(BuildMI(BB, dl,
TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
@@ -8098,8 +8368,8 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
.addReg(LHS2).addImm(0)
.addImm(ARMCC::EQ).addReg(ARM::CPSR);
} else {
- unsigned RHS1 = MI->getOperand(3).getReg();
- unsigned RHS2 = MI->getOperand(4).getReg();
+ unsigned RHS1 = MI.getOperand(3).getReg();
+ unsigned RHS2 = MI.getOperand(4).getReg();
AddDefaultPred(BuildMI(BB, dl,
TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
.addReg(LHS1).addReg(RHS1));
@@ -8108,9 +8378,9 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
.addImm(ARMCC::EQ).addReg(ARM::CPSR);
}
- MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB();
+ MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB();
MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
- if (MI->getOperand(0).getImm() == ARMCC::NE)
+ if (MI.getOperand(0).getImm() == ARMCC::NE)
std::swap(destMBB, exitMBB);
BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
@@ -8120,7 +8390,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
else
BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -8157,9 +8427,9 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
Fn->insert(BBI, RSBBB);
Fn->insert(BBI, SinkBB);
- unsigned int ABSSrcReg = MI->getOperand(1).getReg();
- unsigned int ABSDstReg = MI->getOperand(0).getReg();
- bool ABSSrcKIll = MI->getOperand(1).isKill();
+ unsigned int ABSSrcReg = MI.getOperand(1).getReg();
+ unsigned int ABSDstReg = MI.getOperand(0).getReg();
+ bool ABSSrcKIll = MI.getOperand(1).isKill();
bool isThumb2 = Subtarget->isThumb2();
MachineRegisterInfo &MRI = Fn->getRegInfo();
// In Thumb mode S must not be specified if source register is the SP or
@@ -8204,7 +8474,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
.addReg(ABSSrcReg).addMBB(BB);
// remove ABS instruction
- MI->eraseFromParent();
+ MI.eraseFromParent();
// return last added BB
return SinkBB;
@@ -8223,38 +8493,38 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
/// when it is expanded into LDM/STM. This is done as a post-isel lowering
/// instead of as a custom inserter because we need the use list from the SDNode.
static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
- MachineInstr *MI, const SDNode *Node) {
+ MachineInstr &MI, const SDNode *Node) {
bool isThumb1 = Subtarget->isThumb1Only();
- DebugLoc DL = MI->getDebugLoc();
- MachineFunction *MF = MI->getParent()->getParent();
+ DebugLoc DL = MI.getDebugLoc();
+ MachineFunction *MF = MI.getParent()->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
MachineInstrBuilder MIB(*MF, MI);
// If the new dst/src is unused mark it as dead.
if (!Node->hasAnyUseOfValue(0)) {
- MI->getOperand(0).setIsDead(true);
+ MI.getOperand(0).setIsDead(true);
}
if (!Node->hasAnyUseOfValue(1)) {
- MI->getOperand(1).setIsDead(true);
+ MI.getOperand(1).setIsDead(true);
}
// The MEMCPY both defines and kills the scratch registers.
- for (unsigned I = 0; I != MI->getOperand(4).getImm(); ++I) {
+ for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) {
unsigned TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
: &ARM::GPRRegClass);
MIB.addReg(TmpReg, RegState::Define|RegState::Dead);
}
}
-void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
+void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
SDNode *Node) const {
- if (MI->getOpcode() == ARM::MEMCPY) {
+ if (MI.getOpcode() == ARM::MEMCPY) {
attachMEMCPYScratchRegs(Subtarget, MI, Node);
return;
}
- const MCInstrDesc *MCID = &MI->getDesc();
+ const MCInstrDesc *MCID = &MI.getDesc();
// Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB,
// RSC. Coming out of isel, they have an implicit CPSR def, but the optional
// operand is still set to noreg. If needed, set the optional operand's
@@ -8263,24 +8533,24 @@ void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
// e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>).
// Rename pseudo opcodes.
- unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode());
+ unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
if (NewOpc) {
const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
MCID = &TII->get(NewOpc);
- assert(MCID->getNumOperands() == MI->getDesc().getNumOperands() + 1 &&
+ assert(MCID->getNumOperands() == MI.getDesc().getNumOperands() + 1 &&
"converted opcode should be the same except for cc_out");
- MI->setDesc(*MCID);
+ MI.setDesc(*MCID);
// Add the optional cc_out operand
- MI->addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));
+ MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));
}
unsigned ccOutIdx = MCID->getNumOperands() - 1;
// Any ARM instruction that sets the 's' bit should specify an optional
// "cc_out" operand in the last operand position.
- if (!MI->hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
+ if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
assert(!NewOpc && "Optional cc_out operand required");
return;
}
@@ -8288,14 +8558,14 @@ void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
// since we already have an optional CPSR def.
bool definesCPSR = false;
bool deadCPSR = false;
- for (unsigned i = MCID->getNumOperands(), e = MI->getNumOperands();
- i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e;
+ ++i) {
+ const MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
definesCPSR = true;
if (MO.isDead())
deadCPSR = true;
- MI->RemoveOperand(i);
+ MI.RemoveOperand(i);
break;
}
}
@@ -8305,14 +8575,14 @@ void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
}
assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
if (deadCPSR) {
- assert(!MI->getOperand(ccOutIdx).getReg() &&
+ assert(!MI.getOperand(ccOutIdx).getReg() &&
"expect uninitialized optional cc_out operand");
return;
}
// If this instruction was defined with an optional CPSR def and its dag node
// had a live implicit CPSR def, then activate the optional CPSR def.
- MachineOperand &MO = MI->getOperand(ccOutIdx);
+ MachineOperand &MO = MI.getOperand(ccOutIdx);
MO.setReg(ARM::CPSR);
MO.setIsDef(true);
}
@@ -8442,16 +8712,12 @@ SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
TargetLowering::DAGCombinerInfo &DCI) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- if (N0.getNode()->hasOneUse()) {
- SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes);
- if (Result.getNode())
+ if (N0.getNode()->hasOneUse())
+ if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
return Result;
- }
- if (N1.getNode()->hasOneUse()) {
- SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes);
- if (Result.getNode())
+ if (N1.getNode()->hasOneUse())
+ if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
return Result;
- }
return SDValue();
}
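The recurring cleanup in the combine hunks above and below collapses the temporary-then-test pair into C++'s declare-in-condition form, relying on SDValue's boolean conversion ("has a node"). The idiom with std::optional standing in for SDValue:

#include <cstdio>
#include <optional>

static std::optional<int> tryCombine(bool ok) {
  return ok ? std::optional<int>(7) : std::nullopt;
}

int main() {
  if (auto R = tryCombine(true))  // declaration and test in one condition
    std::printf("combined: %d\n", *R);
}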
@@ -8533,7 +8799,7 @@ static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
// Get widened type and narrowed type.
MVT widenType;
unsigned numElem = VT.getVectorNumElements();
-
+
EVT inputLaneType = Vec.getValueType().getVectorElementType();
switch (inputLaneType.getSimpleVT().SimpleTy) {
case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
@@ -8559,11 +8825,6 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) {
- if (Subtarget->isThumb1Only()) return SDValue();
-
- // Only perform the checks after legalize when the pattern is available.
- if (DCI.isBeforeLegalize()) return SDValue();
-
// Look for multiply add opportunities.
// The pattern is a ISD::UMUL_LOHI followed by two add nodes, where
// each add nodes consumes a value from ISD::UMUL_LOHI and there is
@@ -8691,14 +8952,97 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
return resNode;
}
+static SDValue AddCombineTo64bitUMAAL(SDNode *AddcNode,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const ARMSubtarget *Subtarget) {
+ // UMAAL is similar to UMLAL except that it adds two unsigned values.
+ // While trying to combine for the other MLAL nodes, first search for the
+ // chance to use UMAAL. Check if Addc uses another addc node which can first
+ // be combined into a UMLAL. The other pattern, where AddcNode itself is
+ // combined into a UMLAL and another addc then uses the result, is handled
+ // in ISelDAGToDAG.
+
+ if (!Subtarget->hasV6Ops())
+ return AddCombineTo64bitMLAL(AddcNode, DCI, Subtarget);
+
+ SDNode *PrevAddc = nullptr;
+ if (AddcNode->getOperand(0).getOpcode() == ISD::ADDC)
+ PrevAddc = AddcNode->getOperand(0).getNode();
+ else if (AddcNode->getOperand(1).getOpcode() == ISD::ADDC)
+ PrevAddc = AddcNode->getOperand(1).getNode();
+
+ // If there are no addc chains, just fall back to searching for any MLAL.
+ if (PrevAddc == nullptr)
+ return AddCombineTo64bitMLAL(AddcNode, DCI, Subtarget);
+
+ // Try to convert the addc operand to an MLAL and if that fails try to
+ // combine AddcNode.
+ SDValue MLAL = AddCombineTo64bitMLAL(PrevAddc, DCI, Subtarget);
+ if (MLAL != SDValue(PrevAddc, 0))
+ return AddCombineTo64bitMLAL(AddcNode, DCI, Subtarget);
+
+ // Find the converted UMAAL or quit if it doesn't exist.
+ SDNode *UmlalNode = nullptr;
+ SDValue AddHi;
+ if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) {
+ UmlalNode = AddcNode->getOperand(0).getNode();
+ AddHi = AddcNode->getOperand(1);
+ } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) {
+ UmlalNode = AddcNode->getOperand(1).getNode();
+ AddHi = AddcNode->getOperand(0);
+ } else {
+ return SDValue();
+ }
+
+ // The ADDC should be glued to an ADDE node, which uses the same UMLAL as
+ // the ADDC as well as Zero.
+ auto *Zero = dyn_cast<ConstantSDNode>(UmlalNode->getOperand(3));
+
+ if (!Zero || Zero->getZExtValue() != 0)
+ return SDValue();
+
+ // Check that we have a glued ADDC node.
+ if (AddcNode->getValueType(1) != MVT::Glue)
+ return SDValue();
+
+ // Look for the glued ADDE.
+ SDNode* AddeNode = AddcNode->getGluedUser();
+ if (!AddeNode)
+ return SDValue();
+
+ if ((AddeNode->getOperand(0).getNode() == Zero &&
+ AddeNode->getOperand(1).getNode() == UmlalNode) ||
+ (AddeNode->getOperand(0).getNode() == UmlalNode &&
+ AddeNode->getOperand(1).getNode() == Zero)) {
+
+ SelectionDAG &DAG = DCI.DAG;
+ SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1),
+ UmlalNode->getOperand(2), AddHi };
+ SDValue UMAAL = DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode),
+ DAG.getVTList(MVT::i32, MVT::i32), Ops);
+
+ // Replace the ADDs' nodes uses by the UMAAL node's values.
+ DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1));
+ DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0));
+
+ // Return original node to notify the driver to stop replacing.
+ return SDValue(AddcNode, 0);
+ }
+ return SDValue();
+}
+
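UMAAL computes RdHi:RdLo = Rn * Rm + RdLo + RdHi, one multiply plus two 32-bit accumulations, which is why the search above wants an UMLAL that still feeds one further add chain. A scalar model, including the fact that even the maximal inputs fit a 64-bit result exactly:

#include <cassert>
#include <cstdint>

static uint64_t umaal(uint32_t rdlo, uint32_t rdhi, uint32_t rn, uint32_t rm) {
  return (uint64_t)rn * rm + rdlo + rdhi;  // never overflows 64 bits
}

int main() {
  assert(umaal(0xffffffffu, 0xffffffffu, 0xffffffffu, 0xffffffffu) ==
         0xffffffffffffffffULL);           // worst case lands on UINT64_MAX
}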
/// PerformADDCCombine - Target-specific dag combine transform from
-/// ISD::ADDC, ISD::ADDE, and ISD::MUL_LOHI to MLAL.
+/// ISD::ADDC, ISD::ADDE, and ISD::MUL_LOHI to MLAL or
+/// ISD::ADDC, ISD::ADDE, and ARMISD::UMLAL to ARMISD::UMAAL.
static SDValue PerformADDCCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) {
- return AddCombineTo64bitMLAL(N, DCI, Subtarget);
+ if (Subtarget->isThumb1Only()) return SDValue();
+ // Only perform the checks after legalize when the pattern is available.
+ if (DCI.isBeforeLegalize()) return SDValue();
+
+ return AddCombineTo64bitUMAAL(N, DCI, Subtarget);
}
/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
@@ -8710,15 +9054,13 @@ static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
const ARMSubtarget *Subtarget){
// Attempt to create vpaddl for this add.
- SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget);
- if (Result.getNode())
+ if (SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget))
return Result;
// fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
- if (N0.getNode()->hasOneUse()) {
- SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
- if (Result.getNode()) return Result;
- }
+ if (N0.getNode()->hasOneUse())
+ if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
+ return Result;
return SDValue();
}
@@ -8731,8 +9073,7 @@ static SDValue PerformADDCombine(SDNode *N,
SDValue N1 = N->getOperand(1);
// First try with the default operand order.
- SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget);
- if (Result.getNode())
+ if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
return Result;
// If that didn't work, try again with the operands commuted.
@@ -8747,10 +9088,9 @@ static SDValue PerformSUBCombine(SDNode *N,
SDValue N1 = N->getOperand(1);
// fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
- if (N1.getNode()->hasOneUse()) {
- SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
- if (Result.getNode()) return Result;
- }
+ if (N1.getNode()->hasOneUse())
+ if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
+ return Result;
return SDValue();
}
@@ -8920,8 +9260,7 @@ static SDValue PerformANDCombine(SDNode *N,
if (!Subtarget->isThumb1Only()) {
// fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))
- SDValue Result = combineSelectAndUseCommutative(N, true, DCI);
- if (Result.getNode())
+ if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
return Result;
}
@@ -8963,8 +9302,7 @@ static SDValue PerformORCombine(SDNode *N,
if (!Subtarget->isThumb1Only()) {
// fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
- SDValue Result = combineSelectAndUseCommutative(N, false, DCI);
- if (Result.getNode())
+ if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
return Result;
}
@@ -9137,8 +9475,7 @@ static SDValue PerformXORCombine(SDNode *N,
if (!Subtarget->isThumb1Only()) {
// fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
- SDValue Result = combineSelectAndUseCommutative(N, false, DCI);
- if (Result.getNode())
+ if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
return Result;
}
@@ -9300,17 +9637,15 @@ static SDValue PerformVMOVRRDCombine(SDNode *N,
SelectionDAG &DAG = DCI.DAG;
SDLoc DL(LD);
SDValue BasePtr = LD->getBasePtr();
- SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr,
- LD->getPointerInfo(), LD->isVolatile(),
- LD->isNonTemporal(), LD->isInvariant(),
- LD->getAlignment());
+ SDValue NewLD1 =
+ DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
+ LD->getAlignment(), LD->getMemOperand()->getFlags());
SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
DAG.getConstant(4, DL, MVT::i32));
- SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr,
- LD->getPointerInfo(), LD->isVolatile(),
- LD->isNonTemporal(), LD->isInvariant(),
- std::min(4U, LD->getAlignment() / 2));
+ SDValue NewLD2 = DAG.getLoad(
+ MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, LD->getPointerInfo(),
+ std::min(4U, LD->getAlignment() / 2), LD->getMemOperand()->getFlags());
DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
if (DCI.DAG.getDataLayout().isBigEndian())
@@ -9364,11 +9699,9 @@ static SDValue PerformBUILD_VECTORCombine(SDNode *N,
// into a pair of GPRs, which is fine when the value is used as a scalar,
// but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
SelectionDAG &DAG = DCI.DAG;
- if (N->getNumOperands() == 2) {
- SDValue RV = PerformVMOVDRRCombine(N, DAG);
- if (RV.getNode())
+ if (N->getNumOperands() == 2)
+ if (SDValue RV = PerformVMOVDRRCombine(N, DAG))
return RV;
- }
// Load i64 elements as f64 values so that type legalization does not split
// them up into i32 values.
@@ -9385,7 +9718,7 @@ static SDValue PerformBUILD_VECTORCombine(SDNode *N,
DCI.AddToWorklist(V.getNode());
}
EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
- SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops);
+ SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops);
return DAG.getNode(ISD::BITCAST, dl, VT, BV);
}
@@ -9434,7 +9767,7 @@ PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
// Assume only bit cast to i32 will go away.
if (Elt->getOperand(0).getValueType() == MVT::i32)
++NumOfBitCastedElts;
- } else if (Elt.getOpcode() == ISD::UNDEF || isa<ConstantSDNode>(Elt))
+ } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt))
// Constants are statically casted, thus do not count them as
// relevant operands.
--NumOfRelevantElts;
@@ -9461,7 +9794,7 @@ PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
SDLoc dl(N);
for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
SDValue V = N->getOperand(Idx);
- if (V.getOpcode() == ISD::UNDEF)
+ if (V.isUndef())
continue;
if (V.getOpcode() == ISD::BITCAST &&
V->getOperand(0).getValueType() == MVT::i32)
@@ -9529,8 +9862,7 @@ static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
SDValue Concat0Op1 = Op0.getOperand(1);
SDValue Concat1Op1 = Op1.getOperand(1);
- if (Concat0Op1.getOpcode() != ISD::UNDEF ||
- Concat1Op1.getOpcode() != ISD::UNDEF)
+ if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef())
return SDValue();
// Skip the transformation if any of the types are illegal.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -9557,7 +9889,7 @@ static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
NewMask.push_back(NewElt);
}
return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat,
- DAG.getUNDEF(VT), NewMask.data());
+ DAG.getUNDEF(VT), NewMask);
}
/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP,
@@ -9953,7 +10285,7 @@ static SDValue PerformSTORECombine(SDNode *N,
SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec,
DAG.getUNDEF(WideVec.getValueType()),
- ShuffleVec.data());
+ ShuffleVec);
// At this point all of the data is stored at the bottom of the
// register. We now need to save it to mem.
@@ -9984,8 +10316,8 @@ static SDValue PerformSTORECombine(SDNode *N,
StoreType, ShuffWide,
DAG.getIntPtrConstant(I, DL));
SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr,
- St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(), St->getAlignment());
+ St->getPointerInfo(), St->getAlignment(),
+ St->getMemOperand()->getFlags());
BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr,
Increment);
Chains.push_back(Ch);
@@ -10004,18 +10336,18 @@ static SDValue PerformSTORECombine(SDNode *N,
bool isBigEndian = DAG.getDataLayout().isBigEndian();
SDLoc DL(St);
SDValue BasePtr = St->getBasePtr();
- SDValue NewST1 = DAG.getStore(St->getChain(), DL,
- StVal.getNode()->getOperand(isBigEndian ? 1 : 0 ),
- BasePtr, St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(), St->getAlignment());
+ SDValue NewST1 = DAG.getStore(
+ St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0),
+ BasePtr, St->getPointerInfo(), St->getAlignment(),
+ St->getMemOperand()->getFlags());
SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
DAG.getConstant(4, DL, MVT::i32));
return DAG.getStore(NewST1.getValue(0), DL,
StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
- OffsetPtr, St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(),
- std::min(4U, St->getAlignment() / 2));
+ OffsetPtr, St->getPointerInfo(),
+ std::min(4U, St->getAlignment() / 2),
+ St->getMemOperand()->getFlags());
}
if (StVal.getValueType() == MVT::i64 &&
@@ -10038,9 +10370,8 @@ static SDValue PerformSTORECombine(SDNode *N,
DCI.AddToWorklist(ExtElt.getNode());
DCI.AddToWorklist(V.getNode());
return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
- St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(), St->getAlignment(),
- St->getAAInfo());
+ St->getPointerInfo(), St->getAlignment(),
+ St->getMemOperand()->getFlags(), St->getAAInfo());
}
// If this is a legal vector store, try to combine it into a VST1_UPD.
@@ -10066,7 +10397,8 @@ static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
SDValue Op = N->getOperand(0);
- if (!Op.getValueType().isVector() || Op.getOpcode() != ISD::FMUL)
+ if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
+ Op.getOpcode() != ISD::FMUL)
return SDValue();
SDValue ConstVec = Op->getOperand(1);
@@ -10123,7 +10455,7 @@ static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
SDValue Op = N->getOperand(0);
unsigned OpOpcode = Op.getNode()->getOpcode();
- if (!N->getValueType(0).isVector() ||
+ if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
(OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
return SDValue();
@@ -10464,7 +10796,7 @@ static void computeKnownBits(SelectionDAG &DAG, SDValue Op, APInt &KnownZero,
// The operand to BFI is already a mask suitable for removing the bits it
// sets.
ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
- APInt Mask = CI->getAPIntValue();
+ const APInt &Mask = CI->getAPIntValue();
KnownZero &= Mask;
KnownOne &= Mask;
return;
@@ -10522,7 +10854,7 @@ SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &D
} else {
assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
}
-
+
if (Op1->getOpcode() != ISD::OR)
return SDValue();
@@ -10552,7 +10884,7 @@ SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &D
SDLoc dl(X);
EVT VT = X.getValueType();
unsigned BitInX = AndC->getAPIntValue().logBase2();
-
+
if (BitInX != 0) {
// We must shift X first.
X = DAG.getNode(ISD::SRL, dl, VT, X,
@@ -10573,6 +10905,46 @@ SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &D
return V;
}
+/// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
+SDValue
+ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
+ SDValue Cmp = N->getOperand(4);
+ if (Cmp.getOpcode() != ARMISD::CMPZ)
+ // Only looking at NE cases.
+ return SDValue();
+
+ EVT VT = N->getValueType(0);
+ SDLoc dl(N);
+ SDValue LHS = Cmp.getOperand(0);
+ SDValue RHS = Cmp.getOperand(1);
+ SDValue Chain = N->getOperand(0);
+ SDValue BB = N->getOperand(1);
+ SDValue ARMcc = N->getOperand(2);
+ ARMCC::CondCodes CC =
+ (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
+
+ // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
+ // -> (brcond Chain BB CC CPSR Cmp)
+ if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() &&
+ LHS->getOperand(0)->getOpcode() == ARMISD::CMOV &&
+ LHS->getOperand(0)->hasOneUse()) {
+ auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0));
+ auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1));
+ auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
+ auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
+ if ((LHS00C && LHS00C->getZExtValue() == 0) &&
+ (LHS01C && LHS01C->getZExtValue() == 1) &&
+ (LHS1C && LHS1C->getZExtValue() == 1) &&
+ (RHSC && RHSC->getZExtValue() == 0)) {
+ return DAG.getNode(
+ ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2),
+ LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4));
+ }
+ }
+
+ return SDValue();
+}
+
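For orientation, a minimal C++ sketch (illustrative only; the names are made
up) of the source shape whose DAG this combine rewrites:

    extern void g();
    void f(int a, int b) {
      bool t = (a == b); // materialized as (cmov 0, 1, CC, CPSR, Cmp)
      if (t)             // (brcond ... ne CPSR (cmpz (and t, 1), 0))
        g();             // after the combine: a single brcond on CC
    }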
/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
SDValue
ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
@@ -10626,6 +10998,21 @@ ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
N->getOperand(3), NewCmp);
}
+ // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0))
+ // -> (cmov F T CC CPSR Cmp)
+ if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) {
+ auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0));
+ auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
+ auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
+ if ((LHS0C && LHS0C->getZExtValue() == 0) &&
+ (LHS1C && LHS1C->getZExtValue() == 1) &&
+ (RHSC && RHSC->getZExtValue() == 0)) {
+ return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
+ LHS->getOperand(2), LHS->getOperand(3),
+ LHS->getOperand(4));
+ }
+ }
+
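Analogously, a sketch (illustrative) of the select-on-materialized-boolean
shape that this CMOV-of-CMOV fold targets:

    int sel(int a, int b, int x, int y) {
      bool t = (a == b); // inner (cmov 0, 1, CC, CPSR, Cmp)
      return t ? x : y;  // outer cmov on (cmpz t, 0) ne; folded to a single
    }                    // cmov on the original condition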
if (Res.getNode()) {
APInt KnownZero, KnownOne;
DAG.computeKnownBits(SDValue(N,0), KnownZero, KnownOne);
@@ -10676,6 +11063,7 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
+ case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG);
case ISD::LOAD: return PerformLOADCombine(N, DCI);
case ARMISD::VLD2DUP:
case ARMISD::VLD3DUP:
@@ -11198,22 +11586,37 @@ bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
SDValue &Offset,
ISD::MemIndexedMode &AM,
SelectionDAG &DAG) const {
- if (Subtarget->isThumb1Only())
- return false;
-
EVT VT;
SDValue Ptr;
- bool isSEXTLoad = false;
+ bool isSEXTLoad = false, isNonExt;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
VT = LD->getMemoryVT();
Ptr = LD->getBasePtr();
isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
+ isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
VT = ST->getMemoryVT();
Ptr = ST->getBasePtr();
+ isNonExt = !ST->isTruncatingStore();
} else
return false;
+ if (Subtarget->isThumb1Only()) {
+ // Thumb-1 can do a limited post-inc load or store as an updating LDM. It
+ // must be a non-extending load or a non-truncating store, of type i32,
+ // with an offset of exactly 4.
+ assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
+ if (Op->getOpcode() != ISD::ADD || !isNonExt)
+ return false;
+ auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
+ if (!RHS || RHS->getZExtValue() != 4)
+ return false;
+
+ Offset = Op->getOperand(1);
+ Base = Op->getOperand(0);
+ AM = ISD::POST_INC;
+ return true;
+ }
+
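A sketch (assumed shape, not taken from the patch) of the one pattern Thumb-1
can now post-increment: a non-extending i32 load whose base advances by
exactly 4, selectable as a single-register updating LDM such as
"ldm r0!, {r1}":

    int load_and_bump(int **pp) {
      int *p = *pp;
      int v = *p++; // i32 load, base update of +4 -> ISD::POST_INC
      *pp = p;
      return v;
    }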
bool isInc;
bool isLegal = false;
if (Subtarget->isThumb2())
@@ -11322,6 +11725,26 @@ bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
return false;
}
+const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {
+ // At this point, we have to lower this constraint to something else, so we
+ // lower it to an "r" or "w". However, by doing this we will force the result
+ // to be in register, while the X constraint is much more permissive.
+ //
+ // Although we are correct (we are free to emit anything, without
+ // constraints), we might break use cases that would expect us to be more
+ // efficient and emit something else.
+ if (!Subtarget->hasVFP2())
+ return "r";
+ if (ConstraintVT.isFloatingPoint())
+ return "w";
+ if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
+ (ConstraintVT.getSizeInBits() == 64 ||
+ ConstraintVT.getSizeInBits() == 128))
+ return "w";
+
+ return "r";
+}
+
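For example (hypothetical snippet), an inline-asm operand written with the
permissive "X" constraint is now pinned to a concrete register class:

    float keep(float x) {
      // With VFP2 and a floating-point operand, "X" is lowered as if it were
      // "w" (a VFP/NEON register); without VFP2, or for other types, "r".
      asm volatile("" : "+X"(x));
      return x;
    }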
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
ARMTargetLowering::ConstraintType
@@ -11640,7 +12063,8 @@ static TargetLowering::ArgListTy getDivRemArgList(
}
SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
- assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid()) &&
+ assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
+ Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI()) &&
"Register-based DivRem lowering only");
unsigned Opcode = Op->getOpcode();
assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
@@ -11664,7 +12088,7 @@ SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl).setChain(InChain)
- .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
+ .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
.setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
@@ -11702,7 +12126,7 @@ SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {
// Lower call
CallLoweringInfo CLI(DAG);
CLI.setChain(InChain)
- .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args), 0)
+ .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args))
.setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N));
std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
@@ -11950,23 +12374,20 @@ Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
Instruction* ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
- if (!getInsertFencesForAtomic())
- return nullptr;
-
switch (Ord) {
- case NotAtomic:
- case Unordered:
+ case AtomicOrdering::NotAtomic:
+ case AtomicOrdering::Unordered:
llvm_unreachable("Invalid fence: unordered/non-atomic");
- case Monotonic:
- case Acquire:
+ case AtomicOrdering::Monotonic:
+ case AtomicOrdering::Acquire:
return nullptr; // Nothing to do
- case SequentiallyConsistent:
+ case AtomicOrdering::SequentiallyConsistent:
if (!IsStore)
return nullptr; // Nothing to do
/*FALLTHROUGH*/
- case Release:
- case AcquireRelease:
- if (Subtarget->isSwift())
+ case AtomicOrdering::Release:
+ case AtomicOrdering::AcquireRelease:
+ if (Subtarget->preferISHSTBarriers())
return makeDMB(Builder, ARM_MB::ISHST);
// FIXME: add a comment with a link to documentation justifying this.
else
@@ -11978,19 +12399,16 @@ Instruction* ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
Instruction* ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
- if (!getInsertFencesForAtomic())
- return nullptr;
-
switch (Ord) {
- case NotAtomic:
- case Unordered:
+ case AtomicOrdering::NotAtomic:
+ case AtomicOrdering::Unordered:
llvm_unreachable("Invalid fence: unordered/non-atomic");
- case Monotonic:
- case Release:
+ case AtomicOrdering::Monotonic:
+ case AtomicOrdering::Release:
return nullptr; // Nothing to do
- case Acquire:
- case AcquireRelease:
- case SequentiallyConsistent:
+ case AtomicOrdering::Acquire:
+ case AtomicOrdering::AcquireRelease:
+ case AtomicOrdering::SequentiallyConsistent:
return makeDMB(Builder, ARM_MB::ISH);
}
llvm_unreachable("Unknown fence ordering in emitTrailingFence");
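(Net effect of the two hooks: a seq_cst store is bracketed by a leading
"dmb ishst" on cores that prefer ISHST barriers, otherwise "dmb ish", plus a
trailing "dmb ish"; an acquire load gets only the trailing "dmb ish"; and
monotonic accesses get no barriers at all.)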
@@ -12031,7 +12449,17 @@ ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
bool ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(
AtomicCmpXchgInst *AI) const {
- return true;
+ // At -O0, fast-regalloc cannot cope with the live vregs necessary to
+ // implement cmpxchg without spilling. If the address being exchanged is also
+ // on the stack and close enough to the spill slot, this can lead to a
+ // situation where the monitor always gets cleared and the atomic operation
+ // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
+ return getTargetMachine().getOptLevel() != 0;
+}
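A sketch of the LL/SC loop that IR-level cmpxchg expansion produces, written
with Clang's ARM exclusive-access builtins (illustrative, not the pass's
actual output):

    bool cas32(volatile int *p, int expected, int desired) {
      int old;
      do {
        old = __builtin_arm_ldrex(p); // LDREX: takes the exclusive monitor
        if (old != expected) {
          __builtin_arm_clrex();      // give the monitor back
          return false;
        }
        // At -O0, fast-regalloc can spill/reload here; a stack access near
        // the monitored address may clear the monitor, so the STREX below
        // would fail on every iteration.
      } while (__builtin_arm_strex(desired, p)); // STREX: 0 on success
      return true;
    }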
+
+bool ARMTargetLowering::shouldInsertFencesForAtomic(
+ const Instruction *I) const {
+ return InsertFencesForAtomic;
}
// This has so far only been implemented for MachO.
@@ -12080,7 +12508,7 @@ Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
AtomicOrdering Ord) const {
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
- bool IsAcquire = isAtLeastAcquire(Ord);
+ bool IsAcquire = isAcquireOrStronger(Ord);
// Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
// intrinsic must return {i32, i32} and we have to recombine them into a
@@ -12124,7 +12552,7 @@ Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
Value *Addr,
AtomicOrdering Ord) const {
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- bool IsRelease = isAtLeastRelease(Ord);
+ bool IsRelease = isReleaseOrStronger(Ord);
// Since the intrinsics must have legal type, the i64 intrinsics take two
// parameters: "i32, i32". We must marshal Val into the appropriate form
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index 96b56c3ec330..4906686616bc 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -43,7 +43,6 @@ namespace llvm {
CALL, // Function call.
CALL_PRED, // Function call that's predicable.
CALL_NOLINK, // Function call with branch not branch-and-link.
- tCALL, // Thumb function call.
BRCOND, // Conditional branch.
BR_JT, // Jumptable branch.
BR2_JT, // Jumptable branch (2 level - jumptable entry is a jump).
@@ -61,6 +60,8 @@ namespace llvm {
CMOV, // ARM conditional move instructions.
+ SSAT, // Signed saturation
+
BCC_i64,
SRL_FLAG, // V,Flag = srl_flag X -> srl X, 1 + save carry out.
@@ -164,6 +165,7 @@ namespace llvm {
UMLAL, // 64bit Unsigned Accumulate Multiply
SMLAL, // 64bit Signed Accumulate Multiply
+ UMAAL, // 64-bit Unsigned Accumulate Accumulate Multiply
// Operands of the standard BUILD_VECTOR node are not legalized, which
// is fine if BUILD_VECTORs are always lowered to shuffles or other
@@ -251,13 +253,14 @@ namespace llvm {
EVT VT) const override;
MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const override;
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *MBB) const override;
- void AdjustInstrPostInstrSelection(MachineInstr *MI,
+ void AdjustInstrPostInstrSelection(MachineInstr &MI,
SDNode *Node) const override;
SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
+ SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const;
SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const;
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
@@ -335,6 +338,8 @@ namespace llvm {
getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint, MVT VT) const override;
+ const char *LowerXConstraint(EVT ConstraintVT) const override;
+
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops. If hasMemory is
/// true it means one of the asm constraint of the inline asm instruction
@@ -453,6 +458,7 @@ namespace llvm {
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
unsigned Factor) const override;
+ bool shouldInsertFencesForAtomic(const Instruction *I) const override;
TargetLoweringBase::AtomicExpansionKind
shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
@@ -468,6 +474,14 @@ namespace llvm {
bool isCheapToSpeculateCttz() const override;
bool isCheapToSpeculateCtlz() const override;
+ bool supportSwiftError() const override {
+ return true;
+ }
+
+ bool hasStandaloneRem(EVT VT) const override {
+ return HasStandaloneRem;
+ }
+
protected:
std::pair<const TargetRegisterClass *, uint8_t>
findRepresentativeClass(const TargetRegisterInfo *TRI,
@@ -486,29 +500,34 @@ namespace llvm {
///
unsigned ARMPCLabelIndex;
+ // TODO: remove this, and have shouldInsertFencesForAtomic do the proper
+ // check.
+ bool InsertFencesForAtomic;
+
+ bool HasStandaloneRem = true;
+
void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
void addDRTypeForNEON(MVT VT);
void addQRTypeForNEON(MVT VT);
std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG, SDValue &ARMcc) const;
typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
- void PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
- SDValue Chain, SDValue &Arg,
- RegsToPassVector &RegsToPass,
+ void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
+ SDValue &Arg, RegsToPassVector &RegsToPass,
CCValAssign &VA, CCValAssign &NextVA,
SDValue &StackPtr,
SmallVectorImpl<SDValue> &MemOpChains,
ISD::ArgFlagsTy Flags) const;
SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
SDValue &Root, SelectionDAG &DAG,
- SDLoc dl) const;
+ const SDLoc &dl) const;
CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
bool isVarArg) const;
CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
bool isVarArg) const;
SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const;
SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
@@ -527,6 +546,7 @@ namespace llvm {
SelectionDAG &DAG,
TLSModel::Model model) const;
SDValue LowerGlobalTLSAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalTLSAddressWindows(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;
@@ -576,9 +596,9 @@ namespace llvm {
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals,
- bool isThisReturn, SDValue ThisVal) const;
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
+ SDValue ThisVal) const;
bool supportSplitCSR(MachineFunction *MF) const override {
return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
@@ -590,23 +610,19 @@ namespace llvm {
const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const override;
-
- int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
- SDLoc dl, SDValue &Chain,
- const Value *OrigArg,
- unsigned InRegsParamRecordIdx,
- int ArgOffset,
+ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
+
+ int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &dl,
+ SDValue &Chain, const Value *OrigArg,
+ unsigned InRegsParamRecordIdx, int ArgOffset,
unsigned ArgSize) const;
void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
- SDLoc dl, SDValue &Chain,
- unsigned ArgOffset,
- unsigned TotalArgRegsSaveSize,
+ const SDLoc &dl, SDValue &Chain,
+ unsigned ArgOffset, unsigned TotalArgRegsSaveSize,
bool ForceMutable = false) const;
SDValue
@@ -634,42 +650,39 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const override;
- SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const override;
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &dl, SelectionDAG &DAG) const override;
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
bool mayBeEmittedAsTailCall(CallInst *CI) const override;
- SDValue getCMOV(SDLoc dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
+ SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
SDValue ARMcc, SDValue CCR, SDValue Cmp,
SelectionDAG &DAG) const;
SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
- SDValue &ARMcc, SelectionDAG &DAG, SDLoc dl) const;
- SDValue getVFPCmp(SDValue LHS, SDValue RHS,
- SelectionDAG &DAG, SDLoc dl) const;
+ SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
+ SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
+ const SDLoc &dl) const;
SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;
SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;
- void SetupEntryBlockForSjLj(MachineInstr *MI,
- MachineBasicBlock *MBB,
+ void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
MachineBasicBlock *DispatchBB, int FI) const;
- void EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const;
+ void EmitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *MBB) const;
- bool RemapAddSubWithFlags(MachineInstr *MI, MachineBasicBlock *BB) const;
+ bool RemapAddSubWithFlags(MachineInstr &MI, MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitStructByval(MachineInstr *MI,
+ MachineBasicBlock *EmitStructByval(MachineInstr &MI,
MachineBasicBlock *MBB) const;
- MachineBasicBlock *EmitLowered__chkstk(MachineInstr *MI,
+ MachineBasicBlock *EmitLowered__chkstk(MachineInstr &MI,
MachineBasicBlock *MBB) const;
- MachineBasicBlock *EmitLowered__dbzchk(MachineInstr *MI,
+ MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
MachineBasicBlock *MBB) const;
};
diff --git a/lib/Target/ARM/ARMInstrFormats.td b/lib/Target/ARM/ARMInstrFormats.td
index e79608d360ca..37a83f70a1fb 100644
--- a/lib/Target/ARM/ARMInstrFormats.td
+++ b/lib/Target/ARM/ARMInstrFormats.td
@@ -246,23 +246,33 @@ def shr_imm64 : Operand<i32>, ImmLeaf<i32, [{ return Imm > 0 && Imm <= 64; }]> {
let ParserMatchClass = shr_imm64_asm_operand;
}
+
+// ARM assembler operand for "ldr Rd, =expression", which generates either an
+// offset to a constant-pool entry or a MOV, depending on the value of the
+// expression.
+def const_pool_asm_operand : AsmOperandClass { let Name = "ConstPoolAsmImm"; }
+def const_pool_asm_imm : Operand<i32> {
+ let ParserMatchClass = const_pool_asm_operand;
+}
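For reference, the two behaviours this operand enables (hypothetical
constants, GNU inline-asm syntax):

    unsigned demo() {
      unsigned a, b;
      asm("ldr %0, =1"          : "=r"(a)); // encodable: emitted as a MOV
      asm("ldr %0, =0x89abcdef" : "=r"(b)); // not encodable: constant-pool
      return a + b;                         // entry plus a pc-relative load
    }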
+
+
//===----------------------------------------------------------------------===//
// ARM Assembler alias templates.
//
-class ARMInstAlias<string Asm, dag Result, bit Emit = 0b1>
- : InstAlias<Asm, Result, Emit>, Requires<[IsARM]>;
-class tInstAlias<string Asm, dag Result, bit Emit = 0b1>
- : InstAlias<Asm, Result, Emit>, Requires<[IsThumb]>;
-class t2InstAlias<string Asm, dag Result, bit Emit = 0b1>
- : InstAlias<Asm, Result, Emit>, Requires<[IsThumb2]>;
-class VFP2InstAlias<string Asm, dag Result, bit Emit = 0b1>
- : InstAlias<Asm, Result, Emit>, Requires<[HasVFP2]>;
-class VFP2DPInstAlias<string Asm, dag Result, bit Emit = 0b1>
- : InstAlias<Asm, Result, Emit>, Requires<[HasVFP2,HasDPVFP]>;
-class VFP3InstAlias<string Asm, dag Result, bit Emit = 0b1>
- : InstAlias<Asm, Result, Emit>, Requires<[HasVFP3]>;
-class NEONInstAlias<string Asm, dag Result, bit Emit = 0b1>
- : InstAlias<Asm, Result, Emit>, Requires<[HasNEON]>;
+// Note: When EmitPriority == 1, the alias will be used for printing
+class ARMInstAlias<string Asm, dag Result, bit EmitPriority = 0>
+ : InstAlias<Asm, Result, EmitPriority>, Requires<[IsARM]>;
+class tInstAlias<string Asm, dag Result, bit EmitPriority = 0>
+ : InstAlias<Asm, Result, EmitPriority>, Requires<[IsThumb]>;
+class t2InstAlias<string Asm, dag Result, bit EmitPriority = 0>
+ : InstAlias<Asm, Result, EmitPriority>, Requires<[IsThumb2]>;
+class VFP2InstAlias<string Asm, dag Result, bit EmitPriority = 0>
+ : InstAlias<Asm, Result, EmitPriority>, Requires<[HasVFP2]>;
+class VFP2DPInstAlias<string Asm, dag Result, bit EmitPriority = 0>
+ : InstAlias<Asm, Result, EmitPriority>, Requires<[HasVFP2,HasDPVFP]>;
+class VFP3InstAlias<string Asm, dag Result, bit EmitPriority = 0>
+ : InstAlias<Asm, Result, EmitPriority>, Requires<[HasVFP3]>;
+class NEONInstAlias<string Asm, dag Result, bit EmitPriority = 0>
+ : InstAlias<Asm, Result, EmitPriority>, Requires<[HasNEON]>;
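(With the default EmitPriority = 0, all of these aliases are now conveniences
for the assembly parser only; an alias has to opt in with EmitPriority = 1 to
be preferred when printing.)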
class VFP2MnemonicAlias<string src, string dst> : MnemonicAlias<src, dst>,
@@ -563,12 +573,12 @@ class AIstrex<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
class AIldaex<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: AIldr_ex_or_acq<opcod, 0b10, oops, iops, itin, opc, asm, pattern>,
- Requires<[IsARM, HasV8]>;
+ Requires<[IsARM, HasAcquireRelease, HasV7Clrex]>;
class AIstlex<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: AIstr_ex_or_rel<opcod, 0b10, oops, iops, itin, opc, asm, pattern>,
- Requires<[IsARM, HasV8]> {
+ Requires<[IsARM, HasAcquireRelease, HasV7Clrex]> {
bits<4> Rd;
let Inst{15-12} = Rd;
}
@@ -593,12 +603,12 @@ class AIswp<bit b, dag oops, dag iops, string opc, list<dag> pattern>
class AIldracq<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: AIldr_ex_or_acq<opcod, 0b00, oops, iops, itin, opc, asm, pattern>,
- Requires<[IsARM, HasV8]>;
+ Requires<[IsARM, HasAcquireRelease]>;
class AIstrrel<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
string opc, string asm, list<dag> pattern>
: AIstr_ex_or_rel<opcod, 0b00, oops, iops, itin, opc, asm, pattern>,
- Requires<[IsARM, HasV8]> {
+ Requires<[IsARM, HasAcquireRelease]> {
let Inst{15-12} = 0b1111;
}
@@ -1379,11 +1389,6 @@ class T2Ipostldst<bit signed, bits<2> opcod, bit load, bit pre,
let DecoderMethod = "DecodeT2LdStPre";
}
-// Tv5Pat - Same as Pat<>, but requires V5T Thumb mode.
-class Tv5Pat<dag pattern, dag result> : Pat<pattern, result> {
- list<Predicate> Predicates = [IsThumb, IsThumb1Only, HasV5T];
-}
-
// T1Pat - Same as Pat<>, but requires that the compiler be in Thumb1 mode.
class T1Pat<dag pattern, dag result> : Pat<pattern, result> {
list<Predicate> Predicates = [IsThumb, IsThumb1Only];
@@ -1495,6 +1500,32 @@ class ASI5<bits<4> opcod1, bits<2> opcod2, dag oops, dag iops,
let D = VFPNeonDomain;
}
+class AHI5<bits<4> opcod1, bits<2> opcod2, dag oops, dag iops,
+ InstrItinClass itin,
+ string opc, string asm, list<dag> pattern>
+ : VFPI<oops, iops, AddrMode5, 4, IndexModeNone,
+ VFPLdStFrm, itin, opc, asm, "", pattern> {
+ list<Predicate> Predicates = [HasFullFP16];
+
+ // Instruction operands.
+ bits<5> Sd;
+ bits<13> addr;
+
+ // Encode instruction operands.
+ let Inst{23} = addr{8}; // U (add = (U == '1'))
+ let Inst{22} = Sd{0};
+ let Inst{19-16} = addr{12-9}; // Rn
+ let Inst{15-12} = Sd{4-1};
+ let Inst{7-0} = addr{7-0}; // imm8
+
+ let Inst{27-24} = opcod1;
+ let Inst{21-20} = opcod2;
+ let Inst{11-8} = 0b1001; // Half precision
+
+ // Loads & stores operate on both NEON and VFP pipelines.
+ let D = VFPNeonDomain;
+}
+
// VFP Load / store multiple pseudo instructions.
class PseudoVFPLdStM<dag oops, dag iops, InstrItinClass itin, string cstr,
list<dag> pattern>
@@ -1817,6 +1848,114 @@ class ASbIn<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops,
let Inst{22} = Sd{0};
}
+// Half precision, unary, predicated
+class AHuI<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
+ bit opcod5, dag oops, dag iops, InstrItinClass itin, string opc,
+ string asm, list<dag> pattern>
+ : VFPAI<oops, iops, VFPUnaryFrm, itin, opc, asm, pattern> {
+ list<Predicate> Predicates = [HasFullFP16];
+
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Sm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+
+ let Inst{27-23} = opcod1;
+ let Inst{21-20} = opcod2;
+ let Inst{19-16} = opcod3;
+ let Inst{11-8} = 0b1001; // Half precision
+ let Inst{7-6} = opcod4;
+ let Inst{4} = opcod5;
+}
+
+// Half precision, unary, non-predicated
+class AHuInp<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
+ bit opcod5, dag oops, dag iops, InstrItinClass itin,
+ string asm, list<dag> pattern>
+ : VFPXI<oops, iops, AddrModeNone, 4, IndexModeNone,
+ VFPUnaryFrm, itin, asm, "", pattern> {
+ list<Predicate> Predicates = [HasFullFP16];
+
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Sm;
+
+ let Inst{31-28} = 0b1111;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+
+ let Inst{27-23} = opcod1;
+ let Inst{21-20} = opcod2;
+ let Inst{19-16} = opcod3;
+ let Inst{11-8} = 0b1001; // Half precision
+ let Inst{7-6} = opcod4;
+ let Inst{4} = opcod5;
+}
+
+// Half precision, binary
+class AHbI<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm, list<dag> pattern>
+ : VFPAI<oops, iops, VFPBinaryFrm, itin, opc, asm, pattern> {
+ list<Predicate> Predicates = [HasFullFP16];
+
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Sn;
+ bits<5> Sm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+ let Inst{19-16} = Sn{4-1};
+ let Inst{7} = Sn{0};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+
+ let Inst{27-23} = opcod1;
+ let Inst{21-20} = opcod2;
+ let Inst{11-8} = 0b1001; // Half precision
+ let Inst{6} = op6;
+ let Inst{4} = op4;
+}
+
+// Half precision, binary, not predicated
+class AHbInp<bits<5> opcod1, bits<2> opcod2, bit opcod3, dag oops, dag iops,
+ InstrItinClass itin, string asm, list<dag> pattern>
+ : VFPXI<oops, iops, AddrModeNone, 4, IndexModeNone,
+ VFPBinaryFrm, itin, asm, "", pattern> {
+ list<Predicate> Predicates = [HasFullFP16];
+
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Sn;
+ bits<5> Sm;
+
+ let Inst{31-28} = 0b1111;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+ let Inst{19-16} = Sn{4-1};
+ let Inst{7} = Sn{0};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+
+ let Inst{27-23} = opcod1;
+ let Inst{21-20} = opcod2;
+ let Inst{11-8} = 0b1001; // Half precision
+ let Inst{6} = opcod3;
+ let Inst{4} = 0;
+}
+
// VFP conversion instructions
class AVConv1I<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<4> opcod4,
dag oops, dag iops, InstrItinClass itin, string opc, string asm,
@@ -2321,22 +2460,25 @@ class NEONFPPat<dag pattern, dag result> : Pat<pattern, result> {
}
// VFP/NEON Instruction aliases for type suffices.
-class VFPDataTypeInstAlias<string opc, string dt, string asm, dag Result> :
- InstAlias<!strconcat(opc, dt, "\t", asm), Result>, Requires<[HasVFP2]>;
+// Note: When EmitPriority == 1, the alias will be used for printing
+class VFPDataTypeInstAlias<string opc, string dt, string asm, dag Result, bit EmitPriority = 0> :
+ InstAlias<!strconcat(opc, dt, "\t", asm), Result, EmitPriority>, Requires<[HasVFP2]>;
-multiclass VFPDTAnyInstAlias<string opc, string asm, dag Result> {
- def : VFPDataTypeInstAlias<opc, ".8", asm, Result>;
- def : VFPDataTypeInstAlias<opc, ".16", asm, Result>;
- def : VFPDataTypeInstAlias<opc, ".32", asm, Result>;
- def : VFPDataTypeInstAlias<opc, ".64", asm, Result>;
+// Note: When EmitPriority == 1, the alias will be used for printing
+multiclass VFPDTAnyInstAlias<string opc, string asm, dag Result, bit EmitPriority = 0> {
+ def : VFPDataTypeInstAlias<opc, ".8", asm, Result, EmitPriority>;
+ def : VFPDataTypeInstAlias<opc, ".16", asm, Result, EmitPriority>;
+ def : VFPDataTypeInstAlias<opc, ".32", asm, Result, EmitPriority>;
+ def : VFPDataTypeInstAlias<opc, ".64", asm, Result, EmitPriority>;
}
-multiclass NEONDTAnyInstAlias<string opc, string asm, dag Result> {
+// Note: When EmitPriority == 1, the alias will be used for printing
+multiclass NEONDTAnyInstAlias<string opc, string asm, dag Result, bit EmitPriority = 0> {
let Predicates = [HasNEON] in {
- def : VFPDataTypeInstAlias<opc, ".8", asm, Result>;
- def : VFPDataTypeInstAlias<opc, ".16", asm, Result>;
- def : VFPDataTypeInstAlias<opc, ".32", asm, Result>;
- def : VFPDataTypeInstAlias<opc, ".64", asm, Result>;
+ def : VFPDataTypeInstAlias<opc, ".8", asm, Result, EmitPriority>;
+ def : VFPDataTypeInstAlias<opc, ".16", asm, Result, EmitPriority>;
+ def : VFPDataTypeInstAlias<opc, ".32", asm, Result, EmitPriority>;
+ def : VFPDataTypeInstAlias<opc, ".64", asm, Result, EmitPriority>;
}
}
diff --git a/lib/Target/ARM/ARMInstrInfo.cpp b/lib/Target/ARM/ARMInstrInfo.cpp
index cf973d68085f..98b1b4ca4272 100644
--- a/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/lib/Target/ARM/ARMInstrInfo.cpp
@@ -90,29 +90,29 @@ unsigned ARMInstrInfo::getUnindexedOpcode(unsigned Opc) const {
return 0;
}
-void ARMInstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI,
- Reloc::Model RM) const {
+void ARMInstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI) const {
MachineFunction &MF = *MI->getParent()->getParent();
const ARMSubtarget &Subtarget = MF.getSubtarget<ARMSubtarget>();
+ const TargetMachine &TM = MF.getTarget();
if (!Subtarget.useMovt(MF)) {
- if (RM == Reloc::PIC_)
- expandLoadStackGuardBase(MI, ARM::LDRLIT_ga_pcrel, ARM::LDRi12, RM);
+ if (TM.isPositionIndependent())
+ expandLoadStackGuardBase(MI, ARM::LDRLIT_ga_pcrel, ARM::LDRi12);
else
- expandLoadStackGuardBase(MI, ARM::LDRLIT_ga_abs, ARM::LDRi12, RM);
+ expandLoadStackGuardBase(MI, ARM::LDRLIT_ga_abs, ARM::LDRi12);
return;
}
- if (RM != Reloc::PIC_) {
- expandLoadStackGuardBase(MI, ARM::MOVi32imm, ARM::LDRi12, RM);
+ if (!TM.isPositionIndependent()) {
+ expandLoadStackGuardBase(MI, ARM::MOVi32imm, ARM::LDRi12);
return;
}
const GlobalValue *GV =
cast<GlobalValue>((*MI->memoperands_begin())->getValue());
- if (!Subtarget.GVIsIndirectSymbol(GV, RM)) {
- expandLoadStackGuardBase(MI, ARM::MOV_ga_pcrel, ARM::LDRi12, RM);
+ if (!Subtarget.isGVIndirectSymbol(GV)) {
+ expandLoadStackGuardBase(MI, ARM::MOV_ga_pcrel, ARM::LDRi12);
return;
}
@@ -123,9 +123,9 @@ void ARMInstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI,
MIB = BuildMI(MBB, MI, DL, get(ARM::MOV_ga_pcrel_ldr), Reg)
.addGlobalAddress(GV, 0, ARMII::MO_NONLAZY);
- unsigned Flag = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
+ auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
- MachinePointerInfo::getGOT(*MBB.getParent()), Flag, 4, 4);
+ MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, 4);
MIB.addMemOperand(MMO);
MIB = BuildMI(MBB, MI, DL, get(ARM::LDRi12), Reg);
MIB.addReg(Reg, RegState::Kill).addImm(0);
diff --git a/lib/Target/ARM/ARMInstrInfo.h b/lib/Target/ARM/ARMInstrInfo.h
index 90f34ea08401..4b1b7097b18d 100644
--- a/lib/Target/ARM/ARMInstrInfo.h
+++ b/lib/Target/ARM/ARMInstrInfo.h
@@ -39,8 +39,7 @@ public:
const ARMRegisterInfo &getRegisterInfo() const override { return RI; }
private:
- void expandLoadStackGuard(MachineBasicBlock::iterator MI,
- Reloc::Model RM) const override;
+ void expandLoadStackGuard(MachineBasicBlock::iterator MI) const override;
};
}
diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td
index c446ba3109e4..060376b0a273 100644
--- a/lib/Target/ARM/ARMInstrInfo.td
+++ b/lib/Target/ARM/ARMInstrInfo.td
@@ -90,12 +90,6 @@ def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
SDTCisVT<1, i32>,
SDTCisVT<4, i32>]>;
-def SDT_ARM64bitmlal : SDTypeProfile<2,4, [ SDTCisVT<0, i32>, SDTCisVT<1, i32>,
- SDTCisVT<2, i32>, SDTCisVT<3, i32>,
- SDTCisVT<4, i32>, SDTCisVT<5, i32> ] >;
-def ARMUmlal : SDNode<"ARMISD::UMLAL", SDT_ARM64bitmlal>;
-def ARMSmlal : SDNode<"ARMISD::SMLAL", SDT_ARM64bitmlal>;
-
// Node definitions.
def ARMWrapper : SDNode<"ARMISD::Wrapper", SDTIntUnaryOp>;
def ARMWrapperPIC : SDNode<"ARMISD::WrapperPIC", SDTIntUnaryOp>;
@@ -128,6 +122,8 @@ def ARMintretflag : SDNode<"ARMISD::INTRET_FLAG", SDT_ARMcall,
def ARMcmov : SDNode<"ARMISD::CMOV", SDT_ARMCMov,
[SDNPInGlue]>;
+def ARMssatnoshift : SDNode<"ARMISD::SSAT", SDTIntSatNoShOp, []>;
+
def ARMbrcond : SDNode<"ARMISD::BRCOND", SDT_ARMBrcond,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue]>;
@@ -201,6 +197,12 @@ def NoV6 : Predicate<"!Subtarget->hasV6Ops()">;
def HasV6M : Predicate<"Subtarget->hasV6MOps()">,
AssemblerPredicate<"HasV6MOps",
"armv6m or armv6t2">;
+def HasV8MBaseline : Predicate<"Subtarget->hasV8MBaselineOps()">,
+ AssemblerPredicate<"HasV8MBaselineOps",
+ "armv8m.base">;
+def HasV8MMainline : Predicate<"Subtarget->hasV8MMainlineOps()">,
+ AssemblerPredicate<"HasV8MMainlineOps",
+ "armv8m.main">;
def HasV6T2 : Predicate<"Subtarget->hasV6T2Ops()">,
AssemblerPredicate<"HasV6T2Ops", "armv6t2">;
def NoV6T2 : Predicate<"!Subtarget->hasV6T2Ops()">;
@@ -235,6 +237,8 @@ def HasCrypto : Predicate<"Subtarget->hasCrypto()">,
AssemblerPredicate<"FeatureCrypto", "crypto">;
def HasCRC : Predicate<"Subtarget->hasCRC()">,
AssemblerPredicate<"FeatureCRC", "crc">;
+def HasRAS : Predicate<"Subtarget->hasRAS()">,
+ AssemblerPredicate<"FeatureRAS", "ras">;
def HasFP16 : Predicate<"Subtarget->hasFP16()">,
AssemblerPredicate<"FeatureFP16","half-float conversions">;
def HasFullFP16 : Predicate<"Subtarget->hasFullFP16()">,
@@ -251,6 +255,12 @@ def HasDSP : Predicate<"Subtarget->hasDSP()">,
def HasDB : Predicate<"Subtarget->hasDataBarrier()">,
AssemblerPredicate<"FeatureDB",
"data-barriers">;
+def HasV7Clrex : Predicate<"Subtarget->hasV7Clrex()">,
+ AssemblerPredicate<"FeatureV7Clrex",
+ "v7 clrex">;
+def HasAcquireRelease : Predicate<"Subtarget->hasAcquireRelease()">,
+ AssemblerPredicate<"FeatureAcquireRelease",
+ "acquire/release">;
def HasMP : Predicate<"Subtarget->hasMPExtension()">,
AssemblerPredicate<"FeatureMP",
"mp-extensions">;
@@ -260,6 +270,9 @@ def HasVirtualization: Predicate<"false">,
def HasTrustZone : Predicate<"Subtarget->hasTrustZone()">,
AssemblerPredicate<"FeatureTrustZone",
"TrustZone">;
+def Has8MSecExt : Predicate<"Subtarget->has8MSecExt()">,
+ AssemblerPredicate<"Feature8MSecExt",
+ "ARMv8-M Security Extensions">;
def HasZCZ : Predicate<"Subtarget->hasZeroCycleZeroing()">;
def UseNEONForFP : Predicate<"Subtarget->useNEONForSinglePrecisionFP()">;
def DontUseNEONForFP : Predicate<"!Subtarget->useNEONForSinglePrecisionFP()">;
@@ -279,6 +292,8 @@ def IsARM : Predicate<"!Subtarget->isThumb()">,
def IsMachO : Predicate<"Subtarget->isTargetMachO()">;
def IsNotMachO : Predicate<"!Subtarget->isTargetMachO()">;
def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
+def IsWindows : Predicate<"Subtarget->isTargetWindows()">;
+def IsNotWindows : Predicate<"!Subtarget->isTargetWindows()">;
def UseNaClTrap : Predicate<"Subtarget->useNaClTrap()">,
AssemblerPredicate<"FeatureNaClTrap", "NaCl">;
def DontUseNaClTrap : Predicate<"!Subtarget->useNaClTrap()">;
@@ -301,19 +316,16 @@ def DontUseFusedMAC : Predicate<"!(TM.Options.AllowFPOpFusion =="
" Subtarget->hasVFP4()) || "
"Subtarget->isTargetDarwin()">;
-// VGETLNi32 is microcoded on Swift - prefer VMOV.
-def HasFastVGETLNi32 : Predicate<"!Subtarget->isSwift()">;
-def HasSlowVGETLNi32 : Predicate<"Subtarget->isSwift()">;
+def HasFastVGETLNi32 : Predicate<"!Subtarget->hasSlowVGETLNi32()">;
+def HasSlowVGETLNi32 : Predicate<"Subtarget->hasSlowVGETLNi32()">;
-// VDUP.32 is microcoded on Swift - prefer VMOV.
-def HasFastVDUP32 : Predicate<"!Subtarget->isSwift()">;
-def HasSlowVDUP32 : Predicate<"Subtarget->isSwift()">;
+def HasFastVDUP32 : Predicate<"!Subtarget->hasSlowVDUP32()">;
+def HasSlowVDUP32 : Predicate<"Subtarget->hasSlowVDUP32()">;
-// Cortex-A9 prefers VMOVSR to VMOVDRR even when using NEON for scalar FP, as
-// this allows more effective execution domain optimization. See
-// setExecutionDomain().
-def UseVMOVSR : Predicate<"Subtarget->isCortexA9() || !Subtarget->useNEONForSinglePrecisionFP()">;
-def DontUseVMOVSR : Predicate<"!Subtarget->isCortexA9() && Subtarget->useNEONForSinglePrecisionFP()">;
+def UseVMOVSR : Predicate<"Subtarget->preferVMOVSR() ||"
+ "!Subtarget->useNEONForSinglePrecisionFP()">;
+def DontUseVMOVSR : Predicate<"!Subtarget->preferVMOVSR() &&"
+ "Subtarget->useNEONForSinglePrecisionFP()">;
def IsLE : Predicate<"MF->getDataLayout().isLittleEndian()">;
def IsBE : Predicate<"MF->getDataLayout().isBigEndian()">;
@@ -360,8 +372,6 @@ def lo16AllZero : PatLeaf<(i32 imm), [{
return (((uint32_t)N->getZExtValue()) & 0xFFFFUL) == 0;
}], hi16>;
-class BinOpWithFlagFrag<dag res> :
- PatFrag<(ops node:$LHS, node:$RHS, node:$FLAG), res>;
class BinOpFrag<dag res> : PatFrag<(ops node:$LHS, node:$RHS), res>;
class UnOpFrag <dag res> : PatFrag<(ops node:$Src), res>;
@@ -408,34 +418,35 @@ def brtarget : Operand<OtherVT> {
let DecoderMethod = "DecodeT2BROperand";
}
-// FIXME: get rid of this one?
-def uncondbrtarget : Operand<OtherVT> {
- let EncoderMethod = "getUnconditionalBranchTargetOpValue";
- let OperandType = "OPERAND_PCREL";
+// Branch targets in ARM mode must be divisible by 4 when given as a raw
+// immediate.
+def ARMBranchTarget : AsmOperandClass {
+ let Name = "ARMBranchTarget";
}
-// Branch target for ARM. Handles conditional/unconditional
-def br_target : Operand<OtherVT> {
- let EncoderMethod = "getARMBranchTargetOpValue";
- let OperandType = "OPERAND_PCREL";
+// Branch targets in Thumb mode must be divisible by 2 when given as a raw
+// immediate.
+def ThumbBranchTarget : AsmOperandClass {
+ let Name = "ThumbBranchTarget";
}
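(Concretely: an ARM-state branch whose target is given as a raw immediate is
rejected unless the value is a multiple of 4, while a Thumb-state branch only
requires a multiple of 2.)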
-// Call target.
-// FIXME: rename bltarget to t2_bl_target?
-def bltarget : Operand<i32> {
- // Encoded the same as branch targets.
- let EncoderMethod = "getBranchTargetOpValue";
+def arm_br_target : Operand<OtherVT> {
+ let ParserMatchClass = ARMBranchTarget;
+ let EncoderMethod = "getARMBranchTargetOpValue";
let OperandType = "OPERAND_PCREL";
}
// Call target for ARM. Handles conditional/unconditional
// FIXME: rename bl_target to t2_bltarget?
-def bl_target : Operand<i32> {
+def arm_bl_target : Operand<i32> {
+ let ParserMatchClass = ARMBranchTarget;
let EncoderMethod = "getARMBLTargetOpValue";
let OperandType = "OPERAND_PCREL";
}
-def blx_target : Operand<i32> {
+// Target for BLX *from* ARM mode.
+def arm_blx_target : Operand<i32> {
+ let ParserMatchClass = ThumbBranchTarget;
let EncoderMethod = "getARMBLXTargetOpValue";
let OperandType = "OPERAND_PCREL";
}
@@ -981,6 +992,21 @@ def addrmode5_pre : AddrMode5 {
let PrintMethod = "printAddrMode5Operand<true>";
}
+// addrmode5fp16 := reg +/- imm8*2
+//
+def AddrMode5FP16AsmOperand : AsmOperandClass { let Name = "AddrMode5FP16"; }
+class AddrMode5FP16 : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectAddrMode5FP16", []> {
+ let EncoderMethod = "getAddrMode5FP16OpValue";
+ let DecoderMethod = "DecodeAddrMode5FP16Operand";
+ let ParserMatchClass = AddrMode5FP16AsmOperand;
+ let MIOperandInfo = (ops GPR:$base, i32imm);
+}
+
+def addrmode5fp16 : AddrMode5FP16 {
+ let PrintMethod = "printAddrMode5FP16Operand<false>";
+}
+
// addrmode6 := reg with optional alignment
//
def AddrMode6AsmOperand : AsmOperandClass { let Name = "AlignedMemory"; }
@@ -1224,7 +1250,7 @@ include "ARMInstrFormats.td"
let TwoOperandAliasConstraint = "$Rn = $Rd" in
multiclass AsI1_bin_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
- PatFrag opnode, bit Commutable = 0> {
+ SDPatternOperator opnode, bit Commutable = 0> {
// The register-immediate version is re-materializable. This is useful
// in particular for taking the address of a local.
let isReMaterializable = 1 in {
@@ -1297,7 +1323,7 @@ multiclass AsI1_bin_irs<bits<4> opcod, string opc,
let TwoOperandAliasConstraint = "$Rn = $Rd" in
multiclass AsI1_rbin_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
- PatFrag opnode, bit Commutable = 0> {
+ SDNode opnode, bit Commutable = 0> {
// The register-immediate version is re-materializable. This is useful
// in particular for taking the address of a local.
let isReMaterializable = 1 in {
@@ -1369,7 +1395,7 @@ multiclass AsI1_rbin_irs<bits<4> opcod, string opc,
/// AdjustInstrPostInstrSelection after giving them an optional CPSR operand.
let hasPostISelHook = 1, Defs = [CPSR] in {
multiclass AsI1_bin_s_irs<InstrItinClass iii, InstrItinClass iir,
- InstrItinClass iis, PatFrag opnode,
+ InstrItinClass iis, SDNode opnode,
bit Commutable = 0> {
def ri : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, mod_imm:$imm, pred:$p),
4, iii,
@@ -1402,7 +1428,7 @@ multiclass AsI1_bin_s_irs<InstrItinClass iii, InstrItinClass iir,
/// operands are reversed.
let hasPostISelHook = 1, Defs = [CPSR] in {
multiclass AsI1_rbin_s_is<InstrItinClass iii, InstrItinClass iir,
- InstrItinClass iis, PatFrag opnode,
+ InstrItinClass iis, SDNode opnode,
bit Commutable = 0> {
def ri : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, mod_imm:$imm, pred:$p),
4, iii,
@@ -1431,8 +1457,8 @@ multiclass AsI1_rbin_s_is<InstrItinClass iii, InstrItinClass iir,
let isCompare = 1, Defs = [CPSR] in {
multiclass AI1_cmp_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
- PatFrag opnode, bit Commutable = 0,
- string rrDecoderMethod = ""> {
+ SDPatternOperator opnode, bit Commutable = 0,
+ string rrDecoderMethod = ""> {
def ri : AI1<opcod, (outs), (ins GPR:$Rn, mod_imm:$imm), DPFrm, iii,
opc, "\t$Rn, $imm",
[(opnode GPR:$Rn, mod_imm:$imm)]>,
@@ -1561,7 +1587,7 @@ class AI_exta_rrot_np<bits<8> opcod, string opc>
/// AI1_adde_sube_irs - Define instructions and patterns for adde and sube.
let TwoOperandAliasConstraint = "$Rn = $Rd" in
-multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
+multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, SDNode opnode,
bit Commutable = 0> {
let hasPostISelHook = 1, Defs = [CPSR], Uses = [CPSR] in {
def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, mod_imm:$imm),
@@ -1632,7 +1658,7 @@ multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
/// AI1_rsc_irs - Define instructions and patterns for rsc
let TwoOperandAliasConstraint = "$Rn = $Rd" in
-multiclass AI1_rsc_irs<bits<4> opcod, string opc, PatFrag opnode> {
+multiclass AI1_rsc_irs<bits<4> opcod, string opc, SDNode opnode> {
let hasPostISelHook = 1, Defs = [CPSR], Uses = [CPSR] in {
def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, mod_imm:$imm),
DPFrm, IIC_iALUi, opc, "\t$Rd, $Rn, $imm",
@@ -1880,6 +1906,7 @@ def HINT : AI<(outs), (ins imm0_239:$imm), MiscFrm, NoItinerary,
bits<8> imm;
let Inst{27-8} = 0b00110010000011110000;
let Inst{7-0} = imm;
+ let DecoderMethod = "DecodeHINTInstruction";
}
def : InstAlias<"nop$p", (HINT 0, pred:$p)>, Requires<[IsARM, HasV6K]>;
@@ -1888,6 +1915,7 @@ def : InstAlias<"wfe$p", (HINT 2, pred:$p)>, Requires<[IsARM, HasV6K]>;
def : InstAlias<"wfi$p", (HINT 3, pred:$p)>, Requires<[IsARM, HasV6K]>;
def : InstAlias<"sev$p", (HINT 4, pred:$p)>, Requires<[IsARM, HasV6K]>;
def : InstAlias<"sevl$p", (HINT 5, pred:$p)>, Requires<[IsARM, HasV8]>;
+def : InstAlias<"esb$p", (HINT 16, pred:$p)>, Requires<[IsARM, HasRAS]>;
def SEL : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm, NoItinerary, "sel",
"\t$Rd, $Rn, $Rm", []>, Requires<[IsARM, HasV6]> {
@@ -1915,7 +1943,7 @@ def BKPT : AInoP<(outs), (ins imm0_65535:$val), MiscFrm, NoItinerary,
let Inst{7-4} = 0b0111;
}
// default immediate for breakpoint mnemonic
-def : InstAlias<"bkpt", (BKPT 0)>, Requires<[IsARM]>;
+def : InstAlias<"bkpt", (BKPT 0), 0>, Requires<[IsARM]>;
def HLT : AInoP<(outs), (ins imm0_65535:$val), MiscFrm, NoItinerary,
"hlt", "\t$val", []>, Requires<[IsARM, HasV8]> {
@@ -2181,7 +2209,7 @@ let isCall = 1,
// at least be a pseudo instruction expanding to the predicated version
// at MC lowering time.
Defs = [LR], Uses = [SP] in {
- def BL : ABXI<0b1011, (outs), (ins bl_target:$func),
+ def BL : ABXI<0b1011, (outs), (ins arm_bl_target:$func),
IIC_Br, "bl\t$func",
[(ARMcall tglobaladdr:$func)]>,
Requires<[IsARM]>, Sched<[WriteBrL]> {
@@ -2191,7 +2219,7 @@ let isCall = 1,
let DecoderMethod = "DecodeBranchImmInstruction";
}
- def BL_pred : ABI<0b1011, (outs), (ins bl_target:$func),
+ def BL_pred : ABI<0b1011, (outs), (ins arm_bl_target:$func),
IIC_Br, "bl", "\t$func",
[(ARMcall_pred tglobaladdr:$func)]>,
Requires<[IsARM]>, Sched<[WriteBrL]> {
@@ -2232,7 +2260,7 @@ let isCall = 1,
// mov lr, pc; b if callee is marked noreturn to avoid confusing the
// return stack predictor.
- def BMOVPCB_CALL : ARMPseudoInst<(outs), (ins bl_target:$func),
+ def BMOVPCB_CALL : ARMPseudoInst<(outs), (ins arm_bl_target:$func),
8, IIC_Br, [(ARMcall_nolink tglobaladdr:$func)]>,
Requires<[IsARM]>, Sched<[WriteBr]>;
}
@@ -2240,7 +2268,7 @@ let isCall = 1,
let isBranch = 1, isTerminator = 1 in {
// FIXME: should be able to write a pattern for ARMBrcond, but can't use
// a two-value operand where a dag node expects two operands. :(
- def Bcc : ABI<0b1010, (outs), (ins br_target:$target),
+ def Bcc : ABI<0b1010, (outs), (ins arm_br_target:$target),
IIC_Br, "b", "\t$target",
[/*(ARMbrcond bb:$target, imm:$cc, CCR:$ccr)*/]>,
Sched<[WriteBr]> {
@@ -2255,8 +2283,9 @@ let isBranch = 1, isTerminator = 1 in {
// FIXME: We shouldn't need this pseudo at all. Just using Bcc directly
// should be sufficient.
// FIXME: Is B really a Barrier? That doesn't seem right.
- def B : ARMPseudoExpand<(outs), (ins br_target:$target), 4, IIC_Br,
- [(br bb:$target)], (Bcc br_target:$target, (ops 14, zero_reg))>,
+ def B : ARMPseudoExpand<(outs), (ins arm_br_target:$target), 4, IIC_Br,
+ [(br bb:$target)], (Bcc arm_br_target:$target,
+ (ops 14, zero_reg))>,
Sched<[WriteBr]>;
let Size = 4, isNotDuplicable = 1, isIndirectBranch = 1 in {
@@ -2283,7 +2312,7 @@ let isBranch = 1, isTerminator = 1 in {
}
// BLX (immediate)
-def BLXi : AXI<(outs), (ins blx_target:$target), BrMiscFrm, NoItinerary,
+def BLXi : AXI<(outs), (ins arm_blx_target:$target), BrMiscFrm, NoItinerary,
"blx\t$target", []>,
Requires<[IsARM, HasV5T]>, Sched<[WriteBrL]> {
let Inst{31-25} = 0b1111101;
@@ -2313,9 +2342,9 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
def TCRETURNri : PseudoInst<(outs), (ins tcGPR:$dst), IIC_Br, []>,
Sched<[WriteBr]>;
- def TAILJMPd : ARMPseudoExpand<(outs), (ins br_target:$dst),
+ def TAILJMPd : ARMPseudoExpand<(outs), (ins arm_br_target:$dst),
4, IIC_Br, [],
- (Bcc br_target:$dst, (ops 14, zero_reg))>,
+ (Bcc arm_br_target:$dst, (ops 14, zero_reg))>,
Requires<[IsARM]>, Sched<[WriteBr]>;
def TAILJMPr : ARMPseudoExpand<(outs), (ins tcGPR:$dst),
@@ -2467,14 +2496,12 @@ def ERET : ABI<0b0001, (outs), (ins), NoItinerary, "eret", "", []>,
// Load
-defm LDR : AI_ldr1<0, "ldr", IIC_iLoad_r, IIC_iLoad_si,
- UnOpFrag<(load node:$Src)>>;
+defm LDR : AI_ldr1<0, "ldr", IIC_iLoad_r, IIC_iLoad_si, load>;
defm LDRB : AI_ldr1nopc<1, "ldrb", IIC_iLoad_bh_r, IIC_iLoad_bh_si,
- UnOpFrag<(zextloadi8 node:$Src)>>;
-defm STR : AI_str1<0, "str", IIC_iStore_r, IIC_iStore_si,
- BinOpFrag<(store node:$LHS, node:$RHS)>>;
+ zextloadi8>;
+defm STR : AI_str1<0, "str", IIC_iStore_r, IIC_iStore_si, store>;
defm STRB : AI_str1nopc<1, "strb", IIC_iStore_bh_r, IIC_iStore_bh_si,
- BinOpFrag<(truncstorei8 node:$LHS, node:$RHS)>>;
+ truncstorei8>;
// Special LDR for loads from non-pc-relative constpools.
let canFoldAsLoad = 1, mayLoad = 1, hasSideEffects = 0,
@@ -2764,6 +2791,12 @@ def LDRBT_POST
: ARMAsmPseudo<"ldrbt${q} $Rt, $addr", (ins addr_offset_none:$addr, pred:$q),
(outs GPR:$Rt)>;
+// Pseudo instruction ldr Rt, =immediate
+def LDRConstPool
+ : ARMAsmPseudo<"ldr${q} $Rt, $immediate",
+ (ins const_pool_asm_imm:$immediate, pred:$q),
+ (outs GPR:$Rt)>;
+
// Store
// Stores with truncate
@@ -3299,8 +3332,8 @@ def MOVi16 : AI1<0b1000, (outs GPR:$Rd), (ins imm0_65535_expr:$imm),
}
def : InstAlias<"mov${p} $Rd, $imm",
- (MOVi16 GPR:$Rd, imm0_65535_expr:$imm, pred:$p)>,
- Requires<[IsARM]>;
+ (MOVi16 GPR:$Rd, imm0_65535_expr:$imm, pred:$p), 0>,
+ Requires<[IsARM, HasV6T2]>;
def MOVi16_ga_pcrel : PseudoInst<(outs GPR:$Rd),
(ins i32imm:$addr, pclabel:$id), IIC_iMOVi, []>,
@@ -3439,11 +3472,9 @@ def UBFX : I<(outs GPRnopc:$Rd),
//
defm ADD : AsI1_bin_irs<0b0100, "add",
- IIC_iALUi, IIC_iALUr, IIC_iALUsr,
- BinOpFrag<(add node:$LHS, node:$RHS)>, 1>;
+ IIC_iALUi, IIC_iALUr, IIC_iALUsr, add, 1>;
defm SUB : AsI1_bin_irs<0b0010, "sub",
- IIC_iALUi, IIC_iALUr, IIC_iALUsr,
- BinOpFrag<(sub node:$LHS, node:$RHS)>>;
+ IIC_iALUi, IIC_iALUr, IIC_iALUsr, sub>;
// ADD and SUB with 's' bit set.
//
@@ -3455,27 +3486,21 @@ defm SUB : AsI1_bin_irs<0b0010, "sub",
// FIXME: Eliminate ADDS/SUBS pseudo opcodes after adding tablegen
// support for an optional CPSR definition that corresponds to the DAG
// node's second value. We can then eliminate the implicit def of CPSR.
-defm ADDS : AsI1_bin_s_irs<IIC_iALUi, IIC_iALUr, IIC_iALUsr,
- BinOpFrag<(ARMaddc node:$LHS, node:$RHS)>, 1>;
-defm SUBS : AsI1_bin_s_irs<IIC_iALUi, IIC_iALUr, IIC_iALUsr,
- BinOpFrag<(ARMsubc node:$LHS, node:$RHS)>>;
+defm ADDS : AsI1_bin_s_irs<IIC_iALUi, IIC_iALUr, IIC_iALUsr, ARMaddc, 1>;
+defm SUBS : AsI1_bin_s_irs<IIC_iALUi, IIC_iALUr, IIC_iALUsr, ARMsubc>;
-defm ADC : AI1_adde_sube_irs<0b0101, "adc",
- BinOpWithFlagFrag<(ARMadde node:$LHS, node:$RHS, node:$FLAG)>, 1>;
-defm SBC : AI1_adde_sube_irs<0b0110, "sbc",
- BinOpWithFlagFrag<(ARMsube node:$LHS, node:$RHS, node:$FLAG)>>;
+defm ADC : AI1_adde_sube_irs<0b0101, "adc", ARMadde, 1>;
+defm SBC : AI1_adde_sube_irs<0b0110, "sbc", ARMsube>;
defm RSB : AsI1_rbin_irs<0b0011, "rsb",
IIC_iALUi, IIC_iALUr, IIC_iALUsr,
- BinOpFrag<(sub node:$LHS, node:$RHS)>>;
+ sub>;
// FIXME: Eliminate them if we can write def : Pat patterns which defines
// CPSR and the implicit def of CPSR is not needed.
-defm RSBS : AsI1_rbin_s_is<IIC_iALUi, IIC_iALUr, IIC_iALUsr,
- BinOpFrag<(ARMsubc node:$LHS, node:$RHS)>>;
+defm RSBS : AsI1_rbin_s_is<IIC_iALUi, IIC_iALUr, IIC_iALUsr, ARMsubc>;
-defm RSC : AI1_rsc_irs<0b0111, "rsc",
- BinOpWithFlagFrag<(ARMsube node:$LHS, node:$RHS, node:$FLAG)>>;
+defm RSC : AI1_rsc_irs<0b0111, "rsc", ARMsube>;
// (sub X, imm) gets canonicalized to (add X, -imm). Match this form.
// The assume-no-carry-in form uses the negation of the input since add/sub
@@ -3685,20 +3710,19 @@ def : ARMV6Pat<(int_arm_ssat GPRnopc:$a, imm1_32:$pos),
(SSAT imm1_32:$pos, GPRnopc:$a, 0)>;
def : ARMV6Pat<(int_arm_usat GPRnopc:$a, imm0_31:$pos),
(USAT imm0_31:$pos, GPRnopc:$a, 0)>;
+def : ARMPat<(ARMssatnoshift GPRnopc:$Rn, imm0_31:$imm),
+ (SSAT imm0_31:$imm, GPRnopc:$Rn, 0)>;
//===----------------------------------------------------------------------===//
// Bitwise Instructions.
//
defm AND : AsI1_bin_irs<0b0000, "and",
- IIC_iBITi, IIC_iBITr, IIC_iBITsr,
- BinOpFrag<(and node:$LHS, node:$RHS)>, 1>;
+ IIC_iBITi, IIC_iBITr, IIC_iBITsr, and, 1>;
defm ORR : AsI1_bin_irs<0b1100, "orr",
- IIC_iBITi, IIC_iBITr, IIC_iBITsr,
- BinOpFrag<(or node:$LHS, node:$RHS)>, 1>;
+ IIC_iBITi, IIC_iBITr, IIC_iBITsr, or, 1>;
defm EOR : AsI1_bin_irs<0b0001, "eor",
- IIC_iBITi, IIC_iBITr, IIC_iBITsr,
- BinOpFrag<(xor node:$LHS, node:$RHS)>, 1>;
+ IIC_iBITi, IIC_iBITr, IIC_iBITsr, xor, 1>;
defm BIC : AsI1_bin_irs<0b1110, "bic",
IIC_iBITi, IIC_iBITr, IIC_iBITsr,
BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
@@ -3923,9 +3947,10 @@ def UMLAL : AsMla1I64<0b0000101, (outs GPR:$RdLo, GPR:$RdHi),
RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">, Requires<[IsARM, HasV6]>;
def UMAAL : AMul1I <0b0000010, (outs GPR:$RdLo, GPR:$RdHi),
- (ins GPR:$Rn, GPR:$Rm), IIC_iMAC64,
+ (ins GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi),
+ IIC_iMAC64,
"umaal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
- Requires<[IsARM, HasV6]> {
+ RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">, Requires<[IsARM, HasV6]> {
bits<4> RdLo;
bits<4> RdHi;
bits<4> Rm;
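For reference, UMAAL's semantics, which are why the low and high destinations
are now also tied inputs, in a minimal sketch:

    #include <cstdint>
    // {hi,lo} = n * m + lo + hi; this always fits in 64 bits, since
    // (2^32-1)^2 + 2*(2^32-1) == 2^64 - 1.
    void umaal(uint32_t &lo, uint32_t &hi, uint32_t n, uint32_t m) {
      uint64_t r = (uint64_t)n * m + lo + hi;
      lo = (uint32_t)r;
      hi = (uint32_t)(r >> 32);
    }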
@@ -3989,28 +4014,28 @@ def SMMLSR : AMul2Ia <0b0111010, 0b1111, (outs GPR:$Rd),
IIC_iMAC32, "smmlsr", "\t$Rd, $Rn, $Rm, $Ra", []>,
Requires<[IsARM, HasV6]>;
-multiclass AI_smul<string opc, PatFrag opnode> {
+multiclass AI_smul<string opc> {
def BB : AMulxyI<0b0001011, 0b00, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm",
- [(set GPR:$Rd, (opnode (sext_inreg GPR:$Rn, i16),
+ [(set GPR:$Rd, (mul (sext_inreg GPR:$Rn, i16),
(sext_inreg GPR:$Rm, i16)))]>,
Requires<[IsARM, HasV5TE]>;
def BT : AMulxyI<0b0001011, 0b10, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm",
- [(set GPR:$Rd, (opnode (sext_inreg GPR:$Rn, i16),
+ [(set GPR:$Rd, (mul (sext_inreg GPR:$Rn, i16),
(sra GPR:$Rm, (i32 16))))]>,
Requires<[IsARM, HasV5TE]>;
def TB : AMulxyI<0b0001011, 0b01, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm",
- [(set GPR:$Rd, (opnode (sra GPR:$Rn, (i32 16)),
+ [(set GPR:$Rd, (mul (sra GPR:$Rn, (i32 16)),
(sext_inreg GPR:$Rm, i16)))]>,
Requires<[IsARM, HasV5TE]>;
def TT : AMulxyI<0b0001011, 0b11, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
IIC_iMUL16, !strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm",
- [(set GPR:$Rd, (opnode (sra GPR:$Rn, (i32 16)),
+ [(set GPR:$Rd, (mul (sra GPR:$Rn, (i32 16)),
(sra GPR:$Rm, (i32 16))))]>,
Requires<[IsARM, HasV5TE]>;
@@ -4026,13 +4051,13 @@ multiclass AI_smul<string opc, PatFrag opnode> {
}
-multiclass AI_smla<string opc, PatFrag opnode> {
+multiclass AI_smla<string opc> {
let DecoderMethod = "DecodeSMLAInstruction" in {
def BB : AMulxyIa<0b0001000, 0b00, (outs GPRnopc:$Rd),
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPRnopc:$Rd, (add GPR:$Ra,
- (opnode (sext_inreg GPRnopc:$Rn, i16),
+ (mul (sext_inreg GPRnopc:$Rn, i16),
(sext_inreg GPRnopc:$Rm, i16))))]>,
Requires<[IsARM, HasV5TE, UseMulOps]>;
@@ -4040,7 +4065,7 @@ multiclass AI_smla<string opc, PatFrag opnode> {
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPRnopc:$Rd,
- (add GPR:$Ra, (opnode (sext_inreg GPRnopc:$Rn, i16),
+ (add GPR:$Ra, (mul (sext_inreg GPRnopc:$Rn, i16),
(sra GPRnopc:$Rm, (i32 16)))))]>,
Requires<[IsARM, HasV5TE, UseMulOps]>;
@@ -4048,7 +4073,7 @@ multiclass AI_smla<string opc, PatFrag opnode> {
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPRnopc:$Rd,
- (add GPR:$Ra, (opnode (sra GPRnopc:$Rn, (i32 16)),
+ (add GPR:$Ra, (mul (sra GPRnopc:$Rn, (i32 16)),
(sext_inreg GPRnopc:$Rm, i16))))]>,
Requires<[IsARM, HasV5TE, UseMulOps]>;
@@ -4056,7 +4081,7 @@ multiclass AI_smla<string opc, PatFrag opnode> {
(ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
IIC_iMAC16, !strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPRnopc:$Rd,
- (add GPR:$Ra, (opnode (sra GPRnopc:$Rn, (i32 16)),
+ (add GPR:$Ra, (mul (sra GPRnopc:$Rn, (i32 16)),
(sra GPRnopc:$Rm, (i32 16)))))]>,
Requires<[IsARM, HasV5TE, UseMulOps]>;
@@ -4074,8 +4099,8 @@ multiclass AI_smla<string opc, PatFrag opnode> {
}
}
-defm SMUL : AI_smul<"smul", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
-defm SMLA : AI_smla<"smla", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
+defm SMUL : AI_smul<"smul">;
+defm SMLA : AI_smla<"smla">;
// Halfword multiply accumulate long: SMLAL<x><y>.
def SMLALBB : AMulxyI64<0b0001010, 0b00, (outs GPRnopc:$RdLo, GPRnopc:$RdHi),
@@ -4336,8 +4361,7 @@ def SETPAN : AInoP<(outs), (ins imm0_1:$imm), MiscFrm, NoItinerary, "setpan",
//
defm CMP : AI1_cmp_irs<0b1010, "cmp",
- IIC_iCMPi, IIC_iCMPr, IIC_iCMPsr,
- BinOpFrag<(ARMcmp node:$LHS, node:$RHS)>>;
+ IIC_iCMPi, IIC_iCMPr, IIC_iCMPsr, ARMcmp>;
// ARMcmpZ can re-use the above instruction definitions.
def : ARMPat<(ARMcmpZ GPR:$src, mod_imm:$imm),
@@ -4745,7 +4769,7 @@ def : ARMPat<(stlex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
class acquiring_load<PatFrag base>
: PatFrag<(ops node:$ptr), (base node:$ptr), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
- return isAtLeastAcquire(Ordering);
+ return isAcquireOrStronger(Ordering);
}]>;
def atomic_load_acquire_8 : acquiring_load<atomic_load_8>;
@@ -4755,7 +4779,7 @@ def atomic_load_acquire_32 : acquiring_load<atomic_load_32>;
class releasing_store<PatFrag base>
: PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
- return isAtLeastRelease(Ordering);
+ return isReleaseOrStronger(Ordering);
}]>;
def atomic_store_release_8 : releasing_store<atomic_store_8>;
@@ -4831,21 +4855,21 @@ def CDP2 : ABXI<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1,
}
class ACI<dag oops, dag iops, string opc, string asm,
- IndexMode im = IndexModeNone>
+ list<dag> pattern, IndexMode im = IndexModeNone>
: I<oops, iops, AddrModeNone, 4, im, BrFrm, NoItinerary,
- opc, asm, "", []> {
+ opc, asm, "", pattern> {
let Inst{27-25} = 0b110;
}
class ACInoP<dag oops, dag iops, string opc, string asm,
- IndexMode im = IndexModeNone>
+ list<dag> pattern, IndexMode im = IndexModeNone>
: InoP<oops, iops, AddrModeNone, 4, im, BrFrm, NoItinerary,
- opc, asm, "", []> {
+ opc, asm, "", pattern> {
let Inst{31-28} = 0b1111;
let Inst{27-25} = 0b110;
}
-multiclass LdStCop<bit load, bit Dbit, string asm> {
+multiclass LdStCop<bit load, bit Dbit, string asm, list<dag> pattern> {
def _OFFSET : ACI<(outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5:$addr),
- asm, "\t$cop, $CRd, $addr"> {
+ asm, "\t$cop, $CRd, $addr", pattern> {
bits<13> addr;
bits<4> cop;
bits<4> CRd;
@@ -4861,7 +4885,7 @@ multiclass LdStCop<bit load, bit Dbit, string asm> {
let DecoderMethod = "DecodeCopMemInstruction";
}
def _PRE : ACI<(outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5_pre:$addr),
- asm, "\t$cop, $CRd, $addr!", IndexModePre> {
+ asm, "\t$cop, $CRd, $addr!", [], IndexModePre> {
bits<13> addr;
bits<4> cop;
bits<4> CRd;
@@ -4878,7 +4902,7 @@ multiclass LdStCop<bit load, bit Dbit, string asm> {
}
def _POST: ACI<(outs), (ins p_imm:$cop, c_imm:$CRd, addr_offset_none:$addr,
postidx_imm8s4:$offset),
- asm, "\t$cop, $CRd, $addr, $offset", IndexModePost> {
+ asm, "\t$cop, $CRd, $addr, $offset", [], IndexModePost> {
bits<9> offset;
bits<4> addr;
bits<4> cop;
@@ -4897,7 +4921,7 @@ multiclass LdStCop<bit load, bit Dbit, string asm> {
def _OPTION : ACI<(outs),
(ins p_imm:$cop, c_imm:$CRd, addr_offset_none:$addr,
coproc_option_imm:$option),
- asm, "\t$cop, $CRd, $addr, $option"> {
+ asm, "\t$cop, $CRd, $addr, $option", []> {
bits<8> option;
bits<4> addr;
bits<4> cop;
@@ -4914,9 +4938,9 @@ multiclass LdStCop<bit load, bit Dbit, string asm> {
let DecoderMethod = "DecodeCopMemInstruction";
}
}
-multiclass LdSt2Cop<bit load, bit Dbit, string asm> {
+multiclass LdSt2Cop<bit load, bit Dbit, string asm, list<dag> pattern> {
def _OFFSET : ACInoP<(outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5:$addr),
- asm, "\t$cop, $CRd, $addr"> {
+ asm, "\t$cop, $CRd, $addr", pattern> {
bits<13> addr;
bits<4> cop;
bits<4> CRd;
@@ -4932,7 +4956,7 @@ multiclass LdSt2Cop<bit load, bit Dbit, string asm> {
let DecoderMethod = "DecodeCopMemInstruction";
}
def _PRE : ACInoP<(outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5_pre:$addr),
- asm, "\t$cop, $CRd, $addr!", IndexModePre> {
+ asm, "\t$cop, $CRd, $addr!", [], IndexModePre> {
bits<13> addr;
bits<4> cop;
bits<4> CRd;
@@ -4949,7 +4973,7 @@ multiclass LdSt2Cop<bit load, bit Dbit, string asm> {
}
def _POST: ACInoP<(outs), (ins p_imm:$cop, c_imm:$CRd, addr_offset_none:$addr,
postidx_imm8s4:$offset),
- asm, "\t$cop, $CRd, $addr, $offset", IndexModePost> {
+ asm, "\t$cop, $CRd, $addr, $offset", [], IndexModePost> {
bits<9> offset;
bits<4> addr;
bits<4> cop;
@@ -4968,7 +4992,7 @@ multiclass LdSt2Cop<bit load, bit Dbit, string asm> {
def _OPTION : ACInoP<(outs),
(ins p_imm:$cop, c_imm:$CRd, addr_offset_none:$addr,
coproc_option_imm:$option),
- asm, "\t$cop, $CRd, $addr, $option"> {
+ asm, "\t$cop, $CRd, $addr, $option", []> {
bits<8> option;
bits<4> addr;
bits<4> cop;
@@ -4986,14 +5010,15 @@ multiclass LdSt2Cop<bit load, bit Dbit, string asm> {
}
}
-defm LDC : LdStCop <1, 0, "ldc">;
-defm LDCL : LdStCop <1, 1, "ldcl">;
-defm STC : LdStCop <0, 0, "stc">;
-defm STCL : LdStCop <0, 1, "stcl">;
-defm LDC2 : LdSt2Cop<1, 0, "ldc2">, Requires<[PreV8]>;
-defm LDC2L : LdSt2Cop<1, 1, "ldc2l">, Requires<[PreV8]>;
-defm STC2 : LdSt2Cop<0, 0, "stc2">, Requires<[PreV8]>;
-defm STC2L : LdSt2Cop<0, 1, "stc2l">, Requires<[PreV8]>;
+defm LDC : LdStCop <1, 0, "ldc", [(int_arm_ldc imm:$cop, imm:$CRd, addrmode5:$addr)]>;
+defm LDCL : LdStCop <1, 1, "ldcl", [(int_arm_ldcl imm:$cop, imm:$CRd, addrmode5:$addr)]>;
+defm LDC2 : LdSt2Cop<1, 0, "ldc2", [(int_arm_ldc2 imm:$cop, imm:$CRd, addrmode5:$addr)]>, Requires<[PreV8]>;
+defm LDC2L : LdSt2Cop<1, 1, "ldc2l", [(int_arm_ldc2l imm:$cop, imm:$CRd, addrmode5:$addr)]>, Requires<[PreV8]>;
+
+defm STC : LdStCop <0, 0, "stc", [(int_arm_stc imm:$cop, imm:$CRd, addrmode5:$addr)]>;
+defm STCL : LdStCop <0, 1, "stcl", [(int_arm_stcl imm:$cop, imm:$CRd, addrmode5:$addr)]>;
+defm STC2 : LdSt2Cop<0, 0, "stc2", [(int_arm_stc2 imm:$cop, imm:$CRd, addrmode5:$addr)]>, Requires<[PreV8]>;
+defm STC2L : LdSt2Cop<0, 1, "stc2l", [(int_arm_stc2l imm:$cop, imm:$CRd, addrmode5:$addr)]>, Requires<[PreV8]>;
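These patterns let the coprocessor load/store intrinsics select directly to the offset forms of LDC/STC. Illustrative assembly for the matched instructions (coprocessor numbers and operands chosen arbitrarily):

    ldc     p14, c5, [r0, #4] @ int_arm_ldc: coprocessor p14, CRd = c5
    stc2l   p7, c0, [r1]      @ int_arm_stc2l (pre-ARMv8 only)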
//===----------------------------------------------------------------------===//
// Move between coprocessor and ARM core register.
@@ -5118,9 +5143,9 @@ def MRRC : MovRRCopro<"mrrc", 1 /* from coprocessor to ARM core register */,
(outs GPRnopc:$Rt, GPRnopc:$Rt2),
(ins p_imm:$cop, imm0_15:$opc1, c_imm:$CRm), []>;
-class MovRRCopro2<string opc, bit direction, list<dag> pattern = []>
- : ABXI<0b1100, (outs), (ins p_imm:$cop, imm0_15:$opc1,
- GPRnopc:$Rt, GPRnopc:$Rt2, c_imm:$CRm), NoItinerary,
+class MovRRCopro2<string opc, bit direction, dag oops, dag iops,
+ list<dag> pattern = []>
+ : ABXI<0b1100, oops, iops, NoItinerary,
!strconcat(opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm"), pattern>,
Requires<[PreV8]> {
let Inst{31-28} = 0b1111;
@@ -5139,13 +5164,18 @@ class MovRRCopro2<string opc, bit direction, list<dag> pattern = []>
let Inst{7-4} = opc1;
let Inst{3-0} = CRm;
- let DecoderMethod = "DecodeMRRC2";
+ let DecoderMethod = "DecoderForMRRC2AndMCRR2";
}
def MCRR2 : MovRRCopro2<"mcrr2", 0 /* from ARM core register to coprocessor */,
+ (outs), (ins p_imm:$cop, imm0_15:$opc1, GPRnopc:$Rt,
+ GPRnopc:$Rt2, c_imm:$CRm),
[(int_arm_mcrr2 imm:$cop, imm:$opc1, GPRnopc:$Rt,
GPRnopc:$Rt2, imm:$CRm)]>;
-def MRRC2 : MovRRCopro2<"mrrc2", 1 /* from coprocessor to ARM core register */>;
+
+def MRRC2 : MovRRCopro2<"mrrc2", 1 /* from coprocessor to ARM core register */,
+ (outs GPRnopc:$Rt, GPRnopc:$Rt2),
+ (ins p_imm:$cop, imm0_15:$opc1, c_imm:$CRm), []>;
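MCRR2 now carries a selection pattern for int_arm_mcrr2, while MRRC2 keeps an empty pattern because it defines results. A hypothetical use of each, with operand values chosen only for illustration:

    mcrr2   p10, 0, r0, r1, c2   @ move r1:r0 to coprocessor p10
    mrrc2   p10, 0, r0, r1, c2   @ move from coprocessor p10 to r1:r0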
//===----------------------------------------------------------------------===//
// Move between special register and ARM core register
@@ -5164,7 +5194,7 @@ def MRS : ABI<0b0001, (outs GPRnopc:$Rd), (ins), NoItinerary,
let Unpredictable{11-0} = 0b110100001111;
}
-def : InstAlias<"mrs${p} $Rd, cpsr", (MRS GPRnopc:$Rd, pred:$p)>,
+def : InstAlias<"mrs${p} $Rd, cpsr", (MRS GPRnopc:$Rd, pred:$p), 0>,
Requires<[IsARM]>;
// The MRSsys instruction is the MRS instruction from the ARM ARM,
@@ -5206,6 +5236,7 @@ def MRSbanked : ABI<0b0001, (outs GPRnopc:$Rd), (ins banked_reg:$banked),
// to distinguish between them. The mask operand contains the special register
// (R Bit) in bit 4 and bits 3-0 contain the mask with the fields to be
// accessed in the special register.
+let Defs = [CPSR] in
def MSR : ABI<0b0001, (outs), (ins msr_mask:$mask, GPR:$Rn), NoItinerary,
"msr", "\t$mask, $Rn", []> {
bits<5> mask;
@@ -5220,6 +5251,7 @@ def MSR : ABI<0b0001, (outs), (ins msr_mask:$mask, GPR:$Rn), NoItinerary,
let Inst{3-0} = Rn;
}
+let Defs = [CPSR] in
def MSRi : ABI<0b0011, (outs), (ins msr_mask:$mask, mod_imm:$imm), NoItinerary,
"msr", "\t$mask, $imm", []> {
bits<5> mask;
@@ -5268,8 +5300,8 @@ let usesCustomInserter = 1, Uses = [R4], Defs = [R4, SP] in
def win__dbzchk : SDNode<"ARMISD::WIN__DBZCHK", SDT_WIN__DBZCHK,
[SDNPHasChain, SDNPSideEffect, SDNPOutGlue]>;
let usesCustomInserter = 1, Defs = [CPSR] in
- def WIN__DBZCHK : PseudoInst<(outs), (ins GPR:$divisor), NoItinerary,
- [(win__dbzchk GPR:$divisor)]>;
+ def WIN__DBZCHK : PseudoInst<(outs), (ins tGPR:$divisor), NoItinerary,
+ [(win__dbzchk tGPR:$divisor)]>;
//===----------------------------------------------------------------------===//
// TLS Instructions
@@ -5423,6 +5455,8 @@ def : Pat<(load (ARMWrapperPIC tglobaltlsaddr:$addr)),
def : ARMPat<(ARMWrapper tconstpool :$dst), (LEApcrel tconstpool :$dst)>;
def : ARMPat<(ARMWrapper tglobaladdr :$dst), (MOVi32imm tglobaladdr :$dst)>,
Requires<[IsARM, UseMovt]>;
+def : ARMPat<(ARMWrapper texternalsym :$dst), (MOVi32imm texternalsym :$dst)>,
+ Requires<[IsARM, UseMovt]>;
def : ARMPat<(ARMWrapperJT tjumptable:$dst),
(LEApcrelJT tjumptable:$dst)>;
@@ -5568,9 +5602,9 @@ include "ARMInstrNEON.td"
//
// Memory barriers
-def : InstAlias<"dmb", (DMB 0xf)>, Requires<[IsARM, HasDB]>;
-def : InstAlias<"dsb", (DSB 0xf)>, Requires<[IsARM, HasDB]>;
-def : InstAlias<"isb", (ISB 0xf)>, Requires<[IsARM, HasDB]>;
+def : InstAlias<"dmb", (DMB 0xf), 0>, Requires<[IsARM, HasDB]>;
+def : InstAlias<"dsb", (DSB 0xf), 0>, Requires<[IsARM, HasDB]>;
+def : InstAlias<"isb", (ISB 0xf), 0>, Requires<[IsARM, HasDB]>;
// System instructions
def : MnemonicAlias<"swi", "svc">;
@@ -5583,13 +5617,13 @@ def : MnemonicAlias<"stmfd", "stmdb">;
def : MnemonicAlias<"stmia", "stm">;
def : MnemonicAlias<"stmea", "stm">;
-// PKHBT/PKHTB with default shift amount. PKHTB is equivalent to PKHBT when the
-// shift amount is zero (i.e., unspecified).
+// PKHBT/PKHTB with default shift amount. PKHTB is equivalent to PKHBT with the
+// input operands swapped when the shift amount is zero (i.e., unspecified).
def : InstAlias<"pkhbt${p} $Rd, $Rn, $Rm",
- (PKHBT GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, 0, pred:$p)>,
+ (PKHBT GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, 0, pred:$p), 0>,
Requires<[IsARM, HasV6]>;
def : InstAlias<"pkhtb${p} $Rd, $Rn, $Rm",
- (PKHBT GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, 0, pred:$p)>,
+ (PKHBT GPRnopc:$Rd, GPRnopc:$Rm, GPRnopc:$Rn, 0, pred:$p), 0>,
Requires<[IsARM, HasV6]>;
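The corrected alias reflects that, with a zero shift, pkhtb simply swaps which operand supplies each halfword. For example (illustrative only):

    pkhtb   r0, r1, r2        @ with no shift, same result as:
    pkhbt   r0, r2, r1        @ r0 = (r1 & 0xffff0000) | (r2 & 0x0000ffff)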
// PUSH/POP aliases for STM/LDM
@@ -5747,23 +5781,23 @@ def : InstAlias<"nop${p}", (MOVr R0, R0, pred:$p, zero_reg)>,
// the instruction definitions need different constraints pre-v6.
// Use these aliases for assembly parsing on pre-v6.
def : InstAlias<"mul${s}${p} $Rd, $Rn, $Rm",
- (MUL GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p, cc_out:$s)>,
+ (MUL GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p, cc_out:$s), 0>,
Requires<[IsARM, NoV6]>;
def : InstAlias<"mla${s}${p} $Rd, $Rn, $Rm, $Ra",
(MLA GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra,
- pred:$p, cc_out:$s)>,
+ pred:$p, cc_out:$s), 0>,
Requires<[IsARM, NoV6]>;
def : InstAlias<"smlal${s}${p} $RdLo, $RdHi, $Rn, $Rm",
- (SMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
+ (SMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s), 0>,
Requires<[IsARM, NoV6]>;
def : InstAlias<"umlal${s}${p} $RdLo, $RdHi, $Rn, $Rm",
- (UMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
+ (UMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s), 0>,
Requires<[IsARM, NoV6]>;
def : InstAlias<"smull${s}${p} $RdLo, $RdHi, $Rn, $Rm",
- (SMULL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
+ (SMULL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s), 0>,
Requires<[IsARM, NoV6]>;
def : InstAlias<"umull${s}${p} $RdLo, $RdHi, $Rn, $Rm",
- (UMULL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
+ (UMULL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s), 0>,
Requires<[IsARM, NoV6]>;
// 'it' blocks in ARM mode just validate the predicates. The IT itself
@@ -5775,3 +5809,36 @@ let mayLoad = 1, mayStore =1, hasSideEffects = 1 in
def SPACE : PseudoInst<(outs GPR:$Rd), (ins i32imm:$size, GPR:$Rn),
NoItinerary,
[(set GPR:$Rd, (int_arm_space imm:$size, GPR:$Rn))]>;
+
+//===----------------------------------
+// Atomic cmpxchg for -O0
+//===----------------------------------
+
+// The fast register allocator used during -O0 inserts spills to cover any VRegs
+// live across basic block boundaries. When this happens between an LDREX and
+// an STREX it can clear the exclusive monitor, causing all cmpxchg attempts to
+// fail.
+
+// Unfortunately, this means we need an alternative (expanded post-regalloc)
+// path for -O0 compilations. Fortunately, this path can be significantly more
+// naive than the standard expansion: we conservatively assume seq_cst and a
+// strong cmpxchg, and omit clrex on failure.
+
+let Constraints = "@earlyclobber $Rd,@earlyclobber $status",
+ mayLoad = 1, mayStore = 1 in {
+def CMP_SWAP_8 : PseudoInst<(outs GPR:$Rd, GPR:$status),
+ (ins GPR:$addr, GPR:$desired, GPR:$new),
+ NoItinerary, []>, Sched<[]>;
+
+def CMP_SWAP_16 : PseudoInst<(outs GPR:$Rd, GPR:$status),
+ (ins GPR:$addr, GPR:$desired, GPR:$new),
+ NoItinerary, []>, Sched<[]>;
+
+def CMP_SWAP_32 : PseudoInst<(outs GPR:$Rd, GPR:$status),
+ (ins GPR:$addr, GPR:$desired, GPR:$new),
+ NoItinerary, []>, Sched<[]>;
+
+def CMP_SWAP_64 : PseudoInst<(outs GPRPair:$Rd, GPR:$status),
+ (ins GPR:$addr, GPRPair:$desired, GPRPair:$new),
+ NoItinerary, []>, Sched<[]>;
+}
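For reference, a sketch of the LDREX/STREX loop that the standard (optimized) expansion emits; a spill inserted between the two exclusive accesses would clear the monitor, which is exactly the failure mode the pseudos above avoid at -O0:

    1:  ldrex   r0, [r1]          @ load current value, set exclusive monitor
        cmp     r0, r2            @ compare with expected
        bne     2f
        strex   r3, r4, [r1]      @ attempt store; r3 = 0 on success
        cmp     r3, #0
        bne     1b                @ monitor lost, retry
    2: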
diff --git a/lib/Target/ARM/ARMInstrThumb.td b/lib/Target/ARM/ARMInstrThumb.td
index 5b1f9a06442e..93a174f3678a 100644
--- a/lib/Target/ARM/ARMInstrThumb.td
+++ b/lib/Target/ARM/ARMInstrThumb.td
@@ -15,10 +15,6 @@
// Thumb specific DAG Nodes.
//
-def ARMtcall : SDNode<"ARMISD::tCALL", SDT_ARMcall,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
- SDNPVariadic]>;
-
def imm_sr_XFORM: SDNodeXForm<imm, [{
unsigned Imm = N->getZExtValue();
return CurDAG->getTargetConstant((Imm == 32 ? 0 : Imm), SDLoc(N), MVT::i32);
@@ -70,6 +66,14 @@ def thumb_immshifted_shamt : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(V, SDLoc(N), MVT::i32);
}]>;
+def imm256_510 : ImmLeaf<i32, [{
+ return Imm >= 256 && Imm < 511;
+}]>;
+
+def thumb_imm256_510_addend : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(N->getZExtValue() - 255, SDLoc(N), MVT::i32);
+}]>;
+
// Scaled 4 immediate.
def t_imm0_1020s4_asmoperand: AsmOperandClass { let Name = "Imm0_1020s4"; }
def t_imm0_1020s4 : Operand<i32> {
@@ -121,26 +125,38 @@ def t_adrlabel : Operand<i32> {
let ParserMatchClass = UnsignedOffset_b8s2;
}
-def t_bcctarget : Operand<i32> {
- let EncoderMethod = "getThumbBCCTargetOpValue";
- let DecoderMethod = "DecodeThumbBCCTargetOperand";
-}
-def t_cbtarget : Operand<i32> {
- let EncoderMethod = "getThumbCBTargetOpValue";
- let DecoderMethod = "DecodeThumbCmpBROperand";
+def thumb_br_target : Operand<OtherVT> {
+ let ParserMatchClass = ThumbBranchTarget;
+ let EncoderMethod = "getThumbBranchTargetOpValue";
+ let OperandType = "OPERAND_PCREL";
}
-def t_bltarget : Operand<i32> {
+def thumb_bl_target : Operand<i32> {
+ let ParserMatchClass = ThumbBranchTarget;
let EncoderMethod = "getThumbBLTargetOpValue";
let DecoderMethod = "DecodeThumbBLTargetOperand";
}
-def t_blxtarget : Operand<i32> {
+// Target for BLX *from* thumb mode.
+def thumb_blx_target : Operand<i32> {
+ let ParserMatchClass = ARMBranchTarget;
let EncoderMethod = "getThumbBLXTargetOpValue";
let DecoderMethod = "DecodeThumbBLXOffset";
}
+def thumb_bcc_target : Operand<OtherVT> {
+ let ParserMatchClass = ThumbBranchTarget;
+ let EncoderMethod = "getThumbBCCTargetOpValue";
+ let DecoderMethod = "DecodeThumbBCCTargetOperand";
+}
+
+def thumb_cb_target : Operand<OtherVT> {
+ let ParserMatchClass = ThumbBranchTarget;
+ let EncoderMethod = "getThumbCBTargetOpValue";
+ let DecoderMethod = "DecodeThumbCmpBROperand";
+}
+
// t_addrmode_pc := <label> => pc + imm8 * 4
//
def t_addrmode_pc : MemOperand {
@@ -278,16 +294,17 @@ def tHINT : T1pI<(outs), (ins imm0_15:$imm), NoItinerary, "hint", "\t$imm",
let Inst{7-4} = imm;
}
-class tHintAlias<string Asm, dag Result> : tInstAlias<Asm, Result> {
+// Note: When EmitPriority == 1, the alias will be used for printing
+class tHintAlias<string Asm, dag Result, bit EmitPriority = 0> : tInstAlias<Asm, Result, EmitPriority> {
let Predicates = [IsThumb, HasV6M];
}
-def : tHintAlias<"nop$p", (tHINT 0, pred:$p)>; // A8.6.110
-def : tHintAlias<"yield$p", (tHINT 1, pred:$p)>; // A8.6.410
-def : tHintAlias<"wfe$p", (tHINT 2, pred:$p)>; // A8.6.408
-def : tHintAlias<"wfi$p", (tHINT 3, pred:$p)>; // A8.6.409
-def : tHintAlias<"sev$p", (tHINT 4, pred:$p)>; // A8.6.157
-def : tInstAlias<"sevl$p", (tHINT 5, pred:$p)> {
+def : tHintAlias<"nop$p", (tHINT 0, pred:$p), 1>; // A8.6.110
+def : tHintAlias<"yield$p", (tHINT 1, pred:$p), 1>; // A8.6.410
+def : tHintAlias<"wfe$p", (tHINT 2, pred:$p), 1>; // A8.6.408
+def : tHintAlias<"wfi$p", (tHINT 3, pred:$p), 1>; // A8.6.409
+def : tHintAlias<"sev$p", (tHINT 4, pred:$p), 1>; // A8.6.157
+def : tInstAlias<"sevl$p", (tHINT 5, pred:$p), 1> {
let Predicates = [IsThumb2, HasV8];
}
@@ -302,7 +319,7 @@ def tBKPT : T1I<(outs), (ins imm0_255:$val), NoItinerary, "bkpt\t$val",
let Inst{7-0} = val;
}
// default immediate for breakpoint mnemonic
-def : InstAlias<"bkpt", (tBKPT 0)>, Requires<[IsThumb]>;
+def : InstAlias<"bkpt", (tBKPT 0), 0>, Requires<[IsThumb]>;
def tHLT : T1I<(outs), (ins imm0_63:$val), NoItinerary, "hlt\t$val",
[]>, T1Encoding<0b101110>, Requires<[IsThumb, HasV8]> {
@@ -439,6 +456,14 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
let Inst{2-0} = 0b000;
let Unpredictable{2-0} = 0b111;
}
+ def tBXNS : TI<(outs), (ins GPR:$Rm, pred:$p), IIC_Br, "bxns${p}\t$Rm", []>,
+ Requires<[IsThumb, Has8MSecExt]>,
+ T1Special<{1,1,0,?}>, Sched<[WriteBr]> {
+ bits<4> Rm;
+ let Inst{6-3} = Rm;
+ let Inst{2-0} = 0b100;
+ let Unpredictable{1-0} = 0b11;
+ }
}
let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
@@ -458,9 +483,9 @@ let isCall = 1,
Defs = [LR], Uses = [SP] in {
// Also used for Thumb2
def tBL : TIx2<0b11110, 0b11, 1,
- (outs), (ins pred:$p, t_bltarget:$func), IIC_Br,
+ (outs), (ins pred:$p, thumb_bl_target:$func), IIC_Br,
"bl${p}\t$func",
- [(ARMtcall tglobaladdr:$func)]>,
+ [(ARMcall tglobaladdr:$func)]>,
Requires<[IsThumb]>, Sched<[WriteBrL]> {
bits<24> func;
let Inst{26} = func{23};
@@ -472,9 +497,8 @@ let isCall = 1,
// ARMv5T and above, also used for Thumb2
def tBLXi : TIx2<0b11110, 0b11, 0,
- (outs), (ins pred:$p, t_blxtarget:$func), IIC_Br,
- "blx${p}\t$func",
- [(ARMcall tglobaladdr:$func)]>,
+ (outs), (ins pred:$p, thumb_blx_target:$func), IIC_Br,
+ "blx${p}\t$func", []>,
Requires<[IsThumb, HasV5T, IsNotMClass]>, Sched<[WriteBrL]> {
bits<24> func;
let Inst{26} = func{23};
@@ -488,7 +512,7 @@ let isCall = 1,
// Also used for Thumb2
def tBLXr : TI<(outs), (ins pred:$p, GPR:$func), IIC_Br,
"blx${p}\t$func",
- [(ARMtcall GPR:$func)]>,
+ [(ARMcall GPR:$func)]>,
Requires<[IsThumb, HasV5T]>,
T1Special<{1,1,1,?}>, Sched<[WriteBrL]> { // A6.2.3 & A8.6.24;
bits<4> func;
@@ -496,6 +520,17 @@ let isCall = 1,
let Inst{2-0} = 0b000;
}
+ // ARMv8-M Security Extensions
+ def tBLXNSr : TI<(outs), (ins pred:$p, GPRnopc:$func), IIC_Br,
+ "blxns${p}\t$func", []>,
+ Requires<[IsThumb, Has8MSecExt]>,
+ T1Special<{1,1,1,?}>, Sched<[WriteBrL]> {
+ bits<4> func;
+ let Inst{6-3} = func;
+ let Inst{2-0} = 0b100;
+ let Unpredictable{1-0} = 0b11;
+ }
+
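BXNS and BLXNS are the ARMv8-M Security Extension branches that can transition from Secure to Non-secure state. Typical uses, shown for illustration:

    blxns   r0                @ call a non-secure function pointer in r0
    bxns    lr                @ return from a secure entry function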
// ARMv4T
def tBX_CALL : tPseudoInst<(outs), (ins tGPR:$func),
4, IIC_Br,
@@ -517,8 +552,9 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
// Just a pseudo for a tBL instruction. Needed to let regalloc know about
// the clobber of LR.
let Defs = [LR] in
- def tBfar : tPseudoExpand<(outs), (ins t_bltarget:$target, pred:$p),
- 4, IIC_Br, [], (tBL pred:$p, t_bltarget:$target)>,
+ def tBfar : tPseudoExpand<(outs), (ins thumb_bl_target:$target, pred:$p),
+ 4, IIC_Br, [],
+ (tBL pred:$p, thumb_bl_target:$target)>,
Sched<[WriteBrTbl]>;
def tBR_JTr : tPseudoInst<(outs),
@@ -534,7 +570,7 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
// FIXME: should be able to write a pattern for ARMBrcond, but can't use
// a two-value operand where a dag node expects two operands. :(
let isBranch = 1, isTerminator = 1 in
- def tBcc : T1I<(outs), (ins t_bcctarget:$target, pred:$p), IIC_Br,
+ def tBcc : T1I<(outs), (ins thumb_bcc_target:$target, pred:$p), IIC_Br,
"b${p}\t$target",
[/*(ARMbrcond bb:$target, imm:$cc)*/]>,
T1BranchCond<{1,1,0,1}>, Sched<[WriteBr]> {
@@ -663,19 +699,19 @@ multiclass thumb_st_rr_ri_enc<bits<3> reg_opc, bits<4> imm_opc,
defm tLDR : thumb_ld_rr_ri_enc<0b100, 0b0110, t_addrmode_rr,
t_addrmode_is4, AddrModeT1_4,
IIC_iLoad_r, IIC_iLoad_i, "ldr",
- UnOpFrag<(load node:$Src)>>;
+ load>;
// A8.6.64 & A8.6.61
defm tLDRB : thumb_ld_rr_ri_enc<0b110, 0b0111, t_addrmode_rr,
t_addrmode_is1, AddrModeT1_1,
IIC_iLoad_bh_r, IIC_iLoad_bh_i, "ldrb",
- UnOpFrag<(zextloadi8 node:$Src)>>;
+ zextloadi8>;
// A8.6.76 & A8.6.73
defm tLDRH : thumb_ld_rr_ri_enc<0b101, 0b1000, t_addrmode_rr,
t_addrmode_is2, AddrModeT1_2,
IIC_iLoad_bh_r, IIC_iLoad_bh_i, "ldrh",
- UnOpFrag<(zextloadi16 node:$Src)>>;
+ zextloadi16>;
let AddedComplexity = 10 in
def tLDRSB : // A8.6.80
@@ -706,19 +742,19 @@ def tSTRspi : T1pIs<(outs), (ins tGPR:$Rt, t_addrmode_sp:$addr), IIC_iStore_i,
defm tSTR : thumb_st_rr_ri_enc<0b000, 0b0110, t_addrmode_rr,
t_addrmode_is4, AddrModeT1_4,
IIC_iStore_r, IIC_iStore_i, "str",
- BinOpFrag<(store node:$LHS, node:$RHS)>>;
+ store>;
// A8.6.197 & A8.6.195
defm tSTRB : thumb_st_rr_ri_enc<0b010, 0b0111, t_addrmode_rr,
t_addrmode_is1, AddrModeT1_1,
IIC_iStore_bh_r, IIC_iStore_bh_i, "strb",
- BinOpFrag<(truncstorei8 node:$LHS, node:$RHS)>>;
+ truncstorei8>;
// A8.6.207 & A8.6.205
defm tSTRH : thumb_st_rr_ri_enc<0b001, 0b1000, t_addrmode_rr,
t_addrmode_is2, AddrModeT1_2,
IIC_iStore_bh_r, IIC_iStore_bh_i, "strh",
- BinOpFrag<(truncstorei16 node:$LHS, node:$RHS)>>;
+ truncstorei16>;
//===----------------------------------------------------------------------===//
@@ -770,7 +806,7 @@ def tSTMIA_UPD : Thumb1I<(outs GPR:$wb),
} // hasSideEffects
def : InstAlias<"ldm${p} $Rn!, $regs",
- (tLDMIA tGPR:$Rn, pred:$p, reglist:$regs)>,
+ (tLDMIA tGPR:$Rn, pred:$p, reglist:$regs), 0>,
Requires<[IsThumb, IsThumb1Only]>;
let mayLoad = 1, Uses = [SP], Defs = [SP], hasExtraDefRegAllocReq = 1 in
@@ -1310,7 +1346,14 @@ def tInt_eh_sjlj_longjmp : XI<(outs), (ins GPR:$src, GPR:$scratch),
AddrModeNone, 0, IndexModeNone,
Pseudo, NoItinerary, "", "",
[(ARMeh_sjlj_longjmp GPR:$src, GPR:$scratch)]>,
- Requires<[IsThumb]>;
+ Requires<[IsThumb,IsNotWindows]>;
+
+let isBarrier = 1, hasSideEffects = 1, isTerminator = 1, isCodeGenOnly = 1,
+ Defs = [ R11, LR, SP ] in
+def tInt_WIN_eh_sjlj_longjmp
+ : XI<(outs), (ins GPR:$src, GPR:$scratch), AddrModeNone, 0, IndexModeNone,
+ Pseudo, NoItinerary, "", "", [(ARMeh_sjlj_longjmp GPR:$src, GPR:$scratch)]>,
+ Requires<[IsThumb,IsWindows]>;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
@@ -1380,16 +1423,9 @@ def : T1Pat<(ARMWrapperJT tjumptable:$dst),
(tLEApcrelJT tjumptable:$dst)>;
// Direct calls
-def : T1Pat<(ARMtcall texternalsym:$func), (tBL texternalsym:$func)>,
+def : T1Pat<(ARMcall texternalsym:$func), (tBL texternalsym:$func)>,
Requires<[IsThumb]>;
-def : Tv5Pat<(ARMcall texternalsym:$func), (tBLXi texternalsym:$func)>,
- Requires<[IsThumb, HasV5T, IsNotMClass]>;
-
-// Indirect calls to ARM routines
-def : Tv5Pat<(ARMcall GPR:$dst), (tBLXr GPR:$dst)>,
- Requires<[IsThumb, HasV5T]>;
-
// zextload i1 -> zextload i8
def : T1Pat<(zextloadi1 t_addrmode_is1:$addr),
(tLDRBi t_addrmode_is1:$addr)>;
@@ -1415,6 +1451,24 @@ def : T1Pat<(extloadi8 t_addrmode_rr:$addr), (tLDRBr t_addrmode_rr:$addr)>;
def : T1Pat<(extloadi16 t_addrmode_is2:$addr), (tLDRHi t_addrmode_is2:$addr)>;
def : T1Pat<(extloadi16 t_addrmode_rr:$addr), (tLDRHr t_addrmode_rr:$addr)>;
+// post-inc loads and stores
+
+// post-inc LDR -> LDM r0!, {r1}. The way operands are laid out in LDMs is
+// different from how ISel expects them for a post-inc load, so use a pseudo
+// and expand it just after ISel.
+let usesCustomInserter = 1,
+ Constraints = "$Rn = $Rn_wb,@earlyclobber $Rn_wb" in
+ def tLDR_postidx: tPseudoInst<(outs rGPR:$Rt, rGPR:$Rn_wb),
+ (ins rGPR:$Rn, pred:$p),
+ 4, IIC_iStore_ru,
+ []>;
+
+// post-inc STR -> STM r0!, {r1}. The layout of this (because it doesn't def
+// multiple registers) is the same in ISel as in MachineInstr form, so there's
+// no need for a pseudo.
+def : T1Pat<(post_store rGPR:$Rt, rGPR:$Rn, 4),
+ (tSTMIA_UPD rGPR:$Rn, rGPR:$Rt)>;
+
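In other words, Thumb1 has no post-indexed LDR/STR encodings, but a single-register LDM/STM with writeback and a 4-byte step is equivalent. Illustrative assembly for the transformation described above:

    @ post-inc load:  r1 = [r0]; r0 += 4
    ldm     r0!, {r1}
    @ post-inc store: [r0] = r1; r0 += 4
    stm     r0!, {r1}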
// If it's impossible to use [r,r] address mode for sextload, select to
// ldr{b|h} + sxt{b|h} instead.
def : T1Pat<(sextloadi8 t_addrmode_is1:$addr),
@@ -1474,6 +1528,10 @@ def : T1Pat<(i32 thumb_immshifted:$src),
def : T1Pat<(i32 imm0_255_comp:$src),
(tMVN (tMOVi8 (imm_comp_XFORM imm:$src)))>;
+def : T1Pat<(i32 imm256_510:$src),
+ (tADDi8 (tMOVi8 255),
+ (thumb_imm256_510_addend imm:$src))>;
+
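A worked example of this expansion, taking the constant 300 (which lies in [256,510]): the XForm computes 300 - 255 = 45, so the materialization is two Thumb1 instructions:

    movs    r0, #255
    adds    r0, #45           @ r0 = 300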
// Pseudo instruction that combines ldr from constpool and add pc. This should
// be expanded into two instructions late to allow if-conversion and
// scheduling.
@@ -1502,7 +1560,7 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
// In Thumb1, "nop" is encoded as a "mov r8, r8". Technically, the bf00
// encoding is available on ARMv6K, but we don't differentiate that finely.
-def : InstAlias<"nop", (tMOVr R8, R8, 14, 0)>,Requires<[IsThumb, IsThumb1Only]>;
+def : InstAlias<"nop", (tMOVr R8, R8, 14, 0), 0>, Requires<[IsThumb, IsThumb1Only]>;
// For round-trip assembly/disassembly, we have to handle a CPS instruction
@@ -1524,3 +1582,8 @@ def : tInstAlias<"lsr${s}${p} $Rdm, $imm",
(tLSRri tGPR:$Rdm, cc_out:$s, tGPR:$Rdm, imm_sr:$imm, pred:$p)>;
def : tInstAlias<"asr${s}${p} $Rdm, $imm",
(tASRri tGPR:$Rdm, cc_out:$s, tGPR:$Rdm, imm_sr:$imm, pred:$p)>;
+
+// Pseudo instruction ldr Rt, =immediate
+def tLDRConstPool
+ : tAsmPseudo<"ldr${p} $Rt, $immediate",
+ (ins tGPR:$Rt, const_pool_asm_imm:$immediate, pred:$p)>;
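This accepts the familiar assembler shorthand for loading an arbitrary 32-bit constant; the backend places the value in a literal pool and rewrites the pseudo to a PC-relative load. For example (illustrative):

    ldr     r0, =0x12345678   @ becomes: ldr r0, [pc, #offset]
                              @ with 0x12345678 emitted in a literal pool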
diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td
index f42f4569b2f8..55e5308be40e 100644
--- a/lib/Target/ARM/ARMInstrThumb2.td
+++ b/lib/Target/ARM/ARMInstrThumb2.td
@@ -270,7 +270,7 @@ def t2addrmode_so_reg : MemOperand,
let EncoderMethod = "getT2AddrModeSORegOpValue";
let DecoderMethod = "DecodeT2AddrModeSOReg";
let ParserMatchClass = t2addrmode_so_reg_asmoperand;
- let MIOperandInfo = (ops GPR:$base, rGPR:$offsreg, i32imm:$offsimm);
+ let MIOperandInfo = (ops GPRnopc:$base, rGPR:$offsreg, i32imm:$offsimm);
}
// Addresses for the TBB/TBH instructions.
@@ -576,8 +576,8 @@ class T2MlaLong<bits<3> opc22_20, bits<4> opc7_4,
/// changed to modify CPSR.
multiclass T2I_bin_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
- PatFrag opnode, bit Commutable = 0,
- string wide = ""> {
+ SDPatternOperator opnode, bit Commutable = 0,
+ string wide = ""> {
// shifted imm
def ri : T2sTwoRegImm<
(outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_imm:$imm), iii,
@@ -632,7 +632,7 @@ multiclass T2I_bin_irs<bits<4> opcod, string opc,
// the ".w" suffix to indicate that they are wide.
multiclass T2I_bin_w_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
- PatFrag opnode, bit Commutable = 0> :
+ SDPatternOperator opnode, bit Commutable = 0> :
T2I_bin_irs<opcod, opc, iii, iir, iis, opnode, Commutable, ".w"> {
// Assembler aliases w/ the ".w" suffix.
def : t2InstAlias<!strconcat(opc, "${s}${p}.w", " $Rd, $Rn, $imm"),
@@ -661,7 +661,7 @@ multiclass T2I_bin_w_irs<bits<4> opcod, string opc,
/// T2I_rbin_is - Same as T2I_bin_irs except the order of operands are
/// reversed. The 'rr' form is only defined for the disassembler; for codegen
/// it is equivalent to the T2I_bin_irs counterpart.
-multiclass T2I_rbin_irs<bits<4> opcod, string opc, PatFrag opnode> {
+multiclass T2I_rbin_irs<bits<4> opcod, string opc, SDNode opnode> {
// shifted imm
def ri : T2sTwoRegImm<
(outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_imm:$imm), IIC_iALUi,
@@ -705,7 +705,7 @@ multiclass T2I_rbin_irs<bits<4> opcod, string opc, PatFrag opnode> {
/// AdjustInstrPostInstrSelection after giving them an optional CPSR operand.
let hasPostISelHook = 1, Defs = [CPSR] in {
multiclass T2I_bin_s_irs<InstrItinClass iii, InstrItinClass iir,
- InstrItinClass iis, PatFrag opnode,
+ InstrItinClass iis, SDNode opnode,
bit Commutable = 0> {
// shifted imm
def ri : t2PseudoInst<(outs rGPR:$Rd),
@@ -735,7 +735,7 @@ multiclass T2I_bin_s_irs<InstrItinClass iii, InstrItinClass iir,
/// T2I_rbin_s_is - Same as T2I_bin_s_irs, except selection DAG
/// operands are reversed.
let hasPostISelHook = 1, Defs = [CPSR] in {
-multiclass T2I_rbin_s_is<PatFrag opnode> {
+multiclass T2I_rbin_s_is<SDNode opnode> {
// shifted imm
def ri : t2PseudoInst<(outs rGPR:$Rd),
(ins rGPR:$Rn, t2_so_imm:$imm, pred:$p),
@@ -755,7 +755,7 @@ multiclass T2I_rbin_s_is<PatFrag opnode> {
/// T2I_bin_ii12rs - Defines a set of (op reg, {so_imm|imm0_4095|r|so_reg})
/// patterns for a binary operation that produces a value.
-multiclass T2I_bin_ii12rs<bits<3> op23_21, string opc, PatFrag opnode,
+multiclass T2I_bin_ii12rs<bits<3> op23_21, string opc, SDNode opnode,
bit Commutable = 0> {
// shifted imm
// The register-immediate version is re-materializable. This is useful
@@ -824,7 +824,7 @@ multiclass T2I_bin_ii12rs<bits<3> op23_21, string opc, PatFrag opnode,
/// for a binary operation that produces a value and use the carry
/// bit. It's not predicable.
let Defs = [CPSR], Uses = [CPSR] in {
-multiclass T2I_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
+multiclass T2I_adde_sube_irs<bits<4> opcod, string opc, SDNode opnode,
bit Commutable = 0> {
// shifted imm
def ri : T2sTwoRegImm<(outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_imm:$imm),
@@ -864,7 +864,7 @@ multiclass T2I_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
/// T2I_sh_ir - Defines a set of (op reg, {so_imm|r}) patterns for a shift /
// rotate operation that produces a value.
-multiclass T2I_sh_ir<bits<2> opcod, string opc, Operand ty, PatFrag opnode> {
+multiclass T2I_sh_ir<bits<2> opcod, string opc, Operand ty, SDNode opnode> {
// 5-bit imm
def ri : T2sTwoRegShiftImm<
(outs rGPR:$Rd), (ins rGPR:$Rm, ty:$imm), IIC_iMOVsi,
@@ -919,7 +919,7 @@ multiclass T2I_sh_ir<bits<2> opcod, string opc, Operand ty, PatFrag opnode> {
/// an explicit result, only implicitly set CPSR.
multiclass T2I_cmp_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
- PatFrag opnode> {
+ SDPatternOperator opnode> {
let isCompare = 1, Defs = [CPSR] in {
// shifted imm
def ri : T2OneRegCmpImm<
@@ -1260,20 +1260,19 @@ def t2LEApcrelJT : t2PseudoInst<(outs rGPR:$Rd),
// Load
let canFoldAsLoad = 1, isReMaterializable = 1 in
-defm t2LDR : T2I_ld<0, 0b10, "ldr", IIC_iLoad_i, IIC_iLoad_si, GPR,
- UnOpFrag<(load node:$Src)>>;
+defm t2LDR : T2I_ld<0, 0b10, "ldr", IIC_iLoad_i, IIC_iLoad_si, GPR, load>;
// Loads with zero extension
defm t2LDRH : T2I_ld<0, 0b01, "ldrh", IIC_iLoad_bh_i, IIC_iLoad_bh_si,
- GPRnopc, UnOpFrag<(zextloadi16 node:$Src)>>;
+ GPRnopc, zextloadi16>;
defm t2LDRB : T2I_ld<0, 0b00, "ldrb", IIC_iLoad_bh_i, IIC_iLoad_bh_si,
- GPRnopc, UnOpFrag<(zextloadi8 node:$Src)>>;
+ GPRnopc, zextloadi8>;
// Loads with sign extension
defm t2LDRSH : T2I_ld<1, 0b01, "ldrsh", IIC_iLoad_bh_i, IIC_iLoad_bh_si,
- GPRnopc, UnOpFrag<(sextloadi16 node:$Src)>>;
+ GPRnopc, sextloadi16>;
defm t2LDRSB : T2I_ld<1, 0b00, "ldrsb", IIC_iLoad_bh_i, IIC_iLoad_bh_si,
- GPRnopc, UnOpFrag<(sextloadi8 node:$Src)>>;
+ GPRnopc, sextloadi8>;
let mayLoad = 1, hasSideEffects = 0, hasExtraDefRegAllocReq = 1 in {
// Load doubleword
@@ -1414,7 +1413,7 @@ def t2LDRSHT : T2IldT<1, 0b01, "ldrsht", IIC_iLoad_bh_i>;
class T2Ildacq<bits<4> bits23_20, bits<2> bit54, dag oops, dag iops,
string opc, string asm, list<dag> pattern>
: Thumb2I<oops, iops, AddrModeNone, 4, NoItinerary,
- opc, asm, "", pattern>, Requires<[IsThumb, HasV8]> {
+ opc, asm, "", pattern>, Requires<[IsThumb, HasAcquireRelease]> {
bits<4> Rt;
bits<4> addr;
@@ -1438,12 +1437,11 @@ def t2LDAH : T2Ildacq<0b1101, 0b01, (outs rGPR:$Rt),
(ins addr_offset_none:$addr), "ldah", "\t$Rt, $addr", []>;
// Store
-defm t2STR :T2I_st<0b10,"str", IIC_iStore_i, IIC_iStore_si, GPR,
- BinOpFrag<(store node:$LHS, node:$RHS)>>;
+defm t2STR :T2I_st<0b10,"str", IIC_iStore_i, IIC_iStore_si, GPR, store>;
defm t2STRB:T2I_st<0b00,"strb", IIC_iStore_bh_i, IIC_iStore_bh_si,
- rGPR, BinOpFrag<(truncstorei8 node:$LHS, node:$RHS)>>;
+ rGPR, truncstorei8>;
defm t2STRH:T2I_st<0b01,"strh", IIC_iStore_bh_i, IIC_iStore_bh_si,
- rGPR, BinOpFrag<(truncstorei16 node:$LHS, node:$RHS)>>;
+ rGPR, truncstorei16>;
// Store doubleword
let mayStore = 1, hasSideEffects = 0, hasExtraSrcRegAllocReq = 1 in
@@ -1586,7 +1584,7 @@ def t2STRD_POST : T2Ii8s4post<0, 1, 0, (outs GPR:$wb),
class T2Istrrel<bits<2> bit54, dag oops, dag iops,
string opc, string asm, list<dag> pattern>
: Thumb2I<oops, iops, AddrModeNone, 4, NoItinerary, opc,
- asm, "", pattern>, Requires<[IsThumb, HasV8]> {
+ asm, "", pattern>, Requires<[IsThumb, HasAcquireRelease]> {
bits<4> Rt;
bits<4> addr;
@@ -1906,7 +1904,8 @@ def : t2InstAlias<"mov${p} $Rd, $imm", (t2MOVi rGPR:$Rd, t2_so_imm:$imm,
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
def t2MOVi16 : T2I<(outs rGPR:$Rd), (ins imm0_65535_expr:$imm), IIC_iMOVi,
"movw", "\t$Rd, $imm",
- [(set rGPR:$Rd, imm0_65535:$imm)]>, Sched<[WriteALU]> {
+ [(set rGPR:$Rd, imm0_65535:$imm)]>, Sched<[WriteALU]>,
+ Requires<[IsThumb, HasV8MBaseline]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 1;
let Inst{24-21} = 0b0010;
@@ -1924,8 +1923,9 @@ def t2MOVi16 : T2I<(outs rGPR:$Rd), (ins imm0_65535_expr:$imm), IIC_iMOVi,
let DecoderMethod = "DecodeT2MOVTWInstruction";
}
-def : t2InstAlias<"mov${p} $Rd, $imm",
- (t2MOVi16 rGPR:$Rd, imm256_65535_expr:$imm, pred:$p)>;
+def : InstAlias<"mov${p} $Rd, $imm",
+ (t2MOVi16 rGPR:$Rd, imm256_65535_expr:$imm, pred:$p), 0>,
+ Requires<[IsThumb, HasV8MBaseline]>;
def t2MOVi16_ga_pcrel : PseudoInst<(outs rGPR:$Rd),
(ins i32imm:$addr, pclabel:$id), IIC_iMOVi, []>;
@@ -1936,7 +1936,8 @@ def t2MOVTi16 : T2I<(outs rGPR:$Rd),
"movt", "\t$Rd, $imm",
[(set rGPR:$Rd,
(or (and rGPR:$src, 0xffff), lo16AllZero:$imm))]>,
- Sched<[WriteALU]> {
+ Sched<[WriteALU]>,
+ Requires<[IsThumb, HasV8MBaseline]> {
let Inst{31-27} = 0b11110;
let Inst{25} = 1;
let Inst{24-21} = 0b0110;
@@ -1956,7 +1957,7 @@ def t2MOVTi16 : T2I<(outs rGPR:$Rd),
def t2MOVTi16_ga_pcrel : PseudoInst<(outs rGPR:$Rd),
(ins rGPR:$src, i32imm:$addr, pclabel:$id), IIC_iMOVi, []>,
- Sched<[WriteALU]>;
+ Sched<[WriteALU]>, Requires<[IsThumb, HasV8MBaseline]>;
} // Constraints
def : T2Pat<(or rGPR:$src, 0xffff0000), (t2MOVTi16 rGPR:$src, 0xffff)>;
@@ -1997,7 +1998,7 @@ def t2UXTB : T2I_ext_rrot<0b101, "uxtb",
def t2UXTH : T2I_ext_rrot<0b001, "uxth",
UnOpFrag<(and node:$Src, 0x0000FFFF)>>;
def t2UXTB16 : T2I_ext_rrot_uxtb16<0b011, "uxtb16",
- UnOpFrag<(and node:$Src, 0x00FF00FF)>>;
+ UnOpFrag<(and node:$Src, 0x00FF00FF)>>;
// FIXME: This pattern incorrectly assumes the shl operator is a rotate.
// The transformation should probably be done as a combiner action
@@ -2029,10 +2030,8 @@ def : Pat<(add rGPR:$Rn, (and (srl rGPR:$Rm, imm8_or_16:$rot), 0xFFFF)),
// Arithmetic Instructions.
//
-defm t2ADD : T2I_bin_ii12rs<0b000, "add",
- BinOpFrag<(add node:$LHS, node:$RHS)>, 1>;
-defm t2SUB : T2I_bin_ii12rs<0b101, "sub",
- BinOpFrag<(sub node:$LHS, node:$RHS)>>;
+defm t2ADD : T2I_bin_ii12rs<0b000, "add", add, 1>;
+defm t2SUB : T2I_bin_ii12rs<0b101, "sub", sub>;
// ADD and SUB with 's' bit set. No 12-bit immediate (T4) variants.
//
@@ -2044,25 +2043,20 @@ defm t2SUB : T2I_bin_ii12rs<0b101, "sub",
// FIXME: Eliminate t2ADDS/t2SUBS pseudo opcodes after adding tablegen
// support for an optional CPSR definition that corresponds to the DAG
// node's second value. We can then eliminate the implicit def of CPSR.
-defm t2ADDS : T2I_bin_s_irs <IIC_iALUi, IIC_iALUr, IIC_iALUsi,
- BinOpFrag<(ARMaddc node:$LHS, node:$RHS)>, 1>;
-defm t2SUBS : T2I_bin_s_irs <IIC_iALUi, IIC_iALUr, IIC_iALUsi,
- BinOpFrag<(ARMsubc node:$LHS, node:$RHS)>>;
+defm t2ADDS : T2I_bin_s_irs <IIC_iALUi, IIC_iALUr, IIC_iALUsi, ARMaddc, 1>;
+defm t2SUBS : T2I_bin_s_irs <IIC_iALUi, IIC_iALUr, IIC_iALUsi, ARMsubc>;
let hasPostISelHook = 1 in {
-defm t2ADC : T2I_adde_sube_irs<0b1010, "adc",
- BinOpWithFlagFrag<(ARMadde node:$LHS, node:$RHS, node:$FLAG)>, 1>;
-defm t2SBC : T2I_adde_sube_irs<0b1011, "sbc",
- BinOpWithFlagFrag<(ARMsube node:$LHS, node:$RHS, node:$FLAG)>>;
+defm t2ADC : T2I_adde_sube_irs<0b1010, "adc", ARMadde, 1>;
+defm t2SBC : T2I_adde_sube_irs<0b1011, "sbc", ARMsube>;
}
// RSB
-defm t2RSB : T2I_rbin_irs <0b1110, "rsb",
- BinOpFrag<(sub node:$LHS, node:$RHS)>>;
+defm t2RSB : T2I_rbin_irs <0b1110, "rsb", sub>;
// FIXME: Eliminate them if we can write def : Pat patterns which defines
// CPSR and the implicit def of CPSR is not needed.
-defm t2RSBS : T2I_rbin_s_is <BinOpFrag<(ARMsubc node:$LHS, node:$RHS)>>;
+defm t2RSBS : T2I_rbin_s_is <ARMsubc>;
// (sub X, imm) gets canonicalized to (add X, -imm). Match this form.
// The assume-no-carry-in form uses the negation of the input since add/sub
@@ -2293,19 +2287,17 @@ def t2USAT16: T2SatI<(outs rGPR:$Rd), (ins imm0_15:$sat_imm, rGPR:$Rn),
def : T2Pat<(int_arm_ssat GPR:$a, imm1_32:$pos), (t2SSAT imm1_32:$pos, GPR:$a, 0)>;
def : T2Pat<(int_arm_usat GPR:$a, imm0_31:$pos), (t2USAT imm0_31:$pos, GPR:$a, 0)>;
+def : T2Pat<(ARMssatnoshift GPRnopc:$Rn, imm0_31:$imm),
+ (t2SSAT imm0_31:$imm, GPRnopc:$Rn, 0)>;
//===----------------------------------------------------------------------===//
// Shift and rotate Instructions.
//
-defm t2LSL : T2I_sh_ir<0b00, "lsl", imm0_31,
- BinOpFrag<(shl node:$LHS, node:$RHS)>>;
-defm t2LSR : T2I_sh_ir<0b01, "lsr", imm_sr,
- BinOpFrag<(srl node:$LHS, node:$RHS)>>;
-defm t2ASR : T2I_sh_ir<0b10, "asr", imm_sr,
- BinOpFrag<(sra node:$LHS, node:$RHS)>>;
-defm t2ROR : T2I_sh_ir<0b11, "ror", imm0_31,
- BinOpFrag<(rotr node:$LHS, node:$RHS)>>;
+defm t2LSL : T2I_sh_ir<0b00, "lsl", imm0_31, shl>;
+defm t2LSR : T2I_sh_ir<0b01, "lsr", imm_sr, srl>;
+defm t2ASR : T2I_sh_ir<0b10, "asr", imm_sr, sra>;
+defm t2ROR : T2I_sh_ir<0b11, "ror", imm0_31, rotr>;
// (rotr x, (and y, 0x...1f)) ==> (ROR x, y)
def : T2Pat<(rotr rGPR:$lhs, (and rGPR:$rhs, lo5AllOne)),
@@ -2362,14 +2354,11 @@ def t2MOVsra_flag : T2TwoRegShiftImm<
//
defm t2AND : T2I_bin_w_irs<0b0000, "and",
- IIC_iBITi, IIC_iBITr, IIC_iBITsi,
- BinOpFrag<(and node:$LHS, node:$RHS)>, 1>;
+ IIC_iBITi, IIC_iBITr, IIC_iBITsi, and, 1>;
defm t2ORR : T2I_bin_w_irs<0b0010, "orr",
- IIC_iBITi, IIC_iBITr, IIC_iBITsi,
- BinOpFrag<(or node:$LHS, node:$RHS)>, 1>;
+ IIC_iBITi, IIC_iBITr, IIC_iBITsi, or, 1>;
defm t2EOR : T2I_bin_w_irs<0b0100, "eor",
- IIC_iBITi, IIC_iBITr, IIC_iBITsi,
- BinOpFrag<(xor node:$LHS, node:$RHS)>, 1>;
+ IIC_iBITi, IIC_iBITr, IIC_iBITsi, xor, 1>;
defm t2BIC : T2I_bin_w_irs<0b0001, "bic",
IIC_iBITi, IIC_iBITr, IIC_iBITsi,
@@ -2516,7 +2505,7 @@ multiclass T2I_un_irs<bits<4> opcod, string opc,
let AddedComplexity = 1 in
defm t2MVN : T2I_un_irs <0b0011, "mvn",
IIC_iMVNi, IIC_iMVNr, IIC_iMVNsi,
- UnOpFrag<(not node:$Src)>, 1, 1, 1>;
+ not, 1, 1, 1>;
let AddedComplexity = 1 in
def : T2Pat<(and rGPR:$src, t2_so_imm_not:$imm),
@@ -2606,8 +2595,9 @@ def t2UMLAL : T2MlaLong<0b110, 0b0000,
def t2UMAAL : T2MulLong<0b110, 0b0110,
(outs rGPR:$RdLo, rGPR:$RdHi),
- (ins rGPR:$Rn, rGPR:$Rm), IIC_iMAC64,
+ (ins rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi), IIC_iMAC64,
"umaal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
+ RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">,
Requires<[IsThumb2, HasDSP]>;
} // hasSideEffects
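The new tied-operand constraint models that UMAAL reads its accumulator halves: the instruction forms a 64-bit result from a 32x32 multiply plus both 32-bit inputs. Illustrative semantics:

    umaal   r0, r1, r2, r3    @ r1:r0 = r2 * r3 + r0 + r1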
@@ -2677,7 +2667,7 @@ def t2SMMLSR:T2FourReg<
let Inst{7-4} = 0b0001; // Rounding (Inst{4} = 1)
}
-multiclass T2I_smul<string opc, PatFrag opnode> {
+multiclass T2I_smul<string opc, SDNode opnode> {
def BB : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL16,
!strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm",
[(set rGPR:$Rd, (opnode (sext_inreg rGPR:$Rn, i16),
@@ -2756,7 +2746,7 @@ multiclass T2I_smul<string opc, PatFrag opnode> {
}
-multiclass T2I_smla<string opc, PatFrag opnode> {
+multiclass T2I_smla<string opc, SDNode opnode> {
def BB : T2FourReg<
(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC16,
!strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm, $Ra",
@@ -2835,8 +2825,8 @@ multiclass T2I_smla<string opc, PatFrag opnode> {
}
}
-defm t2SMUL : T2I_smul<"smul", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
-defm t2SMLA : T2I_smla<"smla", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
+defm t2SMUL : T2I_smul<"smul", mul>;
+defm t2SMLA : T2I_smla<"smla", mul>;
// Halfword multiply accumulate long: SMLAL<x><y>
def t2SMLALBB : T2FourReg_mac<1, 0b100, 0b1000, (outs rGPR:$Ra,rGPR:$Rd),
@@ -2923,7 +2913,7 @@ def t2SMLSLDX : T2FourReg_mac<1, 0b101, 0b1101, (outs rGPR:$Ra,rGPR:$Rd),
def t2SDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iDIV,
"sdiv", "\t$Rd, $Rn, $Rm",
[(set rGPR:$Rd, (sdiv rGPR:$Rn, rGPR:$Rm))]>,
- Requires<[HasDivide, IsThumb2]> {
+ Requires<[HasDivide, IsThumb, HasV8MBaseline]> {
let Inst{31-27} = 0b11111;
let Inst{26-21} = 0b011100;
let Inst{20} = 0b1;
@@ -2934,7 +2924,7 @@ def t2SDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iDIV,
def t2UDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iDIV,
"udiv", "\t$Rd, $Rn, $Rm",
[(set rGPR:$Rd, (udiv rGPR:$Rn, rGPR:$Rm))]>,
- Requires<[HasDivide, IsThumb2]> {
+ Requires<[HasDivide, IsThumb, HasV8MBaseline]> {
let Inst{31-27} = 0b11111;
let Inst{26-21} = 0b011101;
let Inst{20} = 0b1;
@@ -3080,8 +3070,7 @@ def t2CRC32CW : T2I_crc32<1, 0b10, "cw", int_arm_crc32cw>;
// Comparison Instructions...
//
defm t2CMP : T2I_cmp_irs<0b1101, "cmp",
- IIC_iCMPi, IIC_iCMPr, IIC_iCMPsi,
- BinOpFrag<(ARMcmp node:$LHS, node:$RHS)>>;
+ IIC_iCMPi, IIC_iCMPr, IIC_iCMPsi, ARMcmp>;
def : T2Pat<(ARMcmpZ GPRnopc:$lhs, t2_so_imm:$imm),
(t2CMPri GPRnopc:$lhs, t2_so_imm:$imm)>;
@@ -3288,15 +3277,18 @@ let mayLoad = 1 in {
def t2LDREXB : T2I_ldrex<0b0100, (outs rGPR:$Rt), (ins addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"ldrexb", "\t$Rt, $addr", "",
- [(set rGPR:$Rt, (ldrex_1 addr_offset_none:$addr))]>;
+ [(set rGPR:$Rt, (ldrex_1 addr_offset_none:$addr))]>,
+ Requires<[IsThumb, HasV8MBaseline]>;
def t2LDREXH : T2I_ldrex<0b0101, (outs rGPR:$Rt), (ins addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"ldrexh", "\t$Rt, $addr", "",
- [(set rGPR:$Rt, (ldrex_2 addr_offset_none:$addr))]>;
+ [(set rGPR:$Rt, (ldrex_2 addr_offset_none:$addr))]>,
+ Requires<[IsThumb, HasV8MBaseline]>;
def t2LDREX : Thumb2I<(outs rGPR:$Rt), (ins t2addrmode_imm0_1020s4:$addr),
AddrModeNone, 4, NoItinerary,
"ldrex", "\t$Rt, $addr", "",
- [(set rGPR:$Rt, (ldrex_4 t2addrmode_imm0_1020s4:$addr))]> {
+ [(set rGPR:$Rt, (ldrex_4 t2addrmode_imm0_1020s4:$addr))]>,
+ Requires<[IsThumb, HasV8MBaseline]> {
bits<4> Rt;
bits<12> addr;
let Inst{31-27} = 0b11101;
@@ -3320,17 +3312,17 @@ def t2LDAEXB : T2I_ldrex<0b1100, (outs rGPR:$Rt), (ins addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"ldaexb", "\t$Rt, $addr", "",
[(set rGPR:$Rt, (ldaex_1 addr_offset_none:$addr))]>,
- Requires<[IsThumb, HasV8]>;
+ Requires<[IsThumb, HasAcquireRelease, HasV7Clrex]>;
def t2LDAEXH : T2I_ldrex<0b1101, (outs rGPR:$Rt), (ins addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"ldaexh", "\t$Rt, $addr", "",
[(set rGPR:$Rt, (ldaex_2 addr_offset_none:$addr))]>,
- Requires<[IsThumb, HasV8]>;
+ Requires<[IsThumb, HasAcquireRelease, HasV7Clrex]>;
def t2LDAEX : Thumb2I<(outs rGPR:$Rt), (ins addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"ldaex", "\t$Rt, $addr", "",
[(set rGPR:$Rt, (ldaex_4 addr_offset_none:$addr))]>,
- Requires<[IsThumb, HasV8]> {
+ Requires<[IsThumb, HasAcquireRelease, HasV7Clrex]> {
bits<4> Rt;
bits<4> addr;
let Inst{31-27} = 0b11101;
@@ -3345,7 +3337,8 @@ def t2LDAEXD : T2I_ldrex<0b1111, (outs rGPR:$Rt, rGPR:$Rt2),
(ins addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"ldaexd", "\t$Rt, $Rt2, $addr", "",
- [], {?, ?, ?, ?}>, Requires<[IsThumb, HasV8]> {
+ [], {?, ?, ?, ?}>, Requires<[IsThumb,
+ HasAcquireRelease, HasV7Clrex, IsNotMClass]> {
bits<4> Rt2;
let Inst{11-8} = Rt2;
@@ -3359,20 +3352,23 @@ def t2STREXB : T2I_strex<0b0100, (outs rGPR:$Rd),
AddrModeNone, 4, NoItinerary,
"strexb", "\t$Rd, $Rt, $addr", "",
[(set rGPR:$Rd,
- (strex_1 rGPR:$Rt, addr_offset_none:$addr))]>;
+ (strex_1 rGPR:$Rt, addr_offset_none:$addr))]>,
+ Requires<[IsThumb, HasV8MBaseline]>;
def t2STREXH : T2I_strex<0b0101, (outs rGPR:$Rd),
(ins rGPR:$Rt, addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"strexh", "\t$Rd, $Rt, $addr", "",
[(set rGPR:$Rd,
- (strex_2 rGPR:$Rt, addr_offset_none:$addr))]>;
+ (strex_2 rGPR:$Rt, addr_offset_none:$addr))]>,
+ Requires<[IsThumb, HasV8MBaseline]>;
def t2STREX : Thumb2I<(outs rGPR:$Rd), (ins rGPR:$Rt,
t2addrmode_imm0_1020s4:$addr),
AddrModeNone, 4, NoItinerary,
"strex", "\t$Rd, $Rt, $addr", "",
[(set rGPR:$Rd,
- (strex_4 rGPR:$Rt, t2addrmode_imm0_1020s4:$addr))]> {
+ (strex_4 rGPR:$Rt, t2addrmode_imm0_1020s4:$addr))]>,
+ Requires<[IsThumb, HasV8MBaseline]> {
bits<4> Rd;
bits<4> Rt;
bits<12> addr;
@@ -3399,7 +3395,8 @@ def t2STLEXB : T2I_strex<0b1100, (outs rGPR:$Rd),
"stlexb", "\t$Rd, $Rt, $addr", "",
[(set rGPR:$Rd,
(stlex_1 rGPR:$Rt, addr_offset_none:$addr))]>,
- Requires<[IsThumb, HasV8]>;
+ Requires<[IsThumb, HasAcquireRelease,
+ HasV7Clrex]>;
def t2STLEXH : T2I_strex<0b1101, (outs rGPR:$Rd),
(ins rGPR:$Rt, addr_offset_none:$addr),
@@ -3407,7 +3404,8 @@ def t2STLEXH : T2I_strex<0b1101, (outs rGPR:$Rd),
"stlexh", "\t$Rd, $Rt, $addr", "",
[(set rGPR:$Rd,
(stlex_2 rGPR:$Rt, addr_offset_none:$addr))]>,
- Requires<[IsThumb, HasV8]>;
+ Requires<[IsThumb, HasAcquireRelease,
+ HasV7Clrex]>;
def t2STLEX : Thumb2I<(outs rGPR:$Rd), (ins rGPR:$Rt,
addr_offset_none:$addr),
@@ -3415,7 +3413,7 @@ def t2STLEX : Thumb2I<(outs rGPR:$Rd), (ins rGPR:$Rt,
"stlex", "\t$Rd, $Rt, $addr", "",
[(set rGPR:$Rd,
(stlex_4 rGPR:$Rt, addr_offset_none:$addr))]>,
- Requires<[IsThumb, HasV8]> {
+ Requires<[IsThumb, HasAcquireRelease, HasV7Clrex]> {
bits<4> Rd;
bits<4> Rt;
bits<4> addr;
@@ -3431,14 +3429,15 @@ def t2STLEXD : T2I_strex<0b1111, (outs rGPR:$Rd),
(ins rGPR:$Rt, rGPR:$Rt2, addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
"stlexd", "\t$Rd, $Rt, $Rt2, $addr", "", [],
- {?, ?, ?, ?}>, Requires<[IsThumb, HasV8]> {
+ {?, ?, ?, ?}>, Requires<[IsThumb, HasAcquireRelease,
+ HasV7Clrex, IsNotMClass]> {
bits<4> Rt2;
let Inst{11-8} = Rt2;
}
}
def t2CLREX : T2I<(outs), (ins), NoItinerary, "clrex", "", [(int_arm_clrex)]>,
- Requires<[IsThumb2, HasV7]> {
+ Requires<[IsThumb, HasV7Clrex]> {
let Inst{31-16} = 0xf3bf;
let Inst{15-14} = 0b10;
let Inst{13} = 0;
@@ -3449,22 +3448,30 @@ def t2CLREX : T2I<(outs), (ins), NoItinerary, "clrex", "", [(int_arm_clrex)]>,
}
def : T2Pat<(and (ldrex_1 addr_offset_none:$addr), 0xff),
- (t2LDREXB addr_offset_none:$addr)>;
+ (t2LDREXB addr_offset_none:$addr)>,
+ Requires<[IsThumb, HasV8MBaseline]>;
def : T2Pat<(and (ldrex_2 addr_offset_none:$addr), 0xffff),
- (t2LDREXH addr_offset_none:$addr)>;
+ (t2LDREXH addr_offset_none:$addr)>,
+ Requires<[IsThumb, HasV8MBaseline]>;
def : T2Pat<(strex_1 (and GPR:$Rt, 0xff), addr_offset_none:$addr),
- (t2STREXB GPR:$Rt, addr_offset_none:$addr)>;
+ (t2STREXB GPR:$Rt, addr_offset_none:$addr)>,
+ Requires<[IsThumb, HasV8MBaseline]>;
def : T2Pat<(strex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
- (t2STREXH GPR:$Rt, addr_offset_none:$addr)>;
+ (t2STREXH GPR:$Rt, addr_offset_none:$addr)>,
+ Requires<[IsThumb, HasV8MBaseline]>;
def : T2Pat<(and (ldaex_1 addr_offset_none:$addr), 0xff),
- (t2LDAEXB addr_offset_none:$addr)>;
+ (t2LDAEXB addr_offset_none:$addr)>,
+ Requires<[IsThumb, HasAcquireRelease, HasV7Clrex]>;
def : T2Pat<(and (ldaex_2 addr_offset_none:$addr), 0xffff),
- (t2LDAEXH addr_offset_none:$addr)>;
+ (t2LDAEXH addr_offset_none:$addr)>,
+ Requires<[IsThumb, HasAcquireRelease, HasV7Clrex]>;
def : T2Pat<(stlex_1 (and GPR:$Rt, 0xff), addr_offset_none:$addr),
- (t2STLEXB GPR:$Rt, addr_offset_none:$addr)>;
+ (t2STLEXB GPR:$Rt, addr_offset_none:$addr)>,
+ Requires<[IsThumb, HasAcquireRelease, HasV7Clrex]>;
def : T2Pat<(stlex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
- (t2STLEXH GPR:$Rt, addr_offset_none:$addr)>;
+ (t2STLEXH GPR:$Rt, addr_offset_none:$addr)>,
+ Requires<[IsThumb, HasAcquireRelease, HasV7Clrex]>;
//===----------------------------------------------------------------------===//
// SJLJ Exception handling intrinsics
@@ -3517,9 +3524,10 @@ def t2LDMIA_RET: t2PseudoExpand<(outs GPR:$wb), (ins GPR:$Rn, pred:$p,
let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
let isPredicable = 1 in
-def t2B : T2I<(outs), (ins uncondbrtarget:$target), IIC_Br,
+def t2B : T2I<(outs), (ins thumb_br_target:$target), IIC_Br,
"b", ".w\t$target",
- [(br bb:$target)]>, Sched<[WriteBr]> {
+ [(br bb:$target)]>, Sched<[WriteBr]>,
+ Requires<[IsThumb, HasV8MBaseline]> {
let Inst{31-27} = 0b11110;
let Inst{15-14} = 0b10;
let Inst{12} = 1;
@@ -3609,9 +3617,9 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
// IOS version.
let Uses = [SP] in
def tTAILJMPd: tPseudoExpand<(outs),
- (ins uncondbrtarget:$dst, pred:$p),
+ (ins thumb_br_target:$dst, pred:$p),
4, IIC_Br, [],
- (t2B uncondbrtarget:$dst, pred:$p)>,
+ (t2B thumb_br_target:$dst, pred:$p)>,
Requires<[IsThumb2, IsMachO]>, Sched<[WriteBr]>;
}
@@ -3647,10 +3655,10 @@ def t2BXJ : T2I<(outs), (ins GPRnopc:$func), NoItinerary, "bxj", "\t$func", []>,
// Compare and branch on zero / non-zero
let isBranch = 1, isTerminator = 1 in {
- def tCBZ : T1I<(outs), (ins tGPR:$Rn, t_cbtarget:$target), IIC_Br,
+ def tCBZ : T1I<(outs), (ins tGPR:$Rn, thumb_cb_target:$target), IIC_Br,
"cbz\t$Rn, $target", []>,
T1Misc<{0,0,?,1,?,?,?}>,
- Requires<[IsThumb2]>, Sched<[WriteBr]> {
+ Requires<[IsThumb, HasV8MBaseline]>, Sched<[WriteBr]> {
// A8.6.27
bits<6> target;
bits<3> Rn;
@@ -3659,10 +3667,10 @@ let isBranch = 1, isTerminator = 1 in {
let Inst{2-0} = Rn;
}
- def tCBNZ : T1I<(outs), (ins tGPR:$Rn, t_cbtarget:$target), IIC_Br,
+ def tCBNZ : T1I<(outs), (ins tGPR:$Rn, thumb_cb_target:$target), IIC_Br,
"cbnz\t$Rn, $target", []>,
T1Misc<{1,0,?,1,?,?,?}>,
- Requires<[IsThumb2]>, Sched<[WriteBr]> {
+ Requires<[IsThumb, HasV8MBaseline]>, Sched<[WriteBr]> {
// A8.6.27
bits<6> target;
bits<3> Rn;
@@ -3715,15 +3723,21 @@ def t2HINT : T2I<(outs), (ins imm0_239:$imm), NoItinerary, "hint", ".w\t$imm",
let Inst{7-0} = imm;
}
-def : t2InstAlias<"hint$p $imm", (t2HINT imm0_239:$imm, pred:$p)>;
-def : t2InstAlias<"nop$p.w", (t2HINT 0, pred:$p)>;
-def : t2InstAlias<"yield$p.w", (t2HINT 1, pred:$p)>;
-def : t2InstAlias<"wfe$p.w", (t2HINT 2, pred:$p)>;
-def : t2InstAlias<"wfi$p.w", (t2HINT 3, pred:$p)>;
-def : t2InstAlias<"sev$p.w", (t2HINT 4, pred:$p)>;
-def : t2InstAlias<"sevl$p.w", (t2HINT 5, pred:$p)> {
+def : t2InstAlias<"hint$p $imm", (t2HINT imm0_239:$imm, pred:$p), 0>;
+def : t2InstAlias<"nop$p.w", (t2HINT 0, pred:$p), 1>;
+def : t2InstAlias<"yield$p.w", (t2HINT 1, pred:$p), 1>;
+def : t2InstAlias<"wfe$p.w", (t2HINT 2, pred:$p), 1>;
+def : t2InstAlias<"wfi$p.w", (t2HINT 3, pred:$p), 1>;
+def : t2InstAlias<"sev$p.w", (t2HINT 4, pred:$p), 1>;
+def : t2InstAlias<"sevl$p.w", (t2HINT 5, pred:$p), 1> {
let Predicates = [IsThumb2, HasV8];
}
+def : t2InstAlias<"esb$p.w", (t2HINT 16, pred:$p), 1> {
+ let Predicates = [IsThumb2, HasRAS];
+}
+def : t2InstAlias<"esb$p", (t2HINT 16, pred:$p), 0> {
+ let Predicates = [IsThumb2, HasRAS];
+}
def t2DBG : T2I<(outs), (ins imm0_15:$opt), NoItinerary, "dbg", "\t$opt",
[(int_arm_dbg imm0_15:$opt)]> {
@@ -3848,7 +3862,7 @@ def : t2InstAlias<"hvc\t$imm16", (t2HVC imm0_65535:$imm16)>;
// ERET - Return from exception in Hypervisor mode.
// B9.3.3, B9.3.20: ERET is an alias for "SUBS PC, LR, #0" in an implementation that
// includes virtualization extensions.
-def t2ERET : InstAlias<"eret${p}", (t2SUBS_PC_LR 0, pred:$p)>,
+def t2ERET : InstAlias<"eret${p}", (t2SUBS_PC_LR 0, pred:$p), 1>,
Requires<[IsThumb2, HasVirtualization]>;
//===----------------------------------------------------------------------===//
@@ -3871,7 +3885,7 @@ let isReMaterializable = 1 in {
def t2MOV_ga_pcrel : PseudoInst<(outs rGPR:$dst), (ins i32imm:$addr),
IIC_iMOVix2addpc,
[(set rGPR:$dst, (ARMWrapperPIC tglobaladdr:$addr))]>,
- Requires<[IsThumb2, UseMovt]>;
+ Requires<[IsThumb, HasV8MBaseline, UseMovt]>;
}
@@ -3883,12 +3897,13 @@ def : T2Pat<(ARMWrapper tglobaltlsaddr:$dst),
Requires<[IsThumb2, UseMovt]>;
// ConstantPool, GlobalAddress, and JumpTable
-def : T2Pat<(ARMWrapper tconstpool :$dst), (t2LEApcrel tconstpool :$dst)>;
-def : T2Pat<(ARMWrapper tglobaladdr :$dst), (t2MOVi32imm tglobaladdr :$dst)>,
- Requires<[IsThumb2, UseMovt]>;
+def : T2Pat<(ARMWrapper tconstpool :$dst), (t2LEApcrel tconstpool :$dst)>;
+def : T2Pat<(ARMWrapper texternalsym :$dst), (t2MOVi32imm texternalsym :$dst)>,
+ Requires<[IsThumb, HasV8MBaseline, UseMovt]>;
+def : T2Pat<(ARMWrapper tglobaladdr :$dst), (t2MOVi32imm tglobaladdr :$dst)>,
+ Requires<[IsThumb, HasV8MBaseline, UseMovt]>;
-def : T2Pat<(ARMWrapperJT tjumptable:$dst),
- (t2LEApcrelJT tjumptable:$dst)>;
+def : T2Pat<(ARMWrapperJT tjumptable:$dst), (t2LEApcrelJT tjumptable:$dst)>;
// Pseudo instruction that combines ldr from constpool and add pc. This should
// be expanded into two instructions late to allow if-conversion and
@@ -3910,16 +3925,16 @@ def t2ABS : PseudoInst<(outs rGPR:$dst), (ins rGPR:$src),
//===----------------------------------------------------------------------===//
// Coprocessor load/store -- for disassembly only
//
-class T2CI<bits<4> op31_28, dag oops, dag iops, string opc, string asm>
- : T2I<oops, iops, NoItinerary, opc, asm, []> {
+class T2CI<bits<4> op31_28, dag oops, dag iops, string opc, string asm, list<dag> pattern>
+ : T2I<oops, iops, NoItinerary, opc, asm, pattern> {
let Inst{31-28} = op31_28;
let Inst{27-25} = 0b110;
}
-multiclass t2LdStCop<bits<4> op31_28, bit load, bit Dbit, string asm> {
+multiclass t2LdStCop<bits<4> op31_28, bit load, bit Dbit, string asm, list<dag> pattern> {
def _OFFSET : T2CI<op31_28,
(outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5:$addr),
- asm, "\t$cop, $CRd, $addr"> {
+ asm, "\t$cop, $CRd, $addr", pattern> {
bits<13> addr;
bits<4> cop;
bits<4> CRd;
@@ -3936,7 +3951,7 @@ multiclass t2LdStCop<bits<4> op31_28, bit load, bit Dbit, string asm> {
}
def _PRE : T2CI<op31_28,
(outs), (ins p_imm:$cop, c_imm:$CRd, addrmode5_pre:$addr),
- asm, "\t$cop, $CRd, $addr!"> {
+ asm, "\t$cop, $CRd, $addr!", []> {
bits<13> addr;
bits<4> cop;
bits<4> CRd;
@@ -3954,7 +3969,7 @@ multiclass t2LdStCop<bits<4> op31_28, bit load, bit Dbit, string asm> {
def _POST: T2CI<op31_28,
(outs), (ins p_imm:$cop, c_imm:$CRd, addr_offset_none:$addr,
postidx_imm8s4:$offset),
- asm, "\t$cop, $CRd, $addr, $offset"> {
+ asm, "\t$cop, $CRd, $addr, $offset", []> {
bits<9> offset;
bits<4> addr;
bits<4> cop;
@@ -3973,7 +3988,7 @@ multiclass t2LdStCop<bits<4> op31_28, bit load, bit Dbit, string asm> {
def _OPTION : T2CI<op31_28, (outs),
(ins p_imm:$cop, c_imm:$CRd, addr_offset_none:$addr,
coproc_option_imm:$option),
- asm, "\t$cop, $CRd, $addr, $option"> {
+ asm, "\t$cop, $CRd, $addr, $option", []> {
bits<8> option;
bits<4> addr;
bits<4> cop;
@@ -3991,14 +4006,15 @@ multiclass t2LdStCop<bits<4> op31_28, bit load, bit Dbit, string asm> {
}
}
-defm t2LDC : t2LdStCop<0b1110, 1, 0, "ldc">;
-defm t2LDCL : t2LdStCop<0b1110, 1, 1, "ldcl">;
-defm t2STC : t2LdStCop<0b1110, 0, 0, "stc">;
-defm t2STCL : t2LdStCop<0b1110, 0, 1, "stcl">;
-defm t2LDC2 : t2LdStCop<0b1111, 1, 0, "ldc2">, Requires<[PreV8,IsThumb2]>;
-defm t2LDC2L : t2LdStCop<0b1111, 1, 1, "ldc2l">, Requires<[PreV8,IsThumb2]>;
-defm t2STC2 : t2LdStCop<0b1111, 0, 0, "stc2">, Requires<[PreV8,IsThumb2]>;
-defm t2STC2L : t2LdStCop<0b1111, 0, 1, "stc2l">, Requires<[PreV8,IsThumb2]>;
+defm t2LDC : t2LdStCop<0b1110, 1, 0, "ldc", [(int_arm_ldc imm:$cop, imm:$CRd, addrmode5:$addr)]>;
+defm t2LDCL : t2LdStCop<0b1110, 1, 1, "ldcl", [(int_arm_ldcl imm:$cop, imm:$CRd, addrmode5:$addr)]>;
+defm t2LDC2 : t2LdStCop<0b1111, 1, 0, "ldc2", [(int_arm_ldc2 imm:$cop, imm:$CRd, addrmode5:$addr)]>, Requires<[PreV8,IsThumb2]>;
+defm t2LDC2L : t2LdStCop<0b1111, 1, 1, "ldc2l", [(int_arm_ldc2l imm:$cop, imm:$CRd, addrmode5:$addr)]>, Requires<[PreV8,IsThumb2]>;
+
+defm t2STC : t2LdStCop<0b1110, 0, 0, "stc", [(int_arm_stc imm:$cop, imm:$CRd, addrmode5:$addr)]>;
+defm t2STCL : t2LdStCop<0b1110, 0, 1, "stcl", [(int_arm_stcl imm:$cop, imm:$CRd, addrmode5:$addr)]>;
+defm t2STC2 : t2LdStCop<0b1111, 0, 0, "stc2", [(int_arm_stc2 imm:$cop, imm:$CRd, addrmode5:$addr)]>, Requires<[PreV8,IsThumb2]>;
+defm t2STC2L : t2LdStCop<0b1111, 0, 1, "stc2l", [(int_arm_stc2l imm:$cop, imm:$CRd, addrmode5:$addr)]>, Requires<[PreV8,IsThumb2]>;
//===----------------------------------------------------------------------===//
@@ -4070,6 +4086,7 @@ def t2MRS_M : T2I<(outs rGPR:$Rd), (ins msr_mask:$SYSm), NoItinerary,
// same and the assembly parser has no way to distinguish between them. The mask
// operand contains the special register (R Bit) in bit 4 and bits 3-0 contain
// the mask with the fields to be accessed in the special register.
+let Defs = [CPSR] in
def t2MSR_AR : T2I<(outs), (ins msr_mask:$mask, rGPR:$Rn),
NoItinerary, "msr", "\t$mask, $Rn", []>,
Requires<[IsThumb2,IsNotMClass]> {
@@ -4105,6 +4122,7 @@ def t2MSRbanked : T2I<(outs), (ins banked_reg:$banked, rGPR:$Rn),
// M class MSR.
//
// Move from ARM core register to Special Register
+let Defs = [CPSR] in
def t2MSR_M : T2I<(outs), (ins msr_mask:$SYSm, rGPR:$Rn),
NoItinerary, "msr", "\t$SYSm, $Rn", []>,
Requires<[IsThumb,IsMClass]> {
@@ -4314,6 +4332,37 @@ def t2SETPAN : T1I<(outs), (ins imm0_1:$imm), NoItinerary, "setpan\t$imm", []>,
}
//===----------------------------------------------------------------------===//
+// ARMv8-M Security Extensions instructions
+//
+
+let hasSideEffects = 1 in
+def t2SG : T2I<(outs), (ins), NoItinerary, "sg", "", []>,
+ Requires<[Has8MSecExt]> {
+ let Inst = 0xe97fe97f;
+}
+
+class T2TT<bits<2> at, string asm, list<dag> pattern>
+ : T2I<(outs rGPR:$Rt), (ins GPRnopc:$Rn), NoItinerary, asm, "\t$Rt, $Rn",
+ pattern> {
+ bits<4> Rn;
+ bits<4> Rt;
+
+ let Inst{31-20} = 0b111010000100;
+ let Inst{19-16} = Rn;
+ let Inst{15-12} = 0b1111;
+ let Inst{11-8} = Rt;
+ let Inst{7-6} = at;
+ let Inst{5-0} = 0b000000;
+
+ let Unpredictable{5-0} = 0b111111;
+}
+
+def t2TT : T2TT<0b00, "tt", []>, Requires<[IsThumb,Has8MSecExt]>;
+def t2TTT : T2TT<0b01, "ttt", []>, Requires<[IsThumb,Has8MSecExt]>;
+def t2TTA : T2TT<0b10, "tta", []>, Requires<[IsThumb,Has8MSecExt]>;
+def t2TTAT : T2TT<0b11, "ttat", []>, Requires<[IsThumb,Has8MSecExt]>;
+
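For context, these TT instructions are what the ARMv8-M CMSE support queries. A hedged usage sketch (assumes a toolchain shipping arm_cmse.h and building with the appropriate -mcmse option):

#include <arm_cmse.h>

int isSecureAddress(void *P) {
  cmse_address_info_t Info = cmse_TT(P);  // lowers to a TT instruction
  return Info.flags.secure;
}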
+//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//
@@ -4488,9 +4537,9 @@ def : t2InstAlias<"tst${p} $Rn, $Rm",
(t2TSTrr GPRnopc:$Rn, rGPR:$Rm, pred:$p)>;
// Memory barriers
-def : InstAlias<"dmb${p}", (t2DMB 0xf, pred:$p)>, Requires<[HasDB]>;
-def : InstAlias<"dsb${p}", (t2DSB 0xf, pred:$p)>, Requires<[HasDB]>;
-def : InstAlias<"isb${p}", (t2ISB 0xf, pred:$p)>, Requires<[HasDB]>;
+def : InstAlias<"dmb${p}", (t2DMB 0xf, pred:$p), 0>, Requires<[HasDB]>;
+def : InstAlias<"dsb${p}", (t2DSB 0xf, pred:$p), 0>, Requires<[HasDB]>;
+def : InstAlias<"isb${p}", (t2ISB 0xf, pred:$p), 0>, Requires<[HasDB]>;
// Alias for LDR, LDRB, LDRH, LDRSB, and LDRSH without the ".w" optional
// width specifier.
@@ -4535,13 +4584,13 @@ def : t2InstAlias<"mvn${s}${p} $Rd, $Rm",
def : t2InstAlias<"mvn${s}${p} $Rd, $ShiftedRm",
(t2MVNs rGPR:$Rd, t2_so_reg:$ShiftedRm, pred:$p, cc_out:$s)>;
-// PKHBT/PKHTB with default shift amount. PKHTB is equivalent to PKHBT when the
-// shift amount is zero (i.e., unspecified).
+// PKHBT/PKHTB with default shift amount. PKHTB is equivalent to PKHBT with the
+// input operands swapped when the shift amount is zero (i.e., unspecified).
def : InstAlias<"pkhbt${p} $Rd, $Rn, $Rm",
- (t2PKHBT rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>,
+ (t2PKHBT rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p), 0>,
Requires<[HasT2ExtractPack, IsThumb2]>;
def : InstAlias<"pkhtb${p} $Rd, $Rn, $Rm",
- (t2PKHBT rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>,
+ (t2PKHBT rGPR:$Rd, rGPR:$Rm, rGPR:$Rn, 0, pred:$p), 0>,
Requires<[HasT2ExtractPack, IsThumb2]>;
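A quick model of why the swap is correct (standalone helpers, not LLVM code): with a zero shift amount the two pack instructions are mirror images.

#include <cstdint>

// pkhbt Rd, Rn, Rm: bottom half from Rn, top half from Rm (shift omitted).
uint32_t pkhbt(uint32_t Rn, uint32_t Rm) {
  return (Rn & 0x0000FFFFu) | (Rm & 0xFFFF0000u);
}
// pkhtb Rd, Rn, Rm: top half from Rn, bottom half from Rm (shift omitted).
uint32_t pkhtb(uint32_t Rn, uint32_t Rm) {
  return (Rn & 0xFFFF0000u) | (Rm & 0x0000FFFFu);
}
// Hence the alias above: pkhtb(a, b) == pkhbt(b, a) when the shift is zero.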
// PUSH/POP aliases for STM/LDM
@@ -4620,16 +4669,16 @@ def : t2InstAlias<"strh${p} $Rt, $addr",
// Extend instruction optional rotate operand.
def : InstAlias<"sxtab${p} $Rd, $Rn, $Rm",
- (t2SXTAB rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>,
+ (t2SXTAB rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p), 0>,
Requires<[HasT2ExtractPack, IsThumb2]>;
def : InstAlias<"sxtah${p} $Rd, $Rn, $Rm",
- (t2SXTAH rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>,
+ (t2SXTAH rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p), 0>,
Requires<[HasT2ExtractPack, IsThumb2]>;
def : InstAlias<"sxtab16${p} $Rd, $Rn, $Rm",
- (t2SXTAB16 rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>,
+ (t2SXTAB16 rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p), 0>,
Requires<[HasT2ExtractPack, IsThumb2]>;
def : InstAlias<"sxtb16${p} $Rd, $Rm",
- (t2SXTB16 rGPR:$Rd, rGPR:$Rm, 0, pred:$p)>,
+ (t2SXTB16 rGPR:$Rd, rGPR:$Rm, 0, pred:$p), 0>,
Requires<[HasT2ExtractPack, IsThumb2]>;
def : t2InstAlias<"sxtb${p} $Rd, $Rm",
@@ -4642,16 +4691,16 @@ def : t2InstAlias<"sxth${p}.w $Rd, $Rm",
(t2SXTH rGPR:$Rd, rGPR:$Rm, 0, pred:$p)>;
def : InstAlias<"uxtab${p} $Rd, $Rn, $Rm",
- (t2UXTAB rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>,
+ (t2UXTAB rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p), 0>,
Requires<[HasT2ExtractPack, IsThumb2]>;
def : InstAlias<"uxtah${p} $Rd, $Rn, $Rm",
- (t2UXTAH rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>,
+ (t2UXTAH rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p), 0>,
Requires<[HasT2ExtractPack, IsThumb2]>;
def : InstAlias<"uxtab16${p} $Rd, $Rn, $Rm",
- (t2UXTAB16 rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p)>,
+ (t2UXTAB16 rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, 0, pred:$p), 0>,
Requires<[HasT2ExtractPack, IsThumb2]>;
def : InstAlias<"uxtb16${p} $Rd, $Rm",
- (t2UXTB16 rGPR:$Rd, rGPR:$Rm, 0, pred:$p)>,
+ (t2UXTB16 rGPR:$Rd, rGPR:$Rm, 0, pred:$p), 0>,
Requires<[HasT2ExtractPack, IsThumb2]>;
def : t2InstAlias<"uxtb${p} $Rd, $Rm",
@@ -4667,7 +4716,7 @@ def : t2InstAlias<"uxth${p}.w $Rd, $Rm",
def : t2InstAlias<"uxtb${p} $Rd, $Rm$rot",
(t2UXTB rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>;
def : InstAlias<"uxtb16${p} $Rd, $Rm$rot",
- (t2UXTB16 rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>,
+ (t2UXTB16 rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p), 0>,
Requires<[HasT2ExtractPack, IsThumb2]>;
def : t2InstAlias<"uxth${p} $Rd, $Rm$rot",
(t2UXTH rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>;
@@ -4675,7 +4724,7 @@ def : t2InstAlias<"uxth${p} $Rd, $Rm$rot",
def : t2InstAlias<"sxtb${p} $Rd, $Rm$rot",
(t2SXTB rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>;
def : InstAlias<"sxtb16${p} $Rd, $Rm$rot",
- (t2SXTB16 rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>,
+ (t2SXTB16 rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p), 0>,
Requires<[HasT2ExtractPack, IsThumb2]>;
def : t2InstAlias<"sxth${p} $Rd, $Rm$rot",
(t2SXTH rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>;
@@ -4764,9 +4813,14 @@ def : t2InstAlias<"ldrsh${p}.w $Rt, $addr",
def : t2InstAlias<"add${p} $Rd, pc, $imm",
(t2ADR rGPR:$Rd, imm0_4095:$imm, pred:$p)>;
+// Pseudo instruction ldr Rt, =immediate
+def t2LDRConstPool
+ : t2AsmPseudo<"ldr${p} $Rt, $immediate",
+ (ins GPRnopc:$Rt, const_pool_asm_imm:$immediate, pred:$p)>;
+
// PLD/PLDW/PLI with alternate literal form.
def : t2InstAlias<"pld${p} $addr",
(t2PLDpci t2ldr_pcrel_imm12:$addr, pred:$p)>;
def : InstAlias<"pli${p} $addr",
- (t2PLIpci t2ldr_pcrel_imm12:$addr, pred:$p)>,
+ (t2PLIpci t2ldr_pcrel_imm12:$addr, pred:$p), 0>,
Requires<[IsThumb2,HasV7]>;
diff --git a/lib/Target/ARM/ARMInstrVFP.td b/lib/Target/ARM/ARMInstrVFP.td
index 63e7940bb14e..e29d265ae3d1 100644
--- a/lib/Target/ARM/ARMInstrVFP.td
+++ b/lib/Target/ARM/ARMInstrVFP.td
@@ -30,6 +30,18 @@ def FPImmOperand : AsmOperandClass {
let ParserMethod = "parseFPImm";
}
+def vfp_f16imm : Operand<f16>,
+ PatLeaf<(f16 fpimm), [{
+ return ARM_AM::getFP16Imm(N->getValueAPF()) != -1;
+ }], SDNodeXForm<fpimm, [{
+ APFloat InVal = N->getValueAPF();
+ uint32_t enc = ARM_AM::getFP16Imm(InVal);
+ return CurDAG->getTargetConstant(enc, MVT::i32);
+ }]>> {
+ let PrintMethod = "printFPImmOperand";
+ let ParserMatchClass = FPImmOperand;
+}
+
def vfp_f32imm : Operand<f32>,
PatLeaf<(f32 fpimm), [{
return ARM_AM::getFP32Imm(N->getValueAPF()) != -1;
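For orientation, getFP16Imm accepts only the VFP modified-immediate set. A hedged restatement (assuming the usual +/-(16+m)/16 * 2^e rule with m in [0,15] and e in [-3,4]; the bit packing below is illustrative, not the exact encoder):

#include <cmath>

int getFP16ImmSketch(double V) {
  int Sign = std::signbit(V) ? 1 : 0;
  V = std::fabs(V);
  for (int E = -3; E <= 4; ++E)
    for (int M = 0; M < 16; ++M)
      if (V == std::ldexp(16 + M, E - 4))       // (16+M)/16 * 2^E
        return (Sign << 7) | ((E + 3) << 4) | M;
  return -1;                                    // not encodable in 8 bits
}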
@@ -98,6 +110,11 @@ def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
let D = VFPNeonDomain;
}
+def VLDRH : AHI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5fp16:$addr),
+ IIC_fpLoad16, "vldr", ".16\t$Sd, $addr",
+ []>,
+ Requires<[HasFullFP16]>;
+
} // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'
def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
@@ -112,6 +129,11 @@ def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
let D = VFPNeonDomain;
}
+def VSTRH : AHI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5fp16:$addr),
+ IIC_fpStore16, "vstr", ".16\t$Sd, $addr",
+ []>,
+ Requires<[HasFullFP16]>;
+
//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//
@@ -200,6 +222,37 @@ defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpStore_m, IIC_fpStore_mu>;
def : MnemonicAlias<"vldm", "vldmia">;
def : MnemonicAlias<"vstm", "vstmia">;
+
+//===----------------------------------------------------------------------===//
+// Lazy load / store multiple Instructions
+//
+let mayLoad = 1 in
+def VLLDM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
+ IIC_fpLoad_m, "vlldm${p}\t$Rn", "", []>,
+ Requires<[HasV8MMainline, Has8MSecExt]> {
+ let Inst{24-23} = 0b00;
+ let Inst{22} = 0;
+ let Inst{21} = 1;
+ let Inst{20} = 1;
+ let Inst{15-12} = 0;
+ let Inst{7-0} = 0;
+ let mayLoad = 1;
+}
+
+let mayStore = 1 in
+def VLSTM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
+ IIC_fpStore_m, "vlstm${p}\t$Rn", "", []>,
+ Requires<[HasV8MMainline, Has8MSecExt]> {
+ let Inst{24-23} = 0b00;
+ let Inst{22} = 0;
+ let Inst{21} = 1;
+ let Inst{20} = 0;
+ let Inst{15-12} = 0;
+ let Inst{7-0} = 0;
+ let mayStore = 1;
+}
+
+
// FLDM/FSTM - Load / Store multiple single / double precision registers for
// pre-ARMv6 cores.
// These instructions are deprecated!
@@ -221,13 +274,13 @@ def : VFP2MnemonicAlias<"fstmdbd", "vstmdb">;
def : VFP2MnemonicAlias<"fstmead", "vstmia">;
def : VFP2MnemonicAlias<"fstmfdd", "vstmdb">;
-def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>,
+def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r), 0>,
Requires<[HasVFP2]>;
-def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>,
+def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r), 0>,
Requires<[HasVFP2]>;
-def : InstAlias<"vpop${p} $r", (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>,
+def : InstAlias<"vpop${p} $r", (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r), 0>,
Requires<[HasVFP2]>;
-def : InstAlias<"vpop${p} $r", (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>,
+def : InstAlias<"vpop${p} $r", (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r), 0>,
Requires<[HasVFP2]>;
defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
(VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>;
@@ -295,6 +348,12 @@ def VADDS : ASbIn<0b11100, 0b11, 0, 0,
let D = VFPNeonA8Domain;
}
+let TwoOperandAliasConstraint = "$Sn = $Sd" in
+def VADDH : AHbI<0b11100, 0b11, 0, 0,
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ IIC_fpALU16, "vadd", ".f16\t$Sd, $Sn, $Sm",
+ []>;
+
let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VSUBD : ADbI<0b11100, 0b11, 1, 0,
(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
@@ -311,6 +370,12 @@ def VSUBS : ASbIn<0b11100, 0b11, 1, 0,
let D = VFPNeonA8Domain;
}
+let TwoOperandAliasConstraint = "$Sn = $Sd" in
+def VSUBH : AHbI<0b11100, 0b11, 1, 0,
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ IIC_fpALU16, "vsub", ".f16\t$Sd, $Sn, $Sm",
+ []>;
+
let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VDIVD : ADbI<0b11101, 0b00, 0, 0,
(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
@@ -323,6 +388,12 @@ def VDIVS : ASbI<0b11101, 0b00, 0, 0,
IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
[(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>;
+let TwoOperandAliasConstraint = "$Sn = $Sd" in
+def VDIVH : AHbI<0b11101, 0b00, 0, 0,
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ IIC_fpDIV16, "vdiv", ".f16\t$Sd, $Sn, $Sm",
+ []>;
+
let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VMULD : ADbI<0b11100, 0b10, 0, 0,
(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
@@ -339,6 +410,12 @@ def VMULS : ASbIn<0b11100, 0b10, 0, 0,
let D = VFPNeonA8Domain;
}
+let TwoOperandAliasConstraint = "$Sn = $Sd" in
+def VMULH : AHbI<0b11100, 0b10, 0, 0,
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ IIC_fpMUL16, "vmul", ".f16\t$Sd, $Sn, $Sm",
+ []>;
+
def VNMULD : ADbI<0b11100, 0b10, 1, 0,
(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
@@ -353,9 +430,20 @@ def VNMULS : ASbI<0b11100, 0b10, 1, 0,
let D = VFPNeonA8Domain;
}
+def VNMULH : AHbI<0b11100, 0b10, 1, 0,
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ IIC_fpMUL16, "vnmul", ".f16\t$Sd, $Sn, $Sm",
+ []>;
+
multiclass vsel_inst<string op, bits<2> opc, int CC> {
let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
Uses = [CPSR], AddedComplexity = 4 in {
+ def H : AHbInp<0b11100, opc, 0,
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ NoItinerary, !strconcat("vsel", op, ".f16\t$Sd, $Sn, $Sm"),
+ []>,
+ Requires<[HasFullFP16]>;
+
def S : ASbInp<0b11100, opc, 0,
(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
NoItinerary, !strconcat("vsel", op, ".f32\t$Sd, $Sn, $Sm"),
@@ -378,6 +466,12 @@ defm VSELVS : vsel_inst<"vs", 0b01, 6>;
multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
let DecoderNamespace = "VFPV8", PostEncoderMethod = "" in {
+ def H : AHbInp<0b11101, 0b00, opc,
+ (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ NoItinerary, !strconcat(op, ".f16\t$Sd, $Sn, $Sm"),
+ []>,
+ Requires<[HasFullFP16]>;
+
def S : ASbInp<0b11101, 0b00, opc,
(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
NoItinerary, !strconcat(op, ".f32\t$Sd, $Sn, $Sm"),
@@ -418,6 +512,12 @@ def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
let D = VFPNeonA8Domain;
}
+def VCMPEH : AHuI<0b11101, 0b11, 0b0100, 0b11, 0,
+ (outs), (ins SPR:$Sd, SPR:$Sm),
+ IIC_fpCMP16, "vcmpe", ".f16\t$Sd, $Sm",
+ []>;
+
+
// FIXME: Verify encoding after integrated assembler is working.
def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
(outs), (ins DPR:$Dd, DPR:$Dm),
@@ -432,6 +532,11 @@ def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
}
+
+def VCMPH : AHuI<0b11101, 0b11, 0b0100, 0b01, 0,
+ (outs), (ins SPR:$Sd, SPR:$Sm),
+ IIC_fpCMP16, "vcmp", ".f16\t$Sd, $Sm",
+ []>;
} // Defs = [FPSCR_NZCV]
//===----------------------------------------------------------------------===//
@@ -452,6 +557,11 @@ def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
let D = VFPNeonA8Domain;
}
+def VABSH : AHuI<0b11101, 0b11, 0b0000, 0b11, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpUNA16, "vabs", ".f16\t$Sd, $Sm",
+ []>;
+
let Defs = [FPSCR_NZCV] in {
def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
(outs), (ins DPR:$Dd),
@@ -473,6 +583,14 @@ def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
let D = VFPNeonA8Domain;
}
+def VCMPEZH : AHuI<0b11101, 0b11, 0b0101, 0b11, 0,
+ (outs), (ins SPR:$Sd),
+ IIC_fpCMP16, "vcmpe", ".f16\t$Sd, #0",
+ []> {
+ let Inst{3-0} = 0b0000;
+ let Inst{5} = 0;
+}
+
// FIXME: Verify encoding after integrated assembler is working.
def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
(outs), (ins DPR:$Dd),
@@ -493,6 +611,14 @@ def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
}
+
+def VCMPZH : AHuI<0b11101, 0b11, 0b0101, 0b01, 0,
+ (outs), (ins SPR:$Sd),
+ IIC_fpCMP16, "vcmp", ".f16\t$Sd, #0",
+ []> {
+ let Inst{3-0} = 0b0000;
+ let Inst{5} = 0;
+}
} // Defs = [FPSCR_NZCV]
def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
@@ -627,6 +753,22 @@ def : Pat<(f64 (f16_to_fp GPR:$a)),
multiclass vcvt_inst<string opc, bits<2> rm,
SDPatternOperator node = null_frag> {
let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
+ def SH : AHuInp<0b11101, 0b11, 0b1100, 0b11, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ NoItinerary, !strconcat("vcvt", opc, ".s32.f16\t$Sd, $Sm"),
+ []>,
+ Requires<[HasFullFP16]> {
+ let Inst{17-16} = rm;
+ }
+
+ def UH : AHuInp<0b11101, 0b11, 0b1100, 0b01, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ NoItinerary, !strconcat("vcvt", opc, ".u32.f16\t$Sd, $Sm"),
+ []>,
+ Requires<[HasFullFP16]> {
+ let Inst{17-16} = rm;
+ }
+
def SS : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
(outs SPR:$Sd), (ins SPR:$Sm),
NoItinerary, !strconcat("vcvt", opc, ".s32.f32\t$Sd, $Sm"),
@@ -715,7 +857,21 @@ def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
let D = VFPNeonA8Domain;
}
+def VNEGH : AHuI<0b11101, 0b11, 0b0001, 0b01, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpUNA16, "vneg", ".f16\t$Sd, $Sm",
+ []>;
+
multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node> {
+ def H : AHuI<0b11101, 0b11, 0b0110, 0b11, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ NoItinerary, !strconcat("vrint", opc), ".f16\t$Sd, $Sm",
+ []>,
+ Requires<[HasFullFP16]> {
+ let Inst{7} = op2;
+ let Inst{16} = op;
+ }
+
def S : ASuI<0b11101, 0b11, 0b0110, 0b11, 0,
(outs SPR:$Sd), (ins SPR:$Sm),
NoItinerary, !strconcat("vrint", opc), ".f32\t$Sd, $Sm",
@@ -733,11 +889,14 @@ multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node> {
let Inst{16} = op;
}
+ def : InstAlias<!strconcat("vrint", opc, "$p.f16.f16\t$Sd, $Sm"),
+ (!cast<Instruction>(NAME#"H") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
+ Requires<[HasFullFP16]>;
def : InstAlias<!strconcat("vrint", opc, "$p.f32.f32\t$Sd, $Sm"),
- (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm, pred:$p)>,
+ (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
Requires<[HasFPARMv8]>;
def : InstAlias<!strconcat("vrint", opc, "$p.f64.f64\t$Dd, $Dm"),
- (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm, pred:$p)>,
+ (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm, pred:$p), 0>,
Requires<[HasFPARMv8,HasDPVFP]>;
}
@@ -748,6 +907,13 @@ defm VRINTX : vrint_inst_zrx<"x", 1, 0, frint>;
multiclass vrint_inst_anpm<string opc, bits<2> rm,
SDPatternOperator node = null_frag> {
let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
+ def H : AHuInp<0b11101, 0b11, 0b1000, 0b01, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ NoItinerary, !strconcat("vrint", opc, ".f16\t$Sd, $Sm"),
+ []>,
+ Requires<[HasFullFP16]> {
+ let Inst{17-16} = rm;
+ }
def S : ASuInp<0b11101, 0b11, 0b1000, 0b01, 0,
(outs SPR:$Sd), (ins SPR:$Sm),
NoItinerary, !strconcat("vrint", opc, ".f32\t$Sd, $Sm"),
@@ -765,10 +931,10 @@ multiclass vrint_inst_anpm<string opc, bits<2> rm,
}
def : InstAlias<!strconcat("vrint", opc, ".f32.f32\t$Sd, $Sm"),
- (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm)>,
+ (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm), 0>,
Requires<[HasFPARMv8]>;
def : InstAlias<!strconcat("vrint", opc, ".f64.f64\t$Dd, $Dm"),
- (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm)>,
+ (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm), 0>,
Requires<[HasFPARMv8,HasDPVFP]>;
}
@@ -787,6 +953,11 @@ def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
[(set SPR:$Sd, (fsqrt SPR:$Sm))]>;
+def VSQRTH : AHuI<0b11101, 0b11, 0b0001, 0b11, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpSQRT16, "vsqrt", ".f16\t$Sd, $Sm",
+ []>;
+
let hasSideEffects = 0 in {
def VMOVD : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
(outs DPR:$Dd), (ins DPR:$Dm),
@@ -795,6 +966,18 @@ def VMOVD : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
def VMOVS : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
(outs SPR:$Sd), (ins SPR:$Sm),
IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>;
+
+let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
+def VMOVH : ASuInp<0b11101, 0b11, 0b0000, 0b01, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpUNA16, "vmovx.f16\t$Sd, $Sm", []>,
+ Requires<[HasFullFP16]>;
+
+def VINSH : ASuInp<0b11101, 0b11, 0b0000, 0b11, 0,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpUNA16, "vins.f16\t$Sd, $Sm", []>,
+ Requires<[HasFullFP16]>;
+} // PostEncoderMethod
} // hasSideEffects
//===----------------------------------------------------------------------===//
@@ -966,6 +1149,44 @@ def VMOVSRR : AVConv5I<0b11000100, 0b1010,
let DecoderMethod = "DecodeVMOVSRR";
}
+// Move H->R, clearing top 16 bits
+def VMOVRH : AVConv2I<0b11100001, 0b1001,
+ (outs GPR:$Rt), (ins SPR:$Sn),
+ IIC_fpMOVSI, "vmov", ".f16\t$Rt, $Sn",
+ []>,
+ Requires<[HasFullFP16]> {
+ // Instruction operands.
+ bits<4> Rt;
+ bits<5> Sn;
+
+ // Encode instruction operands.
+ let Inst{19-16} = Sn{4-1};
+ let Inst{7} = Sn{0};
+ let Inst{15-12} = Rt;
+
+ let Inst{6-5} = 0b00;
+ let Inst{3-0} = 0b0000;
+}
+
+// Move R->H, clearing top 16 bits
+def VMOVHR : AVConv4I<0b11100000, 0b1001,
+ (outs SPR:$Sn), (ins GPR:$Rt),
+ IIC_fpMOVIS, "vmov", ".f16\t$Sn, $Rt",
+ []>,
+ Requires<[HasFullFP16]> {
+ // Instruction operands.
+ bits<5> Sn;
+ bits<4> Rt;
+
+ // Encode instruction operands.
+ let Inst{19-16} = Sn{4-1};
+ let Inst{7} = Sn{0};
+ let Inst{15-12} = Rt;
+
+ let Inst{6-5} = 0b00;
+ let Inst{3-0} = 0b0000;
+}
+
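A minimal model of the two moves just defined, as I read the comments: both transfer a 16-bit half-precision payload in the low half and zero the upper half of the destination.

#include <cstdint>

uint32_t vmovrh(uint32_t Sn) { return Sn & 0xFFFFu; } // vmov.f16 Rt, Sn
uint32_t vmovhr(uint32_t Rt) { return Rt & 0xFFFFu; } // vmov.f16 Sn, Rt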
// FMRDH: SPR -> GPR
// FMRDL: SPR -> GPR
// FMRRS: SPR -> GPR
@@ -1011,6 +1232,25 @@ class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
let Inst{22} = Sd{0};
}
+class AVConv1IHs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
+ bits<4> opcod4, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm,
+ list<dag> pattern>
+ : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
+ pattern> {
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Sm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+
+ let Predicates = [HasFullFP16];
+}
+
def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
(outs DPR:$Dd), (ins SPR:$Sm),
IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
@@ -1043,6 +1283,13 @@ def : VFPNoNEONPat<(f32 (sint_to_fp GPR:$a)),
def : VFPNoNEONPat<(f32 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
(VSITOS (VLDRS addrmode5:$a))>;
+def VSITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpCVTIH, "vcvt", ".f16.s32\t$Sd, $Sm",
+ []> {
+ let Inst{7} = 1; // s32
+}
+
def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
(outs DPR:$Dd), (ins SPR:$Sm),
IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
@@ -1075,6 +1322,13 @@ def : VFPNoNEONPat<(f32 (uint_to_fp GPR:$a)),
def : VFPNoNEONPat<(f32 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
(VUITOS (VLDRS addrmode5:$a))>;
+def VUITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpCVTIH, "vcvt", ".f16.u32\t$Sd, $Sm",
+ []> {
+ let Inst{7} = 0; // u32
+}
+
// FP -> Int:
class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
@@ -1113,6 +1367,25 @@ class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
let Inst{22} = Sd{0};
}
+class AVConv1IsH_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
+ bits<4> opcod4, dag oops, dag iops,
+ InstrItinClass itin, string opc, string asm,
+ list<dag> pattern>
+ : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
+ pattern> {
+ // Instruction operands.
+ bits<5> Sd;
+ bits<5> Sm;
+
+ // Encode instruction operands.
+ let Inst{3-0} = Sm{4-1};
+ let Inst{5} = Sm{0};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{22} = Sd{0};
+
+ let Predicates = [HasFullFP16];
+}
+
// Always set Z bit in the instruction, i.e. "round towards zero" variants.
def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
(outs SPR:$Sd), (ins DPR:$Dm),
@@ -1147,6 +1420,13 @@ def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_sint (f32 SPR:$a))),
addrmode5:$ptr),
(VSTRS (VTOSIZS SPR:$a), addrmode5:$ptr)>;
+def VTOSIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpCVTHI, "vcvt", ".s32.f16\t$Sd, $Sm",
+ []> {
+ let Inst{7} = 1; // Z bit
+}
+
def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
(outs SPR:$Sd), (ins DPR:$Dm),
IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
@@ -1180,6 +1460,13 @@ def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_uint (f32 SPR:$a))),
addrmode5:$ptr),
(VSTRS (VTOUIZS SPR:$a), addrmode5:$ptr)>;
+def VTOUIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpCVTHI, "vcvt", ".u32.f16\t$Sd, $Sm",
+ []> {
+ let Inst{7} = 1; // Z bit
+}
+
// And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
let Uses = [FPSCR] in {
// FIXME: Verify encoding after integrated assembler is working.
@@ -1197,6 +1484,13 @@ def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
let Inst{7} = 0; // Z bit
}
+def VTOSIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpCVTHI, "vcvtr", ".s32.f16\t$Sd, $Sm",
+ []> {
+ let Inst{7} = 0; // Z bit
+}
+
def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
(outs SPR:$Sd), (ins DPR:$Dm),
IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
@@ -1210,6 +1504,13 @@ def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
[(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]> {
let Inst{7} = 0; // Z bit
}
+
+def VTOUIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
+ (outs SPR:$Sd), (ins SPR:$Sm),
+ IIC_fpCVTHI, "vcvtr", ".u32.f16\t$Sd, $Sm",
+ []> {
+ let Inst{7} = 0; // Z bit
+}
}
// Convert between floating-point and fixed-point
@@ -1249,6 +1550,26 @@ class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
let Predicates = [HasVFP2, HasDPVFP];
}
+def VTOSHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 0,
+ (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
+ IIC_fpCVTHI, "vcvt", ".s16.f16\t$dst, $a, $fbits", []>,
+ Requires<[HasFullFP16]>;
+
+def VTOUHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 0,
+ (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
+ IIC_fpCVTHI, "vcvt", ".u16.f16\t$dst, $a, $fbits", []>,
+ Requires<[HasFullFP16]>;
+
+def VTOSLH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 1,
+ (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
+ IIC_fpCVTHI, "vcvt", ".s32.f16\t$dst, $a, $fbits", []>,
+ Requires<[HasFullFP16]>;
+
+def VTOULH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 1,
+ (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
+ IIC_fpCVTHI, "vcvt", ".u32.f16\t$dst, $a, $fbits", []>,
+ Requires<[HasFullFP16]>;
+
def VTOSHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 0,
(outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits", []> {
@@ -1299,6 +1620,26 @@ def VTOULD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 1,
// Fixed-Point to FP:
+def VSHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 0,
+ (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
+ IIC_fpCVTIH, "vcvt", ".f16.s16\t$dst, $a, $fbits", []>,
+ Requires<[HasFullFP16]>;
+
+def VUHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 0,
+ (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
+ IIC_fpCVTIH, "vcvt", ".f16.u16\t$dst, $a, $fbits", []>,
+ Requires<[HasFullFP16]>;
+
+def VSLTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 1,
+ (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
+ IIC_fpCVTIH, "vcvt", ".f16.s32\t$dst, $a, $fbits", []>,
+ Requires<[HasFullFP16]>;
+
+def VULTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 1,
+ (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
+ IIC_fpCVTIH, "vcvt", ".f16.u32\t$dst, $a, $fbits", []>,
+ Requires<[HasFullFP16]>;
+
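A worked model of these fixed-point converts: with $fbits fractional bits, an integer payload I stands for I / 2^fbits. (The truncating cast below mirrors the round-toward-zero variants; rounding in general is illustrative.)

#include <cmath>
#include <cstdint>

double fixedToFloat(int32_t I, unsigned FBits) {
  return std::ldexp(static_cast<double>(I), -static_cast<int>(FBits));
}
int32_t floatToFixed(double V, unsigned FBits) {
  return static_cast<int32_t>(std::ldexp(V, static_cast<int>(FBits)));
}
// Example: floatToFixed(1.5, 4) == 24, and fixedToFloat(24, 4) == 1.5.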
def VSHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 0,
(outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits", []> {
@@ -1373,6 +1714,13 @@ def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
let D = VFPNeonA8Domain;
}
+def VMLAH : AHbI<0b11100, 0b00, 0, 0,
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpMAC16, "vmla", ".f16\t$Sd, $Sn, $Sm",
+ []>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasFullFP16,UseFPVMLx,DontUseFusedMAC]>;
+
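These multiply-accumulate defs are the non-fused forms: the product is rounded before the accumulate, unlike the VFMA family further down, which uses a single rounding. Shown in float for brevity (the new defs are the f16 analogues):

#include <cmath>

float vmla(float Acc, float N, float M) { return Acc + N * M; }          // two roundings
float vfma(float Acc, float N, float M) { return std::fmaf(N, M, Acc); } // one rounding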
def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
(VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
@@ -1400,6 +1748,13 @@ def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
let D = VFPNeonA8Domain;
}
+def VMLSH : AHbI<0b11100, 0b00, 1, 0,
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpMAC16, "vmls", ".f16\t$Sd, $Sn, $Sm",
+ []>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasFullFP16,UseFPVMLx,DontUseFusedMAC]>;
+
def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
(VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
@@ -1427,6 +1782,13 @@ def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
let D = VFPNeonA8Domain;
}
+def VNMLAH : AHbI<0b11100, 0b01, 1, 0,
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpMAC16, "vnmla", ".f16\t$Sd, $Sn, $Sm",
+ []>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasFullFP16,UseFPVMLx,DontUseFusedMAC]>;
+
def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
(VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
@@ -1453,6 +1815,13 @@ def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
let D = VFPNeonA8Domain;
}
+def VNMLSH : AHbI<0b11100, 0b01, 0, 0,
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpMAC16, "vnmls", ".f16\t$Sd, $Sn, $Sm",
+ []>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasFullFP16,UseFPVMLx,DontUseFusedMAC]>;
+
def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
(VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
@@ -1482,6 +1851,13 @@ def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
// VFP pipelines.
}
+def VFMAH : AHbI<0b11101, 0b10, 0, 0,
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpFMAC16, "vfma", ".f16\t$Sd, $Sn, $Sm",
+ []>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasFullFP16,UseFusedMAC]>;
+
def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
(VFMAD DPR:$dstin, DPR:$a, DPR:$b)>,
Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
@@ -1517,6 +1893,13 @@ def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
// VFP pipelines.
}
+def VFMSH : AHbI<0b11101, 0b10, 1, 0,
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpFMAC16, "vfms", ".f16\t$Sd, $Sn, $Sm",
+ []>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasFullFP16,UseFusedMAC]>;
+
def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
(VFMSD DPR:$dstin, DPR:$a, DPR:$b)>,
Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
@@ -1559,6 +1942,13 @@ def VFNMAS : ASbI<0b11101, 0b01, 1, 0,
// VFP pipelines.
}
+def VFNMAH : AHbI<0b11101, 0b01, 1, 0,
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpFMAC16, "vfnma", ".f16\t$Sd, $Sn, $Sm",
+ []>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasFullFP16,UseFusedMAC]>;
+
def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
(VFNMAD DPR:$dstin, DPR:$a, DPR:$b)>,
Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
@@ -1600,6 +1990,13 @@ def VFNMSS : ASbI<0b11101, 0b01, 0, 0,
// VFP pipelines.
}
+def VFNMSH : AHbI<0b11101, 0b01, 0, 0,
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpFMAC16, "vfnms", ".f16\t$Sd, $Sn, $Sm",
+ []>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasFullFP16,UseFusedMAC]>;
+
def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
(VFNMSD DPR:$dstin, DPR:$a, DPR:$b)>,
Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
@@ -1780,6 +2177,23 @@ def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
let Inst{7-4} = 0b0000;
let Inst{3-0} = imm{3-0};
}
+
+def FCONSTH : VFPAI<(outs SPR:$Sd), (ins vfp_f16imm:$imm),
+ VFPMiscFrm, IIC_fpUNA16,
+ "vmov", ".f16\t$Sd, $imm",
+ []>, Requires<[HasFullFP16]> {
+ bits<5> Sd;
+ bits<8> imm;
+
+ let Inst{27-23} = 0b11101;
+ let Inst{22} = Sd{0};
+ let Inst{21-20} = 0b11;
+ let Inst{19-16} = imm{7-4};
+ let Inst{15-12} = Sd{4-1};
+ let Inst{11-8} = 0b1001; // Half precision
+ let Inst{7-4} = 0b0000;
+ let Inst{3-0} = imm{3-0};
+}
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 6e7e47b8706a..62d57f3f4986 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -60,9 +60,14 @@ STATISTIC(NumSTRD2STM, "Number of strd instructions turned back into stm");
STATISTIC(NumLDRD2LDR, "Number of ldrd instructions turned back into ldr's");
STATISTIC(NumSTRD2STR, "Number of strd instructions turned back into str's");
-namespace llvm {
-void initializeARMLoadStoreOptPass(PassRegistry &);
-}
+/// This switch disables formation of double/multi instructions that could
+/// potentially lead to (new) alignment traps even with CCR.UNALIGN_TRP
+/// disabled. This can be used to create libraries that are robust even when
+/// users provoke undefined behaviour by supplying misaligned pointers.
+/// \see mayCombineMisaligned()
+static cl::opt<bool>
+AssumeMisalignedLoadStores("arm-assume-misaligned-load-store", cl::Hidden,
+ cl::init(false), cl::desc("Be more conservative in ARM load/store opt"));
#define ARM_LOAD_STORE_OPT_NAME "ARM load / store optimization pass"
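Illustrative user code (not from the source) of the hazard the new option guards against: dereferencing a pointer that breaks its type's alignment assumption is undefined behaviour, but single LDR/STR happen to tolerate it when CCR.UNALIGN_TRP is clear, while a combined LDRD/LDM always requires word alignment and would trap.

#include <cstdint>

uint64_t readPair(const unsigned char *P) {        // P may be misaligned
  auto *W = reinterpret_cast<const uint32_t *>(P); // UB if P % 4 != 0
  return static_cast<uint64_t>(W[0]) | (static_cast<uint64_t>(W[1]) << 32);
}
// Building with e.g. -mllvm -arm-assume-misaligned-load-store keeps these as
// two plain LDRs instead of an LDRD.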
@@ -71,9 +76,7 @@ namespace {
/// form ldm / stm instructions.
struct ARMLoadStoreOpt : public MachineFunctionPass {
static char ID;
- ARMLoadStoreOpt() : MachineFunctionPass(ID) {
- initializeARMLoadStoreOptPass(*PassRegistry::getPassRegistry());
- }
+ ARMLoadStoreOpt() : MachineFunctionPass(ID) {}
const MachineFunction *MF;
const TargetInstrInfo *TII;
@@ -90,6 +93,11 @@ namespace {
bool runOnMachineFunction(MachineFunction &Fn) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return ARM_LOAD_STORE_OPT_NAME;
}
@@ -101,8 +109,8 @@ namespace {
MachineInstr *MI;
int Offset; ///< Load/Store offset.
unsigned Position; ///< Position as counted from end of basic block.
- MemOpQueueEntry(MachineInstr *MI, int Offset, unsigned Position)
- : MI(MI), Offset(Offset), Position(Position) {}
+ MemOpQueueEntry(MachineInstr &MI, int Offset, unsigned Position)
+ : MI(&MI), Offset(Offset), Position(Position) {}
};
typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
@@ -131,17 +139,19 @@ namespace {
MachineBasicBlock::const_iterator Before);
unsigned findFreeReg(const TargetRegisterClass &RegClass);
void UpdateBaseRegUses(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- DebugLoc DL, unsigned Base, unsigned WordOffset,
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
+ unsigned Base, unsigned WordOffset,
ARMCC::CondCodes Pred, unsigned PredReg);
- MachineInstr *CreateLoadStoreMulti(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertBefore, int Offset, unsigned Base,
- bool BaseKill, unsigned Opcode, ARMCC::CondCodes Pred, unsigned PredReg,
- DebugLoc DL, ArrayRef<std::pair<unsigned, bool>> Regs);
- MachineInstr *CreateLoadStoreDouble(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertBefore, int Offset, unsigned Base,
- bool BaseKill, unsigned Opcode, ARMCC::CondCodes Pred, unsigned PredReg,
- DebugLoc DL, ArrayRef<std::pair<unsigned, bool>> Regs) const;
+ MachineInstr *CreateLoadStoreMulti(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
+ int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
+ ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
+ ArrayRef<std::pair<unsigned, bool>> Regs);
+ MachineInstr *CreateLoadStoreDouble(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
+ int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
+ ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
+ ArrayRef<std::pair<unsigned, bool>> Regs) const;
void FormCandidates(const MemOpQueue &MemOps);
MachineInstr *MergeOpsUpdate(const MergeCandidate &Cand);
bool FixInvalidRegPairOp(MachineBasicBlock &MBB,
@@ -156,10 +166,11 @@ namespace {
char ARMLoadStoreOpt::ID = 0;
}
-INITIALIZE_PASS(ARMLoadStoreOpt, "arm-load-store-opt", ARM_LOAD_STORE_OPT_NAME, false, false)
+INITIALIZE_PASS(ARMLoadStoreOpt, "arm-ldst-opt", ARM_LOAD_STORE_OPT_NAME, false,
+ false)
-static bool definesCPSR(const MachineInstr *MI) {
- for (const auto &MO : MI->operands()) {
+static bool definesCPSR(const MachineInstr &MI) {
+ for (const auto &MO : MI.operands()) {
if (!MO.isReg())
continue;
if (MO.isDef() && MO.getReg() == ARM::CPSR && !MO.isDead())
@@ -171,11 +182,11 @@ static bool definesCPSR(const MachineInstr *MI) {
return false;
}
-static int getMemoryOpOffset(const MachineInstr *MI) {
- unsigned Opcode = MI->getOpcode();
+static int getMemoryOpOffset(const MachineInstr &MI) {
+ unsigned Opcode = MI.getOpcode();
bool isAM3 = Opcode == ARM::LDRD || Opcode == ARM::STRD;
- unsigned NumOperands = MI->getDesc().getNumOperands();
- unsigned OffField = MI->getOperand(NumOperands-3).getImm();
+ unsigned NumOperands = MI.getDesc().getNumOperands();
+ unsigned OffField = MI.getOperand(NumOperands - 3).getImm();
if (Opcode == ARM::t2LDRi12 || Opcode == ARM::t2LDRi8 ||
Opcode == ARM::t2STRi12 || Opcode == ARM::t2STRi8 ||
@@ -436,12 +447,12 @@ static unsigned getLSMultipleTransferSize(const MachineInstr *MI) {
/// Update future uses of the base register with the offset introduced
/// due to writeback. This function only works on Thumb1.
-void
-ARMLoadStoreOpt::UpdateBaseRegUses(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- DebugLoc DL, unsigned Base,
- unsigned WordOffset,
- ARMCC::CondCodes Pred, unsigned PredReg) {
+void ARMLoadStoreOpt::UpdateBaseRegUses(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, unsigned Base,
+ unsigned WordOffset,
+ ARMCC::CondCodes Pred,
+ unsigned PredReg) {
assert(isThumb1 && "Can only update base register uses for Thumb1!");
// Start updating any instructions with immediate offsets. Insert a SUB before
// the first non-updateable instruction (if any).
@@ -475,7 +486,7 @@ ARMLoadStoreOpt::UpdateBaseRegUses(MachineBasicBlock &MBB,
InsertSub = true;
} else if ((Opc == ARM::tSUBi8 || Opc == ARM::tADDi8) &&
- !definesCPSR(MBBI)) {
+ !definesCPSR(*MBBI)) {
// SUBS/ADDS using this register, with a dead def of the CPSR.
// Merge it with the update; if the merged offset is too large,
// insert a new sub instead.
@@ -499,7 +510,7 @@ ARMLoadStoreOpt::UpdateBaseRegUses(MachineBasicBlock &MBB,
InsertSub = true;
}
- } else if (definesCPSR(MBBI) || MBBI->isCall() || MBBI->isBranch()) {
+ } else if (definesCPSR(*MBBI) || MBBI->isCall() || MBBI->isBranch()) {
// Since SUBS sets the condition flags, we can't place the base reset
// after an instruction that has a live CPSR def.
// The base register might also contain an argument for a function call.
@@ -552,7 +563,7 @@ void ARMLoadStoreOpt::moveLiveRegsBefore(const MachineBasicBlock &MBB,
// Initialize if we never queried in this block.
if (!LiveRegsValid) {
LiveRegs.init(TRI);
- LiveRegs.addLiveOuts(&MBB, true);
+ LiveRegs.addLiveOuts(MBB);
LiveRegPos = MBB.end();
LiveRegsValid = true;
}
@@ -574,10 +585,11 @@ static bool ContainsReg(const ArrayRef<std::pair<unsigned, bool>> &Regs,
/// Create and insert an LDM or STM with Base as base register and registers in
/// Regs as the register operands that would be loaded / stored. Returns the
/// newly created instruction, or nullptr if no transformation was done.
-MachineInstr *ARMLoadStoreOpt::CreateLoadStoreMulti(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertBefore, int Offset, unsigned Base,
- bool BaseKill, unsigned Opcode, ARMCC::CondCodes Pred, unsigned PredReg,
- DebugLoc DL, ArrayRef<std::pair<unsigned, bool>> Regs) {
+MachineInstr *ARMLoadStoreOpt::CreateLoadStoreMulti(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
+ int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
+ ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
+ ArrayRef<std::pair<unsigned, bool>> Regs) {
unsigned NumRegs = Regs.size();
assert(NumRegs > 1);
@@ -770,10 +782,11 @@ MachineInstr *ARMLoadStoreOpt::CreateLoadStoreMulti(MachineBasicBlock &MBB,
return MIB.getInstr();
}
-MachineInstr *ARMLoadStoreOpt::CreateLoadStoreDouble(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertBefore, int Offset, unsigned Base,
- bool BaseKill, unsigned Opcode, ARMCC::CondCodes Pred, unsigned PredReg,
- DebugLoc DL, ArrayRef<std::pair<unsigned, bool>> Regs) const {
+MachineInstr *ARMLoadStoreOpt::CreateLoadStoreDouble(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
+ int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
+ ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
+ ArrayRef<std::pair<unsigned, bool>> Regs) const {
bool IsLoad = isi32Load(Opcode);
assert((IsLoad || isi32Store(Opcode)) && "Must have integer load or store");
unsigned LoadStoreOpcode = IsLoad ? ARM::t2LDRDi8 : ARM::t2STRDi8;
@@ -836,11 +849,11 @@ MachineInstr *ARMLoadStoreOpt::MergeOpsUpdate(const MergeCandidate &Cand) {
MachineInstr *LatestMI = Cand.Instrs[Cand.LatestMIIdx];
iterator InsertBefore = std::next(iterator(LatestMI));
MachineBasicBlock &MBB = *LatestMI->getParent();
- unsigned Offset = getMemoryOpOffset(First);
+ unsigned Offset = getMemoryOpOffset(*First);
unsigned Base = getLoadStoreBaseOp(*First).getReg();
bool BaseKill = LatestMI->killsRegister(Base);
unsigned PredReg = 0;
- ARMCC::CondCodes Pred = getInstrPredicate(First, PredReg);
+ ARMCC::CondCodes Pred = getInstrPredicate(*First, PredReg);
DebugLoc DL = First->getDebugLoc();
MachineInstr *Merged = nullptr;
if (Cand.CanMergeToLSDouble)
@@ -916,6 +929,24 @@ static bool isValidLSDoubleOffset(int Offset) {
return (Value % 4) == 0 && Value < 1024;
}
+/// Return true for loads/stores that can be combined to a double/multi
+/// operation without increasing the requirements for alignment.
+static bool mayCombineMisaligned(const TargetSubtargetInfo &STI,
+ const MachineInstr &MI) {
+ // vldr/vstr trap on misaligned pointers anyway; forming vldm makes no
+ // difference.
+ unsigned Opcode = MI.getOpcode();
+ if (!isi32Load(Opcode) && !isi32Store(Opcode))
+ return true;
+
+ // Stack pointer alignment is out of the programmer's control, so we can trust
+ // SP-relative loads/stores.
+ if (getLoadStoreBaseOp(MI).getReg() == ARM::SP &&
+ STI.getFrameLowering()->getTransientStackAlignment() >= 4)
+ return true;
+ return false;
+}
+
/// Find candidates for load/store multiple merge in list of MemOpQueueEntries.
void ARMLoadStoreOpt::FormCandidates(const MemOpQueue &MemOps) {
const MachineInstr *FirstMI = MemOps[0].MI;
@@ -946,7 +977,7 @@ void ARMLoadStoreOpt::FormCandidates(const MemOpQueue &MemOps) {
bool CanMergeToLSMulti = true;
// Avoid vldm/vstm starting with an odd register number on targets where
// that needs more uops than single vldrs (e.g. Swift).
- if (STI->isSwift() && !isNotVFP && (PRegNum % 2) == 1)
+ if (STI->hasSlowOddRegister() && !isNotVFP && (PRegNum % 2) == 1)
CanMergeToLSMulti = false;
// LDRD/STRD do not allow SP/PC. LDM/STM do not support it or have it
@@ -954,6 +985,10 @@ void ARMLoadStoreOpt::FormCandidates(const MemOpQueue &MemOps) {
if (PReg == ARM::SP || PReg == ARM::PC)
CanMergeToLSMulti = CanMergeToLSDouble = false;
+ // Should we be conservative?
+ if (AssumeMisalignedLoadStores && !mayCombineMisaligned(*STI, *MI))
+ CanMergeToLSMulti = CanMergeToLSDouble = false;
+
// Merge following instructions where possible.
for (unsigned I = SIndex+1; I < EIndex; ++I, ++Count) {
int NewOffset = MemOps[I].Offset;
@@ -1102,11 +1137,11 @@ static int isIncrementOrDecrement(const MachineInstr &MI, unsigned Reg,
unsigned MIPredReg;
if (MI.getOperand(0).getReg() != Reg ||
MI.getOperand(1).getReg() != Reg ||
- getInstrPredicate(&MI, MIPredReg) != Pred ||
+ getInstrPredicate(MI, MIPredReg) != Pred ||
MIPredReg != PredReg)
return 0;
- if (CheckCPSRDef && definesCPSR(&MI))
+ if (CheckCPSRDef && definesCPSR(MI))
return 0;
return MI.getOperand(2).getImm() * Scale;
}
@@ -1169,7 +1204,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineInstr *MI) {
unsigned Base = BaseOP.getReg();
bool BaseKill = BaseOP.isKill();
unsigned PredReg = 0;
- ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
+ ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
unsigned Opcode = MI->getOpcode();
DebugLoc DL = MI->getDebugLoc();
@@ -1193,10 +1228,30 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineInstr *MI) {
} else {
MergeInstr = findIncDecAfter(MBBI, Base, Pred, PredReg, Offset);
if (((Mode != ARM_AM::ia && Mode != ARM_AM::ib) || Offset != Bytes) &&
- ((Mode != ARM_AM::da && Mode != ARM_AM::db) || Offset != -Bytes))
- return false;
+ ((Mode != ARM_AM::da && Mode != ARM_AM::db) || Offset != -Bytes)) {
+
+ // We couldn't find an inc/dec to merge. But if the base is dead, we
+ // can still change to a writeback form as that will save us 2 bytes
+ // of code size. It can create WAW hazards though, so only do it if
+ // we're minimizing code size.
+ if (!MBB.getParent()->getFunction()->optForMinSize() || !BaseKill)
+ return false;
+
+ bool HighRegsUsed = false;
+ for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i)
+ if (MI->getOperand(i).getReg() >= ARM::R8) {
+ HighRegsUsed = true;
+ break;
+ }
+
+ if (!HighRegsUsed)
+ MergeInstr = MBB.end();
+ else
+ return false;
+ }
}
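A hedged restatement of the high-register guard just added (my assumption for the 2-byte figure: only the 16-bit T1 LDM/STM encodings, which can name just R0-R7 and always write back, are smaller than the 32-bit form, so the rewrite helps only when every transferred register is low):

#include <vector>

bool onlyLowRegs(const std::vector<unsigned> &RegList) {
  for (unsigned R : RegList)
    if (R >= 8)       // R8 and above block the 16-bit encoding
      return false;
  return true;
}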
- MBB.erase(MergeInstr);
+ if (MergeInstr != MBB.end())
+ MBB.erase(MergeInstr);
unsigned NewOpc = getUpdatingLSMultipleOpcode(Opcode, Mode);
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc))
@@ -1291,7 +1346,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineInstr *MI) {
return false;
unsigned PredReg = 0;
- ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
+ ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
int Bytes = getLSMultipleTransferSize(MI);
MachineBasicBlock &MBB = *MI->getParent();
MachineBasicBlock::iterator MBBI(MI);
@@ -1388,7 +1443,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSDouble(MachineInstr &MI) const {
return false;
unsigned PredReg;
- ARMCC::CondCodes Pred = getInstrPredicate(&MI, PredReg);
+ ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
MachineBasicBlock::iterator MBBI(MI);
MachineBasicBlock &MBB = *MI.getParent();
int Offset;
@@ -1487,14 +1542,13 @@ static bool isMemoryOp(const MachineInstr &MI) {
}
static void InsertLDR_STR(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- int Offset, bool isDef,
- DebugLoc DL, unsigned NewOpc,
+ MachineBasicBlock::iterator &MBBI, int Offset,
+ bool isDef, const DebugLoc &DL, unsigned NewOpc,
unsigned Reg, bool RegDeadKill, bool RegUndef,
unsigned BaseReg, bool BaseKill, bool BaseUndef,
- bool OffKill, bool OffUndef,
- ARMCC::CondCodes Pred, unsigned PredReg,
- const TargetInstrInfo *TII, bool isT2) {
+ bool OffKill, bool OffUndef, ARMCC::CondCodes Pred,
+ unsigned PredReg, const TargetInstrInfo *TII,
+ bool isT2) {
if (isDef) {
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
TII->get(NewOpc))
@@ -1547,9 +1601,9 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
bool BaseUndef = BaseOp.isUndef();
bool OffKill = isT2 ? false : MI->getOperand(3).isKill();
bool OffUndef = isT2 ? false : MI->getOperand(3).isUndef();
- int OffImm = getMemoryOpOffset(MI);
+ int OffImm = getMemoryOpOffset(*MI);
unsigned PredReg = 0;
- ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
+ ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
if (OddRegNum > EvenRegNum && OffImm == 0) {
// Ascending register numbers and no offset. It's safe to change it to a
@@ -1655,14 +1709,14 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
unsigned Reg = MO.getReg();
unsigned Base = getLoadStoreBaseOp(*MBBI).getReg();
unsigned PredReg = 0;
- ARMCC::CondCodes Pred = getInstrPredicate(MBBI, PredReg);
- int Offset = getMemoryOpOffset(MBBI);
+ ARMCC::CondCodes Pred = getInstrPredicate(*MBBI, PredReg);
+ int Offset = getMemoryOpOffset(*MBBI);
if (CurrBase == 0) {
// Start of a new chain.
CurrBase = Base;
CurrOpc = Opcode;
CurrPred = Pred;
- MemOps.push_back(MemOpQueueEntry(MBBI, Offset, Position));
+ MemOps.push_back(MemOpQueueEntry(*MBBI, Offset, Position));
continue;
}
// Note: No need to match PredReg in the next if.
@@ -1690,7 +1744,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
if (!Overlap) {
// Check offset and sort memory operation into the current chain.
if (Offset > MemOps.back().Offset) {
- MemOps.push_back(MemOpQueueEntry(MBBI, Offset, Position));
+ MemOps.push_back(MemOpQueueEntry(*MBBI, Offset, Position));
continue;
} else {
MemOpQueue::iterator MI, ME;
@@ -1706,7 +1760,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
}
}
if (MI != MemOps.end()) {
- MemOps.insert(MI, MemOpQueueEntry(MBBI, Offset, Position));
+ MemOps.insert(MI, MemOpQueueEntry(*MBBI, Offset, Position));
continue;
}
}
@@ -1723,7 +1777,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
MBBI->getOpcode() == ARM::t2STRDi8) {
// ARMPreAllocLoadStoreOpt has already formed some LDRD/STRD instructions;
// remember them because we may still be able to merge add/sub into them.
- MergeBaseCandidates.push_back(MBBI);
+ MergeBaseCandidates.push_back(&*MBBI);
}
@@ -1805,20 +1859,20 @@ bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
// Ignore any DBG_VALUE instructions.
while (PrevI->isDebugValue() && PrevI != MBB.begin())
--PrevI;
- MachineInstr *PrevMI = PrevI;
- unsigned Opcode = PrevMI->getOpcode();
+ MachineInstr &PrevMI = *PrevI;
+ unsigned Opcode = PrevMI.getOpcode();
if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::LDMDA_UPD ||
Opcode == ARM::LDMDB_UPD || Opcode == ARM::LDMIB_UPD ||
Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
- MachineOperand &MO = PrevMI->getOperand(PrevMI->getNumOperands()-1);
+ MachineOperand &MO = PrevMI.getOperand(PrevMI.getNumOperands() - 1);
if (MO.getReg() != ARM::LR)
return false;
unsigned NewOpc = (isThumb2 ? ARM::t2LDMIA_RET : ARM::LDMIA_RET);
assert(((isThumb2 && Opcode == ARM::t2LDMIA_UPD) ||
Opcode == ARM::LDMIA_UPD) && "Unsupported multiple load-return!");
- PrevMI->setDesc(TII->get(NewOpc));
+ PrevMI.setDesc(TII->get(NewOpc));
MO.setReg(ARM::PC);
- PrevMI->copyImplicitOps(*MBB.getParent(), &*MBBI);
+ PrevMI.copyImplicitOps(*MBB.getParent(), *MBBI);
MBB.erase(MBBI);
return true;
}
@@ -1840,8 +1894,8 @@ bool ARMLoadStoreOpt::CombineMovBx(MachineBasicBlock &MBB) {
for (auto Use : Prev->uses())
if (Use.isKill()) {
AddDefaultPred(BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(ARM::tBX))
- .addReg(Use.getReg(), RegState::Kill))
- .copyImplicitOps(&*MBBI);
+ .addReg(Use.getReg(), RegState::Kill))
+ .copyImplicitOps(*MBBI);
MBB.erase(MBBI);
MBB.erase(Prev);
return true;
@@ -1851,6 +1905,9 @@ bool ARMLoadStoreOpt::CombineMovBx(MachineBasicBlock &MBB) {
}
bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
+ if (skipFunction(*Fn.getFunction()))
+ return false;
+
MF = &Fn;
STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
TL = STI->getTargetLowering();
@@ -1877,10 +1934,6 @@ bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
return Modified;
}
-namespace llvm {
-void initializeARMPreAllocLoadStoreOptPass(PassRegistry &);
-}
-
#define ARM_PREALLOC_LOAD_STORE_OPT_NAME \
"ARM pre- register allocation load / store optimization pass"
@@ -1889,9 +1942,7 @@ namespace {
/// locations close to make it more likely they will be combined later.
struct ARMPreAllocLoadStoreOpt : public MachineFunctionPass{
static char ID;
- ARMPreAllocLoadStoreOpt() : MachineFunctionPass(ID) {
- initializeARMPreAllocLoadStoreOptPass(*PassRegistry::getPassRegistry());
- }
+ ARMPreAllocLoadStoreOpt() : MachineFunctionPass(ID) {}
const DataLayout *TD;
const TargetInstrInfo *TII;
@@ -1922,10 +1973,13 @@ namespace {
char ARMPreAllocLoadStoreOpt::ID = 0;
}
-INITIALIZE_PASS(ARMPreAllocLoadStoreOpt, "arm-prera-load-store-opt",
+INITIALIZE_PASS(ARMPreAllocLoadStoreOpt, "arm-prera-ldst-opt",
ARM_PREALLOC_LOAD_STORE_OPT_NAME, false, false)
bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
+ if (AssumeMisalignedLoadStores || skipFunction(*Fn.getFunction()))
+ return false;
+
TD = &Fn.getDataLayout();
STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
TII = STI->getInstrInfo();
@@ -2034,7 +2088,7 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
return false;
// Then make sure the immediate offset fits.
- int OffImm = getMemoryOpOffset(Op0);
+ int OffImm = getMemoryOpOffset(*Op0);
if (isT2) {
int Limit = (1 << 8) * Scale;
if (OffImm >= Limit || (OffImm <= -Limit) || (OffImm & (Scale-1)))
@@ -2056,7 +2110,7 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
if (FirstReg == SecondReg)
return false;
BaseReg = Op0->getOperand(1).getReg();
- Pred = getInstrPredicate(Op0, PredReg);
+ Pred = getInstrPredicate(*Op0, PredReg);
dl = Op0->getDebugLoc();
return true;
}
@@ -2070,11 +2124,11 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
// Sort by offset (in reverse order).
std::sort(Ops.begin(), Ops.end(),
[](const MachineInstr *LHS, const MachineInstr *RHS) {
- int LOffset = getMemoryOpOffset(LHS);
- int ROffset = getMemoryOpOffset(RHS);
- assert(LHS == RHS || LOffset != ROffset);
- return LOffset > ROffset;
- });
+ int LOffset = getMemoryOpOffset(*LHS);
+ int ROffset = getMemoryOpOffset(*RHS);
+ assert(LHS == RHS || LOffset != ROffset);
+ return LOffset > ROffset;
+ });
// The loads / stores of the same base are in order. Scan them from first to
// last and check for the following:
@@ -2106,7 +2160,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
if (LastOpcode && LSMOpcode != LastOpcode)
break;
- int Offset = getMemoryOpOffset(Op);
+ int Offset = getMemoryOpOffset(*Op);
unsigned Bytes = getLSMultipleTransferSize(Op);
if (LastBytes) {
if (Bytes != LastBytes || Offset != (LastOffset + (int)Bytes))
@@ -2141,8 +2195,8 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
} else {
// This is the new location for the loads / stores.
MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
- while (InsertPos != MBB->end()
- && (MemOps.count(InsertPos) || InsertPos->isDebugValue()))
+ while (InsertPos != MBB->end() &&
+ (MemOps.count(&*InsertPos) || InsertPos->isDebugValue()))
++InsertPos;
// If we are moving a pair of loads / stores, see if it makes sense
@@ -2237,25 +2291,25 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
MachineBasicBlock::iterator E = MBB->end();
while (MBBI != E) {
for (; MBBI != E; ++MBBI) {
- MachineInstr *MI = MBBI;
- if (MI->isCall() || MI->isTerminator()) {
+ MachineInstr &MI = *MBBI;
+ if (MI.isCall() || MI.isTerminator()) {
// Stop at barriers.
++MBBI;
break;
}
- if (!MI->isDebugValue())
- MI2LocMap[MI] = ++Loc;
+ if (!MI.isDebugValue())
+ MI2LocMap[&MI] = ++Loc;
- if (!isMemoryOp(*MI))
+ if (!isMemoryOp(MI))
continue;
unsigned PredReg = 0;
if (getInstrPredicate(MI, PredReg) != ARMCC::AL)
continue;
- int Opc = MI->getOpcode();
+ int Opc = MI.getOpcode();
bool isLd = isLoadSingle(Opc);
- unsigned Base = MI->getOperand(1).getReg();
+ unsigned Base = MI.getOperand(1).getReg();
int Offset = getMemoryOpOffset(MI);
bool StopHere = false;
@@ -2264,15 +2318,15 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
Base2LdsMap.find(Base);
if (BI != Base2LdsMap.end()) {
for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
- if (Offset == getMemoryOpOffset(BI->second[i])) {
+ if (Offset == getMemoryOpOffset(*BI->second[i])) {
StopHere = true;
break;
}
}
if (!StopHere)
- BI->second.push_back(MI);
+ BI->second.push_back(&MI);
} else {
- Base2LdsMap[Base].push_back(MI);
+ Base2LdsMap[Base].push_back(&MI);
LdBases.push_back(Base);
}
} else {
@@ -2280,15 +2334,15 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
Base2StsMap.find(Base);
if (BI != Base2StsMap.end()) {
for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
- if (Offset == getMemoryOpOffset(BI->second[i])) {
+ if (Offset == getMemoryOpOffset(*BI->second[i])) {
StopHere = true;
break;
}
}
if (!StopHere)
- BI->second.push_back(MI);
+ BI->second.push_back(&MI);
} else {
- Base2StsMap[Base].push_back(MI);
+ Base2StsMap[Base].push_back(&MI);
StBases.push_back(Base);
}
}
@@ -2335,4 +2389,3 @@ FunctionPass *llvm::createARMLoadStoreOptimizationPass(bool PreAlloc) {
return new ARMPreAllocLoadStoreOpt();
return new ARMLoadStoreOpt();
}
-
diff --git a/lib/Target/ARM/ARMMCInstLower.cpp b/lib/Target/ARM/ARMMCInstLower.cpp
index a2aca2d1a69e..7429acdb09ad 100644
--- a/lib/Target/ARM/ARMMCInstLower.cpp
+++ b/lib/Target/ARM/ARMMCInstLower.cpp
@@ -26,33 +26,22 @@ using namespace llvm;
MCOperand ARMAsmPrinter::GetSymbolRef(const MachineOperand &MO,
const MCSymbol *Symbol) {
- const MCExpr *Expr;
- unsigned Option = MO.getTargetFlags() & ARMII::MO_OPTION_MASK;
- switch (Option) {
- default: {
- Expr = MCSymbolRefExpr::create(Symbol, MCSymbolRefExpr::VK_None,
- OutContext);
- switch (Option) {
- default: llvm_unreachable("Unknown target flag on symbol operand");
- case ARMII::MO_NO_FLAG:
- break;
- case ARMII::MO_LO16:
- Expr = MCSymbolRefExpr::create(Symbol, MCSymbolRefExpr::VK_None,
- OutContext);
- Expr = ARMMCExpr::createLower16(Expr, OutContext);
- break;
- case ARMII::MO_HI16:
- Expr = MCSymbolRefExpr::create(Symbol, MCSymbolRefExpr::VK_None,
- OutContext);
- Expr = ARMMCExpr::createUpper16(Expr, OutContext);
- break;
- }
+ const MCExpr *Expr =
+ MCSymbolRefExpr::create(Symbol, MCSymbolRefExpr::VK_None, OutContext);
+ switch (MO.getTargetFlags() & ARMII::MO_OPTION_MASK) {
+ default:
+ llvm_unreachable("Unknown target flag on symbol operand");
+ case ARMII::MO_NO_FLAG:
break;
- }
-
- case ARMII::MO_PLT:
- Expr = MCSymbolRefExpr::create(Symbol, MCSymbolRefExpr::VK_PLT,
- OutContext);
+ case ARMII::MO_LO16:
+ Expr =
+ MCSymbolRefExpr::create(Symbol, MCSymbolRefExpr::VK_None, OutContext);
+ Expr = ARMMCExpr::createLower16(Expr, OutContext);
+ break;
+ case ARMII::MO_HI16:
+ Expr =
+ MCSymbolRefExpr::create(Symbol, MCSymbolRefExpr::VK_None, OutContext);
+ Expr = ARMMCExpr::createUpper16(Expr, OutContext);
break;
}
@@ -89,7 +78,7 @@ bool ARMAsmPrinter::lowerOperand(const MachineOperand &MO,
break;
}
case MachineOperand::MO_ExternalSymbol:
- MCOp = GetSymbolRef(MO,
+ MCOp = GetSymbolRef(MO,
GetExternalSymbolSymbol(MO.getSymbolName()));
break;
case MachineOperand::MO_JumpTableIndex:
diff --git a/lib/Target/ARM/ARMMachineFunctionInfo.cpp b/lib/Target/ARM/ARMMachineFunctionInfo.cpp
index 71ad7a4a732a..b6dee9ff8385 100644
--- a/lib/Target/ARM/ARMMachineFunctionInfo.cpp
+++ b/lib/Target/ARM/ARMMachineFunctionInfo.cpp
@@ -21,4 +21,4 @@ ARMFunctionInfo::ARMFunctionInfo(MachineFunction &MF)
FramePtrSpillOffset(0), GPRCS1Offset(0), GPRCS2Offset(0), DPRCSOffset(0),
GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0),
PICLabelUId(0), VarArgsFrameIndex(0), HasITBlocks(false),
- IsSplitCSR(false) {}
+ ArgumentStackSize(0), IsSplitCSR(false) {}
diff --git a/lib/Target/ARM/ARMMachineFunctionInfo.h b/lib/Target/ARM/ARMMachineFunctionInfo.h
index 68f9aec8cae5..f71497240ff3 100644
--- a/lib/Target/ARM/ARMMachineFunctionInfo.h
+++ b/lib/Target/ARM/ARMMachineFunctionInfo.h
@@ -15,7 +15,6 @@
#define LLVM_LIB_TARGET_ARM_ARMMACHINEFUNCTIONINFO_H
#include "ARMSubtarget.h"
-#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Target/TargetMachine.h"
diff --git a/lib/Target/ARM/ARMOptimizeBarriersPass.cpp b/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
index 30baf4263c11..73dcb9641b61 100644
--- a/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
+++ b/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
@@ -27,6 +27,11 @@ public:
bool runOnMachineFunction(MachineFunction &Fn) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return "optimise barriers pass";
}
@@ -46,6 +51,9 @@ static bool CanMovePastDMB(const MachineInstr *MI) {
}
bool ARMOptimizeBarriersPass::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
// Vector to store the DMBs we will remove after the first iteration
std::vector<MachineInstr *> ToRemove;
// DMBType is the Imm value of the first operand. It determines whether it's a
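
Note: two idioms in this hunk come from the 3.9-era pass APIs: getRequiredProperties() lets a late pass declare it runs after register allocation, and skipFunction() makes it honor optnone and -opt-bisect-limit. A stripped-down machine pass using both (a sketch, not the actual pass; the static ID definition is omitted):

    struct LateARMPassSketch : public MachineFunctionPass {
      static char ID;
      LateARMPassSketch() : MachineFunctionPass(ID) {}
      MachineFunctionProperties getRequiredProperties() const override {
        // All virtual registers must already have physical assignments.
        return MachineFunctionProperties().set(
            MachineFunctionProperties::Property::AllVRegsAllocated);
      }
      bool runOnMachineFunction(MachineFunction &MF) override {
        if (skipFunction(*MF.getFunction())) // optnone / opt-bisect
          return false;
        return false; // real transformation elided in this sketch
      }
    };
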
diff --git a/lib/Target/ARM/ARMSchedule.td b/lib/Target/ARM/ARMSchedule.td
index 528c4ec73781..47a99313025c 100644
--- a/lib/Target/ARM/ARMSchedule.td
+++ b/lib/Target/ARM/ARMSchedule.td
@@ -94,7 +94,7 @@ def : PredicateProlog<[{
(void)TII;
}]>;
-def IsPredicatedPred : SchedPredicate<[{TII->isPredicated(MI)}]>;
+def IsPredicatedPred : SchedPredicate<[{TII->isPredicated(*MI)}]>;
//===----------------------------------------------------------------------===//
// Instruction Itinerary classes used for ARM
@@ -186,38 +186,50 @@ def IIC_iStore_mu : InstrItinClass;
def IIC_Preload : InstrItinClass;
def IIC_Br : InstrItinClass;
def IIC_fpSTAT : InstrItinClass;
+def IIC_fpUNA16 : InstrItinClass;
def IIC_fpUNA32 : InstrItinClass;
def IIC_fpUNA64 : InstrItinClass;
+def IIC_fpCMP16 : InstrItinClass;
def IIC_fpCMP32 : InstrItinClass;
def IIC_fpCMP64 : InstrItinClass;
def IIC_fpCVTSD : InstrItinClass;
def IIC_fpCVTDS : InstrItinClass;
def IIC_fpCVTSH : InstrItinClass;
def IIC_fpCVTHS : InstrItinClass;
+def IIC_fpCVTIH : InstrItinClass;
def IIC_fpCVTIS : InstrItinClass;
def IIC_fpCVTID : InstrItinClass;
+def IIC_fpCVTHI : InstrItinClass;
def IIC_fpCVTSI : InstrItinClass;
def IIC_fpCVTDI : InstrItinClass;
def IIC_fpMOVIS : InstrItinClass;
def IIC_fpMOVID : InstrItinClass;
def IIC_fpMOVSI : InstrItinClass;
def IIC_fpMOVDI : InstrItinClass;
+def IIC_fpALU16 : InstrItinClass;
def IIC_fpALU32 : InstrItinClass;
def IIC_fpALU64 : InstrItinClass;
+def IIC_fpMUL16 : InstrItinClass;
def IIC_fpMUL32 : InstrItinClass;
def IIC_fpMUL64 : InstrItinClass;
+def IIC_fpMAC16 : InstrItinClass;
def IIC_fpMAC32 : InstrItinClass;
def IIC_fpMAC64 : InstrItinClass;
+def IIC_fpFMAC16 : InstrItinClass;
def IIC_fpFMAC32 : InstrItinClass;
def IIC_fpFMAC64 : InstrItinClass;
+def IIC_fpDIV16 : InstrItinClass;
def IIC_fpDIV32 : InstrItinClass;
def IIC_fpDIV64 : InstrItinClass;
+def IIC_fpSQRT16 : InstrItinClass;
def IIC_fpSQRT32 : InstrItinClass;
def IIC_fpSQRT64 : InstrItinClass;
+def IIC_fpLoad16 : InstrItinClass;
def IIC_fpLoad32 : InstrItinClass;
def IIC_fpLoad64 : InstrItinClass;
def IIC_fpLoad_m : InstrItinClass;
def IIC_fpLoad_mu : InstrItinClass;
+def IIC_fpStore16 : InstrItinClass;
def IIC_fpStore32 : InstrItinClass;
def IIC_fpStore64 : InstrItinClass;
def IIC_fpStore_m : InstrItinClass;
diff --git a/lib/Target/ARM/ARMScheduleA8.td b/lib/Target/ARM/ARMScheduleA8.td
index 2c6382542ab9..ba380cba100f 100644
--- a/lib/Target/ARM/ARMScheduleA8.td
+++ b/lib/Target/ARM/ARMScheduleA8.td
@@ -1065,11 +1065,11 @@ def CortexA8Itineraries : ProcessorItineraries<
// Cortex-A8 machine model for scheduling and other instruction cost heuristics.
def CortexA8Model : SchedMachineModel {
let IssueWidth = 2; // 2 micro-ops are dispatched per cycle.
- let MinLatency = -1; // OperandCycles are interpreted as MinLatency.
let LoadLatency = 2; // Optimistic load latency assuming bypass.
// This is overridden by OperandCycles if the
// Itineraries are queried instead.
let MispredictPenalty = 13; // Based on estimate of pipeline depth.
+ let CompleteModel = 0;
let Itineraries = CortexA8Itineraries;
}
diff --git a/lib/Target/ARM/ARMScheduleA9.td b/lib/Target/ARM/ARMScheduleA9.td
index 9a1d22275646..519e595bd184 100644
--- a/lib/Target/ARM/ARMScheduleA9.td
+++ b/lib/Target/ARM/ARMScheduleA9.td
@@ -2025,12 +2025,12 @@ def A9WriteAdr#NumAddr : WriteSequence<[A9WriteAdr], NumAddr>;
// Define a predicate to select the LDM based on number of memory addresses.
def A9LMAdr#NumAddr#Pred :
- SchedPredicate<"(TII->getNumLDMAddresses(MI)+1)/2 == "#NumAddr>;
+ SchedPredicate<"(TII->getNumLDMAddresses(*MI)+1)/2 == "#NumAddr>;
} // foreach NumAddr
// Fall-back for unknown LDMs.
-def A9LMUnknownPred : SchedPredicate<"TII->getNumLDMAddresses(MI) == 0">;
+def A9LMUnknownPred : SchedPredicate<"TII->getNumLDMAddresses(*MI) == 0">;
// LDM/VLDM/VLDn address generation latency & resources.
// Dynamically select the A9WriteAdrN sequence using a predicate.
diff --git a/lib/Target/ARM/ARMScheduleSwift.td b/lib/Target/ARM/ARMScheduleSwift.td
index 3ad7730228e5..ea2bf4b578f0 100644
--- a/lib/Target/ARM/ARMScheduleSwift.td
+++ b/lib/Target/ARM/ARMScheduleSwift.td
@@ -374,7 +374,7 @@ let SchedModel = SwiftModel in {
}
// Predicate.
foreach NumAddr = 1-16 in {
- def SwiftLMAddr#NumAddr#Pred : SchedPredicate<"TII->getNumLDMAddresses(MI) == "#NumAddr>;
+ def SwiftLMAddr#NumAddr#Pred : SchedPredicate<"TII->getNumLDMAddresses(*MI) == "#NumAddr>;
}
def SwiftWriteLDMAddrNoWB : SchedWriteRes<[SwiftUnitP01]> { let Latency = 0; }
def SwiftWriteLDMAddrWB : SchedWriteRes<[SwiftUnitP01, SwiftUnitP01]>;
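
Note: the `MI` to `*MI` changes across these scheduling files track an API migration in this import: TargetInstrInfo hooks such as isPredicated() and getNumLDMAddresses() now take `const MachineInstr &` rather than a pointer, so predicate bodies that still hold a pointer dereference it. Sketched at a call site (the wrapper is hypothetical):

    // Before: TII->isPredicated(MI);   // const MachineInstr *
    // After:  TII->isPredicated(*MI);  // const MachineInstr &
    static bool isPredicatedAt(const TargetInstrInfo *TII,
                               const MachineInstr *MI) {
      return TII->isPredicated(*MI); // MI is known non-null in these predicates
    }
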
diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/lib/Target/ARM/ARMSelectionDAGInfo.cpp
index 6fded9c8ab73..3b99762f7157 100644
--- a/lib/Target/ARM/ARMSelectionDAGInfo.cpp
+++ b/lib/Target/ARM/ARMSelectionDAGInfo.cpp
@@ -21,12 +21,9 @@ using namespace llvm;
// Emit, if possible, a specialized version of the given Libcall. Typically this
// means selecting the appropriately aligned version, but we also convert memset
// of 0 into memclr.
-SDValue ARMSelectionDAGInfo::
-EmitSpecializedLibcall(SelectionDAG &DAG, SDLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- RTLIB::Libcall LC) const {
+SDValue ARMSelectionDAGInfo::EmitSpecializedLibcall(
+ SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, RTLIB::Libcall LC) const {
const ARMSubtarget &Subtarget =
DAG.getMachineFunction().getSubtarget<ARMSubtarget>();
const ARMTargetLowering *TLI = Subtarget.getTargetLowering();
@@ -121,21 +118,17 @@ EmitSpecializedLibcall(SelectionDAG &DAG, SDLoc dl,
TLI->getLibcallCallingConv(LC), Type::getVoidTy(*DAG.getContext()),
DAG.getExternalSymbol(FunctionNames[AEABILibcall][AlignVariant],
TLI->getPointerTy(DAG.getDataLayout())),
- std::move(Args), 0)
+ std::move(Args))
.setDiscardResult();
std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
return CallResult.second;
}
-SDValue
-ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool isVolatile, bool AlwaysInline,
- MachinePointerInfo DstPtrInfo,
- MachinePointerInfo SrcPtrInfo) const {
+SDValue ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(
+ SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
const ARMSubtarget &Subtarget =
DAG.getMachineFunction().getSubtarget<ARMSubtarget>();
// Do repeated 4-byte loads and stores. To be improved.
@@ -176,6 +169,12 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
// emit.
unsigned NumMEMCPYs = (NumMemOps + MaxLoadsInLDM - 1) / MaxLoadsInLDM;
+ // Code size optimisation: do not inline memcpy if expansion results in
+ // more instructions than the library call.
+ if (NumMEMCPYs > 1 && DAG.getMachineFunction().getFunction()->optForMinSize()) {
+ return SDValue();
+ }
+
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other, MVT::Glue);
for (unsigned I = 0; I != NumMEMCPYs; ++I) {
@@ -213,8 +212,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
Loads[i] = DAG.getLoad(VT, dl, Chain,
DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
DAG.getConstant(SrcOff, dl, MVT::i32)),
- SrcPtrInfo.getWithOffset(SrcOff),
- false, false, false, 0);
+ SrcPtrInfo.getWithOffset(SrcOff));
TFOps[i] = Loads[i].getValue(1);
++i;
SrcOff += VTSize;
@@ -237,7 +235,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
DAG.getConstant(DstOff, dl, MVT::i32)),
- DstPtrInfo.getWithOffset(DstOff), false, false, 0);
+ DstPtrInfo.getWithOffset(DstOff));
++i;
DstOff += VTSize;
BytesLeft -= VTSize;
@@ -246,26 +244,18 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
makeArrayRef(TFOps, i));
}
-
-SDValue ARMSelectionDAGInfo::
-EmitTargetCodeForMemmove(SelectionDAG &DAG, SDLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool isVolatile,
- MachinePointerInfo DstPtrInfo,
- MachinePointerInfo SrcPtrInfo) const {
+SDValue ARMSelectionDAGInfo::EmitTargetCodeForMemmove(
+ SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVolatile,
+ MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
return EmitSpecializedLibcall(DAG, dl, Chain, Dst, Src, Size, Align,
RTLIB::MEMMOVE);
}
-
-SDValue ARMSelectionDAGInfo::
-EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
- SDValue Chain, SDValue Dst,
- SDValue Src, SDValue Size,
- unsigned Align, bool isVolatile,
- MachinePointerInfo DstPtrInfo) const {
+SDValue ARMSelectionDAGInfo::EmitTargetCodeForMemset(
+ SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVolatile,
+ MachinePointerInfo DstPtrInfo) const {
return EmitSpecializedLibcall(DAG, dl, Chain, Dst, Src, Size, Align,
RTLIB::MEMSET);
}
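
Note: the NumMEMCPYs expression above is a ceiling division: it counts how many LDM/STM batches are needed when each batch can move at most MaxLoadsInLDM registers, and the new minsize guard bails out to the library call whenever more than one batch would be emitted. Self-contained illustration:

    // Ceiling division, as in: (NumMemOps + MaxLoadsInLDM - 1) / MaxLoadsInLDM
    unsigned memcpyBatches(unsigned NumMemOps, unsigned MaxLoadsInLDM) {
      return (NumMemOps + MaxLoadsInLDM - 1) / MaxLoadsInLDM;
    }
    // memcpyBatches(11, 6) == 2  -> under minsize, prefer the library call
    // memcpyBatches(6, 6)  == 1  -> still inlined
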
diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.h b/lib/Target/ARM/ARMSelectionDAGInfo.h
index 289879ee1d7e..2ddb42c95397 100644
--- a/lib/Target/ARM/ARMSelectionDAGInfo.h
+++ b/lib/Target/ARM/ARMSelectionDAGInfo.h
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the ARM subclass for TargetSelectionDAGInfo.
+// This file defines the ARM subclass for SelectionDAGTargetInfo.
//
//===----------------------------------------------------------------------===//
@@ -15,7 +15,8 @@
#define LLVM_LIB_TARGET_ARM_ARMSELECTIONDAGINFO_H
#include "MCTargetDesc/ARMAddressingModes.h"
-#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
namespace llvm {
@@ -35,35 +36,30 @@ namespace ARM_AM {
}
} // end namespace ARM_AM
-class ARMSelectionDAGInfo : public TargetSelectionDAGInfo {
+class ARMSelectionDAGInfo : public SelectionDAGTargetInfo {
public:
-
- SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool isVolatile, bool AlwaysInline,
+ SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
+ SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVolatile,
+ bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const override;
- SDValue EmitTargetCodeForMemmove(SelectionDAG &DAG, SDLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align, bool isVolatile,
- MachinePointerInfo DstPtrInfo,
- MachinePointerInfo SrcPtrInfo) const override;
+ SDValue
+ EmitTargetCodeForMemmove(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
+ SDValue Dst, SDValue Src, SDValue Size,
+ unsigned Align, bool isVolatile,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const override;
// Adjust parameters for memset, see RTABI section 4.3.4
- SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
- SDValue Chain,
- SDValue Op1, SDValue Op2,
- SDValue Op3, unsigned Align,
- bool isVolatile,
+ SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl,
+ SDValue Chain, SDValue Op1, SDValue Op2,
+ SDValue Op3, unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const override;
- SDValue EmitSpecializedLibcall(SelectionDAG &DAG, SDLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
+ SDValue EmitSpecializedLibcall(SelectionDAG &DAG, const SDLoc &dl,
+ SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
RTLIB::Libcall LC) const;
};
diff --git a/lib/Target/ARM/ARMSubtarget.cpp b/lib/Target/ARM/ARMSubtarget.cpp
index bb6ae28065bd..1d7eef9ddcfd 100644
--- a/lib/Target/ARM/ARMSubtarget.cpp
+++ b/lib/Target/ARM/ARMSubtarget.cpp
@@ -88,10 +88,9 @@ ARMFrameLowering *ARMSubtarget::initializeFrameLowering(StringRef CPU,
ARMSubtarget::ARMSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS,
const ARMBaseTargetMachine &TM, bool IsLittle)
- : ARMGenSubtargetInfo(TT, CPU, FS), ARMProcFamily(Others),
- ARMProcClass(None), ARMArch(ARMv4t), stackAlignment(4), CPUString(CPU),
- IsLittle(IsLittle), TargetTriple(TT), Options(TM.Options), TM(TM),
- FrameLowering(initializeFrameLowering(CPU, FS)),
+ : ARMGenSubtargetInfo(TT, CPU, FS), UseMulOps(UseFusedMulOps),
+ CPUString(CPU), IsLittle(IsLittle), TargetTriple(TT), Options(TM.Options),
+ TM(TM), FrameLowering(initializeFrameLowering(CPU, FS)),
// At this point initializeSubtargetDependencies has been called so
// we can query directly.
InstrInfo(isThumb1Only()
@@ -102,63 +101,10 @@ ARMSubtarget::ARMSubtarget(const Triple &TT, const std::string &CPU,
TLInfo(TM, *this) {}
void ARMSubtarget::initializeEnvironment() {
- HasV4TOps = false;
- HasV5TOps = false;
- HasV5TEOps = false;
- HasV6Ops = false;
- HasV6MOps = false;
- HasV6KOps = false;
- HasV6T2Ops = false;
- HasV7Ops = false;
- HasV8Ops = false;
- HasV8_1aOps = false;
- HasV8_2aOps = false;
- HasVFPv2 = false;
- HasVFPv3 = false;
- HasVFPv4 = false;
- HasFPARMv8 = false;
- HasNEON = false;
- UseNEONForSinglePrecisionFP = false;
- UseMulOps = UseFusedMulOps;
- SlowFPVMLx = false;
- HasVMLxForwarding = false;
- SlowFPBrcc = false;
- InThumbMode = false;
- UseSoftFloat = false;
- HasThumb2 = false;
- NoARM = false;
- ReserveR9 = false;
- NoMovt = false;
- SupportsTailCall = false;
- HasFP16 = false;
- HasFullFP16 = false;
- HasD16 = false;
- HasHardwareDivide = false;
- HasHardwareDivideInARM = false;
- HasT2ExtractPack = false;
- HasDataBarrier = false;
- Pref32BitThumb = false;
- AvoidCPSRPartialUpdate = false;
- AvoidMOVsShifterOperand = false;
- HasRAS = false;
- HasMPExtension = false;
- HasVirtualization = false;
- FPOnlySP = false;
- HasPerfMon = false;
- HasTrustZone = false;
- HasCrypto = false;
- HasCRC = false;
- HasZeroCycleZeroing = false;
- StrictAlign = false;
- HasDSP = false;
- UseNaClTrap = false;
- GenLongCalls = false;
- UnsafeFPMath = false;
-
// MCAsmInfo isn't always present (e.g. in opt) so we can't initialize this
// directly from it, but we can try to make sure they're consistent when both
// available.
- UseSjLjEH = isTargetDarwin() && !isTargetWatchOS();
+ UseSjLjEH = isTargetDarwin() && !isTargetWatchABI();
assert((!TM.getMCAsmInfo() ||
(TM.getMCAsmInfo()->getExceptionHandlingType() ==
ExceptionHandling::SjLj) == UseSjLjEH) &&
@@ -230,7 +176,7 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
// registers are the 4 used for parameters. We don't currently do this
// case.
- SupportsTailCall = !isThumb1Only();
+ SupportsTailCall = !isThumb() || hasV8MBaselineOps();
if (isTargetMachO() && isTargetIOS() && getTargetTriple().isOSVersionLT(5, 0))
SupportsTailCall = false;
@@ -252,6 +198,53 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
if ((Bits[ARM::ProcA5] || Bits[ARM::ProcA8]) && // Where this matters
(Options.UnsafeFPMath || isTargetDarwin()))
UseNEONForSinglePrecisionFP = true;
+
+ // FIXME: Teach TableGen to deal with these instead of doing it manually here.
+ switch (ARMProcFamily) {
+ case Others:
+ case CortexA5:
+ break;
+ case CortexA7:
+ LdStMultipleTiming = DoubleIssue;
+ break;
+ case CortexA8:
+ LdStMultipleTiming = DoubleIssue;
+ break;
+ case CortexA9:
+ LdStMultipleTiming = DoubleIssueCheckUnalignedAccess;
+ PreISelOperandLatencyAdjustment = 1;
+ break;
+ case CortexA12:
+ break;
+ case CortexA15:
+ MaxInterleaveFactor = 2;
+ PreISelOperandLatencyAdjustment = 1;
+ PartialUpdateClearance = 12;
+ break;
+ case CortexA17:
+ case CortexA32:
+ case CortexA35:
+ case CortexA53:
+ case CortexA57:
+ case CortexA72:
+ case CortexA73:
+ case CortexR4:
+ case CortexR4F:
+ case CortexR5:
+ case CortexR7:
+ case CortexM3:
+ case ExynosM1:
+ break;
+ case Krait:
+ PreISelOperandLatencyAdjustment = 1;
+ break;
+ case Swift:
+ MaxInterleaveFactor = 2;
+ LdStMultipleTiming = SingleIssuePlusExtras;
+ PreISelOperandLatencyAdjustment = 1;
+ PartialUpdateClearance = 12;
+ break;
+ }
}
bool ARMSubtarget::isAPCS_ABI() const {
@@ -268,40 +261,16 @@ bool ARMSubtarget::isAAPCS16_ABI() const {
return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16;
}
+bool ARMSubtarget::isGVIndirectSymbol(const GlobalValue *GV) const {
+ if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
+ return true;
-/// GVIsIndirectSymbol - true if the GV will be accessed via an indirect symbol.
-bool
-ARMSubtarget::GVIsIndirectSymbol(const GlobalValue *GV,
- Reloc::Model RelocM) const {
- if (RelocM == Reloc::Static)
- return false;
-
- bool isDef = GV->isStrongDefinitionForLinker();
-
- if (!isTargetMachO()) {
- // Extra load is needed for all externally visible.
- if (GV->hasLocalLinkage() || GV->hasHiddenVisibility())
- return false;
+ // 32 bit macho has no relocation for a-b if a is undefined, even if b is in
+ // the section that is being relocated. This means we have to use o load even
+ // for GVs that are known to be local to the dso.
+ if (isTargetDarwin() && TM.isPositionIndependent() &&
+ (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
return true;
- } else {
- // If this is a strong reference to a definition, it is definitely not
- // through a stub.
- if (isDef)
- return false;
-
- // Unless we have a symbol with hidden visibility, we have to go through a
- // normal $non_lazy_ptr stub because this symbol might be resolved late.
- if (!GV->hasHiddenVisibility()) // Non-hidden $non_lazy_ptr reference.
- return true;
-
- if (RelocM == Reloc::PIC_) {
- // If symbol visibility is hidden, we have a stub for common symbol
- // references and external declarations.
- if (GV->isDeclarationForLinker() || GV->hasCommonLinkage())
- // Hidden $non_lazy_ptr reference.
- return true;
- }
- }
return false;
}
@@ -332,21 +301,21 @@ bool ARMSubtarget::enablePostRAScheduler() const {
}
bool ARMSubtarget::enableAtomicExpand() const {
- return hasAnyDataBarrier() && !isThumb1Only();
+ return hasAnyDataBarrier() && (!isThumb() || hasV8MBaselineOps());
}
bool ARMSubtarget::useStride4VFPs(const MachineFunction &MF) const {
// For general targets, the prologue can grow when VFPs are allocated with
// stride 4 (more vpush instructions). But WatchOS uses a compact unwind
// format which it's more important to get right.
- return isTargetWatchOS() || (isSwift() && !MF.getFunction()->optForMinSize());
+ return isTargetWatchABI() || (isSwift() && !MF.getFunction()->optForMinSize());
}
bool ARMSubtarget::useMovt(const MachineFunction &MF) const {
// NOTE Windows on ARM needs to use mov.w/mov.t pairs to materialise 32-bit
// immediates as it is inherently position independent, and may be out of
// range otherwise.
- return !NoMovt && hasV6T2Ops() &&
+ return !NoMovt && hasV8MBaselineOps() &&
(isTargetWindows() || !MF.getFunction()->optForMinSize());
}
diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h
index 4d54e5751473..910de0e1e72d 100644
--- a/lib/Target/ARM/ARMSubtarget.h
+++ b/lib/Target/ARM/ARMSubtarget.h
@@ -43,8 +43,9 @@ class ARMSubtarget : public ARMGenSubtargetInfo {
protected:
enum ARMProcFamilyEnum {
Others, CortexA5, CortexA7, CortexA8, CortexA9, CortexA12, CortexA15,
- CortexA17, CortexR4, CortexR4F, CortexR5, CortexR7, CortexA35, CortexA53,
- CortexA57, CortexA72, Krait, Swift, ExynosM1
+ CortexA17, CortexR4, CortexR4F, CortexR5, CortexR7, CortexM3,
+ CortexA32, CortexA35, CortexA53, CortexA57, CortexA72, CortexA73,
+ Krait, Swift, ExynosM1
};
enum ARMProcClassEnum {
None, AClass, RClass, MClass
@@ -52,188 +53,275 @@ protected:
enum ARMArchEnum {
ARMv2, ARMv2a, ARMv3, ARMv3m, ARMv4, ARMv4t, ARMv5, ARMv5t, ARMv5te,
ARMv5tej, ARMv6, ARMv6k, ARMv6kz, ARMv6t2, ARMv6m, ARMv6sm, ARMv7a, ARMv7r,
- ARMv7m, ARMv7em, ARMv8a, ARMv81a, ARMv82a
+ ARMv7m, ARMv7em, ARMv8a, ARMv81a, ARMv82a, ARMv8mMainline, ARMv8mBaseline
};
+public:
+ /// What kind of timing do load multiple/store multiple instructions have.
+ enum ARMLdStMultipleTiming {
+ /// Can load/store 2 registers/cycle.
+ DoubleIssue,
+ /// Can load/store 2 registers/cycle, but needs an extra cycle if the access
+ /// is not 64-bit aligned.
+ DoubleIssueCheckUnalignedAccess,
+ /// Can load/store 1 register/cycle.
+ SingleIssue,
+ /// Can load/store 1 register/cycle, but needs an extra cycle for address
+ /// computation and potentially also for register writeback.
+ SingleIssuePlusExtras,
+ };
+
+protected:
/// ARMProcFamily - ARM processor family: Cortex-A8, Cortex-A9, and others.
- ARMProcFamilyEnum ARMProcFamily;
+ ARMProcFamilyEnum ARMProcFamily = Others;
/// ARMProcClass - ARM processor class: None, AClass, RClass or MClass.
- ARMProcClassEnum ARMProcClass;
+ ARMProcClassEnum ARMProcClass = None;
/// ARMArch - ARM architecture
- ARMArchEnum ARMArch;
+ ARMArchEnum ARMArch = ARMv4t;
/// HasV4TOps, HasV5TOps, HasV5TEOps,
/// HasV6Ops, HasV6MOps, HasV6KOps, HasV6T2Ops, HasV7Ops, HasV8Ops -
/// Specify whether the target supports specific ARM ISA variants.
- bool HasV4TOps;
- bool HasV5TOps;
- bool HasV5TEOps;
- bool HasV6Ops;
- bool HasV6MOps;
- bool HasV6KOps;
- bool HasV6T2Ops;
- bool HasV7Ops;
- bool HasV8Ops;
- bool HasV8_1aOps;
- bool HasV8_2aOps;
+ bool HasV4TOps = false;
+ bool HasV5TOps = false;
+ bool HasV5TEOps = false;
+ bool HasV6Ops = false;
+ bool HasV6MOps = false;
+ bool HasV6KOps = false;
+ bool HasV6T2Ops = false;
+ bool HasV7Ops = false;
+ bool HasV8Ops = false;
+ bool HasV8_1aOps = false;
+ bool HasV8_2aOps = false;
+ bool HasV8MBaselineOps = false;
+ bool HasV8MMainlineOps = false;
/// HasVFPv2, HasVFPv3, HasVFPv4, HasFPARMv8, HasNEON - Specify what
/// floating point ISAs are supported.
- bool HasVFPv2;
- bool HasVFPv3;
- bool HasVFPv4;
- bool HasFPARMv8;
- bool HasNEON;
+ bool HasVFPv2 = false;
+ bool HasVFPv3 = false;
+ bool HasVFPv4 = false;
+ bool HasFPARMv8 = false;
+ bool HasNEON = false;
/// UseNEONForSinglePrecisionFP - if the NEONFP attribute has been
/// specified. Use the method useNEONForSinglePrecisionFP() to
/// determine if NEON should actually be used.
- bool UseNEONForSinglePrecisionFP;
+ bool UseNEONForSinglePrecisionFP = false;
/// UseMulOps - True if non-microcoded fused integer multiply-add and
/// multiply-subtract instructions should be used.
- bool UseMulOps;
+ bool UseMulOps = false;
/// SlowFPVMLx - If the VFP2 / NEON instructions are available, indicates
/// whether the FP VML[AS] instructions are slow (if so, don't use them).
- bool SlowFPVMLx;
+ bool SlowFPVMLx = false;
/// HasVMLxForwarding - If true, NEON has special multiplier accumulator
/// forwarding to allow mul + mla being issued back to back.
- bool HasVMLxForwarding;
+ bool HasVMLxForwarding = false;
/// SlowFPBrcc - True if floating point compare + branch is slow.
- bool SlowFPBrcc;
+ bool SlowFPBrcc = false;
/// InThumbMode - True if compiling for Thumb, false for ARM.
- bool InThumbMode;
+ bool InThumbMode = false;
/// UseSoftFloat - True if we're using software floating point features.
- bool UseSoftFloat;
+ bool UseSoftFloat = false;
/// HasThumb2 - True if Thumb2 instructions are supported.
- bool HasThumb2;
+ bool HasThumb2 = false;
/// NoARM - True if subtarget does not support ARM mode execution.
- bool NoARM;
+ bool NoARM = false;
/// ReserveR9 - True if R9 is not available as a general purpose register.
- bool ReserveR9;
+ bool ReserveR9 = false;
/// NoMovt - True if MOVT / MOVW pairs are not used for materialization of
/// 32-bit imms (including global addresses).
- bool NoMovt;
+ bool NoMovt = false;
/// SupportsTailCall - True if the OS supports tail call. The dynamic linker
/// must be able to synthesize call stubs for interworking between ARM and
/// Thumb.
- bool SupportsTailCall;
+ bool SupportsTailCall = false;
/// HasFP16 - True if subtarget supports half-precision FP conversions
- bool HasFP16;
+ bool HasFP16 = false;
/// HasFullFP16 - True if subtarget supports half-precision FP operations
- bool HasFullFP16;
+ bool HasFullFP16 = false;
/// HasD16 - True if subtarget is limited to 16 double precision
/// FP registers for VFPv3.
- bool HasD16;
+ bool HasD16 = false;
/// HasHardwareDivide - True if subtarget supports [su]div
- bool HasHardwareDivide;
+ bool HasHardwareDivide = false;
/// HasHardwareDivideInARM - True if subtarget supports [su]div in ARM mode
- bool HasHardwareDivideInARM;
+ bool HasHardwareDivideInARM = false;
/// HasT2ExtractPack - True if subtarget supports thumb2 extract/pack
/// instructions.
- bool HasT2ExtractPack;
+ bool HasT2ExtractPack = false;
/// HasDataBarrier - True if the subtarget supports DMB / DSB data barrier
/// instructions.
- bool HasDataBarrier;
+ bool HasDataBarrier = false;
+
+ /// HasV7Clrex - True if the subtarget supports CLREX instructions
+ bool HasV7Clrex = false;
+
+ /// HasAcquireRelease - True if the subtarget supports v8 atomics (LDA/LDAEX etc)
+ /// instructions
+ bool HasAcquireRelease = false;
/// Pref32BitThumb - If true, codegen would prefer 32-bit Thumb instructions
/// over 16-bit ones.
- bool Pref32BitThumb;
+ bool Pref32BitThumb = false;
/// AvoidCPSRPartialUpdate - If true, codegen would avoid using instructions
/// that partially update CPSR and add false dependency on the previous
/// CPSR setting instruction.
- bool AvoidCPSRPartialUpdate;
+ bool AvoidCPSRPartialUpdate = false;
/// AvoidMOVsShifterOperand - If true, codegen should avoid using flag setting
/// movs with shifter operand (i.e. asr, lsl, lsr).
- bool AvoidMOVsShifterOperand;
+ bool AvoidMOVsShifterOperand = false;
- /// HasRAS - Some processors perform return stack prediction. CodeGen should
+ /// HasRetAddrStack - Some processors perform return stack prediction. CodeGen should
/// avoid issuing "normal" call instructions to callees which do not return.
- bool HasRAS;
+ bool HasRetAddrStack = false;
/// HasMPExtension - True if the subtarget supports Multiprocessing
/// extension (ARMv7 only).
- bool HasMPExtension;
+ bool HasMPExtension = false;
/// HasVirtualization - True if the subtarget supports the Virtualization
/// extension.
- bool HasVirtualization;
+ bool HasVirtualization = false;
/// FPOnlySP - If true, the floating point unit only supports single
/// precision.
- bool FPOnlySP;
+ bool FPOnlySP = false;
/// If true, the processor supports the Performance Monitor Extensions. These
/// include a generic cycle-counter as well as more fine-grained (often
/// implementation-specific) events.
- bool HasPerfMon;
+ bool HasPerfMon = false;
/// HasTrustZone - if true, processor supports TrustZone security extensions
- bool HasTrustZone;
+ bool HasTrustZone = false;
+
+ /// Has8MSecExt - if true, processor supports ARMv8-M Security Extensions
+ bool Has8MSecExt = false;
/// HasCrypto - if true, processor supports Cryptography extensions
- bool HasCrypto;
+ bool HasCrypto = false;
/// HasCRC - if true, processor supports CRC instructions
- bool HasCRC;
+ bool HasCRC = false;
+
+ /// HasRAS - if true, the processor supports RAS extensions
+ bool HasRAS = false;
/// If true, the instructions "vmov.i32 d0, #0" and "vmov.i32 q0, #0" are
/// particularly effective at zeroing a VFP register.
- bool HasZeroCycleZeroing;
+ bool HasZeroCycleZeroing = false;
+
+ /// If true, if conversion may decide to leave some instructions unpredicated.
+ bool IsProfitableToUnpredicate = false;
+
+ /// If true, VMOV will be favored over VGETLNi32.
+ bool HasSlowVGETLNi32 = false;
+
+ /// If true, VMOV will be favored over VDUP.
+ bool HasSlowVDUP32 = false;
+
+ /// If true, VMOVSR will be favored over VMOVDRR.
+ bool PreferVMOVSR = false;
+
+ /// If true, ISHST barriers will be used for Release semantics.
+ bool PreferISHST = false;
+
+ /// If true, a VLDM/VSTM starting with an odd register number is considered to
+ /// take more microops than single VLDRS/VSTRS.
+ bool SlowOddRegister = false;
+
+ /// If true, loading into a D subregister will be penalized.
+ bool SlowLoadDSubregister = false;
+
+ /// If true, the AGU and NEON/FPU units are multiplexed.
+ bool HasMuxedUnits = false;
+
+ /// If true, VMOVS will never be widened to VMOVD
+ bool DontWidenVMOVS = false;
+
+ /// If true, run the MLx expansion pass.
+ bool ExpandMLx = false;
+
+ /// If true, VFP/NEON VMLA/VMLS have special RAW hazards.
+ bool HasVMLxHazards = false;
+
+ /// If true, VMOVRS, VMOVSR and VMOVS will be converted from VFP to NEON.
+ bool UseNEONForFPMovs = false;
+
+ /// If true, VLDn instructions take an extra cycle for unaligned accesses.
+ bool CheckVLDnAlign = false;
+
+ /// If true, VFP instructions are not pipelined.
+ bool NonpipelinedVFP = false;
/// StrictAlign - If true, the subtarget disallows unaligned memory
/// accesses for some types. For details, see
/// ARMTargetLowering::allowsMisalignedMemoryAccesses().
- bool StrictAlign;
+ bool StrictAlign = false;
/// RestrictIT - If true, the subtarget disallows generation of deprecated IT
/// blocks to conform to ARMv8 rule.
- bool RestrictIT;
+ bool RestrictIT = false;
/// HasDSP - If true, the subtarget supports the DSP (saturating arith
/// and such) instructions.
- bool HasDSP;
+ bool HasDSP = false;
/// NaCl TRAP instruction is generated instead of the regular TRAP.
- bool UseNaClTrap;
+ bool UseNaClTrap = false;
/// Generate calls via indirect call instructions.
- bool GenLongCalls;
+ bool GenLongCalls = false;
/// Target machine allowed unsafe FP math (such as use of NEON fp)
- bool UnsafeFPMath;
+ bool UnsafeFPMath = false;
/// UseSjLjEH - If true, the target uses SjLj exception handling (e.g. iOS).
- bool UseSjLjEH;
+ bool UseSjLjEH = false;
/// stackAlignment - The minimum alignment known to hold of the stack frame on
/// entry to the function and which must be maintained by every function.
- unsigned stackAlignment;
+ unsigned stackAlignment = 4;
/// CPUString - String name of used CPU.
std::string CPUString;
+ unsigned MaxInterleaveFactor = 1;
+
+ /// Clearance before partial register updates (in number of instructions)
+ unsigned PartialUpdateClearance = 0;
+
+ /// What kind of timing do load multiple/store multiple have (double issue,
+ /// single issue etc).
+ ARMLdStMultipleTiming LdStMultipleTiming = SingleIssue;
+
+ /// The adjustment that we need to apply to get the operand latency from the
+ /// operand cycle returned by the itinerary data for pre-ISel operands.
+ int PreISelOperandLatencyAdjustment = 2;
+
/// IsLittle - The target is Little Endian
bool IsLittle;
@@ -313,17 +401,23 @@ public:
bool hasV8Ops() const { return HasV8Ops; }
bool hasV8_1aOps() const { return HasV8_1aOps; }
bool hasV8_2aOps() const { return HasV8_2aOps; }
+ bool hasV8MBaselineOps() const { return HasV8MBaselineOps; }
+ bool hasV8MMainlineOps() const { return HasV8MMainlineOps; }
+ /// @{
+ /// These functions are obsolete, please consider adding subtarget features
+ /// or properties instead of calling them.
bool isCortexA5() const { return ARMProcFamily == CortexA5; }
bool isCortexA7() const { return ARMProcFamily == CortexA7; }
bool isCortexA8() const { return ARMProcFamily == CortexA8; }
bool isCortexA9() const { return ARMProcFamily == CortexA9; }
bool isCortexA15() const { return ARMProcFamily == CortexA15; }
bool isSwift() const { return ARMProcFamily == Swift; }
- bool isCortexM3() const { return CPUString == "cortex-m3"; }
+ bool isCortexM3() const { return ARMProcFamily == CortexM3; }
bool isLikeA9() const { return isCortexA9() || isCortexA15() || isKrait(); }
bool isCortexR5() const { return ARMProcFamily == CortexR5; }
bool isKrait() const { return ARMProcFamily == Krait; }
+ /// @}
bool hasARMOps() const { return !NoARM; }
@@ -334,6 +428,7 @@ public:
bool hasNEON() const { return HasNEON; }
bool hasCrypto() const { return HasCrypto; }
bool hasCRC() const { return HasCRC; }
+ bool hasRAS() const { return HasRAS; }
bool hasVirtualization() const { return HasVirtualization; }
bool useNEONForSinglePrecisionFP() const {
return hasNEON() && UseNEONForSinglePrecisionFP;
@@ -343,6 +438,8 @@ public:
bool hasDivideInARMMode() const { return HasHardwareDivideInARM; }
bool hasT2ExtractPack() const { return HasT2ExtractPack; }
bool hasDataBarrier() const { return HasDataBarrier; }
+ bool hasV7Clrex() const { return HasV7Clrex; }
+ bool hasAcquireRelease() const { return HasAcquireRelease; }
bool hasAnyDataBarrier() const {
return HasDataBarrier || (hasV6Ops() && !isThumb());
}
@@ -353,11 +450,26 @@ public:
bool isFPOnlySP() const { return FPOnlySP; }
bool hasPerfMon() const { return HasPerfMon; }
bool hasTrustZone() const { return HasTrustZone; }
+ bool has8MSecExt() const { return Has8MSecExt; }
bool hasZeroCycleZeroing() const { return HasZeroCycleZeroing; }
+ bool isProfitableToUnpredicate() const { return IsProfitableToUnpredicate; }
+ bool hasSlowVGETLNi32() const { return HasSlowVGETLNi32; }
+ bool hasSlowVDUP32() const { return HasSlowVDUP32; }
+ bool preferVMOVSR() const { return PreferVMOVSR; }
+ bool preferISHSTBarriers() const { return PreferISHST; }
+ bool expandMLx() const { return ExpandMLx; }
+ bool hasVMLxHazards() const { return HasVMLxHazards; }
+ bool hasSlowOddRegister() const { return SlowOddRegister; }
+ bool hasSlowLoadDSubregister() const { return SlowLoadDSubregister; }
+ bool hasMuxedUnits() const { return HasMuxedUnits; }
+ bool dontWidenVMOVS() const { return DontWidenVMOVS; }
+ bool useNEONForFPMovs() const { return UseNEONForFPMovs; }
+ bool checkVLDnAccessAlignment() const { return CheckVLDnAlign; }
+ bool nonpipelinedVFP() const { return NonpipelinedVFP; }
bool prefers32BitThumb() const { return Pref32BitThumb; }
bool avoidCPSRPartialUpdate() const { return AvoidCPSRPartialUpdate; }
bool avoidMOVsShifterOperand() const { return AvoidMOVsShifterOperand; }
- bool hasRAS() const { return HasRAS; }
+ bool hasRetAddrStack() const { return HasRetAddrStack; }
bool hasMPExtension() const { return HasMPExtension; }
bool hasDSP() const { return HasDSP; }
bool useNaClTrap() const { return UseNaClTrap; }
@@ -373,6 +485,7 @@ public:
bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
bool isTargetIOS() const { return TargetTriple.isiOS(); }
bool isTargetWatchOS() const { return TargetTriple.isWatchOS(); }
+ bool isTargetWatchABI() const { return TargetTriple.isWatchABI(); }
bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
bool isTargetNetBSD() const { return TargetTriple.isOSNetBSD(); }
@@ -399,14 +512,21 @@ public:
TargetTriple.getEnvironment() == Triple::GNUEABIHF) &&
!isTargetDarwin() && !isTargetWindows();
}
+ bool isTargetMuslAEABI() const {
+ return (TargetTriple.getEnvironment() == Triple::MuslEABI ||
+ TargetTriple.getEnvironment() == Triple::MuslEABIHF) &&
+ !isTargetDarwin() && !isTargetWindows();
+ }
// ARM Targets that support EHABI exception handling standard
// Darwin uses SjLj. Other targets might need more checks.
bool isTargetEHABICompatible() const {
return (TargetTriple.getEnvironment() == Triple::EABI ||
TargetTriple.getEnvironment() == Triple::GNUEABI ||
+ TargetTriple.getEnvironment() == Triple::MuslEABI ||
TargetTriple.getEnvironment() == Triple::EABIHF ||
TargetTriple.getEnvironment() == Triple::GNUEABIHF ||
+ TargetTriple.getEnvironment() == Triple::MuslEABIHF ||
isTargetAndroid()) &&
!isTargetDarwin() && !isTargetWindows();
}
@@ -414,6 +534,7 @@ public:
bool isTargetHardFloat() const {
// FIXME: this is invalid for WindowsCE
return TargetTriple.getEnvironment() == Triple::GNUEABIHF ||
+ TargetTriple.getEnvironment() == Triple::MuslEABIHF ||
TargetTriple.getEnvironment() == Triple::EABIHF ||
isTargetWindows() || isAAPCS16_ABI();
}
@@ -436,6 +557,13 @@ public:
return isTargetMachO() ? (ReserveR9 || !HasV6Ops) : ReserveR9;
}
+ /// Returns true if the frame setup is split into two separate pushes (first
+ /// r0-r7,lr then r8-r11), principally so that the frame pointer is adjacent
+ /// to lr.
+ bool splitFramePushPop() const {
+ return isTargetMachO();
+ }
+
bool useStride4VFPs(const MachineFunction &MF) const;
bool useMovt(const MachineFunction &MF) const;
@@ -476,9 +604,20 @@ public:
/// function for this subtarget.
unsigned getStackAlignment() const { return stackAlignment; }
- /// GVIsIndirectSymbol - true if the GV will be accessed via an indirect
- /// symbol.
- bool GVIsIndirectSymbol(const GlobalValue *GV, Reloc::Model RelocM) const;
+ unsigned getMaxInterleaveFactor() const { return MaxInterleaveFactor; }
+
+ unsigned getPartialUpdateClearance() const { return PartialUpdateClearance; }
+
+ ARMLdStMultipleTiming getLdStMultipleTiming() const {
+ return LdStMultipleTiming;
+ }
+
+ int getPreISelOperandLatencyAdjustment() const {
+ return PreISelOperandLatencyAdjustment;
+ }
+
+ /// True if the GV will be accessed via an indirect symbol.
+ bool isGVIndirectSymbol(const GlobalValue *GV) const;
/// True if fast-isel is used.
bool useFastISel() const;
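
Note: much of this header change is mechanical: every feature flag gains a C++11 in-class default member initializer, which is what allowed initializeEnvironment() in ARMSubtarget.cpp (earlier in this import) to drop its per-flag reset list. The pattern in miniature (illustrative struct, not the real class):

    struct SubtargetBitsSketch {
      bool HasV8MBaselineOps = false;    // overridden by TableGen feature bits
      unsigned MaxInterleaveFactor = 1;  // tuned per ARMProcFamily
      int PreISelOperandLatencyAdjustment = 2;
      // Defaults apply at construction, so no separate reset routine is
      // needed to keep the flag list in sync.
    };
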
diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp
index fca1901dc57c..dc730a675bef 100644
--- a/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/lib/Target/ARM/ARMTargetMachine.cpp
@@ -16,6 +16,7 @@
#include "ARMTargetObjectFile.h"
#include "ARMTargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -53,6 +54,10 @@ extern "C" void LLVMInitializeARMTarget() {
RegisterTargetMachine<ARMBETargetMachine> Y(TheARMBETarget);
RegisterTargetMachine<ThumbLETargetMachine> A(TheThumbLETarget);
RegisterTargetMachine<ThumbBETargetMachine> B(TheThumbBETarget);
+
+ PassRegistry &Registry = *PassRegistry::getPassRegistry();
+ initializeARMLoadStoreOptPass(Registry);
+ initializeARMPreAllocLoadStoreOptPass(Registry);
}
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
@@ -85,7 +90,7 @@ computeTargetABI(const Triple &TT, StringRef CPU,
(TT.getOS() == llvm::Triple::UnknownOS && TT.isOSBinFormatMachO()) ||
CPU.startswith("cortex-m")) {
TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
- } else if (TT.isWatchOS()) {
+ } else if (TT.isWatchABI()) {
TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS16;
} else {
TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
@@ -99,6 +104,8 @@ computeTargetABI(const Triple &TT, StringRef CPU,
case llvm::Triple::Android:
case llvm::Triple::GNUEABI:
case llvm::Triple::GNUEABIHF:
+ case llvm::Triple::MuslEABI:
+ case llvm::Triple::MuslEABIHF:
case llvm::Triple::EABIHF:
case llvm::Triple::EABI:
TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
@@ -171,15 +178,30 @@ static std::string computeDataLayout(const Triple &TT, StringRef CPU,
return Ret;
}
-/// TargetMachine ctor - Create an ARM architecture model.
+static Reloc::Model getEffectiveRelocModel(const Triple &TT,
+ Optional<Reloc::Model> RM) {
+ if (!RM.hasValue())
+ // Default relocation model on Darwin is PIC.
+ return TT.isOSBinFormatMachO() ? Reloc::PIC_ : Reloc::Static;
+
+ // DynamicNoPIC is only used on darwin.
+ if (*RM == Reloc::DynamicNoPIC && !TT.isOSDarwin())
+ return Reloc::Static;
+
+ return *RM;
+}
+
+/// Create an ARM architecture model.
///
ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL, bool isLittle)
: LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
- CPU, FS, Options, RM, CM, OL),
+ CPU, FS, Options, getEffectiveRelocModel(TT, RM), CM,
+ OL),
TargetABI(computeTargetABI(TT, CPU, Options)),
TLOF(createTLOF(getTargetTriple())),
Subtarget(TT, CPU, FS, *this, isLittle), isLittle(isLittle) {
@@ -192,7 +214,8 @@ ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
// Default to triple-appropriate EABI
if (Options.EABIVersion == EABI::Default ||
Options.EABIVersion == EABI::Unknown) {
- if (Subtarget.isTargetGNUAEABI())
+ // musl is compatible with glibc with regard to EABI version
+ if (Subtarget.isTargetGNUAEABI() || Subtarget.isTargetMuslAEABI())
this->Options.EABIVersion = EABI::GNU;
else
this->Options.EABIVersion = EABI::EABI5;
@@ -219,7 +242,6 @@ ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
// it as a key for the subtarget since that can be the only difference
// between two functions.
bool SoftFloat =
- F.hasFnAttribute("use-soft-float") &&
F.getFnAttribute("use-soft-float").getValueAsString() == "true";
// If the soft float attribute is set on the function turn on the soft float
// subtarget feature.
@@ -248,8 +270,9 @@ void ARMTargetMachine::anchor() {}
ARMTargetMachine::ARMTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL, bool isLittle)
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL,
+ bool isLittle)
: ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
initAsmInfo();
if (!Subtarget.hasARMOps())
@@ -262,7 +285,8 @@ void ARMLETargetMachine::anchor() {}
ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
@@ -271,7 +295,8 @@ void ARMBETargetMachine::anchor() {}
ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
@@ -280,7 +305,8 @@ void ThumbTargetMachine::anchor() {}
ThumbTargetMachine::ThumbTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL, bool isLittle)
: ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
initAsmInfo();
@@ -291,7 +317,8 @@ void ThumbLETargetMachine::anchor() {}
ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
@@ -300,7 +327,8 @@ void ThumbBETargetMachine::anchor() {}
ThumbBETargetMachine::ThumbBETargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
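
Note: getEffectiveRelocModel() above follows the convention of making the relocation model optional at the TargetMachine boundary: the driver may leave it unset and each target picks its own default. A condensed restatement of the policy encoded in that function (sketch):

    // Policy mirrored from getEffectiveRelocModel above:
    //  - unset        -> PIC on Mach-O (Darwin), Static elsewhere
    //  - DynamicNoPIC -> only meaningful on Darwin, demoted to Static elsewhere
    //  - anything else is taken as-is
    Reloc::Model effectiveModel(const Triple &TT, Optional<Reloc::Model> RM) {
      if (!RM.hasValue())
        return TT.isOSBinFormatMachO() ? Reloc::PIC_ : Reloc::Static;
      if (*RM == Reloc::DynamicNoPIC && !TT.isOSDarwin())
        return Reloc::Static;
      return *RM;
    }
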
diff --git a/lib/Target/ARM/ARMTargetMachine.h b/lib/Target/ARM/ARMTargetMachine.h
index 8ad1f3dc2c34..c6b70b953162 100644
--- a/lib/Target/ARM/ARMTargetMachine.h
+++ b/lib/Target/ARM/ARMTargetMachine.h
@@ -39,7 +39,7 @@ protected:
public:
ARMBaseTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL, bool isLittle);
~ARMBaseTargetMachine() override;
@@ -58,39 +58,40 @@ public:
}
};
-/// ARMTargetMachine - ARM target machine.
+/// ARM target machine.
///
class ARMTargetMachine : public ARMBaseTargetMachine {
virtual void anchor();
public:
ARMTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
- StringRef FS, const TargetOptions &Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OL, bool isLittle);
+ StringRef FS, const TargetOptions &Options,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL, bool isLittle);
};
-/// ARMLETargetMachine - ARM little endian target machine.
+/// ARM little endian target machine.
///
class ARMLETargetMachine : public ARMTargetMachine {
void anchor() override;
public:
ARMLETargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
-/// ARMBETargetMachine - ARM big endian target machine.
+/// ARM big endian target machine.
///
class ARMBETargetMachine : public ARMTargetMachine {
void anchor() override;
public:
ARMBETargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
-/// ThumbTargetMachine - Thumb target machine.
+/// Thumb target machine.
/// Due to the way architectures are handled, this represents both
/// Thumb-1 and Thumb-2.
///
@@ -99,29 +100,29 @@ class ThumbTargetMachine : public ARMBaseTargetMachine {
public:
ThumbTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL,
- bool isLittle);
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL, bool isLittle);
};
-/// ThumbLETargetMachine - Thumb little endian target machine.
+/// Thumb little endian target machine.
///
class ThumbLETargetMachine : public ThumbTargetMachine {
void anchor() override;
public:
ThumbLETargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
-/// ThumbBETargetMachine - Thumb big endian target machine.
+/// Thumb big endian target machine.
///
class ThumbBETargetMachine : public ThumbTargetMachine {
void anchor() override;
public:
ThumbBETargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
diff --git a/lib/Target/ARM/ARMTargetObjectFile.h b/lib/Target/ARM/ARMTargetObjectFile.h
index 98e8763c4705..b1db201cb30d 100644
--- a/lib/Target/ARM/ARMTargetObjectFile.h
+++ b/lib/Target/ARM/ARMTargetObjectFile.h
@@ -21,10 +21,10 @@ class ARMElfTargetObjectFile : public TargetLoweringObjectFileELF {
protected:
const MCSection *AttributesSection;
public:
- ARMElfTargetObjectFile() :
- TargetLoweringObjectFileELF(),
- AttributesSection(nullptr)
- {}
+ ARMElfTargetObjectFile()
+ : TargetLoweringObjectFileELF(), AttributesSection(nullptr) {
+ PLTRelativeVariantKind = MCSymbolRefExpr::VK_ARM_PREL31;
+ }
void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.cpp b/lib/Target/ARM/ARMTargetTransformInfo.cpp
index c1520119ef21..13c5dc61acd9 100644
--- a/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -18,12 +18,12 @@ using namespace llvm;
int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
assert(Ty->isIntegerTy());
- unsigned Bits = Ty->getPrimitiveSizeInBits();
- if (Bits == 0 || Bits > 32)
- return 4;
+ unsigned Bits = Ty->getPrimitiveSizeInBits();
+ if (Bits == 0 || Imm.getActiveBits() >= 64)
+ return 4;
- int32_t SImmVal = Imm.getSExtValue();
- uint32_t ZImmVal = Imm.getZExtValue();
+ int64_t SImmVal = Imm.getSExtValue();
+ uint64_t ZImmVal = Imm.getZExtValue();
if (!ST->isThumb()) {
if ((SImmVal >= 0 && SImmVal < 65536) ||
(ARM_AM::getSOImmVal(ZImmVal) != -1) ||
@@ -47,6 +47,32 @@ int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
return 3;
}
+
+// Constants smaller than 256 fit in the immediate field of
+// Thumb1 instructions, so we return a zero cost; otherwise we return 1.
+int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
+ const APInt &Imm, Type *Ty) {
+ if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
+ return 0;
+
+ return 1;
+}
+
+int ARMTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+ Type *Ty) {
+ // Division by a constant can be turned into multiplication, but only if we
+ // know it's constant. So it's not so much that the immediate is cheap (it's
+ // not), but that the alternative is worse.
+ // FIXME: this is probably unneeded with GlobalISel.
+ if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
+ Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
+ Idx == 1)
+ return 0;
+
+ return getIntImmCost(Imm, Ty);
+}
+
+
int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
@@ -244,10 +270,8 @@ int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
unsigned Index) {
// Penalize inserting into an D-subregister. We end up with a three times
// lower estimated throughput on swift.
- if (ST->isSwift() &&
- Opcode == Instruction::InsertElement &&
- ValTy->isVectorTy() &&
- ValTy->getScalarSizeInBits() <= 32)
+ if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
+ ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
return 3;
if ((Opcode == Instruction::InsertElement ||
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.h b/lib/Target/ARM/ARMTargetTransformInfo.h
index 7d8d2381c983..a0ca9e648002 100644
--- a/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -54,12 +54,24 @@ public:
bool enableInterleavedAccessVectorization() { return true; }
+ /// Floating-point computation using ARMv8 AArch32 Advanced
+ /// SIMD instructions remains unchanged from ARMv7. Only AArch64 SIMD
+ /// is IEEE-754 compliant, but it's not covered in this target.
+ bool isFPVectorizationPotentiallyUnsafe() {
+ return !ST->isTargetDarwin();
+ }
+
/// \name Scalar TTI Implementations
/// @{
+ int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+ Type *Ty);
+
using BaseT::getIntImmCost;
int getIntImmCost(const APInt &Imm, Type *Ty);
+ int getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty);
+
/// @}
/// \name Vector TTI Implementations
@@ -88,10 +100,7 @@ public:
}
unsigned getMaxInterleaveFactor(unsigned VF) {
- // These are out of order CPUs:
- if (ST->isCortexA15() || ST->isSwift())
- return 2;
- return 1;
+ return ST->getMaxInterleaveFactor();
}
int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);
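
Note: the new getIntImmCost overload above returns 0 for a divisor immediate because the backend strength-reduces division by a constant into a multiply by a precomputed reciprocal, so keeping the constant visible is what enables the cheap form. A self-contained illustration of the trick for an unsigned divide by 5 (the standard magic-number method; not ARM-specific):

    #include <cassert>
    #include <cstdint>

    // 0xCCCCCCCD == ceil(2^34 / 5); for every 32-bit x,
    // (x * 0xCCCCCCCD) >> 34 == x / 5.
    uint32_t div5(uint32_t x) {
      return static_cast<uint32_t>(
          (static_cast<uint64_t>(x) * UINT64_C(0xCCCCCCCD)) >> 34);
    }

    int main() {
      for (uint64_t x = 0; x <= UINT32_MAX; x += 9973) // sparse sweep
        assert(div5(static_cast<uint32_t>(x)) == static_cast<uint32_t>(x) / 5);
      assert(div5(UINT32_MAX) == UINT32_MAX / 5);
    }
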
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index c69a741244cf..7d49302f9a96 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -20,7 +20,7 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
@@ -31,20 +31,20 @@
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserUtils.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/Support/ARMEHABI.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/COFF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
@@ -257,9 +257,15 @@ class ARMAsmParser : public MCTargetAsmParser {
bool hasThumb() const {
return getSTI().getFeatureBits()[ARM::HasV4TOps];
}
+ bool hasThumb2() const {
+ return getSTI().getFeatureBits()[ARM::FeatureThumb2];
+ }
bool hasV6Ops() const {
return getSTI().getFeatureBits()[ARM::HasV6Ops];
}
+ bool hasV6T2Ops() const {
+ return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
+ }
bool hasV6MOps() const {
return getSTI().getFeatureBits()[ARM::HasV6MOps];
}
@@ -269,6 +275,15 @@ class ARMAsmParser : public MCTargetAsmParser {
bool hasV8Ops() const {
return getSTI().getFeatureBits()[ARM::HasV8Ops];
}
+ bool hasV8MBaseline() const {
+ return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
+ }
+ bool hasV8MMainline() const {
+ return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
+ }
+ bool has8MSecExt() const {
+ return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
+ }
bool hasARM() const {
return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
}
@@ -281,12 +296,16 @@ class ARMAsmParser : public MCTargetAsmParser {
bool hasV8_1aOps() const {
return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
}
+ bool hasRAS() const {
+ return getSTI().getFeatureBits()[ARM::FeatureRAS];
+ }
void SwitchMode() {
MCSubtargetInfo &STI = copySTI();
uint64_t FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
setAvailableFeatures(FB);
}
+ void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
bool isMClass() const {
return getSTI().getFeatureBits()[ARM::FeatureMClass];
}
@@ -417,8 +436,9 @@ class ARMOperand : public MCParsedAsmOperand {
k_ShifterImmediate,
k_RotateImmediate,
k_ModifiedImmediate,
+ k_ConstantPoolImmediate,
k_BitfieldDescriptor,
- k_Token
+ k_Token,
} Kind;
SMLoc StartLoc, EndLoc, AlignmentLoc;
@@ -611,6 +631,11 @@ public:
return Imm.Val;
}
+ const MCExpr *getConstantPoolImm() const {
+ assert(isConstantPoolImm() && "Invalid access!");
+ return Imm.Val;
+ }
+
unsigned getVectorIndex() const {
assert(Kind == k_VectorIndex && "Invalid access!");
return VectorIndex.Val;
@@ -648,7 +673,27 @@ public:
bool isCCOut() const { return Kind == k_CCOut; }
bool isITMask() const { return Kind == k_ITCondMask; }
bool isITCondCode() const { return Kind == k_CondCode; }
- bool isImm() const override { return Kind == k_Immediate; }
+ bool isImm() const override {
+ return Kind == k_Immediate;
+ }
+
+ bool isARMBranchTarget() const {
+ if (!isImm()) return false;
+
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
+ return CE->getValue() % 4 == 0;
+ return true;
+ }
+
+
+ bool isThumbBranchTarget() const {
+ if (!isImm()) return false;
+
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
+ return CE->getValue() % 2 == 0;
+ return true;
+ }
+
// checks whether this operand is an unsigned offset which fits in a field
// of specified width and scaled by a specific number of bits
template<unsigned width, unsigned scale>
@@ -1036,6 +1081,7 @@ public:
return ARM_AM::getSOImmVal(Value) == -1 &&
ARM_AM::getSOImmVal(-Value) != -1;
}
+ bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
bool isPostIdxReg() const {
@@ -1183,6 +1229,20 @@ public:
return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
Val == INT32_MIN;
}
+ bool isAddrMode5FP16() const {
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup. If it is a constant, it's something else
+ // and we reject it.
+ if (isImm() && !isa<MCConstantExpr>(getImm()))
+ return true;
+ if (!isMem() || Memory.Alignment != 0) return false;
+ // Check for register offset.
+ if (Memory.OffsetRegNum) return false;
+ // Immediate offset in range [-510, 510] and a multiple of 2.
+ if (!Memory.OffsetImm) return true;
+ int64_t Val = Memory.OffsetImm->getValue();
+ return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) || Val == INT32_MIN;
+ }
bool isMemTBB() const {
if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
@@ -1203,7 +1263,7 @@ public:
}
bool isT2MemRegOffset() const {
if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
- Memory.Alignment != 0)
+ Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
return false;
// Only lsl #{0, 1, 2, 3} allowed.
if (Memory.ShiftType == ARM_AM::no_shift)
@@ -1319,6 +1379,7 @@ public:
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
@@ -1329,6 +1390,11 @@ public:
int64_t Val = Memory.OffsetImm->getValue();
return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
}
+  bool isConstPoolAsmImm() const {
+    // Delay processing of the constant-pool immediate; it will eventually be
+    // turned into a constant. Match no other operand.
+    return isConstantPoolImm();
+  }
bool isPostIdxImm8() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
@@ -1665,7 +1731,7 @@ public:
if (!CE) return false;
uint64_t Value = CE->getValue();
// i64 value with each byte being either 0 or 0xff.
- for (unsigned i = 0; i < 8; ++i)
+ for (unsigned i = 0; i < 8; ++i, Value >>= 8)
if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
return true;
}
@@ -1680,6 +1746,16 @@ public:
Inst.addOperand(MCOperand::createExpr(Expr));
}
+ void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ addExpr(Inst, getImm());
+ }
+
+ void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ addExpr(Inst, getImm());
+ }
+
void addCondCodeOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
@@ -1941,6 +2017,7 @@ public:
}
const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
assert(SR && "Unknown value type!");
Inst.addOperand(MCOperand::createExpr(SR));
return;
@@ -2145,6 +2222,28 @@ public:
Inst.addOperand(MCOperand::createImm(Val));
}
+ void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup. If it is a constant, it's something else
+ // and we reject it.
+ if (isImm()) {
+ Inst.addOperand(MCOperand::createExpr(getImm()));
+ Inst.addOperand(MCOperand::createImm(0));
+ return;
+ }
+
+ // The lower bit is always zero and as such is not encoded.
+ int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
+ ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
+ // Special case for #-0
+ if (Val == INT32_MIN) Val = 0;
+ if (Val < 0) Val = -Val;
+ Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
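+    // e.g. "[r0, #-6]": Val = 3 halfwords, AddSub = sub, so getAM5FP16Opc
+    // returns (1 << 8) | 3 = 0x103.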
+ Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
+ Inst.addOperand(MCOperand::createImm(Val));
+ }
+
void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
// If we have an immediate that's not a constant, treat it as a label
@@ -2214,6 +2313,14 @@ public:
Inst.addOperand(MCOperand::createImm(Val));
}
+ void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+    // This is the container for the immediate from which we will create the
+    // constant-pool entry.
+ addExpr(Inst, getConstantPoolImm());
+ return;
+ }
+
void addMemTBBOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
@@ -2594,6 +2701,15 @@ public:
}
static std::unique_ptr<ARMOperand>
+ CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
+ auto Op = make_unique<ARMOperand>(k_ConstantPoolImmediate);
+ Op->Imm.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
+ static std::unique_ptr<ARMOperand>
CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
Op->Bitfield.LSB = LSB;
@@ -2850,6 +2966,9 @@ void ARMOperand::print(raw_ostream &OS) const {
OS << "<mod_imm #" << ModImm.Bits << ", #"
<< ModImm.Rot << ">";
break;
+  case k_ConstantPoolImmediate:
+    OS << "<constant_pool_imm #" << *getConstantPoolImm() << ">";
+    break;
case k_BitfieldDescriptor:
OS << "<bitfield " << "lsb: " << Bitfield.LSB
<< ", width: " << Bitfield.Width << ">";
@@ -3969,6 +4088,18 @@ ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
.Case("basepri_max", 0x812)
.Case("faultmask", 0x813)
.Case("control", 0x814)
+ .Case("msplim", 0x80a)
+ .Case("psplim", 0x80b)
+ .Case("msp_ns", 0x888)
+ .Case("psp_ns", 0x889)
+ .Case("msplim_ns", 0x88a)
+ .Case("psplim_ns", 0x88b)
+ .Case("primask_ns", 0x890)
+ .Case("basepri_ns", 0x891)
+ .Case("basepri_max_ns", 0x892)
+ .Case("faultmask_ns", 0x893)
+ .Case("control_ns", 0x894)
+ .Case("sp_ns", 0x898)
.Default(~0U);
if (FlagsVal == ~0U)
@@ -3983,6 +4114,14 @@ ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
// basepri, basepri_max and faultmask are only valid for v7-M.
return MatchOperand_NoMatch;
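+  // The v8-M system registers are gated on subtarget features: msplim,
+  // psplim and the _ns aliases require the Security Extensions, and some of
+  // the _ns aliases additionally require v8-M Mainline.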
+ if (!has8MSecExt() && (FlagsVal == 0x80a || FlagsVal == 0x80b ||
+ (FlagsVal > 0x814 && FlagsVal < 0xc00)))
+ return MatchOperand_NoMatch;
+
+ if (!hasV8MMainline() && (FlagsVal == 0x88a || FlagsVal == 0x88b ||
+ (FlagsVal > 0x890 && FlagsVal <= 0x893)))
+ return MatchOperand_NoMatch;
+
Parser.Lex(); // Eat identifier token.
Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
return MatchOperand_Success;
@@ -4673,14 +4812,14 @@ void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
// classify tB as either t2B or t1B based on range of immediate operand
case ARM::tB: {
ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
- if (!op.isSignedOffset<11, 1>() && isThumbTwo())
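+      // The wide encoding (t2B) is available from v8-M Baseline up, not only
+      // on Thumb2 targets.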
+ if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
Inst.setOpcode(ARM::t2B);
break;
}
// classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
case ARM::tBcc: {
ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
- if (!op.isSignedOffset<8, 1>() && isThumbTwo())
+ if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
Inst.setOpcode(ARM::t2Bcc);
break;
}
@@ -4973,7 +5112,8 @@ ARMAsmParser::parseFPImm(OperandVector &Operands) {
// vmov.i{8|16|32|64} <dreg|qreg>, #imm
ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
bool isVmovf = TyOp.isToken() &&
- (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64");
+ (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
+ TyOp.getToken() == ".f16");
ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
Mnemonic.getToken() == "fconsts");
@@ -5144,16 +5284,12 @@ bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
S = Parser.getTok().getLoc();
if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
return Error(S, "unexpected token in operand");
-
Parser.Lex(); // Eat '='
const MCExpr *SubExprVal;
if (getParser().parseExpression(SubExprVal))
return true;
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
-
- const MCExpr *CPLoc =
- getTargetStreamer().addConstantPoolEntry(SubExprVal, S);
- Operands.push_back(ARMOperand::CreateImm(CPLoc, S, E));
+ Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
return false;
}
}
@@ -5265,7 +5401,8 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" ||
Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
- Mnemonic.startswith("vsel"))
+ Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" ||
+ Mnemonic == "bxns" || Mnemonic == "blxns")
return Mnemonic;
// First, split out any predication code. Ignore mnemonics we know aren't
@@ -5311,6 +5448,7 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
+ Mnemonic == "bxns" || Mnemonic == "blxns" ||
(Mnemonic == "movs" && isThumb()))) {
Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
CarrySetting = true;
@@ -5369,7 +5507,8 @@ void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
- (FullInst.startswith("vmull") && FullInst.endswith(".p64"))) {
+ (FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
+ Mnemonic == "vmovx" || Mnemonic == "vins") {
// These mnemonics are never predicable
CanAcceptPredicationCode = false;
} else if (!isThumb()) {
@@ -6405,6 +6544,20 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
"immediate expression for mov requires :lower16: or :upper16");
break;
}
+ case ARM::HINT:
+ case ARM::t2HINT: {
+ if (hasRAS()) {
+ // ESB is not predicable (pred must be AL)
+ unsigned Imm8 = Inst.getOperand(0).getImm();
+ unsigned Pred = Inst.getOperand(1).getImm();
+ if (Imm8 == 0x10 && Pred != ARMCC::AL)
+ return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
+ "predicable, but condition "
+ "code specified");
+ }
+ // Without the RAS extension, this behaves as any other unallocated hint.
+ break;
+ }
}
return false;
@@ -6766,6 +6919,90 @@ bool ARMAsmParser::processInstruction(MCInst &Inst,
case ARM::t2LDRSHpcrel:
Inst.setOpcode(ARM::t2LDRSHpci);
return true;
+ case ARM::LDRConstPool:
+ case ARM::tLDRConstPool:
+ case ARM::t2LDRConstPool: {
+    // The pseudo instruction "ldr rt, =immediate" is converted to a
+    // "mov rt, immediate" if the immediate is known and representable;
+    // otherwise we create a constant-pool entry that we load from.
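+    // e.g. "ldr r0, =0xff"       becomes "mov r0, #0xff"
+    //      "ldr r0, =0xffffff00" becomes "mvn r0, #0xff"
+    //      "ldr r0, =0x12345678" loads from a constant-pool entry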
+ MCInst TmpInst;
+ if (Inst.getOpcode() == ARM::LDRConstPool)
+ TmpInst.setOpcode(ARM::LDRi12);
+ else if (Inst.getOpcode() == ARM::tLDRConstPool)
+ TmpInst.setOpcode(ARM::tLDRpci);
+ else if (Inst.getOpcode() == ARM::t2LDRConstPool)
+ TmpInst.setOpcode(ARM::t2LDRpci);
+ const ARMOperand &PoolOperand =
+ static_cast<ARMOperand &>(*Operands[3]);
+ const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
+      // If SubExprVal is a constant, we may be able to use a MOV.
+ if (isa<MCConstantExpr>(SubExprVal) &&
+ Inst.getOperand(0).getReg() != ARM::PC &&
+ Inst.getOperand(0).getReg() != ARM::SP) {
+ int64_t Value =
+ (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
+ bool UseMov = true;
+ bool MovHasS = true;
+ if (Inst.getOpcode() == ARM::LDRConstPool) {
+ // ARM Constant
+ if (ARM_AM::getSOImmVal(Value) != -1) {
+ Value = ARM_AM::getSOImmVal(Value);
+ TmpInst.setOpcode(ARM::MOVi);
+ }
+ else if (ARM_AM::getSOImmVal(~Value) != -1) {
+ Value = ARM_AM::getSOImmVal(~Value);
+ TmpInst.setOpcode(ARM::MVNi);
+ }
+        else if (hasV6T2Ops() && Value >= 0 && Value < 65536) {
+ TmpInst.setOpcode(ARM::MOVi16);
+ MovHasS = false;
+ }
+ else
+ UseMov = false;
+ }
+ else {
+ // Thumb/Thumb2 Constant
+ if (hasThumb2() &&
+ ARM_AM::getT2SOImmVal(Value) != -1)
+ TmpInst.setOpcode(ARM::t2MOVi);
+ else if (hasThumb2() &&
+ ARM_AM::getT2SOImmVal(~Value) != -1) {
+ TmpInst.setOpcode(ARM::t2MVNi);
+ Value = ~Value;
+ }
+        else if (hasV8MBaseline() && Value >= 0 && Value < 65536) {
+ TmpInst.setOpcode(ARM::t2MOVi16);
+ MovHasS = false;
+ }
+ else
+ UseMov = false;
+ }
+ if (UseMov) {
+ TmpInst.addOperand(Inst.getOperand(0)); // Rt
+ TmpInst.addOperand(MCOperand::createImm(Value)); // Immediate
+ TmpInst.addOperand(Inst.getOperand(2)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ if (MovHasS)
+ TmpInst.addOperand(MCOperand::createReg(0)); // S
+ Inst = TmpInst;
+ return true;
+ }
+ }
+      // No opportunity to use MOV/MVN; create a constant-pool entry.
+ const MCExpr *CPLoc =
+ getTargetStreamer().addConstantPoolEntry(SubExprVal,
+ PoolOperand.getStartLoc());
+ TmpInst.addOperand(Inst.getOperand(0)); // Rt
+ TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
+ if (TmpInst.getOpcode() == ARM::LDRi12)
+ TmpInst.addOperand(MCOperand::createImm(0)); // unused offset
+ TmpInst.addOperand(Inst.getOperand(2)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ Inst = TmpInst;
+ return true;
+ }
// Handle NEON VST complex aliases.
case ARM::VST1LNdWB_register_Asm_8:
case ARM::VST1LNdWB_register_Asm_16:
@@ -9031,6 +9268,31 @@ bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
return false;
}
+// After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
+// before, if supported by the new target, or emit mapping symbols for the mode
+// switch.
+void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
+ if (WasThumb != isThumb()) {
+ if (WasThumb && hasThumb()) {
+ // Stay in Thumb mode
+ SwitchMode();
+ } else if (!WasThumb && hasARM()) {
+ // Stay in ARM mode
+ SwitchMode();
+ } else {
+ // Mode switch forced, because the new arch doesn't support the old mode.
+ getParser().getStreamer().EmitAssemblerFlag(isThumb() ? MCAF_Code16
+ : MCAF_Code32);
+      // Warn about the implicit mode switch. GAS does not switch modes here,
+ // but instead stays in the old mode, reporting an error on any following
+ // instructions as the mode does not exist on the target.
+ Warning(Loc, Twine("new target does not support ") +
+ (WasThumb ? "thumb" : "arm") + " mode, switching to " +
+ (!WasThumb ? "thumb" : "arm") + " mode");
+ }
+ }
+}
+
/// parseDirectiveArch
/// ::= .arch token
bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
@@ -9043,10 +9305,12 @@ bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
return false;
}
+ bool WasThumb = isThumb();
Triple T;
MCSubtargetInfo &STI = copySTI();
STI.setDefaultFeatures("", ("+" + ARM::getArchName(ID)).str());
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
+ FixModeAfterArchChange(WasThumb, L);
getTargetStreamer().emitArch(ID);
return false;
@@ -9177,9 +9441,11 @@ bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
return false;
}
+ bool WasThumb = isThumb();
MCSubtargetInfo &STI = copySTI();
STI.setDefaultFeatures(CPU, "");
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
+ FixModeAfterArchChange(WasThumb, L);
return false;
}
@@ -9834,7 +10100,7 @@ bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
StringRef Arch = Parser.getTok().getString();
SMLoc ArchLoc = Parser.getTok().getLoc();
- getLexer().Lex();
+ Lex();
unsigned ID = ARM::parseArch(Arch);
@@ -9863,7 +10129,9 @@ bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
return true;
// '.align' is target specifically handled to mean 2**2 byte alignment.
- if (getStreamer().getCurrentSection().first->UseCodeAlign())
+ const MCSection *Section = getStreamer().getCurrentSection().first;
+ assert(Section && "must have section to emit alignment");
+ if (Section->UseCodeAlign())
getStreamer().EmitCodeAlignment(4, 0);
else
getStreamer().EmitValueToAlignment(4, 0, 1, 0);
@@ -9933,6 +10201,7 @@ static const struct {
// FIXME: Only available in A-class, isel not predicated
{ ARM::AEK_VIRT, Feature_HasV7, {ARM::FeatureVirtualization} },
{ ARM::AEK_FP16, Feature_HasV8_2a, {ARM::FeatureFPARMv8, ARM::FeatureFullFP16} },
+ { ARM::AEK_RAS, Feature_HasV8, {ARM::FeatureRAS} },
// FIXME: Unsupported extensions.
{ ARM::AEK_OS, Feature_None, {} },
{ ARM::AEK_IWMMXT, Feature_None, {} },
@@ -9954,7 +10223,7 @@ bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
StringRef Name = Parser.getTok().getString();
SMLoc ExtLoc = Parser.getTok().getLoc();
- getLexer().Lex();
+ Lex();
bool EnableFeature = true;
if (Name.startswith_lower("no")) {
diff --git a/lib/Target/ARM/AsmParser/Makefile b/lib/Target/ARM/AsmParser/Makefile
deleted file mode 100644
index 841516fffbd5..000000000000
--- a/lib/Target/ARM/AsmParser/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/ARM/AsmParser/Makefile -------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMARMAsmParser
-
-# Hack: we need to include 'main' ARM target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index e63defed2288..3196a57ccc3e 100644
--- a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMMCExpr.h"
@@ -210,6 +210,8 @@ static DecodeStatus DecodeArmMOVTWInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeSMLAInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeHINTInstruction(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
static DecodeStatus DecodeCPSInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeTSTInstruction(MCInst &Inst, unsigned Insn,
@@ -222,6 +224,8 @@ static DecodeStatus DecodeAddrModeImm12Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeAddrMode5Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeAddrMode5FP16Operand(MCInst &Inst, unsigned Val,
+ uint64_t Address, const void *Decoder);
static DecodeStatus DecodeAddrMode7Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeT2BInstruction(MCInst &Inst, unsigned Insn,
@@ -391,8 +395,8 @@ static DecodeStatus DecodeT2ShifterImmOperand(MCInst &Inst, unsigned Val,
static DecodeStatus DecodeLDR(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeMRRC2(llvm::MCInst &Inst, unsigned Val,
- uint64_t Address, const void *Decoder);
+static DecodeStatus DecoderForMRRC2AndMCRR2(llvm::MCInst &Inst, unsigned Val,
+ uint64_t Address, const void *Decoder);
#include "ARMGenDisassemblerTables.inc"
static MCDisassembler *createARMDisassembler(const Target &T,
@@ -590,6 +594,8 @@ MCDisassembler::DecodeStatus
ThumbDisassembler::AddThumbPredicate(MCInst &MI) const {
MCDisassembler::DecodeStatus S = Success;
+ const FeatureBitset &FeatureBits = getSubtargetInfo().getFeatureBits();
+
// A few instructions actually have predicates encoded in them. Don't
// try to overwrite it if we're seeing one of those.
switch (MI.getOpcode()) {
@@ -610,6 +616,10 @@ ThumbDisassembler::AddThumbPredicate(MCInst &MI) const {
else
return Success;
break;
+ case ARM::t2HINT:
+ if (MI.getOperand(0).getImm() == 0x10 && (FeatureBits[ARM::FeatureRAS]) != 0)
+ S = SoftFail;
+ break;
case ARM::tB:
case ARM::t2B:
case ARM::t2TBB:
@@ -1941,6 +1951,29 @@ static DecodeStatus DecodeMemMultipleWritebackInstruction(MCInst &Inst,
return S;
}
+// Check for UNPREDICTABLE predicated ESB instruction
+static DecodeStatus DecodeHINTInstruction(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ unsigned pred = fieldFromInstruction(Insn, 28, 4);
+ unsigned imm8 = fieldFromInstruction(Insn, 0, 8);
+ const MCDisassembler *Dis = static_cast<const MCDisassembler*>(Decoder);
+ const FeatureBitset &FeatureBits = Dis->getSubtargetInfo().getFeatureBits();
+
+ DecodeStatus S = MCDisassembler::Success;
+
+ Inst.addOperand(MCOperand::createImm(imm8));
+
+ if (!Check(S, DecodePredicateOperand(Inst, pred, Address, Decoder)))
+ return MCDisassembler::Fail;
+
+  // ESB is UNPREDICTABLE if pred != AL. Without the RAS extension, it is a
+  // NOP, so all predicates should be allowed.
+ if (imm8 == 0x10 && pred != 0xe && ((FeatureBits[ARM::FeatureRAS]) != 0))
+ S = MCDisassembler::SoftFail;
+
+ return S;
+}
+
static DecodeStatus DecodeCPSInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
unsigned imod = fieldFromInstruction(Insn, 18, 2);
@@ -2183,6 +2216,7 @@ static DecodeStatus DecodeAddrMode5Operand(MCInst &Inst, unsigned Val,
DecodeStatus S = MCDisassembler::Success;
unsigned Rn = fieldFromInstruction(Val, 9, 4);
+ // U == 1 to add imm, 0 to subtract it.
unsigned U = fieldFromInstruction(Val, 8, 1);
unsigned imm = fieldFromInstruction(Val, 0, 8);
@@ -2197,6 +2231,26 @@ static DecodeStatus DecodeAddrMode5Operand(MCInst &Inst, unsigned Val,
return S;
}
+static DecodeStatus DecodeAddrMode5FP16Operand(MCInst &Inst, unsigned Val,
+ uint64_t Address, const void *Decoder) {
+ DecodeStatus S = MCDisassembler::Success;
+
+ unsigned Rn = fieldFromInstruction(Val, 9, 4);
+ // U == 1 to add imm, 0 to subtract it.
+ unsigned U = fieldFromInstruction(Val, 8, 1);
+ unsigned imm = fieldFromInstruction(Val, 0, 8);
+
+ if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
+ return MCDisassembler::Fail;
+
+ if (U)
+ Inst.addOperand(MCOperand::createImm(ARM_AM::getAM5FP16Opc(ARM_AM::add, imm)));
+ else
+ Inst.addOperand(MCOperand::createImm(ARM_AM::getAM5FP16Opc(ARM_AM::sub, imm)));
+
+ return S;
+}
+
static DecodeStatus DecodeAddrMode7Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
return DecodeGPRRegisterClass(Inst, Val, Address, Decoder);
@@ -4096,6 +4150,24 @@ static DecodeStatus DecodeMSRMask(MCInst &Inst, unsigned Val,
// Values basepri, basepri_max and faultmask are only valid for v7m.
return MCDisassembler::Fail;
break;
+ case 0x8a: // msplim_ns
+ case 0x8b: // psplim_ns
+ case 0x91: // basepri_ns
+ case 0x92: // basepri_max_ns
+ case 0x93: // faultmask_ns
+ if (!(FeatureBits[ARM::HasV8MMainlineOps]))
+ return MCDisassembler::Fail;
+ // fall through
+ case 10: // msplim
+ case 11: // psplim
+ case 0x88: // msp_ns
+ case 0x89: // psp_ns
+ case 0x90: // primask_ns
+ case 0x94: // control_ns
+ case 0x98: // sp_ns
+ if (!(FeatureBits[ARM::Feature8MSecExt]))
+ return MCDisassembler::Fail;
+ break;
default:
return MCDisassembler::Fail;
}
@@ -5193,8 +5265,8 @@ static DecodeStatus DecodeLDR(MCInst &Inst, unsigned Val,
return S;
}
-static DecodeStatus DecodeMRRC2(llvm::MCInst &Inst, unsigned Val,
- uint64_t Address, const void *Decoder) {
+static DecodeStatus DecoderForMRRC2AndMCRR2(llvm::MCInst &Inst, unsigned Val,
+ uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -5210,12 +5282,30 @@ static DecodeStatus DecodeMRRC2(llvm::MCInst &Inst, unsigned Val,
if (Rt == Rt2)
S = MCDisassembler::SoftFail;
+  // We have to check whether the instruction is MRRC2 or MCRR2 when
+  // constructing the operands for Inst. MRRC2 stores to two registers, so
+  // its tablegen description has two outputs, whereas MCRR2 doesn't store
+  // to any registers and lists all of its operands as inputs. Therefore
+  // the operand order for MRRC2 is [Rt, Rt2, cop, opc1, CRm] and for
+  // MCRR2 it is [cop, opc1, Rt, Rt2, CRm].
+
+ if (Inst.getOpcode() == ARM::MRRC2) {
+ if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt2, Address, Decoder)))
+ return MCDisassembler::Fail;
+ }
Inst.addOperand(MCOperand::createImm(cop));
Inst.addOperand(MCOperand::createImm(opc1));
- if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt, Address, Decoder)))
- return MCDisassembler::Fail;
- if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt2, Address, Decoder)))
- return MCDisassembler::Fail;
+ if (Inst.getOpcode() == ARM::MCRR2) {
+ if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt2, Address, Decoder)))
+ return MCDisassembler::Fail;
+ }
Inst.addOperand(MCOperand::createImm(CRm));
return S;
diff --git a/lib/Target/ARM/Disassembler/Makefile b/lib/Target/ARM/Disassembler/Makefile
deleted file mode 100644
index 031b6aca5a48..000000000000
--- a/lib/Target/ARM/Disassembler/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/ARM/Disassembler/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMARMDisassembler
-
-# Hack: we need to include 'main' arm target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
index 33fc85af9b19..e81bb77dbdfc 100644
--- a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
+++ b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
@@ -25,6 +25,7 @@ using namespace llvm;
#define DEBUG_TYPE "asm-printer"
+#define PRINT_ALIAS_INSTR
#include "ARMGenAsmWriter.inc"
/// translateShiftImm - Convert shift immediate from 0-31 to 1-32 for printing.
@@ -73,43 +74,6 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
switch (Opcode) {
- // Check for HINT instructions w/ canonical names.
- case ARM::HINT:
- case ARM::tHINT:
- case ARM::t2HINT:
- switch (MI->getOperand(0).getImm()) {
- case 0:
- O << "\tnop";
- break;
- case 1:
- O << "\tyield";
- break;
- case 2:
- O << "\twfe";
- break;
- case 3:
- O << "\twfi";
- break;
- case 4:
- O << "\tsev";
- break;
- case 5:
- if (STI.getFeatureBits()[ARM::HasV8Ops]) {
- O << "\tsevl";
- break;
- } // Fallthrough for non-v8
- default:
- // Anything else should just print normally.
- printInstruction(MI, STI, O);
- printAnnotation(O, Annot);
- return;
- }
- printPredicateOperand(MI, 1, STI, O);
- if (Opcode == ARM::t2HINT)
- O << ".w";
- printAnnotation(O, Annot);
- return;
-
// Check for MOVs and print canonical forms, instead.
case ARM::MOVsr: {
// FIXME: Thumb variants?
@@ -297,23 +261,11 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
}
break;
}
- // B9.3.3 ERET (Thumb)
- // For a target that has Virtualization Extensions, ERET is the preferred
- // disassembly of SUBS PC, LR, #0
- case ARM::t2SUBS_PC_LR: {
- if (MI->getNumOperands() == 3 && MI->getOperand(0).isImm() &&
- MI->getOperand(0).getImm() == 0 &&
- STI.getFeatureBits()[ARM::FeatureVirtualization]) {
- O << "\teret";
- printPredicateOperand(MI, 1, STI, O);
- printAnnotation(O, Annot);
- return;
- }
- break;
- }
}
- printInstruction(MI, STI, O);
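+  // Try the TableGen-generated alias printer first (enabled by the
+  // PRINT_ALIAS_INSTR define above); fall back to the default instruction
+  // printer when no alias matches.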
+ if (!printAliasInstr(MI, STI, O))
+ printInstruction(MI, STI, O);
+
printAnnotation(O, Annot);
}
@@ -645,6 +597,34 @@ void ARMInstPrinter::printAddrMode5Operand(const MCInst *MI, unsigned OpNum,
O << "]" << markup(">");
}
+template <bool AlwaysPrintImm0>
+void ARMInstPrinter::printAddrMode5FP16Operand(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ const MCOperand &MO1 = MI->getOperand(OpNum);
+ const MCOperand &MO2 = MI->getOperand(OpNum+1);
+
+ if (!MO1.isReg()) { // FIXME: This is for CP entries, but isn't right.
+ printOperand(MI, OpNum, STI, O);
+ return;
+ }
+
+ O << markup("<mem:") << "[";
+ printRegName(O, MO1.getReg());
+
+ unsigned ImmOffs = ARM_AM::getAM5FP16Offset(MO2.getImm());
+ unsigned Op = ARM_AM::getAM5FP16Op(MO2.getImm());
+ if (AlwaysPrintImm0 || ImmOffs || Op == ARM_AM::sub) {
+ O << ", "
+ << markup("<imm:")
+ << "#"
+ << ARM_AM::getAddrOpcStr(ARM_AM::getAM5FP16Op(MO2.getImm()))
+ << ImmOffs * 2
+ << markup(">");
+ }
+ O << "]" << markup(">");
+}
+
void ARMInstPrinter::printAddrMode6Operand(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
@@ -901,6 +881,42 @@ void ARMInstPrinter::printMSRMaskOperand(const MCInst *MI, unsigned OpNum,
case 20:
O << "control";
return;
+ case 10:
+ O << "msplim";
+ return;
+ case 11:
+ O << "psplim";
+ return;
+ case 0x88:
+ O << "msp_ns";
+ return;
+ case 0x89:
+ O << "psp_ns";
+ return;
+ case 0x8a:
+ O << "msplim_ns";
+ return;
+ case 0x8b:
+ O << "psplim_ns";
+ return;
+ case 0x90:
+ O << "primask_ns";
+ return;
+ case 0x91:
+ O << "basepri_ns";
+ return;
+ case 0x92:
+ O << "basepri_max_ns";
+ return;
+ case 0x93:
+ O << "faultmask_ns";
+ return;
+ case 0x94:
+ O << "control_ns";
+ return;
+ case 0x98:
+ O << "sp_ns";
+ return;
}
}
diff --git a/lib/Target/ARM/InstPrinter/ARMInstPrinter.h b/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
index 52f7115f0558..9d80eed84dc2 100644
--- a/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
+++ b/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
@@ -30,6 +30,12 @@ public:
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, const MCSubtargetInfo &STI,
raw_ostream &O);
+ virtual bool printAliasInstr(const MCInst *MI, const MCSubtargetInfo &STI,
+ raw_ostream &O);
+ virtual void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx,
+ unsigned PrintMethodIdx,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
void printOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
@@ -71,6 +77,9 @@ public:
template <bool AlwaysPrintImm0>
void printAddrMode5Operand(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
+ template <bool AlwaysPrintImm0>
+ void printAddrMode5FP16Operand(const MCInst *MI, unsigned OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &O);
void printAddrMode6Operand(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
void printAddrMode7Operand(const MCInst *MI, unsigned OpNum,
diff --git a/lib/Target/ARM/InstPrinter/Makefile b/lib/Target/ARM/InstPrinter/Makefile
deleted file mode 100644
index 65d372e44b88..000000000000
--- a/lib/Target/ARM/InstPrinter/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/ARM/AsmPrinter/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMARMAsmPrinter
-
-# Hack: we need to include 'main' arm target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h b/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h
index b03cada9a641..3959eab966a8 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h
@@ -486,7 +486,7 @@ namespace ARM_AM {
// addrmode5 := reg +/- imm8*4
//
// The first operand is always a Reg. The second operand encodes the
- // operation in bit 8 and the immediate in bits 0-7.
+ // operation (add or subtract) in bit 8 and the immediate in bits 0-7.
/// getAM5Opc - This function encodes the addrmode5 opc field.
static inline unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset) {
@@ -501,6 +501,29 @@ namespace ARM_AM {
}
//===--------------------------------------------------------------------===//
+ // Addressing Mode #5 FP16
+ //===--------------------------------------------------------------------===//
+ //
+ // This is used for coprocessor instructions, such as 16-bit FP load/stores.
+ //
+ // addrmode5fp16 := reg +/- imm8*2
+ //
+ // The first operand is always a Reg. The second operand encodes the
+ // operation (add or subtract) in bit 8 and the immediate in bits 0-7.
+
+ /// getAM5FP16Opc - This function encodes the addrmode5fp16 opc field.
+ static inline unsigned getAM5FP16Opc(AddrOpc Opc, unsigned char Offset) {
+ bool isSub = Opc == sub;
+ return ((int)isSub << 8) | Offset;
+ }
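+  // e.g. getAM5FP16Opc(sub, 3) == 0x103; getAM5FP16Offset/getAM5FP16Op
+  // below decode it back to (3, sub).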
+ static inline unsigned char getAM5FP16Offset(unsigned AM5Opc) {
+ return AM5Opc & 0xFF;
+ }
+ static inline AddrOpc getAM5FP16Op(unsigned AM5Opc) {
+ return ((AM5Opc >> 8) & 1) ? sub : add;
+ }
+
+ //===--------------------------------------------------------------------===//
// Addressing Mode #6
//===--------------------------------------------------------------------===//
//
@@ -650,6 +673,32 @@ namespace ARM_AM {
return FPUnion.F;
}
+ /// getFP16Imm - Return an 8-bit floating-point version of the 16-bit
+ /// floating-point value. If the value cannot be represented as an 8-bit
+ /// floating-point value, then return -1.
+ static inline int getFP16Imm(const APInt &Imm) {
+ uint32_t Sign = Imm.lshr(15).getZExtValue() & 1;
+ int32_t Exp = (Imm.lshr(10).getSExtValue() & 0x1f) - 15; // -14 to 15
+ int64_t Mantissa = Imm.getZExtValue() & 0x3ff; // 10 bits
+
+ // We can handle 4 bits of mantissa.
+ // mantissa = (16+UInt(e:f:g:h))/16.
+ if (Mantissa & 0x3f)
+ return -1;
+ Mantissa >>= 6;
+
+ // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
+ if (Exp < -3 || Exp > 4)
+ return -1;
+ Exp = ((Exp+3) & 0x7) ^ 4;
+
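+    // e.g. half-precision 1.0 (0x3C00): Sign = 0, Exp = 0, Mantissa = 0,
+    // giving (0 << 7) | (7 << 4) | 0 = 0x70.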
+ return ((int)Sign << 7) | (Exp << 4) | Mantissa;
+ }
+
+ static inline int getFP16Imm(const APFloat &FPImm) {
+ return getFP16Imm(FPImm.bitcastToAPInt());
+ }
+
/// getFP32Imm - Return an 8-bit floating-point version of the 32-bit
/// floating-point value. If the value cannot be represented as an 8-bit
/// floating-point value, then return -1.
diff --git a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index fa52c9354c17..0fc758201d47 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -46,6 +46,7 @@ public:
: MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
/*HasRelocationAddend*/ false) {}
};
+} // end anonymous namespace
const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
@@ -62,6 +63,10 @@ const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
{"fixup_t2_pcrel_10", 0, 32,
MCFixupKindInfo::FKF_IsPCRel |
MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
+ {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"fixup_t2_pcrel_9", 0, 32,
+ MCFixupKindInfo::FKF_IsPCRel |
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
{"fixup_thumb_adr_pcrel_10", 0, 8,
MCFixupKindInfo::FKF_IsPCRel |
MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
@@ -78,7 +83,9 @@ const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
{"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
{"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
{"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_arm_thumb_blx", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"fixup_arm_thumb_blx", 0, 32,
+ MCFixupKindInfo::FKF_IsPCRel |
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
{"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
{"fixup_arm_thumb_cp", 0, 8,
MCFixupKindInfo::FKF_IsPCRel |
@@ -90,6 +97,7 @@ const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
{"fixup_arm_movw_lo16", 0, 20, 0},
{"fixup_t2_movt_hi16", 0, 20, 0},
{"fixup_t2_movw_lo16", 0, 20, 0},
+ {"fixup_arm_mod_imm", 0, 12, 0},
};
const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
// This table *must* be in the order that the fixup_* kinds are defined in
@@ -105,6 +113,10 @@ const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
{"fixup_t2_pcrel_10", 0, 32,
MCFixupKindInfo::FKF_IsPCRel |
MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
+ {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"fixup_t2_pcrel_9", 0, 32,
+ MCFixupKindInfo::FKF_IsPCRel |
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
{"fixup_thumb_adr_pcrel_10", 8, 8,
MCFixupKindInfo::FKF_IsPCRel |
MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
@@ -121,7 +133,9 @@ const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
{"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
{"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
{"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_arm_thumb_blx", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"fixup_arm_thumb_blx", 0, 32,
+ MCFixupKindInfo::FKF_IsPCRel |
+ MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
{"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
{"fixup_arm_thumb_cp", 8, 8,
MCFixupKindInfo::FKF_IsPCRel |
@@ -133,6 +147,7 @@ const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
{"fixup_arm_movw_lo16", 12, 20, 0},
{"fixup_t2_movt_hi16", 12, 20, 0},
{"fixup_t2_movw_lo16", 12, 20, 0},
+ {"fixup_arm_mod_imm", 20, 12, 0},
};
if (Kind < FirstTargetFixupKind)
@@ -155,10 +170,10 @@ void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
break;
}
}
-} // end anonymous namespace
unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op) const {
bool HasThumb2 = STI->getFeatureBits()[ARM::FeatureThumb2];
+ bool HasV8MBaselineOps = STI->getFeatureBits()[ARM::HasV8MBaselineOps];
switch (Op) {
default:
@@ -170,7 +185,7 @@ unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op) const {
case ARM::tADR:
return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
case ARM::tB:
- return HasThumb2 ? (unsigned)ARM::t2B : Op;
+ return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
case ARM::tCBZ:
return ARM::tHINT;
case ARM::tCBNZ:
@@ -243,7 +258,9 @@ bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
return reasonForFixupRelaxation(Fixup, Value);
}
-void ARMAsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
+void ARMAsmBackend::relaxInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI,
+ MCInst &Res) const {
unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());
// Sanity check w/ diagnostic if we get here w/ a bogus instruction.
@@ -449,7 +466,7 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
// Offset by 8 just as above.
if (const MCSymbolRefExpr *SRE =
dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
- if (SRE->getKind() == MCSymbolRefExpr::VK_ARM_TLSCALL)
+ if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
return 0;
return 0xffffff & ((Value - 8) >> 2);
case ARM::fixup_t2_uncondbranch: {
@@ -524,10 +541,15 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
//
// Note that the halfwords are stored high first, low second; so we need
// to transpose the fixup value here to map properly.
- uint32_t offset = (Value - 2) >> 2;
+ if (Ctx && Value % 4 != 0) {
+ Ctx->reportError(Fixup.getLoc(), "misaligned ARM call destination");
+ return 0;
+ }
+
+ uint32_t offset = (Value - 4) >> 2;
if (const MCSymbolRefExpr *SRE =
dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
- if (SRE->getKind() == MCSymbolRefExpr::VK_ARM_TLSCALL)
+ if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
offset = 0;
uint32_t signBit = (offset & 0x400000) >> 22;
uint32_t I1Bit = (offset & 0x200000) >> 21;
@@ -563,7 +585,8 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
}
case ARM::fixup_arm_thumb_br:
// Offset by 4 and don't encode the lower bit, which is always 0.
- if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2]) {
+ if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2] &&
+ !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
if (FixupDiagnostic) {
Ctx->reportError(Fixup.getLoc(), FixupDiagnostic);
@@ -624,6 +647,44 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
return Value;
}
+ case ARM::fixup_arm_pcrel_9:
+ Value = Value - 4; // ARM fixups offset by an additional word and don't
+ // need to adjust for the half-word ordering.
+ // Fall through.
+ case ARM::fixup_t2_pcrel_9: {
+ // Offset by 4, adjusted by two due to the half-word ordering of thumb.
+ Value = Value - 4;
+ bool isAdd = true;
+ if ((int64_t)Value < 0) {
+ Value = -Value;
+ isAdd = false;
+ }
+ // These values don't encode the low bit since it's always zero.
+ if (Ctx && (Value & 1)) {
+ Ctx->reportError(Fixup.getLoc(), "invalid value for this fixup");
+ return 0;
+ }
+ Value >>= 1;
+ if (Ctx && Value >= 256) {
+ Ctx->reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
+ return 0;
+ }
+ Value |= isAdd << 23;
+
+ // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
+ // swapped.
+ if (Kind == ARM::fixup_t2_pcrel_9)
+ return swapHalfWords(Value, IsLittleEndian);
+
+ return Value;
+ }
+ case ARM::fixup_arm_mod_imm:
+ Value = ARM_AM::getSOImmVal(Value);
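+    // getSOImmVal returns -1 when the value has no 8-bit rotated-immediate
+    // encoding; assigned to the unsigned Value, that also fails the range
+    // check below.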
+ if (Ctx && Value >> 12) {
+ Ctx->reportError(Fixup.getLoc(), "out of range immediate fixup value");
+ return 0;
+ }
+ return Value;
}
}
@@ -690,11 +751,13 @@ static unsigned getFixupKindNumBytes(unsigned Kind) {
case FK_Data_2:
case ARM::fixup_arm_thumb_br:
case ARM::fixup_arm_thumb_cb:
+ case ARM::fixup_arm_mod_imm:
return 2;
case ARM::fixup_arm_pcrel_10_unscaled:
case ARM::fixup_arm_ldst_pcrel_12:
case ARM::fixup_arm_pcrel_10:
+ case ARM::fixup_arm_pcrel_9:
case ARM::fixup_arm_adr_pcrel_12:
case ARM::fixup_arm_uncondbl:
case ARM::fixup_arm_condbl:
@@ -708,6 +771,7 @@ static unsigned getFixupKindNumBytes(unsigned Kind) {
case ARM::fixup_t2_condbranch:
case ARM::fixup_t2_uncondbranch:
case ARM::fixup_t2_pcrel_10:
+ case ARM::fixup_t2_pcrel_9:
case ARM::fixup_t2_adr_pcrel_12:
case ARM::fixup_arm_thumb_bl:
case ARM::fixup_arm_thumb_blx:
@@ -766,6 +830,7 @@ static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
case ARM::fixup_arm_movw_lo16:
case ARM::fixup_t2_movt_hi16:
case ARM::fixup_t2_movw_lo16:
+ case ARM::fixup_arm_mod_imm:
// Instruction size is 4 bytes.
return 4;
}
diff --git a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h
index 28a62132a419..84caaacc47d3 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h
@@ -11,12 +11,12 @@
#define LLVM_LIB_TARGET_ARM_ARMASMBACKEND_H
#include "MCTargetDesc/ARMFixupKinds.h"
+#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/TargetRegistry.h"
-using namespace llvm;
-
-namespace {
+namespace llvm {
class ARMAsmBackend : public MCAsmBackend {
const MCSubtargetInfo *STI;
@@ -63,7 +63,8 @@ public:
const MCRelaxableFragment *DF,
const MCAsmLayout &Layout) const override;
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const override;
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override;
bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
@@ -74,6 +75,6 @@ public:
void setIsThumb(bool it) { isThumbMode = it; }
bool isLittle() const { return IsLittleEndian; }
};
-} // end anonymous namespace
+} // end namespace llvm
#endif
diff --git a/lib/Target/ARM/MCTargetDesc/ARMAsmBackendDarwin.h b/lib/Target/ARM/MCTargetDesc/ARMAsmBackendDarwin.h
index 995dd0fe08ee..09dc0173ade6 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMAsmBackendDarwin.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMAsmBackendDarwin.h
@@ -10,11 +10,10 @@
#ifndef LLVM_LIB_TARGET_ARM_ARMASMBACKENDDARWIN_H
#define LLVM_LIB_TARGET_ARM_ARMASMBACKENDDARWIN_H
+#include "ARMAsmBackend.h"
#include "llvm/Support/MachO.h"
-using namespace llvm;
-
-namespace {
+namespace llvm {
class ARMAsmBackendDarwin : public ARMAsmBackend {
const MCRegisterInfo &MRI;
public:
@@ -22,7 +21,6 @@ public:
ARMAsmBackendDarwin(const Target &T, const Triple &TT,
const MCRegisterInfo &MRI, MachO::CPUSubTypeARM st)
: ARMAsmBackend(T, TT, /* IsLittleEndian */ true), MRI(MRI), Subtype(st) {
- HasDataInCodeSupport = true;
}
MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
@@ -33,6 +31,6 @@ public:
uint32_t generateCompactUnwindEncoding(
ArrayRef<MCCFIInstruction> Instrs) const override;
};
-}
+} // end namespace llvm
#endif
diff --git a/lib/Target/ARM/MCTargetDesc/ARMAsmBackendELF.h b/lib/Target/ARM/MCTargetDesc/ARMAsmBackendELF.h
index 68b12edd089e..748f915be17b 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMAsmBackendELF.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMAsmBackendELF.h
@@ -10,7 +10,10 @@
#ifndef LLVM_LIB_TARGET_ARM_ELFARMASMBACKEND_H
#define LLVM_LIB_TARGET_ARM_ELFARMASMBACKEND_H
+#include "ARMAsmBackend.h"
+#include "MCTargetDesc/ARMMCTargetDesc.h"
using namespace llvm;
+
namespace {
class ARMAsmBackendELF : public ARMAsmBackend {
public:
diff --git a/lib/Target/ARM/MCTargetDesc/ARMAsmBackendWinCOFF.h b/lib/Target/ARM/MCTargetDesc/ARMAsmBackendWinCOFF.h
index 170f59a4c905..2a375be49a83 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMAsmBackendWinCOFF.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMAsmBackendWinCOFF.h
@@ -10,6 +10,7 @@
#ifndef LLVM_LIB_TARGET_ARM_ARMASMBACKENDWINCOFF_H
#define LLVM_LIB_TARGET_ARM_ARMASMBACKENDWINCOFF_H
+#include "ARMAsmBackend.h"
using namespace llvm;
namespace {
diff --git a/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h b/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
index 4289a73e9d6b..088b4205ed62 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
@@ -289,18 +289,20 @@ namespace ARMII {
/// higher 16 bit of the address. Used only via movt instruction.
MO_HI16 = 0x2,
- /// MO_PLT - On a symbol operand, this represents an ELF PLT reference on a
- /// call operand.
- MO_PLT = 0x3,
-
/// MO_OPTION_MASK - Most flags are mutually exclusive; this mask selects
/// just that part of the flag set.
- MO_OPTION_MASK = 0x3f,
+ MO_OPTION_MASK = 0x1f,
/// MO_DLLIMPORT - On a symbol operand, this represents that the reference
/// to the symbol is for an import stub. This is used for DLL import
/// storage class indication on Windows.
- MO_DLLIMPORT = 0x40,
+ MO_DLLIMPORT = 0x20,
+
+ /// MO_SECREL - On a symbol operand this indicates that the immediate is
+ /// the offset from beginning of section.
+ ///
+ /// This is the TLS offset for the COFF/Windows TLS mechanism.
+ MO_SECREL = 0x40,
/// MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it
/// represents a symbol which, if indirect, will get special Darwin mangling
diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp b/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
index 52eba8be288f..4118fe8e8cdb 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
@@ -34,8 +34,8 @@ namespace {
~ARMELFObjectWriter() override;
- unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel) const override;
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
bool needsRelocateWithSymbol(const MCSymbol &Sym,
unsigned Type) const override;
@@ -67,7 +67,7 @@ bool ARMELFObjectWriter::needsRelocateWithSymbol(const MCSymbol &Sym,
// Need to examine the Fixup when determining whether to
// emit the relocation as an explicit symbol or as a section relative
// offset
-unsigned ARMELFObjectWriter::GetRelocType(const MCValue &Target,
+unsigned ARMELFObjectWriter::getRelocType(MCContext &Ctx, const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const {
return GetRelocTypeInner(Target, Fixup, IsPCRel);
@@ -98,6 +98,9 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
case MCSymbolRefExpr::VK_ARM_GOT_PREL:
Type = ELF::R_ARM_GOT_PREL;
break;
+ case MCSymbolRefExpr::VK_ARM_PREL31:
+ Type = ELF::R_ARM_PREL31;
+ break;
}
break;
case ARM::fixup_arm_blx:
@@ -106,7 +109,7 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
case MCSymbolRefExpr::VK_PLT:
Type = ELF::R_ARM_CALL;
break;
- case MCSymbolRefExpr::VK_ARM_TLSCALL:
+ case MCSymbolRefExpr::VK_TLSCALL:
Type = ELF::R_ARM_TLS_CALL;
break;
default:
@@ -120,6 +123,8 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
Type = ELF::R_ARM_JUMP24;
break;
case ARM::fixup_t2_condbranch:
+ Type = ELF::R_ARM_THM_JUMP19;
+ break;
case ARM::fixup_t2_uncondbranch:
Type = ELF::R_ARM_THM_JUMP24;
break;
@@ -138,7 +143,7 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
case ARM::fixup_arm_thumb_bl:
case ARM::fixup_arm_thumb_blx:
switch (Modifier) {
- case MCSymbolRefExpr::VK_ARM_TLSCALL:
+ case MCSymbolRefExpr::VK_TLSCALL:
Type = ELF::R_ARM_THM_TLS_CALL;
break;
default:
@@ -210,10 +215,10 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
case MCSymbolRefExpr::VK_ARM_TLSLDO:
Type = ELF::R_ARM_TLS_LDO32;
break;
- case MCSymbolRefExpr::VK_ARM_TLSCALL:
+ case MCSymbolRefExpr::VK_TLSCALL:
Type = ELF::R_ARM_TLS_CALL;
break;
- case MCSymbolRefExpr::VK_ARM_TLSDESC:
+ case MCSymbolRefExpr::VK_TLSDESC:
Type = ELF::R_ARM_TLS_GOTDESC;
break;
case MCSymbolRefExpr::VK_ARM_TLSDESCSEQ:
diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
index 57577dc834b7..36cb74765f3b 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
@@ -763,6 +763,12 @@ void ARMTargetELFStreamer::emitArchDefaultAttributes() {
setAttributeItem(Virtualization_use, AllowTZVirtualization, false);
break;
+ case ARM::AK_ARMV8MBaseline:
+ case ARM::AK_ARMV8MMainline:
+ setAttributeItem(THUMB_ISA_use, AllowThumbDerived, false);
+ setAttributeItem(CPU_arch_profile, MicroControllerProfile, false);
+ break;
+
case ARM::AK_IWMMXT:
setAttributeItem(ARM_ISA_use, Allowed, false);
setAttributeItem(THUMB_ISA_use, Allowed, false);
diff --git a/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h b/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h
index 46ba57170db5..3fe2302bdd37 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h
@@ -33,6 +33,13 @@ enum Fixups {
// fixup_t2_pcrel_10 - Equivalent to fixup_arm_pcrel_10, accounting for
// the short-swapped encoding of Thumb2 instructions.
fixup_t2_pcrel_10,
+ // fixup_arm_pcrel_9 - 9-bit PC relative relocation for symbol addresses
+  // used in VFP instructions where bit 0 is not encoded (so it's encoded as
+  // an 8-bit immediate).
+ fixup_arm_pcrel_9,
+ // fixup_t2_pcrel_9 - Equivalent to fixup_arm_pcrel_9, accounting for
+ // the short-swapped encoding of Thumb2 instructions.
+ fixup_t2_pcrel_9,
// fixup_thumb_adr_pcrel_10 - 10-bit PC relative relocation for symbol
// addresses where the lower 2 bits are not encoded (so it's encoded as an
// 8-bit immediate).
@@ -100,6 +107,9 @@ enum Fixups {
fixup_t2_movt_hi16, // :upper16:
fixup_t2_movw_lo16, // :lower16:
+  // fixup_arm_mod_imm - Fixup for an ARM modified-immediate (mod_imm) operand.
+ fixup_arm_mod_imm,
+
// Marker
LastTargetFixupKind,
NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
index bda37f6616a8..53cd29a6061e 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
@@ -13,7 +13,6 @@
#include "ARMMCAsmInfo.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/Support/CommandLine.h"
using namespace llvm;
@@ -33,7 +32,7 @@ ARMMCAsmInfoDarwin::ARMMCAsmInfoDarwin(const Triple &TheTriple) {
SupportsDebugInformation = true;
// Exceptions handling
- ExceptionsType = TheTriple.isOSDarwin() && !TheTriple.isWatchOS()
+ ExceptionsType = (TheTriple.isOSDarwin() && !TheTriple.isWatchABI())
? ExceptionHandling::SjLj
: ExceptionHandling::DwarfCFI;
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
index b88578309f08..9fca13eeea93 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
@@ -120,11 +120,11 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
- /// getUnconditionalBranchTargetOpValue - Return encoding info for 24-bit
+ /// getThumbBranchTargetOpValue - Return encoding info for 24-bit
/// immediate Thumb2 direct branch target.
- uint32_t getUnconditionalBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
+ uint32_t getThumbBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
/// getARMBranchTargetOpValue - Return encoding info for 24-bit immediate
/// branch target.
@@ -214,11 +214,6 @@ public:
llvm_unreachable("Invalid ShiftOpc!");
}
- /// getAddrMode2OpValue - Return encoding for addrmode2 operands.
- uint32_t getAddrMode2OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
-
/// getAddrMode2OffsetOpValue - Return encoding for am2offset operands.
uint32_t getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
@@ -255,11 +250,16 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
- /// getAddrMode5OpValue - Return encoding info for 'reg +/- imm8' operand.
+ /// getAddrMode5OpValue - Return encoding info for 'reg +/- (imm8 << 2)' operand.
uint32_t getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+ /// getAddrMode5FP16OpValue - Return encoding info for 'reg +/- (imm8 << 1)' operand.
+ uint32_t getAddrMode5FP16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
/// getCCOutOpValue - Return encoding of the 's' bit.
unsigned getCCOutOpValue(const MCInst &MI, unsigned Op,
SmallVectorImpl<MCFixup> &Fixups,
@@ -312,12 +312,8 @@ public:
// Support for fixups (MCFixup)
if (MO.isExpr()) {
const MCExpr *Expr = MO.getExpr();
- // In instruction code this value always encoded as lowest 12 bits,
- // so we don't have to perform any specific adjustments.
- // Due to requirements of relocatable records we have to use FK_Data_4.
- // See ARMELFObjectWriter::ExplicitRelSym and
- // ARMELFObjectWriter::GetRelocTypeInner for more details.
- MCFixupKind Kind = MCFixupKind(FK_Data_4);
+ // Fixups resolve to plain values that need to be encoded.
+ MCFixupKind Kind = MCFixupKind(ARM::fixup_arm_mod_imm);
Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
return 0;
}
@@ -345,9 +341,6 @@ public:
unsigned getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
- unsigned getT2AddrModeImm12OffsetOpValue(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const;
/// getSORegOpValue - Return an encoded so_reg shifted register value.
unsigned getSORegRegOpValue(const MCInst &MI, unsigned Op,
@@ -757,10 +750,9 @@ getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
/// getUnconditionalBranchTargetOpValue - Return encoding info for 24-bit
/// immediate branch target.
-uint32_t ARMMCCodeEmitter::
-getUnconditionalBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
+uint32_t ARMMCCodeEmitter::getThumbBranchTargetOpValue(
+ const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
unsigned Val = 0;
const MCOperand MO = MI.getOperand(OpIdx);
@@ -1049,12 +1041,12 @@ ARMMCCodeEmitter::getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
switch (ARM16Expr->getKind()) {
default: llvm_unreachable("Unsupported ARMFixup");
case ARMMCExpr::VK_ARM_HI16:
- Kind = MCFixupKind(isThumb2(STI) ? ARM::fixup_t2_movt_hi16
- : ARM::fixup_arm_movt_hi16);
+ Kind = MCFixupKind(isThumb(STI) ? ARM::fixup_t2_movt_hi16
+ : ARM::fixup_arm_movt_hi16);
break;
case ARMMCExpr::VK_ARM_LO16:
- Kind = MCFixupKind(isThumb2(STI) ? ARM::fixup_t2_movw_lo16
- : ARM::fixup_arm_movw_lo16);
+ Kind = MCFixupKind(isThumb(STI) ? ARM::fixup_t2_movw_lo16
+ : ARM::fixup_arm_movw_lo16);
break;
}
@@ -1105,21 +1097,6 @@ getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx,
}
uint32_t ARMMCCodeEmitter::
-getAddrMode2OpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- // {17-14} Rn
- // {13} 1 == imm12, 0 == Rm
- // {12} isAdd
- // {11-0} imm12/Rm
- const MCOperand &MO = MI.getOperand(OpIdx);
- unsigned Rn = CTX.getRegisterInfo()->getEncodingValue(MO.getReg());
- uint32_t Binary = getAddrMode2OffsetOpValue(MI, OpIdx + 1, Fixups, STI);
- Binary |= Rn << 14;
- return Binary;
-}
-
-uint32_t ARMMCCodeEmitter::
getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
@@ -1252,7 +1229,7 @@ getAddrModePCOpValue(const MCInst &MI, unsigned OpIdx,
return (MO.getImm() >> 2);
}
-/// getAddrMode5OpValue - Return encoding info for 'reg +/- imm10' operand.
+/// getAddrMode5OpValue - Return encoding info for 'reg +/- (imm8 << 2)' operand.
uint32_t ARMMCCodeEmitter::
getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
@@ -1292,6 +1269,46 @@ getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
return Binary;
}
+/// getAddrMode5FP16OpValue - Return encoding info for 'reg +/- (imm8 << 1)' operand.
+uint32_t ARMMCCodeEmitter::
+getAddrMode5FP16OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ // {12-9} = reg
+ // {8} = (U)nsigned (add == '1', sub == '0')
+ // {7-0} = imm8
+ unsigned Reg, Imm8;
+ bool isAdd;
+  // If the first operand isn't a register, we have a label reference.
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ if (!MO.isReg()) {
+ Reg = CTX.getRegisterInfo()->getEncodingValue(ARM::PC); // Rn is PC.
+ Imm8 = 0;
+ isAdd = false; // 'U' bit is handled as part of the fixup.
+
+ assert(MO.isExpr() && "Unexpected machine operand type!");
+ const MCExpr *Expr = MO.getExpr();
+ MCFixupKind Kind;
+ if (isThumb2(STI))
+ Kind = MCFixupKind(ARM::fixup_t2_pcrel_9);
+ else
+ Kind = MCFixupKind(ARM::fixup_arm_pcrel_9);
+ Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
+
+ ++MCNumCPRelocations;
+ } else {
+ EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups, STI);
+ isAdd = ARM_AM::getAM5Op(Imm8) == ARM_AM::add;
+ }
+
+ uint32_t Binary = ARM_AM::getAM5Offset(Imm8);
+ // Immediate is always encoded as positive. The 'U' bit controls add vs sub.
+ if (isAdd)
+ Binary |= (1 << 8);
+ Binary |= (Reg << 9);
+ return Binary;
+}
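For reference, addrmode5 keeps its offset pre-scaled: the encoded imm8 is the byte offset divided by 4 for the 32/64-bit VFP accesses and by 2 for the FP16 variant added above, with the sign carried by the U bit. A small illustration under those assumptions (not the actual EncodeAddrModeOpValues logic):

#include <cassert>
#include <cstdint>

struct AM5Offset { bool Add; uint8_t Imm8; };

// Byte offset -> (U bit, imm8). Scale is 4 for vldr.f32/f64, 2 for vldr.f16.
AM5Offset encodeAM5(int32_t ByteOffset, unsigned Scale) {
  bool Add = ByteOffset >= 0;
  uint32_t Mag = Add ? ByteOffset : -ByteOffset;
  assert(Mag % Scale == 0 && Mag / Scale <= 0xFF && "out of addrmode5 range");
  return {Add, uint8_t(Mag / Scale)};
}
// encodeAM5(-32, 4) -> {Add=false, Imm8=8}, as in "vldr d0, [r1, #-32]";
// encodeAM5( 16, 2) -> {Add=true,  Imm8=8}, the FP16 flavour.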
+
unsigned ARMMCCodeEmitter::
getSORegRegOpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
@@ -1446,23 +1463,6 @@ getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum,
}
unsigned ARMMCCodeEmitter::
-getT2AddrModeImm12OffsetOpValue(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- const MCOperand &MO1 = MI.getOperand(OpNum);
-
- // FIXME: Needs fixup support.
- unsigned Value = 0;
- int32_t tmp = (int32_t)MO1.getImm();
- if (tmp < 0)
- tmp = abs(tmp);
- else
- Value |= 4096; // Set the ADD bit
- Value |= tmp & 4095;
- return Value;
-}
-
-unsigned ARMMCCodeEmitter::
getT2SORegOpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
index 8c8c249addb5..afb089ab0286 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
@@ -16,7 +16,6 @@
#include "ARMMCTargetDesc.h"
#include "InstPrinter/ARMInstPrinter.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
@@ -201,18 +200,6 @@ static MCAsmInfo *createARMMCAsmInfo(const MCRegisterInfo &MRI,
return MAI;
}
-static MCCodeGenInfo *createARMMCCodeGenInfo(const Triple &TT, Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
- if (RM == Reloc::Default) {
- // Default relocation model on Darwin is PIC, not DynamicNoPIC.
- RM = TT.isOSDarwin() ? Reloc::PIC_ : Reloc::DynamicNoPIC;
- }
- X->initMCCodeGenInfo(RM, CM, OL);
- return X;
-}
-
static MCStreamer *createELFStreamer(const Triple &T, MCContext &Ctx,
MCAsmBackend &MAB, raw_pwrite_stream &OS,
MCCodeEmitter *Emitter, bool RelaxAll) {
@@ -291,9 +278,6 @@ extern "C" void LLVMInitializeARMTargetMC() {
// Register the MC asm info.
RegisterMCAsmInfoFn X(*T, createARMMCAsmInfo);
- // Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(*T, createARMMCCodeGenInfo);
-
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(*T, createARMMCInstrInfo);
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp b/lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp
index 4468132588cf..482bcf902518 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMachORelocationInfo.cpp
@@ -12,7 +12,7 @@
#include "llvm-c/Disassembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCRelocationInfo.h"
+#include "llvm/MC/MCDisassembler/MCRelocationInfo.h"
using namespace llvm;
using namespace object;
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp b/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
index cfd504e533af..cfa6ce7da65e 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
@@ -389,7 +389,8 @@ void ARMMachObjectWriter::recordRelocation(MachObjectWriter *Writer,
uint32_t Offset = Target.getConstant();
if (IsPCRel && RelocType == MachO::ARM_RELOC_VANILLA)
Offset += 1 << Log2Size;
- if (Offset && A && !Writer->doesSymbolRequireExternRelocation(*A))
+ if (Offset && A && !Writer->doesSymbolRequireExternRelocation(*A) &&
+ RelocType != MachO::ARM_RELOC_HALF)
return RecordARMScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup,
Target, RelocType, Log2Size,
FixedValue);
@@ -447,8 +448,10 @@ void ARMMachObjectWriter::recordRelocation(MachObjectWriter *Writer,
// Even when it's not a scattered relocation, movw/movt always uses
// a PAIR relocation.
if (Type == MachO::ARM_RELOC_HALF) {
- // The other-half value only gets populated for the movt and movw
- // relocation entries.
+ // The entire addend is needed to correctly apply a relocation. One half is
+ // extracted from the instruction itself, the other comes from this
+ // PAIR. I.e. it's correct that we insert the high bits of the addend in the
+      // MOVW case here.
uint32_t Value = 0;
switch ((unsigned)Fixup.getKind()) {
default: break;
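The rewritten comment is easier to follow with the split spelled out: for a movw fixup the instruction field holds the low half of the addend and the PAIR entry supplies the high half; for movt it is the reverse. A hedged illustration (the helper name is made up, not the MachO writer's API):

#include <cstdint>

// "Other half" carried by the ARM_RELOC_HALF PAIR entry for a 32-bit addend.
uint16_t pairOtherHalf(uint32_t Addend, bool IsMovt) {
  return IsMovt ? uint16_t(Addend & 0xFFFF) // movt: PAIR carries the lo16
                : uint16_t(Addend >> 16);   // movw: PAIR carries the hi16
}
// Addend 0x12345678: movw encodes 0x5678, its PAIR holds 0x1234;
//                    movt encodes 0x1234, its PAIR holds 0x5678.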
diff --git a/lib/Target/ARM/MCTargetDesc/Makefile b/lib/Target/ARM/MCTargetDesc/Makefile
deleted file mode 100644
index 448ed9df2bff..000000000000
--- a/lib/Target/ARM/MCTargetDesc/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/ARM/TargetDesc/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMARMDesc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/ARM/MLxExpansionPass.cpp b/lib/Target/ARM/MLxExpansionPass.cpp
index ed2deeaa24c0..7f2124033982 100644
--- a/lib/Target/ARM/MLxExpansionPass.cpp
+++ b/lib/Target/ARM/MLxExpansionPass.cpp
@@ -378,12 +378,14 @@ bool MLxExpansion::ExpandFPMLxInstructions(MachineBasicBlock &MBB) {
}
bool MLxExpansion::runOnMachineFunction(MachineFunction &Fn) {
+ if (skipFunction(*Fn.getFunction()))
+ return false;
+
TII = static_cast<const ARMBaseInstrInfo *>(Fn.getSubtarget().getInstrInfo());
TRI = Fn.getSubtarget().getRegisterInfo();
MRI = &Fn.getRegInfo();
const ARMSubtarget *STI = &Fn.getSubtarget<ARMSubtarget>();
- // Only run this for CortexA9.
- if (!STI->isCortexA9())
+ if (!STI->expandMLx())
return false;
isLikeA9 = STI->isLikeA9() || STI->isSwift();
isSwift = STI->isSwift();
diff --git a/lib/Target/ARM/Makefile b/lib/Target/ARM/Makefile
deleted file mode 100644
index c1601a3f29dd..000000000000
--- a/lib/Target/ARM/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-##===- lib/Target/ARM/Makefile -----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMARMCodeGen
-TARGET = ARM
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = ARMGenRegisterInfo.inc ARMGenInstrInfo.inc \
- ARMGenAsmWriter.inc ARMGenAsmMatcher.inc \
- ARMGenDAGISel.inc ARMGenSubtargetInfo.inc \
- ARMGenCallingConv.inc \
- ARMGenFastISel.inc ARMGenMCCodeEmitter.inc \
- ARMGenMCPseudoLowering.inc ARMGenDisassemblerTables.inc
-
-DIRS = InstPrinter AsmParser Disassembler TargetInfo MCTargetDesc
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/ARM/README.txt b/lib/Target/ARM/README.txt
index 5acb2d46f3e7..549af00fcc99 100644
--- a/lib/Target/ARM/README.txt
+++ b/lib/Target/ARM/README.txt
@@ -115,7 +115,7 @@ L6:
.long -858993459
.long 1074318540
-3) struct copies appear to be done field by field
+3) struct copies appear to be done field by field
instead of by words, at least sometimes:
struct foo { int x; short s; char c1; char c2; };
@@ -142,7 +142,7 @@ a good way to measure on ARM).
* Consider this silly example:
-double bar(double x) {
+double bar(double x) {
double r = foo(3.1);
return x+r;
}
@@ -162,7 +162,7 @@ _bar:
fmrrd r0, r1, d0
ldmfd sp!, {r4, r5, r7, pc}
-Ignore the prologue and epilogue stuff for a second. Note
+Ignore the prologue and epilogue stuff for a second. Note
mov r4, r0
mov r5, r1
the copies to callee-save registers and the fact they are only being used by the
@@ -269,7 +269,7 @@ LBB4:
b LBB2
If BB4 is the only predecessor of BB3, then we can emit BB3 after BB4. We can
-then eliminate beq and and turn the unconditional branch to LBB2 to a bne.
+then eliminate beq and turn the unconditional branch to LBB2 into a bne.
See McCat/18-imp/ComputeBoundingBoxes for an example.
@@ -391,10 +391,10 @@ void foo(signed char* p) {
}
llvm decides it's a good idea to turn the repeated if...else into a
-binary tree, as if it were a switch; the resulting code requires -1
+binary tree, as if it were a switch; the resulting code requires -1
compare-and-branches when *p<=2 or *p==5, the same number if *p==4
or *p>6, and +1 if *p==3. So it should be a speed win
-(on balance). However, the revised code is larger, with 4 conditional
+(on balance). However, the revised code is larger, with 4 conditional
branches instead of 3.
More seriously, there is a byte->word extend before
@@ -421,8 +421,8 @@ int foo(int a, int b, int c, int d) {
return (int)(acc >> 32);
}
-Should compile to use SMLAL (Signed Multiply Accumulate Long) which multiplies
-two signed 32-bit values to produce a 64-bit value, and accumulates this with
+Should compile to use SMLAL (Signed Multiply Accumulate Long) which multiplies
+two signed 32-bit values to produce a 64-bit value, and accumulates this with
a 64-bit value.
We currently get this with both v4 and v6:
@@ -513,7 +513,7 @@ Be careful though as the last attempt caused infinite looping on lencod.
//===---------------------------------------------------------------------===//
-Predication issue. This function:
+Predication issue. This function:
extern unsigned array[ 128 ];
int foo( int x ) {
diff --git a/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp b/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp
index df73554372d8..3f88eb818062 100644
--- a/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp
+++ b/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp
@@ -8,7 +8,6 @@
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/ARMMCTargetDesc.h"
-#include "llvm/IR/Module.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
diff --git a/lib/Target/ARM/TargetInfo/Makefile b/lib/Target/ARM/TargetInfo/Makefile
deleted file mode 100644
index 6292ab14b346..000000000000
--- a/lib/Target/ARM/TargetInfo/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/ARM/TargetInfo/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMARMInfo
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/ARM/Thumb1FrameLowering.cpp b/lib/Target/ARM/Thumb1FrameLowering.cpp
index 93e0ac4aa320..c0732e4b750a 100644
--- a/lib/Target/ARM/Thumb1FrameLowering.cpp
+++ b/lib/Target/ARM/Thumb1FrameLowering.cpp
@@ -38,18 +38,17 @@ bool Thumb1FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const{
return !MF.getFrameInfo()->hasVarSizedObjects();
}
-static void
-emitSPUpdate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- const TargetInstrInfo &TII, DebugLoc dl,
- const ThumbRegisterInfo &MRI,
- int NumBytes, unsigned MIFlags = MachineInstr::NoFlags) {
+static void emitSPUpdate(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ const TargetInstrInfo &TII, const DebugLoc &dl,
+ const ThumbRegisterInfo &MRI, int NumBytes,
+ unsigned MIFlags = MachineInstr::NoFlags) {
emitThumbRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes, TII,
MRI, MIFlags);
}
-void Thumb1FrameLowering::
+MachineBasicBlock::iterator Thumb1FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
const Thumb1InstrInfo &TII =
@@ -60,9 +59,9 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// If we have alloca, convert as follows:
// ADJCALLSTACKDOWN -> sub, sp, sp, amount
// ADJCALLSTACKUP -> add, sp, sp, amount
- MachineInstr *Old = I;
- DebugLoc dl = Old->getDebugLoc();
- unsigned Amount = Old->getOperand(0).getImm();
+ MachineInstr &Old = *I;
+ DebugLoc dl = Old.getDebugLoc();
+ unsigned Amount = Old.getOperand(0).getImm();
if (Amount != 0) {
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
@@ -71,7 +70,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
Amount = (Amount+Align-1)/Align*Align;
// Replace the pseudo instruction with a new instruction...
- unsigned Opc = Old->getOpcode();
+ unsigned Opc = Old.getOpcode();
if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
emitSPUpdate(MBB, I, TII, dl, *RegInfo, -Amount);
} else {
@@ -80,7 +79,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
}
}
}
- MBB.erase(I);
+ return MBB.erase(I);
}
void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
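The expression a few lines up is the usual integer round-up: it bumps the outgoing-argument size to the next multiple of the stack alignment. In isolation, as a sketch:

// (Amount + Align - 1) / Align * Align rounds Amount up to a multiple of
// Align; with Align = 8: 0 -> 0, 1..8 -> 8, 9 -> 16, 12 -> 16.
unsigned roundUpToAlignment(unsigned Amount, unsigned Align) {
  return (Amount + Align - 1) / Align * Align;
}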
@@ -151,7 +150,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
case ARM::R9:
case ARM::R10:
case ARM::R11:
- if (STI.isTargetMachO()) {
+ if (STI.splitFramePushPop()) {
GPRCS2Size += 4;
break;
}
@@ -189,7 +188,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
int FramePtrOffsetInBlock = 0;
unsigned adjustedGPRCS1Size = GPRCS1Size;
- if (tryFoldSPUpdateIntoPushPop(STI, MF, std::prev(MBBI), NumBytes)) {
+ if (tryFoldSPUpdateIntoPushPop(STI, MF, &*std::prev(MBBI), NumBytes)) {
FramePtrOffsetInBlock = NumBytes;
adjustedGPRCS1Size += NumBytes;
NumBytes = 0;
@@ -213,7 +212,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
case ARM::R10:
case ARM::R11:
case ARM::R12:
- if (STI.isTargetMachO())
+ if (STI.splitFramePushPop())
break;
      // fallthrough
case ARM::R0:
@@ -304,16 +303,15 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
AFI->setShouldRestoreSPFromFP(true);
}
-static bool isCSRestore(MachineInstr *MI, const MCPhysReg *CSRegs) {
- if (MI->getOpcode() == ARM::tLDRspi &&
- MI->getOperand(1).isFI() &&
- isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs))
+static bool isCSRestore(MachineInstr &MI, const MCPhysReg *CSRegs) {
+ if (MI.getOpcode() == ARM::tLDRspi && MI.getOperand(1).isFI() &&
+ isCalleeSavedRegister(MI.getOperand(0).getReg(), CSRegs))
return true;
- else if (MI->getOpcode() == ARM::tPOP) {
+ else if (MI.getOpcode() == ARM::tPOP) {
// The first two operands are predicates. The last two are
// imp-def and imp-use of SP. Check everything in between.
- for (int i = 2, e = MI->getNumOperands() - 2; i != e; ++i)
- if (!isCalleeSavedRegister(MI->getOperand(i).getReg(), CSRegs))
+ for (int i = 2, e = MI.getNumOperands() - 2; i != e; ++i)
+ if (!isCalleeSavedRegister(MI.getOperand(i).getReg(), CSRegs))
return false;
return true;
}
@@ -346,8 +344,8 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
if (MBBI != MBB.begin()) {
do
--MBBI;
- while (MBBI != MBB.begin() && isCSRestore(MBBI, CSRegs));
- if (!isCSRestore(MBBI, CSRegs))
+ while (MBBI != MBB.begin() && isCSRestore(*MBBI, CSRegs));
+ if (!isCSRestore(*MBBI, CSRegs))
++MBBI;
}
@@ -376,11 +374,11 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
.addReg(FramePtr));
} else {
if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tBX_RET &&
- &MBB.front() != MBBI && std::prev(MBBI)->getOpcode() == ARM::tPOP) {
+ &MBB.front() != &*MBBI && std::prev(MBBI)->getOpcode() == ARM::tPOP) {
MachineBasicBlock::iterator PMBBI = std::prev(MBBI);
- if (!tryFoldSPUpdateIntoPushPop(STI, MF, PMBBI, NumBytes))
+ if (!tryFoldSPUpdateIntoPushPop(STI, MF, &*PMBBI, NumBytes))
emitSPUpdate(MBB, PMBBI, TII, dl, *RegInfo, NumBytes);
- } else if (!tryFoldSPUpdateIntoPushPop(STI, MF, MBBI, NumBytes))
+ } else if (!tryFoldSPUpdateIntoPushPop(STI, MF, &*MBBI, NumBytes))
emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, NumBytes);
}
}
@@ -467,7 +465,7 @@ bool Thumb1FrameLowering::emitPopSpecialFixUp(MachineBasicBlock &MBB,
// Look for a temporary register to use.
// First, compute the liveness information.
LivePhysRegs UsedRegs(STI.getRegisterInfo());
- UsedRegs.addLiveOuts(&MBB, /*AddPristines*/ true);
+ UsedRegs.addLiveOuts(MBB);
    // The semantics of pristines changed recently: the callee-saved
    // registers that are touched in the function are no longer part of
    // the pristines set.
@@ -637,7 +635,7 @@ restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
Reg = ARM::PC;
(*MIB).setDesc(TII.get(ARM::tPOP_RET));
if (MI != MBB.end())
- MIB.copyImplicitOps(&*MI);
+ MIB.copyImplicitOps(*MI);
MI = MBB.erase(MI);
} else
// LR may only be popped into PC, as part of return sequence.
diff --git a/lib/Target/ARM/Thumb1FrameLowering.h b/lib/Target/ARM/Thumb1FrameLowering.h
index 27faac63683a..9de1ba1d7009 100644
--- a/lib/Target/ARM/Thumb1FrameLowering.h
+++ b/lib/Target/ARM/Thumb1FrameLowering.h
@@ -41,7 +41,7 @@ public:
bool hasReservedCallFrame(const MachineFunction &MF) const override;
- void
+ MachineBasicBlock::iterator
eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const override;
diff --git a/lib/Target/ARM/Thumb1InstrInfo.cpp b/lib/Target/ARM/Thumb1InstrInfo.cpp
index 530e1d33839a..159731d8fc72 100644
--- a/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ b/lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -38,9 +38,9 @@ unsigned Thumb1InstrInfo::getUnindexedOpcode(unsigned Opc) const {
}
void Thumb1InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
// Need to check the arch.
MachineFunction &MF = *MBB.getParent();
const ARMSubtarget &st = MF.getSubtarget<ARMSubtarget>();
@@ -118,11 +118,12 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
}
}
-void
-Thumb1InstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI,
- Reloc::Model RM) const {
- if (RM == Reloc::PIC_)
- expandLoadStackGuardBase(MI, ARM::tLDRLIT_ga_pcrel, ARM::tLDRi, RM);
+void Thumb1InstrInfo::expandLoadStackGuard(
+ MachineBasicBlock::iterator MI) const {
+ MachineFunction &MF = *MI->getParent()->getParent();
+ const TargetMachine &TM = MF.getTarget();
+ if (TM.isPositionIndependent())
+ expandLoadStackGuardBase(MI, ARM::tLDRLIT_ga_pcrel, ARM::tLDRi);
else
- expandLoadStackGuardBase(MI, ARM::tLDRLIT_ga_abs, ARM::tLDRi, RM);
+ expandLoadStackGuardBase(MI, ARM::tLDRLIT_ga_abs, ARM::tLDRi);
}
diff --git a/lib/Target/ARM/Thumb1InstrInfo.h b/lib/Target/ARM/Thumb1InstrInfo.h
index f3f493d89237..931914ad2799 100644
--- a/lib/Target/ARM/Thumb1InstrInfo.h
+++ b/lib/Target/ARM/Thumb1InstrInfo.h
@@ -38,9 +38,8 @@ public:
///
const ThumbRegisterInfo &getRegisterInfo() const override { return RI; }
- void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -55,8 +54,7 @@ public:
const TargetRegisterInfo *TRI) const override;
private:
- void expandLoadStackGuard(MachineBasicBlock::iterator MI,
- Reloc::Model RM) const override;
+ void expandLoadStackGuard(MachineBasicBlock::iterator MI) const override;
};
}
diff --git a/lib/Target/ARM/Thumb2ITBlockPass.cpp b/lib/Target/ARM/Thumb2ITBlockPass.cpp
index bf0498dfda69..0c7055551632 100644
--- a/lib/Target/ARM/Thumb2ITBlockPass.cpp
+++ b/lib/Target/ARM/Thumb2ITBlockPass.cpp
@@ -36,6 +36,11 @@ namespace {
bool runOnMachineFunction(MachineFunction &Fn) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return "Thumb IT blocks insertion pass";
}
@@ -165,7 +170,7 @@ Thumb2ITBlockPass::MoveCopyOutOfITBlock(MachineInstr *MI,
++I;
if (I != E) {
unsigned NPredReg = 0;
- ARMCC::CondCodes NCC = getITInstrPredicate(I, NPredReg);
+ ARMCC::CondCodes NCC = getITInstrPredicate(*I, NPredReg);
if (NCC == CC || NCC == OCC)
return true;
}
@@ -182,7 +187,7 @@ bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
MachineInstr *MI = &*MBBI;
DebugLoc dl = MI->getDebugLoc();
unsigned PredReg = 0;
- ARMCC::CondCodes CC = getITInstrPredicate(MI, PredReg);
+ ARMCC::CondCodes CC = getITInstrPredicate(*MI, PredReg);
if (CC == ARMCC::AL) {
++MBBI;
continue;
@@ -222,7 +227,7 @@ bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
MI = NMI;
unsigned NPredReg = 0;
- ARMCC::CondCodes NCC = getITInstrPredicate(NMI, NPredReg);
+ ARMCC::CondCodes NCC = getITInstrPredicate(*NMI, NPredReg);
if (NCC == CC || NCC == OCC) {
Mask |= (NCC & 1) << Pos;
// Add implicit use of ITSTATE.
diff --git a/lib/Target/ARM/Thumb2InstrInfo.cpp b/lib/Target/ARM/Thumb2InstrInfo.cpp
index 4da769f23280..e2e6dafd218a 100644
--- a/lib/Target/ARM/Thumb2InstrInfo.cpp
+++ b/lib/Target/ARM/Thumb2InstrInfo.cpp
@@ -50,7 +50,7 @@ Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
MachineBasicBlock *NewDest) const {
MachineBasicBlock *MBB = Tail->getParent();
ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
- if (!AFI->hasITBlocks()) {
+ if (!AFI->hasITBlocks() || Tail->isBranch()) {
TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);
return;
}
@@ -58,7 +58,7 @@ Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
// If the first instruction of Tail is predicated, we may have to update
// the IT instruction.
unsigned PredReg = 0;
- ARMCC::CondCodes CC = getInstrPredicate(Tail, PredReg);
+ ARMCC::CondCodes CC = getInstrPredicate(*Tail, PredReg);
MachineBasicBlock::iterator MBBI = Tail;
if (CC != ARMCC::AL)
// Expecting at least the t2IT instruction before it.
@@ -106,13 +106,13 @@ Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
}
unsigned PredReg = 0;
- return getITInstrPredicate(MBBI, PredReg) == ARMCC::AL;
+ return getITInstrPredicate(*MBBI, PredReg) == ARMCC::AL;
}
void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
// Handle SPR, DPR, and QPR copies.
if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);
@@ -148,8 +148,10 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
// Thumb2 STRD expects its dest-registers to be in rGPR. Not a problem for
// gsub_0, but needs an extra constraint for gsub_1 (which could be sp
// otherwise).
- MachineRegisterInfo *MRI = &MF.getRegInfo();
- MRI->constrainRegClass(SrcReg, &ARM::GPRPair_with_gsub_1_in_rGPRRegClass);
+ if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ MachineRegisterInfo *MRI = &MF.getRegInfo();
+ MRI->constrainRegClass(SrcReg, &ARM::GPRPair_with_gsub_1_in_rGPRRegClass);
+ }
MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
@@ -187,8 +189,11 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
// Thumb2 LDRD expects its dest-registers to be in rGPR. Not a problem for
// gsub_0, but needs an extra constraint for gsub_1 (which could be sp
// otherwise).
- MachineRegisterInfo *MRI = &MF.getRegInfo();
- MRI->constrainRegClass(DestReg, &ARM::GPRPair_with_gsub_1_in_rGPRRegClass);
+ if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
+ MachineRegisterInfo *MRI = &MF.getRegInfo();
+ MRI->constrainRegClass(DestReg,
+ &ARM::GPRPair_with_gsub_1_in_rGPRRegClass);
+ }
MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
@@ -204,20 +209,22 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
}
-void
-Thumb2InstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI,
- Reloc::Model RM) const {
- if (RM == Reloc::PIC_)
- expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12, RM);
+void Thumb2InstrInfo::expandLoadStackGuard(
+ MachineBasicBlock::iterator MI) const {
+ MachineFunction &MF = *MI->getParent()->getParent();
+ if (MF.getTarget().isPositionIndependent())
+ expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12);
else
- expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12, RM);
+ expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12);
}
void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI, DebugLoc dl,
- unsigned DestReg, unsigned BaseReg, int NumBytes,
- ARMCC::CondCodes Pred, unsigned PredReg,
- const ARMBaseInstrInfo &TII, unsigned MIFlags) {
+ MachineBasicBlock::iterator &MBBI,
+ const DebugLoc &dl, unsigned DestReg,
+ unsigned BaseReg, int NumBytes,
+ ARMCC::CondCodes Pred, unsigned PredReg,
+ const ARMBaseInstrInfo &TII,
+ unsigned MIFlags) {
if (NumBytes == 0 && DestReg != BaseReg) {
BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
.addReg(BaseReg, RegState::Kill)
@@ -459,7 +466,7 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
Offset += MI.getOperand(FrameRegIdx+1).getImm();
unsigned PredReg;
- if (Offset == 0 && getInstrPredicate(&MI, PredReg) == ARMCC::AL) {
+ if (Offset == 0 && getInstrPredicate(MI, PredReg) == ARMCC::AL) {
// Turn it into a move.
MI.setDesc(TII.get(ARM::tMOVr));
MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
@@ -627,9 +634,9 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
return Offset == 0;
}
-ARMCC::CondCodes
-llvm::getITInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
- unsigned Opc = MI->getOpcode();
+ARMCC::CondCodes llvm::getITInstrPredicate(const MachineInstr &MI,
+ unsigned &PredReg) {
+ unsigned Opc = MI.getOpcode();
if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
return ARMCC::AL;
return getInstrPredicate(MI, PredReg);
diff --git a/lib/Target/ARM/Thumb2InstrInfo.h b/lib/Target/ARM/Thumb2InstrInfo.h
index 916ab06ec305..15d63300b6a2 100644
--- a/lib/Target/ARM/Thumb2InstrInfo.h
+++ b/lib/Target/ARM/Thumb2InstrInfo.h
@@ -39,9 +39,8 @@ public:
bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) const override;
- void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -63,16 +62,13 @@ public:
const ThumbRegisterInfo &getRegisterInfo() const override { return RI; }
private:
- void expandLoadStackGuard(MachineBasicBlock::iterator MI,
- Reloc::Model RM) const override;
+ void expandLoadStackGuard(MachineBasicBlock::iterator MI) const override;
};
/// getITInstrPredicate - Valid only in Thumb2 mode. This function is identical
/// to llvm::getInstrPredicate except it returns AL for conditional branch
/// instructions which are "predicated", but are not in IT blocks.
-ARMCC::CondCodes getITInstrPredicate(const MachineInstr *MI, unsigned &PredReg);
-
-
+ARMCC::CondCodes getITInstrPredicate(const MachineInstr &MI, unsigned &PredReg);
}
#endif
diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp
index bcd0e5751258..c4fdb9b3147d 100644
--- a/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -18,11 +18,12 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/IR/Function.h" // To access Function attributes
+#include "llvm/IR/Function.h" // To access Function attributes
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
+#include <utility>
using namespace llvm;
#define DEBUG_TYPE "t2-reduce-size"
@@ -115,12 +116,14 @@ namespace {
{ ARM::t2LDRHs, ARM::tLDRHr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
{ ARM::t2LDRSBs,ARM::tLDRSB, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
{ ARM::t2LDRSHs,ARM::tLDRSH, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
+ { ARM::t2LDR_POST,ARM::tLDMIA_UPD,0, 0, 0, 1, 0, 0,0, 0,1,0 },
{ ARM::t2STRi12,ARM::tSTRi, ARM::tSTRspi, 5, 8, 1, 0, 0,0, 0,1,0 },
{ ARM::t2STRs, ARM::tSTRr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
{ ARM::t2STRBi12,ARM::tSTRBi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
{ ARM::t2STRBs, ARM::tSTRBr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
{ ARM::t2STRHi12,ARM::tSTRHi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
{ ARM::t2STRHs, ARM::tSTRHr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
+ { ARM::t2STR_POST,ARM::tSTMIA_UPD,0, 0, 0, 1, 0, 0,0, 0,1,0 },
{ ARM::t2LDMIA, ARM::tLDMIA, 0, 0, 0, 1, 1, 1,1, 0,1,0 },
{ ARM::t2LDMIA_RET,0, ARM::tPOP_RET, 0, 0, 1, 1, 1,1, 0,1,0 },
@@ -143,6 +146,11 @@ namespace {
bool runOnMachineFunction(MachineFunction &MF) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return "Thumb2 instruction size reduction pass";
}
@@ -208,7 +216,7 @@ namespace {
}
Thumb2SizeReduce::Thumb2SizeReduce(std::function<bool(const Function &)> Ftor)
- : MachineFunctionPass(ID), PredicateFtor(Ftor) {
+ : MachineFunctionPass(ID), PredicateFtor(std::move(Ftor)) {
OptimizeSize = MinimizeSize = false;
for (unsigned i = 0, e = array_lengthof(ReduceTable); i != e; ++i) {
unsigned FromOpc = ReduceTable[i].WideOpc;
@@ -417,6 +425,46 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
HasShift = true;
OpNum = 4;
break;
+ case ARM::t2LDR_POST:
+ case ARM::t2STR_POST: {
+ if (!MBB.getParent()->getFunction()->optForMinSize())
+ return false;
+
+ // We're creating a completely different type of load/store - LDM from LDR.
+ // For this reason we can't reuse the logic at the end of this function; we
+ // have to implement the MI building here.
+ bool IsStore = Entry.WideOpc == ARM::t2STR_POST;
+ unsigned Rt = MI->getOperand(IsStore ? 1 : 0).getReg();
+ unsigned Rn = MI->getOperand(IsStore ? 0 : 1).getReg();
+ unsigned Offset = MI->getOperand(3).getImm();
+ unsigned PredImm = MI->getOperand(4).getImm();
+ unsigned PredReg = MI->getOperand(5).getReg();
+ assert(isARMLowRegister(Rt));
+ assert(isARMLowRegister(Rn));
+
+ if (Offset != 4)
+ return false;
+
+ // Add the 16-bit load / store instruction.
+ DebugLoc dl = MI->getDebugLoc();
+ auto MIB = BuildMI(MBB, MI, dl, TII->get(Entry.NarrowOpc1))
+ .addReg(Rn, RegState::Define)
+ .addReg(Rn)
+ .addImm(PredImm)
+ .addReg(PredReg)
+ .addReg(Rt, IsStore ? 0 : RegState::Define);
+
+ // Transfer memoperands.
+ MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+
+ // Transfer MI flags.
+ MIB.setMIFlags(MI->getFlags());
+
+ // Kill the old instruction.
+ MI->eraseFromBundle();
+ ++NumLdSts;
+ return true;
+ }
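The transformation above is sound because a post-increment word access that steps by exactly one word has a 16-bit multiple-register equivalent: "ldr r0, [r1], #4" behaves the same as "ldm r1!, {r0}", and likewise for str/stm. A sketch of just the guard conditions visible above (illustrative, not the pass's interface):

// A t2LDR_POST/t2STR_POST narrows to tLDMIA_UPD/tSTMIA_UPD only when the
// writeback is one word, both registers are low (r0-r7), and we are
// minimizing size.
bool canNarrowPostIncWord(unsigned RtEnc, unsigned RnEnc, int Offset,
                          bool MinSize) {
  return MinSize && RtEnc < 8 && RnEnc < 8 && Offset == 4;
}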
case ARM::t2LDMIA: {
unsigned BaseReg = MI->getOperand(0).getReg();
assert(isARMLowRegister(BaseReg));
@@ -597,7 +645,7 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
case ARM::t2ADDSri:
case ARM::t2ADDSrr: {
unsigned PredReg = 0;
- if (getInstrPredicate(MI, PredReg) == ARMCC::AL) {
+ if (getInstrPredicate(*MI, PredReg) == ARMCC::AL) {
switch (Opc) {
default: break;
case ARM::t2ADDSri: {
@@ -670,7 +718,7 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
if (Reg1 != Reg0)
return false;
// Try to commute the operands to make it a 2-address instruction.
- MachineInstr *CommutedMI = TII->commuteInstruction(MI);
+ MachineInstr *CommutedMI = TII->commuteInstruction(*MI);
if (!CommutedMI)
return false;
}
@@ -678,11 +726,11 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
// Try to commute the operands to make it a 2-address instruction.
unsigned CommOpIdx1 = 1;
unsigned CommOpIdx2 = TargetInstrInfo::CommuteAnyOperandIndex;
- if (!TII->findCommutedOpIndices(MI, CommOpIdx1, CommOpIdx2) ||
+ if (!TII->findCommutedOpIndices(*MI, CommOpIdx1, CommOpIdx2) ||
MI->getOperand(CommOpIdx2).getReg() != Reg0)
return false;
MachineInstr *CommutedMI =
- TII->commuteInstruction(MI, false, CommOpIdx1, CommOpIdx2);
+ TII->commuteInstruction(*MI, false, CommOpIdx1, CommOpIdx2);
if (!CommutedMI)
return false;
}
@@ -702,7 +750,7 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
// Check if it's possible / necessary to transfer the predicate.
const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc2);
unsigned PredReg = 0;
- ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
+ ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
bool SkipPred = false;
if (Pred != ARMCC::AL) {
if (!NewMCID.isPredicable())
@@ -798,7 +846,7 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
// Check if it's possible / necessary to transfer the predicate.
const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc1);
unsigned PredReg = 0;
- ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
+ ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
bool SkipPred = false;
if (Pred != ARMCC::AL) {
if (!NewMCID.isPredicable())
@@ -983,7 +1031,7 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
NextMII->bundleWithPred();
}
- if (!NextInSameBundle && MI->isInsideBundle()) {
+ if (BundleMI && !NextInSameBundle && MI->isInsideBundle()) {
// FIXME: Since post-ra scheduler operates on bundles, the CPSR kill
// marker is only on the BUNDLE instruction. Process the BUNDLE
// instruction as we finish with the bundled instruction to work around
@@ -1050,5 +1098,5 @@ bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
/// reduction pass.
FunctionPass *llvm::createThumb2SizeReductionPass(
std::function<bool(const Function &)> Ftor) {
- return new Thumb2SizeReduce(Ftor);
+ return new Thumb2SizeReduce(std::move(Ftor));
}
diff --git a/lib/Target/ARM/ThumbRegisterInfo.cpp b/lib/Target/ARM/ThumbRegisterInfo.cpp
index b5f9d7e38f27..6c26c8843865 100644
--- a/lib/Target/ARM/ThumbRegisterInfo.cpp
+++ b/lib/Target/ARM/ThumbRegisterInfo.cpp
@@ -61,7 +61,7 @@ ThumbRegisterInfo::getPointerRegClass(const MachineFunction &MF,
static void emitThumb1LoadConstPool(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
- DebugLoc dl, unsigned DestReg,
+ const DebugLoc &dl, unsigned DestReg,
unsigned SubIdx, int Val,
ARMCC::CondCodes Pred, unsigned PredReg,
unsigned MIFlags) {
@@ -81,7 +81,7 @@ static void emitThumb1LoadConstPool(MachineBasicBlock &MBB,
static void emitThumb2LoadConstPool(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
- DebugLoc dl, unsigned DestReg,
+ const DebugLoc &dl, unsigned DestReg,
unsigned SubIdx, int Val,
ARMCC::CondCodes Pred, unsigned PredReg,
unsigned MIFlags) {
@@ -101,9 +101,9 @@ static void emitThumb2LoadConstPool(MachineBasicBlock &MBB,
/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ThumbRegisterInfo::emitLoadConstPool(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, DebugLoc dl,
- unsigned DestReg, unsigned SubIdx, int Val, ARMCC::CondCodes Pred,
- unsigned PredReg, unsigned MIFlags) const {
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ const DebugLoc &dl, unsigned DestReg, unsigned SubIdx, int Val,
+ ARMCC::CondCodes Pred, unsigned PredReg, unsigned MIFlags) const {
MachineFunction &MF = *MBB.getParent();
const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
if (STI.isThumb1Only()) {
@@ -120,57 +120,55 @@ void ThumbRegisterInfo::emitLoadConstPool(
/// a destreg = basereg + immediate in Thumb code. Materialize the immediate
/// in a register using mov / mvn sequences or load the immediate from a
/// constpool entry.
-static
-void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- DebugLoc dl,
- unsigned DestReg, unsigned BaseReg,
- int NumBytes, bool CanChangeCC,
- const TargetInstrInfo &TII,
- const ARMBaseRegisterInfo& MRI,
- unsigned MIFlags = MachineInstr::NoFlags) {
- MachineFunction &MF = *MBB.getParent();
- bool isHigh = !isARMLowRegister(DestReg) ||
- (BaseReg != 0 && !isARMLowRegister(BaseReg));
- bool isSub = false;
- // Subtract doesn't have high register version. Load the negative value
- // if either base or dest register is a high register. Also, if do not
- // issue sub as part of the sequence if condition register is to be
- // preserved.
- if (NumBytes < 0 && !isHigh && CanChangeCC) {
- isSub = true;
- NumBytes = -NumBytes;
- }
- unsigned LdReg = DestReg;
- if (DestReg == ARM::SP)
- assert(BaseReg == ARM::SP && "Unexpected!");
- if (!isARMLowRegister(DestReg) && !MRI.isVirtualRegister(DestReg))
- LdReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
-
- if (NumBytes <= 255 && NumBytes >= 0 && CanChangeCC) {
- AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
- .addImm(NumBytes).setMIFlags(MIFlags);
- } else if (NumBytes < 0 && NumBytes >= -255 && CanChangeCC) {
- AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
- .addImm(NumBytes).setMIFlags(MIFlags);
- AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tRSB), LdReg))
- .addReg(LdReg, RegState::Kill).setMIFlags(MIFlags);
- } else
- MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, 0, NumBytes,
- ARMCC::AL, 0, MIFlags);
-
- // Emit add / sub.
- int Opc = (isSub) ? ARM::tSUBrr : ((isHigh || !CanChangeCC) ? ARM::tADDhirr
- : ARM::tADDrr);
- MachineInstrBuilder MIB =
- BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
- if (Opc != ARM::tADDhirr)
- MIB = AddDefaultT1CC(MIB);
- if (DestReg == ARM::SP || isSub)
- MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
- else
- MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
- AddDefaultPred(MIB);
+static void emitThumbRegPlusImmInReg(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ const DebugLoc &dl, unsigned DestReg, unsigned BaseReg, int NumBytes,
+ bool CanChangeCC, const TargetInstrInfo &TII,
+ const ARMBaseRegisterInfo &MRI, unsigned MIFlags = MachineInstr::NoFlags) {
+ MachineFunction &MF = *MBB.getParent();
+ bool isHigh = !isARMLowRegister(DestReg) ||
+ (BaseReg != 0 && !isARMLowRegister(BaseReg));
+ bool isSub = false;
+  // Subtract doesn't have a high register version. Load the negative value
+  // if either the base or dest register is a high register. Also, do not
+  // issue sub as part of the sequence if the condition register is to be
+  // preserved.
+ if (NumBytes < 0 && !isHigh && CanChangeCC) {
+ isSub = true;
+ NumBytes = -NumBytes;
+ }
+ unsigned LdReg = DestReg;
+ if (DestReg == ARM::SP)
+ assert(BaseReg == ARM::SP && "Unexpected!");
+ if (!isARMLowRegister(DestReg) && !MRI.isVirtualRegister(DestReg))
+ LdReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
+
+ if (NumBytes <= 255 && NumBytes >= 0 && CanChangeCC) {
+ AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
+ .addImm(NumBytes)
+ .setMIFlags(MIFlags);
+ } else if (NumBytes < 0 && NumBytes >= -255 && CanChangeCC) {
+ AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
+ .addImm(NumBytes)
+ .setMIFlags(MIFlags);
+ AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tRSB), LdReg))
+ .addReg(LdReg, RegState::Kill)
+ .setMIFlags(MIFlags);
+ } else
+ MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, 0, NumBytes, ARMCC::AL, 0,
+ MIFlags);
+
+ // Emit add / sub.
+ int Opc = (isSub) ? ARM::tSUBrr
+ : ((isHigh || !CanChangeCC) ? ARM::tADDhirr : ARM::tADDrr);
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
+ if (Opc != ARM::tADDhirr)
+ MIB = AddDefaultT1CC(MIB);
+ if (DestReg == ARM::SP || isSub)
+ MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
+ else
+ MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
+ AddDefaultPred(MIB);
}
/// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
@@ -179,10 +177,10 @@ void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
/// be too long. This is allowed to modify the condition flags.
void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
- DebugLoc dl,
- unsigned DestReg, unsigned BaseReg,
- int NumBytes, const TargetInstrInfo &TII,
- const ARMBaseRegisterInfo& MRI,
+ const DebugLoc &dl, unsigned DestReg,
+ unsigned BaseReg, int NumBytes,
+ const TargetInstrInfo &TII,
+ const ARMBaseRegisterInfo &MRI,
unsigned MIFlags) {
bool isSub = NumBytes < 0;
unsigned Bytes = (unsigned)NumBytes;
@@ -281,7 +279,7 @@ void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
unsigned RequiredExtraInstrs;
if (ExtraRange)
- RequiredExtraInstrs = RoundUpToAlignment(RangeAfterCopy, ExtraRange) / ExtraRange;
+ RequiredExtraInstrs = alignTo(RangeAfterCopy, ExtraRange) / ExtraRange;
else if (RangeAfterCopy > 0)
// We need an extra instruction but none is available
RequiredExtraInstrs = 1000000;
diff --git a/lib/Target/ARM/ThumbRegisterInfo.h b/lib/Target/ARM/ThumbRegisterInfo.h
index 23aaff37f409..e6b06959e428 100644
--- a/lib/Target/ARM/ThumbRegisterInfo.h
+++ b/lib/Target/ARM/ThumbRegisterInfo.h
@@ -39,8 +39,9 @@ public:
/// specified immediate.
void
emitLoadConstPool(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
- DebugLoc dl, unsigned DestReg, unsigned SubIdx, int Val,
- ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0,
+ const DebugLoc &dl, unsigned DestReg, unsigned SubIdx,
+ int Val, ARMCC::CondCodes Pred = ARMCC::AL,
+ unsigned PredReg = 0,
unsigned MIFlags = MachineInstr::NoFlags) const override;
// rewrite MI to access 'Offset' bytes from the FP. Update Offset to be
diff --git a/lib/Target/AVR/AVR.h b/lib/Target/AVR/AVR.h
index 4c1667ed341c..041c77cfcb9d 100644
--- a/lib/Target/AVR/AVR.h
+++ b/lib/Target/AVR/AVR.h
@@ -30,9 +30,7 @@ FunctionPass *createAVRFrameAnalyzerPass();
FunctionPass *createAVRDynAllocaSRPass();
FunctionPass *createAVRBranchSelectionPass();
-/**
- * Contains the AVR backend.
- */
+/// Contains the AVR backend.
namespace AVR {
enum AddressSpace { DataMemory, ProgramMemory };
diff --git a/lib/Target/AVR/AVR.td b/lib/Target/AVR/AVR.td
index 9e80717cd680..27cf212704f0 100644
--- a/lib/Target/AVR/AVR.td
+++ b/lib/Target/AVR/AVR.td
@@ -514,9 +514,9 @@ include "AVRRegisterInfo.td"
// Instruction Descriptions
//===---------------------------------------------------------------------===//
-//include "AVRInstrInfo.td"
+include "AVRInstrInfo.td"
-//def AVRInstrInfo : InstrInfo;
+def AVRInstrInfo : InstrInfo;
//===---------------------------------------------------------------------===//
// Calling Conventions
@@ -554,7 +554,7 @@ include "AVRCallingConv.td"
//===---------------------------------------------------------------------===//
def AVR : Target {
-// let InstructionSet = AVRInstrInfo;
+ let InstructionSet = AVRInstrInfo;
// let AssemblyWriters = [AVRAsmWriter];
//
// let AssemblyParsers = [AVRAsmParser];
diff --git a/lib/Target/AVR/AVRConfig.h b/lib/Target/AVR/AVRConfig.h
deleted file mode 100644
index 65588bc50840..000000000000
--- a/lib/Target/AVR/AVRConfig.h
+++ /dev/null
@@ -1,15 +0,0 @@
-//===-- AVRConfig.h - AVR Backend Configuration Header ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_AVR_CONFIG_H
-#define LLVM_AVR_CONFIG_H
-
-#define LLVM_AVR_GCC_COMPAT
-
-#endif // LLVM_AVR_CONFIG_H
diff --git a/lib/Target/AVR/AVRFrameLowering.h b/lib/Target/AVR/AVRFrameLowering.h
new file mode 100644
index 000000000000..850a43abebfa
--- /dev/null
+++ b/lib/Target/AVR/AVRFrameLowering.h
@@ -0,0 +1,46 @@
+//===-- AVRFrameLowering.h - Define frame lowering for AVR ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_AVR_FRAME_LOWERING_H
+#define LLVM_AVR_FRAME_LOWERING_H
+
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+
+/// Utilities for creating function call frames.
+class AVRFrameLowering : public TargetFrameLowering {
+public:
+ explicit AVRFrameLowering();
+
+public:
+ void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+ bool hasFP(const MachineFunction &MF) const override;
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
+ bool
+ restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
+ bool canSimplifyCallFramePseudos(const MachineFunction &MF) const override;
+ void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
+ RegScavenger *RS = nullptr) const override;
+ MachineBasicBlock::iterator
+ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_AVR_FRAME_LOWERING_H
diff --git a/lib/Target/AVR/AVRISelLowering.h b/lib/Target/AVR/AVRISelLowering.h
new file mode 100644
index 000000000000..2c8c9c88b6dd
--- /dev/null
+++ b/lib/Target/AVR/AVRISelLowering.h
@@ -0,0 +1,152 @@
+//===-- AVRISelLowering.h - AVR DAG Lowering Interface ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that AVR uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_AVR_ISEL_LOWERING_H
+#define LLVM_AVR_ISEL_LOWERING_H
+
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+
+namespace AVRISD {
+
+/// AVR Specific DAG Nodes
+enum NodeType {
+ /// Start the numbering where the builtin ops leave off.
+ FIRST_NUMBER = ISD::BUILTIN_OP_END,
+ /// Return from subroutine.
+ RET_FLAG,
+ /// Return from ISR.
+ RETI_FLAG,
+  /// Represents an abstract call instruction,
+  /// which carries the callee and argument information.
+ CALL,
+ /// A wrapper node for TargetConstantPool,
+ /// TargetExternalSymbol, and TargetGlobalAddress.
+ WRAPPER,
+ LSL, ///< Logical shift left.
+ LSR, ///< Logical shift right.
+ ASR, ///< Arithmetic shift right.
+ ROR, ///< Bit rotate right.
+ ROL, ///< Bit rotate left.
+ LSLLOOP, ///< A loop of single logical shift left instructions.
+ LSRLOOP, ///< A loop of single logical shift right instructions.
+ ASRLOOP, ///< A loop of single arithmetic shift right instructions.
+ /// AVR conditional branches. Operand 0 is the chain operand, operand 1
+ /// is the block to branch if condition is true, operand 2 is the
+ /// condition code, and operand 3 is the flag operand produced by a CMP
+ /// or TEST instruction.
+ BRCOND,
+ /// Compare instruction.
+ CMP,
+ /// Compare with carry instruction.
+ CMPC,
+ /// Test for zero or minus instruction.
+ TST,
+  /// Operand 0 and operand 1 are the selection variables, operand 2
+  /// is the condition code, and operand 3 is the flag operand.
+ SELECT_CC
+};
+
+} // end of namespace AVRISD
+
+class AVRTargetMachine;
+
+/// Performs target lowering for the AVR.
+class AVRTargetLowering : public TargetLowering {
+public:
+ explicit AVRTargetLowering(AVRTargetMachine &TM);
+
+public:
+ MVT getScalarShiftAmountTy(const DataLayout &, EVT LHSTy) const override {
+ return MVT::i8;
+ }
+ const char *getTargetNodeName(unsigned Opcode) const override;
+
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+
+ void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const override;
+
+ bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
+ unsigned AS) const override;
+
+ bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
+ ISD::MemIndexedMode &AM,
+ SelectionDAG &DAG) const override;
+
+ bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
+ SDValue &Offset, ISD::MemIndexedMode &AM,
+ SelectionDAG &DAG) const override;
+
+ bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
+
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *MBB) const override;
+
+ ConstraintType getConstraintType(StringRef Constraint) const override;
+
+ ConstraintWeight
+ getSingleConstraintMatchWeight(AsmOperandInfo &info,
+ const char *constraint) const override;
+
+ std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ StringRef Constraint, MVT VT) const override;
+
+ unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override;
+
+ void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const override;
+
+private:
+ SDValue getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue &AVRcc,
+ SelectionDAG &DAG, SDLoc dl) const;
+ SDValue LowerShifts(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl,
+ SelectionDAG &DAG) const override;
+ SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
+ SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const override;
+ SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+
+private:
+ MachineBasicBlock *insertShift(MachineInstr *MI, MachineBasicBlock *BB) const;
+ MachineBasicBlock *insertMul(MachineInstr *MI, MachineBasicBlock *BB) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_AVR_ISEL_LOWERING_H
diff --git a/lib/Target/AVR/AVRInstrFormats.td b/lib/Target/AVR/AVRInstrFormats.td
new file mode 100644
index 000000000000..c10023dd2c0a
--- /dev/null
+++ b/lib/Target/AVR/AVRInstrFormats.td
@@ -0,0 +1,577 @@
+//===-- AVRInstrFormats.td - AVR Instruction Formats -------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// AVR Instruction Format Definitions.
+//
+//===----------------------------------------------------------------------===//
+
+// A generic AVR instruction.
+class AVRInst<dag outs, dag ins, string asmstr, list<dag> pattern> : Instruction
+{
+ let Namespace = "AVR";
+
+ dag OutOperandList = outs;
+ dag InOperandList = ins;
+ let AsmString = asmstr;
+ let Pattern = pattern;
+}
+
+/// A 16-bit AVR instruction.
+class AVRInst16<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst<outs, ins, asmstr, pattern>
+{
+ field bits<16> Inst;
+
+ let Size = 2;
+}
+
+/// A 32-bit AVR instruction.
+class AVRInst32<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst<outs, ins, asmstr, pattern>
+{
+ field bits<32> Inst;
+
+ let Size = 4;
+}
+
+// A class for pseudo instructions.
+// Pseudo instructions are not real AVR instructions. The DAG stores
+// pseudo instructions, which are replaced by real AVR instructions in
+// AVRExpandPseudoInsts.cpp.
+//
+// For example, the ADDW (add wide, as in add 16 bit values) instruction
+// is defined as a pseudo instruction. In AVRExpandPseudoInsts.cpp,
+// the instruction is then replaced by two add instructions - one for each byte.
+class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ let Pattern = pattern;
+
+ let isPseudo = 1;
+ let isCodeGenOnly = 1;
+}
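The ADDW example in the comment expands to an 8-bit add of the low bytes followed by an add-with-carry of the high bytes. Modelled in C++ under that assumption:

#include <cstdint>

// What the expanded pseudo computes: add (low bytes), then adc (high bytes).
uint16_t addw(uint16_t A, uint16_t B) {
  uint8_t Lo = uint8_t(A + B);                       // add: may produce carry
  uint8_t Carry = Lo < uint8_t(A);                   // carry out of the low add
  uint8_t Hi = uint8_t((A >> 8) + (B >> 8) + Carry); // adc
  return uint16_t(Hi) << 8 | Lo;
}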
+
+//===----------------------------------------------------------------------===//
+// Register / register instruction: <|opcode|ffrd|dddd|rrrr|>
+// opcode = 4 bits.
+// f = secondary opcode = 2 bits
+// d = destination = 5 bits
+// r = source = 5 bits
+// (Accepts all registers)
+//===----------------------------------------------------------------------===//
+class FRdRr<bits<4> opcode, bits<2> f, dag outs, dag ins, string asmstr,
+ list<dag> pattern> : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<5> rd;
+ bits<5> rr;
+
+ let Inst{15-12} = opcode;
+ let Inst{11-10} = f;
+ let Inst{9} = rr{4};
+ let Inst{8-4} = rd;
+ let Inst{3-0} = rr{3-0};
+}
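A worked example helps with the split rr field, since bit 4 of the source register lands at bit 9 while rd stays contiguous. The helper simply mirrors the record above; TableGen generates the real encoder:

#include <cstdint>

// opcode{15-12} f{11-10} rr{4}->bit 9 rd{8-4} rr{3-0}->bits 3-0.
uint16_t encodeFRdRr(unsigned Opc, unsigned F, unsigned Rd, unsigned Rr) {
  return uint16_t(Opc << 12 | F << 10 | ((Rr >> 4) & 1) << 9 |
                  (Rd & 0x1F) << 4 | (Rr & 0xF));
}
// AVR ADD uses opcode = 0b0000, f = 0b11, so "add r24, r25" encodes as
// encodeFRdRr(0b0000, 0b11, 24, 25) == 0x0F89.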
+
+class FTST<bits<4> opcode, bits<2> f, dag outs, dag ins, string asmstr,
+ list<dag> pattern> : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<5> rd;
+
+ let Inst{15-12} = opcode;
+ let Inst{11-10} = f;
+ let Inst{9} = rd{4};
+ let Inst{8-4} = rd;
+ let Inst{3-0} = rd{3-0};
+}
+
+//===----------------------------------------------------------------------===//
+// Instruction of the format `<mnemonic> Z, Rd`
+// <|1001|001r|rrrr|0ttt>
+//===----------------------------------------------------------------------===//
+class FZRd<bits<3> t, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<5> rd;
+
+ let Inst{15-12} = 0b1001;
+
+ let Inst{11-9} = 0b001;
+ let Inst{8} = rd{4};
+
+ let Inst{7-4} = rd{3-0};
+
+ let Inst{3} = 0;
+ let Inst{2-0} = t;
+}
+
+//===----------------------------------------------------------------------===//
+// Register / immediate8 instruction: <|opcode|KKKK|dddd|KKKK|>
+// opcode = 4 bits.
+// K = constant data = 8 bits
+// d = destination = 4 bits
+// (Only accepts r16-r31)
+//===----------------------------------------------------------------------===//
+class FRdK<bits<4> opcode, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<4> rd;
+ bits<8> k;
+
+ let Inst{15-12} = opcode;
+ let Inst{11-8} = k{7-4};
+ let Inst{7-4} = rd{3-0};
+ let Inst{3-0} = k{3-0};
+
+ let isAsCheapAsAMove = 1;
+}
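+
+// e.g. `ldi r16, 0xff` (opcode = 0b1110, rd = r16 -> 0b0000, k = 0xff)
+// should encode as 1110 1111 0000 1111 = 0xEF0F.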
+
+//===----------------------------------------------------------------------===//
+// Register instruction: <|opcode|fffd|dddd|ffff|>
+// opcode = 4 bits.
+// f = secondary opcode = 7 bits
+// d = destination = 5 bits
+// (Accepts all registers)
+//===----------------------------------------------------------------------===//
+class FRd<bits<4> opcode, bits<7> f, dag outs, dag ins, string asmstr,
+ list<dag> pattern> : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<5> d;
+
+ let Inst{15-12} = opcode;
+ let Inst{11-9} = f{6-4};
+ let Inst{8-4} = d;
+ let Inst{3-0} = f{3-0};
+}
+
+//===----------------------------------------------------------------------===//
+// [STD/LDD] P+q, Rr special encoding: <|10q0|qqtr|rrrr|pqqq>
+// t = type (1 for STD, 0 for LDD)
+// q = displacement (6 bits)
+// r = register (5 bits)
+// p = pointer register (1 bit) [1 for Y, 0 for Z]
+//===----------------------------------------------------------------------===//
+class FSTDLDD<bit type, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<7> memri;
+ bits<5> reg; // the GP register
+
+ let Inst{15-14} = 0b10;
+ let Inst{13} = memri{5};
+ let Inst{12} = 0;
+
+ let Inst{11-10} = memri{4-3};
+ let Inst{9} = type;
+ let Inst{8} = reg{4};
+
+ let Inst{7-4} = reg{3-0};
+
+ let Inst{3} = memri{6};
+ let Inst{2-0} = memri{2-0};
+}
+
+//===---------------------------------------------------------------------===//
+// An ST/LD instruction.
+// <|100i|00tr|rrrr|ppaa|>
+// t = type (1 for store, 0 for load)
+// a = regular/postinc/predec (reg = 0b00, postinc = 0b01, predec = 0b10)
+// p = pointer register
+// r = src/dst register
+//
+// Note that the bit labelled 'i' above does not follow a simple pattern,
+// so there exists a post encoder method to set it manually.
+//===---------------------------------------------------------------------===//
+class FSTLD<bit type, bits<2> mode, dag outs, dag ins,
+ string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<2> ptrreg;
+ bits<5> reg;
+
+ let Inst{15-13} = 0b100;
+ // This bit varies depending on the arguments and the mode.
+ // We have a post encoder method to set this bit manually.
+ let Inst{12} = 0;
+
+ let Inst{11-10} = 0b00;
+ let Inst{9} = type;
+ let Inst{8} = reg{4};
+
+ let Inst{7-4} = reg{3-0};
+
+ let Inst{3-2} = ptrreg{1-0};
+ let Inst{1-0} = mode{1-0};
+
+ let PostEncoderMethod = "loadStorePostEncoder";
+}
+
+//===---------------------------------------------------------------------===//
+// Special format for the LPM/ELPM instructions
+// [E]LPM Rd, Z[+]
+// <|1001|000d|dddd|01ep>
+// d = destination register
+// e = is elpm
+// p = is postincrement
+//===---------------------------------------------------------------------===//
+class FLPMX<bit e, bit p, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<5> reg;
+
+ let Inst{15-12} = 0b1001;
+
+ let Inst{11-9} = 0b000;
+ let Inst{8} = reg{4};
+
+ let Inst{7-4} = reg{3-0};
+
+ let Inst{3-2} = 0b01;
+ let Inst{1} = e;
+ let Inst{0} = p;
+}
+
+//===----------------------------------------------------------------------===//
+// MOVWRdRr special encoding: <|0000|0001|dddd|rrrr|>
+// d = destination = 4 bits
+// r = source = 4 bits
+// (Only accepts even registers)
+//===----------------------------------------------------------------------===//
+class FMOVWRdRr<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<5> d;
+ bits<5> r;
+
+ let Inst{15-8} = 0b00000001;
+ let Inst{7-4} = d{4-1};
+ let Inst{3-0} = r{4-1};
+}
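+
+// For instance, `movw r24, r30` encodes the even register pairs as
+// d = 24/2 = 12 and r = 30/2 = 15, giving 0000 0001 1100 1111 = 0x01CF.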
+
+//===----------------------------------------------------------------------===//
+// MULSrr special encoding: <|0000|0010|dddd|rrrr|>
+// d = multiplicand = 4 bits
+// r = multiplier = 4 bits
+// (Only accepts r16-r31)
+//===----------------------------------------------------------------------===//
+class FMUL2RdRr<bit f, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<5> rd; // accept 5 bits but only encode the lower 4
+ bits<5> rr; // accept 5 bits but only encode the lower 4
+
+ let Inst{15-9} = 0b0000001;
+ let Inst{8} = f;
+ let Inst{7-4} = rd{3-0};
+ let Inst{3-0} = rr{3-0};
+}
+
+// Special encoding for the FMUL family of instructions.
+//
+// <0000|0011|fddd|frrr|>
+//
+// ff = 0b01 for FMUL
+// 0b10 for FMULS
+// 0b11 for FMULSU
+//
+// ddd = destination register
+// rrr = source register
+class FFMULRdRr<bits<2> f, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<3> rd;
+ bits<3> rr;
+
+ let Inst{15-8} = 0b00000011;
+ let Inst{7} = f{1};
+ let Inst{6-4} = rd;
+ let Inst{3} = f{0};
+ let Inst{2-0} = rr;
+}
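+
+// As an illustration, `fmul r16, r17` (f = 0b01, rd/rr encoded as the
+// register number minus 16) should encode as 0000 0011 0000 1001 = 0x0309.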
+
+
+//===----------------------------------------------------------------------===//
+// Arithmetic word instructions (ADIW / SBIW): <|1001|011f|kkdd|kkkk|>
+// f = secondary opcode = 1 bit
+// k = constant data = 6 bits
+// d = destination = 4 bits
+// (Only accepts r25:24 r27:26 r29:28 r31:30)
+//===----------------------------------------------------------------------===//
+class FWRdK<bit f, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<5> dst; // accept 5 bits but only encode bits 1 and 2
+ bits<6> k;
+
+ let Inst{15-9} = 0b1001011;
+ let Inst{8} = f;
+ let Inst{7-6} = k{5-4};
+ let Inst{5-4} = dst{2-1};
+ let Inst{3-0} = k{3-0};
+}
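+
+// e.g. `adiw r25:r24, 1` (f = 0, dst bits = 0b00 for r24, k = 1) should
+// encode as 1001 0110 0000 0001 = 0x9601.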
+
+//===----------------------------------------------------------------------===//
+// IN I/O instruction: <|1011|0AAd|dddd|AAAA|>
+// A = I/O location address = 6 bits
+// d = destination = 5 bits
+// (Accepts all registers)
+//===----------------------------------------------------------------------===//
+class FIORdA<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<5> d;
+ bits<6> A;
+
+ let Inst{15-11} = 0b10110;
+ let Inst{10-9} = A{5-4};
+ let Inst{8-4} = d;
+ let Inst{3-0} = A{3-0};
+}
+
+//===----------------------------------------------------------------------===//
+// OUT I/O instruction: <|1011|1AAr|rrrr|AAAA|>
+// A = I/O location address = 6 bits
+// r = source = 5 bits
+// (Accepts all registers)
+//===----------------------------------------------------------------------===//
+class FIOARr<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<6> A;
+ bits<5> r;
+
+ let Inst{15-11} = 0b10111;
+ let Inst{10-9} = A{5-4};
+ let Inst{8-4} = r;
+ let Inst{3-0} = A{3-0};
+}
+
+//===----------------------------------------------------------------------===//
+// I/O bit instruction.
+// <|1001|10tt|AAAA|Abbb>
+// t = type (CBI = 0b00, SBIC = 0b01, SBI = 0b10, SBIS = 0b11)
+// A = I/O location address (5 bits)
+// b = bit number
+//===----------------------------------------------------------------------===//
+class FIOBIT<bits<2> t, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<5> A;
+ bits<3> b;
+
+ let Inst{15-12} = 0b1001;
+
+ let Inst{11-10} = 0b10;
+ let Inst{9-8} = t;
+
+ let Inst{7-4} = A{4-1};
+
+ let Inst{3} = A{0};
+ let Inst{2-0} = b{2-0};
+}
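+
+// For example, `sbis 0x18, 0` (t = 0b11, A = 0x18, b = 0) should encode as
+// 1001 1011 1100 0000 = 0x9BC0.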
+
+//===----------------------------------------------------------------------===//
+// BST/BLD instruction.
+// <|1111|1ttd|dddd|0bbb>
+// t = type (1 for BST, 0 for BLD)
+// d = destination register
+// b = bit
+//===----------------------------------------------------------------------===//
+class FRdB<bits<2> t, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<5> rd;
+ bits<3> b;
+
+ let Inst{15-12} = 0b1111;
+
+ let Inst{11} = 0b1;
+ let Inst{10-9} = t;
+ let Inst{8} = rd{4};
+
+ let Inst{7-4} = rd{3-0};
+
+ let Inst{3} = 0;
+ let Inst{2-0} = b;
+}
+
+// Special encoding for the `DES K` instruction.
+//
+// <|1001|0100|KKKK|1011>
+//
+// KKKK = 4 bit immediate
+class FDES<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<4> k;
+
+ let Inst{15-12} = 0b1001;
+
+ let Inst{11-8} = 0b0100;
+
+ let Inst{7-4} = k;
+
+ let Inst{3-0} = 0b1011;
+}
+
+//===----------------------------------------------------------------------===//
+// Conditional Branching instructions: <|1111|0fkk|kkkk|ksss|>
+// f = secondary opcode = 1 bit
+// k = constant address = 7 bits
+// s = bit in status register = 3 bits
+//===----------------------------------------------------------------------===//
+class FBRsk<bit f, bits<3> s, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<7> k;
+
+ let Inst{15-11} = 0b11110;
+ let Inst{10} = f;
+ let Inst{9-3} = k;
+ let Inst{2-0} = s;
+}
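+
+// e.g. `breq .+2` (f = 0, k = 1 word, s = 0b001) should encode as
+// 1111 0000 0000 1001 = 0xF009.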
+
+//===----------------------------------------------------------------------===//
+// Special, opcode only instructions: <|opcode|>
+//===----------------------------------------------------------------------===//
+
+class F16<bits<16> opcode, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ let Inst = opcode;
+}
+
+class F32<bits<32> opcode, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst32<outs, ins, asmstr, pattern>
+{
+ let Inst = opcode;
+}
+
+//===----------------------------------------------------------------------===//
+// Branching instructions with immediate12: <|110f|kkkk|kkkk|kkkk|>
+// f = secondary opcode = 1 bit
+// k = constant address = 12 bits
+//===----------------------------------------------------------------------===//
+class FBRk<bit f, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<12> k;
+
+ let Inst{15-13} = 0b110;
+ let Inst{12} = f;
+ let Inst{11-0} = k;
+}
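+
+// For instance, `rjmp .-2` (f = 0, k = -1 words, i.e. a jump to itself)
+// should encode as 1100 1111 1111 1111 = 0xCFFF.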
+
+//===----------------------------------------------------------------------===//
+// 32 bits branching instructions: <|1001|010k|kkkk|fffk|kkkk|kkkk|kkkk|kkkk|>
+// f = secondary opcode = 3 bits
+// k = constant address = 22 bits
+//===----------------------------------------------------------------------===//
+class F32BRk<bits<3> f, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst32<outs, ins, asmstr, pattern>
+{
+ bits<22> k;
+
+ let Inst{31-25} = 0b1001010;
+ let Inst{24-20} = k{21-17};
+ let Inst{19-17} = f;
+ let Inst{16-0} = k{16-0};
+}
+
+//===----------------------------------------------------------------------===//
+// 32 bits direct mem instructions: <|1001|00fd|dddd|0000|kkkk|kkkk|kkkk|kkkk|>
+// f = secondary opcode = 1 bit
+// d = destination = 5 bits
+// k = constant address = 16 bits
+// (Accepts all registers)
+//===----------------------------------------------------------------------===//
+class F32DM<bit f, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst32<outs, ins, asmstr, pattern>
+{
+ bits<5> rd;
+ bits<16> k;
+
+ let Inst{31-28} = 0b1001;
+
+ let Inst{27-26} = 0b00;
+ let Inst{25} = f;
+ let Inst{24} = rd{4};
+
+ let Inst{23-20} = rd{3-0};
+
+ let Inst{19-16} = 0b0000;
+
+ let Inst{15-0} = k;
+}
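+
+// e.g. `lds r24, 0x0100` (f = 0, rd = 24) should encode as the two words
+// 1001 0001 1000 0000 = 0x9180 followed by 0x0100.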
+
+// <|1001|0100|bfff|1000>
+class FS<bit b, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<3> s;
+
+ let Inst{15-12} = 0b1001;
+
+ let Inst{11-8} = 0b0100;
+
+ let Inst{7} = b;
+ let Inst{6-4} = s;
+
+ let Inst{3-0} = 0b1000;
+}
+
+// Branch on a bit in the status register being set/cleared.
+// <BRBS|BRBC> s, k
+// ---------------------
+// <|1111|0fkk|kkkk|ksss>
+class FSK<bit f, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : AVRInst16<outs, ins, asmstr, pattern>
+{
+ bits<7> k;
+ bits<3> s;
+
+ let Inst{15-12} = 0b1111;
+
+ let Inst{11} = 0;
+ let Inst{10} = f;
+ let Inst{9-8} = k{6-5};
+
+ let Inst{7-4} = k{4-1};
+
+ let Inst{3} = k{0};
+ let Inst{2-0} = s;
+}
+
+class ExtensionPseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : Pseudo<outs, ins, asmstr, pattern>
+{
+ let Defs = [SREG];
+}
+
+class StorePseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : Pseudo<outs, ins, asmstr, pattern>
+{
+ let Defs = [SP];
+}
+
+class SelectPseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : Pseudo<outs, ins, asmstr, pattern>
+{
+ let usesCustomInserter = 1;
+
+ let Uses = [SREG];
+}
+
+class ShiftPseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : Pseudo<outs, ins, asmstr, pattern>
+{
+ let usesCustomInserter = 1;
+
+ let Defs = [SREG];
+}
+
diff --git a/lib/Target/AVR/AVRInstrInfo.cpp b/lib/Target/AVR/AVRInstrInfo.cpp
new file mode 100644
index 000000000000..0327c015cbbb
--- /dev/null
+++ b/lib/Target/AVR/AVRInstrInfo.cpp
@@ -0,0 +1,466 @@
+//===-- AVRInstrInfo.cpp - AVR Instruction Information --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the AVR implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AVRInstrInfo.h"
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+
+#include "AVR.h"
+#include "AVRMachineFunctionInfo.h"
+#include "AVRTargetMachine.h"
+#include "MCTargetDesc/AVRMCTargetDesc.h"
+
+#define GET_INSTRINFO_CTOR_DTOR
+#include "AVRGenInstrInfo.inc"
+
+namespace llvm {
+
+AVRInstrInfo::AVRInstrInfo()
+ : AVRGenInstrInfo(AVR::ADJCALLSTACKDOWN, AVR::ADJCALLSTACKUP), RI() {}
+
+void AVRInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
+ unsigned Opc;
+
+ if (AVR::GPR8RegClass.contains(DestReg, SrcReg)) {
+ Opc = AVR::MOVRdRr;
+ } else if (AVR::DREGSRegClass.contains(DestReg, SrcReg)) {
+ Opc = AVR::MOVWRdRr;
+ } else if (SrcReg == AVR::SP && AVR::DREGSRegClass.contains(DestReg)) {
+ Opc = AVR::SPREAD;
+ } else if (DestReg == AVR::SP && AVR::DREGSRegClass.contains(SrcReg)) {
+ Opc = AVR::SPWRITE;
+ } else {
+ llvm_unreachable("Impossible reg-to-reg copy");
+ }
+
+ BuildMI(MBB, MI, DL, get(Opc), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+}
+
+unsigned AVRInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const {
+ switch (MI.getOpcode()) {
+ case AVR::LDDRdPtrQ:
+ case AVR::LDDWRdYQ: { //:FIXME: remove this once PR13375 gets fixed
+ if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
+ MI.getOperand(2).getImm() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+unsigned AVRInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const {
+ switch (MI.getOpcode()) {
+ case AVR::STDPtrQRr:
+ case AVR::STDWPtrQRr: {
+ if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
+ MI.getOperand(1).getImm() == 0) {
+ FrameIndex = MI.getOperand(0).getIndex();
+ return MI.getOperand(2).getReg();
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void AVRInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned SrcReg, bool isKill,
+ int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ MachineFunction &MF = *MBB.getParent();
+
+ DebugLoc DL;
+ if (MI != MBB.end()) {
+ DL = MI->getDebugLoc();
+ }
+
+ const MachineFrameInfo &MFI = *MF.getFrameInfo();
+
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(MF, FrameIndex),
+ MachineMemOperand::MOStore, MFI.getObjectSize(FrameIndex),
+ MFI.getObjectAlignment(FrameIndex));
+
+ unsigned Opcode = 0;
+ if (RC->hasType(MVT::i8)) {
+ Opcode = AVR::STDPtrQRr;
+ } else if (RC->hasType(MVT::i16)) {
+ Opcode = AVR::STDWPtrQRr;
+ } else {
+ llvm_unreachable("Cannot store this register into a stack slot!");
+ }
+
+ BuildMI(MBB, MI, DL, get(Opcode))
+ .addFrameIndex(FrameIndex)
+ .addImm(0)
+ .addReg(SrcReg, getKillRegState(isKill))
+ .addMemOperand(MMO);
+}
+
+void AVRInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL;
+ if (MI != MBB.end()) {
+ DL = MI->getDebugLoc();
+ }
+
+ MachineFunction &MF = *MBB.getParent();
+ const MachineFrameInfo &MFI = *MF.getFrameInfo();
+
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(MF, FrameIndex),
+ MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex),
+ MFI.getObjectAlignment(FrameIndex));
+
+ unsigned Opcode = 0;
+ if (RC->hasType(MVT::i8)) {
+ Opcode = AVR::LDDRdPtrQ;
+ } else if (RC->hasType(MVT::i16)) {
+ // Opcode = AVR::LDDWRdPtrQ;
+ //:FIXME: remove this once PR13375 gets fixed
+ Opcode = AVR::LDDWRdYQ;
+ } else {
+ llvm_unreachable("Cannot load this register from a stack slot!");
+ }
+
+ BuildMI(MBB, MI, DL, get(Opcode), DestReg)
+ .addFrameIndex(FrameIndex)
+ .addImm(0)
+ .addMemOperand(MMO);
+}
+
+const MCInstrDesc &AVRInstrInfo::getBrCond(AVRCC::CondCodes CC) const {
+ switch (CC) {
+ default:
+ llvm_unreachable("Unknown condition code!");
+ case AVRCC::COND_EQ:
+ return get(AVR::BREQk);
+ case AVRCC::COND_NE:
+ return get(AVR::BRNEk);
+ case AVRCC::COND_GE:
+ return get(AVR::BRGEk);
+ case AVRCC::COND_LT:
+ return get(AVR::BRLTk);
+ case AVRCC::COND_SH:
+ return get(AVR::BRSHk);
+ case AVRCC::COND_LO:
+ return get(AVR::BRLOk);
+ case AVRCC::COND_MI:
+ return get(AVR::BRMIk);
+ case AVRCC::COND_PL:
+ return get(AVR::BRPLk);
+ }
+}
+
+AVRCC::CondCodes AVRInstrInfo::getCondFromBranchOpc(unsigned Opc) const {
+ switch (Opc) {
+ default:
+ return AVRCC::COND_INVALID;
+ case AVR::BREQk:
+ return AVRCC::COND_EQ;
+ case AVR::BRNEk:
+ return AVRCC::COND_NE;
+ case AVR::BRSHk:
+ return AVRCC::COND_SH;
+ case AVR::BRLOk:
+ return AVRCC::COND_LO;
+ case AVR::BRMIk:
+ return AVRCC::COND_MI;
+ case AVR::BRPLk:
+ return AVRCC::COND_PL;
+ case AVR::BRGEk:
+ return AVRCC::COND_GE;
+ case AVR::BRLTk:
+ return AVRCC::COND_LT;
+ }
+}
+
+AVRCC::CondCodes AVRInstrInfo::getOppositeCondition(AVRCC::CondCodes CC) const {
+ switch (CC) {
+ default:
+ llvm_unreachable("Invalid condition!");
+ case AVRCC::COND_EQ:
+ return AVRCC::COND_NE;
+ case AVRCC::COND_NE:
+ return AVRCC::COND_EQ;
+ case AVRCC::COND_SH:
+ return AVRCC::COND_LO;
+ case AVRCC::COND_LO:
+ return AVRCC::COND_SH;
+ case AVRCC::COND_GE:
+ return AVRCC::COND_LT;
+ case AVRCC::COND_LT:
+ return AVRCC::COND_GE;
+ case AVRCC::COND_MI:
+ return AVRCC::COND_PL;
+ case AVRCC::COND_PL:
+ return AVRCC::COND_MI;
+ }
+}
+
+bool AVRInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
+ // Start from the bottom of the block and work up, examining the
+ // terminator instructions.
+ MachineBasicBlock::iterator I = MBB.end();
+ MachineBasicBlock::iterator UnCondBrIter = MBB.end();
+
+ while (I != MBB.begin()) {
+ --I;
+ if (I->isDebugValue()) {
+ continue;
+ }
+
+ // Working from the bottom, when we see a non-terminator
+ // instruction, we're done.
+ if (!isUnpredicatedTerminator(*I)) {
+ break;
+ }
+
+ // A terminator that isn't a branch can't easily be handled
+ // by this analysis.
+ if (!I->getDesc().isBranch()) {
+ return true;
+ }
+
+ // Handle unconditional branches.
+ //:TODO: add here jmp
+ if (I->getOpcode() == AVR::RJMPk) {
+ UnCondBrIter = I;
+
+ if (!AllowModify) {
+ TBB = I->getOperand(0).getMBB();
+ continue;
+ }
+
+ // If the block has any instructions after a JMP, delete them.
+ while (std::next(I) != MBB.end()) {
+ std::next(I)->eraseFromParent();
+ }
+
+ Cond.clear();
+      FBB = nullptr;
+
+ // Delete the JMP if it's equivalent to a fall-through.
+ if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
+        TBB = nullptr;
+ I->eraseFromParent();
+ I = MBB.end();
+ UnCondBrIter = MBB.end();
+ continue;
+ }
+
+      // TBB is used to indicate the unconditional destination.
+ TBB = I->getOperand(0).getMBB();
+ continue;
+ }
+
+ // Handle conditional branches.
+ AVRCC::CondCodes BranchCode = getCondFromBranchOpc(I->getOpcode());
+ if (BranchCode == AVRCC::COND_INVALID) {
+ return true; // Can't handle indirect branch.
+ }
+
+ // Working from the bottom, handle the first conditional branch.
+ if (Cond.empty()) {
+ MachineBasicBlock *TargetBB = I->getOperand(0).getMBB();
+ if (AllowModify && UnCondBrIter != MBB.end() &&
+ MBB.isLayoutSuccessor(TargetBB)) {
+ // If we can modify the code and it ends in something like:
+ //
+ // jCC L1
+ // jmp L2
+ // L1:
+ // ...
+ // L2:
+ //
+ // Then we can change this to:
+ //
+ // jnCC L2
+ // L1:
+ // ...
+ // L2:
+ //
+ // Which is a bit more efficient.
+ // We conditionally jump to the fall-through block.
+ BranchCode = getOppositeCondition(BranchCode);
+ unsigned JNCC = getBrCond(BranchCode).getOpcode();
+ MachineBasicBlock::iterator OldInst = I;
+
+ BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(JNCC))
+ .addMBB(UnCondBrIter->getOperand(0).getMBB());
+ BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(AVR::RJMPk))
+ .addMBB(TargetBB);
+
+ OldInst->eraseFromParent();
+ UnCondBrIter->eraseFromParent();
+
+ // Restart the analysis.
+ UnCondBrIter = MBB.end();
+ I = MBB.end();
+ continue;
+ }
+
+ FBB = TBB;
+ TBB = I->getOperand(0).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(BranchCode));
+ continue;
+ }
+
+    // Handle subsequent conditional branches.
+ assert(Cond.size() == 1);
+ assert(TBB);
+
+ // Only handle the case where all conditional branches branch to
+ // the same destination.
+ if (TBB != I->getOperand(0).getMBB()) {
+ return true;
+ }
+
+ AVRCC::CondCodes OldBranchCode = (AVRCC::CondCodes)Cond[0].getImm();
+ // If the conditions are the same, we can leave them alone.
+ if (OldBranchCode == BranchCode) {
+ continue;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+unsigned AVRInstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const {
+ // Shouldn't be a fall through.
+ assert(TBB && "InsertBranch must not be told to insert a fallthrough");
+ assert((Cond.size() == 1 || Cond.size() == 0) &&
+ "AVR branch conditions have one component!");
+
+ if (Cond.empty()) {
+ assert(!FBB && "Unconditional branch with multiple successors!");
+ BuildMI(&MBB, DL, get(AVR::RJMPk)).addMBB(TBB);
+ return 1;
+ }
+
+ // Conditional branch.
+ unsigned Count = 0;
+ AVRCC::CondCodes CC = (AVRCC::CondCodes)Cond[0].getImm();
+ BuildMI(&MBB, DL, getBrCond(CC)).addMBB(TBB);
+ ++Count;
+
+ if (FBB) {
+ // Two-way Conditional branch. Insert the second branch.
+ BuildMI(&MBB, DL, get(AVR::RJMPk)).addMBB(FBB);
+ ++Count;
+ }
+
+ return Count;
+}
+
+unsigned AVRInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator I = MBB.end();
+ unsigned Count = 0;
+
+ while (I != MBB.begin()) {
+ --I;
+ if (I->isDebugValue()) {
+ continue;
+ }
+ //:TODO: add here the missing jmp instructions once they are implemented
+ // like jmp, {e}ijmp, and other cond branches, ...
+ if (I->getOpcode() != AVR::RJMPk &&
+ getCondFromBranchOpc(I->getOpcode()) == AVRCC::COND_INVALID) {
+ break;
+ }
+
+ // Remove the branch.
+ I->eraseFromParent();
+ I = MBB.end();
+ ++Count;
+ }
+
+ return Count;
+}
+
+bool AVRInstrInfo::ReverseBranchCondition(
+ SmallVectorImpl<MachineOperand> &Cond) const {
+ assert(Cond.size() == 1 && "Invalid AVR branch condition!");
+
+ AVRCC::CondCodes CC = static_cast<AVRCC::CondCodes>(Cond[0].getImm());
+ Cond[0].setImm(getOppositeCondition(CC));
+
+ return false;
+}
+
+unsigned AVRInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
+ unsigned Opcode = MI->getOpcode();
+
+ switch (Opcode) {
+ // A regular instruction
+ default: {
+ const MCInstrDesc &Desc = get(Opcode);
+ return Desc.getSize();
+ }
+ case TargetOpcode::EH_LABEL:
+ case TargetOpcode::IMPLICIT_DEF:
+ case TargetOpcode::KILL:
+ case TargetOpcode::DBG_VALUE:
+ return 0;
+ case TargetOpcode::INLINEASM: {
+ const MachineFunction *MF = MI->getParent()->getParent();
+    const AVRTargetMachine &TM =
+        static_cast<const AVRTargetMachine&>(MF->getTarget());
+ const TargetInstrInfo &TII = *TM.getSubtargetImpl()->getInstrInfo();
+ return TII.getInlineAsmLength(MI->getOperand(0).getSymbolName(),
+ *TM.getMCAsmInfo());
+ }
+ }
+}
+
+} // end of namespace llvm
diff --git a/lib/Target/AVR/AVRInstrInfo.h b/lib/Target/AVR/AVRInstrInfo.h
new file mode 100644
index 000000000000..fc8945d82432
--- /dev/null
+++ b/lib/Target/AVR/AVRInstrInfo.h
@@ -0,0 +1,110 @@
+//===-- AVRInstrInfo.h - AVR Instruction Information ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the AVR implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_AVR_INSTR_INFO_H
+#define LLVM_AVR_INSTR_INFO_H
+
+#include "llvm/Target/TargetInstrInfo.h"
+
+#include "AVRRegisterInfo.h"
+
+#define GET_INSTRINFO_HEADER
+#include "AVRGenInstrInfo.inc"
+#undef GET_INSTRINFO_HEADER
+
+namespace llvm {
+
+namespace AVRCC {
+
+/// AVR specific condition codes.
+/// These correspond to `AVR_*_COND` in `AVRInstrInfo.td`.
+/// They must be kept in sync.
+enum CondCodes {
+ COND_EQ, //!< Equal
+ COND_NE, //!< Not equal
+ COND_GE, //!< Greater than or equal
+ COND_LT, //!< Less than
+ COND_SH, //!< Unsigned same or higher
+ COND_LO, //!< Unsigned lower
+ COND_MI, //!< Minus
+ COND_PL, //!< Plus
+ COND_INVALID
+};
+
+} // end of namespace AVRCC
+
+namespace AVRII {
+
+/// Specifies a target operand flag.
+enum TOF {
+ MO_NO_FLAG,
+
+ /// On a symbol operand, this represents the lo part.
+ MO_LO = (1 << 1),
+
+ /// On a symbol operand, this represents the hi part.
+ MO_HI = (1 << 2),
+
+  /// On a symbol operand, this represents that the value must be negated.
+ MO_NEG = (1 << 3)
+};
+
+} // end of namespace AVRII
+
+/// Utilities related to the AVR instruction set.
+class AVRInstrInfo : public AVRGenInstrInfo {
+public:
+ explicit AVRInstrInfo();
+
+ const AVRRegisterInfo &getRegisterInfo() const { return RI; }
+ const MCInstrDesc &getBrCond(AVRCC::CondCodes CC) const;
+ AVRCC::CondCodes getCondFromBranchOpc(unsigned Opc) const;
+ AVRCC::CondCodes getOppositeCondition(AVRCC::CondCodes CC) const;
+ unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
+
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, unsigned SrcReg,
+ bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, unsigned DestReg,
+ int FrameIndex, const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+ unsigned isLoadFromStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const override;
+ unsigned isStoreToStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const override;
+
+ // Branch analysis.
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify = false) const override;
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const override;
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+ bool
+ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+
+private:
+ const AVRRegisterInfo RI;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_AVR_INSTR_INFO_H
diff --git a/lib/Target/AVR/AVRInstrInfo.td b/lib/Target/AVR/AVRInstrInfo.td
new file mode 100644
index 000000000000..e75683680150
--- /dev/null
+++ b/lib/Target/AVR/AVRInstrInfo.td
@@ -0,0 +1,1981 @@
+//===-- AVRInstrInfo.td - AVR Instruction defs -------------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the AVR instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+include "AVRInstrFormats.td"
+
+//===----------------------------------------------------------------------===//
+// AVR Type Profiles
+//===----------------------------------------------------------------------===//
+
+def SDT_AVRCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i16>]>;
+def SDT_AVRCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i16>, SDTCisVT<1, i16>]>;
+def SDT_AVRCall : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
+def SDT_AVRWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
+def SDT_AVRBrcond : SDTypeProfile<0, 2,
+ [SDTCisVT<0, OtherVT>, SDTCisVT<1, i8>]>;
+def SDT_AVRCmp : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
+def SDT_AVRTst : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDT_AVRSelectCC : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
+ SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
+
+//===----------------------------------------------------------------------===//
+// AVR Specific Node Definitions
+//===----------------------------------------------------------------------===//
+
+def AVRretflag : SDNode<"AVRISD::RET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+def AVRretiflag : SDNode<"AVRISD::RETI_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+
+def AVRcallseq_start : SDNode<"ISD::CALLSEQ_START", SDT_AVRCallSeqStart,
+ [SDNPHasChain, SDNPOutGlue]>;
+def AVRcallseq_end : SDNode<"ISD::CALLSEQ_END", SDT_AVRCallSeqEnd,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+
+def AVRcall : SDNode<"AVRISD::CALL", SDT_AVRCall,
+ [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>;
+
+def AVRWrapper : SDNode<"AVRISD::WRAPPER", SDT_AVRWrapper>;
+
+def AVRbrcond : SDNode<"AVRISD::BRCOND", SDT_AVRBrcond,
+ [SDNPHasChain, SDNPInGlue]>;
+def AVRcmp : SDNode<"AVRISD::CMP", SDT_AVRCmp, [SDNPOutGlue]>;
+def AVRcmpc : SDNode<"AVRISD::CMPC", SDT_AVRCmp, [SDNPInGlue, SDNPOutGlue]>;
+def AVRtst : SDNode<"AVRISD::TST", SDT_AVRTst, [SDNPOutGlue]>;
+def AVRselectcc: SDNode<"AVRISD::SELECT_CC", SDT_AVRSelectCC, [SDNPInGlue]>;
+
+// Shift nodes.
+def AVRlsl : SDNode<"AVRISD::LSL", SDTIntUnaryOp>;
+def AVRlsr : SDNode<"AVRISD::LSR", SDTIntUnaryOp>;
+def AVRrol : SDNode<"AVRISD::ROL", SDTIntUnaryOp>;
+def AVRror : SDNode<"AVRISD::ROR", SDTIntUnaryOp>;
+def AVRasr : SDNode<"AVRISD::ASR", SDTIntUnaryOp>;
+
+// Pseudo shift nodes for non-constant shift amounts.
+def AVRlslLoop : SDNode<"AVRISD::LSLLOOP", SDTIntShiftOp>;
+def AVRlsrLoop : SDNode<"AVRISD::LSRLOOP", SDTIntShiftOp>;
+def AVRasrLoop : SDNode<"AVRISD::ASRLOOP", SDTIntShiftOp>;
+
+//===----------------------------------------------------------------------===//
+// AVR Operands, Complex Patterns and Transformations Definitions.
+//===----------------------------------------------------------------------===//
+
+def imm8_neg_XFORM : SDNodeXForm<imm,
+[{
+ return CurDAG->getTargetConstant(-N->getAPIntValue(), SDLoc(N), MVT::i8);
+}]>;
+
+def imm16_neg_XFORM : SDNodeXForm<imm,
+[{
+ return CurDAG->getTargetConstant(-N->getAPIntValue(), SDLoc(N), MVT::i16);
+}]>;
+
+def imm0_63_neg : PatLeaf<(imm),
+[{
+ int64_t val = -N->getSExtValue();
+ return val >= 0 && val < 64;
+}], imm16_neg_XFORM>;
+
+def uimm6 : PatLeaf<(imm), [{ return isUInt<6>(N->getZExtValue()); }]>;
+
+def ioaddr_XFORM : SDNodeXForm<imm,
+[{
+ return CurDAG->getTargetConstant(uint8_t(N->getZExtValue()) - 0x20, SDLoc(N), MVT::i8);
+}]>;
+
+def iobitpos8_XFORM : SDNodeXForm<imm,
+[{
+ return CurDAG->getTargetConstant(Log2_32(uint8_t(N->getZExtValue())),
+ SDLoc(N), MVT::i8);
+}]>;
+
+def iobitposn8_XFORM : SDNodeXForm<imm,
+[{
+ return CurDAG->getTargetConstant(Log2_32(uint8_t(~N->getZExtValue())),
+ SDLoc(N), MVT::i8);
+}]>;
+
+def ioaddr8 : PatLeaf<(imm),
+[{
+ uint64_t val = N->getZExtValue();
+ return val >= 0x20 && val < 0x60;
+}], ioaddr_XFORM>;
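+
+// e.g. the data space address 0x38 is rewritten by ioaddr_XFORM into the
+// I/O address 0x38 - 0x20 = 0x18.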
+
+def lowioaddr8 : PatLeaf<(imm),
+[{
+ uint64_t val = N->getZExtValue();
+ return val >= 0x20 && val < 0x40;
+}], ioaddr_XFORM>;
+
+def ioaddr16 : PatLeaf<(imm),
+[{
+ uint64_t val = N->getZExtValue();
+ return val >= 0x20 && val < 0x5f;
+}], ioaddr_XFORM>;
+
+def iobitpos8 : PatLeaf<(imm),
+[{
+ return isPowerOf2_32(uint8_t(N->getZExtValue()));
+}], iobitpos8_XFORM>;
+
+def iobitposn8 : PatLeaf<(imm),
+[{
+ return isPowerOf2_32(uint8_t(~N->getZExtValue()));
+}], iobitposn8_XFORM>;
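+
+// e.g. iobitpos8 matches the mask 0x08 (a power of two) and its transform
+// rewrites it into the bit position Log2_32(0x08) = 3; iobitposn8 does the
+// same on the complemented value.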
+
+def MemriAsmOperand : AsmOperandClass {
+ let Name = "Memri";
+ let ParserMethod = "parseMemriOperand";
+}
+
+/// Address operand for `reg+imm` used by STD and LDD.
+def memri : Operand<iPTR>
+{
+ let MIOperandInfo = (ops PTRDISPREGS, i16imm);
+
+ let PrintMethod = "printMemri";
+ let EncoderMethod = "encodeMemri";
+
+ let ParserMatchClass = MemriAsmOperand;
+}
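+
+// For example, `ldd r24, Y+2` is parsed into a memri operand with base
+// pointer register Y and displacement 2.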
+
+// Address operand for `SP+imm` used by STD{W}SPQRr
+def memspi : Operand<iPTR>
+{
+ let MIOperandInfo = (ops GPRSP, i16imm);
+}
+
+def i8imm_com : Operand<i8>
+{
+ let EncoderMethod = "encodeComplement";
+
+ let MIOperandInfo = (ops i8imm);
+}
+
+def relbrtarget_7 : Operand<OtherVT>
+{
+ let PrintMethod = "printPCRelImm";
+ let EncoderMethod = "encodeRelCondBrTarget<AVR::fixup_7_pcrel>";
+}
+
+def brtarget_13 : Operand<OtherVT>
+{
+ let PrintMethod = "printPCRelImm";
+ let EncoderMethod = "encodeRelCondBrTarget<AVR::fixup_13_pcrel>";
+}
+
+// The target of a 22 or 16-bit call/jmp instruction.
+def call_target : Operand<iPTR>
+{
+ let EncoderMethod = "encodeCallTarget";
+}
+
+// Addressing mode pattern reg+imm6
+def addr : ComplexPattern<iPTR, 2, "SelectAddr", [], [SDNPWantRoot]>;
+
+// AsmOperand class for a pointer register.
+// Used with the LD/ST family of instructions.
+// See FSTLD in AVRInstrFormats.td
+def PtrRegAsmOperand : AsmOperandClass
+{
+ let Name = "Reg";
+}
+
+// A special operand type for the LD/ST instructions.
+// It converts the pointer register number into a two-bit field used in the
+// instruction.
+def LDSTPtrReg : Operand<i16>
+{
+ let MIOperandInfo = (ops PTRREGS);
+ let EncoderMethod = "encodeLDSTPtrReg";
+
+ let ParserMatchClass = PtrRegAsmOperand;
+}
+
+// A special operand type for the LDD/STD instructions.
+// It behaves identically to the LD/ST version, except it restricts
+// the pointer registers to Y and Z.
+def LDDSTDPtrReg : Operand<i16>
+{
+ let MIOperandInfo = (ops PTRDISPREGS);
+ let EncoderMethod = "encodeLDSTPtrReg";
+
+ let ParserMatchClass = PtrRegAsmOperand;
+}
+
+//===----------------------------------------------------------------------===//
+// AVR predicates for subtarget features
+//===----------------------------------------------------------------------===//
+
+def HasSRAM : Predicate<"Subtarget->hasSRAM()">,
+ AssemblerPredicate<"FeatureSRAM">;
+
+def HasJMPCALL : Predicate<"Subtarget->hasJMPCALL()">,
+ AssemblerPredicate<"FeatureJMPCALL">;
+
+def HasIJMPCALL : Predicate<"Subtarget->hasIJMPCALL()">,
+ AssemblerPredicate<"FeatureIJMPCALL">;
+
+def HasEIJMPCALL : Predicate<"Subtarget->hasEIJMPCALL()">,
+ AssemblerPredicate<"FeatureEIJMPCALL">;
+
+def HasADDSUBIW : Predicate<"Subtarget->hasADDSUBIW()">,
+ AssemblerPredicate<"FeatureADDSUBIW">;
+
+def HasSmallStack : Predicate<"Subtarget->hasSmallStack()">,
+ AssemblerPredicate<"FeatureSmallStack">;
+
+def HasMOVW : Predicate<"Subtarget->hasMOVW()">,
+ AssemblerPredicate<"FeatureMOVW">;
+
+def HasLPM : Predicate<"Subtarget->hasLPM()">,
+ AssemblerPredicate<"FeatureLPM">;
+
+def HasLPMX : Predicate<"Subtarget->hasLPMX()">,
+ AssemblerPredicate<"FeatureLPMX">;
+
+def HasELPM : Predicate<"Subtarget->hasELPM()">,
+ AssemblerPredicate<"FeatureELPM">;
+
+def HasELPMX : Predicate<"Subtarget->hasELPMX()">,
+ AssemblerPredicate<"FeatureELPMX">;
+
+def HasSPM : Predicate<"Subtarget->hasSPM()">,
+ AssemblerPredicate<"FeatureSPM">;
+
+def HasSPMX : Predicate<"Subtarget->hasSPMX()">,
+ AssemblerPredicate<"FeatureSPMX">;
+
+def HasDES : Predicate<"Subtarget->hasDES()">,
+ AssemblerPredicate<"FeatureDES">;
+
+def SupportsRMW : Predicate<"Subtarget->supportsRMW()">,
+ AssemblerPredicate<"FeatureRMW">;
+
+def SupportsMultiplication : Predicate<"Subtarget->supportsMultiplication()">,
+ AssemblerPredicate<"FeatureMultiplication">;
+
+def HasBREAK : Predicate<"Subtarget->hasBREAK()">,
+ AssemblerPredicate<"FeatureBREAK">;
+
+def HasTinyEncoding : Predicate<"Subtarget->hasTinyEncoding()">,
+ AssemblerPredicate<"FeatureTinyEncoding">;
+
+
+// AVR specific condition codes. These correspond to `AVRCC::CondCodes` in
+// AVRInstrInfo.h. They must be kept in sync.
+def AVR_COND_EQ : PatLeaf<(i8 0)>;
+def AVR_COND_NE : PatLeaf<(i8 1)>;
+def AVR_COND_GE : PatLeaf<(i8 2)>;
+def AVR_COND_LT : PatLeaf<(i8 3)>;
+def AVR_COND_SH : PatLeaf<(i8 4)>;
+def AVR_COND_LO : PatLeaf<(i8 5)>;
+def AVR_COND_MI : PatLeaf<(i8 6)>;
+def AVR_COND_PL : PatLeaf<(i8 7)>;
+
+
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// AVR Instruction list
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+
+// ADJCALLSTACKDOWN/UP implicitly use/def SP because they may be expanded into
+// a stack adjustment and the codegen must know that they may modify the stack
+// pointer before prolog-epilog rewriting occurs.
+// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
+// sub / add which can clobber SREG.
+let Defs = [SP, SREG],
+Uses = [SP] in
+{
+ def ADJCALLSTACKDOWN : Pseudo<(outs),
+ (ins i16imm:$amt),
+ "#ADJCALLSTACKDOWN",
+ [(AVRcallseq_start timm:$amt)]>;
+
+  // R31R30 is used to update SP. Since it is a scratch register, and this
+  // pseudo is placed after the function call, R31R30 should always be free.
+ //let Defs = [R31R30],
+ //Uses = [R31R30] in
+ //:TODO: if we enable this, the pseudo is killed because it looks dead
+ def ADJCALLSTACKUP : Pseudo<(outs),
+ (ins i16imm:$amt1, i16imm:$amt2),
+ "#ADJCALLSTACKUP",
+ [(AVRcallseq_end timm:$amt1, timm:$amt2)]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Addition
+//===----------------------------------------------------------------------===//
+let isCommutable = 1,
+Constraints = "$src = $rd",
+Defs = [SREG] in
+{
+ // ADD Rd, Rr
+ // Adds two 8-bit registers.
+ def ADDRdRr : FRdRr<0b0000,
+ 0b11,
+ (outs GPR8:$rd),
+ (ins GPR8:$src, GPR8:$rr),
+ "add\t$rd, $rr",
+ [(set i8:$rd, (add i8:$src, i8:$rr)),
+ (implicit SREG)]>;
+
+ // ADDW Rd+1:Rd, Rr+1:Rr
+ // Pseudo instruction to add four 8-bit registers as two 16-bit values.
+ //
+ // Expands to:
+ // add Rd, Rr
+ // adc Rd+1, Rr+1
+ def ADDWRdRr : Pseudo<(outs DREGS:$rd),
+ (ins DREGS:$src, DREGS:$rr),
+ "addw\t$rd, $rr",
+ [(set i16:$rd, (add i16:$src, i16:$rr)),
+ (implicit SREG)]>;
+
+ // ADC Rd, Rr
+ // Adds two 8-bit registers with carry.
+ let Uses = [SREG] in
+ def ADCRdRr : FRdRr<0b0001,
+ 0b11,
+ (outs GPR8:$rd),
+ (ins GPR8:$src, GPR8:$rr),
+ "adc\t$rd, $rr",
+ [(set i8:$rd, (adde i8:$src, i8:$rr)),
+ (implicit SREG)]>;
+
+ // ADCW Rd+1:Rd, Rr+1:Rr
+ // Pseudo instruction to add four 8-bit registers as two 16-bit values with
+ // carry.
+ //
+ // Expands to:
+ // adc Rd, Rr
+ // adc Rd+1, Rr+1
+ let Uses = [SREG] in
+ def ADCWRdRr : Pseudo<(outs DREGS:$rd),
+ (ins DREGS:$src, DREGS:$rr),
+ "adcw\t$rd, $rr",
+ [(set i16:$rd, (adde i16:$src, i16:$rr)),
+ (implicit SREG)]>;
+
+  // ADIW Rd, K
+ // Adds an immediate 6-bit value K to Rd, placing the result in Rd.
+ def ADIWRdK : FWRdK<0b0,
+ (outs IWREGS:$rd),
+ (ins IWREGS:$src, i16imm:$k),
+ "adiw\t$rd, $k",
+ [(set i16:$rd, (add i16:$src, uimm6:$k)),
+ (implicit SREG)]>,
+ Requires<[HasADDSUBIW]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Subtraction
+//===----------------------------------------------------------------------===//
+let Constraints = "$src = $rd",
+Defs = [SREG] in
+{
+ // SUB Rd, Rr
+ // Subtracts the 8-bit value of Rr from Rd and places the value in Rd.
+ def SUBRdRr : FRdRr<0b0001,
+ 0b10,
+ (outs GPR8:$rd),
+ (ins GPR8:$src, GPR8:$rr),
+ "sub\t$rd, $rr",
+ [(set i8:$rd, (sub i8:$src, i8:$rr)),
+ (implicit SREG)]>;
+
+ // SUBW Rd+1:Rd, Rr+1:Rr
+ // Subtracts two 16-bit values and places the result into Rd.
+ //
+ // Expands to:
+ // sub Rd, Rr
+ // sbc Rd+1, Rr+1
+ def SUBWRdRr : Pseudo<(outs DREGS:$rd),
+ (ins DREGS:$src, DREGS:$rr),
+ "subw\t$rd, $rr",
+ [(set i16:$rd, (sub i16:$src, i16:$rr)),
+ (implicit SREG)]>;
+
+ def SUBIRdK : FRdK<0b0101,
+ (outs LD8:$rd),
+ (ins LD8:$src, i8imm:$k),
+ "subi\t$rd, $k",
+ [(set i8:$rd, (sub i8:$src, imm:$k)),
+ (implicit SREG)]>;
+
+ // SUBIW Rd+1:Rd, K+1:K
+ //
+ // Expands to:
+ // subi Rd, K
+ // sbci Rd+1, K+1
+ def SUBIWRdK : Pseudo<(outs DLDREGS:$rd),
+ (ins DLDREGS:$src, i16imm:$rr),
+ "subiw\t$rd, $rr",
+ [(set i16:$rd, (sub i16:$src, imm:$rr)),
+ (implicit SREG)]>;
+
+ def SBIWRdK : FWRdK<0b1,
+ (outs IWREGS:$rd),
+ (ins IWREGS:$src, i16imm:$k),
+ "sbiw\t$rd, $k",
+ [(set i16:$rd, (sub i16:$src, uimm6:$k)),
+ (implicit SREG)]>,
+ Requires<[HasADDSUBIW]>;
+
+ // Subtract with carry operations which must read the carry flag in SREG.
+ let Uses = [SREG] in
+ {
+ def SBCRdRr : FRdRr<0b0000,
+ 0b10,
+ (outs GPR8:$rd),
+ (ins GPR8:$src, GPR8:$rr),
+ "sbc\t$rd, $rr",
+ [(set i8:$rd, (sube i8:$src, i8:$rr)),
+ (implicit SREG)]>;
+
+ // SBCW Rd+1:Rd, Rr+1:Rr
+ //
+ // Expands to:
+ // sbc Rd, Rr
+ // sbc Rd+1, Rr+1
+ def SBCWRdRr : Pseudo<(outs DREGS:$rd),
+ (ins DREGS:$src, DREGS:$rr),
+ "sbcw\t$rd, $rr",
+ [(set i16:$rd, (sube i16:$src, i16:$rr)),
+ (implicit SREG)]>;
+
+ def SBCIRdK : FRdK<0b0100,
+ (outs LD8:$rd),
+ (ins LD8:$src, i8imm:$k),
+ "sbci\t$rd, $k",
+ [(set i8:$rd, (sube i8:$src, imm:$k)),
+ (implicit SREG)]>;
+
+ // SBCIW Rd+1:Rd, K+1:K
+ // sbci Rd, K
+ // sbci Rd+1, K+1
+ def SBCIWRdK : Pseudo<(outs DLDREGS:$rd),
+ (ins DLDREGS:$src, i16imm:$rr),
+ "sbciw\t$rd, $rr",
+ [(set i16:$rd, (sube i16:$src, imm:$rr)),
+ (implicit SREG)]>;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Increment and Decrement
+//===----------------------------------------------------------------------===//
+let Constraints = "$src = $rd",
+Defs = [SREG] in
+{
+ def INCRd : FRd<0b1001,
+ 0b0100011,
+ (outs GPR8:$rd),
+ (ins GPR8:$src),
+ "inc\t$rd",
+ [(set i8:$rd, (add i8:$src, 1)), (implicit SREG)]>;
+
+ def DECRd : FRd<0b1001,
+ 0b0101010,
+ (outs GPR8:$rd),
+ (ins GPR8:$src),
+ "dec\t$rd",
+ [(set i8:$rd, (add i8:$src, -1)), (implicit SREG)]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Multiplication
+//===----------------------------------------------------------------------===//
+
+let isCommutable = 1,
+Defs = [R1, R0, SREG] in
+{
+ // MUL Rd, Rr
+ // Multiplies Rd by Rr and places the result into R1:R0.
+ let usesCustomInserter = 1 in {
+ def MULRdRr : FRdRr<0b1001, 0b11,
+ (outs),
+ (ins GPR8:$lhs, GPR8:$rhs),
+ "mul\t$lhs, $rhs",
+ [/*(set R1, R0, (smullohi i8:$lhs, i8:$rhs))*/]>,
+ Requires<[SupportsMultiplication]>;
+
+ def MULSRdRr : FMUL2RdRr<0,
+ (outs),
+ (ins GPR8:$lhs, GPR8:$rhs),
+ "muls\t$lhs, $rhs",
+ []>,
+ Requires<[SupportsMultiplication]>;
+ }
+
+ def MULSURdRr : FMUL2RdRr<1,
+ (outs),
+ (ins GPR8:$lhs, GPR8:$rhs),
+ "mulsu\t$lhs, $rhs",
+ []>,
+ Requires<[SupportsMultiplication]>;
+
+ def FMUL : FFMULRdRr<0b01,
+ (outs),
+ (ins GPR8:$lhs, GPR8:$rhs),
+ "fmul\t$lhs, $rhs",
+ []>,
+ Requires<[SupportsMultiplication]>;
+
+ def FMULS : FFMULRdRr<0b10,
+ (outs),
+ (ins GPR8:$lhs, GPR8:$rhs),
+ "fmuls\t$lhs, $rhs",
+ []>,
+ Requires<[SupportsMultiplication]>;
+
+ def FMULSU : FFMULRdRr<0b11,
+ (outs),
+ (ins GPR8:$lhs, GPR8:$rhs),
+ "fmulsu\t$lhs, $rhs",
+ []>,
+ Requires<[SupportsMultiplication]>;
+}
+
+let Defs = [R15, R14, R13, R12, R11, R10, R9,
+ R8, R7, R6, R5, R4, R3, R2, R1, R0] in
+def DESK : FDES<(outs),
+ (ins i8imm:$k),
+ "des\t$k",
+ []>,
+ Requires<[HasDES]>;
+
+//===----------------------------------------------------------------------===//
+// Logic
+//===----------------------------------------------------------------------===//
+let Constraints = "$src = $rd",
+Defs = [SREG] in
+{
+ // Register-Register logic instructions (which have the
+ // property of commutativity).
+ let isCommutable = 1 in
+ {
+ def ANDRdRr : FRdRr<0b0010,
+ 0b00,
+ (outs GPR8:$rd),
+ (ins GPR8:$src, GPR8:$rr),
+ "and\t$rd, $rr",
+ [(set i8:$rd, (and i8:$src, i8:$rr)),
+ (implicit SREG)]>;
+
+ // ANDW Rd+1:Rd, Rr+1:Rr
+ //
+ // Expands to:
+ // and Rd, Rr
+ // and Rd+1, Rr+1
+ def ANDWRdRr : Pseudo<(outs DREGS:$rd),
+ (ins DREGS:$src, DREGS:$rr),
+ "andw\t$rd, $rr",
+ [(set i16:$rd, (and i16:$src, i16:$rr)),
+ (implicit SREG)]>;
+
+ def ORRdRr : FRdRr<0b0010,
+ 0b10,
+ (outs GPR8:$rd),
+ (ins GPR8:$src, GPR8:$rr),
+ "or\t$rd, $rr",
+ [(set i8:$rd, (or i8:$src, i8:$rr)),
+ (implicit SREG)]>;
+
+ // ORW Rd+1:Rd, Rr+1:Rr
+ //
+ // Expands to:
+ // or Rd, Rr
+ // or Rd+1, Rr+1
+ def ORWRdRr : Pseudo<(outs DREGS:$rd),
+ (ins DREGS:$src, DREGS:$rr),
+ "orw\t$rd, $rr",
+ [(set i16:$rd, (or i16:$src, i16:$rr)),
+ (implicit SREG)]>;
+
+ def EORRdRr : FRdRr<0b0010,
+ 0b01,
+ (outs GPR8:$rd),
+ (ins GPR8:$src, GPR8:$rr),
+ "eor\t$rd, $rr",
+ [(set i8:$rd, (xor i8:$src, i8:$rr)),
+ (implicit SREG)]>;
+
+ // EORW Rd+1:Rd, Rr+1:Rr
+ //
+ // Expands to:
+ // eor Rd, Rr
+ // eor Rd+1, Rr+1
+ def EORWRdRr : Pseudo<(outs DREGS:$rd),
+ (ins DREGS:$src, DREGS:$rr),
+ "eorw\t$rd, $rr",
+ [(set i16:$rd, (xor i16:$src, i16:$rr)),
+ (implicit SREG)]>;
+ }
+
+ def ANDIRdK : FRdK<0b0111,
+ (outs LD8:$rd),
+ (ins LD8:$src, i8imm:$k),
+ "andi\t$rd, $k",
+ [(set i8:$rd, (and i8:$src, imm:$k)),
+ (implicit SREG)]>;
+
+  // ANDIW Rd+1:Rd, K+1:K
+ //
+ // Expands to:
+ // andi Rd, K
+ // andi Rd+1, K+1
+ def ANDIWRdK : Pseudo<(outs DLDREGS:$rd),
+ (ins DLDREGS:$src, i16imm:$k),
+ "andiw\t$rd, $k",
+ [(set i16:$rd, (and i16:$src, imm:$k)),
+ (implicit SREG)]>;
+
+ def ORIRdK : FRdK<0b0110,
+ (outs LD8:$rd),
+ (ins LD8:$src, i8imm:$k),
+ "ori\t$rd, $k",
+ [(set i8:$rd, (or i8:$src, imm:$k)),
+ (implicit SREG)]>;
+
+  // ORIW Rd+1:Rd, K+1:K
+ //
+ // Expands to:
+ // ori Rd, K
+ // ori Rd+1, K+1
+ def ORIWRdK : Pseudo<(outs DLDREGS:$rd),
+ (ins DLDREGS:$src, i16imm:$rr),
+ "oriw\t$rd, $rr",
+ [(set i16:$rd, (or i16:$src, imm:$rr)),
+ (implicit SREG)]>;
+}
+
+//===----------------------------------------------------------------------===//
+// One's/Two's Complement
+//===----------------------------------------------------------------------===//
+let Constraints = "$src = $rd",
+Defs = [SREG] in
+{
+ def COMRd : FRd<0b1001,
+ 0b0100000,
+ (outs GPR8:$rd),
+ (ins GPR8:$src),
+ "com\t$rd",
+ [(set i8:$rd, (not i8:$src)), (implicit SREG)]>;
+
+ // COMW Rd+1:Rd
+ //
+ // Expands to:
+ // com Rd
+ // com Rd+1
+ def COMWRd : Pseudo<(outs DREGS:$rd),
+ (ins DREGS:$src),
+ "comw\t$rd",
+ [(set i16:$rd, (not i16:$src)), (implicit SREG)]>;
+
+ //:TODO: optimize NEG for wider types
+ def NEGRd : FRd<0b1001,
+ 0b0100001,
+ (outs GPR8:$rd),
+ (ins GPR8:$src),
+ "neg\t$rd",
+ [(set i8:$rd, (ineg i8:$src)), (implicit SREG)]>;
+}
+
+// TST Rd
+// Test for zero or minus.
+// This operation is identical to `Rd AND Rd`.
+//def : InstAlias<"tst\t$rd", (ANDRdRr GPR8:$rd, GPR8:$rd), 1>;
+
+let Defs = [SREG] in
+def TSTRd : FTST<0b0010,
+ 0b00,
+ (outs),
+ (ins GPR8:$rd),
+ "tst\t$rd",
+ [(AVRtst i8:$rd)]>;
+
+//===----------------------------------------------------------------------===//
+// Jump instructions
+//===----------------------------------------------------------------------===//
+let isBarrier = 1,
+isBranch = 1,
+isTerminator = 1 in
+{
+ def RJMPk : FBRk<0,
+ (outs),
+ (ins brtarget_13:$target),
+ "rjmp\t$target",
+ [(br bb:$target)]>;
+
+ let isIndirectBranch = 1,
+ Uses = [R31R30] in
+ def IJMP : F16<0b1001010000001001,
+ (outs),
+ (ins),
+ "ijmp",
+ []>,
+ Requires<[HasIJMPCALL]>;
+
+ let isIndirectBranch = 1,
+ Uses = [R31R30] in
+ def EIJMP : F16<0b1001010000011001,
+ (outs),
+ (ins),
+ "eijmp",
+ []>,
+ Requires<[HasEIJMPCALL]>;
+
+ def JMPk : F32BRk<0b110,
+ (outs),
+ (ins call_target:$k),
+ "jmp\t$k",
+ []>,
+ Requires<[HasJMPCALL]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Call instructions
+//===----------------------------------------------------------------------===//
+let isCall = 1 in
+{
+ // SP is marked as a use to prevent stack-pointer assignments that appear
+ // immediately before calls from potentially appearing dead.
+ let Uses = [SP] in
+ def RCALLk : FBRk<1,
+ (outs),
+ (ins brtarget_13:$target),
+ "rcall\t$target",
+ []>;
+
+ // SP is marked as a use to prevent stack-pointer assignments that appear
+ // immediately before calls from potentially appearing dead.
+ let Uses = [SP, R31R30] in
+ def ICALL : F16<0b1001010100001001,
+ (outs),
+ (ins variable_ops),
+ "icall",
+ []>,
+ Requires<[HasIJMPCALL]>;
+
+ // SP is marked as a use to prevent stack-pointer assignments that appear
+ // immediately before calls from potentially appearing dead.
+ let Uses = [SP, R31R30] in
+ def EICALL : F16<0b1001010100011001,
+ (outs),
+ (ins variable_ops),
+ "eicall",
+ []>,
+ Requires<[HasEIJMPCALL]>;
+
+ // SP is marked as a use to prevent stack-pointer assignments that appear
+ // immediately before calls from potentially appearing dead.
+ //
+ //:TODO: the imm field can be either 16 or 22 bits in devices with more
+ // than 64k of ROM, fix it once we support the largest devices.
+ let Uses = [SP] in
+ def CALLk : F32BRk<0b111,
+ (outs),
+ (ins call_target:$k),
+ "call\t$k",
+ [(AVRcall imm:$k)]>,
+ Requires<[HasJMPCALL]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Return instructions.
+//===----------------------------------------------------------------------===//
+let isTerminator = 1,
+isReturn = 1,
+isBarrier = 1 in
+{
+ def RET : F16<0b1001010100001000,
+ (outs),
+ (ins),
+ "ret",
+ [(AVRretflag)]>;
+
+ def RETI : F16<0b1001010100011000,
+ (outs),
+ (ins),
+ "reti",
+ [(AVRretiflag)]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Compare operations.
+//===----------------------------------------------------------------------===//
+let Defs = [SREG] in
+{
+ // CPSE Rd, Rr
+ // Compare Rd and Rr, skipping the next instruction if they are equal.
+ let isBarrier = 1,
+ isBranch = 1,
+ isTerminator = 1 in
+ def CPSE : FRdRr<0b0001,
+ 0b00,
+ (outs),
+ (ins GPR8:$rd, GPR8:$rr),
+ "cpse\t$rd, $rr",
+ []>;
+
+ def CPRdRr : FRdRr<0b0001,
+ 0b01,
+ (outs),
+ (ins GPR8:$rd, GPR8:$rr),
+ "cp\t$rd, $rr",
+ [(AVRcmp i8:$rd, i8:$rr), (implicit SREG)]>;
+
+ // CPW Rd+1:Rd, Rr+1:Rr
+ //
+ // Expands to:
+ // cp Rd, Rr
+ // cpc Rd+1, Rr+1
+ def CPWRdRr : Pseudo<(outs),
+ (ins DREGS:$src, DREGS:$src2),
+ "cpw\t$src, $src2",
+ [(AVRcmp i16:$src, i16:$src2), (implicit SREG)]>;
+
+ let Uses = [SREG] in
+ def CPCRdRr : FRdRr<0b0000,
+ 0b01,
+ (outs),
+ (ins GPR8:$rd, GPR8:$rr),
+ "cpc\t$rd, $rr",
+ [(AVRcmpc i8:$rd, i8:$rr), (implicit SREG)]>;
+
+  // CPCW Rd+1:Rd, Rr+1:Rr
+ //
+ // Expands to:
+ // cpc Rd, Rr
+ // cpc Rd+1, Rr+1
+ let Uses = [SREG] in
+ def CPCWRdRr : Pseudo<(outs),
+ (ins DREGS:$src, DREGS:$src2),
+ "cpcw\t$src, $src2",
+ [(AVRcmpc i16:$src, i16:$src2), (implicit SREG)]>;
+
+ // CPI Rd, K
+ // Compares a register with an 8 bit immediate.
+ let Uses = [SREG] in
+ def CPIRdK : FRdK<0b0011,
+ (outs),
+ (ins GPR8:$rd, i8imm:$k),
+ "cpi\t$rd, $k",
+ [(AVRcmp i8:$rd, imm:$k), (implicit SREG)]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Register conditional skipping/branching operations.
+//===----------------------------------------------------------------------===//
+let isBranch = 1,
+isTerminator = 1 in
+{
+ // Conditional skipping on GPR register bits, and
+ // conditional skipping on IO register bits.
+ let isBarrier = 1 in
+ {
+ def SBRCRrB : FRdB<0b10,
+ (outs),
+ (ins GPR8:$rr, i8imm:$b),
+ "sbrc\t$rr, $b",
+ []>;
+
+ def SBRSRrB : FRdB<0b11,
+ (outs),
+ (ins GPR8:$rr, i8imm:$b),
+ "sbrs\t$rr, $b",
+ []>;
+
+ def SBICAb : FIOBIT<0b01,
+ (outs),
+ (ins i16imm:$a, i8imm:$b),
+ "sbic\t$a, $b",
+ []>;
+
+ def SBISAb : FIOBIT<0b11,
+ (outs),
+ (ins i16imm:$a, i8imm:$b),
+ "sbis\t$a, $b",
+ []>;
+ }
+
+ // Relative branches on status flag bits.
+ let Uses = [SREG] in
+ {
+ // BRBS s, k
+ // Branch if `s` flag in status register is set.
+ def BRBSsk : FSK<0,
+ (outs),
+ (ins i8imm:$s, relbrtarget_7:$k),
+ "brbs\t$s, $k",
+ []>;
+
+ // BRBC s, k
+ // Branch if `s` flag in status register is clear.
+ def BRBCsk : FSK<1,
+ (outs),
+ (ins i8imm:$s, relbrtarget_7:$k),
+ "brbc\t$s, $k",
+ []>;
+ }
+}
+
+
+// BRCS k
+// Branch if carry flag is set
+def : InstAlias<"brcs\t$k", (BRBSsk 0, relbrtarget_7:$k)>;
+
+// BRCC k
+// Branch if carry flag is clear
+def : InstAlias<"brcc\t$k", (BRBCsk 0, relbrtarget_7:$k)>;
+
+// BRHS k
+// Branch if half carry flag is set
+def : InstAlias<"brhs\t$k", (BRBSsk 5, relbrtarget_7:$k)>;
+
+// BRHC k
+// Branch if half carry flag is clear
+def : InstAlias<"brhc\t$k", (BRBCsk 5, relbrtarget_7:$k)>;
+
+// BRTS k
+// Branch if the T flag is set
+def : InstAlias<"brts\t$k", (BRBSsk 6, relbrtarget_7:$k)>;
+
+// BRTC k
+// Branch if the T flag is clear
+def : InstAlias<"brtc\t$k", (BRBCsk 6, relbrtarget_7:$k)>;
+
+// BRVS k
+// Branch if the overflow flag is set
+def : InstAlias<"brvs\t$k", (BRBSsk 3, relbrtarget_7:$k)>;
+
+// BRVC k
+// Branch if the overflow flag is clear
+def : InstAlias<"brvc\t$k", (BRBCsk 3, relbrtarget_7:$k)>;
+
+// BRIE k
+// Branch if the global interrupt flag is enabled
+def : InstAlias<"brie\t$k", (BRBSsk 7, relbrtarget_7:$k)>;
+
+// BRID k
+// Branch if the global interrupt flag is disabled
+def : InstAlias<"brid\t$k", (BRBCsk 7, relbrtarget_7:$k)>;
+
+//===----------------------------------------------------------------------===//
+// PC-relative conditional branches
+//===----------------------------------------------------------------------===//
+// Based on status register. We cannot simplify these into instruction aliases
+// because we also need to be able to specify a pattern to match for ISel.
+let isBranch = 1,
+isTerminator = 1,
+Uses = [SREG] in
+{
+ def BREQk : FBRsk<0,
+ 0b001,
+ (outs),
+ (ins relbrtarget_7:$target),
+ "breq\t$target",
+ [(AVRbrcond bb:$target, AVR_COND_EQ)]>;
+
+ def BRNEk : FBRsk<1,
+ 0b001,
+ (outs),
+ (ins relbrtarget_7:$target),
+ "brne\t$target",
+ [(AVRbrcond bb:$target, AVR_COND_NE)]>;
+
+
+ def BRSHk : FBRsk<1,
+ 0b000,
+ (outs),
+ (ins relbrtarget_7:$target),
+ "brsh\t$target",
+ [(AVRbrcond bb:$target, AVR_COND_SH)]>;
+
+ def BRLOk : FBRsk<0,
+ 0b000,
+ (outs),
+ (ins relbrtarget_7:$target),
+ "brlo\t$target",
+ [(AVRbrcond bb:$target, AVR_COND_LO)]>;
+
+ def BRMIk : FBRsk<0,
+ 0b010,
+ (outs),
+ (ins relbrtarget_7:$target),
+ "brmi\t$target",
+ [(AVRbrcond bb:$target, AVR_COND_MI)]>;
+
+ def BRPLk : FBRsk<1,
+ 0b010,
+ (outs),
+ (ins relbrtarget_7:$target),
+ "brpl\t$target",
+ [(AVRbrcond bb:$target, AVR_COND_PL)]>;
+
+ def BRGEk : FBRsk<1,
+ 0b100,
+ (outs),
+ (ins relbrtarget_7:$target),
+ "brge\t$target",
+ [(AVRbrcond bb:$target, AVR_COND_GE)]>;
+
+ def BRLTk : FBRsk<0,
+ 0b100,
+ (outs),
+ (ins relbrtarget_7:$target),
+ "brlt\t$target",
+ [(AVRbrcond bb:$target, AVR_COND_LT)]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Data transfer instructions
+//===----------------------------------------------------------------------===//
+// 8- and 16-bit register move instructions.
+let hasSideEffects = 0 in
+{
+ def MOVRdRr : FRdRr<0b0010,
+ 0b11,
+ (outs GPR8:$rd),
+ (ins GPR8:$rr),
+ "mov\t$rd, $rr",
+ []>;
+
+ def MOVWRdRr : FMOVWRdRr<(outs DREGS:$dst),
+ (ins DREGS:$src),
+ "movw\t$dst, $src",
+ []>,
+ Requires<[HasMOVW]>;
+}
+
+// Load immediate values into registers.
+let isReMaterializable = 1 in
+{
+ def LDIRdK : FRdK<0b1110,
+ (outs LD8:$rd),
+ (ins i8imm:$k),
+ "ldi\t$rd, $k",
+ [(set i8:$rd, imm:$k)]>;
+
+ // LDIW Rd+1:Rd, K+1:K
+ //
+ // Expands to:
+ // ldi Rd, K
+ // ldi Rd+1, K+1
+ def LDIWRdK : Pseudo<(outs DLDREGS:$dst),
+ (ins i16imm:$src),
+ "ldiw\t$dst, $src",
+ [(set i16:$dst, imm:$src)]>;
+}
+
+// Load from data space into register.
+let canFoldAsLoad = 1,
+isReMaterializable = 1 in
+{
+ def LDSRdK : F32DM<0b0,
+ (outs GPR8:$rd),
+ (ins i16imm:$k),
+ "lds\t$rd, $k",
+ [(set i8:$rd, (load imm:$k))]>,
+ Requires<[HasSRAM]>;
+
+ // LDSW Rd+1:Rd, K+1:K
+ //
+ // Expands to:
+ // lds Rd, (K+1:K)
+// lds Rd+1, (K+1:K) + 1
+ def LDSWRdK : Pseudo<(outs DREGS:$dst),
+ (ins i16imm:$src),
+ "ldsw\t$dst, $src",
+ [(set i16:$dst, (load imm:$src))]>,
+ Requires<[HasSRAM]>;
+}
+
+// Indirect loads.
+let canFoldAsLoad = 1,
+isReMaterializable = 1 in
+{
+ def LDRdPtr : FSTLD<0,
+ 0b00,
+ (outs GPR8:$reg),
+ (ins LDSTPtrReg:$ptrreg),
+ "ld\t$reg, $ptrreg",
+ [(set GPR8:$reg, (load i16:$ptrreg))]>,
+ Requires<[HasSRAM]>;
+
+ // LDW Rd+1:Rd, P
+ //
+ // Expands to:
+ // ld Rd, P+
+ // ld Rd+1, P+
+ let Constraints = "@earlyclobber $reg" in
+ def LDWRdPtr : Pseudo<(outs DREGS:$reg),
+ (ins PTRDISPREGS:$ptrreg),
+ "ldw\t$reg, $ptrreg",
+ [(set i16:$reg, (load i16:$ptrreg))]>,
+ Requires<[HasSRAM]>;
+}
+
+// Indirect loads (with postincrement or predecrement).
+let mayLoad = 1,
+hasSideEffects = 0,
+Constraints = "$ptrreg = $base_wb,@earlyclobber $reg,@earlyclobber $base_wb" in
+{
+ def LDRdPtrPi : FSTLD<0,
+ 0b01,
+ (outs GPR8:$reg, PTRREGS:$base_wb),
+ (ins LDSTPtrReg:$ptrreg),
+ "ld\t$reg, $ptrreg+",
+ []>,
+ Requires<[HasSRAM]>;
+
+ // LDW Rd+1:Rd, P+
+ // Expands to:
+ // ld Rd, P+
+ // ld Rd+1, P+
+ def LDWRdPtrPi : Pseudo<(outs DREGS:$reg, PTRREGS:$base_wb),
+ (ins PTRREGS:$ptrreg),
+ "ldw\t$reg, $ptrreg+",
+ []>,
+ Requires<[HasSRAM]>;
+
+ def LDRdPtrPd : FSTLD<0,
+ 0b10,
+ (outs GPR8:$reg, PTRREGS:$base_wb),
+ (ins LDSTPtrReg:$ptrreg),
+ "ld\t$reg, -$ptrreg",
+ []>,
+ Requires<[HasSRAM]>;
+
+ // LDW Rd+1:Rd, -P
+ //
+ // Expands to:
+ // ld Rd+1, -P
+ // ld Rd, -P
+ def LDWRdPtrPd : Pseudo<(outs DREGS:$reg, PTRREGS:$base_wb),
+ (ins PTRREGS:$ptrreg),
+ "ldw\t$reg, -$ptrreg",
+ []>,
+ Requires<[HasSRAM]>;
+}
+
+// Load indirect with displacement operations.
+let canFoldAsLoad = 1,
+isReMaterializable = 1 in
+{
+ def LDDRdPtrQ : FSTDLDD<0,
+ (outs GPR8:$reg),
+ (ins memri:$memri),
+ "ldd\t$reg, $memri",
+ [(set i8:$reg, (load addr:$memri))]>,
+ Requires<[HasSRAM]>;
+
+ // LDDW Rd+1:Rd, P+q
+ //
+ // Expands to:
+ // ldd Rd, P+q
+ // ldd Rd+1, P+q+1
+ let Constraints = "@earlyclobber $dst" in
+ def LDDWRdPtrQ : Pseudo<(outs DREGS:$dst),
+ (ins memri:$memri),
+ "lddw\t$dst, $memri",
+ [(set i16:$dst, (load addr:$memri))]>,
+ Requires<[HasSRAM]>;
+
+ //:FIXME: remove this once PR13375 gets fixed
+ // Bug report: https://llvm.org/bugs/show_bug.cgi?id=13375
+ let mayLoad = 1,
+ hasSideEffects = 0 in
+ def LDDWRdYQ : Pseudo<(outs DREGS:$dst),
+ (ins memri:$memri),
+ "lddw\t$dst, $memri",
+ []>,
+ Requires<[HasSRAM]>;
+}
+
+// Direct store from register to data space.
+def STSKRr : F32DM<0b1,
+ (outs),
+ (ins i16imm:$k, GPR8:$rd),
+ "sts\t$k, $rd",
+ [(store i8:$rd, imm:$k)]>,
+ Requires<[HasSRAM]>;
+
+// STSW K+1:K, Rr+1:Rr
+//
+// Expands to:
+// sts Rr+1, (K+1:K) + 1
+// sts Rr, (K+1:K)
+def STSWKRr : Pseudo<(outs),
+ (ins i16imm:$dst, DREGS:$src),
+ "stsw\t$dst, $src",
+ [(store i16:$src, imm:$dst)]>,
+ Requires<[HasSRAM]>;
+
+// Indirect stores.
+// ST P, Rr
+// Stores the value of Rr into the location addressed by pointer P.
+def STPtrRr : FSTLD<1,
+ 0b00,
+ (outs),
+ (ins LDSTPtrReg:$ptrreg, GPR8:$reg),
+ "st\t$ptrreg, $reg",
+ [(store GPR8:$reg, i16:$ptrreg)]>,
+ Requires<[HasSRAM]>;
+
+// STW P, Rr+1:Rr
+// Stores the value of Rr into the location addressed by pointer P.
+//
+// Expands to:
+// st P, Rr
+// std P+1, Rr+1
+def STWPtrRr : Pseudo<(outs),
+ (ins PTRDISPREGS:$ptrreg, DREGS:$reg),
+ "stw\t$ptrreg, $reg",
+ [(store i16:$reg, i16:$ptrreg)]>,
+ Requires<[HasSRAM]>;
+
+// Indirect stores (with postincrement or predecrement).
+let Constraints = "$ptrreg = $base_wb,@earlyclobber $base_wb" in
+{
+
+ // ST P+, Rr
+ // Stores the value of Rr into the location addressed by pointer P.
+ // Post increments P.
+ def STPtrPiRr : FSTLD<1,
+ 0b01,
+ (outs LDSTPtrReg:$base_wb),
+ (ins LDSTPtrReg:$ptrreg, GPR8:$reg, i8imm:$offs),
+ "st\t$ptrreg+, $reg",
+ [(set i16:$base_wb,
+ (post_store GPR8:$reg, i16:$ptrreg, imm:$offs))]>,
+ Requires<[HasSRAM]>;
+
+ // STW P+, Rr+1:Rr
+ // Stores the value of Rr into the location addressed by pointer P.
+ // Post increments P.
+ //
+ // Expands to:
+ // st P+, Rr
+ // st P+, Rr+1
+ def STWPtrPiRr : Pseudo<(outs PTRREGS:$base_wb),
+ (ins PTRREGS:$ptrreg, DREGS:$trh, i8imm:$offs),
+ "stw\t$ptrreg+, $trh",
+ [(set PTRREGS:$base_wb,
+ (post_store DREGS:$trh, PTRREGS:$ptrreg, imm:$offs))]>,
+ Requires<[HasSRAM]>;
+
+ // ST -P, Rr
+ // Stores the value of Rr into the location addressed by pointer P.
+ // Pre decrements P.
+ def STPtrPdRr : FSTLD<1,
+ 0b10,
+ (outs LDSTPtrReg:$base_wb),
+ (ins LDSTPtrReg:$ptrreg, GPR8:$reg, i8imm:$offs),
+ "st\t-$ptrreg, $reg",
+ [(set i16:$base_wb,
+ (pre_store GPR8:$reg, i16:$ptrreg, imm:$offs))]>,
+ Requires<[HasSRAM]>;
+
+ // STW -P, Rr+1:Rr
+ // Stores the value of Rr into the location addressed by pointer P.
+ // Pre decrements P.
+ //
+ // Expands to:
+ // st -P, Rr+1
+ // st -P, Rr
+ def STWPtrPdRr : Pseudo<(outs PTRREGS:$base_wb),
+ (ins PTRREGS:$ptrreg, DREGS:$reg, i8imm:$offs),
+ "stw\t-$ptrreg, $reg",
+ [(set PTRREGS:$base_wb,
+ (pre_store i16:$reg, i16:$ptrreg, imm:$offs))]>,
+ Requires<[HasSRAM]>;
+}
+
+// Store indirect with displacement operations.
+// STD P+q, Rr
+// Stores the value of Rr into the location addressed by pointer P with a
+// displacement of q. Does not modify P.
+def STDPtrQRr : FSTDLDD<1,
+ (outs),
+ (ins memri:$memri, GPR8:$reg),
+ "std\t$memri, $reg",
+ [(store i8:$reg, addr:$memri)]>,
+ Requires<[HasSRAM]>;
+
+// STDW P+q, Rr+1:Rr
+// Stores the value of Rr into the location addressed by pointer P with a
+// displacement of q. Does not modify P.
+//
+// Expands to:
+// std P+q, Rr
+// std P+q+1, Rr+1
+def STDWPtrQRr : Pseudo<(outs),
+ (ins memri:$memri, DREGS:$src),
+ "stdw\t$memri, $src",
+ [(store i16:$src, addr:$memri)]>,
+ Requires<[HasSRAM]>;
+
+
+// Load program memory operations.
+let canFoldAsLoad = 1,
+isReMaterializable = 1,
+hasSideEffects = 0 in
+{
+ let Defs = [R0],
+ Uses = [R31R30] in
+ def LPM : F16<0b1001010111001000,
+ (outs),
+ (ins),
+ "lpm",
+ []>,
+ Requires<[HasLPM]>;
+
+ def LPMRdZ : FLPMX<0,
+ 0,
+ (outs GPR8:$dst),
+ (ins ZREGS:$z),
+ "lpm\t$dst, $z",
+ []>,
+ Requires<[HasLPMX]>;
+
+ def LPMWRdZ : Pseudo<(outs DREGS:$dst),
+ (ins ZREGS:$z),
+ "lpmw\t$dst, $z",
+ []>,
+ Requires<[HasLPMX]>;
+
+ // Load program memory while postincrementing the Z register.
+ let mayLoad = 1,
+ Defs = [R31R30] in
+ {
+ def LPMRdZPi : FLPMX<0,
+ 1,
+ (outs GPR8:$dst),
+ (ins ZREGS:$z),
+ "lpm\t$dst, $z+",
+ []>,
+ Requires<[HasLPMX]>;
+
+ def LPMWRdZPi : Pseudo<(outs DREGS:$dst),
+ (ins ZREGS:$z),
+ "lpmw\t$dst, $z+",
+ []>,
+ Requires<[HasLPMX]>;
+ }
+}
+
+// Extended load program memory operations.
+let mayLoad = 1,
+hasSideEffects = 0 in
+{
+ let Defs = [R0],
+ Uses = [R31R30] in
+ def ELPM : F16<0b1001010111011000,
+ (outs),
+ (ins),
+ "elpm",
+ []>,
+ Requires<[HasELPM]>;
+
+ def ELPMRdZ : FLPMX<1,
+ 0,
+ (outs GPR8:$dst),
+ (ins ZREGS:$z),
+ "elpm\t$dst, $z",
+ []>,
+ Requires<[HasELPMX]>;
+
+ let Defs = [R31R30] in
+ def ELPMRdZPi : FLPMX<1,
+ 1,
+ (outs GPR8:$dst),
+ (ins ZREGS: $z),
+ "elpm\t$dst, $z+",
+ []>,
+ Requires<[HasELPMX]>;
+}
+
+// Store program memory operations.
+let Uses = [R1, R0] in
+{
+ let Uses = [R31R30, R1, R0] in
+ def SPM : F16<0b1001010111101000,
+ (outs),
+ (ins),
+ "spm",
+ []>,
+ Requires<[HasSPM]>;
+
+ let Defs = [R31R30] in
+ def SPMZPi : F16<0b1001010111111000,
+ (outs),
+ (ins ZREGS:$z),
+ "spm $z+",
+ []>,
+ Requires<[HasSPMX]>;
+}
+
+// Read data from IO location operations.
+let canFoldAsLoad = 1,
+isReMaterializable = 1 in
+{
+ def INRdA : FIORdA<(outs GPR8:$dst),
+ (ins i16imm:$src),
+ "in\t$dst, $src",
+ [(set i8:$dst, (load ioaddr8:$src))]>;
+
+ def INWRdA : Pseudo<(outs DREGS:$dst),
+ (ins i16imm:$src),
+ "inw\t$dst, $src",
+ [(set i16:$dst, (load ioaddr16:$src))]>;
+}
+
+// Write data to IO location operations.
+def OUTARr : FIOARr<(outs),
+ (ins i16imm:$dst, GPR8:$src),
+ "out\t$dst, $src",
+ [(store i8:$src, ioaddr8:$dst)]>;
+
+def OUTWARr : Pseudo<(outs),
+ (ins i16imm:$dst, DREGS:$src),
+ "outw\t$dst, $src",
+ [(store i16:$src, ioaddr16:$dst)]>;
+
+// Stack push/pop operations.
+let Defs = [SP],
+Uses = [SP],
+hasSideEffects = 0 in
+{
+ // Stack push operations.
+ let mayStore = 1 in
+ {
+ def PUSHRr : FRd<0b1001,
+ 0b0011111,
+ (outs),
+ (ins GPR8:$reg),
+ "push\t$reg",
+ []>,
+ Requires<[HasSRAM]>;
+
+ def PUSHWRr : Pseudo<(outs),
+ (ins DREGS:$reg),
+ "pushw\t$reg",
+ []>,
+ Requires<[HasSRAM]>;
+ }
+
+ // Stack pop operations.
+ let mayLoad = 1 in
+ {
+ def POPRd : FRd<0b1001,
+ 0b0001111,
+ (outs GPR8:$reg),
+ (ins),
+ "pop\t$reg",
+ []>,
+ Requires<[HasSRAM]>;
+
+ def POPWRd : Pseudo<(outs DREGS:$reg),
+ (ins),
+ "popw\t$reg",
+ []>,
+ Requires<[HasSRAM]>;
+ }
+}
+
+// Read-Modify-Write (RMW) instructions.
+def XCHZRd : FZRd<0b100,
+ (outs GPR8:$rd),
+ (ins ZREGS:$z),
+ "xch\t$z, $rd",
+ []>,
+ Requires<[SupportsRMW]>;
+
+def LASZRd : FZRd<0b101,
+ (outs GPR8:$rd),
+ (ins ZREGS:$z),
+ "las\t$z, $rd",
+ []>,
+ Requires<[SupportsRMW]>;
+
+def LACZRd : FZRd<0b110,
+ (outs GPR8:$rd),
+ (ins ZREGS:$z),
+ "lac\t$z, $rd",
+ []>,
+ Requires<[SupportsRMW]>;
+
+def LATZRd : FZRd<0b111,
+ (outs GPR8:$rd),
+ (ins ZREGS:$z),
+ "lat\t$z, $rd",
+ []>,
+ Requires<[SupportsRMW]>;
+
+//===----------------------------------------------------------------------===//
+// Bit and bit-test instructions
+//===----------------------------------------------------------------------===//
+
+// Bit shift/rotate operations.
+let Constraints = "$src = $rd",
+Defs = [SREG] in
+{
+ def LSLRd : FRdRr<0b0000,
+ 0b11,
+ (outs GPR8:$rd),
+ (ins GPR8:$src),
+ "lsl\t$rd",
+ [(set i8:$rd, (AVRlsl i8:$src)), (implicit SREG)]>;
+
+ def LSLWRd : Pseudo<(outs DREGS:$rd),
+ (ins DREGS:$src),
+ "lslw\t$rd",
+ [(set i16:$rd, (AVRlsl i16:$src)), (implicit SREG)]>;
+
+ def LSRRd : FRd<0b1001,
+ 0b0100110,
+ (outs GPR8:$rd),
+ (ins GPR8:$src),
+ "lsr\t$rd",
+ [(set i8:$rd, (AVRlsr i8:$src)), (implicit SREG)]>;
+
+ def LSRWRd : Pseudo<(outs DREGS:$rd),
+ (ins DREGS:$src),
+ "lsrw\t$rd",
+ [(set i16:$rd, (AVRlsr i16:$src)), (implicit SREG)]>;
+
+ def ASRRd : FRd<0b1001,
+ 0b0100101,
+ (outs GPR8:$rd),
+ (ins GPR8:$src),
+ "asr\t$rd",
+ [(set i8:$rd, (AVRasr i8:$src)), (implicit SREG)]>;
+
+ def ASRWRd : Pseudo<(outs DREGS:$rd),
+ (ins DREGS:$src),
+ "asrw\t$rd",
+ [(set i16:$rd, (AVRasr i16:$src)), (implicit SREG)]>;
+
+ // Bit rotate operations.
+ let Uses = [SREG] in
+ {
+ def ROLRd : FRdRr<0b0001,
+ 0b11,
+ (outs GPR8:$rd),
+ (ins GPR8:$src),
+ "rol\t$rd",
+ [(set i8:$rd, (AVRrol i8:$src)), (implicit SREG)]>;
+
+ def ROLWRd : Pseudo<(outs DREGS:$rd),
+ (ins DREGS:$src),
+ "rolw\t$rd",
+ [(set i16:$rd, (AVRrol i16:$src)), (implicit SREG)]>;
+
+ def RORRd : FRd<0b1001,
+ 0b0100111,
+ (outs GPR8:$rd),
+ (ins GPR8:$src),
+ "ror\t$rd",
+ [(set i8:$rd, (AVRror i8:$src)), (implicit SREG)]>;
+
+ def RORWRd : Pseudo<(outs DREGS:$rd),
+ (ins DREGS:$src),
+ "rorw\t$rd",
+ [(set i16:$rd, (AVRror i16:$src)), (implicit SREG)]>;
+ }
+}
+
+// SWAP Rd
+// Swaps the high and low nibbles in a register.
+let Constraints = "$src = $rd" in
+def SWAPRd : FRd<0b1001,
+ 0b0100010,
+ (outs GPR8:$rd),
+ (ins GPR8:$src),
+ "swap\t$rd",
+ [(set i8:$rd, (bswap i8:$src))]>;
+
+// IO register bit set/clear operations.
+//:TODO: add patterns for the popcount(imm)==2 case so that it expands to two
+// sbi/cbi instructions instead of in+ori+out, which needs one extra
+// instruction.
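+//
+// For example, setting bits 0 and 1 of an IO register could then expand to:
+//   sbi  addr, 0
+//   sbi  addr, 1
+// instead of (r16 here is just an illustrative upper register):
+//   in   r16, addr
+//   ori  r16, 3
+//   out  addr, r16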
+def SBIAb : FIOBIT<0b10,
+ (outs),
+ (ins i16imm:$addr, i8imm:$bit),
+ "sbi\t$addr, $bit",
+ [(store (or (i8 (load lowioaddr8:$addr)), iobitpos8:$bit),
+ lowioaddr8:$addr)]>;
+
+def CBIAb : FIOBIT<0b00,
+ (outs),
+ (ins i16imm:$addr, i8imm:$bit),
+ "cbi\t$addr, $bit",
+ [(store (and (i8 (load lowioaddr8:$addr)), iobitposn8:$bit),
+ lowioaddr8:$addr)]>;
+
+// Status register bit load/store operations.
+let Defs = [SREG] in
+def BST : FRdB<0b01,
+ (outs),
+ (ins GPR8:$rd, i8imm:$b),
+ "bst\t$rd, $b",
+ []>;
+
+let Uses = [SREG] in
+def BLD : FRdB<0b00,
+ (outs),
+ (ins GPR8:$rd, i8imm:$b),
+ "bld\t$rd, $b",
+ []>;
+
+// Set/clear bit in register operations.
+let Constraints = "$src = $rd",
+Defs = [SREG] in
+{
+ // SBR Rd, K
+ // Alias for ORI Rd, K
+ def SBRRdK : FRdK<0b0110,
+ (outs LD8:$rd),
+ (ins LD8:$src, i8imm:$k),
+ "sbr\t$rd, $k",
+ [(set i8:$rd, (or i8:$src, imm:$k)),
+ (implicit SREG)]>;
+
+ // CBR Rd, K
+ // Alias for `ANDI Rd, COM(K)` where COM(K) is the complement of K.
+ def CBRRdK : FRdK<0b0111,
+ (outs LD8:$rd),
+ (ins LD8:$src, i8imm_com:$k),
+ "cbr\t$rd, $k",
+ []>;
+}
+
+// CLR Rd
+// Alias for EOR Rd, Rd
+// -------------
+// Clears all bits in a register.
+def CLR : InstAlias<"clr\t$rd", (EORRdRr GPR8:$rd, GPR8:$rd)>;
+
+// SER Rd
+// Alias for LDI Rd, 0xff
+// ---------
+// Sets all bits in a register.
+def : InstAlias<"ser\t$rd", (LDIRdK LD8:$rd, 0xff), 0>;
+
+let Defs = [SREG] in
+def BSETs : FS<0,
+ (outs),
+ (ins i8imm:$s),
+ "bset\t$s",
+ []>;
+
+let Defs = [SREG] in
+def BCLRs : FS<1,
+ (outs),
+ (ins i8imm:$s),
+ "bclr\t$s",
+ []>;
+
+// Set/clear aliases for the carry (C) status flag (bit 0).
+def : InstAlias<"sec", (BSETs 0)>;
+def : InstAlias<"clc", (BCLRs 0)>;
+
+// Set/clear aliases for the zero (Z) status flag (bit 1).
+def : InstAlias<"sez", (BSETs 1)>;
+def : InstAlias<"clz", (BCLRs 1)>;
+
+// Set/clear aliases for the negative (N) status flag (bit 2).
+def : InstAlias<"sen", (BSETs 2)>;
+def : InstAlias<"cln", (BCLRs 2)>;
+
+// Set/clear aliases for the overflow (V) status flag (bit 3).
+def : InstAlias<"sev", (BSETs 3)>;
+def : InstAlias<"clv", (BCLRs 3)>;
+
+// Set/clear aliases for the sign (S) status flag (bit 4).
+def : InstAlias<"ses", (BSETs 4)>;
+def : InstAlias<"cls", (BCLRs 4)>;
+
+// Set/clear aliases for the half-carry (H) status flag (bit 5).
+def : InstAlias<"seh", (BSETs 5)>;
+def : InstAlias<"clh", (BCLRs 5)>;
+
+// Set/clear aliases for the T status flag (bit 6).
+def : InstAlias<"set", (BSETs 6)>;
+def : InstAlias<"clt", (BCLRs 6)>;
+
+// Set/clear aliases for the interrupt (I) status flag (bit 7).
+def : InstAlias<"sei", (BSETs 7)>;
+def : InstAlias<"cli", (BCLRs 7)>;
+
+//===----------------------------------------------------------------------===//
+// Special/Control instructions
+//===----------------------------------------------------------------------===//
+
+// BREAK
+// Breakpoint instruction
+// ---------
+// <|1001|0101|1001|1000>
+def BREAK : F16<0b1001010110011000,
+ (outs),
+ (ins),
+ "break",
+ []>,
+ Requires<[HasBREAK]>;
+
+// NOP
+// No-operation instruction
+// ---------
+// <|0000|0000|0000|0000>
+def NOP : F16<0b0000000000000000,
+ (outs),
+ (ins),
+ "nop",
+ []>;
+
+// SLEEP
+// Sleep instruction
+// ---------
+// <|1001|0101|1000|1000>
+def SLEEP : F16<0b1001010110001000,
+ (outs),
+ (ins),
+ "sleep",
+ []>;
+
+// WDR
+// Watchdog reset
+// ---------
+// <|1001|0101|1010|1000>
+def WDR : F16<0b1001010110101000,
+ (outs),
+ (ins),
+ "wdr",
+ []>;
+
+//===----------------------------------------------------------------------===//
+// Pseudo instructions for later expansion
+//===----------------------------------------------------------------------===//
+
+//:TODO: Optimize this for wider types AND optimize the code generated for
+//   int foo(char a, char b, char c, char d) { return d + b; }
+// which looks like a missed sext_inreg opportunity.
+def SEXT : ExtensionPseudo<
+ (outs DREGS:$dst),
+ (ins GPR8:$src),
+ "sext\t$dst, $src",
+ [(set i16:$dst, (sext i8:$src)), (implicit SREG)]
+>;
+
+def ZEXT : ExtensionPseudo<
+ (outs DREGS:$dst),
+ (ins GPR8:$src),
+ "zext\t$dst, $src",
+ [(set i16:$dst, (zext i8:$src)), (implicit SREG)]
+>;
+
+// This pseudo gets expanded into a movw+adiw pair, and thus clobbers SREG.
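+// For example, `frmidx r31:r30, r29:r28, 12` could expand to:
+//   movw r31:r30, r29:r28
+//   adiw r31:r30, 12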
+let Defs = [SREG],
+ hasSideEffects = 0 in
+def FRMIDX : Pseudo<(outs DLDREGS:$dst),
+ (ins DLDREGS:$src, i16imm:$src2),
+ "frmidx\t$dst, $src, $src2",
+ []>;
+
+// This pseudo is either converted to a regular store or a push which clobbers
+// SP.
+def STDSPQRr : StorePseudo<
+ (outs),
+ (ins memspi:$dst, GPR8:$src),
+ "stdstk\t$dst, $src",
+ [(store i8:$src, addr:$dst)]
+>;
+
+// This pseudo is either converted to a regular store or a push which clobbers
+// SP.
+def STDWSPQRr : StorePseudo<
+ (outs),
+ (ins memspi:$dst, DREGS:$src),
+ "stdwstk\t$dst, $src",
+ [(store i16:$src, addr:$dst)]
+>;
+
+// SP read/write pseudos.
+let hasSideEffects = 0 in
+{
+ let Uses = [SP] in
+ def SPREAD : Pseudo<
+ (outs DREGS:$dst),
+ (ins GPRSP:$src),
+ "spread\t$dst, $src",
+ []
+ >;
+
+ let Defs = [SP] in
+ def SPWRITE : Pseudo<
+ (outs GPRSP:$dst),
+ (ins DREGS:$src),
+ "spwrite\t$dst, $src",
+ []>;
+}
+
+def Select8 : SelectPseudo<
+ (outs GPR8:$dst),
+ (ins GPR8:$src, GPR8:$src2, i8imm:$cc),
+ "# Select8 PSEUDO",
+ [(set i8:$dst, (AVRselectcc i8:$src, i8:$src2, imm:$cc))]
+>;
+
+def Select16 : SelectPseudo<
+ (outs DREGS:$dst),
+ (ins DREGS:$src, DREGS:$src2, i8imm:$cc),
+ "# Select16 PSEUDO",
+ [(set i16:$dst, (AVRselectcc i16:$src, i16:$src2, imm:$cc))]
+>;
+
+def Lsl8 : ShiftPseudo<
+ (outs GPR8:$dst),
+ (ins GPR8:$src, GPR8:$cnt),
+ "# Lsl8 PSEUDO",
+ [(set i8:$dst, (AVRlslLoop i8:$src, i8:$cnt))]
+>;
+
+def Lsl16 : ShiftPseudo<
+ (outs DREGS:$dst),
+ (ins DREGS:$src, GPR8:$cnt),
+ "# Lsl16 PSEUDO",
+ [(set i16:$dst, (AVRlslLoop i16:$src, i8:$cnt))]
+>;
+
+def Lsr8 : ShiftPseudo<
+ (outs GPR8:$dst),
+ (ins GPR8:$src, GPR8:$cnt),
+ "# Lsr8 PSEUDO",
+ [(set i8:$dst, (AVRlsrLoop i8:$src, i8:$cnt))]
+>;
+
+
+def Lsr16 : ShiftPseudo<
+ (outs DREGS:$dst),
+ (ins DREGS:$src, GPR8:$cnt),
+ "# Lsr16 PSEUDO",
+ [(set i16:$dst, (AVRlsrLoop i16:$src, i8:$cnt))]
+>;
+
+def Asr8 : ShiftPseudo<
+ (outs GPR8:$dst),
+ (ins GPR8:$src, GPR8:$cnt),
+ "# Asr8 PSEUDO",
+ [(set i8:$dst, (AVRasrLoop i8:$src, i8:$cnt))]
+>;
+
+def Asr16 : ShiftPseudo<
+ (outs DREGS:$dst),
+ (ins DREGS:$src, GPR8:$cnt),
+ "# Asr16 PSEUDO",
+ [(set i16:$dst, (AVRasrLoop i16:$src, i8:$cnt))]
+>;
+
+
+//===----------------------------------------------------------------------===//
+// Non-Instruction Patterns
+//===----------------------------------------------------------------------===//
+
+//:TODO: look in x86InstrCompiler.td for odd encoding trick related to
+// add x, 128 -> sub x, -128. Clang is emitting an eor for this (ldi+eor)
+
+// The add instruction always writes the carry flag.
+def : Pat<(addc i8:$src, i8:$src2),
+ (ADDRdRr i8:$src, i8:$src2)>;
+def : Pat<(addc DREGS:$src, DREGS:$src2),
+ (ADDWRdRr DREGS:$src, DREGS:$src2)>;
+
+// All sub instruction variants always write the carry flag.
+def : Pat<(subc i8:$src, i8:$src2),
+ (SUBRdRr i8:$src, i8:$src2)>;
+def : Pat<(subc i16:$src, i16:$src2),
+ (SUBWRdRr i16:$src, i16:$src2)>;
+def : Pat<(subc i8:$src, imm:$src2),
+ (SUBIRdK i8:$src, imm:$src2)>;
+def : Pat<(subc i16:$src, imm:$src2),
+ (SUBIWRdK i16:$src, imm:$src2)>;
+
+// These patterns convert add (x, -imm) to sub (x, imm) since we don't have
+// any add-with-immediate instructions. They also take care of the adiw/sbiw
+// instructions.
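+//
+// For example, there is no 8-bit `add Rd, 5` with an immediate operand, but
+// `subi Rd, -5` (encoded as `subi Rd, 251`) computes the same result
+// modulo 256.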
+def : Pat<(add i16:$src1, imm0_63_neg:$src2),
+ (SBIWRdK i16:$src1, (imm0_63_neg:$src2))>;
+def : Pat<(add i16:$src1, imm:$src2),
+ (SUBIWRdK i16:$src1, (imm16_neg_XFORM imm:$src2))>;
+def : Pat<(addc i16:$src1, imm:$src2),
+ (SUBIWRdK i16:$src1, (imm16_neg_XFORM imm:$src2))>;
+def : Pat<(adde i16:$src1, imm:$src2),
+ (SBCIWRdK i16:$src1, (imm16_neg_XFORM imm:$src2))>;
+
+def : Pat<(add i8:$src1, imm:$src2),
+ (SUBIRdK i8:$src1, (imm8_neg_XFORM imm:$src2))>;
+def : Pat<(addc i8:$src1, imm:$src2),
+ (SUBIRdK i8:$src1, (imm8_neg_XFORM imm:$src2))>;
+def : Pat<(adde i8:$src1, imm:$src2),
+ (SBCIRdK i8:$src1, (imm8_neg_XFORM imm:$src2))>;
+
+// Calls.
+def : Pat<(AVRcall (i16 tglobaladdr:$dst)),
+ (CALLk tglobaladdr:$dst)>;
+def : Pat<(AVRcall (i16 texternalsym:$dst)),
+ (CALLk texternalsym:$dst)>;
+
+// `anyext`
+def : Pat<(i16 (anyext i8:$src)),
+ (INSERT_SUBREG (i16 (IMPLICIT_DEF)), i8:$src, sub_lo)>;
+
+// `trunc`
+def : Pat<(i8 (trunc i16:$src)),
+ (EXTRACT_SUBREG i16:$src, sub_lo)>;
+
+// sext_inreg
+def : Pat<(sext_inreg i16:$src, i8),
+ (SEXT (i8 (EXTRACT_SUBREG i16:$src, sub_lo)))>;
+
+// GlobalAddress
+def : Pat<(i16 (AVRWrapper tglobaladdr:$dst)),
+ (LDIWRdK tglobaladdr:$dst)>;
+def : Pat<(add i16:$src, (AVRWrapper tglobaladdr:$src2)),
+ (SUBIWRdK i16:$src, tglobaladdr:$src2)>;
+def : Pat<(i8 (load (AVRWrapper tglobaladdr:$dst))),
+ (LDSRdK tglobaladdr:$dst)>;
+def : Pat<(i16 (load (AVRWrapper tglobaladdr:$dst))),
+ (LDSWRdK tglobaladdr:$dst)>;
+def : Pat<(store i8:$src, (i16 (AVRWrapper tglobaladdr:$dst))),
+ (STSKRr tglobaladdr:$dst, i8:$src)>;
+def : Pat<(store i16:$src, (i16 (AVRWrapper tglobaladdr:$dst))),
+ (STSWKRr tglobaladdr:$dst, i16:$src)>;
+
+// BlockAddress
+def : Pat<(i16 (AVRWrapper tblockaddress:$dst)),
+ (LDIWRdK tblockaddress:$dst)>;
+
+// High-register truncation: trunc(int16 >> 8)
+//:FIXME: It would be better to emit an EXTRACT_SUBREG node in the DAG than
+// all this mess once we get optimal shift code.
+// I think so, too. [@agnat]
+def : Pat<(i8 (trunc (AVRlsr (AVRlsr (AVRlsr (AVRlsr (AVRlsr (AVRlsr (AVRlsr
+ (AVRlsr DREGS:$src)))))))))),
+ (EXTRACT_SUBREG DREGS:$src, sub_hi)>;
+
+// :FIXME: DAGCombiner produces a shl node after legalization from this
+// sequence: BR_JT -> (mul x, 2) -> (shl x, 1)
+def : Pat<(shl i16:$src1, (i8 1)),
+ (LSLWRd i16:$src1)>;
+
diff --git a/lib/Target/AVR/AVRMachineFunctionInfo.h b/lib/Target/AVR/AVRMachineFunctionInfo.h
index 6571d5d3e603..cf0c73576301 100644
--- a/lib/Target/AVR/AVRMachineFunctionInfo.h
+++ b/lib/Target/AVR/AVRMachineFunctionInfo.h
@@ -14,15 +14,11 @@
#ifndef LLVM_AVR_MACHINE_FUNCTION_INFO_H
#define LLVM_AVR_MACHINE_FUNCTION_INFO_H
-#include "AVRConfig.h"
-
#include "llvm/CodeGen/MachineFunction.h"
namespace llvm {
-/**
- * Contains AVR-specific information for each MachineFunction.
- */
+/// Contains AVR-specific information for each MachineFunction.
class AVRMachineFunctionInfo : public MachineFunctionInfo {
/// Indicates if a register has been spilled by the register
/// allocator.
diff --git a/lib/Target/AVR/AVRRegisterInfo.cpp b/lib/Target/AVR/AVRRegisterInfo.cpp
new file mode 100644
index 000000000000..5786f74d1c4f
--- /dev/null
+++ b/lib/Target/AVR/AVRRegisterInfo.cpp
@@ -0,0 +1,256 @@
+//===-- AVRRegisterInfo.cpp - AVR Register Information --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the AVR implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AVRRegisterInfo.h"
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+#include "AVR.h"
+#include "AVRInstrInfo.h"
+#include "AVRTargetMachine.h"
+#include "MCTargetDesc/AVRMCTargetDesc.h"
+
+#define GET_REGINFO_TARGET_DESC
+#include "AVRGenRegisterInfo.inc"
+
+namespace llvm {
+
+AVRRegisterInfo::AVRRegisterInfo() : AVRGenRegisterInfo(0) {}
+
+const uint16_t *
+AVRRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
+ CallingConv::ID CC = MF->getFunction()->getCallingConv();
+
+ return ((CC == CallingConv::AVR_INTR || CC == CallingConv::AVR_SIGNAL)
+ ? CSR_Interrupts_SaveList
+ : CSR_Normal_SaveList);
+}
+
+const uint32_t *
+AVRRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
+ CallingConv::ID CC) const {
+ return ((CC == CallingConv::AVR_INTR || CC == CallingConv::AVR_SIGNAL)
+ ? CSR_Interrupts_RegMask
+ : CSR_Normal_RegMask);
+}
+
+BitVector AVRRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
+ BitVector Reserved(getNumRegs());
+ const AVRTargetMachine &TM = static_cast<const AVRTargetMachine&>(MF.getTarget());
+ const TargetFrameLowering *TFI = TM.getSubtargetImpl()->getFrameLowering();
+
+ // Reserve the intermediate result registers r0 and r1.
+ // The result of instructions like 'mul' is always stored here.
+ Reserved.set(AVR::R0);
+ Reserved.set(AVR::R1);
+ Reserved.set(AVR::R1R0);
+
+ // Reserve the stack pointer.
+ Reserved.set(AVR::SPL);
+ Reserved.set(AVR::SPH);
+ Reserved.set(AVR::SP);
+
+ // Reserve the frame pointer registers r28 and r29 if the function requires one.
+ if (TFI->hasFP(MF)) {
+ Reserved.set(AVR::R28);
+ Reserved.set(AVR::R29);
+ Reserved.set(AVR::R29R28);
+ }
+
+ return Reserved;
+}
+
+const TargetRegisterClass *
+AVRRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
+ const MachineFunction &MF) const {
+ if (RC->hasType(MVT::i16)) {
+ return &AVR::DREGSRegClass;
+ }
+
+ if (RC->hasType(MVT::i8)) {
+ return &AVR::GPR8RegClass;
+ }
+
+ llvm_unreachable("Invalid register size");
+}
+
+/// Fold a frame offset shared between two add instructions into a single one.
+static void foldFrameOffset(MachineInstr &MI, int &Offset, unsigned DstReg) {
+ int Opcode = MI.getOpcode();
+
+ // Don't bother trying if the next instruction is not an add or a sub.
+ if ((Opcode != AVR::SUBIWRdK) && (Opcode != AVR::ADIWRdK)) {
+ return;
+ }
+
+ // Check that DstReg matches the destination of the next instruction;
+ // otherwise the instruction is not related to stack address manipulation.
+ if (DstReg != MI.getOperand(0).getReg()) {
+ return;
+ }
+
+ // Add the offset in the next instruction to our offset.
+ switch (Opcode) {
+ case AVR::SUBIWRdK:
+ Offset += -MI.getOperand(2).getImm();
+ break;
+ case AVR::ADIWRdK:
+ Offset += MI.getOperand(2).getImm();
+ break;
+ }
+
+ // Finally remove the instruction.
+ MI.eraseFromParent();
+}
+
+void AVRRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
+ int SPAdj, unsigned FIOperandNum,
+ RegScavenger *RS) const {
+ assert(SPAdj == 0 && "Unexpected SPAdj value");
+
+ MachineInstr &MI = *II;
+ DebugLoc dl = MI.getDebugLoc();
+ MachineBasicBlock &MBB = *MI.getParent();
+ const MachineFunction &MF = *MBB.getParent();
+ const AVRTargetMachine &TM = (const AVRTargetMachine &)MF.getTarget();
+ const TargetInstrInfo &TII = *TM.getSubtargetImpl()->getInstrInfo();
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const TargetFrameLowering *TFI = TM.getSubtargetImpl()->getFrameLowering();
+ int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
+ int Offset = MFI->getObjectOffset(FrameIndex);
+
+ // Add one to the offset because SP points to an empty slot.
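+ // (On AVR, a push stores to the address in SP and then decrements SP, so SP
+ // always points at the next free byte below the last value pushed.)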
+ Offset += MFI->getStackSize() - TFI->getOffsetOfLocalArea() + 1;
+ // Fold incoming offset.
+ Offset += MI.getOperand(FIOperandNum + 1).getImm();
+
+ // This is actually a "load effective address of the stack slot"
+ // instruction. We only have two-address instructions, so we need to
+ // expand it into a move followed by an add.
+ if (MI.getOpcode() == AVR::FRMIDX) {
+ MI.setDesc(TII.get(AVR::MOVWRdRr));
+ MI.getOperand(FIOperandNum).ChangeToRegister(AVR::R29R28, false);
+
+ assert(Offset > 0 && "Invalid offset");
+
+ // We need to materialize the offset via an add instruction.
+ unsigned Opcode;
+ unsigned DstReg = MI.getOperand(0).getReg();
+ assert(DstReg != AVR::R29R28 && "Dest reg cannot be the frame pointer");
+
+ // Generally, to load a frame address two add instructions are emitted that
+ // could get folded into a single one:
+ // movw r31:r30, r29:r28
+ // adiw r31:r30, 29
+ // adiw r31:r30, 16
+ // to:
+ // movw r31:r30, r29:r28
+ // adiw r31:r30, 45
+ foldFrameOffset(*std::next(II), Offset, DstReg);
+
+ // Select the best opcode based on DstReg and the offset size.
+ switch (DstReg) {
+ case AVR::R25R24:
+ case AVR::R27R26:
+ case AVR::R31R30: {
+ if (isUInt<6>(Offset)) {
+ Opcode = AVR::ADIWRdK;
+ break;
+ }
+ // Fallthrough
+ }
+ default: {
+ // This opcode will get expanded into a pair of subi/sbci.
+ Opcode = AVR::SUBIWRdK;
+ Offset = -Offset;
+ break;
+ }
+ }
+
+ MachineInstr *New = BuildMI(MBB, std::next(II), dl, TII.get(Opcode), DstReg)
+ .addReg(DstReg, RegState::Kill)
+ .addImm(Offset);
+ New->getOperand(3).setIsDead();
+
+ return;
+ }
+
+ // If the offset is too big, we have to adjust and restore the frame pointer
+ // to materialize a valid load/store with displacement.
+ //:TODO: consider using only one adiw/sbiw chain for more than one frame index
+ if (Offset >= 63) {
+ unsigned AddOpc = AVR::ADIWRdK, SubOpc = AVR::SBIWRdK;
+ int AddOffset = Offset - 63 + 1;
+
+ // For huge offsets where adiw/sbiw cannot be used, use a pair of subi/sbci.
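+ // (adiw/sbiw only accept a 6-bit immediate in the range 0..63, while a
+ // subi/sbci pair can apply a full 16-bit adjustment.)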
+ if ((Offset - 63 + 1) > 63) {
+ AddOpc = AVR::SUBIWRdK;
+ SubOpc = AVR::SUBIWRdK;
+ AddOffset = -AddOffset;
+ }
+
+ // It is possible that the spiller places this frame instruction between a
+ // compare and a branch, in which case the add/sub pairs would clobber the
+ // SREG value set by the compare. Conservatively save and restore SREG
+ // before and after each add/sub pair.
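+ // 0x3f is the IO-space address of SREG.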
+ BuildMI(MBB, II, dl, TII.get(AVR::INRdA), AVR::R0).addImm(0x3f);
+
+ MachineInstr *New = BuildMI(MBB, II, dl, TII.get(AddOpc), AVR::R29R28)
+ .addReg(AVR::R29R28, RegState::Kill)
+ .addImm(AddOffset);
+ New->getOperand(3).setIsDead();
+
+ // Restore SREG.
+ BuildMI(MBB, std::next(II), dl, TII.get(AVR::OUTARr))
+ .addImm(0x3f)
+ .addReg(AVR::R0, RegState::Kill);
+
+ // Don't mark SREG as dead here; otherwise, if the next instruction is a
+ // conditional branch, it would be using a dead register.
+ New = BuildMI(MBB, std::next(II), dl, TII.get(SubOpc), AVR::R29R28)
+ .addReg(AVR::R29R28, RegState::Kill)
+ .addImm(Offset - 63 + 1);
+
+ Offset = 62;
+ }
+
+ MI.getOperand(FIOperandNum).ChangeToRegister(AVR::R29R28, false);
+ assert(isUInt<6>(Offset) && "Offset is out of range");
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
+}
+
+unsigned AVRRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+ const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
+ if (TFI->hasFP(MF)) {
+ // The Y pointer register
+ return AVR::R28;
+ }
+
+ return AVR::SP;
+}
+
+const TargetRegisterClass *
+AVRRegisterInfo::getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind) const {
+ // FIXME: Currently we're using avr-gcc as the reference, so we restrict
+ // pointers to the Y and Z registers. However, avr-gcc has a buggy
+ // implementation of the memory constraint, so we could fix it and beat
+ // avr-gcc here ;-)
+ return &AVR::PTRDISPREGSRegClass;
+}
+
+} // end of namespace llvm
diff --git a/lib/Target/AVR/AVRRegisterInfo.h b/lib/Target/AVR/AVRRegisterInfo.h
new file mode 100644
index 000000000000..59c0849d209b
--- /dev/null
+++ b/lib/Target/AVR/AVRRegisterInfo.h
@@ -0,0 +1,56 @@
+//===-- AVRRegisterInfo.h - AVR Register Information Impl -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the AVR implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_AVR_REGISTER_INFO_H
+#define LLVM_AVR_REGISTER_INFO_H
+
+#include "llvm/Target/TargetRegisterInfo.h"
+
+#define GET_REGINFO_HEADER
+#include "AVRGenRegisterInfo.inc"
+
+namespace llvm {
+
+/// Utilities relating to AVR registers.
+class AVRRegisterInfo : public AVRGenRegisterInfo {
+public:
+ AVRRegisterInfo();
+
+public:
+ const uint16_t *
+ getCalleeSavedRegs(const MachineFunction *MF = nullptr) const override;
+ const uint32_t *getCallPreservedMask(const MachineFunction &MF,
+ CallingConv::ID CC) const override;
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
+
+ const TargetRegisterClass *
+ getLargestLegalSuperClass(const TargetRegisterClass *RC,
+ const MachineFunction &MF) const override;
+
+ /// Stack Frame Processing Methods
+ void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
+ unsigned FIOperandNum,
+ RegScavenger *RS = nullptr) const override;
+
+ /// Debug information queries.
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
+
+ /// Returns a TargetRegisterClass used for pointer values.
+ const TargetRegisterClass *
+ getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind = 0) const override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_AVR_REGISTER_INFO_H
diff --git a/lib/Target/AVR/AVRSelectionDAGInfo.h b/lib/Target/AVR/AVRSelectionDAGInfo.h
index ee832ad1bc75..6474c8779330 100644
--- a/lib/Target/AVR/AVRSelectionDAGInfo.h
+++ b/lib/Target/AVR/AVRSelectionDAGInfo.h
@@ -7,20 +7,19 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the AVR subclass for TargetSelectionDAGInfo.
+// This file defines the AVR subclass for SelectionDAGTargetInfo.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_AVR_SELECTION_DAG_INFO_H
#define LLVM_AVR_SELECTION_DAG_INFO_H
-#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
namespace llvm {
-/**
- * Holds information about the AVR instruction selection DAG.
- */
-class AVRSelectionDAGInfo : public TargetSelectionDAGInfo {
+
+/// Holds information about the AVR instruction selection DAG.
+class AVRSelectionDAGInfo : public SelectionDAGTargetInfo {
public:
};
diff --git a/lib/Target/AVR/AVRSubtarget.cpp b/lib/Target/AVR/AVRSubtarget.cpp
new file mode 100644
index 000000000000..c228d051d771
--- /dev/null
+++ b/lib/Target/AVR/AVRSubtarget.cpp
@@ -0,0 +1,47 @@
+//===-- AVRSubtarget.cpp - AVR Subtarget Information ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AVR specific subclass of TargetSubtargetInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AVRSubtarget.h"
+
+#include "llvm/Support/ELF.h"
+#include "llvm/Support/TargetRegistry.h"
+
+#include "AVR.h"
+#include "AVRTargetMachine.h"
+#include "MCTargetDesc/AVRMCTargetDesc.h"
+
+#define DEBUG_TYPE "avr-subtarget"
+
+#define GET_SUBTARGETINFO_TARGET_DESC
+#define GET_SUBTARGETINFO_CTOR
+#include "AVRGenSubtargetInfo.inc"
+
+namespace llvm {
+
+AVRSubtarget::AVRSubtarget(const Triple &TT, const std::string &CPU,
+ const std::string &FS, AVRTargetMachine &TM)
+ : AVRGenSubtargetInfo(TT, CPU, FS), InstrInfo(), FrameLowering(),
+ TLInfo(TM), TSInfo(),
+
+ // Subtarget features
+ m_hasSRAM(false), m_hasJMPCALL(false), m_hasIJMPCALL(false),
+ m_hasEIJMPCALL(false), m_hasADDSUBIW(false), m_hasSmallStack(false),
+ m_hasMOVW(false), m_hasLPM(false), m_hasLPMX(false), m_hasELPM(false),
+ m_hasELPMX(false), m_hasSPM(false), m_hasSPMX(false), m_hasDES(false),
+ m_supportsRMW(false), m_supportsMultiplication(false), m_hasBREAK(false),
+ m_hasTinyEncoding(false), ELFArch(0), m_FeatureSetDummy(false) {
+ // Parse features string.
+ ParseSubtargetFeatures(CPU, FS);
+}
+
+} // end of namespace llvm
diff --git a/lib/Target/AVR/AVRSubtarget.h b/lib/Target/AVR/AVRSubtarget.h
new file mode 100644
index 000000000000..a37849c3f3f7
--- /dev/null
+++ b/lib/Target/AVR/AVRSubtarget.h
@@ -0,0 +1,119 @@
+//===-- AVRSubtarget.h - Define Subtarget for the AVR -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the AVR specific subclass of TargetSubtargetInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_AVR_SUBTARGET_H
+#define LLVM_AVR_SUBTARGET_H
+
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Target/TargetMachine.h"
+
+#include "AVRFrameLowering.h"
+#include "AVRISelLowering.h"
+#include "AVRInstrInfo.h"
+#include "AVRSelectionDAGInfo.h"
+
+#define GET_SUBTARGETINFO_HEADER
+#include "AVRGenSubtargetInfo.inc"
+
+namespace llvm {
+
+/// A specific AVR target MCU.
+class AVRSubtarget : public AVRGenSubtargetInfo {
+public:
+ /// Creates an AVR subtarget.
+ /// \param TT The target triple.
+ /// \param CPU The CPU to target.
+ /// \param FS The feature string.
+ /// \param TM The target machine.
+ AVRSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS,
+ AVRTargetMachine &TM);
+
+ const AVRInstrInfo *getInstrInfo() const override { return &InstrInfo; }
+ const TargetFrameLowering *getFrameLowering() const override { return &FrameLowering; }
+ const AVRTargetLowering *getTargetLowering() const override { return &TLInfo; }
+ const AVRSelectionDAGInfo *getSelectionDAGInfo() const override { return &TSInfo; }
+ const AVRRegisterInfo *getRegisterInfo() const override { return &InstrInfo.getRegisterInfo(); }
+
+ /// Parses a subtarget feature string, setting appropriate options.
+ /// \note Definition of function is auto generated by `tblgen`.
+ void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+
+ // Subtarget feature getters.
+ // See AVR.td for details.
+ bool hasSRAM() const { return m_hasSRAM; }
+ bool hasJMPCALL() const { return m_hasJMPCALL; }
+ bool hasIJMPCALL() const { return m_hasIJMPCALL; }
+ bool hasEIJMPCALL() const { return m_hasEIJMPCALL; }
+ bool hasADDSUBIW() const { return m_hasADDSUBIW; }
+ bool hasSmallStack() const { return m_hasSmallStack; }
+ bool hasMOVW() const { return m_hasMOVW; }
+ bool hasLPM() const { return m_hasLPM; }
+ bool hasLPMX() const { return m_hasLPMX; }
+ bool hasELPM() const { return m_hasELPM; }
+ bool hasELPMX() const { return m_hasELPMX; }
+ bool hasSPM() const { return m_hasSPM; }
+ bool hasSPMX() const { return m_hasSPMX; }
+ bool hasDES() const { return m_hasDES; }
+ bool supportsRMW() const { return m_supportsRMW; }
+ bool supportsMultiplication() const { return m_supportsMultiplication; }
+ bool hasBREAK() const { return m_hasBREAK; }
+ bool hasTinyEncoding() const { return m_hasTinyEncoding; }
+
+ /// Gets the ELF architecture for the e_flags field
+ /// of an ELF object file.
+ unsigned getELFArch() const {
+ assert(ELFArch != 0 &&
+ "every device must have an associate ELF architecture");
+ return ELFArch;
+ }
+
+private:
+ AVRInstrInfo InstrInfo;
+ AVRFrameLowering FrameLowering;
+ AVRTargetLowering TLInfo;
+ AVRSelectionDAGInfo TSInfo;
+
+ // Subtarget feature settings
+ // See AVR.td for details.
+ bool m_hasSRAM;
+ bool m_hasJMPCALL;
+ bool m_hasIJMPCALL;
+ bool m_hasEIJMPCALL;
+ bool m_hasADDSUBIW;
+ bool m_hasSmallStack;
+ bool m_hasMOVW;
+ bool m_hasLPM;
+ bool m_hasLPMX;
+ bool m_hasELPM;
+ bool m_hasELPMX;
+ bool m_hasSPM;
+ bool m_hasSPMX;
+ bool m_hasDES;
+ bool m_supportsRMW;
+ bool m_supportsMultiplication;
+ bool m_hasBREAK;
+ bool m_hasTinyEncoding;
+
+ /// The ELF e_flags architecture.
+ unsigned ELFArch;
+
+ // Dummy member, used by FeatureSets. We cannot have a SubtargetFeature with
+ // no variable, so we instead bind pseudo features to this variable.
+ bool m_FeatureSetDummy;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_AVR_SUBTARGET_H
diff --git a/lib/Target/AVR/AVRTargetMachine.cpp b/lib/Target/AVR/AVRTargetMachine.cpp
index a91dce8a63f4..508723e91c60 100644
--- a/lib/Target/AVR/AVRTargetMachine.cpp
+++ b/lib/Target/AVR/AVRTargetMachine.cpp
@@ -1,4 +1,105 @@
+//===-- AVRTargetMachine.cpp - Define TargetMachine for AVR ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AVR specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AVRTargetMachine.h"
+
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/Support/TargetRegistry.h"
+
+#include "AVRTargetObjectFile.h"
+#include "AVR.h"
+#include "MCTargetDesc/AVRMCTargetDesc.h"
+
+namespace llvm {
+
+/// Returns the CPU name to target, mapping an empty or "generic" CPU name to
+/// the default "avr2".
+static StringRef getCPU(StringRef CPU) {
+ if (CPU.empty() || CPU == "generic") {
+ return "avr2";
+ }
+
+ return CPU;
+}
+
+static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
+ return RM.hasValue() ? *RM : Reloc::Static;
+}
+
+AVRTargetMachine::AVRTargetMachine(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : LLVMTargetMachine(
+ T, "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8-i64:8:8-f32:8:8-f64:8:8-n8", TT,
+ getCPU(CPU), FS, Options, getEffectiveRelocModel(RM), CM, OL),
+ SubTarget(TT, getCPU(CPU), FS, *this) {
+ this->TLOF = make_unique<AVRTargetObjectFile>();
+ initAsmInfo();
+}
+
+namespace {
+/// AVR Code Generator Pass Configuration Options.
+class AVRPassConfig : public TargetPassConfig {
+public:
+ AVRPassConfig(AVRTargetMachine *TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ AVRTargetMachine &getAVRTargetMachine() const {
+ return getTM<AVRTargetMachine>();
+ }
+
+ bool addInstSelector() override;
+ void addPreSched2() override;
+ void addPreRegAlloc() override;
+ void addPreEmitPass() override;
+};
+} // namespace
+
+TargetPassConfig *AVRTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new AVRPassConfig(this, PM);
+}
extern "C" void LLVMInitializeAVRTarget() {
+ // Register the target.
+ RegisterTargetMachine<AVRTargetMachine> X(TheAVRTarget);
+}
+
+const AVRSubtarget *AVRTargetMachine::getSubtargetImpl() const {
+ return &SubTarget;
+}
+
+const AVRSubtarget *AVRTargetMachine::getSubtargetImpl(const Function &) const {
+ return &SubTarget;
+}
+//===----------------------------------------------------------------------===//
+// Pass Pipeline Configuration
+//===----------------------------------------------------------------------===//
+
+bool AVRPassConfig::addInstSelector() {
+ return false;
+}
+
+void AVRPassConfig::addPreRegAlloc() {
}
+
+void AVRPassConfig::addPreSched2() {}
+
+void AVRPassConfig::addPreEmitPass() {}
+
+} // end of namespace llvm
diff --git a/lib/Target/AVR/AVRTargetMachine.h b/lib/Target/AVR/AVRTargetMachine.h
new file mode 100644
index 000000000000..10345193d14a
--- /dev/null
+++ b/lib/Target/AVR/AVRTargetMachine.h
@@ -0,0 +1,51 @@
+//===-- AVRTargetMachine.h - Define TargetMachine for AVR -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the AVR specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_AVR_TARGET_MACHINE_H
+#define LLVM_AVR_TARGET_MACHINE_H
+
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Target/TargetMachine.h"
+
+#include "AVRFrameLowering.h"
+#include "AVRISelLowering.h"
+#include "AVRInstrInfo.h"
+#include "AVRSelectionDAGInfo.h"
+#include "AVRSubtarget.h"
+
+namespace llvm {
+
+/// A generic AVR implementation.
+class AVRTargetMachine : public LLVMTargetMachine {
+public:
+ AVRTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options, Optional<Reloc::Model> RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL);
+
+ const AVRSubtarget *getSubtargetImpl() const;
+ const AVRSubtarget *getSubtargetImpl(const Function &) const override;
+
+ TargetLoweringObjectFile *getObjFileLowering() const override {
+ return this->TLOF.get();
+ }
+
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+
+private:
+ std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ AVRSubtarget SubTarget;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_AVR_TARGET_MACHINE_H
diff --git a/lib/Target/AVR/AVRTargetObjectFile.h b/lib/Target/AVR/AVRTargetObjectFile.h
index bdda35b34993..587612584314 100644
--- a/lib/Target/AVR/AVRTargetObjectFile.h
+++ b/lib/Target/AVR/AVRTargetObjectFile.h
@@ -13,9 +13,8 @@
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
namespace llvm {
-/**
- * Lowering for an AVR ELF32 object file.
- */
+
+/// Lowering for an AVR ELF32 object file.
class AVRTargetObjectFile : public TargetLoweringObjectFileELF {
typedef TargetLoweringObjectFileELF Base;
diff --git a/lib/Target/AVR/CMakeLists.txt b/lib/Target/AVR/CMakeLists.txt
index b4fb0d9ceac2..4b8354d64879 100644
--- a/lib/Target/AVR/CMakeLists.txt
+++ b/lib/Target/AVR/CMakeLists.txt
@@ -1,15 +1,21 @@
set(LLVM_TARGET_DEFINITIONS AVR.td)
tablegen(LLVM AVRGenRegisterInfo.inc -gen-register-info)
+tablegen(LLVM AVRGenInstrInfo.inc -gen-instr-info)
tablegen(LLVM AVRGenCallingConv.inc -gen-callingconv)
+tablegen(LLVM AVRGenSubtargetInfo.inc -gen-subtarget)
add_public_tablegen_target(AVRCommonTableGen)
add_llvm_target(AVRCodeGen
- AVRTargetMachine.cpp
- AVRTargetObjectFile.cpp
- )
+ AVRInstrInfo.cpp
+ AVRRegisterInfo.cpp
+ AVRSubtarget.cpp
+ AVRTargetMachine.cpp
+ AVRTargetObjectFile.cpp
+)
add_dependencies(LLVMAVRCodeGen intrinsics_gen)
+add_subdirectory(MCTargetDesc)
add_subdirectory(TargetInfo)
diff --git a/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.cpp b/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.cpp
new file mode 100644
index 000000000000..481de320b22f
--- /dev/null
+++ b/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.cpp
@@ -0,0 +1,66 @@
+#include "AVRELFStreamer.h"
+
+#include "llvm/Support/ELF.h"
+#include "llvm/Support/FormattedStream.h"
+
+#include "AVRMCTargetDesc.h"
+
+namespace llvm {
+
+static unsigned getEFlagsForFeatureSet(const FeatureBitset &Features) {
+ unsigned EFlags = 0;
+
+ // Set architecture
+ if (Features[AVR::ELFArchAVR1])
+ EFlags |= ELF::EF_AVR_ARCH_AVR1;
+ else if (Features[AVR::ELFArchAVR2])
+ EFlags |= ELF::EF_AVR_ARCH_AVR2;
+ else if (Features[AVR::ELFArchAVR25])
+ EFlags |= ELF::EF_AVR_ARCH_AVR25;
+ else if (Features[AVR::ELFArchAVR3])
+ EFlags |= ELF::EF_AVR_ARCH_AVR3;
+ else if (Features[AVR::ELFArchAVR31])
+ EFlags |= ELF::EF_AVR_ARCH_AVR31;
+ else if (Features[AVR::ELFArchAVR35])
+ EFlags |= ELF::EF_AVR_ARCH_AVR35;
+ else if (Features[AVR::ELFArchAVR4])
+ EFlags |= ELF::EF_AVR_ARCH_AVR4;
+ else if (Features[AVR::ELFArchAVR5])
+ EFlags |= ELF::EF_AVR_ARCH_AVR5;
+ else if (Features[AVR::ELFArchAVR51])
+ EFlags |= ELF::EF_AVR_ARCH_AVR51;
+ else if (Features[AVR::ELFArchAVR6])
+ EFlags |= ELF::EF_AVR_ARCH_AVR6;
+ else if (Features[AVR::ELFArchAVRTiny])
+ EFlags |= ELF::EF_AVR_ARCH_AVRTINY;
+ else if (Features[AVR::ELFArchXMEGA1])
+ EFlags |= ELF::EF_AVR_ARCH_XMEGA1;
+ else if (Features[AVR::ELFArchXMEGA2])
+ EFlags |= ELF::EF_AVR_ARCH_XMEGA2;
+ else if (Features[AVR::ELFArchXMEGA3])
+ EFlags |= ELF::EF_AVR_ARCH_XMEGA3;
+ else if (Features[AVR::ELFArchXMEGA4])
+ EFlags |= ELF::EF_AVR_ARCH_XMEGA4;
+ else if (Features[AVR::ELFArchXMEGA5])
+ EFlags |= ELF::EF_AVR_ARCH_XMEGA5;
+ else if (Features[AVR::ELFArchXMEGA6])
+ EFlags |= ELF::EF_AVR_ARCH_XMEGA6;
+ else if (Features[AVR::ELFArchXMEGA7])
+ EFlags |= ELF::EF_AVR_ARCH_XMEGA7;
+
+ return EFlags;
+}
+
+AVRELFStreamer::AVRELFStreamer(MCStreamer &S,
+ const MCSubtargetInfo &STI)
+ : AVRTargetStreamer(S) {
+
+ MCAssembler &MCA = getStreamer().getAssembler();
+ unsigned EFlags = MCA.getELFHeaderEFlags();
+
+ EFlags |= getEFlagsForFeatureSet(STI.getFeatureBits());
+
+ MCA.setELFHeaderEFlags(EFlags);
+}
+
+} // end namespace llvm
diff --git a/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.h b/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.h
new file mode 100644
index 000000000000..e5df6cc34e40
--- /dev/null
+++ b/lib/Target/AVR/MCTargetDesc/AVRELFStreamer.h
@@ -0,0 +1,29 @@
+//===----- AVRELFStreamer.h - AVR Target Streamer --------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_AVR_ELF_STREAMER_H
+#define LLVM_AVR_ELF_STREAMER_H
+
+#include "AVRTargetStreamer.h"
+
+namespace llvm {
+
+/// A target streamer for an AVR ELF object file.
+class AVRELFStreamer : public AVRTargetStreamer {
+public:
+ AVRELFStreamer(MCStreamer &S, const MCSubtargetInfo &STI);
+
+ MCELFStreamer &getStreamer() {
+ return static_cast<MCELFStreamer &>(Streamer);
+ }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp b/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp
new file mode 100644
index 000000000000..cca3bcc4968a
--- /dev/null
+++ b/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.cpp
@@ -0,0 +1,28 @@
+//===-- AVRMCAsmInfo.cpp - AVR asm properties -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declarations of the AVRMCAsmInfo properties.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AVRMCAsmInfo.h"
+
+#include "llvm/ADT/Triple.h"
+
+namespace llvm {
+
+AVRMCAsmInfo::AVRMCAsmInfo(const Triple &TT) {
+ PointerSize = 2;
+ CalleeSaveStackSlotSize = 2;
+ CommentString = ";";
+ PrivateGlobalPrefix = ".L";
+ UsesELFSectionDirectiveForBSS = true;
+}
+
+} // end of namespace llvm
diff --git a/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.h b/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.h
new file mode 100644
index 000000000000..cc2207a3cfae
--- /dev/null
+++ b/lib/Target/AVR/MCTargetDesc/AVRMCAsmInfo.h
@@ -0,0 +1,31 @@
+//===-- AVRMCAsmInfo.h - AVR asm properties ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the AVRMCAsmInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_AVR_ASM_INFO_H
+#define LLVM_AVR_ASM_INFO_H
+
+#include "llvm/MC/MCAsmInfo.h"
+
+namespace llvm {
+
+class Triple;
+
+/// Specifies the format of AVR assembly files.
+class AVRMCAsmInfo : public MCAsmInfo {
+public:
+ explicit AVRMCAsmInfo(const Triple &TT);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_AVR_ASM_INFO_H
diff --git a/lib/Target/AVR/MCTargetDesc/AVRMCTargetDesc.h b/lib/Target/AVR/MCTargetDesc/AVRMCTargetDesc.h
new file mode 100644
index 000000000000..b72793d0fab4
--- /dev/null
+++ b/lib/Target/AVR/MCTargetDesc/AVRMCTargetDesc.h
@@ -0,0 +1,57 @@
+//===-- AVRMCTargetDesc.h - AVR Target Descriptions -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides AVR specific target descriptions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_AVR_MCTARGET_DESC_H
+#define LLVM_AVR_MCTARGET_DESC_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class MCAsmBackend;
+class MCCodeEmitter;
+class MCContext;
+class MCInstrInfo;
+class MCObjectWriter;
+class MCRegisterInfo;
+class StringRef;
+class Target;
+class Triple;
+class raw_pwrite_stream;
+
+extern Target TheAVRTarget;
+
+/// Creates a machine code emitter for AVR.
+MCCodeEmitter *createAVRMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ MCContext &Ctx);
+
+/// Creates an assembly backend for AVR.
+MCAsmBackend *createAVRAsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ const Triple &TT, StringRef CPU);
+
+/// Creates an ELF object writer for AVR.
+MCObjectWriter *createAVRELFObjectWriter(raw_pwrite_stream &OS, uint8_t OSABI);
+
+} // end namespace llvm
+
+#define GET_REGINFO_ENUM
+#include "AVRGenRegisterInfo.inc"
+
+#define GET_INSTRINFO_ENUM
+#include "AVRGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_ENUM
+#include "AVRGenSubtargetInfo.inc"
+
+#endif // LLVM_AVR_MCTARGET_DESC_H
diff --git a/lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.cpp b/lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.cpp
new file mode 100644
index 000000000000..a2d8c16eeb8c
--- /dev/null
+++ b/lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.cpp
@@ -0,0 +1,24 @@
+//===-- AVRTargetStreamer.cpp - AVR Target Streamer Methods ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides AVR specific target streamer methods.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AVRTargetStreamer.h"
+
+namespace llvm {
+
+AVRTargetStreamer::AVRTargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {}
+
+AVRTargetAsmStreamer::AVRTargetAsmStreamer(MCStreamer &S)
+ : AVRTargetStreamer(S) {}
+
+} // end namespace llvm
+
diff --git a/lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.h b/lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.h
new file mode 100644
index 000000000000..99a536699ae9
--- /dev/null
+++ b/lib/Target/AVR/MCTargetDesc/AVRTargetStreamer.h
@@ -0,0 +1,32 @@
+//===-- AVRTargetStreamer.h - AVR Target Streamer --------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_AVR_TARGET_STREAMER_H
+#define LLVM_AVR_TARGET_STREAMER_H
+
+#include "llvm/MC/MCELFStreamer.h"
+
+namespace llvm {
+class MCStreamer;
+
+/// A generic AVR target output stream.
+class AVRTargetStreamer : public MCTargetStreamer {
+public:
+ explicit AVRTargetStreamer(MCStreamer &S);
+};
+
+/// A target streamer for textual AVR assembly code.
+class AVRTargetAsmStreamer : public AVRTargetStreamer {
+public:
+ explicit AVRTargetAsmStreamer(MCStreamer &S);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_AVR_TARGET_STREAMER_H
diff --git a/lib/Target/AVR/MCTargetDesc/CMakeLists.txt b/lib/Target/AVR/MCTargetDesc/CMakeLists.txt
new file mode 100644
index 000000000000..3cceb49acb30
--- /dev/null
+++ b/lib/Target/AVR/MCTargetDesc/CMakeLists.txt
@@ -0,0 +1,8 @@
+add_llvm_library(LLVMAVRDesc
+ AVRELFStreamer.cpp
+ AVRMCAsmInfo.cpp
+ AVRTargetStreamer.cpp
+)
+
+add_dependencies(LLVMAVRDesc AVRCommonTableGen)
+
diff --git a/lib/Target/CppBackend/LLVMBuild.txt b/lib/Target/AVR/MCTargetDesc/LLVMBuild.txt
index 122b5e7502fc..8786f563cdc8 100644
--- a/lib/Target/CppBackend/LLVMBuild.txt
+++ b/lib/Target/AVR/MCTargetDesc/LLVMBuild.txt
@@ -1,4 +1,4 @@
-;===- ./lib/Target/CppBackend/LLVMBuild.txt --------------------*- Conf -*--===;
+;===- ./lib/Target/AVR/MCTargetDesc/LLVMBuild.txt --------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
@@ -15,17 +15,9 @@
;
;===------------------------------------------------------------------------===;
-[common]
-subdirectories = TargetInfo
-
[component_0]
-type = TargetGroup
-name = CppBackend
-parent = Target
-
-[component_1]
type = Library
-name = CppBackendCodeGen
-parent = CppBackend
-required_libraries = Core CppBackendInfo Support Target
-add_to_library_groups = CppBackend
+name = AVRDesc
+parent = AVR
+required_libraries = MC AVRInfo Support
+add_to_library_groups = AVR
diff --git a/lib/Target/AVR/Makefile b/lib/Target/AVR/Makefile
deleted file mode 100644
index c91b6f5c0ae9..000000000000
--- a/lib/Target/AVR/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-##===- lib/Target/AVR/Makefile -----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMAVRCodeGen
-TARGET = AVR
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = AVRGenRegisterInfo.inc
-
-DIRS = TargetInfo
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AVR/TODO.md b/lib/Target/AVR/TODO.md
new file mode 100644
index 000000000000..3a333355646d
--- /dev/null
+++ b/lib/Target/AVR/TODO.md
@@ -0,0 +1,7 @@
+# Write an XFAIL test for this `FIXME` in `AVRInstrInfo.td`
+
+```
+// :FIXME: DAGCombiner produces an shl node after legalization from these seq:
+// BR_JT -> (mul x, 2) -> (shl x, 1)
+```
+
diff --git a/lib/Target/AVR/TargetInfo/CMakeLists.txt b/lib/Target/AVR/TargetInfo/CMakeLists.txt
index f27090037702..557c55ae2f72 100644
--- a/lib/Target/AVR/TargetInfo/CMakeLists.txt
+++ b/lib/Target/AVR/TargetInfo/CMakeLists.txt
@@ -5,3 +5,5 @@ add_llvm_library(LLVMAVRInfo
AVRTargetInfo.cpp
)
+add_dependencies(LLVMAVRInfo AVRCommonTableGen)
+
diff --git a/lib/Target/AVR/TargetInfo/Makefile b/lib/Target/AVR/TargetInfo/Makefile
deleted file mode 100644
index 92b483dd028b..000000000000
--- a/lib/Target/AVR/TargetInfo/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/AVR/TargetInfo/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMAVRInfo
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/BPF/BPFAsmPrinter.cpp b/lib/Target/BPF/BPFAsmPrinter.cpp
index 10ec6587550b..1078b0652189 100644
--- a/lib/Target/BPF/BPFAsmPrinter.cpp
+++ b/lib/Target/BPF/BPFAsmPrinter.cpp
@@ -40,38 +40,10 @@ public:
const char *getPassName() const override { return "BPF Assembly Printer"; }
- void printOperand(const MachineInstr *MI, int OpNum, raw_ostream &O,
- const char *Modifier = nullptr);
void EmitInstruction(const MachineInstr *MI) override;
};
}
-void BPFAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O, const char *Modifier) {
- const MachineOperand &MO = MI->getOperand(OpNum);
-
- switch (MO.getType()) {
- case MachineOperand::MO_Register:
- O << BPFInstPrinter::getRegisterName(MO.getReg());
- break;
-
- case MachineOperand::MO_Immediate:
- O << MO.getImm();
- break;
-
- case MachineOperand::MO_MachineBasicBlock:
- O << *MO.getMBB()->getSymbol();
- break;
-
- case MachineOperand::MO_GlobalAddress:
- O << *getSymbol(MO.getGlobal());
- break;
-
- default:
- llvm_unreachable("<unknown operand type>");
- }
-}
-
void BPFAsmPrinter::EmitInstruction(const MachineInstr *MI) {
BPFMCInstLower MCInstLowering(OutContext, *this);
diff --git a/lib/Target/BPF/BPFFrameLowering.h b/lib/Target/BPF/BPFFrameLowering.h
index 251cda965ff5..5db963f518b1 100644
--- a/lib/Target/BPF/BPFFrameLowering.h
+++ b/lib/Target/BPF/BPFFrameLowering.h
@@ -31,10 +31,10 @@ public:
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
RegScavenger *RS) const override;
- void
+ MachineBasicBlock::iterator
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const override {
- MBB.erase(MI);
+ return MBB.erase(MI);
}
};
}
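
With this import, `eliminateCallFramePseudoInstr` returns the iterator following the erased pseudo instead of `void`, so callers can keep walking the block without holding a dangling iterator. A minimal sketch of the new contract, assuming the usual LLVM 3.9 headers; `MyFrameLowering` is a hypothetical target class, not part of this diff:

```
// MachineBasicBlock::erase() already yields the iterator to the next
// instruction, so a trivial override can just forward its result.
MachineBasicBlock::iterator
MyFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MI) const {
  return MBB.erase(MI); // valid iterator for the caller to continue from
}
```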
diff --git a/lib/Target/BPF/BPFISelDAGToDAG.cpp b/lib/Target/BPF/BPFISelDAGToDAG.cpp
index 9d5f1d406d0e..ac2af036b6f8 100644
--- a/lib/Target/BPF/BPFISelDAGToDAG.cpp
+++ b/lib/Target/BPF/BPFISelDAGToDAG.cpp
@@ -46,7 +46,7 @@ private:
// Include the pieces autogenerated from the target description.
#include "BPFGenDAGISel.inc"
- SDNode *Select(SDNode *N) override;
+ void Select(SDNode *N) override;
// Complex Pattern for address selection.
bool SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset);
@@ -115,7 +115,7 @@ bool BPFDAGToDAGISel::SelectFIAddr(SDValue Addr, SDValue &Base, SDValue &Offset)
return false;
}
-SDNode *BPFDAGToDAGISel::Select(SDNode *Node) {
+void BPFDAGToDAGISel::Select(SDNode *Node) {
unsigned Opcode = Node->getOpcode();
// Dump information about the Node being selected
@@ -124,12 +124,24 @@ SDNode *BPFDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we already have selected!
if (Node->isMachineOpcode()) {
DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
- return NULL;
+ return;
}
// tablegen selection should be handled here.
switch (Opcode) {
default: break;
+ case ISD::SDIV: {
+ DebugLoc Empty;
+ const DebugLoc &DL = Node->getDebugLoc();
+ if (DL != Empty)
+ errs() << "Error at line " << DL.getLine() << ": ";
+ else
+ errs() << "Error: ";
+    errs() << "Unsupported signed division for DAG: ";
+ Node->dump(CurDAG);
+ errs() << "Please convert to unsigned div/mod.\n";
+ break;
+ }
case ISD::INTRINSIC_W_CHAIN: {
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
switch (IntNo) {
@@ -156,22 +168,17 @@ SDNode *BPFDAGToDAGISel::Select(SDNode *Node) {
EVT VT = Node->getValueType(0);
SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
unsigned Opc = BPF::MOV_rr;
- if (Node->hasOneUse())
- return CurDAG->SelectNodeTo(Node, Opc, VT, TFI);
- return CurDAG->getMachineNode(Opc, SDLoc(Node), VT, TFI);
+ if (Node->hasOneUse()) {
+ CurDAG->SelectNodeTo(Node, Opc, VT, TFI);
+ return;
+ }
+ ReplaceNode(Node, CurDAG->getMachineNode(Opc, SDLoc(Node), VT, TFI));
+ return;
}
}
// Select the default instruction
- SDNode *ResNode = SelectCode(Node);
-
- DEBUG(dbgs() << "=> ";
- if (ResNode == nullptr || ResNode == Node)
- Node->dump(CurDAG);
- else
- ResNode->dump(CurDAG);
- dbgs() << '\n');
- return ResNode;
+ SelectCode(Node);
}
FunctionPass *llvm::createBPFISelDag(BPFTargetMachine &TM) {
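
The `SelectionDAGISel::Select` hook changed from returning the replacement `SDNode *` to `void` in this release; selectors now mutate the DAG in place via `SelectNodeTo`/`ReplaceNode` and simply return, as the BPF changes above show. A hedged sketch of the new idiom; `MyDAGToDAGISel` is a hypothetical class used only for illustration:

```
void MyDAGToDAGISel::Select(SDNode *Node) {
  // Nothing to do if an earlier pass already produced a machine opcode.
  if (Node->isMachineOpcode())
    return;

  // Custom cases rewrite the DAG in place and return early, e.g.:
  //   CurDAG->SelectNodeTo(Node, Opc, VT, Ops...);    // reuse Node
  //   ReplaceNode(Node, CurDAG->getMachineNode(...)); // swap in a new node

  // Everything else falls through to the TableGen-generated matcher.
  SelectCode(Node);
}
```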
diff --git a/lib/Target/BPF/BPFISelLowering.cpp b/lib/Target/BPF/BPFISelLowering.cpp
index 6a5b37e153d8..cca3492a1992 100644
--- a/lib/Target/BPF/BPFISelLowering.cpp
+++ b/lib/Target/BPF/BPFISelLowering.cpp
@@ -14,8 +14,8 @@
#include "BPFISelLowering.h"
#include "BPF.h"
-#include "BPFTargetMachine.h"
#include "BPFSubtarget.h"
+#include "BPFTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -24,68 +24,31 @@
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/Support/CommandLine.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/IR/DiagnosticInfo.h"
-#include "llvm/IR/DiagnosticPrinter.h"
using namespace llvm;
#define DEBUG_TYPE "bpf-lower"
-namespace {
-
-// Diagnostic information for unimplemented or unsupported feature reporting.
-class DiagnosticInfoUnsupported : public DiagnosticInfo {
-private:
- // Debug location where this diagnostic is triggered.
- DebugLoc DLoc;
- const Twine &Description;
- const Function &Fn;
- SDValue Value;
-
- static int KindID;
-
- static int getKindID() {
- if (KindID == 0)
- KindID = llvm::getNextAvailablePluginDiagnosticKind();
- return KindID;
- }
-
-public:
- DiagnosticInfoUnsupported(SDLoc DLoc, const Function &Fn, const Twine &Desc,
- SDValue Value)
- : DiagnosticInfo(getKindID(), DS_Error), DLoc(DLoc.getDebugLoc()),
- Description(Desc), Fn(Fn), Value(Value) {}
-
- void print(DiagnosticPrinter &DP) const override {
- std::string Str;
- raw_string_ostream OS(Str);
-
- if (DLoc) {
- auto DIL = DLoc.get();
- StringRef Filename = DIL->getFilename();
- unsigned Line = DIL->getLine();
- unsigned Column = DIL->getColumn();
- OS << Filename << ':' << Line << ':' << Column << ' ';
- }
-
- OS << "in function " << Fn.getName() << ' ' << *Fn.getFunctionType() << '\n'
- << Description;
- if (Value)
- Value->print(OS);
- OS << '\n';
- OS.flush();
- DP << Str;
- }
-
- static bool classof(const DiagnosticInfo *DI) {
- return DI->getKind() == getKindID();
- }
-};
+static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ DAG.getContext()->diagnose(
+ DiagnosticInfoUnsupported(*MF.getFunction(), Msg, DL.getDebugLoc()));
+}
-int DiagnosticInfoUnsupported::KindID = 0;
+static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg,
+ SDValue Val) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ std::string Str;
+ raw_string_ostream OS(Str);
+ OS << Msg;
+ Val->print(OS);
+ OS.flush();
+ DAG.getContext()->diagnose(
+ DiagnosticInfoUnsupported(*MF.getFunction(), Str, DL.getDebugLoc()));
}
BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
@@ -187,8 +150,8 @@ SDValue BPFTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
SDValue BPFTargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
switch (CallConv) {
default:
llvm_unreachable("Unsupported calling convention");
@@ -212,7 +175,7 @@ SDValue BPFTargetLowering::LowerFormalArguments(
switch (RegVT.getSimpleVT().SimpleTy) {
default: {
errs() << "LowerFormalArguments Unhandled argument type: "
- << RegVT.getSimpleVT().SimpleTy << '\n';
+ << RegVT.getEVTString() << '\n';
llvm_unreachable(0);
}
case MVT::i64:
@@ -236,22 +199,20 @@ SDValue BPFTargetLowering::LowerFormalArguments(
InVals.push_back(ArgValue);
}
} else {
- DiagnosticInfoUnsupported Err(DL, *MF.getFunction(),
- "defined with too many args", SDValue());
- DAG.getContext()->diagnose(Err);
+ fail(DL, DAG, "defined with too many args");
+ InVals.push_back(DAG.getConstant(0, DL, VA.getLocVT()));
}
}
if (IsVarArg || MF.getFunction()->hasStructRetAttr()) {
- DiagnosticInfoUnsupported Err(
- DL, *MF.getFunction(),
- "functions with VarArgs or StructRet are not supported", SDValue());
- DAG.getContext()->diagnose(Err);
+ fail(DL, DAG, "functions with VarArgs or StructRet are not supported");
}
return Chain;
}
+const unsigned BPFTargetLowering::MaxArgs = 5;
+
SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
@@ -284,30 +245,27 @@ SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
unsigned NumBytes = CCInfo.getNextStackOffset();
- if (Outs.size() >= 6) {
- DiagnosticInfoUnsupported Err(CLI.DL, *MF.getFunction(),
- "too many args to ", Callee);
- DAG.getContext()->diagnose(Err);
- }
+ if (Outs.size() > MaxArgs)
+ fail(CLI.DL, DAG, "too many args to ", Callee);
for (auto &Arg : Outs) {
ISD::ArgFlagsTy Flags = Arg.Flags;
if (!Flags.isByVal())
continue;
- DiagnosticInfoUnsupported Err(CLI.DL, *MF.getFunction(),
- "pass by value not supported ", Callee);
- DAG.getContext()->diagnose(Err);
+ fail(CLI.DL, DAG, "pass by value not supported ", Callee);
}
auto PtrVT = getPointerTy(MF.getDataLayout());
Chain = DAG.getCALLSEQ_START(
Chain, DAG.getConstant(NumBytes, CLI.DL, PtrVT, true), CLI.DL);
- SmallVector<std::pair<unsigned, SDValue>, 5> RegsToPass;
+ SmallVector<std::pair<unsigned, SDValue>, MaxArgs> RegsToPass;
// Walk arg assignments
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ for (unsigned i = 0,
+ e = std::min(static_cast<unsigned>(ArgLocs.size()), MaxArgs);
+ i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
SDValue Arg = OutVals[i];
@@ -388,7 +346,8 @@ BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const {
+ const SDLoc &DL, SelectionDAG &DAG) const {
+ unsigned Opc = BPFISD::RET_FLAG;
// CCValAssign - represent the assignment of the return value to a location
SmallVector<CCValAssign, 16> RVLocs;
@@ -398,9 +357,8 @@ BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
if (MF.getFunction()->getReturnType()->isAggregateType()) {
- DiagnosticInfoUnsupported Err(DL, *MF.getFunction(),
- "only integer returns supported", SDValue());
- DAG.getContext()->diagnose(Err);
+ fail(DL, DAG, "only integer returns supported");
+ return DAG.getNode(Opc, DL, MVT::Other, Chain);
}
  // Analyze return values.
@@ -422,7 +380,6 @@ BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
}
- unsigned Opc = BPFISD::RET_FLAG;
RetOps[0] = Chain; // Update chain.
// Add the flag if we have it.
@@ -434,8 +391,8 @@ BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
SDValue BPFTargetLowering::LowerCallResult(
SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
// Assign locations to each value returned by this call.
@@ -443,9 +400,10 @@ SDValue BPFTargetLowering::LowerCallResult(
CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
if (Ins.size() >= 2) {
- DiagnosticInfoUnsupported Err(DL, *MF.getFunction(),
- "only small returns supported", SDValue());
- DAG.getContext()->diagnose(Err);
+ fail(DL, DAG, "only small returns supported");
+ for (unsigned i = 0, e = Ins.size(); i != e; ++i)
+ InVals.push_back(DAG.getConstant(0, DL, Ins[i].VT));
+ return DAG.getCopyFromReg(Chain, DL, 1, Ins[0].VT, InFlag).getValue(1);
}
CCInfo.AnalyzeCallResult(Ins, RetCC_BPF64);
@@ -535,12 +493,12 @@ SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op,
}
MachineBasicBlock *
-BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
- assert(MI->getOpcode() == BPF::Select && "Unexpected instr type to insert");
+ assert(MI.getOpcode() == BPF::Select && "Unexpected instr type to insert");
// To "insert" a SELECT instruction, we actually have to insert the diamond
// control-flow pattern. The incoming instruction knows the destination vreg
@@ -571,9 +529,9 @@ BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
BB->addSuccessor(Copy1MBB);
// Insert Branch if Flag
- unsigned LHS = MI->getOperand(1).getReg();
- unsigned RHS = MI->getOperand(2).getReg();
- int CC = MI->getOperand(3).getImm();
+ unsigned LHS = MI.getOperand(1).getReg();
+ unsigned RHS = MI.getOperand(2).getReg();
+ int CC = MI.getOperand(3).getImm();
switch (CC) {
case ISD::SETGT:
BuildMI(BB, DL, TII.get(BPF::JSGT_rr))
@@ -627,12 +585,12 @@ BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
// ...
BB = Copy1MBB;
- BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI->getOperand(0).getReg())
- .addReg(MI->getOperand(5).getReg())
+ BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())
+ .addReg(MI.getOperand(5).getReg())
.addMBB(Copy0MBB)
- .addReg(MI->getOperand(4).getReg())
+ .addReg(MI.getOperand(4).getReg())
.addMBB(ThisMBB);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
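
The hand-rolled `DiagnosticInfoUnsupported` class is dropped in favor of the library version in `llvm/IR/DiagnosticInfo.h`, wrapped by the two small `fail()` overloads above. Note that after diagnosing, the lowering hooks now push placeholder values (`DAG.getConstant(0, ...)`) so codegen can keep going and surface further errors instead of crashing. A minimal sketch of the same pattern in a hypothetical hook (`LowerSomething` is illustrative, not part of this diff):

```
SDValue MyTargetLowering::LowerSomething(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // Emit the diagnostic through the LLVMContext, as fail() does above...
  fail(DL, DAG, "operation not supported on this target");
  // ...then hand back a well-formed dummy value so lowering can continue.
  return DAG.getUNDEF(Op.getValueType());
}
```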
diff --git a/lib/Target/BPF/BPFISelLowering.h b/lib/Target/BPF/BPFISelLowering.h
index ec71dca2faeb..3d1726be286e 100644
--- a/lib/Target/BPF/BPFISelLowering.h
+++ b/lib/Target/BPF/BPFISelLowering.h
@@ -43,7 +43,7 @@ public:
const char *getTargetNodeName(unsigned Opcode) const override;
MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
+ EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const override;
private:
@@ -54,10 +54,13 @@ private:
// Lower the result values of a call, copying them out of physregs into vregs
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool IsVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
- SelectionDAG &DAG,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
+ // Maximum number of arguments to a call
+ static const unsigned MaxArgs;
+
// Lower a call into CALLSEQ_START - BPFISD:CALL - CALLSEQ_END chain
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
@@ -66,12 +69,12 @@ private:
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc DL, SelectionDAG &DAG,
+ const SDLoc &DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
+ const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
SelectionDAG &DAG) const override;
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
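
`MaxArgs` is 5 because the eBPF ABI passes call arguments in registers R1 through R5 and the return value in R0, with no stack argument passing; anything beyond five register arguments simply cannot be lowered, which is what the `LowerCall` clamp and diagnostic above enforce. An illustrative fragment, assuming the register enum generated for this backend:

```
// Not part of the diff: the five eBPF argument registers behind MaxArgs.
static const unsigned BPFArgRegs[] = {BPF::R1, BPF::R2, BPF::R3,
                                      BPF::R4, BPF::R5};
static_assert(sizeof(BPFArgRegs) / sizeof(BPFArgRegs[0]) == 5,
              "eBPF passes at most five arguments in registers");
```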
diff --git a/lib/Target/BPF/BPFInstrInfo.cpp b/lib/Target/BPF/BPFInstrInfo.cpp
index 83d14efc1a6c..7aea0512ac78 100644
--- a/lib/Target/BPF/BPFInstrInfo.cpp
+++ b/lib/Target/BPF/BPFInstrInfo.cpp
@@ -32,9 +32,9 @@ BPFInstrInfo::BPFInstrInfo()
: BPFGenInstrInfo(BPF::ADJCALLSTACKDOWN, BPF::ADJCALLSTACKUP) {}
void BPFInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
if (BPF::GPRRegClass.contains(DestReg, SrcReg))
BuildMI(MBB, I, DL, get(BPF::MOV_rr), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
@@ -75,7 +75,7 @@ void BPFInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
llvm_unreachable("Can't load this register from stack slot");
}
-bool BPFInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+bool BPFInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
@@ -90,7 +90,7 @@ bool BPFInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// Working from the bottom, when we see a non-terminator
// instruction, we're done.
- if (!isUnpredicatedTerminator(I))
+ if (!isUnpredicatedTerminator(*I))
break;
// A terminator that isn't a branch can't easily be handled
@@ -134,7 +134,7 @@ unsigned BPFInstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const {
+ const DebugLoc &DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
diff --git a/lib/Target/BPF/BPFInstrInfo.h b/lib/Target/BPF/BPFInstrInfo.h
index ac60188804d2..cc2e41e4c603 100644
--- a/lib/Target/BPF/BPFInstrInfo.h
+++ b/lib/Target/BPF/BPFInstrInfo.h
@@ -31,7 +31,7 @@ public:
const BPFRegisterInfo &getRegisterInfo() const { return RI; }
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -44,7 +44,7 @@ public:
MachineBasicBlock::iterator MBBI, unsigned DestReg,
int FrameIndex, const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
- bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
@@ -52,7 +52,7 @@ public:
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const override;
+ const DebugLoc &DL) const override;
};
}
diff --git a/lib/Target/BPF/BPFMCInstLower.cpp b/lib/Target/BPF/BPFMCInstLower.cpp
index 00bd8d9c090c..f64defecf3cc 100644
--- a/lib/Target/BPF/BPFMCInstLower.cpp
+++ b/lib/Target/BPF/BPFMCInstLower.cpp
@@ -20,9 +20,8 @@
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
MCSymbol *
diff --git a/lib/Target/BPF/BPFSubtarget.h b/lib/Target/BPF/BPFSubtarget.h
index 5ad58db75395..27cc9a262fc3 100644
--- a/lib/Target/BPF/BPFSubtarget.h
+++ b/lib/Target/BPF/BPFSubtarget.h
@@ -17,7 +17,7 @@
#include "BPFFrameLowering.h"
#include "BPFISelLowering.h"
#include "BPFInstrInfo.h"
-#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
@@ -33,7 +33,7 @@ class BPFSubtarget : public BPFGenSubtargetInfo {
BPFInstrInfo InstrInfo;
BPFFrameLowering FrameLowering;
BPFTargetLowering TLInfo;
- TargetSelectionDAGInfo TSInfo;
+ SelectionDAGTargetInfo TSInfo;
public:
// This constructor initializes the data members to match that
@@ -52,7 +52,7 @@ public:
const BPFTargetLowering *getTargetLowering() const override {
return &TLInfo;
}
- const TargetSelectionDAGInfo *getSelectionDAGInfo() const override {
+ const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
return &TSInfo;
}
const TargetRegisterInfo *getRegisterInfo() const override {
diff --git a/lib/Target/BPF/BPFTargetMachine.cpp b/lib/Target/BPF/BPFTargetMachine.cpp
index 06cba2252a25..5fc6f2f0ce55 100644
--- a/lib/Target/BPF/BPFTargetMachine.cpp
+++ b/lib/Target/BPF/BPFTargetMachine.cpp
@@ -16,6 +16,7 @@
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
@@ -36,13 +37,19 @@ static std::string computeDataLayout(const Triple &TT) {
return "e-m:e-p:64:64-i64:64-n32:64-S128";
}
+static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
+ if (!RM.hasValue())
+ return Reloc::PIC_;
+ return *RM;
+}
+
BPFTargetMachine::BPFTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options, RM, CM,
- OL),
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL)
+ : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options,
+ getEffectiveRelocModel(RM), CM, OL),
TLOF(make_unique<TargetLoweringObjectFileELF>()),
Subtarget(TT, CPU, FS, *this) {
initAsmInfo();
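
Target machines now receive `Optional<Reloc::Model>` instead of a bare enum, and each backend picks its own default when the user specified none; BPF defaults to PIC via `getEffectiveRelocModel`. An illustrative check, assuming `llvm/ADT/Optional.h` and `<cassert>` are available:

```
// Not part of the diff: the helper prefers an explicit model and only
// falls back to Reloc::PIC_ when the user passed nothing (llvm::None).
static void checkRelocDefault() {
  assert(getEffectiveRelocModel(None) == Reloc::PIC_);
  assert(getEffectiveRelocModel(Reloc::Static) == Reloc::Static);
}
```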
diff --git a/lib/Target/BPF/BPFTargetMachine.h b/lib/Target/BPF/BPFTargetMachine.h
index a0086df2d32c..644481446883 100644
--- a/lib/Target/BPF/BPFTargetMachine.h
+++ b/lib/Target/BPF/BPFTargetMachine.h
@@ -24,8 +24,9 @@ class BPFTargetMachine : public LLVMTargetMachine {
public:
BPFTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
- StringRef FS, const TargetOptions &Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OL);
+ StringRef FS, const TargetOptions &Options,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
const BPFSubtarget *getSubtargetImpl() const { return &Subtarget; }
const BPFSubtarget *getSubtargetImpl(const Function &) const override {
diff --git a/lib/Target/BPF/InstPrinter/Makefile b/lib/Target/BPF/InstPrinter/Makefile
deleted file mode 100644
index f46af83346d8..000000000000
--- a/lib/Target/BPF/InstPrinter/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/BPF/InstPrinter/Makefile -----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMBPFAsmPrinter
-
-# Hack: we need to include 'main' BPF target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp b/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
index 8c358cab62e8..2de40aab3a74 100644
--- a/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
+++ b/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
@@ -47,7 +47,8 @@ public:
bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {}
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override {}
bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
};
diff --git a/lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp b/lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp
index 87cdd5eb9dad..4b92e3eb019b 100644
--- a/lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp
+++ b/lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp
@@ -22,18 +22,18 @@ public:
~BPFELFObjectWriter() override;
protected:
- unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel) const override;
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
};
}
BPFELFObjectWriter::BPFELFObjectWriter(uint8_t OSABI)
- : MCELFObjectTargetWriter(/*Is64Bit*/ true, OSABI, ELF::EM_NONE,
+ : MCELFObjectTargetWriter(/*Is64Bit*/ true, OSABI, ELF::EM_BPF,
/*HasRelocationAddend*/ false) {}
BPFELFObjectWriter::~BPFELFObjectWriter() {}
-unsigned BPFELFObjectWriter::GetRelocType(const MCValue &Target,
+unsigned BPFELFObjectWriter::getRelocType(MCContext &Ctx, const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const {
// determine the type of the relocation
diff --git a/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h b/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h
index 1f440fe87871..9a2e223bcbd6 100644
--- a/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h
+++ b/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h
@@ -14,13 +14,11 @@
#ifndef LLVM_LIB_TARGET_BPF_MCTARGETDESC_BPFMCASMINFO_H
#define LLVM_LIB_TARGET_BPF_MCTARGETDESC_BPFMCASMINFO_H
-#include "llvm/ADT/StringRef.h"
-#include "llvm/MC/MCAsmInfo.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
class Target;
-class Triple;
class BPFMCAsmInfo : public MCAsmInfo {
public:
diff --git a/lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp b/lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp
index dc4ede30f191..c6561ddda26e 100644
--- a/lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp
+++ b/lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp
@@ -20,6 +20,7 @@
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/EndianStream.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
diff --git a/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp b/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp
index 840570ebc392..03d6b193fe27 100644
--- a/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp
+++ b/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp
@@ -15,7 +15,6 @@
#include "BPFMCTargetDesc.h"
#include "BPFMCAsmInfo.h"
#include "InstPrinter/BPFInstPrinter.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
@@ -51,14 +50,6 @@ static MCSubtargetInfo *createBPFMCSubtargetInfo(const Triple &TT,
return createBPFMCSubtargetInfoImpl(TT, CPU, FS);
}
-static MCCodeGenInfo *createBPFMCCodeGenInfo(const Triple &TT, Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
- X->initMCCodeGenInfo(RM, CM, OL);
- return X;
-}
-
static MCStreamer *createBPFMCStreamer(const Triple &T,
MCContext &Ctx, MCAsmBackend &MAB,
raw_pwrite_stream &OS, MCCodeEmitter *Emitter,
@@ -81,9 +72,6 @@ extern "C" void LLVMInitializeBPFTargetMC() {
// Register the MC asm info.
RegisterMCAsmInfo<BPFMCAsmInfo> X(*T);
- // Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(*T, createBPFMCCodeGenInfo);
-
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(*T, createBPFMCInstrInfo);
diff --git a/lib/Target/BPF/MCTargetDesc/Makefile b/lib/Target/BPF/MCTargetDesc/Makefile
deleted file mode 100644
index af70cd059e5d..000000000000
--- a/lib/Target/BPF/MCTargetDesc/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/BPF/MCTargetDesc/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMBPFDesc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/BPF/Makefile b/lib/Target/BPF/Makefile
deleted file mode 100644
index 7492f5edd517..000000000000
--- a/lib/Target/BPF/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-##===- lib/Target/BPF/Makefile -----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMBPFCodeGen
-TARGET = BPF
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = BPFGenRegisterInfo.inc BPFGenInstrInfo.inc \
- BPFGenAsmWriter.inc BPFGenAsmMatcher.inc BPFGenDAGISel.inc \
- BPFGenMCCodeEmitter.inc BPFGenSubtargetInfo.inc BPFGenCallingConv.inc
-
-DIRS = InstPrinter TargetInfo MCTargetDesc
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/BPF/TargetInfo/Makefile b/lib/Target/BPF/TargetInfo/Makefile
deleted file mode 100644
index 02af58ea1876..000000000000
--- a/lib/Target/BPF/TargetInfo/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/BPF/TargetInfo/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMBPFInfo
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/CppBackend/CMakeLists.txt b/lib/Target/CppBackend/CMakeLists.txt
deleted file mode 100644
index 515e1dd7e39f..000000000000
--- a/lib/Target/CppBackend/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-add_llvm_target(CppBackendCodeGen
- CPPBackend.cpp
- )
-
-add_subdirectory(TargetInfo)
diff --git a/lib/Target/CppBackend/CPPBackend.cpp b/lib/Target/CppBackend/CPPBackend.cpp
deleted file mode 100644
index 5ea6551ebc9c..000000000000
--- a/lib/Target/CppBackend/CPPBackend.cpp
+++ /dev/null
@@ -1,2143 +0,0 @@
-//===-- CPPBackend.cpp - Library for converting LLVM code to C++ code -----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the writing of the LLVM IR as a set of C++ calls to the
-// LLVM IR interface. The input module is assumed to be verified.
-//
-//===----------------------------------------------------------------------===//
-
-#include "CPPTargetMachine.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/Config/config.h"
-#include "llvm/IR/CallingConv.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/InlineAsm.h"
-#include "llvm/IR/Instruction.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/LegacyPassManager.h"
-#include "llvm/IR/Module.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/Support/TargetRegistry.h"
-#include <algorithm>
-#include <cctype>
-#include <cstdio>
-#include <map>
-#include <set>
-using namespace llvm;
-
-static cl::opt<std::string>
-FuncName("cppfname", cl::desc("Specify the name of the generated function"),
- cl::value_desc("function name"));
-
-enum WhatToGenerate {
- GenProgram,
- GenModule,
- GenContents,
- GenFunction,
- GenFunctions,
- GenInline,
- GenVariable,
- GenType
-};
-
-static cl::opt<WhatToGenerate> GenerationType("cppgen", cl::Optional,
- cl::desc("Choose what kind of output to generate"),
- cl::init(GenProgram),
- cl::values(
- clEnumValN(GenProgram, "program", "Generate a complete program"),
- clEnumValN(GenModule, "module", "Generate a module definition"),
- clEnumValN(GenContents, "contents", "Generate contents of a module"),
- clEnumValN(GenFunction, "function", "Generate a function definition"),
- clEnumValN(GenFunctions,"functions", "Generate all function definitions"),
- clEnumValN(GenInline, "inline", "Generate an inline function"),
- clEnumValN(GenVariable, "variable", "Generate a variable definition"),
- clEnumValN(GenType, "type", "Generate a type definition"),
- clEnumValEnd
- )
-);
-
-static cl::opt<std::string> NameToGenerate("cppfor", cl::Optional,
- cl::desc("Specify the name of the thing to generate"),
- cl::init("!bad!"));
-
-extern "C" void LLVMInitializeCppBackendTarget() {
- // Register the target.
- RegisterTargetMachine<CPPTargetMachine> X(TheCppBackendTarget);
-}
-
-namespace {
- typedef std::vector<Type*> TypeList;
- typedef std::map<Type*,std::string> TypeMap;
- typedef std::map<const Value*,std::string> ValueMap;
- typedef std::set<std::string> NameSet;
- typedef std::set<Type*> TypeSet;
- typedef std::set<const Value*> ValueSet;
- typedef std::map<const Value*,std::string> ForwardRefMap;
-
- /// CppWriter - This class is the main chunk of code that converts an LLVM
- /// module to a C++ translation unit.
- class CppWriter : public ModulePass {
- std::unique_ptr<formatted_raw_ostream> OutOwner;
- formatted_raw_ostream &Out;
- const Module *TheModule;
- uint64_t uniqueNum;
- TypeMap TypeNames;
- ValueMap ValueNames;
- NameSet UsedNames;
- TypeSet DefinedTypes;
- ValueSet DefinedValues;
- ForwardRefMap ForwardRefs;
- bool is_inline;
- unsigned indent_level;
-
- public:
- static char ID;
- explicit CppWriter(std::unique_ptr<formatted_raw_ostream> o)
- : ModulePass(ID), OutOwner(std::move(o)), Out(*OutOwner), uniqueNum(0),
- is_inline(false), indent_level(0) {}
-
- const char *getPassName() const override { return "C++ backend"; }
-
- bool runOnModule(Module &M) override;
-
- void printProgram(const std::string& fname, const std::string& modName );
- void printModule(const std::string& fname, const std::string& modName );
- void printContents(const std::string& fname, const std::string& modName );
- void printFunction(const std::string& fname, const std::string& funcName );
- void printFunctions();
- void printInline(const std::string& fname, const std::string& funcName );
- void printVariable(const std::string& fname, const std::string& varName );
- void printType(const std::string& fname, const std::string& typeName );
-
- void error(const std::string& msg);
-
-
- formatted_raw_ostream& nl(formatted_raw_ostream &Out, int delta = 0);
- inline void in() { indent_level++; }
- inline void out() { if (indent_level >0) indent_level--; }
-
- private:
- void printLinkageType(GlobalValue::LinkageTypes LT);
- void printVisibilityType(GlobalValue::VisibilityTypes VisTypes);
- void printDLLStorageClassType(GlobalValue::DLLStorageClassTypes DSCType);
- void printThreadLocalMode(GlobalVariable::ThreadLocalMode TLM);
- void printCallingConv(CallingConv::ID cc);
- void printEscapedString(const std::string& str);
- void printCFP(const ConstantFP* CFP);
-
- std::string getCppName(Type* val);
- inline void printCppName(Type* val);
-
- std::string getCppName(const Value* val);
- inline void printCppName(const Value* val);
-
- void printAttributes(const AttributeSet &PAL, const std::string &name);
- void printType(Type* Ty);
- void printTypes(const Module* M);
-
- void printConstant(const Constant *CPV);
- void printConstants(const Module* M);
-
- void printVariableUses(const GlobalVariable *GV);
- void printVariableHead(const GlobalVariable *GV);
- void printVariableBody(const GlobalVariable *GV);
-
- void printFunctionUses(const Function *F);
- void printFunctionHead(const Function *F);
- void printFunctionBody(const Function *F);
- void printInstruction(const Instruction *I, const std::string& bbname);
- std::string getOpName(const Value*);
-
- void printModuleBody();
- };
-} // end anonymous namespace.
-
-formatted_raw_ostream &CppWriter::nl(formatted_raw_ostream &Out, int delta) {
- Out << '\n';
- if (delta >= 0 || indent_level >= unsigned(-delta))
- indent_level += delta;
- Out.indent(indent_level);
- return Out;
-}
-
-static inline void sanitize(std::string &str) {
- for (size_t i = 0; i < str.length(); ++i)
- if (!isalnum(str[i]) && str[i] != '_')
- str[i] = '_';
-}
-
-static std::string getTypePrefix(Type *Ty) {
- switch (Ty->getTypeID()) {
- case Type::VoidTyID: return "void_";
- case Type::IntegerTyID:
- return "int" + utostr(cast<IntegerType>(Ty)->getBitWidth()) + "_";
- case Type::FloatTyID: return "float_";
- case Type::DoubleTyID: return "double_";
- case Type::LabelTyID: return "label_";
- case Type::FunctionTyID: return "func_";
- case Type::StructTyID: return "struct_";
- case Type::ArrayTyID: return "array_";
- case Type::PointerTyID: return "ptr_";
- case Type::VectorTyID: return "packed_";
- default: return "other_";
- }
-}
-
-void CppWriter::error(const std::string& msg) {
- report_fatal_error(msg);
-}
-
-static inline std::string ftostr(const APFloat& V) {
- std::string Buf;
- if (&V.getSemantics() == &APFloat::IEEEdouble) {
- raw_string_ostream(Buf) << V.convertToDouble();
- return Buf;
- } else if (&V.getSemantics() == &APFloat::IEEEsingle) {
- raw_string_ostream(Buf) << (double)V.convertToFloat();
- return Buf;
- }
- return "<unknown format in ftostr>"; // error
-}
-
-// printCFP - Print a floating point constant .. very carefully :)
-// This makes sure that conversion to/from floating yields the same binary
-// result so that we don't lose precision.
-void CppWriter::printCFP(const ConstantFP *CFP) {
- bool ignored;
- APFloat APF = APFloat(CFP->getValueAPF()); // copy
- if (CFP->getType() == Type::getFloatTy(CFP->getContext()))
- APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &ignored);
- Out << "ConstantFP::get(mod->getContext(), ";
- Out << "APFloat(";
-#if HAVE_PRINTF_A
- char Buffer[100];
- sprintf(Buffer, "%A", APF.convertToDouble());
- if ((!strncmp(Buffer, "0x", 2) ||
- !strncmp(Buffer, "-0x", 3) ||
- !strncmp(Buffer, "+0x", 3)) &&
- APF.bitwiseIsEqual(APFloat(atof(Buffer)))) {
- if (CFP->getType() == Type::getDoubleTy(CFP->getContext()))
- Out << "BitsToDouble(" << Buffer << ")";
- else
- Out << "BitsToFloat((float)" << Buffer << ")";
- Out << ")";
- } else {
-#endif
- std::string StrVal = ftostr(CFP->getValueAPF());
-
- while (StrVal[0] == ' ')
- StrVal.erase(StrVal.begin());
-
- // Check to make sure that the stringized number is not some string like
- // "Inf" or NaN. Check that the string matches the "[-+]?[0-9]" regex.
- if (((StrVal[0] >= '0' && StrVal[0] <= '9') ||
- ((StrVal[0] == '-' || StrVal[0] == '+') &&
- (StrVal[1] >= '0' && StrVal[1] <= '9'))) &&
- (CFP->isExactlyValue(atof(StrVal.c_str())))) {
- if (CFP->getType() == Type::getDoubleTy(CFP->getContext()))
- Out << StrVal;
- else
- Out << StrVal << "f";
- } else if (CFP->getType() == Type::getDoubleTy(CFP->getContext()))
- Out << "BitsToDouble(0x"
- << utohexstr(CFP->getValueAPF().bitcastToAPInt().getZExtValue())
- << "ULL) /* " << StrVal << " */";
- else
- Out << "BitsToFloat(0x"
- << utohexstr((uint32_t)CFP->getValueAPF().
- bitcastToAPInt().getZExtValue())
- << "U) /* " << StrVal << " */";
- Out << ")";
-#if HAVE_PRINTF_A
- }
-#endif
- Out << ")";
-}
-
-void CppWriter::printCallingConv(CallingConv::ID cc){
- // Print the calling convention.
- switch (cc) {
- case CallingConv::C: Out << "CallingConv::C"; break;
- case CallingConv::Fast: Out << "CallingConv::Fast"; break;
- case CallingConv::Cold: Out << "CallingConv::Cold"; break;
- case CallingConv::FirstTargetCC: Out << "CallingConv::FirstTargetCC"; break;
- default: Out << cc; break;
- }
-}
-
-void CppWriter::printLinkageType(GlobalValue::LinkageTypes LT) {
- switch (LT) {
- case GlobalValue::InternalLinkage:
- Out << "GlobalValue::InternalLinkage"; break;
- case GlobalValue::PrivateLinkage:
- Out << "GlobalValue::PrivateLinkage"; break;
- case GlobalValue::AvailableExternallyLinkage:
- Out << "GlobalValue::AvailableExternallyLinkage "; break;
- case GlobalValue::LinkOnceAnyLinkage:
- Out << "GlobalValue::LinkOnceAnyLinkage "; break;
- case GlobalValue::LinkOnceODRLinkage:
- Out << "GlobalValue::LinkOnceODRLinkage "; break;
- case GlobalValue::WeakAnyLinkage:
- Out << "GlobalValue::WeakAnyLinkage"; break;
- case GlobalValue::WeakODRLinkage:
- Out << "GlobalValue::WeakODRLinkage"; break;
- case GlobalValue::AppendingLinkage:
- Out << "GlobalValue::AppendingLinkage"; break;
- case GlobalValue::ExternalLinkage:
- Out << "GlobalValue::ExternalLinkage"; break;
- case GlobalValue::ExternalWeakLinkage:
- Out << "GlobalValue::ExternalWeakLinkage"; break;
- case GlobalValue::CommonLinkage:
- Out << "GlobalValue::CommonLinkage"; break;
- }
-}
-
-void CppWriter::printVisibilityType(GlobalValue::VisibilityTypes VisType) {
- switch (VisType) {
- case GlobalValue::DefaultVisibility:
- Out << "GlobalValue::DefaultVisibility";
- break;
- case GlobalValue::HiddenVisibility:
- Out << "GlobalValue::HiddenVisibility";
- break;
- case GlobalValue::ProtectedVisibility:
- Out << "GlobalValue::ProtectedVisibility";
- break;
- }
-}
-
-void CppWriter::printDLLStorageClassType(
- GlobalValue::DLLStorageClassTypes DSCType) {
- switch (DSCType) {
- case GlobalValue::DefaultStorageClass:
- Out << "GlobalValue::DefaultStorageClass";
- break;
- case GlobalValue::DLLImportStorageClass:
- Out << "GlobalValue::DLLImportStorageClass";
- break;
- case GlobalValue::DLLExportStorageClass:
- Out << "GlobalValue::DLLExportStorageClass";
- break;
- }
-}
-
-void CppWriter::printThreadLocalMode(GlobalVariable::ThreadLocalMode TLM) {
- switch (TLM) {
- case GlobalVariable::NotThreadLocal:
- Out << "GlobalVariable::NotThreadLocal";
- break;
- case GlobalVariable::GeneralDynamicTLSModel:
- Out << "GlobalVariable::GeneralDynamicTLSModel";
- break;
- case GlobalVariable::LocalDynamicTLSModel:
- Out << "GlobalVariable::LocalDynamicTLSModel";
- break;
- case GlobalVariable::InitialExecTLSModel:
- Out << "GlobalVariable::InitialExecTLSModel";
- break;
- case GlobalVariable::LocalExecTLSModel:
- Out << "GlobalVariable::LocalExecTLSModel";
- break;
- }
-}
-
-// printEscapedString - Print each character of the specified string, escaping
-// it if it is not printable or if it is an escape char.
-void CppWriter::printEscapedString(const std::string &Str) {
- for (unsigned i = 0, e = Str.size(); i != e; ++i) {
- unsigned char C = Str[i];
- if (isprint(C) && C != '"' && C != '\\') {
- Out << C;
- } else {
- Out << "\\x"
- << (char) ((C/16 < 10) ? ( C/16 +'0') : ( C/16 -10+'A'))
- << (char)(((C&15) < 10) ? ((C&15)+'0') : ((C&15)-10+'A'));
- }
- }
-}
-
-std::string CppWriter::getCppName(Type* Ty) {
- switch (Ty->getTypeID()) {
- default:
- break;
- case Type::VoidTyID:
- return "Type::getVoidTy(mod->getContext())";
- case Type::IntegerTyID: {
- unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
- return "IntegerType::get(mod->getContext(), " + utostr(BitWidth) + ")";
- }
- case Type::X86_FP80TyID:
- return "Type::getX86_FP80Ty(mod->getContext())";
- case Type::FloatTyID:
- return "Type::getFloatTy(mod->getContext())";
- case Type::DoubleTyID:
- return "Type::getDoubleTy(mod->getContext())";
- case Type::LabelTyID:
- return "Type::getLabelTy(mod->getContext())";
- case Type::X86_MMXTyID:
- return "Type::getX86_MMXTy(mod->getContext())";
- }
-
- // Now, see if we've seen the type before and return that
- TypeMap::iterator I = TypeNames.find(Ty);
- if (I != TypeNames.end())
- return I->second;
-
- // Okay, let's build a new name for this type. Start with a prefix
- const char* prefix = nullptr;
- switch (Ty->getTypeID()) {
- case Type::FunctionTyID: prefix = "FuncTy_"; break;
- case Type::StructTyID: prefix = "StructTy_"; break;
- case Type::ArrayTyID: prefix = "ArrayTy_"; break;
- case Type::PointerTyID: prefix = "PointerTy_"; break;
- case Type::VectorTyID: prefix = "VectorTy_"; break;
- default: prefix = "OtherTy_"; break; // prevent breakage
- }
-
- // See if the type has a name in the symboltable and build accordingly
- std::string name;
- if (StructType *STy = dyn_cast<StructType>(Ty))
- if (STy->hasName())
- name = STy->getName();
-
- if (name.empty())
- name = utostr(uniqueNum++);
-
- name = std::string(prefix) + name;
- sanitize(name);
-
- // Save the name
- return TypeNames[Ty] = name;
-}
-
-void CppWriter::printCppName(Type* Ty) {
- printEscapedString(getCppName(Ty));
-}
-
-std::string CppWriter::getCppName(const Value* val) {
- std::string name;
- ValueMap::iterator I = ValueNames.find(val);
- if (I != ValueNames.end() && I->first == val)
- return I->second;
-
- if (const GlobalVariable* GV = dyn_cast<GlobalVariable>(val)) {
- name = std::string("gvar_") +
- getTypePrefix(GV->getType()->getElementType());
- } else if (isa<Function>(val)) {
- name = std::string("func_");
- } else if (const Constant* C = dyn_cast<Constant>(val)) {
- name = std::string("const_") + getTypePrefix(C->getType());
- } else if (const Argument* Arg = dyn_cast<Argument>(val)) {
- if (is_inline) {
- unsigned argNum = std::distance(Arg->getParent()->arg_begin(),
- Function::const_arg_iterator(Arg)) + 1;
- name = std::string("arg_") + utostr(argNum);
- NameSet::iterator NI = UsedNames.find(name);
- if (NI != UsedNames.end())
- name += std::string("_") + utostr(uniqueNum++);
- UsedNames.insert(name);
- return ValueNames[val] = name;
- } else {
- name = getTypePrefix(val->getType());
- }
- } else {
- name = getTypePrefix(val->getType());
- }
- if (val->hasName())
- name += val->getName();
- else
- name += utostr(uniqueNum++);
- sanitize(name);
- NameSet::iterator NI = UsedNames.find(name);
- if (NI != UsedNames.end())
- name += std::string("_") + utostr(uniqueNum++);
- UsedNames.insert(name);
- return ValueNames[val] = name;
-}
-
-void CppWriter::printCppName(const Value* val) {
- printEscapedString(getCppName(val));
-}
-
-void CppWriter::printAttributes(const AttributeSet &PAL,
- const std::string &name) {
- Out << "AttributeSet " << name << "_PAL;";
- nl(Out);
- if (!PAL.isEmpty()) {
- Out << '{'; in(); nl(Out);
- Out << "SmallVector<AttributeSet, 4> Attrs;"; nl(Out);
- Out << "AttributeSet PAS;"; in(); nl(Out);
- for (unsigned i = 0; i < PAL.getNumSlots(); ++i) {
- unsigned index = PAL.getSlotIndex(i);
- AttrBuilder attrs(PAL.getSlotAttributes(i), index);
- Out << "{"; in(); nl(Out);
- Out << "AttrBuilder B;"; nl(Out);
-
-#define HANDLE_ATTR(X) \
- if (attrs.contains(Attribute::X)) { \
- Out << "B.addAttribute(Attribute::" #X ");"; nl(Out); \
- attrs.removeAttribute(Attribute::X); \
- }
-
- HANDLE_ATTR(SExt);
- HANDLE_ATTR(ZExt);
- HANDLE_ATTR(NoReturn);
- HANDLE_ATTR(InReg);
- HANDLE_ATTR(StructRet);
- HANDLE_ATTR(NoUnwind);
- HANDLE_ATTR(NoAlias);
- HANDLE_ATTR(ByVal);
- HANDLE_ATTR(InAlloca);
- HANDLE_ATTR(Nest);
- HANDLE_ATTR(ReadNone);
- HANDLE_ATTR(ReadOnly);
- HANDLE_ATTR(NoInline);
- HANDLE_ATTR(AlwaysInline);
- HANDLE_ATTR(OptimizeNone);
- HANDLE_ATTR(OptimizeForSize);
- HANDLE_ATTR(StackProtect);
- HANDLE_ATTR(StackProtectReq);
- HANDLE_ATTR(StackProtectStrong);
- HANDLE_ATTR(SafeStack);
- HANDLE_ATTR(NoCapture);
- HANDLE_ATTR(NoRedZone);
- HANDLE_ATTR(NoImplicitFloat);
- HANDLE_ATTR(Naked);
- HANDLE_ATTR(InlineHint);
- HANDLE_ATTR(ReturnsTwice);
- HANDLE_ATTR(UWTable);
- HANDLE_ATTR(NonLazyBind);
- HANDLE_ATTR(MinSize);
-#undef HANDLE_ATTR
-
- if (attrs.contains(Attribute::StackAlignment)) {
- Out << "B.addStackAlignmentAttr(" << attrs.getStackAlignment()<<')';
- nl(Out);
- attrs.removeAttribute(Attribute::StackAlignment);
- }
-
- Out << "PAS = AttributeSet::get(mod->getContext(), ";
- if (index == ~0U)
- Out << "~0U,";
- else
- Out << index << "U,";
- Out << " B);"; out(); nl(Out);
- Out << "}"; out(); nl(Out);
- nl(Out);
- Out << "Attrs.push_back(PAS);"; nl(Out);
- }
- Out << name << "_PAL = AttributeSet::get(mod->getContext(), Attrs);";
- nl(Out);
- out(); nl(Out);
- Out << '}'; nl(Out);
- }
-}
-
-void CppWriter::printType(Type* Ty) {
- // We don't print definitions for primitive types
- if (Ty->isFloatingPointTy() || Ty->isX86_MMXTy() || Ty->isIntegerTy() ||
- Ty->isLabelTy() || Ty->isMetadataTy() || Ty->isVoidTy() ||
- Ty->isTokenTy())
- return;
-
- // If we already defined this type, we don't need to define it again.
- if (DefinedTypes.find(Ty) != DefinedTypes.end())
- return;
-
- // Everything below needs the name for the type so get it now.
- std::string typeName(getCppName(Ty));
-
- // Print the type definition
- switch (Ty->getTypeID()) {
- case Type::FunctionTyID: {
- FunctionType* FT = cast<FunctionType>(Ty);
- Out << "std::vector<Type*>" << typeName << "_args;";
- nl(Out);
- FunctionType::param_iterator PI = FT->param_begin();
- FunctionType::param_iterator PE = FT->param_end();
- for (; PI != PE; ++PI) {
- Type* argTy = static_cast<Type*>(*PI);
- printType(argTy);
- std::string argName(getCppName(argTy));
- Out << typeName << "_args.push_back(" << argName;
- Out << ");";
- nl(Out);
- }
- printType(FT->getReturnType());
- std::string retTypeName(getCppName(FT->getReturnType()));
- Out << "FunctionType* " << typeName << " = FunctionType::get(";
- in(); nl(Out) << "/*Result=*/" << retTypeName;
- Out << ",";
- nl(Out) << "/*Params=*/" << typeName << "_args,";
- nl(Out) << "/*isVarArg=*/" << (FT->isVarArg() ? "true" : "false") << ");";
- out();
- nl(Out);
- break;
- }
- case Type::StructTyID: {
- StructType* ST = cast<StructType>(Ty);
- if (!ST->isLiteral()) {
- Out << "StructType *" << typeName << " = mod->getTypeByName(\"";
- printEscapedString(ST->getName());
- Out << "\");";
- nl(Out);
- Out << "if (!" << typeName << ") {";
- nl(Out);
- Out << typeName << " = ";
- Out << "StructType::create(mod->getContext(), \"";
- printEscapedString(ST->getName());
- Out << "\");";
- nl(Out);
- Out << "}";
- nl(Out);
- // Indicate that this type is now defined.
- DefinedTypes.insert(Ty);
- }
-
- Out << "std::vector<Type*>" << typeName << "_fields;";
- nl(Out);
- StructType::element_iterator EI = ST->element_begin();
- StructType::element_iterator EE = ST->element_end();
- for (; EI != EE; ++EI) {
- Type* fieldTy = static_cast<Type*>(*EI);
- printType(fieldTy);
- std::string fieldName(getCppName(fieldTy));
- Out << typeName << "_fields.push_back(" << fieldName;
- Out << ");";
- nl(Out);
- }
-
- if (ST->isLiteral()) {
- Out << "StructType *" << typeName << " = ";
- Out << "StructType::get(" << "mod->getContext(), ";
- } else {
- Out << "if (" << typeName << "->isOpaque()) {";
- nl(Out);
- Out << typeName << "->setBody(";
- }
-
- Out << typeName << "_fields, /*isPacked=*/"
- << (ST->isPacked() ? "true" : "false") << ");";
- nl(Out);
- if (!ST->isLiteral()) {
- Out << "}";
- nl(Out);
- }
- break;
- }
- case Type::ArrayTyID: {
- ArrayType* AT = cast<ArrayType>(Ty);
- Type* ET = AT->getElementType();
- printType(ET);
- if (DefinedTypes.find(Ty) == DefinedTypes.end()) {
- std::string elemName(getCppName(ET));
- Out << "ArrayType* " << typeName << " = ArrayType::get("
- << elemName << ", " << AT->getNumElements() << ");";
- nl(Out);
- }
- break;
- }
- case Type::PointerTyID: {
- PointerType* PT = cast<PointerType>(Ty);
- Type* ET = PT->getElementType();
- printType(ET);
- if (DefinedTypes.find(Ty) == DefinedTypes.end()) {
- std::string elemName(getCppName(ET));
- Out << "PointerType* " << typeName << " = PointerType::get("
- << elemName << ", " << PT->getAddressSpace() << ");";
- nl(Out);
- }
- break;
- }
- case Type::VectorTyID: {
- VectorType* PT = cast<VectorType>(Ty);
- Type* ET = PT->getElementType();
- printType(ET);
- if (DefinedTypes.find(Ty) == DefinedTypes.end()) {
- std::string elemName(getCppName(ET));
- Out << "VectorType* " << typeName << " = VectorType::get("
- << elemName << ", " << PT->getNumElements() << ");";
- nl(Out);
- }
- break;
- }
- default:
- error("Invalid TypeID");
- }
-
- // Indicate that this type is now defined.
- DefinedTypes.insert(Ty);
-
- // Finally, separate the type definition from other with a newline.
- nl(Out);
-}
-
-void CppWriter::printTypes(const Module* M) {
- // Add all of the global variables to the value table.
- for (Module::const_global_iterator I = TheModule->global_begin(),
- E = TheModule->global_end(); I != E; ++I) {
- if (I->hasInitializer())
- printType(I->getInitializer()->getType());
- printType(I->getType());
- }
-
- // Add all the functions to the table
- for (Module::const_iterator FI = TheModule->begin(), FE = TheModule->end();
- FI != FE; ++FI) {
- printType(FI->getReturnType());
- printType(FI->getFunctionType());
- // Add all the function arguments
- for (Function::const_arg_iterator AI = FI->arg_begin(),
- AE = FI->arg_end(); AI != AE; ++AI) {
- printType(AI->getType());
- }
-
- // Add all of the basic blocks and instructions
- for (Function::const_iterator BB = FI->begin(),
- E = FI->end(); BB != E; ++BB) {
- printType(BB->getType());
- for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;
- ++I) {
- printType(I->getType());
- for (unsigned i = 0; i < I->getNumOperands(); ++i)
- printType(I->getOperand(i)->getType());
- }
- }
- }
-}
-
-
-// printConstant - Print out a constant pool entry...
-void CppWriter::printConstant(const Constant *CV) {
- // First, if the constant is actually a GlobalValue (variable or function)
- // or its already in the constant list then we've printed it already and we
- // can just return.
- if (isa<GlobalValue>(CV) || ValueNames.find(CV) != ValueNames.end())
- return;
-
- std::string constName(getCppName(CV));
- std::string typeName(getCppName(CV->getType()));
-
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
- std::string constValue = CI->getValue().toString(10, true);
- Out << "ConstantInt* " << constName
- << " = ConstantInt::get(mod->getContext(), APInt("
- << cast<IntegerType>(CI->getType())->getBitWidth()
- << ", StringRef(\"" << constValue << "\"), 10));";
- } else if (isa<ConstantAggregateZero>(CV)) {
- Out << "ConstantAggregateZero* " << constName
- << " = ConstantAggregateZero::get(" << typeName << ");";
- } else if (isa<ConstantPointerNull>(CV)) {
- Out << "ConstantPointerNull* " << constName
- << " = ConstantPointerNull::get(" << typeName << ");";
- } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
- Out << "ConstantFP* " << constName << " = ";
- printCFP(CFP);
- Out << ";";
- } else if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) {
- Out << "std::vector<Constant*> " << constName << "_elems;";
- nl(Out);
- unsigned N = CA->getNumOperands();
- for (unsigned i = 0; i < N; ++i) {
- printConstant(CA->getOperand(i)); // recurse to print operands
- Out << constName << "_elems.push_back("
- << getCppName(CA->getOperand(i)) << ");";
- nl(Out);
- }
- Out << "Constant* " << constName << " = ConstantArray::get("
- << typeName << ", " << constName << "_elems);";
- } else if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) {
- Out << "std::vector<Constant*> " << constName << "_fields;";
- nl(Out);
- unsigned N = CS->getNumOperands();
- for (unsigned i = 0; i < N; i++) {
- printConstant(CS->getOperand(i));
- Out << constName << "_fields.push_back("
- << getCppName(CS->getOperand(i)) << ");";
- nl(Out);
- }
- Out << "Constant* " << constName << " = ConstantStruct::get("
- << typeName << ", " << constName << "_fields);";
- } else if (const ConstantVector *CVec = dyn_cast<ConstantVector>(CV)) {
- Out << "std::vector<Constant*> " << constName << "_elems;";
- nl(Out);
- unsigned N = CVec->getNumOperands();
- for (unsigned i = 0; i < N; ++i) {
- printConstant(CVec->getOperand(i));
- Out << constName << "_elems.push_back("
- << getCppName(CVec->getOperand(i)) << ");";
- nl(Out);
- }
- Out << "Constant* " << constName << " = ConstantVector::get("
- << typeName << ", " << constName << "_elems);";
- } else if (isa<UndefValue>(CV)) {
- Out << "UndefValue* " << constName << " = UndefValue::get("
- << typeName << ");";
- } else if (const ConstantDataSequential *CDS =
- dyn_cast<ConstantDataSequential>(CV)) {
- if (CDS->isString()) {
- Out << "Constant *" << constName <<
- " = ConstantDataArray::getString(mod->getContext(), \"";
- StringRef Str = CDS->getAsString();
- bool nullTerminate = false;
- if (Str.back() == 0) {
- Str = Str.drop_back();
- nullTerminate = true;
- }
- printEscapedString(Str);
- // Determine if we want null termination or not.
- if (nullTerminate)
- Out << "\", true);";
- else
- Out << "\", false);";// No null terminator
- } else {
-      // TODO: Could generate more efficient code by emitting CDS calls instead.
- Out << "std::vector<Constant*> " << constName << "_elems;";
- nl(Out);
- for (unsigned i = 0; i != CDS->getNumElements(); ++i) {
- Constant *Elt = CDS->getElementAsConstant(i);
- printConstant(Elt);
- Out << constName << "_elems.push_back(" << getCppName(Elt) << ");";
- nl(Out);
- }
- Out << "Constant* " << constName;
-
- if (isa<ArrayType>(CDS->getType()))
- Out << " = ConstantArray::get(";
- else
- Out << " = ConstantVector::get(";
- Out << typeName << ", " << constName << "_elems);";
- }
- } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
- if (CE->getOpcode() == Instruction::GetElementPtr) {
- Out << "std::vector<Constant*> " << constName << "_indices;";
- nl(Out);
- printConstant(CE->getOperand(0));
- for (unsigned i = 1; i < CE->getNumOperands(); ++i ) {
- printConstant(CE->getOperand(i));
- Out << constName << "_indices.push_back("
- << getCppName(CE->getOperand(i)) << ");";
- nl(Out);
- }
- Out << "Constant* " << constName
- << " = ConstantExpr::getGetElementPtr("
- << getCppName(CE->getOperand(0)) << ", "
- << constName << "_indices);";
- } else if (CE->isCast()) {
- printConstant(CE->getOperand(0));
- Out << "Constant* " << constName << " = ConstantExpr::getCast(";
- switch (CE->getOpcode()) {
- default: llvm_unreachable("Invalid cast opcode");
- case Instruction::Trunc: Out << "Instruction::Trunc"; break;
- case Instruction::ZExt: Out << "Instruction::ZExt"; break;
- case Instruction::SExt: Out << "Instruction::SExt"; break;
- case Instruction::FPTrunc: Out << "Instruction::FPTrunc"; break;
- case Instruction::FPExt: Out << "Instruction::FPExt"; break;
- case Instruction::FPToUI: Out << "Instruction::FPToUI"; break;
- case Instruction::FPToSI: Out << "Instruction::FPToSI"; break;
- case Instruction::UIToFP: Out << "Instruction::UIToFP"; break;
- case Instruction::SIToFP: Out << "Instruction::SIToFP"; break;
- case Instruction::PtrToInt: Out << "Instruction::PtrToInt"; break;
- case Instruction::IntToPtr: Out << "Instruction::IntToPtr"; break;
- case Instruction::BitCast: Out << "Instruction::BitCast"; break;
- }
- Out << ", " << getCppName(CE->getOperand(0)) << ", "
- << getCppName(CE->getType()) << ");";
- } else {
- unsigned N = CE->getNumOperands();
- for (unsigned i = 0; i < N; ++i ) {
- printConstant(CE->getOperand(i));
- }
- Out << "Constant* " << constName << " = ConstantExpr::";
- switch (CE->getOpcode()) {
- case Instruction::Add: Out << "getAdd("; break;
- case Instruction::FAdd: Out << "getFAdd("; break;
- case Instruction::Sub: Out << "getSub("; break;
- case Instruction::FSub: Out << "getFSub("; break;
- case Instruction::Mul: Out << "getMul("; break;
- case Instruction::FMul: Out << "getFMul("; break;
- case Instruction::UDiv: Out << "getUDiv("; break;
- case Instruction::SDiv: Out << "getSDiv("; break;
- case Instruction::FDiv: Out << "getFDiv("; break;
- case Instruction::URem: Out << "getURem("; break;
- case Instruction::SRem: Out << "getSRem("; break;
- case Instruction::FRem: Out << "getFRem("; break;
- case Instruction::And: Out << "getAnd("; break;
- case Instruction::Or: Out << "getOr("; break;
- case Instruction::Xor: Out << "getXor("; break;
- case Instruction::ICmp:
- Out << "getICmp(ICmpInst::ICMP_";
- switch (CE->getPredicate()) {
- case ICmpInst::ICMP_EQ: Out << "EQ"; break;
- case ICmpInst::ICMP_NE: Out << "NE"; break;
- case ICmpInst::ICMP_SLT: Out << "SLT"; break;
- case ICmpInst::ICMP_ULT: Out << "ULT"; break;
- case ICmpInst::ICMP_SGT: Out << "SGT"; break;
- case ICmpInst::ICMP_UGT: Out << "UGT"; break;
- case ICmpInst::ICMP_SLE: Out << "SLE"; break;
- case ICmpInst::ICMP_ULE: Out << "ULE"; break;
- case ICmpInst::ICMP_SGE: Out << "SGE"; break;
- case ICmpInst::ICMP_UGE: Out << "UGE"; break;
- default: error("Invalid ICmp Predicate");
- }
- break;
- case Instruction::FCmp:
- Out << "getFCmp(FCmpInst::FCMP_";
- switch (CE->getPredicate()) {
- case FCmpInst::FCMP_FALSE: Out << "FALSE"; break;
- case FCmpInst::FCMP_ORD: Out << "ORD"; break;
- case FCmpInst::FCMP_UNO: Out << "UNO"; break;
- case FCmpInst::FCMP_OEQ: Out << "OEQ"; break;
- case FCmpInst::FCMP_UEQ: Out << "UEQ"; break;
- case FCmpInst::FCMP_ONE: Out << "ONE"; break;
- case FCmpInst::FCMP_UNE: Out << "UNE"; break;
- case FCmpInst::FCMP_OLT: Out << "OLT"; break;
- case FCmpInst::FCMP_ULT: Out << "ULT"; break;
- case FCmpInst::FCMP_OGT: Out << "OGT"; break;
- case FCmpInst::FCMP_UGT: Out << "UGT"; break;
- case FCmpInst::FCMP_OLE: Out << "OLE"; break;
- case FCmpInst::FCMP_ULE: Out << "ULE"; break;
- case FCmpInst::FCMP_OGE: Out << "OGE"; break;
- case FCmpInst::FCMP_UGE: Out << "UGE"; break;
- case FCmpInst::FCMP_TRUE: Out << "TRUE"; break;
- default: error("Invalid FCmp Predicate");
- }
- break;
- case Instruction::Shl: Out << "getShl("; break;
- case Instruction::LShr: Out << "getLShr("; break;
- case Instruction::AShr: Out << "getAShr("; break;
- case Instruction::Select: Out << "getSelect("; break;
- case Instruction::ExtractElement: Out << "getExtractElement("; break;
- case Instruction::InsertElement: Out << "getInsertElement("; break;
- case Instruction::ShuffleVector: Out << "getShuffleVector("; break;
- default:
- error("Invalid constant expression");
- break;
- }
- Out << getCppName(CE->getOperand(0));
- for (unsigned i = 1; i < CE->getNumOperands(); ++i)
- Out << ", " << getCppName(CE->getOperand(i));
- Out << ");";
- }
- } else if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV)) {
- Out << "Constant* " << constName << " = ";
- Out << "BlockAddress::get(" << getOpName(BA->getBasicBlock()) << ");";
- } else {
- error("Bad Constant");
- Out << "Constant* " << constName << " = 0; ";
- }
- nl(Out);
-}
-
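// Illustrative sketch (not from the source): for a 32-bit constant 42, the
// ConstantInt branch above emits code along these lines, where
// "const_int32_42" stands in for the generated name:
//
//   ConstantInt* const_int32_42 = ConstantInt::get(mod->getContext(),
//       APInt(32, StringRef("42"), 10));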
-void CppWriter::printConstants(const Module* M) {
- // Traverse all the global variables looking for constant initializers
- for (Module::const_global_iterator I = TheModule->global_begin(),
- E = TheModule->global_end(); I != E; ++I)
- if (I->hasInitializer())
- printConstant(I->getInitializer());
-
- // Traverse the LLVM functions looking for constants
- for (Module::const_iterator FI = TheModule->begin(), FE = TheModule->end();
- FI != FE; ++FI) {
-    // Walk the basic blocks and instructions looking for constants
- for (Function::const_iterator BB = FI->begin(),
- E = FI->end(); BB != E; ++BB) {
- for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;
- ++I) {
- for (unsigned i = 0; i < I->getNumOperands(); ++i) {
- if (Constant* C = dyn_cast<Constant>(I->getOperand(i))) {
- printConstant(C);
- }
- }
- }
- }
- }
-}
-
-void CppWriter::printVariableUses(const GlobalVariable *GV) {
- nl(Out) << "// Type Definitions";
- nl(Out);
- printType(GV->getType());
- if (GV->hasInitializer()) {
- const Constant *Init = GV->getInitializer();
- printType(Init->getType());
- if (const Function *F = dyn_cast<Function>(Init)) {
- nl(Out)<< "/ Function Declarations"; nl(Out);
- printFunctionHead(F);
- } else if (const GlobalVariable* gv = dyn_cast<GlobalVariable>(Init)) {
- nl(Out) << "// Global Variable Declarations"; nl(Out);
- printVariableHead(gv);
-
- nl(Out) << "// Global Variable Definitions"; nl(Out);
- printVariableBody(gv);
- } else {
- nl(Out) << "// Constant Definitions"; nl(Out);
- printConstant(Init);
- }
- }
-}
-
-void CppWriter::printVariableHead(const GlobalVariable *GV) {
- nl(Out) << "GlobalVariable* " << getCppName(GV);
- if (is_inline) {
- Out << " = mod->getGlobalVariable(mod->getContext(), ";
- printEscapedString(GV->getName());
- Out << ", " << getCppName(GV->getType()->getElementType()) << ",true)";
- nl(Out) << "if (!" << getCppName(GV) << ") {";
- in(); nl(Out) << getCppName(GV);
- }
- Out << " = new GlobalVariable(/*Module=*/*mod, ";
- nl(Out) << "/*Type=*/";
- printCppName(GV->getType()->getElementType());
- Out << ",";
- nl(Out) << "/*isConstant=*/" << (GV->isConstant()?"true":"false");
- Out << ",";
- nl(Out) << "/*Linkage=*/";
- printLinkageType(GV->getLinkage());
- Out << ",";
- nl(Out) << "/*Initializer=*/0, ";
- if (GV->hasInitializer()) {
- Out << "// has initializer, specified below";
- }
- nl(Out) << "/*Name=*/\"";
- printEscapedString(GV->getName());
- Out << "\");";
- nl(Out);
-
- if (GV->hasSection()) {
- printCppName(GV);
- Out << "->setSection(\"";
- printEscapedString(GV->getSection());
- Out << "\");";
- nl(Out);
- }
- if (GV->getAlignment()) {
- printCppName(GV);
- Out << "->setAlignment(" << GV->getAlignment() << ");";
- nl(Out);
- }
- if (GV->getVisibility() != GlobalValue::DefaultVisibility) {
- printCppName(GV);
- Out << "->setVisibility(";
- printVisibilityType(GV->getVisibility());
- Out << ");";
- nl(Out);
- }
- if (GV->getDLLStorageClass() != GlobalValue::DefaultStorageClass) {
- printCppName(GV);
- Out << "->setDLLStorageClass(";
- printDLLStorageClassType(GV->getDLLStorageClass());
- Out << ");";
- nl(Out);
- }
- if (GV->isThreadLocal()) {
- printCppName(GV);
- Out << "->setThreadLocalMode(";
- printThreadLocalMode(GV->getThreadLocalMode());
- Out << ");";
- nl(Out);
- }
- if (is_inline) {
- out(); Out << "}"; nl(Out);
- }
-}
-
-void CppWriter::printVariableBody(const GlobalVariable *GV) {
- if (GV->hasInitializer()) {
- printCppName(GV);
- Out << "->setInitializer(";
- Out << getCppName(GV->getInitializer()) << ");";
- nl(Out);
- }
-}
-
-std::string CppWriter::getOpName(const Value* V) {
- if (!isa<Instruction>(V) || DefinedValues.find(V) != DefinedValues.end())
- return getCppName(V);
-
-  // See if it's already in the map of forward references; if so, just return
-  // the name we already set up for it.
- ForwardRefMap::const_iterator I = ForwardRefs.find(V);
- if (I != ForwardRefs.end())
- return I->second;
-
-  // This is a new forward reference. Generate a unique name for it.
- std::string result(std::string("fwdref_") + utostr(uniqueNum++));
-
- // Yes, this is a hack. An Argument is the smallest instantiable value that
- // we can make as a placeholder for the real value. We'll replace these
- // Argument instances later.
- Out << "Argument* " << result << " = new Argument("
- << getCppName(V->getType()) << ");";
- nl(Out);
- ForwardRefs[V] = result;
- return result;
-}
-
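// Illustrative sketch (hypothetical names): a value used before it is
// defined is emitted as an Argument placeholder, then patched and freed by
// printFunctionBody once the real instruction exists:
//
//   Argument* fwdref_7 = new Argument(IntTy_32);
//   ...
//   fwdref_7->replaceAllUsesWith(int32_result); delete fwdref_7;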
-static StringRef ConvertAtomicOrdering(AtomicOrdering Ordering) {
- switch (Ordering) {
- case NotAtomic: return "NotAtomic";
- case Unordered: return "Unordered";
- case Monotonic: return "Monotonic";
- case Acquire: return "Acquire";
- case Release: return "Release";
- case AcquireRelease: return "AcquireRelease";
- case SequentiallyConsistent: return "SequentiallyConsistent";
- }
- llvm_unreachable("Unknown ordering");
-}
-
-static StringRef ConvertAtomicSynchScope(SynchronizationScope SynchScope) {
- switch (SynchScope) {
- case SingleThread: return "SingleThread";
- case CrossThread: return "CrossThread";
- }
- llvm_unreachable("Unknown synch scope");
-}
-
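// Illustrative sketch: for an acquire, cross-thread atomic load, these two
// helpers make printInstruction emit, e.g.:
//
//   load_ptr_x->setAtomic(Acquire, CrossThread);
//
// where "load_ptr_x" is a hypothetical generated name.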
-// printInstruction - This member is called for each Instruction in a function.
-void CppWriter::printInstruction(const Instruction *I,
- const std::string& bbname) {
- std::string iName(getCppName(I));
-
- // Before we emit this instruction, we need to take care of generating any
- // forward references. So, we get the names of all the operands in advance
- const unsigned Ops(I->getNumOperands());
- std::string* opNames = new std::string[Ops];
- for (unsigned i = 0; i < Ops; i++)
- opNames[i] = getOpName(I->getOperand(i));
-
- switch (I->getOpcode()) {
- default:
- error("Invalid instruction");
- break;
-
- case Instruction::Ret: {
- const ReturnInst* ret = cast<ReturnInst>(I);
- Out << "ReturnInst::Create(mod->getContext(), "
- << (ret->getReturnValue() ? opNames[0] + ", " : "") << bbname << ");";
- break;
- }
- case Instruction::Br: {
- const BranchInst* br = cast<BranchInst>(I);
- Out << "BranchInst::Create(" ;
- if (br->getNumOperands() == 3) {
- Out << opNames[2] << ", "
- << opNames[1] << ", "
- << opNames[0] << ", ";
-
- } else if (br->getNumOperands() == 1) {
- Out << opNames[0] << ", ";
- } else {
- error("Branch with 2 operands?");
- }
- Out << bbname << ");";
- break;
- }
- case Instruction::Switch: {
- const SwitchInst *SI = cast<SwitchInst>(I);
- Out << "SwitchInst* " << iName << " = SwitchInst::Create("
- << getOpName(SI->getCondition()) << ", "
- << getOpName(SI->getDefaultDest()) << ", "
- << SI->getNumCases() << ", " << bbname << ");";
- nl(Out);
- for (SwitchInst::ConstCaseIt i = SI->case_begin(), e = SI->case_end();
- i != e; ++i) {
- const ConstantInt* CaseVal = i.getCaseValue();
- const BasicBlock *BB = i.getCaseSuccessor();
- Out << iName << "->addCase("
- << getOpName(CaseVal) << ", "
- << getOpName(BB) << ");";
- nl(Out);
- }
- break;
- }
- case Instruction::IndirectBr: {
- const IndirectBrInst *IBI = cast<IndirectBrInst>(I);
- Out << "IndirectBrInst *" << iName << " = IndirectBrInst::Create("
- << opNames[0] << ", " << IBI->getNumDestinations() << ");";
- nl(Out);
- for (unsigned i = 1; i != IBI->getNumOperands(); ++i) {
- Out << iName << "->addDestination(" << opNames[i] << ");";
- nl(Out);
- }
- break;
- }
- case Instruction::Resume: {
- Out << "ResumeInst::Create(" << opNames[0] << ", " << bbname << ");";
- break;
- }
- case Instruction::Invoke: {
- const InvokeInst* inv = cast<InvokeInst>(I);
- Out << "std::vector<Value*> " << iName << "_params;";
- nl(Out);
- for (unsigned i = 0; i < inv->getNumArgOperands(); ++i) {
- Out << iName << "_params.push_back("
- << getOpName(inv->getArgOperand(i)) << ");";
- nl(Out);
- }
- // FIXME: This shouldn't use magic numbers -3, -2, and -1.
- Out << "InvokeInst *" << iName << " = InvokeInst::Create("
- << getOpName(inv->getCalledValue()) << ", "
- << getOpName(inv->getNormalDest()) << ", "
- << getOpName(inv->getUnwindDest()) << ", "
- << iName << "_params, \"";
- printEscapedString(inv->getName());
- Out << "\", " << bbname << ");";
- nl(Out) << iName << "->setCallingConv(";
- printCallingConv(inv->getCallingConv());
- Out << ");";
- printAttributes(inv->getAttributes(), iName);
- Out << iName << "->setAttributes(" << iName << "_PAL);";
- nl(Out);
- break;
- }
- case Instruction::Unreachable: {
- Out << "new UnreachableInst("
- << "mod->getContext(), "
- << bbname << ");";
- break;
- }
- case Instruction::Add:
- case Instruction::FAdd:
- case Instruction::Sub:
- case Instruction::FSub:
- case Instruction::Mul:
- case Instruction::FMul:
- case Instruction::UDiv:
- case Instruction::SDiv:
- case Instruction::FDiv:
- case Instruction::URem:
- case Instruction::SRem:
- case Instruction::FRem:
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor:
- case Instruction::Shl:
- case Instruction::LShr:
- case Instruction::AShr:{
- Out << "BinaryOperator* " << iName << " = BinaryOperator::Create(";
- switch (I->getOpcode()) {
- case Instruction::Add: Out << "Instruction::Add"; break;
- case Instruction::FAdd: Out << "Instruction::FAdd"; break;
- case Instruction::Sub: Out << "Instruction::Sub"; break;
- case Instruction::FSub: Out << "Instruction::FSub"; break;
- case Instruction::Mul: Out << "Instruction::Mul"; break;
- case Instruction::FMul: Out << "Instruction::FMul"; break;
- case Instruction::UDiv:Out << "Instruction::UDiv"; break;
- case Instruction::SDiv:Out << "Instruction::SDiv"; break;
- case Instruction::FDiv:Out << "Instruction::FDiv"; break;
- case Instruction::URem:Out << "Instruction::URem"; break;
- case Instruction::SRem:Out << "Instruction::SRem"; break;
- case Instruction::FRem:Out << "Instruction::FRem"; break;
- case Instruction::And: Out << "Instruction::And"; break;
- case Instruction::Or: Out << "Instruction::Or"; break;
- case Instruction::Xor: Out << "Instruction::Xor"; break;
- case Instruction::Shl: Out << "Instruction::Shl"; break;
- case Instruction::LShr:Out << "Instruction::LShr"; break;
- case Instruction::AShr:Out << "Instruction::AShr"; break;
- default: Out << "Instruction::BadOpCode"; break;
- }
- Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
- printEscapedString(I->getName());
- Out << "\", " << bbname << ");";
- break;
- }
- case Instruction::FCmp: {
- Out << "FCmpInst* " << iName << " = new FCmpInst(*" << bbname << ", ";
- switch (cast<FCmpInst>(I)->getPredicate()) {
- case FCmpInst::FCMP_FALSE: Out << "FCmpInst::FCMP_FALSE"; break;
- case FCmpInst::FCMP_OEQ : Out << "FCmpInst::FCMP_OEQ"; break;
- case FCmpInst::FCMP_OGT : Out << "FCmpInst::FCMP_OGT"; break;
- case FCmpInst::FCMP_OGE : Out << "FCmpInst::FCMP_OGE"; break;
- case FCmpInst::FCMP_OLT : Out << "FCmpInst::FCMP_OLT"; break;
- case FCmpInst::FCMP_OLE : Out << "FCmpInst::FCMP_OLE"; break;
- case FCmpInst::FCMP_ONE : Out << "FCmpInst::FCMP_ONE"; break;
- case FCmpInst::FCMP_ORD : Out << "FCmpInst::FCMP_ORD"; break;
- case FCmpInst::FCMP_UNO : Out << "FCmpInst::FCMP_UNO"; break;
- case FCmpInst::FCMP_UEQ : Out << "FCmpInst::FCMP_UEQ"; break;
- case FCmpInst::FCMP_UGT : Out << "FCmpInst::FCMP_UGT"; break;
- case FCmpInst::FCMP_UGE : Out << "FCmpInst::FCMP_UGE"; break;
- case FCmpInst::FCMP_ULT : Out << "FCmpInst::FCMP_ULT"; break;
- case FCmpInst::FCMP_ULE : Out << "FCmpInst::FCMP_ULE"; break;
- case FCmpInst::FCMP_UNE : Out << "FCmpInst::FCMP_UNE"; break;
- case FCmpInst::FCMP_TRUE : Out << "FCmpInst::FCMP_TRUE"; break;
- default: Out << "FCmpInst::BAD_ICMP_PREDICATE"; break;
- }
- Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
- printEscapedString(I->getName());
- Out << "\");";
- break;
- }
- case Instruction::ICmp: {
- Out << "ICmpInst* " << iName << " = new ICmpInst(*" << bbname << ", ";
- switch (cast<ICmpInst>(I)->getPredicate()) {
- case ICmpInst::ICMP_EQ: Out << "ICmpInst::ICMP_EQ"; break;
- case ICmpInst::ICMP_NE: Out << "ICmpInst::ICMP_NE"; break;
- case ICmpInst::ICMP_ULE: Out << "ICmpInst::ICMP_ULE"; break;
- case ICmpInst::ICMP_SLE: Out << "ICmpInst::ICMP_SLE"; break;
- case ICmpInst::ICMP_UGE: Out << "ICmpInst::ICMP_UGE"; break;
- case ICmpInst::ICMP_SGE: Out << "ICmpInst::ICMP_SGE"; break;
- case ICmpInst::ICMP_ULT: Out << "ICmpInst::ICMP_ULT"; break;
- case ICmpInst::ICMP_SLT: Out << "ICmpInst::ICMP_SLT"; break;
- case ICmpInst::ICMP_UGT: Out << "ICmpInst::ICMP_UGT"; break;
- case ICmpInst::ICMP_SGT: Out << "ICmpInst::ICMP_SGT"; break;
- default: Out << "ICmpInst::BAD_ICMP_PREDICATE"; break;
- }
- Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
- printEscapedString(I->getName());
- Out << "\");";
- break;
- }
- case Instruction::Alloca: {
- const AllocaInst* allocaI = cast<AllocaInst>(I);
- Out << "AllocaInst* " << iName << " = new AllocaInst("
- << getCppName(allocaI->getAllocatedType()) << ", ";
- if (allocaI->isArrayAllocation())
- Out << opNames[0] << ", ";
- Out << "\"";
- printEscapedString(allocaI->getName());
- Out << "\", " << bbname << ");";
- if (allocaI->getAlignment())
- nl(Out) << iName << "->setAlignment("
- << allocaI->getAlignment() << ");";
- break;
- }
- case Instruction::Load: {
- const LoadInst* load = cast<LoadInst>(I);
- Out << "LoadInst* " << iName << " = new LoadInst("
- << opNames[0] << ", \"";
- printEscapedString(load->getName());
- Out << "\", " << (load->isVolatile() ? "true" : "false" )
- << ", " << bbname << ");";
- if (load->getAlignment())
- nl(Out) << iName << "->setAlignment("
- << load->getAlignment() << ");";
- if (load->isAtomic()) {
- StringRef Ordering = ConvertAtomicOrdering(load->getOrdering());
- StringRef CrossThread = ConvertAtomicSynchScope(load->getSynchScope());
- nl(Out) << iName << "->setAtomic("
- << Ordering << ", " << CrossThread << ");";
- }
- break;
- }
- case Instruction::Store: {
- const StoreInst* store = cast<StoreInst>(I);
- Out << "StoreInst* " << iName << " = new StoreInst("
- << opNames[0] << ", "
- << opNames[1] << ", "
- << (store->isVolatile() ? "true" : "false")
- << ", " << bbname << ");";
- if (store->getAlignment())
- nl(Out) << iName << "->setAlignment("
- << store->getAlignment() << ");";
- if (store->isAtomic()) {
- StringRef Ordering = ConvertAtomicOrdering(store->getOrdering());
- StringRef CrossThread = ConvertAtomicSynchScope(store->getSynchScope());
- nl(Out) << iName << "->setAtomic("
- << Ordering << ", " << CrossThread << ");";
- }
- break;
- }
- case Instruction::GetElementPtr: {
- const GetElementPtrInst* gep = cast<GetElementPtrInst>(I);
- Out << "GetElementPtrInst* " << iName << " = GetElementPtrInst::Create("
- << getCppName(gep->getSourceElementType()) << ", " << opNames[0] << ", {";
- in();
- for (unsigned i = 1; i < gep->getNumOperands(); ++i ) {
- if (i != 1) {
- Out << ", ";
- }
- nl(Out);
- Out << opNames[i];
- }
- out();
- nl(Out) << "}, \"";
- printEscapedString(gep->getName());
- Out << "\", " << bbname << ");";
- break;
- }
- case Instruction::PHI: {
- const PHINode* phi = cast<PHINode>(I);
-
- Out << "PHINode* " << iName << " = PHINode::Create("
- << getCppName(phi->getType()) << ", "
- << phi->getNumIncomingValues() << ", \"";
- printEscapedString(phi->getName());
- Out << "\", " << bbname << ");";
- nl(Out);
- for (unsigned i = 0; i < phi->getNumIncomingValues(); ++i) {
- Out << iName << "->addIncoming("
- << opNames[PHINode::getOperandNumForIncomingValue(i)] << ", "
- << getOpName(phi->getIncomingBlock(i)) << ");";
- nl(Out);
- }
- break;
- }
- case Instruction::Trunc:
- case Instruction::ZExt:
- case Instruction::SExt:
- case Instruction::FPTrunc:
- case Instruction::FPExt:
- case Instruction::FPToUI:
- case Instruction::FPToSI:
- case Instruction::UIToFP:
- case Instruction::SIToFP:
- case Instruction::PtrToInt:
- case Instruction::IntToPtr:
- case Instruction::BitCast: {
- const CastInst* cst = cast<CastInst>(I);
- Out << "CastInst* " << iName << " = new ";
- switch (I->getOpcode()) {
- case Instruction::Trunc: Out << "TruncInst"; break;
- case Instruction::ZExt: Out << "ZExtInst"; break;
- case Instruction::SExt: Out << "SExtInst"; break;
- case Instruction::FPTrunc: Out << "FPTruncInst"; break;
- case Instruction::FPExt: Out << "FPExtInst"; break;
- case Instruction::FPToUI: Out << "FPToUIInst"; break;
- case Instruction::FPToSI: Out << "FPToSIInst"; break;
- case Instruction::UIToFP: Out << "UIToFPInst"; break;
- case Instruction::SIToFP: Out << "SIToFPInst"; break;
- case Instruction::PtrToInt: Out << "PtrToIntInst"; break;
- case Instruction::IntToPtr: Out << "IntToPtrInst"; break;
- case Instruction::BitCast: Out << "BitCastInst"; break;
- default: llvm_unreachable("Unreachable");
- }
- Out << "(" << opNames[0] << ", "
- << getCppName(cst->getType()) << ", \"";
- printEscapedString(cst->getName());
- Out << "\", " << bbname << ");";
- break;
- }
- case Instruction::Call: {
- const CallInst* call = cast<CallInst>(I);
- if (const InlineAsm* ila = dyn_cast<InlineAsm>(call->getCalledValue())) {
- Out << "InlineAsm* " << getCppName(ila) << " = InlineAsm::get("
- << getCppName(ila->getFunctionType()) << ", \""
- << ila->getAsmString() << "\", \""
- << ila->getConstraintString() << "\","
- << (ila->hasSideEffects() ? "true" : "false") << ");";
- nl(Out);
- }
- if (call->getNumArgOperands() > 1) {
- Out << "std::vector<Value*> " << iName << "_params;";
- nl(Out);
- for (unsigned i = 0; i < call->getNumArgOperands(); ++i) {
- Out << iName << "_params.push_back(" << opNames[i] << ");";
- nl(Out);
- }
- Out << "CallInst* " << iName << " = CallInst::Create("
- << opNames[call->getNumArgOperands()] << ", "
- << iName << "_params, \"";
- } else if (call->getNumArgOperands() == 1) {
- Out << "CallInst* " << iName << " = CallInst::Create("
- << opNames[call->getNumArgOperands()] << ", " << opNames[0] << ", \"";
- } else {
- Out << "CallInst* " << iName << " = CallInst::Create("
- << opNames[call->getNumArgOperands()] << ", \"";
- }
- printEscapedString(call->getName());
- Out << "\", " << bbname << ");";
- nl(Out) << iName << "->setCallingConv(";
- printCallingConv(call->getCallingConv());
- Out << ");";
- nl(Out) << iName << "->setTailCall("
- << (call->isTailCall() ? "true" : "false");
- Out << ");";
- nl(Out);
- printAttributes(call->getAttributes(), iName);
- Out << iName << "->setAttributes(" << iName << "_PAL);";
- nl(Out);
- break;
- }
- case Instruction::Select: {
- const SelectInst* sel = cast<SelectInst>(I);
- Out << "SelectInst* " << getCppName(sel) << " = SelectInst::Create(";
- Out << opNames[0] << ", " << opNames[1] << ", " << opNames[2] << ", \"";
- printEscapedString(sel->getName());
- Out << "\", " << bbname << ");";
- break;
- }
- case Instruction::UserOp1:
- /// FALL THROUGH
- case Instruction::UserOp2: {
- /// FIXME: What should be done here?
- break;
- }
- case Instruction::VAArg: {
- const VAArgInst* va = cast<VAArgInst>(I);
- Out << "VAArgInst* " << getCppName(va) << " = new VAArgInst("
- << opNames[0] << ", " << getCppName(va->getType()) << ", \"";
- printEscapedString(va->getName());
- Out << "\", " << bbname << ");";
- break;
- }
- case Instruction::ExtractElement: {
- const ExtractElementInst* eei = cast<ExtractElementInst>(I);
- Out << "ExtractElementInst* " << getCppName(eei)
- << " = new ExtractElementInst(" << opNames[0]
- << ", " << opNames[1] << ", \"";
- printEscapedString(eei->getName());
- Out << "\", " << bbname << ");";
- break;
- }
- case Instruction::InsertElement: {
- const InsertElementInst* iei = cast<InsertElementInst>(I);
- Out << "InsertElementInst* " << getCppName(iei)
- << " = InsertElementInst::Create(" << opNames[0]
- << ", " << opNames[1] << ", " << opNames[2] << ", \"";
- printEscapedString(iei->getName());
- Out << "\", " << bbname << ");";
- break;
- }
- case Instruction::ShuffleVector: {
- const ShuffleVectorInst* svi = cast<ShuffleVectorInst>(I);
- Out << "ShuffleVectorInst* " << getCppName(svi)
- << " = new ShuffleVectorInst(" << opNames[0]
- << ", " << opNames[1] << ", " << opNames[2] << ", \"";
- printEscapedString(svi->getName());
- Out << "\", " << bbname << ");";
- break;
- }
- case Instruction::ExtractValue: {
- const ExtractValueInst *evi = cast<ExtractValueInst>(I);
- Out << "std::vector<unsigned> " << iName << "_indices;";
- nl(Out);
- for (unsigned i = 0; i < evi->getNumIndices(); ++i) {
- Out << iName << "_indices.push_back("
- << evi->idx_begin()[i] << ");";
- nl(Out);
- }
- Out << "ExtractValueInst* " << getCppName(evi)
- << " = ExtractValueInst::Create(" << opNames[0]
- << ", "
- << iName << "_indices, \"";
- printEscapedString(evi->getName());
- Out << "\", " << bbname << ");";
- break;
- }
- case Instruction::InsertValue: {
- const InsertValueInst *ivi = cast<InsertValueInst>(I);
- Out << "std::vector<unsigned> " << iName << "_indices;";
- nl(Out);
- for (unsigned i = 0; i < ivi->getNumIndices(); ++i) {
- Out << iName << "_indices.push_back("
- << ivi->idx_begin()[i] << ");";
- nl(Out);
- }
- Out << "InsertValueInst* " << getCppName(ivi)
- << " = InsertValueInst::Create(" << opNames[0]
- << ", " << opNames[1] << ", "
- << iName << "_indices, \"";
- printEscapedString(ivi->getName());
- Out << "\", " << bbname << ");";
- break;
- }
- case Instruction::Fence: {
- const FenceInst *fi = cast<FenceInst>(I);
- StringRef Ordering = ConvertAtomicOrdering(fi->getOrdering());
- StringRef CrossThread = ConvertAtomicSynchScope(fi->getSynchScope());
- Out << "FenceInst* " << iName
- << " = new FenceInst(mod->getContext(), "
- << Ordering << ", " << CrossThread << ", " << bbname
- << ");";
- break;
- }
- case Instruction::AtomicCmpXchg: {
- const AtomicCmpXchgInst *cxi = cast<AtomicCmpXchgInst>(I);
- StringRef SuccessOrdering =
- ConvertAtomicOrdering(cxi->getSuccessOrdering());
- StringRef FailureOrdering =
- ConvertAtomicOrdering(cxi->getFailureOrdering());
- StringRef CrossThread = ConvertAtomicSynchScope(cxi->getSynchScope());
- Out << "AtomicCmpXchgInst* " << iName
- << " = new AtomicCmpXchgInst("
- << opNames[0] << ", " << opNames[1] << ", " << opNames[2] << ", "
- << SuccessOrdering << ", " << FailureOrdering << ", "
- << CrossThread << ", " << bbname
- << ");";
- nl(Out) << iName << "->setName(\"";
- printEscapedString(cxi->getName());
- Out << "\");";
- nl(Out) << iName << "->setVolatile("
- << (cxi->isVolatile() ? "true" : "false") << ");";
- nl(Out) << iName << "->setWeak("
- << (cxi->isWeak() ? "true" : "false") << ");";
- break;
- }
- case Instruction::AtomicRMW: {
- const AtomicRMWInst *rmwi = cast<AtomicRMWInst>(I);
- StringRef Ordering = ConvertAtomicOrdering(rmwi->getOrdering());
- StringRef CrossThread = ConvertAtomicSynchScope(rmwi->getSynchScope());
- StringRef Operation;
- switch (rmwi->getOperation()) {
- case AtomicRMWInst::Xchg: Operation = "AtomicRMWInst::Xchg"; break;
- case AtomicRMWInst::Add: Operation = "AtomicRMWInst::Add"; break;
- case AtomicRMWInst::Sub: Operation = "AtomicRMWInst::Sub"; break;
- case AtomicRMWInst::And: Operation = "AtomicRMWInst::And"; break;
- case AtomicRMWInst::Nand: Operation = "AtomicRMWInst::Nand"; break;
- case AtomicRMWInst::Or: Operation = "AtomicRMWInst::Or"; break;
- case AtomicRMWInst::Xor: Operation = "AtomicRMWInst::Xor"; break;
- case AtomicRMWInst::Max: Operation = "AtomicRMWInst::Max"; break;
- case AtomicRMWInst::Min: Operation = "AtomicRMWInst::Min"; break;
- case AtomicRMWInst::UMax: Operation = "AtomicRMWInst::UMax"; break;
- case AtomicRMWInst::UMin: Operation = "AtomicRMWInst::UMin"; break;
- case AtomicRMWInst::BAD_BINOP: llvm_unreachable("Bad atomic operation");
- }
- Out << "AtomicRMWInst* " << iName
- << " = new AtomicRMWInst("
- << Operation << ", "
- << opNames[0] << ", " << opNames[1] << ", "
- << Ordering << ", " << CrossThread << ", " << bbname
- << ");";
- nl(Out) << iName << "->setName(\"";
- printEscapedString(rmwi->getName());
- Out << "\");";
- nl(Out) << iName << "->setVolatile("
- << (rmwi->isVolatile() ? "true" : "false") << ");";
- break;
- }
- case Instruction::LandingPad: {
- const LandingPadInst *lpi = cast<LandingPadInst>(I);
- Out << "LandingPadInst* " << iName << " = LandingPadInst::Create(";
- printCppName(lpi->getType());
- Out << ", " << opNames[0] << ", " << lpi->getNumClauses() << ", \"";
- printEscapedString(lpi->getName());
- Out << "\", " << bbname << ");";
- nl(Out) << iName << "->setCleanup("
- << (lpi->isCleanup() ? "true" : "false")
- << ");";
- for (unsigned i = 0, e = lpi->getNumClauses(); i != e; ++i)
- nl(Out) << iName << "->addClause(" << opNames[i+1] << ");";
- break;
- }
- }
- DefinedValues.insert(I);
- nl(Out);
- delete [] opNames;
-}
-
-// Print out the types, constants and declarations needed by one function
-void CppWriter::printFunctionUses(const Function* F) {
- nl(Out) << "// Type Definitions"; nl(Out);
- if (!is_inline) {
- // Print the function's return type
- printType(F->getReturnType());
-
- // Print the function's function type
- printType(F->getFunctionType());
-
- // Print the types of each of the function's arguments
- for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
- AI != AE; ++AI) {
- printType(AI->getType());
- }
- }
-
- // Print type definitions for every type referenced by an instruction and
- // make a note of any global values or constants that are referenced
- SmallPtrSet<GlobalValue*,64> gvs;
- SmallPtrSet<Constant*,64> consts;
- for (Function::const_iterator BB = F->begin(), BE = F->end();
- BB != BE; ++BB){
- for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
- I != E; ++I) {
- // Print the type of the instruction itself
- printType(I->getType());
-
- // Print the type of each of the instruction's operands
- for (unsigned i = 0; i < I->getNumOperands(); ++i) {
- Value* operand = I->getOperand(i);
- printType(operand->getType());
-
- // If the operand references a GVal or Constant, make a note of it
- if (GlobalValue* GV = dyn_cast<GlobalValue>(operand)) {
- gvs.insert(GV);
- if (GenerationType != GenFunction)
- if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
- if (GVar->hasInitializer())
- consts.insert(GVar->getInitializer());
- } else if (Constant* C = dyn_cast<Constant>(operand)) {
- consts.insert(C);
- for (Value* operand : C->operands()) {
- // If the operand references a GVal or Constant, make a note of it
- printType(operand->getType());
- if (GlobalValue* GV = dyn_cast<GlobalValue>(operand)) {
- gvs.insert(GV);
- if (GenerationType != GenFunction)
- if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
- if (GVar->hasInitializer())
- consts.insert(GVar->getInitializer());
- }
- }
- }
- }
- }
- }
-
- // Print the function declarations for any functions encountered
- nl(Out) << "// Function Declarations"; nl(Out);
- for (auto *GV : gvs) {
- if (Function *Fun = dyn_cast<Function>(GV)) {
- if (!is_inline || Fun != F)
- printFunctionHead(Fun);
- }
- }
-
- // Print the global variable declarations for any variables encountered
- nl(Out) << "// Global Variable Declarations"; nl(Out);
- for (auto *GV : gvs) {
- if (GlobalVariable *F = dyn_cast<GlobalVariable>(GV))
- printVariableHead(F);
- }
-
- // Print the constants found
- nl(Out) << "// Constant Definitions"; nl(Out);
- for (const auto *C : consts) {
- printConstant(C);
- }
-
-  // Process the global variable definitions now that all the constants have
- // been emitted. These definitions just couple the gvars with their constant
- // initializers.
- if (GenerationType != GenFunction) {
- nl(Out) << "// Global Variable Definitions"; nl(Out);
- for (auto *GV : gvs) {
- if (GlobalVariable *Var = dyn_cast<GlobalVariable>(GV))
- printVariableBody(Var);
- }
- }
-}
-
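// Illustrative sketch (hypothetical names): the "Global Variable
// Definitions" step only ties each declared global to its already-emitted
// constant via printVariableBody, e.g.:
//
//   gvar_foo->setInitializer(const_int32_42);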
-void CppWriter::printFunctionHead(const Function* F) {
- nl(Out) << "Function* " << getCppName(F);
- Out << " = mod->getFunction(\"";
- printEscapedString(F->getName());
- Out << "\");";
- nl(Out) << "if (!" << getCppName(F) << ") {";
- nl(Out) << getCppName(F);
-
- Out<< " = Function::Create(";
- nl(Out,1) << "/*Type=*/" << getCppName(F->getFunctionType()) << ",";
- nl(Out) << "/*Linkage=*/";
- printLinkageType(F->getLinkage());
- Out << ",";
- nl(Out) << "/*Name=*/\"";
- printEscapedString(F->getName());
- Out << "\", mod); " << (F->isDeclaration()? "// (external, no body)" : "");
- nl(Out,-1);
- printCppName(F);
- Out << "->setCallingConv(";
- printCallingConv(F->getCallingConv());
- Out << ");";
- nl(Out);
- if (F->hasSection()) {
- printCppName(F);
- Out << "->setSection(\"" << F->getSection() << "\");";
- nl(Out);
- }
- if (F->getAlignment()) {
- printCppName(F);
- Out << "->setAlignment(" << F->getAlignment() << ");";
- nl(Out);
- }
- if (F->getVisibility() != GlobalValue::DefaultVisibility) {
- printCppName(F);
- Out << "->setVisibility(";
- printVisibilityType(F->getVisibility());
- Out << ");";
- nl(Out);
- }
- if (F->getDLLStorageClass() != GlobalValue::DefaultStorageClass) {
- printCppName(F);
- Out << "->setDLLStorageClass(";
- printDLLStorageClassType(F->getDLLStorageClass());
- Out << ");";
- nl(Out);
- }
- if (F->hasGC()) {
- printCppName(F);
- Out << "->setGC(\"" << F->getGC() << "\");";
- nl(Out);
- }
- Out << "}";
- nl(Out);
- printAttributes(F->getAttributes(), getCppName(F));
- printCppName(F);
- Out << "->setAttributes(" << getCppName(F) << "_PAL);";
- nl(Out);
-}
-
-void CppWriter::printFunctionBody(const Function *F) {
- if (F->isDeclaration())
- return; // external functions have no bodies.
-
- // Clear the DefinedValues and ForwardRefs maps because we can't have
- // cross-function forward refs
- ForwardRefs.clear();
- DefinedValues.clear();
-
- // Create all the argument values
- if (!is_inline) {
- if (!F->arg_empty()) {
- Out << "Function::arg_iterator args = " << getCppName(F)
- << "->arg_begin();";
- nl(Out);
- }
- for (const Argument &AI : F->args()) {
- Out << "Value* " << getCppName(&AI) << " = args++;";
- nl(Out);
- if (AI.hasName()) {
- Out << getCppName(&AI) << "->setName(\"";
- printEscapedString(AI.getName());
- Out << "\");";
- nl(Out);
- }
- }
- }
-
- // Create all the basic blocks
- nl(Out);
- for (const BasicBlock &BI : *F) {
- std::string bbname(getCppName(&BI));
- Out << "BasicBlock* " << bbname <<
- " = BasicBlock::Create(mod->getContext(), \"";
- if (BI.hasName())
- printEscapedString(BI.getName());
- Out << "\"," << getCppName(BI.getParent()) << ",0);";
- nl(Out);
- }
-
-  // Output all of the function's basic blocks.
- for (const BasicBlock &BI : *F) {
- std::string bbname(getCppName(&BI));
- nl(Out) << "// Block " << BI.getName() << " (" << bbname << ")";
- nl(Out);
-
- // Output all of the instructions in the basic block...
- for (const Instruction &I : BI)
- printInstruction(&I, bbname);
- }
-
- // Loop over the ForwardRefs and resolve them now that all instructions
- // are generated.
- if (!ForwardRefs.empty()) {
- nl(Out) << "// Resolve Forward References";
- nl(Out);
- }
-
- while (!ForwardRefs.empty()) {
- ForwardRefMap::iterator I = ForwardRefs.begin();
- Out << I->second << "->replaceAllUsesWith("
- << getCppName(I->first) << "); delete " << I->second << ";";
- nl(Out);
- ForwardRefs.erase(I);
- }
-}
-
-void CppWriter::printInline(const std::string& fname,
- const std::string& func) {
- const Function* F = TheModule->getFunction(func);
- if (!F) {
- error(std::string("Function '") + func + "' not found in input module");
- return;
- }
- if (F->isDeclaration()) {
- error(std::string("Function '") + func + "' is external!");
- return;
- }
- nl(Out) << "BasicBlock* " << fname << "(Module* mod, Function *"
- << getCppName(F);
- unsigned arg_count = 1;
- for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
- AI != AE; ++AI) {
- Out << ", Value* arg_" << arg_count++;
- }
- Out << ") {";
- nl(Out);
- is_inline = true;
- printFunctionUses(F);
- printFunctionBody(F);
- is_inline = false;
- Out << "return " << getCppName(&F->front()) << ";";
- nl(Out) << "}";
- nl(Out);
-}
-
-void CppWriter::printModuleBody() {
- // Print out all the type definitions
- nl(Out) << "// Type Definitions"; nl(Out);
- printTypes(TheModule);
-
- // Functions can call each other and global variables can reference them so
- // define all the functions first before emitting their function bodies.
- nl(Out) << "// Function Declarations"; nl(Out);
- for (const Function &I : *TheModule)
- printFunctionHead(&I);
-
-  // Process the global variable declarations. We can't initialize them until
-  // after the constants are printed, so just print a header for each global.
- nl(Out) << "// Global Variable Declarations\n"; nl(Out);
- for (const GlobalVariable &I : TheModule->globals())
- printVariableHead(&I);
-
-  // Print out all the constant definitions. Constants don't recurse except
- // through GlobalValues. All GlobalValues have been declared at this point
- // so we can proceed to generate the constants.
- nl(Out) << "// Constant Definitions"; nl(Out);
- printConstants(TheModule);
-
-  // Process the global variable definitions now that all the constants have
- // been emitted. These definitions just couple the gvars with their constant
- // initializers.
- nl(Out) << "// Global Variable Definitions"; nl(Out);
- for (const GlobalVariable &I : TheModule->globals())
- printVariableBody(&I);
-
- // Finally, we can safely put out all of the function bodies.
- nl(Out) << "// Function Definitions"; nl(Out);
- for (const Function &I : *TheModule) {
- if (!I.isDeclaration()) {
- nl(Out) << "// Function: " << I.getName() << " (" << getCppName(&I)
- << ")";
- nl(Out) << "{";
- nl(Out,1);
- printFunctionBody(&I);
- nl(Out,-1) << "}";
- nl(Out);
- }
- }
-}
-
-void CppWriter::printProgram(const std::string& fname,
- const std::string& mName) {
- Out << "#include <llvm/Pass.h>\n";
-
- Out << "#include <llvm/ADT/SmallVector.h>\n";
- Out << "#include <llvm/Analysis/Verifier.h>\n";
- Out << "#include <llvm/IR/BasicBlock.h>\n";
- Out << "#include <llvm/IR/CallingConv.h>\n";
- Out << "#include <llvm/IR/Constants.h>\n";
- Out << "#include <llvm/IR/DerivedTypes.h>\n";
- Out << "#include <llvm/IR/Function.h>\n";
- Out << "#include <llvm/IR/GlobalVariable.h>\n";
- Out << "#include <llvm/IR/IRPrintingPasses.h>\n";
- Out << "#include <llvm/IR/InlineAsm.h>\n";
- Out << "#include <llvm/IR/Instructions.h>\n";
- Out << "#include <llvm/IR/LLVMContext.h>\n";
- Out << "#include <llvm/IR/LegacyPassManager.h>\n";
- Out << "#include <llvm/IR/Module.h>\n";
- Out << "#include <llvm/Support/FormattedStream.h>\n";
- Out << "#include <llvm/Support/MathExtras.h>\n";
- Out << "#include <algorithm>\n";
- Out << "using namespace llvm;\n\n";
- Out << "Module* " << fname << "();\n\n";
- Out << "int main(int argc, char**argv) {\n";
- Out << " Module* Mod = " << fname << "();\n";
- Out << " verifyModule(*Mod, PrintMessageAction);\n";
- Out << " PassManager PM;\n";
- Out << " PM.add(createPrintModulePass(&outs()));\n";
- Out << " PM.run(*Mod);\n";
- Out << " return 0;\n";
- Out << "}\n\n";
- printModule(fname,mName);
-}
-
-void CppWriter::printModule(const std::string& fname,
- const std::string& mName) {
- nl(Out) << "Module* " << fname << "() {";
- nl(Out,1) << "// Module Construction";
- nl(Out) << "Module* mod = new Module(\"";
- printEscapedString(mName);
- Out << "\", getGlobalContext());";
-  if (!TheModule->getDataLayoutStr().empty()) {
- nl(Out) << "mod->setDataLayout(\"" << TheModule->getDataLayoutStr()
- << "\");";
- }
- if (!TheModule->getTargetTriple().empty()) {
- nl(Out) << "mod->setTargetTriple(\"" << TheModule->getTargetTriple()
- << "\");";
- }
-
- if (!TheModule->getModuleInlineAsm().empty()) {
- nl(Out) << "mod->setModuleInlineAsm(\"";
- printEscapedString(TheModule->getModuleInlineAsm());
- Out << "\");";
- }
- nl(Out);
-
- printModuleBody();
- nl(Out) << "return mod;";
- nl(Out,-1) << "}";
- nl(Out);
-}
-
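// Illustrative sketch (module name, layout string, and triple are
// hypothetical): the generated factory opens with module construction
// along these lines:
//
//   Module* mod = new Module("example", getGlobalContext());
//   mod->setDataLayout("e-m:e-i64:64-f80:128-n8:16:32:64-S128");
//   mod->setTargetTriple("x86_64-unknown-linux-gnu");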
-void CppWriter::printContents(const std::string& fname,
- const std::string& mName) {
- Out << "\nModule* " << fname << "(Module *mod) {\n";
- Out << "\nmod->setModuleIdentifier(\"";
- printEscapedString(mName);
- Out << "\");\n";
- printModuleBody();
- Out << "\nreturn mod;\n";
- Out << "\n}\n";
-}
-
-void CppWriter::printFunction(const std::string& fname,
- const std::string& funcName) {
- const Function* F = TheModule->getFunction(funcName);
- if (!F) {
- error(std::string("Function '") + funcName + "' not found in input module");
- return;
- }
- Out << "\nFunction* " << fname << "(Module *mod) {\n";
- printFunctionUses(F);
- printFunctionHead(F);
- printFunctionBody(F);
- Out << "return " << getCppName(F) << ";\n";
- Out << "}\n";
-}
-
-void CppWriter::printFunctions() {
- const Module::FunctionListType &funcs = TheModule->getFunctionList();
- Module::const_iterator I = funcs.begin();
- Module::const_iterator IE = funcs.end();
-
- for (; I != IE; ++I) {
- const Function &func = *I;
- if (!func.isDeclaration()) {
- std::string name("define_");
- name += func.getName();
- printFunction(name, func.getName());
- }
- }
-}
-
-void CppWriter::printVariable(const std::string& fname,
- const std::string& varName) {
- const GlobalVariable* GV = TheModule->getNamedGlobal(varName);
-
- if (!GV) {
- error(std::string("Variable '") + varName + "' not found in input module");
- return;
- }
- Out << "\nGlobalVariable* " << fname << "(Module *mod) {\n";
- printVariableUses(GV);
- printVariableHead(GV);
- printVariableBody(GV);
- Out << "return " << getCppName(GV) << ";\n";
- Out << "}\n";
-}
-
-void CppWriter::printType(const std::string &fname,
- const std::string &typeName) {
- Type* Ty = TheModule->getTypeByName(typeName);
- if (!Ty) {
- error(std::string("Type '") + typeName + "' not found in input module");
- return;
- }
- Out << "\nType* " << fname << "(Module *mod) {\n";
- printType(Ty);
- Out << "return " << getCppName(Ty) << ";\n";
- Out << "}\n";
-}
-
-bool CppWriter::runOnModule(Module &M) {
- TheModule = &M;
-
- // Emit a header
- Out << "// Generated by llvm2cpp - DO NOT MODIFY!\n\n";
-
- // Get the name of the function we're supposed to generate
- std::string fname = FuncName.getValue();
-
- // Get the name of the thing we are to generate
- std::string tgtname = NameToGenerate.getValue();
- if (GenerationType == GenModule ||
- GenerationType == GenContents ||
- GenerationType == GenProgram ||
- GenerationType == GenFunctions) {
- if (tgtname == "!bad!") {
- if (M.getModuleIdentifier() == "-")
- tgtname = "<stdin>";
- else
- tgtname = M.getModuleIdentifier();
- }
- } else if (tgtname == "!bad!")
- error("You must use the -for option with -gen-{function,variable,type}");
-
- switch (WhatToGenerate(GenerationType)) {
- case GenProgram:
- if (fname.empty())
- fname = "makeLLVMModule";
- printProgram(fname,tgtname);
- break;
- case GenModule:
- if (fname.empty())
- fname = "makeLLVMModule";
- printModule(fname,tgtname);
- break;
- case GenContents:
- if (fname.empty())
- fname = "makeLLVMModuleContents";
- printContents(fname,tgtname);
- break;
- case GenFunction:
- if (fname.empty())
- fname = "makeLLVMFunction";
- printFunction(fname,tgtname);
- break;
- case GenFunctions:
- printFunctions();
- break;
- case GenInline:
- if (fname.empty())
- fname = "makeLLVMInline";
- printInline(fname,tgtname);
- break;
- case GenVariable:
- if (fname.empty())
- fname = "makeLLVMVariable";
- printVariable(fname,tgtname);
- break;
- case GenType:
- if (fname.empty())
- fname = "makeLLVMType";
- printType(fname,tgtname);
- break;
- }
-
- return false;
-}
-
-char CppWriter::ID = 0;
-
-//===----------------------------------------------------------------------===//
-// External Interface declaration
-//===----------------------------------------------------------------------===//
-
-bool CPPTargetMachine::addPassesToEmitFile(
- PassManagerBase &PM, raw_pwrite_stream &o, CodeGenFileType FileType,
- bool DisableVerify, AnalysisID StartBefore, AnalysisID StartAfter,
- AnalysisID StopAfter, MachineFunctionInitializer *MFInitializer) {
- if (FileType != TargetMachine::CGFT_AssemblyFile)
- return true;
- auto FOut = llvm::make_unique<formatted_raw_ostream>(o);
- PM.add(new CppWriter(std::move(FOut)));
- return false;
-}
diff --git a/lib/Target/CppBackend/CPPTargetMachine.h b/lib/Target/CppBackend/CPPTargetMachine.h
deleted file mode 100644
index 00e402feffbc..000000000000
--- a/lib/Target/CppBackend/CPPTargetMachine.h
+++ /dev/null
@@ -1,44 +0,0 @@
-//===-- CPPTargetMachine.h - TargetMachine for the C++ backend --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the TargetMachine that is used by the C++ backend.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_CPPBACKEND_CPPTARGETMACHINE_H
-#define LLVM_LIB_TARGET_CPPBACKEND_CPPTARGETMACHINE_H
-
-#include "llvm/IR/DataLayout.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
-
-namespace llvm {
-
-class formatted_raw_ostream;
-
-struct CPPTargetMachine : public TargetMachine {
- CPPTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
- StringRef FS, const TargetOptions &Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OL)
- : TargetMachine(T, "", TT, CPU, FS, Options) {}
-
-public:
- bool addPassesToEmitFile(PassManagerBase &PM, raw_pwrite_stream &Out,
- CodeGenFileType FileType, bool DisableVerify,
- AnalysisID StartBefore, AnalysisID StartAfter,
- AnalysisID StopAfter,
- MachineFunctionInitializer *MFInitializer) override;
-};
-
-extern Target TheCppBackendTarget;
-
-} // End llvm namespace
-
-
-#endif
diff --git a/lib/Target/CppBackend/Makefile b/lib/Target/CppBackend/Makefile
deleted file mode 100644
index efc7463fda3d..000000000000
--- a/lib/Target/CppBackend/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/CppBackend/Makefile --- ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMCppBackendCodeGen
-DIRS = TargetInfo
-
-include $(LEVEL)/Makefile.common
-
-CompileCommonOpts += -Wno-format
diff --git a/lib/Target/CppBackend/TargetInfo/CMakeLists.txt b/lib/Target/CppBackend/TargetInfo/CMakeLists.txt
deleted file mode 100644
index d86446f6bc02..000000000000
--- a/lib/Target/CppBackend/TargetInfo/CMakeLists.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-add_llvm_library(LLVMCppBackendInfo
- CppBackendTargetInfo.cpp
- )
diff --git a/lib/Target/CppBackend/TargetInfo/CppBackendTargetInfo.cpp b/lib/Target/CppBackend/TargetInfo/CppBackendTargetInfo.cpp
deleted file mode 100644
index f88d82228ca4..000000000000
--- a/lib/Target/CppBackend/TargetInfo/CppBackendTargetInfo.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-//===-- CppBackendTargetInfo.cpp - CppBackend Target Implementation -------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "CPPTargetMachine.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Support/TargetRegistry.h"
-using namespace llvm;
-
-Target llvm::TheCppBackendTarget;
-
-static bool CppBackend_TripleMatchQuality(Triple::ArchType Arch) {
- // This backend doesn't correspond to any architecture. It must be explicitly
- // selected with -march.
- return false;
-}
-
-extern "C" void LLVMInitializeCppBackendTargetInfo() {
- TargetRegistry::RegisterTarget(TheCppBackendTarget, "cpp",
- "C++ backend",
- &CppBackend_TripleMatchQuality);
-}
-
-extern "C" void LLVMInitializeCppBackendTargetMC() {}
diff --git a/lib/Target/CppBackend/TargetInfo/Makefile b/lib/Target/CppBackend/TargetInfo/Makefile
deleted file mode 100644
index 6e682838daec..000000000000
--- a/lib/Target/CppBackend/TargetInfo/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/CppBackend/TargetInfo/Makefile -----------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMCppBackendInfo
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp b/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
index a8622a96527c..496efbf7374b 100644
--- a/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
+++ b/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
@@ -13,14 +13,13 @@
#include "HexagonRegisterInfo.h"
#include "HexagonTargetStreamer.h"
#include "MCTargetDesc/HexagonBaseInfo.h"
-#include "MCTargetDesc/HexagonMCELFStreamer.h"
+#include "MCTargetDesc/HexagonMCAsmInfo.h"
#include "MCTargetDesc/HexagonMCChecker.h"
+#include "MCTargetDesc/HexagonMCELFStreamer.h"
#include "MCTargetDesc/HexagonMCExpr.h"
#include "MCTargetDesc/HexagonMCShuffler.h"
#include "MCTargetDesc/HexagonMCTargetDesc.h"
-#include "MCTargetDesc/HexagonMCAsmInfo.h"
#include "MCTargetDesc/HexagonShuffler.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
@@ -31,19 +30,19 @@
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
-#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/MC/MCTargetAsmParser.h"
+#include "llvm/MC/MCValue.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Format.h"
-#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
-#include <sstream>
using namespace llvm;
@@ -108,7 +107,7 @@ class HexagonAsmParser : public MCTargetAsmParser {
void canonicalizeImmediates(MCInst &MCI);
bool matchOneInstruction(MCInst &MCB, SMLoc IDLoc,
OperandVector &InstOperands, uint64_t &ErrorInfo,
- bool MatchingInlineAsm, bool &MustExtend);
+ bool MatchingInlineAsm);
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands, MCStreamer &Out,
@@ -117,7 +116,7 @@ class HexagonAsmParser : public MCTargetAsmParser {
unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, unsigned Kind) override;
void OutOfRange(SMLoc IDLoc, long long Val, long long Max);
int processInstruction(MCInst &Inst, OperandVector const &Operands,
- SMLoc IDLoc, bool &MustExtend);
+ SMLoc IDLoc);
// Check if we have an assembler and, if so, set the ELF e_header flags.
void chksetELFHeaderEFlags(unsigned flags) {
@@ -125,6 +124,8 @@ class HexagonAsmParser : public MCTargetAsmParser {
getAssembler()->setELFHeaderEFlags(flags);
}
+ unsigned matchRegister(StringRef Name);
+
/// @name Auto-generated Match Functions
/// {
@@ -150,7 +151,6 @@ public:
}
}
- bool mustExtend(OperandVector &Operands);
bool splitIdentifier(OperandVector &Operands);
bool parseOperand(OperandVector &Operands);
bool parseInstruction(OperandVector &Operands);
@@ -186,7 +186,6 @@ struct HexagonOperand : public MCParsedAsmOperand {
struct ImmTy {
const MCExpr *Val;
- bool MustExtend;
};
struct InstTy {
@@ -243,8 +242,8 @@ public:
bool CheckImmRange(int immBits, int zeroBits, bool isSigned,
bool isRelocatable, bool Extendable) const {
if (Kind == Immediate) {
- const MCExpr *myMCExpr = getImm();
- if (Imm.MustExtend && !Extendable)
+ const MCExpr *myMCExpr = &HexagonMCInstrInfo::getExpr(*getImm());
+ if (HexagonMCInstrInfo::mustExtend(*Imm.Val) && !Extendable)
return false;
int64_t Res;
if (myMCExpr->evaluateAsAbsolute(Res)) {
@@ -278,6 +277,7 @@ public:
bool isf32Ext() const { return false; }
bool iss32Imm() const { return CheckImmRange(32, 0, true, true, false); }
+ bool iss23_2Imm() const { return CheckImmRange(23, 2, true, true, false); }
bool iss8Imm() const { return CheckImmRange(8, 0, true, false, false); }
bool iss8Imm64() const { return CheckImmRange(8, 0, true, true, false); }
bool iss7Imm() const { return CheckImmRange(7, 0, true, false, false); }
@@ -347,7 +347,7 @@ public:
bool isu6_1Ext() const { return CheckImmRange(6 + 26, 1, false, true, true); }
bool isu6_2Ext() const { return CheckImmRange(6 + 26, 2, false, true, true); }
bool isu6_3Ext() const { return CheckImmRange(6 + 26, 3, false, true, true); }
- bool isu32MustExt() const { return isImm() && Imm.MustExtend; }
+ bool isu32MustExt() const { return isImm(); }
void addRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
@@ -361,20 +361,17 @@ public:
void addSignedImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- MCExpr const *Expr = getImm();
+ HexagonMCExpr *Expr =
+ const_cast<HexagonMCExpr *>(cast<HexagonMCExpr>(getImm()));
int64_t Value;
if (!Expr->evaluateAsAbsolute(Value)) {
Inst.addOperand(MCOperand::createExpr(Expr));
return;
}
- int64_t Extended = SignExtend64 (Value, 32);
- if ((Extended < 0) == (Value < 0)) {
- Inst.addOperand(MCOperand::createExpr(Expr));
- return;
- }
-    // Flip bit 33 to signal a signed/unsigned mismatch
- Extended ^= 0x100000000;
- Inst.addOperand(MCOperand::createImm(Extended));
+ int64_t Extended = SignExtend64(Value, 32);
+ if ((Extended < 0) != (Value < 0))
+ Expr->setSignMismatch();
+ Inst.addOperand(MCOperand::createExpr(Expr));
}
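  // A minimal sketch of the test above: for an immediate written as the
  // unsigned value 0x80000000 (positive as an int64_t),
  // SignExtend64(0x80000000, 32) yields -2147483648, so
  // (Extended < 0) != (Value < 0) and the expression is tagged via
  // setSignMismatch() rather than by flipping bit 33 of an immediate.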
void addf32ExtOperands(MCInst &Inst, unsigned N) const {
@@ -384,6 +381,9 @@ public:
void adds32ImmOperands(MCInst &Inst, unsigned N) const {
addSignedImmOperands(Inst, N);
}
+ void adds23_2ImmOperands(MCInst &Inst, unsigned N) const {
+ addSignedImmOperands(Inst, N);
+ }
void adds8ImmOperands(MCInst &Inst, unsigned N) const {
addSignedImmOperands(Inst, N);
}
@@ -553,13 +553,15 @@ public:
void adds4_6ImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ const MCConstantExpr *CE =
+ dyn_cast<MCConstantExpr>(&HexagonMCInstrInfo::getExpr(*getImm()));
Inst.addOperand(MCOperand::createImm(CE->getValue() * 64));
}
void adds3_6ImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ const MCConstantExpr *CE =
+ dyn_cast<MCConstantExpr>(&HexagonMCInstrInfo::getExpr(*getImm()));
Inst.addOperand(MCOperand::createImm(CE->getValue() * 64));
}
@@ -592,7 +594,6 @@ public:
SMLoc E) {
HexagonOperand *Op = new HexagonOperand(Immediate);
Op->Imm.Val = Val;
- Op->Imm.MustExtend = false;
Op->StartLoc = S;
Op->EndLoc = E;
return std::unique_ptr<HexagonOperand>(Op);
@@ -616,9 +617,6 @@ void HexagonOperand::print(raw_ostream &OS) const {
}
}
-/// @name Auto-generated Match Functions
-static unsigned MatchRegisterName(StringRef Name);
-
bool HexagonAsmParser::finishBundle(SMLoc IDLoc, MCStreamer &Out) {
DEBUG(dbgs() << "Bundle:");
DEBUG(MCB.dump_pretty(dbgs()));
@@ -730,11 +728,10 @@ bool HexagonAsmParser::finishBundle(SMLoc IDLoc, MCStreamer &Out) {
bool HexagonAsmParser::matchBundleOptions() {
MCAsmParser &Parser = getParser();
- MCAsmLexer &Lexer = getLexer();
while (true) {
if (!Parser.getTok().is(AsmToken::Colon))
return false;
- Lexer.Lex();
+ Lex();
StringRef Option = Parser.getTok().getString();
if (Option.compare_lower("endloop0") == 0)
HexagonMCInstrInfo::setInnerLoop(MCB);
@@ -746,7 +743,7 @@ bool HexagonAsmParser::matchBundleOptions() {
HexagonMCInstrInfo::setMemStoreReorderEnabled(MCB);
else
return true;
- Lexer.Lex();
+ Lex();
}
}
@@ -759,33 +756,29 @@ void HexagonAsmParser::canonicalizeImmediates(MCInst &MCI) {
for (MCOperand &I : MCI)
if (I.isImm()) {
int64_t Value (I.getImm());
- if ((Value & 0x100000000) != (Value & 0x80000000)) {
- // Detect flipped bit 33 wrt bit 32 and signal warning
- Value ^= 0x100000000;
- if (WarnSignedMismatch)
- Warning (MCI.getLoc(), "Signed/Unsigned mismatch");
- }
- NewInst.addOperand(MCOperand::createExpr(
- MCConstantExpr::create(Value, getContext())));
+ NewInst.addOperand(MCOperand::createExpr(HexagonMCExpr::create(
+ MCConstantExpr::create(Value, getContext()), getContext())));
}
- else
+ else {
+ if (I.isExpr() && cast<HexagonMCExpr>(I.getExpr())->signMismatch() &&
+ WarnSignedMismatch)
+ Warning (MCI.getLoc(), "Signed/Unsigned mismatch");
NewInst.addOperand(I);
+ }
MCI = NewInst;
}
bool HexagonAsmParser::matchOneInstruction(MCInst &MCI, SMLoc IDLoc,
OperandVector &InstOperands,
uint64_t &ErrorInfo,
- bool MatchingInlineAsm,
- bool &MustExtend) {
+ bool MatchingInlineAsm) {
// Perform matching with tablegen asmmatcher generated function
int result =
MatchInstructionImpl(InstOperands, MCI, ErrorInfo, MatchingInlineAsm);
if (result == Match_Success) {
MCI.setLoc(IDLoc);
- MustExtend = mustExtend(InstOperands);
canonicalizeImmediates(MCI);
- result = processInstruction(MCI, InstOperands, IDLoc, MustExtend);
+ result = processInstruction(MCI, InstOperands, IDLoc);
DEBUG(dbgs() << "Insn:");
DEBUG(MCI.dump_pretty(dbgs()));
@@ -823,17 +816,6 @@ bool HexagonAsmParser::matchOneInstruction(MCInst &MCI, SMLoc IDLoc,
llvm_unreachable("Implement any new match types added!");
}
-bool HexagonAsmParser::mustExtend(OperandVector &Operands) {
- unsigned Count = 0;
- for (std::unique_ptr<MCParsedAsmOperand> &i : Operands)
- if (i->isImm())
- if (static_cast<HexagonOperand *>(i.get())->Imm.MustExtend)
- ++Count;
-  // Multiple extenders should have been filtered by iss9Ext et al.
- assert(Count < 2 && "Multiple extenders");
- return Count == 1;
-}
-
bool HexagonAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out,
@@ -865,13 +847,11 @@ bool HexagonAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
return finishBundle(IDLoc, Out);
}
MCInst *SubInst = new (getParser().getContext()) MCInst;
- bool MustExtend = false;
if (matchOneInstruction(*SubInst, IDLoc, Operands, ErrorInfo,
- MatchingInlineAsm, MustExtend))
+ MatchingInlineAsm))
return true;
HexagonMCInstrInfo::extendIfNeeded(
- getParser().getContext(), MCII, MCB, *SubInst,
- HexagonMCInstrInfo::isExtended(MCII, *SubInst) || MustExtend);
+ getParser().getContext(), MCII, MCB, *SubInst);
MCB.addOperand(MCOperand::createInst(SubInst));
if (!InBrackets)
return finishBundle(IDLoc, Out);
@@ -916,7 +896,8 @@ bool HexagonAsmParser::ParseDirectiveSubsection(SMLoc L) {
// end of the section. Only legacy hexagon-gcc-created assembly code
// used negative subsections.
if ((Res < 0) && (Res > -8193))
- Subsection = MCConstantExpr::create(8192 + Res, this->getContext());
+ Subsection = HexagonMCExpr::create(
+ MCConstantExpr::create(8192 + Res, getContext()), getContext());
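+    // A worked example of the mapping above: ".subsection -1" becomes
+    // subsection 8191 (8192 + -1), so legacy negative subsections still
+    // sort after the ordinarily small non-negative ones.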
getStreamer().SubSection(Subsection);
return false;
@@ -1110,7 +1091,7 @@ bool HexagonAsmParser::splitIdentifier(OperandVector &Operands) {
AsmToken const &Token = getParser().getTok();
StringRef String = Token.getString();
SMLoc Loc = Token.getLoc();
- getLexer().Lex();
+ Lex();
do {
std::pair<StringRef, StringRef> HeadTail = String.split('.');
if (!HeadTail.first.empty())
@@ -1144,7 +1125,7 @@ bool HexagonAsmParser::parseOperand(OperandVector &Operands) {
static char const *RParen = ")";
Operands.push_back(HexagonOperand::CreateToken(LParen, Begin));
Operands.push_back(HexagonOperand::CreateReg(Register, Begin, End));
- AsmToken MaybeDotNew = Lexer.getTok();
+ const AsmToken &MaybeDotNew = Lexer.getTok();
if (MaybeDotNew.is(AsmToken::TokenKind::Identifier) &&
MaybeDotNew.getString().equals_lower(".new"))
splitIdentifier(Operands);
@@ -1160,7 +1141,7 @@ bool HexagonAsmParser::parseOperand(OperandVector &Operands) {
Operands.insert(Operands.end () - 1,
HexagonOperand::CreateToken(LParen, Begin));
Operands.push_back(HexagonOperand::CreateReg(Register, Begin, End));
- AsmToken MaybeDotNew = Lexer.getTok();
+ const AsmToken &MaybeDotNew = Lexer.getTok();
if (MaybeDotNew.is(AsmToken::TokenKind::Identifier) &&
MaybeDotNew.getString().equals_lower(".new"))
splitIdentifier(Operands);
@@ -1186,7 +1167,7 @@ bool HexagonAsmParser::isLabel(AsmToken &Token) {
return false;
if (!Token.is(AsmToken::TokenKind::Identifier))
return true;
- if (!MatchRegisterName(String.lower()))
+ if (!matchRegister(String.lower()))
return true;
(void)Second;
assert(Second.is(AsmToken::Colon));
@@ -1197,7 +1178,7 @@ bool HexagonAsmParser::isLabel(AsmToken &Token) {
Collapsed.end());
StringRef Whole = Collapsed;
std::pair<StringRef, StringRef> DotSplit = Whole.split('.');
- if (!MatchRegisterName(DotSplit.first.lower()))
+ if (!matchRegister(DotSplit.first.lower()))
return true;
return false;
}
@@ -1242,7 +1223,7 @@ bool HexagonAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &En
Collapsed.end());
StringRef FullString = Collapsed;
std::pair<StringRef, StringRef> DotSplit = FullString.split('.');
- unsigned DotReg = MatchRegisterName(DotSplit.first.lower());
+ unsigned DotReg = matchRegister(DotSplit.first.lower());
if (DotReg != Hexagon::NoRegister && RegisterMatchesArch(DotReg)) {
if (DotSplit.second.empty()) {
RegNo = DotReg;
@@ -1262,7 +1243,7 @@ bool HexagonAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &En
}
}
std::pair<StringRef, StringRef> ColonSplit = StringRef(FullString).split(':');
- unsigned ColonReg = MatchRegisterName(ColonSplit.first.lower());
+ unsigned ColonReg = matchRegister(ColonSplit.first.lower());
if (ColonReg != Hexagon::NoRegister && RegisterMatchesArch(DotReg)) {
Lexer.UnLex(Lookahead.back());
Lookahead.pop_back();
@@ -1302,7 +1283,7 @@ bool HexagonAsmParser::parseExpression(MCExpr const *& Expr) {
static char const * Comma = ",";
do {
Tokens.emplace_back (Lexer.getTok());
- Lexer.Lex();
+ Lex();
switch (Tokens.back().getKind())
{
case AsmToken::TokenKind::Hash:
@@ -1333,11 +1314,12 @@ bool HexagonAsmParser::parseExpressionOrOperand(OperandVector &Operands) {
if (implicitExpressionLocation(Operands)) {
MCAsmParser &Parser = getParser();
SMLoc Loc = Parser.getLexer().getLoc();
- std::unique_ptr<HexagonOperand> Expr =
- HexagonOperand::CreateImm(nullptr, Loc, Loc);
- MCExpr const *& Val = Expr->Imm.Val;
- Operands.push_back(std::move(Expr));
- return parseExpression(Val);
+ MCExpr const *Expr = nullptr;
+ bool Error = parseExpression(Expr);
+ Expr = HexagonMCExpr::create(Expr, getContext());
+ if (!Error)
+ Operands.push_back(HexagonOperand::CreateImm(Expr, Loc, Loc));
+ return Error;
}
return parseOperand(Operands);
}
@@ -1350,7 +1332,7 @@ bool HexagonAsmParser::parseInstruction(OperandVector &Operands) {
AsmToken const &Token = Parser.getTok();
switch (Token.getKind()) {
case AsmToken::EndOfStatement: {
- Lexer.Lex();
+ Lex();
return false;
}
case AsmToken::LCurly: {
@@ -1358,19 +1340,19 @@ bool HexagonAsmParser::parseInstruction(OperandVector &Operands) {
return true;
Operands.push_back(
HexagonOperand::CreateToken(Token.getString(), Token.getLoc()));
- Lexer.Lex();
+ Lex();
return false;
}
case AsmToken::RCurly: {
if (Operands.empty()) {
Operands.push_back(
HexagonOperand::CreateToken(Token.getString(), Token.getLoc()));
- Lexer.Lex();
+ Lex();
}
return false;
}
case AsmToken::Comma: {
- Lexer.Lex();
+ Lex();
continue;
}
case AsmToken::EqualEqual:
@@ -1383,30 +1365,28 @@ bool HexagonAsmParser::parseInstruction(OperandVector &Operands) {
Token.getString().substr(0, 1), Token.getLoc()));
Operands.push_back(HexagonOperand::CreateToken(
Token.getString().substr(1, 1), Token.getLoc()));
- Lexer.Lex();
+ Lex();
continue;
}
case AsmToken::Hash: {
bool MustNotExtend = false;
bool ImplicitExpression = implicitExpressionLocation(Operands);
- std::unique_ptr<HexagonOperand> Expr = HexagonOperand::CreateImm(
- nullptr, Lexer.getLoc(), Lexer.getLoc());
+ SMLoc ExprLoc = Lexer.getLoc();
if (!ImplicitExpression)
Operands.push_back(
HexagonOperand::CreateToken(Token.getString(), Token.getLoc()));
- Lexer.Lex();
+ Lex();
bool MustExtend = false;
bool HiOnly = false;
bool LoOnly = false;
if (Lexer.is(AsmToken::Hash)) {
- Lexer.Lex();
+ Lex();
MustExtend = true;
} else if (ImplicitExpression)
MustNotExtend = true;
AsmToken const &Token = Parser.getTok();
if (Token.is(AsmToken::Identifier)) {
StringRef String = Token.getString();
- AsmToken IDToken = Token;
if (String.lower() == "hi") {
HiOnly = true;
} else if (String.lower() == "lo") {
@@ -1418,27 +1398,46 @@ bool HexagonAsmParser::parseInstruction(OperandVector &Operands) {
HiOnly = false;
LoOnly = false;
} else {
- Lexer.Lex();
+ Lex();
}
}
}
- if (parseExpression(Expr->Imm.Val))
+ MCExpr const *Expr = nullptr;
+ if (parseExpression(Expr))
return true;
int64_t Value;
MCContext &Context = Parser.getContext();
- assert(Expr->Imm.Val != nullptr);
- if (Expr->Imm.Val->evaluateAsAbsolute(Value)) {
+ assert(Expr != nullptr);
+ if (Expr->evaluateAsAbsolute(Value)) {
if (HiOnly)
- Expr->Imm.Val = MCBinaryExpr::createLShr(
- Expr->Imm.Val, MCConstantExpr::create(16, Context), Context);
+ Expr = MCBinaryExpr::createLShr(
+ Expr, MCConstantExpr::create(16, Context), Context);
if (HiOnly || LoOnly)
- Expr->Imm.Val = MCBinaryExpr::createAnd(
- Expr->Imm.Val, MCConstantExpr::create(0xffff, Context), Context);
+ Expr = MCBinaryExpr::createAnd(Expr,
+ MCConstantExpr::create(0xffff, Context),
+ Context);
+ } else {
+ MCValue Value;
+ if (Expr->evaluateAsRelocatable(Value, nullptr, nullptr)) {
+ if (!Value.isAbsolute()) {
+ switch(Value.getAccessVariant()) {
+ case MCSymbolRefExpr::VariantKind::VK_TPREL:
+ case MCSymbolRefExpr::VariantKind::VK_DTPREL:
+            // Don't lazily extend these expression variants
+ MustNotExtend = !MustExtend;
+ break;
+ default:
+ break;
+ }
+ }
+ }
}
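      // A sketch of the forms handled above (assembly syntax assumed):
      //   r0 = #imm      -> may be lazily extended if the value requires it
      //   r0 = ##imm     -> '##' forces an immediate extender (MustExtend)
      //   r0 = #HI(x)    -> (x >> 16) & 0xffff when x folds to a constant
      //   r0 = #LO(x)    -> x & 0xffff likewise
      // TPREL/DTPREL-relocated operands are marked must-not-extend unless
      // '##' was written explicitly.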
- if (MustNotExtend)
- Expr->Imm.Val = HexagonNoExtendOperand::Create(Expr->Imm.Val, Context);
- Expr->Imm.MustExtend = MustExtend;
- Operands.push_back(std::move(Expr));
+ Expr = HexagonMCExpr::create(Expr, Context);
+ HexagonMCInstrInfo::setMustNotExtend(*Expr, MustNotExtend);
+ HexagonMCInstrInfo::setMustExtend(*Expr, MustExtend);
+ std::unique_ptr<HexagonOperand> Operand =
+ HexagonOperand::CreateImm(Expr, ExprLoc, ExprLoc);
+ Operands.push_back(std::move(Operand));
continue;
}
default:
@@ -1524,7 +1523,7 @@ void HexagonAsmParser::OutOfRange(SMLoc IDLoc, long long Val, long long Max) {
int HexagonAsmParser::processInstruction(MCInst &Inst,
OperandVector const &Operands,
- SMLoc IDLoc, bool &MustExtend) {
+ SMLoc IDLoc) {
MCContext &Context = getParser().getContext();
const MCRegisterInfo *RI = getContext().getRegisterInfo();
std::string r = "r";
@@ -1536,6 +1535,18 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
default:
break;
+ case Hexagon::A2_iconst: {
+ Inst.setOpcode(Hexagon::A2_addi);
+ MCOperand Reg = Inst.getOperand(0);
+ MCOperand S16 = Inst.getOperand(1);
+ HexagonMCInstrInfo::setMustNotExtend(*S16.getExpr());
+ HexagonMCInstrInfo::setS23_2_reloc(*S16.getExpr());
+ Inst.clear();
+ Inst.addOperand(Reg);
+ Inst.addOperand(MCOperand::createReg(Hexagon::R0));
+ Inst.addOperand(S16);
+ break;
+ }
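+  // i.e. an "rX = iconst(#imm)" pseudo (mnemonic assumed) is rewritten in
+  // place to "rX = add(r0, #imm)", with the immediate marked must-not-extend
+  // and carrying the S23_2 relocation.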
case Hexagon::M4_mpyrr_addr:
case Hexagon::S4_addi_asl_ri:
case Hexagon::S4_addi_lsr_ri:
@@ -1555,8 +1566,8 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
case Hexagon::C2_cmpgei: {
MCOperand &MO = Inst.getOperand(2);
- MO.setExpr(MCBinaryExpr::createSub(
- MO.getExpr(), MCConstantExpr::create(1, Context), Context));
+ MO.setExpr(HexagonMCExpr::create(MCBinaryExpr::createSub(
+ MO.getExpr(), MCConstantExpr::create(1, Context), Context), Context));
Inst.setOpcode(Hexagon::C2_cmpgti);
break;
}
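  // e.g. "p0 = cmp.ge(r1, #5)" is emitted as "p0 = cmp.gt(r1, #4)"; the
  // unsigned sibling below rewrites cmp.geu the same way with #u8-1.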
@@ -1577,49 +1588,24 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
TmpInst.addOperand(Rt);
Inst = TmpInst;
} else {
- MO.setExpr(MCBinaryExpr::createSub(
- MO.getExpr(), MCConstantExpr::create(1, Context), Context));
+ MO.setExpr(HexagonMCExpr::create(MCBinaryExpr::createSub(
+ MO.getExpr(), MCConstantExpr::create(1, Context), Context), Context));
Inst.setOpcode(Hexagon::C2_cmpgtui);
}
break;
}
- case Hexagon::J2_loop1r:
- case Hexagon::J2_loop1i:
- case Hexagon::J2_loop0r:
- case Hexagon::J2_loop0i: {
- MCOperand &MO = Inst.getOperand(0);
- // Loop has different opcodes for extended vs not extended, but we should
- // not use the other opcode as it is a legacy artifact of TD files.
- int64_t Value;
- if (MO.getExpr()->evaluateAsAbsolute(Value)) {
- // if the operand can fit within a 7:2 field
- if (Value < (1 << 8) && Value >= -(1 << 8)) {
- SMLoc myLoc = Operands[2]->getStartLoc();
- // # is left in startLoc in the case of ##
- // If '##' found then force extension.
- if (*myLoc.getPointer() == '#') {
- MustExtend = true;
- break;
- }
- } else {
- // If immediate and out of 7:2 range.
- MustExtend = true;
- }
- }
- break;
- }
// Translate a "$Rdd = $Rss" to "$Rdd = combine($Rs, $Rt)"
case Hexagon::A2_tfrp: {
MCOperand &MO = Inst.getOperand(1);
unsigned int RegPairNum = RI->getEncodingValue(MO.getReg());
- std::string R1 = r + llvm::utostr_32(RegPairNum + 1);
+ std::string R1 = r + llvm::utostr(RegPairNum + 1);
StringRef Reg1(R1);
- MO.setReg(MatchRegisterName(Reg1));
+ MO.setReg(matchRegister(Reg1));
// Add a new operand for the second register in the pair.
- std::string R2 = r + llvm::utostr_32(RegPairNum);
+ std::string R2 = r + llvm::utostr(RegPairNum);
StringRef Reg2(R2);
- Inst.addOperand(MCOperand::createReg(MatchRegisterName(Reg2)));
+ Inst.addOperand(MCOperand::createReg(matchRegister(Reg2)));
Inst.setOpcode(Hexagon::A2_combinew);
break;
}
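  // e.g. "r1:0 = r5:4": the pair r5:4 encodes as 4 (its even base register,
  // assumed), giving "r1:0 = combine(r5, r4)".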
@@ -1628,13 +1614,13 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
case Hexagon::A2_tfrpf: {
MCOperand &MO = Inst.getOperand(2);
unsigned int RegPairNum = RI->getEncodingValue(MO.getReg());
- std::string R1 = r + llvm::utostr_32(RegPairNum + 1);
+ std::string R1 = r + llvm::utostr(RegPairNum + 1);
StringRef Reg1(R1);
- MO.setReg(MatchRegisterName(Reg1));
+ MO.setReg(matchRegister(Reg1));
// Add a new operand for the second register in the pair.
- std::string R2 = r + llvm::utostr_32(RegPairNum);
+ std::string R2 = r + llvm::utostr(RegPairNum);
StringRef Reg2(R2);
- Inst.addOperand(MCOperand::createReg(MatchRegisterName(Reg2)));
+ Inst.addOperand(MCOperand::createReg(matchRegister(Reg2)));
Inst.setOpcode((Inst.getOpcode() == Hexagon::A2_tfrpt)
? Hexagon::C2_ccombinewt
: Hexagon::C2_ccombinewf);
@@ -1644,19 +1630,32 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
case Hexagon::A2_tfrpfnew: {
MCOperand &MO = Inst.getOperand(2);
unsigned int RegPairNum = RI->getEncodingValue(MO.getReg());
- std::string R1 = r + llvm::utostr_32(RegPairNum + 1);
+ std::string R1 = r + llvm::utostr(RegPairNum + 1);
StringRef Reg1(R1);
- MO.setReg(MatchRegisterName(Reg1));
+ MO.setReg(matchRegister(Reg1));
// Add a new operand for the second register in the pair.
- std::string R2 = r + llvm::utostr_32(RegPairNum);
+ std::string R2 = r + llvm::utostr(RegPairNum);
StringRef Reg2(R2);
- Inst.addOperand(MCOperand::createReg(MatchRegisterName(Reg2)));
+ Inst.addOperand(MCOperand::createReg(matchRegister(Reg2)));
Inst.setOpcode((Inst.getOpcode() == Hexagon::A2_tfrptnew)
? Hexagon::C2_ccombinewnewt
: Hexagon::C2_ccombinewnewf);
break;
}
+ // Translate a "$Vdd = $Vss" to "$Vdd = vcombine($Vs, $Vt)"
+ case Hexagon::HEXAGON_V6_vassignpair: {
+ MCOperand &MO = Inst.getOperand(1);
+ unsigned int RegPairNum = RI->getEncodingValue(MO.getReg());
+ std::string R1 = v + llvm::utostr(RegPairNum + 1);
+ MO.setReg(MatchRegisterName(R1));
+ // Add a new operand for the second register in the pair.
+ std::string R2 = v + llvm::utostr(RegPairNum);
+ Inst.addOperand(MCOperand::createReg(MatchRegisterName(R2)));
+ Inst.setOpcode(Hexagon::V6_vcombine);
+ break;
+ }
+
// Translate a "$Rx = CONST32(#imm)" to "$Rx = memw(gp+#LABEL) "
case Hexagon::CONST32:
case Hexagon::CONST32_Float_Real:
@@ -1773,7 +1772,8 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
MCOperand &MO = Inst.getOperand(1);
int64_t Value;
int sVal = (MO.getExpr()->evaluateAsAbsolute(Value) && Value < 0) ? -1 : 0;
- MCOperand imm(MCOperand::createExpr(MCConstantExpr::create(sVal, Context)));
+ MCOperand imm(MCOperand::createExpr(
+ HexagonMCExpr::create(MCConstantExpr::create(sVal, Context), Context)));
Inst = makeCombineInst(Hexagon::A2_combineii, Rdd, imm, MO);
break;
}
@@ -1784,18 +1784,19 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
MCOperand &MO = Inst.getOperand(1);
int64_t Value;
if (MO.getExpr()->evaluateAsAbsolute(Value)) {
- unsigned long long u64 = Value;
- signed int s8 = (u64 >> 32) & 0xFFFFFFFF;
- if (s8 < -128 || s8 > 127)
+ int s8 = Hi_32(Value);
+ if (!isInt<8>(s8))
OutOfRange(IDLoc, s8, -128);
- MCOperand imm(MCOperand::createExpr(
- MCConstantExpr::create(s8, Context))); // upper 32
- MCOperand imm2(MCOperand::createExpr(
- MCConstantExpr::create(u64 & 0xFFFFFFFF, Context))); // lower 32
+ MCOperand imm(MCOperand::createExpr(HexagonMCExpr::create(
+ MCConstantExpr::create(s8, Context), Context))); // upper 32
+ auto Expr = HexagonMCExpr::create(
+ MCConstantExpr::create(Lo_32(Value), Context), Context);
+ HexagonMCInstrInfo::setMustExtend(*Expr, HexagonMCInstrInfo::mustExtend(*MO.getExpr()));
+ MCOperand imm2(MCOperand::createExpr(Expr)); // lower 32
Inst = makeCombineInst(Hexagon::A4_combineii, Rdd, imm, imm2);
} else {
- MCOperand imm(MCOperand::createExpr(
- MCConstantExpr::create(0, Context))); // upper 32
+ MCOperand imm(MCOperand::createExpr(HexagonMCExpr::create(
+ MCConstantExpr::create(0, Context), Context))); // upper 32
Inst = makeCombineInst(Hexagon::A4_combineii, Rdd, imm, MO);
}
break;
@@ -1843,8 +1844,8 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
MCOperand &Rs = Inst.getOperand(2);
MCOperand &Imm4 = Inst.getOperand(3);
MCOperand &Imm6 = Inst.getOperand(4);
- Imm6.setExpr(MCBinaryExpr::createSub(
- Imm6.getExpr(), MCConstantExpr::create(1, Context), Context));
+ Imm6.setExpr(HexagonMCExpr::create(MCBinaryExpr::createSub(
+ Imm6.getExpr(), MCConstantExpr::create(1, Context), Context), Context));
TmpInst.setOpcode(Hexagon::S2_tableidxh);
TmpInst.addOperand(Rx);
TmpInst.addOperand(_dst_);
@@ -1862,8 +1863,8 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
MCOperand &Rs = Inst.getOperand(2);
MCOperand &Imm4 = Inst.getOperand(3);
MCOperand &Imm6 = Inst.getOperand(4);
- Imm6.setExpr(MCBinaryExpr::createSub(
- Imm6.getExpr(), MCConstantExpr::create(2, Context), Context));
+ Imm6.setExpr(HexagonMCExpr::create(MCBinaryExpr::createSub(
+ Imm6.getExpr(), MCConstantExpr::create(2, Context), Context), Context));
TmpInst.setOpcode(Hexagon::S2_tableidxw);
TmpInst.addOperand(Rx);
TmpInst.addOperand(_dst_);
@@ -1881,8 +1882,8 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
MCOperand &Rs = Inst.getOperand(2);
MCOperand &Imm4 = Inst.getOperand(3);
MCOperand &Imm6 = Inst.getOperand(4);
- Imm6.setExpr(MCBinaryExpr::createSub(
- Imm6.getExpr(), MCConstantExpr::create(3, Context), Context));
+ Imm6.setExpr(HexagonMCExpr::create(MCBinaryExpr::createSub(
+ Imm6.getExpr(), MCConstantExpr::create(3, Context), Context), Context));
TmpInst.setOpcode(Hexagon::S2_tableidxd);
TmpInst.addOperand(Rx);
TmpInst.addOperand(_dst_);
@@ -1903,12 +1904,14 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
MCOperand &Rs = Inst.getOperand(1);
MCOperand &Imm = Inst.getOperand(2);
int64_t Value;
- bool Absolute = Imm.getExpr()->evaluateAsAbsolute(Value);
+ MCExpr const &Expr = *Imm.getExpr();
+ bool Absolute = Expr.evaluateAsAbsolute(Value);
assert(Absolute);
(void)Absolute;
- if (!MustExtend) {
+ if (!HexagonMCInstrInfo::mustExtend(Expr)) {
if (Value < 0 && Value > -256) {
- Imm.setExpr(MCConstantExpr::create(Value * -1, Context));
+ Imm.setExpr(HexagonMCExpr::create(
+ MCConstantExpr::create(Value * -1, Context), Context));
TmpInst.setOpcode(Hexagon::M2_mpysin);
} else if (Value < 256 && Value >= 0)
TmpInst.setOpcode(Hexagon::M2_mpysip);
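        // e.g. "r0 = +mpyi(r1, #-3)" (syntax assumed) selects M2_mpysin with
        // the negated #3, while #0..#255 selects M2_mpysip.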
@@ -1941,8 +1944,10 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
TmpInst.addOperand(Rd);
TmpInst.addOperand(Rs);
} else {
- Imm.setExpr(MCBinaryExpr::createSub(
- Imm.getExpr(), MCConstantExpr::create(1, Context), Context));
+ Imm.setExpr(HexagonMCExpr::create(
+ MCBinaryExpr::createSub(Imm.getExpr(),
+ MCConstantExpr::create(1, Context), Context),
+ Context));
TmpInst.setOpcode(Hexagon::S2_asr_i_r_rnd);
MCOperand &Rd = Inst.getOperand(0);
MCOperand &Rs = Inst.getOperand(1);
@@ -1965,20 +1970,22 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
if (Value == 0) { // convert to $Rdd = combine ($Rs[0], $Rs[1])
MCInst TmpInst;
unsigned int RegPairNum = RI->getEncodingValue(Rss.getReg());
- std::string R1 = r + llvm::utostr_32(RegPairNum + 1);
+ std::string R1 = r + llvm::utostr(RegPairNum + 1);
StringRef Reg1(R1);
- Rss.setReg(MatchRegisterName(Reg1));
+ Rss.setReg(matchRegister(Reg1));
// Add a new operand for the second register in the pair.
- std::string R2 = r + llvm::utostr_32(RegPairNum);
+ std::string R2 = r + llvm::utostr(RegPairNum);
StringRef Reg2(R2);
TmpInst.setOpcode(Hexagon::A2_combinew);
TmpInst.addOperand(Rdd);
TmpInst.addOperand(Rss);
- TmpInst.addOperand(MCOperand::createReg(MatchRegisterName(Reg2)));
+ TmpInst.addOperand(MCOperand::createReg(matchRegister(Reg2)));
Inst = TmpInst;
} else {
- Imm.setExpr(MCBinaryExpr::createSub(
- Imm.getExpr(), MCConstantExpr::create(1, Context), Context));
+ Imm.setExpr(HexagonMCExpr::create(
+ MCBinaryExpr::createSub(Imm.getExpr(),
+ MCConstantExpr::create(1, Context), Context),
+ Context));
Inst.setOpcode(Hexagon::S2_asr_i_p_rnd);
}
break;
@@ -1990,15 +1997,15 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
if (RegNum & 1) { // Odd mapped to raw:hi, regpair is rodd:odd-1, like r3:2
Inst.setOpcode(Hexagon::A4_boundscheck_hi);
std::string Name =
- r + llvm::utostr_32(RegNum) + Colon + llvm::utostr_32(RegNum - 1);
+ r + llvm::utostr(RegNum) + Colon + llvm::utostr(RegNum - 1);
StringRef RegPair = Name;
- Rs.setReg(MatchRegisterName(RegPair));
+ Rs.setReg(matchRegister(RegPair));
} else { // raw:lo
Inst.setOpcode(Hexagon::A4_boundscheck_lo);
std::string Name =
- r + llvm::utostr_32(RegNum + 1) + Colon + llvm::utostr_32(RegNum);
+ r + llvm::utostr(RegNum + 1) + Colon + llvm::utostr(RegNum);
StringRef RegPair = Name;
- Rs.setReg(MatchRegisterName(RegPair));
+ Rs.setReg(matchRegister(RegPair));
}
break;
}
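  // e.g. boundscheck on r5 (odd) becomes A4_boundscheck_hi over the pair
  // r5:4, while r4 (even) becomes A4_boundscheck_lo over the same pair.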
@@ -2009,15 +2016,15 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
if (RegNum & 1) { // Odd mapped to raw:hi
Inst.setOpcode(Hexagon::A2_addsph);
std::string Name =
- r + llvm::utostr_32(RegNum) + Colon + llvm::utostr_32(RegNum - 1);
+ r + llvm::utostr(RegNum) + Colon + llvm::utostr(RegNum - 1);
StringRef RegPair = Name;
- Rs.setReg(MatchRegisterName(RegPair));
+ Rs.setReg(matchRegister(RegPair));
} else { // Even mapped raw:lo
Inst.setOpcode(Hexagon::A2_addspl);
std::string Name =
- r + llvm::utostr_32(RegNum + 1) + Colon + llvm::utostr_32(RegNum);
+ r + llvm::utostr(RegNum + 1) + Colon + llvm::utostr(RegNum);
StringRef RegPair = Name;
- Rs.setReg(MatchRegisterName(RegPair));
+ Rs.setReg(matchRegister(RegPair));
}
break;
}
@@ -2028,15 +2035,15 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
if (RegNum & 1) { // Odd mapped to sat:raw:hi
Inst.setOpcode(Hexagon::M2_vrcmpys_s1_h);
std::string Name =
- r + llvm::utostr_32(RegNum) + Colon + llvm::utostr_32(RegNum - 1);
+ r + llvm::utostr(RegNum) + Colon + llvm::utostr(RegNum - 1);
StringRef RegPair = Name;
- Rt.setReg(MatchRegisterName(RegPair));
+ Rt.setReg(matchRegister(RegPair));
} else { // Even mapped sat:raw:lo
Inst.setOpcode(Hexagon::M2_vrcmpys_s1_l);
std::string Name =
- r + llvm::utostr_32(RegNum + 1) + Colon + llvm::utostr_32(RegNum);
+ r + llvm::utostr(RegNum + 1) + Colon + llvm::utostr(RegNum);
StringRef RegPair = Name;
- Rt.setReg(MatchRegisterName(RegPair));
+ Rt.setReg(matchRegister(RegPair));
}
break;
}
@@ -2050,15 +2057,15 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
if (RegNum & 1) { // Odd mapped to sat:raw:hi
TmpInst.setOpcode(Hexagon::M2_vrcmpys_acc_s1_h);
std::string Name =
- r + llvm::utostr_32(RegNum) + Colon + llvm::utostr_32(RegNum - 1);
+ r + llvm::utostr(RegNum) + Colon + llvm::utostr(RegNum - 1);
StringRef RegPair = Name;
- Rt.setReg(MatchRegisterName(RegPair));
+ Rt.setReg(matchRegister(RegPair));
} else { // Even mapped sat:raw:lo
TmpInst.setOpcode(Hexagon::M2_vrcmpys_acc_s1_l);
std::string Name =
- r + llvm::utostr_32(RegNum + 1) + Colon + llvm::utostr_32(RegNum);
+ r + llvm::utostr(RegNum + 1) + Colon + llvm::utostr(RegNum);
StringRef RegPair = Name;
- Rt.setReg(MatchRegisterName(RegPair));
+ Rt.setReg(matchRegister(RegPair));
}
// Registers are in different positions
TmpInst.addOperand(Rxx);
@@ -2075,15 +2082,15 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
if (RegNum & 1) { // Odd mapped to rnd:sat:raw:hi
Inst.setOpcode(Hexagon::M2_vrcmpys_s1rp_h);
std::string Name =
- r + llvm::utostr_32(RegNum) + Colon + llvm::utostr_32(RegNum - 1);
+ r + llvm::utostr(RegNum) + Colon + llvm::utostr(RegNum - 1);
StringRef RegPair = Name;
- Rt.setReg(MatchRegisterName(RegPair));
+ Rt.setReg(matchRegister(RegPair));
} else { // Even mapped rnd:sat:raw:lo
Inst.setOpcode(Hexagon::M2_vrcmpys_s1rp_l);
std::string Name =
- r + llvm::utostr_32(RegNum + 1) + Colon + llvm::utostr_32(RegNum);
+ r + llvm::utostr(RegNum + 1) + Colon + llvm::utostr(RegNum);
StringRef RegPair = Name;
- Rt.setReg(MatchRegisterName(RegPair));
+ Rt.setReg(matchRegister(RegPair));
}
break;
}
@@ -2097,8 +2104,10 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
if (Value == 0)
Inst.setOpcode(Hexagon::S2_vsathub);
else {
- Imm.setExpr(MCBinaryExpr::createSub(
- Imm.getExpr(), MCConstantExpr::create(1, Context), Context));
+ Imm.setExpr(HexagonMCExpr::create(
+ MCBinaryExpr::createSub(Imm.getExpr(),
+ MCConstantExpr::create(1, Context), Context),
+ Context));
Inst.setOpcode(Hexagon::S5_asrhub_rnd_sat);
}
break;
@@ -2115,20 +2124,22 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
if (Value == 0) {
MCInst TmpInst;
unsigned int RegPairNum = RI->getEncodingValue(Rss.getReg());
- std::string R1 = r + llvm::utostr_32(RegPairNum + 1);
+ std::string R1 = r + llvm::utostr(RegPairNum + 1);
StringRef Reg1(R1);
- Rss.setReg(MatchRegisterName(Reg1));
+ Rss.setReg(matchRegister(Reg1));
// Add a new operand for the second register in the pair.
- std::string R2 = r + llvm::utostr_32(RegPairNum);
+ std::string R2 = r + llvm::utostr(RegPairNum);
StringRef Reg2(R2);
TmpInst.setOpcode(Hexagon::A2_combinew);
TmpInst.addOperand(Rdd);
TmpInst.addOperand(Rss);
- TmpInst.addOperand(MCOperand::createReg(MatchRegisterName(Reg2)));
+ TmpInst.addOperand(MCOperand::createReg(matchRegister(Reg2)));
Inst = TmpInst;
} else {
- Imm.setExpr(MCBinaryExpr::createSub(
- Imm.getExpr(), MCConstantExpr::create(1, Context), Context));
+ Imm.setExpr(HexagonMCExpr::create(
+ MCBinaryExpr::createSub(Imm.getExpr(),
+ MCConstantExpr::create(1, Context), Context),
+ Context));
Inst.setOpcode(Hexagon::S5_vasrhrnd);
}
break;
@@ -2140,8 +2151,8 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
MCOperand &Rs = Inst.getOperand(1);
TmpInst.setOpcode(Hexagon::A2_subri);
TmpInst.addOperand(Rd);
- TmpInst.addOperand(
- MCOperand::createExpr(MCConstantExpr::create(-1, Context)));
+ TmpInst.addOperand(MCOperand::createExpr(
+ HexagonMCExpr::create(MCConstantExpr::create(-1, Context), Context)));
TmpInst.addOperand(Rs);
Inst = TmpInst;
break;
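      // i.e. "r0 = not(r1)" is accepted by emitting "r0 = sub(#-1, r1)",
      // which computes the same bitwise complement (~x == -1 - x).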
@@ -2150,3 +2161,10 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
return Match_Success;
}
+
+
+unsigned HexagonAsmParser::matchRegister(StringRef Name) {
+ if (unsigned Reg = MatchRegisterName(Name))
+ return Reg;
+ return MatchRegisterAltName(Name);
+}
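+// e.g. "sp" resolves through MatchRegisterAltName to the same register as
+// "r29" (Hexagon's stack-pointer alias), assuming the alt-name table that
+// ShouldEmitMatchRegisterAltName in Hexagon.td makes TableGen emit.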
diff --git a/lib/Target/Hexagon/AsmParser/Makefile b/lib/Target/Hexagon/AsmParser/Makefile
deleted file mode 100644
index 0aa0b4140c3e..000000000000
--- a/lib/Target/Hexagon/AsmParser/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/Hexagon/AsmParser/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMHexagonAsmParser
-
-# Hack: we need to include 'main' Hexagon target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/Hexagon/BitTracker.cpp b/lib/Target/Hexagon/BitTracker.cpp
index ea96eb0ee10a..d052a835fbd8 100644
--- a/lib/Target/Hexagon/BitTracker.cpp
+++ b/lib/Target/Hexagon/BitTracker.cpp
@@ -84,87 +84,89 @@ namespace {
}
}
-raw_ostream &llvm::operator<<(raw_ostream &OS, const BT::BitValue &BV) {
- switch (BV.Type) {
- case BT::BitValue::Top:
- OS << 'T';
- break;
- case BT::BitValue::Zero:
- OS << '0';
- break;
- case BT::BitValue::One:
- OS << '1';
- break;
- case BT::BitValue::Ref:
- OS << printv(BV.RefI.Reg) << '[' << BV.RefI.Pos << ']';
- break;
+namespace llvm {
+ raw_ostream &operator<<(raw_ostream &OS, const BT::BitValue &BV) {
+ switch (BV.Type) {
+ case BT::BitValue::Top:
+ OS << 'T';
+ break;
+ case BT::BitValue::Zero:
+ OS << '0';
+ break;
+ case BT::BitValue::One:
+ OS << '1';
+ break;
+ case BT::BitValue::Ref:
+ OS << printv(BV.RefI.Reg) << '[' << BV.RefI.Pos << ']';
+ break;
+ }
+ return OS;
}
- return OS;
-}
-raw_ostream &llvm::operator<<(raw_ostream &OS, const BT::RegisterCell &RC) {
- unsigned n = RC.Bits.size();
- OS << "{ w:" << n;
- // Instead of printing each bit value individually, try to group them
- // into logical segments, such as sequences of 0 or 1 bits or references
- // to consecutive bits (e.g. "bits 3-5 are same as bits 7-9 of reg xyz").
- // "Start" will be the index of the beginning of the most recent segment.
- unsigned Start = 0;
- bool SeqRef = false; // A sequence of refs to consecutive bits.
- bool ConstRef = false; // A sequence of refs to the same bit.
-
- for (unsigned i = 1, n = RC.Bits.size(); i < n; ++i) {
- const BT::BitValue &V = RC[i];
- const BT::BitValue &SV = RC[Start];
- bool IsRef = (V.Type == BT::BitValue::Ref);
- // If the current value is the same as Start, skip to the next one.
- if (!IsRef && V == SV)
- continue;
- if (IsRef && SV.Type == BT::BitValue::Ref && V.RefI.Reg == SV.RefI.Reg) {
- if (Start+1 == i) {
- SeqRef = (V.RefI.Pos == SV.RefI.Pos+1);
- ConstRef = (V.RefI.Pos == SV.RefI.Pos);
- }
- if (SeqRef && V.RefI.Pos == SV.RefI.Pos+(i-Start))
- continue;
- if (ConstRef && V.RefI.Pos == SV.RefI.Pos)
+ raw_ostream &operator<<(raw_ostream &OS, const BT::RegisterCell &RC) {
+ unsigned n = RC.Bits.size();
+ OS << "{ w:" << n;
+ // Instead of printing each bit value individually, try to group them
+ // into logical segments, such as sequences of 0 or 1 bits or references
+ // to consecutive bits (e.g. "bits 3-5 are same as bits 7-9 of reg xyz").
+ // "Start" will be the index of the beginning of the most recent segment.
+ unsigned Start = 0;
+ bool SeqRef = false; // A sequence of refs to consecutive bits.
+ bool ConstRef = false; // A sequence of refs to the same bit.
+
+ for (unsigned i = 1, n = RC.Bits.size(); i < n; ++i) {
+ const BT::BitValue &V = RC[i];
+ const BT::BitValue &SV = RC[Start];
+ bool IsRef = (V.Type == BT::BitValue::Ref);
+ // If the current value is the same as Start, skip to the next one.
+ if (!IsRef && V == SV)
continue;
+ if (IsRef && SV.Type == BT::BitValue::Ref && V.RefI.Reg == SV.RefI.Reg) {
+ if (Start+1 == i) {
+ SeqRef = (V.RefI.Pos == SV.RefI.Pos+1);
+ ConstRef = (V.RefI.Pos == SV.RefI.Pos);
+ }
+ if (SeqRef && V.RefI.Pos == SV.RefI.Pos+(i-Start))
+ continue;
+ if (ConstRef && V.RefI.Pos == SV.RefI.Pos)
+ continue;
+ }
+
+ // The current value is different. Print the previous one and reset
+ // the Start.
+ OS << " [" << Start;
+ unsigned Count = i - Start;
+ if (Count == 1) {
+ OS << "]:" << SV;
+ } else {
+ OS << '-' << i-1 << "]:";
+ if (SV.Type == BT::BitValue::Ref && SeqRef)
+ OS << printv(SV.RefI.Reg) << '[' << SV.RefI.Pos << '-'
+ << SV.RefI.Pos+(Count-1) << ']';
+ else
+ OS << SV;
+ }
+ Start = i;
+ SeqRef = ConstRef = false;
}
- // The current value is different. Print the previous one and reset
- // the Start.
OS << " [" << Start;
- unsigned Count = i - Start;
- if (Count == 1) {
- OS << "]:" << SV;
+ unsigned Count = n - Start;
+ if (n-Start == 1) {
+ OS << "]:" << RC[Start];
} else {
- OS << '-' << i-1 << "]:";
+ OS << '-' << n-1 << "]:";
+ const BT::BitValue &SV = RC[Start];
if (SV.Type == BT::BitValue::Ref && SeqRef)
OS << printv(SV.RefI.Reg) << '[' << SV.RefI.Pos << '-'
<< SV.RefI.Pos+(Count-1) << ']';
else
OS << SV;
}
- Start = i;
- SeqRef = ConstRef = false;
- }
+ OS << " }";
- OS << " [" << Start;
- unsigned Count = n - Start;
- if (n-Start == 1) {
- OS << "]:" << RC[Start];
- } else {
- OS << '-' << n-1 << "]:";
- const BT::BitValue &SV = RC[Start];
- if (SV.Type == BT::BitValue::Ref && SeqRef)
- OS << printv(SV.RefI.Reg) << '[' << SV.RefI.Pos << '-'
- << SV.RefI.Pos+(Count-1) << ']';
- else
- OS << SV;
+ return OS;
}
- OS << " }";
-
- return OS;
}
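// For example, an 8-bit all-zero cell prints as "{ w:8 [0-7]:0 }", and a
// cell that mirrors bits 0-31 of another register prints one segment like
// "[0-31]:<reg>[0-31]" (the register formatting comes from printv above).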
BitTracker::BitTracker(const MachineEvaluator &E, MachineFunction &F)
@@ -420,7 +422,7 @@ BT::RegisterCell BT::MachineEvaluator::eIMM(int64_t V, uint16_t W) const {
BT::RegisterCell BT::MachineEvaluator::eIMM(const ConstantInt *CI) const {
- APInt A = CI->getValue();
+ const APInt &A = CI->getValue();
uint16_t BW = A.getBitWidth();
assert((unsigned)BW == A.getBitWidth() && "BitWidth overflow");
RegisterCell Res(BW);
@@ -731,18 +733,18 @@ BT::BitMask BT::MachineEvaluator::mask(unsigned Reg, unsigned Sub) const {
return BitMask(0, W-1);
}
-
-bool BT::MachineEvaluator::evaluate(const MachineInstr *MI,
- const CellMapType &Inputs, CellMapType &Outputs) const {
- unsigned Opc = MI->getOpcode();
+bool BT::MachineEvaluator::evaluate(const MachineInstr &MI,
+ const CellMapType &Inputs,
+ CellMapType &Outputs) const {
+ unsigned Opc = MI.getOpcode();
switch (Opc) {
case TargetOpcode::REG_SEQUENCE: {
- RegisterRef RD = MI->getOperand(0);
+ RegisterRef RD = MI.getOperand(0);
assert(RD.Sub == 0);
- RegisterRef RS = MI->getOperand(1);
- unsigned SS = MI->getOperand(2).getImm();
- RegisterRef RT = MI->getOperand(3);
- unsigned ST = MI->getOperand(4).getImm();
+ RegisterRef RS = MI.getOperand(1);
+ unsigned SS = MI.getOperand(2).getImm();
+ RegisterRef RT = MI.getOperand(3);
+ unsigned ST = MI.getOperand(4).getImm();
assert(SS != ST);
uint16_t W = getRegBitWidth(RD);
@@ -756,8 +758,8 @@ bool BT::MachineEvaluator::evaluate(const MachineInstr *MI,
case TargetOpcode::COPY: {
// COPY can transfer a smaller register into a wider one.
// If that is the case, fill the remaining high bits with 0.
- RegisterRef RD = MI->getOperand(0);
- RegisterRef RS = MI->getOperand(1);
+ RegisterRef RD = MI.getOperand(0);
+ RegisterRef RS = MI.getOperand(1);
assert(RD.Sub == 0);
uint16_t WD = getRegBitWidth(RD);
uint16_t WS = getRegBitWidth(RS);
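      // e.g. a COPY of a 32-bit source into a 64-bit destination yields the
      // source bits in [0,31] and constant zeros in [32,63].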
@@ -780,12 +782,12 @@ bool BT::MachineEvaluator::evaluate(const MachineInstr *MI,
// Main W-Z implementation.
-void BT::visitPHI(const MachineInstr *PI) {
- int ThisN = PI->getParent()->getNumber();
+void BT::visitPHI(const MachineInstr &PI) {
+ int ThisN = PI.getParent()->getNumber();
if (Trace)
- dbgs() << "Visit FI(BB#" << ThisN << "): " << *PI;
+ dbgs() << "Visit FI(BB#" << ThisN << "): " << PI;
- const MachineOperand &MD = PI->getOperand(0);
+ const MachineOperand &MD = PI.getOperand(0);
assert(MD.getSubReg() == 0 && "Unexpected sub-register in definition");
RegisterRef DefRR(MD);
uint16_t DefBW = ME.getRegBitWidth(DefRR);
@@ -796,8 +798,8 @@ void BT::visitPHI(const MachineInstr *PI) {
bool Changed = false;
- for (unsigned i = 1, n = PI->getNumOperands(); i < n; i += 2) {
- const MachineBasicBlock *PB = PI->getOperand(i+1).getMBB();
+ for (unsigned i = 1, n = PI.getNumOperands(); i < n; i += 2) {
+ const MachineBasicBlock *PB = PI.getOperand(i + 1).getMBB();
int PredN = PB->getNumber();
if (Trace)
dbgs() << " edge BB#" << PredN << "->BB#" << ThisN;
@@ -807,7 +809,7 @@ void BT::visitPHI(const MachineInstr *PI) {
continue;
}
- RegisterRef RU = PI->getOperand(i);
+ RegisterRef RU = PI.getOperand(i);
RegisterCell ResC = ME.getCell(RU, Map);
if (Trace)
dbgs() << " input reg: " << PrintReg(RU.Reg, &ME.TRI, RU.Sub)
@@ -824,22 +826,21 @@ void BT::visitPHI(const MachineInstr *PI) {
}
}
-
-void BT::visitNonBranch(const MachineInstr *MI) {
+void BT::visitNonBranch(const MachineInstr &MI) {
if (Trace) {
- int ThisN = MI->getParent()->getNumber();
- dbgs() << "Visit MI(BB#" << ThisN << "): " << *MI;
+ int ThisN = MI.getParent()->getNumber();
+ dbgs() << "Visit MI(BB#" << ThisN << "): " << MI;
}
- if (MI->isDebugValue())
+ if (MI.isDebugValue())
return;
- assert(!MI->isBranch() && "Unexpected branch instruction");
+ assert(!MI.isBranch() && "Unexpected branch instruction");
CellMapType ResMap;
bool Eval = ME.evaluate(MI, Map, ResMap);
if (Trace && Eval) {
- for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = 0, n = MI.getNumOperands(); i < n; ++i) {
+ const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.isUse())
continue;
RegisterRef RU(MO);
@@ -857,8 +858,8 @@ void BT::visitNonBranch(const MachineInstr *MI) {
// Iterate over all definitions of the instruction, and update the
// cells accordingly.
- for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = 0, n = MI.getNumOperands(); i < n; ++i) {
+ const MachineOperand &MO = MI.getOperand(i);
// Visit register defs only.
if (!MO.isReg() || !MO.isDef())
continue;
@@ -905,9 +906,8 @@ void BT::visitNonBranch(const MachineInstr *MI) {
}
}
-
-void BT::visitBranchesFrom(const MachineInstr *BI) {
- const MachineBasicBlock &B = *BI->getParent();
+void BT::visitBranchesFrom(const MachineInstr &BI) {
+ const MachineBasicBlock &B = *BI.getParent();
MachineBasicBlock::const_iterator It = BI, End = B.end();
BranchTargetList Targets, BTs;
bool FallsThrough = true, DefaultToAll = false;
@@ -915,11 +915,11 @@ void BT::visitBranchesFrom(const MachineInstr *BI) {
do {
BTs.clear();
- const MachineInstr *MI = &*It;
+ const MachineInstr &MI = *It;
if (Trace)
- dbgs() << "Visit BR(BB#" << ThisN << "): " << *MI;
- assert(MI->isBranch() && "Expecting branch instruction");
- InstrExec.insert(MI);
+ dbgs() << "Visit BR(BB#" << ThisN << "): " << MI;
+ assert(MI.isBranch() && "Expecting branch instruction");
+ InstrExec.insert(&MI);
bool Eval = ME.evaluate(MI, Map, BTs, FallsThrough);
if (!Eval) {
// If the evaluation failed, we will add all targets. Keep going in
@@ -983,11 +983,11 @@ void BT::visitUsesOf(unsigned Reg) {
if (!InstrExec.count(UseI))
continue;
if (UseI->isPHI())
- visitPHI(UseI);
+ visitPHI(*UseI);
else if (!UseI->isBranch())
- visitNonBranch(UseI);
+ visitNonBranch(*UseI);
else
- visitBranchesFrom(UseI);
+ visitBranchesFrom(*UseI);
}
}
@@ -1084,8 +1084,8 @@ void BT::run() {
MachineBasicBlock::const_iterator It = B.begin(), End = B.end();
// Visit PHI nodes first.
while (It != End && It->isPHI()) {
- const MachineInstr *PI = &*It++;
- InstrExec.insert(PI);
+ const MachineInstr &PI = *It++;
+ InstrExec.insert(&PI);
visitPHI(PI);
}
@@ -1098,8 +1098,8 @@ void BT::run() {
// Visit non-branch instructions.
while (It != End && !It->isBranch()) {
- const MachineInstr *MI = &*It++;
- InstrExec.insert(MI);
+ const MachineInstr &MI = *It++;
+ InstrExec.insert(&MI);
visitNonBranch(MI);
}
// If block end has been reached, add the fall-through edge to the queue.
@@ -1114,7 +1114,7 @@ void BT::run() {
} else {
// Handle the remaining sequence of branches. This function will update
// the work queue.
- visitBranchesFrom(It);
+ visitBranchesFrom(*It);
}
} // while (!FlowQ->empty())
diff --git a/lib/Target/Hexagon/BitTracker.h b/lib/Target/Hexagon/BitTracker.h
index 959c8318fd60..5b925fe696f8 100644
--- a/lib/Target/Hexagon/BitTracker.h
+++ b/lib/Target/Hexagon/BitTracker.h
@@ -51,9 +51,9 @@ struct BitTracker {
bool reached(const MachineBasicBlock *B) const;
private:
- void visitPHI(const MachineInstr *PI);
- void visitNonBranch(const MachineInstr *MI);
- void visitBranchesFrom(const MachineInstr *BI);
+ void visitPHI(const MachineInstr &PI);
+ void visitNonBranch(const MachineInstr &MI);
+ void visitBranchesFrom(const MachineInstr &BI);
void visitUsesOf(unsigned Reg);
void reset();
@@ -417,13 +417,13 @@ struct BitTracker::MachineEvaluator {
// Evaluate a non-branching machine instruction, given the cell map with
// the input values. Place the results in the Outputs map. Return "true"
// if evaluation succeeded, "false" otherwise.
- virtual bool evaluate(const MachineInstr *MI, const CellMapType &Inputs,
+ virtual bool evaluate(const MachineInstr &MI, const CellMapType &Inputs,
CellMapType &Outputs) const;
// Evaluate a branch, given the cell map with the input values. Fill out
// a list of all possible branch targets and indicate (through a flag)
// whether the branch could fall-through. Return "true" if this information
// has been successfully computed, "false" otherwise.
- virtual bool evaluate(const MachineInstr *BI, const CellMapType &Inputs,
+ virtual bool evaluate(const MachineInstr &BI, const CellMapType &Inputs,
BranchTargetList &Targets, bool &FallsThru) const = 0;
const TargetRegisterInfo &TRI;
diff --git a/lib/Target/Hexagon/CMakeLists.txt b/lib/Target/Hexagon/CMakeLists.txt
index 333ca6a757aa..0e32f25f52b7 100644
--- a/lib/Target/Hexagon/CMakeLists.txt
+++ b/lib/Target/Hexagon/CMakeLists.txt
@@ -17,12 +17,13 @@ add_llvm_target(HexagonCodeGen
HexagonAsmPrinter.cpp
HexagonBitSimplify.cpp
HexagonBitTracker.cpp
+ HexagonBlockRanges.cpp
+ HexagonBranchRelaxation.cpp
HexagonCFGOptimizer.cpp
HexagonCommonGEP.cpp
HexagonCopyToCombine.cpp
HexagonEarlyIfConv.cpp
HexagonExpandCondsets.cpp
- HexagonExpandPredSpillCode.cpp
HexagonFixupHwLoops.cpp
HexagonFrameLowering.cpp
HexagonGenExtract.cpp
@@ -37,6 +38,7 @@ add_llvm_target(HexagonCodeGen
HexagonMachineScheduler.cpp
HexagonMCInstLower.cpp
HexagonNewValueJump.cpp
+ HexagonOptAddrMode.cpp
HexagonOptimizeSZextends.cpp
HexagonPeephole.cpp
HexagonRDF.cpp
@@ -55,7 +57,7 @@ add_llvm_target(HexagonCodeGen
RDFDeadCode.cpp
RDFGraph.cpp
RDFLiveness.cpp
-)
+ )
add_subdirectory(AsmParser)
add_subdirectory(TargetInfo)
diff --git a/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp b/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
index 4a9c3413cb29..7bc08ecfcab6 100644
--- a/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
+++ b/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
@@ -16,7 +16,7 @@
#include "MCTargetDesc/HexagonMCInstrInfo.h"
#include "MCTargetDesc/HexagonInstPrinter.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
@@ -30,7 +30,6 @@
#include "llvm/Support/MemoryObject.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/TargetRegistry.h"
-#include <vector>
using namespace llvm;
using namespace Hexagon;
@@ -382,7 +381,8 @@ DecodeStatus HexagonDisassembler::getSingleInstruction(
if (Producer >= Hexagon::W0 && Producer <= Hexagon::W15)
Producer = ((Producer - Hexagon::W0) << 1) + SubregBit + Hexagon::V0;
else if (SubregBit)
- // Subreg bit should not be set for non-doublevector newvalue producers
+ // Hexagon PRM 10.11 New-value operands
+ // Nt[0] is reserved and should always be encoded as zero.
return MCDisassembler::Fail;
assert(Producer != Hexagon::NoRegister);
MCO.setReg(Producer);
@@ -1459,6 +1459,7 @@ void HexagonDisassembler::addSubinstOperands(MCInst *MI, unsigned opcode,
operand = getRegFromSubinstEncoding((inst & 0xf0) >> 4);
Op = MCOperand::createReg(operand);
MI->addOperand(Op);
+ break;
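+    // Without this break, decoding would fall through and the SA1 cases
+    // below would append their operands to the already-decoded subinst.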
case Hexagon::V4_SA1_and1:
case Hexagon::V4_SA1_dec:
case Hexagon::V4_SA1_inc:
diff --git a/lib/Target/Hexagon/Disassembler/Makefile b/lib/Target/Hexagon/Disassembler/Makefile
deleted file mode 100644
index 16c305fe4074..000000000000
--- a/lib/Target/Hexagon/Disassembler/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===-- lib/Target/Hexagon/Disassembler/Makefile -----------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMHexagonDisassembler
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/Hexagon/Hexagon.td b/lib/Target/Hexagon/Hexagon.td
index 5a7eb215de42..aaa0f3e9b3d3 100644
--- a/lib/Target/Hexagon/Hexagon.td
+++ b/lib/Target/Hexagon/Hexagon.td
@@ -47,7 +47,6 @@ def IEEERndNearV5T : Predicate<"HST->modeIEEERndNear()">;
def UseHVXDbl : Predicate<"HST->useHVXDblOps()">,
AssemblerPredicate<"ExtensionHVXDbl">;
def UseHVXSgl : Predicate<"HST->useHVXSglOps()">;
-
def UseHVX : Predicate<"HST->useHVXSglOps() ||HST->useHVXDblOps()">,
AssemblerPredicate<"ExtensionHVX">;
@@ -171,6 +170,15 @@ def getBaseWithImmOffset : InstrMapping {
let ValueCols = [["BaseImmOffset"]];
}
+def getAbsoluteForm : InstrMapping {
+ let FilterClass = "AddrModeRel";
+ let RowFields = ["CextOpcode", "PredSense", "PNewValue", "isNVStore",
+ "isFloat"];
+ let ColFields = ["addrMode"];
+ let KeyCol = ["BaseImmOffset"];
+ let ValueCols = [["Absolute"]];
+}
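+// e.g. getAbsoluteForm maps a base+#imm-offset memory opcode to its
+// absolute-address counterpart with the same predication and new-value
+// properties, consumed through the TableGen-generated
+// Hexagon::getAbsoluteForm(Opcode) lookup (generated name assumed).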
+
def getBaseWithRegOffset : InstrMapping {
let FilterClass = "AddrModeRel";
let RowFields = ["CextOpcode", "PredSense", "PNewValue", "isNVStore"];
@@ -179,6 +187,22 @@ def getBaseWithRegOffset : InstrMapping {
let ValueCols = [["BaseRegOffset"]];
}
+def xformRegToImmOffset : InstrMapping {
+ let FilterClass = "AddrModeRel";
+ let RowFields = ["CextOpcode", "PredSense", "PNewValue", "isNVStore"];
+ let ColFields = ["addrMode"];
+ let KeyCol = ["BaseRegOffset"];
+ let ValueCols = [["BaseImmOffset"]];
+}
+
+def getBaseWithLongOffset : InstrMapping {
+ let FilterClass = "ImmRegShl";
+ let RowFields = ["CextOpcode", "PredSense", "PNewValue", "isNVStore"];
+ let ColFields = ["addrMode"];
+ let KeyCol = ["BaseRegOffset"];
+ let ValueCols = [["BaseLongOffset"]];
+}
+
def getRegForm : InstrMapping {
let FilterClass = "ImmRegRel";
let RowFields = ["CextOpcode", "PredSense", "PNewValue"];
@@ -252,6 +276,7 @@ def : Proc<"hexagonv60", HexagonModelV60,
//===----------------------------------------------------------------------===//
def HexagonAsmParser : AsmParser {
+ let ShouldEmitMatchRegisterAltName = 1;
bit HasMnemonicFirst = 0;
}
diff --git a/lib/Target/Hexagon/HexagonAsmPrinter.cpp b/lib/Target/Hexagon/HexagonAsmPrinter.cpp
index 4c7c0392a132..cd954a146104 100644
--- a/lib/Target/Hexagon/HexagonAsmPrinter.cpp
+++ b/lib/Target/Hexagon/HexagonAsmPrinter.cpp
@@ -21,8 +21,6 @@
#include "MCTargetDesc/HexagonInstPrinter.h"
#include "MCTargetDesc/HexagonMCInstrInfo.h"
#include "MCTargetDesc/HexagonMCShuffler.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/CodeGen/AsmPrinter.h"
@@ -44,7 +42,6 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Format.h"
@@ -264,6 +261,19 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
switch (Inst.getOpcode()) {
default: return;
+ case Hexagon::A2_iconst: {
+ Inst.setOpcode(Hexagon::A2_addi);
+ MCOperand Reg = Inst.getOperand(0);
+ MCOperand S16 = Inst.getOperand(1);
+ HexagonMCInstrInfo::setMustNotExtend(*S16.getExpr());
+ HexagonMCInstrInfo::setS23_2_reloc(*S16.getExpr());
+ Inst.clear();
+ Inst.addOperand(Reg);
+ Inst.addOperand(MCOperand::createReg(Hexagon::R0));
+ Inst.addOperand(S16);
+ break;
+ }
+
// "$dst = CONST64(#$src1)",
case Hexagon::CONST64_Float_Real:
case Hexagon::CONST64_Int_Real:
@@ -297,8 +307,8 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
MCOperand &Reg = MappedInst.getOperand(0);
TmpInst.setOpcode(Hexagon::L2_loadrigp);
TmpInst.addOperand(Reg);
- TmpInst.addOperand(MCOperand::createExpr(
- MCSymbolRefExpr::create(Sym, OutContext)));
+ TmpInst.addOperand(MCOperand::createExpr(HexagonMCExpr::create(
+ MCSymbolRefExpr::create(Sym, OutContext), OutContext)));
MappedInst = TmpInst;
}
break;
@@ -367,7 +377,8 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
int64_t Imm;
MCExpr const *Expr = MO.getExpr();
bool Success = Expr->evaluateAsAbsolute(Imm);
- assert (Success && "Expected immediate and none was found");(void)Success;
+ assert (Success && "Expected immediate and none was found");
+ (void)Success;
MCInst TmpInst;
if (Imm == 0) {
TmpInst.setOpcode(Hexagon::S2_vsathub);
@@ -381,7 +392,8 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
TmpInst.addOperand(MappedInst.getOperand(1));
const MCExpr *One = MCConstantExpr::create(1, OutContext);
const MCExpr *Sub = MCBinaryExpr::createSub(Expr, One, OutContext);
- TmpInst.addOperand(MCOperand::createExpr(Sub));
+ TmpInst.addOperand(
+ MCOperand::createExpr(HexagonMCExpr::create(Sub, OutContext)));
MappedInst = TmpInst;
return;
}
@@ -391,7 +403,8 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
MCExpr const *Expr = MO2.getExpr();
int64_t Imm;
bool Success = Expr->evaluateAsAbsolute(Imm);
- assert (Success && "Expected immediate and none was found");(void)Success;
+ assert (Success && "Expected immediate and none was found");
+ (void)Success;
MCInst TmpInst;
if (Imm == 0) {
TmpInst.setOpcode(Hexagon::A2_combinew);
@@ -414,7 +427,8 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
TmpInst.addOperand(MappedInst.getOperand(1));
const MCExpr *One = MCConstantExpr::create(1, OutContext);
const MCExpr *Sub = MCBinaryExpr::createSub(Expr, One, OutContext);
- TmpInst.addOperand(MCOperand::createExpr(Sub));
+ TmpInst.addOperand(
+ MCOperand::createExpr(HexagonMCExpr::create(Sub, OutContext)));
MappedInst = TmpInst;
return;
}
@@ -424,7 +438,8 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
MCExpr const *Expr = MO.getExpr();
int64_t Imm;
bool Success = Expr->evaluateAsAbsolute(Imm);
- assert (Success && "Expected immediate and none was found");(void)Success;
+ assert (Success && "Expected immediate and none was found");
+ (void)Success;
MCInst TmpInst;
if (Imm == 0) {
TmpInst.setOpcode(Hexagon::A2_tfr);
@@ -438,7 +453,8 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
TmpInst.addOperand(MappedInst.getOperand(1));
const MCExpr *One = MCConstantExpr::create(1, OutContext);
const MCExpr *Sub = MCBinaryExpr::createSub(Expr, One, OutContext);
- TmpInst.addOperand(MCOperand::createExpr(Sub));
+ TmpInst.addOperand(
+ MCOperand::createExpr(HexagonMCExpr::create(Sub, OutContext)));
MappedInst = TmpInst;
return;
}
@@ -470,10 +486,10 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
bool Success = MO.getExpr()->evaluateAsAbsolute(Imm);
if (Success && Imm < 0) {
const MCExpr *MOne = MCConstantExpr::create(-1, OutContext);
- TmpInst.addOperand(MCOperand::createExpr(MOne));
+ TmpInst.addOperand(MCOperand::createExpr(HexagonMCExpr::create(MOne, OutContext)));
} else {
const MCExpr *Zero = MCConstantExpr::create(0, OutContext);
- TmpInst.addOperand(MCOperand::createExpr(Zero));
+ TmpInst.addOperand(MCOperand::createExpr(HexagonMCExpr::create(Zero, OutContext)));
}
TmpInst.addOperand(MO);
MappedInst = TmpInst;
@@ -523,12 +539,13 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
MCExpr const *Expr = Imm.getExpr();
int64_t Value;
bool Success = Expr->evaluateAsAbsolute(Value);
- assert(Success);(void)Success;
+ assert(Success);
+ (void)Success;
if (Value < 0 && Value > -256) {
MappedInst.setOpcode(Hexagon::M2_mpysin);
- Imm.setExpr(MCUnaryExpr::createMinus(Expr, OutContext));
- }
- else
+ Imm.setExpr(HexagonMCExpr::create(
+ MCUnaryExpr::createMinus(Expr, OutContext), OutContext));
+ } else
MappedInst.setOpcode(Hexagon::M2_mpysip);
return;
}
diff --git a/lib/Target/Hexagon/HexagonBitSimplify.cpp b/lib/Target/Hexagon/HexagonBitSimplify.cpp
index 4d2b54521e83..c8b4a4cf9382 100644
--- a/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -9,18 +9,17 @@
#define DEBUG_TYPE "hexbit"
-#include "llvm/CodeGen/Passes.h"
+#include "HexagonBitTracker.h"
+#include "HexagonTargetMachine.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/CommandLine.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "HexagonTargetMachine.h"
-#include "HexagonBitTracker.h"
+#include "llvm/Target/TargetMachine.h"
using namespace llvm;
@@ -159,8 +158,6 @@ namespace {
static void getInstrUses(const MachineInstr &MI, RegisterSet &Uses);
static bool isEqual(const BitTracker::RegisterCell &RC1, uint16_t B1,
const BitTracker::RegisterCell &RC2, uint16_t B2, uint16_t W);
- static bool isConst(const BitTracker::RegisterCell &RC, uint16_t B,
- uint16_t W);
static bool isZero(const BitTracker::RegisterCell &RC, uint16_t B,
uint16_t W);
static bool getConst(const BitTracker::RegisterCell &RC, uint16_t B,
@@ -284,17 +281,6 @@ bool HexagonBitSimplify::isEqual(const BitTracker::RegisterCell &RC1,
return true;
}
-
-bool HexagonBitSimplify::isConst(const BitTracker::RegisterCell &RC,
- uint16_t B, uint16_t W) {
- assert(B < RC.width() && B+W <= RC.width());
- for (uint16_t i = B; i < B+W; ++i)
- if (!RC[i].num())
- return false;
- return true;
-}
-
-
bool HexagonBitSimplify::isZero(const BitTracker::RegisterCell &RC,
uint16_t B, uint16_t W) {
assert(B < RC.width() && B+W <= RC.width());
@@ -876,6 +862,12 @@ const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
case Hexagon::DoubleRegsRegClassID:
VerifySR(RR.Sub);
return &Hexagon::IntRegsRegClass;
+ case Hexagon::VecDblRegsRegClassID:
+ VerifySR(RR.Sub);
+ return &Hexagon::VectorRegsRegClass;
+ case Hexagon::VecDblRegs128BRegClassID:
+ VerifySR(RR.Sub);
+ return &Hexagon::VectorRegs128BRegClass;
}
return nullptr;
}
@@ -1297,7 +1289,7 @@ bool RedundantInstrElimination::processBlock(MachineBasicBlock &B,
continue;
// If found, replace the instruction with a COPY.
- DebugLoc DL = MI->getDebugLoc();
+ const DebugLoc &DL = MI->getDebugLoc();
const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
unsigned NewR = MRI.createVirtualRegister(FRC);
BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR)
@@ -1326,7 +1318,7 @@ namespace {
: Transformation(true), HII(hii), MRI(mri), BT(bt) {}
bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
private:
- bool isTfrConst(const MachineInstr *MI) const;
+ bool isTfrConst(const MachineInstr &MI) const;
bool isConst(unsigned R, int64_t &V) const;
unsigned genTfrConst(const TargetRegisterClass *RC, int64_t C,
MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL);
@@ -1354,9 +1346,8 @@ bool ConstGeneration::isConst(unsigned R, int64_t &C) const {
return true;
}
-
-bool ConstGeneration::isTfrConst(const MachineInstr *MI) const {
- unsigned Opc = MI->getOpcode();
+bool ConstGeneration::isTfrConst(const MachineInstr &MI) const {
+ unsigned Opc = MI.getOpcode();
switch (Opc) {
case Hexagon::A2_combineii:
case Hexagon::A4_combineii:
@@ -1426,7 +1417,7 @@ bool ConstGeneration::processBlock(MachineBasicBlock &B, const RegisterSet&) {
RegisterSet Defs;
for (auto I = B.begin(), E = B.end(); I != E; ++I) {
- if (isTfrConst(I))
+ if (isTfrConst(*I))
continue;
Defs.clear();
HBS::getInstrDefs(*I, Defs);
@@ -1960,11 +1951,10 @@ bool BitSimplification::genExtractHalf(MachineInstr *MI,
NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
BuildMI(B, At, DL, HII.get(Hexagon::A2_zxth), NewR)
.addReg(L.Reg, 0, L.Sub);
- } else if (!L.Low && Opc != Hexagon::S2_extractu) {
+ } else if (!L.Low && Opc != Hexagon::S2_lsr_i_r) {
NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
- BuildMI(B, MI, DL, HII.get(Hexagon::S2_extractu), NewR)
+ BuildMI(B, MI, DL, HII.get(Hexagon::S2_lsr_i_r), NewR)
.addReg(L.Reg, 0, L.Sub)
- .addImm(16)
.addImm(16);
}
if (NewR == 0)
@@ -2187,6 +2177,9 @@ bool BitSimplification::processBlock(MachineBasicBlock &B,
bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
auto &HST = MF.getSubtarget<HexagonSubtarget>();
auto &HRI = *HST.getRegisterInfo();
auto &HII = *HST.getInstrInfo();
@@ -2729,6 +2722,9 @@ bool HexagonLoopRescheduling::processLoop(LoopCand &C) {
bool HexagonLoopRescheduling::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
auto &HST = MF.getSubtarget<HexagonSubtarget>();
HII = HST.getInstrInfo();
HRI = HST.getRegisterInfo();
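Both runOnMachineFunction hunks above add the guard that lets -opt-bisect-limit and the optnone attribute disable a pass per function. A minimal sketch of the idiom, assuming LLVM 3.9 headers; the pass itself is hypothetical:

    #include "llvm/CodeGen/MachineFunctionPass.h"

    using namespace llvm;

    namespace {
    // "DemoMachinePass" is a stand-in; only the guard is the point here.
    struct DemoMachinePass : public MachineFunctionPass {
      static char ID;
      DemoMachinePass() : MachineFunctionPass(ID) {}

      bool runOnMachineFunction(MachineFunction &MF) override {
        // Bail out before doing any work when optimization is disabled
        // for this function, reporting "no change".
        if (skipFunction(*MF.getFunction()))
          return false;
        // ... the actual transformation would run here ...
        return false;
      }
    };
    char DemoMachinePass::ID = 0;
    } // end anonymous namespace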
diff --git a/lib/Target/Hexagon/HexagonBitTracker.cpp b/lib/Target/Hexagon/HexagonBitTracker.cpp
index d5848dc45a3b..78b57d27ad50 100644
--- a/lib/Target/Hexagon/HexagonBitTracker.cpp
+++ b/lib/Target/Hexagon/HexagonBitTracker.cpp
@@ -102,9 +102,9 @@ class RegisterRefs {
std::vector<BT::RegisterRef> Vector;
public:
- RegisterRefs(const MachineInstr *MI) : Vector(MI->getNumOperands()) {
+ RegisterRefs(const MachineInstr &MI) : Vector(MI.getNumOperands()) {
for (unsigned i = 0, n = Vector.size(); i < n; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+ const MachineOperand &MO = MI.getOperand(i);
if (MO.isReg())
Vector[i] = BT::RegisterRef(MO);
// For indices that don't correspond to registers, the entry will
@@ -121,13 +121,14 @@ public:
};
}
-bool HexagonEvaluator::evaluate(const MachineInstr *MI,
- const CellMapType &Inputs, CellMapType &Outputs) const {
+bool HexagonEvaluator::evaluate(const MachineInstr &MI,
+ const CellMapType &Inputs,
+ CellMapType &Outputs) const {
unsigned NumDefs = 0;
// Sanity verification: there should not be any defs with subregisters.
- for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = 0, n = MI.getNumOperands(); i < n; ++i) {
+ const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.isDef())
continue;
NumDefs++;
@@ -137,7 +138,7 @@ bool HexagonEvaluator::evaluate(const MachineInstr *MI,
if (NumDefs == 0)
return false;
- if (MI->mayLoad())
+ if (MI.mayLoad())
return evaluateLoad(MI, Inputs, Outputs);
// Check COPY instructions that copy formal parameters into virtual
@@ -154,7 +155,7 @@ bool HexagonEvaluator::evaluate(const MachineInstr *MI,
// was not a COPY, it would not be clear how to mirror that extension
// on the callee's side. For that reason, only check COPY instructions
// for potential extensions.
- if (MI->isCopy()) {
+ if (MI.isCopy()) {
if (evaluateFormalCopy(MI, Inputs, Outputs))
return true;
}
@@ -165,19 +166,19 @@ bool HexagonEvaluator::evaluate(const MachineInstr *MI,
// checking what kind of operand a given instruction has individually
  // for each instruction, do it here. Global symbols as operands generally
  // do not provide any useful information.
- for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = 0, n = MI.getNumOperands(); i < n; ++i) {
+ const MachineOperand &MO = MI.getOperand(i);
if (MO.isGlobal() || MO.isBlockAddress() || MO.isSymbol() || MO.isJTI() ||
MO.isCPI())
return false;
}
RegisterRefs Reg(MI);
- unsigned Opc = MI->getOpcode();
+ unsigned Opc = MI.getOpcode();
using namespace Hexagon;
- #define op(i) MI->getOperand(i)
- #define rc(i) RegisterCell::ref(getCell(Reg[i],Inputs))
- #define im(i) MI->getOperand(i).getImm()
+#define op(i) MI.getOperand(i)
+#define rc(i) RegisterCell::ref(getCell(Reg[i], Inputs))
+#define im(i) MI.getOperand(i).getImm()
// If the instruction has no register operands, skip it.
if (Reg.size() == 0)
@@ -190,9 +191,9 @@ bool HexagonEvaluator::evaluate(const MachineInstr *MI,
return true;
};
// Get the cell corresponding to the N-th operand.
- auto cop = [this,&Reg,&MI,&Inputs] (unsigned N, uint16_t W)
- -> BT::RegisterCell {
- const MachineOperand &Op = MI->getOperand(N);
+ auto cop = [this, &Reg, &MI, &Inputs](unsigned N,
+ uint16_t W) -> BT::RegisterCell {
+ const MachineOperand &Op = MI.getOperand(N);
if (Op.isImm())
return eIMM(Op.getImm(), W);
if (!Op.isReg())
@@ -879,13 +880,13 @@ bool HexagonEvaluator::evaluate(const MachineInstr *MI,
return false;
}
-
-bool HexagonEvaluator::evaluate(const MachineInstr *BI,
- const CellMapType &Inputs, BranchTargetList &Targets,
- bool &FallsThru) const {
+bool HexagonEvaluator::evaluate(const MachineInstr &BI,
+ const CellMapType &Inputs,
+ BranchTargetList &Targets,
+ bool &FallsThru) const {
// We need to evaluate one branch at a time. TII::AnalyzeBranch checks
// all the branches in a basic block at once, so we cannot use it.
- unsigned Opc = BI->getOpcode();
+ unsigned Opc = BI.getOpcode();
bool SimpleBranch = false;
bool Negated = false;
switch (Opc) {
@@ -901,7 +902,7 @@ bool HexagonEvaluator::evaluate(const MachineInstr *BI,
SimpleBranch = true;
break;
case Hexagon::J2_jump:
- Targets.insert(BI->getOperand(0).getMBB());
+ Targets.insert(BI.getOperand(0).getMBB());
FallsThru = false;
return true;
default:
@@ -914,7 +915,7 @@ bool HexagonEvaluator::evaluate(const MachineInstr *BI,
return false;
// BI is a conditional branch if we got here.
- RegisterRef PR = BI->getOperand(0);
+ RegisterRef PR = BI.getOperand(0);
RegisterCell PC = getCell(PR, Inputs);
const BT::BitValue &Test = PC[0];
@@ -929,18 +930,18 @@ bool HexagonEvaluator::evaluate(const MachineInstr *BI,
return true;
}
- Targets.insert(BI->getOperand(1).getMBB());
+ Targets.insert(BI.getOperand(1).getMBB());
FallsThru = false;
return true;
}
-
-bool HexagonEvaluator::evaluateLoad(const MachineInstr *MI,
- const CellMapType &Inputs, CellMapType &Outputs) const {
+bool HexagonEvaluator::evaluateLoad(const MachineInstr &MI,
+ const CellMapType &Inputs,
+ CellMapType &Outputs) const {
if (TII.isPredicated(MI))
return false;
- assert(MI->mayLoad() && "A load that mayn't?");
- unsigned Opc = MI->getOpcode();
+ assert(MI.mayLoad() && "A load that mayn't?");
+ unsigned Opc = MI.getOpcode();
uint16_t BitNum;
bool SignEx;
@@ -1067,7 +1068,7 @@ bool HexagonEvaluator::evaluateLoad(const MachineInstr *MI,
break;
}
- const MachineOperand &MD = MI->getOperand(0);
+ const MachineOperand &MD = MI.getOperand(0);
assert(MD.isReg() && MD.isDef());
RegisterRef RD = MD;
@@ -1091,15 +1092,15 @@ bool HexagonEvaluator::evaluateLoad(const MachineInstr *MI,
return true;
}
-
-bool HexagonEvaluator::evaluateFormalCopy(const MachineInstr *MI,
- const CellMapType &Inputs, CellMapType &Outputs) const {
+bool HexagonEvaluator::evaluateFormalCopy(const MachineInstr &MI,
+ const CellMapType &Inputs,
+ CellMapType &Outputs) const {
// If MI defines a formal parameter, but is not a copy (loads are handled
// in evaluateLoad), then it's not clear what to do.
- assert(MI->isCopy());
+ assert(MI.isCopy());
- RegisterRef RD = MI->getOperand(0);
- RegisterRef RS = MI->getOperand(1);
+ RegisterRef RD = MI.getOperand(0);
+ RegisterRef RS = MI.getOperand(1);
assert(RD.Sub == 0);
if (!TargetRegisterInfo::isPhysicalRegister(RS.Reg))
return false;
diff --git a/lib/Target/Hexagon/HexagonBitTracker.h b/lib/Target/Hexagon/HexagonBitTracker.h
index 897af2d71870..9e7b1dbe298f 100644
--- a/lib/Target/Hexagon/HexagonBitTracker.h
+++ b/lib/Target/Hexagon/HexagonBitTracker.h
@@ -26,9 +26,9 @@ struct HexagonEvaluator : public BitTracker::MachineEvaluator {
HexagonEvaluator(const HexagonRegisterInfo &tri, MachineRegisterInfo &mri,
const HexagonInstrInfo &tii, MachineFunction &mf);
- bool evaluate(const MachineInstr *MI, const CellMapType &Inputs,
+ bool evaluate(const MachineInstr &MI, const CellMapType &Inputs,
CellMapType &Outputs) const override;
- bool evaluate(const MachineInstr *BI, const CellMapType &Inputs,
+ bool evaluate(const MachineInstr &BI, const CellMapType &Inputs,
BranchTargetList &Targets, bool &FallsThru) const override;
BitTracker::BitMask mask(unsigned Reg, unsigned Sub) const override;
@@ -38,9 +38,9 @@ struct HexagonEvaluator : public BitTracker::MachineEvaluator {
const HexagonInstrInfo &TII;
private:
- bool evaluateLoad(const MachineInstr *MI, const CellMapType &Inputs,
+ bool evaluateLoad(const MachineInstr &MI, const CellMapType &Inputs,
CellMapType &Outputs) const;
- bool evaluateFormalCopy(const MachineInstr *MI, const CellMapType &Inputs,
+ bool evaluateFormalCopy(const MachineInstr &MI, const CellMapType &Inputs,
CellMapType &Outputs) const;
unsigned getNextPhysReg(unsigned PReg, unsigned Width) const;
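The signature changes in HexagonBitTracker.{cpp,h} follow the tree-wide migration from MachineInstr pointers to references for parameters that can never be null: call sites switch from arrow to dot access, and iterators are dereferenced once at the call boundary. A before/after sketch; the function is hypothetical:

    #include "llvm/CodeGen/MachineInstr.h"

    using namespace llvm;

    // Old style: a pointer that is, by contract, never null.
    static unsigned numExplicitDefsOld(const MachineInstr *MI) {
      return MI->getDesc().getNumDefs();
    }

    // New style: the reference encodes the non-null contract in the type.
    static unsigned numExplicitDefs(const MachineInstr &MI) {
      return MI.getDesc().getNumDefs();
    }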
diff --git a/lib/Target/Hexagon/HexagonBlockRanges.cpp b/lib/Target/Hexagon/HexagonBlockRanges.cpp
new file mode 100644
index 000000000000..5c44029dc6e7
--- /dev/null
+++ b/lib/Target/Hexagon/HexagonBlockRanges.cpp
@@ -0,0 +1,483 @@
+//===--- HexagonBlockRanges.cpp -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "hbr"
+
+#include "HexagonBlockRanges.h"
+#include "HexagonInstrInfo.h"
+#include "HexagonSubtarget.h"
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+#include <map>
+
+using namespace llvm;
+
+bool HexagonBlockRanges::IndexRange::overlaps(const IndexRange &A) const {
+ // If A contains start(), or "this" contains A.start(), then overlap.
+ IndexType S = start(), E = end(), AS = A.start(), AE = A.end();
+ if (AS == S)
+ return true;
+ bool SbAE = (S < AE) || (S == AE && A.TiedEnd); // S-before-AE.
+ bool ASbE = (AS < E) || (AS == E && TiedEnd); // AS-before-E.
+ if ((AS < S && SbAE) || (S < AS && ASbE))
+ return true;
+ // Otherwise no overlap.
+ return false;
+}
+
+
+bool HexagonBlockRanges::IndexRange::contains(const IndexRange &A) const {
+ if (start() <= A.start()) {
+ // Treat "None" in the range end as equal to the range start.
+ IndexType E = (end() != IndexType::None) ? end() : start();
+ IndexType AE = (A.end() != IndexType::None) ? A.end() : A.start();
+ if (AE <= E)
+ return true;
+ }
+ return false;
+}
+
+
+void HexagonBlockRanges::IndexRange::merge(const IndexRange &A) {
+ // Allow merging adjacent ranges.
+ assert(end() == A.start() || overlaps(A));
+ IndexType AS = A.start(), AE = A.end();
+ if (AS < start() || start() == IndexType::None)
+ setStart(AS);
+ if (end() < AE || end() == IndexType::None) {
+ setEnd(AE);
+ TiedEnd = A.TiedEnd;
+ } else {
+ if (end() == AE)
+ TiedEnd |= A.TiedEnd;
+ }
+ if (A.Fixed)
+ Fixed = true;
+}
+
+
+void HexagonBlockRanges::RangeList::include(const RangeList &RL) {
+ for (auto &R : RL)
+ if (std::find(begin(), end(), R) == end())
+ push_back(R);
+}
+
+
+// Merge all overlapping ranges in the list, so that all that remains
+// is a list of disjoint ranges.
+void HexagonBlockRanges::RangeList::unionize(bool MergeAdjacent) {
+ if (empty())
+ return;
+
+ std::sort(begin(), end());
+ iterator Iter = begin();
+
+ while (Iter != end()-1) {
+ iterator Next = std::next(Iter);
+ // If MergeAdjacent is true, merge ranges A and B, where A.end == B.start.
+ // This allows merging dead ranges, but is not valid for live ranges.
+ bool Merge = MergeAdjacent && (Iter->end() == Next->start());
+ if (Merge || Iter->overlaps(*Next)) {
+ Iter->merge(*Next);
+ erase(Next);
+ continue;
+ }
+ ++Iter;
+ }
+}
+
+
+// Compute a range A-B and add it to the list.
+void HexagonBlockRanges::RangeList::addsub(const IndexRange &A,
+ const IndexRange &B) {
+ // Exclusion of non-overlapping ranges makes some checks simpler
+ // later in this function.
+ if (!A.overlaps(B)) {
+ // A - B = A.
+ add(A);
+ return;
+ }
+
+ IndexType AS = A.start(), AE = A.end();
+ IndexType BS = B.start(), BE = B.end();
+
+ // If AE is None, then A is included in B, since A and B overlap.
+ // The result of the subtraction is empty, so just return.
+ if (AE == IndexType::None)
+ return;
+
+ if (AS < BS) {
+ // A starts before B.
+ // AE cannot be None since A and B overlap.
+ assert(AE != IndexType::None);
+ // Add the part of A that extends on the "less" side of B.
+ add(AS, BS, A.Fixed, false);
+ }
+
+ if (BE < AE) {
+ // BE cannot be Exit here.
+ if (BE == IndexType::None)
+ add(BS, AE, A.Fixed, false);
+ else
+ add(BE, AE, A.Fixed, false);
+ }
+}
+
+
+// Subtract a given range from each element in the list.
+void HexagonBlockRanges::RangeList::subtract(const IndexRange &Range) {
+ // Cannot assume that the list is unionized (i.e. contains only non-
+ // overlapping ranges).
+ RangeList T;
+ for (iterator Next, I = begin(); I != end(); I = Next) {
+ IndexRange &Rg = *I;
+ if (Rg.overlaps(Range)) {
+ T.addsub(Rg, Range);
+ Next = this->erase(I);
+ } else {
+ Next = std::next(I);
+ }
+ }
+ include(T);
+}
+
+
+HexagonBlockRanges::InstrIndexMap::InstrIndexMap(MachineBasicBlock &B)
+ : Block(B) {
+ IndexType Idx = IndexType::First;
+ First = Idx;
+ for (auto &In : B) {
+ if (In.isDebugValue())
+ continue;
+ assert(getIndex(&In) == IndexType::None && "Instruction already in map");
+ Map.insert(std::make_pair(Idx, &In));
+ ++Idx;
+ }
+ Last = B.empty() ? IndexType::None : unsigned(Idx)-1;
+}
+
+
+MachineInstr *HexagonBlockRanges::InstrIndexMap::getInstr(IndexType Idx) const {
+ auto F = Map.find(Idx);
+ return (F != Map.end()) ? F->second : nullptr;
+}
+
+
+HexagonBlockRanges::IndexType HexagonBlockRanges::InstrIndexMap::getIndex(
+ MachineInstr *MI) const {
+ for (auto &I : Map)
+ if (I.second == MI)
+ return I.first;
+ return IndexType::None;
+}
+
+
+HexagonBlockRanges::IndexType HexagonBlockRanges::InstrIndexMap::getPrevIndex(
+ IndexType Idx) const {
+ assert (Idx != IndexType::None);
+ if (Idx == IndexType::Entry)
+ return IndexType::None;
+ if (Idx == IndexType::Exit)
+ return Last;
+ if (Idx == First)
+ return IndexType::Entry;
+ return unsigned(Idx)-1;
+}
+
+
+HexagonBlockRanges::IndexType HexagonBlockRanges::InstrIndexMap::getNextIndex(
+ IndexType Idx) const {
+ assert (Idx != IndexType::None);
+ if (Idx == IndexType::Entry)
+ return IndexType::First;
+ if (Idx == IndexType::Exit || Idx == Last)
+ return IndexType::None;
+ return unsigned(Idx)+1;
+}
+
+
+void HexagonBlockRanges::InstrIndexMap::replaceInstr(MachineInstr *OldMI,
+ MachineInstr *NewMI) {
+ for (auto &I : Map) {
+ if (I.second != OldMI)
+ continue;
+ if (NewMI != nullptr)
+ I.second = NewMI;
+ else
+ Map.erase(I.first);
+ break;
+ }
+}
+
+
+HexagonBlockRanges::HexagonBlockRanges(MachineFunction &mf)
+ : MF(mf), HST(mf.getSubtarget<HexagonSubtarget>()),
+ TII(*HST.getInstrInfo()), TRI(*HST.getRegisterInfo()),
+ Reserved(TRI.getReservedRegs(mf)) {
+ // Consider all non-allocatable registers as reserved.
+ for (auto I = TRI.regclass_begin(), E = TRI.regclass_end(); I != E; ++I) {
+ auto *RC = *I;
+ if (RC->isAllocatable())
+ continue;
+ for (unsigned R : *RC)
+ Reserved[R] = true;
+ }
+}
+
+
+HexagonBlockRanges::RegisterSet HexagonBlockRanges::getLiveIns(
+ const MachineBasicBlock &B) {
+ RegisterSet LiveIns;
+ for (auto I : B.liveins())
+ if (!Reserved[I.PhysReg])
+ LiveIns.insert({I.PhysReg, 0});
+ return LiveIns;
+}
+
+
+HexagonBlockRanges::RegisterSet HexagonBlockRanges::expandToSubRegs(
+ RegisterRef R, const MachineRegisterInfo &MRI,
+ const TargetRegisterInfo &TRI) {
+ RegisterSet SRs;
+
+ if (R.Sub != 0) {
+ SRs.insert(R);
+ return SRs;
+ }
+
+ if (TargetRegisterInfo::isPhysicalRegister(R.Reg)) {
+ MCSubRegIterator I(R.Reg, &TRI);
+ if (!I.isValid())
+ SRs.insert({R.Reg, 0});
+ for (; I.isValid(); ++I)
+ SRs.insert({*I, 0});
+ } else {
+ assert(TargetRegisterInfo::isVirtualRegister(R.Reg));
+ auto &RC = *MRI.getRegClass(R.Reg);
+ unsigned PReg = *RC.begin();
+ MCSubRegIndexIterator I(PReg, &TRI);
+ if (!I.isValid())
+ SRs.insert({R.Reg, 0});
+ for (; I.isValid(); ++I)
+ SRs.insert({R.Reg, I.getSubRegIndex()});
+ }
+ return SRs;
+}
+
+
+void HexagonBlockRanges::computeInitialLiveRanges(InstrIndexMap &IndexMap,
+ RegToRangeMap &LiveMap) {
+ std::map<RegisterRef,IndexType> LastDef, LastUse;
+ RegisterSet LiveOnEntry;
+ MachineBasicBlock &B = IndexMap.getBlock();
+ MachineRegisterInfo &MRI = B.getParent()->getRegInfo();
+
+ for (auto R : getLiveIns(B))
+ for (auto S : expandToSubRegs(R, MRI, TRI))
+ LiveOnEntry.insert(S);
+
+ for (auto R : LiveOnEntry)
+ LastDef[R] = IndexType::Entry;
+
+ auto closeRange = [&LastUse,&LastDef,&LiveMap] (RegisterRef R) -> void {
+ auto LD = LastDef[R], LU = LastUse[R];
+ if (LD == IndexType::None)
+ LD = IndexType::Entry;
+ if (LU == IndexType::None)
+ LU = IndexType::Exit;
+ LiveMap[R].add(LD, LU, false, false);
+ LastUse[R] = LastDef[R] = IndexType::None;
+ };
+
+ for (auto &In : B) {
+ if (In.isDebugValue())
+ continue;
+ IndexType Index = IndexMap.getIndex(&In);
+ // Process uses first.
+ for (auto &Op : In.operands()) {
+ if (!Op.isReg() || !Op.isUse() || Op.isUndef())
+ continue;
+ RegisterRef R = { Op.getReg(), Op.getSubReg() };
+ if (TargetRegisterInfo::isPhysicalRegister(R.Reg) && Reserved[R.Reg])
+ continue;
+ bool IsKill = Op.isKill();
+ for (auto S : expandToSubRegs(R, MRI, TRI)) {
+ LastUse[S] = Index;
+ if (IsKill)
+ closeRange(S);
+ }
+ }
+ // Process defs.
+ for (auto &Op : In.operands()) {
+ if (!Op.isReg() || !Op.isDef() || Op.isUndef())
+ continue;
+ RegisterRef R = { Op.getReg(), Op.getSubReg() };
+ if (TargetRegisterInfo::isPhysicalRegister(R.Reg) && Reserved[R.Reg])
+ continue;
+ for (auto S : expandToSubRegs(R, MRI, TRI)) {
+ if (LastDef[S] != IndexType::None || LastUse[S] != IndexType::None)
+ closeRange(S);
+ LastDef[S] = Index;
+ }
+ }
+ }
+
+ // Collect live-on-exit.
+ RegisterSet LiveOnExit;
+ for (auto *SB : B.successors())
+ for (auto R : getLiveIns(*SB))
+ for (auto S : expandToSubRegs(R, MRI, TRI))
+ LiveOnExit.insert(S);
+
+ for (auto R : LiveOnExit)
+ LastUse[R] = IndexType::Exit;
+
+ // Process remaining registers.
+ RegisterSet Left;
+ for (auto &I : LastUse)
+ if (I.second != IndexType::None)
+ Left.insert(I.first);
+ for (auto &I : LastDef)
+ if (I.second != IndexType::None)
+ Left.insert(I.first);
+ for (auto R : Left)
+ closeRange(R);
+
+ // Finalize the live ranges.
+ for (auto &P : LiveMap)
+ P.second.unionize();
+}
+
+
+HexagonBlockRanges::RegToRangeMap HexagonBlockRanges::computeLiveMap(
+ InstrIndexMap &IndexMap) {
+ RegToRangeMap LiveMap;
+ DEBUG(dbgs() << LLVM_FUNCTION_NAME << ": index map\n" << IndexMap << '\n');
+ computeInitialLiveRanges(IndexMap, LiveMap);
+ DEBUG(dbgs() << LLVM_FUNCTION_NAME << ": live map\n"
+ << PrintRangeMap(LiveMap, TRI) << '\n');
+ return LiveMap;
+}
+
+
+HexagonBlockRanges::RegToRangeMap HexagonBlockRanges::computeDeadMap(
+ InstrIndexMap &IndexMap, RegToRangeMap &LiveMap) {
+ RegToRangeMap DeadMap;
+
+ auto addDeadRanges = [&IndexMap,&LiveMap,&DeadMap] (RegisterRef R) -> void {
+ auto F = LiveMap.find(R);
+ if (F == LiveMap.end() || F->second.empty()) {
+ DeadMap[R].add(IndexType::Entry, IndexType::Exit, false, false);
+ return;
+ }
+
+ RangeList &RL = F->second;
+ RangeList::iterator A = RL.begin(), Z = RL.end()-1;
+
+ // Try to create the initial range.
+ if (A->start() != IndexType::Entry) {
+ IndexType DE = IndexMap.getPrevIndex(A->start());
+ if (DE != IndexType::Entry)
+ DeadMap[R].add(IndexType::Entry, DE, false, false);
+ }
+
+ while (A != Z) {
+ // Creating a dead range that follows A. Pay attention to empty
+ // ranges (i.e. those ending with "None").
+ IndexType AE = (A->end() == IndexType::None) ? A->start() : A->end();
+ IndexType DS = IndexMap.getNextIndex(AE);
+ ++A;
+ IndexType DE = IndexMap.getPrevIndex(A->start());
+ if (DS < DE)
+ DeadMap[R].add(DS, DE, false, false);
+ }
+
+ // Try to create the final range.
+ if (Z->end() != IndexType::Exit) {
+ IndexType ZE = (Z->end() == IndexType::None) ? Z->start() : Z->end();
+ IndexType DS = IndexMap.getNextIndex(ZE);
+ if (DS < IndexType::Exit)
+ DeadMap[R].add(DS, IndexType::Exit, false, false);
+ }
+ };
+
+ MachineFunction &MF = *IndexMap.getBlock().getParent();
+ auto &MRI = MF.getRegInfo();
+ unsigned NumRegs = TRI.getNumRegs();
+ BitVector Visited(NumRegs);
+ for (unsigned R = 1; R < NumRegs; ++R) {
+ for (auto S : expandToSubRegs({R,0}, MRI, TRI)) {
+ if (Reserved[S.Reg] || Visited[S.Reg])
+ continue;
+ addDeadRanges(S);
+ Visited[S.Reg] = true;
+ }
+ }
+ for (auto &P : LiveMap)
+ if (TargetRegisterInfo::isVirtualRegister(P.first.Reg))
+ addDeadRanges(P.first);
+
+ DEBUG(dbgs() << LLVM_FUNCTION_NAME << ": dead map\n"
+ << PrintRangeMap(DeadMap, TRI) << '\n');
+ return DeadMap;
+}
+
+raw_ostream &llvm::operator<<(raw_ostream &OS,
+ HexagonBlockRanges::IndexType Idx) {
+ if (Idx == HexagonBlockRanges::IndexType::None)
+ return OS << '-';
+ if (Idx == HexagonBlockRanges::IndexType::Entry)
+ return OS << 'n';
+ if (Idx == HexagonBlockRanges::IndexType::Exit)
+ return OS << 'x';
+ return OS << unsigned(Idx)-HexagonBlockRanges::IndexType::First+1;
+}
+
+// Print an index range as [start:end]; '}' marks a tied end and a
+// trailing '!' a fixed range.
+raw_ostream &llvm::operator<<(raw_ostream &OS,
+ const HexagonBlockRanges::IndexRange &IR) {
+ OS << '[' << IR.start() << ':' << IR.end() << (IR.TiedEnd ? '}' : ']');
+ if (IR.Fixed)
+ OS << '!';
+ return OS;
+}
+
+raw_ostream &llvm::operator<<(raw_ostream &OS,
+ const HexagonBlockRanges::RangeList &RL) {
+ for (auto &R : RL)
+ OS << R << " ";
+ return OS;
+}
+
+raw_ostream &llvm::operator<<(raw_ostream &OS,
+ const HexagonBlockRanges::InstrIndexMap &M) {
+ for (auto &In : M.Block) {
+ HexagonBlockRanges::IndexType Idx = M.getIndex(&In);
+ OS << Idx << (Idx == M.Last ? ". " : " ") << In;
+ }
+ return OS;
+}
+
+raw_ostream &llvm::operator<<(raw_ostream &OS,
+ const HexagonBlockRanges::PrintRangeMap &P) {
+ for (auto &I : P.Map) {
+ const HexagonBlockRanges::RangeList &RL = I.second;
+ OS << PrintReg(I.first.Reg, &P.TRI, I.first.Sub) << " -> " << RL << "\n";
+ }
+ return OS;
+}
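For readers new to the file above: at its core, RangeList is ordinary interval algebra. unionize() sorts and folds overlapping ranges; subtract() splits each surviving range around the removed one via addsub(). A standalone sketch of the fold, using plain integer pairs in place of IndexType:

    #include <algorithm>
    #include <cstdio>
    #include <utility>
    #include <vector>

    using Range = std::pair<int, int>; // [start, end], inclusive

    // Merge overlapping ranges in place, mirroring RangeList::unionize().
    static void unionize(std::vector<Range> &RL) {
      if (RL.empty())
        return;
      std::sort(RL.begin(), RL.end());
      auto It = RL.begin();
      while (It != RL.end() - 1) {
        auto Next = It + 1;
        if (Next->first <= It->second) { // overlap: fold Next into It
          It->second = std::max(It->second, Next->second);
          RL.erase(Next);
          continue;
        }
        ++It;
      }
    }

    int main() {
      std::vector<Range> RL = {{5, 9}, {1, 3}, {2, 6}};
      unionize(RL);
      for (auto &R : RL)
        std::printf("[%d:%d] ", R.first, R.second); // prints "[1:9]"
      std::printf("\n");
    }

The real unionize() adds a MergeAdjacent switch because folding a range that merely touches its neighbor is valid for dead ranges but not for live ones.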
diff --git a/lib/Target/Hexagon/HexagonBlockRanges.h b/lib/Target/Hexagon/HexagonBlockRanges.h
new file mode 100644
index 000000000000..9c3f938f99eb
--- /dev/null
+++ b/lib/Target/Hexagon/HexagonBlockRanges.h
@@ -0,0 +1,239 @@
+//===--- HexagonBlockRanges.h ---------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef HEXAGON_BLOCK_RANGES_H
+#define HEXAGON_BLOCK_RANGES_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/MC/MCRegisterInfo.h" // For MCPhysReg.
+#include <map>
+#include <set>
+#include <vector>
+
+namespace llvm {
+ class Function;
+ class HexagonSubtarget;
+ class MachineBasicBlock;
+ class MachineFunction;
+ class MachineInstr;
+ class MCInstrDesc;
+ class raw_ostream;
+ class TargetInstrInfo;
+ class TargetRegisterClass;
+ class TargetRegisterInfo;
+ class Type;
+
+struct HexagonBlockRanges {
+ HexagonBlockRanges(MachineFunction &MF);
+
+ struct RegisterRef {
+ unsigned Reg, Sub;
+ bool operator<(RegisterRef R) const {
+ return Reg < R.Reg || (Reg == R.Reg && Sub < R.Sub);
+ }
+ };
+ typedef std::set<RegisterRef> RegisterSet;
+
+ // This is to represent an "index", which is an abstraction of a position
+ // of an instruction within a basic block.
+ class IndexType {
+ public:
+ enum : unsigned {
+ None = 0,
+ Entry = 1,
+ Exit = 2,
+ First = 11 // First instruction index; smaller values are reserved.
+ };
+ static bool isInstr(IndexType X) { return X.Index >= First; }
+
+ IndexType() : Index(None) {}
+ IndexType(unsigned Idx) : Index(Idx) {}
+ operator unsigned() const;
+ bool operator== (unsigned x) const;
+ bool operator== (IndexType Idx) const;
+ bool operator!= (unsigned x) const;
+ bool operator!= (IndexType Idx) const;
+ IndexType operator++ ();
+ bool operator< (unsigned Idx) const;
+ bool operator< (IndexType Idx) const;
+ bool operator<= (IndexType Idx) const;
+
+ private:
+ bool operator> (IndexType Idx) const;
+ bool operator>= (IndexType Idx) const;
+
+ unsigned Index;
+ };
+
+ // A range of indices, essentially a representation of a live range.
+ // This is also used to represent "dead ranges", i.e. ranges where a
+ // register is dead.
+ class IndexRange : public std::pair<IndexType,IndexType> {
+ public:
+ IndexRange() : Fixed(false), TiedEnd(false) {}
+ IndexRange(IndexType Start, IndexType End, bool F = false, bool T = false)
+ : std::pair<IndexType,IndexType>(Start, End), Fixed(F), TiedEnd(T) {}
+ IndexType start() const { return first; }
+ IndexType end() const { return second; }
+
+ bool operator< (const IndexRange &A) const {
+ return start() < A.start();
+ }
+ bool overlaps(const IndexRange &A) const;
+ bool contains(const IndexRange &A) const;
+ void merge(const IndexRange &A);
+
+ bool Fixed; // Whether the range can be renamed; "Fixed" means it cannot.
+ bool TiedEnd; // The end is not a use, but a dead def tied to a use.
+
+ private:
+ void setStart(const IndexType &S) { first = S; }
+ void setEnd(const IndexType &E) { second = E; }
+ };
+
+ // A list of index ranges. This represents liveness of a register
+ // in a basic block.
+ class RangeList : public std::vector<IndexRange> {
+ public:
+ void add(IndexType Start, IndexType End, bool Fixed, bool TiedEnd) {
+ push_back(IndexRange(Start, End, Fixed, TiedEnd));
+ }
+ void add(const IndexRange &Range) {
+ push_back(Range);
+ }
+ void include(const RangeList &RL);
+ void unionize(bool MergeAdjacent = false);
+ void subtract(const IndexRange &Range);
+
+ private:
+ void addsub(const IndexRange &A, const IndexRange &B);
+ };
+
+ class InstrIndexMap {
+ public:
+ InstrIndexMap(MachineBasicBlock &B);
+ MachineInstr *getInstr(IndexType Idx) const;
+ IndexType getIndex(MachineInstr *MI) const;
+ MachineBasicBlock &getBlock() const { return Block; }
+ IndexType getPrevIndex(IndexType Idx) const;
+ IndexType getNextIndex(IndexType Idx) const;
+ void replaceInstr(MachineInstr *OldMI, MachineInstr *NewMI);
+
+ friend raw_ostream &operator<< (raw_ostream &OS, const InstrIndexMap &Map);
+ IndexType First, Last;
+
+ private:
+ MachineBasicBlock &Block;
+ std::map<IndexType,MachineInstr*> Map;
+ };
+
+ typedef std::map<RegisterRef,RangeList> RegToRangeMap;
+ RegToRangeMap computeLiveMap(InstrIndexMap &IndexMap);
+ RegToRangeMap computeDeadMap(InstrIndexMap &IndexMap, RegToRangeMap &LiveMap);
+ static RegisterSet expandToSubRegs(RegisterRef R,
+ const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI);
+
+ struct PrintRangeMap {
+ PrintRangeMap(const RegToRangeMap &M, const TargetRegisterInfo &I)
+ : Map(M), TRI(I) {}
+
+ friend raw_ostream &operator<< (raw_ostream &OS, const PrintRangeMap &P);
+ private:
+ const RegToRangeMap &Map;
+ const TargetRegisterInfo &TRI;
+ };
+
+private:
+ RegisterSet getLiveIns(const MachineBasicBlock &B);
+
+ void computeInitialLiveRanges(InstrIndexMap &IndexMap,
+ RegToRangeMap &LiveMap);
+
+ MachineFunction &MF;
+ const HexagonSubtarget &HST;
+ const TargetInstrInfo &TII;
+ const TargetRegisterInfo &TRI;
+ BitVector Reserved;
+};
+
+
+inline HexagonBlockRanges::IndexType::operator unsigned() const {
+ assert(Index >= First);
+ return Index;
+}
+
+inline bool HexagonBlockRanges::IndexType::operator== (unsigned x) const {
+ return Index == x;
+}
+
+inline bool HexagonBlockRanges::IndexType::operator== (IndexType Idx) const {
+ return Index == Idx.Index;
+}
+
+inline bool HexagonBlockRanges::IndexType::operator!= (unsigned x) const {
+ return Index != x;
+}
+
+inline bool HexagonBlockRanges::IndexType::operator!= (IndexType Idx) const {
+ return Index != Idx.Index;
+}
+
+inline
+HexagonBlockRanges::IndexType HexagonBlockRanges::IndexType::operator++ () {
+ assert(Index != None);
+ assert(Index != Exit);
+ if (Index == Entry)
+ Index = First;
+ else
+ ++Index;
+ return *this;
+}
+
+inline bool HexagonBlockRanges::IndexType::operator< (unsigned Idx) const {
+ return operator< (IndexType(Idx));
+}
+
+inline bool HexagonBlockRanges::IndexType::operator< (IndexType Idx) const {
+ // !(x < x).
+ if (Index == Idx.Index)
+ return false;
+ // !(None < x) for all x.
+ // !(x < None) for all x.
+ if (Index == None || Idx.Index == None)
+ return false;
+ // !(Exit < x) for all x.
+ // !(x < Entry) for all x.
+ if (Index == Exit || Idx.Index == Entry)
+ return false;
+ // Entry < x for all x != Entry.
+ // x < Exit for all x != Exit.
+ if (Index == Entry || Idx.Index == Exit)
+ return true;
+
+ return Index < Idx.Index;
+}
+
+inline bool HexagonBlockRanges::IndexType::operator<= (IndexType Idx) const {
+ return operator==(Idx) || operator<(Idx);
+}
+
+
+raw_ostream &operator<< (raw_ostream &OS, HexagonBlockRanges::IndexType Idx);
+raw_ostream &operator<< (raw_ostream &OS,
+ const HexagonBlockRanges::IndexRange &IR);
+raw_ostream &operator<< (raw_ostream &OS,
+ const HexagonBlockRanges::RangeList &RL);
+raw_ostream &operator<< (raw_ostream &OS,
+ const HexagonBlockRanges::InstrIndexMap &M);
+raw_ostream &operator<< (raw_ostream &OS,
+ const HexagonBlockRanges::PrintRangeMap &P);
+
+} // namespace llvm
+
+#endif
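The subtle part of the header above is IndexType::operator<, which mixes sentinels with real indices. A standalone restatement of its rules, checked with asserts:

    #include <cassert>

    // Mirrors the IndexType encoding above.
    enum : unsigned { None = 0, Entry = 1, Exit = 2, First = 11 };

    static bool less(unsigned A, unsigned B) {
      if (A == B)
        return false; // !(x < x)
      if (A == None || B == None)
        return false; // None is unordered
      if (A == Exit || B == Entry)
        return false; // nothing follows Exit or precedes Entry
      if (A == Entry || B == Exit)
        return true;  // Entry sorts first, Exit last
      return A < B;   // instruction indices order numerically
    }

    int main() {
      assert(less(Entry, First));     // Entry precedes the first instruction
      assert(less(First, First + 1)); // plain indices compare numerically
      assert(less(First + 1, Exit));  // Exit follows everything
      assert(!less(None, Entry) && !less(Entry, None)); // None is unordered
    }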
diff --git a/lib/Target/Hexagon/HexagonBranchRelaxation.cpp b/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
new file mode 100644
index 000000000000..f042baf1ef05
--- /dev/null
+++ b/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
@@ -0,0 +1,211 @@
+//===--- HexagonBranchRelaxation.cpp - Identify and relax long jumps ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "hexagon-brelax"
+
+#include "Hexagon.h"
+#include "HexagonInstrInfo.h"
+#include "HexagonSubtarget.h"
+#include "HexagonTargetMachine.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/PassSupport.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+// Since we have no exact knowledge of code layout, allow some safety buffer
+// for jump target. This is measured in bytes.
+static cl::opt<uint32_t> BranchRelaxSafetyBuffer("branch-relax-safety-buffer",
+ cl::init(200), cl::Hidden, cl::ZeroOrMore, cl::desc("safety buffer size"));
+
+namespace llvm {
+ FunctionPass *createHexagonBranchRelaxation();
+ void initializeHexagonBranchRelaxationPass(PassRegistry&);
+}
+
+namespace {
+ struct HexagonBranchRelaxation : public MachineFunctionPass {
+ public:
+ static char ID;
+ HexagonBranchRelaxation() : MachineFunctionPass(ID) {
+ initializeHexagonBranchRelaxationPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "Hexagon Branch Relaxation";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ private:
+ const HexagonInstrInfo *HII;
+ const HexagonRegisterInfo *HRI;
+
+ bool relaxBranches(MachineFunction &MF);
+ void computeOffset(MachineFunction &MF,
+ DenseMap<MachineBasicBlock*, unsigned> &BlockToInstOffset);
+ bool reGenerateBranch(MachineFunction &MF,
+ DenseMap<MachineBasicBlock*, unsigned> &BlockToInstOffset);
+ bool isJumpOutOfRange(MachineInstr &MI,
+ DenseMap<MachineBasicBlock*, unsigned> &BlockToInstOffset);
+ };
+
+ char HexagonBranchRelaxation::ID = 0;
+} // end anonymous namespace
+
+INITIALIZE_PASS(HexagonBranchRelaxation, "hexagon-brelax",
+ "Hexagon Branch Relaxation", false, false)
+
+FunctionPass *llvm::createHexagonBranchRelaxation() {
+ return new HexagonBranchRelaxation();
+}
+
+
+bool HexagonBranchRelaxation::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "****** Hexagon Branch Relaxation ******\n");
+
+ auto &HST = MF.getSubtarget<HexagonSubtarget>();
+ HII = HST.getInstrInfo();
+ HRI = HST.getRegisterInfo();
+
+ return relaxBranches(MF);
+}
+
+
+void HexagonBranchRelaxation::computeOffset(MachineFunction &MF,
+ DenseMap<MachineBasicBlock*, unsigned> &OffsetMap) {
+ // Offset of the current instruction from the start of the function.
+ unsigned InstOffset = 0;
+ for (auto &B : MF) {
+ if (B.getAlignment()) {
+ // Although we don't know the exact layout of the final code, we need
+ // to account for alignment padding somehow. This heuristic pads each
+ // aligned basic block according to the alignment value.
+ int ByteAlign = (1u << B.getAlignment()) - 1;
+ InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign);
+ }
+ OffsetMap[&B] = InstOffset;
+ for (auto &MI : B.instrs())
+ InstOffset += HII->getSize(&MI);
+ }
+}
+
+
+/// relaxBranches - For Hexagon, if the jump target/loop label is too far
+/// from the jump/loop instruction, then we need to make sure that we have
+/// constant extenders set for jumps and loops.
+///
+/// Offsets are first estimated per block; any branch found out of range is
+/// then marked for constant extension.
+bool HexagonBranchRelaxation::relaxBranches(MachineFunction &MF) {
+ // Estimate the offset of each basic block from the start of the
+ // function; the map records the offset of each block's first instruction.
+ DenseMap<MachineBasicBlock*, unsigned> BlockToInstOffset;
+ computeOffset(MF, BlockToInstOffset);
+
+ return reGenerateBranch(MF, BlockToInstOffset);
+}
+
+
+/// Check if a given instruction is:
+/// - a jump to a distant target
+/// - that exceeds its immediate range
+/// If both conditions are true, it requires constant extension.
+bool HexagonBranchRelaxation::isJumpOutOfRange(MachineInstr &MI,
+ DenseMap<MachineBasicBlock*, unsigned> &BlockToInstOffset) {
+ MachineBasicBlock &B = *MI.getParent();
+ auto FirstTerm = B.getFirstInstrTerminator();
+ if (FirstTerm == B.instr_end())
+ return false;
+
+ unsigned InstOffset = BlockToInstOffset[&B];
+ unsigned Distance = 0;
+
+ // To save time, approximate the position of the branch instruction as
+ // the end of the MBB: number of instructions times the typical
+ // instruction size.
+ InstOffset += HII->nonDbgBBSize(&B) * HEXAGON_INSTR_SIZE;
+
+ MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
+ SmallVector<MachineOperand, 4> Cond;
+
+ // Try to analyze this branch.
+ if (HII->analyzeBranch(B, TBB, FBB, Cond, false)) {
+ // Could not analyze it. See if this is something we can recognize.
+ // If it is a NVJ, it should always have its target in
+ // a fixed location.
+ if (HII->isNewValueJump(&*FirstTerm))
+ TBB = FirstTerm->getOperand(HII->getCExtOpNum(&*FirstTerm)).getMBB();
+ }
+ if (TBB && &MI == &*FirstTerm) {
+ Distance = std::abs((long long)InstOffset - BlockToInstOffset[TBB])
+ + BranchRelaxSafetyBuffer;
+ return !HII->isJumpWithinBranchRange(&*FirstTerm, Distance);
+ }
+ if (FBB) {
+ // Look for second terminator.
+ auto SecondTerm = std::next(FirstTerm);
+ assert(SecondTerm != B.instr_end() &&
+ (SecondTerm->isBranch() || SecondTerm->isCall()) &&
+ "Bad second terminator");
+ if (&MI != &*SecondTerm)
+ return false;
+ // Analyze the second branch in the BB.
+ Distance = std::abs((long long)InstOffset - BlockToInstOffset[FBB])
+ + BranchRelaxSafetyBuffer;
+ return !HII->isJumpWithinBranchRange(&*SecondTerm, Distance);
+ }
+ return false;
+}
+
+
+bool HexagonBranchRelaxation::reGenerateBranch(MachineFunction &MF,
+ DenseMap<MachineBasicBlock*, unsigned> &BlockToInstOffset) {
+ bool Changed = false;
+
+ for (auto &B : MF) {
+ for (auto &MI : B) {
+ if (!MI.isBranch() || !isJumpOutOfRange(MI, BlockToInstOffset))
+ continue;
+ DEBUG(dbgs() << "Long distance jump. isExtendable("
+ << HII->isExtendable(&MI) << ") isConstExtended("
+ << HII->isConstExtended(&MI) << ") " << MI);
+
+ // Since we have not merged HW loops relaxation into
+ // this code (yet), soften our approach for the moment.
+ if (!HII->isExtendable(&MI) && !HII->isExtended(&MI)) {
+ DEBUG(dbgs() << "\tUnderimplemented relax branch instruction.\n");
+ } else {
+ // Find which operand is expandable.
+ int ExtOpNum = HII->getCExtOpNum(&MI);
+ MachineOperand &MO = MI.getOperand(ExtOpNum);
+ // This needs to be something we understand. So far we assume all
+ // branches have only an MBB address as the expandable field.
+ // If that changes, this will need to be expanded.
+ assert(MO.isMBB() && "Branch with unknown expandable field type");
+ // Mark given operand as extended.
+ MO.addTargetFlag(HexagonII::HMOTF_ConstExtended);
+ Changed = true;
+ }
+ }
+ }
+ return Changed;
+}
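computeOffset() above pads every aligned block with the standard round-up-by-mask trick. A standalone check of that arithmetic:

    #include <cassert>

    // (Offset + ByteAlign) & ~ByteAlign rounds Offset up to the next
    // (1 << AlignLog2)-byte boundary, as in computeOffset() above.
    static unsigned alignUp(unsigned Offset, unsigned AlignLog2) {
      unsigned ByteAlign = (1u << AlignLog2) - 1;
      return (Offset + ByteAlign) & ~ByteAlign;
    }

    int main() {
      assert(alignUp(0, 4) == 0);   // already on a 16-byte boundary
      assert(alignUp(1, 4) == 16);  // 1 pads up to 16
      assert(alignUp(17, 4) == 32); // 17 pads up to 32
      assert(alignUp(32, 4) == 32); // boundaries are fixed points
    }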
diff --git a/lib/Target/Hexagon/HexagonCFGOptimizer.cpp b/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
index efafdd007289..559bdfb16a6f 100644
--- a/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
+++ b/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
@@ -16,7 +16,6 @@
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -38,9 +37,9 @@ namespace {
class HexagonCFGOptimizer : public MachineFunctionPass {
private:
- void InvertAndChangeJumpTarget(MachineInstr*, MachineBasicBlock*);
+ void InvertAndChangeJumpTarget(MachineInstr &, MachineBasicBlock *);
- public:
+public:
static char ID;
HexagonCFGOptimizer() : MachineFunctionPass(ID) {
initializeHexagonCFGOptimizerPass(*PassRegistry::getPassRegistry());
@@ -50,6 +49,10 @@ private:
return "Hexagon CFG Optimizer";
}
bool runOnMachineFunction(MachineFunction &Fn) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
};
@@ -65,14 +68,12 @@ static bool IsUnconditionalJump(int Opc) {
return (Opc == Hexagon::J2_jump);
}
-
-void
-HexagonCFGOptimizer::InvertAndChangeJumpTarget(MachineInstr* MI,
- MachineBasicBlock* NewTarget) {
+void HexagonCFGOptimizer::InvertAndChangeJumpTarget(
+ MachineInstr &MI, MachineBasicBlock *NewTarget) {
const TargetInstrInfo *TII =
- MI->getParent()->getParent()->getSubtarget().getInstrInfo();
+ MI.getParent()->getParent()->getSubtarget().getInstrInfo();
int NewOpcode = 0;
- switch(MI->getOpcode()) {
+ switch (MI.getOpcode()) {
case Hexagon::J2_jumpt:
NewOpcode = Hexagon::J2_jumpf;
break;
@@ -93,12 +94,15 @@ HexagonCFGOptimizer::InvertAndChangeJumpTarget(MachineInstr* MI,
llvm_unreachable("Cannot handle this case");
}
- MI->setDesc(TII->get(NewOpcode));
- MI->getOperand(1).setMBB(NewTarget);
+ MI.setDesc(TII->get(NewOpcode));
+ MI.getOperand(1).setMBB(NewTarget);
}
bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
+ if (skipFunction(*Fn.getFunction()))
+ return false;
+
// Loop over all of the basic blocks.
for (MachineFunction::iterator MBBb = Fn.begin(), MBBe = Fn.end();
MBBb != MBBe; ++MBBb) {
@@ -107,8 +111,8 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
// Traverse the basic block.
MachineBasicBlock::iterator MII = MBB->getFirstTerminator();
if (MII != MBB->end()) {
- MachineInstr *MI = MII;
- int Opc = MI->getOpcode();
+ MachineInstr &MI = *MII;
+ int Opc = MI.getOpcode();
if (IsConditionalBranch(Opc)) {
//
@@ -160,9 +164,9 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
// The target of the unconditional branch must be JumpAroundTarget.
// TODO: If not, we should not invert the unconditional branch.
MachineBasicBlock* CondBranchTarget = nullptr;
- if ((MI->getOpcode() == Hexagon::J2_jumpt) ||
- (MI->getOpcode() == Hexagon::J2_jumpf)) {
- CondBranchTarget = MI->getOperand(1).getMBB();
+ if (MI.getOpcode() == Hexagon::J2_jumpt ||
+ MI.getOpcode() == Hexagon::J2_jumpf) {
+ CondBranchTarget = MI.getOperand(1).getMBB();
}
if (!LayoutSucc || (CondBranchTarget != JumpAroundTarget)) {
@@ -174,6 +178,7 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
// Ensure that BB2 has one instruction -- an unconditional jump.
if ((LayoutSucc->size() == 1) &&
IsUnconditionalJump(LayoutSucc->front().getOpcode())) {
+ assert(JumpAroundTarget && "jump target is needed to process second basic block");
MachineBasicBlock* UncondTarget =
LayoutSucc->front().getOperand(0).getMBB();
// Check if the layout successor of BB2 is BB3.
@@ -232,15 +237,8 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
// Public Constructor Functions
//===----------------------------------------------------------------------===//
-static void initializePassOnce(PassRegistry &Registry) {
- PassInfo *PI = new PassInfo("Hexagon CFG Optimizer", "hexagon-cfg",
- &HexagonCFGOptimizer::ID, nullptr, false, false);
- Registry.registerPass(*PI, true);
-}
-
-void llvm::initializeHexagonCFGOptimizerPass(PassRegistry &Registry) {
- CALL_ONCE_INITIALIZATION(initializePassOnce)
-}
+INITIALIZE_PASS(HexagonCFGOptimizer, "hexagon-cfg", "Hexagon CFG Optimizer",
+ false, false)
FunctionPass *llvm::createHexagonCFGOptimizer() {
return new HexagonCFGOptimizer();
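The registration hunk above swaps a hand-rolled CALL_ONCE_INITIALIZATION scheme for the standard INITIALIZE_PASS macro, which generates the initialize function that the pass constructor calls. A minimal sketch; the pass and its command-line name are hypothetical:

    #include "llvm/CodeGen/MachineFunctionPass.h"
    #include "llvm/Pass.h"

    using namespace llvm;

    namespace llvm {
    void initializeRegDemoPassPass(PassRegistry &);
    }

    namespace {
    struct RegDemoPass : public MachineFunctionPass {
      static char ID;
      RegDemoPass() : MachineFunctionPass(ID) {
        // Runs the macro-generated, call-once registration.
        initializeRegDemoPassPass(*PassRegistry::getPassRegistry());
      }
      bool runOnMachineFunction(MachineFunction &) override { return false; }
    };
    char RegDemoPass::ID = 0;
    } // end anonymous namespace

    // Expands to the boilerplate the old code wrote by hand, including the
    // definition of initializeRegDemoPassPass declared above.
    INITIALIZE_PASS(RegDemoPass, "regdemo", "Register Demo Pass", false, false)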
diff --git a/lib/Target/Hexagon/HexagonCommonGEP.cpp b/lib/Target/Hexagon/HexagonCommonGEP.cpp
index 931db6687bf8..b612b11aed50 100644
--- a/lib/Target/Hexagon/HexagonCommonGEP.cpp
+++ b/lib/Target/Hexagon/HexagonCommonGEP.cpp
@@ -90,8 +90,8 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<DominatorTreeWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addRequired<PostDominatorTree>();
- AU.addPreserved<PostDominatorTree>();
+ AU.addRequired<PostDominatorTreeWrapperPass>();
+ AU.addPreserved<PostDominatorTreeWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
AU.addPreserved<LoopInfoWrapperPass>();
FunctionPass::getAnalysisUsage(AU);
@@ -147,7 +147,7 @@ char HexagonCommonGEP::ID = 0;
INITIALIZE_PASS_BEGIN(HexagonCommonGEP, "hcommgep", "Hexagon Common GEP",
false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(PostDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(HexagonCommonGEP, "hcommgep", "Hexagon Common GEP",
false, false)
@@ -212,7 +212,6 @@ namespace {
if (Comma)
OS << ',';
OS << "used";
- Comma = true;
}
OS << "} ";
if (GN.Flags & GepNode::Root)
@@ -1268,6 +1267,9 @@ void HexagonCommonGEP::removeDeadCode() {
bool HexagonCommonGEP::runOnFunction(Function &F) {
+ if (skipFunction(F))
+ return false;
+
// For now bail out on C++ exception handling.
for (Function::iterator A = F.begin(), Z = F.end(); A != Z; ++A)
for (BasicBlock::iterator I = A->begin(), E = A->end(); I != E; ++I)
@@ -1276,7 +1278,7 @@ bool HexagonCommonGEP::runOnFunction(Function &F) {
Fn = &F;
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- PDT = &getAnalysis<PostDominatorTree>();
+ PDT = &getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
Ctx = &F.getContext();
@@ -1295,7 +1297,7 @@ bool HexagonCommonGEP::runOnFunction(Function &F) {
materialize(Loc);
removeDeadCode();
-#ifdef XDEBUG
+#ifdef EXPENSIVE_CHECKS
// Run this only when expensive checks are enabled.
verifyFunction(F);
#endif
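The HexagonCommonGEP hunks track the 3.9 analysis split: PostDominatorTree is now a result object owned by PostDominatorTreeWrapperPass, so passes depend on the wrapper and unwrap the tree. A sketch of the updated usage with a hypothetical pass:

    #include "llvm/Analysis/PostDominators.h"
    #include "llvm/Pass.h"

    using namespace llvm;

    namespace {
    struct UsesPDT : public FunctionPass {
      static char ID;
      UsesPDT() : FunctionPass(ID) {}

      void getAnalysisUsage(AnalysisUsage &AU) const override {
        // Depend on the wrapper pass, not on the result type itself.
        AU.addRequired<PostDominatorTreeWrapperPass>();
        AU.addPreserved<PostDominatorTreeWrapperPass>();
      }

      bool runOnFunction(Function &) override {
        // The wrapper hands out the PostDominatorTree result.
        PostDominatorTree &PDT =
            getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
        (void)PDT;
        return false;
      }
    };
    char UsesPDT::ID = 0;
    } // end anonymous namespace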
diff --git a/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
index 9fd863f6e153..face0f3f64b4 100644
--- a/lib/Target/Hexagon/HexagonCopyToCombine.cpp
+++ b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
@@ -42,6 +42,11 @@ cl::opt<bool> IsCombinesDisabled("disable-merge-into-combines",
cl::init(false),
cl::desc("Disable merging into combines"));
static
+cl::opt<bool> IsConst64Disabled("disable-const64",
+ cl::Hidden, cl::ZeroOrMore,
+ cl::init(false),
+ cl::desc("Disable generation of const64"));
+static
cl::opt<unsigned>
MaxNumOfInstsBetweenNewValueStoreAndTFR("max-num-inst-between-tfr-and-nv-store",
cl::Hidden, cl::init(4),
@@ -62,6 +67,8 @@ class HexagonCopyToCombine : public MachineFunctionPass {
bool ShouldCombineAggressively;
DenseSet<MachineInstr *> PotentiallyNewifiableTFR;
+ SmallVector<MachineInstr *, 8> DbgMItoMove;
+
public:
static char ID;
@@ -79,15 +86,22 @@ public:
bool runOnMachineFunction(MachineFunction &Fn) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
private:
- MachineInstr *findPairable(MachineInstr *I1, bool &DoInsertAtI1);
+ MachineInstr *findPairable(MachineInstr &I1, bool &DoInsertAtI1,
+ bool AllowC64);
void findPotentialNewifiableTFRs(MachineBasicBlock &);
- void combine(MachineInstr *I1, MachineInstr *I2,
- MachineBasicBlock::iterator &MI, bool DoInsertAtI1);
+ void combine(MachineInstr &I1, MachineInstr &I2,
+ MachineBasicBlock::iterator &MI, bool DoInsertAtI1,
+ bool OptForSize);
- bool isSafeToMoveTogether(MachineInstr *I1, MachineInstr *I2,
+ bool isSafeToMoveTogether(MachineInstr &I1, MachineInstr &I2,
unsigned I1DestReg, unsigned I2DestReg,
bool &DoInsertAtI1);
@@ -102,6 +116,9 @@ private:
void emitCombineII(MachineBasicBlock::iterator &Before, unsigned DestReg,
MachineOperand &HiOperand, MachineOperand &LoOperand);
+
+ void emitConst64(MachineBasicBlock::iterator &Before, unsigned DestReg,
+ MachineOperand &HiOperand, MachineOperand &LoOperand);
};
} // End anonymous namespace.
@@ -111,14 +128,13 @@ char HexagonCopyToCombine::ID = 0;
INITIALIZE_PASS(HexagonCopyToCombine, "hexagon-copy-combine",
"Hexagon Copy-To-Combine Pass", false, false)
-static bool isCombinableInstType(MachineInstr *MI,
- const HexagonInstrInfo *TII,
+static bool isCombinableInstType(MachineInstr &MI, const HexagonInstrInfo *TII,
bool ShouldCombineAggressively) {
- switch(MI->getOpcode()) {
+ switch (MI.getOpcode()) {
case Hexagon::A2_tfr: {
// A COPY instruction can be combined if its arguments are IntRegs (32bit).
- const MachineOperand &Op0 = MI->getOperand(0);
- const MachineOperand &Op1 = MI->getOperand(1);
+ const MachineOperand &Op0 = MI.getOperand(0);
+ const MachineOperand &Op1 = MI.getOperand(1);
assert(Op0.isReg() && Op1.isReg());
unsigned DestReg = Op0.getReg();
@@ -130,8 +146,8 @@ static bool isCombinableInstType(MachineInstr *MI,
case Hexagon::A2_tfrsi: {
// A transfer-immediate can be combined if its argument is a signed 8bit
// value.
- const MachineOperand &Op0 = MI->getOperand(0);
- const MachineOperand &Op1 = MI->getOperand(1);
+ const MachineOperand &Op0 = MI.getOperand(0);
+ const MachineOperand &Op1 = MI.getOperand(1);
assert(Op0.isReg());
unsigned DestReg = Op0.getReg();
@@ -154,11 +170,10 @@ static bool isCombinableInstType(MachineInstr *MI,
return false;
}
-template <unsigned N>
-static bool isGreaterThanNBitTFRI(const MachineInstr *I) {
- if (I->getOpcode() == Hexagon::TFRI64_V4 ||
- I->getOpcode() == Hexagon::A2_tfrsi) {
- const MachineOperand &Op = I->getOperand(1);
+template <unsigned N> static bool isGreaterThanNBitTFRI(const MachineInstr &I) {
+ if (I.getOpcode() == Hexagon::TFRI64_V4 ||
+ I.getOpcode() == Hexagon::A2_tfrsi) {
+ const MachineOperand &Op = I.getOperand(1);
return !Op.isImm() || !isInt<N>(Op.getImm());
}
return false;
@@ -167,19 +182,34 @@ static bool isGreaterThanNBitTFRI(const MachineInstr *I) {
/// areCombinableOperations - Returns true if the two instructions can be merged
/// into a combine (ignoring register constraints).
static bool areCombinableOperations(const TargetRegisterInfo *TRI,
- MachineInstr *HighRegInst,
- MachineInstr *LowRegInst) {
- unsigned HiOpc = HighRegInst->getOpcode();
- unsigned LoOpc = LowRegInst->getOpcode();
+ MachineInstr &HighRegInst,
+ MachineInstr &LowRegInst, bool AllowC64) {
+ unsigned HiOpc = HighRegInst.getOpcode();
+ unsigned LoOpc = LowRegInst.getOpcode();
(void)HiOpc; // Fix compiler warning
(void)LoOpc; // Fix compiler warning
assert((HiOpc == Hexagon::A2_tfr || HiOpc == Hexagon::A2_tfrsi) &&
(LoOpc == Hexagon::A2_tfr || LoOpc == Hexagon::A2_tfrsi) &&
"Assume individual instructions are of a combinable type");
- // There is no combine of two constant extended values.
+ if (!AllowC64) {
+ // There is no combine of two constant extended values.
+ if (isGreaterThanNBitTFRI<8>(HighRegInst) &&
+ isGreaterThanNBitTFRI<6>(LowRegInst))
+ return false;
+ }
+
+ // There is a combine of two constant extended values into CONST64,
+ // provided both constants are true immediates.
+ if (isGreaterThanNBitTFRI<16>(HighRegInst) &&
+ isGreaterThanNBitTFRI<16>(LowRegInst))
+ return (HighRegInst.getOperand(1).isImm() &&
+ LowRegInst.getOperand(1).isImm());
+
+ // There is no combine of two constant-extended values unless handled above.
+ // Check both operands against 8 bits to allow both combine(#,##) and
+ // combine(##,#).
if (isGreaterThanNBitTFRI<8>(HighRegInst) &&
- isGreaterThanNBitTFRI<6>(LowRegInst))
+ isGreaterThanNBitTFRI<8>(LowRegInst))
return false;
return true;
@@ -191,25 +221,23 @@ static bool isEvenReg(unsigned Reg) {
return (Reg - Hexagon::R0) % 2 == 0;
}
-static void removeKillInfo(MachineInstr *MI, unsigned RegNotKilled) {
- for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
- MachineOperand &Op = MI->getOperand(I);
+static void removeKillInfo(MachineInstr &MI, unsigned RegNotKilled) {
+ for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
+ MachineOperand &Op = MI.getOperand(I);
if (!Op.isReg() || Op.getReg() != RegNotKilled || !Op.isKill())
continue;
Op.setIsKill(false);
}
}
-/// isUnsafeToMoveAcross - Returns true if it is unsafe to move a copy
-/// instruction from \p UseReg to \p DestReg over the instruction \p I.
-static bool isUnsafeToMoveAcross(MachineInstr *I, unsigned UseReg,
- unsigned DestReg,
- const TargetRegisterInfo *TRI) {
- return (UseReg && (I->modifiesRegister(UseReg, TRI))) ||
- I->modifiesRegister(DestReg, TRI) ||
- I->readsRegister(DestReg, TRI) ||
- I->hasUnmodeledSideEffects() ||
- I->isInlineAsm() || I->isDebugValue();
+/// Returns true if it is unsafe to move a copy instruction from \p UseReg to
+/// \p DestReg over the instruction \p MI.
+static bool isUnsafeToMoveAcross(MachineInstr &MI, unsigned UseReg,
+ unsigned DestReg,
+ const TargetRegisterInfo *TRI) {
+ return (UseReg && (MI.modifiesRegister(UseReg, TRI))) ||
+ MI.modifiesRegister(DestReg, TRI) || MI.readsRegister(DestReg, TRI) ||
+ MI.hasUnmodeledSideEffects() || MI.isInlineAsm() || MI.isDebugValue();
}
static unsigned UseReg(const MachineOperand& MO) {
@@ -218,16 +246,16 @@ static unsigned UseReg(const MachineOperand& MO) {
/// isSafeToMoveTogether - Returns true if it is safe to move I1 next to I2 such
/// that the two instructions can be paired in a combine.
-bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
- MachineInstr *I2,
+bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr &I1,
+ MachineInstr &I2,
unsigned I1DestReg,
unsigned I2DestReg,
bool &DoInsertAtI1) {
- unsigned I2UseReg = UseReg(I2->getOperand(1));
+ unsigned I2UseReg = UseReg(I2.getOperand(1));
// It is not safe to move I1 and I2 into one combine if I2 has a true
// dependence on I1.
- if (I2UseReg && I1->modifiesRegister(I2UseReg, TRI))
+ if (I2UseReg && I1.modifiesRegister(I2UseReg, TRI))
return false;
bool isSafe = true;
@@ -246,7 +274,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
// uses I2's use reg we need to modify that (first) instruction to now kill
// this reg.
unsigned KilledOperand = 0;
- if (I2->killsRegister(I2UseReg))
+ if (I2.killsRegister(I2UseReg))
KilledOperand = I2UseReg;
MachineInstr *KillingInstr = nullptr;
@@ -257,7 +285,10 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
// * reads I2's def reg
// * or has unmodelled side effects
// we can't move I2 across it.
- if (isUnsafeToMoveAcross(&*I, I2UseReg, I2DestReg, TRI)) {
+ if (I->isDebugValue())
+ continue;
+
+ if (isUnsafeToMoveAcross(*I, I2UseReg, I2DestReg, TRI)) {
isSafe = false;
break;
}
@@ -287,7 +318,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
// At O3 we got better results (dhrystone) by being more conservative here.
if (!ShouldCombineAggressively)
End = std::next(MachineBasicBlock::iterator(I2));
- unsigned I1UseReg = UseReg(I1->getOperand(1));
+ unsigned I1UseReg = UseReg(I1.getOperand(1));
// Track killed operands. If we move across an instruction that kills our
// operand, we need to update the kill information on the moved I1. It kills
// the operand now.
@@ -295,7 +326,8 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
unsigned KilledOperand = 0;
while(++I != End) {
- // If the intervening instruction I:
+ MachineInstr &MI = *I;
+ // If the intervening instruction MI:
// * modifies I1's use reg
// * modifies I1's def reg
// * reads I1's def reg
@@ -304,30 +336,36 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
// kill flag for a register (a removeRegisterKilled() analogous to
// addRegisterKilled) that handles aliased register correctly.
// * or has a killed aliased register use of I1's use reg
- // %D4<def> = TFRI64 16
- // %R6<def> = TFR %R9
+ // %D4<def> = A2_tfrpi 16
+ // %R6<def> = A2_tfr %R9
// %R8<def> = KILL %R8, %D4<imp-use,kill>
// If we want to move R6 = across the KILL instruction we would have
// to remove the %D4<imp-use,kill> operand. For now, we are
// conservative and disallow the move.
// we can't move I1 across it.
- if (isUnsafeToMoveAcross(I, I1UseReg, I1DestReg, TRI) ||
+ if (MI.isDebugValue()) {
+ if (MI.readsRegister(I1DestReg, TRI)) // Move this instruction after I2.
+ DbgMItoMove.push_back(&MI);
+ continue;
+ }
+
+ if (isUnsafeToMoveAcross(MI, I1UseReg, I1DestReg, TRI) ||
// Check for an aliased register kill. Bail out if we see one.
- (!I->killsRegister(I1UseReg) && I->killsRegister(I1UseReg, TRI)))
+ (!MI.killsRegister(I1UseReg) && MI.killsRegister(I1UseReg, TRI)))
return false;
// Check for an exact kill (registers match).
- if (I1UseReg && I->killsRegister(I1UseReg)) {
+ if (I1UseReg && MI.killsRegister(I1UseReg)) {
assert(!KillingInstr && "Should only see one killing instruction");
KilledOperand = I1UseReg;
- KillingInstr = &*I;
+ KillingInstr = &MI;
}
}
if (KillingInstr) {
- removeKillInfo(KillingInstr, KilledOperand);
+ removeKillInfo(*KillingInstr, KilledOperand);
// Update I1 to set the kill flag. This flag will later be picked up by
// the new COMBINE instruction.
- bool Added = I1->addRegisterKilled(KilledOperand, TRI);
+ bool Added = I1.addRegisterKilled(KilledOperand, TRI);
(void)Added; // suppress compiler warning
assert(Added && "Must successfully update kill flag");
}
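The double killsRegister query above is subtle: the bare call matches the named register exactly, while the TRI-taking overload also fires on overlapping registers. A standalone sketch of the "alias-only kill" condition the pass bails out on, using a toy register model rather than the LLVM API:

#include <cassert>

// Toy register model (illustrative): register 100 stands for a Hexagon
// double register whose halves are registers 4 and 5.
static bool overlaps(unsigned A, unsigned B) {
  if (A == B)
    return true;
  return (A == 100 && (B == 4 || B == 5)) ||
         (B == 100 && (A == 4 || A == 5));
}

// killsExact mirrors MI.killsRegister(R); killsWithAliases mirrors the
// TRI-taking overload, which also matches overlapping registers.
static bool killsExact(unsigned Killed, unsigned R) { return Killed == R; }
static bool killsWithAliases(unsigned Killed, unsigned R) {
  return overlaps(Killed, R);
}

int main() {
  unsigned KilledReg = 100; // the instruction kills the double register
  unsigned I1UseReg = 4;    // I1 uses one of its halves
  // Exact query misses, alias-aware query hits: an aliased kill, so the
  // pass refuses to move I1 across this instruction.
  assert(!killsExact(KilledReg, I1UseReg) &&
         killsWithAliases(KilledReg, I1UseReg));
  return 0;
}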
@@ -342,14 +380,16 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
void
HexagonCopyToCombine::findPotentialNewifiableTFRs(MachineBasicBlock &BB) {
DenseMap<unsigned, MachineInstr *> LastDef;
- for (MachineBasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) {
- MachineInstr *MI = I;
+ for (MachineInstr &MI : BB) {
+ if (MI.isDebugValue())
+ continue;
+
// Mark TFRs that feed a potential new value store as such.
- if(TII->mayBeNewStore(MI)) {
+ if (TII->mayBeNewStore(&MI)) {
// Look for uses of TFR instructions.
- for (unsigned OpdIdx = 0, OpdE = MI->getNumOperands(); OpdIdx != OpdE;
+ for (unsigned OpdIdx = 0, OpdE = MI.getNumOperands(); OpdIdx != OpdE;
++OpdIdx) {
- MachineOperand &Op = MI->getOperand(OpdIdx);
+ MachineOperand &Op = MI.getOperand(OpdIdx);
// Skip over anything except register uses.
if (!Op.isReg() || !Op.isUse() || !Op.getReg())
@@ -360,14 +400,18 @@ HexagonCopyToCombine::findPotentialNewifiableTFRs(MachineBasicBlock &BB) {
MachineInstr *DefInst = LastDef[Reg];
if (!DefInst)
continue;
- if (!isCombinableInstType(DefInst, TII, ShouldCombineAggressively))
+ if (!isCombinableInstType(*DefInst, TII, ShouldCombineAggressively))
continue;
// Only nearby newifiable stores should influence the decision.
+ // Ignore the debug instructions in between.
MachineBasicBlock::iterator It(DefInst);
unsigned NumInstsToDef = 0;
- while (&*It++ != MI)
- ++NumInstsToDef;
+ while (&*It != &MI) {
+ if (!It->isDebugValue())
+ ++NumInstsToDef;
+ ++It;
+ }
if (NumInstsToDef > MaxNumOfInstsBetweenNewValueStoreAndTFR)
continue;
@@ -380,17 +424,17 @@ HexagonCopyToCombine::findPotentialNewifiableTFRs(MachineBasicBlock &BB) {
// Put instructions that last defined integer or double registers into the
// map.
- for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
- MachineOperand &Op = MI->getOperand(I);
+ for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
+ MachineOperand &Op = MI.getOperand(I);
if (!Op.isReg() || !Op.isDef() || !Op.getReg())
continue;
unsigned Reg = Op.getReg();
if (Hexagon::DoubleRegsRegClass.contains(Reg)) {
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
- LastDef[*SubRegs] = MI;
+ LastDef[*SubRegs] = &MI;
}
} else if (Hexagon::IntRegsRegClass.contains(Reg))
- LastDef[Reg] = MI;
+ LastDef[Reg] = &MI;
}
}
}
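The reworked distance loop above counts only non-debug instructions between the TFR and the store, so interleaved DBG_VALUEs no longer inflate the distance against MaxNumOfInstsBetweenNewValueStoreAndTFR. A minimal standalone sketch of that counting rule, with a toy instruction type in place of MachineInstr:

#include <cassert>
#include <vector>

struct Insn {
  bool IsDebug;
};

// Count instructions from DefIdx up to (but not including) UseIdx,
// skipping debug entries, as the NumInstsToDef loop above does.
static unsigned distanceIgnoringDebug(const std::vector<Insn> &Block,
                                      unsigned DefIdx, unsigned UseIdx) {
  unsigned N = 0;
  for (unsigned I = DefIdx; I != UseIdx; ++I)
    if (!Block[I].IsDebug)
      ++N;
  return N;
}

int main() {
  // TFR at index 0, two DBG_VALUEs, store at index 3:
  // the effective distance is 1, not 3.
  std::vector<Insn> Block = {{false}, {true}, {true}, {false}};
  assert(distanceIgnoringDebug(Block, 0, 3) == 1);
  return 0;
}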
@@ -405,6 +449,9 @@ bool HexagonCopyToCombine::runOnMachineFunction(MachineFunction &MF) {
TRI = MF.getSubtarget().getRegisterInfo();
TII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
+ const Function *F = MF.getFunction();
+ bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
+
// Combine aggressively (for code size)
ShouldCombineAggressively =
MF.getTarget().getOptLevel() <= CodeGenOpt::Default;
@@ -418,11 +465,15 @@ bool HexagonCopyToCombine::runOnMachineFunction(MachineFunction &MF) {
// Traverse instructions in basic block.
for(MachineBasicBlock::iterator MI = BI->begin(), End = BI->end();
MI != End;) {
- MachineInstr *I1 = MI++;
+ MachineInstr &I1 = *MI++;
+
+ if (I1.isDebugValue())
+ continue;
+
// Don't combine a TFR whose user could be newified (instructions that
// define double registers cannot be newified - Programmer's Ref Manual
// 5.4.2 New-value stores).
- if (ShouldCombineAggressively && PotentiallyNewifiableTFR.count(I1))
+ if (ShouldCombineAggressively && PotentiallyNewifiableTFR.count(&I1))
continue;
// Ignore instructions that are not combinable.
@@ -430,12 +481,14 @@ bool HexagonCopyToCombine::runOnMachineFunction(MachineFunction &MF) {
continue;
// Find a second instruction that can be merged into a combine
- // instruction.
+ // instruction. In addition, also find all the debug instructions that
+ // need to be moved along with it.
bool DoInsertAtI1 = false;
- MachineInstr *I2 = findPairable(I1, DoInsertAtI1);
+ DbgMItoMove.clear();
+ MachineInstr *I2 = findPairable(I1, DoInsertAtI1, OptForSize);
if (I2) {
HasChanged = true;
- combine(I1, I2, MI, DoInsertAtI1);
+ combine(I1, *I2, MI, DoInsertAtI1, OptForSize);
}
}
}
@@ -447,23 +500,28 @@ bool HexagonCopyToCombine::runOnMachineFunction(MachineFunction &MF) {
/// COMBINE instruction or 0 if no such instruction can be found. Returns true
/// in \p DoInsertAtI1 if the combine must be inserted at instruction \p I1,
/// false if the combine must be inserted at the returned instruction.
-MachineInstr *HexagonCopyToCombine::findPairable(MachineInstr *I1,
- bool &DoInsertAtI1) {
+MachineInstr *HexagonCopyToCombine::findPairable(MachineInstr &I1,
+ bool &DoInsertAtI1,
+ bool AllowC64) {
MachineBasicBlock::iterator I2 = std::next(MachineBasicBlock::iterator(I1));
- unsigned I1DestReg = I1->getOperand(0).getReg();
- for (MachineBasicBlock::iterator End = I1->getParent()->end(); I2 != End;
+ while (I2->isDebugValue())
+ ++I2;
+
+ unsigned I1DestReg = I1.getOperand(0).getReg();
+
+ for (MachineBasicBlock::iterator End = I1.getParent()->end(); I2 != End;
++I2) {
// Bail out early if we see a second definition of I1DestReg.
if (I2->modifiesRegister(I1DestReg, TRI))
break;
// Ignore non-combinable instructions.
- if (!isCombinableInstType(I2, TII, ShouldCombineAggressively))
+ if (!isCombinableInstType(*I2, TII, ShouldCombineAggressively))
continue;
// Don't combine a TFR whose user could be newified.
- if (ShouldCombineAggressively && PotentiallyNewifiableTFR.count(I2))
+ if (ShouldCombineAggressively && PotentiallyNewifiableTFR.count(&*I2))
continue;
unsigned I2DestReg = I2->getOperand(0).getReg();
@@ -478,15 +536,14 @@ MachineInstr *HexagonCopyToCombine::findPairable(MachineInstr *I1,
// Check that the two instructions are combinable. V4 allows more
// instructions to be merged into a combine.
- // The order matters because in a TFRI we might can encode a int8 as the
- // hi reg operand but only a uint6 as the low reg operand.
- if ((IsI2LowReg && !areCombinableOperations(TRI, I1, I2)) ||
- (IsI1LowReg && !areCombinableOperations(TRI, I2, I1)))
+ // The order matters because in an A2_tfrsi we might be able to encode an
+ // int8 as the hi reg operand but only a uint6 as the low reg operand.
+ if ((IsI2LowReg && !areCombinableOperations(TRI, I1, *I2, AllowC64)) ||
+ (IsI1LowReg && !areCombinableOperations(TRI, *I2, I1, AllowC64)))
break;
- if (isSafeToMoveTogether(I1, I2, I1DestReg, I2DestReg,
- DoInsertAtI1))
- return I2;
+ if (isSafeToMoveTogether(I1, *I2, I1DestReg, I2DestReg, DoInsertAtI1))
+ return &*I2;
// Not safe. Stop searching.
break;
@@ -494,16 +551,17 @@ MachineInstr *HexagonCopyToCombine::findPairable(MachineInstr *I1,
return nullptr;
}
-void HexagonCopyToCombine::combine(MachineInstr *I1, MachineInstr *I2,
+void HexagonCopyToCombine::combine(MachineInstr &I1, MachineInstr &I2,
MachineBasicBlock::iterator &MI,
- bool DoInsertAtI1) {
+ bool DoInsertAtI1, bool OptForSize) {
// We are going to delete I2. If MI points to I2 advance it to the next
// instruction.
- if ((MachineInstr *)MI == I2) ++MI;
+ if (MI == I2.getIterator())
+ ++MI;
// Figure out whether I1 or I2 goes into the lowreg part.
- unsigned I1DestReg = I1->getOperand(0).getReg();
- unsigned I2DestReg = I2->getOperand(0).getReg();
+ unsigned I1DestReg = I1.getOperand(0).getReg();
+ unsigned I2DestReg = I2.getOperand(0).getReg();
bool IsI1Loreg = (I2DestReg - I1DestReg) == 1;
unsigned LoRegDef = IsI1Loreg ? I1DestReg : I2DestReg;
@@ -515,15 +573,17 @@ void HexagonCopyToCombine::combine(MachineInstr *I1, MachineInstr *I2,
// Setup source operands.
- MachineOperand &LoOperand = IsI1Loreg ? I1->getOperand(1) :
- I2->getOperand(1);
- MachineOperand &HiOperand = IsI1Loreg ? I2->getOperand(1) :
- I1->getOperand(1);
+ MachineOperand &LoOperand = IsI1Loreg ? I1.getOperand(1) : I2.getOperand(1);
+ MachineOperand &HiOperand = IsI1Loreg ? I2.getOperand(1) : I1.getOperand(1);
// Figure out which source is a register and which a constant.
bool IsHiReg = HiOperand.isReg();
bool IsLoReg = LoOperand.isReg();
+ // Two constant-extended values can be combined into a single CONST64.
+ bool IsC64 = OptForSize && LoOperand.isImm() && HiOperand.isImm() &&
+ isGreaterThanNBitTFRI<16>(I1) && isGreaterThanNBitTFRI<16>(I2);
+
MachineBasicBlock::iterator InsertPt(DoInsertAtI1 ? I1 : I2);
// Emit combine.
if (IsHiReg && IsLoReg)
@@ -532,11 +592,45 @@ void HexagonCopyToCombine::combine(MachineInstr *I1, MachineInstr *I2,
emitCombineRI(InsertPt, DoubleRegDest, HiOperand, LoOperand);
else if (IsLoReg)
emitCombineIR(InsertPt, DoubleRegDest, HiOperand, LoOperand);
+ else if (IsC64 && !IsConst64Disabled)
+ emitConst64(InsertPt, DoubleRegDest, HiOperand, LoOperand);
else
emitCombineII(InsertPt, DoubleRegDest, HiOperand, LoOperand);
- I1->eraseFromParent();
- I2->eraseFromParent();
+ // Move debug instructions along with I1 if it's being
+ // moved towards I2.
+ if (!DoInsertAtI1 && DbgMItoMove.size() != 0) {
+ // Insert debug instructions at the new location before I2.
+ MachineBasicBlock *BB = InsertPt->getParent();
+ for (auto NewMI : DbgMItoMove) {
+ // If the walk iterator MI points at a DBG_VALUE that is being moved,
+ // advance it so it stays on the next relevant instruction.
+ if (NewMI == (MachineInstr*)MI)
+ ++MI;
+ BB->splice(InsertPt, BB, NewMI);
+ }
+ }
+
+ I1.eraseFromParent();
+ I2.eraseFromParent();
+}
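The iterator bump before the splice matters: the walk iterator stays attached to the instruction it points at, so moving that instruction would drag the walk along with it. The same pattern, sketched on std::list instead of a MachineBasicBlock (illustrative only):

#include <cassert>
#include <iterator>
#include <list>

int main() {
  std::list<int> BB = {1, 2, 3, 4}; // 2 plays the role of a moved DBG_VALUE
  auto MI = std::next(BB.begin());  // walk iterator parked on 2
  auto InsertPt = std::prev(BB.end());
  auto Moved = MI;
  ++MI; // advance first, exactly as combine() does for a moved DBG_VALUE
  BB.splice(InsertPt, BB, Moved); // move 2 in front of 4
  assert(*MI == 3); // the walk continues from the right place
  assert((BB == std::list<int>{1, 3, 2, 4}));
  return 0;
}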
+
+void HexagonCopyToCombine::emitConst64(MachineBasicBlock::iterator &InsertPt,
+ unsigned DoubleDestReg,
+ MachineOperand &HiOperand,
+ MachineOperand &LoOperand) {
+ DEBUG(dbgs() << "Found a CONST64\n");
+
+ DebugLoc DL = InsertPt->getDebugLoc();
+ MachineBasicBlock *BB = InsertPt->getParent();
+ assert(LoOperand.isImm() && HiOperand.isImm() &&
+ "Both operands must be immediate");
+
+ int64_t V = HiOperand.getImm();
+ V = (V << 32) | (0x0ffffffffLL & LoOperand.getImm());
+ BuildMI(*BB, InsertPt, DL, TII->get(Hexagon::CONST64_Int_Real),
+ DoubleDestReg)
+ .addImm(V);
}
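The shift-and-mask in emitConst64 is easy to get wrong with negative low immediates: masking the low operand to 32 bits first keeps its sign extension from clobbering the high half. A standalone check of the same packing computation, with illustrative values:

#include <cassert>
#include <cstdint>

// Same computation as emitConst64: hi lands in bits 63..32, lo is masked
// to its low 32 bits before the OR.
static int64_t packConst64(int64_t Hi, int64_t Lo) {
  return (Hi << 32) | (0x0ffffffffLL & Lo);
}

int main() {
  // A negative lo contributes only its low 32 bits.
  assert(packConst64(0x12345678, -1) ==
         static_cast<int64_t>(0x12345678ffffffffLL));
  // Zero hi with a small positive lo round-trips unchanged.
  assert(packConst64(0, 42) == 42);
  return 0;
}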
void HexagonCopyToCombine::emitCombineII(MachineBasicBlock::iterator &InsertPt,
diff --git a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
index ee0c318ffb5d..2665acd19fb1 100644
--- a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
+++ b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
@@ -78,8 +78,6 @@
#include "HexagonTargetMachine.h"
#include <functional>
-#include <set>
-#include <vector>
using namespace llvm;
@@ -359,7 +357,7 @@ bool HexagonEarlyIfConversion::isValidCandidate(const MachineBasicBlock *B)
// update the use of it after predication). PHI uses will be updated
// to use a result of a MUX, and a MUX cannot be created for predicate
// registers.
- for (ConstMIOperands MO(&MI); MO.isValid(); ++MO) {
+ for (ConstMIOperands MO(MI); MO.isValid(); ++MO) {
if (!MO->isReg() || !MO->isDef())
continue;
unsigned R = MO->getReg();
@@ -377,7 +375,7 @@ bool HexagonEarlyIfConversion::isValidCandidate(const MachineBasicBlock *B)
bool HexagonEarlyIfConversion::usesUndefVReg(const MachineInstr *MI) const {
- for (ConstMIOperands MO(MI); MO.isValid(); ++MO) {
+ for (ConstMIOperands MO(*MI); MO.isValid(); ++MO) {
if (!MO->isReg() || !MO->isUse())
continue;
unsigned R = MO->getReg();
@@ -445,7 +443,7 @@ unsigned HexagonEarlyIfConversion::computePhiCost(MachineBasicBlock *B) const {
}
MachineInstr *Def1 = MRI->getVRegDef(RO1.getReg());
MachineInstr *Def3 = MRI->getVRegDef(RO3.getReg());
- if (!TII->isPredicable(Def1) || !TII->isPredicable(Def3))
+ if (!TII->isPredicable(*Def1) || !TII->isPredicable(*Def3))
Cost++;
}
return Cost;
@@ -456,7 +454,7 @@ unsigned HexagonEarlyIfConversion::countPredicateDefs(
const MachineBasicBlock *B) const {
unsigned PredDefs = 0;
for (auto &MI : *B) {
- for (ConstMIOperands MO(&MI); MO.isValid(); ++MO) {
+ for (ConstMIOperands MO(MI); MO.isValid(); ++MO) {
if (!MO->isReg() || !MO->isDef())
continue;
unsigned R = MO->getReg();
@@ -721,7 +719,7 @@ void HexagonEarlyIfConversion::predicateInstr(MachineBasicBlock *ToB,
assert(COpc);
MachineInstrBuilder MIB = BuildMI(*ToB, At, DL, TII->get(COpc))
.addReg(PredR);
- for (MIOperands MO(MI); MO.isValid(); ++MO)
+ for (MIOperands MO(*MI); MO.isValid(); ++MO)
MIB.addOperand(*MO);
// Set memory references.
@@ -962,7 +960,7 @@ void HexagonEarlyIfConversion::eliminatePhis(MachineBasicBlock *B) {
// MRI.replaceVregUsesWith does not allow to update the subregister,
// so instead of doing the use-iteration here, create a copy into a
// "non-subregistered" register.
- DebugLoc DL = PN->getDebugLoc();
+ const DebugLoc &DL = PN->getDebugLoc();
const TargetRegisterClass *RC = MRI->getRegClass(DefR);
NewR = MRI->createVirtualRegister(RC);
NonPHI = BuildMI(*B, NonPHI, DL, TII->get(TargetOpcode::COPY), NewR)
@@ -980,7 +978,7 @@ void HexagonEarlyIfConversion::replacePhiEdges(MachineBasicBlock *OldB,
MachineBasicBlock *SB = *I;
MachineBasicBlock::iterator P, N = SB->getFirstNonPHI();
for (P = SB->begin(); P != N; ++P) {
- MachineInstr *PN = &*P;
+ MachineInstr &PN = *P;
for (MIOperands MO(PN); MO.isValid(); ++MO)
if (MO->isMBB() && MO->getMBB() == OldB)
MO->setMBB(NewB);
@@ -1034,6 +1032,9 @@ void HexagonEarlyIfConversion::simplifyFlowGraph(const FlowPattern &FP) {
bool HexagonEarlyIfConversion::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
auto &ST = MF.getSubtarget();
TII = ST.getInstrInfo();
TRI = ST.getRegisterInfo();
diff --git a/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index ce10aeadef94..bd5bb9cbc235 100644
--- a/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -17,10 +17,10 @@
//
// Liveness tracking aside, the main functionality of this pass is divided
// into two steps. The first step is to replace an instruction
-// vreg0 = C2_mux vreg0, vreg1, vreg2
+// vreg0 = C2_mux vreg1, vreg2, vreg3
// with a pair of conditional transfers
-// vreg0 = A2_tfrt vreg0, vreg1
-// vreg0 = A2_tfrf vreg0, vreg2
+// vreg0 = A2_tfrt vreg1, vreg2
+// vreg0 = A2_tfrf vreg1, vreg3
// It is the intention that the execution of this pass could be terminated
// after this step, and the code generated would be functionally correct.
//
@@ -60,12 +60,92 @@
// vreg3 = A2_tfrf vreg0, vreg2
//
+// Splitting a definition of a register into two predicated transfers
+// creates a complication in liveness tracking. Live interval computation
+// will see both instructions as actual definitions, and will mark the
+// first one as dead. The definition is not actually dead, and this
+// situation will need to be fixed. For example:
+// vreg1<def,dead> = A2_tfrt ... ; marked as dead
+// vreg1<def> = A2_tfrf ...
+//
+// Since any of the individual predicated transfers may end up getting
+// removed (in case it is an identity copy), some pre-existing def may
+// be marked as dead after live interval recomputation:
+// vreg1<def,dead> = ... ; marked as dead
+// ...
+// vreg1<def> = A2_tfrf ... ; if A2_tfrt is removed
+// This case happens if vreg1 was used as a source in A2_tfrt, which means
+// that it is actually live at the A2_tfrf, and so the now dead definition
+// of vreg1 will need to be updated to non-dead at some point.
+//
+// This issue could be remedied by adding implicit uses to the predicated
+// transfers, but this will create a problem with subsequent predication,
+// since the transfers will no longer be possible to reorder. To avoid
+// that, the initial splitting will not add any implicit uses. These
+// implicit uses will be added later, after predication. The extra price,
+// however, is that finding the locations where the implicit uses need
+// to be added, and updating the live ranges will be more involved.
+//
+// An additional problem appears when subregister liveness tracking is
+// enabled. In such a scenario, the live interval for the super-register
+// will have live ranges for each subregister (i.e. subranges). This sub-
+// range contains all liveness information about the subregister, except
+// for one case: a "read-undef" flag from another subregister will not
+// be reflected: given
+// vreg1:subreg_hireg<def,read-undef> = ... ; "undefines" subreg_loreg
+// the subrange for subreg_loreg will not have any indication that it is
+// undefined at this point. Calculating subregister liveness based only
+// on the information from the subrange may create a segment which spans
+// over such a "read-undef" flag. This would create inconsistencies in
+// the liveness data, resulting in assertions or incorrect code.
+// Example:
+// vreg1:subreg_loreg<def> = ...
+// vreg1:subreg_hireg<def, read-undef> = ... ; "undefines" subreg_loreg
+// ...
+// vreg1:subreg_loreg<def> = A2_tfrt ... ; may end up with imp-use
+// ; of subreg_loreg
+// The remedy takes advantage of the fact that at this point we have
+// an unconditional definition of the subregister. What this means is
+// that any preceding value in this subregister will be overwritten,
+// or in other words, the last use before this def is a kill. This also
+// implies that the first of the predicated transfers at this location
+// should not have any implicit uses.
+// Assume for a moment that no part of the corresponding super-register
+// is used as a source. In such a case, the entire super-register can be
+// considered undefined immediately before this instruction. Because of
+// that, we can insert an IMPLICIT_DEF of the super-register at this
+// location, which will cause it to be reflected in all the associated
+// subranges. What is important here is that if an IMPLICIT_DEF of
+// subreg_loreg was used, we would lose the indication that subreg_hireg
+// is also considered undefined. This could lead to having implicit uses
+// incorrectly added.
+//
+// What is left is the two cases when the super-register is used as a
+// source.
+// * Case 1: the used part is the same as the one that is defined:
+// vreg1<def> = ...
+// ...
+// vreg1:subreg_loreg<def,read-undef> = C2_mux ..., vreg1:subreg_loreg
+// In the end, the subreg_loreg should be marked as live at the point of
+// the splitting:
+// vreg1:subreg_loreg<def,read-undef> = A2_tfrt ; should have imp-use
+// vreg1:subreg_loreg<def,read-undef> = A2_tfrf ; should have imp-use
+// Hence, an IMPLICIT_DEF of only vreg1:subreg_hireg would be sufficient.
+// * Case 2: the used part does not overlap the part being defined:
+// vreg1<def> = ...
+// ...
+// vreg1:subreg_loreg<def,read-undef> = C2_mux ..., vreg1:subreg_hireg
+// For this case, we insert an IMPLICIT_DEF of vreg1:subreg_hireg after
+// the C2_mux.
+
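Case 1 above reduces to a simple rule: when the mux read-undef-defines one half of a double register and also reads the same super-register, the IMPLICIT_DEF goes on the other half, so the undefined state of that half is still recorded. A toy of just that subregister choice (the real logic lives in split() further down; names are illustrative):

#include <cassert>

enum SubReg { LoReg, HiReg };

// For Case 1 (same super-register used as a source): implicitly define
// the half that the mux does not define.
static SubReg implicitDefHalf(SubReg Defined) {
  return Defined == LoReg ? HiReg : LoReg;
}

int main() {
  assert(implicitDefHalf(LoReg) == HiReg); // mux defines loreg
  assert(implicitDefHalf(HiReg) == LoReg); // mux defines hireg
  return 0;
}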
#define DEBUG_TYPE "expand-condsets"
-#include "HexagonTargetMachine.h"
+#include "HexagonTargetMachine.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -76,6 +156,11 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <iterator>
+#include <set>
+#include <utility>
+
using namespace llvm;
static cl::opt<unsigned> OptTfrLimit("expand-condsets-tfr-limit",
@@ -103,22 +188,26 @@ namespace {
initializeHexagonExpandCondsetsPass(*PassRegistry::getPassRegistry());
}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "Hexagon Expand Condsets";
}
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<LiveIntervals>();
AU.addPreserved<LiveIntervals>();
AU.addPreserved<SlotIndexes>();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
MachineFunctionPass::getAnalysisUsage(AU);
}
- virtual bool runOnMachineFunction(MachineFunction &MF);
+ bool runOnMachineFunction(MachineFunction &MF) override;
private:
const HexagonInstrInfo *HII;
const TargetRegisterInfo *TRI;
+ MachineDominatorTree *MDT;
MachineRegisterInfo *MRI;
LiveIntervals *LIS;
+ std::set<MachineInstr*> LocalImpDefs;
bool CoaLimitActive, TfrLimitActive;
unsigned CoaLimit, TfrLimit, CoaCounter, TfrCounter;
@@ -131,6 +220,9 @@ namespace {
return Reg == RR.Reg && Sub == RR.Sub;
}
bool operator!= (RegisterRef RR) const { return !operator==(RR); }
+ bool operator< (RegisterRef RR) const {
+ return Reg < RR.Reg || (Reg == RR.Reg && Sub < RR.Sub);
+ }
unsigned Reg, Sub;
};
@@ -138,44 +230,44 @@ namespace {
enum { Sub_Low = 0x1, Sub_High = 0x2, Sub_None = (Sub_Low | Sub_High) };
enum { Exec_Then = 0x10, Exec_Else = 0x20 };
unsigned getMaskForSub(unsigned Sub);
- bool isCondset(const MachineInstr *MI);
+ bool isCondset(const MachineInstr &MI);
+ LaneBitmask getLaneMask(unsigned Reg, unsigned Sub);
void addRefToMap(RegisterRef RR, ReferenceMap &Map, unsigned Exec);
bool isRefInMap(RegisterRef, ReferenceMap &Map, unsigned Exec);
- LiveInterval::iterator nextSegment(LiveInterval &LI, SlotIndex S);
- LiveInterval::iterator prevSegment(LiveInterval &LI, SlotIndex S);
- void makeDefined(unsigned Reg, SlotIndex S, bool SetDef);
- void makeUndead(unsigned Reg, SlotIndex S);
- void shrinkToUses(unsigned Reg, LiveInterval &LI);
- void updateKillFlags(unsigned Reg, LiveInterval &LI);
- void terminateSegment(LiveInterval::iterator LT, SlotIndex S,
- LiveInterval &LI);
- void addInstrToLiveness(MachineInstr *MI);
- void removeInstrFromLiveness(MachineInstr *MI);
+ void removeImpDefSegments(LiveRange &Range);
+ void updateDeadsInRange(unsigned Reg, LaneBitmask LM, LiveRange &Range);
+ void updateKillFlags(unsigned Reg);
+ void updateDeadFlags(unsigned Reg);
+ void recalculateLiveInterval(unsigned Reg);
+ void removeInstr(MachineInstr &MI);
+ void updateLiveness(std::set<unsigned> &RegSet, bool Recalc,
+ bool UpdateKills, bool UpdateDeads);
unsigned getCondTfrOpcode(const MachineOperand &SO, bool Cond);
- MachineInstr *genTfrFor(MachineOperand &SrcOp, unsigned DstR,
- unsigned DstSR, const MachineOperand &PredOp, bool Cond);
- bool split(MachineInstr *MI);
- bool splitInBlock(MachineBasicBlock &B);
+ MachineInstr *genCondTfrFor(MachineOperand &SrcOp,
+ MachineBasicBlock::iterator At, unsigned DstR,
+ unsigned DstSR, const MachineOperand &PredOp, bool PredSense,
+ bool ReadUndef, bool ImpUse);
+ bool split(MachineInstr &MI, std::set<unsigned> &UpdRegs);
+ bool splitInBlock(MachineBasicBlock &B, std::set<unsigned> &UpdRegs);
bool isPredicable(MachineInstr *MI);
MachineInstr *getReachingDefForPred(RegisterRef RD,
MachineBasicBlock::iterator UseIt, unsigned PredR, bool Cond);
- bool canMoveOver(MachineInstr *MI, ReferenceMap &Defs, ReferenceMap &Uses);
- bool canMoveMemTo(MachineInstr *MI, MachineInstr *ToI, bool IsDown);
- void predicateAt(RegisterRef RD, MachineInstr *MI,
- MachineBasicBlock::iterator Where, unsigned PredR, bool Cond);
+ bool canMoveOver(MachineInstr &MI, ReferenceMap &Defs, ReferenceMap &Uses);
+ bool canMoveMemTo(MachineInstr &MI, MachineInstr &ToI, bool IsDown);
+ void predicateAt(const MachineOperand &DefOp, MachineInstr &MI,
+ MachineBasicBlock::iterator Where,
+ const MachineOperand &PredOp, bool Cond,
+ std::set<unsigned> &UpdRegs);
void renameInRange(RegisterRef RO, RegisterRef RN, unsigned PredR,
bool Cond, MachineBasicBlock::iterator First,
MachineBasicBlock::iterator Last);
- bool predicate(MachineInstr *TfrI, bool Cond);
- bool predicateInBlock(MachineBasicBlock &B);
-
- void postprocessUndefImplicitUses(MachineBasicBlock &B);
- void removeImplicitUses(MachineInstr *MI);
- void removeImplicitUses(MachineBasicBlock &B);
+ bool predicate(MachineInstr &TfrI, bool Cond, std::set<unsigned> &UpdRegs);
+ bool predicateInBlock(MachineBasicBlock &B,
+ std::set<unsigned> &UpdRegs);
bool isIntReg(RegisterRef RR, unsigned &BW);
bool isIntraBlocks(LiveInterval &LI);
@@ -186,6 +278,13 @@ namespace {
char HexagonExpandCondsets::ID = 0;
+INITIALIZE_PASS_BEGIN(HexagonExpandCondsets, "expand-condsets",
+ "Hexagon Expand Condsets", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_END(HexagonExpandCondsets, "expand-condsets",
+ "Hexagon Expand Condsets", false, false)
unsigned HexagonExpandCondsets::getMaskForSub(unsigned Sub) {
switch (Sub) {
@@ -199,9 +298,8 @@ unsigned HexagonExpandCondsets::getMaskForSub(unsigned Sub) {
llvm_unreachable("Invalid subregister");
}
-
-bool HexagonExpandCondsets::isCondset(const MachineInstr *MI) {
- unsigned Opc = MI->getOpcode();
+bool HexagonExpandCondsets::isCondset(const MachineInstr &MI) {
+ unsigned Opc = MI.getOpcode();
switch (Opc) {
case Hexagon::C2_mux:
case Hexagon::C2_muxii:
@@ -215,6 +313,13 @@ bool HexagonExpandCondsets::isCondset(const MachineInstr *MI) {
}
+LaneBitmask HexagonExpandCondsets::getLaneMask(unsigned Reg, unsigned Sub) {
+ assert(TargetRegisterInfo::isVirtualRegister(Reg));
+ return Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub)
+ : MRI->getMaxLaneMaskForVReg(Reg);
+}
+
+
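getLaneMask feeds containment tests like (SLM & LM) == SLM in updateKillFlags below: an operand's lanes are killable only if they all lie inside the mask being killed. A standalone sketch with made-up two-lane masks (Hexagon's real masks come from TableGen):

#include <cassert>

using LaneBitmask = unsigned; // stand-in for LLVM's lane mask type

// An operand with lanes SLM is covered by a kill of lanes LM only when
// all of its lanes are contained in LM.
static bool coveredBy(LaneBitmask SLM, LaneBitmask LM) {
  return (SLM & LM) == SLM;
}

int main() {
  const LaneBitmask Lo = 0x1, Hi = 0x2, Full = Lo | Hi;
  assert(coveredBy(Lo, Full));  // loreg lanes lie inside the full mask
  assert(!coveredBy(Full, Lo)); // the full register is not covered by loreg
  assert((Lo & Hi) == 0);       // the two halves are disjoint
  return 0;
}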
void HexagonExpandCondsets::addRefToMap(RegisterRef RR, ReferenceMap &Map,
unsigned Exec) {
unsigned Mask = getMaskForSub(RR.Sub) | Exec;
@@ -238,408 +343,231 @@ bool HexagonExpandCondsets::isRefInMap(RegisterRef RR, ReferenceMap &Map,
}
-LiveInterval::iterator HexagonExpandCondsets::nextSegment(LiveInterval &LI,
- SlotIndex S) {
- for (LiveInterval::iterator I = LI.begin(), E = LI.end(); I != E; ++I) {
- if (I->start >= S)
- return I;
- }
- return LI.end();
-}
-
-
-LiveInterval::iterator HexagonExpandCondsets::prevSegment(LiveInterval &LI,
- SlotIndex S) {
- LiveInterval::iterator P = LI.end();
- for (LiveInterval::iterator I = LI.begin(), E = LI.end(); I != E; ++I) {
- if (I->end > S)
- return P;
- P = I;
- }
- return P;
-}
-
-
-/// Find the implicit use of register Reg in slot index S, and make sure
-/// that the "defined" flag is set to SetDef. While the mux expansion is
-/// going on, predicated instructions will have implicit uses of the
-/// registers that are being defined. This is to keep any preceding
-/// definitions live. If there is no preceding definition, the implicit
-/// use will be marked as "undef", otherwise it will be "defined". This
-/// function is used to update the flag.
-void HexagonExpandCondsets::makeDefined(unsigned Reg, SlotIndex S,
- bool SetDef) {
- if (!S.isRegister())
- return;
- MachineInstr *MI = LIS->getInstructionFromIndex(S);
- assert(MI && "Expecting instruction");
- for (auto &Op : MI->operands()) {
- if (!Op.isReg() || !Op.isUse() || Op.getReg() != Reg)
- continue;
- bool IsDef = !Op.isUndef();
- if (Op.isImplicit() && IsDef != SetDef)
- Op.setIsUndef(!SetDef);
- }
-}
-
-
-void HexagonExpandCondsets::makeUndead(unsigned Reg, SlotIndex S) {
- // If S is a block boundary, then there can still be a dead def reaching
- // this point. Instead of traversing the CFG, queue start points of all
- // live segments that begin with a register, and end at a block boundary.
- // This may "resurrect" some truly dead definitions, but doing so is
- // harmless.
- SmallVector<MachineInstr*,8> Defs;
- if (S.isBlock()) {
- LiveInterval &LI = LIS->getInterval(Reg);
- for (LiveInterval::iterator I = LI.begin(), E = LI.end(); I != E; ++I) {
- if (!I->start.isRegister() || !I->end.isBlock())
- continue;
- MachineInstr *MI = LIS->getInstructionFromIndex(I->start);
- Defs.push_back(MI);
- }
- } else if (S.isRegister()) {
- MachineInstr *MI = LIS->getInstructionFromIndex(S);
- Defs.push_back(MI);
- }
-
- for (unsigned i = 0, n = Defs.size(); i < n; ++i) {
- MachineInstr *MI = Defs[i];
+void HexagonExpandCondsets::updateKillFlags(unsigned Reg) {
+ auto KillAt = [this,Reg] (SlotIndex K, LaneBitmask LM) -> void {
+ // Set the <kill> flag on a use of Reg whose lane mask is contained in LM.
+ MachineInstr *MI = LIS->getInstructionFromIndex(K);
for (auto &Op : MI->operands()) {
- if (!Op.isReg() || !Op.isDef() || Op.getReg() != Reg)
- continue;
- Op.setIsDead(false);
- }
- }
-}
-
-
-/// Shrink the segments in the live interval for a given register to the last
-/// use before each subsequent def. Unlike LiveIntervals::shrinkToUses, this
-/// function will not mark any definitions of Reg as dead. The reason for this
-/// is that this function is used while a MUX instruction is being expanded,
-/// or while a conditional copy is undergoing predication. During these
-/// processes, there may be defs present in the instruction sequence that have
-/// not yet been removed, or there may be missing uses that have not yet been
-/// added. We want to utilize LiveIntervals::shrinkToUses as much as possible,
-/// but since it does not extend any intervals that are too short, we need to
-/// pre-emptively extend them here in anticipation of further changes.
-void HexagonExpandCondsets::shrinkToUses(unsigned Reg, LiveInterval &LI) {
- SmallVector<MachineInstr*,4> Deads;
- LIS->shrinkToUses(&LI, &Deads);
- // Need to undo the deadification made by "shrinkToUses". It's easier to
- // do it here, since we have a list of all instructions that were just
- // marked as dead.
- for (unsigned i = 0, n = Deads.size(); i < n; ++i) {
- MachineInstr *MI = Deads[i];
- // Clear the "dead" flag.
- for (auto &Op : MI->operands()) {
- if (!Op.isReg() || !Op.isDef() || Op.getReg() != Reg)
+ if (!Op.isReg() || !Op.isUse() || Op.getReg() != Reg)
continue;
- Op.setIsDead(false);
+ LaneBitmask SLM = getLaneMask(Reg, Op.getSubReg());
+ if ((SLM & LM) == SLM) {
+ // Only set the kill flag on the first encountered use of Reg in this
+ // instruction.
+ Op.setIsKill(true);
+ break;
+ }
}
- // Extend the live segment to the beginning of the next one.
- LiveInterval::iterator End = LI.end();
- SlotIndex S = LIS->getInstructionIndex(MI).getRegSlot();
- LiveInterval::iterator T = LI.FindSegmentContaining(S);
- assert(T != End);
- LiveInterval::iterator N = std::next(T);
- if (N != End)
- T->end = N->start;
- else
- T->end = LIS->getMBBEndIdx(MI->getParent());
- }
- updateKillFlags(Reg, LI);
-}
-
+ };
-/// Given an updated live interval LI for register Reg, update the kill flags
-/// in instructions using Reg to reflect the liveness changes.
-void HexagonExpandCondsets::updateKillFlags(unsigned Reg, LiveInterval &LI) {
- MRI->clearKillFlags(Reg);
- for (LiveInterval::iterator I = LI.begin(), E = LI.end(); I != E; ++I) {
- SlotIndex EX = I->end;
- if (!EX.isRegister())
+ LiveInterval &LI = LIS->getInterval(Reg);
+ for (auto I = LI.begin(), E = LI.end(); I != E; ++I) {
+ if (!I->end.isRegister())
continue;
- MachineInstr *MI = LIS->getInstructionFromIndex(EX);
- for (auto &Op : MI->operands()) {
- if (!Op.isReg() || !Op.isUse() || Op.getReg() != Reg)
+ // Do not mark the end of the segment as <kill>, if the next segment
+ // starts with a predicated instruction.
+ auto NextI = std::next(I);
+ if (NextI != E && NextI->start.isRegister()) {
+ MachineInstr *DefI = LIS->getInstructionFromIndex(NextI->start);
+ if (HII->isPredicated(*DefI))
continue;
- // Only set the kill flag on the first encountered use of Reg in this
- // instruction.
- Op.setIsKill(true);
- break;
}
+ bool WholeReg = true;
+ if (LI.hasSubRanges()) {
+ auto EndsAtI = [I] (LiveInterval::SubRange &S) -> bool {
+ LiveRange::iterator F = S.find(I->end);
+ return F != S.end() && I->end == F->end;
+ };
+ // Check if all subranges end at I->end. If so, make sure to kill
+ // the whole register.
+ for (LiveInterval::SubRange &S : LI.subranges()) {
+ if (EndsAtI(S))
+ KillAt(I->end, S.LaneMask);
+ else
+ WholeReg = false;
+ }
+ }
+ if (WholeReg)
+ KillAt(I->end, MRI->getMaxLaneMaskForVReg(Reg));
}
}
-/// When adding a new instruction to liveness, the newly added definition
-/// will start a new live segment. This may happen at a position that falls
-/// within an existing live segment. In such case that live segment needs to
-/// be truncated to make room for the new segment. Ultimately, the truncation
-/// will occur at the last use, but for now the segment can be terminated
-/// right at the place where the new segment will start. The segments will be
-/// shrunk-to-uses later.
-void HexagonExpandCondsets::terminateSegment(LiveInterval::iterator LT,
- SlotIndex S, LiveInterval &LI) {
- // Terminate the live segment pointed to by LT within a live interval LI.
- if (LT == LI.end())
- return;
+void HexagonExpandCondsets::removeImpDefSegments(LiveRange &Range) {
+ auto StartImpDef = [this] (LiveRange::Segment &S) -> bool {
+ return S.start.isRegister() &&
+ LocalImpDefs.count(LIS->getInstructionFromIndex(S.start));
+ };
+ Range.segments.erase(std::remove_if(Range.begin(), Range.end(), StartImpDef),
+ Range.end());
+}
- VNInfo *OldVN = LT->valno;
- SlotIndex EX = LT->end;
- LT->end = S;
- // If LT does not end at a block boundary, the termination is done.
- if (!EX.isBlock())
+void HexagonExpandCondsets::updateDeadsInRange(unsigned Reg, LaneBitmask LM,
+ LiveRange &Range) {
+ assert(TargetRegisterInfo::isVirtualRegister(Reg));
+ if (Range.empty())
return;
- // If LT ended at a block boundary, it's possible that its value number
- // is picked up at the beginning other blocks. Create a new value number
- // and change such blocks to use it instead.
- VNInfo *NewVN = 0;
- for (LiveInterval::iterator I = LI.begin(), E = LI.end(); I != E; ++I) {
- if (!I->start.isBlock() || I->valno != OldVN)
- continue;
- // Generate on-demand a new value number that is defined by the
- // block beginning (i.e. -phi).
- if (!NewVN)
- NewVN = LI.getNextValue(I->start, LIS->getVNInfoAllocator());
- I->valno = NewVN;
- }
-}
+ auto IsRegDef = [this,Reg,LM] (MachineOperand &Op) -> bool {
+ if (!Op.isReg() || !Op.isDef())
+ return false;
+ unsigned DR = Op.getReg(), DSR = Op.getSubReg();
+ if (!TargetRegisterInfo::isVirtualRegister(DR) || DR != Reg)
+ return false;
+ LaneBitmask SLM = getLaneMask(DR, DSR);
+ return (SLM & LM) != 0;
+ };
+ // The splitting step will create pairs of predicated definitions without
+ // any implicit uses (since implicit uses would interfere with predication).
+ // This can cause the reaching defs to become dead after live range
+ // recomputation, even though they are not really dead.
+ // We need to identify predicated defs that need implicit uses, and
+ // dead defs that are not really dead, and correct both problems.
+
+ SetVector<MachineBasicBlock*> Defs;
+ auto Dominate = [this] (SetVector<MachineBasicBlock*> &Defs,
+ MachineBasicBlock *Dest) -> bool {
+ for (MachineBasicBlock *D : Defs)
+ if (D != Dest && MDT->dominates(D, Dest))
+ return true;
-/// Add the specified instruction to live intervals. This function is used
-/// to update the live intervals while the program code is being changed.
-/// Neither the expansion of a MUX, nor the predication are atomic, and this
-/// function is used to update the live intervals while these transformations
-/// are being done.
-void HexagonExpandCondsets::addInstrToLiveness(MachineInstr *MI) {
- SlotIndex MX = LIS->isNotInMIMap(MI) ? LIS->InsertMachineInstrInMaps(MI)
- : LIS->getInstructionIndex(MI);
- DEBUG(dbgs() << "adding liveness info for instr\n " << MX << " " << *MI);
-
- MX = MX.getRegSlot();
- bool Predicated = HII->isPredicated(MI);
- MachineBasicBlock *MB = MI->getParent();
-
- // Strip all implicit uses from predicated instructions. They will be
- // added again, according to the updated information.
- if (Predicated)
- removeImplicitUses(MI);
-
- // For each def in MI we need to insert a new live segment starting at MX
- // into the interval. If there already exists a live segment in the interval
- // that contains MX, we need to terminate it at MX.
- SmallVector<RegisterRef,2> Defs;
- for (auto &Op : MI->operands())
- if (Op.isReg() && Op.isDef())
- Defs.push_back(RegisterRef(Op));
-
- for (unsigned i = 0, n = Defs.size(); i < n; ++i) {
- unsigned DefR = Defs[i].Reg;
- LiveInterval &LID = LIS->getInterval(DefR);
- DEBUG(dbgs() << "adding def " << PrintReg(DefR, TRI)
- << " with interval\n " << LID << "\n");
- // If MX falls inside of an existing live segment, terminate it.
- LiveInterval::iterator LT = LID.FindSegmentContaining(MX);
- if (LT != LID.end())
- terminateSegment(LT, MX, LID);
- DEBUG(dbgs() << "after terminating segment\n " << LID << "\n");
-
- // Create a new segment starting from MX.
- LiveInterval::iterator P = prevSegment(LID, MX), N = nextSegment(LID, MX);
- SlotIndex EX;
- VNInfo *VN = LID.getNextValue(MX, LIS->getVNInfoAllocator());
- if (N == LID.end()) {
- // There is no live segment after MX. End this segment at the end of
- // the block.
- EX = LIS->getMBBEndIdx(MB);
- } else {
- // If the next segment starts at the block boundary, end the new segment
- // at the boundary of the preceding block (i.e. the previous index).
- // Otherwise, end the segment at the beginning of the next segment. In
- // either case it will be "shrunk-to-uses" later.
- EX = N->start.isBlock() ? N->start.getPrevIndex() : N->start;
+ MachineBasicBlock *Entry = &Dest->getParent()->front();
+ SetVector<MachineBasicBlock*> Work(Dest->pred_begin(), Dest->pred_end());
+ for (unsigned i = 0; i < Work.size(); ++i) {
+ MachineBasicBlock *B = Work[i];
+ if (Defs.count(B))
+ continue;
+ if (B == Entry)
+ return false;
+ for (auto *P : B->predecessors())
+ Work.insert(P);
}
- if (Predicated) {
- // Predicated instruction will have an implicit use of the defined
- // register. This is necessary so that this definition will not make
- // any previous definitions dead. If there are no previous live
- // segments, still add the implicit use, but make it "undef".
- // Because of the implicit use, the preceding definition is not
- // dead. Mark is as such (if necessary).
- MachineOperand ImpUse = MachineOperand::CreateReg(DefR, false, true);
- ImpUse.setSubReg(Defs[i].Sub);
- bool Undef = false;
- if (P == LID.end())
- Undef = true;
- else {
- // If the previous segment extends to the end of the previous block,
- // the end index may actually be the beginning of this block. If
- // the previous segment ends at a block boundary, move it back by one,
- // to get the proper block for it.
- SlotIndex PE = P->end.isBlock() ? P->end.getPrevIndex() : P->end;
- MachineBasicBlock *PB = LIS->getMBBFromIndex(PE);
- if (PB != MB && !LIS->isLiveInToMBB(LID, MB))
- Undef = true;
- }
- if (!Undef) {
- makeUndead(DefR, P->valno->def);
- // We are adding a live use, so extend the previous segment to
- // include it.
- P->end = MX;
- } else {
- ImpUse.setIsUndef(true);
- }
+ return true;
+ };
- if (!MI->readsRegister(DefR))
- MI->addOperand(ImpUse);
- if (N != LID.end())
- makeDefined(DefR, N->start, true);
- }
- LiveRange::Segment NR = LiveRange::Segment(MX, EX, VN);
- LID.addSegment(NR);
- DEBUG(dbgs() << "added a new segment " << NR << "\n " << LID << "\n");
- shrinkToUses(DefR, LID);
- DEBUG(dbgs() << "updated imp-uses: " << *MI);
- LID.verify();
+ // First, try to extend live range within individual basic blocks. This
+ // will leave us only with dead defs that do not reach any predicated
+ // defs in the same block.
+ SmallVector<SlotIndex,4> PredDefs;
+ for (auto &Seg : Range) {
+ if (!Seg.start.isRegister())
+ continue;
+ MachineInstr *DefI = LIS->getInstructionFromIndex(Seg.start);
+ if (LocalImpDefs.count(DefI))
+ continue;
+ Defs.insert(DefI->getParent());
+ if (HII->isPredicated(*DefI))
+ PredDefs.push_back(Seg.start);
+ }
+ for (auto &SI : PredDefs) {
+ MachineBasicBlock *BB = LIS->getMBBFromIndex(SI);
+ if (Range.extendInBlock(LIS->getMBBStartIdx(BB), SI))
+ SI = SlotIndex();
}
- // For each use in MI:
- // - If there is no live segment that contains MX for the used register,
- // extend the previous one. Ignore implicit uses.
- for (auto &Op : MI->operands()) {
- if (!Op.isReg() || !Op.isUse() || Op.isImplicit() || Op.isUndef())
+ // Calculate reachability for those predicated defs that were not handled
+ // by the in-block extension.
+ SmallVector<SlotIndex,4> ExtTo;
+ for (auto &SI : PredDefs) {
+ if (!SI.isValid())
+ continue;
+ MachineBasicBlock *BB = LIS->getMBBFromIndex(SI);
+ if (BB->pred_empty())
+ continue;
+ // If the defs from this range reach SI via all predecessors, it is live.
+ if (Dominate(Defs, BB))
+ ExtTo.push_back(SI);
+ }
+ LIS->extendToIndices(Range, ExtTo);
+
+ // Remove <dead> flags from all defs that are not dead after live range
+ // extension, and collect all def operands. They will be used to generate
+ // the necessary implicit uses.
+ std::set<RegisterRef> DefRegs;
+ for (auto &Seg : Range) {
+ if (!Seg.start.isRegister())
continue;
- unsigned UseR = Op.getReg();
- LiveInterval &LIU = LIS->getInterval(UseR);
- // Find the last segment P that starts before MX.
- LiveInterval::iterator P = LIU.FindSegmentContaining(MX);
- if (P == LIU.end())
- P = prevSegment(LIU, MX);
-
- assert(P != LIU.end() && "MI uses undefined register?");
- SlotIndex EX = P->end;
- // If P contains MX, there is not much to do.
- if (EX > MX) {
- Op.setIsKill(false);
+ MachineInstr *DefI = LIS->getInstructionFromIndex(Seg.start);
+ if (LocalImpDefs.count(DefI))
continue;
+ for (auto &Op : DefI->operands()) {
+ if (Seg.start.isDead() || !IsRegDef(Op))
+ continue;
+ DefRegs.insert(Op);
+ Op.setIsDead(false);
}
- // Otherwise, extend P to "next(MX)".
- P->end = MX.getNextIndex();
- Op.setIsKill(true);
- // Get the old "kill" instruction, and remove the kill flag.
- if (MachineInstr *KI = LIS->getInstructionFromIndex(MX))
- KI->clearRegisterKills(UseR, nullptr);
- shrinkToUses(UseR, LIU);
- LIU.verify();
}
-}
-/// Update the live interval information to reflect the removal of the given
-/// instruction from the program. As with "addInstrToLiveness", this function
-/// is called while the program code is being changed.
-void HexagonExpandCondsets::removeInstrFromLiveness(MachineInstr *MI) {
- SlotIndex MX = LIS->getInstructionIndex(MI).getRegSlot();
- DEBUG(dbgs() << "removing instr\n " << MX << " " << *MI);
+ // Finally, add implicit uses to each predicated def that is reached
+ // by other defs. Remove segments started by implicit-defs first, since
+ // they do not define registers.
+ removeImpDefSegments(Range);
- // For each def in MI:
- // If MI starts a live segment, merge this segment with the previous segment.
- //
- for (auto &Op : MI->operands()) {
- if (!Op.isReg() || !Op.isDef())
+ for (auto &Seg : Range) {
+ if (!Seg.start.isRegister() || !Range.liveAt(Seg.start.getPrevSlot()))
continue;
- unsigned DefR = Op.getReg();
- LiveInterval &LID = LIS->getInterval(DefR);
- LiveInterval::iterator LT = LID.FindSegmentContaining(MX);
- assert(LT != LID.end() && "Expecting live segments");
- DEBUG(dbgs() << "removing def at " << MX << " of " << PrintReg(DefR, TRI)
- << " with interval\n " << LID << "\n");
- if (LT->start != MX)
+ MachineInstr *DefI = LIS->getInstructionFromIndex(Seg.start);
+ if (!HII->isPredicated(*DefI))
continue;
+ MachineFunction &MF = *DefI->getParent()->getParent();
+ // Construct the set of all necessary implicit uses, based on the def
+ // operands in the instruction.
+ std::set<RegisterRef> ImpUses;
+ for (auto &Op : DefI->operands())
+ if (Op.isReg() && Op.isDef() && DefRegs.count(Op))
+ ImpUses.insert(Op);
+ for (RegisterRef R : ImpUses)
+ MachineInstrBuilder(MF, DefI).addReg(R.Reg, RegState::Implicit, R.Sub);
+ }
+}
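The Dominate lambda above answers a reachability question rather than textbook dominance: starting from Dest's predecessors, walk backwards; if the entry block is reachable without crossing a def block, the defs do not cover every path into Dest. A standalone sketch on a toy CFG, where block 0 is the entry and Preds lists each block's predecessors:

#include <cassert>
#include <set>
#include <vector>

// True when every backward path from Dest reaches a def block before the
// entry block, mirroring the worklist in the Dominate lambda above.
static bool coveredByDefs(const std::set<int> &Defs,
                          const std::vector<std::vector<int>> &Preds,
                          int Dest) {
  std::set<int> Seen(Preds[Dest].begin(), Preds[Dest].end());
  std::vector<int> Work(Seen.begin(), Seen.end());
  for (size_t I = 0; I < Work.size(); ++I) {
    int B = Work[I];
    if (Defs.count(B))
      continue; // this path is blocked by a def
    if (B == 0)
      return false; // reached the entry with no def on the path
    for (int P : Preds[B])
      if (Seen.insert(P).second)
        Work.push_back(P);
  }
  return true;
}

int main() {
  // Diamond: 0 -> {1, 2} -> 3.
  std::vector<std::vector<int>> Preds = {{}, {0}, {0}, {1, 2}};
  assert(coveredByDefs({1, 2}, Preds, 3)); // both paths carry a def
  assert(!coveredByDefs({1}, Preds, 3));   // 0 -> 2 -> 3 has none
  return 0;
}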
- VNInfo *MVN = LT->valno;
- if (LT != LID.begin()) {
- // If the current live segment is not the first, the task is easy. If
- // the previous segment continues into the current block, extend it to
- // the end of the current one, and merge the value numbers.
- // Otherwise, remove the current segment, and make the end of it "undef".
- LiveInterval::iterator P = std::prev(LT);
- SlotIndex PE = P->end.isBlock() ? P->end.getPrevIndex() : P->end;
- MachineBasicBlock *MB = MI->getParent();
- MachineBasicBlock *PB = LIS->getMBBFromIndex(PE);
- if (PB != MB && !LIS->isLiveInToMBB(LID, MB)) {
- makeDefined(DefR, LT->end, false);
- LID.removeSegment(*LT);
- } else {
- // Make the segments adjacent, so that merge-vn can also merge the
- // segments.
- P->end = LT->start;
- makeUndead(DefR, P->valno->def);
- LID.MergeValueNumberInto(MVN, P->valno);
- }
- } else {
- LiveInterval::iterator N = std::next(LT);
- LiveInterval::iterator RmB = LT, RmE = N;
- while (N != LID.end()) {
- // Iterate until the first register-based definition is found
- // (i.e. skip all block-boundary entries).
- LiveInterval::iterator Next = std::next(N);
- if (N->start.isRegister()) {
- makeDefined(DefR, N->start, false);
- break;
- }
- if (N->end.isRegister()) {
- makeDefined(DefR, N->end, false);
- RmE = Next;
- break;
- }
- RmE = Next;
- N = Next;
- }
- // Erase the segments in one shot to avoid invalidating iterators.
- LID.segments.erase(RmB, RmE);
- }
-
- bool VNUsed = false;
- for (LiveInterval::iterator I = LID.begin(), E = LID.end(); I != E; ++I) {
- if (I->valno != MVN)
- continue;
- VNUsed = true;
- break;
- }
- if (!VNUsed)
- MVN->markUnused();
- DEBUG(dbgs() << "new interval: ");
- if (!LID.empty()) {
- DEBUG(dbgs() << LID << "\n");
- LID.verify();
- } else {
- DEBUG(dbgs() << "<empty>\n");
- LIS->removeInterval(DefR);
+void HexagonExpandCondsets::updateDeadFlags(unsigned Reg) {
+ LiveInterval &LI = LIS->getInterval(Reg);
+ if (LI.hasSubRanges()) {
+ for (LiveInterval::SubRange &S : LI.subranges()) {
+ updateDeadsInRange(Reg, S.LaneMask, S);
+ LIS->shrinkToUses(S, Reg);
+ // LI::shrinkToUses will add segments started by implicit-defs.
+ // Remove them again.
+ removeImpDefSegments(S);
}
+ LI.clear();
+ LIS->constructMainRangeFromSubranges(LI);
+ } else {
+ updateDeadsInRange(Reg, MRI->getMaxLaneMaskForVReg(Reg), LI);
}
+}
- // For uses there is nothing to do. The intervals will be updated via
- // shrinkToUses.
- SmallVector<unsigned,4> Uses;
- for (auto &Op : MI->operands()) {
- if (!Op.isReg() || !Op.isUse())
- continue;
- unsigned R = Op.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(R))
- continue;
- Uses.push_back(R);
- }
+
+void HexagonExpandCondsets::recalculateLiveInterval(unsigned Reg) {
+ LIS->removeInterval(Reg);
+ LIS->createAndComputeVirtRegInterval(Reg);
+}
+
+void HexagonExpandCondsets::removeInstr(MachineInstr &MI) {
LIS->RemoveMachineInstrFromMaps(MI);
- MI->eraseFromParent();
- for (unsigned i = 0, n = Uses.size(); i < n; ++i) {
- LiveInterval &LI = LIS->getInterval(Uses[i]);
- shrinkToUses(Uses[i], LI);
+ MI.eraseFromParent();
+}
+
+
+void HexagonExpandCondsets::updateLiveness(std::set<unsigned> &RegSet,
+ bool Recalc, bool UpdateKills, bool UpdateDeads) {
+ UpdateKills |= UpdateDeads;
+ for (auto R : RegSet) {
+ if (Recalc)
+ recalculateLiveInterval(R);
+ if (UpdateKills)
+ MRI->clearKillFlags(R);
+ if (UpdateDeads)
+ updateDeadFlags(R);
+ // Fixing <dead> flags may extend live ranges, so reset <kill> flags
+ // after that.
+ if (UpdateKills)
+ updateKillFlags(R);
+ LIS->getInterval(R).verify();
}
}
@@ -647,7 +575,7 @@ void HexagonExpandCondsets::removeInstrFromLiveness(MachineInstr *MI) {
/// Get the opcode for a conditional transfer of the value in SO (source
/// operand). The condition (true/false) is given in Cond.
unsigned HexagonExpandCondsets::getCondTfrOpcode(const MachineOperand &SO,
- bool Cond) {
+ bool IfTrue) {
using namespace Hexagon;
if (SO.isReg()) {
unsigned PhysR;
@@ -664,14 +592,14 @@ unsigned HexagonExpandCondsets::getCondTfrOpcode(const MachineOperand &SO,
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(PhysS);
switch (RC->getSize()) {
case 4:
- return Cond ? A2_tfrt : A2_tfrf;
+ return IfTrue ? A2_tfrt : A2_tfrf;
case 8:
- return Cond ? A2_tfrpt : A2_tfrpf;
+ return IfTrue ? A2_tfrpt : A2_tfrpf;
}
llvm_unreachable("Invalid register operand");
}
if (SO.isImm() || SO.isFPImm())
- return Cond ? C2_cmoveit : C2_cmoveif;
+ return IfTrue ? C2_cmoveit : C2_cmoveif;
llvm_unreachable("Unexpected source operand");
}
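The opcode choice in getCondTfrOpcode is a two-way split: register sources pick a transfer by register size, immediate sources pick a conditional move, and IfTrue selects the t/f flavor. A standalone sketch of the selection table, with strings standing in for the Hexagon opcode enum values:

#include <cassert>
#include <string>

// Mirrors getCondTfrOpcode's selection above (illustrative only).
static std::string condTfrOpcode(bool IsReg, unsigned SizeBytes,
                                 bool IfTrue) {
  if (!IsReg)
    return IfTrue ? "C2_cmoveit" : "C2_cmoveif"; // immediate source
  if (SizeBytes == 4)
    return IfTrue ? "A2_tfrt" : "A2_tfrf"; // 32-bit register
  assert(SizeBytes == 8 && "unexpected register size");
  return IfTrue ? "A2_tfrpt" : "A2_tfrpf"; // 64-bit register pair
}

int main() {
  assert(condTfrOpcode(true, 4, true) == "A2_tfrt");
  assert(condTfrOpcode(true, 8, false) == "A2_tfrpf");
  assert(condTfrOpcode(false, 0, false) == "C2_cmoveif");
  return 0;
}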
@@ -680,12 +608,13 @@ unsigned HexagonExpandCondsets::getCondTfrOpcode(const MachineOperand &SO,
/// destination register DstR:DstSR, and using the predicate register from
/// PredOp. The Cond argument specifies whether the predicate is to be
/// if(PredOp), or if(!PredOp).
-MachineInstr *HexagonExpandCondsets::genTfrFor(MachineOperand &SrcOp,
- unsigned DstR, unsigned DstSR, const MachineOperand &PredOp, bool Cond) {
+MachineInstr *HexagonExpandCondsets::genCondTfrFor(MachineOperand &SrcOp,
+ MachineBasicBlock::iterator At,
+ unsigned DstR, unsigned DstSR, const MachineOperand &PredOp,
+ bool PredSense, bool ReadUndef, bool ImpUse) {
MachineInstr *MI = SrcOp.getParent();
- MachineBasicBlock &B = *MI->getParent();
- MachineBasicBlock::iterator At = MI;
- DebugLoc DL = MI->getDebugLoc();
+ MachineBasicBlock &B = *At->getParent();
+ const DebugLoc &DL = MI->getDebugLoc();
// Don't avoid identity copies here (i.e. if the source and the destination
// are the same registers). It is actually better to generate them here,
@@ -693,62 +622,101 @@ MachineInstr *HexagonExpandCondsets::genTfrFor(MachineOperand &SrcOp,
// step. The predication will remove such a copy if it is unable to
// predicate.
- unsigned Opc = getCondTfrOpcode(SrcOp, Cond);
- MachineInstr *TfrI = BuildMI(B, At, DL, HII->get(Opc))
- .addReg(DstR, RegState::Define, DstSR)
+ unsigned Opc = getCondTfrOpcode(SrcOp, PredSense);
+ unsigned State = RegState::Define | (ReadUndef ? RegState::Undef : 0);
+ MachineInstrBuilder MIB = BuildMI(B, At, DL, HII->get(Opc))
+ .addReg(DstR, State, DstSR)
.addOperand(PredOp)
.addOperand(SrcOp);
+
// We don't want any kills yet.
- TfrI->clearKillInfo();
- DEBUG(dbgs() << "created an initial copy: " << *TfrI);
- return TfrI;
+ MIB->clearKillInfo();
+ DEBUG(dbgs() << "created an initial copy: " << *MIB);
+ return &*MIB;
}
/// Replace a MUX instruction MI with a pair A2_tfrt/A2_tfrf. This function
/// performs all necessary changes to complete the replacement.
-bool HexagonExpandCondsets::split(MachineInstr *MI) {
+bool HexagonExpandCondsets::split(MachineInstr &MI,
+ std::set<unsigned> &UpdRegs) {
if (TfrLimitActive) {
if (TfrCounter >= TfrLimit)
return false;
TfrCounter++;
}
- DEBUG(dbgs() << "\nsplitting BB#" << MI->getParent()->getNumber()
- << ": " << *MI);
- MachineOperand &MD = MI->getOperand(0); // Definition
- MachineOperand &MP = MI->getOperand(1); // Predicate register
+ DEBUG(dbgs() << "\nsplitting BB#" << MI.getParent()->getNumber() << ": "
+ << MI);
+ MachineOperand &MD = MI.getOperand(0); // Definition
+ MachineOperand &MP = MI.getOperand(1); // Predicate register
+ MachineOperand &MS1 = MI.getOperand(2); // Source value #1
+ MachineOperand &MS2 = MI.getOperand(3); // Source value #2
assert(MD.isDef());
unsigned DR = MD.getReg(), DSR = MD.getSubReg();
+ bool ReadUndef = MD.isUndef();
+ MachineBasicBlock::iterator At = MI;
+
+ if (ReadUndef && DSR != 0 && MRI->shouldTrackSubRegLiveness(DR)) {
+ unsigned NewSR = 0;
+ MachineBasicBlock::iterator DefAt = At;
+ bool SameReg = (MS1.isReg() && DR == MS1.getReg()) ||
+ (MS2.isReg() && DR == MS2.getReg());
+ if (SameReg) {
+ NewSR = (DSR == Hexagon::subreg_loreg) ? Hexagon::subreg_hireg
+ : Hexagon::subreg_loreg;
+ // Advance the insertion point if the subregisters differ between
+ // the source and the target (with the same super-register).
+ // Note: this case has never occurred during tests.
+ if ((MS1.isReg() && NewSR == MS1.getSubReg()) ||
+ (MS2.isReg() && NewSR == MS2.getSubReg()))
+ ++DefAt;
+ }
+ // Use "At", since "DefAt" may be end().
+ MachineBasicBlock &B = *At->getParent();
+ DebugLoc DL = At->getDebugLoc();
+ auto ImpD = BuildMI(B, DefAt, DL, HII->get(TargetOpcode::IMPLICIT_DEF))
+ .addReg(DR, RegState::Define, NewSR);
+ LIS->InsertMachineInstrInMaps(*ImpD);
+ LocalImpDefs.insert(&*ImpD);
+ }
// First, create the two individual conditional transfers, and add each
// of them to the live intervals information. Do that first and then remove
// the old instruction from live intervals.
- if (MachineInstr *TfrT = genTfrFor(MI->getOperand(2), DR, DSR, MP, true))
- addInstrToLiveness(TfrT);
- if (MachineInstr *TfrF = genTfrFor(MI->getOperand(3), DR, DSR, MP, false))
- addInstrToLiveness(TfrF);
- removeInstrFromLiveness(MI);
-
+ MachineInstr *TfrT =
+ genCondTfrFor(MI.getOperand(2), At, DR, DSR, MP, true, ReadUndef, false);
+ MachineInstr *TfrF =
+ genCondTfrFor(MI.getOperand(3), At, DR, DSR, MP, false, ReadUndef, true);
+ LIS->InsertMachineInstrInMaps(*TfrT);
+ LIS->InsertMachineInstrInMaps(*TfrF);
+
+ // Will need to recalculate live intervals for all registers in MI.
+ for (auto &Op : MI.operands())
+ if (Op.isReg())
+ UpdRegs.insert(Op.getReg());
+
+ removeInstr(MI);
return true;
}
-/// Split all MUX instructions in the given block into pairs of contitional
+/// Split all MUX instructions in the given block into pairs of conditional
/// transfers.
-bool HexagonExpandCondsets::splitInBlock(MachineBasicBlock &B) {
+bool HexagonExpandCondsets::splitInBlock(MachineBasicBlock &B,
+ std::set<unsigned> &UpdRegs) {
bool Changed = false;
MachineBasicBlock::iterator I, E, NextI;
for (I = B.begin(), E = B.end(); I != E; I = NextI) {
NextI = std::next(I);
- if (isCondset(I))
- Changed |= split(I);
+ if (isCondset(*I))
+ Changed |= split(*I, UpdRegs);
}
return Changed;
}
bool HexagonExpandCondsets::isPredicable(MachineInstr *MI) {
- if (HII->isPredicated(MI) || !HII->isPredicable(MI))
+ if (HII->isPredicated(*MI) || !HII->isPredicable(*MI))
return false;
if (MI->hasUnmodeledSideEffects() || MI->mayStore())
return false;
@@ -784,8 +752,8 @@ MachineInstr *HexagonExpandCondsets::getReachingDefForPred(RegisterRef RD,
MachineInstr *MI = &*I;
// Check if this instruction can be ignored, i.e. if it is predicated
// on the complementary condition.
- if (PredValid && HII->isPredicated(MI)) {
- if (MI->readsRegister(PredR) && (Cond != HII->isPredicatedTrue(MI)))
+ if (PredValid && HII->isPredicated(*MI)) {
+ if (MI->readsRegister(PredR) && (Cond != HII->isPredicatedTrue(*MI)))
continue;
}
@@ -821,12 +789,12 @@ MachineInstr *HexagonExpandCondsets::getReachingDefForPred(RegisterRef RD,
/// the maps Defs and Uses. These maps reflect the conditional defs and uses
/// that depend on the same predicate register to allow moving instructions
/// over instructions predicated on the opposite condition.
-bool HexagonExpandCondsets::canMoveOver(MachineInstr *MI, ReferenceMap &Defs,
- ReferenceMap &Uses) {
+bool HexagonExpandCondsets::canMoveOver(MachineInstr &MI, ReferenceMap &Defs,
+ ReferenceMap &Uses) {
// In order to be able to safely move MI over instructions that define
// "Defs" and use "Uses", no def operand from MI can be defined or used
// and no use operand can be defined.
- for (auto &Op : MI->operands()) {
+ for (auto &Op : MI.operands()) {
if (!Op.isReg())
continue;
RegisterRef RR = Op;
@@ -848,19 +816,19 @@ bool HexagonExpandCondsets::canMoveOver(MachineInstr *MI, ReferenceMap &Defs,
/// Check if the instruction accessing memory (TheI) can be moved to the
/// location ToI.
-bool HexagonExpandCondsets::canMoveMemTo(MachineInstr *TheI, MachineInstr *ToI,
- bool IsDown) {
- bool IsLoad = TheI->mayLoad(), IsStore = TheI->mayStore();
+bool HexagonExpandCondsets::canMoveMemTo(MachineInstr &TheI, MachineInstr &ToI,
+ bool IsDown) {
+ bool IsLoad = TheI.mayLoad(), IsStore = TheI.mayStore();
if (!IsLoad && !IsStore)
return true;
if (HII->areMemAccessesTriviallyDisjoint(TheI, ToI))
return true;
- if (TheI->hasUnmodeledSideEffects())
+ if (TheI.hasUnmodeledSideEffects())
return false;
MachineBasicBlock::iterator StartI = IsDown ? TheI : ToI;
MachineBasicBlock::iterator EndI = IsDown ? ToI : TheI;
- bool Ordered = TheI->hasOrderedMemoryRef();
+ bool Ordered = TheI.hasOrderedMemoryRef();
// Search for aliased memory reference in (StartI, EndI).
for (MachineBasicBlock::iterator I = std::next(StartI); I != EndI; ++I) {
@@ -883,8 +851,11 @@ bool HexagonExpandCondsets::canMoveMemTo(MachineInstr *TheI, MachineInstr *ToI,
/// Generate a predicated version of MI (where the condition is given via
/// PredR and Cond) at the point indicated by Where.
-void HexagonExpandCondsets::predicateAt(RegisterRef RD, MachineInstr *MI,
- MachineBasicBlock::iterator Where, unsigned PredR, bool Cond) {
+void HexagonExpandCondsets::predicateAt(const MachineOperand &DefOp,
+ MachineInstr &MI,
+ MachineBasicBlock::iterator Where,
+ const MachineOperand &PredOp, bool Cond,
+ std::set<unsigned> &UpdRegs) {
// The problem with updating live intervals is that we can move one def
// past another def. In particular, this can happen when moving an A2_tfrt
// over an A2_tfrf defining the same register. From the point of view of
@@ -896,33 +867,34 @@ void HexagonExpandCondsets::predicateAt(RegisterRef RD, MachineInstr *MI,
// target location, (2) update liveness, (3) delete the old instruction,
// and (4) update liveness again.
- MachineBasicBlock &B = *MI->getParent();
+ MachineBasicBlock &B = *MI.getParent();
DebugLoc DL = Where->getDebugLoc(); // "Where" points to an instruction.
- unsigned Opc = MI->getOpcode();
+ unsigned Opc = MI.getOpcode();
unsigned PredOpc = HII->getCondOpcode(Opc, !Cond);
MachineInstrBuilder MB = BuildMI(B, Where, DL, HII->get(PredOpc));
- unsigned Ox = 0, NP = MI->getNumOperands();
+ unsigned Ox = 0, NP = MI.getNumOperands();
// Skip all defs from MI first.
while (Ox < NP) {
- MachineOperand &MO = MI->getOperand(Ox);
+ MachineOperand &MO = MI.getOperand(Ox);
if (!MO.isReg() || !MO.isDef())
break;
Ox++;
}
// Add the new def, then the predicate register, then the rest of the
// operands.
- MB.addReg(RD.Reg, RegState::Define, RD.Sub);
- MB.addReg(PredR);
+ MB.addReg(DefOp.getReg(), getRegState(DefOp), DefOp.getSubReg());
+ MB.addReg(PredOp.getReg(), PredOp.isUndef() ? RegState::Undef : 0,
+ PredOp.getSubReg());
while (Ox < NP) {
- MachineOperand &MO = MI->getOperand(Ox);
+ MachineOperand &MO = MI.getOperand(Ox);
if (!MO.isReg() || !MO.isImplicit())
MB.addOperand(MO);
Ox++;
}
MachineFunction &MF = *B.getParent();
- MachineInstr::mmo_iterator I = MI->memoperands_begin();
- unsigned NR = std::distance(I, MI->memoperands_end());
+ MachineInstr::mmo_iterator I = MI.memoperands_begin();
+ unsigned NR = std::distance(I, MI.memoperands_end());
MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(NR);
for (unsigned i = 0; i < NR; ++i)
MemRefs[i] = *I++;
@@ -930,7 +902,11 @@ void HexagonExpandCondsets::predicateAt(RegisterRef RD, MachineInstr *MI,
MachineInstr *NewI = MB;
NewI->clearKillInfo();
- addInstrToLiveness(NewI);
+ LIS->InsertMachineInstrInMaps(*NewI);
+
+ for (auto &Op : NewI->operands())
+ if (Op.isReg())
+ UpdRegs.insert(Op.getReg());
}
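
(Aside, not part of the diff: the operand list of the predicated clone has a
fixed shape — MI's own defs are skipped, the caller-supplied def operand and
the predicate register come first, and the remaining explicit operands are
copied over. A self-contained sketch of just that reordering, with plain
structs standing in for MachineOperand; the RegState and sub-register handling
above is dropped.)

#include <cassert>
#include <string>
#include <vector>

struct Op { std::string Reg; bool IsDef = false, IsImplicit = false; };

static std::vector<Op> predicatedOperands(const std::vector<Op> &MI,
                                          const Op &Def, const Op &Pred) {
  std::vector<Op> Out;
  size_t Ox = 0, NP = MI.size();
  while (Ox < NP && MI[Ox].IsDef)   // skip all defs from MI first
    ++Ox;
  Out.push_back(Def);               // the new def ...
  Out.push_back(Pred);              // ... then the predicate register
  for (; Ox < NP; ++Ox)
    if (!MI[Ox].IsImplicit)         // drop implicit operands
      Out.push_back(MI[Ox]);
  return Out;
}

int main() {
  std::vector<Op> MI{{"r0", true}, {"r1"}, {"r31", false, true}};
  auto Out = predicatedOperands(MI, {"r0", true}, {"p0"});
  assert(Out.size() == 3);
  assert(Out[0].Reg == "r0" && Out[1].Reg == "p0" && Out[2].Reg == "r1");
}
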
@@ -945,9 +921,9 @@ void HexagonExpandCondsets::renameInRange(RegisterRef RO, RegisterRef RN,
MachineInstr *MI = &*I;
// Do not touch instructions that are not predicated, or are predicated
// on the opposite condition.
- if (!HII->isPredicated(MI))
+ if (!HII->isPredicated(*MI))
continue;
- if (!MI->readsRegister(PredR) || (Cond != HII->isPredicatedTrue(MI)))
+ if (!MI->readsRegister(PredR) || (Cond != HII->isPredicatedTrue(*MI)))
continue;
for (auto &Op : MI->operands()) {
@@ -965,22 +941,27 @@ void HexagonExpandCondsets::renameInRange(RegisterRef RO, RegisterRef RN,
/// For a given conditional copy, predicate the definition of the source of
/// the copy under the given condition (using the same predicate register as
/// the copy).
-bool HexagonExpandCondsets::predicate(MachineInstr *TfrI, bool Cond) {
+bool HexagonExpandCondsets::predicate(MachineInstr &TfrI, bool Cond,
+ std::set<unsigned> &UpdRegs) {
// TfrI - A2_tfr[tf] Instruction (not A2_tfrsi).
- unsigned Opc = TfrI->getOpcode();
+ unsigned Opc = TfrI.getOpcode();
(void)Opc;
assert(Opc == Hexagon::A2_tfrt || Opc == Hexagon::A2_tfrf);
DEBUG(dbgs() << "\nattempt to predicate if-" << (Cond ? "true" : "false")
- << ": " << *TfrI);
+ << ": " << TfrI);
- MachineOperand &MD = TfrI->getOperand(0);
- MachineOperand &MP = TfrI->getOperand(1);
- MachineOperand &MS = TfrI->getOperand(2);
+ MachineOperand &MD = TfrI.getOperand(0);
+ MachineOperand &MP = TfrI.getOperand(1);
+ MachineOperand &MS = TfrI.getOperand(2);
// The source operand should be a <kill>. This is not strictly necessary,
// but it makes things a lot simpler. Otherwise, we would need to rename
// some registers, which would complicate the transformation considerably.
if (!MS.isKill())
return false;
+ // Avoid predicating instructions that define a subregister if subregister
+ // liveness tracking is not enabled.
+ if (MD.getSubReg() && !MRI->shouldTrackSubRegLiveness(MD.getReg()))
+ return false;
RegisterRef RT(MS);
unsigned PredR = MP.getReg();
@@ -1014,8 +995,8 @@ bool HexagonExpandCondsets::predicate(MachineInstr *TfrI, bool Cond) {
// By default assume that the instruction executes on the same condition
// as TfrI (Exec_Then), and also on the opposite one (Exec_Else).
unsigned Exec = Exec_Then | Exec_Else;
- if (PredValid && HII->isPredicated(MI) && MI->readsRegister(PredR))
- Exec = (Cond == HII->isPredicatedTrue(MI)) ? Exec_Then : Exec_Else;
+ if (PredValid && HII->isPredicated(*MI) && MI->readsRegister(PredR))
+ Exec = (Cond == HII->isPredicatedTrue(*MI)) ? Exec_Then : Exec_Else;
for (auto &Op : MI->operands()) {
if (!Op.isReg())
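
(Aside, not part of the diff: Exec_Then/Exec_Else form a two-bit execution
mask — an instruction not predicated on PredR may execute under both
conditions, a predicated one under exactly one, depending on whether its
predicate sense matches Cond. A toy version of the computation above; the bit
values are illustrative, not the ones the pass uses.)

#include <cassert>

enum : unsigned { Exec_Then = 1u, Exec_Else = 2u };

static unsigned execMask(bool PredicatedOnPredR, bool Cond, bool PredTrue) {
  if (PredicatedOnPredR)
    return (Cond == PredTrue) ? Exec_Then : Exec_Else;
  return Exec_Then | Exec_Else;     // may execute under either condition
}

int main() {
  assert(execMask(false, true, false) == (Exec_Then | Exec_Else));
  assert(execMask(true, true, true) == Exec_Then);
  assert(execMask(true, true, false) == Exec_Else);
}
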
@@ -1059,48 +1040,53 @@ bool HexagonExpandCondsets::predicate(MachineInstr *TfrI, bool Cond) {
// If the target register of the TfrI (RD) is not used or defined between
// DefI and TfrI, consider moving TfrI up to DefI.
bool CanUp = canMoveOver(TfrI, Defs, Uses);
- bool CanDown = canMoveOver(DefI, Defs, Uses);
+ bool CanDown = canMoveOver(*DefI, Defs, Uses);
// The TfrI does not access memory, but DefI could. Check if it's safe
// to move DefI down to TfrI.
if (DefI->mayLoad() || DefI->mayStore())
- if (!canMoveMemTo(DefI, TfrI, true))
+ if (!canMoveMemTo(*DefI, TfrI, true))
CanDown = false;
DEBUG(dbgs() << "Can move up: " << (CanUp ? "yes" : "no")
<< ", can move down: " << (CanDown ? "yes\n" : "no\n"));
MachineBasicBlock::iterator PastDefIt = std::next(DefIt);
if (CanUp)
- predicateAt(RD, DefI, PastDefIt, PredR, Cond);
+ predicateAt(MD, *DefI, PastDefIt, MP, Cond, UpdRegs);
else if (CanDown)
- predicateAt(RD, DefI, TfrIt, PredR, Cond);
+ predicateAt(MD, *DefI, TfrIt, MP, Cond, UpdRegs);
else
return false;
- if (RT != RD)
+ if (RT != RD) {
renameInRange(RT, RD, PredR, Cond, PastDefIt, TfrIt);
+ UpdRegs.insert(RT.Reg);
+ }
- // Delete the user of RT first (it should work either way, but this order
- // of deleting is more natural).
- removeInstrFromLiveness(TfrI);
- removeInstrFromLiveness(DefI);
+ removeInstr(TfrI);
+ removeInstr(*DefI);
return true;
}
/// Predicate all cases of conditional copies in the specified block.
-bool HexagonExpandCondsets::predicateInBlock(MachineBasicBlock &B) {
+bool HexagonExpandCondsets::predicateInBlock(MachineBasicBlock &B,
+ std::set<unsigned> &UpdRegs) {
bool Changed = false;
MachineBasicBlock::iterator I, E, NextI;
for (I = B.begin(), E = B.end(); I != E; I = NextI) {
NextI = std::next(I);
unsigned Opc = I->getOpcode();
if (Opc == Hexagon::A2_tfrt || Opc == Hexagon::A2_tfrf) {
- bool Done = predicate(I, (Opc == Hexagon::A2_tfrt));
+ bool Done = predicate(*I, (Opc == Hexagon::A2_tfrt), UpdRegs);
if (!Done) {
// If we didn't predicate I, we may need to remove it in case it is
// an "identity" copy, e.g. vreg1 = A2_tfrt vreg2, vreg1.
- if (RegisterRef(I->getOperand(0)) == RegisterRef(I->getOperand(2)))
- removeInstrFromLiveness(I);
+ if (RegisterRef(I->getOperand(0)) == RegisterRef(I->getOperand(2))) {
+ for (auto &Op : I->operands())
+ if (Op.isReg())
+ UpdRegs.insert(Op.getReg());
+ removeInstr(*I);
+ }
}
Changed |= Done;
}
@@ -1109,51 +1095,6 @@ bool HexagonExpandCondsets::predicateInBlock(MachineBasicBlock &B) {
}
-void HexagonExpandCondsets::removeImplicitUses(MachineInstr *MI) {
- for (unsigned i = MI->getNumOperands(); i > 0; --i) {
- MachineOperand &MO = MI->getOperand(i-1);
- if (MO.isReg() && MO.isUse() && MO.isImplicit())
- MI->RemoveOperand(i-1);
- }
-}
-
-
-void HexagonExpandCondsets::removeImplicitUses(MachineBasicBlock &B) {
- for (MachineBasicBlock::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- MachineInstr *MI = &*I;
- if (HII->isPredicated(MI))
- removeImplicitUses(MI);
- }
-}
-
-
-void HexagonExpandCondsets::postprocessUndefImplicitUses(MachineBasicBlock &B) {
- // Implicit uses that are "undef" are only meaningful (outside of the
- // internals of this pass) when the instruction defines a subregister,
- // and the implicit-undef use applies to the defined register. In such
- // cases, the proper way to record the information in the IR is to mark
- // the definition as "undef", which will be interpreted as "read-undef".
- typedef SmallSet<unsigned,2> RegisterSet;
- for (MachineBasicBlock::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- MachineInstr *MI = &*I;
- RegisterSet Undefs;
- for (unsigned i = MI->getNumOperands(); i > 0; --i) {
- MachineOperand &MO = MI->getOperand(i-1);
- if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.isUndef()) {
- MI->RemoveOperand(i-1);
- Undefs.insert(MO.getReg());
- }
- }
- for (auto &Op : MI->operands()) {
- if (!Op.isReg() || !Op.isDef() || !Op.getSubReg())
- continue;
- if (Undefs.count(Op.getReg()))
- Op.setIsUndef(true);
- }
- }
-}
-
-
bool HexagonExpandCondsets::isIntReg(RegisterRef RR, unsigned &BW) {
if (!TargetRegisterInfo::isVirtualRegister(RR.Reg))
return false;
@@ -1236,7 +1177,7 @@ bool HexagonExpandCondsets::coalesceRegisters(RegisterRef R1, RegisterRef R2) {
while (L2.begin() != L2.end())
L2.removeSegment(*L2.begin());
- updateKillFlags(R1.Reg, L1);
+ updateKillFlags(R1.Reg);
DEBUG(dbgs() << "coalesced: " << L1 << "\n");
L1.verify();
@@ -1253,7 +1194,7 @@ bool HexagonExpandCondsets::coalesceSegments(MachineFunction &MF) {
MachineBasicBlock &B = *I;
for (MachineBasicBlock::iterator J = B.begin(), F = B.end(); J != F; ++J) {
MachineInstr *MI = &*J;
- if (!isCondset(MI))
+ if (!isCondset(*MI))
continue;
MachineOperand &S1 = MI->getOperand(2), &S2 = MI->getOperand(3);
if (!S1.isReg() && !S2.isReg())
@@ -1290,13 +1231,13 @@ bool HexagonExpandCondsets::coalesceSegments(MachineFunction &MF) {
if (S1.isReg()) {
RegisterRef RS = S1;
MachineInstr *RDef = getReachingDefForPred(RS, CI, RP.Reg, true);
- if (!RDef || !HII->isPredicable(RDef))
+ if (!RDef || !HII->isPredicable(*RDef))
Done = coalesceRegisters(RD, RegisterRef(S1));
}
if (!Done && S2.isReg()) {
RegisterRef RS = S2;
MachineInstr *RDef = getReachingDefForPred(RS, CI, RP.Reg, false);
- if (!RDef || !HII->isPredicable(RDef))
+ if (!RDef || !HII->isPredicable(*RDef))
Done = coalesceRegisters(RD, RegisterRef(S2));
}
Changed |= Done;
@@ -1306,32 +1247,59 @@ bool HexagonExpandCondsets::coalesceSegments(MachineFunction &MF) {
bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
HII = static_cast<const HexagonInstrInfo*>(MF.getSubtarget().getInstrInfo());
TRI = MF.getSubtarget().getRegisterInfo();
+ MDT = &getAnalysis<MachineDominatorTree>();
LIS = &getAnalysis<LiveIntervals>();
MRI = &MF.getRegInfo();
+ LocalImpDefs.clear();
+
+ DEBUG(LIS->print(dbgs() << "Before expand-condsets\n",
+ MF.getFunction()->getParent()));
bool Changed = false;
+ std::set<unsigned> SplitUpd, PredUpd;
// Try to coalesce the target of a mux with one of its sources.
// This could eliminate a register copy in some circumstances.
Changed |= coalesceSegments(MF);
- for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
- // First, simply split all muxes into a pair of conditional transfers
- // and update the live intervals to reflect the new arrangement.
-    // This is done mainly to make the live interval update simpler than it
- // would be while trying to predicate instructions at the same time.
- Changed |= splitInBlock(*I);
- // Traverse all blocks and collapse predicable instructions feeding
- // conditional transfers into predicated instructions.
- // Walk over all the instructions again, so we may catch pre-existing
- // cases that were not created in the previous step.
- Changed |= predicateInBlock(*I);
- }
+ // First, simply split all muxes into a pair of conditional transfers
+ // and update the live intervals to reflect the new arrangement. The
+ // goal is to update the kill flags, since predication will rely on
+ // them.
+ for (auto &B : MF)
+ Changed |= splitInBlock(B, SplitUpd);
+ updateLiveness(SplitUpd, true, true, false);
+
+ // Traverse all blocks and collapse predicable instructions feeding
+ // conditional transfers into predicated instructions.
+ // Walk over all the instructions again, so we may catch pre-existing
+ // cases that were not created in the previous step.
+ for (auto &B : MF)
+ Changed |= predicateInBlock(B, PredUpd);
+
+ updateLiveness(PredUpd, true, true, true);
+ // Remove from SplitUpd all registers contained in PredUpd to avoid
+ // unnecessary liveness recalculation.
+ std::set<unsigned> Diff;
+ std::set_difference(SplitUpd.begin(), SplitUpd.end(),
+ PredUpd.begin(), PredUpd.end(),
+ std::inserter(Diff, Diff.begin()));
+ updateLiveness(Diff, false, false, true);
+
+ for (auto *ImpD : LocalImpDefs)
+ removeInstr(*ImpD);
+
+ DEBUG({
+ if (Changed)
+ LIS->print(dbgs() << "After expand-condsets\n",
+ MF.getFunction()->getParent());
+ });
- for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
- postprocessUndefImplicitUses(*I);
return Changed;
}
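
(Aside, not part of the diff: the SplitUpd/PredUpd bookkeeping is ordinary
sorted-set arithmetic. A standalone check of the std::set_difference step
exactly as the pass writes it:)

#include <algorithm>
#include <cassert>
#include <iterator>
#include <set>

int main() {
  std::set<unsigned> SplitUpd{1, 2, 3, 5}; // touched while splitting muxes
  std::set<unsigned> PredUpd{2, 5, 8};     // already recomputed after predication
  std::set<unsigned> Diff;
  std::set_difference(SplitUpd.begin(), SplitUpd.end(),
                      PredUpd.begin(), PredUpd.end(),
                      std::inserter(Diff, Diff.begin()));
  assert(Diff == (std::set<unsigned>{1, 3})); // only these still need updating
}
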
@@ -1340,18 +1308,6 @@ bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) {
// Public Constructor Functions
//===----------------------------------------------------------------------===//
-static void initializePassOnce(PassRegistry &Registry) {
- const char *Name = "Hexagon Expand Condsets";
- PassInfo *PI = new PassInfo(Name, "expand-condsets",
- &HexagonExpandCondsets::ID, 0, false, false);
- Registry.registerPass(*PI, true);
-}
-
-void llvm::initializeHexagonExpandCondsetsPass(PassRegistry &Registry) {
- CALL_ONCE_INITIALIZATION(initializePassOnce)
-}
-
-
FunctionPass *llvm::createHexagonExpandCondsets() {
return new HexagonExpandCondsets();
}
diff --git a/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp b/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
deleted file mode 100644
index 6e2dbc06b124..000000000000
--- a/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
+++ /dev/null
@@ -1,357 +0,0 @@
-//===-- HexagonExpandPredSpillCode.cpp - Expand Predicate Spill Code ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// The Hexagon processor has no instructions that load or store predicate
-// registers directly. So, when these registers must be spilled, a general
-// purpose register must be found and the value copied between it and the
-// predicate register. This code currently does not use the register
-// scavenger mechanism available in the allocator. There are two registers
-// reserved to allow spilling/restoring predicate registers. One is used to
-// hold the predicate value. The other is used when stack frame offsets are
-// too large.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Hexagon.h"
-#include "HexagonMachineFunctionInfo.h"
-#include "HexagonSubtarget.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/CodeGen/LatencyPriorityQueue.h"
-#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
-#include "llvm/CodeGen/SchedulerRegistry.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-
-using namespace llvm;
-
-
-namespace llvm {
- FunctionPass *createHexagonExpandPredSpillCode();
- void initializeHexagonExpandPredSpillCodePass(PassRegistry&);
-}
-
-
-namespace {
-
-class HexagonExpandPredSpillCode : public MachineFunctionPass {
- public:
- static char ID;
- HexagonExpandPredSpillCode() : MachineFunctionPass(ID) {
- PassRegistry &Registry = *PassRegistry::getPassRegistry();
- initializeHexagonExpandPredSpillCodePass(Registry);
- }
-
- const char *getPassName() const override {
- return "Hexagon Expand Predicate Spill Code";
- }
- bool runOnMachineFunction(MachineFunction &Fn) override;
-};
-
-
-char HexagonExpandPredSpillCode::ID = 0;
-
-
-bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
-
- const HexagonSubtarget &QST = Fn.getSubtarget<HexagonSubtarget>();
- const HexagonInstrInfo *TII = QST.getInstrInfo();
-
- // Loop over all of the basic blocks.
- for (MachineFunction::iterator MBBb = Fn.begin(), MBBe = Fn.end();
- MBBb != MBBe; ++MBBb) {
- MachineBasicBlock *MBB = &*MBBb;
- // Traverse the basic block.
- for (MachineBasicBlock::iterator MII = MBB->begin(); MII != MBB->end();
- ++MII) {
- MachineInstr *MI = MII;
- int Opc = MI->getOpcode();
- if (Opc == Hexagon::S2_storerb_pci_pseudo ||
- Opc == Hexagon::S2_storerh_pci_pseudo ||
- Opc == Hexagon::S2_storeri_pci_pseudo ||
- Opc == Hexagon::S2_storerd_pci_pseudo ||
- Opc == Hexagon::S2_storerf_pci_pseudo) {
- unsigned Opcode;
- if (Opc == Hexagon::S2_storerd_pci_pseudo)
- Opcode = Hexagon::S2_storerd_pci;
- else if (Opc == Hexagon::S2_storeri_pci_pseudo)
- Opcode = Hexagon::S2_storeri_pci;
- else if (Opc == Hexagon::S2_storerh_pci_pseudo)
- Opcode = Hexagon::S2_storerh_pci;
- else if (Opc == Hexagon::S2_storerf_pci_pseudo)
- Opcode = Hexagon::S2_storerf_pci;
- else if (Opc == Hexagon::S2_storerb_pci_pseudo)
- Opcode = Hexagon::S2_storerb_pci;
- else
- llvm_unreachable("wrong Opc");
- MachineOperand &Op0 = MI->getOperand(0);
- MachineOperand &Op1 = MI->getOperand(1);
- MachineOperand &Op2 = MI->getOperand(2);
- MachineOperand &Op3 = MI->getOperand(3); // Modifier value.
- MachineOperand &Op4 = MI->getOperand(4);
-        // Emit "C6 = Rn"; C6 is the control register for M0.
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::A2_tfrrcr),
- Hexagon::C6)->addOperand(Op3);
-        // Replace the pseudo circ_ldd with the real circ_ldd.
- MachineInstr *NewMI = BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Opcode));
- NewMI->addOperand(Op0);
- NewMI->addOperand(Op1);
- NewMI->addOperand(Op4);
- NewMI->addOperand(MachineOperand::CreateReg(Hexagon::M0,
- false, /*isDef*/
- false, /*isImpl*/
- true /*isKill*/));
- NewMI->addOperand(Op2);
- MII = MBB->erase(MI);
- --MII;
- } else if (Opc == Hexagon::L2_loadrd_pci_pseudo ||
- Opc == Hexagon::L2_loadri_pci_pseudo ||
- Opc == Hexagon::L2_loadrh_pci_pseudo ||
- Opc == Hexagon::L2_loadruh_pci_pseudo||
- Opc == Hexagon::L2_loadrb_pci_pseudo ||
- Opc == Hexagon::L2_loadrub_pci_pseudo) {
- unsigned Opcode;
- if (Opc == Hexagon::L2_loadrd_pci_pseudo)
- Opcode = Hexagon::L2_loadrd_pci;
- else if (Opc == Hexagon::L2_loadri_pci_pseudo)
- Opcode = Hexagon::L2_loadri_pci;
- else if (Opc == Hexagon::L2_loadrh_pci_pseudo)
- Opcode = Hexagon::L2_loadrh_pci;
- else if (Opc == Hexagon::L2_loadruh_pci_pseudo)
- Opcode = Hexagon::L2_loadruh_pci;
- else if (Opc == Hexagon::L2_loadrb_pci_pseudo)
- Opcode = Hexagon::L2_loadrb_pci;
- else if (Opc == Hexagon::L2_loadrub_pci_pseudo)
- Opcode = Hexagon::L2_loadrub_pci;
- else
- llvm_unreachable("wrong Opc");
-
- MachineOperand &Op0 = MI->getOperand(0);
- MachineOperand &Op1 = MI->getOperand(1);
- MachineOperand &Op2 = MI->getOperand(2);
- MachineOperand &Op4 = MI->getOperand(4); // Modifier value.
- MachineOperand &Op5 = MI->getOperand(5);
-        // Emit "C6 = Rn"; C6 is the control register for M0.
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::A2_tfrrcr),
- Hexagon::C6)->addOperand(Op4);
-        // Replace the pseudo circ_ldd with the real circ_ldd.
- MachineInstr *NewMI = BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Opcode));
- NewMI->addOperand(Op1);
- NewMI->addOperand(Op0);
- NewMI->addOperand(Op2);
- NewMI->addOperand(Op5);
- NewMI->addOperand(MachineOperand::CreateReg(Hexagon::M0,
- false, /*isDef*/
- false, /*isImpl*/
- true /*isKill*/));
- MII = MBB->erase(MI);
- --MII;
- } else if (Opc == Hexagon::L2_loadrd_pbr_pseudo ||
- Opc == Hexagon::L2_loadri_pbr_pseudo ||
- Opc == Hexagon::L2_loadrh_pbr_pseudo ||
- Opc == Hexagon::L2_loadruh_pbr_pseudo||
- Opc == Hexagon::L2_loadrb_pbr_pseudo ||
- Opc == Hexagon::L2_loadrub_pbr_pseudo) {
- unsigned Opcode;
- if (Opc == Hexagon::L2_loadrd_pbr_pseudo)
- Opcode = Hexagon::L2_loadrd_pbr;
- else if (Opc == Hexagon::L2_loadri_pbr_pseudo)
- Opcode = Hexagon::L2_loadri_pbr;
- else if (Opc == Hexagon::L2_loadrh_pbr_pseudo)
- Opcode = Hexagon::L2_loadrh_pbr;
- else if (Opc == Hexagon::L2_loadruh_pbr_pseudo)
- Opcode = Hexagon::L2_loadruh_pbr;
- else if (Opc == Hexagon::L2_loadrb_pbr_pseudo)
- Opcode = Hexagon::L2_loadrb_pbr;
- else if (Opc == Hexagon::L2_loadrub_pbr_pseudo)
- Opcode = Hexagon::L2_loadrub_pbr;
- else
- llvm_unreachable("wrong Opc");
- MachineOperand &Op0 = MI->getOperand(0);
- MachineOperand &Op1 = MI->getOperand(1);
- MachineOperand &Op2 = MI->getOperand(2);
- MachineOperand &Op4 = MI->getOperand(4); // Modifier value.
-        // Emit "C6 = Rn"; C6 is the control register for M0.
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::A2_tfrrcr),
- Hexagon::C6)->addOperand(Op4);
-        // Replace the pseudo brev_ldd with the real brev_ldd.
- MachineInstr *NewMI = BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Opcode));
- NewMI->addOperand(Op1);
- NewMI->addOperand(Op0);
- NewMI->addOperand(Op2);
- NewMI->addOperand(MachineOperand::CreateReg(Hexagon::M0,
- false, /*isDef*/
- false, /*isImpl*/
- true /*isKill*/));
- MII = MBB->erase(MI);
- --MII;
- } else if (Opc == Hexagon::S2_storerd_pbr_pseudo ||
- Opc == Hexagon::S2_storeri_pbr_pseudo ||
- Opc == Hexagon::S2_storerh_pbr_pseudo ||
- Opc == Hexagon::S2_storerb_pbr_pseudo ||
- Opc == Hexagon::S2_storerf_pbr_pseudo) {
- unsigned Opcode;
- if (Opc == Hexagon::S2_storerd_pbr_pseudo)
- Opcode = Hexagon::S2_storerd_pbr;
- else if (Opc == Hexagon::S2_storeri_pbr_pseudo)
- Opcode = Hexagon::S2_storeri_pbr;
- else if (Opc == Hexagon::S2_storerh_pbr_pseudo)
- Opcode = Hexagon::S2_storerh_pbr;
- else if (Opc == Hexagon::S2_storerf_pbr_pseudo)
- Opcode = Hexagon::S2_storerf_pbr;
- else if (Opc == Hexagon::S2_storerb_pbr_pseudo)
- Opcode = Hexagon::S2_storerb_pbr;
- else
- llvm_unreachable("wrong Opc");
- MachineOperand &Op0 = MI->getOperand(0);
- MachineOperand &Op1 = MI->getOperand(1);
- MachineOperand &Op2 = MI->getOperand(2);
- MachineOperand &Op3 = MI->getOperand(3); // Modifier value.
-        // Emit "C6 = Rn"; C6 is the control register for M0.
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::A2_tfrrcr),
- Hexagon::C6)->addOperand(Op3);
-        // Replace the pseudo brev_ldd with the real brev_ldd.
- MachineInstr *NewMI = BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Opcode));
- NewMI->addOperand(Op0);
- NewMI->addOperand(Op1);
- NewMI->addOperand(MachineOperand::CreateReg(Hexagon::M0,
- false, /*isDef*/
- false, /*isImpl*/
- true /*isKill*/));
- NewMI->addOperand(Op2);
- MII = MBB->erase(MI);
- --MII;
- } else if (Opc == Hexagon::STriw_pred) {
- // STriw_pred [R30], ofst, SrcReg;
- unsigned FP = MI->getOperand(0).getReg();
- assert(FP == QST.getRegisterInfo()->getFrameRegister() &&
- "Not a Frame Pointer, Nor a Spill Slot");
- assert(MI->getOperand(1).isImm() && "Not an offset");
- int Offset = MI->getOperand(1).getImm();
- int SrcReg = MI->getOperand(2).getReg();
- assert(Hexagon::PredRegsRegClass.contains(SrcReg) &&
- "Not a predicate register");
- if (!TII->isValidOffset(Hexagon::S2_storeri_io, Offset)) {
- if (!TII->isValidOffset(Hexagon::A2_addi, Offset)) {
- BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::CONST32_Int_Real),
- HEXAGON_RESERVED_REG_1).addImm(Offset);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::A2_add),
- HEXAGON_RESERVED_REG_1)
- .addReg(FP).addReg(HEXAGON_RESERVED_REG_1);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::C2_tfrpr),
- HEXAGON_RESERVED_REG_2).addReg(SrcReg);
- BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::S2_storeri_io))
- .addReg(HEXAGON_RESERVED_REG_1)
- .addImm(0).addReg(HEXAGON_RESERVED_REG_2);
- } else {
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::A2_addi),
- HEXAGON_RESERVED_REG_1).addReg(FP).addImm(Offset);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::C2_tfrpr),
- HEXAGON_RESERVED_REG_2).addReg(SrcReg);
- BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::S2_storeri_io))
- .addReg(HEXAGON_RESERVED_REG_1)
- .addImm(0)
- .addReg(HEXAGON_RESERVED_REG_2);
- }
- } else {
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::C2_tfrpr),
- HEXAGON_RESERVED_REG_2).addReg(SrcReg);
- BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::S2_storeri_io)).
- addReg(FP).addImm(Offset).addReg(HEXAGON_RESERVED_REG_2);
- }
- MII = MBB->erase(MI);
- --MII;
- } else if (Opc == Hexagon::LDriw_pred) {
- // DstReg = LDriw_pred [R30], ofst.
- int DstReg = MI->getOperand(0).getReg();
- assert(Hexagon::PredRegsRegClass.contains(DstReg) &&
- "Not a predicate register");
- unsigned FP = MI->getOperand(1).getReg();
- assert(FP == QST.getRegisterInfo()->getFrameRegister() &&
- "Not a Frame Pointer, Nor a Spill Slot");
- assert(MI->getOperand(2).isImm() && "Not an offset");
- int Offset = MI->getOperand(2).getImm();
- if (!TII->isValidOffset(Hexagon::L2_loadri_io, Offset)) {
- if (!TII->isValidOffset(Hexagon::A2_addi, Offset)) {
- BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::CONST32_Int_Real),
- HEXAGON_RESERVED_REG_1).addImm(Offset);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::A2_add),
- HEXAGON_RESERVED_REG_1)
- .addReg(FP)
- .addReg(HEXAGON_RESERVED_REG_1);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::L2_loadri_io),
- HEXAGON_RESERVED_REG_2)
- .addReg(HEXAGON_RESERVED_REG_1)
- .addImm(0);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::C2_tfrrp),
- DstReg).addReg(HEXAGON_RESERVED_REG_2);
- } else {
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::A2_addi),
- HEXAGON_RESERVED_REG_1).addReg(FP).addImm(Offset);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::L2_loadri_io),
- HEXAGON_RESERVED_REG_2)
- .addReg(HEXAGON_RESERVED_REG_1)
- .addImm(0);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::C2_tfrrp),
- DstReg).addReg(HEXAGON_RESERVED_REG_2);
- }
- } else {
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::L2_loadri_io),
- HEXAGON_RESERVED_REG_2).addReg(FP).addImm(Offset);
- BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::C2_tfrrp),
- DstReg).addReg(HEXAGON_RESERVED_REG_2);
- }
- MII = MBB->erase(MI);
- --MII;
- }
- }
- }
-
- return true;
-}
-
-}
-
-//===----------------------------------------------------------------------===//
-// Public Constructor Functions
-//===----------------------------------------------------------------------===//
-
-static void initializePassOnce(PassRegistry &Registry) {
- const char *Name = "Hexagon Expand Predicate Spill Code";
- PassInfo *PI = new PassInfo(Name, "hexagon-spill-pred",
- &HexagonExpandPredSpillCode::ID,
- nullptr, false, false);
- Registry.registerPass(*PI, true);
-}
-
-void llvm::initializeHexagonExpandPredSpillCodePass(PassRegistry &Registry) {
- CALL_ONCE_INITIALIZATION(initializePassOnce)
-}
-
-FunctionPass*
-llvm::createHexagonExpandPredSpillCode() {
- return new HexagonExpandPredSpillCode();
-}
diff --git a/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
index d0c7f9c8960f..3de817cc8fb6 100644
--- a/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
+++ b/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
@@ -45,6 +45,11 @@ namespace {
bool runOnMachineFunction(MachineFunction &MF) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return "Hexagon Hardware Loop Fixup";
}
@@ -77,14 +82,16 @@ FunctionPass *llvm::createHexagonFixupHwLoops() {
}
/// \brief Returns true if the instruction is a hardware loop instruction.
-static bool isHardwareLoop(const MachineInstr *MI) {
- return MI->getOpcode() == Hexagon::J2_loop0r ||
- MI->getOpcode() == Hexagon::J2_loop0i ||
- MI->getOpcode() == Hexagon::J2_loop1r ||
- MI->getOpcode() == Hexagon::J2_loop1i;
+static bool isHardwareLoop(const MachineInstr &MI) {
+ return MI.getOpcode() == Hexagon::J2_loop0r ||
+ MI.getOpcode() == Hexagon::J2_loop0i ||
+ MI.getOpcode() == Hexagon::J2_loop1r ||
+ MI.getOpcode() == Hexagon::J2_loop1i;
}
bool HexagonFixupHwLoops::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
return fixupLoopInstrs(MF);
}
@@ -123,7 +130,6 @@ bool HexagonFixupHwLoops::fixupLoopInstrs(MachineFunction &MF) {
// Second pass - check each loop instruction to see if it needs to be
// converted.
- InstOffset = 0;
bool Changed = false;
for (MachineBasicBlock &MBB : MF) {
InstOffset = BlockToInstOffset[&MBB];
@@ -137,7 +143,7 @@ bool HexagonFixupHwLoops::fixupLoopInstrs(MachineFunction &MF) {
++MII;
continue;
}
- if (isHardwareLoop(MII)) {
+ if (isHardwareLoop(*MII)) {
assert(MII->getOperand(0).isMBB() &&
"Expect a basic block as loop operand");
int diff = InstOffset - BlockToInstOffset[MII->getOperand(0).getMBB()];
diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 7a52a1c9eaec..25402147bf53 100644
--- a/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -10,8 +10,8 @@
#define DEBUG_TYPE "hexagon-pei"
+#include "HexagonBlockRanges.h"
#include "HexagonFrameLowering.h"
-#include "Hexagon.h"
#include "HexagonInstrInfo.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonRegisterInfo.h"
@@ -19,12 +19,11 @@
#include "HexagonTargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PostOrderIterator.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -126,8 +125,7 @@ using namespace llvm;
static cl::opt<bool> DisableDeallocRet("disable-hexagon-dealloc-ret",
cl::Hidden, cl::desc("Disable Dealloc Return for Hexagon target"));
-
-static cl::opt<int> NumberScavengerSlots("number-scavenger-slots",
+static cl::opt<unsigned> NumberScavengerSlots("number-scavenger-slots",
cl::Hidden, cl::desc("Set the number of scavenger slots"), cl::init(2),
cl::ZeroOrMore);
@@ -139,6 +137,10 @@ static cl::opt<int> SpillFuncThresholdOs("spill-func-threshold-Os",
cl::Hidden, cl::desc("Specify Os spill func threshold"),
cl::init(1), cl::ZeroOrMore);
+static cl::opt<bool> EnableStackOVFSanitizer("enable-stackovf-sanitizer",
+ cl::Hidden, cl::desc("Enable runtime checks for stack overflow."),
+ cl::init(false), cl::ZeroOrMore);
+
static cl::opt<bool> EnableShrinkWrapping("hexagon-shrink-frame",
cl::init(true), cl::Hidden, cl::ZeroOrMore,
cl::desc("Enable stack frame shrink wrapping"));
@@ -150,6 +152,9 @@ static cl::opt<unsigned> ShrinkLimit("shrink-frame-limit", cl::init(UINT_MAX),
static cl::opt<bool> UseAllocframe("use-allocframe", cl::init(true),
cl::Hidden, cl::desc("Use allocframe more conservatively"));
+static cl::opt<bool> OptimizeSpillSlots("hexagon-opt-spill", cl::Hidden,
+ cl::init(true), cl::desc("Optimize spill slots"));
+
namespace llvm {
void initializeHexagonCallFrameInformationPass(PassRegistry&);
@@ -165,6 +170,10 @@ namespace {
initializeHexagonCallFrameInformationPass(PR);
}
bool runOnMachineFunction(MachineFunction &MF) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
};
char HexagonCallFrameInformation::ID = 0;
@@ -213,8 +222,8 @@ namespace {
/// Returns the callee saved register with the largest id in the vector.
unsigned getMaxCalleeSavedReg(const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo &TRI) {
- assert(Hexagon::R1 > 0 &&
- "Assume physical registers are encoded as positive integers");
+ static_assert(Hexagon::R1 > 0,
+ "Assume physical registers are encoded as positive integers");
if (CSI.empty())
return 0;
@@ -229,7 +238,8 @@ namespace {
/// Checks if the basic block contains any instruction that needs a stack
/// frame to be already in place.
- bool needsStackFrame(const MachineBasicBlock &MBB, const BitVector &CSR) {
+ bool needsStackFrame(const MachineBasicBlock &MBB, const BitVector &CSR,
+ const HexagonRegisterInfo &HRI) {
for (auto &I : MBB) {
const MachineInstr *MI = &I;
if (MI->isCall())
@@ -258,8 +268,9 @@ namespace {
// a stack slot.
if (TargetRegisterInfo::isVirtualRegister(R))
return true;
- if (CSR[R])
- return true;
+ for (MCSubRegIterator S(R, &HRI, true); S.isValid(); ++S)
+ if (CSR[*S])
+ return true;
}
}
return false;
@@ -280,6 +291,40 @@ namespace {
return true;
return false;
}
+
+ /// Returns the "return" instruction from this block, or nullptr if there
+ /// isn't any.
+ MachineInstr *getReturn(MachineBasicBlock &MBB) {
+ for (auto &I : MBB)
+ if (I.isReturn())
+ return &I;
+ return nullptr;
+ }
+
+ bool isRestoreCall(unsigned Opc) {
+ switch (Opc) {
+ case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
+ case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
+ case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4:
+ case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC:
+ return true;
+ }
+ return false;
+ }
+
+ inline bool isOptNone(const MachineFunction &MF) {
+ return MF.getFunction()->hasFnAttribute(Attribute::OptimizeNone) ||
+ MF.getTarget().getOptLevel() == CodeGenOpt::None;
+ }
+
+ inline bool isOptSize(const MachineFunction &MF) {
+ const Function &F = *MF.getFunction();
+ return F.optForSize() && !F.optForMinSize();
+ }
+
+ inline bool isMinSize(const MachineFunction &MF) {
+ return MF.getFunction()->optForMinSize();
+ }
}
@@ -330,10 +375,11 @@ void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,
SmallVector<MachineBasicBlock*,16> SFBlocks;
BitVector CSR(Hexagon::NUM_TARGET_REGS);
for (const MCPhysReg *P = HRI.getCalleeSavedRegs(&MF); *P; ++P)
- CSR[*P] = true;
+ for (MCSubRegIterator S(*P, &HRI, true); S.isValid(); ++S)
+ CSR[*S] = true;
for (auto &I : MF)
- if (needsStackFrame(I, CSR))
+ if (needsStackFrame(I, CSR, HRI))
SFBlocks.push_back(&I);
DEBUG({
@@ -386,6 +432,7 @@ void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,
EpilogB = PDomB;
}
+
/// Perform most of the PEI work here:
/// - saving/restoring of the callee-saved registers,
/// - stack frame creation and destruction.
@@ -396,7 +443,6 @@ void HexagonFrameLowering::emitPrologue(MachineFunction &MF,
auto &HST = static_cast<const HexagonSubtarget&>(MF.getSubtarget());
auto &HRI = *HST.getRegisterInfo();
- assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
MachineFrameInfo *MFI = MF.getFrameInfo();
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
@@ -404,8 +450,9 @@ void HexagonFrameLowering::emitPrologue(MachineFunction &MF,
if (EnableShrinkWrapping)
findShrunkPrologEpilog(MF, PrologB, EpilogB);
- insertCSRSpillsInBlock(*PrologB, CSI, HRI);
- insertPrologueInBlock(*PrologB);
+ bool PrologueStubs = false;
+ insertCSRSpillsInBlock(*PrologB, CSI, HRI, PrologueStubs);
+ insertPrologueInBlock(*PrologB, PrologueStubs);
if (EpilogB) {
insertCSRRestoresInBlock(*EpilogB, CSI, HRI);
@@ -418,11 +465,34 @@ void HexagonFrameLowering::emitPrologue(MachineFunction &MF,
for (auto &B : MF)
if (B.isReturnBlock())
insertEpilogueInBlock(B);
+
+ for (auto &B : MF) {
+ if (B.empty())
+ continue;
+ MachineInstr *RetI = getReturn(B);
+ if (!RetI || isRestoreCall(RetI->getOpcode()))
+ continue;
+ for (auto &R : CSI)
+ RetI->addOperand(MachineOperand::CreateReg(R.getReg(), false, true));
+ }
+ }
+
+ if (EpilogB) {
+ // If there is an epilog block, it may not have a return instruction.
+ // In such case, we need to add the callee-saved registers as live-ins
+ // in all blocks on all paths from the epilog to any return block.
+ unsigned MaxBN = 0;
+ for (auto &B : MF)
+ if (B.getNumber() >= 0)
+ MaxBN = std::max(MaxBN, unsigned(B.getNumber()));
+ BitVector DoneT(MaxBN+1), DoneF(MaxBN+1), Path(MaxBN+1);
+ updateExitPaths(*EpilogB, EpilogB, DoneT, DoneF, Path);
}
}
-void HexagonFrameLowering::insertPrologueInBlock(MachineBasicBlock &MBB) const {
+void HexagonFrameLowering::insertPrologueInBlock(MachineBasicBlock &MBB,
+ bool PrologueStubs) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo *MFI = MF.getFrameInfo();
auto &HST = MF.getSubtarget<HexagonSubtarget>();
@@ -436,10 +506,10 @@ void HexagonFrameLowering::insertPrologueInBlock(MachineBasicBlock &MBB) const {
// Get the number of bytes to allocate from the FrameInfo.
unsigned FrameSize = MFI->getStackSize();
// Round up the max call frame size to the max alignment on the stack.
- unsigned MaxCFA = RoundUpToAlignment(MFI->getMaxCallFrameSize(), MaxAlign);
+ unsigned MaxCFA = alignTo(MFI->getMaxCallFrameSize(), MaxAlign);
MFI->setMaxCallFrameSize(MaxCFA);
- FrameSize = MaxCFA + RoundUpToAlignment(FrameSize, MaxAlign);
+ FrameSize = MaxCFA + alignTo(FrameSize, MaxAlign);
MFI->setStackSize(FrameSize);
bool AlignStack = (MaxAlign > getStackAlignment());
@@ -497,6 +567,13 @@ void HexagonFrameLowering::insertPrologueInBlock(MachineBasicBlock &MBB) const {
.addReg(SP)
.addImm(-int64_t(MaxAlign));
}
+
+ // If the stack-checking is enabled, and we spilled the callee-saved
+ // registers inline (i.e. did not use a spill function), then call
+ // the stack checker directly.
+ if (EnableStackOVFSanitizer && !PrologueStubs)
+ BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::CALLstk))
+ .addExternalSymbol("__runtime_stack_check");
}
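
(Aside, not part of the diff: llvm::alignTo rounds its first argument up to a
multiple of the second, so the prologue computes the frame size with plain
round-up arithmetic. A standalone numeric check with a local stand-in for
alignTo; the 24/40/16 figures are made up.)

#include <cassert>

static unsigned alignTo(unsigned X, unsigned A) {
  return (X + A - 1) / A * A;       // round X up to a multiple of A
}

int main() {
  unsigned MaxAlign = 16;
  unsigned MaxCFA = alignTo(24, MaxAlign);             // 24 -> 32
  unsigned FrameSize = MaxCFA + alignTo(40, MaxAlign); // 32 + 48
  assert(MaxCFA == 32 && FrameSize == 80);
}
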
void HexagonFrameLowering::insertEpilogueInBlock(MachineBasicBlock &MBB) const {
@@ -509,13 +586,7 @@ void HexagonFrameLowering::insertEpilogueInBlock(MachineBasicBlock &MBB) const {
auto &HRI = *HST.getRegisterInfo();
unsigned SP = HRI.getStackRegister();
- MachineInstr *RetI = nullptr;
- for (auto &I : MBB) {
- if (!I.isReturn())
- continue;
- RetI = &I;
- break;
- }
+ MachineInstr *RetI = getReturn(MBB);
unsigned RetOpc = RetI ? RetI->getOpcode() : 0;
MachineBasicBlock::iterator InsertPt = MBB.getFirstTerminator();
@@ -536,7 +607,8 @@ void HexagonFrameLowering::insertEpilogueInBlock(MachineBasicBlock &MBB) const {
// Check for RESTORE_DEALLOC_RET* tail call. Don't emit an extra dealloc-
// frame instruction if we encounter it.
- if (RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4) {
+ if (RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4 ||
+ RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC) {
MachineBasicBlock::iterator It = RetI;
++It;
// Delete all instructions after the RESTORE (except labels).
@@ -556,7 +628,8 @@ void HexagonFrameLowering::insertEpilogueInBlock(MachineBasicBlock &MBB) const {
if (!MBB.empty() && InsertPt != MBB.begin()) {
MachineBasicBlock::iterator PrevIt = std::prev(InsertPt);
unsigned COpc = PrevIt->getOpcode();
- if (COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4)
+ if (COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 ||
+ COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC)
NeedsDeallocframe = false;
}
@@ -572,11 +645,56 @@ void HexagonFrameLowering::insertEpilogueInBlock(MachineBasicBlock &MBB) const {
unsigned NewOpc = Hexagon::L4_return;
MachineInstr *NewI = BuildMI(MBB, RetI, DL, HII.get(NewOpc));
// Transfer the function live-out registers.
- NewI->copyImplicitOps(MF, RetI);
+ NewI->copyImplicitOps(MF, *RetI);
MBB.erase(RetI);
}
+bool HexagonFrameLowering::updateExitPaths(MachineBasicBlock &MBB,
+ MachineBasicBlock *RestoreB, BitVector &DoneT, BitVector &DoneF,
+ BitVector &Path) const {
+ assert(MBB.getNumber() >= 0);
+ unsigned BN = MBB.getNumber();
+ if (Path[BN] || DoneF[BN])
+ return false;
+ if (DoneT[BN])
+ return true;
+
+ auto &CSI = MBB.getParent()->getFrameInfo()->getCalleeSavedInfo();
+
+ Path[BN] = true;
+ bool ReachedExit = false;
+ for (auto &SB : MBB.successors())
+ ReachedExit |= updateExitPaths(*SB, RestoreB, DoneT, DoneF, Path);
+
+ if (!MBB.empty() && MBB.back().isReturn()) {
+ // Add implicit uses of all callee-saved registers to the reached
+ // return instructions. This is to prevent the anti-dependency breaker
+ // from renaming these registers.
+ MachineInstr &RetI = MBB.back();
+ if (!isRestoreCall(RetI.getOpcode()))
+ for (auto &R : CSI)
+ RetI.addOperand(MachineOperand::CreateReg(R.getReg(), false, true));
+ ReachedExit = true;
+ }
+
+ // We don't want to add unnecessary live-ins to the restore block: since
+ // the callee-saved registers are being defined in it, the entry of the
+ // restore block cannot be on the path from the definitions to any exit.
+ if (ReachedExit && &MBB != RestoreB) {
+ for (auto &R : CSI)
+ if (!MBB.isLiveIn(R.getReg()))
+ MBB.addLiveIn(R.getReg());
+ DoneT[BN] = true;
+ }
+ if (!ReachedExit)
+ DoneF[BN] = true;
+
+ Path[BN] = false;
+ return ReachedExit;
+}
+
+
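
(Aside, not part of the diff: updateExitPaths is a memoized DFS — Path marks
the blocks on the current recursion stack so cycles terminate, while DoneT and
DoneF cache whether a block reaches a return. A self-contained sketch of the
same traversal on an adjacency list; the RestoreB special case and the live-in
updates are dropped here.)

#include <cassert>
#include <vector>

using Graph = std::vector<std::vector<int>>;

static bool reachesExit(const Graph &G, const std::vector<bool> &IsExit,
                        int N, std::vector<char> &DoneT,
                        std::vector<char> &DoneF, std::vector<char> &Path) {
  if (Path[N] || DoneF[N])          // on the stack, or known not to reach
    return false;
  if (DoneT[N])                     // known to reach an exit
    return true;
  Path[N] = true;
  bool Reached = IsExit[N];
  for (int S : G[N])
    Reached |= reachesExit(G, IsExit, S, DoneT, DoneF, Path);
  (Reached ? DoneT : DoneF)[N] = true;
  Path[N] = false;
  return Reached;
}

int main() {
  Graph G{{1, 2}, {2}, {}};                 // 0 -> {1,2}, 1 -> {2}
  std::vector<bool> IsExit{false, false, true};
  std::vector<char> DoneT(3), DoneF(3), Path(3);
  assert(reachesExit(G, IsExit, 0, DoneT, DoneF, Path));
}
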
namespace {
bool IsAllocFrame(MachineBasicBlock::const_iterator It) {
if (!It->isBundle())
@@ -611,7 +729,7 @@ void HexagonFrameLowering::insertCFIInstructions(MachineFunction &MF) const {
void HexagonFrameLowering::insertCFIInstructionsAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator At) const {
MachineFunction &MF = *MBB.getParent();
- MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineModuleInfo &MMI = MF.getMMI();
auto &HST = MF.getSubtarget<HexagonSubtarget>();
auto &HII = *HST.getInstrInfo();
@@ -624,8 +742,9 @@ void HexagonFrameLowering::insertCFIInstructionsAt(MachineBasicBlock &MBB,
const MCInstrDesc &CFID = HII.get(TargetOpcode::CFI_INSTRUCTION);
MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
+ bool HasFP = hasFP(MF);
- if (hasFP(MF)) {
+ if (HasFP) {
unsigned DwFPReg = HRI.getDwarfRegNum(HRI.getFrameRegister(), true);
unsigned DwRAReg = HRI.getDwarfRegNum(HRI.getRARegister(), true);
@@ -663,7 +782,7 @@ void HexagonFrameLowering::insertCFIInstructionsAt(MachineBasicBlock &MBB,
Hexagon::NoRegister
};
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
for (unsigned i = 0; RegsToMove[i] != Hexagon::NoRegister; ++i) {
unsigned Reg = RegsToMove[i];
@@ -674,9 +793,22 @@ void HexagonFrameLowering::insertCFIInstructionsAt(MachineBasicBlock &MBB,
if (F == CSI.end())
continue;
+ int64_t Offset;
+ if (HasFP) {
+ // If the function has a frame pointer (i.e. has an allocframe),
+ // then the CFA has been defined in terms of FP. Any offsets in
+ // the following CFI instructions have to be defined relative
+ // to FP, which points to the bottom of the stack frame.
+ // The function getFrameIndexReference can still choose to use SP
+ // for the offset calculation, so we cannot simply call it here.
+ // Instead, get the offset (relative to the FP) directly.
+ Offset = MFI.getObjectOffset(F->getFrameIdx());
+ } else {
+ unsigned FrameReg;
+ Offset = getFrameIndexReference(MF, F->getFrameIdx(), FrameReg);
+ }
// Subtract 8 to make room for R30 and R31, which are added above.
- unsigned FrameReg;
- int64_t Offset = getFrameIndexReference(MF, F->getFrameIdx(), FrameReg) - 8;
+ Offset -= 8;
if (Reg < Hexagon::D0 || Reg > Hexagon::D15) {
unsigned DwarfReg = HRI.getDwarfRegNum(Reg, true);
@@ -734,7 +866,7 @@ bool HexagonFrameLowering::hasFP(const MachineFunction &MF) const {
return true;
if (MFI.getStackSize() > 0) {
- if (UseAllocframe)
+ if (EnableStackOVFSanitizer || UseAllocframe)
return true;
}
@@ -752,8 +884,8 @@ enum SpillKind {
SK_FromMemTailcall
};
-static const char *
-getSpillFunctionFor(unsigned MaxReg, SpillKind SpillType) {
+static const char *getSpillFunctionFor(unsigned MaxReg, SpillKind SpillType,
+ bool Stkchk = false) {
const char * V4SpillToMemoryFunctions[] = {
"__save_r16_through_r17",
"__save_r16_through_r19",
@@ -762,6 +894,14 @@ getSpillFunctionFor(unsigned MaxReg, SpillKind SpillType) {
"__save_r16_through_r25",
"__save_r16_through_r27" };
+ const char * V4SpillToMemoryStkchkFunctions[] = {
+ "__save_r16_through_r17_stkchk",
+ "__save_r16_through_r19_stkchk",
+ "__save_r16_through_r21_stkchk",
+ "__save_r16_through_r23_stkchk",
+ "__save_r16_through_r25_stkchk",
+ "__save_r16_through_r27_stkchk" };
+
const char * V4SpillFromMemoryFunctions[] = {
"__restore_r16_through_r17_and_deallocframe",
"__restore_r16_through_r19_and_deallocframe",
@@ -783,7 +923,8 @@ getSpillFunctionFor(unsigned MaxReg, SpillKind SpillType) {
switch(SpillType) {
case SK_ToMem:
- SpillFunc = V4SpillToMemoryFunctions;
+ SpillFunc = Stkchk ? V4SpillToMemoryStkchkFunctions
+ : V4SpillToMemoryFunctions;
break;
case SK_FromMem:
SpillFunc = V4SpillFromMemoryFunctions;
@@ -814,32 +955,20 @@ getSpillFunctionFor(unsigned MaxReg, SpillKind SpillType) {
return 0;
}
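
(Aside, not part of the diff: the runtime spill routines save the contiguous
range r16..rN, so the routine name is fully determined by the largest
callee-saved register. A sketch of that selection against the
V4SpillToMemoryFunctions table above — the index arithmetic is an assumption
standing in for the switch on MaxReg, which is elided from the hunk.)

#include <cassert>
#include <string>

static std::string spillFuncFor(unsigned MaxReg) { // MaxReg in {17,19,...,27}
  static const char *Names[] = {
      "__save_r16_through_r17", "__save_r16_through_r19",
      "__save_r16_through_r21", "__save_r16_through_r23",
      "__save_r16_through_r25", "__save_r16_through_r27"};
  assert(MaxReg >= 17 && MaxReg <= 27 && (MaxReg & 1) == 1);
  return Names[(MaxReg - 17) / 2];
}

int main() {
  assert(spillFuncFor(21) == "__save_r16_through_r21");
}
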
-/// Adds all callee-saved registers up to MaxReg to the instruction.
-static void addCalleeSaveRegistersAsImpOperand(MachineInstr *Inst,
- unsigned MaxReg, bool IsDef) {
- // Add the callee-saved registers as implicit uses.
- for (unsigned R = Hexagon::R16; R <= MaxReg; ++R) {
- MachineOperand ImpUse = MachineOperand::CreateReg(R, IsDef, true);
- Inst->addOperand(ImpUse);
- }
-}
-
int HexagonFrameLowering::getFrameIndexReference(const MachineFunction &MF,
int FI, unsigned &FrameReg) const {
auto &MFI = *MF.getFrameInfo();
auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
- // Large parts of this code are shared with HRI::eliminateFrameIndex.
int Offset = MFI.getObjectOffset(FI);
bool HasAlloca = MFI.hasVarSizedObjects();
bool HasExtraAlign = HRI.needsStackRealignment(MF);
bool NoOpt = MF.getTarget().getOptLevel() == CodeGenOpt::None;
unsigned SP = HRI.getStackRegister(), FP = HRI.getFrameRegister();
- unsigned AP = 0;
- if (const MachineInstr *AI = getAlignaInstr(MF))
- AP = AI->getOperand(0).getReg();
+ auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
+ unsigned AP = HMFI.getStackAlignBasePhysReg();
unsigned FrameSize = MFI.getStackSize();
bool UseFP = false, UseAP = false; // Default: use SP (except at -O0).
@@ -912,24 +1041,40 @@ int HexagonFrameLowering::getFrameIndexReference(const MachineFunction &MF,
bool HexagonFrameLowering::insertCSRSpillsInBlock(MachineBasicBlock &MBB,
- const CSIVect &CSI, const HexagonRegisterInfo &HRI) const {
+ const CSIVect &CSI, const HexagonRegisterInfo &HRI,
+ bool &PrologueStubs) const {
if (CSI.empty())
return true;
MachineBasicBlock::iterator MI = MBB.begin();
+ PrologueStubs = false;
MachineFunction &MF = *MBB.getParent();
auto &HII = *MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
if (useSpillFunction(MF, CSI)) {
+ PrologueStubs = true;
unsigned MaxReg = getMaxCalleeSavedReg(CSI, HRI);
- const char *SpillFun = getSpillFunctionFor(MaxReg, SK_ToMem);
+ bool StkOvrFlowEnabled = EnableStackOVFSanitizer;
+ const char *SpillFun = getSpillFunctionFor(MaxReg, SK_ToMem,
+ StkOvrFlowEnabled);
+ auto &HTM = static_cast<const HexagonTargetMachine&>(MF.getTarget());
+ bool IsPIC = HTM.isPositionIndependent();
+
// Call spill function.
DebugLoc DL = MI != MBB.end() ? MI->getDebugLoc() : DebugLoc();
+ unsigned SpillOpc;
+ if (StkOvrFlowEnabled)
+ SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_PIC
+ : Hexagon::SAVE_REGISTERS_CALL_V4STK;
+ else
+ SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_PIC
+ : Hexagon::SAVE_REGISTERS_CALL_V4;
+
MachineInstr *SaveRegsCall =
- BuildMI(MBB, MI, DL, HII.get(Hexagon::SAVE_REGISTERS_CALL_V4))
+ BuildMI(MBB, MI, DL, HII.get(SpillOpc))
.addExternalSymbol(SpillFun);
// Add callee-saved registers as use.
- addCalleeSaveRegistersAsImpOperand(SaveRegsCall, MaxReg, false);
+ addCalleeSaveRegistersAsImpOperand(SaveRegsCall, CSI, false, true);
// Add live in registers.
for (unsigned I = 0; I < CSI.size(); ++I)
MBB.addLiveIn(CSI[I].getReg());
@@ -966,6 +1111,8 @@ bool HexagonFrameLowering::insertCSRRestoresInBlock(MachineBasicBlock &MBB,
unsigned MaxR = getMaxCalleeSavedReg(CSI, HRI);
SpillKind Kind = HasTC ? SK_FromMemTailcall : SK_FromMem;
const char *RestoreFn = getSpillFunctionFor(MaxR, Kind);
+ auto &HTM = static_cast<const HexagonTargetMachine&>(MF.getTarget());
+ bool IsPIC = HTM.isPositionIndependent();
// Call spill function.
DebugLoc DL = MI != MBB.end() ? MI->getDebugLoc()
@@ -973,20 +1120,22 @@ bool HexagonFrameLowering::insertCSRRestoresInBlock(MachineBasicBlock &MBB,
MachineInstr *DeallocCall = nullptr;
if (HasTC) {
- unsigned ROpc = Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4;
+ unsigned ROpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC
+ : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4;
DeallocCall = BuildMI(MBB, MI, DL, HII.get(ROpc))
.addExternalSymbol(RestoreFn);
} else {
// The block has a return.
MachineBasicBlock::iterator It = MBB.getFirstTerminator();
assert(It->isReturn() && std::next(It) == MBB.end());
- unsigned ROpc = Hexagon::RESTORE_DEALLOC_RET_JMP_V4;
+ unsigned ROpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC
+ : Hexagon::RESTORE_DEALLOC_RET_JMP_V4;
DeallocCall = BuildMI(MBB, It, DL, HII.get(ROpc))
.addExternalSymbol(RestoreFn);
// Transfer the function live-out registers.
- DeallocCall->copyImplicitOps(MF, It);
+ DeallocCall->copyImplicitOps(MF, *It);
}
- addCalleeSaveRegistersAsImpOperand(DeallocCall, MaxR, true);
+ addCalleeSaveRegistersAsImpOperand(DeallocCall, CSI, true, false);
return true;
}
@@ -996,18 +1145,19 @@ bool HexagonFrameLowering::insertCSRRestoresInBlock(MachineBasicBlock &MBB,
int FI = CSI[i].getFrameIdx();
HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, &HRI);
}
+
return true;
}
-
-void HexagonFrameLowering::eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const {
+MachineBasicBlock::iterator HexagonFrameLowering::eliminateCallFramePseudoInstr(
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
MachineInstr &MI = *I;
unsigned Opc = MI.getOpcode();
(void)Opc; // Silence compiler warning.
assert((Opc == Hexagon::ADJCALLSTACKDOWN || Opc == Hexagon::ADJCALLSTACKUP) &&
"Cannot handle this call frame pseudo instruction");
- MBB.erase(I);
+ return MBB.erase(I);
}
@@ -1025,14 +1175,16 @@ void HexagonFrameLowering::processFunctionBeforeFrameFinalized(
return;
unsigned LFS = MFI->getLocalFrameSize();
- int Offset = -LFS;
for (int i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
if (!MFI->isSpillSlotObjectIndex(i) || MFI->isDeadObjectIndex(i))
continue;
- int S = MFI->getObjectSize(i);
- LFS += S;
- Offset -= S;
- MFI->mapLocalFrameObject(i, Offset);
+ unsigned S = MFI->getObjectSize(i);
+ // Reduce the alignment to at most 8. This will require unaligned vector
+ // stores if they happen here.
+ unsigned A = std::max(MFI->getObjectAlignment(i), 8U);
+ MFI->setObjectAlignment(i, 8);
+ LFS = alignTo(LFS+S, A);
+ MFI->mapLocalFrameObject(i, -LFS);
}
MFI->setLocalFrameSize(LFS);
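
(Aside, not part of the diff: the rewritten loop lays spill slots out
top-down — the running size LFS grows by the slot size, is rounded up to the
slot's alignment (never less than 8), and the slot lands at offset -LFS. A
standalone walk-through with three invented slots:)

#include <algorithm>
#include <cassert>
#include <vector>

static unsigned alignTo(unsigned X, unsigned A) { return (X + A - 1) / A * A; }

int main() {
  struct Slot { unsigned Size, Align; };
  std::vector<Slot> Slots{{4, 4}, {8, 8}, {64, 64}};
  std::vector<int> Off;
  unsigned LFS = 0;
  for (auto &S : Slots) {
    unsigned A = std::max(S.Align, 8u);   // offsets stay at least 8-aligned
    LFS = alignTo(LFS + S.Size, A);
    Off.push_back(-int(LFS));             // mapLocalFrameObject(i, -LFS)
  }
  assert(Off[0] == -8 && Off[1] == -16 && Off[2] == -128);
}
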
@@ -1041,142 +1193,35 @@ void HexagonFrameLowering::processFunctionBeforeFrameFinalized(
if (A == 0)
MFI->setLocalFrameMaxAlign(8);
MFI->setUseLocalStackAllocationBlock(true);
+
+ // Set the physical aligned-stack base address register.
+ unsigned AP = 0;
+ if (const MachineInstr *AI = getAlignaInstr(MF))
+ AP = AI->getOperand(0).getReg();
+ auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
+ HMFI.setStackAlignBasePhysReg(AP);
}
-/// Returns true if there is no caller saved registers available.
+/// Returns true if there are no caller-saved registers available in class RC.
static bool needToReserveScavengingSpillSlots(MachineFunction &MF,
- const HexagonRegisterInfo &HRI) {
+ const HexagonRegisterInfo &HRI, const TargetRegisterClass *RC) {
MachineRegisterInfo &MRI = MF.getRegInfo();
- const MCPhysReg *CallerSavedRegs = HRI.getCallerSavedRegs(&MF);
- // Check for an unused caller-saved register.
- for ( ; *CallerSavedRegs; ++CallerSavedRegs) {
- MCPhysReg FreeReg = *CallerSavedRegs;
- if (!MRI.reg_nodbg_empty(FreeReg))
- continue;
-
- // Check aliased register usage.
- bool IsCurrentRegUsed = false;
- for (MCRegAliasIterator AI(FreeReg, &HRI, false); AI.isValid(); ++AI)
- if (!MRI.reg_nodbg_empty(*AI)) {
- IsCurrentRegUsed = true;
- break;
- }
- if (IsCurrentRegUsed)
- continue;
- // Neither directly used nor used through an aliased register.
+ auto IsUsed = [&HRI,&MRI] (unsigned Reg) -> bool {
+ for (MCRegAliasIterator AI(Reg, &HRI, true); AI.isValid(); ++AI)
+ if (MRI.isPhysRegUsed(*AI))
+ return true;
return false;
- }
- // All caller-saved registers are used.
- return true;
-}
-
-
-/// Replaces the predicate spill code pseudo instructions by valid instructions.
-bool HexagonFrameLowering::replacePredRegPseudoSpillCode(MachineFunction &MF)
- const {
- auto &HST = static_cast<const HexagonSubtarget&>(MF.getSubtarget());
- auto &HII = *HST.getInstrInfo();
- MachineRegisterInfo &MRI = MF.getRegInfo();
- bool HasReplacedPseudoInst = false;
- // Replace predicate spill pseudo instructions by real code.
- // Loop over all of the basic blocks.
- for (MachineFunction::iterator MBBb = MF.begin(), MBBe = MF.end();
- MBBb != MBBe; ++MBBb) {
- MachineBasicBlock *MBB = &*MBBb;
- // Traverse the basic block.
- MachineBasicBlock::iterator NextII;
- for (MachineBasicBlock::iterator MII = MBB->begin(); MII != MBB->end();
- MII = NextII) {
- MachineInstr *MI = MII;
- NextII = std::next(MII);
- int Opc = MI->getOpcode();
- if (Opc == Hexagon::STriw_pred) {
- HasReplacedPseudoInst = true;
- // STriw_pred FI, 0, SrcReg;
- unsigned VirtReg = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
- unsigned SrcReg = MI->getOperand(2).getReg();
- bool IsOrigSrcRegKilled = MI->getOperand(2).isKill();
-
- assert(MI->getOperand(0).isFI() && "Expect a frame index");
- assert(Hexagon::PredRegsRegClass.contains(SrcReg) &&
- "Not a predicate register");
-
- // Insert transfer to general purpose register.
- // VirtReg = C2_tfrpr SrcPredReg
- BuildMI(*MBB, MII, MI->getDebugLoc(), HII.get(Hexagon::C2_tfrpr),
- VirtReg).addReg(SrcReg, getKillRegState(IsOrigSrcRegKilled));
-
- // Change instruction to S2_storeri_io.
- // S2_storeri_io FI, 0, VirtReg
- MI->setDesc(HII.get(Hexagon::S2_storeri_io));
- MI->getOperand(2).setReg(VirtReg);
- MI->getOperand(2).setIsKill();
-
- } else if (Opc == Hexagon::LDriw_pred) {
- // DstReg = LDriw_pred FI, 0
- MachineOperand &M0 = MI->getOperand(0);
- if (M0.isDead()) {
- MBB->erase(MII);
- continue;
- }
-
- unsigned VirtReg = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
- unsigned DestReg = MI->getOperand(0).getReg();
-
- assert(MI->getOperand(1).isFI() && "Expect a frame index");
- assert(Hexagon::PredRegsRegClass.contains(DestReg) &&
- "Not a predicate register");
-
- // Change instruction to L2_loadri_io.
- // VirtReg = L2_loadri_io FI, 0
- MI->setDesc(HII.get(Hexagon::L2_loadri_io));
- MI->getOperand(0).setReg(VirtReg);
-
- // Insert transfer to general purpose register.
- // DestReg = C2_tfrrp VirtReg
- const MCInstrDesc &D = HII.get(Hexagon::C2_tfrrp);
- BuildMI(*MBB, std::next(MII), MI->getDebugLoc(), D, DestReg)
- .addReg(VirtReg, getKillRegState(true));
- HasReplacedPseudoInst = true;
- }
- }
- }
- return HasReplacedPseudoInst;
-}
-
-
-void HexagonFrameLowering::determineCalleeSaves(MachineFunction &MF,
- BitVector &SavedRegs,
- RegScavenger *RS) const {
- TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
-
- auto &HST = static_cast<const HexagonSubtarget&>(MF.getSubtarget());
- auto &HRI = *HST.getRegisterInfo();
-
- bool HasEHReturn = MF.getInfo<HexagonMachineFunctionInfo>()->hasEHReturn();
-
- // If we have a function containing __builtin_eh_return we want to spill and
- // restore all callee saved registers. Pretend that they are used.
- if (HasEHReturn) {
- for (const MCPhysReg *CSRegs = HRI.getCalleeSavedRegs(&MF); *CSRegs;
- ++CSRegs)
- SavedRegs.set(*CSRegs);
- }
+ };
- const TargetRegisterClass &RC = Hexagon::IntRegsRegClass;
+ // Check for an unused caller-saved register. Callee-saved registers
+ // have become pristine by now.
+ for (const MCPhysReg *P = HRI.getCallerSavedRegs(&MF, RC); *P; ++P)
+ if (!IsUsed(*P))
+ return false;
- // Replace predicate register pseudo spill code.
- bool HasReplacedPseudoInst = replacePredRegPseudoSpillCode(MF);
-
-  // We need to reserve a spill slot if scavenging could potentially require
- // spilling a scavenged register.
- if (HasReplacedPseudoInst && needToReserveScavengingSpillSlots(MF, HRI)) {
- MachineFrameInfo *MFI = MF.getFrameInfo();
- for (int i=0; i < NumberScavengerSlots; i++)
- RS->addScavengingFrameIndex(
- MFI->CreateSpillStackObject(RC.getSize(), RC.getAlignment()));
- }
+ // All caller-saved registers are used.
+ return true;
}
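
(Aside, not part of the diff: the IsUsed lambda counts a register as used if
it or anything aliasing it is used — MCRegAliasIterator constructed with
IncludeSelf visits the register itself as well. A toy model of that test with
an invented alias table:)

#include <cassert>
#include <set>
#include <vector>

int main() {
  // Toy model: regs 0 and 1 alias the pair-register 2 (and themselves).
  std::vector<std::vector<unsigned>> Aliases{{0, 2}, {1, 2}, {2, 0, 1}};
  std::set<unsigned> Used{2};             // only the pair is marked used

  auto IsUsed = [&](unsigned Reg) {
    for (unsigned A : Aliases[Reg])       // Reg itself plus its aliases
      if (Used.count(A))
        return true;
    return false;
  };

  assert(IsUsed(0) && IsUsed(1));         // used through the aliasing pair
}
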
@@ -1327,6 +1372,811 @@ bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,
}
+bool HexagonFrameLowering::expandCopy(MachineBasicBlock &B,
+ MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
+ const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
+ MachineInstr *MI = &*It;
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned DstR = MI->getOperand(0).getReg();
+ unsigned SrcR = MI->getOperand(1).getReg();
+ if (!Hexagon::ModRegsRegClass.contains(DstR) ||
+ !Hexagon::ModRegsRegClass.contains(SrcR))
+ return false;
+
+ unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
+ BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), TmpR)
+ .addOperand(MI->getOperand(1));
+ BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), DstR)
+ .addReg(TmpR, RegState::Kill);
+
+ NewRegs.push_back(TmpR);
+ B.erase(It);
+ return true;
+}
+
+bool HexagonFrameLowering::expandStoreInt(MachineBasicBlock &B,
+ MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
+ const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
+ MachineInstr *MI = &*It;
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned Opc = MI->getOpcode();
+ unsigned SrcR = MI->getOperand(2).getReg();
+ bool IsKill = MI->getOperand(2).isKill();
+
+ assert(MI->getOperand(0).isFI() && "Expect a frame index");
+ int FI = MI->getOperand(0).getIndex();
+
+ // TmpR = C2_tfrpr SrcR if SrcR is a predicate register
+ // TmpR = A2_tfrcrr SrcR if SrcR is a modifier register
+ unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
+ unsigned TfrOpc = (Opc == Hexagon::STriw_pred) ? Hexagon::C2_tfrpr
+ : Hexagon::A2_tfrcrr;
+ BuildMI(B, It, DL, HII.get(TfrOpc), TmpR)
+ .addReg(SrcR, getKillRegState(IsKill));
+
+ // S2_storeri_io FI, 0, TmpR
+ BuildMI(B, It, DL, HII.get(Hexagon::S2_storeri_io))
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addReg(TmpR, RegState::Kill)
+ .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+
+ NewRegs.push_back(TmpR);
+ B.erase(It);
+ return true;
+}
+
+bool HexagonFrameLowering::expandLoadInt(MachineBasicBlock &B,
+ MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
+ const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
+ MachineInstr *MI = &*It;
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned Opc = MI->getOpcode();
+ unsigned DstR = MI->getOperand(0).getReg();
+
+ assert(MI->getOperand(1).isFI() && "Expect a frame index");
+ int FI = MI->getOperand(1).getIndex();
+
+ // TmpR = L2_loadri_io FI, 0
+ unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
+ BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR)
+ .addFrameIndex(FI)
+ .addImm(0)
+ .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+
+ // DstR = C2_tfrrp TmpR if DstR is a predicate register
+ // DstR = A2_tfrrcr TmpR if DstR is a modifier register
+ unsigned TfrOpc = (Opc == Hexagon::LDriw_pred) ? Hexagon::C2_tfrrp
+ : Hexagon::A2_tfrrcr;
+ BuildMI(B, It, DL, HII.get(TfrOpc), DstR)
+ .addReg(TmpR, RegState::Kill);
+
+ NewRegs.push_back(TmpR);
+ B.erase(It);
+ return true;
+}
+
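Editorial sketch: expandStoreInt and expandLoadInt above always pair a cross-register-file transfer with a plain 32-bit stack access, choosing the transfer by the pseudo's opcode. The snippet below is a standalone model of that pairing; the enum and the helper only mirror the opcode names in the diff and are not LLVM API.

#include <cassert>
#include <cstdio>

// Opcode names mirror the diff; the enum itself is a stand-in.
enum Opcode { STriw_pred, STriw_mod, LDriw_pred, LDriw_mod,
              C2_tfrpr, A2_tfrcrr, C2_tfrrp, A2_tfrrcr };

// Pick the transfer that moves the value through a scratch integer register.
Opcode transferFor(Opcode Spill) {
  switch (Spill) {
  case STriw_pred: return C2_tfrpr;   // predicate -> int, before S2_storeri_io
  case STriw_mod:  return A2_tfrcrr;  // modifier  -> int, before S2_storeri_io
  case LDriw_pred: return C2_tfrrp;   // int -> predicate, after L2_loadri_io
  case LDriw_mod:  return A2_tfrrcr;  // int -> modifier,  after L2_loadri_io
  default: assert(false && "not a pseudo spill opcode"); return C2_tfrpr;
  }
}

int main() {
  assert(transferFor(STriw_pred) == C2_tfrpr);
  assert(transferFor(LDriw_mod) == A2_tfrrcr);
  std::puts("ok");
}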
+
+bool HexagonFrameLowering::expandStoreVecPred(MachineBasicBlock &B,
+ MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
+ const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
+ auto &HST = B.getParent()->getSubtarget<HexagonSubtarget>();
+ MachineInstr *MI = &*It;
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned SrcR = MI->getOperand(2).getReg();
+ bool IsKill = MI->getOperand(2).isKill();
+
+ assert(MI->getOperand(0).isFI() && "Expect a frame index");
+ int FI = MI->getOperand(0).getIndex();
+
+ bool Is128B = HST.useHVXDblOps();
+ auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
+ : &Hexagon::VectorRegs128BRegClass;
+
+ // Insert transfer to general vector register.
+ // TmpR0 = A2_tfrsi 0x01010101
+ // TmpR1 = V6_vandqrt Qx, TmpR0
+ // store FI, 0, TmpR1
+ unsigned TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
+ unsigned TmpR1 = MRI.createVirtualRegister(RC);
+
+ BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
+ .addImm(0x01010101);
+
+ unsigned VandOpc = !Is128B ? Hexagon::V6_vandqrt : Hexagon::V6_vandqrt_128B;
+ BuildMI(B, It, DL, HII.get(VandOpc), TmpR1)
+ .addReg(SrcR, getKillRegState(IsKill))
+ .addReg(TmpR0, RegState::Kill);
+
+ auto *HRI = B.getParent()->getSubtarget<HexagonSubtarget>().getRegisterInfo();
+ HII.storeRegToStackSlot(B, It, TmpR1, true, FI, RC, HRI);
+ expandStoreVec(B, std::prev(It), MRI, HII, NewRegs);
+
+ NewRegs.push_back(TmpR0);
+ NewRegs.push_back(TmpR1);
+ B.erase(It);
+ return true;
+}
+
+bool HexagonFrameLowering::expandLoadVecPred(MachineBasicBlock &B,
+ MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
+ const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
+ auto &HST = B.getParent()->getSubtarget<HexagonSubtarget>();
+ MachineInstr *MI = &*It;
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned DstR = MI->getOperand(0).getReg();
+
+ assert(MI->getOperand(1).isFI() && "Expect a frame index");
+ int FI = MI->getOperand(1).getIndex();
+
+ bool Is128B = HST.useHVXDblOps();
+ auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
+ : &Hexagon::VectorRegs128BRegClass;
+
+ // TmpR0 = A2_tfrsi 0x01010101
+ // TmpR1 = load FI, 0
+ // DstR = V6_vandvrt TmpR1, TmpR0
+ unsigned TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
+ unsigned TmpR1 = MRI.createVirtualRegister(RC);
+
+ BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
+ .addImm(0x01010101);
+ auto *HRI = B.getParent()->getSubtarget<HexagonSubtarget>().getRegisterInfo();
+ HII.loadRegFromStackSlot(B, It, TmpR1, FI, RC, HRI);
+ expandLoadVec(B, std::prev(It), MRI, HII, NewRegs);
+
+ unsigned VandOpc = !Is128B ? Hexagon::V6_vandvrt : Hexagon::V6_vandvrt_128B;
+ BuildMI(B, It, DL, HII.get(VandOpc), DstR)
+ .addReg(TmpR1, RegState::Kill)
+ .addReg(TmpR0, RegState::Kill);
+
+ NewRegs.push_back(TmpR0);
+ NewRegs.push_back(TmpR1);
+ B.erase(It);
+ return true;
+}
+
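Editorial sketch: a vector-predicate (Q) register cannot be stored directly, so the two expansions above widen it through a full vector register using the splatted constant 0x01010101 and recover it on reload. The model below shows the round trip on one byte lane; only the constant and the V6_vandqrt/V6_vandvrt pairing come from the diff, the helper functions are illustrative.

#include <cassert>
#include <cstdint>

uint8_t spillByte(bool QBit) {            // one lane of V6_vandqrt
  return QBit ? 0x01 : 0x00;              // 0x01 comes from the 0x01010101 splat
}

bool reloadByte(uint8_t MemByte) {        // one lane of V6_vandvrt
  return (MemByte & 0x01) != 0;
}

int main() {
  assert(reloadByte(spillByte(true)) == true);
  assert(reloadByte(spillByte(false)) == false);
}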
+bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &B,
+ MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
+ const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
+ MachineFunction &MF = *B.getParent();
+ auto &HST = MF.getSubtarget<HexagonSubtarget>();
+ auto &MFI = *MF.getFrameInfo();
+ auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
+ MachineInstr *MI = &*It;
+ DebugLoc DL = MI->getDebugLoc();
+
+ unsigned SrcR = MI->getOperand(2).getReg();
+ unsigned SrcLo = HRI.getSubReg(SrcR, Hexagon::subreg_loreg);
+ unsigned SrcHi = HRI.getSubReg(SrcR, Hexagon::subreg_hireg);
+ bool IsKill = MI->getOperand(2).isKill();
+
+ assert(MI->getOperand(0).isFI() && "Expect a frame index");
+ int FI = MI->getOperand(0).getIndex();
+
+ bool Is128B = HST.useHVXDblOps();
+ auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
+ : &Hexagon::VectorRegs128BRegClass;
+ unsigned Size = RC->getSize();
+ unsigned NeedAlign = RC->getAlignment();
+ unsigned HasAlign = MFI.getObjectAlignment(FI);
+ unsigned StoreOpc;
+
+ // Store low part.
+ if (NeedAlign <= HasAlign)
+ StoreOpc = !Is128B ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32b_ai_128B;
+ else
+ StoreOpc = !Is128B ? Hexagon::V6_vS32Ub_ai : Hexagon::V6_vS32Ub_ai_128B;
+
+ BuildMI(B, It, DL, HII.get(StoreOpc))
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addReg(SrcLo, getKillRegState(IsKill))
+ .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+
+ // Store high part.
+ if (NeedAlign <= MinAlign(HasAlign, Size))
+ StoreOpc = !Is128B ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32b_ai_128B;
+ else
+ StoreOpc = !Is128B ? Hexagon::V6_vS32Ub_ai : Hexagon::V6_vS32Ub_ai_128B;
+
+ BuildMI(B, It, DL, HII.get(StoreOpc))
+ .addFrameIndex(FI)
+ .addImm(Size)
+ .addReg(SrcHi, getKillRegState(IsKill))
+ .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+
+ B.erase(It);
+ return true;
+}
+
+bool HexagonFrameLowering::expandLoadVec2(MachineBasicBlock &B,
+ MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
+ const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
+ MachineFunction &MF = *B.getParent();
+ auto &HST = MF.getSubtarget<HexagonSubtarget>();
+ auto &MFI = *MF.getFrameInfo();
+ auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
+ MachineInstr *MI = &*It;
+ DebugLoc DL = MI->getDebugLoc();
+
+ unsigned DstR = MI->getOperand(0).getReg();
+ unsigned DstHi = HRI.getSubReg(DstR, Hexagon::subreg_hireg);
+ unsigned DstLo = HRI.getSubReg(DstR, Hexagon::subreg_loreg);
+
+ assert(MI->getOperand(1).isFI() && "Expect a frame index");
+ int FI = MI->getOperand(1).getIndex();
+
+ bool Is128B = HST.useHVXDblOps();
+ auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
+ : &Hexagon::VectorRegs128BRegClass;
+ unsigned Size = RC->getSize();
+ unsigned NeedAlign = RC->getAlignment();
+ unsigned HasAlign = MFI.getObjectAlignment(FI);
+ unsigned LoadOpc;
+
+ // Load low part.
+ if (NeedAlign <= HasAlign)
+ LoadOpc = !Is128B ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32b_ai_128B;
+ else
+ LoadOpc = !Is128B ? Hexagon::V6_vL32Ub_ai : Hexagon::V6_vL32Ub_ai_128B;
+
+ BuildMI(B, It, DL, HII.get(LoadOpc), DstLo)
+ .addFrameIndex(FI)
+ .addImm(0)
+ .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+
+ // Load high part.
+ if (NeedAlign <= MinAlign(HasAlign, Size))
+ LoadOpc = !Is128B ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32b_ai_128B;
+ else
+ LoadOpc = !Is128B ? Hexagon::V6_vL32Ub_ai : Hexagon::V6_vL32Ub_ai_128B;
+
+ BuildMI(B, It, DL, HII.get(LoadOpc), DstHi)
+ .addFrameIndex(FI)
+ .addImm(Size)
+ .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+
+ B.erase(It);
+ return true;
+}
+
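Editorial sketch: in the vector-pair expansions above, the low half uses the slot's own alignment, but the high half sits at byte offset Size, so its guaranteed alignment drops to MinAlign(HasAlign, Size). The self-contained model below reproduces that opcode choice; minAlign uses the same lowest-set-bit computation as llvm::MinAlign, and the numbers assume 64-byte HVX vectors.

#include <cassert>

unsigned minAlign(unsigned A, unsigned B) {
  return (A | B) & (~(A | B) + 1);        // lowest set bit of A|B
}

bool useAlignedOp(unsigned NeedAlign, unsigned HasAlign) {
  return NeedAlign <= HasAlign;           // otherwise fall back to V6_vS32Ub_ai
}

int main() {
  // 64-byte vectors in a 64-byte-aligned pair slot: both halves aligned.
  assert(useAlignedOp(64, 64) && useAlignedOp(64, minAlign(64, 64)));
  // A 16-byte-aligned slot forces the unaligned form for both halves.
  assert(!useAlignedOp(64, 16) && !useAlignedOp(64, minAlign(16, 64)));
}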
+bool HexagonFrameLowering::expandStoreVec(MachineBasicBlock &B,
+ MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
+ const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
+ MachineFunction &MF = *B.getParent();
+ auto &HST = MF.getSubtarget<HexagonSubtarget>();
+ auto &MFI = *MF.getFrameInfo();
+ MachineInstr *MI = &*It;
+ DebugLoc DL = MI->getDebugLoc();
+
+ unsigned SrcR = MI->getOperand(2).getReg();
+ bool IsKill = MI->getOperand(2).isKill();
+
+ assert(MI->getOperand(0).isFI() && "Expect a frame index");
+ int FI = MI->getOperand(0).getIndex();
+
+ bool Is128B = HST.useHVXDblOps();
+ auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
+ : &Hexagon::VectorRegs128BRegClass;
+
+ unsigned NeedAlign = RC->getAlignment();
+ unsigned HasAlign = MFI.getObjectAlignment(FI);
+ unsigned StoreOpc;
+
+ if (NeedAlign <= HasAlign)
+ StoreOpc = !Is128B ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32b_ai_128B;
+ else
+ StoreOpc = !Is128B ? Hexagon::V6_vS32Ub_ai : Hexagon::V6_vS32Ub_ai_128B;
+
+ BuildMI(B, It, DL, HII.get(StoreOpc))
+ .addFrameIndex(FI)
+ .addImm(0)
+ .addReg(SrcR, getKillRegState(IsKill))
+ .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+
+ B.erase(It);
+ return true;
+}
+
+bool HexagonFrameLowering::expandLoadVec(MachineBasicBlock &B,
+ MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
+ const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
+ MachineFunction &MF = *B.getParent();
+ auto &HST = MF.getSubtarget<HexagonSubtarget>();
+ auto &MFI = *MF.getFrameInfo();
+ MachineInstr *MI = &*It;
+ DebugLoc DL = MI->getDebugLoc();
+
+ unsigned DstR = MI->getOperand(0).getReg();
+
+ assert(MI->getOperand(1).isFI() && "Expect a frame index");
+ int FI = MI->getOperand(1).getIndex();
+
+ bool Is128B = HST.useHVXDblOps();
+ auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
+ : &Hexagon::VectorRegs128BRegClass;
+
+ unsigned NeedAlign = RC->getAlignment();
+ unsigned HasAlign = MFI.getObjectAlignment(FI);
+ unsigned LoadOpc;
+
+ if (NeedAlign <= HasAlign)
+ LoadOpc = !Is128B ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32b_ai_128B;
+ else
+ LoadOpc = !Is128B ? Hexagon::V6_vL32Ub_ai : Hexagon::V6_vL32Ub_ai_128B;
+
+ BuildMI(B, It, DL, HII.get(LoadOpc), DstR)
+ .addFrameIndex(FI)
+ .addImm(0)
+ .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+
+ B.erase(It);
+ return true;
+}
+
+
+bool HexagonFrameLowering::expandSpillMacros(MachineFunction &MF,
+ SmallVectorImpl<unsigned> &NewRegs) const {
+ auto &HST = MF.getSubtarget<HexagonSubtarget>();
+ auto &HII = *HST.getInstrInfo();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ bool Changed = false;
+
+ for (auto &B : MF) {
+ // Traverse the basic block.
+ MachineBasicBlock::iterator NextI;
+ for (auto I = B.begin(), E = B.end(); I != E; I = NextI) {
+ MachineInstr *MI = &*I;
+ NextI = std::next(I);
+ unsigned Opc = MI->getOpcode();
+
+ switch (Opc) {
+ case TargetOpcode::COPY:
+ Changed |= expandCopy(B, I, MRI, HII, NewRegs);
+ break;
+ case Hexagon::STriw_pred:
+ case Hexagon::STriw_mod:
+ Changed |= expandStoreInt(B, I, MRI, HII, NewRegs);
+ break;
+ case Hexagon::LDriw_pred:
+ case Hexagon::LDriw_mod:
+ Changed |= expandLoadInt(B, I, MRI, HII, NewRegs);
+ break;
+ case Hexagon::STriq_pred_V6:
+ case Hexagon::STriq_pred_V6_128B:
+ Changed |= expandStoreVecPred(B, I, MRI, HII, NewRegs);
+ break;
+ case Hexagon::LDriq_pred_V6:
+ case Hexagon::LDriq_pred_V6_128B:
+ Changed |= expandLoadVecPred(B, I, MRI, HII, NewRegs);
+ break;
+ case Hexagon::LDrivv_pseudo_V6:
+ case Hexagon::LDrivv_pseudo_V6_128B:
+ Changed |= expandLoadVec2(B, I, MRI, HII, NewRegs);
+ break;
+ case Hexagon::STrivv_pseudo_V6:
+ case Hexagon::STrivv_pseudo_V6_128B:
+ Changed |= expandStoreVec2(B, I, MRI, HII, NewRegs);
+ break;
+ case Hexagon::STriv_pseudo_V6:
+ case Hexagon::STriv_pseudo_V6_128B:
+ Changed |= expandStoreVec(B, I, MRI, HII, NewRegs);
+ break;
+ case Hexagon::LDriv_pseudo_V6:
+ case Hexagon::LDriv_pseudo_V6_128B:
+ Changed |= expandLoadVec(B, I, MRI, HII, NewRegs);
+ break;
+ }
+ }
+ }
+
+ return Changed;
+}
+
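Editorial sketch: the dispatch loop above uses a deliberate traversal pattern, since each expand* call may erase the instruction at I, the iterator to the next element is captured before the call. A minimal model of the same pattern, with std::list standing in for the intrusive machine-instruction list:

#include <cassert>
#include <iterator>
#include <list>

int main() {
  std::list<int> B{1, 2, 3, 4};
  for (auto I = B.begin(), E = B.end(), NextI = I; I != E; I = NextI) {
    NextI = std::next(I);   // capture the successor first
    if (*I % 2 == 0)
      B.erase(I);           // safe: NextI is unaffected by the erase
  }
  assert((B == std::list<int>{1, 3}));
}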
+
+void HexagonFrameLowering::determineCalleeSaves(MachineFunction &MF,
+ BitVector &SavedRegs,
+ RegScavenger *RS) const {
+ auto &HST = MF.getSubtarget<HexagonSubtarget>();
+ auto &HRI = *HST.getRegisterInfo();
+
+ SavedRegs.resize(HRI.getNumRegs());
+
+ // If we have a function containing __builtin_eh_return we want to spill and
+ // restore all callee saved registers. Pretend that they are used.
+ if (MF.getInfo<HexagonMachineFunctionInfo>()->hasEHReturn())
+ for (const MCPhysReg *R = HRI.getCalleeSavedRegs(&MF); *R; ++R)
+ SavedRegs.set(*R);
+
+ // Replace predicate register pseudo spill code.
+ SmallVector<unsigned,8> NewRegs;
+ expandSpillMacros(MF, NewRegs);
+ if (OptimizeSpillSlots && !isOptNone(MF))
+ optimizeSpillSlots(MF, NewRegs);
+
+ // We need to reserve a spill slot if scavenging could potentially require
+ // spilling a scavenged register.
+ if (!NewRegs.empty()) {
+ MachineFrameInfo &MFI = *MF.getFrameInfo();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ SetVector<const TargetRegisterClass*> SpillRCs;
+ // Reserve an int register in any case, because it could be used to hold
+ // the stack offset in case it does not fit into a spill instruction.
+ SpillRCs.insert(&Hexagon::IntRegsRegClass);
+
+ for (unsigned VR : NewRegs)
+ SpillRCs.insert(MRI.getRegClass(VR));
+
+ for (auto *RC : SpillRCs) {
+ if (!needToReserveScavengingSpillSlots(MF, HRI, RC))
+ continue;
+ unsigned Num = RC == &Hexagon::IntRegsRegClass ? NumberScavengerSlots : 1;
+ unsigned S = RC->getSize(), A = RC->getAlignment();
+ for (unsigned i = 0; i < Num; i++) {
+ int NewFI = MFI.CreateSpillStackObject(S, A);
+ RS->addScavengingFrameIndex(NewFI);
+ }
+ }
+ }
+
+ TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
+}
+
+
+unsigned HexagonFrameLowering::findPhysReg(MachineFunction &MF,
+ HexagonBlockRanges::IndexRange &FIR,
+ HexagonBlockRanges::InstrIndexMap &IndexMap,
+ HexagonBlockRanges::RegToRangeMap &DeadMap,
+ const TargetRegisterClass *RC) const {
+ auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
+ auto &MRI = MF.getRegInfo();
+
+ auto isDead = [&FIR,&DeadMap] (unsigned Reg) -> bool {
+ auto F = DeadMap.find({Reg,0});
+ if (F == DeadMap.end())
+ return false;
+ for (auto &DR : F->second)
+ if (DR.contains(FIR))
+ return true;
+ return false;
+ };
+
+ for (unsigned Reg : RC->getRawAllocationOrder(MF)) {
+ bool Dead = true;
+ for (auto R : HexagonBlockRanges::expandToSubRegs({Reg,0}, MRI, HRI)) {
+ if (isDead(R.Reg))
+ continue;
+ Dead = false;
+ break;
+ }
+ if (Dead)
+ return Reg;
+ }
+ return 0;
+}
+
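Editorial sketch: findPhysReg accepts a candidate register only if every one of its sub-registers is dead across the whole frame-index range; a register pair with one live half must be rejected. The toy model below captures that check, with the Range type and the per-sub-register dead maps as stand-ins for the HexagonBlockRanges structures.

#include <cassert>
#include <vector>

struct Range {
  int Start, End;
  bool contains(const Range &R) const {
    return Start <= R.Start && R.End <= End;
  }
};

bool isDeadOver(const std::vector<Range> &DeadRanges, const Range &FIR) {
  for (const Range &DR : DeadRanges)
    if (DR.contains(FIR))
      return true;
  return false;
}

bool regUsable(const std::vector<std::vector<Range>> &SubRegDeadMaps,
               const Range &FIR) {
  for (const auto &Sub : SubRegDeadMaps)
    if (!isDeadOver(Sub, FIR))
      return false;                       // one live half disqualifies
  return true;
}

int main() {
  Range FIR{5, 9};
  std::vector<Range> LoDead{{0, 20}}, HiDead{{10, 20}};
  assert(regUsable({LoDead, LoDead}, FIR));   // both halves dead over [5,9]
  assert(!regUsable({LoDead, HiDead}, FIR));  // high half live until 10
}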
+void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
+ SmallVectorImpl<unsigned> &VRegs) const {
+ auto &HST = MF.getSubtarget<HexagonSubtarget>();
+ auto &HII = *HST.getInstrInfo();
+ auto &HRI = *HST.getRegisterInfo();
+ auto &MRI = MF.getRegInfo();
+ HexagonBlockRanges HBR(MF);
+
+ typedef std::map<MachineBasicBlock*,HexagonBlockRanges::InstrIndexMap>
+ BlockIndexMap;
+ typedef std::map<MachineBasicBlock*,HexagonBlockRanges::RangeList>
+ BlockRangeMap;
+ typedef HexagonBlockRanges::IndexType IndexType;
+
+ struct SlotInfo {
+ BlockRangeMap Map;
+ unsigned Size;
+ const TargetRegisterClass *RC;
+
+ SlotInfo() : Map(), Size(0), RC(nullptr) {}
+ };
+
+ BlockIndexMap BlockIndexes;
+ SmallSet<int,4> BadFIs;
+ std::map<int,SlotInfo> FIRangeMap;
+
+ auto getRegClass = [&MRI,&HRI] (HexagonBlockRanges::RegisterRef R)
+ -> const TargetRegisterClass* {
+ if (TargetRegisterInfo::isPhysicalRegister(R.Reg))
+ assert(R.Sub == 0);
+ if (TargetRegisterInfo::isVirtualRegister(R.Reg)) {
+ auto *RCR = MRI.getRegClass(R.Reg);
+ if (R.Sub == 0)
+ return RCR;
+ unsigned PR = *RCR->begin();
+ R.Reg = HRI.getSubReg(PR, R.Sub);
+ }
+ return HRI.getMinimalPhysRegClass(R.Reg);
+ };
+ // Accumulate register classes: get a common class for a pre-existing
+ // class HaveRC and a new class NewRC. Return nullptr if a common class
+ // cannot be found, otherwise return the resulting class. If HaveRC is
+ // nullptr, assume that it is still unset.
+ auto getCommonRC = [&HRI] (const TargetRegisterClass *HaveRC,
+ const TargetRegisterClass *NewRC)
+ -> const TargetRegisterClass* {
+ if (HaveRC == nullptr || HaveRC == NewRC)
+ return NewRC;
+ // Different classes, both non-null. Pick the more general one.
+ if (HaveRC->hasSubClassEq(NewRC))
+ return HaveRC;
+ if (NewRC->hasSubClassEq(HaveRC))
+ return NewRC;
+ return nullptr;
+ };
+
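Editorial sketch: getCommonRC is effectively a join in the register-class containment lattice, keeping whichever class contains the other and failing on incomparable classes. Below is a model against a toy hierarchy; LowIntRgs is a hypothetical subclass invented for the example, and subClassEq mimics hasSubClassEq ("does A contain B?").

#include <cassert>

struct RC { const char *Name; const RC *Super; };
const RC IntRegs   = {"IntRegs",   nullptr};
const RC LowIntRgs = {"LowIntRgs", &IntRegs};   // hypothetical subclass
const RC DoubleRgs = {"DoubleRgs", nullptr};

bool subClassEq(const RC *A, const RC *B) {     // true if B is within A
  for (const RC *C = B; C; C = C->Super)
    if (C == A)
      return true;
  return false;
}

const RC *commonRC(const RC *Have, const RC *New) {
  if (!Have || Have == New) return New;
  if (subClassEq(Have, New)) return Have;       // keep the more general one
  if (subClassEq(New, Have)) return New;
  return nullptr;                               // incomparable classes
}

int main() {
  assert(commonRC(nullptr, &LowIntRgs) == &LowIntRgs);
  assert(commonRC(&LowIntRgs, &IntRegs) == &IntRegs);
  assert(commonRC(&IntRegs, &DoubleRgs) == nullptr);
}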
+ // Scan all blocks in the function. Check all occurrences of frame indexes,
+ // and collect relevant information.
+ for (auto &B : MF) {
+ std::map<int,IndexType> LastStore, LastLoad;
+ // Emplace appears not to be supported in gcc 4.7.2-4.
+ //auto P = BlockIndexes.emplace(&B, HexagonBlockRanges::InstrIndexMap(B));
+ auto P = BlockIndexes.insert(
+ std::make_pair(&B, HexagonBlockRanges::InstrIndexMap(B)));
+ auto &IndexMap = P.first->second;
+ DEBUG(dbgs() << "Index map for BB#" << B.getNumber() << "\n"
+ << IndexMap << '\n');
+
+ for (auto &In : B) {
+ int LFI, SFI;
+ bool Load = HII.isLoadFromStackSlot(In, LFI) && !HII.isPredicated(In);
+ bool Store = HII.isStoreToStackSlot(In, SFI) && !HII.isPredicated(In);
+ if (Load && Store) {
+ // If it's both a load and a store, then we won't handle it.
+ BadFIs.insert(LFI);
+ BadFIs.insert(SFI);
+ continue;
+ }
+ // Check for register classes of the register used as the source for
+ // the store, and the register used as the destination for the load.
+ // Also, only accept base+imm_offset addressing modes. Other addressing
+ // modes can have side-effects (post-increments, etc.). For stack
+ // slots they are very unlikely, so there is not much loss due to
+ // this restriction.
+ if (Load || Store) {
+ int TFI = Load ? LFI : SFI;
+ unsigned AM = HII.getAddrMode(&In);
+ SlotInfo &SI = FIRangeMap[TFI];
+ bool Bad = (AM != HexagonII::BaseImmOffset);
+ if (!Bad) {
+ // If the addressing mode is ok, check the register class.
+ const TargetRegisterClass *RC = nullptr;
+ if (Load) {
+ MachineOperand &DataOp = In.getOperand(0);
+ RC = getRegClass({DataOp.getReg(), DataOp.getSubReg()});
+ } else {
+ MachineOperand &DataOp = In.getOperand(2);
+ RC = getRegClass({DataOp.getReg(), DataOp.getSubReg()});
+ }
+ RC = getCommonRC(SI.RC, RC);
+ if (RC == nullptr)
+ Bad = true;
+ else
+ SI.RC = RC;
+ }
+ if (!Bad) {
+ // Check sizes.
+ unsigned S = (1U << (HII.getMemAccessSize(&In) - 1));
+ if (SI.Size != 0 && SI.Size != S)
+ Bad = true;
+ else
+ SI.Size = S;
+ }
+ if (Bad)
+ BadFIs.insert(TFI);
+ }
+
+ // Locate uses of frame indices.
+ for (unsigned i = 0, n = In.getNumOperands(); i < n; ++i) {
+ const MachineOperand &Op = In.getOperand(i);
+ if (!Op.isFI())
+ continue;
+ int FI = Op.getIndex();
+ // Make sure that the following operand is an immediate and that
+ // it is 0. This is the offset in the stack object.
+ if (i+1 >= n || !In.getOperand(i+1).isImm() ||
+ In.getOperand(i+1).getImm() != 0)
+ BadFIs.insert(FI);
+ if (BadFIs.count(FI))
+ continue;
+
+ IndexType Index = IndexMap.getIndex(&In);
+ if (Load) {
+ if (LastStore[FI] == IndexType::None)
+ LastStore[FI] = IndexType::Entry;
+ LastLoad[FI] = Index;
+ } else if (Store) {
+ HexagonBlockRanges::RangeList &RL = FIRangeMap[FI].Map[&B];
+ if (LastStore[FI] != IndexType::None)
+ RL.add(LastStore[FI], LastLoad[FI], false, false);
+ else if (LastLoad[FI] != IndexType::None)
+ RL.add(IndexType::Entry, LastLoad[FI], false, false);
+ LastLoad[FI] = IndexType::None;
+ LastStore[FI] = Index;
+ } else {
+ BadFIs.insert(FI);
+ }
+ }
+ }
+
+ for (auto &I : LastLoad) {
+ IndexType LL = I.second;
+ if (LL == IndexType::None)
+ continue;
+ auto &RL = FIRangeMap[I.first].Map[&B];
+ IndexType &LS = LastStore[I.first];
+ if (LS != IndexType::None)
+ RL.add(LS, LL, false, false);
+ else
+ RL.add(IndexType::Entry, LL, false, false);
+ LS = IndexType::None;
+ }
+ for (auto &I : LastStore) {
+ IndexType LS = I.second;
+ if (LS == IndexType::None)
+ continue;
+ auto &RL = FIRangeMap[I.first].Map[&B];
+ RL.add(LS, IndexType::None, false, false);
+ }
+ }
+
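Editorial sketch: the per-slot bookkeeping above turns each block's sequence of stores and loads into ranges that run from a store to the last load it feeds. The standalone model below is deliberately simplified: it keeps only the store-to-load ranges and the trailing flush, ignoring the live-on-entry and predication cases the real scan also records.

#include <cassert>
#include <string>
#include <utility>
#include <vector>

std::vector<std::pair<int,int>> buildRanges(const std::string &Ops) {
  std::vector<std::pair<int,int>> Ranges;
  int LastStore = -1, LastLoad = -1;            // -1 plays the role of None
  for (int i = 0; i < (int)Ops.size(); ++i) {
    if (Ops[i] == 'L') {
      LastLoad = i;
    } else {                                    // 'S'
      if (LastStore != -1)
        Ranges.push_back({LastStore, LastLoad});
      LastLoad = -1;
      LastStore = i;
    }
  }
  if (LastLoad != -1)                           // flush the trailing range
    Ranges.push_back({LastStore, LastLoad});
  return Ranges;
}

int main() {
  // S L L S L: two ranges, each ending at the last load it covers.
  auto R = buildRanges("SLLSL");
  assert((R == std::vector<std::pair<int,int>>{{0, 2}, {3, 4}}));
}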
+ DEBUG({
+ for (auto &P : FIRangeMap) {
+ dbgs() << "fi#" << P.first;
+ if (BadFIs.count(P.first))
+ dbgs() << " (bad)";
+ dbgs() << " RC: ";
+ if (P.second.RC != nullptr)
+ dbgs() << HRI.getRegClassName(P.second.RC) << '\n';
+ else
+ dbgs() << "<null>\n";
+ for (auto &R : P.second.Map)
+ dbgs() << " BB#" << R.first->getNumber() << " { " << R.second << "}\n";
+ }
+ });
+
+ // When a slot is loaded from in a block without being stored to in the
+ // same block, it is live-on-entry to this block. To avoid CFG analysis,
+ // consider this slot to be live-on-exit from all blocks.
+ SmallSet<int,4> LoxFIs;
+
+ std::map<MachineBasicBlock*,std::vector<int>> BlockFIMap;
+
+ for (auto &P : FIRangeMap) {
+ // P = pair(FI, map: BB->RangeList)
+ if (BadFIs.count(P.first))
+ continue;
+ for (auto &B : MF) {
+ auto F = P.second.Map.find(&B);
+ // F = pair(BB, RangeList)
+ if (F == P.second.Map.end() || F->second.empty())
+ continue;
+ HexagonBlockRanges::IndexRange &IR = F->second.front();
+ if (IR.start() == IndexType::Entry)
+ LoxFIs.insert(P.first);
+ BlockFIMap[&B].push_back(P.first);
+ }
+ }
+
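Editorial sketch: the rule encoded above is deliberately coarse. One block whose first range starts at Entry (a load before any store) marks the slot live-on-exit from every block, which later forces the last store in each block to be kept. As a predicate, with one flag per block standing in for the range check:

#include <cassert>
#include <vector>

bool liveOnExitEverywhere(const std::vector<bool> &FirstRangeAtEntry) {
  for (bool AtEntry : FirstRangeAtEntry)
    if (AtEntry)
      return true;      // one live-on-entry block taints every block
  return false;
}

int main() {
  assert(liveOnExitEverywhere({false, true, false}));
  assert(!liveOnExitEverywhere({false, false}));
}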
+ DEBUG({
+ dbgs() << "Block-to-FI map (* -- live-on-exit):\n";
+ for (auto &P : BlockFIMap) {
+ auto &FIs = P.second;
+ if (FIs.empty())
+ continue;
+ dbgs() << " BB#" << P.first->getNumber() << ": {";
+ for (auto I : FIs) {
+ dbgs() << " fi#" << I;
+ if (LoxFIs.count(I))
+ dbgs() << '*';
+ }
+ dbgs() << " }\n";
+ }
+ });
+
+ // Eliminate the loads first; once all loads of a slot are gone, the
+ // stores to it can be removed as well.
+ for (auto &B : MF) {
+ auto F = BlockIndexes.find(&B);
+ assert(F != BlockIndexes.end());
+ HexagonBlockRanges::InstrIndexMap &IM = F->second;
+ HexagonBlockRanges::RegToRangeMap LM = HBR.computeLiveMap(IM);
+ HexagonBlockRanges::RegToRangeMap DM = HBR.computeDeadMap(IM, LM);
+ DEBUG(dbgs() << "BB#" << B.getNumber() << " dead map\n"
+ << HexagonBlockRanges::PrintRangeMap(DM, HRI));
+
+ for (auto FI : BlockFIMap[&B]) {
+ if (BadFIs.count(FI))
+ continue;
+ DEBUG(dbgs() << "Working on fi#" << FI << '\n');
+ HexagonBlockRanges::RangeList &RL = FIRangeMap[FI].Map[&B];
+ for (auto &Range : RL) {
+ DEBUG(dbgs() << "--Examining range:" << RL << '\n');
+ if (!IndexType::isInstr(Range.start()) ||
+ !IndexType::isInstr(Range.end()))
+ continue;
+ MachineInstr *SI = IM.getInstr(Range.start());
+ MachineInstr *EI = IM.getInstr(Range.end());
+ assert(SI->mayStore() && "Unexpected start instruction");
+ assert(EI->mayLoad() && "Unexpected end instruction");
+ MachineOperand &SrcOp = SI->getOperand(2);
+
+ HexagonBlockRanges::RegisterRef SrcRR = { SrcOp.getReg(),
+ SrcOp.getSubReg() };
+ auto *RC = getRegClass({SrcOp.getReg(), SrcOp.getSubReg()});
+ // The this-> is needed to unconfuse MSVC.
+ unsigned FoundR = this->findPhysReg(MF, Range, IM, DM, RC);
+ DEBUG(dbgs() << "Replacement reg:" << PrintReg(FoundR, &HRI) << '\n');
+ if (FoundR == 0)
+ continue;
+
+ // Generate the copy-in: "FoundR = COPY SrcR" at the store location.
+ MachineBasicBlock::iterator StartIt = SI, NextIt;
+ MachineInstr *CopyIn = nullptr;
+ if (SrcRR.Reg != FoundR || SrcRR.Sub != 0) {
+ const DebugLoc &DL = SI->getDebugLoc();
+ CopyIn = BuildMI(B, StartIt, DL, HII.get(TargetOpcode::COPY), FoundR)
+ .addOperand(SrcOp);
+ }
+
+ ++StartIt;
+ // Check if this is a last store and the FI is live-on-exit.
+ if (LoxFIs.count(FI) && (&Range == &RL.back())) {
+ // Update store's source register.
+ if (unsigned SR = SrcOp.getSubReg())
+ SrcOp.setReg(HRI.getSubReg(FoundR, SR));
+ else
+ SrcOp.setReg(FoundR);
+ SrcOp.setSubReg(0);
+ // We are keeping this register live.
+ SrcOp.setIsKill(false);
+ } else {
+ B.erase(SI);
+ IM.replaceInstr(SI, CopyIn);
+ }
+
+ auto EndIt = std::next(MachineBasicBlock::iterator(EI));
+ for (auto It = StartIt; It != EndIt; It = NextIt) {
+ MachineInstr *MI = &*It;
+ NextIt = std::next(It);
+ int TFI;
+ if (!HII.isLoadFromStackSlot(*MI, TFI) || TFI != FI)
+ continue;
+ unsigned DstR = MI->getOperand(0).getReg();
+ assert(MI->getOperand(0).getSubReg() == 0);
+ MachineInstr *CopyOut = nullptr;
+ if (DstR != FoundR) {
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned MemSize = (1U << (HII.getMemAccessSize(MI) - 1));
+ assert(HII.getAddrMode(MI) == HexagonII::BaseImmOffset);
+ unsigned CopyOpc = TargetOpcode::COPY;
+ if (HII.isSignExtendingLoad(*MI))
+ CopyOpc = (MemSize == 1) ? Hexagon::A2_sxtb : Hexagon::A2_sxth;
+ else if (HII.isZeroExtendingLoad(*MI))
+ CopyOpc = (MemSize == 1) ? Hexagon::A2_zxtb : Hexagon::A2_zxth;
+ CopyOut = BuildMI(B, It, DL, HII.get(CopyOpc), DstR)
+ .addReg(FoundR, getKillRegState(MI == EI));
+ }
+ IM.replaceInstr(MI, CopyOut);
+ B.erase(It);
+ }
+
+ // Update the dead map.
+ HexagonBlockRanges::RegisterRef FoundRR = { FoundR, 0 };
+ for (auto RR : HexagonBlockRanges::expandToSubRegs(FoundRR, MRI, HRI))
+ DM[RR].subtract(Range);
+ } // for Range in range list
+ }
+ }
+}
+
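Editorial sketch: one subtlety in the load-elimination loop above is that a reload which was a sign- or zero-extending load cannot be replaced by a plain COPY; the replacement must redo the extension (A2_sxtb/A2_zxtb or the halfword forms). A byte-sized model of why, with the helper invented for illustration:

#include <cassert>
#include <cstdint>

int32_t replaceByteReload(int32_t Spilled, bool SignExtending) {
  uint8_t InMemory = static_cast<uint8_t>(Spilled);   // what the store kept
  return SignExtending ? static_cast<int32_t>(static_cast<int8_t>(InMemory))
                       : static_cast<int32_t>(InMemory);
}

int main() {
  assert(replaceByteReload(0xFF, /*SignExtending=*/true) == -1);   // A2_sxtb
  assert(replaceByteReload(0xFF, /*SignExtending=*/false) == 255); // A2_zxtb
}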
+
void HexagonFrameLowering::expandAlloca(MachineInstr *AI,
const HexagonInstrInfo &HII, unsigned SP, unsigned CF) const {
MachineBasicBlock &MB = *AI->getParent();
@@ -1407,15 +2257,13 @@ const MachineInstr *HexagonFrameLowering::getAlignaInstr(
}
-// FIXME: Use Function::optForSize().
-inline static bool isOptSize(const MachineFunction &MF) {
- AttributeSet AF = MF.getFunction()->getAttributes();
- return AF.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::OptimizeForSize);
-}
-
-inline static bool isMinSize(const MachineFunction &MF) {
- return MF.getFunction()->optForMinSize();
+/// Adds all callee-saved registers as implicit uses or defs to the
+/// instruction.
+void HexagonFrameLowering::addCalleeSaveRegistersAsImpOperand(MachineInstr *MI,
+ const CSIVect &CSI, bool IsDef, bool IsKill) const {
+ // Add the callee-saved registers as implicit uses.
+ for (auto &R : CSI)
+ MI->addOperand(MachineOperand::CreateReg(R.getReg(), IsDef, true, IsKill));
}
@@ -1472,7 +2320,18 @@ bool HexagonFrameLowering::useRestoreFunction(MachineFunction &MF,
const CSIVect &CSI) const {
if (shouldInlineCSR(MF, CSI))
return false;
+ // The restore functions do a bit more than just restoring registers.
+ // The non-returning versions will go back directly to the caller's
+ // caller; the others will clean up the stack frame in preparation for
+ // a tail call. Using them can still save code size even if only one
+ // register is getting restored. Make the decision based on -Oz:
+ // using -Os will use inline restore for a single register.
+ if (isMinSize(MF))
+ return true;
unsigned NumCSI = CSI.size();
+ if (NumCSI <= 1)
+ return false;
+
unsigned Threshold = isOptSize(MF) ? SpillFuncThresholdOs-1
: SpillFuncThreshold;
return Threshold < NumCSI;
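Editorial sketch: the resulting policy, assuming the shouldInlineCSR pre-check already passed, is that -Oz always calls the restore routine, a single callee-saved register is always restored inline, and otherwise a size-dependent threshold decides. The model below takes the two thresholds as parameters; their actual values come from command-line options defined elsewhere in the file.

#include <cassert>

bool useRestoreFn(bool MinSize, bool OptSize, unsigned NumCSI,
                  unsigned ThresholdOs, unsigned Threshold) {
  if (MinSize) return true;              // -Oz: always call the routine
  if (NumCSI <= 1) return false;         // one register: restore inline
  unsigned T = OptSize ? ThresholdOs - 1 : Threshold;
  return T < NumCSI;
}

int main() {
  assert(useRestoreFn(true, false, 1, 8, 16));    // -Oz wins regardless
  assert(!useRestoreFn(false, false, 1, 8, 16));  // single CSR stays inline
  assert(useRestoreFn(false, true, 8, 8, 16));    // -Os: 7 < 8
}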
diff --git a/lib/Target/Hexagon/HexagonFrameLowering.h b/lib/Target/Hexagon/HexagonFrameLowering.h
index 683b303d43ea..3e76214559b7 100644
--- a/lib/Target/Hexagon/HexagonFrameLowering.h
+++ b/lib/Target/Hexagon/HexagonFrameLowering.h
@@ -11,6 +11,7 @@
#define LLVM_LIB_TARGET_HEXAGON_HEXAGONFRAMELOWERING_H
#include "Hexagon.h"
+#include "HexagonBlockRanges.h"
#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
@@ -41,22 +42,23 @@ public:
return true;
}
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const override;
+ MachineBasicBlock::iterator
+ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
void processFunctionBeforeFrameFinalized(MachineFunction &MF,
- RegScavenger *RS = nullptr) const override;
+ RegScavenger *RS = nullptr) const override;
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
- RegScavenger *RS) const override;
+ RegScavenger *RS) const override;
bool targetHandlesStackFrameRounding() const override {
return true;
}
int getFrameIndexReference(const MachineFunction &MF, int FI,
- unsigned &FrameReg) const override;
+ unsigned &FrameReg) const override;
bool hasFP(const MachineFunction &MF) const override;
const SpillSlot *getCalleeSavedSpillSlots(unsigned &NumEntries)
- const override {
+ const override {
static const SpillSlot Offsets[] = {
{ Hexagon::R17, -4 }, { Hexagon::R16, -8 }, { Hexagon::D8, -8 },
{ Hexagon::R19, -12 }, { Hexagon::R18, -16 }, { Hexagon::D9, -16 },
@@ -83,22 +85,61 @@ private:
void expandAlloca(MachineInstr *AI, const HexagonInstrInfo &TII,
unsigned SP, unsigned CF) const;
- void insertPrologueInBlock(MachineBasicBlock &MBB) const;
+ void insertPrologueInBlock(MachineBasicBlock &MBB, bool PrologueStubs) const;
void insertEpilogueInBlock(MachineBasicBlock &MBB) const;
bool insertCSRSpillsInBlock(MachineBasicBlock &MBB, const CSIVect &CSI,
- const HexagonRegisterInfo &HRI) const;
+ const HexagonRegisterInfo &HRI, bool &PrologueStubs) const;
bool insertCSRRestoresInBlock(MachineBasicBlock &MBB, const CSIVect &CSI,
const HexagonRegisterInfo &HRI) const;
+ bool updateExitPaths(MachineBasicBlock &MBB, MachineBasicBlock *RestoreB,
+ BitVector &DoneT, BitVector &DoneF, BitVector &Path) const;
void insertCFIInstructionsAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator At) const;
void adjustForCalleeSavedRegsSpillCall(MachineFunction &MF) const;
- bool replacePredRegPseudoSpillCode(MachineFunction &MF) const;
- bool replaceVecPredRegPseudoSpillCode(MachineFunction &MF) const;
+
+ bool expandCopy(MachineBasicBlock &B, MachineBasicBlock::iterator It,
+ MachineRegisterInfo &MRI, const HexagonInstrInfo &HII,
+ SmallVectorImpl<unsigned> &NewRegs) const;
+ bool expandStoreInt(MachineBasicBlock &B, MachineBasicBlock::iterator It,
+ MachineRegisterInfo &MRI, const HexagonInstrInfo &HII,
+ SmallVectorImpl<unsigned> &NewRegs) const;
+ bool expandLoadInt(MachineBasicBlock &B, MachineBasicBlock::iterator It,
+ MachineRegisterInfo &MRI, const HexagonInstrInfo &HII,
+ SmallVectorImpl<unsigned> &NewRegs) const;
+ bool expandStoreVecPred(MachineBasicBlock &B, MachineBasicBlock::iterator It,
+ MachineRegisterInfo &MRI, const HexagonInstrInfo &HII,
+ SmallVectorImpl<unsigned> &NewRegs) const;
+ bool expandLoadVecPred(MachineBasicBlock &B, MachineBasicBlock::iterator It,
+ MachineRegisterInfo &MRI, const HexagonInstrInfo &HII,
+ SmallVectorImpl<unsigned> &NewRegs) const;
+ bool expandStoreVec2(MachineBasicBlock &B, MachineBasicBlock::iterator It,
+ MachineRegisterInfo &MRI, const HexagonInstrInfo &HII,
+ SmallVectorImpl<unsigned> &NewRegs) const;
+ bool expandLoadVec2(MachineBasicBlock &B, MachineBasicBlock::iterator It,
+ MachineRegisterInfo &MRI, const HexagonInstrInfo &HII,
+ SmallVectorImpl<unsigned> &NewRegs) const;
+ bool expandStoreVec(MachineBasicBlock &B, MachineBasicBlock::iterator It,
+ MachineRegisterInfo &MRI, const HexagonInstrInfo &HII,
+ SmallVectorImpl<unsigned> &NewRegs) const;
+ bool expandLoadVec(MachineBasicBlock &B, MachineBasicBlock::iterator It,
+ MachineRegisterInfo &MRI, const HexagonInstrInfo &HII,
+ SmallVectorImpl<unsigned> &NewRegs) const;
+ bool expandSpillMacros(MachineFunction &MF,
+ SmallVectorImpl<unsigned> &NewRegs) const;
+
+ unsigned findPhysReg(MachineFunction &MF, HexagonBlockRanges::IndexRange &FIR,
+ HexagonBlockRanges::InstrIndexMap &IndexMap,
+ HexagonBlockRanges::RegToRangeMap &DeadMap,
+ const TargetRegisterClass *RC) const;
+ void optimizeSpillSlots(MachineFunction &MF,
+ SmallVectorImpl<unsigned> &VRegs) const;
void findShrunkPrologEpilog(MachineFunction &MF, MachineBasicBlock *&PrologB,
MachineBasicBlock *&EpilogB) const;
+ void addCalleeSaveRegistersAsImpOperand(MachineInstr *MI, const CSIVect &CSI,
+ bool IsDef, bool IsKill) const;
bool shouldInlineCSR(llvm::MachineFunction &MF, const CSIVect &CSI) const;
bool useSpillFunction(MachineFunction &MF, const CSIVect &CSI) const;
bool useRestoreFunction(MachineFunction &MF, const CSIVect &CSI) const;
diff --git a/lib/Target/Hexagon/HexagonGenExtract.cpp b/lib/Target/Hexagon/HexagonGenExtract.cpp
index f26e2ff764d7..f46b6d2a82e3 100644
--- a/lib/Target/Hexagon/HexagonGenExtract.cpp
+++ b/lib/Target/Hexagon/HexagonGenExtract.cpp
@@ -242,6 +242,9 @@ bool HexagonGenExtract::visitBlock(BasicBlock *B) {
bool HexagonGenExtract::runOnFunction(Function &F) {
+ if (skipFunction(F))
+ return false;
+
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
bool Changed;
diff --git a/lib/Target/Hexagon/HexagonGenInsert.cpp b/lib/Target/Hexagon/HexagonGenInsert.cpp
index 64a2b6cec18a..71d079193d79 100644
--- a/lib/Target/Hexagon/HexagonGenInsert.cpp
+++ b/lib/Target/Hexagon/HexagonGenInsert.cpp
@@ -9,11 +9,8 @@
#define DEBUG_TYPE "hexinsert"
-#include "llvm/Pass.h"
-#include "llvm/PassRegistry.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -21,10 +18,12 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/Pass.h"
+#include "llvm/PassRegistry.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -33,7 +32,6 @@
#include "HexagonTargetMachine.h"
#include "HexagonBitTracker.h"
-#include <map>
#include <vector>
using namespace llvm;
@@ -1446,7 +1444,7 @@ bool HexagonGenInsert::removeDeadCode(MachineDomTreeNode *N) {
bool AllDead = true;
SmallVector<unsigned,2> Regs;
- for (ConstMIOperands Op(MI); Op.isValid(); ++Op) {
+ for (ConstMIOperands Op(*MI); Op.isValid(); ++Op) {
if (!Op->isReg() || !Op->isDef())
continue;
unsigned R = Op->getReg();
@@ -1471,6 +1469,9 @@ bool HexagonGenInsert::removeDeadCode(MachineDomTreeNode *N) {
bool HexagonGenInsert::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
bool Timing = OptTiming, TimingDetail = Timing && OptTimingDetail;
bool Changed = false;
TimerGroup __G("hexinsert");
diff --git a/lib/Target/Hexagon/HexagonGenMux.cpp b/lib/Target/Hexagon/HexagonGenMux.cpp
index c059d566709e..bb9256db4b48 100644
--- a/lib/Target/Hexagon/HexagonGenMux.cpp
+++ b/lib/Target/Hexagon/HexagonGenMux.cpp
@@ -49,6 +49,10 @@ namespace {
MachineFunctionPass::getAnalysisUsage(AU);
}
bool runOnMachineFunction(MachineFunction &MF) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
private:
const HexagonInstrInfo *HII;
@@ -70,10 +74,10 @@ namespace {
MachineOperand *SrcT, *SrcF;
MachineInstr *Def1, *Def2;
MuxInfo(MachineBasicBlock::iterator It, unsigned DR, unsigned PR,
- MachineOperand *TOp, MachineOperand *FOp,
- MachineInstr *D1, MachineInstr *D2)
- : At(It), DefR(DR), PredR(PR), SrcT(TOp), SrcF(FOp), Def1(D1),
- Def2(D2) {}
+ MachineOperand *TOp, MachineOperand *FOp, MachineInstr &D1,
+ MachineInstr &D2)
+ : At(It), DefR(DR), PredR(PR), SrcT(TOp), SrcF(FOp), Def1(&D1),
+ Def2(&D2) {}
};
typedef DenseMap<MachineInstr*,unsigned> InstrIndexMap;
typedef DenseMap<unsigned,DefUseInfo> DefUseInfoMap;
@@ -128,7 +132,7 @@ void HexagonGenMux::getDefsUses(const MachineInstr *MI, BitVector &Defs,
expandReg(*R++, Uses);
// Look over all operands, and collect explicit defs and uses.
- for (ConstMIOperands Mo(MI); Mo.isValid(); ++Mo) {
+ for (ConstMIOperands Mo(*MI); Mo.isValid(); ++Mo) {
if (!Mo->isReg() || Mo->isImplicit())
continue;
unsigned R = Mo->getReg();
@@ -258,8 +262,8 @@ bool HexagonGenMux::genMuxInBlock(MachineBasicBlock &B) {
MachineBasicBlock::iterator It1 = B.begin(), It2 = B.begin();
std::advance(It1, MinX);
std::advance(It2, MaxX);
- MachineInstr *Def1 = It1, *Def2 = It2;
- MachineOperand *Src1 = &Def1->getOperand(2), *Src2 = &Def2->getOperand(2);
+ MachineInstr &Def1 = *It1, &Def2 = *It2;
+ MachineOperand *Src1 = &Def1.getOperand(2), *Src2 = &Def2.getOperand(2);
unsigned SR1 = Src1->isReg() ? Src1->getReg() : 0;
unsigned SR2 = Src2->isReg() ? Src2->getReg() : 0;
bool Failure = false, CanUp = true, CanDown = true;
@@ -305,6 +309,8 @@ bool HexagonGenMux::genMuxInBlock(MachineBasicBlock &B) {
}
bool HexagonGenMux::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
HII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
HRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
bool Changed = false;
@@ -316,4 +322,3 @@ bool HexagonGenMux::runOnMachineFunction(MachineFunction &MF) {
FunctionPass *llvm::createHexagonGenMux() {
return new HexagonGenMux();
}
-
diff --git a/lib/Target/Hexagon/HexagonGenPredicate.cpp b/lib/Target/Hexagon/HexagonGenPredicate.cpp
index d9675b5173d2..dcfd3e8317a9 100644
--- a/lib/Target/Hexagon/HexagonGenPredicate.cpp
+++ b/lib/Target/Hexagon/HexagonGenPredicate.cpp
@@ -9,24 +9,22 @@
#define DEBUG_TYPE "gen-pred"
+#include "HexagonTargetMachine.h"
#include "llvm/ADT/SetVector.h"
-#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/CommandLine.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "HexagonTargetMachine.h"
+#include "llvm/Target/TargetMachine.h"
#include <functional>
#include <queue>
#include <set>
-#include <vector>
using namespace llvm;
@@ -157,7 +155,7 @@ unsigned HexagonGenPredicate::getPredForm(unsigned Opc) {
// The opcode corresponding to 0 is TargetOpcode::PHI. We can use 0 here
// to denote "none", but we need to make sure that none of the valid opcodes
// that we return will ever be 0.
- assert(PHI == 0 && "Use different value for <none>");
+ static_assert(PHI == 0, "Use different value for <none>");
return 0;
}
@@ -332,7 +330,7 @@ bool HexagonGenPredicate::isScalarPred(Register PredReg) {
case Hexagon::C4_or_orn:
case Hexagon::C2_xor:
// Add operands to the queue.
- for (ConstMIOperands Mo(DefI); Mo.isValid(); ++Mo)
+ for (ConstMIOperands Mo(*DefI); Mo.isValid(); ++Mo)
if (Mo->isReg() && Mo->isUse())
WorkQ.push(Register(Mo->getReg()));
break;
@@ -449,13 +447,12 @@ bool HexagonGenPredicate::eliminatePredCopies(MachineFunction &MF) {
// the convertible instruction is converted, its predicate result will be
// copied back into the original gpr.
- for (MachineFunction::iterator A = MF.begin(), Z = MF.end(); A != Z; ++A) {
- MachineBasicBlock &B = *A;
- for (MachineBasicBlock::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- if (I->getOpcode() != TargetOpcode::COPY)
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineInstr &MI : MBB) {
+ if (MI.getOpcode() != TargetOpcode::COPY)
continue;
- Register DR = I->getOperand(0);
- Register SR = I->getOperand(1);
+ Register DR = MI.getOperand(0);
+ Register SR = MI.getOperand(1);
if (!TargetRegisterInfo::isVirtualRegister(DR.R))
continue;
if (!TargetRegisterInfo::isVirtualRegister(SR.R))
@@ -466,7 +463,7 @@ bool HexagonGenPredicate::eliminatePredCopies(MachineFunction &MF) {
continue;
assert(!DR.S && !SR.S && "Unexpected subregister");
MRI->replaceRegWith(DR.R, SR.R);
- Erase.insert(I);
+ Erase.insert(&MI);
Changed = true;
}
}
@@ -479,6 +476,9 @@ bool HexagonGenPredicate::eliminatePredCopies(MachineFunction &MF) {
bool HexagonGenPredicate::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
TII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
TRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
MRI = &MF.getRegInfo();
diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index d20a809d6c09..cc154c4be012 100644
--- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -346,6 +346,8 @@ FunctionPass *llvm::createHexagonHardwareLoops() {
bool HexagonHardwareLoops::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********* Hexagon Hardware Loops *********\n");
+ if (skipFunction(*MF.getFunction()))
+ return false;
bool Changed = false;
@@ -434,7 +436,7 @@ bool HexagonHardwareLoops::findInductionRegister(MachineLoop *L,
SmallVector<MachineOperand,2> Cond;
MachineBasicBlock *TB = nullptr, *FB = nullptr;
- bool NotAnalyzed = TII->AnalyzeBranch(*ExitingBlock, TB, FB, Cond, false);
+ bool NotAnalyzed = TII->analyzeBranch(*ExitingBlock, TB, FB, Cond, false);
if (NotAnalyzed)
return false;
@@ -448,8 +450,8 @@ bool HexagonHardwareLoops::findInductionRegister(MachineLoop *L,
unsigned CmpReg1 = 0, CmpReg2 = 0;
int CmpImm = 0, CmpMask = 0;
- bool CmpAnalyzed = TII->analyzeCompare(PredI, CmpReg1, CmpReg2,
- CmpMask, CmpImm);
+ bool CmpAnalyzed =
+ TII->analyzeCompare(*PredI, CmpReg1, CmpReg2, CmpMask, CmpImm);
// Fail if the compare was not analyzed, or it's not comparing a register
// with an immediate value. Not checking the mask here, since we handle
// the individual compare opcodes (including A4_cmpb*) later on.
@@ -581,7 +583,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
SmallVector<MachineOperand,2> Cond;
MachineBasicBlock *TB = nullptr, *FB = nullptr;
- bool NotAnalyzed = TII->AnalyzeBranch(*ExitingBlock, TB, FB, Cond, false);
+ bool NotAnalyzed = TII->analyzeBranch(*ExitingBlock, TB, FB, Cond, false);
if (NotAnalyzed)
return nullptr;
@@ -593,7 +595,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
if (ExitingBlock != Latch && (TB == Latch || FB == Latch)) {
MachineBasicBlock *LTB = 0, *LFB = 0;
SmallVector<MachineOperand,2> LCond;
- bool NotAnalyzed = TII->AnalyzeBranch(*Latch, LTB, LFB, LCond, false);
+ bool NotAnalyzed = TII->analyzeBranch(*Latch, LTB, LFB, LCond, false);
if (NotAnalyzed)
return nullptr;
if (TB == Latch)
@@ -618,8 +620,8 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
unsigned CmpReg1 = 0, CmpReg2 = 0;
int Mask = 0, ImmValue = 0;
- bool AnalyzedCmp = TII->analyzeCompare(CondI, CmpReg1, CmpReg2,
- Mask, ImmValue);
+ bool AnalyzedCmp =
+ TII->analyzeCompare(*CondI, CmpReg1, CmpReg2, Mask, ImmValue);
if (!AnalyzedCmp)
return nullptr;
@@ -1184,7 +1186,7 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L,
MachineBasicBlock *TB = 0, *FB = 0;
SmallVector<MachineOperand, 2> Cond;
- if (TII->AnalyzeBranch(*ExitingBlock, TB, FB, Cond, false))
+ if (TII->analyzeBranch(*ExitingBlock, TB, FB, Cond, false))
return false;
if (L->contains(TB))
@@ -1418,12 +1420,12 @@ bool HexagonHardwareLoops::loopCountMayWrapOrUnderFlow(
unsigned CmpReg1 = 0, CmpReg2 = 0;
int CmpMask = 0, CmpValue = 0;
- if (!TII->analyzeCompare(MI, CmpReg1, CmpReg2, CmpMask, CmpValue))
+ if (!TII->analyzeCompare(*MI, CmpReg1, CmpReg2, CmpMask, CmpValue))
continue;
MachineBasicBlock *TBB = 0, *FBB = 0;
SmallVector<MachineOperand, 2> Cond;
- if (TII->AnalyzeBranch(*MI->getParent(), TBB, FBB, Cond, false))
+ if (TII->analyzeBranch(*MI->getParent(), TBB, FBB, Cond, false))
continue;
Comparison::Kind Cmp = getComparisonKind(MI->getOpcode(), 0, 0, 0);
@@ -1619,14 +1621,14 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) {
MachineBasicBlock *TB = nullptr, *FB = nullptr;
SmallVector<MachineOperand,2> Cond;
// AnalyzeBranch returns true if it fails to analyze branch.
- bool NotAnalyzed = TII->AnalyzeBranch(*ExitingBlock, TB, FB, Cond, false);
+ bool NotAnalyzed = TII->analyzeBranch(*ExitingBlock, TB, FB, Cond, false);
if (NotAnalyzed || Cond.empty())
return false;
if (ExitingBlock != Latch && (TB == Latch || FB == Latch)) {
MachineBasicBlock *LTB = 0, *LFB = 0;
SmallVector<MachineOperand,2> LCond;
- bool NotAnalyzed = TII->AnalyzeBranch(*Latch, LTB, LFB, LCond, false);
+ bool NotAnalyzed = TII->analyzeBranch(*Latch, LTB, LFB, LCond, false);
if (NotAnalyzed)
return false;
@@ -1837,12 +1839,12 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop(
SmallVector<MachineOperand,2> Tmp1;
MachineBasicBlock *TB = nullptr, *FB = nullptr;
- if (TII->AnalyzeBranch(*ExitingBlock, TB, FB, Tmp1, false))
+ if (TII->analyzeBranch(*ExitingBlock, TB, FB, Tmp1, false))
return nullptr;
for (MBBVector::iterator I = Preds.begin(), E = Preds.end(); I != E; ++I) {
MachineBasicBlock *PB = *I;
- bool NotAnalyzed = TII->AnalyzeBranch(*PB, TB, FB, Tmp1, false);
+ bool NotAnalyzed = TII->analyzeBranch(*PB, TB, FB, Tmp1, false);
if (NotAnalyzed)
return nullptr;
}
@@ -1928,7 +1930,7 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop(
MachineBasicBlock *PB = *I;
if (PB != Latch) {
Tmp2.clear();
- bool NotAnalyzed = TII->AnalyzeBranch(*PB, TB, FB, Tmp2, false);
+ bool NotAnalyzed = TII->analyzeBranch(*PB, TB, FB, Tmp2, false);
(void)NotAnalyzed; // suppress compiler warning
assert (!NotAnalyzed && "Should be analyzable!");
if (TB != Header && (Tmp2.empty() || FB != Header))
@@ -1940,7 +1942,7 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop(
// It can happen that the latch block will fall through into the header.
// Insert an unconditional branch to the header.
TB = FB = nullptr;
- bool LatchNotAnalyzed = TII->AnalyzeBranch(*Latch, TB, FB, Tmp2, false);
+ bool LatchNotAnalyzed = TII->analyzeBranch(*Latch, TB, FB, Tmp2, false);
(void)LatchNotAnalyzed; // suppress compiler warning
assert (!LatchNotAnalyzed && "Should be analyzable!");
if (!TB && !FB)
diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index a0da945e7572..22247aa39b61 100644
--- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -15,13 +15,11 @@
#include "HexagonISelLowering.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonTargetMachine.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
@@ -38,17 +36,13 @@ MaxNumOfUsesForConstExtenders("ga-max-num-uses-for-constant-extenders",
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//
-namespace llvm {
- void initializeHexagonDAGToDAGISelPass(PassRegistry&);
-}
-
//===--------------------------------------------------------------------===//
/// HexagonDAGToDAGISel - Hexagon specific code to select Hexagon machine
/// instructions for SelectionDAG operations.
///
namespace {
class HexagonDAGToDAGISel : public SelectionDAGISel {
- const HexagonTargetMachine& HTM;
+ const HexagonTargetMachine &HTM;
const HexagonSubtarget *HST;
const HexagonInstrInfo *HII;
const HexagonRegisterInfo *HRI;
@@ -56,9 +50,7 @@ public:
explicit HexagonDAGToDAGISel(HexagonTargetMachine &tm,
CodeGenOpt::Level OptLevel)
: SelectionDAGISel(tm, OptLevel), HTM(tm), HST(nullptr), HII(nullptr),
- HRI(nullptr) {
- initializeHexagonDAGToDAGISelPass(*PassRegistry::getPassRegistry());
- }
+ HRI(nullptr) {}
bool runOnMachineFunction(MachineFunction &MF) override {
// Reset the subtarget each time through.
@@ -72,7 +64,7 @@ public:
virtual void PreprocessISelDAG() override;
virtual void EmitFunctionEntryCode() override;
- SDNode *Select(SDNode *N) override;
+ void Select(SDNode *N) override;
// Complex Pattern Selectors.
inline bool SelectAddrGA(SDValue &N, SDValue &R);
@@ -84,36 +76,41 @@ public:
return "Hexagon DAG->DAG Pattern Instruction Selection";
}
- SDNode *SelectFrameIndex(SDNode *N);
+ // Generate a machine instruction node corresponding to the circ/brev
+ // load intrinsic.
+ MachineSDNode *LoadInstrForLoadIntrinsic(SDNode *IntN);
+ // Given the circ/brev load intrinsic and the already generated machine
+ // instruction, generate the appropriate store (that is a part of the
+ // intrinsic's functionality).
+ SDNode *StoreInstrForLoadIntrinsic(MachineSDNode *LoadN, SDNode *IntN);
+
+ void SelectFrameIndex(SDNode *N);
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
bool SelectInlineAsmMemoryOperand(const SDValue &Op,
unsigned ConstraintID,
std::vector<SDValue> &OutOps) override;
- SDNode *SelectLoad(SDNode *N);
- SDNode *SelectBaseOffsetLoad(LoadSDNode *LD, SDLoc dl);
- SDNode *SelectIndexedLoad(LoadSDNode *LD, SDLoc dl);
- SDNode *SelectIndexedLoadZeroExtend64(LoadSDNode *LD, unsigned Opcode,
- SDLoc dl);
- SDNode *SelectIndexedLoadSignExtend64(LoadSDNode *LD, unsigned Opcode,
- SDLoc dl);
- SDNode *SelectBaseOffsetStore(StoreSDNode *ST, SDLoc dl);
- SDNode *SelectIndexedStore(StoreSDNode *ST, SDLoc dl);
- SDNode *SelectStore(SDNode *N);
- SDNode *SelectSHL(SDNode *N);
- SDNode *SelectMul(SDNode *N);
- SDNode *SelectZeroExtend(SDNode *N);
- SDNode *SelectIntrinsicWChain(SDNode *N);
- SDNode *SelectIntrinsicWOChain(SDNode *N);
- SDNode *SelectConstant(SDNode *N);
- SDNode *SelectConstantFP(SDNode *N);
- SDNode *SelectAdd(SDNode *N);
- SDNode *SelectBitOp(SDNode *N);
+ bool tryLoadOfLoadIntrinsic(LoadSDNode *N);
+ void SelectLoad(SDNode *N);
+ void SelectBaseOffsetLoad(LoadSDNode *LD, SDLoc dl);
+ void SelectIndexedLoad(LoadSDNode *LD, const SDLoc &dl);
+ void SelectIndexedStore(StoreSDNode *ST, const SDLoc &dl);
+ void SelectStore(SDNode *N);
+ void SelectSHL(SDNode *N);
+ void SelectMul(SDNode *N);
+ void SelectZeroExtend(SDNode *N);
+ void SelectIntrinsicWChain(SDNode *N);
+ void SelectIntrinsicWOChain(SDNode *N);
+ void SelectConstant(SDNode *N);
+ void SelectConstantFP(SDNode *N);
+ void SelectAdd(SDNode *N);
+ void SelectBitcast(SDNode *N);
+ void SelectBitOp(SDNode *N);
// XformMskToBitPosU5Imm - Returns the bit position which
// the single bit 32 bit mask represents.
// Used in Clr and Set bit immediate memops.
- SDValue XformMskToBitPosU5Imm(uint32_t Imm, SDLoc DL) {
+ SDValue XformMskToBitPosU5Imm(uint32_t Imm, const SDLoc &DL) {
int32_t bitPos;
bitPos = Log2_32(Imm);
assert(bitPos >= 0 && bitPos < 32 &&
@@ -123,13 +120,13 @@ public:
// XformMskToBitPosU4Imm - Returns the bit position which the single-bit
// 16 bit mask represents. Used in Clr and Set bit immediate memops.
- SDValue XformMskToBitPosU4Imm(uint16_t Imm, SDLoc DL) {
+ SDValue XformMskToBitPosU4Imm(uint16_t Imm, const SDLoc &DL) {
return XformMskToBitPosU5Imm(Imm, DL);
}
// XformMskToBitPosU3Imm - Returns the bit position which the single-bit
// 8 bit mask represents. Used in Clr and Set bit immediate memops.
- SDValue XformMskToBitPosU3Imm(uint8_t Imm, SDLoc DL) {
+ SDValue XformMskToBitPosU3Imm(uint8_t Imm, const SDLoc &DL) {
return XformMskToBitPosU5Imm(Imm, DL);
}
@@ -142,36 +139,36 @@ public:
// XformM5ToU5Imm - Return a target constant with the specified value, of
// type i32 where the negative literal is transformed into a positive literal
// for use in -= memops.
- inline SDValue XformM5ToU5Imm(signed Imm, SDLoc DL) {
- assert((Imm >= -31 && Imm <= -1) && "Constant out of range for Memops");
- return CurDAG->getTargetConstant(-Imm, DL, MVT::i32);
+ inline SDValue XformM5ToU5Imm(signed Imm, const SDLoc &DL) {
+ assert((Imm >= -31 && Imm <= -1) && "Constant out of range for Memops");
+ return CurDAG->getTargetConstant(-Imm, DL, MVT::i32);
}
// XformU7ToU7M1Imm - Return a target constant decremented by 1, in range
// [1..128], used in cmpb.gtu instructions.
- inline SDValue XformU7ToU7M1Imm(signed Imm, SDLoc DL) {
+ inline SDValue XformU7ToU7M1Imm(signed Imm, const SDLoc &DL) {
assert((Imm >= 1 && Imm <= 128) && "Constant out of range for cmpb op");
return CurDAG->getTargetConstant(Imm - 1, DL, MVT::i8);
}
// XformS8ToS8M1Imm - Return a target constant decremented by 1.
- inline SDValue XformSToSM1Imm(signed Imm, SDLoc DL) {
+ inline SDValue XformSToSM1Imm(signed Imm, const SDLoc &DL) {
return CurDAG->getTargetConstant(Imm - 1, DL, MVT::i32);
}
// XformU8ToU8M1Imm - Return a target constant decremented by 1.
- inline SDValue XformUToUM1Imm(unsigned Imm, SDLoc DL) {
+ inline SDValue XformUToUM1Imm(unsigned Imm, const SDLoc &DL) {
assert((Imm >= 1) && "Cannot decrement unsigned int less than 1");
return CurDAG->getTargetConstant(Imm - 1, DL, MVT::i32);
}
// XformSToSM2Imm - Return a target constant decremented by 2.
- inline SDValue XformSToSM2Imm(unsigned Imm, SDLoc DL) {
+ inline SDValue XformSToSM2Imm(unsigned Imm, const SDLoc &DL) {
return CurDAG->getTargetConstant(Imm - 2, DL, MVT::i32);
}
// XformSToSM3Imm - Return a target constant decremented by 3.
- inline SDValue XformSToSM3Imm(unsigned Imm, SDLoc DL) {
+ inline SDValue XformSToSM3Imm(unsigned Imm, const SDLoc &DL) {
return CurDAG->getTargetConstant(Imm - 3, DL, MVT::i32);
}
@@ -180,6 +177,8 @@ public:
private:
bool isValueExtension(const SDValue &Val, unsigned FromBits, SDValue &Src);
+ bool orIsAdd(const SDNode *N) const;
+ bool isAlignedMemNode(const MemSDNode *N) const;
}; // end HexagonDAGToDAGISel
} // end anonymous namespace
@@ -194,18 +193,6 @@ FunctionPass *createHexagonISelDag(HexagonTargetMachine &TM,
}
}
-static void initializePassOnce(PassRegistry &Registry) {
- const char *Name = "Hexagon DAG->DAG Pattern Instruction Selection";
- PassInfo *PI = new PassInfo(Name, "hexagon-isel",
- &SelectionDAGISel::ID, nullptr, false, false);
- Registry.registerPass(*PI, true);
-}
-
-void llvm::initializeHexagonDAGToDAGISelPass(PassRegistry &Registry) {
- CALL_ONCE_INITIALIZATION(initializePassOnce)
-}
-
-
// Intrinsics that return a predicate.
static bool doesIntrinsicReturnPredicate(unsigned ID) {
switch (ID) {
@@ -251,127 +238,11 @@ static bool doesIntrinsicReturnPredicate(unsigned ID) {
}
}
-SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD,
- unsigned Opcode,
- SDLoc dl) {
- SDValue Chain = LD->getChain();
- EVT LoadedVT = LD->getMemoryVT();
- SDValue Base = LD->getBasePtr();
- SDValue Offset = LD->getOffset();
- SDNode *OffsetNode = Offset.getNode();
- int32_t Val = cast<ConstantSDNode>(OffsetNode)->getSExtValue();
-
- if (HII->isValidAutoIncImm(LoadedVT, Val)) {
- SDValue TargetConst = CurDAG->getTargetConstant(Val, dl, MVT::i32);
- SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::i32,
- MVT::Other, Base, TargetConst,
- Chain);
- SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::A2_sxtw, dl, MVT::i64,
- SDValue(Result_1, 0));
- MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
- MemOp[0] = LD->getMemOperand();
- cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
- const SDValue Froms[] = { SDValue(LD, 0),
- SDValue(LD, 1),
- SDValue(LD, 2) };
- const SDValue Tos[] = { SDValue(Result_2, 0),
- SDValue(Result_1, 1),
- SDValue(Result_1, 2) };
- ReplaceUses(Froms, Tos, 3);
- return Result_2;
- }
-
- SDValue TargetConst0 = CurDAG->getTargetConstant(0, dl, MVT::i32);
- SDValue TargetConstVal = CurDAG->getTargetConstant(Val, dl, MVT::i32);
- SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::Other,
- Base, TargetConst0, Chain);
- SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::A2_sxtw, dl, MVT::i64,
- SDValue(Result_1, 0));
- SDNode* Result_3 = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
- Base, TargetConstVal,
- SDValue(Result_1, 1));
- MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
- MemOp[0] = LD->getMemOperand();
- cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
- const SDValue Froms[] = { SDValue(LD, 0),
- SDValue(LD, 1),
- SDValue(LD, 2) };
- const SDValue Tos[] = { SDValue(Result_2, 0),
- SDValue(Result_3, 0),
- SDValue(Result_1, 1) };
- ReplaceUses(Froms, Tos, 3);
- return Result_2;
-}
-
-
-SDNode *HexagonDAGToDAGISel::SelectIndexedLoadZeroExtend64(LoadSDNode *LD,
- unsigned Opcode,
- SDLoc dl) {
- SDValue Chain = LD->getChain();
- EVT LoadedVT = LD->getMemoryVT();
- SDValue Base = LD->getBasePtr();
- SDValue Offset = LD->getOffset();
- SDNode *OffsetNode = Offset.getNode();
- int32_t Val = cast<ConstantSDNode>(OffsetNode)->getSExtValue();
-
- if (HII->isValidAutoIncImm(LoadedVT, Val)) {
- SDValue TargetConstVal = CurDAG->getTargetConstant(Val, dl, MVT::i32);
- SDValue TargetConst0 = CurDAG->getTargetConstant(0, dl, MVT::i32);
- SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
- MVT::i32, MVT::Other, Base,
- TargetConstVal, Chain);
- SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::A4_combineir, dl,
- MVT::i64, MVT::Other,
- TargetConst0,
- SDValue(Result_1,0));
- MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
- MemOp[0] = LD->getMemOperand();
- cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
- const SDValue Froms[] = { SDValue(LD, 0),
- SDValue(LD, 1),
- SDValue(LD, 2) };
- const SDValue Tos[] = { SDValue(Result_2, 0),
- SDValue(Result_1, 1),
- SDValue(Result_1, 2) };
- ReplaceUses(Froms, Tos, 3);
- return Result_2;
- }
-
- // Generate an indirect load.
- SDValue TargetConst0 = CurDAG->getTargetConstant(0, dl, MVT::i32);
- SDValue TargetConstVal = CurDAG->getTargetConstant(Val, dl, MVT::i32);
- SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
- MVT::Other, Base, TargetConst0,
- Chain);
- SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::A4_combineir, dl,
- MVT::i64, MVT::Other,
- TargetConst0,
- SDValue(Result_1,0));
- // Add offset to base.
- SDNode* Result_3 = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
- Base, TargetConstVal,
- SDValue(Result_1, 1));
- MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
- MemOp[0] = LD->getMemOperand();
- cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
- const SDValue Froms[] = { SDValue(LD, 0),
- SDValue(LD, 1),
- SDValue(LD, 2) };
- const SDValue Tos[] = { SDValue(Result_2, 0), // Load value.
- SDValue(Result_3, 0), // New address.
- SDValue(Result_1, 1) };
- ReplaceUses(Froms, Tos, 3);
- return Result_2;
-}
-
-
-SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, SDLoc dl) {
+void HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, const SDLoc &dl) {
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
SDValue Offset = LD->getOffset();
- SDNode *OffsetNode = Offset.getNode();
- // Get the constant value.
- int32_t Val = cast<ConstantSDNode>(OffsetNode)->getSExtValue();
+ int32_t Inc = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
EVT LoadedVT = LD->getMemoryVT();
unsigned Opcode = 0;
@@ -379,232 +250,394 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, SDLoc dl) {
// loads.
ISD::LoadExtType ExtType = LD->getExtensionType();
bool IsZeroExt = (ExtType == ISD::ZEXTLOAD || ExtType == ISD::EXTLOAD);
- bool HasVecOffset = false;
+ bool IsValidInc = HII->isValidAutoIncImm(LoadedVT, Inc);
- // Figure out the opcode.
- if (LoadedVT == MVT::i64) {
- if (HII->isValidAutoIncImm(LoadedVT, Val))
- Opcode = Hexagon::L2_loadrd_pi;
+ assert(LoadedVT.isSimple());
+ switch (LoadedVT.getSimpleVT().SimpleTy) {
+ case MVT::i8:
+ if (IsZeroExt)
+ Opcode = IsValidInc ? Hexagon::L2_loadrub_pi : Hexagon::L2_loadrub_io;
else
- Opcode = Hexagon::L2_loadrd_io;
- } else if (LoadedVT == MVT::i32) {
- if (HII->isValidAutoIncImm(LoadedVT, Val))
- Opcode = Hexagon::L2_loadri_pi;
- else
- Opcode = Hexagon::L2_loadri_io;
- } else if (LoadedVT == MVT::i16) {
- if (HII->isValidAutoIncImm(LoadedVT, Val))
- Opcode = IsZeroExt ? Hexagon::L2_loadruh_pi : Hexagon::L2_loadrh_pi;
- else
- Opcode = IsZeroExt ? Hexagon::L2_loadruh_io : Hexagon::L2_loadrh_io;
- } else if (LoadedVT == MVT::i8) {
- if (HII->isValidAutoIncImm(LoadedVT, Val))
- Opcode = IsZeroExt ? Hexagon::L2_loadrub_pi : Hexagon::L2_loadrb_pi;
+ Opcode = IsValidInc ? Hexagon::L2_loadrb_pi : Hexagon::L2_loadrb_io;
+ break;
+ case MVT::i16:
+ if (IsZeroExt)
+ Opcode = IsValidInc ? Hexagon::L2_loadruh_pi : Hexagon::L2_loadruh_io;
else
- Opcode = IsZeroExt ? Hexagon::L2_loadrub_io : Hexagon::L2_loadrb_io;
- } else if (LoadedVT == MVT::v16i32 || LoadedVT == MVT::v8i64 ||
- LoadedVT == MVT::v32i16 || LoadedVT == MVT::v64i8) {
- HasVecOffset = true;
- if (HII->isValidAutoIncImm(LoadedVT, Val)) {
- Opcode = Hexagon::V6_vL32b_pi;
- }
+ Opcode = IsValidInc ? Hexagon::L2_loadrh_pi : Hexagon::L2_loadrh_io;
+ break;
+ case MVT::i32:
+ Opcode = IsValidInc ? Hexagon::L2_loadri_pi : Hexagon::L2_loadri_io;
+ break;
+ case MVT::i64:
+ Opcode = IsValidInc ? Hexagon::L2_loadrd_pi : Hexagon::L2_loadrd_io;
+ break;
+ // 64B
+ case MVT::v64i8:
+ case MVT::v32i16:
+ case MVT::v16i32:
+ case MVT::v8i64:
+ if (isAlignedMemNode(LD))
+ Opcode = IsValidInc ? Hexagon::V6_vL32b_pi : Hexagon::V6_vL32b_ai;
else
- Opcode = Hexagon::V6_vL32b_ai;
+ Opcode = IsValidInc ? Hexagon::V6_vL32Ub_pi : Hexagon::V6_vL32Ub_ai;
+ break;
// 128B
- } else if (LoadedVT == MVT::v32i32 || LoadedVT == MVT::v16i64 ||
- LoadedVT == MVT::v64i16 || LoadedVT == MVT::v128i8) {
- HasVecOffset = true;
- if (HII->isValidAutoIncImm(LoadedVT, Val)) {
- Opcode = Hexagon::V6_vL32b_pi_128B;
- }
+ case MVT::v128i8:
+ case MVT::v64i16:
+ case MVT::v32i32:
+ case MVT::v16i64:
+ if (isAlignedMemNode(LD))
+ Opcode = IsValidInc ? Hexagon::V6_vL32b_pi_128B
+ : Hexagon::V6_vL32b_ai_128B;
else
- Opcode = Hexagon::V6_vL32b_ai_128B;
- } else
- llvm_unreachable("unknown memory type");
-
- // For zero extended i64 loads, we need to add combine instructions.
- if (LD->getValueType(0) == MVT::i64 && IsZeroExt)
- return SelectIndexedLoadZeroExtend64(LD, Opcode, dl);
- // Handle sign extended i64 loads.
- if (LD->getValueType(0) == MVT::i64 && ExtType == ISD::SEXTLOAD)
- return SelectIndexedLoadSignExtend64(LD, Opcode, dl);
-
- if (HII->isValidAutoIncImm(LoadedVT, Val)) {
- SDValue TargetConstVal = CurDAG->getTargetConstant(Val, dl, MVT::i32);
- SDNode* Result = CurDAG->getMachineNode(Opcode, dl,
- LD->getValueType(0),
- MVT::i32, MVT::Other, Base,
- TargetConstVal, Chain);
- MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
- MemOp[0] = LD->getMemOperand();
- cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1);
- if (HasVecOffset) {
- const SDValue Froms[] = { SDValue(LD, 0),
- SDValue(LD, 2)
- };
- const SDValue Tos[] = { SDValue(Result, 0),
- SDValue(Result, 2)
- };
- ReplaceUses(Froms, Tos, 2);
- } else {
- const SDValue Froms[] = { SDValue(LD, 0),
- SDValue(LD, 1),
- SDValue(LD, 2)
- };
- const SDValue Tos[] = { SDValue(Result, 0),
- SDValue(Result, 1),
- SDValue(Result, 2)
- };
- ReplaceUses(Froms, Tos, 3);
+ Opcode = IsValidInc ? Hexagon::V6_vL32Ub_pi_128B
+ : Hexagon::V6_vL32Ub_ai_128B;
+ break;
+ default:
+ llvm_unreachable("Unexpected memory type in indexed load");
+ }
+
+ SDValue IncV = CurDAG->getTargetConstant(Inc, dl, MVT::i32);
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LD->getMemOperand();
+
+ auto getExt64 = [this,ExtType] (MachineSDNode *N, const SDLoc &dl)
+ -> MachineSDNode* {
+ if (ExtType == ISD::ZEXTLOAD || ExtType == ISD::EXTLOAD) {
+ SDValue Zero = CurDAG->getTargetConstant(0, dl, MVT::i32);
+ return CurDAG->getMachineNode(Hexagon::A4_combineir, dl, MVT::i64,
+ Zero, SDValue(N, 0));
}
- return Result;
+ if (ExtType == ISD::SEXTLOAD)
+ return CurDAG->getMachineNode(Hexagon::A2_sxtw, dl, MVT::i64,
+ SDValue(N, 0));
+ return N;
+ };
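+  // E.g. a zero-extending i64 load yields "Rdd = combine(#0,Rt)", and a
+  // sign-extending one yields "Rdd = sxtw(Rt)".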
+
+ // Loaded value Next address Chain
+ SDValue From[3] = { SDValue(LD,0), SDValue(LD,1), SDValue(LD,2) };
+ SDValue To[3];
+
+ EVT ValueVT = LD->getValueType(0);
+ if (ValueVT == MVT::i64 && ExtType != ISD::NON_EXTLOAD) {
+ // A load extending to i64 will actually produce i32, which will then
+ // need to be extended to i64.
+ assert(LoadedVT.getSizeInBits() <= 32);
+ ValueVT = MVT::i32;
+ }
+
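+  // With a valid auto-increment the post-increment form is selected,
+  // e.g. "r1 = memw(r0++#4)"; otherwise the load uses a zero offset and
+  // the address update becomes a separate A2_addi.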
+ if (IsValidInc) {
+ MachineSDNode *L = CurDAG->getMachineNode(Opcode, dl, ValueVT,
+ MVT::i32, MVT::Other, Base,
+ IncV, Chain);
+ L->setMemRefs(MemOp, MemOp+1);
+ To[1] = SDValue(L, 1); // Next address.
+ To[2] = SDValue(L, 2); // Chain.
+ // Handle special case for extension to i64.
+ if (LD->getValueType(0) == MVT::i64)
+ L = getExt64(L, dl);
+ To[0] = SDValue(L, 0); // Loaded (extended) value.
} else {
- SDValue TargetConst0 = CurDAG->getTargetConstant(0, dl, MVT::i32);
- SDValue TargetConstVal = CurDAG->getTargetConstant(Val, dl, MVT::i32);
- SDNode* Result_1 = CurDAG->getMachineNode(Opcode, dl,
- LD->getValueType(0),
- MVT::Other, Base, TargetConst0,
- Chain);
- SDNode* Result_2 = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
- Base, TargetConstVal,
- SDValue(Result_1, 1));
- MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
- MemOp[0] = LD->getMemOperand();
- cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
- const SDValue Froms[] = { SDValue(LD, 0),
- SDValue(LD, 1),
- SDValue(LD, 2)
- };
- const SDValue Tos[] = { SDValue(Result_1, 0),
- SDValue(Result_2, 0),
- SDValue(Result_1, 1)
- };
- ReplaceUses(Froms, Tos, 3);
- return Result_1;
+ SDValue Zero = CurDAG->getTargetConstant(0, dl, MVT::i32);
+ MachineSDNode *L = CurDAG->getMachineNode(Opcode, dl, ValueVT, MVT::Other,
+ Base, Zero, Chain);
+ L->setMemRefs(MemOp, MemOp+1);
+ To[2] = SDValue(L, 1); // Chain.
+ MachineSDNode *A = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
+ Base, IncV);
+ To[1] = SDValue(A, 0); // Next address.
+ // Handle special case for extension to i64.
+ if (LD->getValueType(0) == MVT::i64)
+ L = getExt64(L, dl);
+ To[0] = SDValue(L, 0); // Loaded (extended) value.
}
+ ReplaceUses(From, To, 3);
+ CurDAG->RemoveDeadNode(LD);
}
-SDNode *HexagonDAGToDAGISel::SelectLoad(SDNode *N) {
- SDNode *result;
+MachineSDNode *HexagonDAGToDAGISel::LoadInstrForLoadIntrinsic(SDNode *IntN) {
+ if (IntN->getOpcode() != ISD::INTRINSIC_W_CHAIN)
+ return nullptr;
+
+ SDLoc dl(IntN);
+ unsigned IntNo = cast<ConstantSDNode>(IntN->getOperand(1))->getZExtValue();
+
+ static std::map<unsigned,unsigned> LoadPciMap = {
+ { Intrinsic::hexagon_circ_ldb, Hexagon::L2_loadrb_pci },
+ { Intrinsic::hexagon_circ_ldub, Hexagon::L2_loadrub_pci },
+ { Intrinsic::hexagon_circ_ldh, Hexagon::L2_loadrh_pci },
+ { Intrinsic::hexagon_circ_lduh, Hexagon::L2_loadruh_pci },
+ { Intrinsic::hexagon_circ_ldw, Hexagon::L2_loadri_pci },
+ { Intrinsic::hexagon_circ_ldd, Hexagon::L2_loadrd_pci },
+ };
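+  // The "pci" loads use circular addressing: the modifier register set
+  // up via A2_tfrrcr below supplies the circular-buffer state.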
+ auto FLC = LoadPciMap.find(IntNo);
+ if (FLC != LoadPciMap.end()) {
+ SDNode *Mod = CurDAG->getMachineNode(Hexagon::A2_tfrrcr, dl, MVT::i32,
+ IntN->getOperand(4));
+ EVT ValTy = (IntNo == Intrinsic::hexagon_circ_ldd) ? MVT::i64 : MVT::i32;
+ EVT RTys[] = { ValTy, MVT::i32, MVT::Other };
+ // Operands: { Base, Increment, Modifier, Chain }
+ auto Inc = cast<ConstantSDNode>(IntN->getOperand(5));
+ SDValue I = CurDAG->getTargetConstant(Inc->getSExtValue(), dl, MVT::i32);
+ MachineSDNode *Res = CurDAG->getMachineNode(FLC->second, dl, RTys,
+ { IntN->getOperand(2), I, SDValue(Mod,0), IntN->getOperand(0) });
+ return Res;
+ }
+
+ static std::map<unsigned,unsigned> LoadPbrMap = {
+ { Intrinsic::hexagon_brev_ldb, Hexagon::L2_loadrb_pbr },
+ { Intrinsic::hexagon_brev_ldub, Hexagon::L2_loadrub_pbr },
+ { Intrinsic::hexagon_brev_ldh, Hexagon::L2_loadrh_pbr },
+ { Intrinsic::hexagon_brev_lduh, Hexagon::L2_loadruh_pbr },
+ { Intrinsic::hexagon_brev_ldw, Hexagon::L2_loadri_pbr },
+ { Intrinsic::hexagon_brev_ldd, Hexagon::L2_loadrd_pbr },
+ };
+ auto FLB = LoadPbrMap.find(IntNo);
+ if (FLB != LoadPbrMap.end()) {
+ SDNode *Mod = CurDAG->getMachineNode(Hexagon::A2_tfrrcr, dl, MVT::i32,
+ IntN->getOperand(4));
+ EVT ValTy = (IntNo == Intrinsic::hexagon_brev_ldd) ? MVT::i64 : MVT::i32;
+ EVT RTys[] = { ValTy, MVT::i32, MVT::Other };
+ // Operands: { Base, Modifier, Chain }
+ MachineSDNode *Res = CurDAG->getMachineNode(FLB->second, dl, RTys,
+ { IntN->getOperand(2), SDValue(Mod,0), IntN->getOperand(0) });
+ return Res;
+ }
+
+ return nullptr;
+}
+
+SDNode *HexagonDAGToDAGISel::StoreInstrForLoadIntrinsic(MachineSDNode *LoadN,
+ SDNode *IntN) {
+ // The "LoadN" is just a machine load instruction. The intrinsic also
+ // involves storing it. Generate an appropriate store to the location
+ // given in the intrinsic's operand(3).
+ uint64_t F = HII->get(LoadN->getMachineOpcode()).TSFlags;
+ unsigned SizeBits = (F >> HexagonII::MemAccessSizePos) &
+ HexagonII::MemAccesSizeMask;
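+  // The access size is encoded as log2(bytes)+1, i.e. 1, 2, 3 and 4
+  // denote byte, halfword, word and doubleword accesses respectively.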
+ unsigned Size = 1U << (SizeBits-1);
+
+ SDLoc dl(IntN);
+ MachinePointerInfo PI;
+ SDValue TS;
+ SDValue Loc = IntN->getOperand(3);
+
+ if (Size >= 4)
+ TS = CurDAG->getStore(SDValue(LoadN, 2), dl, SDValue(LoadN, 0), Loc, PI,
+ Size);
+ else
+ TS = CurDAG->getTruncStore(SDValue(LoadN, 2), dl, SDValue(LoadN, 0), Loc,
+ PI, MVT::getIntegerVT(Size * 8), Size);
+
+ SDNode *StoreN;
+ {
+ HandleSDNode Handle(TS);
+ SelectStore(TS.getNode());
+ StoreN = Handle.getValue().getNode();
+ }
+
+ // Load's results are { Loaded value, Updated pointer, Chain }
+ ReplaceUses(SDValue(IntN, 0), SDValue(LoadN, 1));
+ ReplaceUses(SDValue(IntN, 1), SDValue(StoreN, 0));
+ return StoreN;
+}
+
+bool HexagonDAGToDAGISel::tryLoadOfLoadIntrinsic(LoadSDNode *N) {
+ // The intrinsics for load circ/brev perform two operations:
+ // 1. Load a value V from the specified location, using the addressing
+ // mode corresponding to the intrinsic.
+ // 2. Store V into a specified location. This location is typically a
+ // local, temporary object.
+ // In many cases, the program using these intrinsics will immediately
+ // load V again from the local object. In those cases, when certain
+ // conditions are met, the last load can be removed.
+  // This function identifies and optimizes this pattern. If the pattern
+  // cannot be optimized, it returns false, and the load is then selected
+  // separately from the intrinsic (which will be handled in
+  // SelectIntrinsicWChain).
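+  //
+  // A typical source-level shape (names purely illustrative):
+  //   int T;                       // local temporary object
+  //   some_circ_ld_intrinsic(..., &T, ...);  // loads V, stores V into T
+  //   ... = T;                     // this reload of T is the load N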
+
+ SDValue Ch = N->getOperand(0);
+ SDValue Loc = N->getOperand(1);
+
+ // Assume that the load and the intrinsic are connected directly with a
+ // chain:
+ // t1: i32,ch = int.load ..., ..., ..., Loc, ... // <-- C
+ // t2: i32,ch = load t1:1, Loc, ...
+ SDNode *C = Ch.getNode();
+
+ if (C->getOpcode() != ISD::INTRINSIC_W_CHAIN)
+ return false;
+
+  // The second load can only be eliminated if its extension type matches
+  // that of the load instruction corresponding to the intrinsic. The two
+  // can differ: the user may, for example, store the result of a
+  // sign-extending intrinsic into an unsigned variable (or the other way
+  // around).
+ ISD::LoadExtType IntExt;
+ switch (cast<ConstantSDNode>(C->getOperand(1))->getZExtValue()) {
+ case Intrinsic::hexagon_brev_ldub:
+ case Intrinsic::hexagon_brev_lduh:
+ case Intrinsic::hexagon_circ_ldub:
+ case Intrinsic::hexagon_circ_lduh:
+ IntExt = ISD::ZEXTLOAD;
+ break;
+ case Intrinsic::hexagon_brev_ldw:
+ case Intrinsic::hexagon_brev_ldd:
+ case Intrinsic::hexagon_circ_ldw:
+ case Intrinsic::hexagon_circ_ldd:
+ IntExt = ISD::NON_EXTLOAD;
+ break;
+ default:
+ IntExt = ISD::SEXTLOAD;
+ break;
+ }
+ if (N->getExtensionType() != IntExt)
+ return false;
+
+ // Make sure the target location for the loaded value in the load intrinsic
+ // is the location from which LD (or N) is loading.
+ if (C->getNumOperands() < 4 || Loc.getNode() != C->getOperand(3).getNode())
+ return false;
+
+ if (MachineSDNode *L = LoadInstrForLoadIntrinsic(C)) {
+ SDNode *S = StoreInstrForLoadIntrinsic(L, C);
+ SDValue F[] = { SDValue(N,0), SDValue(N,1), SDValue(C,0), SDValue(C,1) };
+ SDValue T[] = { SDValue(L,0), SDValue(S,0), SDValue(L,1), SDValue(S,0) };
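+    // Map: N's value -> L's value, N's chain -> the store's chain,
+    // C's updated pointer -> L's updated pointer, and C's chain -> the
+    // store's chain.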
+ ReplaceUses(F, T, array_lengthof(T));
+    // This transformation will leave the intrinsic dead. If it remained in
+    // the DAG, the selection code would see it again, but without the load,
+    // and would generate the store that the intrinsic normally requires.
+ CurDAG->RemoveDeadNode(C);
+ return true;
+ }
+
+ return false;
+}
+
+void HexagonDAGToDAGISel::SelectLoad(SDNode *N) {
SDLoc dl(N);
LoadSDNode *LD = cast<LoadSDNode>(N);
ISD::MemIndexedMode AM = LD->getAddressingMode();
// Handle indexed loads.
if (AM != ISD::UNINDEXED) {
- result = SelectIndexedLoad(LD, dl);
- } else {
- result = SelectCode(LD);
+ SelectIndexedLoad(LD, dl);
+ return;
}
- return result;
-}
+ // Handle patterns using circ/brev load intrinsics.
+ if (tryLoadOfLoadIntrinsic(LD))
+ return;
+ SelectCode(LD);
+}
-SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, SDLoc dl) {
+void HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, const SDLoc &dl) {
SDValue Chain = ST->getChain();
SDValue Base = ST->getBasePtr();
SDValue Offset = ST->getOffset();
SDValue Value = ST->getValue();
- SDNode *OffsetNode = Offset.getNode();
// Get the constant value.
- int32_t Val = cast<ConstantSDNode>(OffsetNode)->getSExtValue();
+ int32_t Inc = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
EVT StoredVT = ST->getMemoryVT();
EVT ValueVT = Value.getValueType();
- // Offset value must be within representable range
- // and must have correct alignment properties.
- if (HII->isValidAutoIncImm(StoredVT, Val)) {
- unsigned Opcode = 0;
-
- // Figure out the post inc version of opcode.
- if (StoredVT == MVT::i64) Opcode = Hexagon::S2_storerd_pi;
- else if (StoredVT == MVT::i32) Opcode = Hexagon::S2_storeri_pi;
- else if (StoredVT == MVT::i16) Opcode = Hexagon::S2_storerh_pi;
- else if (StoredVT == MVT::i8) Opcode = Hexagon::S2_storerb_pi;
- else if (StoredVT == MVT::v16i32 || StoredVT == MVT::v8i64 ||
- StoredVT == MVT::v32i16 || StoredVT == MVT::v64i8) {
- Opcode = Hexagon::V6_vS32b_pi;
- }
- // 128B
- else if (StoredVT == MVT::v32i32 || StoredVT == MVT::v16i64 ||
- StoredVT == MVT::v64i16 || StoredVT == MVT::v128i8) {
- Opcode = Hexagon::V6_vS32b_pi_128B;
- } else llvm_unreachable("unknown memory type");
-
- if (ST->isTruncatingStore() && ValueVT.getSizeInBits() == 64) {
- assert(StoredVT.getSizeInBits() < 64 && "Not a truncating store");
- Value = CurDAG->getTargetExtractSubreg(Hexagon::subreg_loreg,
- dl, MVT::i32, Value);
- }
- SDValue Ops[] = {Base, CurDAG->getTargetConstant(Val, dl, MVT::i32), Value,
- Chain};
- // Build post increment store.
- SDNode* Result = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
- MVT::Other, Ops);
- MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
- MemOp[0] = ST->getMemOperand();
- cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1);
-
- ReplaceUses(ST, Result);
- ReplaceUses(SDValue(ST,1), SDValue(Result,1));
- return Result;
- }
-
- // Note: Order of operands matches the def of instruction:
- // def S2_storerd_io
- // : STInst<(outs), (ins IntRegs:$base, imm:$offset, DoubleRegs:$src1), ...
- // and it differs for POST_ST* for instance.
- SDValue Ops[] = { Base, CurDAG->getTargetConstant(0, dl, MVT::i32), Value,
- Chain};
+ bool IsValidInc = HII->isValidAutoIncImm(StoredVT, Inc);
unsigned Opcode = 0;
- // Figure out the opcode.
- if (StoredVT == MVT::i64) Opcode = Hexagon::S2_storerd_io;
- else if (StoredVT == MVT::i32) Opcode = Hexagon::S2_storeri_io;
- else if (StoredVT == MVT::i16) Opcode = Hexagon::S2_storerh_io;
- else if (StoredVT == MVT::i8) Opcode = Hexagon::S2_storerb_io;
- else if (StoredVT == MVT::v16i32 || StoredVT == MVT::v8i64 ||
- StoredVT == MVT::v32i16 || StoredVT == MVT::v64i8)
- Opcode = Hexagon::V6_vS32b_ai;
+ assert(StoredVT.isSimple());
+ switch (StoredVT.getSimpleVT().SimpleTy) {
+ case MVT::i8:
+ Opcode = IsValidInc ? Hexagon::S2_storerb_pi : Hexagon::S2_storerb_io;
+ break;
+ case MVT::i16:
+ Opcode = IsValidInc ? Hexagon::S2_storerh_pi : Hexagon::S2_storerh_io;
+ break;
+ case MVT::i32:
+ Opcode = IsValidInc ? Hexagon::S2_storeri_pi : Hexagon::S2_storeri_io;
+ break;
+ case MVT::i64:
+ Opcode = IsValidInc ? Hexagon::S2_storerd_pi : Hexagon::S2_storerd_io;
+ break;
+ // 64B
+ case MVT::v64i8:
+ case MVT::v32i16:
+ case MVT::v16i32:
+ case MVT::v8i64:
+ if (isAlignedMemNode(ST))
+ Opcode = IsValidInc ? Hexagon::V6_vS32b_pi : Hexagon::V6_vS32b_ai;
+ else
+ Opcode = IsValidInc ? Hexagon::V6_vS32Ub_pi : Hexagon::V6_vS32Ub_ai;
+ break;
// 128B
- else if (StoredVT == MVT::v32i32 || StoredVT == MVT::v16i64 ||
- StoredVT == MVT::v64i16 || StoredVT == MVT::v128i8)
- Opcode = Hexagon::V6_vS32b_ai_128B;
- else llvm_unreachable("unknown memory type");
-
- // Build regular store.
- SDValue TargetConstVal = CurDAG->getTargetConstant(Val, dl, MVT::i32);
- SDNode* Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
- // Build splitted incriment instruction.
- SDNode* Result_2 = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
- Base,
- TargetConstVal,
- SDValue(Result_1, 0));
+ case MVT::v128i8:
+ case MVT::v64i16:
+ case MVT::v32i32:
+ case MVT::v16i64:
+ if (isAlignedMemNode(ST))
+ Opcode = IsValidInc ? Hexagon::V6_vS32b_pi_128B
+ : Hexagon::V6_vS32b_ai_128B;
+ else
+ Opcode = IsValidInc ? Hexagon::V6_vS32Ub_pi_128B
+ : Hexagon::V6_vS32Ub_ai_128B;
+ break;
+ default:
+ llvm_unreachable("Unexpected memory type in indexed store");
+ }
+
+ if (ST->isTruncatingStore() && ValueVT.getSizeInBits() == 64) {
+ assert(StoredVT.getSizeInBits() < 64 && "Not a truncating store");
+ Value = CurDAG->getTargetExtractSubreg(Hexagon::subreg_loreg,
+ dl, MVT::i32, Value);
+ }
+
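+  // As with loads, a valid auto-increment selects the post-increment
+  // form, e.g. "memw(r0++#4) = r1"; otherwise the address update is
+  // emitted as a separate A2_addi.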
+ SDValue IncV = CurDAG->getTargetConstant(Inc, dl, MVT::i32);
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = ST->getMemOperand();
- cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
- ReplaceUses(SDValue(ST,0), SDValue(Result_2,0));
- ReplaceUses(SDValue(ST,1), SDValue(Result_1,0));
- return Result_2;
+ // Next address Chain
+ SDValue From[2] = { SDValue(ST,0), SDValue(ST,1) };
+ SDValue To[2];
+
+ if (IsValidInc) {
+ // Build post increment store.
+ SDValue Ops[] = { Base, IncV, Value, Chain };
+ MachineSDNode *S = CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::Other,
+ Ops);
+ S->setMemRefs(MemOp, MemOp + 1);
+ To[0] = SDValue(S, 0);
+ To[1] = SDValue(S, 1);
+ } else {
+ SDValue Zero = CurDAG->getTargetConstant(0, dl, MVT::i32);
+ SDValue Ops[] = { Base, Zero, Value, Chain };
+ MachineSDNode *S = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
+ S->setMemRefs(MemOp, MemOp + 1);
+ To[1] = SDValue(S, 0);
+ MachineSDNode *A = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
+ Base, IncV);
+ To[0] = SDValue(A, 0);
+ }
+
+ ReplaceUses(From, To, 2);
+ CurDAG->RemoveDeadNode(ST);
}
-SDNode *HexagonDAGToDAGISel::SelectStore(SDNode *N) {
+void HexagonDAGToDAGISel::SelectStore(SDNode *N) {
SDLoc dl(N);
StoreSDNode *ST = cast<StoreSDNode>(N);
ISD::MemIndexedMode AM = ST->getAddressingMode();
// Handle indexed stores.
if (AM != ISD::UNINDEXED) {
- return SelectIndexedStore(ST, dl);
+ SelectIndexedStore(ST, dl);
+ return;
}
- return SelectCode(ST);
+ SelectCode(ST);
}
-SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
+void HexagonDAGToDAGISel::SelectMul(SDNode *N) {
SDLoc dl(N);
//
@@ -629,7 +662,8 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
if (MulOp0.getOpcode() == ISD::SIGN_EXTEND) {
SDValue Sext0 = MulOp0.getOperand(0);
if (Sext0.getNode()->getValueType(0) != MVT::i32) {
- return SelectCode(N);
+ SelectCode(N);
+ return;
}
OP0 = Sext0;
@@ -638,7 +672,8 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
if (LD->getMemoryVT() != MVT::i32 ||
LD->getExtensionType() != ISD::SEXTLOAD ||
LD->getAddressingMode() != ISD::UNINDEXED) {
- return SelectCode(N);
+ SelectCode(N);
+ return;
}
SDValue Chain = LD->getChain();
@@ -648,14 +683,16 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
LD->getBasePtr(), TargetConst0,
Chain), 0);
} else {
- return SelectCode(N);
+ SelectCode(N);
+ return;
}
// Same goes for the second operand.
if (MulOp1.getOpcode() == ISD::SIGN_EXTEND) {
SDValue Sext1 = MulOp1.getOperand(0);
if (Sext1.getNode()->getValueType(0) != MVT::i32) {
- return SelectCode(N);
+ SelectCode(N);
+ return;
}
OP1 = Sext1;
@@ -664,7 +701,8 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
if (LD->getMemoryVT() != MVT::i32 ||
LD->getExtensionType() != ISD::SEXTLOAD ||
LD->getAddressingMode() != ISD::UNINDEXED) {
- return SelectCode(N);
+ SelectCode(N);
+ return;
}
SDValue Chain = LD->getChain();
@@ -674,20 +712,21 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
LD->getBasePtr(), TargetConst0,
Chain), 0);
} else {
- return SelectCode(N);
+ SelectCode(N);
+ return;
}
// Generate a mpy instruction.
SDNode *Result = CurDAG->getMachineNode(Hexagon::M2_dpmpyss_s0, dl, MVT::i64,
OP0, OP1);
- ReplaceUses(N, Result);
- return Result;
+ ReplaceNode(N, Result);
+ return;
}
- return SelectCode(N);
+ SelectCode(N);
}
-SDNode *HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
+void HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
SDLoc dl(N);
if (N->getValueType(0) == MVT::i32) {
SDValue Shl_0 = N->getOperand(0);
@@ -711,8 +750,8 @@ SDNode *HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
SDNode* Result =
CurDAG->getMachineNode(Hexagon::M2_mpysmi, dl,
MVT::i32, Mul_0, Val);
- ReplaceUses(N, Result);
- return Result;
+ ReplaceNode(N, Result);
+ return;
}
}
@@ -740,8 +779,8 @@ SDNode *HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
SDNode* Result =
CurDAG->getMachineNode(Hexagon::M2_mpysmi, dl, MVT::i32,
Shl2_0, Val);
- ReplaceUses(N, Result);
- return Result;
+ ReplaceNode(N, Result);
+ return;
}
}
}
@@ -750,7 +789,7 @@ SDNode *HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
}
}
}
- return SelectCode(N);
+ SelectCode(N);
}
@@ -764,7 +803,7 @@ SDNode *HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
 // compiler. The architecture defines them as 8-bit registers.
 // We want to preserve all of the lower 8 bits, not just the LSB.
//
-SDNode *HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) {
+void HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) {
SDLoc dl(N);
SDValue Op0 = N->getOperand(0);
@@ -790,11 +829,14 @@ SDNode *HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) {
SDValue(Mask,0), SDValue(OnesReg,0));
SDValue SubR = CurDAG->getTargetConstant(Hexagon::subreg_loreg, dl,
MVT::i32);
- return CurDAG->getMachineNode(Hexagon::EXTRACT_SUBREG, dl, ExVT,
- SDValue(And,0), SubR);
+ ReplaceNode(N, CurDAG->getMachineNode(Hexagon::EXTRACT_SUBREG, dl, ExVT,
+ SDValue(And, 0), SubR));
+ return;
}
- return CurDAG->getMachineNode(Hexagon::A2_andp, dl, ExVT,
- SDValue(Mask,0), SDValue(OnesReg,0));
+ ReplaceNode(N,
+ CurDAG->getMachineNode(Hexagon::A2_andp, dl, ExVT,
+ SDValue(Mask, 0), SDValue(OnesReg, 0)));
+ return;
}
SDNode *IsIntrinsic = N->getOperand(0).getNode();
@@ -816,225 +858,37 @@ SDNode *HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) {
MVT::i64, MVT::Other,
SDValue(Result_2, 0),
SDValue(Result_1, 0));
- ReplaceUses(N, Result_3);
- return Result_3;
+ ReplaceNode(N, Result_3);
+ return;
}
if (N->getValueType(0) == MVT::i32) {
// Convert the zero_extend to Rs = Pd
SDNode* RsPd = CurDAG->getMachineNode(Hexagon::C2_tfrpr, dl,
MVT::i32,
SDValue(IsIntrinsic, 0));
- ReplaceUses(N, RsPd);
- return RsPd;
+ ReplaceNode(N, RsPd);
+ return;
}
llvm_unreachable("Unexpected value type");
}
}
- return SelectCode(N);
+ SelectCode(N);
}
+
//
-// Checking for intrinsics circular load/store, and bitreverse load/store
-// instrisics in order to select the correct lowered operation.
+// Handling intrinsics for circular load and bitreverse load.
//
-SDNode *HexagonDAGToDAGISel::SelectIntrinsicWChain(SDNode *N) {
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
- if (IntNo == Intrinsic::hexagon_circ_ldd ||
- IntNo == Intrinsic::hexagon_circ_ldw ||
- IntNo == Intrinsic::hexagon_circ_lduh ||
- IntNo == Intrinsic::hexagon_circ_ldh ||
- IntNo == Intrinsic::hexagon_circ_ldub ||
- IntNo == Intrinsic::hexagon_circ_ldb) {
- SDLoc dl(N);
- SDValue Chain = N->getOperand(0);
- SDValue Base = N->getOperand(2);
- SDValue Load = N->getOperand(3);
- SDValue ModifierExpr = N->getOperand(4);
- SDValue Offset = N->getOperand(5);
-
- // We need to add the rerurn type for the load. This intrinsic has
- // two return types, one for the load and one for the post-increment.
- // Only the *_ld instructions push the extra return type, and bump the
- // result node operand number correspondingly.
- std::vector<EVT> ResTys;
- unsigned opc;
- unsigned memsize, align;
- MVT MvtSize = MVT::i32;
-
- if (IntNo == Intrinsic::hexagon_circ_ldd) {
- ResTys.push_back(MVT::i32);
- ResTys.push_back(MVT::i64);
- opc = Hexagon::L2_loadrd_pci_pseudo;
- memsize = 8;
- align = 8;
- } else if (IntNo == Intrinsic::hexagon_circ_ldw) {
- ResTys.push_back(MVT::i32);
- ResTys.push_back(MVT::i32);
- opc = Hexagon::L2_loadri_pci_pseudo;
- memsize = 4;
- align = 4;
- } else if (IntNo == Intrinsic::hexagon_circ_ldh) {
- ResTys.push_back(MVT::i32);
- ResTys.push_back(MVT::i32);
- opc = Hexagon::L2_loadrh_pci_pseudo;
- memsize = 2;
- align = 2;
- MvtSize = MVT::i16;
- } else if (IntNo == Intrinsic::hexagon_circ_lduh) {
- ResTys.push_back(MVT::i32);
- ResTys.push_back(MVT::i32);
- opc = Hexagon::L2_loadruh_pci_pseudo;
- memsize = 2;
- align = 2;
- MvtSize = MVT::i16;
- } else if (IntNo == Intrinsic::hexagon_circ_ldb) {
- ResTys.push_back(MVT::i32);
- ResTys.push_back(MVT::i32);
- opc = Hexagon::L2_loadrb_pci_pseudo;
- memsize = 1;
- align = 1;
- MvtSize = MVT::i8;
- } else if (IntNo == Intrinsic::hexagon_circ_ldub) {
- ResTys.push_back(MVT::i32);
- ResTys.push_back(MVT::i32);
- opc = Hexagon::L2_loadrub_pci_pseudo;
- memsize = 1;
- align = 1;
- MvtSize = MVT::i8;
- } else
- llvm_unreachable("no opc");
-
- ResTys.push_back(MVT::Other);
-
- // Copy over the arguments, which are the same mostly.
- SmallVector<SDValue, 5> Ops;
- Ops.push_back(Base);
- Ops.push_back(Load);
- Ops.push_back(ModifierExpr);
- int32_t Val = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
- Ops.push_back(CurDAG->getTargetConstant(Val, dl, MVT::i32));
- Ops.push_back(Chain);
- SDNode* Result = CurDAG->getMachineNode(opc, dl, ResTys, Ops);
-
- SDValue ST;
- MachineMemOperand *Mem =
- MF->getMachineMemOperand(MachinePointerInfo(),
- MachineMemOperand::MOStore, memsize, align);
- if (MvtSize != MVT::i32)
- ST = CurDAG->getTruncStore(Chain, dl, SDValue(Result, 1), Load,
- MvtSize, Mem);
- else
- ST = CurDAG->getStore(Chain, dl, SDValue(Result, 1), Load, Mem);
-
- SDNode* Store = SelectStore(ST.getNode());
-
- const SDValue Froms[] = { SDValue(N, 0),
- SDValue(N, 1) };
- const SDValue Tos[] = { SDValue(Result, 0),
- SDValue(Store, 0) };
- ReplaceUses(Froms, Tos, 2);
- return Result;
- }
-
- if (IntNo == Intrinsic::hexagon_brev_ldd ||
- IntNo == Intrinsic::hexagon_brev_ldw ||
- IntNo == Intrinsic::hexagon_brev_ldh ||
- IntNo == Intrinsic::hexagon_brev_lduh ||
- IntNo == Intrinsic::hexagon_brev_ldb ||
- IntNo == Intrinsic::hexagon_brev_ldub) {
- SDLoc dl(N);
- SDValue Chain = N->getOperand(0);
- SDValue Base = N->getOperand(2);
- SDValue Load = N->getOperand(3);
- SDValue ModifierExpr = N->getOperand(4);
-
- // We need to add the rerurn type for the load. This intrinsic has
- // two return types, one for the load and one for the post-increment.
- std::vector<EVT> ResTys;
- unsigned opc;
- unsigned memsize, align;
- MVT MvtSize = MVT::i32;
-
- if (IntNo == Intrinsic::hexagon_brev_ldd) {
- ResTys.push_back(MVT::i32);
- ResTys.push_back(MVT::i64);
- opc = Hexagon::L2_loadrd_pbr_pseudo;
- memsize = 8;
- align = 8;
- } else if (IntNo == Intrinsic::hexagon_brev_ldw) {
- ResTys.push_back(MVT::i32);
- ResTys.push_back(MVT::i32);
- opc = Hexagon::L2_loadri_pbr_pseudo;
- memsize = 4;
- align = 4;
- } else if (IntNo == Intrinsic::hexagon_brev_ldh) {
- ResTys.push_back(MVT::i32);
- ResTys.push_back(MVT::i32);
- opc = Hexagon::L2_loadrh_pbr_pseudo;
- memsize = 2;
- align = 2;
- MvtSize = MVT::i16;
- } else if (IntNo == Intrinsic::hexagon_brev_lduh) {
- ResTys.push_back(MVT::i32);
- ResTys.push_back(MVT::i32);
- opc = Hexagon::L2_loadruh_pbr_pseudo;
- memsize = 2;
- align = 2;
- MvtSize = MVT::i16;
- } else if (IntNo == Intrinsic::hexagon_brev_ldb) {
- ResTys.push_back(MVT::i32);
- ResTys.push_back(MVT::i32);
- opc = Hexagon::L2_loadrb_pbr_pseudo;
- memsize = 1;
- align = 1;
- MvtSize = MVT::i8;
- } else if (IntNo == Intrinsic::hexagon_brev_ldub) {
- ResTys.push_back(MVT::i32);
- ResTys.push_back(MVT::i32);
- opc = Hexagon::L2_loadrub_pbr_pseudo;
- memsize = 1;
- align = 1;
- MvtSize = MVT::i8;
- } else
- llvm_unreachable("no opc");
-
- ResTys.push_back(MVT::Other);
-
- // Copy over the arguments, which are the same mostly.
- SmallVector<SDValue, 4> Ops;
- Ops.push_back(Base);
- Ops.push_back(Load);
- Ops.push_back(ModifierExpr);
- Ops.push_back(Chain);
- SDNode* Result = CurDAG->getMachineNode(opc, dl, ResTys, Ops);
- SDValue ST;
- MachineMemOperand *Mem =
- MF->getMachineMemOperand(MachinePointerInfo(),
- MachineMemOperand::MOStore, memsize, align);
- if (MvtSize != MVT::i32)
- ST = CurDAG->getTruncStore(Chain, dl, SDValue(Result, 1), Load,
- MvtSize, Mem);
- else
- ST = CurDAG->getStore(Chain, dl, SDValue(Result, 1), Load, Mem);
-
- SDNode* Store = SelectStore(ST.getNode());
-
- const SDValue Froms[] = { SDValue(N, 0),
- SDValue(N, 1) };
- const SDValue Tos[] = { SDValue(Result, 0),
- SDValue(Store, 0) };
- ReplaceUses(Froms, Tos, 2);
- return Result;
+void HexagonDAGToDAGISel::SelectIntrinsicWChain(SDNode *N) {
+ if (MachineSDNode *L = LoadInstrForLoadIntrinsic(N)) {
+ StoreInstrForLoadIntrinsic(L, N);
+ CurDAG->RemoveDeadNode(N);
+ return;
}
-
- return SelectCode(N);
+ SelectCode(N);
}
-//
-// Checking for intrinsics which have predicate registers as operand(s)
-// and lowering to the actual intrinsic.
-//
-SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
+void HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
unsigned Bits;
switch (IID) {
@@ -1045,42 +899,51 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
Bits = 16;
break;
default:
- return SelectCode(N);
+ SelectCode(N);
+ return;
}
- SDValue const &V = N->getOperand(1);
+ SDValue V = N->getOperand(1);
SDValue U;
if (isValueExtension(V, Bits, U)) {
SDValue R = CurDAG->getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
- N->getOperand(0), U);
- return SelectCode(R.getNode());
+ N->getOperand(0), U);
+ ReplaceNode(N, R.getNode());
+ SelectCode(R.getNode());
+ return;
}
- return SelectCode(N);
+ SelectCode(N);
}
//
// Map floating point constant values.
//
-SDNode *HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) {
+void HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) {
SDLoc dl(N);
ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
- APFloat APF = CN->getValueAPF();
+ const APFloat &APF = CN->getValueAPF();
if (N->getValueType(0) == MVT::f32) {
- return CurDAG->getMachineNode(Hexagon::TFRI_f, dl, MVT::f32,
- CurDAG->getTargetConstantFP(APF.convertToFloat(), dl, MVT::f32));
+ ReplaceNode(
+ N, CurDAG->getMachineNode(Hexagon::TFRI_f, dl, MVT::f32,
+ CurDAG->getTargetConstantFP(
+ APF.convertToFloat(), dl, MVT::f32)));
+ return;
}
else if (N->getValueType(0) == MVT::f64) {
- return CurDAG->getMachineNode(Hexagon::CONST64_Float_Real, dl, MVT::f64,
- CurDAG->getTargetConstantFP(APF.convertToDouble(), dl, MVT::f64));
+ ReplaceNode(
+ N, CurDAG->getMachineNode(Hexagon::CONST64_Float_Real, dl, MVT::f64,
+ CurDAG->getTargetConstantFP(
+ APF.convertToDouble(), dl, MVT::f64)));
+ return;
}
- return SelectCode(N);
+ SelectCode(N);
}
//
// Map predicate true (encoded as -1 in LLVM) to a XOR.
//
-SDNode *HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
+void HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
SDLoc dl(N);
if (N->getValueType(0) == MVT::i1) {
SDNode* Result = 0;
@@ -1091,28 +954,30 @@ SDNode *HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
Result = CurDAG->getMachineNode(Hexagon::TFR_PdFalse, dl, MVT::i1);
}
if (Result) {
- ReplaceUses(N, Result);
- return Result;
+ ReplaceNode(N, Result);
+ return;
}
}
- return SelectCode(N);
+ SelectCode(N);
}
//
 // Map add followed by an asr -> asr +=.
//
-SDNode *HexagonDAGToDAGISel::SelectAdd(SDNode *N) {
+void HexagonDAGToDAGISel::SelectAdd(SDNode *N) {
SDLoc dl(N);
if (N->getValueType(0) != MVT::i32) {
- return SelectCode(N);
+ SelectCode(N);
+ return;
}
// Identify nodes of the form: add(asr(...)).
SDNode* Src1 = N->getOperand(0).getNode();
if (Src1->getOpcode() != ISD::SRA || !Src1->hasOneUse()
|| Src1->getValueType(0) != MVT::i32) {
- return SelectCode(N);
+ SelectCode(N);
+ return;
}
// Build Rd = Rd' + asr(Rs, Rt). The machine constraints will ensure that
@@ -1121,9 +986,7 @@ SDNode *HexagonDAGToDAGISel::SelectAdd(SDNode *N) {
N->getOperand(1),
Src1->getOperand(0),
Src1->getOperand(1));
- ReplaceUses(N, Result);
-
- return Result;
+ ReplaceNode(N, Result);
}
//
@@ -1132,26 +995,32 @@ SDNode *HexagonDAGToDAGISel::SelectAdd(SDNode *N) {
// OR -> setbit
// XOR/FNEG ->toggle_bit.
//
-SDNode *HexagonDAGToDAGISel::SelectBitOp(SDNode *N) {
+void HexagonDAGToDAGISel::SelectBitOp(SDNode *N) {
SDLoc dl(N);
EVT ValueVT = N->getValueType(0);
// We handle only 32 and 64-bit bit ops.
if (!(ValueVT == MVT::i32 || ValueVT == MVT::i64 ||
- ValueVT == MVT::f32 || ValueVT == MVT::f64))
- return SelectCode(N);
+ ValueVT == MVT::f32 || ValueVT == MVT::f64)) {
+ SelectCode(N);
+ return;
+ }
   // We handle only fabs and fneg for V5.
unsigned Opc = N->getOpcode();
- if ((Opc == ISD::FABS || Opc == ISD::FNEG) && !HST->hasV5TOps())
- return SelectCode(N);
+ if ((Opc == ISD::FABS || Opc == ISD::FNEG) && !HST->hasV5TOps()) {
+ SelectCode(N);
+ return;
+ }
int64_t Val = 0;
if (Opc != ISD::FABS && Opc != ISD::FNEG) {
if (N->getOperand(1).getOpcode() == ISD::Constant)
Val = cast<ConstantSDNode>((N)->getOperand(1))->getSExtValue();
- else
- return SelectCode(N);
+ else {
+ SelectCode(N);
+ return;
+ }
}
if (Opc == ISD::AND) {
@@ -1159,8 +1028,10 @@ SDNode *HexagonDAGToDAGISel::SelectBitOp(SDNode *N) {
if ((ValueVT == MVT::i32 && isPowerOf2_32(~Val)) ||
(ValueVT == MVT::i64 && isPowerOf2_64(~Val)))
Val = ~Val;
- else
- return SelectCode(N);
+ else {
+ SelectCode(N);
+ return;
+ }
}
   // If OR or AND is being fed by shl, srl, or sra, don't do this change,
@@ -1173,7 +1044,8 @@ SDNode *HexagonDAGToDAGISel::SelectBitOp(SDNode *N) {
case ISD::SRA:
case ISD::SRL:
case ISD::SHL:
- return SelectCode(N);
+ SelectCode(N);
+ return;
}
}
@@ -1181,8 +1053,10 @@ SDNode *HexagonDAGToDAGISel::SelectBitOp(SDNode *N) {
unsigned BitPos = 0;
if (Opc != ISD::FABS && Opc != ISD::FNEG) {
if ((ValueVT == MVT::i32 && !isPowerOf2_32(Val)) ||
- (ValueVT == MVT::i64 && !isPowerOf2_64(Val)))
- return SelectCode(N);
+ (ValueVT == MVT::i64 && !isPowerOf2_64(Val))) {
+ SelectCode(N);
+ return;
+ }
// Get the bit position.
BitPos = countTrailingZeros(uint64_t(Val));
@@ -1259,12 +1133,11 @@ SDNode *HexagonDAGToDAGISel::SelectBitOp(SDNode *N) {
}
}
- ReplaceUses(N, Result);
- return Result;
+ ReplaceNode(N, Result);
}
-SDNode *HexagonDAGToDAGISel::SelectFrameIndex(SDNode *N) {
+void HexagonDAGToDAGISel::SelectFrameIndex(SDNode *N) {
MachineFrameInfo *MFI = MF->getFrameInfo();
const HexagonFrameLowering *HFI = HST->getFrameLowering();
int FX = cast<FrameIndexSDNode>(N)->getIndex();
@@ -1290,61 +1163,91 @@ SDNode *HexagonDAGToDAGISel::SelectFrameIndex(SDNode *N) {
R = CurDAG->getMachineNode(Hexagon::TFR_FIA, DL, MVT::i32, Ops);
}
- if (N->getHasDebugValue())
- CurDAG->TransferDbgValues(SDValue(N, 0), SDValue(R, 0));
- return R;
+ ReplaceNode(N, R);
}
-SDNode *HexagonDAGToDAGISel::Select(SDNode *N) {
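+// A bitcast between vector types of equal width whose elements are not
+// i1 is a no-op at this level: the operand is used directly, e.g. a
+// v64i8 value standing in for v16i32.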
+void HexagonDAGToDAGISel::SelectBitcast(SDNode *N) {
+ EVT SVT = N->getOperand(0).getValueType();
+ EVT DVT = N->getValueType(0);
+ if (!SVT.isVector() || !DVT.isVector() ||
+ SVT.getVectorElementType() == MVT::i1 ||
+ DVT.getVectorElementType() == MVT::i1 ||
+ SVT.getSizeInBits() != DVT.getSizeInBits()) {
+ SelectCode(N);
+ return;
+ }
+
+ CurDAG->ReplaceAllUsesOfValueWith(SDValue(N,0), N->getOperand(0));
+ CurDAG->RemoveDeadNode(N);
+}
+
+
+void HexagonDAGToDAGISel::Select(SDNode *N) {
if (N->isMachineOpcode()) {
N->setNodeId(-1);
- return nullptr; // Already selected.
+ return; // Already selected.
}
switch (N->getOpcode()) {
case ISD::Constant:
- return SelectConstant(N);
+ SelectConstant(N);
+ return;
case ISD::ConstantFP:
- return SelectConstantFP(N);
+ SelectConstantFP(N);
+ return;
case ISD::FrameIndex:
- return SelectFrameIndex(N);
+ SelectFrameIndex(N);
+ return;
case ISD::ADD:
- return SelectAdd(N);
+ SelectAdd(N);
+ return;
+
+ case ISD::BITCAST:
+ SelectBitcast(N);
+ return;
case ISD::SHL:
- return SelectSHL(N);
+ SelectSHL(N);
+ return;
case ISD::LOAD:
- return SelectLoad(N);
+ SelectLoad(N);
+ return;
case ISD::STORE:
- return SelectStore(N);
+ SelectStore(N);
+ return;
case ISD::MUL:
- return SelectMul(N);
+ SelectMul(N);
+ return;
case ISD::AND:
case ISD::OR:
case ISD::XOR:
case ISD::FABS:
case ISD::FNEG:
- return SelectBitOp(N);
+ SelectBitOp(N);
+ return;
case ISD::ZERO_EXTEND:
- return SelectZeroExtend(N);
+ SelectZeroExtend(N);
+ return;
case ISD::INTRINSIC_W_CHAIN:
- return SelectIntrinsicWChain(N);
+ SelectIntrinsicWChain(N);
+ return;
case ISD::INTRINSIC_WO_CHAIN:
- return SelectIntrinsicWOChain(N);
+ SelectIntrinsicWOChain(N);
+ return;
}
- return SelectCode(N);
+ SelectCode(N);
}
bool HexagonDAGToDAGISel::
@@ -1380,7 +1283,7 @@ void HexagonDAGToDAGISel::PreprocessISelDAG() {
// Simplify: (or (select c x 0) z) -> (select c (or x z) z)
// (or (select c 0 y) z) -> (select c z (or y z))
// This may not be the right thing for all targets, so do it here.
- for (auto I: Nodes) {
+ for (auto I : Nodes) {
if (I->getOpcode() != ISD::OR)
continue;
@@ -1392,7 +1295,7 @@ void HexagonDAGToDAGISel::PreprocessISelDAG() {
auto IsSelect0 = [IsZero] (const SDValue &Op) -> bool {
if (Op.getOpcode() != ISD::SELECT)
return false;
- return IsZero(Op.getOperand(1)) || IsZero(Op.getOperand(2));
+ return IsZero(Op.getOperand(1)) || IsZero(Op.getOperand(2));
};
SDValue N0 = I->getOperand(0), N1 = I->getOperand(1);
@@ -1417,6 +1320,59 @@ void HexagonDAGToDAGISel::PreprocessISelDAG() {
}
}
}
+
+ // Transform: (store ch addr (add x (add (shl y c) e)))
+ // to: (store ch addr (add x (shl (add y d) c))),
+ // where e = (shl d c) for some integer d.
+ // The purpose of this is to enable generation of loads/stores with
+ // shifted addressing mode, i.e. mem(x+y<<#c). For that, the shift
+ // value c must be 0, 1 or 2.
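+  // For example, with c = 2 and e = 20 (so d = 5):
+  //   x + ((y << 2) + 20)  ==  x + ((y + 5) << 2),
+  // which matches the mem(x+y<<#2) form once (y + 5) is in a register.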
+ for (auto I : Nodes) {
+ if (I->getOpcode() != ISD::STORE)
+ continue;
+
+ // I matched: (store ch addr Off)
+ SDValue Off = I->getOperand(2);
+ // Off needs to match: (add x (add (shl y c) (shl d c))))
+ if (Off.getOpcode() != ISD::ADD)
+ continue;
+ // Off matched: (add x T0)
+ SDValue T0 = Off.getOperand(1);
+ // T0 needs to match: (add T1 T2):
+ if (T0.getOpcode() != ISD::ADD)
+ continue;
+ // T0 matched: (add T1 T2)
+ SDValue T1 = T0.getOperand(0);
+ SDValue T2 = T0.getOperand(1);
+ // T1 needs to match: (shl y c)
+ if (T1.getOpcode() != ISD::SHL)
+ continue;
+ SDValue C = T1.getOperand(1);
+ ConstantSDNode *CN = dyn_cast<ConstantSDNode>(C.getNode());
+ if (CN == nullptr)
+ continue;
+ unsigned CV = CN->getZExtValue();
+ if (CV > 2)
+ continue;
+ // T2 needs to match e, where e = (shl d c) for some d.
+ ConstantSDNode *EN = dyn_cast<ConstantSDNode>(T2.getNode());
+ if (EN == nullptr)
+ continue;
+ unsigned EV = EN->getZExtValue();
+ if (EV % (1 << CV) != 0)
+ continue;
+ unsigned DV = EV / (1 << CV);
+
+ // Replace T0 with: (shl (add y d) c)
+ SDLoc DL = SDLoc(I);
+ EVT VT = T0.getValueType();
+ SDValue D = DAG.getConstant(DV, DL, VT);
+ // NewAdd = (add y d)
+ SDValue NewAdd = DAG.getNode(ISD::ADD, DL, VT, T1.getOperand(0), D);
+ // NewShl = (shl NewAdd c)
+ SDValue NewShl = DAG.getNode(ISD::SHL, DL, VT, NewAdd, C);
+ ReplaceNode(T0.getNode(), NewShl.getNode());
+ }
}
void HexagonDAGToDAGISel::EmitFunctionEntryCode() {
@@ -1561,3 +1517,26 @@ bool HexagonDAGToDAGISel::isValueExtension(const SDValue &Val,
}
return false;
}
+
+
+bool HexagonDAGToDAGISel::orIsAdd(const SDNode *N) const {
+ assert(N->getOpcode() == ISD::OR);
+ auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ assert(C);
+
+ // Detect when "or" is used to add an offset to a stack object.
+ if (auto *FN = dyn_cast<FrameIndexSDNode>(N->getOperand(0))) {
+ MachineFrameInfo *MFI = MF->getFrameInfo();
+ unsigned A = MFI->getObjectAlignment(FN->getIndex());
+ assert(isPowerOf2_32(A));
+ int32_t Off = C->getSExtValue();
+ // If the alleged offset fits in the zero bits guaranteed by
+ // the alignment, then this or is really an add.
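+    // E.g. with an 8-byte aligned object the low three bits of the
+    // address are zero, so or-ing in an offset of 0..7 is the same as
+    // adding it.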
+ return (Off >= 0) && (((A-1) & Off) == unsigned(Off));
+ }
+ return false;
+}
+
+bool HexagonDAGToDAGISel::isAlignedMemNode(const MemSDNode *N) const {
+ return N->getAlignment() >= N->getMemoryVT().getStoreSize();
+}
diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index 01670902e2b0..cdd4c2f8617d 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -389,9 +389,12 @@ static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
bool UseHVX = HST.useHVXOps();
bool UseHVXDbl = HST.useHVXDblOps();
- if (LocVT == MVT::i1 ||
- LocVT == MVT::i8 ||
- LocVT == MVT::i16) {
+ if (LocVT == MVT::i1) {
+ // Return values of type MVT::i1 still need to be assigned to R0, but
+ // the value type needs to remain i1. LowerCallResult will deal with it,
+ // but it needs to recognize i1 as the value type.
+ LocVT = MVT::i32;
+ } else if (LocVT == MVT::i8 || LocVT == MVT::i16) {
LocVT = MVT::i32;
ValVT = MVT::i32;
if (ArgFlags.isSExt())
@@ -443,9 +446,14 @@ static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {
-
if (LocVT == MVT::i32 || LocVT == MVT::f32) {
- if (unsigned Reg = State.AllocateReg(Hexagon::R0)) {
+ // Note that use of registers beyond R1 is not ABI compliant. However there
+ // are (experimental) IR passes which generate internal functions that
+ // return structs using these additional registers.
+ static const uint16_t RegList[] = { Hexagon::R0, Hexagon::R1,
+ Hexagon::R2, Hexagon::R3,
+ Hexagon::R4, Hexagon::R5};
+ if (unsigned Reg = State.AllocateReg(RegList)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
}
@@ -505,15 +513,13 @@ static bool RetCC_HexagonVector(unsigned ValNo, MVT ValVT,
return false;
}
-void HexagonTargetLowering::promoteLdStType(EVT VT, EVT PromotedLdStVT) {
+void HexagonTargetLowering::promoteLdStType(MVT VT, MVT PromotedLdStVT) {
if (VT != PromotedLdStVT) {
- setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
- AddPromotedToType(ISD::LOAD, VT.getSimpleVT(),
- PromotedLdStVT.getSimpleVT());
+ setOperationAction(ISD::LOAD, VT, Promote);
+ AddPromotedToType(ISD::LOAD, VT, PromotedLdStVT);
- setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
- AddPromotedToType(ISD::STORE, VT.getSimpleVT(),
- PromotedLdStVT.getSimpleVT());
+ setOperationAction(ISD::STORE, VT, Promote);
+ AddPromotedToType(ISD::STORE, VT, PromotedLdStVT);
}
}
@@ -528,10 +534,9 @@ const {
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter. Sometimes what we are copying is the end of a
/// larger object, the part that does not fit in registers.
-static SDValue
-CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
- ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
- SDLoc dl) {
+static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
+ SDValue Chain, ISD::ArgFlagsTy Flags,
+ SelectionDAG &DAG, const SDLoc &dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
@@ -554,11 +559,11 @@ static bool IsHvxVectorType(MVT ty) {
// passed by value, the function prototype is modified to return void and
// the value is stored in memory pointed by a pointer passed by caller.
SDValue
-HexagonTargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
+HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const {
+ const SDLoc &dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to locations.
SmallVector<CCValAssign, 16> RVLocs;
@@ -608,16 +613,11 @@ bool HexagonTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. Returns a SDNode with the same number of values as the
/// ISD::CALL.
-SDValue
-HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const
- SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals,
- const SmallVectorImpl<SDValue> &OutVals,
- SDValue Callee) const {
-
+SDValue HexagonTargetLowering::LowerCallResult(
+ SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
+ const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
@@ -628,11 +628,30 @@ HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
- Chain = DAG.getCopyFromReg(Chain, dl,
- RVLocs[i].getLocReg(),
- RVLocs[i].getValVT(), InFlag).getValue(1);
- InFlag = Chain.getValue(2);
- InVals.push_back(Chain.getValue(0));
+ SDValue RetVal;
+ if (RVLocs[i].getValVT() == MVT::i1) {
+ // Return values of type MVT::i1 require special handling. The reason
+ // is that MVT::i1 is associated with the PredRegs register class, but
+ // values of that type are still returned in R0. Generate an explicit
+ // copy into a predicate register from R0, and treat the value of the
+ // predicate register as the call result.
+ auto &MRI = DAG.getMachineFunction().getRegInfo();
+ SDValue FR0 = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
+ MVT::i32, InFlag);
+ // FR0 = (Value, Chain, Glue)
+ unsigned PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
+ SDValue TPR = DAG.getCopyToReg(FR0.getValue(1), dl, PredR,
+ FR0.getValue(0), FR0.getValue(2));
+ // TPR = (Chain, Glue)
+ RetVal = DAG.getCopyFromReg(TPR.getValue(0), dl, PredR, MVT::i1,
+ TPR.getValue(1));
+ } else {
+ RetVal = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
+ RVLocs[i].getValVT(), InFlag);
+ }
+ InVals.push_back(RetVal.getValue(0));
+ Chain = RetVal.getValue(1);
+ InFlag = RetVal.getValue(2);
}
return Chain;
@@ -759,8 +778,7 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
} else {
MachinePointerInfo LocPI = MachinePointerInfo::getStack(
DAG.getMachineFunction(), LocMemOffset);
- SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI, false,
- false, 0);
+ SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI);
MemOpChains.push_back(S);
}
continue;
@@ -990,6 +1008,34 @@ HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
return Op;
}
+// Need to transform ISD::PREFETCH into something that doesn't inherit
+// all of the properties of ISD::PREFETCH, specifically SDNPMayLoad and
+// SDNPMayStore.
+SDValue HexagonTargetLowering::LowerPREFETCH(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Chain = Op.getOperand(0);
+ SDValue Addr = Op.getOperand(1);
+ // Lower it to DCFETCH($reg, #0). A "pat" will try to merge the offset in,
+ // if the "reg" is fed by an "add".
+ SDLoc DL(Op);
+ SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
+ return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
+}
+
+SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Chain = Op.getOperand(0);
+ unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ // Lower the hexagon_prefetch builtin to DCFETCH, as above.
+ if (IntNo == Intrinsic::hexagon_prefetch) {
+ SDValue Addr = Op.getOperand(2);
+ SDLoc DL(Op);
+ SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
+ return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
+ }
+ return SDValue();
+}
+
SDValue
HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
@@ -1016,20 +1062,15 @@ HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SDValue AC = DAG.getConstant(A, dl, MVT::i32);
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
SDValue AA = DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);
- if (Op.getNode()->getHasDebugValue())
- DAG.TransferDbgValues(Op, AA);
+
+ DAG.ReplaceAllUsesOfValueWith(Op, AA);
return AA;
}
-SDValue
-HexagonTargetLowering::LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const
- SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals)
-const {
+SDValue HexagonTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -1144,9 +1185,8 @@ const {
// location.
InVals.push_back(FIN);
} else {
- InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
- MachinePointerInfo(), false, false,
- false, 0));
+ InVals.push_back(
+ DAG.getLoad(VA.getLocVT(), dl, Chain, FIN, MachinePointerInfo()));
}
}
}
@@ -1174,13 +1214,13 @@ HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr,
- Op.getOperand(1), MachinePointerInfo(SV), false,
- false, 0);
+ return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr, Op.getOperand(1),
+ MachinePointerInfo(SV));
}
// Creates a SPLAT instruction for a constant value VAL.
-static SDValue createSplat(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue Val) {
+static SDValue createSplat(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
+ SDValue Val) {
if (VT.getSimpleVT() == MVT::v4i8)
return DAG.getNode(HexagonISD::VSPLATB, dl, VT, Val);
@@ -1301,20 +1341,14 @@ SDValue HexagonTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDValue Loads[4];
// Base load.
Loads[0] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Base,
- LoadNode->getPointerInfo(), MVT::i16,
- LoadNode->isVolatile(),
- LoadNode->isNonTemporal(),
- LoadNode->isInvariant(),
- Alignment);
+ LoadNode->getPointerInfo(), MVT::i16, Alignment,
+ LoadNode->getMemOperand()->getFlags());
// Base+2 load.
SDValue Increment = DAG.getConstant(2, DL, MVT::i32);
Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
Loads[1] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
- LoadNode->getPointerInfo(), MVT::i16,
- LoadNode->isVolatile(),
- LoadNode->isNonTemporal(),
- LoadNode->isInvariant(),
- Alignment);
+ LoadNode->getPointerInfo(), MVT::i16, Alignment,
+ LoadNode->getMemOperand()->getFlags());
// SHL 16, then OR base and base+2.
SDValue ShiftAmount = DAG.getConstant(16, DL, MVT::i32);
SDValue Tmp1 = DAG.getNode(ISD::SHL, DL, MVT::i32, Loads[1], ShiftAmount);
@@ -1323,20 +1357,14 @@ SDValue HexagonTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
Increment = DAG.getConstant(4, DL, MVT::i32);
Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
Loads[2] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
- LoadNode->getPointerInfo(), MVT::i16,
- LoadNode->isVolatile(),
- LoadNode->isNonTemporal(),
- LoadNode->isInvariant(),
- Alignment);
+ LoadNode->getPointerInfo(), MVT::i16, Alignment,
+ LoadNode->getMemOperand()->getFlags());
// Base + 6.
Increment = DAG.getConstant(6, DL, MVT::i32);
Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
Loads[3] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
- LoadNode->getPointerInfo(), MVT::i16,
- LoadNode->isVolatile(),
- LoadNode->isNonTemporal(),
- LoadNode->isInvariant(),
- Alignment);
+ LoadNode->getPointerInfo(), MVT::i16, Alignment,
+ LoadNode->getMemOperand()->getFlags());
// SHL 16, then OR base+4 and base+6.
Tmp1 = DAG.getNode(ISD::SHL, DL, MVT::i32, Loads[3], ShiftAmount);
SDValue Tmp4 = DAG.getNode(ISD::OR, DL, MVT::i32, Tmp1, Loads[2]);
@@ -1349,8 +1377,8 @@ SDValue HexagonTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
} else {
// Perform default type expansion.
Result = DAG.getLoad(MVT::i64, DL, Chain, Ptr, LoadNode->getPointerInfo(),
- LoadNode->isVolatile(), LoadNode->isNonTemporal(),
- LoadNode->isInvariant(), LoadNode->getAlignment());
+ LoadNode->getAlignment(),
+ LoadNode->getMemOperand()->getFlags());
LoadChain = Result.getValue(1);
}
} else
@@ -1370,15 +1398,15 @@ HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
EVT ValTy = Op.getValueType();
ConstantPoolSDNode *CPN = cast<ConstantPoolSDNode>(Op);
unsigned Align = CPN->getAlignment();
- Reloc::Model RM = HTM.getRelocationModel();
- unsigned char TF = (RM == Reloc::PIC_) ? HexagonII::MO_PCREL : 0;
+ bool IsPositionIndependent = isPositionIndependent();
+ unsigned char TF = IsPositionIndependent ? HexagonII::MO_PCREL : 0;
SDValue T;
if (CPN->isMachineConstantPoolEntry())
T = DAG.getTargetConstantPool(CPN->getMachineCPVal(), ValTy, Align, TF);
else
T = DAG.getTargetConstantPool(CPN->getConstVal(), ValTy, Align, TF);
- if (RM == Reloc::PIC_)
+ if (IsPositionIndependent)
return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), ValTy, T);
return DAG.getNode(HexagonISD::CP, SDLoc(Op), ValTy, T);
}
@@ -1387,8 +1415,7 @@ SDValue
HexagonTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
int Idx = cast<JumpTableSDNode>(Op)->getIndex();
- Reloc::Model RM = HTM.getRelocationModel();
- if (RM == Reloc::PIC_) {
+ if (isPositionIndependent()) {
SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), VT, T);
}
@@ -1415,7 +1442,7 @@ HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
return DAG.getLoad(VT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
- MachinePointerInfo(), false, false, false, 0);
+ MachinePointerInfo());
}
// Return LR, which contains the return address. Mark it an implicit live-in.
@@ -1436,8 +1463,7 @@ HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
HRI.getFrameRegister(), VT);
while (Depth--)
FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
- MachinePointerInfo(),
- false, false, false, 0);
+ MachinePointerInfo());
return FrameAddr;
}
@@ -1461,13 +1487,12 @@ HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const {
if (RM == Reloc::Static) {
SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
- if (HLOF.IsGlobalInSmallSection(GV, HTM))
+ if (HLOF.isGlobalInSmallSection(GV, HTM))
return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, GA);
return DAG.getNode(HexagonISD::CONST32, dl, PtrVT, GA);
}
- bool UsePCRel = GV->hasInternalLinkage() || GV->hasHiddenVisibility() ||
- (GV->hasLocalLinkage() && !isa<Function>(GV));
+ bool UsePCRel = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
if (UsePCRel) {
SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset,
HexagonII::MO_PCREL);
@@ -1490,7 +1515,7 @@ HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
Reloc::Model RM = HTM.getRelocationModel();
if (RM == Reloc::Static) {
- SDValue A = DAG.getTargetBlockAddress(BA, PtrVT);
+ SDValue A = DAG.getTargetBlockAddress(BA, PtrVT);
return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, A);
}
@@ -1507,6 +1532,157 @@ HexagonTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG)
return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), PtrVT, GOTSym);
}
+SDValue
+HexagonTargetLowering::GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
+ GlobalAddressSDNode *GA, SDValue *InFlag, EVT PtrVT, unsigned ReturnReg,
+ unsigned char OperandFlags) const {
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDLoc dl(GA);
+ SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
+ GA->getValueType(0),
+ GA->getOffset(),
+ OperandFlags);
+ // Create the operands for the call. The operands should be, in order:
+ // 1. Chain SDValue
+ // 2. Callee, which in this case is the global address value.
+ // 3. Registers live into the call. In this case it is R0, as we
+ // have just one argument to be passed.
+ // 4. InFlag, if there is one.
+ // Note: The order is important.
+
+ if (InFlag) {
+ SDValue Ops[] = { Chain, TGA,
+ DAG.getRegister(Hexagon::R0, PtrVT), *InFlag };
+ Chain = DAG.getNode(HexagonISD::CALLv3, dl, NodeTys, Ops);
+ } else {
+ SDValue Ops[] = { Chain, TGA, DAG.getRegister(Hexagon::R0, PtrVT)};
+ Chain = DAG.getNode(HexagonISD::CALLv3, dl, NodeTys, Ops);
+ }
+
+ // Inform MFI that function has calls.
+ MFI->setAdjustsStack(true);
+
+ SDValue Flag = Chain.getValue(1);
+ return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
+}
+
+//
+// Lower using the initial-exec model for TLS addresses
+//
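+// Illustrative sketch of the resulting sequence (register names and
+// assembly syntax are placeholders, not part of this change):
+//   r1 = ##sym@IE            // via the GOT (sym@IEGOT) when compiling PIC
+//   r1 = memw(r1)            // load the TP-relative offset
+//   r0 = add(ugp, r1)        // ugp holds the thread pointer
+//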
+SDValue
+HexagonTargetLowering::LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
+ SelectionDAG &DAG) const {
+ SDLoc dl(GA);
+ int64_t Offset = GA->getOffset();
+ auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+ // Get the thread pointer.
+ SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);
+
+ bool IsPositionIndependent = isPositionIndependent();
+ unsigned char TF =
+ IsPositionIndependent ? HexagonII::MO_IEGOT : HexagonII::MO_IE;
+
+ // First generate the TLS symbol address
+ SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT,
+ Offset, TF);
+
+ SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
+
+ if (IsPositionIndependent) {
+ // Generate the GOT pointer for position-independent code
+ SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Sym, DAG);
+
+ // Add the TLS symbol address to the GOT pointer. This gives a
+ // GOT-relative relocation for the symbol.
+ Sym = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);
+ }
+
+ // Load the offset value for the TLS symbol. This offset is relative to
+ // the thread pointer.
+ SDValue LoadOffset =
+ DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Sym, MachinePointerInfo());
+
+ // The address of the thread-local variable is the sum of the thread
+ // pointer and the variable's offset.
+ return DAG.getNode(ISD::ADD, dl, PtrVT, TP, LoadOffset);
+}
+
+//
+// Lower using the local-exec model for TLS addresses
+//
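+// Illustrative sketch (placeholder syntax): the offset is a link-time
+// constant for local-exec, so the address is computed directly as
+//   r0 = add(ugp, ##sym@TPREL)
+//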
+SDValue
+HexagonTargetLowering::LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
+ SelectionDAG &DAG) const {
+ SDLoc dl(GA);
+ int64_t Offset = GA->getOffset();
+ auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+ // Get the thread pointer.
+ SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);
+ // Generate the TLS symbol address
+ SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
+ HexagonII::MO_TPREL);
+ SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
+
+ // The address of the thread-local variable is the sum of the thread
+ // pointer and the variable's offset.
+ return DAG.getNode(ISD::ADD, dl, PtrVT, TP, Sym);
+}
+
+//
+// Lower using the general dynamic model for TLS addresses
+//
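+// Illustrative sketch of the emitted pattern (placeholder syntax):
+//   r0 = add(<GOT base>, ##sym@GDGOT)  // descriptor address passed in R0
+//   call sym@GDPLT                     // resolver returns the address in R0
+//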
+SDValue
+HexagonTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
+ SelectionDAG &DAG) const {
+ SDLoc dl(GA);
+ int64_t Offset = GA->getOffset();
+ auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+ // First generate the TLS symbol address
+ SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
+ HexagonII::MO_GDGOT);
+
+ // Then, generate the GOT pointer
+ SDValue GOT = LowerGLOBAL_OFFSET_TABLE(TGA, DAG);
+
+ // Add the TLS symbol and the GOT pointer
+ SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
+ SDValue Chain = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);
+
+ // Copy over the argument to R0
+ SDValue InFlag;
+ Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, Hexagon::R0, Chain, InFlag);
+ InFlag = Chain.getValue(1);
+
+ return GetDynamicTLSAddr(DAG, Chain, GA, &InFlag, PtrVT,
+ Hexagon::R0, HexagonII::MO_GDPLT);
+}
+
+//
+// Lower TLS addresses.
+//
+// For now, for the dynamic models we only support the general-dynamic model.
+//
+SDValue
+HexagonTargetLowering::LowerGlobalTLSAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
+
+ switch (HTM.getTLSModel(GA->getGlobal())) {
+ case TLSModel::GeneralDynamic:
+ case TLSModel::LocalDynamic:
+ return LowerToTLSGeneralDynamicModel(GA, DAG);
+ case TLSModel::InitialExec:
+ return LowerToTLSInitialExecModel(GA, DAG);
+ case TLSModel::LocalExec:
+ return LowerToTLSLocalExecModel(GA, DAG);
+ }
+ llvm_unreachable("Bogus TLS model");
+}
+
//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//
@@ -1524,9 +1700,11 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
setPrefLoopAlignment(4);
setPrefFunctionAlignment(4);
setMinFunctionAlignment(2);
- setInsertFencesForAtomic(false);
setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
+ setMaxAtomicSizeInBitsSupported(64);
+ setMinCmpXchgSizeInBits(32);
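+ // Roughly: atomics wider than 64 bits are lowered to atomic library
+ // calls, and compare-exchange narrower than 32 bits is widened to 32.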
+
if (EnableHexSDNodeSched)
setSchedulingPreference(Sched::VLIW);
else
@@ -1606,8 +1784,11 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
setOperationAction(ISD::INLINEASM, MVT::Other, Custom);
+ setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
+ setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
+ setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
// Custom legalize GlobalAddress nodes into CONST32.
@@ -1629,9 +1810,9 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
if (EmitJumpTables)
- setMinimumJumpTableEntries(2);
- else
setMinimumJumpTableEntries(MinimumJumpTables);
+ else
+ setMinimumJumpTableEntries(INT_MAX);
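+ // Note: INT_MAX effectively disables jump-table generation here.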
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
// Hexagon has instructions for add/sub with carry. The problem with
@@ -1668,10 +1849,6 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::CTLZ, MVT::i16, Promote);
setOperationAction(ISD::CTTZ, MVT::i8, Promote);
setOperationAction(ISD::CTTZ, MVT::i16, Promote);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
// In V5, popcount can count # of 1s in i64 but returns i32.
// On V4 it will be expanded (set later).
@@ -1751,8 +1928,7 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
ISD::SMUL_LOHI, ISD::UMUL_LOHI,
// Logical/bit:
ISD::AND, ISD::OR, ISD::XOR, ISD::ROTL, ISD::ROTR,
- ISD::CTPOP, ISD::CTLZ, ISD::CTTZ, ISD::CTLZ_ZERO_UNDEF,
- ISD::CTTZ_ZERO_UNDEF,
+ ISD::CTPOP, ISD::CTLZ, ISD::CTTZ,
// Floating point arithmetic/math functions:
ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FMA, ISD::FDIV,
ISD::FREM, ISD::FNEG, ISD::FABS, ISD::FSQRT, ISD::FSIN,
@@ -2095,7 +2271,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
SDLoc dl(Op);
EVT VT = Op.getValueType();
- if (V2.getOpcode() == ISD::UNDEF)
+ if (V2.isUndef())
V2 = V1;
if (SVN->isSplat()) {
@@ -2113,7 +2289,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
!isa<ConstantSDNode>(V1.getOperand(0))) {
bool IsScalarToVector = true;
for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
- if (V1.getOperand(i).getOpcode() != ISD::UNDEF) {
+ if (!V1.getOperand(i).isUndef()) {
IsScalarToVector = false;
break;
}
@@ -2235,9 +2411,9 @@ HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
SDValue V0 = BVN->getOperand(0);
SDValue V1 = BVN->getOperand(1);
- if (V0.getOpcode() == ISD::UNDEF)
+ if (V0.isUndef())
V0 = DAG.getConstant(0, dl, MVT::i32);
- if (V1.getOpcode() == ISD::UNDEF)
+ if (V1.isUndef())
V1 = DAG.getConstant(0, dl, MVT::i32);
ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(V0);
@@ -2257,7 +2433,7 @@ HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// Try to generate a S2_packhl to build v2i16 vectors.
if (VT.getSimpleVT() == MVT::v2i16) {
for (unsigned i = 0, e = NElts; i != e; ++i) {
- if (BVN->getOperand(i).getOpcode() == ISD::UNDEF)
+ if (BVN->getOperand(i).isUndef())
continue;
ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(BVN->getOperand(i));
// If the element isn't a constant, it is in a register:
@@ -2285,7 +2461,7 @@ HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// combine, const64, etc. are Big Endian.
unsigned OpIdx = NElts - i - 1;
SDValue Operand = BVN->getOperand(OpIdx);
- if (Operand.getOpcode() == ISD::UNDEF)
+ if (Operand.isUndef())
continue;
int64_t Val = 0;
@@ -2559,8 +2735,7 @@ HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
SDValue StoreAddr =
DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getRegister(Hexagon::R30, PtrVT),
DAG.getIntPtrConstant(4, dl));
- Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
- false, false, 0);
+ Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);
// Not needed; we already use it as an explicit input to EH_RETURN.
@@ -2596,6 +2771,7 @@ HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
// Frame & Return address.
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
+ case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
@@ -2608,7 +2784,9 @@ HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::VSELECT: return LowerVSELECT(Op, DAG);
case ISD::CTPOP: return LowerCTPOP(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+ case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
case ISD::INLINEASM: return LowerINLINEASM(Op, DAG);
+ case ISD::PREFETCH: return LowerPREFETCH(Op, DAG);
}
}
@@ -2622,18 +2800,17 @@ HexagonTargetLowering::getPICJumpTableRelocBase(SDValue Table,
return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Table), VT, T);
}
-MachineBasicBlock *
-HexagonTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB)
- const {
- switch (MI->getOpcode()) {
- case Hexagon::ALLOCA: {
- MachineFunction *MF = BB->getParent();
- auto *FuncInfo = MF->getInfo<HexagonMachineFunctionInfo>();
- FuncInfo->addAllocaAdjustInst(MI);
- return BB;
- }
- default: llvm_unreachable("Unexpected instr type to insert");
+MachineBasicBlock *HexagonTargetLowering::EmitInstrWithCustomInserter(
+ MachineInstr &MI, MachineBasicBlock *BB) const {
+ switch (MI.getOpcode()) {
+ case Hexagon::ALLOCA: {
+ MachineFunction *MF = BB->getParent();
+ auto *FuncInfo = MF->getInfo<HexagonMachineFunctionInfo>();
+ FuncInfo->addAllocaAdjustInst(&MI);
+ return BB;
+ }
+ default:
+ llvm_unreachable("Unexpected instr type to insert");
} // switch
}
@@ -2641,6 +2818,20 @@ HexagonTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Inline Assembly Support
//===----------------------------------------------------------------------===//
+TargetLowering::ConstraintType
+HexagonTargetLowering::getConstraintType(StringRef Constraint) const {
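+ // 'q' (HVX predicate) and 'v' (HVX vector) act as register constraints,
+ // but only when HVX operations are available.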
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ case 'q':
+ case 'v':
+ if (Subtarget.useHVXOps())
+ return C_Register;
+ break;
+ }
+ }
+ return TargetLowering::getConstraintType(Constraint);
+}
+
std::pair<unsigned, const TargetRegisterClass *>
HexagonTargetLowering::getRegForInlineAsmConstraint(
const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
@@ -2814,6 +3005,32 @@ bool llvm::isPositiveHalfWord(SDNode *N) {
}
}
+bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
+ unsigned AS, unsigned Align, bool *Fast) const {
+ if (Fast)
+ *Fast = false;
+
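+ // Misaligned accesses are permitted (though never reported as fast)
+ // only for the HVX vector types listed below.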
+ switch (VT.getSimpleVT().SimpleTy) {
+ default:
+ return false;
+ case MVT::v64i8:
+ case MVT::v128i8:
+ case MVT::v256i8:
+ case MVT::v32i16:
+ case MVT::v64i16:
+ case MVT::v128i16:
+ case MVT::v16i32:
+ case MVT::v32i32:
+ case MVT::v64i32:
+ case MVT::v8i64:
+ case MVT::v16i64:
+ case MVT::v32i64:
+ return true;
+ }
+ return false;
+}
+
std::pair<const TargetRegisterClass*, uint8_t>
HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
MVT VT) const {
@@ -2892,3 +3109,10 @@ bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
// Do not expand loads and stores that don't exceed 64 bits.
return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64;
}
+
+bool HexagonTargetLowering::shouldExpandAtomicCmpXchgInIR(
+ AtomicCmpXchgInst *AI) const {
+ const DataLayout &DL = AI->getModule()->getDataLayout();
+ unsigned Size = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
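+ // 4- and 8-byte compare-and-swap is expanded in IR, letting the atomic
+ // expansion pass build an LL/SC loop from the target's load-locked and
+ // store-conditional hooks.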
+ return Size >= 4 && Size <= 8;
+}
diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h
index bf378b922220..71f67349befe 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/lib/Target/Hexagon/HexagonISelLowering.h
@@ -94,7 +94,7 @@ bool isPositiveHalfWord(SDNode *N);
bool CanReturnSmallStruct(const Function* CalleeFn, unsigned& RetSize)
const;
- void promoteLdStType(EVT VT, EVT PromotedLdStVT);
+ void promoteLdStType(MVT VT, MVT PromotedLdStVT);
const HexagonTargetMachine &HTM;
const HexagonSubtarget &Subtarget;
@@ -128,22 +128,37 @@ bool isPositiveHalfWord(SDNode *N);
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEH_LABEL(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
- bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl,
- SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const override;
+ SDValue
+ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
+ SelectionDAG &DAG) const;
+ SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
+ SelectionDAG &DAG) const;
+ SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
+ SelectionDAG &DAG) const;
+ SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
+ GlobalAddressSDNode *GA, SDValue *InFlag, EVT PtrVT,
+ unsigned ReturnReg, unsigned char OperandFlags) const;
SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl,
- SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
- const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const;
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDValue Callee) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
@@ -153,14 +168,15 @@ bool isPositiveHalfWord(SDNode *N);
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
- bool isVarArg, const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals, SDLoc dl,
- SelectionDAG &DAG) const override;
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &dl, SelectionDAG &DAG) const override;
bool mayBeEmittedAsTailCall(CallInst *CI) const override;
- MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB) const override;
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *BB) const override;
/// If a physical register, this returns the register that receives the
/// exception address on entry to an EH pad.
@@ -192,6 +208,8 @@ bool isPositiveHalfWord(SDNode *N);
ISD::MemIndexedMode &AM,
SelectionDAG &DAG) const override;
+ ConstraintType getConstraintType(StringRef Constraint) const override;
+
std::pair<unsigned, const TargetRegisterClass *>
getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint, MVT VT) const override;
@@ -200,13 +218,12 @@ bool isPositiveHalfWord(SDNode *N);
getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
if (ConstraintCode == "o")
return InlineAsm::Constraint_o;
- else if (ConstraintCode == "v")
- return InlineAsm::Constraint_v;
return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
}
// Intrinsics
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
/// The type may be VoidTy, in which case only return true if the addressing
@@ -226,6 +243,9 @@ bool isPositiveHalfWord(SDNode *N);
/// the immediate into a register.
bool isLegalICmpImmediate(int64_t Imm) const override;
+ bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
+ unsigned Align, bool *Fast) const override;
+
/// Returns relocation base for the given PIC jumptable.
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG)
const override;
@@ -237,6 +257,8 @@ bool isPositiveHalfWord(SDNode *N);
Value *Addr, AtomicOrdering Ord) const override;
AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
+ bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
+
AtomicExpansionKind
shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override {
return AtomicExpansionKind::LLSC;
diff --git a/lib/Target/Hexagon/HexagonInstrAlias.td b/lib/Target/Hexagon/HexagonInstrAlias.td
index 5a1a69b40d4d..9cbeae7c67c8 100644
--- a/lib/Target/Hexagon/HexagonInstrAlias.td
+++ b/lib/Target/Hexagon/HexagonInstrAlias.td
@@ -460,3 +460,195 @@ def : InstAlias<"$Pd=cmp.lt($Rs, $Rt)",
def : InstAlias<"$Pd=cmp.ltu($Rs, $Rt)",
(C2_cmpgtu PredRegs:$Pd, IntRegs:$Rt, IntRegs:$Rs), 0>;
+// maps if (!Pu) jumpr Rs -> if (!Pu) jumpr:nt Rs
+def : InstAlias<"if (!$Pu) jumpr $Rs",
+ (J2_jumprf PredRegs:$Pu, IntRegs:$Rs)>,
+ Requires<[HasV60T]>;
+
+// maps if (Pu) jumpr Rs -> if (Pu) jumpr:nt Rs
+def : InstAlias<"if ($Pu) jumpr $Rs",
+ (J2_jumprt PredRegs:$Pu, IntRegs:$Rs)>,
+ Requires<[HasV60T]>;
+
+// maps if (!Pu) jump $r15_2 -> if (!Pu) jump:nt $r15_2
+def : InstAlias<"if (!$Pu) jump $r15_2",
+ (J2_jumpf PredRegs:$Pu, brtarget:$r15_2)>,
+ Requires<[HasV60T]>;
+
+// maps if (Pu) jump $r15_2 -> if (Pu) jump:nt $r15_2
+def : InstAlias<"if ($Pu) jump $r15_2",
+ (J2_jumpt PredRegs:$Pu, brtarget:$r15_2)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"if ($src) jump $r15_2",
+ (J2_jumpt PredRegs:$src, brtarget:$r15_2), 0>;
+
+def : InstAlias<"if (!$src) jump $r15_2",
+ (J2_jumpf PredRegs:$src, brtarget:$r15_2), 0>;
+
+def : InstAlias<"if ($src1) jumpr $src2",
+ (J2_jumprt PredRegs:$src1, IntRegs:$src2), 0>;
+
+def : InstAlias<"if (!$src1) jumpr $src2",
+ (J2_jumprf PredRegs:$src1, IntRegs:$src2), 0>;
+
+// V6_vassignp: Vector assign mapping.
+let hasNewValue = 1, opNewValue = 0, isAsmParserOnly = 1 in
+def HEXAGON_V6_vassignpair: CVI_VA_DV_Resource <
+ (outs VecDblRegs:$Vdd),
+ (ins VecDblRegs:$Vss),
+ "$Vdd = $Vss">;
+
+// maps Vd = #0 to Vd = vxor(Vd, Vd)
+def : InstAlias<"$Vd = #0",
+ (V6_vxor VectorRegs:$Vd, VectorRegs:$Vd, VectorRegs:$Vd)>,
+ Requires<[HasV60T]>;
+
+// maps Vdd = #0 to Vdd = vsub(Vdd, Vdd)
+def : InstAlias<"$Vdd = #0",
+ (V6_vsubw_dv VecDblRegs:$Vdd, VecDblRegs:$Vdd, VecDblRegs:$Vdd)>,
+ Requires<[HasV60T]>;
+
+// maps "$Qd = vcmp.eq($Vu.uh, $Vv.uh)" -> "$Qd = vcmp.eq($Vu.h, $Vv.h)"
+def : InstAlias<"$Qd = vcmp.eq($Vu.uh, $Vv.uh)",
+ (V6_veqh VecPredRegs:$Qd, VectorRegs:$Vu, VectorRegs:$Vv)>,
+ Requires<[HasV60T]>;
+
+// maps "$Qd &= vcmp.eq($Vu.uh, $Vv.uh)" -> "$Qd &= vcmp.eq($Vu.h, $Vv.h)"
+def : InstAlias<"$Qd &= vcmp.eq($Vu.uh, $Vv.uh)",
+ (V6_veqh_and VecPredRegs:$Qd, VectorRegs:$Vu, VectorRegs:$Vv)>,
+ Requires<[HasV60T]>;
+
+// maps "$Qd |= vcmp.eq($Vu.uh, $Vv.uh)" -> "$Qd |= vcmp.eq($Vu.h, $Vv.h)"
+def : InstAlias<"$Qd |= vcmp.eq($Vu.uh, $Vv.uh)",
+ (V6_veqh_or VecPredRegs:$Qd, VectorRegs:$Vu, VectorRegs:$Vv)>,
+ Requires<[HasV60T]>;
+
+// maps "$Qd ^= vcmp.eq($Vu.uh, $Vv.uh)" -> "$Qd ^= vcmp.eq($Vu.h, $Vv.h)"
+def : InstAlias<"$Qd ^= vcmp.eq($Vu.uh, $Vv.uh)",
+ (V6_veqh_xor VecPredRegs:$Qd, VectorRegs:$Vu, VectorRegs:$Vv)>,
+ Requires<[HasV60T]>;
+
+// maps "$Qd = vcmp.eq($Vu.uw, $Vv.uw)" -> "$Qd = vcmp.eq($Vu.w, $Vv.w)"
+def : InstAlias<"$Qd = vcmp.eq($Vu.uw, $Vv.uw)",
+ (V6_veqw VecPredRegs:$Qd, VectorRegs:$Vu, VectorRegs:$Vv)>,
+ Requires<[HasV60T]>;
+
+// maps "$Qd &= vcmp.eq($Vu.uw, $Vv.uw)" -> "$Qd &= vcmp.eq($Vu.w, $Vv.w)"
+def : InstAlias<"$Qd &= vcmp.eq($Vu.uw, $Vv.uw)",
+ (V6_veqw_and VecPredRegs:$Qd, VectorRegs:$Vu, VectorRegs:$Vv)>,
+ Requires<[HasV60T]>;
+
+// maps "$Qd |= vcmp.eq($Vu.uw, $Vv.uw)" -> "$Qd |= vcmp.eq($Vu.w, $Vv.w)"
+def : InstAlias<"$Qd |= vcmp.eq($Vu.uw, $Vv.uw)",
+ (V6_veqw_or VecPredRegs:$Qd, VectorRegs:$Vu, VectorRegs:$Vv)>,
+ Requires<[HasV60T]>;
+
+// maps "$Qd ^= vcmp.eq($Vu.uw, $Vv.uw)" -> "$Qd ^= vcmp.eq($Vu.w, $Vv.w)"
+def : InstAlias<"$Qd ^= vcmp.eq($Vu.uw, $Vv.uw)",
+ (V6_veqw_xor VecPredRegs:$Qd, VectorRegs:$Vu, VectorRegs:$Vv)>,
+ Requires<[HasV60T]>;
+
+// maps "$Qd = vcmp.eq($Vu.ub, $Vv.ub)" -> "$Qd = vcmp.eq($Vu.b, $Vv.b)"
+def : InstAlias<"$Qd = vcmp.eq($Vu.ub, $Vv.ub)",
+ (V6_veqb VecPredRegs:$Qd, VectorRegs:$Vu, VectorRegs:$Vv)>,
+ Requires<[HasV60T]>;
+
+// maps "$Qd &= vcmp.eq($Vu.ub, $Vv.ub)" -> "$Qd &= vcmp.eq($Vu.b, $Vv.b)"
+def : InstAlias<"$Qd &= vcmp.eq($Vu.ub, $Vv.ub)",
+ (V6_veqb_and VecPredRegs:$Qd, VectorRegs:$Vu, VectorRegs:$Vv)>,
+ Requires<[HasV60T]>;
+
+// maps "$Qd |= vcmp.eq($Vu.ub, $Vv.ub)" -> "$Qd |= vcmp.eq($Vu.b, $Vv.b)"
+def : InstAlias<"$Qd |= vcmp.eq($Vu.ub, $Vv.ub)",
+ (V6_veqb_or VecPredRegs:$Qd, VectorRegs:$Vu, VectorRegs:$Vv)>,
+ Requires<[HasV60T]>;
+
+// maps "$Qd ^= vcmp.eq($Vu.ub, $Vv.ub)" -> "$Qd ^= vcmp.eq($Vu.b, $Vv.b)"
+def : InstAlias<"$Qd ^= vcmp.eq($Vu.ub, $Vv.ub)",
+ (V6_veqb_xor VecPredRegs:$Qd, VectorRegs:$Vu, VectorRegs:$Vv)>,
+ Requires<[HasV60T]>;
+
+// maps "$Rd.w = vextract($Vu, $Rs)" -> "$Rd = vextract($Vu, $Rs)"
+def : InstAlias<"$Rd.w = vextract($Vu, $Rs)",
+ (V6_extractw IntRegs:$Rd, VectorRegs:$Vu, IntRegs:$Rs)>,
+ Requires<[HasV60T]>;
+
+// Mapping from vtrans2x2(Vy32,Vx32,Rt32) to vshuff(Vy32,Vx32,Rt32)
+def : InstAlias<"vtrans2x2($Vy, $Vx, $Rt)",
+ (V6_vshuff VectorRegs:$Vy, VectorRegs:$Vx, IntRegs:$Rt)>,
+ Requires<[HasV60T]>;
+
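+// The vmem/vmemu aliases below map the bare register-indirect forms onto
+// the corresponding base+offset instructions with an immediate offset of 0.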
+def : InstAlias<"$Vt=vmem($Rs)",
+ (V6_vL32b_ai VectorRegs:$Vt, IntRegs:$Rs, 0)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"$Vt=vmem($Rs):nt",
+ (V6_vL32b_nt_ai VectorRegs:$Vt, IntRegs:$Rs, 0)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"vmem($Rs)=$Vt",
+ (V6_vS32b_ai IntRegs:$Rs, 0, VectorRegs:$Vt)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"vmem($Rs):nt=$Vt",
+ (V6_vS32b_nt_ai IntRegs:$Rs, 0, VectorRegs:$Vt)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"vmem($Rs)=$Vt.new",
+ (V6_vS32b_new_ai IntRegs:$Rs, 0, VectorRegs:$Vt)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"vmem($Rs):nt=$Vt.new",
+ (V6_vS32b_nt_new_ai IntRegs:$Rs, 0, VectorRegs:$Vt)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"if ($Qv) vmem($Rs)=$Vt",
+ (V6_vS32b_qpred_ai VecPredRegs:$Qv, IntRegs:$Rs, 0, VectorRegs:$Vt)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"if (!$Qv) vmem($Rs)=$Vt",
+ (V6_vS32b_nqpred_ai VecPredRegs:$Qv, IntRegs:$Rs, 0, VectorRegs:$Vt)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"if ($Qv) vmem($Rs):nt=$Vt",
+ (V6_vS32b_nt_qpred_ai VecPredRegs:$Qv, IntRegs:$Rs, 0, VectorRegs:$Vt)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"if (!$Qv) vmem($Rs):nt=$Vt",
+ (V6_vS32b_nt_nqpred_ai VecPredRegs:$Qv, IntRegs:$Rs, 0, VectorRegs:$Vt)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"if ($Pv) vmem($Rs)=$Vt",
+ (V6_vS32b_pred_ai PredRegs:$Pv, IntRegs:$Rs, 0, VectorRegs:$Vt)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"if (!$Pv) vmem($Rs)=$Vt",
+ (V6_vS32b_npred_ai PredRegs:$Pv, IntRegs:$Rs, 0, VectorRegs:$Vt)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"if ($Pv) vmem($Rs):nt=$Vt",
+ (V6_vS32b_nt_pred_ai PredRegs:$Pv, IntRegs:$Rs, 0, VectorRegs:$Vt)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"if (!$Pv) vmem($Rs):nt=$Vt",
+ (V6_vS32b_nt_npred_ai PredRegs:$Pv, IntRegs:$Rs, 0, VectorRegs:$Vt)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"$Vt=vmemu($Rs)",
+ (V6_vL32Ub_ai VectorRegs:$Vt, IntRegs:$Rs, 0)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"vmemu($Rs)=$Vt",
+ (V6_vS32Ub_ai IntRegs:$Rs, 0, VectorRegs:$Vt)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"if ($Pv) vmemu($Rs)=$Vt",
+ (V6_vS32Ub_pred_ai PredRegs:$Pv, IntRegs:$Rs, 0, VectorRegs:$Vt)>,
+ Requires<[HasV60T]>;
+
+def : InstAlias<"if (!$Pv) vmemu($Rs)=$Vt",
+ (V6_vS32Ub_npred_ai PredRegs:$Pv, IntRegs:$Rs, 0, VectorRegs:$Vt)>,
+ Requires<[HasV60T]>;
diff --git a/lib/Target/Hexagon/HexagonInstrFormats.td b/lib/Target/Hexagon/HexagonInstrFormats.td
index 3c5ec1701dc2..0bfb04447f2f 100644
--- a/lib/Target/Hexagon/HexagonInstrFormats.td
+++ b/lib/Target/Hexagon/HexagonInstrFormats.td
@@ -342,6 +342,10 @@ class JInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
string cstr = "", InstrItinClass itin = J_tc_2early_SLOT23>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeJ>, OpcodeHexagon;
+class JInst_CJUMP_UCJUMP<dag outs, dag ins, string asmstr, list<dag> pattern = [],
+ string cstr = "", InstrItinClass itin = J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeJ>, OpcodeHexagon;
+
// JR Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class JRInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
@@ -412,21 +416,11 @@ class STInstPI<dag outs, dag ins, string asmstr, list<dag> pattern = [],
string cstr = "">
: STInst<outs, ins, asmstr, pattern, cstr>;
-let mayStore = 1 in
-class STInst2PI<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : STInst<outs, ins, asmstr, pattern, cstr>;
-
// Post increment LD Instruction.
class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern = [],
string cstr = "">
: LDInst<outs, ins, asmstr, pattern, cstr>;
-let mayLoad = 1 in
-class LDInst2PI<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : LDInst<outs, ins, asmstr, pattern, cstr>;
-
//===----------------------------------------------------------------------===//
// V4 Instruction Format Definitions +
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/Hexagon/HexagonInstrFormatsV4.td b/lib/Target/Hexagon/HexagonInstrFormatsV4.td
index 2d1dea526eed..e17f71fe4e6a 100644
--- a/lib/Target/Hexagon/HexagonInstrFormatsV4.td
+++ b/lib/Target/Hexagon/HexagonInstrFormatsV4.td
@@ -139,7 +139,6 @@ class MEMInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
string cstr = "", InstrItinClass itin = V4LDST_tc_st_SLOT0>
: MEMInst<outs, ins, asmstr, pattern, cstr, itin>;
-let isCodeGenOnly = 1 in
class EXTENDERInst<dag outs, dag ins, string asmstr, list<dag> pattern = []>
: InstHexagon<outs, ins, asmstr, pattern, "", EXTENDER_tc_1_SLOT0123,
TypePREFIX>, OpcodeHexagon;
@@ -151,5 +150,11 @@ class SUBInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
class CJInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
string cstr = "">
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, COMPOUND_CJ_ARCHDEPSLOT, TypeCOMPOUND>,
+ OpcodeHexagon;
+
+class CJInst_JMPSET<dag outs, dag ins, string asmstr, list<dag> pattern = [],
+ string cstr = "">
: InstHexagon<outs, ins, asmstr, pattern, cstr, COMPOUND, TypeCOMPOUND>,
OpcodeHexagon;
+
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index eb3590cb1076..fe9f97d1d5e7 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -24,6 +24,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
@@ -69,10 +70,10 @@ static cl::opt<bool> BranchRelaxAsmLarge("branch-relax-asm-large",
///
/// Constants for Hexagon instructions.
///
-const int Hexagon_MEMV_OFFSET_MAX_128B = 2047; // #s7
-const int Hexagon_MEMV_OFFSET_MIN_128B = -2048; // #s7
-const int Hexagon_MEMV_OFFSET_MAX = 1023; // #s6
-const int Hexagon_MEMV_OFFSET_MIN = -1024; // #s6
+const int Hexagon_MEMV_OFFSET_MAX_128B = 896; // #s4: -8*128...7*128
+const int Hexagon_MEMV_OFFSET_MIN_128B = -1024; // #s4
+const int Hexagon_MEMV_OFFSET_MAX = 448; // #s4: -8*64...7*64
+const int Hexagon_MEMV_OFFSET_MIN = -512; // #s4
const int Hexagon_MEMW_OFFSET_MAX = 4095;
const int Hexagon_MEMW_OFFSET_MIN = -4096;
const int Hexagon_MEMD_OFFSET_MAX = 8191;
@@ -91,10 +92,10 @@ const int Hexagon_MEMH_AUTOINC_MAX = 14;
const int Hexagon_MEMH_AUTOINC_MIN = -16;
const int Hexagon_MEMB_AUTOINC_MAX = 7;
const int Hexagon_MEMB_AUTOINC_MIN = -8;
-const int Hexagon_MEMV_AUTOINC_MAX = 192;
-const int Hexagon_MEMV_AUTOINC_MIN = -256;
-const int Hexagon_MEMV_AUTOINC_MAX_128B = 384;
-const int Hexagon_MEMV_AUTOINC_MIN_128B = -512;
+const int Hexagon_MEMV_AUTOINC_MAX = 192; // #s3
+const int Hexagon_MEMV_AUTOINC_MIN = -256; // #s3
+const int Hexagon_MEMV_AUTOINC_MAX_128B = 384; // #s3
+const int Hexagon_MEMV_AUTOINC_MIN_128B = -512; // #s3
// Pin the vtable to this file.
void HexagonInstrInfo::anchor() {}
@@ -230,22 +231,64 @@ static bool isDuplexPairMatch(unsigned Ga, unsigned Gb) {
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
-unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- switch (MI->getOpcode()) {
- default: break;
- case Hexagon::L2_loadri_io:
- case Hexagon::L2_loadrd_io:
- case Hexagon::L2_loadrh_io:
+ switch (MI.getOpcode()) {
+ default:
+ break;
case Hexagon::L2_loadrb_io:
case Hexagon::L2_loadrub_io:
- if (MI->getOperand(2).isFI() &&
- MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) {
- FrameIndex = MI->getOperand(2).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
+ case Hexagon::L2_loadrh_io:
+ case Hexagon::L2_loadruh_io:
+ case Hexagon::L2_loadri_io:
+ case Hexagon::L2_loadrd_io:
+ case Hexagon::V6_vL32b_ai:
+ case Hexagon::V6_vL32b_ai_128B:
+ case Hexagon::V6_vL32Ub_ai:
+ case Hexagon::V6_vL32Ub_ai_128B:
+ case Hexagon::LDriw_pred:
+ case Hexagon::LDriw_mod:
+ case Hexagon::LDriq_pred_V6:
+ case Hexagon::LDriq_pred_vec_V6:
+ case Hexagon::LDriv_pseudo_V6:
+ case Hexagon::LDrivv_pseudo_V6:
+ case Hexagon::LDriq_pred_V6_128B:
+ case Hexagon::LDriq_pred_vec_V6_128B:
+ case Hexagon::LDriv_pseudo_V6_128B:
+ case Hexagon::LDrivv_pseudo_V6_128B: {
+ const MachineOperand OpFI = MI.getOperand(1);
+ if (!OpFI.isFI())
+ return 0;
+ const MachineOperand OpOff = MI.getOperand(2);
+ if (!OpOff.isImm() || OpOff.getImm() != 0)
+ return 0;
+ FrameIndex = OpFI.getIndex();
+ return MI.getOperand(0).getReg();
+ }
+
+ case Hexagon::L2_ploadrbt_io:
+ case Hexagon::L2_ploadrbf_io:
+ case Hexagon::L2_ploadrubt_io:
+ case Hexagon::L2_ploadrubf_io:
+ case Hexagon::L2_ploadrht_io:
+ case Hexagon::L2_ploadrhf_io:
+ case Hexagon::L2_ploadruht_io:
+ case Hexagon::L2_ploadruhf_io:
+ case Hexagon::L2_ploadrit_io:
+ case Hexagon::L2_ploadrif_io:
+ case Hexagon::L2_ploadrdt_io:
+ case Hexagon::L2_ploadrdf_io: {
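+ // Predicated loads carry the predicate in operand 1, so the frame
+ // index and offset move to operands 2 and 3.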
+ const MachineOperand OpFI = MI.getOperand(2);
+ if (!OpFI.isFI())
+ return 0;
+ const MachineOperand OpOff = MI.getOperand(3);
+ if (!OpOff.isImm() || OpOff.getImm() != 0)
+ return 0;
+ FrameIndex = OpFI.getIndex();
+ return MI.getOperand(0).getReg();
+ }
}
+
return 0;
}
@@ -255,21 +298,58 @@ unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
-unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- switch (MI->getOpcode()) {
- default: break;
+ switch (MI.getOpcode()) {
+ default:
+ break;
+ case Hexagon::S2_storerb_io:
+ case Hexagon::S2_storerh_io:
case Hexagon::S2_storeri_io:
case Hexagon::S2_storerd_io:
- case Hexagon::S2_storerh_io:
- case Hexagon::S2_storerb_io:
- if (MI->getOperand(2).isFI() &&
- MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) {
- FrameIndex = MI->getOperand(0).getIndex();
- return MI->getOperand(2).getReg();
- }
- break;
+ case Hexagon::V6_vS32b_ai:
+ case Hexagon::V6_vS32b_ai_128B:
+ case Hexagon::V6_vS32Ub_ai:
+ case Hexagon::V6_vS32Ub_ai_128B:
+ case Hexagon::STriw_pred:
+ case Hexagon::STriw_mod:
+ case Hexagon::STriq_pred_V6:
+ case Hexagon::STriq_pred_vec_V6:
+ case Hexagon::STriv_pseudo_V6:
+ case Hexagon::STrivv_pseudo_V6:
+ case Hexagon::STriq_pred_V6_128B:
+ case Hexagon::STriq_pred_vec_V6_128B:
+ case Hexagon::STriv_pseudo_V6_128B:
+ case Hexagon::STrivv_pseudo_V6_128B: {
+ const MachineOperand &OpFI = MI.getOperand(0);
+ if (!OpFI.isFI())
+ return 0;
+ const MachineOperand &OpOff = MI.getOperand(1);
+ if (!OpOff.isImm() || OpOff.getImm() != 0)
+ return 0;
+ FrameIndex = OpFI.getIndex();
+ return MI.getOperand(2).getReg();
}
+
+ case Hexagon::S2_pstorerbt_io:
+ case Hexagon::S2_pstorerbf_io:
+ case Hexagon::S2_pstorerht_io:
+ case Hexagon::S2_pstorerhf_io:
+ case Hexagon::S2_pstorerit_io:
+ case Hexagon::S2_pstorerif_io:
+ case Hexagon::S2_pstorerdt_io:
+ case Hexagon::S2_pstorerdf_io: {
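+ // Predicated stores carry the predicate in operand 0, so the frame
+ // index moves to operand 1 and the stored register to operand 3.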
+ const MachineOperand &OpFI = MI.getOperand(1);
+ if (!OpFI.isFI())
+ return 0;
+ const MachineOperand &OpOff = MI.getOperand(2);
+ if (!OpOff.isImm() || OpOff.getImm() != 0)
+ return 0;
+ FrameIndex = OpFI.getIndex();
+ return MI.getOperand(3).getReg();
+ }
+ }
+
return 0;
}
@@ -290,7 +370,7 @@ unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
/// Cond[1] = R
/// Cond[2] = Imm
///
-bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+bool HexagonInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
@@ -344,7 +424,7 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
return false;
--I;
}
- if (!isUnpredicatedTerminator(&*I))
+ if (!isUnpredicatedTerminator(*I))
return false;
// Get the last instruction in the block.
@@ -352,7 +432,7 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
MachineInstr *SecondLastInst = nullptr;
// Find one more terminator if present.
for (;;) {
- if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(&*I)) {
+ if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
if (!SecondLastInst)
SecondLastInst = &*I;
else
@@ -377,6 +457,9 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
bool LastOpcodeHasJMP_c = PredOpcodeHasJMP_c(LastOpcode);
bool LastOpcodeHasNVJump = isNewValueJump(LastInst);
+ if (LastOpcodeHasJMP_c && !LastInst->getOperand(1).isMBB())
+ return true;
+
// If there is only one terminator instruction, process it.
if (LastInst && !SecondLastInst) {
if (LastOpcode == Hexagon::J2_jump) {
@@ -412,6 +495,8 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode);
bool SecLastOpcodeHasNVJump = isNewValueJump(SecondLastInst);
if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::J2_jump)) {
+ if (!SecondLastInst->getOperand(1).isMBB())
+ return true;
TBB = SecondLastInst->getOperand(1).getMBB();
Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
Cond.push_back(SecondLastInst->getOperand(0));
@@ -476,10 +561,11 @@ unsigned HexagonInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return Count;
}
-
unsigned HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *TBB, MachineBasicBlock *FBB,
- ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const {
unsigned BOpc = Hexagon::J2_jump;
unsigned BccOpc = Hexagon::J2_jumpt;
assert(validateBranchCond(Cond) && "Invalid branching condition");
@@ -499,9 +585,9 @@ unsigned HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,
// into an infinite loop.
MachineBasicBlock *NewTBB, *NewFBB;
SmallVector<MachineOperand, 4> Cond;
- MachineInstr *Term = MBB.getFirstTerminator();
- if (Term != MBB.end() && isPredicated(Term) &&
- !AnalyzeBranch(MBB, NewTBB, NewFBB, Cond, false)) {
+ auto Term = MBB.getFirstTerminator();
+ if (Term != MBB.end() && isPredicated(*Term) &&
+ !analyzeBranch(MBB, NewTBB, NewFBB, Cond, false)) {
MachineBasicBlock *NextBB = &*++MBB.getIterator();
if (NewTBB == NextBB) {
ReverseBranchCondition(Cond);
@@ -592,85 +678,84 @@ bool HexagonInstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
return NumInstrs <= 4;
}
-
void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL, unsigned DestReg,
- unsigned SrcReg, bool KillSrc) const {
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
auto &HRI = getRegisterInfo();
+ unsigned KillFlag = getKillRegState(KillSrc);
+
if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
- BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), DestReg).addReg(SrcReg);
+ BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), DestReg)
+ .addReg(SrcReg, KillFlag);
return;
}
if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
- BuildMI(MBB, I, DL, get(Hexagon::A2_tfrp), DestReg).addReg(SrcReg);
+ BuildMI(MBB, I, DL, get(Hexagon::A2_tfrp), DestReg)
+ .addReg(SrcReg, KillFlag);
return;
}
if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
// Map Pd = Ps to Pd = or(Ps, Ps).
- BuildMI(MBB, I, DL, get(Hexagon::C2_or),
- DestReg).addReg(SrcReg).addReg(SrcReg);
+ BuildMI(MBB, I, DL, get(Hexagon::C2_or), DestReg)
+ .addReg(SrcReg).addReg(SrcReg, KillFlag);
return;
}
- if (Hexagon::DoubleRegsRegClass.contains(DestReg) &&
+ if (Hexagon::CtrRegsRegClass.contains(DestReg) &&
Hexagon::IntRegsRegClass.contains(SrcReg)) {
- // We can have an overlap between single and double reg: r1:0 = r0.
- if(SrcReg == RI.getSubReg(DestReg, Hexagon::subreg_loreg)) {
- // r1:0 = r0
- BuildMI(MBB, I, DL, get(Hexagon::A2_tfrsi), (RI.getSubReg(DestReg,
- Hexagon::subreg_hireg))).addImm(0);
- } else {
- // r1:0 = r1 or no overlap.
- BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), (RI.getSubReg(DestReg,
- Hexagon::subreg_loreg))).addReg(SrcReg);
- BuildMI(MBB, I, DL, get(Hexagon::A2_tfrsi), (RI.getSubReg(DestReg,
- Hexagon::subreg_hireg))).addImm(0);
- }
+ BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
+ .addReg(SrcReg, KillFlag);
return;
}
- if (Hexagon::CtrRegsRegClass.contains(DestReg) &&
+ if (Hexagon::IntRegsRegClass.contains(DestReg) &&
+ Hexagon::CtrRegsRegClass.contains(SrcReg)) {
+ BuildMI(MBB, I, DL, get(Hexagon::A2_tfrcrr), DestReg)
+ .addReg(SrcReg, KillFlag);
+ return;
+ }
+ if (Hexagon::ModRegsRegClass.contains(DestReg) &&
Hexagon::IntRegsRegClass.contains(SrcReg)) {
- BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg).addReg(SrcReg);
+ BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
+ .addReg(SrcReg, KillFlag);
return;
}
if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
Hexagon::IntRegsRegClass.contains(DestReg)) {
- BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg).
- addReg(SrcReg, getKillRegState(KillSrc));
+ BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
+ .addReg(SrcReg, KillFlag);
return;
}
if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
Hexagon::PredRegsRegClass.contains(DestReg)) {
- BuildMI(MBB, I, DL, get(Hexagon::C2_tfrrp), DestReg).
- addReg(SrcReg, getKillRegState(KillSrc));
+ BuildMI(MBB, I, DL, get(Hexagon::C2_tfrrp), DestReg)
+ .addReg(SrcReg, KillFlag);
return;
}
if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
Hexagon::IntRegsRegClass.contains(DestReg)) {
- BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg).
- addReg(SrcReg, getKillRegState(KillSrc));
+ BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
+ .addReg(SrcReg, KillFlag);
return;
}
if (Hexagon::VectorRegsRegClass.contains(SrcReg, DestReg)) {
BuildMI(MBB, I, DL, get(Hexagon::V6_vassign), DestReg).
- addReg(SrcReg, getKillRegState(KillSrc));
+ addReg(SrcReg, KillFlag);
return;
}
if (Hexagon::VecDblRegsRegClass.contains(SrcReg, DestReg)) {
- BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg).
- addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_hireg),
- getKillRegState(KillSrc)).
- addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_loreg),
- getKillRegState(KillSrc));
+ BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg)
+ .addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_hireg), KillFlag)
+ .addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_loreg), KillFlag);
return;
}
if (Hexagon::VecPredRegsRegClass.contains(SrcReg, DestReg)) {
- BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DestReg).
- addReg(SrcReg).
- addReg(SrcReg, getKillRegState(KillSrc));
+ BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DestReg)
+ .addReg(SrcReg)
+ .addReg(SrcReg, KillFlag);
return;
}
if (Hexagon::VecPredRegsRegClass.contains(SrcReg) &&
- Hexagon::VectorRegsRegClass.contains(DestReg)) {
+ Hexagon::VectorRegsRegClass.contains(DestReg)) {
llvm_unreachable("Unimplemented pred to vec");
return;
}
@@ -680,14 +765,12 @@ void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
return;
}
if (Hexagon::VecPredRegs128BRegClass.contains(SrcReg, DestReg)) {
- BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and),
- HRI.getSubReg(DestReg, Hexagon::subreg_hireg)).
- addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_hireg),
- getKillRegState(KillSrc));
- BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and),
- HRI.getSubReg(DestReg, Hexagon::subreg_loreg)).
- addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_loreg),
- getKillRegState(KillSrc));
+ unsigned DstHi = HRI.getSubReg(DestReg, Hexagon::subreg_hireg);
+ BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DstHi)
+ .addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_hireg), KillFlag);
+ unsigned DstLo = HRI.getSubReg(DestReg, Hexagon::subreg_loreg);
+ BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DstLo)
+ .addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_loreg), KillFlag);
return;
}
@@ -708,6 +791,7 @@ void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
unsigned Align = MFI.getObjectAlignment(FI);
+ unsigned KillFlag = getKillRegState(isKill);
MachineMemOperand *MMO = MF.getMachineMemOperand(
MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
@@ -715,25 +799,57 @@ void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io))
- .addFrameIndex(FI).addImm(0)
- .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
+ .addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, KillFlag).addMemOperand(MMO);
} else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(Hexagon::S2_storerd_io))
- .addFrameIndex(FI).addImm(0)
- .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
+ .addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, KillFlag).addMemOperand(MMO);
} else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
- .addFrameIndex(FI).addImm(0)
- .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
+ .addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, KillFlag).addMemOperand(MMO);
+ } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
+ BuildMI(MBB, I, DL, get(Hexagon::STriw_mod))
+ .addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, KillFlag).addMemOperand(MMO);
+ } else if (Hexagon::VecPredRegs128BRegClass.hasSubClassEq(RC)) {
+ BuildMI(MBB, I, DL, get(Hexagon::STriq_pred_V6_128B))
+ .addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, KillFlag).addMemOperand(MMO);
+ } else if (Hexagon::VecPredRegsRegClass.hasSubClassEq(RC)) {
+ BuildMI(MBB, I, DL, get(Hexagon::STriq_pred_V6))
+ .addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, KillFlag).addMemOperand(MMO);
+ } else if (Hexagon::VectorRegs128BRegClass.hasSubClassEq(RC)) {
+ DEBUG(dbgs() << "++Generating 128B vector spill");
+ BuildMI(MBB, I, DL, get(Hexagon::STriv_pseudo_V6_128B))
+ .addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, KillFlag).addMemOperand(MMO);
+ } else if (Hexagon::VectorRegsRegClass.hasSubClassEq(RC)) {
+ DEBUG(dbgs() << "++Generating vector spill");
+ BuildMI(MBB, I, DL, get(Hexagon::STriv_pseudo_V6))
+ .addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, KillFlag).addMemOperand(MMO);
+ } else if (Hexagon::VecDblRegsRegClass.hasSubClassEq(RC)) {
+ DEBUG(dbgs() << "++Generating double vector spill");
+ BuildMI(MBB, I, DL, get(Hexagon::STrivv_pseudo_V6))
+ .addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, KillFlag).addMemOperand(MMO);
+ } else if (Hexagon::VecDblRegs128BRegClass.hasSubClassEq(RC)) {
+ DEBUG(dbgs() << "++Generating 128B double vector spill");
+ BuildMI(MBB, I, DL, get(Hexagon::STrivv_pseudo_V6_128B))
+ .addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, KillFlag).addMemOperand(MMO);
} else {
llvm_unreachable("Unimplemented");
}
}
-
-void HexagonInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, unsigned DestReg, int FI,
- const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const {
+void HexagonInstrInfo::loadRegFromStackSlot(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned DestReg,
+ int FI, const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
DebugLoc DL = MBB.findDebugLoc(I);
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
@@ -742,15 +858,41 @@ void HexagonInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineMemOperand *MMO = MF.getMachineMemOperand(
MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
MFI.getObjectSize(FI), Align);
- if (RC == &Hexagon::IntRegsRegClass) {
+
+ if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg)
- .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
- } else if (RC == &Hexagon::DoubleRegsRegClass) {
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
+ } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(Hexagon::L2_loadrd_io), DestReg)
- .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
- } else if (RC == &Hexagon::PredRegsRegClass) {
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
+ } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
- .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
+ } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
+ BuildMI(MBB, I, DL, get(Hexagon::LDriw_mod), DestReg)
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
+ } else if (Hexagon::VecPredRegs128BRegClass.hasSubClassEq(RC)) {
+ BuildMI(MBB, I, DL, get(Hexagon::LDriq_pred_V6_128B), DestReg)
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
+ } else if (Hexagon::VecPredRegsRegClass.hasSubClassEq(RC)) {
+ BuildMI(MBB, I, DL, get(Hexagon::LDriq_pred_V6), DestReg)
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
+ } else if (Hexagon::VecDblRegs128BRegClass.hasSubClassEq(RC)) {
+ DEBUG(dbgs() << "++Generating 128B double vector restore");
+ BuildMI(MBB, I, DL, get(Hexagon::LDrivv_pseudo_V6_128B), DestReg)
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
+ } else if (Hexagon::VectorRegs128BRegClass.hasSubClassEq(RC)) {
+ DEBUG(dbgs() << "++Generating 128B vector restore");
+ BuildMI(MBB, I, DL, get(Hexagon::LDriv_pseudo_V6_128B), DestReg)
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
+ } else if (Hexagon::VectorRegsRegClass.hasSubClassEq(RC)) {
+ DEBUG(dbgs() << "++Generating vector restore");
+ BuildMI(MBB, I, DL, get(Hexagon::LDriv_pseudo_V6), DestReg)
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
+ } else if (Hexagon::VecDblRegsRegClass.hasSubClassEq(RC)) {
+ DEBUG(dbgs() << "++Generating double vector restore");
+ BuildMI(MBB, I, DL, get(Hexagon::LDrivv_pseudo_V6), DestReg)
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
} else {
llvm_unreachable("Can't store this register to stack slot");
}
@@ -763,48 +905,58 @@ void HexagonInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
/// into real instructions. The target can edit MI in place, or it can insert
/// new instructions and erase MI. The function should return true if
/// anything was changed.
-bool HexagonInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI)
- const {
+bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
const HexagonRegisterInfo &HRI = getRegisterInfo();
- MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
- MachineBasicBlock &MBB = *MI->getParent();
- DebugLoc DL = MI->getDebugLoc();
- unsigned Opc = MI->getOpcode();
+ MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+ MachineBasicBlock &MBB = *MI.getParent();
+ DebugLoc DL = MI.getDebugLoc();
+ unsigned Opc = MI.getOpcode();
const unsigned VecOffset = 1;
bool Is128B = false;
switch (Opc) {
+ case TargetOpcode::COPY: {
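+ // Expand generic COPY pseudos here so copyPhysReg can pick the proper
+ // target instruction; identity and undef-source copies are simply erased.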
+ MachineOperand &MD = MI.getOperand(0);
+ MachineOperand &MS = MI.getOperand(1);
+ MachineBasicBlock::iterator MBBI = MI.getIterator();
+ if (MD.getReg() != MS.getReg() && !MS.isUndef()) {
+ copyPhysReg(MBB, MI, DL, MD.getReg(), MS.getReg(), MS.isKill());
+ std::prev(MBBI)->copyImplicitOps(*MBB.getParent(), MI);
+ }
+ MBB.erase(MBBI);
+ return true;
+ }
case Hexagon::ALIGNA:
- BuildMI(MBB, MI, DL, get(Hexagon::A2_andir), MI->getOperand(0).getReg())
+ BuildMI(MBB, MI, DL, get(Hexagon::A2_andir), MI.getOperand(0).getReg())
.addReg(HRI.getFrameRegister())
- .addImm(-MI->getOperand(1).getImm());
+ .addImm(-MI.getOperand(1).getImm());
MBB.erase(MI);
return true;
case Hexagon::HEXAGON_V6_vassignp_128B:
case Hexagon::HEXAGON_V6_vassignp: {
- unsigned SrcReg = MI->getOperand(1).getReg();
- unsigned DstReg = MI->getOperand(0).getReg();
+ unsigned SrcReg = MI.getOperand(1).getReg();
+ unsigned DstReg = MI.getOperand(0).getReg();
if (SrcReg != DstReg)
- copyPhysReg(MBB, MI, DL, DstReg, SrcReg, MI->getOperand(1).isKill());
+ copyPhysReg(MBB, MI, DL, DstReg, SrcReg, MI.getOperand(1).isKill());
MBB.erase(MI);
return true;
}
case Hexagon::HEXAGON_V6_lo_128B:
case Hexagon::HEXAGON_V6_lo: {
- unsigned SrcReg = MI->getOperand(1).getReg();
- unsigned DstReg = MI->getOperand(0).getReg();
+ unsigned SrcReg = MI.getOperand(1).getReg();
+ unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::subreg_loreg);
- copyPhysReg(MBB, MI, DL, DstReg, SrcSubLo, MI->getOperand(1).isKill());
+ copyPhysReg(MBB, MI, DL, DstReg, SrcSubLo, MI.getOperand(1).isKill());
MBB.erase(MI);
MRI.clearKillFlags(SrcSubLo);
return true;
}
case Hexagon::HEXAGON_V6_hi_128B:
case Hexagon::HEXAGON_V6_hi: {
- unsigned SrcReg = MI->getOperand(1).getReg();
- unsigned DstReg = MI->getOperand(0).getReg();
+ unsigned SrcReg = MI.getOperand(1).getReg();
+ unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::subreg_hireg);
- copyPhysReg(MBB, MI, DL, DstReg, SrcSubHi, MI->getOperand(1).isKill());
+ copyPhysReg(MBB, MI, DL, DstReg, SrcSubHi, MI.getOperand(1).isKill());
MBB.erase(MI);
MRI.clearKillFlags(SrcSubHi);
return true;
@@ -812,24 +964,25 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI)
case Hexagon::STrivv_indexed_128B:
Is128B = true;
case Hexagon::STrivv_indexed: {
- unsigned SrcReg = MI->getOperand(2).getReg();
+ unsigned SrcReg = MI.getOperand(2).getReg();
unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::subreg_hireg);
unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::subreg_loreg);
unsigned NewOpcd = Is128B ? Hexagon::V6_vS32b_ai_128B
: Hexagon::V6_vS32b_ai;
unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6;
- MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpcd))
- .addOperand(MI->getOperand(0))
- .addImm(MI->getOperand(1).getImm())
- .addReg(SrcSubLo)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ MachineInstr *MI1New =
+ BuildMI(MBB, MI, DL, get(NewOpcd))
+ .addOperand(MI.getOperand(0))
+ .addImm(MI.getOperand(1).getImm())
+ .addReg(SrcSubLo)
+ .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
MI1New->getOperand(0).setIsKill(false);
BuildMI(MBB, MI, DL, get(NewOpcd))
- .addOperand(MI->getOperand(0))
- // The Vectors are indexed in multiples of vector size.
- .addImm(MI->getOperand(1).getImm()+Offset)
- .addReg(SrcSubHi)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addOperand(MI.getOperand(0))
+ // The Vectors are indexed in multiples of vector size.
+ .addImm(MI.getOperand(1).getImm() + Offset)
+ .addReg(SrcSubHi)
+ .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
MBB.erase(MI);
return true;
}
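
The two V6_vS32b_ai stores address the halves of the vector pair one full vector apart; a sketch of the offset arithmetic implied by the shifts above (helper name is illustrative):

  // Byte distance between the low-half and high-half stores
  // (64B mode shifts VecOffset by 6, 128B mode by 7).
  static int highHalfOffset(int BaseImm, bool Is128B) {
    const int VecOffset = 1;
    return BaseImm + (Is128B ? VecOffset << 7 : VecOffset << 6); // +128 or +64
  }
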
@@ -840,35 +993,34 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI)
case Hexagon::LDrivv_indexed: {
unsigned NewOpcd = Is128B ? Hexagon::V6_vL32b_ai_128B
: Hexagon::V6_vL32b_ai;
- unsigned DstReg = MI->getOperand(0).getReg();
+ unsigned DstReg = MI.getOperand(0).getReg();
unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6;
MachineInstr *MI1New =
BuildMI(MBB, MI, DL, get(NewOpcd),
HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
- .addOperand(MI->getOperand(1))
- .addImm(MI->getOperand(2).getImm());
+ .addOperand(MI.getOperand(1))
+ .addImm(MI.getOperand(2).getImm());
MI1New->getOperand(1).setIsKill(false);
BuildMI(MBB, MI, DL, get(NewOpcd),
HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
- .addOperand(MI->getOperand(1))
+ .addOperand(MI.getOperand(1))
// The Vectors are indexed in multiples of vector size.
- .addImm(MI->getOperand(2).getImm() + Offset)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addImm(MI.getOperand(2).getImm() + Offset)
+ .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
MBB.erase(MI);
return true;
}
case Hexagon::LDriv_pseudo_V6_128B:
Is128B = true;
case Hexagon::LDriv_pseudo_V6: {
- unsigned DstReg = MI->getOperand(0).getReg();
+ unsigned DstReg = MI.getOperand(0).getReg();
unsigned NewOpc = Is128B ? Hexagon::V6_vL32b_ai_128B
: Hexagon::V6_vL32b_ai;
- int32_t Off = MI->getOperand(2).getImm();
- int32_t Idx = Off;
+ int32_t Off = MI.getOperand(2).getImm();
BuildMI(MBB, MI, DL, get(NewOpc), DstReg)
- .addOperand(MI->getOperand(1))
- .addImm(Idx)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addOperand(MI.getOperand(1))
+ .addImm(Off)
+ .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
MBB.erase(MI);
return true;
}
@@ -877,18 +1029,17 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI)
case Hexagon::STriv_pseudo_V6: {
unsigned NewOpc = Is128B ? Hexagon::V6_vS32b_ai_128B
: Hexagon::V6_vS32b_ai;
- int32_t Off = MI->getOperand(1).getImm();
- int32_t Idx = Is128B ? (Off >> 7) : (Off >> 6);
+ int32_t Off = MI.getOperand(1).getImm();
BuildMI(MBB, MI, DL, get(NewOpc))
- .addOperand(MI->getOperand(0))
- .addImm(Idx)
- .addOperand(MI->getOperand(2))
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ .addOperand(MI.getOperand(0))
+ .addImm(Off)
+ .addOperand(MI.getOperand(2))
+ .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
MBB.erase(MI);
return true;
}
case Hexagon::TFR_PdTrue: {
- unsigned Reg = MI->getOperand(0).getReg();
+ unsigned Reg = MI.getOperand(0).getReg();
BuildMI(MBB, MI, DL, get(Hexagon::C2_orn), Reg)
.addReg(Reg, RegState::Undef)
.addReg(Reg, RegState::Undef);
@@ -896,7 +1047,7 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI)
return true;
}
case Hexagon::TFR_PdFalse: {
- unsigned Reg = MI->getOperand(0).getReg();
+ unsigned Reg = MI.getOperand(0).getReg();
BuildMI(MBB, MI, DL, get(Hexagon::C2_andn), Reg)
.addReg(Reg, RegState::Undef)
.addReg(Reg, RegState::Undef);
@@ -905,18 +1056,20 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI)
}
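
Both predicate-transfer expansions are correct even when Reg starts out undef, because the identities they rely on hold for any input value; a sketch:

  // C2_orn(P, P) computes P | ~P (always all ones);
  // C2_andn(P, P) computes P & ~P (always all zeros).
  static bool pdTrue(bool P)  { return P || !P; } // TFR_PdTrue
  static bool pdFalse(bool P) { return P && !P; } // TFR_PdFalse
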
case Hexagon::VMULW: {
// Expand a 64-bit vector multiply into 2 32-bit scalar multiplies.
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned Src1Reg = MI->getOperand(1).getReg();
- unsigned Src2Reg = MI->getOperand(2).getReg();
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned Src1Reg = MI.getOperand(1).getReg();
+ unsigned Src2Reg = MI.getOperand(2).getReg();
unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::subreg_hireg);
unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::subreg_loreg);
unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::subreg_hireg);
unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::subreg_loreg);
- BuildMI(MBB, MI, MI->getDebugLoc(), get(Hexagon::M2_mpyi),
- HRI.getSubReg(DstReg, Hexagon::subreg_hireg)).addReg(Src1SubHi)
+ BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
+ HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
+ .addReg(Src1SubHi)
.addReg(Src2SubHi);
- BuildMI(MBB, MI, MI->getDebugLoc(), get(Hexagon::M2_mpyi),
- HRI.getSubReg(DstReg, Hexagon::subreg_loreg)).addReg(Src1SubLo)
+ BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
+ HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
+ .addReg(Src1SubLo)
.addReg(Src2SubLo);
MBB.erase(MI);
MRI.clearKillFlags(Src1SubHi);
@@ -927,22 +1080,26 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI)
}
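
A semantic sketch of the VMULW expansion, assuming plain C++ integer arithmetic: the two M2_mpyi instructions perform an elementwise 32x32->32 multiply on the halves of the 64-bit register pair.

  #include <cstdint>
  static uint64_t vmulw(uint64_t A, uint64_t B) {
    uint32_t Lo = (uint32_t)A * (uint32_t)B;                 // M2_mpyi on subreg_loreg
    uint32_t Hi = (uint32_t)(A >> 32) * (uint32_t)(B >> 32); // M2_mpyi on subreg_hireg
    return ((uint64_t)Hi << 32) | Lo;
  }
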
case Hexagon::VMULW_ACC: {
// Expand 64-bit vector multiply with addition into 2 scalar multiplies.
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned Src1Reg = MI->getOperand(1).getReg();
- unsigned Src2Reg = MI->getOperand(2).getReg();
- unsigned Src3Reg = MI->getOperand(3).getReg();
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned Src1Reg = MI.getOperand(1).getReg();
+ unsigned Src2Reg = MI.getOperand(2).getReg();
+ unsigned Src3Reg = MI.getOperand(3).getReg();
unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::subreg_hireg);
unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::subreg_loreg);
unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::subreg_hireg);
unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::subreg_loreg);
unsigned Src3SubHi = HRI.getSubReg(Src3Reg, Hexagon::subreg_hireg);
unsigned Src3SubLo = HRI.getSubReg(Src3Reg, Hexagon::subreg_loreg);
- BuildMI(MBB, MI, MI->getDebugLoc(), get(Hexagon::M2_maci),
- HRI.getSubReg(DstReg, Hexagon::subreg_hireg)).addReg(Src1SubHi)
- .addReg(Src2SubHi).addReg(Src3SubHi);
- BuildMI(MBB, MI, MI->getDebugLoc(), get(Hexagon::M2_maci),
- HRI.getSubReg(DstReg, Hexagon::subreg_loreg)).addReg(Src1SubLo)
- .addReg(Src2SubLo).addReg(Src3SubLo);
+ BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
+ HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
+ .addReg(Src1SubHi)
+ .addReg(Src2SubHi)
+ .addReg(Src3SubHi);
+ BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
+ HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
+ .addReg(Src1SubLo)
+ .addReg(Src2SubLo)
+ .addReg(Src3SubLo);
MBB.erase(MI);
MRI.clearKillFlags(Src1SubHi);
MRI.clearKillFlags(Src1SubLo);
@@ -952,16 +1109,58 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI)
MRI.clearKillFlags(Src3SubLo);
return true;
}
+ case Hexagon::Insert4: {
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned Src1Reg = MI.getOperand(1).getReg();
+ unsigned Src2Reg = MI.getOperand(2).getReg();
+ unsigned Src3Reg = MI.getOperand(3).getReg();
+ unsigned Src4Reg = MI.getOperand(4).getReg();
+ unsigned Src1RegIsKill = getKillRegState(MI.getOperand(1).isKill());
+ unsigned Src2RegIsKill = getKillRegState(MI.getOperand(2).isKill());
+ unsigned Src3RegIsKill = getKillRegState(MI.getOperand(3).isKill());
+ unsigned Src4RegIsKill = getKillRegState(MI.getOperand(4).isKill());
+ unsigned DstSubHi = HRI.getSubReg(DstReg, Hexagon::subreg_hireg);
+ unsigned DstSubLo = HRI.getSubReg(DstReg, Hexagon::subreg_loreg);
+ BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::S2_insert),
+ HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
+ .addReg(DstSubLo)
+ .addReg(Src1Reg, Src1RegIsKill)
+ .addImm(16)
+ .addImm(0);
+ BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::S2_insert),
+ HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
+ .addReg(DstSubLo)
+ .addReg(Src2Reg, Src2RegIsKill)
+ .addImm(16)
+ .addImm(16);
+ BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::S2_insert),
+ HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
+ .addReg(DstSubHi)
+ .addReg(Src3Reg, Src3RegIsKill)
+ .addImm(16)
+ .addImm(0);
+ BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::S2_insert),
+ HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
+ .addReg(DstSubHi)
+ .addReg(Src4Reg, Src4RegIsKill)
+ .addImm(16)
+ .addImm(16);
+ MBB.erase(MI);
+ MRI.clearKillFlags(DstReg);
+ MRI.clearKillFlags(DstSubHi);
+ MRI.clearKillFlags(DstSubLo);
+ return true;
+ }
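
The four S2_insert ops pack the low 16 bits of each source into consecutive halfword lanes of the destination pair; a sketch of the value produced:

  #include <cstdint>
  static uint64_t insert4(uint32_t S1, uint32_t S2, uint32_t S3, uint32_t S4) {
    uint32_t Lo = (S1 & 0xFFFF) | ((S2 & 0xFFFF) << 16); // inserts at bits 0 and 16
    uint32_t Hi = (S3 & 0xFFFF) | ((S4 & 0xFFFF) << 16); // of the lo/hi subregisters
    return ((uint64_t)Hi << 32) | Lo;
  }
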
case Hexagon::MUX64_rr: {
- const MachineOperand &Op0 = MI->getOperand(0);
- const MachineOperand &Op1 = MI->getOperand(1);
- const MachineOperand &Op2 = MI->getOperand(2);
- const MachineOperand &Op3 = MI->getOperand(3);
+ const MachineOperand &Op0 = MI.getOperand(0);
+ const MachineOperand &Op1 = MI.getOperand(1);
+ const MachineOperand &Op2 = MI.getOperand(2);
+ const MachineOperand &Op3 = MI.getOperand(3);
unsigned Rd = Op0.getReg();
unsigned Pu = Op1.getReg();
unsigned Rs = Op2.getReg();
unsigned Rt = Op3.getReg();
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
unsigned K1 = getKillRegState(Op1.isKill());
unsigned K2 = getKillRegState(Op2.isKill());
unsigned K3 = getKillRegState(Op3.isKill());
@@ -976,24 +1175,62 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI)
MBB.erase(MI);
return true;
}
+ case Hexagon::VSelectPseudo_V6: {
+ const MachineOperand &Op0 = MI.getOperand(0);
+ const MachineOperand &Op1 = MI.getOperand(1);
+ const MachineOperand &Op2 = MI.getOperand(2);
+ const MachineOperand &Op3 = MI.getOperand(3);
+ BuildMI(MBB, MI, DL, get(Hexagon::V6_vcmov))
+ .addOperand(Op0)
+ .addOperand(Op1)
+ .addOperand(Op2);
+ BuildMI(MBB, MI, DL, get(Hexagon::V6_vncmov))
+ .addOperand(Op0)
+ .addOperand(Op1)
+ .addOperand(Op3);
+ MBB.erase(MI);
+ return true;
+ }
+ case Hexagon::VSelectDblPseudo_V6: {
+ MachineOperand &Op0 = MI.getOperand(0);
+ MachineOperand &Op1 = MI.getOperand(1);
+ MachineOperand &Op2 = MI.getOperand(2);
+ MachineOperand &Op3 = MI.getOperand(3);
+ unsigned SrcLo = HRI.getSubReg(Op2.getReg(), Hexagon::subreg_loreg);
+ unsigned SrcHi = HRI.getSubReg(Op2.getReg(), Hexagon::subreg_hireg);
+ BuildMI(MBB, MI, DL, get(Hexagon::V6_vccombine))
+ .addOperand(Op0)
+ .addOperand(Op1)
+ .addReg(SrcHi)
+ .addReg(SrcLo);
+ SrcLo = HRI.getSubReg(Op3.getReg(), Hexagon::subreg_loreg);
+ SrcHi = HRI.getSubReg(Op3.getReg(), Hexagon::subreg_hireg);
+ BuildMI(MBB, MI, DL, get(Hexagon::V6_vnccombine))
+ .addOperand(Op0)
+ .addOperand(Op1)
+ .addReg(SrcHi)
+ .addReg(SrcLo);
+ MBB.erase(MI);
+ return true;
+ }
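
MUX64_rr and the two VSelect pseudos all expand to the same idiom: a true-predicated move of one source and a false-predicated move of the other into a single destination. A sketch of the shared semantics:

  // Semantic model of the complementary conditional-move pair
  // (vcmov/vncmov, vccombine/vnccombine, or tfrt/tfrf).
  template <typename T>
  static T selectViaCMov(bool P, T TrueV, T FalseV) {
    return P ? TrueV : FalseV;
  }
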
case Hexagon::TCRETURNi:
- MI->setDesc(get(Hexagon::J2_jump));
+ MI.setDesc(get(Hexagon::J2_jump));
return true;
case Hexagon::TCRETURNr:
- MI->setDesc(get(Hexagon::J2_jumpr));
+ MI.setDesc(get(Hexagon::J2_jumpr));
return true;
case Hexagon::TFRI_f:
case Hexagon::TFRI_cPt_f:
case Hexagon::TFRI_cNotPt_f: {
unsigned Opx = (Opc == Hexagon::TFRI_f) ? 1 : 2;
- APFloat FVal = MI->getOperand(Opx).getFPImm()->getValueAPF();
+ APFloat FVal = MI.getOperand(Opx).getFPImm()->getValueAPF();
APInt IVal = FVal.bitcastToAPInt();
- MI->RemoveOperand(Opx);
+ MI.RemoveOperand(Opx);
unsigned NewOpc = (Opc == Hexagon::TFRI_f) ? Hexagon::A2_tfrsi :
(Opc == Hexagon::TFRI_cPt_f) ? Hexagon::C2_cmoveit :
Hexagon::C2_cmoveif;
- MI->setDesc(get(NewOpc));
- MI->addOperand(MachineOperand::CreateImm(IVal.getZExtValue()));
+ MI.setDesc(get(NewOpc));
+ MI.addOperand(MachineOperand::CreateImm(IVal.getZExtValue()));
return true;
}
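
bitcastToAPInt reinterprets the float's bit pattern as an integer without any value conversion, so the rewritten transfer materializes the raw IEEE-754 encoding; a plain C++ equivalent:

  #include <cstdint>
  #include <cstring>
  static uint32_t fpImmBits(float FVal) {
    uint32_t IVal;
    std::memcpy(&IVal, &FVal, sizeof IVal); // e.g. fpImmBits(1.0f) == 0x3F800000
    return IVal;
  }
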
}
@@ -1035,20 +1272,20 @@ void HexagonInstrInfo::insertNoop(MachineBasicBlock &MBB,
// if (!p0.new) R1 = add(R2, R3)
// Note: New-value stores are not included here as in the current
// implementation, we don't need to check their predicate sense.
-bool HexagonInstrInfo::isPredicated(const MachineInstr *MI) const {
- const uint64_t F = MI->getDesc().TSFlags;
+bool HexagonInstrInfo::isPredicated(const MachineInstr &MI) const {
+ const uint64_t F = MI.getDesc().TSFlags;
return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
}
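
This is the TSFlags pattern used for all the predicate-related queries in this file: each property occupies a fixed bit range of the descriptor flags and is read with a shift and a mask. A generic sketch:

  #include <cstdint>
  // Generic form of (F >> SomePos) & SomeMask.
  static uint64_t readTSField(uint64_t F, unsigned Pos, uint64_t Mask) {
    return (F >> Pos) & Mask;
  }
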
-bool HexagonInstrInfo::PredicateInstruction(MachineInstr *MI,
- ArrayRef<MachineOperand> Cond) const {
+bool HexagonInstrInfo::PredicateInstruction(
+ MachineInstr &MI, ArrayRef<MachineOperand> Cond) const {
if (Cond.empty() || isNewValueJump(Cond[0].getImm()) ||
isEndLoopN(Cond[0].getImm())) {
- DEBUG(dbgs() << "\nCannot predicate:"; MI->dump(););
+ DEBUG(dbgs() << "\nCannot predicate:"; MI.dump(););
return false;
}
- int Opc = MI->getOpcode();
+ int Opc = MI.getOpcode();
assert (isPredicable(MI) && "Expected predicable instruction");
bool invertJump = predOpcodeHasNot(Cond);
@@ -1057,13 +1294,13 @@ bool HexagonInstrInfo::PredicateInstruction(MachineInstr *MI,
// plicated manipulations with the operands (handling tied operands,
// etc.), build a new temporary instruction, then overwrite MI with it.
- MachineBasicBlock &B = *MI->getParent();
- DebugLoc DL = MI->getDebugLoc();
+ MachineBasicBlock &B = *MI.getParent();
+ DebugLoc DL = MI.getDebugLoc();
unsigned PredOpc = getCondOpcode(Opc, invertJump);
MachineInstrBuilder T = BuildMI(B, MI, DL, get(PredOpc));
- unsigned NOp = 0, NumOps = MI->getNumOperands();
+ unsigned NOp = 0, NumOps = MI.getNumOperands();
while (NOp < NumOps) {
- MachineOperand &Op = MI->getOperand(NOp);
+ MachineOperand &Op = MI.getOperand(NOp);
if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
break;
T.addOperand(Op);
@@ -1076,13 +1313,13 @@ bool HexagonInstrInfo::PredicateInstruction(MachineInstr *MI,
assert(GotPredReg);
T.addReg(PredReg, PredRegFlags);
while (NOp < NumOps)
- T.addOperand(MI->getOperand(NOp++));
+ T.addOperand(MI.getOperand(NOp++));
- MI->setDesc(get(PredOpc));
- while (unsigned n = MI->getNumOperands())
- MI->RemoveOperand(n-1);
+ MI.setDesc(get(PredOpc));
+ while (unsigned n = MI.getNumOperands())
+ MI.RemoveOperand(n-1);
for (unsigned i = 0, n = T->getNumOperands(); i < n; ++i)
- MI->addOperand(T->getOperand(i));
+ MI.addOperand(T->getOperand(i));
MachineBasicBlock::instr_iterator TI = T->getIterator();
B.erase(TI);
@@ -1100,11 +1337,11 @@ bool HexagonInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
}
-bool HexagonInstrInfo::DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const {
+bool HexagonInstrInfo::DefinesPredicate(
+ MachineInstr &MI, std::vector<MachineOperand> &Pred) const {
auto &HRI = getRegisterInfo();
- for (unsigned oper = 0; oper < MI->getNumOperands(); ++oper) {
- MachineOperand MO = MI->getOperand(oper);
+ for (unsigned oper = 0; oper < MI.getNumOperands(); ++oper) {
+ MachineOperand MO = MI.getOperand(oper);
if (MO.isReg() && MO.isDef()) {
const TargetRegisterClass* RC = HRI.getMinimalPhysRegClass(MO.getReg());
if (RC == &Hexagon::PredRegsRegClass) {
@@ -1116,107 +1353,25 @@ bool HexagonInstrInfo::DefinesPredicate(MachineInstr *MI,
return false;
}
-bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
- bool isPred = MI->getDesc().isPredicable();
-
- if (!isPred)
- return false;
-
- const int Opc = MI->getOpcode();
- int NumOperands = MI->getNumOperands();
-
- // Keep a flag for upto 4 operands in the instructions, to indicate if
- // that operand has been constant extended.
- bool OpCExtended[4];
- if (NumOperands > 4)
- NumOperands = 4;
-
- for (int i = 0; i < NumOperands; i++)
- OpCExtended[i] = (isOperandExtended(MI, i) && isConstExtended(MI));
-
- switch(Opc) {
- case Hexagon::A2_tfrsi:
- return (isOperandExtended(MI, 1) && isConstExtended(MI)) ||
- isInt<12>(MI->getOperand(1).getImm());
-
- case Hexagon::S2_storerd_io:
- return isShiftedUInt<6,3>(MI->getOperand(1).getImm());
-
- case Hexagon::S2_storeri_io:
- case Hexagon::S2_storerinew_io:
- return isShiftedUInt<6,2>(MI->getOperand(1).getImm());
-
- case Hexagon::S2_storerh_io:
- case Hexagon::S2_storerhnew_io:
- return isShiftedUInt<6,1>(MI->getOperand(1).getImm());
-
- case Hexagon::S2_storerb_io:
- case Hexagon::S2_storerbnew_io:
- return isUInt<6>(MI->getOperand(1).getImm());
-
- case Hexagon::L2_loadrd_io:
- return isShiftedUInt<6,3>(MI->getOperand(2).getImm());
-
- case Hexagon::L2_loadri_io:
- return isShiftedUInt<6,2>(MI->getOperand(2).getImm());
-
- case Hexagon::L2_loadrh_io:
- case Hexagon::L2_loadruh_io:
- return isShiftedUInt<6,1>(MI->getOperand(2).getImm());
-
- case Hexagon::L2_loadrb_io:
- case Hexagon::L2_loadrub_io:
- return isUInt<6>(MI->getOperand(2).getImm());
-
- case Hexagon::L2_loadrd_pi:
- return isShiftedInt<4,3>(MI->getOperand(3).getImm());
-
- case Hexagon::L2_loadri_pi:
- return isShiftedInt<4,2>(MI->getOperand(3).getImm());
-
- case Hexagon::L2_loadrh_pi:
- case Hexagon::L2_loadruh_pi:
- return isShiftedInt<4,1>(MI->getOperand(3).getImm());
-
- case Hexagon::L2_loadrb_pi:
- case Hexagon::L2_loadrub_pi:
- return isInt<4>(MI->getOperand(3).getImm());
-
- case Hexagon::S4_storeirb_io:
- case Hexagon::S4_storeirh_io:
- case Hexagon::S4_storeiri_io:
- return (OpCExtended[1] || isUInt<6>(MI->getOperand(1).getImm())) &&
- (OpCExtended[2] || isInt<6>(MI->getOperand(2).getImm()));
-
- case Hexagon::A2_addi:
- return isInt<8>(MI->getOperand(2).getImm());
- case Hexagon::A2_aslh:
- case Hexagon::A2_asrh:
- case Hexagon::A2_sxtb:
- case Hexagon::A2_sxth:
- case Hexagon::A2_zxtb:
- case Hexagon::A2_zxth:
- return true;
- }
-
- return true;
+bool HexagonInstrInfo::isPredicable(MachineInstr &MI) const {
+ return MI.getDesc().isPredicable();
}
-
-bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
- const MachineBasicBlock *MBB, const MachineFunction &MF) const {
+bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const {
// Debug info is never a scheduling boundary. It's necessary to be explicit
// due to the special treatment of IT instructions below, otherwise a
// dbg_value followed by an IT will result in the IT instruction being
// considered a scheduling hazard, which is wrong. It should be the actual
// instruction preceding the dbg_value instruction(s), just like it is
// when debug info is not present.
- if (MI->isDebugValue())
+ if (MI.isDebugValue())
return false;
// Throwing call is a boundary.
- if (MI->isCall()) {
+ if (MI.isCall()) {
// If any of the block's successors is a landing pad, this could be a
// throwing call.
for (auto I : MBB->successors())
@@ -1225,15 +1380,15 @@ bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
}
// Don't mess around with no return calls.
- if (MI->getOpcode() == Hexagon::CALLv3nr)
+ if (MI.getOpcode() == Hexagon::CALLv3nr)
return true;
// Terminators and labels can't be scheduled around.
- if (MI->getDesc().isTerminator() || MI->isPosition())
+ if (MI.getDesc().isTerminator() || MI.isPosition())
return true;
- if (MI->isInlineAsm() && !ScheduleInlineAsm)
- return true;
+ if (MI.isInlineAsm() && !ScheduleInlineAsm)
+ return true;
return false;
}
@@ -1286,9 +1441,10 @@ HexagonInstrInfo::CreateTargetPostRAHazardRecognizer(
/// \p SrcReg and \p SrcReg2 if having two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
-bool HexagonInstrInfo::analyzeCompare(const MachineInstr *MI,
- unsigned &SrcReg, unsigned &SrcReg2, int &Mask, int &Value) const {
- unsigned Opc = MI->getOpcode();
+bool HexagonInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &Mask,
+ int &Value) const {
+ unsigned Opc = MI.getOpcode();
// Set mask and the first source register.
switch (Opc) {
@@ -1307,7 +1463,7 @@ bool HexagonInstrInfo::analyzeCompare(const MachineInstr *MI,
case Hexagon::C4_cmpneqi:
case Hexagon::C4_cmplteui:
case Hexagon::C4_cmpltei:
- SrcReg = MI->getOperand(1).getReg();
+ SrcReg = MI.getOperand(1).getReg();
Mask = ~0;
break;
case Hexagon::A4_cmpbeq:
@@ -1316,7 +1472,7 @@ bool HexagonInstrInfo::analyzeCompare(const MachineInstr *MI,
case Hexagon::A4_cmpbeqi:
case Hexagon::A4_cmpbgti:
case Hexagon::A4_cmpbgtui:
- SrcReg = MI->getOperand(1).getReg();
+ SrcReg = MI.getOperand(1).getReg();
Mask = 0xFF;
break;
case Hexagon::A4_cmpheq:
@@ -1325,7 +1481,7 @@ bool HexagonInstrInfo::analyzeCompare(const MachineInstr *MI,
case Hexagon::A4_cmpheqi:
case Hexagon::A4_cmphgti:
case Hexagon::A4_cmphgtui:
- SrcReg = MI->getOperand(1).getReg();
+ SrcReg = MI.getOperand(1).getReg();
Mask = 0xFFFF;
break;
}
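
The mask reported here tells the caller how much of the register the comparison actually inspects; a sketch, assuming the caller applies it to both operands (only the masked bits participate in the byte and halfword forms):

  #include <cstdint>
  // e.g. A4_cmpbeq compares only bits [7:0], so Mask == 0xFF.
  static bool cmpWithMask(uint32_t Rs, uint32_t Rt, uint32_t Mask) {
    return (Rs & Mask) == (Rt & Mask);
  }
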
@@ -1347,7 +1503,7 @@ bool HexagonInstrInfo::analyzeCompare(const MachineInstr *MI,
case Hexagon::C4_cmpneq:
case Hexagon::C4_cmplte:
case Hexagon::C4_cmplteu:
- SrcReg2 = MI->getOperand(2).getReg();
+ SrcReg2 = MI.getOperand(2).getReg();
return true;
case Hexagon::C2_cmpeqi:
@@ -1363,17 +1519,17 @@ bool HexagonInstrInfo::analyzeCompare(const MachineInstr *MI,
case Hexagon::A4_cmphgti:
case Hexagon::A4_cmphgtui:
SrcReg2 = 0;
- Value = MI->getOperand(2).getImm();
+ Value = MI.getOperand(2).getImm();
return true;
}
return false;
}
-
unsigned HexagonInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
- const MachineInstr *MI, unsigned *PredCost) const {
- return getInstrTimingClassLatency(ItinData, MI);
+ const MachineInstr &MI,
+ unsigned *PredCost) const {
+ return getInstrTimingClassLatency(ItinData, &MI);
}
@@ -1388,27 +1544,27 @@ DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState(
// %R13<def> = L2_loadri_io %R29, 136; mem:LD4[FixedStack0]
// S2_storeri_io %R29, 132, %R1<kill>; flags: mem:ST4[FixedStack1]
// Currently AA considers the addresses in these instructions to be aliasing.
-bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
- MachineInstr *MIb, AliasAnalysis *AA) const {
+bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint(
+ MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
int OffsetA = 0, OffsetB = 0;
unsigned SizeA = 0, SizeB = 0;
- if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects() ||
- MIa->hasOrderedMemoryRef() || MIa->hasOrderedMemoryRef())
+ if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
+ MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
return false;
// Instructions that are pure loads, not loads and stores like memops are not
// dependent.
- if (MIa->mayLoad() && !isMemOp(MIa) && MIb->mayLoad() && !isMemOp(MIb))
+ if (MIa.mayLoad() && !isMemOp(&MIa) && MIb.mayLoad() && !isMemOp(&MIb))
return true;
// Get base, offset, and access size in MIa.
- unsigned BaseRegA = getBaseAndOffset(MIa, OffsetA, SizeA);
+ unsigned BaseRegA = getBaseAndOffset(&MIa, OffsetA, SizeA);
if (!BaseRegA || !SizeA)
return false;
// Get base, offset, and access size in MIb.
- unsigned BaseRegB = getBaseAndOffset(MIb, OffsetB, SizeB);
+ unsigned BaseRegB = getBaseAndOffset(&MIb, OffsetB, SizeB);
if (!BaseRegB || !SizeB)
return false;
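
With a common base register and both offset/size pairs known, trivial disjointness reduces to an interval check; a sketch of the comparison the rest of the function performs:

  // [OffA, OffA+SizeA) and [OffB, OffB+SizeB) cannot overlap iff one access
  // ends at or before the point where the other begins.
  static bool disjointIntervals(int OffA, int SizeA, int OffB, int SizeB) {
    return OffA + SizeA <= OffB || OffB + SizeB <= OffA;
  }
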
@@ -1486,13 +1642,13 @@ bool HexagonInstrInfo::isCompoundBranchInstr(const MachineInstr *MI) const {
bool HexagonInstrInfo::isCondInst(const MachineInstr *MI) const {
- return (MI->isBranch() && isPredicated(MI)) ||
+ return (MI->isBranch() && isPredicated(*MI)) ||
isConditionalTransfer(MI) ||
isConditionalALU32(MI) ||
isConditionalLoad(MI) ||
// Predicated stores which don't have a .new on any operands.
- (MI->mayStore() && isPredicated(MI) && !isNewValueStore(MI) &&
- !isPredicatedNew(MI));
+ (MI->mayStore() && isPredicated(*MI) && !isNewValueStore(MI) &&
+ !isPredicatedNew(*MI));
}
@@ -1557,7 +1713,7 @@ bool HexagonInstrInfo::isConditionalALU32(const MachineInstr* MI) const {
 // FIXME - Function name and its functionality don't match.
// It should be renamed to hasPredNewOpcode()
bool HexagonInstrInfo::isConditionalLoad(const MachineInstr* MI) const {
- if (!MI->getDesc().mayLoad() || !isPredicated(MI))
+ if (!MI->getDesc().mayLoad() || !isPredicated(*MI))
return false;
int PNewOpcode = Hexagon::getPredNewOpcode(MI->getOpcode());
@@ -1763,8 +1919,7 @@ bool HexagonInstrInfo::isDotCurInst(const MachineInstr* MI) const {
// Returns true, if any one of the operands is a dot new
// insn, whether it is predicated dot new or register dot new.
bool HexagonInstrInfo::isDotNewInst(const MachineInstr* MI) const {
- if (isNewValueInst(MI) ||
- (isPredicated(MI) && isPredicatedNew(MI)))
+ if (isNewValueInst(MI) || (isPredicated(*MI) && isPredicatedNew(*MI)))
return true;
return false;
@@ -2129,8 +2284,8 @@ bool HexagonInstrInfo::isPostIncrement(const MachineInstr* MI) const {
}
-bool HexagonInstrInfo::isPredicatedNew(const MachineInstr *MI) const {
- const uint64_t F = MI->getDesc().TSFlags;
+bool HexagonInstrInfo::isPredicatedNew(const MachineInstr &MI) const {
+ const uint64_t F = MI.getDesc().TSFlags;
assert(isPredicated(MI));
return (F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask;
}
@@ -2143,8 +2298,8 @@ bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
}
-bool HexagonInstrInfo::isPredicatedTrue(const MachineInstr *MI) const {
- const uint64_t F = MI->getDesc().TSFlags;
+bool HexagonInstrInfo::isPredicatedTrue(const MachineInstr &MI) const {
+ const uint64_t F = MI.getDesc().TSFlags;
return !((F >> HexagonII::PredicatedFalsePos) &
HexagonII::PredicatedFalseMask);
}
@@ -2181,7 +2336,87 @@ bool HexagonInstrInfo::isPredictedTaken(unsigned Opcode) const {
bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr *MI) const {
return MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4 ||
- MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT;
+ MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT ||
+ MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_PIC ||
+ MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC;
+}
+
+bool HexagonInstrInfo::isSignExtendingLoad(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ // Byte
+ case Hexagon::L2_loadrb_io:
+ case Hexagon::L4_loadrb_ur:
+ case Hexagon::L4_loadrb_ap:
+ case Hexagon::L2_loadrb_pr:
+ case Hexagon::L2_loadrb_pbr:
+ case Hexagon::L2_loadrb_pi:
+ case Hexagon::L2_loadrb_pci:
+ case Hexagon::L2_loadrb_pcr:
+ case Hexagon::L2_loadbsw2_io:
+ case Hexagon::L4_loadbsw2_ur:
+ case Hexagon::L4_loadbsw2_ap:
+ case Hexagon::L2_loadbsw2_pr:
+ case Hexagon::L2_loadbsw2_pbr:
+ case Hexagon::L2_loadbsw2_pi:
+ case Hexagon::L2_loadbsw2_pci:
+ case Hexagon::L2_loadbsw2_pcr:
+ case Hexagon::L2_loadbsw4_io:
+ case Hexagon::L4_loadbsw4_ur:
+ case Hexagon::L4_loadbsw4_ap:
+ case Hexagon::L2_loadbsw4_pr:
+ case Hexagon::L2_loadbsw4_pbr:
+ case Hexagon::L2_loadbsw4_pi:
+ case Hexagon::L2_loadbsw4_pci:
+ case Hexagon::L2_loadbsw4_pcr:
+ case Hexagon::L4_loadrb_rr:
+ case Hexagon::L2_ploadrbt_io:
+ case Hexagon::L2_ploadrbt_pi:
+ case Hexagon::L2_ploadrbf_io:
+ case Hexagon::L2_ploadrbf_pi:
+ case Hexagon::L2_ploadrbtnew_io:
+ case Hexagon::L2_ploadrbfnew_io:
+ case Hexagon::L4_ploadrbt_rr:
+ case Hexagon::L4_ploadrbf_rr:
+ case Hexagon::L4_ploadrbtnew_rr:
+ case Hexagon::L4_ploadrbfnew_rr:
+ case Hexagon::L2_ploadrbtnew_pi:
+ case Hexagon::L2_ploadrbfnew_pi:
+ case Hexagon::L4_ploadrbt_abs:
+ case Hexagon::L4_ploadrbf_abs:
+ case Hexagon::L4_ploadrbtnew_abs:
+ case Hexagon::L4_ploadrbfnew_abs:
+ case Hexagon::L2_loadrbgp:
+ // Half
+ case Hexagon::L2_loadrh_io:
+ case Hexagon::L4_loadrh_ur:
+ case Hexagon::L4_loadrh_ap:
+ case Hexagon::L2_loadrh_pr:
+ case Hexagon::L2_loadrh_pbr:
+ case Hexagon::L2_loadrh_pi:
+ case Hexagon::L2_loadrh_pci:
+ case Hexagon::L2_loadrh_pcr:
+ case Hexagon::L4_loadrh_rr:
+ case Hexagon::L2_ploadrht_io:
+ case Hexagon::L2_ploadrht_pi:
+ case Hexagon::L2_ploadrhf_io:
+ case Hexagon::L2_ploadrhf_pi:
+ case Hexagon::L2_ploadrhtnew_io:
+ case Hexagon::L2_ploadrhfnew_io:
+ case Hexagon::L4_ploadrht_rr:
+ case Hexagon::L4_ploadrhf_rr:
+ case Hexagon::L4_ploadrhtnew_rr:
+ case Hexagon::L4_ploadrhfnew_rr:
+ case Hexagon::L2_ploadrhtnew_pi:
+ case Hexagon::L2_ploadrhfnew_pi:
+ case Hexagon::L4_ploadrht_abs:
+ case Hexagon::L4_ploadrhf_abs:
+ case Hexagon::L4_ploadrhtnew_abs:
+ case Hexagon::L4_ploadrhfnew_abs:
+ case Hexagon::L2_loadrhgp:
+ return true;
+ default:
+ return false;
+ }
}
@@ -2202,6 +2437,17 @@ bool HexagonInstrInfo::isSpillPredRegOp(const MachineInstr *MI) const {
}
+bool HexagonInstrInfo::isTailCall(const MachineInstr *MI) const {
+ if (!MI->isBranch())
+ return false;
+
+ for (auto &Op : MI->operands())
+ if (Op.isGlobal() || Op.isSymbol())
+ return true;
+ return false;
+}
+
+
// Returns true when SU has a timing class TC1.
bool HexagonInstrInfo::isTC1(const MachineInstr *MI) const {
unsigned SchedClass = MI->getDesc().getSchedClass();
@@ -2269,6 +2515,28 @@ bool HexagonInstrInfo::isTC4x(const MachineInstr *MI) const {
}
+// Returns true when MI1 and MI2 should be scheduled as close together as
+// possible.
+bool HexagonInstrInfo::isToBeScheduledASAP(const MachineInstr *MI1,
+ const MachineInstr *MI2) const {
+ if (!MI1 || !MI2)
+ return false;
+ if (mayBeCurLoad(MI1)) {
+    // If the result of MI1 (the .cur load) is used by MI2, schedule them
+    // together.
+ unsigned DstReg = MI1->getOperand(0).getReg();
+ int N = MI2->getNumOperands();
+ for (int I = 0; I < N; I++)
+ if (MI2->getOperand(I).isReg() && DstReg == MI2->getOperand(I).getReg())
+ return true;
+ }
+ if (mayBeNewStore(MI2))
+ if (MI2->getOpcode() == Hexagon::V6_vS32b_pi)
+ if (MI1->getOperand(0).isReg() && MI2->getOperand(3).isReg() &&
+ MI1->getOperand(0).getReg() == MI2->getOperand(3).getReg())
+ return true;
+ return false;
+}
+
+
bool HexagonInstrInfo::isV60VectorInstruction(const MachineInstr *MI) const {
if (!MI)
return false;
@@ -2366,6 +2634,21 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
case Hexagon::J2_loop0i:
case Hexagon::J2_loop1i:
return isUInt<10>(Offset);
+
+ case Hexagon::S4_storeirb_io:
+ case Hexagon::S4_storeirbt_io:
+ case Hexagon::S4_storeirbf_io:
+ return isUInt<6>(Offset);
+
+ case Hexagon::S4_storeirh_io:
+ case Hexagon::S4_storeirht_io:
+ case Hexagon::S4_storeirhf_io:
+ return isShiftedUInt<6,1>(Offset);
+
+ case Hexagon::S4_storeiri_io:
+ case Hexagon::S4_storeirit_io:
+ case Hexagon::S4_storeirif_io:
+ return isShiftedUInt<6,2>(Offset);
}
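
isShiftedUInt<N, S> accepts exactly the offsets that are S-bit aligned and whose scaled value fits in N bits; for the word stores above that means multiples of 4 from 0 to 252. Spelled out:

  #include <cstdint>
  // Equivalent of isShiftedUInt<6,2>(Offset).
  static bool isShiftedUInt6_2(int64_t Offset) {
    return (Offset & 3) == 0 &&               // 4-byte aligned
           ((uint64_t)Offset >> 2) < 64;      // scaled value fits in 6 bits
  }
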
if (Extend)
@@ -2422,10 +2705,12 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
case Hexagon::L4_or_memopb_io :
return (0 <= Offset && Offset <= 63);
- // LDri_pred and STriw_pred are pseudo operations, so it has to take offset of
+  // LDriw_xxx and STriw_xxx are pseudo operations, so they have to take an offset of
// any size. Later pass knows how to handle it.
case Hexagon::STriw_pred:
case Hexagon::LDriw_pred:
+ case Hexagon::STriw_mod:
+ case Hexagon::LDriw_mod:
return true;
case Hexagon::TFR_FI:
@@ -2439,9 +2724,6 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
case Hexagon::L2_ploadrubf_io:
case Hexagon::S2_pstorerbt_io:
case Hexagon::S2_pstorerbf_io:
- case Hexagon::S4_storeirb_io:
- case Hexagon::S4_storeirbt_io:
- case Hexagon::S4_storeirbf_io:
return isUInt<6>(Offset);
case Hexagon::L2_ploadrht_io:
@@ -2450,18 +2732,12 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
case Hexagon::L2_ploadruhf_io:
case Hexagon::S2_pstorerht_io:
case Hexagon::S2_pstorerhf_io:
- case Hexagon::S4_storeirh_io:
- case Hexagon::S4_storeirht_io:
- case Hexagon::S4_storeirhf_io:
return isShiftedUInt<6,1>(Offset);
case Hexagon::L2_ploadrit_io:
case Hexagon::L2_ploadrif_io:
case Hexagon::S2_pstorerit_io:
case Hexagon::S2_pstorerif_io:
- case Hexagon::S4_storeiri_io:
- case Hexagon::S4_storeirit_io:
- case Hexagon::S4_storeirif_io:
return isShiftedUInt<6,2>(Offset);
case Hexagon::L2_ploadrdt_io:
@@ -2506,6 +2782,94 @@ bool HexagonInstrInfo::isVecUsableNextPacket(const MachineInstr *ProdMI,
return false;
}
+bool HexagonInstrInfo::isZeroExtendingLoad(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ // Byte
+ case Hexagon::L2_loadrub_io:
+ case Hexagon::L4_loadrub_ur:
+ case Hexagon::L4_loadrub_ap:
+ case Hexagon::L2_loadrub_pr:
+ case Hexagon::L2_loadrub_pbr:
+ case Hexagon::L2_loadrub_pi:
+ case Hexagon::L2_loadrub_pci:
+ case Hexagon::L2_loadrub_pcr:
+ case Hexagon::L2_loadbzw2_io:
+ case Hexagon::L4_loadbzw2_ur:
+ case Hexagon::L4_loadbzw2_ap:
+ case Hexagon::L2_loadbzw2_pr:
+ case Hexagon::L2_loadbzw2_pbr:
+ case Hexagon::L2_loadbzw2_pi:
+ case Hexagon::L2_loadbzw2_pci:
+ case Hexagon::L2_loadbzw2_pcr:
+ case Hexagon::L2_loadbzw4_io:
+ case Hexagon::L4_loadbzw4_ur:
+ case Hexagon::L4_loadbzw4_ap:
+ case Hexagon::L2_loadbzw4_pr:
+ case Hexagon::L2_loadbzw4_pbr:
+ case Hexagon::L2_loadbzw4_pi:
+ case Hexagon::L2_loadbzw4_pci:
+ case Hexagon::L2_loadbzw4_pcr:
+ case Hexagon::L4_loadrub_rr:
+ case Hexagon::L2_ploadrubt_io:
+ case Hexagon::L2_ploadrubt_pi:
+ case Hexagon::L2_ploadrubf_io:
+ case Hexagon::L2_ploadrubf_pi:
+ case Hexagon::L2_ploadrubtnew_io:
+ case Hexagon::L2_ploadrubfnew_io:
+ case Hexagon::L4_ploadrubt_rr:
+ case Hexagon::L4_ploadrubf_rr:
+ case Hexagon::L4_ploadrubtnew_rr:
+ case Hexagon::L4_ploadrubfnew_rr:
+ case Hexagon::L2_ploadrubtnew_pi:
+ case Hexagon::L2_ploadrubfnew_pi:
+ case Hexagon::L4_ploadrubt_abs:
+ case Hexagon::L4_ploadrubf_abs:
+ case Hexagon::L4_ploadrubtnew_abs:
+ case Hexagon::L4_ploadrubfnew_abs:
+ case Hexagon::L2_loadrubgp:
+ // Half
+ case Hexagon::L2_loadruh_io:
+ case Hexagon::L4_loadruh_ur:
+ case Hexagon::L4_loadruh_ap:
+ case Hexagon::L2_loadruh_pr:
+ case Hexagon::L2_loadruh_pbr:
+ case Hexagon::L2_loadruh_pi:
+ case Hexagon::L2_loadruh_pci:
+ case Hexagon::L2_loadruh_pcr:
+ case Hexagon::L4_loadruh_rr:
+ case Hexagon::L2_ploadruht_io:
+ case Hexagon::L2_ploadruht_pi:
+ case Hexagon::L2_ploadruhf_io:
+ case Hexagon::L2_ploadruhf_pi:
+ case Hexagon::L2_ploadruhtnew_io:
+ case Hexagon::L2_ploadruhfnew_io:
+ case Hexagon::L4_ploadruht_rr:
+ case Hexagon::L4_ploadruhf_rr:
+ case Hexagon::L4_ploadruhtnew_rr:
+ case Hexagon::L4_ploadruhfnew_rr:
+ case Hexagon::L2_ploadruhtnew_pi:
+ case Hexagon::L2_ploadruhfnew_pi:
+ case Hexagon::L4_ploadruht_abs:
+ case Hexagon::L4_ploadruhf_abs:
+ case Hexagon::L4_ploadruhtnew_abs:
+ case Hexagon::L4_ploadruhfnew_abs:
+ case Hexagon::L2_loadruhgp:
+ return true;
+ default:
+ return false;
+ }
+}
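
The sign- and zero-extending opcode lists let later passes reason about the upper bits of a load result; the distinction in C terms (a sketch for the byte forms):

  #include <cstdint>
  static int32_t  loadByteSext(const int8_t *P)  { return *P; } // L2_loadrb*:  bits 31..8 copy bit 7
  static uint32_t loadByteZext(const uint8_t *P) { return *P; } // L2_loadrub*: bits 31..8 are zero
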
+
+
+// Returns true when scheduling should model extra latency between MI1 and
+// MI2, i.e. when MI2 cannot use MI1's vector result in the next packet.
+bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr *MI1,
+ const MachineInstr *MI2) const {
+ if (isV60VectorInstruction(MI1) && isV60VectorInstruction(MI2))
+ if (!isVecUsableNextPacket(MI1, MI2))
+ return true;
+ return false;
+}
+
/// \brief Can these instructions execute at the same time in a bundle.
bool HexagonInstrInfo::canExecuteInBundle(const MachineInstr *First,
@@ -2687,6 +3051,11 @@ bool HexagonInstrInfo::predOpcodeHasNot(ArrayRef<MachineOperand> Cond) const {
}
+short HexagonInstrInfo::getAbsoluteForm(const MachineInstr *MI) const {
+ return Hexagon::getAbsoluteForm(MI->getOpcode());
+}
+
+
unsigned HexagonInstrInfo::getAddrMode(const MachineInstr* MI) const {
const uint64_t F = MI->getDesc().TSFlags;
return (F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask;
@@ -2735,8 +3104,6 @@ bool HexagonInstrInfo::getBaseAndOffsetPosition(const MachineInstr *MI,
unsigned &BasePos, unsigned &OffsetPos) const {
// Deal with memops first.
if (isMemOp(MI)) {
- assert (MI->getOperand(0).isReg() && MI->getOperand(1).isImm() &&
- "Bad Memop.");
BasePos = 0;
OffsetPos = 1;
} else if (MI->mayStore()) {
@@ -2748,7 +3115,7 @@ bool HexagonInstrInfo::getBaseAndOffsetPosition(const MachineInstr *MI,
} else
return false;
- if (isPredicated(MI)) {
+ if (isPredicated(*MI)) {
BasePos++;
OffsetPos++;
}
@@ -2802,7 +3169,7 @@ SmallVector<MachineInstr*, 2> HexagonInstrInfo::getBranchingInstrs(
return Jumpers;
--I;
}
- if (!isUnpredicatedTerminator(&*I))
+ if (!isUnpredicatedTerminator(*I))
return Jumpers;
// Get the last instruction in the block.
@@ -2811,7 +3178,7 @@ SmallVector<MachineInstr*, 2> HexagonInstrInfo::getBranchingInstrs(
MachineInstr *SecondLastInst = nullptr;
// Find one more terminator if present.
do {
- if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(&*I)) {
+ if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
if (!SecondLastInst) {
SecondLastInst = &*I;
Jumpers.push_back(SecondLastInst);
@@ -2826,6 +3193,23 @@ SmallVector<MachineInstr*, 2> HexagonInstrInfo::getBranchingInstrs(
}
+short HexagonInstrInfo::getBaseWithLongOffset(short Opcode) const {
+ if (Opcode < 0)
+ return -1;
+ return Hexagon::getBaseWithLongOffset(Opcode);
+}
+
+
+short HexagonInstrInfo::getBaseWithLongOffset(const MachineInstr *MI) const {
+ return Hexagon::getBaseWithLongOffset(MI->getOpcode());
+}
+
+
+short HexagonInstrInfo::getBaseWithRegOffset(const MachineInstr *MI) const {
+ return Hexagon::getBaseWithRegOffset(MI->getOpcode());
+}
+
+
// Returns Operand Index for the constant extended instruction.
unsigned HexagonInstrInfo::getCExtOpNum(const MachineInstr *MI) const {
const uint64_t F = MI->getDesc().TSFlags;
@@ -3102,6 +3486,7 @@ int HexagonInstrInfo::getDotNewOp(const MachineInstr* MI) const {
return 0;
}
+
// Returns the opcode to use when converting MI, which is a conditional jump,
// into a conditional instruction which uses the .new value of the predicate.
// We also use branch probabilities to add a hint to the jump.
@@ -3353,8 +3738,8 @@ HexagonII::SubInstructionGroup HexagonInstrInfo::getDuplexCandidateGroup(
case Hexagon::S4_storeirb_io:
// memb(Rs+#u4) = #U1
Src1Reg = MI->getOperand(0).getReg();
- if (isIntRegForSubInst(Src1Reg) && MI->getOperand(1).isImm() &&
- isUInt<4>(MI->getOperand(1).getImm()) && MI->getOperand(2).isImm() &&
+ if (isIntRegForSubInst(Src1Reg) &&
+ MI->getOperand(1).isImm() && isUInt<4>(MI->getOperand(1).getImm()) &&
MI->getOperand(2).isImm() && isUInt<1>(MI->getOperand(2).getImm()))
return HexagonII::HSIG_S2;
break;
@@ -3532,7 +3917,7 @@ unsigned HexagonInstrInfo::getInstrTimingClassLatency(
// Default to one cycle for no itinerary. However, an "empty" itinerary may
// still have a MinLatency property, which getStageLatency checks.
if (!ItinData)
- return getInstrLatency(ItinData, MI);
+ return getInstrLatency(ItinData, *MI);
// Get the latency embedded in the itinerary. If we're not using timing class
// latencies or if we using BSB scheduling, then restrict the maximum latency
@@ -3737,7 +4122,7 @@ unsigned HexagonInstrInfo::nonDbgBundleSize(
assert(BundleHead->isBundle() && "Not a bundle header");
auto MII = BundleHead.getInstrIterator();
// Skip the bundle header.
- return nonDbgMICount(++MII, getBundleEnd(BundleHead));
+ return nonDbgMICount(++MII, getBundleEnd(*BundleHead));
}
@@ -3770,7 +4155,7 @@ bool HexagonInstrInfo::invertAndChangeJumpTarget(
--TargetPos;
assert((TargetPos >= 0) && MI->getOperand(TargetPos).isMBB());
MI->getOperand(TargetPos).setMBB(NewTarget);
- if (EnableBranchPrediction && isPredicatedNew(MI)) {
+ if (EnableBranchPrediction && isPredicatedNew(*MI)) {
NewOpcode = reversePrediction(NewOpcode);
}
MI->setDesc(get(NewOpcode));
@@ -3826,3 +4211,7 @@ bool HexagonInstrInfo::validateBranchCond(const ArrayRef<MachineOperand> &Cond)
return Cond.empty() || (Cond[0].isImm() && (Cond.size() != 1));
}
+
+short HexagonInstrInfo::xformRegToImmOffset(const MachineInstr *MI) const {
+ return Hexagon::xformRegToImmOffset(MI->getOpcode());
+}
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h
index 9530d9f2aa0d..66b6883c955b 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -43,7 +43,7 @@ public:
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
- unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// If the specified machine instruction is a direct
@@ -51,7 +51,7 @@ public:
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
- unsigned isStoreToStackSlot(const MachineInstr *MI,
+ unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// Analyze the branching code at the end of MBB, returning
@@ -79,10 +79,10 @@ public:
/// If AllowModify is true, then this routine is allowed to modify the basic
/// block (e.g. delete instructions after the unconditional branch).
///
- bool AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const override;
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const override;
/// Remove the branching code at the end of the specific MBB.
/// This is only invoked in cases where AnalyzeBranch returns success. It
@@ -101,7 +101,7 @@ public:
/// merging needs to be disabled.
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const override;
+ const DebugLoc &DL) const override;
/// Return true if it's profitable to predicate
/// instructions with accumulated instruction latency of "NumCycles"
@@ -141,9 +141,8 @@ public:
/// The source and destination registers may overlap, which may require a
/// careful implementation when multiple copy instructions are required for
/// large registers. See for example the ARM target.
- void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
/// Store the specified register of the given register class to the specified
@@ -171,7 +170,7 @@ public:
/// into real instructions. The target can edit MI in place, or it can insert
/// new instructions and erase MI. The function should return true if
/// anything was changed.
- bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
+ bool expandPostRAPseudo(MachineInstr &MI) const override;
/// Reverses the branch condition of the specified condition list,
/// returning false on success and true if it cannot be reversed.
@@ -183,11 +182,11 @@ public:
MachineBasicBlock::iterator MI) const override;
/// Returns true if the instruction is already predicated.
- bool isPredicated(const MachineInstr *MI) const override;
+ bool isPredicated(const MachineInstr &MI) const override;
/// Convert the instruction into a predicated instruction.
/// It returns true if the operation was successful.
- bool PredicateInstruction(MachineInstr *MI,
+ bool PredicateInstruction(MachineInstr &MI,
ArrayRef<MachineOperand> Cond) const override;
/// Returns true if the first specified predicate
@@ -198,17 +197,17 @@ public:
/// If the specified instruction defines any predicate
/// or condition code register(s) used for predication, returns true as well
/// as the definition predicate(s) by reference.
- bool DefinesPredicate(MachineInstr *MI,
+ bool DefinesPredicate(MachineInstr &MI,
std::vector<MachineOperand> &Pred) const override;
/// Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
- bool isPredicable(MachineInstr *MI) const override;
+ bool isPredicable(MachineInstr &MI) const override;
/// Test if the given instruction should be considered a scheduling boundary.
/// This primarily includes labels and terminators.
- bool isSchedulingBoundary(const MachineInstr *MI,
+ bool isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const override;
@@ -227,15 +226,14 @@ public:
/// in SrcReg and SrcReg2 if having two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
- bool analyzeCompare(const MachineInstr *MI,
- unsigned &SrcReg, unsigned &SrcReg2,
- int &Mask, int &Value) const override;
+ bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &Mask, int &Value) const override;
/// Compute the instruction latency of a given instruction.
/// If the instruction has higher cost when predicated, it's returned via
/// PredCost.
unsigned getInstrLatency(const InstrItineraryData *ItinData,
- const MachineInstr *MI,
+ const MachineInstr &MI,
unsigned *PredCost = 0) const override;
/// Create machine specific model for scheduling.
@@ -246,10 +244,9 @@ public:
// to tell, even without aliasing information, that two MIs access different
// memory addresses. This function returns true if two MIs access different
// memory addresses and false otherwise.
- bool areMemAccessesTriviallyDisjoint(MachineInstr *MIa, MachineInstr *MIb,
- AliasAnalysis *AA = nullptr)
- const override;
-
+ bool
+ areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+ AliasAnalysis *AA = nullptr) const override;
/// HexagonInstrInfo specifics.
///
@@ -301,20 +298,24 @@ public:
bool isNewValueStore(unsigned Opcode) const;
bool isOperandExtended(const MachineInstr *MI, unsigned OperandNum) const;
bool isPostIncrement(const MachineInstr* MI) const;
- bool isPredicatedNew(const MachineInstr *MI) const;
+ bool isPredicatedNew(const MachineInstr &MI) const;
bool isPredicatedNew(unsigned Opcode) const;
- bool isPredicatedTrue(const MachineInstr *MI) const;
+ bool isPredicatedTrue(const MachineInstr &MI) const;
bool isPredicatedTrue(unsigned Opcode) const;
bool isPredicated(unsigned Opcode) const;
bool isPredicateLate(unsigned Opcode) const;
bool isPredictedTaken(unsigned Opcode) const;
bool isSaveCalleeSavedRegsCall(const MachineInstr *MI) const;
+ bool isSignExtendingLoad(const MachineInstr &MI) const;
bool isSolo(const MachineInstr* MI) const;
bool isSpillPredRegOp(const MachineInstr *MI) const;
+ bool isTailCall(const MachineInstr *MI) const;
bool isTC1(const MachineInstr *MI) const;
bool isTC2(const MachineInstr *MI) const;
bool isTC2Early(const MachineInstr *MI) const;
bool isTC4x(const MachineInstr *MI) const;
+ bool isToBeScheduledASAP(const MachineInstr *MI1,
+ const MachineInstr *MI2) const;
bool isV60VectorInstruction(const MachineInstr *MI) const;
bool isValidAutoIncImm(const EVT VT, const int Offset) const;
bool isValidOffset(unsigned Opcode, int Offset, bool Extend = true) const;
@@ -322,8 +323,10 @@ public:
bool isVecALU(const MachineInstr *MI) const;
bool isVecUsableNextPacket(const MachineInstr *ProdMI,
const MachineInstr *ConsMI) const;
+ bool isZeroExtendingLoad(const MachineInstr &MI) const;
-
+ bool addLatencyToSchedule(const MachineInstr *MI1,
+ const MachineInstr *MI2) const;
bool canExecuteInBundle(const MachineInstr *First,
const MachineInstr *Second) const;
bool hasEHLabel(const MachineBasicBlock *B) const;
@@ -341,11 +344,15 @@ public:
bool predOpcodeHasNot(ArrayRef<MachineOperand> Cond) const;
+ short getAbsoluteForm(const MachineInstr *MI) const;
unsigned getAddrMode(const MachineInstr* MI) const;
unsigned getBaseAndOffset(const MachineInstr *MI, int &Offset,
unsigned &AccessSize) const;
bool getBaseAndOffsetPosition(const MachineInstr *MI, unsigned &BasePos,
unsigned &OffsetPos) const;
+ short getBaseWithLongOffset(short Opcode) const;
+ short getBaseWithLongOffset(const MachineInstr *MI) const;
+ short getBaseWithRegOffset(const MachineInstr *MI) const;
SmallVector<MachineInstr*,2> getBranchingInstrs(MachineBasicBlock& MBB) const;
unsigned getCExtOpNum(const MachineInstr *MI) const;
HexagonII::CompoundGroup
@@ -395,6 +402,7 @@ public:
bool reversePredSense(MachineInstr* MI) const;
unsigned reversePrediction(unsigned Opcode) const;
bool validateBranchCond(const ArrayRef<MachineOperand> &Cond) const;
+ short xformRegToImmOffset(const MachineInstr *MI) const;
};
}
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.td b/lib/Target/Hexagon/HexagonInstrInfo.td
index 421403f49724..74dc5ac9a3ad 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.td
+++ b/lib/Target/Hexagon/HexagonInstrInfo.td
@@ -32,6 +32,9 @@ def LoReg: OutPatFrag<(ops node:$Rs),
def HiReg: OutPatFrag<(ops node:$Rs),
(EXTRACT_SUBREG (i64 $Rs), subreg_hireg)>;
+def orisadd: PatFrag<(ops node:$Addr, node:$off),
+ (or node:$Addr, node:$off), [{ return orIsAdd(N); }]>;
+
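
orisadd matches an or that frame lowering emits in place of an add: when the operands share no set bits, no carries can occur and the two operations agree, so the or can reuse the same addressing-mode patterns. A sketch of the underlying property (orIsAdd itself is the target's predicate, not reproduced here):

  #include <cstdint>
  #include <cassert>
  // If A and B have disjoint bits, A | B == A + B.
  static uint32_t orAsAdd(uint32_t A, uint32_t B) {
    assert((A & B) == 0 && "operands must not share set bits");
    return A | B; // equals A + B under the assertion
  }
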
// SDNode for converting immediate C to C-1.
def DEC_CONST_SIGNED : SDNodeXForm<imm, [{
// Return the byte immediate const-1 as an SDNode.
@@ -418,6 +421,12 @@ defm addi : Addri_base<"add", add>, ImmRegRel, PredNewRel;
def: Pat<(i32 (add I32:$Rs, s32ImmPred:$s16)),
(i32 (A2_addi I32:$Rs, imm:$s16))>;
+let hasNewValue = 1, hasSideEffects = 0, isPseudo = 1 in
+def A2_iconst
+ : ALU32_ri <(outs IntRegs:$Rd),
+ (ins s23_2Imm:$s23_2),
+ "$Rd = iconst(#$s23_2)"> {}
+
//===----------------------------------------------------------------------===//
// Template class used for the following ALU32 instructions.
// Rd=and(Rs,#s10)
@@ -1430,7 +1439,7 @@ class CondStr<string CReg, bit True, bit New> {
string S = "if (" # !if(True,"","!") # CReg # !if(New,".new","") # ") ";
}
class JumpOpcStr<string Mnemonic, bit New, bit Taken> {
- string S = Mnemonic # !if(Taken, ":t", !if(New, ":nt", ""));
+ string S = Mnemonic # !if(Taken, ":t", ":nt");
}
let isBranch = 1, isBarrier = 1, Defs = [PC], hasSideEffects = 0,
@@ -1438,9 +1447,9 @@ let isBranch = 1, isBarrier = 1, Defs = [PC], hasSideEffects = 0,
isExtendable = 1, opExtendable = 0, isExtentSigned = 1,
opExtentBits = 24, opExtentAlign = 2, InputType = "imm" in
class T_JMP<string ExtStr>
- : JInst<(outs), (ins brtarget:$dst),
+ : JInst_CJUMP_UCJUMP<(outs), (ins brtarget:$dst),
"jump " # ExtStr # "$dst",
- [], "", J_tc_2early_SLOT23> {
+ [], "", J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT> {
bits<24> dst;
let IClass = 0b0101;
@@ -1453,11 +1462,11 @@ let isBranch = 1, Defs = [PC], hasSideEffects = 0, isPredicated = 1,
isExtendable = 1, opExtendable = 1, isExtentSigned = 1,
opExtentBits = 17, opExtentAlign = 2, InputType = "imm" in
class T_JMP_c<bit PredNot, bit isPredNew, bit isTak, string ExtStr>
- : JInst<(outs), (ins PredRegs:$src, brtarget:$dst),
+ : JInst_CJUMP_UCJUMP<(outs), (ins PredRegs:$src, brtarget:$dst),
CondStr<"$src", !if(PredNot,0,1), isPredNew>.S #
JumpOpcStr<"jump", isPredNew, isTak>.S # " " #
ExtStr # "$dst",
- [], "", J_tc_2early_SLOT23>, ImmRegRel {
+ [], "", J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT>, ImmRegRel {
let isTaken = isTak;
let isPredicatedFalse = PredNot;
let isPredicatedNew = isPredNew;
@@ -1576,19 +1585,31 @@ let Defs = VolatileV3.Regs in {
let isTerminator = 1, hasSideEffects = 0 in {
defm J2_jump : JMP_base<"JMP", "">, PredNewRel;
- // Deal with explicit assembly
- // - never extened a jump #, always extend a jump ##
- let isAsmParserOnly = 1 in {
- defm J2_jump_ext : JMP_base<"JMP", "##">;
- defm J2_jump_noext : JMP_base<"JMP", "#">;
- }
-
defm J2_jumpr : JMPR_base<"JMPr">, PredNewRel;
let isReturn = 1, isCodeGenOnly = 1 in
defm JMPret : JMPR_base<"JMPret">, PredNewRel;
}
+let validSubTargets = HasV60SubT in
+multiclass JMPpt_base<string BaseOp> {
+ let BaseOpcode = BaseOp in {
+ def tpt : T_JMP_c <0, 0, 1, "">; // Predicate true - taken
+ def fpt : T_JMP_c <1, 0, 1, "">; // Predicate false - taken
+ }
+}
+
+let validSubTargets = HasV60SubT in
+multiclass JMPRpt_base<string BaseOp> {
+ let BaseOpcode = BaseOp in {
+ def tpt : T_JMPr_c<0, 0, 1>; // predicate true - taken
+ def fpt : T_JMPr_c<1, 0, 1>; // predicate false - taken
+ }
+}
+
+defm J2_jumpr : JMPRpt_base<"JMPr">;
+defm J2_jump : JMPpt_base<"JMP">;
+
def: Pat<(br bb:$dst),
(J2_jump brtarget:$dst)>;
def: Pat<(retflag),
@@ -1769,6 +1790,8 @@ multiclass Loadx_pat<PatFrag Load, ValueType VT, PatLeaf ImmPred,
def: Pat<(VT (Load AddrFI:$fi)), (VT (MI AddrFI:$fi, 0))>;
def: Pat<(VT (Load (add (i32 AddrFI:$fi), ImmPred:$Off))),
(VT (MI AddrFI:$fi, imm:$Off))>;
+ def: Pat<(VT (Load (orisadd (i32 AddrFI:$fi), ImmPred:$Off))),
+ (VT (MI AddrFI:$fi, imm:$Off))>;
def: Pat<(VT (Load (add (i32 IntRegs:$Rs), ImmPred:$Off))),
(VT (MI IntRegs:$Rs, imm:$Off))>;
def: Pat<(VT (Load (i32 IntRegs:$Rs))), (VT (MI IntRegs:$Rs, 0))>;
@@ -2010,6 +2033,12 @@ let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 13,
def LDriw_pred : LDInst<(outs PredRegs:$dst),
(ins IntRegs:$addr, s11_2Ext:$off),
".error \"should not emit\"", []>;
+// Load modifier.
+let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 13,
+ isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 0 in
+def LDriw_mod : LDInst<(outs ModRegs:$dst),
+ (ins IntRegs:$addr, s11_2Ext:$off),
+ ".error \"should not emit\"", []>;
let Defs = [R29, R30, R31], Uses = [R30], hasSideEffects = 0 in
def L2_deallocframe : LDInst<(outs), (ins),
@@ -2023,7 +2052,7 @@ let Defs = [R29, R30, R31], Uses = [R30], hasSideEffects = 0 in
}
// Load / Post increment circular addressing mode.
-let Uses = [CS], hasSideEffects = 0 in
+let Uses = [CS], hasSideEffects = 0, addrMode = PostInc in
class T_load_pcr<string mnemonic, RegisterClass RC, bits<4> MajOp>
: LDInst <(outs RC:$dst, IntRegs:$_dst_),
(ins IntRegs:$Rz, ModRegs:$Mu),
@@ -2070,7 +2099,7 @@ let accessSize = DoubleWordAccess in
def L2_loadrd_pcr : T_load_pcr <"memd", DoubleRegs, 0b1110>;
// Load / Post increment circular addressing mode.
-let Uses = [CS], hasSideEffects = 0 in
+let Uses = [CS], hasSideEffects = 0, addrMode = PostInc in
class T_loadalign_pcr<string mnemonic, bits<4> MajOp, MemAccessSize AccessSz >
: LDInst <(outs DoubleRegs:$dst, IntRegs:$_dst_),
(ins DoubleRegs:$_src_, IntRegs:$Rz, ModRegs:$Mu),
@@ -2099,7 +2128,7 @@ def L2_loadalignh_pcr : T_loadalign_pcr <"memh_fifo", 0b0010, HalfWordAccess>;
//===----------------------------------------------------------------------===//
// Circular loads with immediate offset.
//===----------------------------------------------------------------------===//
-let Uses = [CS], mayLoad = 1, hasSideEffects = 0 in
+let Uses = [CS], mayLoad = 1, hasSideEffects = 0, addrMode = PostInc in
class T_load_pci <string mnemonic, RegisterClass RC,
Operand ImmOp, bits<4> MajOp>
: LDInstPI<(outs RC:$dst, IntRegs:$_dst_),
@@ -2155,28 +2184,6 @@ let accessSize = WordAccess, hasNewValue = 0 in {
let accessSize = DoubleWordAccess, hasNewValue = 0 in
def L2_loadrd_pci : T_load_pci <"memd", DoubleRegs, s4_3Imm, 0b1110>;
-//===----------------------------------------------------------------------===//
-// Circular loads - Pseudo
-//
-// Please note that the input operand order in the pseudo instructions
-// doesn't match with the real instructions. Pseudo instructions operand
-// order should mimics the ordering in the intrinsics. Also, 'src2' doesn't
-// appear in the AsmString because it's same as 'dst'.
-//===----------------------------------------------------------------------===//
-let isCodeGenOnly = 1, mayLoad = 1, hasSideEffects = 0, isPseudo = 1 in
-class T_load_pci_pseudo <string opc, RegisterClass RC>
- : LDInstPI<(outs IntRegs:$_dst_, RC:$dst),
- (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4Imm:$src4),
- ".error \"$dst = "#opc#"($src1++#$src4:circ($src3))\"",
- [], "$src1 = $_dst_">;
-
-def L2_loadrb_pci_pseudo : T_load_pci_pseudo <"memb", IntRegs>;
-def L2_loadrub_pci_pseudo : T_load_pci_pseudo <"memub", IntRegs>;
-def L2_loadrh_pci_pseudo : T_load_pci_pseudo <"memh", IntRegs>;
-def L2_loadruh_pci_pseudo : T_load_pci_pseudo <"memuh", IntRegs>;
-def L2_loadri_pci_pseudo : T_load_pci_pseudo <"memw", IntRegs>;
-def L2_loadrd_pci_pseudo : T_load_pci_pseudo <"memd", DoubleRegs>;
-
// TODO: memb_fifo and memh_fifo must take destination register as input.
// One-off circ loads - not enough in common to break into a class.
@@ -2233,7 +2240,7 @@ def S4_stored_locked : T_store_locked <"memd_locked", DoubleRegs>;
//===----------------------------------------------------------------------===//
// Bit-reversed loads with auto-increment register
//===----------------------------------------------------------------------===//
-let hasSideEffects = 0 in
+let hasSideEffects = 0, addrMode = PostInc in
class T_load_pbr<string mnemonic, RegisterClass RC,
MemAccessSize addrSize, bits<4> majOp>
: LDInst
@@ -2278,26 +2285,6 @@ def L2_loadalignh_pbr :T_load_pbr <"memh_fifo", DoubleRegs,
HalfWordAccess, 0b0010>;
//===----------------------------------------------------------------------===//
-// Bit-reversed loads - Pseudo
-//
-// Please note that 'src2' doesn't appear in the AsmString because
-// it's same as 'dst'.
-//===----------------------------------------------------------------------===//
-let isCodeGenOnly = 1, mayLoad = 1, hasSideEffects = 0, isPseudo = 1 in
-class T_load_pbr_pseudo <string opc, RegisterClass RC>
- : LDInstPI<(outs IntRegs:$_dst_, RC:$dst),
- (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
- ".error \"$dst = "#opc#"($src1++$src3:brev)\"",
- [], "$src1 = $_dst_">;
-
-def L2_loadrb_pbr_pseudo : T_load_pbr_pseudo <"memb", IntRegs>;
-def L2_loadrub_pbr_pseudo : T_load_pbr_pseudo <"memub", IntRegs>;
-def L2_loadrh_pbr_pseudo : T_load_pbr_pseudo <"memh", IntRegs>;
-def L2_loadruh_pbr_pseudo : T_load_pbr_pseudo <"memuh", IntRegs>;
-def L2_loadri_pbr_pseudo : T_load_pbr_pseudo <"memw", IntRegs>;
-def L2_loadrd_pbr_pseudo : T_load_pbr_pseudo <"memd", DoubleRegs>;
-
-//===----------------------------------------------------------------------===//
// LD -
//===----------------------------------------------------------------------===//
@@ -3558,14 +3545,20 @@ let addrMode = BaseImmOffset, InputType = "imm" in {
// AddedComplexity) to the individual patterns.
class Storex_fi_pat<PatFrag Store, PatFrag Value, InstHexagon MI>
: Pat<(Store Value:$Rs, AddrFI:$fi), (MI AddrFI:$fi, 0, Value:$Rs)>;
-class Storex_fi_add_pat<PatFrag Store, PatFrag Value, PatFrag ImmPred,
- InstHexagon MI>
- : Pat<(Store Value:$Rs, (add (i32 AddrFI:$fi), ImmPred:$Off)),
- (MI AddrFI:$fi, imm:$Off, Value:$Rs)>;
-class Storex_add_pat<PatFrag Store, PatFrag Value, PatFrag ImmPred,
- InstHexagon MI>
- : Pat<(Store Value:$Rt, (add (i32 IntRegs:$Rs), ImmPred:$Off)),
- (MI IntRegs:$Rs, imm:$Off, Value:$Rt)>;
+multiclass Storex_fi_add_pat<PatFrag Store, PatFrag Value, PatFrag ImmPred,
+ InstHexagon MI> {
+ def: Pat<(Store Value:$Rs, (add (i32 AddrFI:$fi), ImmPred:$Off)),
+ (MI AddrFI:$fi, imm:$Off, Value:$Rs)>;
+ def: Pat<(Store Value:$Rs, (orisadd (i32 AddrFI:$fi), ImmPred:$Off)),
+ (MI AddrFI:$fi, imm:$Off, Value:$Rs)>;
+}
+multiclass Storex_add_pat<PatFrag Store, PatFrag Value, PatFrag ImmPred,
+ InstHexagon MI> {
+ def: Pat<(Store Value:$Rt, (add (i32 IntRegs:$Rs), ImmPred:$Off)),
+ (MI IntRegs:$Rs, imm:$Off, Value:$Rt)>;
+ def: Pat<(Store Value:$Rt, (orisadd (i32 IntRegs:$Rs), ImmPred:$Off)),
+ (MI IntRegs:$Rs, imm:$Off, Value:$Rt)>;
+}
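+// The orisadd alternatives above mirror the plain add forms: an "or" can
+// stand in for an add when its operands have no set bits in common, which
+// is typical for an aligned frame object plus a small offset. A minimal
+// sketch of such a fragment, assuming a helper like orIsAdd(N) that proves
+// the operands disjoint via known bits (names here are illustrative):
+//   def orisadd: PatFrag<(ops node:$Addr, node:$off),
+//                        (or node:$Addr, node:$off), [{
+//     return orIsAdd(N);  // true iff (or a, b) computes a + b
+//   }]>;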
class Storex_simple_pat<PatFrag Store, PatFrag Value, InstHexagon MI>
: Pat<(Store Value:$Rt, (i32 IntRegs:$Rs)),
(MI IntRegs:$Rs, 0, Value:$Rt)>;
@@ -3577,14 +3570,20 @@ class Storexm_fi_pat<PatFrag Store, PatFrag Value, PatFrag ValueMod,
InstHexagon MI>
: Pat<(Store Value:$Rs, AddrFI:$fi),
(MI AddrFI:$fi, 0, (ValueMod Value:$Rs))>;
-class Storexm_fi_add_pat<PatFrag Store, PatFrag Value, PatFrag ImmPred,
- PatFrag ValueMod, InstHexagon MI>
- : Pat<(Store Value:$Rs, (add (i32 AddrFI:$fi), ImmPred:$Off)),
- (MI AddrFI:$fi, imm:$Off, (ValueMod Value:$Rs))>;
-class Storexm_add_pat<PatFrag Store, PatFrag Value, PatFrag ImmPred,
- PatFrag ValueMod, InstHexagon MI>
- : Pat<(Store Value:$Rt, (add (i32 IntRegs:$Rs), ImmPred:$Off)),
- (MI IntRegs:$Rs, imm:$Off, (ValueMod Value:$Rt))>;
+multiclass Storexm_fi_add_pat<PatFrag Store, PatFrag Value, PatFrag ImmPred,
+ PatFrag ValueMod, InstHexagon MI> {
+ def: Pat<(Store Value:$Rs, (add (i32 AddrFI:$fi), ImmPred:$Off)),
+ (MI AddrFI:$fi, imm:$Off, (ValueMod Value:$Rs))>;
+ def: Pat<(Store Value:$Rs, (orisadd (i32 AddrFI:$fi), ImmPred:$Off)),
+ (MI AddrFI:$fi, imm:$Off, (ValueMod Value:$Rs))>;
+}
+multiclass Storexm_add_pat<PatFrag Store, PatFrag Value, PatFrag ImmPred,
+ PatFrag ValueMod, InstHexagon MI> {
+ def: Pat<(Store Value:$Rt, (add (i32 IntRegs:$Rs), ImmPred:$Off)),
+ (MI IntRegs:$Rs, imm:$Off, (ValueMod Value:$Rt))>;
+ def: Pat<(Store Value:$Rt, (orisadd (i32 IntRegs:$Rs), ImmPred:$Off)),
+ (MI IntRegs:$Rs, imm:$Off, (ValueMod Value:$Rt))>;
+}
class Storexm_simple_pat<PatFrag Store, PatFrag Value, PatFrag ValueMod,
InstHexagon MI>
: Pat<(Store Value:$Rt, (i32 IntRegs:$Rs)),
@@ -3592,16 +3591,16 @@ class Storexm_simple_pat<PatFrag Store, PatFrag Value, PatFrag ValueMod,
multiclass Storex_pat<PatFrag Store, PatFrag Value, PatLeaf ImmPred,
InstHexagon MI> {
- def: Storex_fi_pat <Store, Value, MI>;
- def: Storex_fi_add_pat <Store, Value, ImmPred, MI>;
- def: Storex_add_pat <Store, Value, ImmPred, MI>;
+ def: Storex_fi_pat <Store, Value, MI>;
+ defm: Storex_fi_add_pat <Store, Value, ImmPred, MI>;
+ defm: Storex_add_pat <Store, Value, ImmPred, MI>;
}
multiclass Storexm_pat<PatFrag Store, PatFrag Value, PatLeaf ImmPred,
PatFrag ValueMod, InstHexagon MI> {
- def: Storexm_fi_pat <Store, Value, ValueMod, MI>;
- def: Storexm_fi_add_pat <Store, Value, ImmPred, ValueMod, MI>;
- def: Storexm_add_pat <Store, Value, ImmPred, ValueMod, MI>;
+ def: Storexm_fi_pat <Store, Value, ValueMod, MI>;
+ defm: Storexm_fi_add_pat <Store, Value, ImmPred, ValueMod, MI>;
+ defm: Storexm_add_pat <Store, Value, ImmPred, ValueMod, MI>;
}
// Regular stores in the DAG have two operands: value and address.
@@ -3610,7 +3609,8 @@ multiclass Storexm_pat<PatFrag Store, PatFrag Value, PatLeaf ImmPred,
// swapped. This relies on the knowledge that the F.Fragment uses names
// "ptr" and "val".
class SwapSt<PatFrag F>
- : PatFrag<(ops node:$val, node:$ptr), F.Fragment>;
+ : PatFrag<(ops node:$val, node:$ptr), F.Fragment, F.PredicateCode,
+ F.OperandTransform>;
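+// Forwarding F.PredicateCode and F.OperandTransform keeps the original
+// fragment's predicate intact across the swap. The atomic store fragments,
+// for instance, are written as (st val, ptr), so the store patterns below
+// can reuse them through SwapSt:
+//   def: Storea_pat<SwapSt<atomic_store_8>, I32, addrgp, S2_storerbgp>;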
let AddedComplexity = 20 in {
defm: Storex_pat<truncstorei8, I32, s32_0ImmPred, S2_storerb_io>;
@@ -3651,6 +3651,12 @@ let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 13,
def STriw_pred : STInst<(outs),
(ins IntRegs:$addr, s11_2Ext:$off, PredRegs:$src1),
".error \"should not emit\"", []>;
+// Store modifier.
+let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 13,
+ isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 0 in
+def STriw_mod : STInst<(outs),
+ (ins IntRegs:$addr, s11_2Ext:$off, ModRegs:$src1),
+ ".error \"should not emit\"", []>;
// S2_allocframe: Allocate stack frame.
let Defs = [R29, R30], Uses = [R29, R31, R30],
@@ -3668,7 +3674,7 @@ def S2_allocframe: ST0Inst <
// S2_storer[bhwdf]_pci: Store byte/half/word/double.
// S2_storer[bhwdf]_pci -> S2_storerbnew_pci
-let Uses = [CS] in
+let Uses = [CS], addrMode = PostInc in
class T_store_pci <string mnemonic, RegisterClass RC,
Operand Imm, bits<4>MajOp,
MemAccessSize AlignSize, string RegSrc = "Rt">
@@ -3711,7 +3717,8 @@ def S2_storeri_pci : T_store_pci<"memw", IntRegs, s4_2Imm, 0b1100,
def S2_storerd_pci : T_store_pci<"memd", DoubleRegs, s4_3Imm, 0b1110,
DoubleWordAccess>;
-let Uses = [CS], isNewValue = 1, mayStore = 1, isNVStore = 1, opNewValue = 4 in
+let Uses = [CS], isNewValue = 1, mayStore = 1, isNVStore = 1, opNewValue = 4,
+ addrMode = PostInc in
class T_storenew_pci <string mnemonic, Operand Imm,
bits<2>MajOp, MemAccessSize AlignSize>
: NVInst < (outs IntRegs:$_dst_),
@@ -3745,29 +3752,9 @@ def S2_storerhnew_pci : T_storenew_pci <"memh", s4_1Imm, 0b01, HalfWordAccess>;
def S2_storerinew_pci : T_storenew_pci <"memw", s4_2Imm, 0b10, WordAccess>;
//===----------------------------------------------------------------------===//
-// Circular stores - Pseudo
-//
-// Please note that the input operand order in the pseudo instructions
-// doesn't match with the real instructions. Pseudo instructions operand
-// order should mimics the ordering in the intrinsics.
-//===----------------------------------------------------------------------===//
-let isCodeGenOnly = 1, mayStore = 1, hasSideEffects = 0, isPseudo = 1 in
-class T_store_pci_pseudo <string opc, RegisterClass RC>
- : STInstPI<(outs IntRegs:$_dst_),
- (ins IntRegs:$src1, RC:$src2, IntRegs:$src3, s4Imm:$src4),
- ".error \""#opc#"($src1++#$src4:circ($src3)) = $src2\"",
- [], "$_dst_ = $src1">;
-
-def S2_storerb_pci_pseudo : T_store_pci_pseudo <"memb", IntRegs>;
-def S2_storerh_pci_pseudo : T_store_pci_pseudo <"memh", IntRegs>;
-def S2_storerf_pci_pseudo : T_store_pci_pseudo <"memh", IntRegs>;
-def S2_storeri_pci_pseudo : T_store_pci_pseudo <"memw", IntRegs>;
-def S2_storerd_pci_pseudo : T_store_pci_pseudo <"memd", DoubleRegs>;
-
-//===----------------------------------------------------------------------===//
// Circular stores with auto-increment register
//===----------------------------------------------------------------------===//
-let Uses = [CS] in
+let Uses = [CS], addrMode = PostInc in
class T_store_pcr <string mnemonic, RegisterClass RC, bits<4>MajOp,
MemAccessSize AlignSize, string RegSrc = "Rt">
: STInst <(outs IntRegs:$_dst_),
@@ -3803,7 +3790,8 @@ def S2_storerf_pcr : T_store_pcr<"memh", IntRegs, 0b1011,
//===----------------------------------------------------------------------===//
// Circular .new stores with auto-increment register
//===----------------------------------------------------------------------===//
-let Uses = [CS], isNewValue = 1, mayStore = 1, isNVStore = 1, opNewValue = 3 in
+let Uses = [CS], isNewValue = 1, mayStore = 1, isNVStore = 1, opNewValue = 3,
+ addrMode = PostInc in
class T_storenew_pcr <string mnemonic, bits<2>MajOp,
MemAccessSize AlignSize>
: NVInst <(outs IntRegs:$_dst_),
@@ -3834,7 +3822,7 @@ def S2_storerinew_pcr : T_storenew_pcr <"memw", 0b10, WordAccess>;
//===----------------------------------------------------------------------===//
// Bit-reversed stores with auto-increment register
//===----------------------------------------------------------------------===//
-let hasSideEffects = 0 in
+let hasSideEffects = 0, addrMode = PostInc in
class T_store_pbr<string mnemonic, RegisterClass RC,
MemAccessSize addrSize, bits<3> majOp,
bit isHalf = 0>
@@ -3879,7 +3867,7 @@ def S2_storerd_pbr : T_store_pbr<"memd", DoubleRegs, DoubleWordAccess, 0b110>;
// Bit-reversed .new stores with auto-increment register
//===----------------------------------------------------------------------===//
let isNewValue = 1, mayStore = 1, isNVStore = 1, opNewValue = 3,
- hasSideEffects = 0 in
+ hasSideEffects = 0, addrMode = PostInc in
class T_storenew_pbr<string mnemonic, MemAccessSize addrSize, bits<2> majOp>
: NVInst <(outs IntRegs:$_dst_),
(ins IntRegs:$Rz, ModRegs:$Mu, IntRegs:$Nt),
@@ -3910,26 +3898,6 @@ let BaseOpcode = "S2_storeri_pbr" in
def S2_storerinew_pbr : T_storenew_pbr<"memw", WordAccess, 0b10>;
//===----------------------------------------------------------------------===//
-// Bit-reversed stores - Pseudo
-//
-// Please note that the input operand order in the pseudo instructions
-// doesn't match with the real instructions. Pseudo instructions operand
-// order should mimics the ordering in the intrinsics.
-//===----------------------------------------------------------------------===//
-let isCodeGenOnly = 1, mayStore = 1, hasSideEffects = 0, isPseudo = 1 in
-class T_store_pbr_pseudo <string opc, RegisterClass RC>
- : STInstPI<(outs IntRegs:$_dst_),
- (ins IntRegs:$src1, RC:$src2, IntRegs:$src3),
- ".error \""#opc#"($src1++$src3:brev) = $src2\"",
- [], "$_dst_ = $src1">;
-
-def S2_storerb_pbr_pseudo : T_store_pbr_pseudo <"memb", IntRegs>;
-def S2_storerh_pbr_pseudo : T_store_pbr_pseudo <"memh", IntRegs>;
-def S2_storeri_pbr_pseudo : T_store_pbr_pseudo <"memw", IntRegs>;
-def S2_storerf_pbr_pseudo : T_store_pbr_pseudo <"memh", IntRegs>;
-def S2_storerd_pbr_pseudo : T_store_pbr_pseudo <"memd", DoubleRegs>;
-
-//===----------------------------------------------------------------------===//
// ST -
//===----------------------------------------------------------------------===//
@@ -4201,22 +4169,16 @@ def S2_clbnorm : T_COUNT_LEADING_32<"normamt", 0b000, 0b111>;
// Count leading zeros.
def: Pat<(i32 (ctlz I32:$Rs)), (S2_cl0 I32:$Rs)>;
def: Pat<(i32 (trunc (ctlz I64:$Rss))), (S2_cl0p I64:$Rss)>;
-def: Pat<(i32 (ctlz_zero_undef I32:$Rs)), (S2_cl0 I32:$Rs)>;
-def: Pat<(i32 (trunc (ctlz_zero_undef I64:$Rss))), (S2_cl0p I64:$Rss)>;
// Count trailing zeros: 32-bit.
def: Pat<(i32 (cttz I32:$Rs)), (S2_ct0 I32:$Rs)>;
-def: Pat<(i32 (cttz_zero_undef I32:$Rs)), (S2_ct0 I32:$Rs)>;
// Count leading ones.
def: Pat<(i32 (ctlz (not I32:$Rs))), (S2_cl1 I32:$Rs)>;
def: Pat<(i32 (trunc (ctlz (not I64:$Rss)))), (S2_cl1p I64:$Rss)>;
-def: Pat<(i32 (ctlz_zero_undef (not I32:$Rs))), (S2_cl1 I32:$Rs)>;
-def: Pat<(i32 (trunc (ctlz_zero_undef (not I64:$Rss)))), (S2_cl1p I64:$Rss)>;
// Count trailing ones: 32-bit.
def: Pat<(i32 (cttz (not I32:$Rs))), (S2_ct1 I32:$Rs)>;
-def: Pat<(i32 (cttz_zero_undef (not I32:$Rs))), (S2_ct1 I32:$Rs)>;
// The 64-bit counts leading/trailing are defined in HexagonInstrInfoV4.td.
@@ -4561,6 +4523,9 @@ let isMoveImm = 1, isAsCheapAsAMove = 1, isReMaterializable = 1,
(ins IntRegs:$Rs, IntRegs:$fi, s32Imm:$off), "">;
}
+def: Pat<(i32 (orisadd (i32 AddrFI:$Rs), s32ImmPred:$off)),
+ (i32 (TFR_FI (i32 AddrFI:$Rs), s32ImmPred:$off))>;
+
//===----------------------------------------------------------------------===//
// CRUSER - Type.
//===----------------------------------------------------------------------===//
@@ -4779,10 +4744,10 @@ def HexagonCONST32_GP : SDNode<"HexagonISD::CONST32_GP", SDTHexagonCONST32>;
// HI/LO Instructions
let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0,
hasNewValue = 1, opNewValue = 0 in
-class REG_IMMED<string RegHalf, string Op, bit Rs, bits<3> MajOp, bit MinOp>
+class REG_IMMED<string RegHalf, bit Rs, bits<3> MajOp, bit MinOp>
: ALU32_ri<(outs IntRegs:$dst),
- (ins i32imm:$imm_value),
- "$dst"#RegHalf#" = #"#Op#"($imm_value)", []> {
+ (ins u16Imm:$imm_value),
+ "$dst"#RegHalf#" = $imm_value", []> {
bits<5> dst;
bits<32> imm_value;
let IClass = 0b0111;
@@ -4791,15 +4756,13 @@ class REG_IMMED<string RegHalf, string Op, bit Rs, bits<3> MajOp, bit MinOp>
let Inst{26-24} = MajOp;
let Inst{21} = MinOp;
let Inst{20-16} = dst;
- let Inst{23-22} = !if (!eq(Op, "LO"), imm_value{15-14}, imm_value{31-30});
- let Inst{13-0} = !if (!eq(Op, "LO"), imm_value{13-0}, imm_value{29-16});
+ let Inst{23-22} = imm_value{15-14};
+ let Inst{13-0} = imm_value{13-0};
}
let isAsmParserOnly = 1 in {
- def LO : REG_IMMED<".l", "LO", 0b0, 0b001, 0b1>;
- def LO_H : REG_IMMED<".l", "HI", 0b0, 0b001, 0b1>;
- def HI : REG_IMMED<".h", "HI", 0b0, 0b010, 0b1>;
- def HI_L : REG_IMMED<".h", "LO", 0b0, 0b010, 0b1>;
+ def LO : REG_IMMED<".l", 0b0, 0b001, 0b1>;
+ def HI : REG_IMMED<".h", 0b0, 0b010, 0b1>;
}
let isMoveImm = 1, isCodeGenOnly = 1 in
@@ -4866,7 +4829,7 @@ def TFR_PdTrue : SInst<(outs PredRegs:$dst), (ins), "",
let hasSideEffects = 0, isReMaterializable = 1, isPseudo = 1,
isCodeGenOnly = 1 in
-def TFR_PdFalse : SInst<(outs PredRegs:$dst), (ins), "$dst = xor($dst, $dst)",
+def TFR_PdFalse : SInst<(outs PredRegs:$dst), (ins), "",
[(set (i1 PredRegs:$dst), 0)]>;
// Pseudo instructions.
diff --git a/lib/Target/Hexagon/HexagonInstrInfoV3.td b/lib/Target/Hexagon/HexagonInstrInfoV3.td
index 84d035da451b..9024a43aa7eb 100644
--- a/lib/Target/Hexagon/HexagonInstrInfoV3.td
+++ b/lib/Target/Hexagon/HexagonInstrInfoV3.td
@@ -21,25 +21,26 @@ def callv3nr : SDNode<"HexagonISD::CALLv3nr", SDT_SPCall,
// J +
//===----------------------------------------------------------------------===//
// Call subroutine.
-let isCall = 1, hasSideEffects = 1, Defs = VolatileV3.Regs, isPredicable = 1,
+let isCall = 1, hasSideEffects = 1, isPredicable = 1,
isExtended = 0, isExtendable = 1, opExtendable = 0,
isExtentSigned = 1, opExtentBits = 24, opExtentAlign = 2 in
-class T_Call<string ExtStr>
+class T_Call<bit CSR, string ExtStr>
: JInst<(outs), (ins calltarget:$dst),
"call " # ExtStr # "$dst", [], "", J_tc_2early_SLOT23> {
let BaseOpcode = "call";
bits<24> dst;
+ let Defs = !if (CSR, VolatileV3.Regs, []);
let IClass = 0b0101;
let Inst{27-25} = 0b101;
let Inst{24-16,13-1} = dst{23-2};
let Inst{0} = 0b0;
}
-let isCall = 1, hasSideEffects = 1, Defs = VolatileV3.Regs, isPredicated = 1,
+let isCall = 1, hasSideEffects = 1, isPredicated = 1,
isExtended = 0, isExtendable = 1, opExtendable = 1,
isExtentSigned = 1, opExtentBits = 17, opExtentAlign = 2 in
-class T_CallPred<bit IfTrue, string ExtStr>
+class T_CallPred<bit CSR, bit IfTrue, string ExtStr>
: JInst<(outs), (ins PredRegs:$Pu, calltarget:$dst),
CondStr<"$Pu", IfTrue, 0>.S # "call " # ExtStr # "$dst",
[], "", J_tc_2early_SLOT23> {
@@ -48,6 +49,7 @@ class T_CallPred<bit IfTrue, string ExtStr>
bits<2> Pu;
bits<17> dst;
+ let Defs = !if (CSR, VolatileV3.Regs, []);
let IClass = 0b0101;
let Inst{27-24} = 0b1101;
let Inst{23-22,20-16,13,7-1} = dst{16-2};
@@ -56,16 +58,19 @@ class T_CallPred<bit IfTrue, string ExtStr>
let Inst{9-8} = Pu;
}
-multiclass T_Calls<string ExtStr> {
- def NAME : T_Call<ExtStr>;
- def t : T_CallPred<1, ExtStr>;
- def f : T_CallPred<0, ExtStr>;
+multiclass T_Calls<bit CSR, string ExtStr> {
+ def NAME : T_Call<CSR, ExtStr>;
+ def t : T_CallPred<CSR, 1, ExtStr>;
+ def f : T_CallPred<CSR, 0, ExtStr>;
}
-defm J2_call: T_Calls<"">, PredRel;
+defm J2_call: T_Calls<1, "">, PredRel;
let isCodeGenOnly = 1, isCall = 1, hasSideEffects = 1, Defs = VolatileV3.Regs in
-def CALLv3nr : T_Call<"">, PredRel;
+def CALLv3nr : T_Call<1, "">, PredRel;
+
+let isCodeGenOnly = 1, isCall = 1, hasSideEffects = 1, Defs = [PC, R31, R6, R7, P0] in
+def CALLstk : T_Call<0, "">, PredRel;
//===----------------------------------------------------------------------===//
// J -
diff --git a/lib/Target/Hexagon/HexagonInstrInfoV4.td b/lib/Target/Hexagon/HexagonInstrInfoV4.td
index 37c2042a2ccd..398d2d3bc716 100644
--- a/lib/Target/Hexagon/HexagonInstrInfoV4.td
+++ b/lib/Target/Hexagon/HexagonInstrInfoV4.td
@@ -1047,6 +1047,18 @@ let AddedComplexity = 40 in {
def: Storexs_pat<store, I64, S4_storerd_rr>;
}
+class Store_rr_pat<PatFrag Store, PatFrag Value, InstHexagon MI>
+ : Pat<(Store Value:$Ru, (add I32:$Rs, I32:$Rt)),
+ (MI IntRegs:$Rs, IntRegs:$Rt, 0, Value:$Ru)>;
+
+let AddedComplexity = 20 in {
+ def: Store_rr_pat<truncstorei8, I32, S4_storerb_rr>;
+ def: Store_rr_pat<truncstorei16, I32, S4_storerh_rr>;
+ def: Store_rr_pat<store, I32, S4_storeri_rr>;
+ def: Store_rr_pat<store, I64, S4_storerd_rr>;
+}
+
// memd(Rx++#s4:3)=Rtt
// memd(Rx++#s4:3:circ(Mu))=Rtt
// memd(Rx++I:circ(Mu))=Rtt
@@ -1188,17 +1200,52 @@ def ToImmByte : OutPatFrag<(ops node:$R), (IMM_BYTE $R)>;
def ToImmHalf : OutPatFrag<(ops node:$R), (IMM_HALF $R)>;
def ToImmWord : OutPatFrag<(ops node:$R), (IMM_WORD $R)>;
+// Emit a store-immediate, but only when the stored value will not be
+// constant-extended. The reason is that no pass can optimize constant
+// extenders in store-immediate instructions. In some cases we can end up
+// with a number of such stores, all of which store the same extended value
+// (e.g. after unrolling a loop that initializes a floating-point array).
+
+// Predicate to determine if the 16-bit immediate is expressible as a sign-
+// extended 8-bit immediate. Store-immediate-halfword ignores any bits
+// beyond 0..15, so we don't care what those bits contain.
+
+def i16in8ImmPred: PatLeaf<(i32 imm), [{
+ int64_t v = (int16_t)N->getSExtValue();
+ return v == (int64_t)(int8_t)v;
+}]>;
+
+// Predicate to determine if the 32-bit immediate is expressible as a sign-
+// extended 8-bit immediate.
+def i32in8ImmPred: PatLeaf<(i32 imm), [{
+ int64_t v = (int32_t)N->getSExtValue();
+ return v == (int64_t)(int8_t)v;
+}]>;
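+// Both checks are plain sign-extension round-trips: a value fits a signed
+// 8-bit field iff truncating it to int8_t and extending back is lossless.
+// The same test as standalone C++ (the name is illustrative):
+//   bool fitsSigned8(int64_t v) { return v == (int64_t)(int8_t)v; }
+//   fitsSigned8(127);   // true  - no constant extender needed
+//   fitsSigned8(-200);  // false - would need a constant extender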
+
let AddedComplexity = 40 in {
- // Not using frameindex patterns for these stores, because the offset
- // is not extendable. This could cause problems during removing the frame
- // indices, since the offset with respect to R29/R30 may not fit in the
- // u6 field.
- def: Storexm_add_pat<truncstorei8, s32ImmPred, u6_0ImmPred, ToImmByte,
- S4_storeirb_io>;
- def: Storexm_add_pat<truncstorei16, s32ImmPred, u6_1ImmPred, ToImmHalf,
- S4_storeirh_io>;
- def: Storexm_add_pat<store, s32ImmPred, u6_2ImmPred, ToImmWord,
- S4_storeiri_io>;
+ // Even though the offset is not extendable in the store-immediate, we
+ // can still generate the fi# in the base address. If the final offset
+ // is not valid for the instruction, we will replace it with a scratch
+ // register.
+// def: Storexm_fi_pat <truncstorei8, s32ImmPred, ToImmByte, S4_storeirb_io>;
+// def: Storexm_fi_pat <truncstorei16, i16in8ImmPred, ToImmHalf,
+// S4_storeirh_io>;
+// def: Storexm_fi_pat <store, i32in8ImmPred, ToImmWord, S4_storeiri_io>;
+
+// defm: Storexm_fi_add_pat <truncstorei8, s32ImmPred, u6_0ImmPred, ToImmByte,
+// S4_storeirb_io>;
+// defm: Storexm_fi_add_pat <truncstorei16, i16in8ImmPred, u6_1ImmPred,
+// ToImmHalf, S4_storeirh_io>;
+// defm: Storexm_fi_add_pat <store, i32in8ImmPred, u6_2ImmPred, ToImmWord,
+// S4_storeiri_io>;
+
+ defm: Storexm_add_pat<truncstorei8, s32ImmPred, u6_0ImmPred, ToImmByte,
+ S4_storeirb_io>;
+ defm: Storexm_add_pat<truncstorei16, i16in8ImmPred, u6_1ImmPred, ToImmHalf,
+ S4_storeirh_io>;
+ defm: Storexm_add_pat<store, i32in8ImmPred, u6_2ImmPred, ToImmWord,
+ S4_storeiri_io>;
}
def: Storexm_simple_pat<truncstorei8, s32ImmPred, ToImmByte, S4_storeirb_io>;
@@ -1698,7 +1745,7 @@ class NVJ_ConstImm_template<string mnemonic, bits<3> majOp, string ImmVal,
: NVInst_V4<(outs),
(ins IntRegs:$src1, brtarget:$offset),
"if ("#!if(isNegCond, "!","")#mnemonic
- #"($src1.new, #"#ImmVal#")) jump:"
+ #"($src1.new, #" # ImmVal # ")) jump:"
#!if(isTak, "t","nt")#" $offset", []> {
let isTaken = isTak;
@@ -2318,21 +2365,15 @@ def S4_clbpnorm : T_COUNT_LEADING_64<"normamt", 0b011, 0b000>;
// Count trailing zeros: 64-bit.
def: Pat<(i32 (trunc (cttz I64:$Rss))), (S2_ct0p I64:$Rss)>;
-def: Pat<(i32 (trunc (cttz_zero_undef I64:$Rss))), (S2_ct0p I64:$Rss)>;
// Count trailing ones: 64-bit.
def: Pat<(i32 (trunc (cttz (not I64:$Rss)))), (S2_ct1p I64:$Rss)>;
-def: Pat<(i32 (trunc (cttz_zero_undef (not I64:$Rss)))), (S2_ct1p I64:$Rss)>;
// Define leading/trailing patterns that require zero-extensions to 64 bits.
def: Pat<(i64 (ctlz I64:$Rss)), (Zext64 (S2_cl0p I64:$Rss))>;
-def: Pat<(i64 (ctlz_zero_undef I64:$Rss)), (Zext64 (S2_cl0p I64:$Rss))>;
def: Pat<(i64 (cttz I64:$Rss)), (Zext64 (S2_ct0p I64:$Rss))>;
-def: Pat<(i64 (cttz_zero_undef I64:$Rss)), (Zext64 (S2_ct0p I64:$Rss))>;
def: Pat<(i64 (ctlz (not I64:$Rss))), (Zext64 (S2_cl1p I64:$Rss))>;
-def: Pat<(i64 (ctlz_zero_undef (not I64:$Rss))), (Zext64 (S2_cl1p I64:$Rss))>;
def: Pat<(i64 (cttz (not I64:$Rss))), (Zext64 (S2_ct1p I64:$Rss))>;
-def: Pat<(i64 (cttz_zero_undef (not I64:$Rss))), (Zext64 (S2_ct1p I64:$Rss))>;
let hasSideEffects = 0, hasNewValue = 1 in
@@ -2789,79 +2830,75 @@ def S4_lsli: SInst <(outs IntRegs:$Rd), (ins s6Imm:$s6, IntRegs:$Rt),
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
-// MEMOP: Word, Half, Byte
+// MEMOP
//===----------------------------------------------------------------------===//
-def MEMOPIMM : SDNodeXForm<imm, [{
- // Call the transformation function XformM5ToU5Imm to get the negative
- // immediate's positive counterpart.
- int32_t imm = N->getSExtValue();
- return XformM5ToU5Imm(imm, SDLoc(N));
+def m5Imm8Pred : PatLeaf<(i32 imm), [{
+ int8_t v = (int8_t)N->getSExtValue();
+ return v > -32 && v <= -1;
}]>;
-def MEMOPIMM_HALF : SDNodeXForm<imm, [{
- // -1 .. -31 represented as 65535..65515
- // assigning to a short restores our desired signed value.
- // Call the transformation function XformM5ToU5Imm to get the negative
- // immediate's positive counterpart.
- int16_t imm = N->getSExtValue();
- return XformM5ToU5Imm(imm, SDLoc(N));
+def m5Imm16Pred : PatLeaf<(i32 imm), [{
+ int16_t v = (int16_t)N->getSExtValue();
+ return v > -32 && v <= -1;
}]>;
-def MEMOPIMM_BYTE : SDNodeXForm<imm, [{
- // -1 .. -31 represented as 255..235
- // assigning to a char restores our desired signed value.
- // Call the transformation function XformM5ToU5Imm to get the negative
- // immediate's positive counterpart.
- int8_t imm = N->getSExtValue();
- return XformM5ToU5Imm(imm, SDLoc(N));
+def Clr5Imm8Pred : PatLeaf<(i32 imm), [{
+ uint32_t v = (uint8_t)~N->getZExtValue();
+ return ImmIsSingleBit(v);
}]>;
-def SETMEMIMM : SDNodeXForm<imm, [{
- // Return the bit position we will set [0-31].
- // As an SDNode.
- int32_t imm = N->getSExtValue();
+def Clr5Imm16Pred : PatLeaf<(i32 imm), [{
+ uint32_t v = (uint16_t)~N->getZExtValue();
+ return ImmIsSingleBit(v);
+}]>;
+
+def Set5Imm8 : SDNodeXForm<imm, [{
+ uint32_t imm = (uint8_t)N->getZExtValue();
return XformMskToBitPosU5Imm(imm, SDLoc(N));
}]>;
-def CLRMEMIMM : SDNodeXForm<imm, [{
- // Return the bit position we will clear [0-31].
- // As an SDNode.
- // we bit negate the value first
- int32_t imm = ~(N->getSExtValue());
+def Set5Imm16 : SDNodeXForm<imm, [{
+ uint32_t imm = (uint16_t)N->getZExtValue();
return XformMskToBitPosU5Imm(imm, SDLoc(N));
}]>;
-def SETMEMIMM_SHORT : SDNodeXForm<imm, [{
- // Return the bit position we will set [0-15].
- // As an SDNode.
- int16_t imm = N->getSExtValue();
- return XformMskToBitPosU4Imm(imm, SDLoc(N));
+def Set5Imm32 : SDNodeXForm<imm, [{
+ uint32_t imm = (uint32_t)N->getZExtValue();
+ return XformMskToBitPosU5Imm(imm, SDLoc(N));
}]>;
-def CLRMEMIMM_SHORT : SDNodeXForm<imm, [{
- // Return the bit position we will clear [0-15].
- // As an SDNode.
- // we bit negate the value first
- int16_t imm = ~(N->getSExtValue());
- return XformMskToBitPosU4Imm(imm, SDLoc(N));
+def Clr5Imm8 : SDNodeXForm<imm, [{
+ uint32_t imm = (uint8_t)~N->getZExtValue();
+ return XformMskToBitPosU5Imm(imm, SDLoc(N));
}]>;
-def SETMEMIMM_BYTE : SDNodeXForm<imm, [{
- // Return the bit position we will set [0-7].
- // As an SDNode.
- int8_t imm = N->getSExtValue();
- return XformMskToBitPosU3Imm(imm, SDLoc(N));
+def Clr5Imm16 : SDNodeXForm<imm, [{
+ uint32_t imm = (uint16_t)~N->getZExtValue();
+ return XformMskToBitPosU5Imm(imm, SDLoc(N));
}]>;
-def CLRMEMIMM_BYTE : SDNodeXForm<imm, [{
- // Return the bit position we will clear [0-7].
- // As an SDNode.
- // we bit negate the value first
- int8_t imm = ~(N->getSExtValue());
- return XformMskToBitPosU3Imm(imm, SDLoc(N));
+def Clr5Imm32 : SDNodeXForm<imm, [{
+ int32_t imm = (int32_t)~N->getZExtValue();
+ return XformMskToBitPosU5Imm(imm, SDLoc(N));
+}]>;
+
+def NegImm8 : SDNodeXForm<imm, [{
+ int8_t V = N->getSExtValue();
+ return CurDAG->getTargetConstant(-V, SDLoc(N), MVT::i32);
+}]>;
+
+def NegImm16 : SDNodeXForm<imm, [{
+ int16_t V = N->getSExtValue();
+ return CurDAG->getTargetConstant(-V, SDLoc(N), MVT::i32);
}]>;
+def NegImm32 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(-N->getSExtValue(), SDLoc(N), MVT::i32);
+}]>;
+
+def IdImm : SDNodeXForm<imm, [{ return SDValue(N, 0); }]>;
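+// Taken together: Set5Imm* map a single-bit mask to its bit position
+// (setbit), Clr5Imm* do the same for a mask with a single clear bit
+// (clrbit), and NegImm* negate a constant so a subtract can be selected
+// as an add (and vice versa). Worked examples, assuming
+// XformMskToBitPosU5Imm returns the index of the set bit:
+//   x | 0x20        ->  setbit(#5)   since Set5Imm8(0x20)        == 5
+//   x & 0xffffffef  ->  clrbit(#4)   since Clr5Imm32(0xffffffef) == 4
+//   x - (-3)        ->  x + 3        since NegImm32(-3)          == 3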
+
//===----------------------------------------------------------------------===//
// Template class for MemOp instructions with the register value.
//===----------------------------------------------------------------------===//
@@ -2958,197 +2995,234 @@ let isExtendable = 1, opExtendable = 1, isExtentSigned = 0 in {
defm memopw_io : MemOp_base <"memw", 0b10, u6_2Ext>;
}
-//===----------------------------------------------------------------------===//
-// Multiclass to define 'Def Pats' for ALU operations on the memory
-// Here value used for the ALU operation is an immediate value.
-// mem[bh](Rs+#0) += #U5
-// mem[bh](Rs+#u6) += #U5
-//===----------------------------------------------------------------------===//
-
-multiclass MemOpi_u5Pats <PatFrag ldOp, PatFrag stOp, PatLeaf ImmPred,
- InstHexagon MI, SDNode OpNode> {
- let AddedComplexity = 180 in
- def: Pat<(stOp (OpNode (ldOp IntRegs:$addr), u5ImmPred:$addend),
- IntRegs:$addr),
- (MI IntRegs:$addr, 0, u5ImmPred:$addend)>;
-
- let AddedComplexity = 190 in
- def: Pat<(stOp (OpNode (ldOp (add IntRegs:$base, ImmPred:$offset)),
- u5ImmPred:$addend),
- (add IntRegs:$base, ImmPred:$offset)),
- (MI IntRegs:$base, ImmPred:$offset, u5ImmPred:$addend)>;
-}
-
-multiclass MemOpi_u5ALUOp<PatFrag ldOp, PatFrag stOp, PatLeaf ImmPred,
- InstHexagon addMI, InstHexagon subMI> {
- defm: MemOpi_u5Pats<ldOp, stOp, ImmPred, addMI, add>;
- defm: MemOpi_u5Pats<ldOp, stOp, ImmPred, subMI, sub>;
-}
-multiclass MemOpi_u5ExtType<PatFrag ldOpByte, PatFrag ldOpHalf > {
- // Half Word
- defm: MemOpi_u5ALUOp <ldOpHalf, truncstorei16, u31_1ImmPred,
- L4_iadd_memoph_io, L4_isub_memoph_io>;
- // Byte
- defm: MemOpi_u5ALUOp <ldOpByte, truncstorei8, u32ImmPred,
- L4_iadd_memopb_io, L4_isub_memopb_io>;
+multiclass Memopxr_simple_pat<PatFrag Load, PatFrag Store, SDNode Oper,
+ InstHexagon MI> {
+ // Addr: i32
+ def: Pat<(Store (Oper (Load I32:$Rs), I32:$A), I32:$Rs),
+ (MI I32:$Rs, 0, I32:$A)>;
+ // Addr: fi
+ def: Pat<(Store (Oper (Load AddrFI:$Rs), I32:$A), AddrFI:$Rs),
+ (MI AddrFI:$Rs, 0, I32:$A)>;
+}
+
+multiclass Memopxr_add_pat<PatFrag Load, PatFrag Store, PatFrag ImmPred,
+ SDNode Oper, InstHexagon MI> {
+ // Addr: i32
+ def: Pat<(Store (Oper (Load (add I32:$Rs, ImmPred:$Off)), I32:$A),
+ (add I32:$Rs, ImmPred:$Off)),
+ (MI I32:$Rs, imm:$Off, I32:$A)>;
+ def: Pat<(Store (Oper (Load (orisadd I32:$Rs, ImmPred:$Off)), I32:$A),
+ (orisadd I32:$Rs, ImmPred:$Off)),
+ (MI I32:$Rs, imm:$Off, I32:$A)>;
+ // Addr: fi
+ def: Pat<(Store (Oper (Load (add AddrFI:$Rs, ImmPred:$Off)), I32:$A),
+ (add AddrFI:$Rs, ImmPred:$Off)),
+ (MI AddrFI:$Rs, imm:$Off, I32:$A)>;
+ def: Pat<(Store (Oper (Load (orisadd AddrFI:$Rs, ImmPred:$Off)), I32:$A),
+ (orisadd AddrFI:$Rs, ImmPred:$Off)),
+ (MI AddrFI:$Rs, imm:$Off, I32:$A)>;
+}
+
+multiclass Memopxr_pat<PatFrag Load, PatFrag Store, PatFrag ImmPred,
+ SDNode Oper, InstHexagon MI> {
+ defm: Memopxr_simple_pat <Load, Store, Oper, MI>;
+ defm: Memopxr_add_pat <Load, Store, ImmPred, Oper, MI>;
+}
+
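+// For reference, the register-form memops instantiated below correspond to
+// read-modify-write sequences on one address, e.g. (illustrative C):
+//   *p += r;     =>  memw(Rs+#0)  += Rt     (L4_add_memopw_io)
+//   q[4] &= r;   =>  memw(Rs+#16) &= Rt     (L4_and_memopw_io, for int *q)
+// The AddrFI/orisadd variants cover the same shapes when the base address
+// is a frame object.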
+let AddedComplexity = 180 in {
+ // add reg
+ defm: Memopxr_pat<extloadi8, truncstorei8, u6_0ImmPred, add,
+ /*anyext*/ L4_add_memopb_io>;
+ defm: Memopxr_pat<sextloadi8, truncstorei8, u6_0ImmPred, add,
+ /*sext*/ L4_add_memopb_io>;
+ defm: Memopxr_pat<zextloadi8, truncstorei8, u6_0ImmPred, add,
+ /*zext*/ L4_add_memopb_io>;
+ defm: Memopxr_pat<extloadi16, truncstorei16, u6_1ImmPred, add,
+ /*anyext*/ L4_add_memoph_io>;
+ defm: Memopxr_pat<sextloadi16, truncstorei16, u6_1ImmPred, add,
+ /*sext*/ L4_add_memoph_io>;
+ defm: Memopxr_pat<zextloadi16, truncstorei16, u6_1ImmPred, add,
+ /*zext*/ L4_add_memoph_io>;
+ defm: Memopxr_pat<load, store, u6_2ImmPred, add, L4_add_memopw_io>;
+
+ // sub reg
+ defm: Memopxr_pat<extloadi8, truncstorei8, u6_0ImmPred, sub,
+ /*anyext*/ L4_sub_memopb_io>;
+ defm: Memopxr_pat<sextloadi8, truncstorei8, u6_0ImmPred, sub,
+ /*sext*/ L4_sub_memopb_io>;
+ defm: Memopxr_pat<zextloadi8, truncstorei8, u6_0ImmPred, sub,
+ /*zext*/ L4_sub_memopb_io>;
+ defm: Memopxr_pat<extloadi16, truncstorei16, u6_1ImmPred, sub,
+ /*anyext*/ L4_sub_memoph_io>;
+ defm: Memopxr_pat<sextloadi16, truncstorei16, u6_1ImmPred, sub,
+ /*sext*/ L4_sub_memoph_io>;
+ defm: Memopxr_pat<zextloadi16, truncstorei16, u6_1ImmPred, sub,
+ /*zext*/ L4_sub_memoph_io>;
+ defm: Memopxr_pat<load, store, u6_2ImmPred, sub, L4_sub_memopw_io>;
+
+ // and reg
+ defm: Memopxr_pat<extloadi8, truncstorei8, u6_0ImmPred, and,
+ /*anyext*/ L4_and_memopb_io>;
+ defm: Memopxr_pat<sextloadi8, truncstorei8, u6_0ImmPred, and,
+ /*sext*/ L4_and_memopb_io>;
+ defm: Memopxr_pat<zextloadi8, truncstorei8, u6_0ImmPred, and,
+ /*zext*/ L4_and_memopb_io>;
+ defm: Memopxr_pat<extloadi16, truncstorei16, u6_1ImmPred, and,
+ /*anyext*/ L4_and_memoph_io>;
+ defm: Memopxr_pat<sextloadi16, truncstorei16, u6_1ImmPred, and,
+ /*sext*/ L4_and_memoph_io>;
+ defm: Memopxr_pat<zextloadi16, truncstorei16, u6_1ImmPred, and,
+ /*zext*/ L4_and_memoph_io>;
+ defm: Memopxr_pat<load, store, u6_2ImmPred, and, L4_and_memopw_io>;
+
+ // or reg
+ defm: Memopxr_pat<extloadi8, truncstorei8, u6_0ImmPred, or,
+ /*anyext*/ L4_or_memopb_io>;
+ defm: Memopxr_pat<sextloadi8, truncstorei8, u6_0ImmPred, or,
+ /*sext*/ L4_or_memopb_io>;
+ defm: Memopxr_pat<zextloadi8, truncstorei8, u6_0ImmPred, or,
+ /*zext*/ L4_or_memopb_io>;
+ defm: Memopxr_pat<extloadi16, truncstorei16, u6_1ImmPred, or,
+ /*anyext*/ L4_or_memoph_io>;
+ defm: Memopxr_pat<sextloadi16, truncstorei16, u6_1ImmPred, or,
+ /*sext*/ L4_or_memoph_io>;
+ defm: Memopxr_pat<zextloadi16, truncstorei16, u6_1ImmPred, or,
+ /*zext*/ L4_or_memoph_io>;
+ defm: Memopxr_pat<load, store, u6_2ImmPred, or, L4_or_memopw_io>;
+}
+
+multiclass Memopxi_simple_pat<PatFrag Load, PatFrag Store, SDNode Oper,
+ PatFrag Arg, SDNodeXForm ArgMod,
+ InstHexagon MI> {
+ // Addr: i32
+ def: Pat<(Store (Oper (Load I32:$Rs), Arg:$A), I32:$Rs),
+ (MI I32:$Rs, 0, (ArgMod Arg:$A))>;
+ // Addr: fi
+ def: Pat<(Store (Oper (Load AddrFI:$Rs), Arg:$A), AddrFI:$Rs),
+ (MI AddrFI:$Rs, 0, (ArgMod Arg:$A))>;
+}
+
+multiclass Memopxi_add_pat<PatFrag Load, PatFrag Store, PatFrag ImmPred,
+ SDNode Oper, PatFrag Arg, SDNodeXForm ArgMod,
+ InstHexagon MI> {
+ // Addr: i32
+ def: Pat<(Store (Oper (Load (add I32:$Rs, ImmPred:$Off)), Arg:$A),
+ (add I32:$Rs, ImmPred:$Off)),
+ (MI I32:$Rs, imm:$Off, (ArgMod Arg:$A))>;
+ def: Pat<(Store (Oper (Load (orisadd I32:$Rs, ImmPred:$Off)), Arg:$A),
+ (orisadd I32:$Rs, ImmPred:$Off)),
+ (MI I32:$Rs, imm:$Off, (ArgMod Arg:$A))>;
+ // Addr: fi
+ def: Pat<(Store (Oper (Load (add AddrFI:$Rs, ImmPred:$Off)), Arg:$A),
+ (add AddrFI:$Rs, ImmPred:$Off)),
+ (MI AddrFI:$Rs, imm:$Off, (ArgMod Arg:$A))>;
+ def: Pat<(Store (Oper (Load (orisadd AddrFI:$Rs, ImmPred:$Off)), Arg:$A),
+ (orisadd AddrFI:$Rs, ImmPred:$Off)),
+ (MI AddrFI:$Rs, imm:$Off, (ArgMod Arg:$A))>;
+}
+
+multiclass Memopxi_pat<PatFrag Load, PatFrag Store, PatFrag ImmPred,
+ SDNode Oper, PatFrag Arg, SDNodeXForm ArgMod,
+ InstHexagon MI> {
+ defm: Memopxi_simple_pat <Load, Store, Oper, Arg, ArgMod, MI>;
+ defm: Memopxi_add_pat <Load, Store, ImmPred, Oper, Arg, ArgMod, MI>;
}
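+// The immediate forms instantiated below follow the same structure, with
+// Arg constraining the stored constant and ArgMod rewriting it on the way
+// in, e.g. (illustrative C):
+//   *p += 2;   =>  memw(Rs+#0) += #2    (L4_iadd_memopw_io via IdImm)
+//   *p -= -2;  =>  memw(Rs+#0) += #2    (L4_iadd_memopw_io via NegImm32)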
-let Predicates = [UseMEMOP] in {
- defm: MemOpi_u5ExtType<zextloadi8, zextloadi16>; // zero extend
- defm: MemOpi_u5ExtType<sextloadi8, sextloadi16>; // sign extend
- defm: MemOpi_u5ExtType<extloadi8, extloadi16>; // any extend
- // Word
- defm: MemOpi_u5ALUOp <load, store, u30_2ImmPred, L4_iadd_memopw_io,
- L4_isub_memopw_io>;
-}
-
-//===----------------------------------------------------------------------===//
-// multiclass to define 'Def Pats' for ALU operations on the memory.
-// Here value used for the ALU operation is a negative value.
-// mem[bh](Rs+#0) += #m5
-// mem[bh](Rs+#u6) += #m5
-//===----------------------------------------------------------------------===//
-
-multiclass MemOpi_m5Pats <PatFrag ldOp, PatFrag stOp, PatLeaf ImmPred,
- PatLeaf immPred, SDNodeXForm xformFunc,
- InstHexagon MI> {
- let AddedComplexity = 190 in
- def: Pat<(stOp (add (ldOp IntRegs:$addr), immPred:$subend), IntRegs:$addr),
- (MI IntRegs:$addr, 0, (xformFunc immPred:$subend))>;
-
- let AddedComplexity = 195 in
- def: Pat<(stOp (add (ldOp (add IntRegs:$base, ImmPred:$offset)),
- immPred:$subend),
- (add IntRegs:$base, ImmPred:$offset)),
- (MI IntRegs:$base, ImmPred:$offset, (xformFunc immPred:$subend))>;
-}
-
-multiclass MemOpi_m5ExtType<PatFrag ldOpByte, PatFrag ldOpHalf > {
- // Half Word
- defm: MemOpi_m5Pats <ldOpHalf, truncstorei16, u31_1ImmPred, m5HImmPred,
- MEMOPIMM_HALF, L4_isub_memoph_io>;
- // Byte
- defm: MemOpi_m5Pats <ldOpByte, truncstorei8, u32ImmPred, m5BImmPred,
- MEMOPIMM_BYTE, L4_isub_memopb_io>;
-}
-
-let Predicates = [UseMEMOP] in {
- defm: MemOpi_m5ExtType<zextloadi8, zextloadi16>; // zero extend
- defm: MemOpi_m5ExtType<sextloadi8, sextloadi16>; // sign extend
- defm: MemOpi_m5ExtType<extloadi8, extloadi16>; // any extend
-
- // Word
- defm: MemOpi_m5Pats <load, store, u30_2ImmPred, m5ImmPred,
- MEMOPIMM, L4_isub_memopw_io>;
-}
-
-//===----------------------------------------------------------------------===//
-// Multiclass to define 'def Pats' for bit operations on the memory.
-// mem[bhw](Rs+#0) = [clrbit|setbit](#U5)
-// mem[bhw](Rs+#u6) = [clrbit|setbit](#U5)
-//===----------------------------------------------------------------------===//
-
-multiclass MemOpi_bitPats <PatFrag ldOp, PatFrag stOp, PatLeaf immPred,
- PatLeaf extPred, SDNodeXForm xformFunc, InstHexagon MI,
- SDNode OpNode> {
-
- // mem[bhw](Rs+#u6:[012]) = [clrbit|setbit](#U5)
- let AddedComplexity = 250 in
- def: Pat<(stOp (OpNode (ldOp (add IntRegs:$base, extPred:$offset)),
- immPred:$bitend),
- (add IntRegs:$base, extPred:$offset)),
- (MI IntRegs:$base, extPred:$offset, (xformFunc immPred:$bitend))>;
-
- // mem[bhw](Rs+#0) = [clrbit|setbit](#U5)
- let AddedComplexity = 225 in
- def: Pat<(stOp (OpNode (ldOp IntRegs:$addr), immPred:$bitend), IntRegs:$addr),
- (MI IntRegs:$addr, 0, (xformFunc immPred:$bitend))>;
-}
-
-multiclass MemOpi_bitExtType<PatFrag ldOpByte, PatFrag ldOpHalf> {
- // Byte - clrbit
- defm: MemOpi_bitPats<ldOpByte, truncstorei8, Clr3ImmPred, u32ImmPred,
- CLRMEMIMM_BYTE, L4_iand_memopb_io, and>;
- // Byte - setbit
- defm: MemOpi_bitPats<ldOpByte, truncstorei8, Set3ImmPred, u32ImmPred,
- SETMEMIMM_BYTE, L4_ior_memopb_io, or>;
- // Half Word - clrbit
- defm: MemOpi_bitPats<ldOpHalf, truncstorei16, Clr4ImmPred, u31_1ImmPred,
- CLRMEMIMM_SHORT, L4_iand_memoph_io, and>;
- // Half Word - setbit
- defm: MemOpi_bitPats<ldOpHalf, truncstorei16, Set4ImmPred, u31_1ImmPred,
- SETMEMIMM_SHORT, L4_ior_memoph_io, or>;
-}
-
-let Predicates = [UseMEMOP] in {
- // mem[bh](Rs+#0) = [clrbit|setbit](#U5)
- // mem[bh](Rs+#u6:[01]) = [clrbit|setbit](#U5)
- defm: MemOpi_bitExtType<zextloadi8, zextloadi16>; // zero extend
- defm: MemOpi_bitExtType<sextloadi8, sextloadi16>; // sign extend
- defm: MemOpi_bitExtType<extloadi8, extloadi16>; // any extend
-
- // memw(Rs+#0) = [clrbit|setbit](#U5)
- // memw(Rs+#u6:2) = [clrbit|setbit](#U5)
- defm: MemOpi_bitPats<load, store, Clr5ImmPred, u30_2ImmPred, CLRMEMIMM,
- L4_iand_memopw_io, and>;
- defm: MemOpi_bitPats<load, store, Set5ImmPred, u30_2ImmPred, SETMEMIMM,
- L4_ior_memopw_io, or>;
-}
-
-//===----------------------------------------------------------------------===//
-// Multiclass to define 'def Pats' for ALU operations on the memory
-// where addend is a register.
-// mem[bhw](Rs+#0) [+-&|]= Rt
-// mem[bhw](Rs+#U6:[012]) [+-&|]= Rt
-//===----------------------------------------------------------------------===//
-
-multiclass MemOpr_Pats <PatFrag ldOp, PatFrag stOp, PatLeaf extPred,
- InstHexagon MI, SDNode OpNode> {
- let AddedComplexity = 141 in
- // mem[bhw](Rs+#0) [+-&|]= Rt
- def: Pat<(stOp (OpNode (ldOp IntRegs:$addr), (i32 IntRegs:$addend)),
- IntRegs:$addr),
- (MI IntRegs:$addr, 0, (i32 IntRegs:$addend))>;
-
- // mem[bhw](Rs+#U6:[012]) [+-&|]= Rt
- let AddedComplexity = 150 in
- def: Pat<(stOp (OpNode (ldOp (add IntRegs:$base, extPred:$offset)),
- (i32 IntRegs:$orend)),
- (add IntRegs:$base, extPred:$offset)),
- (MI IntRegs:$base, extPred:$offset, (i32 IntRegs:$orend))>;
-}
-
-multiclass MemOPr_ALUOp<PatFrag ldOp, PatFrag stOp, PatLeaf extPred,
- InstHexagon addMI, InstHexagon subMI,
- InstHexagon andMI, InstHexagon orMI> {
- defm: MemOpr_Pats <ldOp, stOp, extPred, addMI, add>;
- defm: MemOpr_Pats <ldOp, stOp, extPred, subMI, sub>;
- defm: MemOpr_Pats <ldOp, stOp, extPred, andMI, and>;
- defm: MemOpr_Pats <ldOp, stOp, extPred, orMI, or>;
-}
-
-multiclass MemOPr_ExtType<PatFrag ldOpByte, PatFrag ldOpHalf > {
- // Half Word
- defm: MemOPr_ALUOp <ldOpHalf, truncstorei16, u31_1ImmPred,
- L4_add_memoph_io, L4_sub_memoph_io,
- L4_and_memoph_io, L4_or_memoph_io>;
- // Byte
- defm: MemOPr_ALUOp <ldOpByte, truncstorei8, u32ImmPred,
- L4_add_memopb_io, L4_sub_memopb_io,
- L4_and_memopb_io, L4_or_memopb_io>;
-}
-
-// Define 'def Pats' for MemOps with register addend.
-let Predicates = [UseMEMOP] in {
- // Byte, Half Word
- defm: MemOPr_ExtType<zextloadi8, zextloadi16>; // zero extend
- defm: MemOPr_ExtType<sextloadi8, sextloadi16>; // sign extend
- defm: MemOPr_ExtType<extloadi8, extloadi16>; // any extend
- // Word
- defm: MemOPr_ALUOp <load, store, u30_2ImmPred, L4_add_memopw_io,
- L4_sub_memopw_io, L4_and_memopw_io, L4_or_memopw_io>;
+let AddedComplexity = 200 in {
+ // add imm
+ defm: Memopxi_pat<extloadi8, truncstorei8, u6_0ImmPred, add, u5ImmPred,
+ /*anyext*/ IdImm, L4_iadd_memopb_io>;
+ defm: Memopxi_pat<sextloadi8, truncstorei8, u6_0ImmPred, add, u5ImmPred,
+ /*sext*/ IdImm, L4_iadd_memopb_io>;
+ defm: Memopxi_pat<zextloadi8, truncstorei8, u6_0ImmPred, add, u5ImmPred,
+ /*zext*/ IdImm, L4_iadd_memopb_io>;
+ defm: Memopxi_pat<extloadi16, truncstorei16, u6_1ImmPred, add, u5ImmPred,
+ /*anyext*/ IdImm, L4_iadd_memoph_io>;
+  defm: Memopxi_pat<sextloadi16, truncstorei16, u6_1ImmPred, add, u5ImmPred,
+                    /*sext*/   IdImm, L4_iadd_memoph_io>;
+  defm: Memopxi_pat<zextloadi16, truncstorei16, u6_1ImmPred, add, u5ImmPred,
+                    /*zext*/   IdImm, L4_iadd_memoph_io>;
+ defm: Memopxi_pat<load, store, u6_2ImmPred, add, u5ImmPred, IdImm,
+ L4_iadd_memopw_io>;
+ defm: Memopxi_pat<extloadi8, truncstorei8, u6_0ImmPred, sub, m5Imm8Pred,
+ /*anyext*/ NegImm8, L4_iadd_memopb_io>;
+ defm: Memopxi_pat<sextloadi8, truncstorei8, u6_0ImmPred, sub, m5Imm8Pred,
+ /*sext*/ NegImm8, L4_iadd_memopb_io>;
+ defm: Memopxi_pat<zextloadi8, truncstorei8, u6_0ImmPred, sub, m5Imm8Pred,
+ /*zext*/ NegImm8, L4_iadd_memopb_io>;
+ defm: Memopxi_pat<extloadi16, truncstorei16, u6_1ImmPred, sub, m5Imm16Pred,
+ /*anyext*/ NegImm16, L4_iadd_memoph_io>;
+ defm: Memopxi_pat<sextloadi16, truncstorei16, u6_1ImmPred, sub, m5Imm16Pred,
+ /*sext*/ NegImm16, L4_iadd_memoph_io>;
+ defm: Memopxi_pat<zextloadi16, truncstorei16, u6_1ImmPred, sub, m5Imm16Pred,
+ /*zext*/ NegImm16, L4_iadd_memoph_io>;
+ defm: Memopxi_pat<load, store, u6_2ImmPred, sub, m5ImmPred, NegImm32,
+ L4_iadd_memopw_io>;
+
+ // sub imm
+ defm: Memopxi_pat<extloadi8, truncstorei8, u6_0ImmPred, sub, u5ImmPred,
+ /*anyext*/ IdImm, L4_isub_memopb_io>;
+ defm: Memopxi_pat<sextloadi8, truncstorei8, u6_0ImmPred, sub, u5ImmPred,
+ /*sext*/ IdImm, L4_isub_memopb_io>;
+ defm: Memopxi_pat<zextloadi8, truncstorei8, u6_0ImmPred, sub, u5ImmPred,
+ /*zext*/ IdImm, L4_isub_memopb_io>;
+ defm: Memopxi_pat<extloadi16, truncstorei16, u6_1ImmPred, sub, u5ImmPred,
+ /*anyext*/ IdImm, L4_isub_memoph_io>;
+ defm: Memopxi_pat<sextloadi16, truncstorei16, u6_1ImmPred, sub, u5ImmPred,
+ /*sext*/ IdImm, L4_isub_memoph_io>;
+ defm: Memopxi_pat<zextloadi16, truncstorei16, u6_1ImmPred, sub, u5ImmPred,
+ /*zext*/ IdImm, L4_isub_memoph_io>;
+ defm: Memopxi_pat<load, store, u6_2ImmPred, sub, u5ImmPred, IdImm,
+ L4_isub_memopw_io>;
+ defm: Memopxi_pat<extloadi8, truncstorei8, u6_0ImmPred, add, m5Imm8Pred,
+ /*anyext*/ NegImm8, L4_isub_memopb_io>;
+ defm: Memopxi_pat<sextloadi8, truncstorei8, u6_0ImmPred, add, m5Imm8Pred,
+ /*sext*/ NegImm8, L4_isub_memopb_io>;
+ defm: Memopxi_pat<zextloadi8, truncstorei8, u6_0ImmPred, add, m5Imm8Pred,
+ /*zext*/ NegImm8, L4_isub_memopb_io>;
+ defm: Memopxi_pat<extloadi16, truncstorei16, u6_1ImmPred, add, m5Imm16Pred,
+ /*anyext*/ NegImm16, L4_isub_memoph_io>;
+ defm: Memopxi_pat<sextloadi16, truncstorei16, u6_1ImmPred, add, m5Imm16Pred,
+ /*sext*/ NegImm16, L4_isub_memoph_io>;
+ defm: Memopxi_pat<zextloadi16, truncstorei16, u6_1ImmPred, add, m5Imm16Pred,
+ /*zext*/ NegImm16, L4_isub_memoph_io>;
+ defm: Memopxi_pat<load, store, u6_2ImmPred, add, m5ImmPred, NegImm32,
+ L4_isub_memopw_io>;
+
+ // clrbit imm
+ defm: Memopxi_pat<extloadi8, truncstorei8, u6_0ImmPred, and, Clr5Imm8Pred,
+ /*anyext*/ Clr5Imm8, L4_iand_memopb_io>;
+ defm: Memopxi_pat<sextloadi8, truncstorei8, u6_0ImmPred, and, Clr5Imm8Pred,
+ /*sext*/ Clr5Imm8, L4_iand_memopb_io>;
+ defm: Memopxi_pat<zextloadi8, truncstorei8, u6_0ImmPred, and, Clr5Imm8Pred,
+ /*zext*/ Clr5Imm8, L4_iand_memopb_io>;
+ defm: Memopxi_pat<extloadi16, truncstorei16, u6_1ImmPred, and, Clr5Imm16Pred,
+ /*anyext*/ Clr5Imm16, L4_iand_memoph_io>;
+ defm: Memopxi_pat<sextloadi16, truncstorei16, u6_1ImmPred, and, Clr5Imm16Pred,
+ /*sext*/ Clr5Imm16, L4_iand_memoph_io>;
+ defm: Memopxi_pat<zextloadi16, truncstorei16, u6_1ImmPred, and, Clr5Imm16Pred,
+ /*zext*/ Clr5Imm16, L4_iand_memoph_io>;
+ defm: Memopxi_pat<load, store, u6_2ImmPred, and, Clr5ImmPred, Clr5Imm32,
+ L4_iand_memopw_io>;
+
+ // setbit imm
+ defm: Memopxi_pat<extloadi8, truncstorei8, u6_0ImmPred, or, Set5ImmPred,
+ /*anyext*/ Set5Imm8, L4_ior_memopb_io>;
+ defm: Memopxi_pat<sextloadi8, truncstorei8, u6_0ImmPred, or, Set5ImmPred,
+ /*sext*/ Set5Imm8, L4_ior_memopb_io>;
+ defm: Memopxi_pat<zextloadi8, truncstorei8, u6_0ImmPred, or, Set5ImmPred,
+ /*zext*/ Set5Imm8, L4_ior_memopb_io>;
+ defm: Memopxi_pat<extloadi16, truncstorei16, u6_1ImmPred, or, Set5ImmPred,
+ /*anyext*/ Set5Imm16, L4_ior_memoph_io>;
+ defm: Memopxi_pat<sextloadi16, truncstorei16, u6_1ImmPred, or, Set5ImmPred,
+ /*sext*/ Set5Imm16, L4_ior_memoph_io>;
+ defm: Memopxi_pat<zextloadi16, truncstorei16, u6_1ImmPred, or, Set5ImmPred,
+ /*zext*/ Set5Imm16, L4_ior_memoph_io>;
+ defm: Memopxi_pat<load, store, u6_2ImmPred, or, Set5ImmPred, Set5Imm32,
+ L4_ior_memopw_io>;
}
//===----------------------------------------------------------------------===//
@@ -3281,22 +3355,57 @@ defm L4_return: LD_MISC_L4_RETURN <"dealloc_return">, PredNewRel;
let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
Defs = [R29, R30, R31, PC], isPredicable = 0, isAsmParserOnly = 1 in {
def RESTORE_DEALLOC_RET_JMP_V4 : T_JMP<"">;
+
let isExtended = 1, opExtendable = 0 in
- def RESTORE_DEALLOC_RET_JMP_V4_EXT : T_JMP<"">;
+ def RESTORE_DEALLOC_RET_JMP_V4_EXT : T_JMP<"">;
+
+ let Defs = [R14, R15, R28, R29, R30, R31, PC] in {
+ def RESTORE_DEALLOC_RET_JMP_V4_PIC : T_JMP<"">;
+
+ let isExtended = 1, opExtendable = 0 in
+ def RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC : T_JMP<"">;
+ }
}
// Restore registers and dealloc frame before a tail call.
let isCall = 1, Defs = [R29, R30, R31, PC], isAsmParserOnly = 1 in {
- def RESTORE_DEALLOC_BEFORE_TAILCALL_V4 : T_Call<"">, PredRel;
+ def RESTORE_DEALLOC_BEFORE_TAILCALL_V4 : T_Call<0, "">, PredRel;
+
let isExtended = 1, opExtendable = 0 in
- def RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT : T_Call<"">, PredRel;
+ def RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT : T_Call<0, "">, PredRel;
+
+ let Defs = [R14, R15, R28, R29, R30, R31, PC] in {
+ def RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC : T_Call<0, "">, PredRel;
+
+ let isExtended = 1, opExtendable = 0 in
+ def RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC : T_Call<0, "">, PredRel;
+ }
}
// Save registers function call.
let isCall = 1, Uses = [R29, R31], isAsmParserOnly = 1 in {
- def SAVE_REGISTERS_CALL_V4 : T_Call<"">, PredRel;
+ def SAVE_REGISTERS_CALL_V4 : T_Call<0, "">, PredRel;
+
let isExtended = 1, opExtendable = 0 in
- def SAVE_REGISTERS_CALL_V4_EXT : T_Call<"">, PredRel;
+ def SAVE_REGISTERS_CALL_V4_EXT : T_Call<0, "">, PredRel;
+
+ let Defs = [P0] in
+ def SAVE_REGISTERS_CALL_V4STK : T_Call<0, "">, PredRel;
+
+ let Defs = [P0], isExtended = 1, opExtendable = 0 in
+ def SAVE_REGISTERS_CALL_V4STK_EXT : T_Call<0, "">, PredRel;
+
+ let Defs = [R14, R15, R28] in
+ def SAVE_REGISTERS_CALL_V4_PIC : T_Call<0, "">, PredRel;
+
+ let Defs = [R14, R15, R28], isExtended = 1, opExtendable = 0 in
+ def SAVE_REGISTERS_CALL_V4_EXT_PIC : T_Call<0, "">, PredRel;
+
+ let Defs = [R14, R15, R28, P0] in
+ def SAVE_REGISTERS_CALL_V4STK_PIC : T_Call<0, "">, PredRel;
+
+ let Defs = [R14, R15, R28, P0], isExtended = 1, opExtendable = 0 in
+ def SAVE_REGISTERS_CALL_V4STK_EXT_PIC : T_Call<0, "">, PredRel;
}
//===----------------------------------------------------------------------===//
@@ -3413,9 +3522,9 @@ multiclass ST_Abs<string mnemonic, string CextOp, RegisterClass RC,
//===----------------------------------------------------------------------===//
let hasSideEffects = 0, isPredicable = 1, mayStore = 1, isNVStore = 1,
isNewValue = 1, opNewValue = 1 in
-class T_StoreAbsGP_NV <string mnemonic, Operand ImmOp, bits<2>MajOp, bit isAbs>
- : NVInst_V4<(outs), (ins u32Imm:$addr, IntRegs:$src),
- mnemonic # !if(isAbs, "(##", "(#")#"$addr) = $src.new",
+class T_StoreAbsGP_NV <string mnemonic, Operand ImmOp, bits<2>MajOp>
+ : NVInst_V4<(outs), (ins ImmOp:$addr, IntRegs:$src),
+ mnemonic #"(#$addr) = $src.new",
[], "", V2LDST_tc_st_SLOT0> {
bits<19> addr;
bits<3> src;
@@ -3426,7 +3535,6 @@ class T_StoreAbsGP_NV <string mnemonic, Operand ImmOp, bits<2>MajOp, bit isAbs>
!if (!eq(ImmOpStr, "u16_2Imm"), addr{17-2},
!if (!eq(ImmOpStr, "u16_1Imm"), addr{16-1},
/* u16_0Imm */ addr{15-0})));
- let Uses = !if (isAbs, [], [GP]);
let IClass = 0b0100;
let Inst{27} = 1;
@@ -3446,7 +3554,7 @@ class T_StoreAbsGP_NV <string mnemonic, Operand ImmOp, bits<2>MajOp, bit isAbs>
let hasSideEffects = 0, isPredicated = 1, mayStore = 1, isNVStore = 1,
isNewValue = 1, opNewValue = 2, opExtentBits = 6, opExtendable = 1 in
class T_StoreAbs_NV_Pred <string mnemonic, bits<2> MajOp, bit isNot, bit isNew>
- : NVInst_V4<(outs), (ins PredRegs:$src1, u6Ext:$absaddr, IntRegs:$src2),
+ : NVInst_V4<(outs), (ins PredRegs:$src1, u32MustExt:$absaddr, IntRegs:$src2),
!if(isNot, "if (!$src1", "if ($src1")#!if(isNew, ".new) ",
") ")#mnemonic#"(#$absaddr) = $src2.new",
[], "", ST_tc_st_SLOT0>, AddrModeRel {
@@ -3476,7 +3584,7 @@ class T_StoreAbs_NV_Pred <string mnemonic, bits<2> MajOp, bit isNot, bit isNew>
// absolute addressing.
//===----------------------------------------------------------------------===//
class T_StoreAbs_NV <string mnemonic, Operand ImmOp, bits<2> MajOp>
- : T_StoreAbsGP_NV <mnemonic, ImmOp, MajOp, 1>, AddrModeRel {
+ : T_StoreAbsGP_NV <mnemonic, u32MustExt, MajOp>, AddrModeRel {
string ImmOpStr = !cast<string>(ImmOp);
let opExtentBits = !if (!eq(ImmOpStr, "u16_3Imm"), 19,
@@ -3538,7 +3646,7 @@ defm storerf : ST_Abs <"memh", "STrif", IntRegs, u16_1Imm, 0b01, 1>;
// if ([!]Pv[.new]) mem[bhwd](##global)=Rt
//===----------------------------------------------------------------------===//
-let isAsmParserOnly = 1 in
+let Uses = [GP], isAsmParserOnly = 1 in
class T_StoreGP <string mnemonic, string BaseOp, RegisterClass RC,
Operand ImmOp, bits<2> MajOp, bit isHalf = 0>
: T_StoreAbsGP <mnemonic, RC, ImmOp, MajOp, 0, isHalf> {
@@ -3548,7 +3656,7 @@ class T_StoreGP <string mnemonic, string BaseOp, RegisterClass RC,
let BaseOpcode = BaseOp#_abs;
}
-let isAsmParserOnly = 1 in
+let Uses = [GP], isAsmParserOnly = 1 in
multiclass ST_GP <string mnemonic, string BaseOp, Operand ImmOp,
bits<2> MajOp, bit isHalf = 0> {
// Set BaseOpcode same as absolute addressing instructions so that
@@ -3558,7 +3666,7 @@ multiclass ST_GP <string mnemonic, string BaseOp, Operand ImmOp,
def NAME#gp : T_StoreAbsGP <mnemonic, IntRegs, ImmOp, MajOp,
0, isHalf>;
// New-value store
- def NAME#newgp : T_StoreAbsGP_NV <mnemonic, ImmOp, MajOp, 0> ;
+ def NAME#newgp : T_StoreAbsGP_NV <mnemonic, ImmOp, MajOp> ;
}
}
@@ -3594,6 +3702,17 @@ class Stoream_pat<PatFrag Store, PatFrag Value, PatFrag Addr, PatFrag ValueMod,
: Pat<(Store Value:$val, Addr:$addr),
(MI Addr:$addr, (ValueMod Value:$val))>;
+let AddedComplexity = 30 in {
+ def: Storea_pat<truncstorei8, I32, addrga, S2_storerbabs>;
+ def: Storea_pat<truncstorei16, I32, addrga, S2_storerhabs>;
+ def: Storea_pat<store, I32, addrga, S2_storeriabs>;
+ def: Storea_pat<store, I64, addrga, S2_storerdabs>;
+
+ def: Stoream_pat<truncstorei8, I64, addrga, LoReg, S2_storerbabs>;
+ def: Stoream_pat<truncstorei16, I64, addrga, LoReg, S2_storerhabs>;
+ def: Stoream_pat<truncstorei32, I64, addrga, LoReg, S2_storeriabs>;
+}
+
def: Storea_pat<SwapSt<atomic_store_8>, I32, addrgp, S2_storerbgp>;
def: Storea_pat<SwapSt<atomic_store_16>, I32, addrgp, S2_storerhgp>;
def: Storea_pat<SwapSt<atomic_store_32>, I32, addrgp, S2_storerigp>;
@@ -3731,6 +3850,26 @@ defm loadri : LD_Abs<"memw", "LDriw", IntRegs, u16_2Imm, 0b100>;
let accessSize = DoubleWordAccess in
defm loadrd : LD_Abs<"memd", "LDrid", DoubleRegs, u16_3Imm, 0b110>;
+class LoadAbs_pats <PatFrag ldOp, InstHexagon MI, ValueType VT = i32>
+ : Pat <(VT (ldOp (HexagonCONST32 tglobaladdr:$absaddr))),
+ (VT (MI tglobaladdr:$absaddr))>;
+
+let AddedComplexity = 30 in {
+ def: LoadAbs_pats <load, L4_loadri_abs>;
+ def: LoadAbs_pats <zextloadi1, L4_loadrub_abs>;
+ def: LoadAbs_pats <sextloadi8, L4_loadrb_abs>;
+ def: LoadAbs_pats <extloadi8, L4_loadrub_abs>;
+ def: LoadAbs_pats <zextloadi8, L4_loadrub_abs>;
+ def: LoadAbs_pats <sextloadi16, L4_loadrh_abs>;
+ def: LoadAbs_pats <extloadi16, L4_loadruh_abs>;
+ def: LoadAbs_pats <zextloadi16, L4_loadruh_abs>;
+ def: LoadAbs_pats <load, L4_loadrd_abs, i64>;
+}
+
+let AddedComplexity = 30 in
+def: Pat<(i64 (zextloadi1 (HexagonCONST32 tglobaladdr:$absaddr))),
+ (Zext64 (L4_loadrub_abs tglobaladdr:$absaddr))>;
+
//===----------------------------------------------------------------------===//
// multiclass for load instructions with GP-relative addressing mode.
// Rx=mem[bhwd](##global)
@@ -3779,14 +3918,14 @@ class LoadGP_pats <PatFrag ldOp, InstHexagon MI, ValueType VT = i32>
(VT (MI tglobaladdr:$global))>;
let AddedComplexity = 100 in {
- def: LoadGP_pats <extloadi8, L2_loadrbgp>;
- def: LoadGP_pats <sextloadi8, L2_loadrbgp>;
- def: LoadGP_pats <zextloadi8, L2_loadrubgp>;
- def: LoadGP_pats <extloadi16, L2_loadrhgp>;
+ def: LoadGP_pats <extloadi8, L2_loadrubgp>;
+ def: LoadGP_pats <sextloadi8, L2_loadrbgp>;
+ def: LoadGP_pats <zextloadi8, L2_loadrubgp>;
+ def: LoadGP_pats <extloadi16, L2_loadruhgp>;
def: LoadGP_pats <sextloadi16, L2_loadrhgp>;
def: LoadGP_pats <zextloadi16, L2_loadruhgp>;
- def: LoadGP_pats <load, L2_loadrigp>;
- def: LoadGP_pats <load, L2_loadrdgp, i64>;
+ def: LoadGP_pats <load, L2_loadrigp>;
+ def: LoadGP_pats <load, L2_loadrdgp, i64>;
}
// When the Interprocedural Global Variable optimizer realizes that a certain
@@ -3819,7 +3958,7 @@ let AddedComplexity = 30 in {
// Indexed store word - global address.
// memw(Rs+#u6:2)=#S8
let AddedComplexity = 100 in
-def: Storex_add_pat<store, addrga, u6_2ImmPred, S4_storeiri_io>;
+defm: Storex_add_pat<store, addrga, u6_2ImmPred, S4_storeiri_io>;
// Load from a global address that has only one use in the current basic block.
let AddedComplexity = 100 in {
@@ -3996,6 +4135,10 @@ def Y2_dcfetchbo : LD0Inst<(outs), (ins IntRegs:$Rs, u11_3Imm:$u11_3),
let Inst{10-0} = u11_3{13-3};
}
+
+def: Pat<(HexagonDCFETCH (i32 (add IntRegs:$Rs, u11_3ImmPred:$u11_3)), (i32 0)),
+ (Y2_dcfetchbo IntRegs:$Rs, u11_3ImmPred:$u11_3)>;
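+// This folds a constant offset into the base+offset form, so a prefetch of
+// Rs + 24 selects "dcfetch(Rs + #24)" directly instead of materializing the
+// address with a separate add.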
+
//===----------------------------------------------------------------------===//
// Compound instructions
//===----------------------------------------------------------------------===//
@@ -4008,7 +4151,7 @@ class CJInst_tstbit_R0<string px, bit np, string tnt>
: InstHexagon<(outs), (ins IntRegs:$Rs, brtarget:$r9_2),
""#px#" = tstbit($Rs, #0); if ("
#!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2",
- [], "", COMPOUND, TypeCOMPOUND>, OpcodeHexagon {
+ [], "", COMPOUND_CJ_ARCHDEPSLOT, TypeCOMPOUND>, OpcodeHexagon {
bits<4> Rs;
bits<11> r9_2;
@@ -4054,7 +4197,7 @@ class CJInst_RR<string px, string op, bit np, string tnt>
: InstHexagon<(outs), (ins IntRegs:$Rs, IntRegs:$Rt, brtarget:$r9_2),
""#px#" = cmp."#op#"($Rs, $Rt); if ("
#!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2",
- [], "", COMPOUND, TypeCOMPOUND>, OpcodeHexagon {
+ [], "", COMPOUND_CJ_ARCHDEPSLOT, TypeCOMPOUND>, OpcodeHexagon {
bits<4> Rs;
bits<4> Rt;
bits<11> r9_2;
@@ -4108,7 +4251,7 @@ class CJInst_RU5<string px, string op, bit np, string tnt>
: InstHexagon<(outs), (ins IntRegs:$Rs, u5Imm:$U5, brtarget:$r9_2),
""#px#" = cmp."#op#"($Rs, #$U5); if ("
#!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2",
- [], "", COMPOUND, TypeCOMPOUND>, OpcodeHexagon {
+ [], "", COMPOUND_CJ_ARCHDEPSLOT, TypeCOMPOUND>, OpcodeHexagon {
bits<4> Rs;
bits<5> U5;
bits<11> r9_2;
@@ -4163,7 +4306,7 @@ class CJInst_Rn1<string px, string op, bit np, string tnt>
: InstHexagon<(outs), (ins IntRegs:$Rs, brtarget:$r9_2),
""#px#" = cmp."#op#"($Rs,#-1); if ("
#!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2",
- [], "", COMPOUND, TypeCOMPOUND>, OpcodeHexagon {
+ [], "", COMPOUND_CJ_ARCHDEPSLOT, TypeCOMPOUND>, OpcodeHexagon {
bits<4> Rs;
bits<11> r9_2;
@@ -4212,7 +4355,7 @@ defm gt : T_pnp_CJInst_Rn1<"gt">;
let Defs = [PC], isBranch = 1, hasSideEffects = 0, hasNewValue = 1,
isExtentSigned = 1, opNewValue = 0, isExtendable = 1, opExtentBits = 11,
opExtentAlign = 2, opExtendable = 2 in
-def J4_jumpseti: CJInst <
+def J4_jumpseti: CJInst_JMPSET <
(outs IntRegs:$Rd),
(ins u6Imm:$U6, brtarget:$r9_2),
"$Rd = #$U6 ; jump $r9_2"> {
@@ -4232,7 +4375,7 @@ def J4_jumpseti: CJInst <
let Defs = [PC], isBranch = 1, hasSideEffects = 0, hasNewValue = 1,
isExtentSigned = 1, opNewValue = 0, isExtendable = 1, opExtentBits = 11,
opExtentAlign = 2, opExtendable = 2 in
-def J4_jumpsetr: CJInst <
+def J4_jumpsetr: CJInst_JMPSET <
(outs IntRegs:$Rd),
(ins IntRegs:$Rs, brtarget:$r9_2),
"$Rd = $Rs ; jump $r9_2"> {
diff --git a/lib/Target/Hexagon/HexagonInstrInfoV60.td b/lib/Target/Hexagon/HexagonInstrInfoV60.td
index 897ada081534..c3f09b69ce85 100644
--- a/lib/Target/Hexagon/HexagonInstrInfoV60.td
+++ b/lib/Target/Hexagon/HexagonInstrInfoV60.td
@@ -10,6 +10,21 @@
// This file describes the Hexagon V60 instructions in TableGen format.
//
//===----------------------------------------------------------------------===//
+def alignedload : PatFrag<(ops node:$addr), (load $addr), [{
+ return isAlignedMemNode(dyn_cast<MemSDNode>(N));
+}]>;
+
+def unalignedload : PatFrag<(ops node:$addr), (load $addr), [{
+ return !isAlignedMemNode(dyn_cast<MemSDNode>(N));
+}]>;
+
+def alignedstore : PatFrag<(ops node:$val, node:$addr), (store $val, $addr), [{
+ return isAlignedMemNode(dyn_cast<MemSDNode>(N));
+}]>;
+
+def unalignedstore : PatFrag<(ops node:$val, node:$addr), (store $val, $addr), [{
+ return !isAlignedMemNode(dyn_cast<MemSDNode>(N));
+}]>;
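+// These fragments split vector accesses by alignment so that vmem (aligned)
+// vs. vmemu (unaligned) can be selected. A rough sketch of the helper they
+// call (the real one lives in the Hexagon ISel code; this is an assumption):
+//   static bool isAlignedMemNode(const MemSDNode *N) {
+//     return N->getAlignment() >= N->getMemoryVT().getStoreSize();
+//   }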
// Vector store
@@ -102,7 +117,7 @@ let Itinerary = CVI_VM_TMP_LD, Type = TypeCVI_VM_TMP_LD, hasNewValue = 1 in {
//===----------------------------------------------------------------------===//
// Vector stores with base + immediate offset - unconditional
//===----------------------------------------------------------------------===//
-let addrMode = BaseImmOffset, accessSize = Vector64Access in
+let addrMode = BaseImmOffset, accessSize = Vector64Access, isPredicable = 1 in
class T_vstore_ai <string mnemonic, string baseOp, Operand ImmOp,
RegisterClass RC, bit isNT>
: V6_STInst <(outs), (ins IntRegs:$src1, ImmOp:$src2, RC:$src3),
@@ -133,16 +148,16 @@ let isNVStorable = 1, isNonTemporal = 1 in {
}
let Itinerary = CVI_VM_STU, Type = TypeCVI_VM_STU in {
- def V6_vS32Ub_ai : T_vstore_ai_64B <"vmemu", "vs32Ub_ai">,
+ def V6_vS32Ub_ai : T_vstore_ai_64B <"vmemu", "vS32Ub_ai">,
V6_vS32Ub_ai_enc;
- def V6_vS32Ub_ai_128B : T_vstore_ai_128B <"vmemu", "vs32Ub_ai">,
+ def V6_vS32Ub_ai_128B : T_vstore_ai_128B <"vmemu", "vS32Ub_ai">,
V6_vS32Ub_ai_128B_enc;
}
//===----------------------------------------------------------------------===//
// Vector stores with base + immediate offset - unconditional new
//===----------------------------------------------------------------------===//
let addrMode = BaseImmOffset, isNewValue = 1, opNewValue = 2, isNVStore = 1,
- Itinerary = CVI_VM_NEW_ST, Type = TypeCVI_VM_NEW_ST in
+ isPredicable = 1, Itinerary = CVI_VM_NEW_ST, Type = TypeCVI_VM_NEW_ST in
class T_vstore_new_ai <string baseOp, Operand ImmOp, RegisterClass RC, bit isNT>
: V6_STInst <(outs ), (ins IntRegs:$src1, ImmOp:$src2, RC:$src3),
"vmem($src1+#$src2)"#!if(isNT, ":nt", "")#" = $src3.new">, NewValueRel {
@@ -384,13 +399,15 @@ let Itinerary = CVI_VM_TMP_LD, Type = TypeCVI_VM_TMP_LD in {
//===----------------------------------------------------------------------===//
// Post increment vector stores with immediate offset.
//===----------------------------------------------------------------------===//
-let addrMode = PostInc in
+let addrMode = PostInc, isPredicable = 1 in
class T_vstore_pi <string mnemonic, string baseOp, Operand ImmOp,
RegisterClass RC, bit isNT>
: V6_STInst <(outs IntRegs:$_dst_),
(ins IntRegs:$src1, ImmOp:$src2, RC:$src3),
mnemonic#"($src1++#$src2)"#!if(isNT, ":nt", "")#" = $src3", [],
- "$src1 = $_dst_">, NewValueRel;
+ "$src1 = $_dst_">, NewValueRel {
+ let BaseOpcode = baseOp;
+}
let accessSize = Vector64Access in
class T_vstore_pi_64B <string mnemonic, string baseOp, bit isNT = 0>
@@ -398,7 +415,7 @@ class T_vstore_pi_64B <string mnemonic, string baseOp, bit isNT = 0>
let isCodeGenOnly = 1, accessSize = Vector128Access in
class T_vstore_pi_128B <string mnemonic, string baseOp, bit isNT = 0>
- : T_vstore_pi <mnemonic, baseOp, s3_7Imm, VectorRegs128B, isNT>;
+ : T_vstore_pi <mnemonic, baseOp#"128B", s3_7Imm, VectorRegs128B, isNT>;
let isNVStorable = 1 in {
def V6_vS32b_pi : T_vstore_pi_64B <"vmem", "vS32b_pi">, V6_vS32b_pi_enc;
@@ -426,7 +443,7 @@ let Itinerary = CVI_VM_STU, Type = TypeCVI_VM_STU in {
//===----------------------------------------------------------------------===//
let addrMode = PostInc, isNVStore = 1 in
let Itinerary = CVI_VM_NEW_ST, Type = TypeCVI_VM_NEW_ST, isNewValue = 1,
- opNewValue = 3, isNVStore = 1 in
+ isPredicable = 1, opNewValue = 3, isNVStore = 1 in
class T_vstore_new_pi <string baseOp, Operand ImmOp, RegisterClass RC, bit isNT>
: V6_STInst <(outs IntRegs:$_dst_),
(ins IntRegs:$src1, ImmOp:$src2, RC:$src3),
@@ -644,6 +661,7 @@ let Itinerary = CVI_VM_TMP_LD, Type = TypeCVI_VM_TMP_LD in {
//===----------------------------------------------------------------------===//
// Post increment vector stores with register offset
//===----------------------------------------------------------------------===//
+let isPredicable = 1 in
class T_vstore_ppu <string mnemonic, bit isNT = 0>
: V6_STInst <(outs IntRegs:$_dst_),
(ins IntRegs:$src1, ModRegs:$src2, VectorRegs:$src3),
@@ -665,7 +683,7 @@ def V6_vS32Ub_ppu : T_vstore_ppu <"vmemu">, V6_vS32Ub_ppu_enc;
// Post increment .new vector stores with register offset
//===----------------------------------------------------------------------===//
let Itinerary = CVI_VM_NEW_ST, Type = TypeCVI_VM_NEW_ST, isNewValue = 1,
- opNewValue = 3, isNVStore = 1 in
+ isPredicable = 1, opNewValue = 3, isNVStore = 1 in
class T_vstore_new_ppu <bit isNT = 0>
: V6_STInst <(outs IntRegs:$_dst_),
(ins IntRegs:$src1, ModRegs:$src2, VectorRegs:$src3),
@@ -785,30 +803,46 @@ defm : STrivv_pats <v16i64, v32i64>;
multiclass vS32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
// Aligned stores
- def : Pat<(store (VTSgl VectorRegs:$src1), IntRegs:$addr),
+ def : Pat<(alignedstore (VTSgl VectorRegs:$src1), IntRegs:$addr),
(V6_vS32b_ai IntRegs:$addr, #0, (VTSgl VectorRegs:$src1))>,
Requires<[UseHVXSgl]>;
+ def : Pat<(unalignedstore (VTSgl VectorRegs:$src1), IntRegs:$addr),
+ (V6_vS32Ub_ai IntRegs:$addr, #0, (VTSgl VectorRegs:$src1))>,
+ Requires<[UseHVXSgl]>;
// 128B Aligned stores
- def : Pat<(store (VTDbl VectorRegs128B:$src1), IntRegs:$addr),
+ def : Pat<(alignedstore (VTDbl VectorRegs128B:$src1), IntRegs:$addr),
(V6_vS32b_ai_128B IntRegs:$addr, #0, (VTDbl VectorRegs128B:$src1))>,
Requires<[UseHVXDbl]>;
+ def : Pat<(unalignedstore (VTDbl VectorRegs128B:$src1), IntRegs:$addr),
+ (V6_vS32Ub_ai_128B IntRegs:$addr, #0, (VTDbl VectorRegs128B:$src1))>,
+ Requires<[UseHVXDbl]>;
// Fold Add R+IFF into vector store.
- let AddedComplexity = 10 in
- def : Pat<(store (VTSgl VectorRegs:$src1),
- (add IntRegs:$src2, s4_6ImmPred:$offset)),
- (V6_vS32b_ai IntRegs:$src2, s4_6ImmPred:$offset,
- (VTSgl VectorRegs:$src1))>,
- Requires<[UseHVXSgl]>;
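+  // The raised complexity makes instruction selection prefer these folded
+  // base+offset forms over the plain base-register patterns above.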
+ let AddedComplexity = 10 in {
+ def : Pat<(alignedstore (VTSgl VectorRegs:$src1),
+ (add IntRegs:$src2, s4_6ImmPred:$offset)),
+ (V6_vS32b_ai IntRegs:$src2, s4_6ImmPred:$offset,
+ (VTSgl VectorRegs:$src1))>,
+ Requires<[UseHVXSgl]>;
+ def : Pat<(unalignedstore (VTSgl VectorRegs:$src1),
+ (add IntRegs:$src2, s4_6ImmPred:$offset)),
+ (V6_vS32Ub_ai IntRegs:$src2, s4_6ImmPred:$offset,
+ (VTSgl VectorRegs:$src1))>,
+ Requires<[UseHVXSgl]>;
- // Fold Add R+IFF into vector store 128B.
- let AddedComplexity = 10 in
- def : Pat<(store (VTDbl VectorRegs128B:$src1),
- (add IntRegs:$src2, s4_7ImmPred:$offset)),
- (V6_vS32b_ai_128B IntRegs:$src2, s4_7ImmPred:$offset,
- (VTDbl VectorRegs128B:$src1))>,
- Requires<[UseHVXDbl]>;
+ // Fold Add R+IFF into vector store 128B.
+ def : Pat<(alignedstore (VTDbl VectorRegs128B:$src1),
+ (add IntRegs:$src2, s4_7ImmPred:$offset)),
+ (V6_vS32b_ai_128B IntRegs:$src2, s4_7ImmPred:$offset,
+ (VTDbl VectorRegs128B:$src1))>,
+ Requires<[UseHVXDbl]>;
+ def : Pat<(unalignedstore (VTDbl VectorRegs128B:$src1),
+ (add IntRegs:$src2, s4_7ImmPred:$offset)),
+ (V6_vS32Ub_ai_128B IntRegs:$src2, s4_7ImmPred:$offset,
+ (VTDbl VectorRegs128B:$src1))>,
+ Requires<[UseHVXDbl]>;
+ }
}
defm : vS32b_ai_pats <v64i8, v128i8>;
@@ -843,25 +877,37 @@ defm : LDrivv_pats <v16i64, v32i64>;
multiclass vL32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
// Aligned loads
- def : Pat < (VTSgl (load IntRegs:$addr)),
+ def : Pat < (VTSgl (alignedload IntRegs:$addr)),
(V6_vL32b_ai IntRegs:$addr, #0) >,
Requires<[UseHVXSgl]>;
+ def : Pat < (VTSgl (unalignedload IntRegs:$addr)),
+ (V6_vL32Ub_ai IntRegs:$addr, #0) >,
+ Requires<[UseHVXSgl]>;
// 128B Load
- def : Pat < (VTDbl (load IntRegs:$addr)),
+ def : Pat < (VTDbl (alignedload IntRegs:$addr)),
(V6_vL32b_ai_128B IntRegs:$addr, #0) >,
Requires<[UseHVXDbl]>;
+ def : Pat < (VTDbl (unalignedload IntRegs:$addr)),
+ (V6_vL32Ub_ai_128B IntRegs:$addr, #0) >,
+ Requires<[UseHVXDbl]>;
// Fold Add R+IFF into vector load.
- let AddedComplexity = 10 in
- def : Pat<(VTDbl (load (add IntRegs:$src2, s4_7ImmPred:$offset))),
- (V6_vL32b_ai_128B IntRegs:$src2, s4_7ImmPred:$offset)>,
- Requires<[UseHVXDbl]>;
-
- let AddedComplexity = 10 in
- def : Pat<(VTSgl (load (add IntRegs:$src2, s4_6ImmPred:$offset))),
- (V6_vL32b_ai IntRegs:$src2, s4_6ImmPred:$offset)>,
- Requires<[UseHVXSgl]>;
+ let AddedComplexity = 10 in {
+ def : Pat<(VTDbl (alignedload (add IntRegs:$src2, s4_7ImmPred:$offset))),
+ (V6_vL32b_ai_128B IntRegs:$src2, s4_7ImmPred:$offset)>,
+ Requires<[UseHVXDbl]>;
+ def : Pat<(VTDbl (unalignedload (add IntRegs:$src2, s4_7ImmPred:$offset))),
+ (V6_vL32Ub_ai_128B IntRegs:$src2, s4_7ImmPred:$offset)>,
+ Requires<[UseHVXDbl]>;
+
+ def : Pat<(VTSgl (alignedload (add IntRegs:$src2, s4_6ImmPred:$offset))),
+ (V6_vL32b_ai IntRegs:$src2, s4_6ImmPred:$offset)>,
+ Requires<[UseHVXSgl]>;
+ def : Pat<(VTSgl (unalignedload (add IntRegs:$src2, s4_6ImmPred:$offset))),
+ (V6_vL32Ub_ai IntRegs:$src2, s4_6ImmPred:$offset)>,
+ Requires<[UseHVXSgl]>;
+ }
}
defm : vL32b_ai_pats <v64i8, v128i8>;
diff --git a/lib/Target/Hexagon/HexagonInstrInfoVector.td b/lib/Target/Hexagon/HexagonInstrInfoVector.td
index 96dd5315b87f..0277d5e3c28c 100644
--- a/lib/Target/Hexagon/HexagonInstrInfoVector.td
+++ b/lib/Target/Hexagon/HexagonInstrInfoVector.td
@@ -35,61 +35,12 @@ multiclass bitconvert_64<ValueType a, ValueType b> {
(a DoubleRegs:$src)>;
}
-multiclass bitconvert_vec<ValueType a, ValueType b> {
- def : Pat <(b (bitconvert (a VectorRegs:$src))),
- (b VectorRegs:$src)>;
- def : Pat <(a (bitconvert (b VectorRegs:$src))),
- (a VectorRegs:$src)>;
-}
-
-multiclass bitconvert_dblvec<ValueType a, ValueType b> {
- def : Pat <(b (bitconvert (a VecDblRegs:$src))),
- (b VecDblRegs:$src)>;
- def : Pat <(a (bitconvert (b VecDblRegs:$src))),
- (a VecDblRegs:$src)>;
-}
-
-multiclass bitconvert_predvec<ValueType a, ValueType b> {
- def : Pat <(b (bitconvert (a VecPredRegs:$src))),
- (b VectorRegs:$src)>;
- def : Pat <(a (bitconvert (b VectorRegs:$src))),
- (a VecPredRegs:$src)>;
-}
-
-multiclass bitconvert_dblvec128B<ValueType a, ValueType b> {
- def : Pat <(b (bitconvert (a VecDblRegs128B:$src))),
- (b VecDblRegs128B:$src)>;
- def : Pat <(a (bitconvert (b VecDblRegs128B:$src))),
- (a VecDblRegs128B:$src)>;
-}
-
-// Bit convert vector types.
-defm : bitconvert_32<v4i8, i32>;
+// Bit convert vector types to integers.
+defm : bitconvert_32<v4i8, i32>;
defm : bitconvert_32<v2i16, i32>;
-defm : bitconvert_32<v2i16, v4i8>;
-
-defm : bitconvert_64<v8i8, i64>;
+defm : bitconvert_64<v8i8, i64>;
defm : bitconvert_64<v4i16, i64>;
defm : bitconvert_64<v2i32, i64>;
-defm : bitconvert_64<v8i8, v4i16>;
-defm : bitconvert_64<v8i8, v2i32>;
-defm : bitconvert_64<v4i16, v2i32>;
-
-defm : bitconvert_vec<v64i8, v16i32>;
-defm : bitconvert_vec<v8i64 , v16i32>;
-defm : bitconvert_vec<v32i16, v16i32>;
-
-defm : bitconvert_dblvec<v16i64, v128i8>;
-defm : bitconvert_dblvec<v32i32, v128i8>;
-defm : bitconvert_dblvec<v64i16, v128i8>;
-
-defm : bitconvert_dblvec128B<v64i32, v128i16>;
-defm : bitconvert_dblvec128B<v256i8, v128i16>;
-defm : bitconvert_dblvec128B<v32i64, v128i16>;
-
-defm : bitconvert_dblvec128B<v64i32, v256i8>;
-defm : bitconvert_dblvec128B<v32i64, v256i8>;
-defm : bitconvert_dblvec128B<v128i16, v256i8>;
// Vector shift support. Vector shifting in Hexagon is rather different
// from LLVM's internal representation.
diff --git a/lib/Target/Hexagon/HexagonIntrinsics.td b/lib/Target/Hexagon/HexagonIntrinsics.td
index b207aaf392f4..a319dd4f9789 100644
--- a/lib/Target/Hexagon/HexagonIntrinsics.td
+++ b/lib/Target/Hexagon/HexagonIntrinsics.td
@@ -23,27 +23,29 @@ class T_R_pat <InstHexagon MI, Intrinsic IntID>
class T_P_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I64:$Rs),
- (MI DoubleRegs:$Rs)>;
+ (MI I64:$Rs)>;
class T_II_pat <InstHexagon MI, Intrinsic IntID, PatFrag Imm1, PatFrag Imm2>
: Pat<(IntID Imm1:$Is, Imm2:$It),
(MI Imm1:$Is, Imm2:$It)>;
-class T_RI_pat <InstHexagon MI, Intrinsic IntID, PatLeaf ImmPred = PatLeaf<(i32 imm)>>
+class T_RI_pat <InstHexagon MI, Intrinsic IntID,
+ PatLeaf ImmPred = PatLeaf<(i32 imm)>>
: Pat<(IntID I32:$Rs, ImmPred:$It),
(MI I32:$Rs, ImmPred:$It)>;
-class T_IR_pat <InstHexagon MI, Intrinsic IntID, PatFrag ImmPred = PatLeaf<(i32 imm)>>
+class T_IR_pat <InstHexagon MI, Intrinsic IntID,
+ PatFrag ImmPred = PatLeaf<(i32 imm)>>
: Pat<(IntID ImmPred:$Is, I32:$Rt),
(MI ImmPred:$Is, I32:$Rt)>;
class T_PI_pat <InstHexagon MI, Intrinsic IntID>
: Pat<(IntID I64:$Rs, imm:$It),
- (MI DoubleRegs:$Rs, imm:$It)>;
+ (MI I64:$Rs, imm:$It)>;
class T_RP_pat <InstHexagon MI, Intrinsic IntID>
: Pat<(IntID I32:$Rs, I64:$Rt),
- (MI I32:$Rs, DoubleRegs:$Rt)>;
+ (MI I32:$Rs, I64:$Rt)>;
class T_RR_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I32:$Rs, I32:$Rt),
@@ -51,19 +53,31 @@ class T_RR_pat <InstHexagon MI, Intrinsic IntID>
class T_PP_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I64:$Rs, I64:$Rt),
- (MI DoubleRegs:$Rs, DoubleRegs:$Rt)>;
+ (MI I64:$Rs, I64:$Rt)>;
+
+class T_QQ_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I32:$Rs, I32:$Rt),
+ (MI (C2_tfrrp I32:$Rs), (C2_tfrrp I32:$Rt))>;
class T_QII_pat <InstHexagon MI, Intrinsic IntID, PatFrag Imm1, PatFrag Imm2>
- : Pat <(IntID (i32 PredRegs:$Ps), Imm1:$Is, Imm2:$It),
- (MI PredRegs:$Ps, Imm1:$Is, Imm2:$It)>;
+ : Pat <(IntID I32:$Rp, Imm1:$Is, Imm2:$It),
+ (MI (C2_tfrrp I32:$Rp), Imm1:$Is, Imm2:$It)>;
+
+class T_QRR_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I32:$Rp, I32:$Rs, I32:$Rt),
+ (MI (C2_tfrrp I32:$Rp), I32:$Rs, I32:$Rt)>;
class T_QRI_pat <InstHexagon MI, Intrinsic IntID, PatFrag ImmPred>
- : Pat <(IntID (i32 PredRegs:$Ps), I32:$Rs, ImmPred:$Is),
- (MI PredRegs:$Ps, I32:$Rs, ImmPred:$Is)>;
+ : Pat <(IntID I32:$Rp, I32:$Rs, ImmPred:$Is),
+ (MI (C2_tfrrp I32:$Rp), I32:$Rs, ImmPred:$Is)>;
class T_QIR_pat <InstHexagon MI, Intrinsic IntID, PatFrag ImmPred>
- : Pat <(IntID (i32 PredRegs:$Ps), ImmPred:$Is, I32:$Rs),
- (MI PredRegs:$Ps, ImmPred:$Is, I32:$Rs)>;
+ : Pat <(IntID I32:$Rp, ImmPred:$Is, I32:$Rs),
+ (MI (C2_tfrrp I32:$Rp), ImmPred:$Is, I32:$Rs)>;
+
+class T_QPP_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I32:$Rp, I64:$Rs, I64:$Rt),
+ (MI (C2_tfrrp I32:$Rp), I64:$Rs, I64:$Rt)>;
class T_RRI_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I32:$Rs, I32:$Rt, imm:$Iu),
@@ -91,31 +105,31 @@ class T_RRR_pat <InstHexagon MI, Intrinsic IntID>
class T_PPI_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I64:$Rs, I64:$Rt, imm:$Iu),
- (MI DoubleRegs:$Rs, DoubleRegs:$Rt, imm:$Iu)>;
+ (MI I64:$Rs, I64:$Rt, imm:$Iu)>;
class T_PII_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I64:$Rs, imm:$It, imm:$Iu),
- (MI DoubleRegs:$Rs, imm:$It, imm:$Iu)>;
+ (MI I64:$Rs, imm:$It, imm:$Iu)>;
class T_PPP_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I64:$Rs, I64:$Rt, I64:$Ru),
- (MI DoubleRegs:$Rs, DoubleRegs:$Rt, DoubleRegs:$Ru)>;
+ (MI I64:$Rs, I64:$Rt, I64:$Ru)>;
class T_PPR_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I64:$Rs, I64:$Rt, I32:$Ru),
- (MI DoubleRegs:$Rs, DoubleRegs:$Rt, I32:$Ru)>;
+ (MI I64:$Rs, I64:$Rt, I32:$Ru)>;
class T_PRR_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I64:$Rs, I32:$Rt, I32:$Ru),
- (MI DoubleRegs:$Rs, I32:$Rt, I32:$Ru)>;
+ (MI I64:$Rs, I32:$Rt, I32:$Ru)>;
class T_PPQ_pat <InstHexagon MI, Intrinsic IntID>
- : Pat <(IntID I64:$Rs, I64:$Rt, (i32 PredRegs:$Ru)),
- (MI DoubleRegs:$Rs, DoubleRegs:$Rt, PredRegs:$Ru)>;
+ : Pat <(IntID I64:$Rs, I64:$Rt, I32:$Rp),
+ (MI I64:$Rs, I64:$Rt, (C2_tfrrp I32:$Rp))>;
class T_PR_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I64:$Rs, I32:$Rt),
- (MI DoubleRegs:$Rs, I32:$Rt)>;
+ (MI I64:$Rs, I32:$Rt)>;
class T_D_pat <InstHexagon MI, Intrinsic IntID>
: Pat<(IntID (F64:$Rs)),
@@ -131,7 +145,7 @@ class T_F_pat <InstHexagon MI, Intrinsic IntID>
(MI F32:$Rs)>;
class T_FI_pat <InstHexagon MI, Intrinsic IntID,
- PatLeaf ImmPred = PatLeaf<(i32 imm)>>
+ PatLeaf ImmPred = PatLeaf<(i32 imm)>>
: Pat<(IntID F32:$Rs, ImmPred:$It),
(MI F32:$Rs, ImmPred:$It)>;
@@ -148,8 +162,62 @@ class T_FFF_pat <InstHexagon MI, Intrinsic IntID>
(MI F32:$Rs, F32:$Rt, F32:$Ru)>;
class T_FFFQ_pat <InstHexagon MI, Intrinsic IntID>
- : Pat <(IntID F32:$Rs, F32:$Rt, F32:$Ru, (i32 PredRegs:$Rx)),
- (MI F32:$Rs, F32:$Rt, F32:$Ru, PredRegs:$Rx)>;
+ : Pat <(IntID F32:$Rs, F32:$Rt, F32:$Ru, I32:$Rp),
+ (MI F32:$Rs, F32:$Rt, F32:$Ru, (C2_tfrrp I32:$Rp))>;
+
+class T_Q_RI_pat <InstHexagon MI, Intrinsic IntID,
+ PatLeaf ImmPred = PatLeaf<(i32 imm)>>
+ : Pat<(IntID I32:$Rs, ImmPred:$It),
+ (C2_tfrpr (MI I32:$Rs, ImmPred:$It))>;
+
+class T_Q_RR_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I32:$Rs, I32:$Rt),
+ (C2_tfrpr (MI I32:$Rs, I32:$Rt))>;
+
+class T_Q_RP_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I32:$Rs, I64:$Rt),
+ (C2_tfrpr (MI I32:$Rs, I64:$Rt))>;
+
+class T_Q_PR_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs, I32:$Rt),
+ (C2_tfrpr (MI I64:$Rs, I32:$Rt))>;
+
+class T_Q_PI_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat<(IntID I64:$Rs, imm:$It),
+ (C2_tfrpr (MI I64:$Rs, imm:$It))>;
+
+class T_Q_PP_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I64:$Rs, I64:$Rt),
+ (C2_tfrpr (MI I64:$Rs, I64:$Rt))>;
+
+class T_Q_Q_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I32:$Rp),
+ (C2_tfrpr (MI (C2_tfrrp I32:$Rp)))>;
+
+class T_Q_QQ_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I32:$Rp, I32:$Rq),
+ (C2_tfrpr (MI (C2_tfrrp I32:$Rp), (C2_tfrrp I32:$Rq)))>;
+
+class T_Q_FF_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat<(IntID F32:$Rs, F32:$Rt),
+ (C2_tfrpr (MI F32:$Rs, F32:$Rt))>;
+
+class T_Q_DD_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat<(IntID F64:$Rs, F64:$Rt),
+ (C2_tfrpr (MI F64:$Rs, F64:$Rt))>;
+
+class T_Q_FI_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat<(IntID F32:$Rs, imm:$It),
+ (C2_tfrpr (MI F32:$Rs, imm:$It))>;
+
+class T_Q_DI_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat<(IntID F64:$Rs, imm:$It),
+ (C2_tfrpr (MI F64:$Rs, imm:$It))>;
+
+class T_Q_QQQ_pat <InstHexagon MI, Intrinsic IntID>
+ : Pat <(IntID I32:$Rp, I32:$Rq, I32:$Rs),
+ (C2_tfrpr (MI (C2_tfrrp I32:$Rp), (C2_tfrrp I32:$Rq),
+ (C2_tfrrp I32:$Rs)))>;
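+
+// In the T_Q_* classes above, predicate (Q) values cross the intrinsic
+// boundary as i32: C2_tfrrp transfers an i32 argument into a predicate
+// register, and C2_tfrpr transfers a predicate result back to an i32.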
//===----------------------------------------------------------------------===//
// MPYS / Multiply signed/unsigned halfwords
@@ -645,9 +713,9 @@ def : T_PPR_pat <S2_lsr_r_p_or, int_hexagon_S2_lsr_r_p_or>;
def : T_PPR_pat <S2_asl_r_p_or, int_hexagon_S2_asl_r_p_or>;
def : T_PPR_pat <S2_lsl_r_p_or, int_hexagon_S2_lsl_r_p_or>;
-/********************************************************************
-* ALU32/ALU *
-*********************************************************************/
+//*******************************************************************
+// ALU32/ALU
+//*******************************************************************
def : T_RR_pat<A2_add, int_hexagon_A2_add>;
def : T_RI_pat<A2_addi, int_hexagon_A2_addi>;
def : T_RR_pat<A2_sub, int_hexagon_A2_sub>;
@@ -660,31 +728,46 @@ def : T_RR_pat<A2_xor, int_hexagon_A2_xor>;
def : T_RR_pat<A2_combinew, int_hexagon_A2_combinew>;
// Assembler mapped from Rd32=not(Rs32) to Rd32=sub(#-1,Rs32)
-def : Pat <(int_hexagon_A2_not (I32:$Rs)),
- (A2_subri -1, IntRegs:$Rs)>;
+def : Pat <(int_hexagon_A2_not I32:$Rs),
+ (A2_subri -1, I32:$Rs)>;
// Assembler mapped from Rd32=neg(Rs32) to Rd32=sub(#0,Rs32)
-def : Pat <(int_hexagon_A2_neg IntRegs:$Rs),
- (A2_subri 0, IntRegs:$Rs)>;
+def : Pat <(int_hexagon_A2_neg I32:$Rs),
+ (A2_subri 0, I32:$Rs)>;
// Transfer immediate
-def : Pat <(int_hexagon_A2_tfril (I32:$Rs), u16_0ImmPred:$Is),
- (A2_tfril IntRegs:$Rs, u16_0ImmPred:$Is)>;
-def : Pat <(int_hexagon_A2_tfrih (I32:$Rs), u16_0ImmPred:$Is),
- (A2_tfrih IntRegs:$Rs, u16_0ImmPred:$Is)>;
+def : Pat <(int_hexagon_A2_tfril I32:$Rs, u16_0ImmPred:$Is),
+ (A2_tfril I32:$Rs, u16_0ImmPred:$Is)>;
+def : Pat <(int_hexagon_A2_tfrih I32:$Rs, u16_0ImmPred:$Is),
+ (A2_tfrih I32:$Rs, u16_0ImmPred:$Is)>;
// Transfer Register/immediate.
def : T_R_pat <A2_tfr, int_hexagon_A2_tfr>;
def : T_I_pat <A2_tfrsi, int_hexagon_A2_tfrsi>;
-def : T_I_pat <A2_tfrpi, int_hexagon_A2_tfrpi>;
+
+def ImmExt64: SDNodeXForm<imm, [{
+ int64_t V = N->getSExtValue();
+ return CurDAG->getTargetConstant(V, SDLoc(N), MVT::i64);
+}]>;
+
+// A2_tfrpi has an operand of type i64. This is necessary because it is
+// generated from "(set I64:$Rd, imm)". That pattern would not appear
+// in the DAG if the immediate were not a 64-bit value.
+// The builtin for A2_tfrpi, on the other hand, takes a 32-bit value,
+// which makes it impossible to simply replace it with the instruction.
+// To connect the builtin with the instruction, the builtin's operand
+// needs to be extended to the right type.
+
+def : Pat<(int_hexagon_A2_tfrpi imm:$Is),
+ (A2_tfrpi (ImmExt64 $Is))>;
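+
+// For illustration only (hypothetical builtin/instruction names): any
+// other builtin whose 32-bit immediate must match a 64-bit instruction
+// operand could reuse the same transform, e.g.
+//   def : Pat<(int_hexagon_SOME_BUILTIN imm:$Is),
+//             (SOME_INSN (ImmExt64 $Is))>;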
// Assembler mapped from Rdd32=Rss32 to Rdd32=combine(Rss.H32,Rss.L32)
-def : Pat<(int_hexagon_A2_tfrp DoubleRegs:$src),
- (A2_combinew (HiReg DoubleRegs:$src), (LoReg DoubleRegs:$src))>;
+def : Pat<(int_hexagon_A2_tfrp I64:$src),
+ (A2_combinew (HiReg I64:$src), (LoReg I64:$src))>;
-/********************************************************************
-* ALU32/PERM *
-*********************************************************************/
+//*******************************************************************
+// ALU32/PERM
+//*******************************************************************
// Combine
def: T_RR_pat<A2_combine_hh, int_hexagon_A2_combine_hh>;
def: T_RR_pat<A2_combine_hl, int_hexagon_A2_combine_hl>;
@@ -693,10 +776,8 @@ def: T_RR_pat<A2_combine_ll, int_hexagon_A2_combine_ll>;
def: T_II_pat<A2_combineii, int_hexagon_A2_combineii, s32ImmPred, s8ImmPred>;
-def: Pat<(i32 (int_hexagon_C2_mux (I32:$Rp), (I32:$Rs), (I32:$Rt))),
- (i32 (C2_mux (C2_tfrrp IntRegs:$Rp), IntRegs:$Rs, IntRegs:$Rt))>;
-
// Mux
+def : T_QRR_pat<C2_mux, int_hexagon_C2_mux>;
def : T_QRI_pat<C2_muxir, int_hexagon_C2_muxir, s32ImmPred>;
def : T_QIR_pat<C2_muxri, int_hexagon_C2_muxri, s32ImmPred>;
def : T_QII_pat<C2_muxii, int_hexagon_C2_muxii, s32ImmPred, s8ImmPred>;
@@ -712,41 +793,36 @@ def : T_R_pat<A2_sxtb, int_hexagon_A2_sxtb>;
def : T_R_pat<A2_zxth, int_hexagon_A2_zxth>;
def : T_R_pat<A2_zxtb, int_hexagon_A2_zxtb>;
-/********************************************************************
-* ALU32/PRED *
-*********************************************************************/
+//*******************************************************************
+// ALU32/PRED
+//*******************************************************************
// Compare
-def : T_RR_pat<C2_cmpeq, int_hexagon_C2_cmpeq>;
-def : T_RR_pat<C2_cmpgt, int_hexagon_C2_cmpgt>;
-def : T_RR_pat<C2_cmpgtu, int_hexagon_C2_cmpgtu>;
-
-def : T_RI_pat<C2_cmpeqi, int_hexagon_C2_cmpeqi, s32ImmPred>;
-def : T_RI_pat<C2_cmpgti, int_hexagon_C2_cmpgti, s32ImmPred>;
-def : T_RI_pat<C2_cmpgtui, int_hexagon_C2_cmpgtui, u32ImmPred>;
-
-def : Pat <(i32 (int_hexagon_C2_cmpgei (I32:$src1), s32ImmPred:$src2)),
- (i32 (C2_cmpgti (I32:$src1),
- (DEC_CONST_SIGNED s32ImmPred:$src2)))>;
-
-def : Pat <(i32 (int_hexagon_C2_cmpgeui (I32:$src1), u32ImmPred:$src2)),
- (i32 (C2_cmpgtui (I32:$src1),
- (DEC_CONST_UNSIGNED u32ImmPred:$src2)))>;
-
-// The instruction, Pd=cmp.geu(Rs, #u8) -> Pd=cmp.eq(Rs,Rs) when #u8 == 0.
-def : Pat <(i32 (int_hexagon_C2_cmpgeui (I32:$src1), 0)),
- (i32 (C2_cmpeq (I32:$src1), (I32:$src1)))>;
-
-def : Pat <(i32 (int_hexagon_C2_cmplt (I32:$src1),
- (I32:$src2))),
- (i32 (C2_cmpgt (I32:$src2), (I32:$src1)))>;
-
-def : Pat <(i32 (int_hexagon_C2_cmpltu (I32:$src1),
- (I32:$src2))),
- (i32 (C2_cmpgtu (I32:$src2), (I32:$src1)))>;
-
-/********************************************************************
-* ALU32/VH *
-*********************************************************************/
+def : T_Q_RR_pat<C2_cmpeq, int_hexagon_C2_cmpeq>;
+def : T_Q_RR_pat<C2_cmpgt, int_hexagon_C2_cmpgt>;
+def : T_Q_RR_pat<C2_cmpgtu, int_hexagon_C2_cmpgtu>;
+
+def : T_Q_RI_pat<C2_cmpeqi, int_hexagon_C2_cmpeqi, s32ImmPred>;
+def : T_Q_RI_pat<C2_cmpgti, int_hexagon_C2_cmpgti, s32ImmPred>;
+def : T_Q_RI_pat<C2_cmpgtui, int_hexagon_C2_cmpgtui, u32ImmPred>;
+
+def : Pat <(int_hexagon_C2_cmpgei I32:$src1, s32ImmPred:$src2),
+ (C2_tfrpr (C2_cmpgti I32:$src1,
+ (DEC_CONST_SIGNED s32ImmPred:$src2)))>;
+
+def : Pat <(int_hexagon_C2_cmpgeui I32:$src1, u32ImmPred:$src2),
+ (C2_tfrpr (C2_cmpgtui I32:$src1,
+ (DEC_CONST_UNSIGNED u32ImmPred:$src2)))>;
+
+def : Pat <(int_hexagon_C2_cmpgeui I32:$src, 0),
+ (C2_tfrpr (C2_cmpeq I32:$src, I32:$src))>;
+def : Pat <(int_hexagon_C2_cmplt I32:$src1, I32:$src2),
+ (C2_tfrpr (C2_cmpgt I32:$src2, I32:$src1))>;
+def : Pat <(int_hexagon_C2_cmpltu I32:$src1, I32:$src2),
+ (C2_tfrpr (C2_cmpgtu I32:$src2, I32:$src1))>;
+
+//*******************************************************************
+// ALU32/VH
+//*******************************************************************
// Vector add, subtract, average halfwords
def: T_RR_pat<A2_svaddh, int_hexagon_A2_svaddh>;
def: T_RR_pat<A2_svaddhs, int_hexagon_A2_svaddhs>;
@@ -760,28 +836,28 @@ def: T_RR_pat<A2_svavgh, int_hexagon_A2_svavgh>;
def: T_RR_pat<A2_svavghs, int_hexagon_A2_svavghs>;
def: T_RR_pat<A2_svnavgh, int_hexagon_A2_svnavgh>;
-/********************************************************************
-* ALU64/ALU *
-*********************************************************************/
-def: T_RR_pat<A2_addsat, int_hexagon_A2_addsat>;
-def: T_RR_pat<A2_subsat, int_hexagon_A2_subsat>;
-def: T_PP_pat<A2_addp, int_hexagon_A2_addp>;
-def: T_PP_pat<A2_subp, int_hexagon_A2_subp>;
+//*******************************************************************
+// ALU64/ALU
+//*******************************************************************
+def: T_RR_pat<A2_addsat, int_hexagon_A2_addsat>;
+def: T_RR_pat<A2_subsat, int_hexagon_A2_subsat>;
+def: T_PP_pat<A2_addp, int_hexagon_A2_addp>;
+def: T_PP_pat<A2_subp, int_hexagon_A2_subp>;
-def: T_PP_pat<A2_andp, int_hexagon_A2_andp>;
-def: T_PP_pat<A2_orp, int_hexagon_A2_orp>;
-def: T_PP_pat<A2_xorp, int_hexagon_A2_xorp>;
+def: T_PP_pat<A2_andp, int_hexagon_A2_andp>;
+def: T_PP_pat<A2_orp, int_hexagon_A2_orp>;
+def: T_PP_pat<A2_xorp, int_hexagon_A2_xorp>;
-def: T_PP_pat<C2_cmpeqp, int_hexagon_C2_cmpeqp>;
-def: T_PP_pat<C2_cmpgtp, int_hexagon_C2_cmpgtp>;
-def: T_PP_pat<C2_cmpgtup, int_hexagon_C2_cmpgtup>;
+def: T_Q_PP_pat<C2_cmpeqp, int_hexagon_C2_cmpeqp>;
+def: T_Q_PP_pat<C2_cmpgtp, int_hexagon_C2_cmpgtp>;
+def: T_Q_PP_pat<C2_cmpgtup, int_hexagon_C2_cmpgtup>;
-def: T_PP_pat<S2_parityp, int_hexagon_S2_parityp>;
-def: T_RR_pat<S2_packhl, int_hexagon_S2_packhl>;
+def: T_PP_pat<S2_parityp, int_hexagon_S2_parityp>;
+def: T_RR_pat<S2_packhl, int_hexagon_S2_packhl>;
-/********************************************************************
-* ALU64/VB *
-*********************************************************************/
+//*******************************************************************
+// ALU64/VB
+//*******************************************************************
// ALU64 - Vector add
def : T_PP_pat <A2_vaddub, int_hexagon_A2_vaddub>;
def : T_PP_pat <A2_vaddubs, int_hexagon_A2_vaddubs>;
@@ -838,23 +914,22 @@ def : T_PP_pat <A2_vsubw, int_hexagon_A2_vsubw>;
def : T_PP_pat <A2_vsubws, int_hexagon_A2_vsubws>;
// ALU64 - Vector compare bytes
-def : T_PP_pat <A2_vcmpbeq, int_hexagon_A2_vcmpbeq>;
-def : T_PP_pat <A4_vcmpbgt, int_hexagon_A4_vcmpbgt>;
-def : T_PP_pat <A2_vcmpbgtu, int_hexagon_A2_vcmpbgtu>;
+def : T_Q_PP_pat <A2_vcmpbeq, int_hexagon_A2_vcmpbeq>;
+def : T_Q_PP_pat <A4_vcmpbgt, int_hexagon_A4_vcmpbgt>;
+def : T_Q_PP_pat <A2_vcmpbgtu, int_hexagon_A2_vcmpbgtu>;
// ALU64 - Vector compare halfwords
-def : T_PP_pat <A2_vcmpheq, int_hexagon_A2_vcmpheq>;
-def : T_PP_pat <A2_vcmphgt, int_hexagon_A2_vcmphgt>;
-def : T_PP_pat <A2_vcmphgtu, int_hexagon_A2_vcmphgtu>;
+def : T_Q_PP_pat <A2_vcmpheq, int_hexagon_A2_vcmpheq>;
+def : T_Q_PP_pat <A2_vcmphgt, int_hexagon_A2_vcmphgt>;
+def : T_Q_PP_pat <A2_vcmphgtu, int_hexagon_A2_vcmphgtu>;
// ALU64 - Vector compare words
-def : T_PP_pat <A2_vcmpweq, int_hexagon_A2_vcmpweq>;
-def : T_PP_pat <A2_vcmpwgt, int_hexagon_A2_vcmpwgt>;
-def : T_PP_pat <A2_vcmpwgtu, int_hexagon_A2_vcmpwgtu>;
+def : T_Q_PP_pat <A2_vcmpweq, int_hexagon_A2_vcmpweq>;
+def : T_Q_PP_pat <A2_vcmpwgt, int_hexagon_A2_vcmpwgt>;
+def : T_Q_PP_pat <A2_vcmpwgtu, int_hexagon_A2_vcmpwgtu>;
// ALU64 / VB / Vector mux.
-def : Pat<(int_hexagon_C2_vmux PredRegs:$Pu, DoubleRegs:$Rs, DoubleRegs:$Rt),
- (C2_vmux PredRegs:$Pu, DoubleRegs:$Rs, DoubleRegs:$Rt)>;
+def : T_QPP_pat <C2_vmux, int_hexagon_C2_vmux>;
// MPY - Multiply and use full result
// Rdd = mpy[u](Rs, Rt)
@@ -903,35 +978,24 @@ def : T_PRR_pat <M2_vmac2, int_hexagon_M2_vmac2>;
def : T_PRR_pat <M2_vmac2s_s0, int_hexagon_M2_vmac2s_s0>;
def : T_PRR_pat <M2_vmac2s_s1, int_hexagon_M2_vmac2s_s1>;
-/********************************************************************
-* CR *
-*********************************************************************/
-class qi_CRInst_qi_pat<InstHexagon Inst, Intrinsic IntID> :
- Pat<(i32 (IntID IntRegs:$Rs)),
- (i32 (C2_tfrpr (Inst (C2_tfrrp IntRegs:$Rs))))>;
-
-class qi_CRInst_qiqi_pat<InstHexagon Inst, Intrinsic IntID> :
- Pat<(i32 (IntID IntRegs:$Rs, IntRegs:$Rt)),
- (i32 (C2_tfrpr (Inst (C2_tfrrp IntRegs:$Rs), (C2_tfrrp IntRegs:$Rt))))>;
-
-def: qi_CRInst_qi_pat<C2_not, int_hexagon_C2_not>;
-def: qi_CRInst_qi_pat<C2_all8, int_hexagon_C2_all8>;
-def: qi_CRInst_qi_pat<C2_any8, int_hexagon_C2_any8>;
+//*******************************************************************
+// CR
+//*******************************************************************
+def: T_Q_Q_pat<C2_not, int_hexagon_C2_not>;
+def: T_Q_Q_pat<C2_all8, int_hexagon_C2_all8>;
+def: T_Q_Q_pat<C2_any8, int_hexagon_C2_any8>;
+def: T_Q_Q_pat<C2_pxfer_map, int_hexagon_C2_pxfer_map>;
-def: qi_CRInst_qiqi_pat<C2_and, int_hexagon_C2_and>;
-def: qi_CRInst_qiqi_pat<C2_andn, int_hexagon_C2_andn>;
-def: qi_CRInst_qiqi_pat<C2_or, int_hexagon_C2_or>;
-def: qi_CRInst_qiqi_pat<C2_orn, int_hexagon_C2_orn>;
-def: qi_CRInst_qiqi_pat<C2_xor, int_hexagon_C2_xor>;
-
-// Assembler mapped from Pd4=Ps4 to Pd4=or(Ps4,Ps4)
-def : Pat<(int_hexagon_C2_pxfer_map PredRegs:$src),
- (C2_pxfer_map PredRegs:$src)>;
+def: T_Q_QQ_pat<C2_and, int_hexagon_C2_and>;
+def: T_Q_QQ_pat<C2_andn, int_hexagon_C2_andn>;
+def: T_Q_QQ_pat<C2_or, int_hexagon_C2_or>;
+def: T_Q_QQ_pat<C2_orn, int_hexagon_C2_orn>;
+def: T_Q_QQ_pat<C2_xor, int_hexagon_C2_xor>;
// Multiply 32x32 and use lower result
def : T_RRI_pat <M2_macsip, int_hexagon_M2_macsip>;
def : T_RRI_pat <M2_macsin, int_hexagon_M2_macsin>;
-def : T_RRR_pat <M2_maci, int_hexagon_M2_maci>;
+def : T_RRR_pat <M2_maci, int_hexagon_M2_maci>;
// Subtract and accumulate
def : T_RRR_pat <M2_subacc, int_hexagon_M2_subacc>;
@@ -945,54 +1009,45 @@ def : T_RRI_pat <M2_naccii, int_hexagon_M2_naccii>;
// XOR and XOR with destination
def : T_RRR_pat <M2_xor_xacc, int_hexagon_M2_xor_xacc>;
-class MType_R32_pat <Intrinsic IntID, InstHexagon OutputInst> :
- Pat <(IntID IntRegs:$src1, IntRegs:$src2),
- (OutputInst IntRegs:$src1, IntRegs:$src2)>;
-
// Vector dual multiply with round and pack
-
-def : Pat <(int_hexagon_M2_vdmpyrs_s0 DoubleRegs:$src1, DoubleRegs:$src2),
- (M2_vdmpyrs_s0 DoubleRegs:$src1, DoubleRegs:$src2)>;
-
-def : Pat <(int_hexagon_M2_vdmpyrs_s1 DoubleRegs:$src1, DoubleRegs:$src2),
- (M2_vdmpyrs_s1 DoubleRegs:$src1, DoubleRegs:$src2)>;
+def : T_PP_pat <M2_vdmpyrs_s0, int_hexagon_M2_vdmpyrs_s0>;
+def : T_PP_pat <M2_vdmpyrs_s1, int_hexagon_M2_vdmpyrs_s1>;
// Vector multiply halfwords with round and pack
-
-def : MType_R32_pat <int_hexagon_M2_vmpy2s_s0pack, M2_vmpy2s_s0pack>;
-def : MType_R32_pat <int_hexagon_M2_vmpy2s_s1pack, M2_vmpy2s_s1pack>;
+def : T_RR_pat <M2_vmpy2s_s0pack, int_hexagon_M2_vmpy2s_s0pack>;
+def : T_RR_pat <M2_vmpy2s_s1pack, int_hexagon_M2_vmpy2s_s1pack>;
// Multiply and use lower result
-def : MType_R32_pat <int_hexagon_M2_mpyi, M2_mpyi>;
-def : T_RI_pat<M2_mpysmi, int_hexagon_M2_mpysmi>;
+def : T_RR_pat <M2_mpyi, int_hexagon_M2_mpyi>;
+def : T_RI_pat <M2_mpysmi, int_hexagon_M2_mpysmi>;
// Assembler mapped from Rd32=mpyui(Rs32,Rt32) to Rd32=mpyi(Rs32,Rt32)
-def : MType_R32_pat <int_hexagon_M2_mpyui, M2_mpyi>;
+def : T_RR_pat <M2_mpyi, int_hexagon_M2_mpyui>;
// Multiply and use upper result
-def : MType_R32_pat <int_hexagon_M2_mpy_up, M2_mpy_up>;
-def : MType_R32_pat <int_hexagon_M2_mpyu_up, M2_mpyu_up>;
-def : MType_R32_pat <int_hexagon_M2_hmmpyh_rs1, M2_hmmpyh_rs1>;
-def : MType_R32_pat <int_hexagon_M2_hmmpyl_rs1, M2_hmmpyl_rs1>;
-def : MType_R32_pat <int_hexagon_M2_dpmpyss_rnd_s0, M2_dpmpyss_rnd_s0>;
+def : T_RR_pat <M2_mpy_up, int_hexagon_M2_mpy_up>;
+def : T_RR_pat <M2_mpyu_up, int_hexagon_M2_mpyu_up>;
+def : T_RR_pat <M2_hmmpyh_rs1, int_hexagon_M2_hmmpyh_rs1>;
+def : T_RR_pat <M2_hmmpyl_rs1, int_hexagon_M2_hmmpyl_rs1>;
+def : T_RR_pat <M2_dpmpyss_rnd_s0, int_hexagon_M2_dpmpyss_rnd_s0>;
// Complex multiply with round and pack
// Rd32=cmpy(Rs32,[*]Rt32)[:<<1]:rnd:sat
-def : MType_R32_pat <int_hexagon_M2_cmpyrs_s0, M2_cmpyrs_s0>;
-def : MType_R32_pat <int_hexagon_M2_cmpyrs_s1, M2_cmpyrs_s1>;
-def : MType_R32_pat <int_hexagon_M2_cmpyrsc_s0, M2_cmpyrsc_s0>;
-def : MType_R32_pat <int_hexagon_M2_cmpyrsc_s1, M2_cmpyrsc_s1>;
-
-/********************************************************************
-* STYPE/ALU *
-*********************************************************************/
+def : T_RR_pat <M2_cmpyrs_s0, int_hexagon_M2_cmpyrs_s0>;
+def : T_RR_pat <M2_cmpyrs_s1, int_hexagon_M2_cmpyrs_s1>;
+def : T_RR_pat <M2_cmpyrsc_s0, int_hexagon_M2_cmpyrsc_s0>;
+def : T_RR_pat <M2_cmpyrsc_s1, int_hexagon_M2_cmpyrsc_s1>;
+
+//*******************************************************************
+// STYPE/ALU
+//*******************************************************************
def : T_P_pat <A2_absp, int_hexagon_A2_absp>;
def : T_P_pat <A2_negp, int_hexagon_A2_negp>;
def : T_P_pat <A2_notp, int_hexagon_A2_notp>;
-/********************************************************************
-* STYPE/BIT *
-*********************************************************************/
+//*******************************************************************
+// STYPE/BIT
+//*******************************************************************
// Count leading/trailing
def: T_R_pat<S2_cl0, int_hexagon_S2_cl0>;
@@ -1023,6 +1078,11 @@ def : T_PP_pat <S2_vtrunowh, int_hexagon_S2_vtrunowh>;
// Linear feedback-shift iteration.
def : T_PP_pat <S2_lfsp, int_hexagon_S2_lfsp>;
+// Vector align
+// Need custom lowering
+def : T_PPQ_pat <S2_valignrb, int_hexagon_S2_valignrb>;
+def : T_PPI_pat <S2_valignib, int_hexagon_S2_valignib>;
+
// Vector splice
def : T_PPQ_pat <S2_vsplicerb, int_hexagon_S2_vsplicerb>;
def : T_PPI_pat <S2_vspliceib, int_hexagon_S2_vspliceib>;
@@ -1037,26 +1097,22 @@ def : T_RP_pat <S2_extractu_rp, int_hexagon_S2_extractu_rp>;
def : T_PP_pat <S2_extractup_rp, int_hexagon_S2_extractup_rp>;
// Insert bitfield
-def : Pat <(int_hexagon_S2_insert_rp IntRegs:$src1, IntRegs:$src2,
- DoubleRegs:$src3),
- (S2_insert_rp IntRegs:$src1, IntRegs:$src2, DoubleRegs:$src3)>;
+def : Pat <(int_hexagon_S2_insert_rp I32:$src1, I32:$src2, I64:$src3),
+ (S2_insert_rp I32:$src1, I32:$src2, I64:$src3)>;
-def : Pat<(i64 (int_hexagon_S2_insertp_rp (I64:$src1),
- (I64:$src2), (I64:$src3))),
- (i64 (S2_insertp_rp (I64:$src1), (I64:$src2),
- (I64:$src3)))>;
+def : Pat<(i64 (int_hexagon_S2_insertp_rp I64:$src1, I64:$src2, I64:$src3)),
+ (i64 (S2_insertp_rp I64:$src1, I64:$src2, I64:$src3))>;
-def : Pat<(int_hexagon_S2_insert IntRegs:$src1, IntRegs:$src2,
+def : Pat<(int_hexagon_S2_insert I32:$src1, I32:$src2,
u5ImmPred:$src3, u5ImmPred:$src4),
- (S2_insert IntRegs:$src1, IntRegs:$src2,
+ (S2_insert I32:$src1, I32:$src2,
u5ImmPred:$src3, u5ImmPred:$src4)>;
-def : Pat<(i64 (int_hexagon_S2_insertp (I64:$src1),
- (I64:$src2), u6ImmPred:$src3, u6ImmPred:$src4)),
- (i64 (S2_insertp (I64:$src1), (I64:$src2),
+def : Pat<(i64 (int_hexagon_S2_insertp I64:$src1, I64:$src2,
+ u6ImmPred:$src3, u6ImmPred:$src4)),
+ (i64 (S2_insertp I64:$src1, I64:$src2,
u6ImmPred:$src3, u6ImmPred:$src4))>;
-
// Interleave/deinterleave
def : T_P_pat <S2_interleave, int_hexagon_S2_interleave>;
def : T_P_pat <S2_deinterleave, int_hexagon_S2_deinterleave>;
@@ -1071,21 +1127,21 @@ def: T_RR_pat<S2_clrbit_r, int_hexagon_S2_clrbit_r>;
def: T_RR_pat<S2_togglebit_r, int_hexagon_S2_togglebit_r>;
// Test Bit
-def: T_RI_pat<S2_tstbit_i, int_hexagon_S2_tstbit_i>;
-def: T_RR_pat<S2_tstbit_r, int_hexagon_S2_tstbit_r>;
+def: T_Q_RI_pat<S2_tstbit_i, int_hexagon_S2_tstbit_i>;
+def: T_Q_RR_pat<S2_tstbit_r, int_hexagon_S2_tstbit_r>;
-/********************************************************************
-* STYPE/COMPLEX *
-*********************************************************************/
+//*******************************************************************
+// STYPE/COMPLEX
+//*******************************************************************
// Vector Complex conjugate
def : T_P_pat <A2_vconj, int_hexagon_A2_vconj>;
// Vector Complex rotate
def : T_PR_pat <S2_vcrotate, int_hexagon_S2_vcrotate>;
-/********************************************************************
-* STYPE/PERM *
-*********************************************************************/
+//*******************************************************************
+// STYPE/PERM
+//*******************************************************************
// Vector saturate without pack
def : T_P_pat <S2_vsathb_nopack, int_hexagon_S2_vsathb_nopack>;
@@ -1093,28 +1149,26 @@ def : T_P_pat <S2_vsathub_nopack, int_hexagon_S2_vsathub_nopack>;
def : T_P_pat <S2_vsatwh_nopack, int_hexagon_S2_vsatwh_nopack>;
def : T_P_pat <S2_vsatwuh_nopack, int_hexagon_S2_vsatwuh_nopack>;
-/********************************************************************
-* STYPE/PRED *
-*********************************************************************/
+//*******************************************************************
+// STYPE/PRED
+//*******************************************************************
// Predicate transfer
-def: Pat<(i32 (int_hexagon_C2_tfrpr (I32:$Rs))),
- (i32 (C2_tfrpr (C2_tfrrp (I32:$Rs))))>;
-def: Pat<(i32 (int_hexagon_C2_tfrrp (I32:$Rs))),
- (i32 (C2_tfrpr (C2_tfrrp (I32:$Rs))))>;
+def: Pat<(i32 (int_hexagon_C2_tfrpr I32:$Rs)),
+ (i32 (C2_tfrpr (C2_tfrrp I32:$Rs)))>;
+def: Pat<(i32 (int_hexagon_C2_tfrrp I32:$Rs)),
+ (i32 (C2_tfrpr (C2_tfrrp I32:$Rs)))>;
// Mask generate from predicate
-def: Pat<(i64 (int_hexagon_C2_mask (I32:$Rs))),
- (i64 (C2_mask (C2_tfrrp (I32:$Rs))))>;
+def: Pat<(i64 (int_hexagon_C2_mask I32:$Rs)),
+ (i64 (C2_mask (C2_tfrrp I32:$Rs)))>;
// Viterbi pack even and odd predicate bits
-def: Pat<(i32 (int_hexagon_C2_vitpack (I32:$Rs), (I32:$Rt))),
- (i32 (C2_vitpack (C2_tfrrp (I32:$Rs)),
- (C2_tfrrp (I32:$Rt))))>;
+def: T_QQ_pat<C2_vitpack, int_hexagon_C2_vitpack>;
-/********************************************************************
-* STYPE/SHIFT *
-*********************************************************************/
+//*******************************************************************
+// STYPE/SHIFT
+//*******************************************************************
def : T_PI_pat <S2_asr_i_p, int_hexagon_S2_asr_i_p>;
def : T_PI_pat <S2_lsr_i_p, int_hexagon_S2_lsr_i_p>;
@@ -1185,8 +1239,8 @@ def : T_RI_pat <S2_asl_i_r_sat, int_hexagon_S2_asl_i_r_sat>;
//===----------------------------------------------------------------------===//
class S2op_tableidx_pat <Intrinsic IntID, InstHexagon OutputInst,
SDNodeXForm XformImm>
- : Pat <(IntID IntRegs:$src1, IntRegs:$src2, u4ImmPred:$src3, u5ImmPred:$src4),
- (OutputInst IntRegs:$src1, IntRegs:$src2, u4ImmPred:$src3,
+ : Pat <(IntID I32:$src1, I32:$src2, u4ImmPred:$src3, u5ImmPred:$src4),
+ (OutputInst I32:$src1, I32:$src2, u4ImmPred:$src3,
(XformImm u5ImmPred:$src4))>;
@@ -1195,9 +1249,9 @@ class S2op_tableidx_pat <Intrinsic IntID, InstHexagon OutputInst,
// values from the 4th input operand. Please note that subtraction is not
// needed for int_hexagon_S2_tableidxb_goodsyntax.
-def : Pat <(int_hexagon_S2_tableidxb_goodsyntax IntRegs:$src1, IntRegs:$src2,
+def : Pat <(int_hexagon_S2_tableidxb_goodsyntax I32:$src1, I32:$src2,
u4ImmPred:$src3, u5ImmPred:$src4),
- (S2_tableidxb IntRegs:$src1, IntRegs:$src2,
+ (S2_tableidxb I32:$src1, I32:$src2,
u4ImmPred:$src3, u5ImmPred:$src4)>;
def : S2op_tableidx_pat <int_hexagon_S2_tableidxh_goodsyntax, S2_tableidxh,
@@ -1207,9 +1261,9 @@ def : S2op_tableidx_pat <int_hexagon_S2_tableidxw_goodsyntax, S2_tableidxw,
def : S2op_tableidx_pat <int_hexagon_S2_tableidxd_goodsyntax, S2_tableidxd,
DEC3_CONST_SIGNED>;
-/********************************************************************
-* STYPE/VH *
-*********************************************************************/
+//*******************************************************************
+// STYPE/VH
+//*******************************************************************
// Vector absolute value halfwords with and without saturation
// Rdd64=vabsh(Rss64)[:sat]
@@ -1229,9 +1283,9 @@ def : T_PR_pat <S2_lsr_r_vh, int_hexagon_S2_lsr_r_vh>;
def : T_PR_pat <S2_asl_r_vh, int_hexagon_S2_asl_r_vh>;
def : T_PR_pat <S2_lsl_r_vh, int_hexagon_S2_lsl_r_vh>;
-/********************************************************************
-* STYPE/VW *
-*********************************************************************/
+//*******************************************************************
+// STYPE/VW
+//*******************************************************************
// Vector absolute value words with and without saturation
def : T_P_pat <A2_vabsw, int_hexagon_A2_vabsw>;
@@ -1251,43 +1305,42 @@ def : T_PR_pat <S2_asl_r_vw, int_hexagon_S2_asl_r_vw>;
def : T_PR_pat <S2_lsl_r_vw, int_hexagon_S2_lsl_r_vw>;
// Vector shift words with truncate and pack
-
def : T_PR_pat <S2_asr_r_svw_trun, int_hexagon_S2_asr_r_svw_trun>;
+// Load/store locked.
def : T_R_pat<L2_loadw_locked, int_hexagon_L2_loadw_locked>;
def : T_R_pat<L4_loadd_locked, int_hexagon_L4_loadd_locked>;
-def: Pat<(i32 (int_hexagon_S2_storew_locked (I32:$Rs), (I32:$Rt))),
- (i32 (C2_tfrpr (S2_storew_locked (I32:$Rs), (I32:$Rt))))>;
-def: Pat<(i32 (int_hexagon_S4_stored_locked (I32:$Rs), (I64:$Rt))),
- (i32 (C2_tfrpr (S4_stored_locked (I32:$Rs), (I64:$Rt))))>;
+def : Pat<(int_hexagon_S2_storew_locked I32:$Rs, I32:$Rt),
+ (C2_tfrpr (S2_storew_locked I32:$Rs, I32:$Rt))>;
+def : Pat<(int_hexagon_S4_stored_locked I32:$Rs, I64:$Rt),
+ (C2_tfrpr (S4_stored_locked I32:$Rs, I64:$Rt))>;
-/********************************************************************
-* ST
-*********************************************************************/
+//*******************************************************************
+// ST
+//*******************************************************************
class T_stb_pat <InstHexagon MI, Intrinsic IntID, PatLeaf Val>
: Pat<(IntID I32:$Rs, Val:$Rt, I32:$Ru),
- (MI I32:$Rs, Val:$Rt, I32:$Ru)>;
+ (MI I32:$Rs, I32:$Ru, Val:$Rt)>;
-def : T_stb_pat <S2_storerh_pbr_pseudo, int_hexagon_brev_sth, I32>;
-def : T_stb_pat <S2_storerb_pbr_pseudo, int_hexagon_brev_stb, I32>;
-def : T_stb_pat <S2_storeri_pbr_pseudo, int_hexagon_brev_stw, I32>;
-def : T_stb_pat <S2_storerf_pbr_pseudo, int_hexagon_brev_sthhi, I32>;
-def : T_stb_pat <S2_storerd_pbr_pseudo, int_hexagon_brev_std, I64>;
+def : T_stb_pat <S2_storerh_pbr, int_hexagon_brev_sth, I32>;
+def : T_stb_pat <S2_storerb_pbr, int_hexagon_brev_stb, I32>;
+def : T_stb_pat <S2_storeri_pbr, int_hexagon_brev_stw, I32>;
+def : T_stb_pat <S2_storerf_pbr, int_hexagon_brev_sthhi, I32>;
+def : T_stb_pat <S2_storerd_pbr, int_hexagon_brev_std, I64>;
class T_stc_pat <InstHexagon MI, Intrinsic IntID, PatLeaf Imm, PatLeaf Val>
: Pat<(IntID I32:$Rs, Val:$Rt, I32:$Ru, Imm:$s),
- (MI I32:$Rs, Val:$Rt, I32:$Ru, Imm:$s)>;
+ (MI I32:$Rs, Imm:$s, I32:$Ru, Val:$Rt)>;
-def: T_stc_pat<S2_storerb_pci_pseudo, int_hexagon_circ_stb, s4_0ImmPred, I32>;
-def: T_stc_pat<S2_storerh_pci_pseudo, int_hexagon_circ_sth, s4_1ImmPred, I32>;
-def: T_stc_pat<S2_storeri_pci_pseudo, int_hexagon_circ_stw, s4_2ImmPred, I32>;
-def: T_stc_pat<S2_storerd_pci_pseudo, int_hexagon_circ_std, s4_3ImmPred, I64>;
-def: T_stc_pat<S2_storerf_pci_pseudo, int_hexagon_circ_sthhi, s4_1ImmPred, I32>;
+def: T_stc_pat<S2_storerb_pci, int_hexagon_circ_stb, s4_0ImmPred, I32>;
+def: T_stc_pat<S2_storerh_pci, int_hexagon_circ_sth, s4_1ImmPred, I32>;
+def: T_stc_pat<S2_storeri_pci, int_hexagon_circ_stw, s4_2ImmPred, I32>;
+def: T_stc_pat<S2_storerd_pci, int_hexagon_circ_std, s4_3ImmPred, I64>;
+def: T_stc_pat<S2_storerf_pci, int_hexagon_circ_sthhi, s4_1ImmPred, I32>;
include "HexagonIntrinsicsV3.td"
include "HexagonIntrinsicsV4.td"
include "HexagonIntrinsicsV5.td"
include "HexagonIntrinsicsV60.td"
-
diff --git a/lib/Target/Hexagon/HexagonIntrinsicsV4.td b/lib/Target/Hexagon/HexagonIntrinsicsV4.td
index c80a188d82e7..578973db1933 100644
--- a/lib/Target/Hexagon/HexagonIntrinsicsV4.td
+++ b/lib/Target/Hexagon/HexagonIntrinsicsV4.td
@@ -60,71 +60,60 @@ def : T_PPR_pat <S2_lsr_r_p_xor, int_hexagon_S2_lsr_r_p_xor>;
def : T_PPR_pat <S2_lsl_r_p_xor, int_hexagon_S2_lsl_r_p_xor>;
// Multiply and use upper result
-def : MType_R32_pat <int_hexagon_M2_mpysu_up, M2_mpysu_up>;
-def : MType_R32_pat <int_hexagon_M2_mpy_up_s1, M2_mpy_up_s1>;
-def : MType_R32_pat <int_hexagon_M2_hmmpyh_s1, M2_hmmpyh_s1>;
-def : MType_R32_pat <int_hexagon_M2_hmmpyl_s1, M2_hmmpyl_s1>;
-def : MType_R32_pat <int_hexagon_M2_mpy_up_s1_sat, M2_mpy_up_s1_sat>;
+def : T_RR_pat <M2_mpysu_up, int_hexagon_M2_mpysu_up>;
+def : T_RR_pat <M2_mpy_up_s1, int_hexagon_M2_mpy_up_s1>;
+def : T_RR_pat <M2_hmmpyh_s1, int_hexagon_M2_hmmpyh_s1>;
+def : T_RR_pat <M2_hmmpyl_s1, int_hexagon_M2_hmmpyl_s1>;
+def : T_RR_pat <M2_mpy_up_s1_sat, int_hexagon_M2_mpy_up_s1_sat>;
-// Vector reduce add unsigned halfwords
-def : Pat <(int_hexagon_M2_vraddh DoubleRegs:$src1, DoubleRegs:$src2),
- (M2_vraddh DoubleRegs:$src1, DoubleRegs:$src2)>;
-
-def : T_P_pat <S2_brevp, int_hexagon_S2_brevp>;
-
-def: T_P_pat <S2_ct0p, int_hexagon_S2_ct0p>;
-def: T_P_pat <S2_ct1p, int_hexagon_S2_ct1p>;
-def: T_RR_pat<C4_nbitsset, int_hexagon_C4_nbitsset>;
-def: T_RR_pat<C4_nbitsclr, int_hexagon_C4_nbitsclr>;
-def: T_RI_pat<C4_nbitsclri, int_hexagon_C4_nbitsclri>;
-
-
-class vcmpImm_pat <InstHexagon MI, Intrinsic IntID, PatLeaf immPred> :
- Pat <(IntID (i64 DoubleRegs:$src1), immPred:$src2),
- (MI (i64 DoubleRegs:$src1), immPred:$src2)>;
-
-def : vcmpImm_pat <A4_vcmpbeqi, int_hexagon_A4_vcmpbeqi, u8ImmPred>;
-def : vcmpImm_pat <A4_vcmpbgti, int_hexagon_A4_vcmpbgti, s8ImmPred>;
-def : vcmpImm_pat <A4_vcmpbgtui, int_hexagon_A4_vcmpbgtui, u7ImmPred>;
-
-def : vcmpImm_pat <A4_vcmpheqi, int_hexagon_A4_vcmpheqi, s8ImmPred>;
-def : vcmpImm_pat <A4_vcmphgti, int_hexagon_A4_vcmphgti, s8ImmPred>;
-def : vcmpImm_pat <A4_vcmphgtui, int_hexagon_A4_vcmphgtui, u7ImmPred>;
-
-def : vcmpImm_pat <A4_vcmpweqi, int_hexagon_A4_vcmpweqi, s8ImmPred>;
-def : vcmpImm_pat <A4_vcmpwgti, int_hexagon_A4_vcmpwgti, s8ImmPred>;
-def : vcmpImm_pat <A4_vcmpwgtui, int_hexagon_A4_vcmpwgtui, u7ImmPred>;
-
-def : T_PP_pat<A4_vcmpbeq_any, int_hexagon_A4_vcmpbeq_any>;
+def : T_PP_pat <A2_vaddub, int_hexagon_A2_vaddb_map>;
+def : T_PP_pat <A2_vsubub, int_hexagon_A2_vsubb_map>;
-def : T_RR_pat<A4_cmpbeq, int_hexagon_A4_cmpbeq>;
-def : T_RR_pat<A4_cmpbgt, int_hexagon_A4_cmpbgt>;
-def : T_RR_pat<A4_cmpbgtu, int_hexagon_A4_cmpbgtu>;
-def : T_RR_pat<A4_cmpheq, int_hexagon_A4_cmpheq>;
-def : T_RR_pat<A4_cmphgt, int_hexagon_A4_cmphgt>;
-def : T_RR_pat<A4_cmphgtu, int_hexagon_A4_cmphgtu>;
-
-def : T_RI_pat<A4_cmpbeqi, int_hexagon_A4_cmpbeqi>;
-def : T_RI_pat<A4_cmpbgti, int_hexagon_A4_cmpbgti>;
-def : T_RI_pat<A4_cmpbgtui, int_hexagon_A4_cmpbgtui>;
-
-def : T_RI_pat<A4_cmpheqi, int_hexagon_A4_cmpheqi>;
-def : T_RI_pat<A4_cmphgti, int_hexagon_A4_cmphgti>;
-def : T_RI_pat<A4_cmphgtui, int_hexagon_A4_cmphgtui>;
-
-def : T_RP_pat <A4_boundscheck, int_hexagon_A4_boundscheck>;
-
-def : T_PR_pat<A4_tlbmatch, int_hexagon_A4_tlbmatch>;
-
-def : Pat <(int_hexagon_M4_mpyrr_addr IntRegs:$src1, IntRegs:$src2,
- IntRegs:$src3),
- (M4_mpyrr_addr IntRegs:$src1, IntRegs:$src2, IntRegs:$src3)>;
-
-def : T_IRR_pat <M4_mpyrr_addi, int_hexagon_M4_mpyrr_addi>;
-def : T_IRI_pat <M4_mpyri_addi, int_hexagon_M4_mpyri_addi>;
+// Vector reduce add unsigned halfwords
+def : T_PP_pat <M2_vraddh, int_hexagon_M2_vraddh>;
+
+def: T_P_pat<S2_brevp, int_hexagon_S2_brevp>;
+def: T_P_pat<S2_ct0p, int_hexagon_S2_ct0p>;
+def: T_P_pat<S2_ct1p, int_hexagon_S2_ct1p>;
+
+def: T_Q_RR_pat<C4_nbitsset, int_hexagon_C4_nbitsset>;
+def: T_Q_RR_pat<C4_nbitsclr, int_hexagon_C4_nbitsclr>;
+def: T_Q_RI_pat<C4_nbitsclri, int_hexagon_C4_nbitsclri>;
+
+def : T_Q_PI_pat<A4_vcmpbeqi, int_hexagon_A4_vcmpbeqi>;
+def : T_Q_PI_pat<A4_vcmpbgti, int_hexagon_A4_vcmpbgti>;
+def : T_Q_PI_pat<A4_vcmpbgtui, int_hexagon_A4_vcmpbgtui>;
+def : T_Q_PI_pat<A4_vcmpheqi, int_hexagon_A4_vcmpheqi>;
+def : T_Q_PI_pat<A4_vcmphgti, int_hexagon_A4_vcmphgti>;
+def : T_Q_PI_pat<A4_vcmphgtui, int_hexagon_A4_vcmphgtui>;
+def : T_Q_PI_pat<A4_vcmpweqi, int_hexagon_A4_vcmpweqi>;
+def : T_Q_PI_pat<A4_vcmpwgti, int_hexagon_A4_vcmpwgti>;
+def : T_Q_PI_pat<A4_vcmpwgtui, int_hexagon_A4_vcmpwgtui>;
+def : T_Q_PP_pat<A4_vcmpbeq_any, int_hexagon_A4_vcmpbeq_any>;
+
+def : T_Q_RR_pat<A4_cmpbeq, int_hexagon_A4_cmpbeq>;
+def : T_Q_RR_pat<A4_cmpbgt, int_hexagon_A4_cmpbgt>;
+def : T_Q_RR_pat<A4_cmpbgtu, int_hexagon_A4_cmpbgtu>;
+def : T_Q_RR_pat<A4_cmpheq, int_hexagon_A4_cmpheq>;
+def : T_Q_RR_pat<A4_cmphgt, int_hexagon_A4_cmphgt>;
+def : T_Q_RR_pat<A4_cmphgtu, int_hexagon_A4_cmphgtu>;
+
+def : T_Q_RI_pat<A4_cmpbeqi, int_hexagon_A4_cmpbeqi>;
+def : T_Q_RI_pat<A4_cmpbgti, int_hexagon_A4_cmpbgti>;
+def : T_Q_RI_pat<A4_cmpbgtui, int_hexagon_A4_cmpbgtui>;
+
+def : T_Q_RI_pat<A4_cmpheqi, int_hexagon_A4_cmpheqi>;
+def : T_Q_RI_pat<A4_cmphgti, int_hexagon_A4_cmphgti>;
+def : T_Q_RI_pat<A4_cmphgtui, int_hexagon_A4_cmphgtui>;
+
+def : T_Q_RP_pat<A4_boundscheck, int_hexagon_A4_boundscheck>;
+def : T_Q_PR_pat<A4_tlbmatch, int_hexagon_A4_tlbmatch>;
+
+def : T_RRR_pat <M4_mpyrr_addr, int_hexagon_M4_mpyrr_addr>;
+def : T_IRR_pat <M4_mpyrr_addi, int_hexagon_M4_mpyrr_addi>;
+def : T_IRI_pat <M4_mpyri_addi, int_hexagon_M4_mpyri_addi>;
def : T_RIR_pat <M4_mpyri_addr_u2, int_hexagon_M4_mpyri_addr_u2>;
-def : T_RRI_pat <M4_mpyri_addr, int_hexagon_M4_mpyri_addr>;
-// Multiply 32x32 and use upper result
+def : T_RRI_pat <M4_mpyri_addr, int_hexagon_M4_mpyri_addr>;
def : T_RRR_pat <M4_mac_up_s1_sat, int_hexagon_M4_mac_up_s1_sat>;
def : T_RRR_pat <M4_nac_up_s1_sat, int_hexagon_M4_nac_up_s1_sat>;
@@ -210,41 +199,46 @@ def : T_IRI_pat <S4_subi_lsr_ri, int_hexagon_S4_subi_lsr_ri>;
// Split bitfield
def : T_RI_pat <A4_bitspliti, int_hexagon_A4_bitspliti>;
-def : T_RR_pat <A4_bitsplit, int_hexagon_A4_bitsplit>;
+def : T_RR_pat <A4_bitsplit, int_hexagon_A4_bitsplit>;
-def: T_RR_pat<S4_parity, int_hexagon_S4_parity>;
+def: T_RR_pat<S4_parity, int_hexagon_S4_parity>;
-def: T_RI_pat<S4_ntstbit_i, int_hexagon_S4_ntstbit_i>;
-def: T_RR_pat<S4_ntstbit_r, int_hexagon_S4_ntstbit_r>;
+def: T_Q_RI_pat<S4_ntstbit_i, int_hexagon_S4_ntstbit_i>;
+def: T_Q_RR_pat<S4_ntstbit_r, int_hexagon_S4_ntstbit_r>;
-def: T_RI_pat<S4_clbaddi, int_hexagon_S4_clbaddi>;
-def: T_PI_pat<S4_clbpaddi, int_hexagon_S4_clbpaddi>;
-def: T_P_pat <S4_clbpnorm, int_hexagon_S4_clbpnorm>;
+def: T_RI_pat<S4_clbaddi, int_hexagon_S4_clbaddi>;
+def: T_PI_pat<S4_clbpaddi, int_hexagon_S4_clbpaddi>;
+def: T_P_pat <S4_clbpnorm, int_hexagon_S4_clbpnorm>;
-/********************************************************************
-* ALU32/ALU *
-*********************************************************************/
+//*******************************************************************
+// ALU32/ALU
+//*******************************************************************
// ALU32 / ALU / Logical Operations.
def: T_RR_pat<A4_andn, int_hexagon_A4_andn>;
def: T_RR_pat<A4_orn, int_hexagon_A4_orn>;
-/********************************************************************
-* ALU32/PERM *
-*********************************************************************/
+//*******************************************************************
+// ALU32/PERM
+//*******************************************************************
// Combine Words Into Doublewords.
def: T_RI_pat<A4_combineri, int_hexagon_A4_combineri, s32ImmPred>;
def: T_IR_pat<A4_combineir, int_hexagon_A4_combineir, s32ImmPred>;
-/********************************************************************
-* ALU32/PRED *
-*********************************************************************/
+//*******************************************************************
+// ALU32/PRED
+//*******************************************************************
// Compare
-def : T_RI_pat<C4_cmpneqi, int_hexagon_C4_cmpneqi, s32ImmPred>;
-def : T_RI_pat<C4_cmpltei, int_hexagon_C4_cmpltei, s32ImmPred>;
-def : T_RI_pat<C4_cmplteui, int_hexagon_C4_cmplteui, u32ImmPred>;
+def : T_Q_RI_pat<C4_cmpneqi, int_hexagon_C4_cmpneqi, s32ImmPred>;
+def : T_Q_RI_pat<C4_cmpltei, int_hexagon_C4_cmpltei, s32ImmPred>;
+def : T_Q_RI_pat<C4_cmplteui, int_hexagon_C4_cmplteui, u32ImmPred>;
+
+// Compare To General Register.
+def: T_Q_RR_pat<C4_cmpneq, int_hexagon_C4_cmpneq>;
+def: T_Q_RR_pat<C4_cmplte, int_hexagon_C4_cmplte>;
+def: T_Q_RR_pat<C4_cmplteu, int_hexagon_C4_cmplteu>;
def: T_RR_pat<A4_rcmpeq, int_hexagon_A4_rcmpeq>;
def: T_RR_pat<A4_rcmpneq, int_hexagon_A4_rcmpneq>;
@@ -252,30 +246,23 @@ def: T_RR_pat<A4_rcmpneq, int_hexagon_A4_rcmpneq>;
def: T_RI_pat<A4_rcmpeqi, int_hexagon_A4_rcmpeqi>;
def: T_RI_pat<A4_rcmpneqi, int_hexagon_A4_rcmpneqi>;
-/********************************************************************
-* CR *
-*********************************************************************/
+//*******************************************************************
+// CR
+//*******************************************************************
// CR / Logical Operations On Predicates.
-
-class qi_CRInst_qiqiqi_pat<Intrinsic IntID, InstHexagon Inst> :
- Pat<(i32 (IntID IntRegs:$Rs, IntRegs:$Rt, IntRegs:$Ru)),
- (i32 (C2_tfrpr (Inst (C2_tfrrp IntRegs:$Rs),
- (C2_tfrrp IntRegs:$Rt),
- (C2_tfrrp IntRegs:$Ru))))>;
-
-def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_and_and, C4_and_and>;
-def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_and_andn, C4_and_andn>;
-def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_and_or, C4_and_or>;
-def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_and_orn, C4_and_orn>;
-def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_or_and, C4_or_and>;
-def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_or_andn, C4_or_andn>;
-def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_or_or, C4_or_or>;
-def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_or_orn, C4_or_orn>;
-
-/********************************************************************
-* XTYPE/ALU *
-*********************************************************************/
+def: T_Q_QQQ_pat<C4_and_and, int_hexagon_C4_and_and>;
+def: T_Q_QQQ_pat<C4_and_andn, int_hexagon_C4_and_andn>;
+def: T_Q_QQQ_pat<C4_and_or, int_hexagon_C4_and_or>;
+def: T_Q_QQQ_pat<C4_and_orn, int_hexagon_C4_and_orn>;
+def: T_Q_QQQ_pat<C4_or_and, int_hexagon_C4_or_and>;
+def: T_Q_QQQ_pat<C4_or_andn, int_hexagon_C4_or_andn>;
+def: T_Q_QQQ_pat<C4_or_or, int_hexagon_C4_or_or>;
+def: T_Q_QQQ_pat<C4_or_orn, int_hexagon_C4_or_orn>;
+
+//*******************************************************************
+// XTYPE/ALU
+//*******************************************************************
// Add And Accumulate.
diff --git a/lib/Target/Hexagon/HexagonIntrinsicsV5.td b/lib/Target/Hexagon/HexagonIntrinsicsV5.td
index 60e6b1eb4479..f27a63e20e61 100644
--- a/lib/Target/Hexagon/HexagonIntrinsicsV5.td
+++ b/lib/Target/Hexagon/HexagonIntrinsicsV5.td
@@ -43,8 +43,8 @@ def : T_FF_pat<F2_sffixupn, int_hexagon_F2_sffixupn>;
def : T_FF_pat<F2_sffixupd, int_hexagon_F2_sffixupd>;
def : T_F_pat <F2_sffixupr, int_hexagon_F2_sffixupr>;
-def: qi_CRInst_qiqi_pat<C4_fastcorner9, int_hexagon_C4_fastcorner9>;
-def: qi_CRInst_qiqi_pat<C4_fastcorner9_not, int_hexagon_C4_fastcorner9_not>;
+def : T_Q_QQ_pat<C4_fastcorner9, int_hexagon_C4_fastcorner9>;
+def : T_Q_QQ_pat<C4_fastcorner9_not, int_hexagon_C4_fastcorner9_not>;
def : T_P_pat <S5_popcountp, int_hexagon_S5_popcountp>;
def : T_PI_pat <S5_asrhub_sat, int_hexagon_S5_asrhub_sat>;
@@ -65,15 +65,15 @@ def : T_FFF_pat <F2_sffms_lib, int_hexagon_F2_sffms_lib>;
def : T_FFFQ_pat <F2_sffma_sc, int_hexagon_F2_sffma_sc>;
// Compare floating-point value
-def : T_FF_pat <F2_sfcmpge, int_hexagon_F2_sfcmpge>;
-def : T_FF_pat <F2_sfcmpuo, int_hexagon_F2_sfcmpuo>;
-def : T_FF_pat <F2_sfcmpeq, int_hexagon_F2_sfcmpeq>;
-def : T_FF_pat <F2_sfcmpgt, int_hexagon_F2_sfcmpgt>;
+def : T_Q_FF_pat <F2_sfcmpge, int_hexagon_F2_sfcmpge>;
+def : T_Q_FF_pat <F2_sfcmpuo, int_hexagon_F2_sfcmpuo>;
+def : T_Q_FF_pat <F2_sfcmpeq, int_hexagon_F2_sfcmpeq>;
+def : T_Q_FF_pat <F2_sfcmpgt, int_hexagon_F2_sfcmpgt>;
-def : T_DD_pat <F2_dfcmpeq, int_hexagon_F2_dfcmpeq>;
-def : T_DD_pat <F2_dfcmpgt, int_hexagon_F2_dfcmpgt>;
-def : T_DD_pat <F2_dfcmpge, int_hexagon_F2_dfcmpge>;
-def : T_DD_pat <F2_dfcmpuo, int_hexagon_F2_dfcmpuo>;
+def : T_Q_DD_pat <F2_dfcmpeq, int_hexagon_F2_dfcmpeq>;
+def : T_Q_DD_pat <F2_dfcmpgt, int_hexagon_F2_dfcmpgt>;
+def : T_Q_DD_pat <F2_dfcmpge, int_hexagon_F2_dfcmpge>;
+def : T_Q_DD_pat <F2_dfcmpuo, int_hexagon_F2_dfcmpuo>;
// Create floating-point value
def : T_I_pat <F2_sfimm_p, int_hexagon_F2_sfimm_p>;
@@ -81,8 +81,8 @@ def : T_I_pat <F2_sfimm_n, int_hexagon_F2_sfimm_n>;
def : T_I_pat <F2_dfimm_p, int_hexagon_F2_dfimm_p>;
def : T_I_pat <F2_dfimm_n, int_hexagon_F2_dfimm_n>;
-def : T_DI_pat <F2_dfclass, int_hexagon_F2_dfclass>;
-def : T_FI_pat <F2_sfclass, int_hexagon_F2_sfclass>;
+def : T_Q_DI_pat <F2_dfclass, int_hexagon_F2_dfclass>;
+def : T_Q_FI_pat <F2_sfclass, int_hexagon_F2_sfclass>;
def : T_F_pat <F2_conv_sf2df, int_hexagon_F2_conv_sf2df>;
def : T_D_pat <F2_conv_df2sf, int_hexagon_F2_conv_df2sf>;
def : T_R_pat <F2_conv_uw2sf, int_hexagon_F2_conv_uw2sf>;
diff --git a/lib/Target/Hexagon/HexagonIntrinsicsV60.td b/lib/Target/Hexagon/HexagonIntrinsicsV60.td
index 24a3e4d36de9..82bc91bb3021 100644
--- a/lib/Target/Hexagon/HexagonIntrinsicsV60.td
+++ b/lib/Target/Hexagon/HexagonIntrinsicsV60.td
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+
let isCodeGenOnly = 1 in {
def HEXAGON_V6_vd0_pseudo : CVI_VA_Resource<(outs VectorRegs:$dst),
(ins ),
@@ -22,6 +23,7 @@ def HEXAGON_V6_vd0_pseudo_128B : CVI_VA_Resource<(outs VectorRegs128B:$dst),
"$dst=#0",
[(set VectorRegs128B:$dst, (int_hexagon_V6_vd0_128B ))]>;
}
+
let isPseudo = 1 in
def HEXAGON_V6_vassignp : CVI_VA_Resource<(outs VecDblRegs:$dst),
(ins VecDblRegs:$src1),
@@ -800,7 +802,7 @@ defm : T_VQR_pat <V6_vandqrt_acc, int_hexagon_V6_vandqrt_acc>;
defm : T_QVR_pat <V6_vandvrt_acc, int_hexagon_V6_vandvrt_acc>;
defm : T_QR_pat <V6_vandqrt, int_hexagon_V6_vandqrt>;
defm : T_R_pat <V6_lvsplatw, int_hexagon_V6_lvsplatw>;
-defm : T_R_pat <V6_pred_scalar2, int_hexagon_V6_pred_scalar2>;
+defm : T_R_pat <V6_pred_scalar2, int_hexagon_V6_pred_scalar2>;
defm : T_VR_pat <V6_vandvrt, int_hexagon_V6_vandvrt>;
defm : T_VVR_pat <V6_vlutvvb, int_hexagon_V6_vlutvvb>;
diff --git a/lib/Target/Hexagon/HexagonMCInstLower.cpp b/lib/Target/Hexagon/HexagonMCInstLower.cpp
index 624c0f6cf49d..a5dc002642c8 100644
--- a/lib/Target/Hexagon/HexagonMCInstLower.cpp
+++ b/lib/Target/Hexagon/HexagonMCInstLower.cpp
@@ -32,7 +32,7 @@ namespace llvm {
}
static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol,
- HexagonAsmPrinter &Printer) {
+ HexagonAsmPrinter &Printer, bool MustExtend) {
MCContext &MC = Printer.OutContext;
const MCExpr *ME;
@@ -58,6 +58,21 @@ static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol,
case HexagonII::MO_GPREL:
RelocationType = MCSymbolRefExpr::VK_Hexagon_GPREL;
break;
+ case HexagonII::MO_GDGOT:
+ RelocationType = MCSymbolRefExpr::VK_Hexagon_GD_GOT;
+ break;
+ case HexagonII::MO_GDPLT:
+ RelocationType = MCSymbolRefExpr::VK_Hexagon_GD_PLT;
+ break;
+ case HexagonII::MO_IE:
+ RelocationType = MCSymbolRefExpr::VK_Hexagon_IE;
+ break;
+ case HexagonII::MO_IEGOT:
+ RelocationType = MCSymbolRefExpr::VK_Hexagon_IE_GOT;
+ break;
+ case HexagonII::MO_TPREL:
+ RelocationType = MCSymbolRefExpr::VK_TPREL;
+ break;
}
ME = MCSymbolRefExpr::create(Symbol, RelocationType, MC);
@@ -66,6 +81,8 @@ static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol,
ME = MCBinaryExpr::createAdd(ME, MCConstantExpr::create(MO.getOffset(), MC),
MC);
+ ME = HexagonMCExpr::create(ME, MC);
+ HexagonMCInstrInfo::setMustExtend(*ME, MustExtend);
return MCOperand::createExpr(ME);
}
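The same wrap-and-tag idiom recurs for every operand kind in the lowering below; here is a minimal sketch of it. The helper name makeExtendableImm is invented for illustration; HexagonMCExpr and setMustExtend are the APIs this patch introduces, and the Hexagon MC headers are assumed to be on the include path.

// Hypothetical helper condensing the idiom used throughout HexagonLowerToMC.
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"

static llvm::MCOperand makeExtendableImm(int64_t V, bool MustExtend,
                                         llvm::MCContext &Ctx) {
  auto *E = HexagonMCExpr::create(llvm::MCConstantExpr::create(V, Ctx), Ctx);
  HexagonMCInstrInfo::setMustExtend(*E, MustExtend); // per-operand flag
  return llvm::MCOperand::createExpr(E);
}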
@@ -84,13 +101,11 @@ void llvm::HexagonLowerToMC(const MCInstrInfo &MCII, const MachineInstr *MI,
MCI->setOpcode(MI->getOpcode());
assert(MCI->getOpcode() == static_cast<unsigned>(MI->getOpcode()) &&
"MCI opcode should have been set on construction");
- bool MustExtend = false;
for (unsigned i = 0, e = MI->getNumOperands(); i < e; i++) {
const MachineOperand &MO = MI->getOperand(i);
MCOperand MCO;
- if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
- MustExtend = true;
+ bool MustExtend = MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended;
switch (MO.getType()) {
default:
@@ -105,42 +120,51 @@ void llvm::HexagonLowerToMC(const MCInstrInfo &MCII, const MachineInstr *MI,
APFloat Val = MO.getFPImm()->getValueAPF();
// FP immediates are used only when setting GPRs, so they may be dealt
// with like regular immediates from this point on.
- MCO = MCOperand::createExpr(
- MCConstantExpr::create(*Val.bitcastToAPInt().getRawData(),
- AP.OutContext));
+ auto Expr = HexagonMCExpr::create(
+ MCConstantExpr::create(*Val.bitcastToAPInt().getRawData(),
+ AP.OutContext),
+ AP.OutContext);
+ HexagonMCInstrInfo::setMustExtend(*Expr, MustExtend);
+ MCO = MCOperand::createExpr(Expr);
break;
}
- case MachineOperand::MO_Immediate:
- MCO = MCOperand::createExpr(
- MCConstantExpr::create(MO.getImm(), AP.OutContext));
+ case MachineOperand::MO_Immediate: {
+ auto Expr = HexagonMCExpr::create(
+ MCConstantExpr::create(MO.getImm(), AP.OutContext), AP.OutContext);
+ HexagonMCInstrInfo::setMustExtend(*Expr, MustExtend);
+ MCO = MCOperand::createExpr(Expr);
break;
- case MachineOperand::MO_MachineBasicBlock:
- MCO = MCOperand::createExpr
- (MCSymbolRefExpr::create(MO.getMBB()->getSymbol(),
- AP.OutContext));
+ }
+ case MachineOperand::MO_MachineBasicBlock: {
+ MCExpr const *Expr = MCSymbolRefExpr::create(MO.getMBB()->getSymbol(),
+ AP.OutContext);
+ Expr = HexagonMCExpr::create(Expr, AP.OutContext);
+ HexagonMCInstrInfo::setMustExtend(*Expr, MustExtend);
+ MCO = MCOperand::createExpr(Expr);
break;
+ }
case MachineOperand::MO_GlobalAddress:
- MCO = GetSymbolRef(MO, AP.getSymbol(MO.getGlobal()), AP);
+ MCO = GetSymbolRef(MO, AP.getSymbol(MO.getGlobal()), AP, MustExtend);
break;
case MachineOperand::MO_ExternalSymbol:
MCO = GetSymbolRef(MO, AP.GetExternalSymbolSymbol(MO.getSymbolName()),
- AP);
+ AP, MustExtend);
break;
case MachineOperand::MO_JumpTableIndex:
- MCO = GetSymbolRef(MO, AP.GetJTISymbol(MO.getIndex()), AP);
+ MCO = GetSymbolRef(MO, AP.GetJTISymbol(MO.getIndex()), AP, MustExtend);
break;
case MachineOperand::MO_ConstantPoolIndex:
- MCO = GetSymbolRef(MO, AP.GetCPISymbol(MO.getIndex()), AP);
+ MCO = GetSymbolRef(MO, AP.GetCPISymbol(MO.getIndex()), AP, MustExtend);
break;
case MachineOperand::MO_BlockAddress:
- MCO = GetSymbolRef(MO, AP.GetBlockAddressSymbol(MO.getBlockAddress()),AP);
+ MCO = GetSymbolRef(MO, AP.GetBlockAddressSymbol(MO.getBlockAddress()), AP,
+ MustExtend);
break;
}
MCI->addOperand(MCO);
}
AP.HexagonProcessInstruction(*MCI, *MI);
- HexagonMCInstrInfo::extendIfNeeded(AP.OutContext, MCII, MCB, *MCI,
- MustExtend);
+ HexagonMCInstrInfo::extendIfNeeded(AP.OutContext, MCII, MCB, *MCI);
MCB.addOperand(MCOperand::createInst(MCI));
}
diff --git a/lib/Target/Hexagon/HexagonMachineFunctionInfo.h b/lib/Target/Hexagon/HexagonMachineFunctionInfo.h
index 76723586c66e..26c5b63fec6c 100644
--- a/lib/Target/Hexagon/HexagonMachineFunctionInfo.h
+++ b/lib/Target/Hexagon/HexagonMachineFunctionInfo.h
@@ -27,7 +27,8 @@ class HexagonMachineFunctionInfo : public MachineFunctionInfo {
// returning the value of the returned struct in a register. This field
// holds the virtual register into which the sret argument is passed.
unsigned SRetReturnReg;
- unsigned StackAlignBaseReg;
+ unsigned StackAlignBaseVReg; // Aligned-stack base register (virtual)
+ unsigned StackAlignBasePhysReg; // (physical)
std::vector<MachineInstr*> AllocaAdjustInsts;
int VarArgsFrameIndex;
bool HasClobberLR;
@@ -36,13 +37,12 @@ class HexagonMachineFunctionInfo : public MachineFunctionInfo {
virtual void anchor();
public:
- HexagonMachineFunctionInfo() : SRetReturnReg(0), StackAlignBaseReg(0),
- HasClobberLR(0), HasEHReturn(false) {}
+ HexagonMachineFunctionInfo() : SRetReturnReg(0), StackAlignBaseVReg(0),
+ StackAlignBasePhysReg(0), HasClobberLR(0), HasEHReturn(false) {}
HexagonMachineFunctionInfo(MachineFunction &MF) : SRetReturnReg(0),
- StackAlignBaseReg(0),
- HasClobberLR(0),
- HasEHReturn(false) {}
+ StackAlignBaseVReg(0), StackAlignBasePhysReg(0), HasClobberLR(0),
+ HasEHReturn(false) {}
unsigned getSRetReturnReg() const { return SRetReturnReg; }
void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
@@ -77,8 +77,11 @@ public:
bool hasEHReturn() const { return HasEHReturn; };
void setHasEHReturn(bool H = true) { HasEHReturn = H; };
- void setStackAlignBaseVReg(unsigned R) { StackAlignBaseReg = R; }
- unsigned getStackAlignBaseVReg() const { return StackAlignBaseReg; }
+ void setStackAlignBaseVReg(unsigned R) { StackAlignBaseVReg = R; }
+ unsigned getStackAlignBaseVReg() const { return StackAlignBaseVReg; }
+
+ void setStackAlignBasePhysReg(unsigned R) { StackAlignBasePhysReg = R; }
+ unsigned getStackAlignBasePhysReg() const { return StackAlignBasePhysReg; }
};
} // End llvm namespace
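A small usage sketch for the split accessors added above; the function and its call site are hypothetical, only the setters come from this patch.

// Hypothetical call site: frame lowering records the virtual base register
// chosen before register allocation and the physical register it lands in
// afterwards (the HexagonMachineFunctionInfo header is assumed included).
static void recordAlignedStackBase(llvm::HexagonMachineFunctionInfo &HMFI,
                                   unsigned VReg, unsigned PhysReg) {
  HMFI.setStackAlignBaseVReg(VReg);       // pre-RA: virtual register
  HMFI.setStackAlignBasePhysReg(PhysReg); // post-RA: physical register
}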
diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
index 7a52d6874c33..6dcac0dc7ee2 100644
--- a/lib/Target/Hexagon/HexagonMachineScheduler.cpp
+++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
@@ -13,28 +13,126 @@
//===----------------------------------------------------------------------===//
#include "HexagonMachineScheduler.h"
+#include "HexagonSubtarget.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/IR/Function.h"
+#include <iomanip>
+#include <sstream>
+
+static cl::opt<bool> IgnoreBBRegPressure("ignore-bb-reg-pressure",
+ cl::Hidden, cl::ZeroOrMore, cl::init(false));
+
+static cl::opt<bool> SchedPredsCloser("sched-preds-closer",
+ cl::Hidden, cl::ZeroOrMore, cl::init(true));
+
+static cl::opt<unsigned> SchedDebugVerboseLevel("misched-verbose-level",
+ cl::Hidden, cl::ZeroOrMore, cl::init(1));
+
+static cl::opt<bool> TopUseShorterTie("top-use-shorter-tie",
+ cl::Hidden, cl::ZeroOrMore, cl::init(false));
+
+static cl::opt<bool> BotUseShorterTie("bot-use-shorter-tie",
+ cl::Hidden, cl::ZeroOrMore, cl::init(false));
+
+static cl::opt<bool> DisableTCTie("disable-tc-tie",
+ cl::Hidden, cl::ZeroOrMore, cl::init(false));
+
+static cl::opt<bool> SchedRetvalOptimization("sched-retval-optimization",
+ cl::Hidden, cl::ZeroOrMore, cl::init(true));
+
+// Check if the scheduler should penalize instructions that are available too
+// early due to a zero-latency dependence.
+static cl::opt<bool> CheckEarlyAvail("check-early-avail", cl::Hidden,
+ cl::ZeroOrMore, cl::init(true));
+
using namespace llvm;
#define DEBUG_TYPE "misched"
-/// Platform-specific modifications to DAG.
-void VLIWMachineScheduler::postprocessDAG() {
+class HexagonCallMutation : public ScheduleDAGMutation {
+public:
+ void apply(ScheduleDAGInstrs *DAG) override;
+private:
+ bool shouldTFRICallBind(const HexagonInstrInfo &HII,
+ const SUnit &Inst1, const SUnit &Inst2) const;
+};
+
+// Check if a call and subsequent A2_tfrpi instructions should maintain
+// scheduling affinity. We are looking for the TFRI to be consumed in
+// the next instruction. This should help reduce the instances of
+// double register pairs being allocated and scheduled before a call
+// when not used until after the call. This situation is exacerbated
+// by the fact that we allocate the pair from the callee-saved list,
+// leading to excess spills and restores.
+bool HexagonCallMutation::shouldTFRICallBind(const HexagonInstrInfo &HII,
+ const SUnit &Inst1, const SUnit &Inst2) const {
+ if (Inst1.getInstr()->getOpcode() != Hexagon::A2_tfrpi)
+ return false;
+
+ // TypeXTYPE instructions are 64-bit operations.
+ if (HII.getType(Inst2.getInstr()) == HexagonII::TypeXTYPE)
+ return true;
+ return false;
+}
+
+void HexagonCallMutation::apply(ScheduleDAGInstrs *DAG) {
SUnit* LastSequentialCall = nullptr;
+ unsigned VRegHoldingRet = 0;
+ unsigned RetRegister;
+ SUnit* LastUseOfRet = nullptr;
+ auto &TRI = *DAG->MF.getSubtarget().getRegisterInfo();
+ auto &HII = *DAG->MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
+
// Currently we only catch the situation when a compare gets scheduled
// before the preceding call.
- for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
+ for (unsigned su = 0, e = DAG->SUnits.size(); su != e; ++su) {
// Remember the call.
- if (SUnits[su].getInstr()->isCall())
- LastSequentialCall = &(SUnits[su]);
+ if (DAG->SUnits[su].getInstr()->isCall())
+ LastSequentialCall = &DAG->SUnits[su];
// Look for a compare that defines a predicate.
- else if (SUnits[su].getInstr()->isCompare() && LastSequentialCall)
- SUnits[su].addPred(SDep(LastSequentialCall, SDep::Barrier));
+ else if (DAG->SUnits[su].getInstr()->isCompare() && LastSequentialCall)
+ DAG->SUnits[su].addPred(SDep(LastSequentialCall, SDep::Barrier));
+ // Look for call and tfri* instructions.
+ else if (SchedPredsCloser && LastSequentialCall && su > 1 && su < e-1 &&
+ shouldTFRICallBind(HII, DAG->SUnits[su], DAG->SUnits[su+1]))
+ DAG->SUnits[su].addPred(SDep(&DAG->SUnits[su-1], SDep::Barrier));
+ // Prevent redundant register copies between two calls, which are caused by
+ // both the return value and the argument for the next call being in %R0.
+ // Example:
+ // 1: <call1>
+ // 2: %VregX = COPY %R0
+ // 3: <use of %VregX>
+ // 4: %R0 = ...
+ // 5: <call2>
+ // The scheduler would often swap 3 and 4, so an additional register is
+ // needed. This code inserts a Barrier dependence between 3 & 4 to prevent
+ // this. The same applies for %D0 and %V0/%W0, which are also handled.
+ else if (SchedRetvalOptimization) {
+ const MachineInstr *MI = DAG->SUnits[su].getInstr();
+ if (MI->isCopy() && (MI->readsRegister(Hexagon::R0, &TRI) ||
+ MI->readsRegister(Hexagon::V0, &TRI))) {
+ // %vregX = COPY %R0
+ VRegHoldingRet = MI->getOperand(0).getReg();
+ RetRegister = MI->getOperand(1).getReg();
+ LastUseOfRet = nullptr;
+ } else if (VRegHoldingRet && MI->readsVirtualRegister(VRegHoldingRet))
+ // <use of %vregX>
+ LastUseOfRet = &DAG->SUnits[su];
+ else if (LastUseOfRet && MI->definesRegister(RetRegister, &TRI))
+ // %R0 = ...
+ DAG->SUnits[su].addPred(SDep(LastUseOfRet, SDep::Barrier));
+ }
}
}
+
+/// Save the last formed packet.
+void VLIWResourceModel::savePacket() {
+ OldPacket = Packet;
+}
+
/// Check if scheduling of this SU is possible
/// in the current packet.
/// It is _not_ precise (stateful), it is more like
@@ -48,7 +146,7 @@ bool VLIWResourceModel::isResourceAvailable(SUnit *SU) {
// in the current cycle.
switch (SU->getInstr()->getOpcode()) {
default:
- if (!ResourcesModel->canReserveResources(SU->getInstr()))
+ if (!ResourcesModel->canReserveResources(*SU->getInstr()))
return false;
case TargetOpcode::EXTRACT_SUBREG:
case TargetOpcode::INSERT_SUBREG:
@@ -60,11 +158,19 @@ bool VLIWResourceModel::isResourceAvailable(SUnit *SU) {
break;
}
+ MachineFunction &MF = *SU->getInstr()->getParent()->getParent();
+ auto &QII = *MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
+
// Now see if there are no other dependencies to instructions already
// in the packet.
for (unsigned i = 0, e = Packet.size(); i != e; ++i) {
if (Packet[i]->Succs.size() == 0)
continue;
+
+ // Enable .cur formation.
+ if (QII.mayBeCurLoad(Packet[i]->getInstr()))
+ continue;
+
for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(),
E = Packet[i]->Succs.end(); I != E; ++I) {
// Since we do not add pseudos to packets, might as well
@@ -85,6 +191,7 @@ bool VLIWResourceModel::reserveResources(SUnit *SU) {
// Artificially reset state.
if (!SU) {
ResourcesModel->clearResources();
+ savePacket();
Packet.clear();
TotalPackets++;
return false;
@@ -93,6 +200,7 @@ bool VLIWResourceModel::reserveResources(SUnit *SU) {
// start a new one.
if (!isResourceAvailable(SU)) {
ResourcesModel->clearResources();
+ savePacket();
Packet.clear();
TotalPackets++;
startNewCycle = true;
@@ -100,7 +208,7 @@ bool VLIWResourceModel::reserveResources(SUnit *SU) {
switch (SU->getInstr()->getOpcode()) {
default:
- ResourcesModel->reserveResources(SU->getInstr());
+ ResourcesModel->reserveResources(*SU->getInstr());
break;
case TargetOpcode::EXTRACT_SUBREG:
case TargetOpcode::INSERT_SUBREG:
@@ -129,6 +237,7 @@ bool VLIWResourceModel::reserveResources(SUnit *SU) {
// we start fresh.
if (Packet.size() >= SchedModel->getIssueWidth()) {
ResourcesModel->clearResources();
+ savePacket();
Packet.clear();
TotalPackets++;
startNewCycle = true;
@@ -150,19 +259,12 @@ void VLIWMachineScheduler::schedule() {
buildDAGWithRegPressure();
- // Postprocess the DAG to add platform-specific artificial dependencies.
- postprocessDAG();
-
SmallVector<SUnit*, 8> TopRoots, BotRoots;
findRootsAndBiasEdges(TopRoots, BotRoots);
// Initialize the strategy before modifying the DAG.
SchedImpl->initialize(this);
- // To view Height/Depth correctly, they should be accessed at least once.
- //
- // FIXME: SUnit::dumpAll always recompute depth and height now. The max
- // depth/height could be computed directly from the roots and leaves.
DEBUG(unsigned maxH = 0;
for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
if (SUnits[su].getHeight() > maxH)
@@ -197,6 +299,13 @@ void VLIWMachineScheduler::schedule() {
assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
placeDebugValues();
+
+ DEBUG({
+ unsigned BBNum = begin()->getParent()->getNumber();
+ dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
+ dumpSchedule();
+ dbgs() << '\n';
+ });
}
void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) {
@@ -223,16 +332,18 @@ void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) {
assert((!llvm::ForceTopDown || !llvm::ForceBottomUp) &&
"-misched-topdown incompatible with -misched-bottomup");
+
+ DAG->addMutation(make_unique<HexagonSubtarget::HexagonDAGMutation>());
+ DAG->addMutation(make_unique<HexagonCallMutation>());
}
void ConvergingVLIWScheduler::releaseTopNode(SUnit *SU) {
if (SU->isScheduled)
return;
- for (SUnit::succ_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
- unsigned MinLatency = I->getLatency();
+ for (const SDep &PI : SU->Preds) {
+ unsigned PredReadyCycle = PI.getSUnit()->TopReadyCycle;
+ unsigned MinLatency = PI.getLatency();
#ifndef NDEBUG
Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
#endif
@@ -321,8 +432,8 @@ void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpCycle() {
}
CheckPending = true;
- DEBUG(dbgs() << "*** " << Available.getName() << " cycle "
- << CurrCycle << '\n');
+ DEBUG(dbgs() << "*** Next cycle " << Available.getName() << " cycle "
+ << CurrCycle << '\n');
}
/// Move the boundary of scheduled code by one SUnit.
@@ -414,16 +525,38 @@ SUnit *ConvergingVLIWScheduler::VLIWSchedBoundary::pickOnlyChoice() {
#ifndef NDEBUG
void ConvergingVLIWScheduler::traceCandidate(const char *Label,
- const ReadyQueue &Q,
- SUnit *SU, PressureChange P) {
+ const ReadyQueue &Q, SUnit *SU, int Cost, PressureChange P) {
dbgs() << Label << " " << Q.getName() << " ";
if (P.isValid())
dbgs() << DAG->TRI->getRegPressureSetName(P.getPSet()) << ":"
<< P.getUnitInc() << " ";
else
dbgs() << " ";
+ dbgs() << "cost(" << Cost << ")\t";
SU->dump(DAG);
}
+
+// Very detailed queue dump, to be used with higher verbosity levels.
+void ConvergingVLIWScheduler::readyQueueVerboseDump(
+ const RegPressureTracker &RPTracker, SchedCandidate &Candidate,
+ ReadyQueue &Q) {
+ RegPressureTracker &TempTracker = const_cast<RegPressureTracker &>(RPTracker);
+
+ dbgs() << ">>> " << Q.getName() << "\n";
+ for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
+ RegPressureDelta RPDelta;
+ TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
+ DAG->getRegionCriticalPSets(),
+ DAG->getRegPressure().MaxSetPressure);
+ std::stringstream dbgstr;
+ dbgstr << "SU(" << std::setw(3) << (*I)->NodeNum << ")";
+ dbgs() << dbgstr.str();
+ SchedulingCost(Q, *I, Candidate, RPDelta, true);
+ dbgs() << "\t";
+ (*I)->getInstr()->dump();
+ }
+ dbgs() << "\n";
+}
#endif
/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
@@ -466,6 +599,7 @@ static SUnit *getSingleUnscheduledSucc(SUnit *SU) {
// heuristic components for cost computation.
static const unsigned PriorityOne = 200;
static const unsigned PriorityTwo = 50;
+static const unsigned PriorityThree = 75;
static const unsigned ScaleTwo = 10;
static const unsigned FactorOne = 2;
@@ -482,25 +616,50 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
if (!SU || SU->isScheduled)
return ResCount;
+ MachineInstr *Instr = SU->getInstr();
+
+ DEBUG(if (verbose) dbgs() << ((Q.getID() == TopQID) ? "(top|" : "(bot|"));
// Forced priority is high.
- if (SU->isScheduleHigh)
+ if (SU->isScheduleHigh) {
ResCount += PriorityOne;
+ DEBUG(dbgs() << "H|");
+ }
// Critical path first.
if (Q.getID() == TopQID) {
ResCount += (SU->getHeight() * ScaleTwo);
+ DEBUG(if (verbose) {
+ std::stringstream dbgstr;
+ dbgstr << "h" << std::setw(3) << SU->getHeight() << "|";
+ dbgs() << dbgstr.str();
+ });
+
// If resources are available for it, multiply the
// chance of scheduling.
- if (Top.ResourceModel->isResourceAvailable(SU))
+ if (Top.ResourceModel->isResourceAvailable(SU)) {
ResCount <<= FactorOne;
+ ResCount += PriorityThree;
+ DEBUG(if (verbose) dbgs() << "A|");
+ } else
+ DEBUG(if (verbose) dbgs() << " |");
} else {
ResCount += (SU->getDepth() * ScaleTwo);
+ DEBUG(if (verbose) {
+ std::stringstream dbgstr;
+ dbgstr << "d" << std::setw(3) << SU->getDepth() << "|";
+ dbgs() << dbgstr.str();
+ });
+
// If resources are available for it, multiply the
// chance of scheduling.
- if (Bot.ResourceModel->isResourceAvailable(SU))
+ if (Bot.ResourceModel->isResourceAvailable(SU)) {
ResCount <<= FactorOne;
+ ResCount += PriorityThree;
+ DEBUG(if (verbose) dbgs() << "A|");
+ } else
+ DEBUG(if (verbose) dbgs() << " |");
}
unsigned NumNodesBlocking = 0;
@@ -509,24 +668,121 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
// Look at all of the successors of this node.
// Count the number of nodes that
// this node is the sole unscheduled node for.
- for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I)
- if (getSingleUnscheduledPred(I->getSUnit()) == SU)
+ for (const SDep &SI : SU->Succs)
+ if (getSingleUnscheduledPred(SI.getSUnit()) == SU)
++NumNodesBlocking;
} else {
// How many unscheduled predecessors block this node?
- for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I)
- if (getSingleUnscheduledSucc(I->getSUnit()) == SU)
+ for (const SDep &PI : SU->Preds)
+ if (getSingleUnscheduledSucc(PI.getSUnit()) == SU)
++NumNodesBlocking;
}
ResCount += (NumNodesBlocking * ScaleTwo);
+ DEBUG(if (verbose) {
+ std::stringstream dbgstr;
+ dbgstr << "blk " << std::setw(2) << NumNodesBlocking << ")|";
+ dbgs() << dbgstr.str();
+ });
+
// Factor in reg pressure as a heuristic.
- ResCount -= (Delta.Excess.getUnitInc()*PriorityTwo);
- ResCount -= (Delta.CriticalMax.getUnitInc()*PriorityTwo);
+ if (!IgnoreBBRegPressure) {
+ // Decrease priority by the amount that register pressure exceeds the limit.
+ ResCount -= (Delta.Excess.getUnitInc()*PriorityOne);
+ // Decrease priority if register pressure exceeds the limit.
+ ResCount -= (Delta.CriticalMax.getUnitInc()*PriorityOne);
+ // Decrease priority slightly if register pressure would increase over the
+ // current maximum.
+ ResCount -= (Delta.CurrentMax.getUnitInc()*PriorityTwo);
+ DEBUG(if (verbose) {
+ dbgs() << "RP " << Delta.Excess.getUnitInc() << "/"
+ << Delta.CriticalMax.getUnitInc() <<"/"
+ << Delta.CurrentMax.getUnitInc() << ")|";
+ });
+ }
+
+ // Give a little extra priority to a .cur instruction if there is a resource
+ // available for it.
+ auto &QST = DAG->MF.getSubtarget<HexagonSubtarget>();
+ auto &QII = *QST.getInstrInfo();
+ if (SU->isInstr() && QII.mayBeCurLoad(SU->getInstr())) {
+ if (Q.getID() == TopQID && Top.ResourceModel->isResourceAvailable(SU)) {
+ ResCount += PriorityTwo;
+ DEBUG(if (verbose) dbgs() << "C|");
+ } else if (Q.getID() == BotQID &&
+ Bot.ResourceModel->isResourceAvailable(SU)) {
+ ResCount += PriorityTwo;
+ DEBUG(if (verbose) dbgs() << "C|");
+ }
+ }
+
+ // Give preference to a zero latency instruction if the dependent
+ // instruction is in the current packet.
+ if (Q.getID() == TopQID) {
+ for (const SDep &PI : SU->Preds) {
+ if (!PI.getSUnit()->getInstr()->isPseudo() && PI.isAssignedRegDep() &&
+ PI.getLatency() == 0 &&
+ Top.ResourceModel->isInPacket(PI.getSUnit())) {
+ ResCount += PriorityThree;
+ DEBUG(if (verbose) dbgs() << "Z|");
+ }
+ }
+ } else {
+ for (const SDep &SI : SU->Succs) {
+ if (!SI.getSUnit()->getInstr()->isPseudo() && SI.isAssignedRegDep() &&
+ SI.getLatency() == 0 &&
+ Bot.ResourceModel->isInPacket(SI.getSUnit())) {
+ ResCount += PriorityThree;
+ DEBUG(if (verbose) dbgs() << "Z|");
+ }
+ }
+ }
+
+ // Give less preference to an instruction that will cause a stall with
+ // an instruction in the previous packet.
+ if (QII.isV60VectorInstruction(Instr)) {
+ // Check for stalls in the previous packet.
+ if (Q.getID() == TopQID) {
+ for (auto J : Top.ResourceModel->OldPacket)
+ if (QII.producesStall(J->getInstr(), Instr))
+ ResCount -= PriorityOne;
+ } else {
+ for (auto J : Bot.ResourceModel->OldPacket)
+ if (QII.producesStall(Instr, J->getInstr()))
+ ResCount -= PriorityOne;
+ }
+ }
- DEBUG(if (verbose) dbgs() << " Total(" << ResCount << ")");
+ // If the instruction has a non-zero-latency dependence on an instruction in
+ // the current packet, then it should not be scheduled yet. This occurs
+ // when the dependent instruction is scheduled in a new packet, so the
+ // scheduler updates the current cycle and pending instructions become
+ // available.
+ if (CheckEarlyAvail) {
+ if (Q.getID() == TopQID) {
+ for (const auto &PI : SU->Preds) {
+ if (PI.getLatency() > 0 &&
+ Top.ResourceModel->isInPacket(PI.getSUnit())) {
+ ResCount -= PriorityOne;
+ DEBUG(if (verbose) dbgs() << "D|");
+ }
+ }
+ } else {
+ for (const auto &SI : SU->Succs) {
+ if (SI.getLatency() > 0 &&
+ Bot.ResourceModel->isInPacket(SI.getSUnit())) {
+ ResCount -= PriorityOne;
+ DEBUG(if (verbose) dbgs() << "D|");
+ }
+ }
+ }
+ }
+
+ DEBUG(if (verbose) {
+ std::stringstream dbgstr;
+ dbgstr << "Total " << std::setw(4) << ResCount << ")";
+ dbgs() << dbgstr.str();
+ });
return ResCount;
}
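To make the composition of the heuristic concrete, a standalone worked example with assumed inputs: a top-queue SU of height 5, resources available, exactly one successor for which it is the sole unscheduled predecessor, and no pressure, .cur, stall, or early-availability adjustments.

#include <cstdio>

// Standalone arithmetic check of the cost composition above; the scenario is
// assumed, and the constants mirror PriorityThree/ScaleTwo/FactorOne.
int main() {
  const int PriorityThree = 75, ScaleTwo = 10, FactorOne = 2;
  int ResCount = 0;
  ResCount += 5 * ScaleTwo;  // critical path: height * ScaleTwo = 50
  ResCount <<= FactorOne;    // resources available: 50 << 2 = 200
  ResCount += PriorityThree; // availability bonus: 275
  ResCount += 1 * ScaleTwo;  // one node blocked on this SU: 285
  std::printf("cost = %d\n", ResCount); // prints "cost = 285"
  return 0;
}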
@@ -539,7 +795,9 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
ConvergingVLIWScheduler::CandResult ConvergingVLIWScheduler::
pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
SchedCandidate &Candidate) {
- DEBUG(Q.dump());
+ DEBUG(if (SchedDebugVerboseLevel > 1)
+ readyQueueVerboseDump(RPTracker, Candidate, Q);
+ else Q.dump(););
// getMaxPressureDelta temporarily modifies the tracker.
RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
@@ -556,6 +814,7 @@ pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
// Initialize the candidate if needed.
if (!Candidate.SU) {
+ DEBUG(traceCandidate("DCAND", Q, *I, CurrentCost));
Candidate.SU = *I;
Candidate.RPDelta = RPDelta;
Candidate.SCost = CurrentCost;
@@ -565,7 +824,7 @@ pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
// Best cost.
if (CurrentCost > Candidate.SCost) {
- DEBUG(traceCandidate("CCAND", Q, *I));
+ DEBUG(traceCandidate("CCAND", Q, *I, CurrentCost));
Candidate.SU = *I;
Candidate.RPDelta = RPDelta;
Candidate.SCost = CurrentCost;
@@ -573,6 +832,69 @@ pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
continue;
}
+ // Tie breaker using Timing Class.
+ if (!DisableTCTie) {
+ auto &QST = DAG->MF.getSubtarget<HexagonSubtarget>();
+ auto &QII = *QST.getInstrInfo();
+
+ const MachineInstr *MI = (*I)->getInstr();
+ const MachineInstr *CandI = Candidate.SU->getInstr();
+ const InstrItineraryData *InstrItins = QST.getInstrItineraryData();
+
+ unsigned InstrLatency = QII.getInstrTimingClassLatency(InstrItins, MI);
+ unsigned CandLatency = QII.getInstrTimingClassLatency(InstrItins, CandI);
+ DEBUG(dbgs() << "TC Tie Breaker Cand: "
+ << CandLatency << " Instr:" << InstrLatency << "\n"
+ << *MI << *CandI << "\n");
+ if (Q.getID() == TopQID && CurrentCost == Candidate.SCost) {
+ if (InstrLatency < CandLatency && TopUseShorterTie) {
+ Candidate.SU = *I;
+ Candidate.RPDelta = RPDelta;
+ Candidate.SCost = CurrentCost;
+ FoundCandidate = BestCost;
+ DEBUG(dbgs() << "Used top shorter tie breaker\n");
+ continue;
+ } else if (InstrLatency > CandLatency && !TopUseShorterTie) {
+ Candidate.SU = *I;
+ Candidate.RPDelta = RPDelta;
+ Candidate.SCost = CurrentCost;
+ FoundCandidate = BestCost;
+ DEBUG(dbgs() << "Used top longer tie breaker\n");
+ continue;
+ }
+ } else if (Q.getID() == BotQID && CurrentCost == Candidate.SCost) {
+ if (InstrLatency < CandLatency && BotUseShorterTie) {
+ Candidate.SU = *I;
+ Candidate.RPDelta = RPDelta;
+ Candidate.SCost = CurrentCost;
+ FoundCandidate = BestCost;
+ DEBUG(dbgs() << "Used Bot shorter tie breaker\n");
+ continue;
+ } else if (InstrLatency > CandLatency && !BotUseShorterTie) {
+ Candidate.SU = *I;
+ Candidate.RPDelta = RPDelta;
+ Candidate.SCost = CurrentCost;
+ FoundCandidate = BestCost;
+ DEBUG(dbgs() << "Used Bot longer tie breaker\n");
+ continue;
+ }
+ }
+ }
+
+ if (CurrentCost == Candidate.SCost) {
+ if ((Q.getID() == TopQID &&
+ (*I)->Succs.size() > Candidate.SU->Succs.size()) ||
+ (Q.getID() == BotQID &&
+ (*I)->Preds.size() < Candidate.SU->Preds.size())) {
+ DEBUG(traceCandidate("SPCAND", Q, *I, CurrentCost));
+ Candidate.SU = *I;
+ Candidate.RPDelta = RPDelta;
+ Candidate.SCost = CurrentCost;
+ FoundCandidate = BestCost;
+ continue;
+ }
+ }
+
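The timing-class tie-break above condenses to a small predicate; a hedged distillation for the top queue follows (the function name is invented, the flag corresponds to the -top-use-shorter-tie option defined at the top of this file).

// Assumed distillation of the top-queue timing-class tie-break: on a cost
// tie, TopUseShorterTie prefers the lower-latency instruction; otherwise
// the higher-latency one wins the tie.
static bool topTiePrefersNewInstr(unsigned NewLat, unsigned CandLat,
                                  bool TopUseShorterTie) {
  if (NewLat == CandLat)
    return false; // no TC preference; fall through to later tie-breakers
  return TopUseShorterTie ? NewLat < CandLat : NewLat > CandLat;
}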
// Fall through to original instruction order.
// Only consider node order if Candidate was chosen from this Q.
if (FoundCandidate == NoCand)
@@ -586,10 +908,12 @@ SUnit *ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode) {
// Schedule as far as possible in the direction of no choice. This is most
// efficient, but also provides the best heuristics for CriticalPSets.
if (SUnit *SU = Bot.pickOnlyChoice()) {
+ DEBUG(dbgs() << "Picked only Bottom\n");
IsTopNode = false;
return SU;
}
if (SUnit *SU = Top.pickOnlyChoice()) {
+ DEBUG(dbgs() << "Picked only Top\n");
IsTopNode = true;
return SU;
}
@@ -607,6 +931,7 @@ SUnit *ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode) {
// increase pressure for one of the excess PSets, then schedule in that
// direction first to provide more freedom in the other direction.
if (BotResult == SingleExcess || BotResult == SingleCritical) {
+ DEBUG(dbgs() << "Prefered Bottom Node\n");
IsTopNode = false;
return BotCand.SU;
}
@@ -617,24 +942,29 @@ SUnit *ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode) {
assert(TopResult != NoCand && "failed to find the first candidate");
if (TopResult == SingleExcess || TopResult == SingleCritical) {
+ DEBUG(dbgs() << "Prefered Top Node\n");
IsTopNode = true;
return TopCand.SU;
}
// If either Q has a single candidate that minimizes pressure above the
// original region's pressure pick it.
if (BotResult == SingleMax) {
+ DEBUG(dbgs() << "Prefered Bottom Node SingleMax\n");
IsTopNode = false;
return BotCand.SU;
}
if (TopResult == SingleMax) {
+ DEBUG(dbgs() << "Prefered Top Node SingleMax\n");
IsTopNode = true;
return TopCand.SU;
}
if (TopCand.SCost > BotCand.SCost) {
+ DEBUG(dbgs() << "Prefered Top Node Cost\n");
IsTopNode = true;
return TopCand.SU;
}
// Otherwise prefer the bottom candidate in node order.
+ DEBUG(dbgs() << "Prefered Bottom in Node order\n");
IsTopNode = false;
return BotCand.SU;
}
diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.h b/lib/Target/Hexagon/HexagonMachineScheduler.h
index 60343442e327..51c84a4cee31 100644
--- a/lib/Target/Hexagon/HexagonMachineScheduler.h
+++ b/lib/Target/Hexagon/HexagonMachineScheduler.h
@@ -24,7 +24,6 @@
#include "llvm/CodeGen/ResourcePriorityQueue.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -54,6 +53,10 @@ class VLIWResourceModel {
unsigned TotalPackets;
public:
+ /// Save the last formed packet.
+ std::vector<SUnit*> OldPacket;
+
+public:
VLIWResourceModel(const TargetSubtargetInfo &STI, const TargetSchedModel *SM)
: SchedModel(SM), TotalPackets(0) {
ResourcesModel = STI.getInstrInfo()->CreateTargetScheduleState(STI);
@@ -64,6 +67,8 @@ public:
Packet.resize(SchedModel->getIssueWidth());
Packet.clear();
+ OldPacket.resize(SchedModel->getIssueWidth());
+ OldPacket.clear();
ResourcesModel->clearResources();
}
@@ -86,7 +91,12 @@ public:
bool isResourceAvailable(SUnit *SU);
bool reserveResources(SUnit *SU);
+ void savePacket();
unsigned getTotalPackets() const { return TotalPackets; }
+
+ bool isInPacket(SUnit *SU) const {
+ return std::find(Packet.begin(), Packet.end(), SU) != Packet.end();
+ }
};
/// Extend the standard ScheduleDAGMI to provide more context and override the
@@ -100,8 +110,6 @@ public:
/// Schedule - This is called back from ScheduleDAGInstrs::Run() when it's
/// time to do some work.
void schedule() override;
- /// Perform platform-specific DAG postprocessing.
- void postprocessDAG();
};
/// ConvergingVLIWScheduler shrinks the unscheduled zone using heuristics
@@ -167,6 +175,7 @@ class ConvergingVLIWScheduler : public MachineSchedStrategy {
void init(VLIWMachineScheduler *dag, const TargetSchedModel *smodel) {
DAG = dag;
SchedModel = smodel;
+ IssueCount = 0;
}
bool isTop() const {
@@ -234,7 +243,10 @@ protected:
SchedCandidate &Candidate);
#ifndef NDEBUG
void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU,
- PressureChange P = PressureChange());
+ int Cost, PressureChange P = PressureChange());
+
+ void readyQueueVerboseDump(const RegPressureTracker &RPTracker,
+ SchedCandidate &Candidate, ReadyQueue &Q);
#endif
};
diff --git a/lib/Target/Hexagon/HexagonNewValueJump.cpp b/lib/Target/Hexagon/HexagonNewValueJump.cpp
index 20c4ab112b5f..3ffb9cffc6a6 100644
--- a/lib/Target/Hexagon/HexagonNewValueJump.cpp
+++ b/lib/Target/Hexagon/HexagonNewValueJump.cpp
@@ -21,14 +21,12 @@
//
//
//===----------------------------------------------------------------------===//
-#include "llvm/PassSupport.h"
#include "Hexagon.h"
#include "HexagonInstrInfo.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
@@ -37,14 +35,13 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/PassSupport.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include <map>
using namespace llvm;
#define DEBUG_TYPE "hexagon-nvj"
@@ -87,12 +84,16 @@ namespace {
}
bool runOnMachineFunction(MachineFunction &Fn) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
private:
/// \brief A handle to the branch probability pass.
const MachineBranchProbabilityInfo *MBPI;
- bool isNewValueJumpCandidate(const MachineInstr *MI) const;
+ bool isNewValueJumpCandidate(const MachineInstr &MI) const;
};
} // end of anonymous namespace
@@ -116,7 +117,7 @@ static bool canBeFeederToNewValueJump(const HexagonInstrInfo *QII,
MachineFunction &MF) {
// A predicated instruction cannot be a feeder to NVJ.
- if (QII->isPredicated(II))
+ if (QII->isPredicated(*II))
return false;
// Bail out if feederReg is a paired register (double regs in
@@ -219,25 +220,24 @@ static bool canCompareBeNewValueJump(const HexagonInstrInfo *QII,
MachineBasicBlock::iterator end,
MachineFunction &MF) {
- MachineInstr *MI = II;
+ MachineInstr &MI = *II;
// If the second operand of the compare is an imm, make sure it's in the
// range specified by the arch.
if (!secondReg) {
- int64_t v = MI->getOperand(2).getImm();
+ int64_t v = MI.getOperand(2).getImm();
- if (!(isUInt<5>(v) ||
- ((MI->getOpcode() == Hexagon::C2_cmpeqi ||
- MI->getOpcode() == Hexagon::C2_cmpgti) &&
- (v == -1))))
+ if (!(isUInt<5>(v) || ((MI.getOpcode() == Hexagon::C2_cmpeqi ||
+ MI.getOpcode() == Hexagon::C2_cmpgti) &&
+ (v == -1))))
return false;
}
unsigned cmpReg1, cmpOp2 = 0; // cmpOp2 assignment silences compiler warning.
- cmpReg1 = MI->getOperand(1).getReg();
+ cmpReg1 = MI.getOperand(1).getReg();
if (secondReg) {
- cmpOp2 = MI->getOperand(2).getReg();
+ cmpOp2 = MI.getOperand(2).getReg();
// Make sure that the second register is not from COPY
// At machine code level, we don't need this, but if we decide
@@ -367,22 +367,22 @@ static unsigned getNewValueJumpOpcode(MachineInstr *MI, int reg,
return 0;
}
-bool HexagonNewValueJump::isNewValueJumpCandidate(const MachineInstr *MI)
- const {
- switch (MI->getOpcode()) {
- case Hexagon::C2_cmpeq:
- case Hexagon::C2_cmpeqi:
- case Hexagon::C2_cmpgt:
- case Hexagon::C2_cmpgti:
- case Hexagon::C2_cmpgtu:
- case Hexagon::C2_cmpgtui:
- case Hexagon::C4_cmpneq:
- case Hexagon::C4_cmplte:
- case Hexagon::C4_cmplteu:
- return true;
-
- default:
- return false;
+bool HexagonNewValueJump::isNewValueJumpCandidate(
+ const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ case Hexagon::C2_cmpeq:
+ case Hexagon::C2_cmpeqi:
+ case Hexagon::C2_cmpgt:
+ case Hexagon::C2_cmpgti:
+ case Hexagon::C2_cmpgtu:
+ case Hexagon::C2_cmpgtui:
+ case Hexagon::C4_cmpneq:
+ case Hexagon::C4_cmplte:
+ case Hexagon::C4_cmplteu:
+ return true;
+
+ default:
+ return false;
}
}
@@ -393,6 +393,9 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
<< "********** Function: "
<< MF.getName() << "\n");
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
// If we move NewValueJump before register allocation we'll need live variable
// analysis here too.
@@ -435,28 +438,27 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
// Traverse the basic block - bottom up
for (MachineBasicBlock::iterator MII = MBB->end(), E = MBB->begin();
MII != E;) {
- MachineInstr *MI = --MII;
- if (MI->isDebugValue()) {
+ MachineInstr &MI = *--MII;
+ if (MI.isDebugValue()) {
continue;
}
if ((nvjCount == 0) || (nvjCount > -1 && nvjCount <= nvjGenerated))
break;
- DEBUG(dbgs() << "Instr: "; MI->dump(); dbgs() << "\n");
+ DEBUG(dbgs() << "Instr: "; MI.dump(); dbgs() << "\n");
- if (!foundJump &&
- (MI->getOpcode() == Hexagon::J2_jumpt ||
- MI->getOpcode() == Hexagon::J2_jumpf ||
- MI->getOpcode() == Hexagon::J2_jumptnewpt ||
- MI->getOpcode() == Hexagon::J2_jumptnew ||
- MI->getOpcode() == Hexagon::J2_jumpfnewpt ||
- MI->getOpcode() == Hexagon::J2_jumpfnew)) {
+ if (!foundJump && (MI.getOpcode() == Hexagon::J2_jumpt ||
+ MI.getOpcode() == Hexagon::J2_jumpf ||
+ MI.getOpcode() == Hexagon::J2_jumptnewpt ||
+ MI.getOpcode() == Hexagon::J2_jumptnew ||
+ MI.getOpcode() == Hexagon::J2_jumpfnewpt ||
+ MI.getOpcode() == Hexagon::J2_jumpfnew)) {
// This is where you would insert your compare and the
// instruction that feeds the compare.
jmpPos = MII;
- jmpInstr = MI;
- predReg = MI->getOperand(0).getReg();
+ jmpInstr = &MI;
+ predReg = MI.getOperand(0).getReg();
afterRA = TargetRegisterInfo::isPhysicalRegister(predReg);
// If the if-converter had not messed up the kill flags of the
@@ -485,11 +487,13 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
if (predLive)
break;
- jmpTarget = MI->getOperand(1).getMBB();
+ if (!MI.getOperand(1).isMBB())
+ continue;
+ jmpTarget = MI.getOperand(1).getMBB();
foundJump = true;
- if (MI->getOpcode() == Hexagon::J2_jumpf ||
- MI->getOpcode() == Hexagon::J2_jumpfnewpt ||
- MI->getOpcode() == Hexagon::J2_jumpfnew) {
+ if (MI.getOpcode() == Hexagon::J2_jumpf ||
+ MI.getOpcode() == Hexagon::J2_jumpfnewpt ||
+ MI.getOpcode() == Hexagon::J2_jumpfnew) {
invertPredicate = true;
}
continue;
@@ -498,41 +502,40 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
// No new value jump if there is a barrier. A barrier has to be in its
// own packet. A barrier has zero operands. We conservatively bail out
// here if we see any instruction with zero operands.
- if (foundJump && MI->getNumOperands() == 0)
+ if (foundJump && MI.getNumOperands() == 0)
break;
- if (foundJump &&
- !foundCompare &&
- MI->getOperand(0).isReg() &&
- MI->getOperand(0).getReg() == predReg) {
+ if (foundJump && !foundCompare && MI.getOperand(0).isReg() &&
+ MI.getOperand(0).getReg() == predReg) {
// Not all compares can be new-value compares. Arch Spec: 7.6.1.1
if (isNewValueJumpCandidate(MI)) {
- assert((MI->getDesc().isCompare()) &&
+ assert(
+ (MI.getDesc().isCompare()) &&
"Only compare instruction can be collapsed into New Value Jump");
- isSecondOpReg = MI->getOperand(2).isReg();
+ isSecondOpReg = MI.getOperand(2).isReg();
if (!canCompareBeNewValueJump(QII, QRI, MII, predReg, isSecondOpReg,
afterRA, jmpPos, MF))
break;
- cmpInstr = MI;
+ cmpInstr = &MI;
cmpPos = MII;
foundCompare = true;
// We need cmpReg1 and cmpOp2 (imm or reg) while building
// new value jump instruction.
- cmpReg1 = MI->getOperand(1).getReg();
- if (MI->getOperand(1).isKill())
+ cmpReg1 = MI.getOperand(1).getReg();
+ if (MI.getOperand(1).isKill())
MO1IsKill = true;
if (isSecondOpReg) {
- cmpOp2 = MI->getOperand(2).getReg();
- if (MI->getOperand(2).isKill())
+ cmpOp2 = MI.getOperand(2).getReg();
+ if (MI.getOperand(2).isKill())
MO2IsKill = true;
} else
- cmpOp2 = MI->getOperand(2).getImm();
+ cmpOp2 = MI.getOperand(2).getImm();
continue;
}
}
@@ -545,13 +548,12 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
bool foundFeeder = false;
MachineBasicBlock::iterator feederPos = MII;
- if (MI->getOperand(0).isReg() &&
- MI->getOperand(0).isDef() &&
- (MI->getOperand(0).getReg() == cmpReg1 ||
- (isSecondOpReg &&
- MI->getOperand(0).getReg() == (unsigned) cmpOp2))) {
+ if (MI.getOperand(0).isReg() && MI.getOperand(0).isDef() &&
+ (MI.getOperand(0).getReg() == cmpReg1 ||
+ (isSecondOpReg &&
+ MI.getOperand(0).getReg() == (unsigned)cmpOp2))) {
- unsigned feederReg = MI->getOperand(0).getReg();
+ unsigned feederReg = MI.getOperand(0).getReg();
// First try to see if we can get the feeder from the first operand
// of the compare. If we cannot, and if secondOpReg is true
@@ -600,15 +602,15 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
// the operands of the feeder.
bool updatedIsKill = false;
- for (unsigned i = 0; i < MI->getNumOperands(); i++) {
- MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = 0; i < MI.getNumOperands(); i++) {
+ MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() && MO.isUse()) {
unsigned feederReg = MO.getReg();
for (MachineBasicBlock::iterator localII = feederPos,
end = jmpPos; localII != end; localII++) {
- MachineInstr *localMI = localII;
- for (unsigned j = 0; j < localMI->getNumOperands(); j++) {
- MachineOperand &localMO = localMI->getOperand(j);
+ MachineInstr &localMI = *localII;
+ for (unsigned j = 0; j < localMI.getNumOperands(); j++) {
+ MachineOperand &localMO = localMI.getOperand(j);
if (localMO.isReg() && localMO.isUse() &&
localMO.isKill() && feederReg == localMO.getReg()) {
// We found that there is a kill of a use register
@@ -625,12 +627,12 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) {
if (updatedIsKill) break;
}
- MBB->splice(jmpPos, MI->getParent(), MI);
- MBB->splice(jmpPos, MI->getParent(), cmpInstr);
- DebugLoc dl = MI->getDebugLoc();
+ MBB->splice(jmpPos, MI.getParent(), MI);
+ MBB->splice(jmpPos, MI.getParent(), cmpInstr);
+ DebugLoc dl = MI.getDebugLoc();
MachineInstr *NewMI;
- assert((isNewValueJumpCandidate(cmpInstr)) &&
+ assert((isNewValueJumpCandidate(*cmpInstr)) &&
"This compare is not a New Value Jump candidate.");
unsigned opc = getNewValueJumpOpcode(cmpInstr, cmpOp2,
isSecondOpNewified,
diff --git a/lib/Target/Hexagon/HexagonOperands.td b/lib/Target/Hexagon/HexagonOperands.td
index fbd29cd4d6d1..11092d2b92fe 100644
--- a/lib/Target/Hexagon/HexagonOperands.td
+++ b/lib/Target/Hexagon/HexagonOperands.td
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
def s32ImmOperand : AsmOperandClass { let Name = "s32Imm"; }
+def s23_2ImmOperand : AsmOperandClass { let Name = "s23_2Imm"; }
def s8ImmOperand : AsmOperandClass { let Name = "s8Imm"; }
def s8Imm64Operand : AsmOperandClass { let Name = "s8Imm64"; }
def s6ImmOperand : AsmOperandClass { let Name = "s6Imm"; }
@@ -48,6 +49,7 @@ let OperandType = "OPERAND_IMMEDIATE",
DecoderMethod = "unsignedImmDecoder" in {
def s32Imm : Operand<i32> { let ParserMatchClass = s32ImmOperand;
let DecoderMethod = "s32ImmDecoder"; }
+ def s23_2Imm : Operand<i32> { let ParserMatchClass = s23_2ImmOperand; }
def s8Imm : Operand<i32> { let ParserMatchClass = s8ImmOperand;
let DecoderMethod = "s8ImmDecoder"; }
def s8Imm64 : Operand<i64> { let ParserMatchClass = s8Imm64Operand;
@@ -345,22 +347,6 @@ def u1ImmPred32 : PatLeaf<(i32 imm), [{
return isUInt<1>(v);
}]>;
-def m5BImmPred : PatLeaf<(i32 imm), [{
- // m5BImmPred predicate - True if the (char) number is in range -1 .. -31
- // and will fit in a 5 bit field when made positive, for use in memops.
- // this is specific to the zero extending of a negative by CombineInstr
- int8_t v = (int8_t)N->getSExtValue();
- return (-31 <= v && v <= -1);
-}]>;
-
-def m5HImmPred : PatLeaf<(i32 imm), [{
- // m5HImmPred predicate - True if the (short) number is in range -1 .. -31
- // and will fit in a 5 bit field when made positive, for use in memops.
- // this is specific to the zero extending of a negative by CombineInstr
- int16_t v = (int16_t)N->getSExtValue();
- return (-31 <= v && v <= -1);
-}]>;
-
def m5ImmPred : PatLeaf<(i32 imm), [{
// m5ImmPred predicate - True if the number is in range -1 .. -31
// and will fit in a 5 bit field when made positive, for use in memops.
@@ -402,60 +388,6 @@ def Clr5ImmPred : PatLeaf<(i32 imm), [{
return ImmIsSingleBit(v);
}]>;
-def SetClr5ImmPred : PatLeaf<(i32 imm), [{
- // True if the immediate is in range 0..31.
- int32_t v = (int32_t)N->getSExtValue();
- return (v >= 0 && v <= 31);
-}]>;
-
-def Set4ImmPred : PatLeaf<(i32 imm), [{
- // Set4ImmPred predicate - True if the number is in the series of values:
- // [ 2^0, 2^1, ... 2^15 ].
- // For use in setbit immediate.
- uint16_t v = (int16_t)N->getSExtValue();
- // Constrain to 16 bits, and then check for single bit.
- return ImmIsSingleBit(v);
-}]>;
-
-def Clr4ImmPred : PatLeaf<(i32 imm), [{
- // Clr4ImmPred predicate - True if the number is in the series of
- // bit negated values:
- // [ 2^0, 2^1, ... 2^15 ].
- // For use in setbit and clrbit immediate.
- uint16_t v = ~ (int16_t)N->getSExtValue();
- // Constrain to 16 bits, and then check for single bit.
- return ImmIsSingleBit(v);
-}]>;
-
-def SetClr4ImmPred : PatLeaf<(i32 imm), [{
- // True if the immediate is in the range 0..15.
- int16_t v = (int16_t)N->getSExtValue();
- return (v >= 0 && v <= 15);
-}]>;
-
-def Set3ImmPred : PatLeaf<(i32 imm), [{
- // True if the number is in the series of values: [ 2^0, 2^1, ... 2^7 ].
- // For use in setbit immediate.
- uint8_t v = (int8_t)N->getSExtValue();
- // Constrain to 8 bits, and then check for single bit.
- return ImmIsSingleBit(v);
-}]>;
-
-def Clr3ImmPred : PatLeaf<(i32 imm), [{
- // True if the number is in the series of bit negated values: [ 2^0, 2^1, ... 2^7 ].
- // For use in setbit and clrbit immediate.
- uint8_t v = ~ (int8_t)N->getSExtValue();
- // Constrain to 8 bits, and then check for single bit.
- return ImmIsSingleBit(v);
-}]>;
-
-def SetClr3ImmPred : PatLeaf<(i32 imm), [{
- // True if the immediate is in the range 0..7.
- int8_t v = (int8_t)N->getSExtValue();
- return (v >= 0 && v <= 7);
-}]>;
-
-
// Extendable immediate operands.
def f32ExtOperand : AsmOperandClass { let Name = "f32Ext"; }
def s16ExtOperand : AsmOperandClass { let Name = "s16Ext"; }
diff --git a/lib/Target/Hexagon/HexagonOptAddrMode.cpp b/lib/Target/Hexagon/HexagonOptAddrMode.cpp
new file mode 100644
index 000000000000..4dff0dbc2b71
--- /dev/null
+++ b/lib/Target/Hexagon/HexagonOptAddrMode.cpp
@@ -0,0 +1,663 @@
+//===--- HexagonOptAddrMode.cpp -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This implements a Hexagon-specific pass to optimize the addressing modes of
+// load/store instructions.
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "opt-addr-mode"
+
+#include "HexagonTargetMachine.h"
+#include "RDFGraph.h"
+#include "RDFLiveness.h"
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineDominanceFrontier.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+static cl::opt<int> CodeGrowthLimit("hexagon-amode-growth-limit",
+ cl::Hidden, cl::init(0), cl::desc("Code growth limit for address mode "
+ "optimization"));
+
+using namespace llvm;
+using namespace rdf;
+
+namespace llvm {
+ FunctionPass *createHexagonOptAddrMode();
+ void initializeHexagonOptAddrModePass(PassRegistry &);
+}
+
+namespace {
+class HexagonOptAddrMode : public MachineFunctionPass {
+public:
+ static char ID;
+ HexagonOptAddrMode()
+ : MachineFunctionPass(ID), HII(0), MDT(0), DFG(0), LV(0) {
+ PassRegistry &R = *PassRegistry::getPassRegistry();
+ initializeHexagonOptAddrModePass(R);
+ }
+ const char *getPassName() const override {
+ return "Optimize addressing mode of load/store";
+ }
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ MachineFunctionPass::getAnalysisUsage(AU);
+ AU.addRequired<MachineDominatorTree>();
+ AU.addRequired<MachineDominanceFrontier>();
+ AU.setPreservesAll();
+ }
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+ typedef DenseSet<MachineInstr *> MISetType;
+ typedef DenseMap<MachineInstr *, bool> InstrEvalMap;
+ const HexagonInstrInfo *HII;
+ MachineDominatorTree *MDT;
+ DataFlowGraph *DFG;
+ DataFlowGraph::DefStackMap DefM;
+ std::map<RegisterRef, std::map<NodeId, NodeId>> RDefMap;
+ Liveness *LV;
+ MISetType Deleted;
+
+ bool processBlock(NodeAddr<BlockNode *> BA);
+ bool xformUseMI(MachineInstr *TfrMI, MachineInstr *UseMI,
+ NodeAddr<UseNode *> UseN, unsigned UseMOnum);
+ bool analyzeUses(unsigned DefR, const NodeList &UNodeList,
+ InstrEvalMap &InstrEvalResult, short &SizeInc);
+ bool hasRepForm(MachineInstr *MI, unsigned TfrDefR);
+ bool canRemoveAddasl(NodeAddr<StmtNode *> AddAslSN, MachineInstr *MI,
+ const NodeList &UNodeList);
+ void getAllRealUses(NodeAddr<StmtNode *> SN, NodeList &UNodeList);
+ bool allValidCandidates(NodeAddr<StmtNode *> SA, NodeList &UNodeList);
+ short getBaseWithLongOffset(const MachineInstr *MI) const;
+ void updateMap(NodeAddr<InstrNode *> IA);
+ bool constructDefMap(MachineBasicBlock *B);
+ bool changeStore(MachineInstr *OldMI, MachineOperand ImmOp,
+ unsigned ImmOpNum);
+ bool changeLoad(MachineInstr *OldMI, MachineOperand ImmOp, unsigned ImmOpNum);
+ bool changeAddAsl(NodeAddr<UseNode *> AddAslUN, MachineInstr *AddAslMI,
+ const MachineOperand &ImmOp, unsigned ImmOpNum);
+};
+}
+
+char HexagonOptAddrMode::ID = 0;
+
+INITIALIZE_PASS_BEGIN(HexagonOptAddrMode, "opt-amode",
+ "Optimize addressing mode", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MachineDominanceFrontier)
+INITIALIZE_PASS_END(HexagonOptAddrMode, "opt-amode", "Optimize addressing mode",
+ false, false)
+
+bool HexagonOptAddrMode::hasRepForm(MachineInstr *MI, unsigned TfrDefR) {
+ const MCInstrDesc &MID = MI->getDesc();
+
+ if ((!MID.mayStore() && !MID.mayLoad()) || HII->isPredicated(*MI))
+ return false;
+
+ if (MID.mayStore()) {
+ MachineOperand StOp = MI->getOperand(MI->getNumOperands() - 1);
+ if (StOp.isReg() && StOp.getReg() == TfrDefR)
+ return false;
+ }
+
+ if (HII->getAddrMode(MI) == HexagonII::BaseRegOffset)
+ // Transform to absolute plus register offset.
+ return (HII->getBaseWithLongOffset(MI) >= 0);
+ else if (HII->getAddrMode(MI) == HexagonII::BaseImmOffset)
+ // Transform to absolute addressing mode.
+ return (HII->getAbsoluteForm(MI) >= 0);
+
+ return false;
+}
+
+// Check if the addasl instruction can be removed. This is possible only
+// if it feeds only load/store instructions with a base + register
+// offset, as those instructions can be transformed to use 'absolute plus
+// shifted register offset'.
+// For example:
+// Rs = ##foo
+// Rx = addasl(Rs, Rt, #2)
+// Rd = memw(Rx + #28)
+// The three instructions above can be replaced with Rd = memw(Rt<<#2 + ##foo+28).
+
+bool HexagonOptAddrMode::canRemoveAddasl(NodeAddr<StmtNode *> AddAslSN,
+ MachineInstr *MI,
+ const NodeList &UNodeList) {
+ // Check the offset size in addasl; bail out if the offset exceeds 3.
+ const MachineOperand &OffsetOp = MI->getOperand(3);
+ if (!OffsetOp.isImm() || OffsetOp.getImm() > 3)
+ return false;
+
+ unsigned OffsetReg = MI->getOperand(2).getReg();
+ RegisterRef OffsetRR;
+ NodeId OffsetRegRD = 0;
+ for (NodeAddr<UseNode *> UA : AddAslSN.Addr->members_if(DFG->IsUse, *DFG)) {
+ RegisterRef RR = UA.Addr->getRegRef();
+ if (OffsetReg == RR.Reg) {
+ OffsetRR = RR;
+ OffsetRegRD = UA.Addr->getReachingDef();
+ }
+ }
+
+ for (auto I = UNodeList.rbegin(), E = UNodeList.rend(); I != E; ++I) {
+ NodeAddr<UseNode *> UA = *I;
+ NodeAddr<InstrNode *> IA = UA.Addr->getOwner(*DFG);
+ if ((UA.Addr->getFlags() & NodeAttrs::PhiRef) ||
+ RDefMap[OffsetRR][IA.Id] != OffsetRegRD)
+ return false;
+
+ MachineInstr *UseMI = NodeAddr<StmtNode *>(IA).Addr->getCode();
+ NodeAddr<DefNode *> OffsetRegDN = DFG->addr<DefNode *>(OffsetRegRD);
+ // Reaching Def to an offset register can't be a phi.
+ if ((OffsetRegDN.Addr->getFlags() & NodeAttrs::PhiRef) &&
+ MI->getParent() != UseMI->getParent())
+ return false;
+
+ const MCInstrDesc &UseMID = UseMI->getDesc();
+ if ((!UseMID.mayLoad() && !UseMID.mayStore()) ||
+ HII->getAddrMode(UseMI) != HexagonII::BaseImmOffset ||
+ getBaseWithLongOffset(UseMI) < 0)
+ return false;
+
+ // Addasl output can't be a store value.
+ if (UseMID.mayStore() && UseMI->getOperand(2).isReg() &&
+ UseMI->getOperand(2).getReg() == MI->getOperand(0).getReg())
+ return false;
+
+ for (auto &Mo : UseMI->operands())
+ if (Mo.isFI())
+ return false;
+ }
+ return true;
+}
+
+bool HexagonOptAddrMode::allValidCandidates(NodeAddr<StmtNode *> SA,
+ NodeList &UNodeList) {
+ for (auto I = UNodeList.rbegin(), E = UNodeList.rend(); I != E; ++I) {
+ NodeAddr<UseNode *> UN = *I;
+ RegisterRef UR = UN.Addr->getRegRef();
+ NodeSet Visited, Defs;
+ const auto &ReachingDefs = LV->getAllReachingDefsRec(UR, UN, Visited, Defs);
+ if (ReachingDefs.size() > 1) {
+ DEBUG({
+ dbgs() << "*** Multiple Reaching Defs found!!! ***\n";
+ for (auto DI : ReachingDefs) {
+ NodeAddr<UseNode *> DA = DFG->addr<UseNode *>(DI);
+ NodeAddr<StmtNode *> TempIA = DA.Addr->getOwner(*DFG);
+ dbgs() << "\t\t[Reaching Def]: "
+ << Print<NodeAddr<InstrNode *>>(TempIA, *DFG) << "\n";
+ }
+ });
+ return false;
+ }
+ }
+ return true;
+}
+
+void HexagonOptAddrMode::getAllRealUses(NodeAddr<StmtNode *> SA,
+ NodeList &UNodeList) {
+ for (NodeAddr<DefNode *> DA : SA.Addr->members_if(DFG->IsDef, *DFG)) {
+ DEBUG(dbgs() << "\t\t[DefNode]: " << Print<NodeAddr<DefNode *>>(DA, *DFG)
+ << "\n");
+ RegisterRef DR = DA.Addr->getRegRef();
+ auto UseSet = LV->getAllReachedUses(DR, DA);
+
+ for (auto UI : UseSet) {
+ NodeAddr<UseNode *> UA = DFG->addr<UseNode *>(UI);
+ DEBUG({
+ NodeAddr<StmtNode *> TempIA = UA.Addr->getOwner(*DFG);
+ dbgs() << "\t\t\t[Reached Use]: "
+ << Print<NodeAddr<InstrNode *>>(TempIA, *DFG) << "\n";
+ });
+
+ if (UA.Addr->getFlags() & NodeAttrs::PhiRef) {
+ NodeAddr<PhiNode *> PA = UA.Addr->getOwner(*DFG);
+ NodeId id = PA.Id;
+ const Liveness::RefMap &phiUse = LV->getRealUses(id);
+ DEBUG(dbgs() << "\t\t\t\tphi real Uses"
+ << Print<Liveness::RefMap>(phiUse, *DFG) << "\n");
+ if (phiUse.size() > 0) {
+ for (auto I : phiUse) {
+ if (DR != I.first)
+ continue;
+ auto phiUseSet = I.second;
+ for (auto phiUI : phiUseSet) {
+ NodeAddr<UseNode *> phiUA = DFG->addr<UseNode *>(phiUI);
+ UNodeList.push_back(phiUA);
+ }
+ }
+ }
+ } else
+ UNodeList.push_back(UA);
+ }
+ }
+}
+
+bool HexagonOptAddrMode::analyzeUses(unsigned tfrDefR,
+ const NodeList &UNodeList,
+ InstrEvalMap &InstrEvalResult,
+ short &SizeInc) {
+ bool KeepTfr = false;
+ bool HasRepInstr = false;
+ InstrEvalResult.clear();
+
+ for (auto I = UNodeList.rbegin(), E = UNodeList.rend(); I != E; ++I) {
+ bool CanBeReplaced = false;
+ NodeAddr<UseNode *> UN = *I;
+ NodeAddr<StmtNode *> SN = UN.Addr->getOwner(*DFG);
+ MachineInstr *MI = SN.Addr->getCode();
+ const MCInstrDesc &MID = MI->getDesc();
+ if ((MID.mayLoad() || MID.mayStore())) {
+ if (!hasRepForm(MI, tfrDefR)) {
+ KeepTfr = true;
+ continue;
+ }
+ SizeInc++;
+ CanBeReplaced = true;
+ } else if (MI->getOpcode() == Hexagon::S2_addasl_rrri) {
+ NodeList AddaslUseList;
+
+ DEBUG(dbgs() << "\nGetting ReachedUses for === " << *MI << "\n");
+ getAllRealUses(SN, AddaslUseList);
+ // Process phi nodes.
+ if (allValidCandidates(SN, AddaslUseList) &&
+ canRemoveAddasl(SN, MI, AddaslUseList)) {
+ SizeInc += AddaslUseList.size();
+ SizeInc -= 1; // Reduce size by 1 as addasl itself can be removed.
+ CanBeReplaced = true;
+ } else
+ SizeInc++;
+ } else
+ // Currently, only load/store and addasl are handled.
+ // Some other instructions to consider:
+ // A2_add -> A2_addi
+ // M4_mpyrr_addr -> M4_mpyrr_addi
+ KeepTfr = true;
+
+ InstrEvalResult[MI] = CanBeReplaced;
+ HasRepInstr |= CanBeReplaced;
+ }
+
+ // Reduce total size by 2 if original tfr can be deleted.
+ if (!KeepTfr)
+ SizeInc -= 2;
+
+ return HasRepInstr;
+}
+
+bool HexagonOptAddrMode::changeLoad(MachineInstr *OldMI, MachineOperand ImmOp,
+ unsigned ImmOpNum) {
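+ // Fold the global into the load, picking a long-offset, absolute, or imm-offset form based on the addressing mode.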
+ bool Changed = false;
+ MachineBasicBlock *BB = OldMI->getParent();
+ auto UsePos = MachineBasicBlock::iterator(OldMI);
+ MachineBasicBlock::instr_iterator InsertPt = UsePos.getInstrIterator();
+ ++InsertPt;
+ unsigned OpStart;
+ unsigned OpEnd = OldMI->getNumOperands();
+ MachineInstrBuilder MIB;
+
+ if (ImmOpNum == 1) {
+ if (HII->getAddrMode(OldMI) == HexagonII::BaseRegOffset) {
+ short NewOpCode = HII->getBaseWithLongOffset(OldMI);
+ assert(NewOpCode >= 0 && "Invalid New opcode\n");
+ MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode));
+ MIB.addOperand(OldMI->getOperand(0));
+ MIB.addOperand(OldMI->getOperand(2));
+ MIB.addOperand(OldMI->getOperand(3));
+ MIB.addOperand(ImmOp);
+ OpStart = 4;
+ Changed = true;
+ } else if (HII->getAddrMode(OldMI) == HexagonII::BaseImmOffset) {
+ short NewOpCode = HII->getAbsoluteForm(OldMI);
+ assert(NewOpCode >= 0 && "Invalid New opcode\n");
+ MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode))
+ .addOperand(OldMI->getOperand(0));
+ const GlobalValue *GV = ImmOp.getGlobal();
+ int64_t Offset = ImmOp.getOffset() + OldMI->getOperand(2).getImm();
+
+ MIB.addGlobalAddress(GV, Offset, ImmOp.getTargetFlags());
+ OpStart = 3;
+ Changed = true;
+ } else
+ Changed = false;
+
+ // Guard the debug output: MIB is only valid when a replacement was built.
+ if (Changed) {
+ DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n");
+ DEBUG(dbgs() << "[TO]: " << MIB << "\n");
+ }
+ } else if (ImmOpNum == 2 && OldMI->getOperand(3).getImm() == 0) {
+ short NewOpCode = HII->xformRegToImmOffset(OldMI);
+ assert(NewOpCode >= 0 && "Invalid New opcode\n");
+ MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode));
+ MIB.addOperand(OldMI->getOperand(0));
+ MIB.addOperand(OldMI->getOperand(1));
+ MIB.addOperand(ImmOp);
+ OpStart = 4;
+ Changed = true;
+ DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n");
+ DEBUG(dbgs() << "[TO]: " << MIB << "\n");
+ }
+
+ if (Changed)
+ for (unsigned i = OpStart; i < OpEnd; ++i)
+ MIB.addOperand(OldMI->getOperand(i));
+
+ return Changed;
+}
+
+bool HexagonOptAddrMode::changeStore(MachineInstr *OldMI, MachineOperand ImmOp,
+ unsigned ImmOpNum) {
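+ // Same rewriting as for loads, adjusted for the store's operand layout.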
+ bool Changed = false;
+ unsigned OpStart;
+ unsigned OpEnd = OldMI->getNumOperands();
+ MachineBasicBlock *BB = OldMI->getParent();
+ auto UsePos = MachineBasicBlock::iterator(OldMI);
+ MachineBasicBlock::instr_iterator InsertPt = UsePos.getInstrIterator();
+ ++InsertPt;
+ MachineInstrBuilder MIB;
+ if (ImmOpNum == 0) {
+ if (HII->getAddrMode(OldMI) == HexagonII::BaseRegOffset) {
+ short NewOpCode = HII->getBaseWithLongOffset(OldMI);
+ assert(NewOpCode >= 0 && "Invalid New opcode\n");
+ MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode));
+ MIB.addOperand(OldMI->getOperand(1));
+ MIB.addOperand(OldMI->getOperand(2));
+ MIB.addOperand(ImmOp);
+ MIB.addOperand(OldMI->getOperand(3));
+ OpStart = 4;
+ Changed = true;
+ } else if (HII->getAddrMode(OldMI) == HexagonII::BaseImmOffset) {
+ short NewOpCode = HII->getAbsoluteForm(OldMI);
+ assert(NewOpCode >= 0 && "Invalid New opcode\n");
+ MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode));
+ const GlobalValue *GV = ImmOp.getGlobal();
+ int64_t Offset = ImmOp.getOffset() + OldMI->getOperand(1).getImm();
+ MIB.addGlobalAddress(GV, Offset, ImmOp.getTargetFlags());
+ MIB.addOperand(OldMI->getOperand(2));
+ OpStart = 3;
+ Changed = true;
+ }
+ // Guard the debug output: MIB is only valid when a replacement was built.
+ if (Changed) {
+ DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n");
+ DEBUG(dbgs() << "[TO]: " << MIB << "\n");
+ }
+ } else if (ImmOpNum == 1 && OldMI->getOperand(2).getImm() == 0) {
+ short NewOpCode = HII->xformRegToImmOffset(OldMI);
+ assert(NewOpCode >= 0 && "Invalid New opcode\n");
+ MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode));
+ MIB.addOperand(OldMI->getOperand(0));
+ MIB.addOperand(ImmOp);
+ MIB.addOperand(OldMI->getOperand(1));
+ OpStart = 2;
+ Changed = true;
+ DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n");
+ DEBUG(dbgs() << "[TO]: " << MIB << "\n");
+ }
+ if (Changed)
+ for (unsigned i = OpStart; i < OpEnd; ++i)
+ MIB.addOperand(OldMI->getOperand(i));
+
+ return Changed;
+}
+
+short HexagonOptAddrMode::getBaseWithLongOffset(const MachineInstr *MI) const {
+ if (HII->getAddrMode(MI) == HexagonII::BaseImmOffset) {
+ short TempOpCode = HII->getBaseWithRegOffset(MI);
+ return HII->getBaseWithLongOffset(TempOpCode);
+ }
+ return HII->getBaseWithLongOffset(MI);
+}
+
+bool HexagonOptAddrMode::changeAddAsl(NodeAddr<UseNode *> AddAslUN,
+ MachineInstr *AddAslMI,
+ const MachineOperand &ImmOp,
+ unsigned ImmOpNum) {
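+ // Fold the addasl and the global address into every load/store that uses the addasl result.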
+ NodeAddr<StmtNode *> SA = AddAslUN.Addr->getOwner(*DFG);
+
+ DEBUG(dbgs() << "Processing addasl :" << *AddAslMI << "\n");
+
+ NodeList UNodeList;
+ getAllRealUses(SA, UNodeList);
+
+ for (auto I = UNodeList.rbegin(), E = UNodeList.rend(); I != E; ++I) {
+ NodeAddr<UseNode *> UseUN = *I;
+ assert(!(UseUN.Addr->getFlags() & NodeAttrs::PhiRef) &&
+ "Can't transform this 'AddAsl' instruction!");
+
+ NodeAddr<StmtNode *> UseIA = UseUN.Addr->getOwner(*DFG);
+ DEBUG(dbgs() << "[InstrNode]: " << Print<NodeAddr<InstrNode *>>(UseIA, *DFG)
+ << "\n");
+ MachineInstr *UseMI = UseIA.Addr->getCode();
+ DEBUG(dbgs() << "[MI <BB#" << UseMI->getParent()->getNumber()
+ << ">]: " << *UseMI << "\n");
+ const MCInstrDesc &UseMID = UseMI->getDesc();
+ assert(HII->getAddrMode(UseMI) == HexagonII::BaseImmOffset);
+
+ auto UsePos = MachineBasicBlock::iterator(UseMI);
+ MachineBasicBlock::instr_iterator InsertPt = UsePos.getInstrIterator();
+ short NewOpCode = getBaseWithLongOffset(UseMI);
+ assert(NewOpCode >= 0 && "Invalid New opcode\n");
+
+ unsigned OpStart;
+ unsigned OpEnd = UseMI->getNumOperands();
+
+ MachineBasicBlock *BB = UseMI->getParent();
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, InsertPt, UseMI->getDebugLoc(), HII->get(NewOpCode));
+ // Change mem(Rs + #) -> mem(Rt << # + ##).
+ if (UseMID.mayLoad()) {
+ MIB.addOperand(UseMI->getOperand(0));
+ MIB.addOperand(AddAslMI->getOperand(2));
+ MIB.addOperand(AddAslMI->getOperand(3));
+ const GlobalValue *GV = ImmOp.getGlobal();
+ MIB.addGlobalAddress(GV, UseMI->getOperand(2).getImm(),
+ ImmOp.getTargetFlags());
+ OpStart = 3;
+ } else if (UseMID.mayStore()) {
+ MIB.addOperand(AddAslMI->getOperand(2));
+ MIB.addOperand(AddAslMI->getOperand(3));
+ const GlobalValue *GV = ImmOp.getGlobal();
+ MIB.addGlobalAddress(GV, UseMI->getOperand(1).getImm(),
+ ImmOp.getTargetFlags());
+ MIB.addOperand(UseMI->getOperand(2));
+ OpStart = 3;
+ } else
+ llvm_unreachable("Unhandled instruction");
+
+ for (unsigned i = OpStart; i < OpEnd; ++i)
+ MIB.addOperand(UseMI->getOperand(i));
+
+ Deleted.insert(UseMI);
+ }
+
+ return true;
+}
+
+bool HexagonOptAddrMode::xformUseMI(MachineInstr *TfrMI, MachineInstr *UseMI,
+ NodeAddr<UseNode *> UseN,
+ unsigned UseMOnum) {
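+ // Dispatch to the rewriter matching the kind of use: load, store, or addasl.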
+ const MachineOperand ImmOp = TfrMI->getOperand(1);
+ const MCInstrDesc &MID = UseMI->getDesc();
+ bool Changed = false;
+ if (MID.mayLoad())
+ Changed = changeLoad(UseMI, ImmOp, UseMOnum);
+ else if (MID.mayStore())
+ Changed = changeStore(UseMI, ImmOp, UseMOnum);
+ else if (UseMI->getOpcode() == Hexagon::S2_addasl_rrri)
+ Changed = changeAddAsl(UseN, UseMI, ImmOp, UseMOnum);
+
+ if (Changed)
+ Deleted.insert(UseMI);
+
+ return Changed;
+}
+
+bool HexagonOptAddrMode::processBlock(NodeAddr<BlockNode *> BA) {
+ bool Changed = false;
+
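+ // Look for "Rx = A2_tfrsi #global" and try to fold the global address into all of its reached uses.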
+ for (auto IA : BA.Addr->members(*DFG)) {
+ if (!DFG->IsCode<NodeAttrs::Stmt>(IA))
+ continue;
+
+ NodeAddr<StmtNode *> SA = IA;
+ MachineInstr *MI = SA.Addr->getCode();
+ if (MI->getOpcode() != Hexagon::A2_tfrsi ||
+ !MI->getOperand(1).isGlobal())
+ continue;
+
+ DEBUG(dbgs() << "[Analyzing A2_tfrsi]: " << *MI << "\n");
+ DEBUG(dbgs() << "\t[InstrNode]: " << Print<NodeAddr<InstrNode *>>(IA, *DFG)
+ << "\n");
+
+ NodeList UNodeList;
+ getAllRealUses(SA, UNodeList);
+
+ if (!allValidCandidates(SA, UNodeList))
+ continue;
+
+ short SizeInc = 0;
+ unsigned DefR = MI->getOperand(0).getReg();
+ InstrEvalMap InstrEvalResult;
+
+ // Analyze all uses and compute the net change in code size. Perform the
+ // optimization only if the size increase stays within CodeGrowthLimit.
+ if (!analyzeUses(DefR, UNodeList, InstrEvalResult, SizeInc))
+ continue;
+ if (SizeInc > CodeGrowthLimit)
+ continue;
+
+ bool KeepTfr = false;
+
+ DEBUG(dbgs() << "\t[Total reached uses] : " << UNodeList.size() << "\n");
+ DEBUG(dbgs() << "\t[Processing Reached Uses] ===\n");
+ for (auto I = UNodeList.rbegin(), E = UNodeList.rend(); I != E; ++I) {
+ NodeAddr<UseNode *> UseN = *I;
+ assert(!(UseN.Addr->getFlags() & NodeAttrs::PhiRef) &&
+ "Found a PhiRef node as a real reached use!!");
+
+ NodeAddr<StmtNode *> OwnerN = UseN.Addr->getOwner(*DFG);
+ MachineInstr *UseMI = OwnerN.Addr->getCode();
+ DEBUG(dbgs() << "\t\t[MI <BB#" << UseMI->getParent()->getNumber()
+ << ">]: " << *UseMI << "\n");
+
+ int UseMOnum = -1;
+ unsigned NumOperands = UseMI->getNumOperands();
+ for (unsigned j = 0; j < NumOperands - 1; ++j) {
+ const MachineOperand &op = UseMI->getOperand(j);
+ if (op.isReg() && op.isUse() && DefR == op.getReg())
+ UseMOnum = j;
+ }
+ assert(UseMOnum >= 0 && "Invalid reached use!");
+
+ if (InstrEvalResult[UseMI])
+ // Change UseMI if replacement is possible.
+ Changed |= xformUseMI(MI, UseMI, UseN, UseMOnum);
+ else
+ KeepTfr = true;
+ }
+ if (!KeepTfr)
+ Deleted.insert(MI);
+ }
+ return Changed;
+}
+
+void HexagonOptAddrMode::updateMap(NodeAddr<InstrNode *> IA) {
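+ // For each register tracked in RDefMap, record the def that reaches this instruction.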
+ RegisterSet RRs;
+ for (NodeAddr<RefNode *> RA : IA.Addr->members(*DFG))
+ RRs.insert(RA.Addr->getRegRef());
+ bool Common = false;
+ for (auto &R : RDefMap) {
+ if (!RRs.count(R.first))
+ continue;
+ Common = true;
+ break;
+ }
+ if (!Common)
+ return;
+
+ for (auto &R : RDefMap) {
+ auto F = DefM.find(R.first);
+ if (F == DefM.end() || F->second.empty())
+ continue;
+ R.second[IA.Id] = F->second.top()->Id;
+ }
+}
+
+bool HexagonOptAddrMode::constructDefMap(MachineBasicBlock *B) {
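+ // Walk the dominator tree, maintaining the current reaching defs and recording them in RDefMap at each instruction.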
+ bool Changed = false;
+ auto BA = DFG->getFunc().Addr->findBlock(B, *DFG);
+ DFG->markBlock(BA.Id, DefM);
+
+ for (NodeAddr<InstrNode *> IA : BA.Addr->members(*DFG)) {
+ updateMap(IA);
+ DFG->pushDefs(IA, DefM);
+ }
+
+ MachineDomTreeNode *N = MDT->getNode(B);
+ for (auto I : *N)
+ Changed |= constructDefMap(I->getBlock());
+
+ DFG->releaseBlock(BA.Id, DefM);
+ return Changed;
+}
+
+bool HexagonOptAddrMode::runOnMachineFunction(MachineFunction &MF) {
+ bool Changed = false;
+ auto &HST = MF.getSubtarget<HexagonSubtarget>();
+ auto &MRI = MF.getRegInfo();
+ HII = HST.getInstrInfo();
+ const auto &MDF = getAnalysis<MachineDominanceFrontier>();
+ MDT = &getAnalysis<MachineDominatorTree>();
+ const auto &TRI = *MF.getSubtarget().getRegisterInfo();
+ const TargetOperandInfo TOI(*HII);
+
+ RegisterAliasInfo RAI(TRI);
+ DataFlowGraph G(MF, *HII, TRI, *MDT, MDF, RAI, TOI);
+ G.build();
+ DFG = &G;
+
+ Liveness L(MRI, *DFG);
+ L.computePhiInfo();
+ LV = &L;
+
+ constructDefMap(&DFG->getMF().front());
+
+ Deleted.clear();
+ NodeAddr<FuncNode *> FA = DFG->getFunc();
+ DEBUG(dbgs() << "==== [RefMap#]=====:\n "
+ << Print<NodeAddr<FuncNode *>>(FA, *DFG) << "\n");
+
+ for (NodeAddr<BlockNode *> BA : FA.Addr->members(*DFG))
+ Changed |= processBlock(BA);
+
+ for (auto MI : Deleted)
+ MI->eraseFromParent();
+
+ if (Changed) {
+ G.build();
+ L.computeLiveIns();
+ L.resetLiveIns();
+ L.resetKills();
+ }
+
+ return Changed;
+}
+
+//===----------------------------------------------------------------------===//
+// Public Constructor Functions
+//===----------------------------------------------------------------------===//
+
+FunctionPass *llvm::createHexagonOptAddrMode() {
+ return new HexagonOptAddrMode();
+}
diff --git a/lib/Target/Hexagon/HexagonOptimizeSZextends.cpp b/lib/Target/Hexagon/HexagonOptimizeSZextends.cpp
index 1723771550c9..7937a7908b06 100644
--- a/lib/Target/Hexagon/HexagonOptimizeSZextends.cpp
+++ b/lib/Target/Hexagon/HexagonOptimizeSZextends.cpp
@@ -69,6 +69,9 @@ bool HexagonOptimizeSZextends::intrinsicAlreadySextended(Intrinsic::ID IntID) {
}
bool HexagonOptimizeSZextends::runOnFunction(Function &F) {
+ if (skipFunction(F))
+ return false;
+
unsigned Idx = 1;
// Try to optimize sign extends in formal parameters. It's relying on
// callee already sign extending the values. I'm not sure if our ABI
diff --git a/lib/Target/Hexagon/HexagonPeephole.cpp b/lib/Target/Hexagon/HexagonPeephole.cpp
index e68ff85b1da6..b064decc5c76 100644
--- a/lib/Target/Hexagon/HexagonPeephole.cpp
+++ b/lib/Target/Hexagon/HexagonPeephole.cpp
@@ -67,11 +67,11 @@ static cl::opt<bool> DisablePNotP("disable-hexagon-pnotp",
cl::desc("Disable Optimization of PNotP"));
static cl::opt<bool> DisableOptSZExt("disable-hexagon-optszext",
- cl::Hidden, cl::ZeroOrMore, cl::init(false),
+ cl::Hidden, cl::ZeroOrMore, cl::init(true),
cl::desc("Disable Optimization of Sign/Zero Extends"));
static cl::opt<bool> DisableOptExtTo64("disable-hexagon-opt-ext-to-64",
- cl::Hidden, cl::ZeroOrMore, cl::init(false),
+ cl::Hidden, cl::ZeroOrMore, cl::init(true),
cl::desc("Disable Optimization of extensions to i64."));
namespace llvm {
@@ -112,6 +112,9 @@ INITIALIZE_PASS(HexagonPeephole, "hexagon-peephole", "Hexagon Peephole",
false, false)
bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
QII = static_cast<const HexagonInstrInfo *>(MF.getSubtarget().getInstrInfo());
QRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
MRI = &MF.getRegInfo();
@@ -129,15 +132,13 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
PeepholeDoubleRegsMap.clear();
// Traverse the basic block.
- for (MachineBasicBlock::iterator MII = MBB->begin(); MII != MBB->end();
- ++MII) {
- MachineInstr *MI = MII;
+ for (MachineInstr &MI : *MBB) {
// Look for sign extends:
// %vreg170<def> = SXTW %vreg166
- if (!DisableOptSZExt && MI->getOpcode() == Hexagon::A2_sxtw) {
- assert (MI->getNumOperands() == 2);
- MachineOperand &Dst = MI->getOperand(0);
- MachineOperand &Src = MI->getOperand(1);
+ if (!DisableOptSZExt && MI.getOpcode() == Hexagon::A2_sxtw) {
+ assert(MI.getNumOperands() == 2);
+ MachineOperand &Dst = MI.getOperand(0);
+ MachineOperand &Src = MI.getOperand(1);
unsigned DstReg = Dst.getReg();
unsigned SrcReg = Src.getReg();
// Just handle virtual registers.
@@ -152,12 +153,11 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
// Look for %vreg170<def> = COMBINE_ir_V4 (0, %vreg169)
// %vreg170:DoublRegs, %vreg169:IntRegs
- if (!DisableOptExtTo64 &&
- MI->getOpcode () == Hexagon::A4_combineir) {
- assert (MI->getNumOperands() == 3);
- MachineOperand &Dst = MI->getOperand(0);
- MachineOperand &Src1 = MI->getOperand(1);
- MachineOperand &Src2 = MI->getOperand(2);
+ if (!DisableOptExtTo64 && MI.getOpcode() == Hexagon::A4_combineir) {
+ assert(MI.getNumOperands() == 3);
+ MachineOperand &Dst = MI.getOperand(0);
+ MachineOperand &Src1 = MI.getOperand(1);
+ MachineOperand &Src2 = MI.getOperand(2);
if (Src1.getImm() != 0)
continue;
unsigned DstReg = Dst.getReg();
@@ -170,11 +170,11 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
// %vregIntReg = COPY %vregDoubleReg1:subreg_loreg.
// and convert into
// %vregIntReg = COPY %vregDoubleReg0:subreg_hireg.
- if (MI->getOpcode() == Hexagon::S2_lsr_i_p) {
- assert(MI->getNumOperands() == 3);
- MachineOperand &Dst = MI->getOperand(0);
- MachineOperand &Src1 = MI->getOperand(1);
- MachineOperand &Src2 = MI->getOperand(2);
+ if (MI.getOpcode() == Hexagon::S2_lsr_i_p) {
+ assert(MI.getNumOperands() == 3);
+ MachineOperand &Dst = MI.getOperand(0);
+ MachineOperand &Src1 = MI.getOperand(1);
+ MachineOperand &Src2 = MI.getOperand(2);
if (Src2.getImm() != 32)
continue;
unsigned DstReg = Dst.getReg();
@@ -184,11 +184,10 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
}
// Look for P=NOT(P).
- if (!DisablePNotP &&
- (MI->getOpcode() == Hexagon::C2_not)) {
- assert (MI->getNumOperands() == 2);
- MachineOperand &Dst = MI->getOperand(0);
- MachineOperand &Src = MI->getOperand(1);
+ if (!DisablePNotP && MI.getOpcode() == Hexagon::C2_not) {
+ assert(MI.getNumOperands() == 2);
+ MachineOperand &Dst = MI.getOperand(0);
+ MachineOperand &Src = MI.getOperand(1);
unsigned DstReg = Dst.getReg();
unsigned SrcReg = Src.getReg();
// Just handle virtual registers.
@@ -203,10 +202,10 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
// Look for copy:
// %vreg176<def> = COPY %vreg170:subreg_loreg
- if (!DisableOptSZExt && MI->isCopy()) {
- assert (MI->getNumOperands() == 2);
- MachineOperand &Dst = MI->getOperand(0);
- MachineOperand &Src = MI->getOperand(1);
+ if (!DisableOptSZExt && MI.isCopy()) {
+ assert(MI.getNumOperands() == 2);
+ MachineOperand &Dst = MI.getOperand(0);
+ MachineOperand &Src = MI.getOperand(1);
// Make sure we are copying the lower 32 bits.
if (Src.getSubReg() != Hexagon::subreg_loreg)
@@ -219,22 +218,18 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
// Try to find in the map.
if (unsigned PeepholeSrc = PeepholeMap.lookup(SrcReg)) {
// Change the 1st operand.
- MI->RemoveOperand(1);
- MI->addOperand(MachineOperand::CreateReg(PeepholeSrc, false));
+ MI.RemoveOperand(1);
+ MI.addOperand(MachineOperand::CreateReg(PeepholeSrc, false));
} else {
DenseMap<unsigned, std::pair<unsigned, unsigned> >::iterator DI =
PeepholeDoubleRegsMap.find(SrcReg);
if (DI != PeepholeDoubleRegsMap.end()) {
std::pair<unsigned,unsigned> PeepholeSrc = DI->second;
- MI->RemoveOperand(1);
- MI->addOperand(MachineOperand::CreateReg(PeepholeSrc.first,
- false /*isDef*/,
- false /*isImp*/,
- false /*isKill*/,
- false /*isDead*/,
- false /*isUndef*/,
- false /*isEarlyClobber*/,
- PeepholeSrc.second));
+ MI.RemoveOperand(1);
+ MI.addOperand(MachineOperand::CreateReg(
+ PeepholeSrc.first, false /*isDef*/, false /*isImp*/,
+ false /*isKill*/, false /*isDead*/, false /*isUndef*/,
+ false /*isEarlyClobber*/, PeepholeSrc.second));
}
}
}
@@ -244,7 +239,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
if (!DisablePNotP) {
bool Done = false;
if (QII->isPredicated(MI)) {
- MachineOperand &Op0 = MI->getOperand(0);
+ MachineOperand &Op0 = MI.getOperand(0);
unsigned Reg0 = Op0.getReg();
const TargetRegisterClass *RC0 = MRI->getRegClass(Reg0);
if (RC0->getID() == Hexagon::PredRegsRegClassID) {
@@ -254,9 +249,9 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
// Try to find in the map.
if (unsigned PeepholeSrc = PeepholeMap.lookup(Reg0)) {
// Change the 1st operand and, flip the opcode.
- MI->getOperand(0).setReg(PeepholeSrc);
- int NewOp = QII->getInvertedPredicatedOpcode(MI->getOpcode());
- MI->setDesc(QII->get(NewOp));
+ MI.getOperand(0).setReg(PeepholeSrc);
+ int NewOp = QII->getInvertedPredicatedOpcode(MI.getOpcode());
+ MI.setDesc(QII->get(NewOp));
Done = true;
}
}
@@ -265,7 +260,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
if (!Done) {
// Handle special instructions.
- unsigned Op = MI->getOpcode();
+ unsigned Op = MI.getOpcode();
unsigned NewOp = 0;
unsigned PR = 1, S1 = 2, S2 = 3; // Operand indices.
@@ -282,15 +277,15 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
break;
}
if (NewOp) {
- unsigned PSrc = MI->getOperand(PR).getReg();
+ unsigned PSrc = MI.getOperand(PR).getReg();
if (unsigned POrig = PeepholeMap.lookup(PSrc)) {
- MI->getOperand(PR).setReg(POrig);
- MI->setDesc(QII->get(NewOp));
+ MI.getOperand(PR).setReg(POrig);
+ MI.setDesc(QII->get(NewOp));
// Swap operands S1 and S2.
- MachineOperand Op1 = MI->getOperand(S1);
- MachineOperand Op2 = MI->getOperand(S2);
- ChangeOpInto(MI->getOperand(S1), Op2);
- ChangeOpInto(MI->getOperand(S2), Op1);
+ MachineOperand Op1 = MI.getOperand(S1);
+ MachineOperand Op2 = MI.getOperand(S2);
+ ChangeOpInto(MI.getOperand(S1), Op2);
+ ChangeOpInto(MI.getOperand(S2), Op1);
}
} // if (NewOp)
} // if (!Done)
@@ -308,6 +303,7 @@ void HexagonPeephole::ChangeOpInto(MachineOperand &Dst, MachineOperand &Src) {
case MachineOperand::MO_Register:
if (Src.isReg()) {
Dst.setReg(Src.getReg());
+ Dst.setSubReg(Src.getSubReg());
} else if (Src.isImm()) {
Dst.ChangeToImmediate(Src.getImm());
} else {
@@ -322,6 +318,7 @@ void HexagonPeephole::ChangeOpInto(MachineOperand &Dst, MachineOperand &Src) {
Dst.ChangeToRegister(Src.getReg(), Src.isDef(), Src.isImplicit(),
Src.isKill(), Src.isDead(), Src.isUndef(),
Src.isDebug());
+ Dst.setSubReg(Src.getSubReg());
} else {
llvm_unreachable("Unexpected src operand type");
}
diff --git a/lib/Target/Hexagon/HexagonRDF.h b/lib/Target/Hexagon/HexagonRDF.h
index 00c1889e8eb5..9a63150c377d 100644
--- a/lib/Target/Hexagon/HexagonRDF.h
+++ b/lib/Target/Hexagon/HexagonRDF.h
@@ -13,7 +13,6 @@
namespace llvm {
class TargetRegisterInfo;
-}
namespace rdf {
struct HexagonRegisterAliasInfo : public RegisterAliasInfo {
@@ -22,7 +21,8 @@ namespace rdf {
bool covers(RegisterRef RA, RegisterRef RR) const override;
bool covers(const RegisterSet &RRs, RegisterRef RR) const override;
};
-}
+} // namespace rdf
+} // namespace llvm
#endif
diff --git a/lib/Target/Hexagon/HexagonRDFOpt.cpp b/lib/Target/Hexagon/HexagonRDFOpt.cpp
index 3fcda984d265..642a8785def9 100644
--- a/lib/Target/Hexagon/HexagonRDFOpt.cpp
+++ b/lib/Target/Hexagon/HexagonRDFOpt.cpp
@@ -35,8 +35,8 @@ namespace llvm {
}
namespace {
- cl::opt<unsigned> RDFLimit("rdf-limit", cl::init(UINT_MAX));
unsigned RDFCount = 0;
+ cl::opt<unsigned> RDFLimit("rdf-limit", cl::init(UINT_MAX));
cl::opt<bool> RDFDump("rdf-dump", cl::init(false));
class HexagonRDFOpt : public MachineFunctionPass {
@@ -55,6 +55,11 @@ namespace {
}
bool runOnMachineFunction(MachineFunction &MF) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
static char ID;
private:
@@ -71,6 +76,13 @@ INITIALIZE_PASS_DEPENDENCY(MachineDominanceFrontier)
INITIALIZE_PASS_END(HexagonRDFOpt, "rdfopt", "Hexagon RDF opt", false, false)
+namespace {
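+// Hexagon-specific copy propagation: besides plain copies, treat A2_combinew,
+// A2_addi with a zero immediate, and A2_tfr as register copies.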
+struct HexagonCP : public CopyPropagation {
+ HexagonCP(DataFlowGraph &G) : CopyPropagation(G) {}
+ bool interpretAsCopy(const MachineInstr *MI, EqualityMap &EM) override;
+};
+
+
struct HexagonDCE : public DeadCodeElimination {
HexagonDCE(DataFlowGraph &G, MachineRegisterInfo &MRI)
: DeadCodeElimination(G, MRI) {}
@@ -79,6 +91,44 @@ struct HexagonDCE : public DeadCodeElimination {
bool run();
};
+} // end anonymous namespace
+
+
+bool HexagonCP::interpretAsCopy(const MachineInstr *MI, EqualityMap &EM) {
+ auto mapRegs = [MI,&EM] (RegisterRef DstR, RegisterRef SrcR) -> void {
+ EM.insert(std::make_pair(DstR, SrcR));
+ };
+
+ unsigned Opc = MI->getOpcode();
+ switch (Opc) {
+ case Hexagon::A2_combinew: {
+ const MachineOperand &DstOp = MI->getOperand(0);
+ const MachineOperand &HiOp = MI->getOperand(1);
+ const MachineOperand &LoOp = MI->getOperand(2);
+ assert(DstOp.getSubReg() == 0 && "Unexpected subregister");
+ mapRegs({ DstOp.getReg(), Hexagon::subreg_hireg },
+ { HiOp.getReg(), HiOp.getSubReg() });
+ mapRegs({ DstOp.getReg(), Hexagon::subreg_loreg },
+ { LoOp.getReg(), LoOp.getSubReg() });
+ return true;
+ }
+ case Hexagon::A2_addi: {
+ const MachineOperand &A = MI->getOperand(2);
+ if (!A.isImm() || A.getImm() != 0)
+ return false;
+ }
+ // Fall through.
+ case Hexagon::A2_tfr: {
+ const MachineOperand &DstOp = MI->getOperand(0);
+ const MachineOperand &SrcOp = MI->getOperand(1);
+ mapRegs({ DstOp.getReg(), DstOp.getSubReg() },
+ { SrcOp.getReg(), SrcOp.getSubReg() });
+ return true;
+ }
+ }
+
+ return CopyPropagation::interpretAsCopy(MI, EM);
+}
bool HexagonDCE::run() {
@@ -106,6 +156,7 @@ bool HexagonDCE::run() {
}
}
+
// Nodes to remove.
SetVector<NodeId> Remove = DeadInstrs;
@@ -216,6 +267,9 @@ bool HexagonDCE::rewrite(NodeAddr<InstrNode*> IA, SetVector<NodeId> &Remove) {
bool HexagonRDFOpt::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
if (RDFLimit.getPosition()) {
if (RDFCount >= RDFLimit)
return false;
@@ -227,31 +281,36 @@ bool HexagonRDFOpt::runOnMachineFunction(MachineFunction &MF) {
const auto &HII = *MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
const auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
MRI = &MF.getRegInfo();
-
- HexagonRegisterAliasInfo HAI(HRI);
- TargetOperandInfo TOI(HII);
+ bool Changed;
if (RDFDump)
MF.print(dbgs() << "Before " << getPassName() << "\n", nullptr);
+
+ HexagonRegisterAliasInfo HAI(HRI);
+ TargetOperandInfo TOI(HII);
DataFlowGraph G(MF, HII, HRI, *MDT, MDF, HAI, TOI);
- G.build();
- if (RDFDump) {
- dbgs() << PrintNode<FuncNode*>(G.getFunc(), G) << '\n';
- dbgs() << MF.getName() << '\n';
- }
+ // Dead phi nodes are necessary for copy propagation: we can add a use
+ // of a register in a block where it would need a phi node, but which
+ // was dead (and removed) at graph build time.
+ G.build(BuildOptions::KeepDeadPhis);
- bool Changed;
- CopyPropagation CP(G);
+ if (RDFDump)
+ dbgs() << "Starting copy propagation on: " << MF.getName() << '\n'
+ << PrintNode<FuncNode*>(G.getFunc(), G) << '\n';
+ HexagonCP CP(G);
CP.trace(RDFDump);
Changed = CP.run();
- if (Changed)
- G.build();
+ if (RDFDump)
+ dbgs() << "Starting dead code elimination on: " << MF.getName() << '\n'
+ << PrintNode<FuncNode*>(G.getFunc(), G) << '\n';
HexagonDCE DCE(G, *MRI);
DCE.trace(RDFDump);
Changed |= DCE.run();
if (Changed) {
+ if (RDFDump)
+ dbgs() << "Starting liveness recomputation on: " << MF.getName() << '\n';
Liveness LV(*MRI, G);
LV.trace(RDFDump);
LV.computeLiveIns();
@@ -261,6 +320,7 @@ bool HexagonRDFOpt::runOnMachineFunction(MachineFunction &MF) {
if (RDFDump)
MF.print(dbgs() << "After " << getPassName() << "\n", nullptr);
+
return false;
}
@@ -268,5 +328,3 @@ bool HexagonRDFOpt::runOnMachineFunction(MachineFunction &MF) {
FunctionPass *llvm::createHexagonRDFOpt() {
return new HexagonRDFOpt();
}
-
-
diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/lib/Target/Hexagon/HexagonRegisterInfo.cpp
index 6e5f7324aca8..23ebfd484be9 100644
--- a/lib/Target/Hexagon/HexagonRegisterInfo.cpp
+++ b/lib/Target/Hexagon/HexagonRegisterInfo.cpp
@@ -29,7 +29,6 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MachineLocation.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -54,24 +53,51 @@ bool HexagonRegisterInfo::isCalleeSaveReg(unsigned Reg) const {
const MCPhysReg *
-HexagonRegisterInfo::getCallerSavedRegs(const MachineFunction *MF) const {
- static const MCPhysReg CallerSavedRegsV4[] = {
- Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
- Hexagon::R5, Hexagon::R6, Hexagon::R7, Hexagon::R8, Hexagon::R9,
- Hexagon::R10, Hexagon::R11, Hexagon::R12, Hexagon::R13, Hexagon::R14,
- Hexagon::R15, 0
+HexagonRegisterInfo::getCallerSavedRegs(const MachineFunction *MF,
+ const TargetRegisterClass *RC) const {
+ using namespace Hexagon;
+
+ static const MCPhysReg Int32[] = {
+ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, R13, R14, R15, 0
+ };
+ static const MCPhysReg Int64[] = {
+ D0, D1, D2, D3, D4, D5, D6, D7, 0
+ };
+ static const MCPhysReg Pred[] = {
+ P0, P1, P2, P3, 0
+ };
+ static const MCPhysReg VecSgl[] = {
+ V0, V1, V2, V3, V4, V5, V6, V7, V8, V9, V10, V11, V12, V13,
+ V14, V15, V16, V17, V18, V19, V20, V21, V22, V23, V24, V25, V26, V27,
+ V28, V29, V30, V31, 0
+ };
+ static const MCPhysReg VecDbl[] = {
+ W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, 0
};
- auto &HST = static_cast<const HexagonSubtarget&>(MF->getSubtarget());
- switch (HST.getHexagonArchVersion()) {
- case HexagonSubtarget::V4:
- case HexagonSubtarget::V5:
- case HexagonSubtarget::V55:
- case HexagonSubtarget::V60:
- return CallerSavedRegsV4;
+ switch (RC->getID()) {
+ case IntRegsRegClassID:
+ return Int32;
+ case DoubleRegsRegClassID:
+ return Int64;
+ case PredRegsRegClassID:
+ return Pred;
+ case VectorRegsRegClassID:
+ case VectorRegs128BRegClassID:
+ return VecSgl;
+ case VecDblRegsRegClassID:
+ case VecDblRegs128BRegClassID:
+ return VecDbl;
+ default:
+ break;
}
- llvm_unreachable(
- "Callee saved registers requested for unknown archtecture version");
+
+ static const MCPhysReg Empty[] = { 0 };
+#ifndef NDEBUG
+ dbgs() << "Register class: " << getRegClassName(RC) << "\n";
+#endif
+ llvm_unreachable("Unexpected register class");
+ return Empty;
}
@@ -83,33 +109,48 @@ HexagonRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
Hexagon::R24, Hexagon::R25, Hexagon::R26, Hexagon::R27, 0
};
+ // Functions that contain a call to __builtin_eh_return also save the first 4
+ // parameter registers.
+ static const MCPhysReg CalleeSavedRegsV3EHReturn[] = {
+ Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3,
+ Hexagon::R16, Hexagon::R17, Hexagon::R18, Hexagon::R19,
+ Hexagon::R20, Hexagon::R21, Hexagon::R22, Hexagon::R23,
+ Hexagon::R24, Hexagon::R25, Hexagon::R26, Hexagon::R27, 0
+ };
+
+ bool HasEHReturn = MF->getInfo<HexagonMachineFunctionInfo>()->hasEHReturn();
+
switch (MF->getSubtarget<HexagonSubtarget>().getHexagonArchVersion()) {
case HexagonSubtarget::V4:
case HexagonSubtarget::V5:
case HexagonSubtarget::V55:
case HexagonSubtarget::V60:
- return CalleeSavedRegsV3;
+ return HasEHReturn ? CalleeSavedRegsV3EHReturn : CalleeSavedRegsV3;
}
+
llvm_unreachable("Callee saved registers requested for unknown architecture "
"version");
}
+
BitVector HexagonRegisterInfo::getReservedRegs(const MachineFunction &MF)
const {
BitVector Reserved(getNumRegs());
- Reserved.set(HEXAGON_RESERVED_REG_1);
- Reserved.set(HEXAGON_RESERVED_REG_2);
Reserved.set(Hexagon::R29);
Reserved.set(Hexagon::R30);
Reserved.set(Hexagon::R31);
Reserved.set(Hexagon::PC);
- Reserved.set(Hexagon::GP);
Reserved.set(Hexagon::D14);
Reserved.set(Hexagon::D15);
Reserved.set(Hexagon::LC0);
Reserved.set(Hexagon::LC1);
Reserved.set(Hexagon::SA0);
Reserved.set(Hexagon::SA1);
+ Reserved.set(Hexagon::UGP);
+ Reserved.set(Hexagon::GP);
+ Reserved.set(Hexagon::CS0);
+ Reserved.set(Hexagon::CS1);
+ Reserved.set(Hexagon::CS);
return Reserved;
}
@@ -135,6 +176,7 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int Offset = HFI.getFrameIndexReference(MF, FI, BP);
// Add the offset from the instruction.
int RealOffset = Offset + MI.getOperand(FIOp+1).getImm();
+ bool IsKill = false;
unsigned Opc = MI.getOpcode();
switch (Opc) {
@@ -149,20 +191,22 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
break;
}
- if (HII.isValidOffset(Opc, RealOffset)) {
- MI.getOperand(FIOp).ChangeToRegister(BP, false);
- MI.getOperand(FIOp+1).ChangeToImmediate(RealOffset);
- return;
+ if (!HII.isValidOffset(Opc, RealOffset)) {
+ // If the offset is not valid, calculate the address in a temporary
+ // register and use it with offset 0.
+ auto &MRI = MF.getRegInfo();
+ unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
+ const DebugLoc &DL = MI.getDebugLoc();
+ BuildMI(MB, II, DL, HII.get(Hexagon::A2_addi), TmpR)
+ .addReg(BP)
+ .addImm(RealOffset);
+ BP = TmpR;
+ RealOffset = 0;
+ IsKill = true;
}
-#ifndef NDEBUG
- const Function *F = MF.getFunction();
- dbgs() << "In function ";
- if (F) dbgs() << F->getName();
- else dbgs() << "<?>";
- dbgs() << ", BB#" << MB.getNumber() << "\n" << MI;
-#endif
- llvm_unreachable("Unhandled instruction");
+ MI.getOperand(FIOp).ChangeToRegister(BP, false, false, IsKill);
+ MI.getOperand(FIOp+1).ChangeToImmediate(RealOffset);
}
diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.h b/lib/Target/Hexagon/HexagonRegisterInfo.h
index db7e0f27815d..fc70679bc930 100644
--- a/lib/Target/Hexagon/HexagonRegisterInfo.h
+++ b/lib/Target/Hexagon/HexagonRegisterInfo.h
@@ -21,21 +21,6 @@
#define GET_REGINFO_HEADER
#include "HexagonGenRegisterInfo.inc"
-//
-// We try not to hard code the reserved registers in our code,
-// so the following two macros were defined. However, there
-// are still a few places that R11 and R10 are hard wired.
-// See below. If, in the future, we decided to change the reserved
-// register. Don't forget changing the following places.
-//
-// 1. the "Defs" set of STriw_pred in HexagonInstrInfo.td
-// 2. the "Defs" set of LDri_pred in HexagonInstrInfo.td
-// 3. the definition of "IntRegs" in HexagonRegisterInfo.td
-// 4. the definition of "DoubleRegs" in HexagonRegisterInfo.td
-//
-#define HEXAGON_RESERVED_REG_1 Hexagon::R10
-#define HEXAGON_RESERVED_REG_2 Hexagon::R11
-
namespace llvm {
class HexagonRegisterInfo : public HexagonGenRegisterInfo {
public:
@@ -76,7 +61,8 @@ public:
unsigned getFrameRegister() const;
unsigned getStackRegister() const;
- const MCPhysReg *getCallerSavedRegs(const MachineFunction *MF) const;
+ const MCPhysReg *getCallerSavedRegs(const MachineFunction *MF,
+ const TargetRegisterClass *RC) const;
unsigned getFirstCallerSavedNonParamReg() const;
diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.td b/lib/Target/Hexagon/HexagonRegisterInfo.td
index 81629dc6d47f..4d0d411d73da 100644
--- a/lib/Target/Hexagon/HexagonRegisterInfo.td
+++ b/lib/Target/Hexagon/HexagonRegisterInfo.td
@@ -13,8 +13,8 @@
let Namespace = "Hexagon" in {
- class HexagonReg<bits<5> num, string n, list<string> alt = [],
- list<Register> alias = []> : Register<n> {
+ class HexagonReg<bits<5> num, string n, list<string> alt = [],
+ list<Register> alias = []> : Register<n, alt> {
field bits<5> Num;
let Aliases = alias;
let HWEncoding{4-0} = num;
@@ -31,7 +31,8 @@ let Namespace = "Hexagon" in {
// Registers are identified with 5-bit ID numbers.
// Ri - 32-bit integer registers.
- class Ri<bits<5> num, string n, list<string> alt = []> : HexagonReg<num, n, alt> {
+ class Ri<bits<5> num, string n, list<string> alt = []> :
+ HexagonReg<num, n, alt> {
let Num = num;
}
@@ -42,8 +43,9 @@ let Namespace = "Hexagon" in {
// Rd - 64-bit registers.
- class Rd<bits<5> num, string n, list<Register> subregs> :
- HexagonDoubleReg<num, n, subregs> {
+ class Rd<bits<5> num, string n, list<Register> subregs,
+ list<string> alt = []> :
+ HexagonDoubleReg<num, n, subregs, alt> {
let Num = num;
let SubRegs = subregs;
}
@@ -94,11 +96,11 @@ let Namespace = "Hexagon" in {
// Aliases of the R* registers used to hold 64-bit int values (doubles).
let SubRegIndices = [subreg_loreg, subreg_hireg], CoveredBySubRegs = 1 in {
- def D0 : Rd< 0, "r1:0", [R0, R1]>, DwarfRegNum<[32]>;
- def D1 : Rd< 2, "r3:2", [R2, R3]>, DwarfRegNum<[34]>;
- def D2 : Rd< 4, "r5:4", [R4, R5]>, DwarfRegNum<[36]>;
- def D3 : Rd< 6, "r7:6", [R6, R7]>, DwarfRegNum<[38]>;
- def D4 : Rd< 8, "r9:8", [R8, R9]>, DwarfRegNum<[40]>;
+ def D0 : Rd< 0, "r1:0", [R0, R1]>, DwarfRegNum<[32]>;
+ def D1 : Rd< 2, "r3:2", [R2, R3]>, DwarfRegNum<[34]>;
+ def D2 : Rd< 4, "r5:4", [R4, R5]>, DwarfRegNum<[36]>;
+ def D3 : Rd< 6, "r7:6", [R6, R7]>, DwarfRegNum<[38]>;
+ def D4 : Rd< 8, "r9:8", [R8, R9]>, DwarfRegNum<[40]>;
def D5 : Rd<10, "r11:10", [R10, R11]>, DwarfRegNum<[42]>;
def D6 : Rd<12, "r13:12", [R12, R13]>, DwarfRegNum<[44]>;
def D7 : Rd<14, "r15:14", [R14, R15]>, DwarfRegNum<[46]>;
@@ -109,7 +111,7 @@ let Namespace = "Hexagon" in {
def D12 : Rd<24, "r25:24", [R24, R25]>, DwarfRegNum<[56]>;
def D13 : Rd<26, "r27:26", [R26, R27]>, DwarfRegNum<[58]>;
def D14 : Rd<28, "r29:28", [R28, R29]>, DwarfRegNum<[60]>;
- def D15 : Rd<30, "r31:30", [R30, R31]>, DwarfRegNum<[62]>;
+ def D15 : Rd<30, "r31:30", [R30, R31], ["lr:fp"]>, DwarfRegNum<[62]>;
}
// Predicate registers.
@@ -130,6 +132,11 @@ let Namespace = "Hexagon" in {
// on the entire USR.
def USR_OVF : Rc<?, "usr.ovf">;
+ def USR : Rc<8, "usr", ["c8"]>, DwarfRegNum<[75]> {
+ let SubRegIndices = [subreg_overflow];
+ let SubRegs = [USR_OVF];
+ }
+
// Control registers.
def SA0 : Rc<0, "sa0", ["c0"]>, DwarfRegNum<[67]>;
def LC0 : Rc<1, "lc0", ["c1"]>, DwarfRegNum<[68]>;
@@ -140,11 +147,12 @@ let Namespace = "Hexagon" in {
def C5 : Rc<5, "c5", ["c5"]>, DwarfRegNum<[72]>; // future use
def C6 : Rc<6, "c6", [], [M0]>, DwarfRegNum<[73]>;
def C7 : Rc<7, "c7", [], [M1]>, DwarfRegNum<[74]>;
-
- def USR : Rc<8, "usr", ["c8"]>, DwarfRegNum<[75]> {
- let SubRegIndices = [subreg_overflow];
- let SubRegs = [USR_OVF];
- }
+ // Define C8 separately and make it aliased with USR.
+ // The problem is that USR has subregisters (e.g. overflow). If USR was
+ // specified as a subregister of C9_8, it would imply that subreg_overflow
+ // and subreg_loreg can be composed, which leads to all kinds of issues
+ // with lane masks.
+ def C8 : Rc<8, "c8", [], [USR]>, DwarfRegNum<[75]>;
def PC : Rc<9, "pc">, DwarfRegNum<[76]>;
def UGP : Rc<10, "ugp", ["c10"]>, DwarfRegNum<[77]>;
def GP : Rc<11, "gp">, DwarfRegNum<[78]>;
@@ -159,7 +167,8 @@ let Namespace = "Hexagon" in {
def C1_0 : Rcc<0, "c1:0", [SA0, LC0], ["lc0:sa0"]>, DwarfRegNum<[67]>;
def C3_2 : Rcc<2, "c3:2", [SA1, LC1], ["lc1:sa1"]>, DwarfRegNum<[69]>;
def C7_6 : Rcc<6, "c7:6", [C6, C7], ["m1:0"]>, DwarfRegNum<[72]>;
- def C9_8 : Rcc<8, "c9:8", [USR, PC]>, DwarfRegNum<[74]>;
+ // Use C8 instead of USR as a subregister of C9_8.
+ def C9_8 : Rcc<8, "c9:8", [C8, PC]>, DwarfRegNum<[74]>;
def C11_10 : Rcc<10, "c11:10", [UGP, GP]>, DwarfRegNum<[76]>;
def CS : Rcc<12, "c13:12", [CS0, CS1], ["cs1:0"]>, DwarfRegNum<[78]>;
def UPC : Rcc<14, "c15:14", [UPCL, UPCH]>, DwarfRegNum<[80]>;
@@ -261,7 +270,13 @@ def VolatileV3 {
R28, R31,
P0, P1, P2, P3,
M0, M1,
- LC0, LC1, SA0, SA1, USR, USR_OVF];
+ LC0, LC1, SA0, SA1, USR, USR_OVF, CS0, CS1,
+ V0, V1, V2, V3, V4, V5, V6, V7, V8, V9, V10, V11,
+ V12, V13, V14, V15, V16, V17, V18, V19, V20, V21,
+ V22, V23, V24, V25, V26, V27, V28, V29, V30, V31,
+ W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11,
+ W12, W13, W14, W15,
+ Q0, Q1, Q2, Q3];
}
def PositiveHalfWord : PatLeaf<(i32 IntRegs:$a),
diff --git a/lib/Target/Hexagon/HexagonScheduleV4.td b/lib/Target/Hexagon/HexagonScheduleV4.td
index 67af147b25b3..7416baab392c 100644
--- a/lib/Target/Hexagon/HexagonScheduleV4.td
+++ b/lib/Target/Hexagon/HexagonScheduleV4.td
@@ -49,7 +49,6 @@ def ALU32_3op_tc_1_SLOT0123 : InstrItinClass;
def ALU32_3op_tc_2_SLOT0123 : InstrItinClass;
def ALU32_ADDI_tc_1_SLOT0123 : InstrItinClass;
def ALU64_tc_1_SLOT23 : InstrItinClass;
-def ALU64_tc_1or2_SLOT23 : InstrItinClass;
def ALU64_tc_2_SLOT23 : InstrItinClass;
def ALU64_tc_2early_SLOT23 : InstrItinClass;
def ALU64_tc_3x_SLOT23 : InstrItinClass;
@@ -64,10 +63,9 @@ def J_tc_2early_SLOT2 : InstrItinClass;
def LD_tc_ld_SLOT01 : InstrItinClass;
def LD_tc_ld_SLOT0 : InstrItinClass;
def LD_tc_3or4stall_SLOT0 : InstrItinClass;
-def M_tc_1_SLOT23 : InstrItinClass;
-def M_tc_1or2_SLOT23 : InstrItinClass;
def M_tc_2_SLOT23 : InstrItinClass;
def M_tc_3_SLOT23 : InstrItinClass;
+def M_tc_1_SLOT23 : InstrItinClass;
def M_tc_3x_SLOT23 : InstrItinClass;
def M_tc_3or4x_SLOT23 : InstrItinClass;
def ST_tc_st_SLOT01 : InstrItinClass;
@@ -79,7 +77,6 @@ def S_2op_tc_2_SLOT23 : InstrItinClass;
def S_2op_tc_2early_SLOT23 : InstrItinClass;
def S_2op_tc_3or4x_SLOT23 : InstrItinClass;
def S_3op_tc_1_SLOT23 : InstrItinClass;
-def S_3op_tc_1or2_SLOT23 : InstrItinClass;
def S_3op_tc_2_SLOT23 : InstrItinClass;
def S_3op_tc_2early_SLOT23 : InstrItinClass;
def S_3op_tc_3_SLOT23 : InstrItinClass;
@@ -95,7 +92,6 @@ def J_tc_2early_SLOT0123 : InstrItinClass;
def EXTENDER_tc_1_SLOT0123 : InstrItinClass;
def S_3op_tc_3stall_SLOT23 : InstrItinClass;
-
def HexagonItinerariesV4 :
ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP], [], [
// ALU32
@@ -114,7 +110,6 @@ def HexagonItinerariesV4 :
// ALU64
InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_1or2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<ALU64_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
@@ -130,6 +125,7 @@ def HexagonItinerariesV4 :
InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
// J
InstrItinData<J_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
// JR
InstrItinData<J_tc_2early_SLOT2 , [InstrStage<1, [SLOT2]>]>,
@@ -140,7 +136,6 @@ def HexagonItinerariesV4 :
// M
InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_1or2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<M_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<M_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<M_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
@@ -159,11 +154,11 @@ def HexagonItinerariesV4 :
InstrItinData<S_2op_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_1or2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<S_3op_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<S_3op_tc_3stall_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
// SYS
InstrItinData<ST_tc_3stall_SLOT0 , [InstrStage<1, [SLOT0]>]>,
@@ -188,6 +183,7 @@ def HexagonItinerariesV4 :
InstrItinData<EXTENDER_tc_1_SLOT0123,
[InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<COMPOUND_CJ_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
@@ -199,6 +195,7 @@ def HexagonModelV4 : SchedMachineModel {
let IssueWidth = 4;
let Itineraries = HexagonItinerariesV4;
let LoadLatency = 1;
+ let CompleteModel = 0;
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/Hexagon/HexagonScheduleV55.td b/lib/Target/Hexagon/HexagonScheduleV55.td
index d9ad25d4cd5a..b2a75f7200d7 100644
--- a/lib/Target/Hexagon/HexagonScheduleV55.td
+++ b/lib/Target/Hexagon/HexagonScheduleV55.td
@@ -31,131 +31,154 @@ def COPROC_VX_vtc_SLOT23 : InstrItinClass;
def J_tc_3stall_SLOT2 : InstrItinClass;
def MAPPING_tc_1_SLOT0123 : InstrItinClass;
def M_tc_3stall_SLOT23 : InstrItinClass;
-def SUBINSN_tc_1_SLOT01 : InstrItinClass;
-def SUBINSN_tc_2early_SLOT0 : InstrItinClass;
-def SUBINSN_tc_2early_SLOT01 : InstrItinClass;
-def SUBINSN_tc_3stall_SLOT0 : InstrItinClass;
-def SUBINSN_tc_ld_SLOT0 : InstrItinClass;
-def SUBINSN_tc_ld_SLOT01 : InstrItinClass;
-def SUBINSN_tc_st_SLOT01 : InstrItinClass;
def HexagonItinerariesV55 :
ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP], [], [
// ALU32
InstrItinData<ALU32_2op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
InstrItinData<ALU32_2op_tc_2early_SLOT0123,
- [InstrStage<2, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
InstrItinData<ALU32_3op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
InstrItinData<ALU32_3op_tc_2_SLOT0123 ,
- [InstrStage<2, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
InstrItinData<ALU32_3op_tc_2early_SLOT0123,
- [InstrStage<2, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
InstrItinData<ALU32_ADDI_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
// ALU64
- InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
+ InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [1, 1, 1]>,
+ InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [2, 1, 1]>,
+ InstrItinData<ALU64_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
+ [2, 1, 1]>,
+ InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [3, 1, 1]>,
// CR -> System
- InstrItinData<CR_tc_2_SLOT3 , [InstrStage<2, [SLOT3]>]>,
- InstrItinData<CR_tc_2early_SLOT3 , [InstrStage<2, [SLOT3]>]>,
- InstrItinData<CR_tc_3x_SLOT3 , [InstrStage<3, [SLOT3]>]>,
+ InstrItinData<CR_tc_2_SLOT3 , [InstrStage<1, [SLOT3]>], [2, 1, 1]>,
+ InstrItinData<CR_tc_2early_SLOT3 , [InstrStage<1, [SLOT3]>], [2, 1, 1]>,
+ InstrItinData<CR_tc_3x_SLOT3 , [InstrStage<1, [SLOT3]>], [3, 1, 1]>,
// Jump (conditional/unconditional/return etc)
- InstrItinData<CR_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<CJ_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<CJ_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<J_tc_2early_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<CR_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
+ [2, 1, 1, 1]>,
+ InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [3, 1, 1, 1]>,
+ InstrItinData<CJ_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [1, 1, 1, 1]>,
+ InstrItinData<CJ_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
+ [2, 1, 1, 1]>,
+ InstrItinData<J_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [2, 1, 1, 1]>,
+ InstrItinData<J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT,
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 1, 1, 1]>,
// JR
- InstrItinData<J_tc_2early_SLOT2 , [InstrStage<2, [SLOT2]>]>,
- InstrItinData<J_tc_3stall_SLOT2 , [InstrStage<3, [SLOT2]>]>,
+ InstrItinData<J_tc_2early_SLOT2 , [InstrStage<1, [SLOT2]>], [2, 1, 1]>,
+ InstrItinData<J_tc_3stall_SLOT2 , [InstrStage<1, [SLOT2]>], [3, 1, 1]>,
// Extender
InstrItinData<EXTENDER_tc_1_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
// Load
- InstrItinData<LD_tc_ld_SLOT01 , [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<LD_tc_3or4stall_SLOT0, [InstrStage<3, [SLOT0]>]>,
- InstrItinData<LD_tc_ld_SLOT0 , [InstrStage<3, [SLOT0]>]>,
+ InstrItinData<LD_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
+ [2, 1]>,
+ InstrItinData<LD_tc_3or4stall_SLOT0, [InstrStage<1, [SLOT0]>], [2, 1]>,
+ InstrItinData<LD_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>], [2, 1]>,
// M
- InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3or4x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3stall_SLOT23, [InstrStage<3, [SLOT2, SLOT3]>]>,
+ InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [1, 1, 1]>,
+ InstrItinData<M_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [2, 1, 1]>,
+ InstrItinData<M_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [1, 1, 1]>,
+ InstrItinData<M_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [3, 1, 1]>,
+ InstrItinData<M_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [3, 1, 1]>,
+ InstrItinData<M_tc_3stall_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
+ [3, 1, 1]>,
// Store
- InstrItinData<ST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<ST_tc_3stall_SLOT0, [InstrStage<3, [SLOT0]>]>,
- InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<3, [SLOT0]>]>,
- InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // Subinsn
- InstrItinData<SUBINSN_tc_2early_SLOT0, [InstrStage<2, [SLOT0]>]>,
- InstrItinData<SUBINSN_tc_3stall_SLOT0, [InstrStage<3, [SLOT0]>]>,
- InstrItinData<SUBINSN_tc_ld_SLOT0 , [InstrStage<3, [SLOT0]>]>,
- InstrItinData<SUBINSN_tc_1_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<SUBINSN_tc_2early_SLOT01,
- [InstrStage<2, [SLOT0, SLOT1]>]>,
- InstrItinData<SUBINSN_tc_ld_SLOT01 , [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<SUBINSN_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData<ST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
+ [1, 1, 1]>,
+ InstrItinData<ST_tc_3stall_SLOT0, [InstrStage<1, [SLOT0]>], [2, 1, 1]>,
+ InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>], [2, 1, 1]>,
+ InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
// S
- InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3stall_SLOT23, [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
+ InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [1, 1, 1]>,
+ InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [2, 1, 1]>,
+ InstrItinData<S_2op_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
+ [2, 1, 1]>,
+ InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [3, 1, 1]>,
+ InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [1, 1, 1]>,
+ InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [2, 1, 1]>,
+ InstrItinData<S_3op_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
+ [2, 1, 1]>,
+ InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [3, 1, 1]>,
+ InstrItinData<S_3op_tc_3stall_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
+ [3, 1, 1]>,
+ InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
+ [3, 1, 1]>,
// New Value Compare Jump
- InstrItinData<NCJ_tc_3or4stall_SLOT0, [InstrStage<3, [SLOT0]>]>,
+ InstrItinData<NCJ_tc_3or4stall_SLOT0, [InstrStage<1, [SLOT0]>],
+ [3, 1, 1, 1]>,
// Mem ops
- InstrItinData<V2LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<V2LDST_tc_ld_SLOT01 , [InstrStage<2, [SLOT0, SLOT1]>]>,
- InstrItinData<V2LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<V4LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<V4LDST_tc_ld_SLOT01 , [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<V4LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData<V2LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>],
+ [1, 1, 1, 1]>,
+ InstrItinData<V2LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
+ [2, 1, 1, 1]>,
+ InstrItinData<V2LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
+ [1, 1, 1, 1]>,
+ InstrItinData<V4LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>],
+ [1, 1, 1, 1]>,
+ InstrItinData<V4LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
+ [3, 1, 1, 1]>,
+ InstrItinData<V4LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
+ [1, 1, 1, 1]>,
// Endloop
- InstrItinData<J_tc_2early_SLOT0123, [InstrStage<2, [SLOT_ENDLOOP]>]>,
+ InstrItinData<J_tc_2early_SLOT0123, [InstrStage<1, [SLOT_ENDLOOP]>],
+ [2]>,
// Vector
InstrItinData<COPROC_VMEM_vtc_long_SLOT01,
- [InstrStage<3, [SLOT0, SLOT1]>]>,
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 1, 1]>,
InstrItinData<COPROC_VX_vtc_long_SLOT23 ,
- [InstrStage<3, [SLOT2, SLOT3]>]>,
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1, 1]>,
InstrItinData<COPROC_VX_vtc_SLOT23 ,
- [InstrStage<3, [SLOT2, SLOT3]>]>,
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1, 1]>,
InstrItinData<MAPPING_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
+ [1, 1, 1, 1]>,
// Misc
- InstrItinData<COMPOUND_CJ_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<DUPLEX , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<COMPOUND_CJ_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>],
+ [1, 1, 1]>,
+ InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>],
+ [1, 1, 1]>,
+ InstrItinData<DUPLEX , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
+ InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
+ [1, 1, 1]>,
+ InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
+ [1, 1, 1]>,
InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [SLOT2, SLOT3]>]>
-
+ InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>
]>;
def HexagonModelV55 : SchedMachineModel {
@@ -163,6 +186,7 @@ def HexagonModelV55 : SchedMachineModel {
let IssueWidth = 4;
let Itineraries = HexagonItinerariesV55;
let LoadLatency = 1;
+ let CompleteModel = 0;
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/Hexagon/HexagonScheduleV60.td b/lib/Target/Hexagon/HexagonScheduleV60.td
index 2ccff8242a47..dc2ce43b0579 100644
--- a/lib/Target/Hexagon/HexagonScheduleV60.td
+++ b/lib/Target/Hexagon/HexagonScheduleV60.td
@@ -167,16 +167,6 @@ def HexagonItinerariesV60 :
InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<3, [SLOT0]>]>,
InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- // Subinsn
- InstrItinData<SUBINSN_tc_2early_SLOT0, [InstrStage<2, [SLOT0]>]>,
- InstrItinData<SUBINSN_tc_3stall_SLOT0, [InstrStage<3, [SLOT0]>]>,
- InstrItinData<SUBINSN_tc_ld_SLOT0 , [InstrStage<3, [SLOT0]>]>,
- InstrItinData<SUBINSN_tc_1_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<SUBINSN_tc_2early_SLOT01,
- [InstrStage<2, [SLOT0, SLOT1]>]>,
- InstrItinData<SUBINSN_tc_ld_SLOT01 , [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<SUBINSN_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
-
// S
InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
@@ -303,6 +293,7 @@ def HexagonModelV60 : SchedMachineModel {
let IssueWidth = 4;
let Itineraries = HexagonItinerariesV60;
let LoadLatency = 1;
+ let CompleteModel = 0;
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp b/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp
index 239dbda8f27b..00dfed754995 100644
--- a/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp
+++ b/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp
@@ -17,13 +17,10 @@ using namespace llvm;
#define DEBUG_TYPE "hexagon-selectiondag-info"
-SDValue
-HexagonSelectionDAGInfo::
-EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
- SDValue Dst, SDValue Src, SDValue Size, unsigned Align,
- bool isVolatile, bool AlwaysInline,
- MachinePointerInfo DstPtrInfo,
- MachinePointerInfo SrcPtrInfo) const {
+SDValue HexagonSelectionDAGInfo::EmitTargetCodeForMemcpy(
+ SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
if (AlwaysInline || (Align & 0x3) != 0 || !ConstantSize)
return SDValue();
@@ -55,7 +52,7 @@ EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
Type::getVoidTy(*DAG.getContext()),
DAG.getTargetExternalSymbol(
SpecialMemcpyName, TLI.getPointerTy(DAG.getDataLayout())),
- std::move(Args), 0)
+ std::move(Args))
.setDiscardResult();
std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
diff --git a/lib/Target/Hexagon/HexagonSelectionDAGInfo.h b/lib/Target/Hexagon/HexagonSelectionDAGInfo.h
index 80ac5d7bd9e2..6f2a42ce97f6 100644
--- a/lib/Target/Hexagon/HexagonSelectionDAGInfo.h
+++ b/lib/Target/Hexagon/HexagonSelectionDAGInfo.h
@@ -7,25 +7,23 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the Hexagon subclass for TargetSelectionDAGInfo.
+// This file defines the Hexagon subclass for SelectionDAGTargetInfo.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONSELECTIONDAGINFO_H
#define LLVM_LIB_TARGET_HEXAGON_HEXAGONSELECTIONDAGINFO_H
-#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
namespace llvm {
-class HexagonSelectionDAGInfo : public TargetSelectionDAGInfo {
+class HexagonSelectionDAGInfo : public SelectionDAGTargetInfo {
public:
-
- SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool isVolatile, bool AlwaysInline,
+ SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
+ SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVolatile,
+ bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const override;
};
diff --git a/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp b/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp
index 10fe606985dd..5a94cce4ce57 100644
--- a/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp
+++ b/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp
@@ -21,7 +21,6 @@
#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"
#include "HexagonTargetObjectFile.h"
-#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -32,14 +31,11 @@
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include <map>
using namespace llvm;
@@ -61,6 +57,10 @@ class HexagonSplitConst32AndConst64 : public MachineFunctionPass {
return "Hexagon Split Const32s and Const64s";
}
bool runOnMachineFunction(MachineFunction &Fn) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
};
@@ -72,7 +72,7 @@ bool HexagonSplitConst32AndConst64::runOnMachineFunction(MachineFunction &Fn) {
const HexagonTargetObjectFile &TLOF =
*static_cast<const HexagonTargetObjectFile *>(
Fn.getTarget().getObjFileLowering());
- if (TLOF.IsSmallDataEnabled())
+ if (TLOF.isSmallDataEnabled())
return true;
const TargetInstrInfo *TII = Fn.getSubtarget().getInstrInfo();
@@ -86,55 +86,56 @@ bool HexagonSplitConst32AndConst64::runOnMachineFunction(MachineFunction &Fn) {
MachineBasicBlock::iterator MII = MBB->begin();
MachineBasicBlock::iterator MIE = MBB->end ();
while (MII != MIE) {
- MachineInstr *MI = MII;
- int Opc = MI->getOpcode();
+ MachineInstr &MI = *MII;
+ int Opc = MI.getOpcode();
if (Opc == Hexagon::CONST32_Int_Real &&
- MI->getOperand(1).isBlockAddress()) {
- int DestReg = MI->getOperand(0).getReg();
- MachineOperand &Symbol = MI->getOperand (1);
-
- BuildMI (*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::LO), DestReg).addOperand(Symbol);
- BuildMI (*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::HI), DestReg).addOperand(Symbol);
+ MI.getOperand(1).isBlockAddress()) {
+ int DestReg = MI.getOperand(0).getReg();
+ MachineOperand &Symbol = MI.getOperand(1);
+
+ BuildMI(*MBB, MII, MI.getDebugLoc(), TII->get(Hexagon::LO), DestReg)
+ .addOperand(Symbol);
+ BuildMI(*MBB, MII, MI.getDebugLoc(), TII->get(Hexagon::HI), DestReg)
+ .addOperand(Symbol);
// MBB->erase returns the iterator to the next instruction, which is the
// one we want to process next
- MII = MBB->erase (MI);
+ MII = MBB->erase(&MI);
continue;
}
else if (Opc == Hexagon::CONST32_Int_Real ||
Opc == Hexagon::CONST32_Float_Real) {
- int DestReg = MI->getOperand(0).getReg();
+ int DestReg = MI.getOperand(0).getReg();
// We have to convert an FP immediate into its corresponding integer
// representation
int64_t ImmValue;
if (Opc == Hexagon::CONST32_Float_Real) {
- APFloat Val = MI->getOperand(1).getFPImm()->getValueAPF();
+ APFloat Val = MI.getOperand(1).getFPImm()->getValueAPF();
ImmValue = *Val.bitcastToAPInt().getRawData();
}
else
- ImmValue = MI->getOperand(1).getImm();
+ ImmValue = MI.getOperand(1).getImm();
- BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::A2_tfrsi), DestReg).addImm(ImmValue);
- MII = MBB->erase (MI);
+ BuildMI(*MBB, MII, MI.getDebugLoc(), TII->get(Hexagon::A2_tfrsi),
+ DestReg)
+ .addImm(ImmValue);
+ MII = MBB->erase(&MI);
continue;
}
else if (Opc == Hexagon::CONST64_Int_Real ||
Opc == Hexagon::CONST64_Float_Real) {
- int DestReg = MI->getOperand(0).getReg();
+ int DestReg = MI.getOperand(0).getReg();
// We have to convert an FP immediate into its corresponding integer
// representation
int64_t ImmValue;
if (Opc == Hexagon::CONST64_Float_Real) {
- APFloat Val = MI->getOperand(1).getFPImm()->getValueAPF();
+ APFloat Val = MI.getOperand(1).getFPImm()->getValueAPF();
ImmValue = *Val.bitcastToAPInt().getRawData();
}
else
- ImmValue = MI->getOperand(1).getImm();
+ ImmValue = MI.getOperand(1).getImm();
unsigned DestLo = TRI->getSubReg(DestReg, Hexagon::subreg_loreg);
unsigned DestHi = TRI->getSubReg(DestReg, Hexagon::subreg_hireg);
@@ -142,11 +143,13 @@ bool HexagonSplitConst32AndConst64::runOnMachineFunction(MachineFunction &Fn) {
int32_t LowWord = (ImmValue & 0xFFFFFFFF);
int32_t HighWord = (ImmValue >> 32) & 0xFFFFFFFF;
- BuildMI(*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::A2_tfrsi), DestLo).addImm(LowWord);
- BuildMI (*MBB, MII, MI->getDebugLoc(),
- TII->get(Hexagon::A2_tfrsi), DestHi).addImm(HighWord);
- MII = MBB->erase (MI);
+ BuildMI(*MBB, MII, MI.getDebugLoc(), TII->get(Hexagon::A2_tfrsi),
+ DestLo)
+ .addImm(LowWord);
+ BuildMI(*MBB, MII, MI.getDebugLoc(), TII->get(Hexagon::A2_tfrsi),
+ DestHi)
+ .addImm(HighWord);
+ MII = MBB->erase(&MI);
continue;
}
++MII;
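The CONST64 case above splits one 64-bit immediate into two 32-bit register transfers, one per subregister. A self-contained sketch of the word split, assuming the same low/high convention as the pass:

#include <cstdint>

// Split a 64-bit immediate into the two 32-bit words that feed the
// A2_tfrsi transfers for the low and high subregisters.
static void splitImm64(int64_t ImmValue, int32_t &LowWord, int32_t &HighWord) {
  LowWord  = ImmValue & 0xFFFFFFFF;          // bits 31..0
  HighWord = (ImmValue >> 32) & 0xFFFFFFFF;  // bits 63..32
}
// Example: 0x1122334455667788 -> LowWord 0x55667788, HighWord 0x11223344.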
diff --git a/lib/Target/Hexagon/HexagonSplitDouble.cpp b/lib/Target/Hexagon/HexagonSplitDouble.cpp
index d4e95b0d0210..25b2affa2f0b 100644
--- a/lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ b/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -12,13 +12,12 @@
#include "HexagonRegisterInfo.h"
#include "HexagonTargetMachine.h"
-#include "llvm/Pass.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -109,18 +108,6 @@ INITIALIZE_PASS(HexagonSplitDoubleRegs, "hexagon-split-double",
"Hexagon Split Double Registers", false, false)
-static inline uint32_t getRegState(const MachineOperand &R) {
- assert(R.isReg());
- return getDefRegState(R.isDef()) |
- getImplRegState(R.isImplicit()) |
- getKillRegState(R.isKill()) |
- getDeadRegState(R.isDead()) |
- getUndefRegState(R.isUndef()) |
- getInternalReadRegState(R.isInternalRead()) |
- (R.isDebug() ? RegState::Debug : 0);
-}
-
-
void HexagonSplitDoubleRegs::dump_partition(raw_ostream &os,
const USet &Part, const TargetRegisterInfo &TRI) {
dbgs() << '{';
@@ -452,7 +439,7 @@ void HexagonSplitDoubleRegs::collectIndRegsForLoop(const MachineLoop *L,
MachineBasicBlock *TB = 0, *FB = 0;
MachineBasicBlock *TmpLB = const_cast<MachineBasicBlock*>(LB);
SmallVector<MachineOperand,2> Cond;
- bool BadLB = TII->AnalyzeBranch(*TmpLB, TB, FB, Cond, false);
+ bool BadLB = TII->analyzeBranch(*TmpLB, TB, FB, Cond, false);
// Only analyzable conditional branches. HII::analyzeBranch will put
// the branch opcode as the first element of Cond, and the predicate
// operand as the second.
@@ -477,7 +464,7 @@ void HexagonSplitDoubleRegs::collectIndRegsForLoop(const MachineLoop *L,
CmpI = MRI->getVRegDef(CmpI->getOperand(1).getReg());
int Mask = 0, Val = 0;
- bool OkCI = TII->analyzeCompare(CmpI, CmpR1, CmpR2, Mask, Val);
+ bool OkCI = TII->analyzeCompare(*CmpI, CmpR1, CmpR2, Mask, Val);
if (!OkCI)
return;
// Eliminate non-double input registers.
@@ -655,7 +642,7 @@ void HexagonSplitDoubleRegs::splitMemRef(MachineInstr *MI,
MachineFunction &MF = *B.getParent();
for (auto &MO : MI->memoperands()) {
const MachinePointerInfo &Ptr = MO->getPointerInfo();
- unsigned F = MO->getFlags();
+ MachineMemOperand::Flags F = MO->getFlags();
int A = MO->getAlignment();
auto *Tmp1 = MF.getMachineMemOperand(Ptr, F, 4/*size*/, A);
@@ -1164,6 +1151,9 @@ bool HexagonSplitDoubleRegs::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "Splitting double registers in function: "
<< MF.getName() << '\n');
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
auto &ST = MF.getSubtarget<HexagonSubtarget>();
TRI = ST.getRegisterInfo();
TII = ST.getInstrInfo();
diff --git a/lib/Target/Hexagon/HexagonStoreWidening.cpp b/lib/Target/Hexagon/HexagonStoreWidening.cpp
index b5339ff4c0dc..54bc3cf6f6ff 100644
--- a/lib/Target/Hexagon/HexagonStoreWidening.cpp
+++ b/lib/Target/Hexagon/HexagonStoreWidening.cpp
@@ -594,6 +594,9 @@ bool HexagonStoreWidening::processBasicBlock(MachineBasicBlock &MBB) {
bool HexagonStoreWidening::runOnMachineFunction(MachineFunction &MFn) {
+ if (skipFunction(*MFn.getFunction()))
+ return false;
+
MF = &MFn;
auto &ST = MFn.getSubtarget<HexagonSubtarget>();
TII = ST.getInstrInfo();
diff --git a/lib/Target/Hexagon/HexagonSubtarget.cpp b/lib/Target/Hexagon/HexagonSubtarget.cpp
index aa0efd4f65e0..fb315a730f39 100644
--- a/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -14,6 +14,8 @@
#include "HexagonSubtarget.h"
#include "Hexagon.h"
#include "HexagonRegisterInfo.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <map>
@@ -49,10 +51,24 @@ static cl::opt<bool> EnableHexagonHVX("enable-hexagon-hvx",
cl::Hidden, cl::ZeroOrMore, cl::init(false),
cl::desc("Enable Hexagon Vector eXtensions"));
+static cl::opt<bool> EnableTCLatencySched("enable-tc-latency-sched",
+ cl::Hidden, cl::ZeroOrMore, cl::init(false));
+
+static cl::opt<bool> EnableDotCurSched("enable-cur-sched",
+ cl::Hidden, cl::ZeroOrMore, cl::init(true),
+ cl::desc("Enable the scheduler to generate .cur"));
+
+static cl::opt<bool> EnableVecFrwdSched("enable-evec-frwd-sched",
+ cl::Hidden, cl::ZeroOrMore, cl::init(true));
+
static cl::opt<bool> DisableHexagonMISched("disable-hexagon-misched",
cl::Hidden, cl::ZeroOrMore, cl::init(false),
cl::desc("Disable Hexagon MI Scheduling"));
+static cl::opt<bool> EnableSubregLiveness("hexagon-subreg-liveness",
+ cl::Hidden, cl::ZeroOrMore, cl::init(false),
+ cl::desc("Enable subregister liveness tracking for Hexagon"));
+
void HexagonSubtarget::initializeEnvironment() {
UseMemOps = false;
ModeIEEERndNear = false;
@@ -115,6 +131,57 @@ HexagonSubtarget::HexagonSubtarget(const Triple &TT, StringRef CPU,
UseBSBScheduling = hasV60TOps() && EnableBSBSched;
}
+
+void HexagonSubtarget::HexagonDAGMutation::apply(ScheduleDAGInstrs *DAG) {
+ for (auto &SU : DAG->SUnits) {
+ if (!SU.isInstr())
+ continue;
+ SmallVector<SDep, 4> Erase;
+ for (auto &D : SU.Preds)
+ if (D.getKind() == SDep::Output && D.getReg() == Hexagon::USR_OVF)
+ Erase.push_back(D);
+ for (auto &E : Erase)
+ SU.removePred(E);
+ }
+
+ for (auto &SU : DAG->SUnits) {
+ // Update the latency of chain edges between v60 vector load or store
+ // instructions to be 1. These instructions cannot be scheduled in the
+ // same packet.
+ MachineInstr *MI1 = SU.getInstr();
+ auto *QII = static_cast<const HexagonInstrInfo*>(DAG->TII);
+ bool IsStoreMI1 = MI1->mayStore();
+ bool IsLoadMI1 = MI1->mayLoad();
+ if (!QII->isV60VectorInstruction(MI1) || !(IsStoreMI1 || IsLoadMI1))
+ continue;
+ for (auto &SI : SU.Succs) {
+ if (SI.getKind() != SDep::Order || SI.getLatency() != 0)
+ continue;
+ MachineInstr *MI2 = SI.getSUnit()->getInstr();
+ if (!QII->isV60VectorInstruction(MI2))
+ continue;
+ if ((IsStoreMI1 && MI2->mayStore()) || (IsLoadMI1 && MI2->mayLoad())) {
+ SI.setLatency(1);
+ SU.setHeightDirty();
+ // Change the dependence in the opposite direction too.
+ for (auto &PI : SI.getSUnit()->Preds) {
+ if (PI.getSUnit() != &SU || PI.getKind() != SDep::Order)
+ continue;
+ PI.setLatency(1);
+ SI.getSUnit()->setDepthDirty();
+ }
+ }
+ }
+ }
+}
+
+
+void HexagonSubtarget::getPostRAMutations(
+ std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
+ Mutations.push_back(make_unique<HexagonSubtarget::HexagonDAGMutation>());
+}
+
+
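+The mutation above raises chain-edge latency to 1 between pairs of V60 vector loads (or stores) so they never share a packet, updating both directions of each edge. A toy model of that symmetric update, using only generic containers rather than the real SUnit/SDep types:
+
+#include <vector>
+
+// Toy model of the symmetric edge update done above: when the latency of
+// a successor edge changes, the mirrored predecessor edge on the other
+// node must be updated too, or the DAG's depth/height caches go stale.
+struct Edge { int To, From; unsigned Latency; };
+struct Node { std::vector<Edge> Succs, Preds; };
+
+static void setChainLatency(Node &Src, int SrcId, Node &Dst, int DstId,
+                            unsigned Lat) {
+  for (Edge &E : Src.Succs)
+    if (E.To == DstId)
+      E.Latency = Lat;
+  for (Edge &E : Dst.Preds)   // the opposite direction, as in the pass
+    if (E.From == SrcId)
+      E.Latency = Lat;
+}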
// Pin the vtable to this file.
void HexagonSubtarget::anchor() {}
@@ -123,3 +190,180 @@ bool HexagonSubtarget::enableMachineScheduler() const {
return !DisableHexagonMISched;
return true;
}
+
+bool HexagonSubtarget::enableSubRegLiveness() const {
+ return EnableSubregLiveness;
+}
+
+// This helper function is responsible for increasing the latency only.
+void HexagonSubtarget::updateLatency(MachineInstr *SrcInst,
+ MachineInstr *DstInst, SDep &Dep) const {
+ if (!hasV60TOps())
+ return;
+
+ auto &QII = static_cast<const HexagonInstrInfo&>(*getInstrInfo());
+
+ if (EnableVecFrwdSched && QII.addLatencyToSchedule(SrcInst, DstInst)) {
+ // Vec frwd scheduling.
+ Dep.setLatency(Dep.getLatency() + 1);
+ } else if (useBSBScheduling() &&
+ QII.isLateInstrFeedsEarlyInstr(SrcInst, DstInst)) {
+ // BSB scheduling.
+ Dep.setLatency(Dep.getLatency() + 1);
+ } else if (EnableTCLatencySched) {
+ // TClass latency scheduling.
+ // Check if SrcInst produces in 2C an operand of DstInst taken in stage 2B.
+ if (QII.isTC1(SrcInst) || QII.isTC2(SrcInst))
+ if (!QII.isTC1(DstInst) && !QII.isTC2(DstInst))
+ Dep.setLatency(Dep.getLatency() + 1);
+ }
+}
+
+/// If the SUnit has a zero latency edge, return the other SUnit.
+static SUnit *getZeroLatency(SUnit *N, SmallVector<SDep, 4> &Deps) {
+ for (auto &I : Deps)
+ if (I.isAssignedRegDep() && I.getLatency() == 0 &&
+ !I.getSUnit()->getInstr()->isPseudo())
+ return I.getSUnit();
+ return nullptr;
+}
+
+/// Change the latency between the two SUnits.
+void HexagonSubtarget::changeLatency(SUnit *Src, SmallVector<SDep, 4> &Deps,
+ SUnit *Dst, unsigned Lat) const {
+ MachineInstr *SrcI = Src->getInstr();
+ for (auto &I : Deps) {
+ if (I.getSUnit() != Dst)
+ continue;
+ I.setLatency(Lat);
+ SUnit *UpdateDst = I.getSUnit();
+ updateLatency(SrcI, UpdateDst->getInstr(), I);
+ // Update the latency of opposite edge too.
+ for (auto &PI : UpdateDst->Preds) {
+ if (PI.getSUnit() != Src || !PI.isAssignedRegDep())
+ continue;
+ PI.setLatency(Lat);
+ updateLatency(SrcI, UpdateDst->getInstr(), PI);
+ }
+ }
+}
+
+// Return true if these are the best two instructions to schedule
+// together with a zero latency. Only one dependence should have a zero
+// latency. If there are multiple choices, choose the best, and change
+// the others, if needed.
+bool HexagonSubtarget::isBestZeroLatency(SUnit *Src, SUnit *Dst,
+ const HexagonInstrInfo *TII) const {
+ MachineInstr *SrcInst = Src->getInstr();
+ MachineInstr *DstInst = Dst->getInstr();
+
+ if (SrcInst->isPHI() || DstInst->isPHI())
+ return false;
+
+ // Check if the Dst instruction is the best candidate first.
+ SUnit *Best = nullptr;
+ SUnit *DstBest = nullptr;
+ SUnit *SrcBest = getZeroLatency(Dst, Dst->Preds);
+ if (SrcBest == nullptr || Src->NodeNum >= SrcBest->NodeNum) {
+ // Check that Src doesn't have a better candidate.
+ DstBest = getZeroLatency(Src, Src->Succs);
+ if (DstBest == nullptr || Dst->NodeNum <= DstBest->NodeNum)
+ Best = Dst;
+ }
+ if (Best != Dst)
+ return false;
+
+  // The caller frequently adds the same dependence twice. If so, then
+ // return true for this case too.
+ if (Src == SrcBest && Dst == DstBest)
+ return true;
+
+ // Reassign the latency for the previous bests, which requires setting
+ // the dependence edge in both directions.
+ if (SrcBest != nullptr)
+ changeLatency(SrcBest, SrcBest->Succs, Dst, 1);
+ if (DstBest != nullptr)
+ changeLatency(Src, Src->Succs, DstBest, 1);
+  // If there is an edge from SrcBest to DstBest, then try to change that
+ // to 0 now.
+ if (SrcBest && DstBest)
+ changeLatency(SrcBest, SrcBest->Succs, DstBest, 0);
+
+ return true;
+}
+
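+isBestZeroLatency enforces that each instruction keeps at most one zero-latency edge, preferring the pairing whose NodeNums are closest. A sketch of just the tie-break (not the latency reassignment), with illustrative names:
+
+// Toy selection mirroring the NodeNum tie-break above: a candidate pair
+// (Src, Dst) wins only if no later producer already feeds Dst with zero
+// latency and no earlier consumer is already fed by Src with zero latency.
+struct SU { unsigned NodeNum; };
+
+static bool isBetterPair(const SU &Src, const SU &Dst,
+                         const SU *SrcBest, const SU *DstBest) {
+  if (SrcBest && Src.NodeNum < SrcBest->NodeNum)
+    return false;  // Dst already has a closer zero-latency producer
+  if (DstBest && Dst.NodeNum > DstBest->NodeNum)
+    return false;  // Src already has a closer zero-latency consumer
+  return true;
+}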
+// Update the latency of a Phi when the Phi bridges two instructions that
+// require a multi-cycle latency.
+void HexagonSubtarget::changePhiLatency(MachineInstr *SrcInst, SUnit *Dst,
+ SDep &Dep) const {
+ if (!SrcInst->isPHI() || Dst->NumPreds == 0 || Dep.getLatency() != 0)
+ return;
+
+ for (const SDep &PI : Dst->Preds) {
+ if (PI.getLatency() != 0)
+ continue;
+ Dep.setLatency(2);
+ break;
+ }
+}
+
+/// \brief Perform target specific adjustments to the latency of a schedule
+/// dependency.
+void HexagonSubtarget::adjustSchedDependency(SUnit *Src, SUnit *Dst,
+ SDep &Dep) const {
+ MachineInstr *SrcInst = Src->getInstr();
+ MachineInstr *DstInst = Dst->getInstr();
+ if (!Src->isInstr() || !Dst->isInstr())
+ return;
+
+ const HexagonInstrInfo *QII = static_cast<const HexagonInstrInfo *>(getInstrInfo());
+
+ // Instructions with .new operands have zero latency.
+ if (QII->canExecuteInBundle(SrcInst, DstInst) &&
+ isBestZeroLatency(Src, Dst, QII)) {
+ Dep.setLatency(0);
+ return;
+ }
+
+ if (!hasV60TOps())
+ return;
+
+  // Don't adjust the latency of the post-increment part of the instruction.
+ if (QII->isPostIncrement(SrcInst) && Dep.isAssignedRegDep()) {
+ if (SrcInst->mayStore())
+ return;
+ if (Dep.getReg() != SrcInst->getOperand(0).getReg())
+ return;
+ } else if (QII->isPostIncrement(DstInst) && Dep.getKind() == SDep::Anti) {
+ if (DstInst->mayStore())
+ return;
+ if (Dep.getReg() != DstInst->getOperand(0).getReg())
+ return;
+ } else if (QII->isPostIncrement(DstInst) && DstInst->mayStore() &&
+ Dep.isAssignedRegDep()) {
+ MachineOperand &Op = DstInst->getOperand(DstInst->getNumOperands() - 1);
+ if (Op.isReg() && Dep.getReg() != Op.getReg())
+ return;
+ }
+
+  // Check if we need to change any of the latency values when Phis are added.
+ if (useBSBScheduling() && SrcInst->isPHI()) {
+ changePhiLatency(SrcInst, Dst, Dep);
+ return;
+ }
+
+ // If it's a REG_SEQUENCE, use its destination instruction to determine
+ // the correct latency.
+ if (DstInst->isRegSequence() && Dst->NumSuccs == 1)
+ DstInst = Dst->Succs[0].getSUnit()->getInstr();
+
+ // Try to schedule uses near definitions to generate .cur.
+ if (EnableDotCurSched && QII->isToBeScheduledASAP(SrcInst, DstInst) &&
+ isBestZeroLatency(Src, Dst, QII)) {
+ Dep.setLatency(0);
+ return;
+ }
+
+ updateLatency(SrcInst, DstInst, Dep);
+}
+
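+adjustSchedDependency applies its rules in a fixed order: zero-latency .new bundling first, then the V60-only cases. A condensed decision sketch (illustrative flags standing in for the guards above; the Phi case delegates to changePhiLatency before returning):
+
+enum class LatencyAction { SetZero, LeaveAlone, AdjustPhi, Update };
+
+static LatencyAction classifyDep(bool NewValueBundle, bool HasV60Ops,
+                                 bool PostIncExempt, bool PhiUnderBSB,
+                                 bool DotCurCandidate) {
+  if (NewValueBundle)
+    return LatencyAction::SetZero;     // .new operands execute in-packet
+  if (!HasV60Ops || PostIncExempt)
+    return LatencyAction::LeaveAlone;  // early returns above
+  if (PhiUnderBSB)
+    return LatencyAction::AdjustPhi;   // delegates to changePhiLatency
+  if (DotCurCandidate)
+    return LatencyAction::SetZero;     // uses near defs can become .cur
+  return LatencyAction::Update;        // otherwise updateLatency applies
+}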
diff --git a/lib/Target/Hexagon/HexagonSubtarget.h b/lib/Target/Hexagon/HexagonSubtarget.h
index c7ae139c4346..9b40c130e622 100644
--- a/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/lib/Target/Hexagon/HexagonSubtarget.h
@@ -18,7 +18,6 @@
#include "HexagonISelLowering.h"
#include "HexagonInstrInfo.h"
#include "HexagonSelectionDAGInfo.h"
-#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <string>
@@ -47,6 +46,11 @@ public:
/// default for V60.
bool UseBSBScheduling;
+ class HexagonDAGMutation : public ScheduleDAGMutation {
+ public:
+ void apply(ScheduleDAGInstrs *DAG) override;
+ };
+
private:
std::string CPUString;
HexagonInstrInfo InstrInfo;
@@ -105,6 +109,11 @@ public:
// compiler time and will be removed eventually anyway.
bool enableMachineSchedDefaultSched() const override { return false; }
+ AntiDepBreakMode getAntiDepBreakMode() const override { return ANTIDEP_ALL; }
+ bool enablePostRAScheduler() const override { return true; }
+
+ bool enableSubRegLiveness() const override;
+
const std::string &getCPUString () const { return CPUString; }
// Threshold for small data section
@@ -114,6 +123,24 @@ public:
const HexagonArchEnum &getHexagonArchVersion() const {
return HexagonArchVersion;
}
+
+ void getPostRAMutations(
+ std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations)
+ const override;
+
+ /// \brief Perform target specific adjustments to the latency of a schedule
+ /// dependency.
+ void adjustSchedDependency(SUnit *def, SUnit *use, SDep& dep) const override;
+
+private:
+ // Helper function responsible for increasing the latency only.
+ void updateLatency(MachineInstr *SrcInst, MachineInstr *DstInst, SDep &Dep)
+ const;
+ void changeLatency(SUnit *Src, SmallVector<SDep, 4> &Deps, SUnit *Dst,
+ unsigned Lat) const;
+ bool isBestZeroLatency(SUnit *Src, SUnit *Dst, const HexagonInstrInfo *TII)
+ const;
+ void changePhiLatency(MachineInstr *SrcInst, SUnit *Dst, SDep &Dep) const;
};
} // end namespace llvm
diff --git a/lib/Target/Hexagon/HexagonSystemInst.td b/lib/Target/Hexagon/HexagonSystemInst.td
index 784686a437ad..771498a40b99 100644
--- a/lib/Target/Hexagon/HexagonSystemInst.td
+++ b/lib/Target/Hexagon/HexagonSystemInst.td
@@ -111,3 +111,24 @@ def Y2_isync: JRInst <(outs), (ins),
let Inst{9-0} = 0b0000000010;
}
+//===----------------------------------------------------------------------===//
+// System/User instructions.
+//===----------------------------------------------------------------------===//
+// traps and pause
+let hasSideEffects = 0, isSolo = 1 in
+class J2_MISC_TRAP_PAUSE<string mnemonic, bits<2> MajOp>
+ : JRInst
+ <(outs), (ins u8Imm:$u8),
+ #mnemonic#"(#$u8)"> {
+ bits<8> u8;
+
+ let IClass = 0b0101;
+ let Inst{27-24} = 0b0100;
+ let Inst{23-22} = MajOp;
+ let Inst{12-8} = u8{7-3};
+ let Inst{4-2} = u8{2-0};
+ }
+def J2_trap0 : J2_MISC_TRAP_PAUSE<"trap0", 0b00>;
+def J2_trap1 : J2_MISC_TRAP_PAUSE<"trap1", 0b10>;
+def J2_pause : J2_MISC_TRAP_PAUSE<"pause", 0b01>;
+
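+Per the encoding above, the 8-bit immediate is split across two instruction fields: u8{7-3} lands in Inst{12-8} and u8{2-0} in Inst{4-2}. A sketch of the same packing in C++, assuming (as elsewhere in the Hexagon encodings) that IClass occupies Inst{31-28}:
+
+#include <cstdint>
+
+// Pack a trap/pause immediate per the J2_MISC_TRAP_PAUSE encoding above.
+// MajOp selects trap0 (0b00), pause (0b01) or trap1 (0b10).
+static uint32_t encodeTrapPause(uint8_t u8, uint32_t MajOp) {
+  uint32_t Inst = 0;
+  Inst |= 0b0101u << 28;              // IClass, Inst{31-28}
+  Inst |= 0b0100u << 24;              // Inst{27-24}
+  Inst |= (MajOp & 0x3u) << 22;       // Inst{23-22}
+  Inst |= ((u8 >> 3) & 0x1Fu) << 8;   // u8{7-3} -> Inst{12-8}
+  Inst |= (u8 & 0x7u) << 2;           // u8{2-0} -> Inst{4-2}
+  return Inst;
+}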
diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp
index 34b03fb74cef..f964a6612f43 100644
--- a/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -18,6 +18,7 @@
#include "HexagonTargetObjectFile.h"
#include "HexagonTargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
@@ -33,6 +34,10 @@ static cl::opt<bool> EnableRDFOpt("rdf-opt", cl::Hidden, cl::ZeroOrMore,
static cl::opt<bool> DisableHardwareLoops("disable-hexagon-hwloops",
cl::Hidden, cl::desc("Disable Hardware Loops for Hexagon target"));
+static cl::opt<bool> DisableAModeOpt("disable-hexagon-amodeopt",
+ cl::Hidden, cl::ZeroOrMore, cl::init(false),
+ cl::desc("Disable Hexagon Addressing Mode Optimization"));
+
static cl::opt<bool> DisableHexagonCFGOpt("disable-hexagon-cfgopt",
cl::Hidden, cl::ZeroOrMore, cl::init(false),
cl::desc("Disable Hexagon CFG Optimization"));
@@ -72,6 +77,9 @@ static cl::opt<bool> EnableBitSimplify("hexagon-bit", cl::init(true),
static cl::opt<bool> EnableLoopResched("hexagon-loop-resched", cl::init(true),
cl::Hidden, cl::desc("Loop rescheduling"));
+static cl::opt<bool> HexagonNoOpt("hexagon-noopt", cl::init(false),
+ cl::Hidden, cl::desc("Disable backend optimizations"));
+
/// HexagonTargetMachineModule - Note that this is used on hosts that
/// cannot link in a library unless there are references into the
/// library. In particular, it seems that it is not possible to get
@@ -95,13 +103,13 @@ SchedCustomRegistry("hexagon", "Run Hexagon's custom scheduler",
namespace llvm {
FunctionPass *createHexagonBitSimplify();
+ FunctionPass *createHexagonBranchRelaxation();
FunctionPass *createHexagonCallFrameInformation();
FunctionPass *createHexagonCFGOptimizer();
FunctionPass *createHexagonCommonGEP();
FunctionPass *createHexagonCopyToCombine();
FunctionPass *createHexagonEarlyIfConversion();
FunctionPass *createHexagonExpandCondsets();
- FunctionPass *createHexagonExpandPredSpillCode();
FunctionPass *createHexagonFixupHwLoops();
FunctionPass *createHexagonGenExtract();
FunctionPass *createHexagonGenInsert();
@@ -113,6 +121,7 @@ namespace llvm {
FunctionPass *createHexagonLoopRescheduling();
FunctionPass *createHexagonNewValueJump();
FunctionPass *createHexagonOptimizeSZextends();
+ FunctionPass *createHexagonOptAddrMode();
FunctionPass *createHexagonPacketizer();
FunctionPass *createHexagonPeephole();
FunctionPass *createHexagonRDFOpt();
@@ -121,19 +130,27 @@ namespace llvm {
FunctionPass *createHexagonStoreWidening();
} // end namespace llvm;
-/// HexagonTargetMachine ctor - Create an ILP32 architecture model.
-///
+static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
+ if (!RM.hasValue())
+ return Reloc::Static;
+ return *RM;
+}
-/// Hexagon_TODO: Do I need an aggregate alignment?
-///
HexagonTargetMachine::HexagonTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, "e-m:e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-"
- "i1:8:8-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a:0-"
- "n16:32", TT, CPU, FS, Options, RM, CM, OL),
+  // Specify the vector alignment explicitly. For v512i1, the calculated
+ // alignment would be 512*alignment(i1), which is 512 bytes, instead of
+ // the required minimum of 64 bytes.
+ : LLVMTargetMachine(
+ T, "e-m:e-p:32:32:32-a:0-n16:32-"
+ "i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-"
+ "v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048",
+ TT, CPU, FS, Options, getEffectiveRelocModel(RM), CM,
+ (HexagonNoOpt ? CodeGenOpt::None : OL)),
TLOF(make_unique<HexagonTargetObjectFile>()) {
initAsmInfo();
}
@@ -178,15 +195,7 @@ namespace {
class HexagonPassConfig : public TargetPassConfig {
public:
HexagonPassConfig(HexagonTargetMachine *TM, PassManagerBase &PM)
- : TargetPassConfig(TM, PM) {
- bool NoOpt = (TM->getOptLevel() == CodeGenOpt::None);
- if (!NoOpt) {
- if (EnableExpandCondsets) {
- Pass *Exp = createHexagonExpandCondsets();
- insertPass(&RegisterCoalescerID, IdentifyingPassPtr(Exp));
- }
- }
- }
+ : TargetPassConfig(TM, PM) {}
HexagonTargetMachine &getHexagonTargetMachine() const {
return getTM<HexagonTargetMachine>();
@@ -259,6 +268,10 @@ bool HexagonPassConfig::addInstSelector() {
void HexagonPassConfig::addPreRegAlloc() {
if (getOptLevel() != CodeGenOpt::None) {
+ if (EnableExpandCondsets) {
+ Pass *Exp = createHexagonExpandCondsets();
+ insertPass(&RegisterCoalescerID, IdentifyingPassPtr(Exp));
+ }
if (!DisableStoreWidening)
addPass(createHexagonStoreWidening(), false);
if (!DisableHardwareLoops)
@@ -272,6 +285,8 @@ void HexagonPassConfig::addPostRegAlloc() {
addPass(createHexagonRDFOpt());
if (!DisableHexagonCFGOpt)
addPass(createHexagonCFGOptimizer(), false);
+ if (!DisableAModeOpt)
+ addPass(createHexagonOptAddrMode(), false);
}
}
@@ -288,8 +303,7 @@ void HexagonPassConfig::addPreEmitPass() {
if (!NoOpt)
addPass(createHexagonNewValueJump(), false);
- // Expand Spill code for predicate registers.
- addPass(createHexagonExpandPredSpillCode(), false);
+ addPass(createHexagonBranchRelaxation(), false);
// Create Packets.
if (!NoOpt) {
diff --git a/lib/Target/Hexagon/HexagonTargetMachine.h b/lib/Target/Hexagon/HexagonTargetMachine.h
index 968814b3ea32..70835c0d4ac5 100644
--- a/lib/Target/Hexagon/HexagonTargetMachine.h
+++ b/lib/Target/Hexagon/HexagonTargetMachine.h
@@ -30,7 +30,7 @@ class HexagonTargetMachine : public LLVMTargetMachine {
public:
HexagonTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
~HexagonTargetMachine() override;
const HexagonSubtarget *getSubtargetImpl(const Function &F) const override;
diff --git a/lib/Target/Hexagon/HexagonTargetObjectFile.cpp b/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
index ccca62021f5b..82b437eb6a0c 100644
--- a/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
+++ b/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
@@ -1,4 +1,4 @@
-//===-- HexagonTargetObjectFile.cpp - Hexagon asm properties --------------===//
+//===-- HexagonTargetObjectFile.cpp ---------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -10,10 +10,10 @@
// This file contains the declarations of the HexagonTargetObjectFile properties.
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "hexagon-sdata"
-#include "HexagonTargetObjectFile.h"
-#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"
+#include "HexagonTargetObjectFile.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
@@ -24,75 +24,368 @@
using namespace llvm;
-static cl::opt<int> SmallDataThreshold("hexagon-small-data-threshold",
- cl::init(8), cl::Hidden,
- cl::desc("The maximum size of an object in the sdata section"));
+static cl::opt<unsigned> SmallDataThreshold("hexagon-small-data-threshold",
+ cl::init(8), cl::Hidden,
+ cl::desc("The maximum size of an object in the sdata section"));
+
+static cl::opt<bool> NoSmallDataSorting("mno-sort-sda", cl::init(false),
+ cl::Hidden, cl::desc("Disable small data sections sorting"));
+
+static cl::opt<bool> StaticsInSData("hexagon-statics-in-small-data",
+ cl::init(false), cl::Hidden, cl::ZeroOrMore,
+ cl::desc("Allow static variables in .sdata"));
+
+static cl::opt<bool> TraceGVPlacement("trace-gv-placement",
+ cl::Hidden, cl::init(false),
+ cl::desc("Trace global value placement"));
+
+// TraceGVPlacement controls messages for all builds. For builds with assertions
+// (debug or release), messages are also controlled by the usual debug flags
+// (e.g. -debug and -debug-only=globallayout)
+#define TRACE_TO(s, X) s << X
+#ifdef NDEBUG
+#define TRACE(X) do { if (TraceGVPlacement) { TRACE_TO(errs(), X); } } while (0)
+#else
+#define TRACE(X) \
+ do { \
+ if (TraceGVPlacement) { TRACE_TO(errs(), X); } \
+ else { DEBUG( TRACE_TO(dbgs(), X) ); } \
+ } while (0)
+#endif
+
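+The TRACE macro above routes placement messages to errs() whenever the flag is set, and additionally to the usual debug stream in asserts builds. A minimal standalone analog of the release-build behavior (std::cerr stands in for errs(); the flag name is illustrative):
+
+#include <iostream>
+
+static bool TraceGVPlacementFlag = false;
+
+// Stream-style arguments expand inside the macro body, as with TRACE.
+#define TRACE_EXAMPLE(X)                                                  \
+  do {                                                                    \
+    if (TraceGVPlacementFlag)                                             \
+      std::cerr << X;                                                     \
+  } while (0)
+
+int main() {
+  TraceGVPlacementFlag = true;
+  TRACE_EXAMPLE("GV(" << "foo" << ") placed in .sdata.4\n");
+}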
+// Returns true if the section name is such that the symbol will be put
+// in a small data section.
+// For instance, global variables with section attributes such as ".sdata",
+// ".sdata.*", ".sbss", and ".sbss.*" will go into small data.
+static bool isSmallDataSection(StringRef Sec) {
+  // Sec is an exact name such as ".sdata", ".sbss", or ".scommon". Looking
+  // for an exact match obviates the need for checks for section names such
+  // as ".sdatafoo".
+ if (Sec.equals(".sdata") || Sec.equals(".sbss") || Sec.equals(".scommon"))
+ return true;
+ // If either ".sdata." or ".sbss." is a substring of the section name
+ // then put the symbol in small data.
+ return Sec.find(".sdata.") != StringRef::npos ||
+ Sec.find(".sbss.") != StringRef::npos ||
+ Sec.find(".scommon.") != StringRef::npos;
+}
+
+
+static const char *getSectionSuffixForSize(unsigned Size) {
+ switch (Size) {
+ default:
+ return "";
+ case 1:
+ return ".1";
+ case 2:
+ return ".2";
+ case 4:
+ return ".4";
+ case 8:
+ return ".8";
+ }
+}
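+Together these two helpers classify explicit section names and derive the size suffix used for sorted small-data names. A standalone check with the same semantics, using std::string in place of StringRef:
+
+#include <cassert>
+#include <string>
+
+// Mirrors isSmallDataSection above: ".sbss.4" is small data,
+// ".sdatafoo" is not, because only exact names and dotted prefixes match.
+static bool looksLikeSmallDataSection(const std::string &Sec) {
+  return Sec == ".sdata" || Sec == ".sbss" || Sec == ".scommon" ||
+         Sec.find(".sdata.") != std::string::npos ||
+         Sec.find(".sbss.") != std::string::npos ||
+         Sec.find(".scommon.") != std::string::npos;
+}
+
+int main() {
+  assert(looksLikeSmallDataSection(".sbss.4"));
+  assert(!looksLikeSmallDataSection(".sdatafoo"));
+}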
void HexagonTargetObjectFile::Initialize(MCContext &Ctx,
- const TargetMachine &TM) {
+ const TargetMachine &TM) {
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
InitializeELF(TM.Options.UseInitArray);
- SmallDataSection = getContext().getELFSection(
- ".sdata", ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC);
- SmallBSSSection = getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
- ELF::SHF_WRITE | ELF::SHF_ALLOC);
+ SmallDataSection =
+ getContext().getELFSection(".sdata", ELF::SHT_PROGBITS,
+ ELF::SHF_WRITE | ELF::SHF_ALLOC |
+ ELF::SHF_HEX_GPREL);
+ SmallBSSSection =
+ getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
+ ELF::SHF_WRITE | ELF::SHF_ALLOC |
+ ELF::SHF_HEX_GPREL);
}
-// sdata/sbss support taken largely from the MIPS Backend.
-static bool IsInSmallSection(uint64_t Size) {
- return Size > 0 && Size <= (uint64_t)SmallDataThreshold;
-}
-bool HexagonTargetObjectFile::IsSmallDataEnabled () const {
- return SmallDataThreshold > 0;
+MCSection *HexagonTargetObjectFile::SelectSectionForGlobal(
+ const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const {
+ TRACE("[SelectSectionForGlobal] GV(" << GV->getName() << ") ");
+ TRACE("input section(" << GV->getSection() << ") ");
+
+ TRACE((GV->hasPrivateLinkage() ? "private_linkage " : "")
+ << (GV->hasLocalLinkage() ? "local_linkage " : "")
+ << (GV->hasInternalLinkage() ? "internal " : "")
+ << (GV->hasExternalLinkage() ? "external " : "")
+ << (GV->hasCommonLinkage() ? "common_linkage " : "")
+ << (GV->hasCommonLinkage() ? "common " : "" )
+ << (Kind.isCommon() ? "kind_common " : "" )
+ << (Kind.isBSS() ? "kind_bss " : "" )
+ << (Kind.isBSSLocal() ? "kind_bss_local " : "" ));
+
+ if (isGlobalInSmallSection(GV, TM))
+ return selectSmallSectionForGlobal(GV, Kind, Mang, TM);
+
+ if (Kind.isCommon()) {
+ // This is purely for LTO+Linker Script because commons don't really have a
+ // section. However, the BitcodeSectionWriter pass will query for the
+ // sections of commons (and the linker expects us to know their section) so
+ // we'll return one here.
+ return BSSSection;
+ }
+
+ TRACE("default_ELF_section\n");
+ // Otherwise, we work the same as ELF.
+ return TargetLoweringObjectFileELF::SelectSectionForGlobal(GV, Kind,
+ Mang, TM);
}
-/// IsGlobalInSmallSection - Return true if this global value should be
-/// placed into small data/bss section.
-bool HexagonTargetObjectFile::IsGlobalInSmallSection(const GlobalValue *GV,
- const TargetMachine &TM) const {
- // If the primary definition of this global value is outside the current
- // translation unit or the global value is available for inspection but not
- // emission, then do nothing.
- if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
- return false;
- // Otherwise, Check if GV should be in sdata/sbss, when normally it would end
- // up in getKindForGlobal(GV, TM).
- return IsGlobalInSmallSection(GV, TM, getKindForGlobal(GV, TM));
+MCSection *HexagonTargetObjectFile::getExplicitSectionGlobal(
+ const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const {
+ TRACE("[getExplicitSectionGlobal] GV(" << GV->getName() << ") from("
+ << GV->getSection() << ") ");
+ TRACE((GV->hasPrivateLinkage() ? "private_linkage " : "")
+ << (GV->hasLocalLinkage() ? "local_linkage " : "")
+ << (GV->hasInternalLinkage() ? "internal " : "")
+ << (GV->hasExternalLinkage() ? "external " : "")
+ << (GV->hasCommonLinkage() ? "common_linkage " : "")
+ << (GV->hasCommonLinkage() ? "common " : "" )
+ << (Kind.isCommon() ? "kind_common " : "" )
+ << (Kind.isBSS() ? "kind_bss " : "" )
+ << (Kind.isBSSLocal() ? "kind_bss_local " : "" ));
+
+ if (GV->hasSection()) {
+ StringRef Section = GV->getSection();
+ if (Section.find(".access.text.group") != StringRef::npos)
+ return getContext().getELFSection(GV->getSection(), ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_EXECINSTR);
+ if (Section.find(".access.data.group") != StringRef::npos)
+ return getContext().getELFSection(GV->getSection(), ELF::SHT_PROGBITS,
+ ELF::SHF_WRITE | ELF::SHF_ALLOC);
+ }
+
+ if (isGlobalInSmallSection(GV, TM))
+ return selectSmallSectionForGlobal(GV, Kind, Mang, TM);
+
+ // Otherwise, we work the same as ELF.
+ TRACE("default_ELF_section\n");
+ return TargetLoweringObjectFileELF::getExplicitSectionGlobal(GV, Kind,
+ Mang, TM);
}
-/// IsGlobalInSmallSection - Return true if this global value should be
-/// placed into small data/bss section.
-bool HexagonTargetObjectFile::
-IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM,
- SectionKind Kind) const {
+
+/// Return true if this global value should be placed into small data/bss
+/// section.
+bool HexagonTargetObjectFile::isGlobalInSmallSection(const GlobalValue *GV,
+ const TargetMachine &TM) const {
// Only global variables, not functions.
- const GlobalVariable *GVA = dyn_cast<GlobalVariable>(GV);
- if (!GVA)
+ DEBUG(dbgs() << "Checking if value is in small-data, -G"
+ << SmallDataThreshold << ": \"" << GV->getName() << "\": ");
+ const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
+ if (!GVar) {
+ DEBUG(dbgs() << "no, not a global variable\n");
return false;
+ }
- if (Kind.isBSS() || Kind.isData() || Kind.isCommon()) {
- Type *Ty = GV->getType()->getElementType();
- return IsInSmallSection(
- GV->getParent()->getDataLayout().getTypeAllocSize(Ty));
+ // Globals with external linkage that have an original section set must be
+ // emitted to that section, regardless of whether we would put them into
+ // small data or not. This is how we can support mixing -G0/-G8 in LTO.
+ if (GVar->hasSection()) {
+ bool IsSmall = isSmallDataSection(GVar->getSection());
+ DEBUG(dbgs() << (IsSmall ? "yes" : "no") << ", has section: "
+ << GVar->getSection() << '\n');
+ return IsSmall;
}
- return false;
+ if (GVar->isConstant()) {
+ DEBUG(dbgs() << "no, is a constant\n");
+ return false;
+ }
+
+ bool IsLocal = GVar->hasLocalLinkage();
+ if (!StaticsInSData && IsLocal) {
+ DEBUG(dbgs() << "no, is static\n");
+ return false;
+ }
+
+ Type *GType = GVar->getType();
+ if (PointerType *PT = dyn_cast<PointerType>(GType))
+ GType = PT->getElementType();
+
+ if (isa<ArrayType>(GType)) {
+ DEBUG(dbgs() << "no, is an array\n");
+ return false;
+ }
+
+  // If the type is a struct with no body provided, treat it conservatively.
+  // There cannot be actual definitions of objects of such a type in this CU
+ // (only references), so assuming that they are not in sdata is safe. If
+ // these objects end up in the sdata, the references will still be valid.
+ if (StructType *ST = dyn_cast<StructType>(GType)) {
+ if (ST->isOpaque()) {
+ DEBUG(dbgs() << "no, has opaque type\n");
+ return false;
+ }
+ }
+
+ unsigned Size = GVar->getParent()->getDataLayout().getTypeAllocSize(GType);
+ if (Size == 0) {
+ DEBUG(dbgs() << "no, has size 0\n");
+ return false;
+ }
+ if (Size > SmallDataThreshold) {
+ DEBUG(dbgs() << "no, size exceeds sdata threshold: " << Size << '\n');
+ return false;
+ }
+
+ DEBUG(dbgs() << "yes\n");
+ return true;
+}
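+Summarizing the filter above with worked examples, assuming the default -G8 threshold and -hexagon-statics-in-small-data left unset (outcomes follow directly from the checks in the function):
+
+int g;               // yes: 4-byte scalar, external linkage
+long long g2;        // yes: exactly at the 8-byte threshold
+const int c = 1;     // no: constants are excluded
+static int s;        // no: local linkage without StaticsInSData
+int a[2];            // no: array types never go to small data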
+
+
+bool HexagonTargetObjectFile::isSmallDataEnabled() const {
+ return SmallDataThreshold > 0;
+}
+
+
+unsigned HexagonTargetObjectFile::getSmallDataSize() const {
+ return SmallDataThreshold;
+}
+
+
+/// Descends any type down to its "elementary" components, discovering the
+/// smallest addressable one. If zero is returned, the declaration will not
+/// be modified.
+unsigned HexagonTargetObjectFile::getSmallestAddressableSize(const Type *Ty,
+ const GlobalValue *GV, const TargetMachine &TM) const {
+  // Start with the largest element access size the assembler can handle;
+  // the recursion below lowers it as smaller elements are discovered.
+ unsigned SmallestElement = 8;
+
+ if (!Ty)
+ return 0;
+ switch (Ty->getTypeID()) {
+ case Type::StructTyID: {
+ const StructType *STy = cast<const StructType>(Ty);
+ for (auto &E : STy->elements()) {
+ unsigned AtomicSize = getSmallestAddressableSize(E, GV, TM);
+ if (AtomicSize < SmallestElement)
+ SmallestElement = AtomicSize;
+ }
+ return (STy->getNumElements() == 0) ? 0 : SmallestElement;
+ }
+ case Type::ArrayTyID: {
+ const ArrayType *ATy = cast<const ArrayType>(Ty);
+ return getSmallestAddressableSize(ATy->getElementType(), GV, TM);
+ }
+ case Type::VectorTyID: {
+ const VectorType *PTy = cast<const VectorType>(Ty);
+ return getSmallestAddressableSize(PTy->getElementType(), GV, TM);
+ }
+ case Type::PointerTyID:
+ case Type::HalfTyID:
+ case Type::FloatTyID:
+ case Type::DoubleTyID:
+ case Type::IntegerTyID: {
+ const DataLayout &DL = GV->getParent()->getDataLayout();
+    // It is unfortunate that DL's functions take non-const Type*.
+ return DL.getTypeAllocSize(const_cast<Type*>(Ty));
+ }
+ case Type::FunctionTyID:
+ case Type::VoidTyID:
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+ case Type::LabelTyID:
+ case Type::MetadataTyID:
+ case Type::X86_MMXTyID:
+ case Type::TokenTyID:
+ return 0;
+ }
+
+ return 0;
}
-MCSection *
-HexagonTargetObjectFile::SelectSectionForGlobal(const GlobalValue *GV,
- SectionKind Kind, Mangler &Mang,
- const TargetMachine &TM) const {
+MCSection *HexagonTargetObjectFile::selectSmallSectionForGlobal(
+ const GlobalValue *GV, SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const {
+ const Type *GTy = GV->getType()->getElementType();
+ unsigned Size = getSmallestAddressableSize(GTy, GV, TM);
+
+  // If we have -ffunction-sections or -fdata-sections then we should emit
+  // the global value to a unique section specifically for it... even for
+  // sdata.
+ bool EmitUniquedSection = TM.getDataSections();
+
+ TRACE("Small data. Size(" << Size << ")");
// Handle Small Section classification here.
- if (Kind.isBSS() && IsGlobalInSmallSection(GV, TM, Kind))
- return SmallBSSSection;
- if (Kind.isData() && IsGlobalInSmallSection(GV, TM, Kind))
- return SmallDataSection;
+ if (Kind.isBSS() || Kind.isBSSLocal()) {
+    // If -mno-sort-sda is not set, find the smallest addressable entity in
+    // the declaration and append its size to the section name string.
+    // Note: this does not track the actual usage of the value, only its
+    // declaration. Also, the compiler adds explicit pad fields to some
+    // struct declarations; they are currently counted towards the smallest
+    // addressable entity.
+ if (NoSmallDataSorting) {
+ TRACE(" default sbss\n");
+ return SmallBSSSection;
+ }
+
+ StringRef Prefix(".sbss");
+ SmallString<128> Name(Prefix);
+ Name.append(getSectionSuffixForSize(Size));
+
+ if (EmitUniquedSection) {
+ Name.append(".");
+ Name.append(GV->getName());
+ }
+ TRACE(" unique sbss(" << Name << ")\n");
+ return getContext().getELFSection(Name.str(), ELF::SHT_NOBITS,
+ ELF::SHF_WRITE | ELF::SHF_ALLOC | ELF::SHF_HEX_GPREL);
+ }
+
+ if (Kind.isCommon()) {
+ // This is purely for LTO+Linker Script because commons don't really have a
+ // section. However, the BitcodeSectionWriter pass will query for the
+ // sections of commons (and the linker expects us to know their section) so
+ // we'll return one here.
+ if (NoSmallDataSorting)
+ return BSSSection;
+
+ Twine Name = Twine(".scommon") + getSectionSuffixForSize(Size);
+ TRACE(" small COMMON (" << Name << ")\n");
+
+ return getContext().getELFSection(Name.str(), ELF::SHT_NOBITS,
+ ELF::SHF_WRITE | ELF::SHF_ALLOC |
+ ELF::SHF_HEX_GPREL);
+ }
+
+  // We could have changed an sdata object to a constant... in which
+  // case the Kind could be wrong for it.
+ if (Kind.isMergeableConst()) {
+ TRACE(" const_object_as_data ");
+ const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
+ if (GVar->hasSection() && isSmallDataSection(GVar->getSection()))
+ Kind = SectionKind::getData();
+ }
+
+ if (Kind.isData()) {
+ if (NoSmallDataSorting) {
+ TRACE(" default sdata\n");
+ return SmallDataSection;
+ }
+
+ StringRef Prefix(".sdata");
+ SmallString<128> Name(Prefix);
+ Name.append(getSectionSuffixForSize(Size));
+
+ if (EmitUniquedSection) {
+ Name.append(".");
+ Name.append(GV->getName());
+ }
+ TRACE(" unique sdata(" << Name << ")\n");
+ return getContext().getELFSection(Name.str(), ELF::SHT_PROGBITS,
+ ELF::SHF_WRITE | ELF::SHF_ALLOC | ELF::SHF_HEX_GPREL);
+ }
+ TRACE("default ELF section\n");
// Otherwise, we work the same as ELF.
- return TargetLoweringObjectFileELF::SelectSectionForGlobal(GV, Kind, Mang,TM);
+ return TargetLoweringObjectFileELF::SelectSectionForGlobal(GV, Kind,
+ Mang, TM);
}
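Putting the pieces together, the sorted small-data names above are built as prefix, then size suffix, then optionally the global's name when data sections are enabled. A sketch of the name composition (plain C++, not the MCContext API):

#include <string>

// Compose a sorted small-data section name as done above, e.g.
// (".sdata", 4, "counter", true) -> ".sdata.4.counter".
static std::string smallSectionName(const std::string &Prefix, unsigned Size,
                                    const std::string &GVName,
                                    bool EmitUniquedSection) {
  std::string Name = Prefix;
  if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
    Name += "." + std::to_string(Size);
  if (EmitUniquedSection)
    Name += "." + GVName;
  return Name;
}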
diff --git a/lib/Target/Hexagon/HexagonTargetObjectFile.h b/lib/Target/Hexagon/HexagonTargetObjectFile.h
index da0eeeb3fd28..cbc00da88c58 100644
--- a/lib/Target/Hexagon/HexagonTargetObjectFile.h
+++ b/lib/Target/Hexagon/HexagonTargetObjectFile.h
@@ -1,4 +1,4 @@
-//===-- HexagonTargetAsmInfo.h - Hexagon asm properties --------*- C++ -*--===//
+//===-- HexagonTargetObjectFile.h -----------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,24 +16,31 @@
namespace llvm {
class HexagonTargetObjectFile : public TargetLoweringObjectFileELF {
- MCSectionELF *SmallDataSection;
- MCSectionELF *SmallBSSSection;
-
public:
void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
- /// IsGlobalInSmallSection - Return true if this global address should be
- /// placed into small data/bss section.
- bool IsGlobalInSmallSection(const GlobalValue *GV,
- const TargetMachine &TM,
- SectionKind Kind) const;
- bool IsGlobalInSmallSection(const GlobalValue *GV,
- const TargetMachine &TM) const;
-
- bool IsSmallDataEnabled () const;
MCSection *SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
- Mangler &Mang,
- const TargetMachine &TM) const override;
+ Mangler &Mang, const TargetMachine &TM) const override;
+
+ MCSection *getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler &Mang, const TargetMachine &TM) const override;
+
+ bool isGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM)
+ const;
+
+ bool isSmallDataEnabled() const;
+
+ unsigned getSmallDataSize() const;
+
+ private:
+ MCSectionELF *SmallDataSection;
+ MCSectionELF *SmallBSSSection;
+
+ unsigned getSmallestAddressableSize(const Type *Ty, const GlobalValue *GV,
+ const TargetMachine &TM) const;
+
+ MCSection *selectSmallSectionForGlobal(const GlobalValue *GV,
+ SectionKind Kind, Mangler &Mang, const TargetMachine &TM) const;
};
} // namespace llvm
diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index 81850548bb6e..d326b9471315 100644
--- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -29,8 +29,6 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include <map>
-#include <vector>
using namespace llvm;
@@ -81,6 +79,10 @@ namespace {
return "Hexagon Packetizer";
}
bool runOnMachineFunction(MachineFunction &Fn) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
private:
const HexagonInstrInfo *HII;
@@ -106,16 +108,19 @@ HexagonPacketizerList::HexagonPacketizerList(MachineFunction &MF,
: VLIWPacketizerList(MF, MLI, AA), MBPI(MBPI), MLI(&MLI) {
HII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
HRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
+
+ addMutation(make_unique<HexagonSubtarget::HexagonDAGMutation>());
}
// Check if FirstI modifies a register that SecondI reads.
-static bool hasWriteToReadDep(const MachineInstr *FirstI,
- const MachineInstr *SecondI, const TargetRegisterInfo *TRI) {
- for (auto &MO : FirstI->operands()) {
+static bool hasWriteToReadDep(const MachineInstr &FirstI,
+ const MachineInstr &SecondI,
+ const TargetRegisterInfo *TRI) {
+ for (auto &MO : FirstI.operands()) {
if (!MO.isReg() || !MO.isDef())
continue;
unsigned R = MO.getReg();
- if (SecondI->readsRegister(R, TRI))
+ if (SecondI.readsRegister(R, TRI))
return true;
}
return false;
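hasWriteToReadDep answers one question: does any register defined by FirstI get read by SecondI? Later in this file it decides whether an inline-asm instruction pulled out of a bundle must go before or after it. A standalone model over plain register sets:

#include <set>

// True when any register written by the first instruction is read by the
// second, i.e. reordering the two would break a write->read dependence.
static bool writesFeedReads(const std::set<unsigned> &FirstDefs,
                            const std::set<unsigned> &SecondUses) {
  for (unsigned R : FirstDefs)
    if (SecondUses.count(R))
      return true;
  return false;
}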
@@ -146,7 +151,7 @@ static MachineBasicBlock::iterator moveInstrOut(MachineInstr *MI,
B.splice(InsertPt, &B, MI);
// Get the size of the bundle without asserting.
- MachineBasicBlock::const_instr_iterator I(BundleIt);
+ MachineBasicBlock::const_instr_iterator I = BundleIt.getInstrIterator();
MachineBasicBlock::const_instr_iterator E = B.instr_end();
unsigned Size = 0;
for (++I; I != E && I->isBundledWithPred(); ++I)
@@ -168,7 +173,7 @@ static MachineBasicBlock::iterator moveInstrOut(MachineInstr *MI,
bool HexagonPacketizer::runOnMachineFunction(MachineFunction &MF) {
- if (DisablePacketizer)
+ if (DisablePacketizer || skipFunction(*MF.getFunction()))
return false;
HII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
@@ -216,12 +221,12 @@ bool HexagonPacketizer::runOnMachineFunction(MachineFunction &MF) {
// Find the first non-boundary starting from the end of the last
// scheduling region.
MachineBasicBlock::iterator RB = Begin;
- while (RB != End && HII->isSchedulingBoundary(RB, &MB, MF))
+ while (RB != End && HII->isSchedulingBoundary(*RB, &MB, MF))
++RB;
// Find the first boundary starting from the beginning of the new
// region.
MachineBasicBlock::iterator RE = RB;
- while (RE != End && !HII->isSchedulingBoundary(RE, &MB, MF))
+ while (RE != End && !HII->isSchedulingBoundary(*RE, &MB, MF))
++RE;
// Add the scheduling boundary if it's not block end.
if (RE != End)
@@ -254,9 +259,9 @@ bool HexagonPacketizerList::canReserveResourcesForConstExt() {
// return true, otherwise, return false.
bool HexagonPacketizerList::tryAllocateResourcesForConstExt(bool Reserve) {
auto *ExtMI = MF.CreateMachineInstr(HII->get(Hexagon::A4_ext), DebugLoc());
- bool Avail = ResourceTracker->canReserveResources(ExtMI);
+ bool Avail = ResourceTracker->canReserveResources(*ExtMI);
if (Reserve && Avail)
- ResourceTracker->reserveResources(ExtMI);
+ ResourceTracker->reserveResources(*ExtMI);
MF.DeleteMachineInstr(ExtMI);
return Avail;
}
@@ -365,7 +370,7 @@ bool HexagonPacketizerList::canPromoteToDotCur(const MachineInstr *MI,
const TargetRegisterClass *RC) {
if (!HII->isV60VectorInstruction(MI))
return false;
- if (!HII->isV60VectorInstruction(MII))
+ if (!HII->isV60VectorInstruction(&*MII))
return false;
// Already a dot new instruction.
@@ -383,11 +388,14 @@ bool HexagonPacketizerList::canPromoteToDotCur(const MachineInstr *MI,
DEBUG(dbgs() << "Can we DOT Cur Vector MI\n";
MI->dump();
dbgs() << "in packet\n";);
- MachineInstr *MJ = MII;
- DEBUG(dbgs() << "Checking CUR against "; MJ->dump(););
+ MachineInstr &MJ = *MII;
+ DEBUG({
+ dbgs() << "Checking CUR against ";
+ MJ.dump();
+ });
unsigned DestReg = MI->getOperand(0).getReg();
bool FoundMatch = false;
- for (auto &MO : MJ->operands())
+ for (auto &MO : MJ.operands())
if (MO.isReg() && MO.getReg() == DestReg)
FoundMatch = true;
if (!FoundMatch)
@@ -436,7 +444,7 @@ enum PredicateKind {
/// Returns true if an instruction is predicated on p0 and false if it's
/// predicated on !p0.
-static PredicateKind getPredicateSense(const MachineInstr *MI,
+static PredicateKind getPredicateSense(const MachineInstr &MI,
const HexagonInstrInfo *HII) {
if (!HII->isPredicated(MI))
return PK_Unknown;
@@ -570,8 +578,8 @@ bool HexagonPacketizerList::canPromoteToNewValueStore(const MachineInstr *MI,
// If the source that feeds the store is predicated, new value store must
// also be predicated.
- if (HII->isPredicated(PacketMI)) {
- if (!HII->isPredicated(MI))
+ if (HII->isPredicated(*PacketMI)) {
+ if (!HII->isPredicated(*MI))
return false;
// Check to make sure that they both will have their predicates
@@ -613,8 +621,8 @@ bool HexagonPacketizerList::canPromoteToNewValueStore(const MachineInstr *MI,
// 3) Both new-value register producer and user should have same predicate
// sense, i.e, either both should be negated or both should be non-negated.
if (predRegNumDst != predRegNumSrc ||
- HII->isDotNewInst(PacketMI) != HII->isDotNewInst(MI) ||
- getPredicateSense(MI, HII) != getPredicateSense(PacketMI, HII))
+ HII->isDotNewInst(PacketMI) != HII->isDotNewInst(MI) ||
+ getPredicateSense(*MI, HII) != getPredicateSense(*PacketMI, HII))
return false;
}
@@ -762,7 +770,7 @@ bool HexagonPacketizerList::canPromoteToDotNew(const MachineInstr *MI,
int NewOpcode = HII->getDotNewOp(MI);
const MCInstrDesc &D = HII->get(NewOpcode);
MachineInstr *NewMI = MF.CreateMachineInstr(D, DebugLoc());
- bool ResourcesAvailable = ResourceTracker->canReserveResources(NewMI);
+ bool ResourcesAvailable = ResourceTracker->canReserveResources(*NewMI);
MF.DeleteMachineInstr(NewMI);
if (!ResourcesAvailable)
return false;
@@ -793,7 +801,7 @@ bool HexagonPacketizerList::restrictingDepExistInPacket(MachineInstr* MI,
for (auto I : CurrentPacketMIs) {
// We only care for dependencies to predicated instructions
- if (!HII->isPredicated(I))
+ if (!HII->isPredicated(*I))
continue;
// Scheduling Unit for current insn in the packet
@@ -817,13 +825,13 @@ bool HexagonPacketizerList::restrictingDepExistInPacket(MachineInstr* MI,
/// Gets the predicate register of a predicated instruction.
-static unsigned getPredicatedRegister(MachineInstr *MI,
+static unsigned getPredicatedRegister(MachineInstr &MI,
const HexagonInstrInfo *QII) {
/// We use the following rule: The first predicate register that is a use is
/// the predicate register of a predicated instruction.
assert(QII->isPredicated(MI) && "Must be predicated instruction");
- for (auto &Op : MI->operands()) {
+ for (auto &Op : MI.operands()) {
if (Op.isReg() && Op.getReg() && Op.isUse() &&
Hexagon::PredRegsRegClass.contains(Op.getReg()))
return Op.getReg();
@@ -835,8 +843,8 @@ static unsigned getPredicatedRegister(MachineInstr *MI,
// Given two predicated instructions, this function detects whether
// the predicates are complements.
-bool HexagonPacketizerList::arePredicatesComplements(MachineInstr *MI1,
- MachineInstr *MI2) {
+bool HexagonPacketizerList::arePredicatesComplements(MachineInstr &MI1,
+ MachineInstr &MI2) {
+  // If we don't know the predicate sense of the instructions, bail out
+  // early; we will need it later.
if (getPredicateSense(MI1, HII) == PK_Unknown ||
@@ -844,7 +852,7 @@ bool HexagonPacketizerList::arePredicatesComplements(MachineInstr *MI1,
return false;
// Scheduling unit for candidate.
- SUnit *SU = MIToSUnit[MI1];
+ SUnit *SU = MIToSUnit[&MI1];
// One corner case deals with the following scenario:
// Trying to add
@@ -898,7 +906,7 @@ bool HexagonPacketizerList::arePredicatesComplements(MachineInstr *MI1,
Hexagon::PredRegsRegClass.contains(PReg1) &&
Hexagon::PredRegsRegClass.contains(PReg2) &&
getPredicateSense(MI1, HII) != getPredicateSense(MI2, HII) &&
- HII->isDotNewInst(MI1) == HII->isDotNewInst(MI2);
+ HII->isDotNewInst(&MI1) == HII->isDotNewInst(&MI2);
}
// Initialize packetizer flags.
@@ -911,31 +919,31 @@ void HexagonPacketizerList::initPacketizerState() {
}
// Ignore bundling of pseudo instructions.
-bool HexagonPacketizerList::ignorePseudoInstruction(const MachineInstr *MI,
- const MachineBasicBlock*) {
- if (MI->isDebugValue())
+bool HexagonPacketizerList::ignorePseudoInstruction(const MachineInstr &MI,
+ const MachineBasicBlock *) {
+ if (MI.isDebugValue())
return true;
- if (MI->isCFIInstruction())
+ if (MI.isCFIInstruction())
return false;
// We must print out inline assembly.
- if (MI->isInlineAsm())
+ if (MI.isInlineAsm())
return false;
- if (MI->isImplicitDef())
+ if (MI.isImplicitDef())
return false;
// We check if MI has any functional units mapped to it. If it doesn't,
// we ignore the instruction.
- const MCInstrDesc& TID = MI->getDesc();
+ const MCInstrDesc& TID = MI.getDesc();
auto *IS = ResourceTracker->getInstrItins()->beginStage(TID.getSchedClass());
unsigned FuncUnits = IS->getUnits();
return !FuncUnits;
}
-bool HexagonPacketizerList::isSoloInstruction(const MachineInstr *MI) {
- if (MI->isEHLabel() || MI->isCFIInstruction())
+bool HexagonPacketizerList::isSoloInstruction(const MachineInstr &MI) {
+ if (MI.isEHLabel() || MI.isCFIInstruction())
return true;
// Consider inline asm to not be a solo instruction by default.
@@ -943,19 +951,19 @@ bool HexagonPacketizerList::isSoloInstruction(const MachineInstr *MI) {
// removed, and placed outside of the packet (before or after, depending
// on dependencies). This is to reduce the impact of inline asm as a
// "packet splitting" instruction.
- if (MI->isInlineAsm() && !ScheduleInlineAsm)
+ if (MI.isInlineAsm() && !ScheduleInlineAsm)
return true;
// From Hexagon V4 Programmer's Reference Manual 3.4.4 Grouping constraints:
// trap, pause, barrier, icinva, isync, and syncht are solo instructions.
// They must not be grouped with other instructions in a packet.
- if (isSchedBarrier(MI))
+ if (isSchedBarrier(&MI))
return true;
- if (HII->isSolo(MI))
+ if (HII->isSolo(&MI))
return true;
- if (MI->getOpcode() == Hexagon::A2_nop)
+ if (MI.getOpcode() == Hexagon::A2_nop)
return true;
return false;
@@ -1016,7 +1024,7 @@ void HexagonPacketizerList::unpacketizeSoloInstrs(MachineFunction &MF) {
// after the bundle (to preserve the bundle semantics).
bool InsertBeforeBundle;
if (MI->isInlineAsm())
- InsertBeforeBundle = !hasWriteToReadDep(MI, BundleIt, HRI);
+ InsertBeforeBundle = !hasWriteToReadDep(*MI, *BundleIt, HRI);
else if (MI->isDebugValue())
InsertBeforeBundle = true;
else
@@ -1045,7 +1053,7 @@ bool HexagonPacketizerList::hasDeadDependence(const MachineInstr *I,
// defining the same (dead) register.
if (I->isCall() || J->isCall())
return false;
- if (HII->isPredicated(I) || HII->isPredicated(J))
+ if (HII->isPredicated(*I) || HII->isPredicated(*J))
return false;
BitVector DeadDefs(Hexagon::NUM_TARGET_REGS);
@@ -1085,7 +1093,7 @@ bool HexagonPacketizerList::hasControlDependence(const MachineInstr *I,
auto isBadForLoopN = [this] (const MachineInstr *MI) -> bool {
if (MI->isCall() || HII->isDeallocRet(MI) || HII->isNewValueJump(MI))
return true;
- if (HII->isPredicated(MI) && HII->isPredicatedNew(MI) && HII->isJumpR(MI))
+ if (HII->isPredicated(*MI) && HII->isPredicatedNew(*MI) && HII->isJumpR(MI))
return true;
return false;
};
@@ -1139,7 +1147,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
const unsigned FrameSize = MF.getFrameInfo()->getStackSize();
// Solo instructions cannot go in the packet.
- assert(!isSoloInstruction(I) && "Unexpected solo instr!");
+ assert(!isSoloInstruction(*I) && "Unexpected solo instr!");
if (cannotCoexist(I, J))
return false;
@@ -1158,12 +1166,12 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
// If an instruction feeds new value jump, glue it.
MachineBasicBlock::iterator NextMII = I;
++NextMII;
- if (NextMII != I->getParent()->end() && HII->isNewValueJump(NextMII)) {
- MachineInstr *NextMI = NextMII;
+ if (NextMII != I->getParent()->end() && HII->isNewValueJump(&*NextMII)) {
+ MachineInstr &NextMI = *NextMII;
bool secondRegMatch = false;
- const MachineOperand &NOp0 = NextMI->getOperand(0);
- const MachineOperand &NOp1 = NextMI->getOperand(1);
+ const MachineOperand &NOp0 = NextMI.getOperand(0);
+ const MachineOperand &NOp1 = NextMI.getOperand(1);
if (NOp1.isReg() && I->getOperand(0).getReg() == NOp1.getReg())
secondRegMatch = true;
@@ -1242,7 +1250,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
RC = HRI->getMinimalPhysRegClass(DepReg);
}
- if (I->isCall() || I->isReturn()) {
+ if (I->isCall() || I->isReturn() || HII->isTailCall(I)) {
if (!isRegDependence(DepType))
continue;
if (!isCallDependent(I, DepType, SUJ->Succs[i].getReg()))
@@ -1275,8 +1283,8 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
// For predicated instructions, if the predicates are complements then
// there can be no dependence.
- if (HII->isPredicated(I) && HII->isPredicated(J) &&
- arePredicatesComplements(I, J)) {
+ if (HII->isPredicated(*I) && HII->isPredicated(*J) &&
+ arePredicatesComplements(*I, *J)) {
// Not always safe to do this translation.
// DAG Builder attempts to reduce dependence edges using transitive
// nature of dependencies. Here is an example:
@@ -1400,8 +1408,30 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
}
}
- // Skip over anti-dependences. Two instructions that are anti-dependent
- // can share a packet.
+ // There are certain anti-dependencies that cannot be ignored.
+ // Specifically:
+ // J2_call ... %R0<imp-def> ; SUJ
+ // R0 = ... ; SUI
+ // Those cannot be packetized together, since the call will observe
+ // the effect of the assignment to R0.
+ if (DepType == SDep::Anti && J->isCall()) {
+ // Check if I defines any volatile register. We should also check
+ // registers that the call may read, but these happen to be a
+ // subset of the volatile register set.
+ for (const MCPhysReg *P = J->getDesc().ImplicitDefs; P && *P; ++P) {
+ if (!I->modifiesRegister(*P, HRI))
+ continue;
+ FoundSequentialDependence = true;
+ break;
+ }
+ }
+
+ // Skip over remaining anti-dependences. Two instructions that are
+ // anti-dependent can share a packet, since in most such cases all
+ // operands are read before any modifications take place.
+ // The exceptions are branch and call instructions, since they are
+ // executed after all other instructions have completed (at least
+ // conceptually).
if (DepType != SDep::Anti) {
FoundSequentialDependence = true;
break;
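[Editor's note, not part of the patch] A standalone sketch (simplified; register numbers are placeholders, and the real code uses modifiesRegister rather than plain equality) of the implicit-def scan above: walk the call descriptor's null-terminated ImplicitDefs list and record a real dependence as soon as the other instruction writes one of those registers.

#include <cassert>
#include <cstdint>

int main() {
  const uint16_t ImplicitDefs[] = {1 /*R0*/, 2 /*R1*/, 0}; // 0-terminated
  uint16_t DefinedByI = 1;                                 // I writes R0
  bool FoundSequentialDependence = false;
  for (const uint16_t *P = ImplicitDefs; *P; ++P) {
    if (*P != DefinedByI)
      continue;
    FoundSequentialDependence = true; // the call observes I's write to R0
    break;
  }
  assert(FoundSequentialDependence);
  return 0;
}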
@@ -1444,26 +1474,25 @@ bool HexagonPacketizerList::isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
return false;
}
-
MachineBasicBlock::iterator
-HexagonPacketizerList::addToPacket(MachineInstr *MI) {
+HexagonPacketizerList::addToPacket(MachineInstr &MI) {
MachineBasicBlock::iterator MII = MI;
- MachineBasicBlock *MBB = MI->getParent();
- if (MI->isImplicitDef()) {
- unsigned R = MI->getOperand(0).getReg();
+ MachineBasicBlock *MBB = MI.getParent();
+ if (MI.isImplicitDef()) {
+ unsigned R = MI.getOperand(0).getReg();
if (Hexagon::IntRegsRegClass.contains(R)) {
MCSuperRegIterator S(R, HRI, false);
- MI->addOperand(MachineOperand::CreateReg(*S, true, true));
+ MI.addOperand(MachineOperand::CreateReg(*S, true, true));
}
return MII;
}
assert(ResourceTracker->canReserveResources(MI));
- bool ExtMI = HII->isExtended(MI) || HII->isConstExtended(MI);
+ bool ExtMI = HII->isExtended(&MI) || HII->isConstExtended(&MI);
bool Good = true;
if (GlueToNewValueJump) {
- MachineInstr *NvjMI = ++MII;
+ MachineInstr &NvjMI = *++MII;
// We need to put both instructions in the same packet: MI and NvjMI.
// Either of them can require a constant extender. Try to add both to
// the current packet, and if that fails, end the packet and start a
@@ -1472,7 +1501,7 @@ HexagonPacketizerList::addToPacket(MachineInstr *MI) {
if (ExtMI)
Good = tryAllocateResourcesForConstExt(true);
- bool ExtNvjMI = HII->isExtended(NvjMI) || HII->isConstExtended(NvjMI);
+ bool ExtNvjMI = HII->isExtended(&NvjMI) || HII->isConstExtended(&NvjMI);
if (Good) {
if (ResourceTracker->canReserveResources(NvjMI))
ResourceTracker->reserveResources(NvjMI);
@@ -1497,8 +1526,8 @@ HexagonPacketizerList::addToPacket(MachineInstr *MI) {
reserveResourcesForConstExt();
}
}
- CurrentPacketMIs.push_back(MI);
- CurrentPacketMIs.push_back(NvjMI);
+ CurrentPacketMIs.push_back(&MI);
+ CurrentPacketMIs.push_back(&NvjMI);
return MII;
}
@@ -1506,23 +1535,23 @@ HexagonPacketizerList::addToPacket(MachineInstr *MI) {
if (ExtMI && !tryAllocateResourcesForConstExt(true)) {
endPacket(MBB, MI);
if (PromotedToDotNew)
- demoteToDotOld(MI);
+ demoteToDotOld(&MI);
ResourceTracker->reserveResources(MI);
reserveResourcesForConstExt();
}
- CurrentPacketMIs.push_back(MI);
+ CurrentPacketMIs.push_back(&MI);
return MII;
}
void HexagonPacketizerList::endPacket(MachineBasicBlock *MBB,
- MachineInstr *MI) {
+ MachineBasicBlock::iterator MI) {
OldPacketMIs = CurrentPacketMIs;
VLIWPacketizerList::endPacket(MBB, MI);
}
-bool HexagonPacketizerList::shouldAddToPacket(const MachineInstr *MI) {
- return !producesStall(MI);
+bool HexagonPacketizerList::shouldAddToPacket(const MachineInstr &MI) {
+ return !producesStall(&MI);
}
@@ -1598,4 +1627,3 @@ bool HexagonPacketizerList::producesStall(const MachineInstr *I) {
FunctionPass *llvm::createHexagonPacketizer() {
return new HexagonPacketizer();
}
-
diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.h b/lib/Target/Hexagon/HexagonVLIWPacketizer.h
index 960cf6ca5bbc..3f8ed5af3540 100644
--- a/lib/Target/Hexagon/HexagonVLIWPacketizer.h
+++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.h
@@ -50,12 +50,12 @@ public:
void initPacketizerState() override;
// ignorePseudoInstruction - Ignore bundling of pseudo instructions.
- bool ignorePseudoInstruction(const MachineInstr *MI,
+ bool ignorePseudoInstruction(const MachineInstr &MI,
const MachineBasicBlock *MBB) override;
// isSoloInstruction - return true if instruction MI cannot be packetized
// with any other instruction, which means that MI itself is a packet.
- bool isSoloInstruction(const MachineInstr *MI) override;
+ bool isSoloInstruction(const MachineInstr &MI) override;
// isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
// together.
@@ -65,9 +65,10 @@ public:
// and SUJ.
bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) override;
- MachineBasicBlock::iterator addToPacket(MachineInstr *MI) override;
- void endPacket(MachineBasicBlock *MBB, MachineInstr *MI) override;
- bool shouldAddToPacket(const MachineInstr *MI) override;
+ MachineBasicBlock::iterator addToPacket(MachineInstr &MI) override;
+ void endPacket(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator MI) override;
+ bool shouldAddToPacket(const MachineInstr &MI) override;
void unpacketizeSoloInstrs(MachineFunction &MF);
@@ -93,7 +94,7 @@ protected:
bool canPromoteToNewValueStore(const MachineInstr* MI,
const MachineInstr* PacketMI, unsigned DepReg);
bool demoteToDotOld(MachineInstr* MI);
- bool arePredicatesComplements(MachineInstr* MI1, MachineInstr* MI2);
+ bool arePredicatesComplements(MachineInstr &MI1, MachineInstr &MI2);
bool restrictingDepExistInPacket(MachineInstr*, unsigned);
bool isNewifiable(const MachineInstr *MI);
bool isCurifiable(MachineInstr* MI);
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
index b73af8249cb5..2898b056a03d 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
@@ -11,7 +11,10 @@
#include "HexagonFixupKinds.h"
#include "HexagonMCTargetDesc.h"
#include "MCTargetDesc/HexagonBaseInfo.h"
+#include "MCTargetDesc/HexagonMCChecker.h"
+#include "MCTargetDesc/HexagonMCCodeEmitter.h"
#include "MCTargetDesc/HexagonMCInstrInfo.h"
+#include "MCTargetDesc/HexagonMCShuffler.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
@@ -19,14 +22,20 @@
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCObjectWriter.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/TargetRegistry.h"
+#include <sstream>
+
using namespace llvm;
using namespace Hexagon;
#define DEBUG_TYPE "hexagon-asm-backend"
+static cl::opt<bool> DisableFixup
+ ("mno-fixup", cl::desc("Disable fixing up resolved relocations for Hexagon"));
+
namespace {
class HexagonAsmBackend : public MCAsmBackend {
@@ -36,8 +45,21 @@ class HexagonAsmBackend : public MCAsmBackend {
std::unique_ptr <MCInstrInfo> MCII;
std::unique_ptr <MCInst *> RelaxTarget;
MCInst * Extender;
+
+ void ReplaceInstruction(MCCodeEmitter &E, MCRelaxableFragment &RF,
+ MCInst &HMB) const {
+ SmallVector<MCFixup, 4> Fixups;
+ SmallString<256> Code;
+ raw_svector_ostream VecOS(Code);
+ E.encodeInstruction(HMB, VecOS, Fixups, RF.getSubtargetInfo());
+
+ // Update the fragment.
+ RF.setInst(HMB);
+ RF.getContents() = Code;
+ RF.getFixups() = Fixups;
+ }
public:
- HexagonAsmBackend(Target const &T, uint8_t OSABI, StringRef CPU) :
+ HexagonAsmBackend(const Target &T, uint8_t OSABI, StringRef CPU) :
OSABI(OSABI), MCII (T.createMCInstrInfo()), RelaxTarget(new MCInst *),
Extender(nullptr) {}
@@ -63,118 +85,438 @@ public:
const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
const static MCFixupKindInfo Infos[Hexagon::NumTargetFixupKinds] = {
- // This table *must* be in same the order of fixup_* kinds in
- // HexagonFixupKinds.h.
- //
- // namei offset bits flags
- {"fixup_Hexagon_B22_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_B15_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_B7_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_LO16", 0, 32, 0},
- {"fixup_Hexagon_HI16", 0, 32, 0},
- {"fixup_Hexagon_32", 0, 32, 0},
- {"fixup_Hexagon_16", 0, 32, 0},
- {"fixup_Hexagon_8", 0, 32, 0},
- {"fixup_Hexagon_GPREL16_0", 0, 32, 0},
- {"fixup_Hexagon_GPREL16_1", 0, 32, 0},
- {"fixup_Hexagon_GPREL16_2", 0, 32, 0},
- {"fixup_Hexagon_GPREL16_3", 0, 32, 0},
- {"fixup_Hexagon_HL16", 0, 32, 0},
- {"fixup_Hexagon_B13_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_B9_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_B32_PCREL_X", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_32_6_X", 0, 32, 0},
- {"fixup_Hexagon_B22_PCREL_X", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_B15_PCREL_X", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_B13_PCREL_X", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_B9_PCREL_X", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_B7_PCREL_X", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_16_X", 0, 32, 0},
- {"fixup_Hexagon_12_X", 0, 32, 0},
- {"fixup_Hexagon_11_X", 0, 32, 0},
- {"fixup_Hexagon_10_X", 0, 32, 0},
- {"fixup_Hexagon_9_X", 0, 32, 0},
- {"fixup_Hexagon_8_X", 0, 32, 0},
- {"fixup_Hexagon_7_X", 0, 32, 0},
- {"fixup_Hexagon_6_X", 0, 32, 0},
- {"fixup_Hexagon_32_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_COPY", 0, 32, 0},
- {"fixup_Hexagon_GLOB_DAT", 0, 32, 0},
- {"fixup_Hexagon_JMP_SLOT", 0, 32, 0},
- {"fixup_Hexagon_RELATIVE", 0, 32, 0},
- {"fixup_Hexagon_PLT_B22_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_GOTREL_LO16", 0, 32, 0},
- {"fixup_Hexagon_GOTREL_HI16", 0, 32, 0},
- {"fixup_Hexagon_GOTREL_32", 0, 32, 0},
- {"fixup_Hexagon_GOT_LO16", 0, 32, 0},
- {"fixup_Hexagon_GOT_HI16", 0, 32, 0},
- {"fixup_Hexagon_GOT_32", 0, 32, 0},
- {"fixup_Hexagon_GOT_16", 0, 32, 0},
- {"fixup_Hexagon_DTPMOD_32", 0, 32, 0},
- {"fixup_Hexagon_DTPREL_LO16", 0, 32, 0},
- {"fixup_Hexagon_DTPREL_HI16", 0, 32, 0},
- {"fixup_Hexagon_DTPREL_32", 0, 32, 0},
- {"fixup_Hexagon_DTPREL_16", 0, 32, 0},
- {"fixup_Hexagon_GD_PLT_B22_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_LD_PLT_B22_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_GD_GOT_LO16", 0, 32, 0},
- {"fixup_Hexagon_GD_GOT_HI16", 0, 32, 0},
- {"fixup_Hexagon_GD_GOT_32", 0, 32, 0},
- {"fixup_Hexagon_GD_GOT_16", 0, 32, 0},
- {"fixup_Hexagon_LD_GOT_LO16", 0, 32, 0},
- {"fixup_Hexagon_LD_GOT_HI16", 0, 32, 0},
- {"fixup_Hexagon_LD_GOT_32", 0, 32, 0},
- {"fixup_Hexagon_LD_GOT_16", 0, 32, 0},
- {"fixup_Hexagon_IE_LO16", 0, 32, 0},
- {"fixup_Hexagon_IE_HI16", 0, 32, 0},
- {"fixup_Hexagon_IE_32", 0, 32, 0},
- {"fixup_Hexagon_IE_16", 0, 32, 0},
- {"fixup_Hexagon_IE_GOT_LO16", 0, 32, 0},
- {"fixup_Hexagon_IE_GOT_HI16", 0, 32, 0},
- {"fixup_Hexagon_IE_GOT_32", 0, 32, 0},
- {"fixup_Hexagon_IE_GOT_16", 0, 32, 0},
- {"fixup_Hexagon_TPREL_LO16", 0, 32, 0},
- {"fixup_Hexagon_TPREL_HI16", 0, 32, 0},
- {"fixup_Hexagon_TPREL_32", 0, 32, 0},
- {"fixup_Hexagon_TPREL_16", 0, 32, 0},
- {"fixup_Hexagon_6_PCREL_X", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
- {"fixup_Hexagon_GOTREL_32_6_X", 0, 32, 0},
- {"fixup_Hexagon_GOTREL_16_X", 0, 32, 0},
- {"fixup_Hexagon_GOTREL_11_X", 0, 32, 0},
- {"fixup_Hexagon_GOT_32_6_X", 0, 32, 0},
- {"fixup_Hexagon_GOT_16_X", 0, 32, 0},
- {"fixup_Hexagon_GOT_11_X", 0, 32, 0},
- {"fixup_Hexagon_DTPREL_32_6_X", 0, 32, 0},
- {"fixup_Hexagon_DTPREL_16_X", 0, 32, 0},
- {"fixup_Hexagon_DTPREL_11_X", 0, 32, 0},
- {"fixup_Hexagon_GD_GOT_32_6_X", 0, 32, 0},
- {"fixup_Hexagon_GD_GOT_16_X", 0, 32, 0},
- {"fixup_Hexagon_GD_GOT_11_X", 0, 32, 0},
- {"fixup_Hexagon_LD_GOT_32_6_X", 0, 32, 0},
- {"fixup_Hexagon_LD_GOT_16_X", 0, 32, 0},
- {"fixup_Hexagon_LD_GOT_11_X", 0, 32, 0},
- {"fixup_Hexagon_IE_32_6_X", 0, 32, 0},
- {"fixup_Hexagon_IE_16_X", 0, 32, 0},
- {"fixup_Hexagon_IE_GOT_32_6_X", 0, 32, 0},
- {"fixup_Hexagon_IE_GOT_16_X", 0, 32, 0},
- {"fixup_Hexagon_IE_GOT_11_X", 0, 32, 0},
- {"fixup_Hexagon_TPREL_32_6_X", 0, 32, 0},
- {"fixup_Hexagon_TPREL_16_X", 0, 32, 0},
- {"fixup_Hexagon_TPREL_11_X", 0, 32, 0}};
-
- if (Kind < FirstTargetFixupKind) {
+ // This table *must* be in the same order as the fixup_* kinds in
+ // HexagonFixupKinds.h.
+ //
+ // name offset bits flags
+ { "fixup_Hexagon_B22_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_B15_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_B7_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_LO16", 0, 32, 0 },
+ { "fixup_Hexagon_HI16", 0, 32, 0 },
+ { "fixup_Hexagon_32", 0, 32, 0 },
+ { "fixup_Hexagon_16", 0, 32, 0 },
+ { "fixup_Hexagon_8", 0, 32, 0 },
+ { "fixup_Hexagon_GPREL16_0", 0, 32, 0 },
+ { "fixup_Hexagon_GPREL16_1", 0, 32, 0 },
+ { "fixup_Hexagon_GPREL16_2", 0, 32, 0 },
+ { "fixup_Hexagon_GPREL16_3", 0, 32, 0 },
+ { "fixup_Hexagon_HL16", 0, 32, 0 },
+ { "fixup_Hexagon_B13_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_B9_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_B32_PCREL_X", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_32_6_X", 0, 32, 0 },
+ { "fixup_Hexagon_B22_PCREL_X", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_B15_PCREL_X", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_B13_PCREL_X", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_B9_PCREL_X", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_B7_PCREL_X", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_16_X", 0, 32, 0 },
+ { "fixup_Hexagon_12_X", 0, 32, 0 },
+ { "fixup_Hexagon_11_X", 0, 32, 0 },
+ { "fixup_Hexagon_10_X", 0, 32, 0 },
+ { "fixup_Hexagon_9_X", 0, 32, 0 },
+ { "fixup_Hexagon_8_X", 0, 32, 0 },
+ { "fixup_Hexagon_7_X", 0, 32, 0 },
+ { "fixup_Hexagon_6_X", 0, 32, 0 },
+ { "fixup_Hexagon_32_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_COPY", 0, 32, 0 },
+ { "fixup_Hexagon_GLOB_DAT", 0, 32, 0 },
+ { "fixup_Hexagon_JMP_SLOT", 0, 32, 0 },
+ { "fixup_Hexagon_RELATIVE", 0, 32, 0 },
+ { "fixup_Hexagon_PLT_B22_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_GOTREL_LO16", 0, 32, 0 },
+ { "fixup_Hexagon_GOTREL_HI16", 0, 32, 0 },
+ { "fixup_Hexagon_GOTREL_32", 0, 32, 0 },
+ { "fixup_Hexagon_GOT_LO16", 0, 32, 0 },
+ { "fixup_Hexagon_GOT_HI16", 0, 32, 0 },
+ { "fixup_Hexagon_GOT_32", 0, 32, 0 },
+ { "fixup_Hexagon_GOT_16", 0, 32, 0 },
+ { "fixup_Hexagon_DTPMOD_32", 0, 32, 0 },
+ { "fixup_Hexagon_DTPREL_LO16", 0, 32, 0 },
+ { "fixup_Hexagon_DTPREL_HI16", 0, 32, 0 },
+ { "fixup_Hexagon_DTPREL_32", 0, 32, 0 },
+ { "fixup_Hexagon_DTPREL_16", 0, 32, 0 },
+ { "fixup_Hexagon_GD_PLT_B22_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_LD_PLT_B22_PCREL", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_GD_GOT_LO16", 0, 32, 0 },
+ { "fixup_Hexagon_GD_GOT_HI16", 0, 32, 0 },
+ { "fixup_Hexagon_GD_GOT_32", 0, 32, 0 },
+ { "fixup_Hexagon_GD_GOT_16", 0, 32, 0 },
+ { "fixup_Hexagon_LD_GOT_LO16", 0, 32, 0 },
+ { "fixup_Hexagon_LD_GOT_HI16", 0, 32, 0 },
+ { "fixup_Hexagon_LD_GOT_32", 0, 32, 0 },
+ { "fixup_Hexagon_LD_GOT_16", 0, 32, 0 },
+ { "fixup_Hexagon_IE_LO16", 0, 32, 0 },
+ { "fixup_Hexagon_IE_HI16", 0, 32, 0 },
+ { "fixup_Hexagon_IE_32", 0, 32, 0 },
+ { "fixup_Hexagon_IE_16", 0, 32, 0 },
+ { "fixup_Hexagon_IE_GOT_LO16", 0, 32, 0 },
+ { "fixup_Hexagon_IE_GOT_HI16", 0, 32, 0 },
+ { "fixup_Hexagon_IE_GOT_32", 0, 32, 0 },
+ { "fixup_Hexagon_IE_GOT_16", 0, 32, 0 },
+ { "fixup_Hexagon_TPREL_LO16", 0, 32, 0 },
+ { "fixup_Hexagon_TPREL_HI16", 0, 32, 0 },
+ { "fixup_Hexagon_TPREL_32", 0, 32, 0 },
+ { "fixup_Hexagon_TPREL_16", 0, 32, 0 },
+ { "fixup_Hexagon_6_PCREL_X", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Hexagon_GOTREL_32_6_X", 0, 32, 0 },
+ { "fixup_Hexagon_GOTREL_16_X", 0, 32, 0 },
+ { "fixup_Hexagon_GOTREL_11_X", 0, 32, 0 },
+ { "fixup_Hexagon_GOT_32_6_X", 0, 32, 0 },
+ { "fixup_Hexagon_GOT_16_X", 0, 32, 0 },
+ { "fixup_Hexagon_GOT_11_X", 0, 32, 0 },
+ { "fixup_Hexagon_DTPREL_32_6_X", 0, 32, 0 },
+ { "fixup_Hexagon_DTPREL_16_X", 0, 32, 0 },
+ { "fixup_Hexagon_DTPREL_11_X", 0, 32, 0 },
+ { "fixup_Hexagon_GD_GOT_32_6_X", 0, 32, 0 },
+ { "fixup_Hexagon_GD_GOT_16_X", 0, 32, 0 },
+ { "fixup_Hexagon_GD_GOT_11_X", 0, 32, 0 },
+ { "fixup_Hexagon_LD_GOT_32_6_X", 0, 32, 0 },
+ { "fixup_Hexagon_LD_GOT_16_X", 0, 32, 0 },
+ { "fixup_Hexagon_LD_GOT_11_X", 0, 32, 0 },
+ { "fixup_Hexagon_IE_32_6_X", 0, 32, 0 },
+ { "fixup_Hexagon_IE_16_X", 0, 32, 0 },
+ { "fixup_Hexagon_IE_GOT_32_6_X", 0, 32, 0 },
+ { "fixup_Hexagon_IE_GOT_16_X", 0, 32, 0 },
+ { "fixup_Hexagon_IE_GOT_11_X", 0, 32, 0 },
+ { "fixup_Hexagon_TPREL_32_6_X", 0, 32, 0 },
+ { "fixup_Hexagon_TPREL_16_X", 0, 32, 0 },
+ { "fixup_Hexagon_TPREL_11_X", 0, 32, 0 }
+ };
+
+ if (Kind < FirstTargetFixupKind)
return MCAsmBackend::getFixupKindInfo(Kind);
- }
assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
"Invalid kind!");
return Infos[Kind - FirstTargetFixupKind];
}
- void applyFixup(MCFixup const & /*Fixup*/, char * /*Data*/,
- unsigned /*DataSize*/, uint64_t /*Value*/,
- bool /*IsPCRel*/) const override {
- return;
+ /// processFixupValue - Target hook to adjust the literal value of a fixup
+ /// if necessary. IsResolved signals whether the caller believes a relocation
+ /// is needed; the target can modify the value. The default does nothing.
+ void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFixup &Fixup, const MCFragment *DF,
+ const MCValue &Target, uint64_t &Value,
+ bool &IsResolved) override {
+ MCFixupKind Kind = Fixup.getKind();
+
+ switch((unsigned)Kind) {
+ default:
+ llvm_unreachable("Unknown Fixup Kind!");
+
+ case fixup_Hexagon_LO16:
+ case fixup_Hexagon_HI16:
+ case fixup_Hexagon_16:
+ case fixup_Hexagon_8:
+ case fixup_Hexagon_GPREL16_0:
+ case fixup_Hexagon_GPREL16_1:
+ case fixup_Hexagon_GPREL16_2:
+ case fixup_Hexagon_GPREL16_3:
+ case fixup_Hexagon_HL16:
+ case fixup_Hexagon_32_6_X:
+ case fixup_Hexagon_16_X:
+ case fixup_Hexagon_12_X:
+ case fixup_Hexagon_11_X:
+ case fixup_Hexagon_10_X:
+ case fixup_Hexagon_9_X:
+ case fixup_Hexagon_8_X:
+ case fixup_Hexagon_7_X:
+ case fixup_Hexagon_6_X:
+ case fixup_Hexagon_COPY:
+ case fixup_Hexagon_GLOB_DAT:
+ case fixup_Hexagon_JMP_SLOT:
+ case fixup_Hexagon_RELATIVE:
+ case fixup_Hexagon_PLT_B22_PCREL:
+ case fixup_Hexagon_GOTREL_LO16:
+ case fixup_Hexagon_GOTREL_HI16:
+ case fixup_Hexagon_GOTREL_32:
+ case fixup_Hexagon_GOT_LO16:
+ case fixup_Hexagon_GOT_HI16:
+ case fixup_Hexagon_GOT_32:
+ case fixup_Hexagon_GOT_16:
+ case fixup_Hexagon_DTPMOD_32:
+ case fixup_Hexagon_DTPREL_LO16:
+ case fixup_Hexagon_DTPREL_HI16:
+ case fixup_Hexagon_DTPREL_32:
+ case fixup_Hexagon_DTPREL_16:
+ case fixup_Hexagon_GD_PLT_B22_PCREL:
+ case fixup_Hexagon_LD_PLT_B22_PCREL:
+ case fixup_Hexagon_GD_GOT_LO16:
+ case fixup_Hexagon_GD_GOT_HI16:
+ case fixup_Hexagon_GD_GOT_32:
+ case fixup_Hexagon_GD_GOT_16:
+ case fixup_Hexagon_LD_GOT_LO16:
+ case fixup_Hexagon_LD_GOT_HI16:
+ case fixup_Hexagon_LD_GOT_32:
+ case fixup_Hexagon_LD_GOT_16:
+ case fixup_Hexagon_IE_LO16:
+ case fixup_Hexagon_IE_HI16:
+ case fixup_Hexagon_IE_32:
+ case fixup_Hexagon_IE_16:
+ case fixup_Hexagon_IE_GOT_LO16:
+ case fixup_Hexagon_IE_GOT_HI16:
+ case fixup_Hexagon_IE_GOT_32:
+ case fixup_Hexagon_IE_GOT_16:
+ case fixup_Hexagon_TPREL_LO16:
+ case fixup_Hexagon_TPREL_HI16:
+ case fixup_Hexagon_TPREL_32:
+ case fixup_Hexagon_TPREL_16:
+ case fixup_Hexagon_GOTREL_32_6_X:
+ case fixup_Hexagon_GOTREL_16_X:
+ case fixup_Hexagon_GOTREL_11_X:
+ case fixup_Hexagon_GOT_32_6_X:
+ case fixup_Hexagon_GOT_16_X:
+ case fixup_Hexagon_GOT_11_X:
+ case fixup_Hexagon_DTPREL_32_6_X:
+ case fixup_Hexagon_DTPREL_16_X:
+ case fixup_Hexagon_DTPREL_11_X:
+ case fixup_Hexagon_GD_GOT_32_6_X:
+ case fixup_Hexagon_GD_GOT_16_X:
+ case fixup_Hexagon_GD_GOT_11_X:
+ case fixup_Hexagon_LD_GOT_32_6_X:
+ case fixup_Hexagon_LD_GOT_16_X:
+ case fixup_Hexagon_LD_GOT_11_X:
+ case fixup_Hexagon_IE_32_6_X:
+ case fixup_Hexagon_IE_16_X:
+ case fixup_Hexagon_IE_GOT_32_6_X:
+ case fixup_Hexagon_IE_GOT_16_X:
+ case fixup_Hexagon_IE_GOT_11_X:
+ case fixup_Hexagon_TPREL_32_6_X:
+ case fixup_Hexagon_TPREL_16_X:
+ case fixup_Hexagon_TPREL_11_X:
+ case fixup_Hexagon_32_PCREL:
+ case fixup_Hexagon_6_PCREL_X:
+ case fixup_Hexagon_23_REG:
+ // These fixup kinds should always have a relocation recorded
+ IsResolved = false;
+ return;
+
+ case fixup_Hexagon_B22_PCREL:
+ //IsResolved = false;
+ break;
+
+ case fixup_Hexagon_B13_PCREL:
+ case fixup_Hexagon_B13_PCREL_X:
+ case fixup_Hexagon_B32_PCREL_X:
+ case fixup_Hexagon_B22_PCREL_X:
+ case fixup_Hexagon_B15_PCREL:
+ case fixup_Hexagon_B15_PCREL_X:
+ case fixup_Hexagon_B9_PCREL:
+ case fixup_Hexagon_B9_PCREL_X:
+ case fixup_Hexagon_B7_PCREL:
+ case fixup_Hexagon_B7_PCREL_X:
+ if (DisableFixup)
+ IsResolved = false;
+ break;
+
+ case FK_Data_1:
+ case FK_Data_2:
+ case FK_Data_4:
+ case FK_PCRel_4:
+ case fixup_Hexagon_32:
+ // Leave these relocations alone as they are used for EH.
+ return;
+ }
+ }
+
+ /// getFixupKindNumBytes - The number of bytes the fixup may change.
+ static unsigned getFixupKindNumBytes(unsigned Kind) {
+ switch (Kind) {
+ default:
+ return 0;
+
+ case FK_Data_1:
+ return 1;
+ case FK_Data_2:
+ return 2;
+ case FK_Data_4: // this later gets mapped to R_HEX_32
+ case FK_PCRel_4: // this later gets mapped to R_HEX_32_PCREL
+ case fixup_Hexagon_32:
+ case fixup_Hexagon_B32_PCREL_X:
+ case fixup_Hexagon_B22_PCREL:
+ case fixup_Hexagon_B22_PCREL_X:
+ case fixup_Hexagon_B15_PCREL:
+ case fixup_Hexagon_B15_PCREL_X:
+ case fixup_Hexagon_B13_PCREL:
+ case fixup_Hexagon_B13_PCREL_X:
+ case fixup_Hexagon_B9_PCREL:
+ case fixup_Hexagon_B9_PCREL_X:
+ case fixup_Hexagon_B7_PCREL:
+ case fixup_Hexagon_B7_PCREL_X:
+ return 4;
+ }
+ }
+
+ // Make up for left shift when encoding the operand.
+ static uint64_t adjustFixupValue(MCFixupKind Kind, uint64_t Value) {
+ switch((unsigned)Kind) {
+ default:
+ break;
+
+ case fixup_Hexagon_B7_PCREL:
+ case fixup_Hexagon_B9_PCREL:
+ case fixup_Hexagon_B13_PCREL:
+ case fixup_Hexagon_B15_PCREL:
+ case fixup_Hexagon_B22_PCREL:
+ Value >>= 2;
+ break;
+
+ case fixup_Hexagon_B7_PCREL_X:
+ case fixup_Hexagon_B9_PCREL_X:
+ case fixup_Hexagon_B13_PCREL_X:
+ case fixup_Hexagon_B15_PCREL_X:
+ case fixup_Hexagon_B22_PCREL_X:
+ Value &= 0x3f;
+ break;
+
+ case fixup_Hexagon_B32_PCREL_X:
+ Value >>= 6;
+ break;
+ }
+ return Value;
+ }
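[Editor's note, not part of the patch] As a worked example (standalone sketch): branch targets are word aligned, so the plain PC-relative kinds drop their two always-zero low bits, while the extender-paired _X kinds keep only the six low bits left in the base instruction.

#include <cassert>
#include <cstdint>

int main() {
  // fixup_Hexagon_B22_PCREL: a target 1024 bytes ahead encodes as 256.
  assert((uint64_t(1024) >> 2) == 256);
  // fixup_Hexagon_B22_PCREL_X: only bits 5..0 stay in the base insn.
  assert((uint64_t(0x12345) & 0x3f) == 0x05);
  // fixup_Hexagon_B32_PCREL_X: the extender carries bits 31..6.
  assert((uint64_t(0x12345) >> 6) == 0x48d);
  return 0;
}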
+
+ void HandleFixupError(const int bits, const int align_bits,
+ const int64_t FixupValue, const char *fixupStr) const {
+ // Emits a message of the form:
+ // "value 1124 out of range: -1024-1023 when resolving symbol in file xprtsock.S"
+ const APInt IntMin = APInt::getSignedMinValue(bits+align_bits);
+ const APInt IntMax = APInt::getSignedMaxValue(bits+align_bits);
+ std::stringstream errStr;
+ errStr << "\nError: value " <<
+ FixupValue <<
+ " out of range: " <<
+ IntMin.getSExtValue() <<
+ "-" <<
+ IntMax.getSExtValue() <<
+ " when resolving " <<
+ fixupStr <<
+ " fixup\n";
+ llvm_unreachable(errStr.str().c_str());
+ }
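[Editor's note, not part of the patch] A standalone check (links against LLVM's Support library; illustration only) of the range printed above: for a B9_PCREL fixup, bits = 9 plus align_bits = 2 gives an 11-bit signed range, matching the sample message in the comment.

#include "llvm/ADT/APInt.h"
#include <cassert>
using llvm::APInt;

int main() {
  const APInt IntMin = APInt::getSignedMinValue(9 + 2);
  const APInt IntMax = APInt::getSignedMaxValue(9 + 2);
  assert(IntMin.getSExtValue() == -1024);
  assert(IntMax.getSExtValue() == 1023);
  return 0;
}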
+
+ /// ApplyFixup - Apply the \arg Value for the given \arg Fixup into the provided
+ /// data fragment, at the offset specified by the fixup and following the
+ /// fixup kind as appropriate.
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ uint64_t FixupValue, bool IsPCRel) const override {
+
+ // When FixupValue is 0 the relocation is external and there
+ // is nothing for us to do.
+ if (!FixupValue) return;
+
+ MCFixupKind Kind = Fixup.getKind();
+ uint64_t Value;
+ uint32_t InstMask;
+ uint32_t Reloc;
+
+ // LLVM gives us an encoded value; we have to convert it back
+ // to a real offset before we can use it.
+ uint32_t Offset = Fixup.getOffset();
+ unsigned NumBytes = getFixupKindNumBytes(Kind);
+ assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");
+ char *InstAddr = Data + Offset;
+
+ Value = adjustFixupValue(Kind, FixupValue);
+ if(!Value)
+ return;
+ int sValue = (int)Value;
+
+ switch((unsigned)Kind) {
+ default:
+ return;
+
+ case fixup_Hexagon_B7_PCREL:
+ if (!(isIntN(7, sValue)))
+ HandleFixupError(7, 2, (int64_t)FixupValue, "B7_PCREL");
+ case fixup_Hexagon_B7_PCREL_X:
+ InstMask = 0x00001f18; // Word32_B7
+ Reloc = (((Value >> 2) & 0x1f) << 8) | // Value 6-2 = Target 12-8
+ ((Value & 0x3) << 3); // Value 1-0 = Target 4-3
+ break;
+
+ case fixup_Hexagon_B9_PCREL:
+ if (!(isIntN(9, sValue)))
+ HandleFixupError(9, 2, (int64_t)FixupValue, "B9_PCREL");
+ case fixup_Hexagon_B9_PCREL_X:
+ InstMask = 0x003000fe; // Word32_B9
+ Reloc = (((Value >> 7) & 0x3) << 20) | // Value 8-7 = Target 21-20
+ ((Value & 0x7f) << 1); // Value 6-0 = Target 7-1
+ break;
+
+ // Since the existing branches that use this relocation cannot be
+ // extended, they should only be fixed up if the target is within range.
+ case fixup_Hexagon_B13_PCREL:
+ if (!(isIntN(13, sValue)))
+ HandleFixupError(13, 2, (int64_t)FixupValue, "B13_PCREL");
+ case fixup_Hexagon_B13_PCREL_X:
+ InstMask = 0x00202ffe; // Word32_B13
+ Reloc = (((Value >> 12) & 0x1) << 21) | // Value 12 = Target 21
+ (((Value >> 11) & 0x1) << 13) | // Value 11 = Target 13
+ ((Value & 0x7ff) << 1); // Value 10-0 = Target 11-1
+ break;
+
+ case fixup_Hexagon_B15_PCREL:
+ if (!(isIntN(15, sValue)))
+ HandleFixupError(15, 2, (int64_t)FixupValue, "B15_PCREL");
+ case fixup_Hexagon_B15_PCREL_X:
+ InstMask = 0x00df20fe; // Word32_B15
+ Reloc = (((Value >> 13) & 0x3) << 22) | // Value 14-13 = Target 23-22
+ (((Value >> 8) & 0x1f) << 16) | // Value 12-8 = Target 20-16
+ (((Value >> 7) & 0x1) << 13) | // Value 7 = Target 13
+ ((Value & 0x7f) << 1); // Value 6-0 = Target 7-1
+ break;
+
+ case fixup_Hexagon_B22_PCREL:
+ if (!(isIntN(22, sValue)))
+ HandleFixupError(22, 2, (int64_t)FixupValue, "B22_PCREL");
+ case fixup_Hexagon_B22_PCREL_X:
+ InstMask = 0x01ff3ffe; // Word32_B22
+ Reloc = (((Value >> 13) & 0x1ff) << 16) | // Value 21-13 = Target 24-16
+ ((Value & 0x1fff) << 1); // Value 12-0 = Target 13-1
+ break;
+
+ case fixup_Hexagon_B32_PCREL_X:
+ InstMask = 0x0fff3fff; // Word32_X26
+ Reloc = (((Value >> 14) & 0xfff) << 16) | // Value 25-14 = Target 27-16
+ (Value & 0x3fff); // Value 13-0 = Target 13-0
+ break;
+
+ case FK_Data_1:
+ case FK_Data_2:
+ case FK_Data_4:
+ case fixup_Hexagon_32:
+ InstMask = 0xffffffff; // Word32
+ Reloc = Value;
+ break;
+ }
+
+ DEBUG(dbgs() << "Name=" << getFixupKindInfo(Kind).Name << "(" <<
+ (unsigned)Kind << ")\n");
+ DEBUG(uint32_t OldData = 0;
+ for (unsigned i = 0; i < NumBytes; i++)
+ OldData |= (InstAddr[i] << (i * 8)) & (0xff << (i * 8));
+ dbgs() << "\tBValue=0x"; dbgs().write_hex(Value) <<
+ ": AValue=0x"; dbgs().write_hex(FixupValue) <<
+ ": Offset=" << Offset <<
+ ": Size=" << DataSize <<
+ ": OInst=0x"; dbgs().write_hex(OldData) <<
+ ": Reloc=0x"; dbgs().write_hex(Reloc););
+
+ // For each byte of the fragment that the fixup touches, mask in the
+ // bits from the fixup value. The Value has been "split up" into the
+ // appropriate bitfields above.
+ for (unsigned i = 0; i < NumBytes; i++){
+ InstAddr[i] &= uint8_t(~InstMask >> (i * 8)) & 0xff; // Clear reloc bits
+ InstAddr[i] |= uint8_t(Reloc >> (i * 8)) & 0xff; // Apply new reloc
+ }
+
+ DEBUG(uint32_t NewData = 0;
+ for (unsigned i = 0; i < NumBytes; i++)
+ NewData |= (InstAddr[i] << (i * 8)) & (0xff << (i * 8));
+ dbgs() << ": NInst=0x"; dbgs().write_hex(NewData) << "\n";);
}
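[Editor's note, not part of the patch] To make the scatter/mask step concrete, a minimal standalone sketch (little-endian assumed, values hypothetical) of the clear-then-or loop: every touched byte first has its field bits cleared through ~InstMask, then the pre-scattered Reloc bits are or'ed in, leaving the remaining instruction bits alone.

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint32_t Inst = 0xffffffff;                    // pretend instruction word
  const uint32_t InstMask = 0x00001f18;          // Word32_B7 field bits
  const uint32_t Value = 0x7f;                   // adjusted 7-bit distance
  const uint32_t Reloc = (((Value >> 2) & 0x1f) << 8) | ((Value & 0x3) << 3);

  uint8_t Buf[4];
  std::memcpy(Buf, &Inst, 4);
  for (unsigned i = 0; i < 4; ++i) {
    Buf[i] &= uint8_t(~InstMask >> (i * 8));     // clear the reloc bits
    Buf[i] |= uint8_t(Reloc >> (i * 8));         // apply the new bits
  }
  std::memcpy(&Inst, Buf, 4);
  assert((Inst & InstMask) == Reloc);            // field carries the distance
  assert((Inst | InstMask) == 0xffffffff);       // other bits untouched
  return 0;
}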
bool isInstRelaxable(MCInst const &HMI) const {
@@ -182,12 +524,20 @@ public:
bool Relaxable = false;
// Branches and loop-setup insns are handled as necessary by relaxation.
if (llvm::HexagonMCInstrInfo::getType(*MCII, HMI) == HexagonII::TypeJ ||
+ (llvm::HexagonMCInstrInfo::getType(*MCII, HMI) ==
+ HexagonII::TypeCOMPOUND &&
+ MCID.isBranch()) ||
(llvm::HexagonMCInstrInfo::getType(*MCII, HMI) == HexagonII::TypeNV &&
MCID.isBranch()) ||
(llvm::HexagonMCInstrInfo::getType(*MCII, HMI) == HexagonII::TypeCR &&
HMI.getOpcode() != Hexagon::C4_addipc))
- if (HexagonMCInstrInfo::isExtendable(*MCII, HMI))
+ if (HexagonMCInstrInfo::isExtendable(*MCII, HMI)) {
Relaxable = true;
+ MCOperand const &Operand =
+ HMI.getOperand(HexagonMCInstrInfo::getExtendableOp(*MCII, HMI));
+ if (HexagonMCInstrInfo::mustNotExtend(*Operand.getExpr()))
+ Relaxable = false;
+ }
return Relaxable;
}
@@ -197,17 +547,7 @@ public:
///
/// \param Inst - The instruction to test.
bool mayNeedRelaxation(MCInst const &Inst) const override {
- assert(HexagonMCInstrInfo::isBundle(Inst));
- bool PreviousIsExtender = false;
- for (auto const &I : HexagonMCInstrInfo::bundleInstructions(Inst)) {
- auto const &Inst = *I.getInst();
- if (!PreviousIsExtender) {
- if (isInstRelaxable(Inst))
- return true;
- }
- PreviousIsExtender = HexagonMCInstrInfo::isImmext(Inst);
- }
- return false;
+ return true;
}
/// fixupNeedsRelaxation - Target specific predicate for whether a given
@@ -222,6 +562,9 @@ public:
*RelaxTarget = nullptr;
MCInst &MCI = const_cast<MCInst &>(HexagonMCInstrInfo::instruction(
MCB, Fixup.getOffset() / HEXAGON_INSTR_SIZE));
+ bool Relaxable = isInstRelaxable(MCI);
+ if (!Relaxable)
+ return false;
// If we cannot resolve the fixup value, it requires relaxation.
if (!Resolved) {
switch ((unsigned)Fixup.getKind()) {
@@ -247,9 +590,6 @@ public:
}
}
}
- bool Relaxable = isInstRelaxable(MCI);
- if (Relaxable == false)
- return false;
MCFixupKind Kind = Fixup.getKind();
int64_t sValue = Value;
@@ -294,8 +634,8 @@ public:
llvm_unreachable("Handled by fixupNeedsRelaxationAdvanced");
}
- void relaxInstruction(MCInst const & Inst,
- MCInst & Res) const override {
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override {
assert(HexagonMCInstrInfo::isBundle(Inst) &&
"Hexagon relaxInstruction only works on bundles");
@@ -347,6 +687,58 @@ public:
}
return true;
}
+
+ void finishLayout(MCAssembler const &Asm,
+ MCAsmLayout &Layout) const override {
+ for (auto I : Layout.getSectionOrder()) {
+ auto &Fragments = I->getFragmentList();
+ for (auto &J : Fragments) {
+ switch (J.getKind()) {
+ default:
+ break;
+ case MCFragment::FT_Align: {
+ auto Size = Asm.computeFragmentSize(Layout, J);
+ for (auto K = J.getIterator();
+ K != Fragments.begin() && Size >= HEXAGON_PACKET_SIZE;) {
+ --K;
+ switch (K->getKind()) {
+ default:
+ break;
+ case MCFragment::FT_Align: {
+ // Don't pad before other alignments
+ Size = 0;
+ break;
+ }
+ case MCFragment::FT_Relaxable: {
+ auto &RF = cast<MCRelaxableFragment>(*K);
+ auto &Inst = const_cast<MCInst &>(RF.getInst());
+ while (Size > 0 && HexagonMCInstrInfo::bundleSize(Inst) < 4) {
+ MCInst *Nop = new (Asm.getContext()) MCInst;
+ Nop->setOpcode(Hexagon::A2_nop);
+ Inst.addOperand(MCOperand::createInst(Nop));
+ Size -= 4;
+ if (!HexagonMCChecker(
+ *MCII, RF.getSubtargetInfo(), Inst, Inst,
+ *Asm.getContext().getRegisterInfo()).check()) {
+ Inst.erase(Inst.end() - 1);
+ Size = 0;
+ }
+ }
+ bool Error = HexagonMCShuffle(*MCII, RF.getSubtargetInfo(), Inst);
+ //assert(!Error);
+ (void)Error;
+ ReplaceInstruction(Asm.getEmitter(), RF, Inst);
+ Layout.invalidateFragmentsFrom(&RF);
+ Size = 0; // Only look back one instruction
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
};
} // end anonymous namespace
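[Editor's note, not part of the patch] A rough standalone model (constants assumed: four-byte instructions, four slots per packet) of the padding loop in finishLayout above: an upcoming alignment gap is absorbed by appending A2_nop operands to the preceding relaxable bundle until the packet is full or the gap is gone; any leftover gap is still handled by the align fragment itself.

#include <cassert>

int main() {
  const unsigned SlotsPerPacket = 4; // assumed packet capacity
  unsigned BundleSize = 2;           // insns already in the bundle
  unsigned Size = 12;                // alignment gap in bytes
  while (Size > 0 && BundleSize < SlotsPerPacket) {
    ++BundleSize;                    // append one A2_nop
    Size -= 4;                       // each nop occupies four bytes
  }
  assert(BundleSize == 4 && Size == 4); // leftover gap stays for the aligner
  return 0;
}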
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
index 47a6f8636276..c63f044b7128 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
@@ -227,7 +227,27 @@ namespace HexagonII {
MO_LO16, MO_HI16,
// Offset from the base of the SDA.
- MO_GPREL
+ MO_GPREL,
+
+ // MO_GDGOT - indicates a GOT-relative relocation for the TLS
+ // General Dynamic access model
+ MO_GDGOT,
+
+ // MO_GDPLT - indicates a PLT-relative relocation for the TLS
+ // General Dynamic access model
+ MO_GDPLT,
+
+ // MO_IE - indicates a non-PIC relocation for the TLS
+ // Initial Executable access model
+ MO_IE,
+
+ // MO_IEGOT - indicates a PIC relocation for the TLS
+ // Initial Executable access model
+ MO_IEGOT,
+
+ // MO_TPREL - indicates a relocation for the TLS
+ // Local Executable access model
+ MO_TPREL
};
// Hexagon Sub-instruction classes.
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonELFObjectWriter.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonELFObjectWriter.cpp
index da5d4d1da69b..944e235e72f2 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonELFObjectWriter.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonELFObjectWriter.cpp
@@ -11,6 +11,7 @@
#include "MCTargetDesc/HexagonFixupKinds.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -28,8 +29,8 @@ private:
public:
HexagonELFObjectWriter(uint8_t OSABI, StringRef C);
- unsigned GetRelocType(MCValue const &Target, MCFixup const &Fixup,
- bool IsPCRel) const override;
+ unsigned getRelocType(MCContext &Ctx, MCValue const &Target,
+ MCFixup const &Fixup, bool IsPCRel) const override;
};
}
@@ -38,20 +39,61 @@ HexagonELFObjectWriter::HexagonELFObjectWriter(uint8_t OSABI, StringRef C)
/*HasRelocationAddend*/ true),
CPU(C) {}
-unsigned HexagonELFObjectWriter::GetRelocType(MCValue const & /*Target*/,
+unsigned HexagonELFObjectWriter::getRelocType(MCContext &Ctx,
+ MCValue const &Target,
MCFixup const &Fixup,
bool IsPCRel) const {
+ MCSymbolRefExpr::VariantKind Variant = Target.getAccessVariant();
switch ((unsigned)Fixup.getKind()) {
default:
- DEBUG(dbgs() << "unrecognized relocation " << Fixup.getKind() << "\n");
- llvm_unreachable("Unimplemented Fixup kind!");
- return ELF::R_HEX_NONE;
+ report_fatal_error("Unrecognized relocation type");
+ break;
case FK_Data_4:
- return (IsPCRel) ? ELF::R_HEX_32_PCREL : ELF::R_HEX_32;
+ switch(Variant) {
+ case MCSymbolRefExpr::VariantKind::VK_DTPREL:
+ return ELF::R_HEX_DTPREL_32;
+ case MCSymbolRefExpr::VariantKind::VK_GOT:
+ return ELF::R_HEX_GOT_32;
+ case MCSymbolRefExpr::VariantKind::VK_GOTREL:
+ return ELF::R_HEX_GOTREL_32;
+ case MCSymbolRefExpr::VariantKind::VK_Hexagon_GD_GOT:
+ return ELF::R_HEX_GD_GOT_32;
+ case MCSymbolRefExpr::VariantKind::VK_Hexagon_IE:
+ return ELF::R_HEX_IE_32;
+ case MCSymbolRefExpr::VariantKind::VK_Hexagon_IE_GOT:
+ return ELF::R_HEX_IE_GOT_32;
+ case MCSymbolRefExpr::VariantKind::VK_Hexagon_LD_GOT:
+ return ELF::R_HEX_LD_GOT_32;
+ case MCSymbolRefExpr::VariantKind::VK_Hexagon_PCREL:
+ return ELF::R_HEX_32_PCREL;
+ case MCSymbolRefExpr::VariantKind::VK_TPREL:
+ return ELF::R_HEX_TPREL_32;
+ case MCSymbolRefExpr::VariantKind::VK_None:
+ return IsPCRel ? ELF::R_HEX_32_PCREL : ELF::R_HEX_32;
+ default:
+ report_fatal_error("Unrecognized variant type");
+ };
case FK_PCRel_4:
return ELF::R_HEX_32_PCREL;
case FK_Data_2:
- return ELF::R_HEX_16;
+ switch(Variant) {
+ case MCSymbolRefExpr::VariantKind::VK_DTPREL:
+ return ELF::R_HEX_DTPREL_16;
+ case MCSymbolRefExpr::VariantKind::VK_GOT:
+ return ELF::R_HEX_GOT_16;
+ case MCSymbolRefExpr::VariantKind::VK_Hexagon_GD_GOT:
+ return ELF::R_HEX_GD_GOT_16;
+ case MCSymbolRefExpr::VariantKind::VK_Hexagon_IE_GOT:
+ return ELF::R_HEX_IE_GOT_16;
+ case MCSymbolRefExpr::VariantKind::VK_Hexagon_LD_GOT:
+ return ELF::R_HEX_LD_GOT_16;
+ case MCSymbolRefExpr::VariantKind::VK_TPREL:
+ return ELF::R_HEX_TPREL_16;
+ case MCSymbolRefExpr::VariantKind::VK_None:
+ return ELF::R_HEX_16;
+ default:
+ report_fatal_error("Unrecognized variant type");
+ };
case FK_Data_1:
return ELF::R_HEX_8;
case fixup_Hexagon_B22_PCREL:
@@ -240,6 +282,8 @@ unsigned HexagonELFObjectWriter::GetRelocType(MCValue const & /*Target*/,
return ELF::R_HEX_TPREL_16_X;
case fixup_Hexagon_TPREL_11_X:
return ELF::R_HEX_TPREL_11_X;
+ case fixup_Hexagon_23_REG:
+ return ELF::R_HEX_23_REG;
}
}
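[Editor's note, not part of the patch] A simplified stand-in (enum values invented for illustration) for the new FK_Data_4 behavior: the symbol's variant, not just PC-relativity, now selects the relocation, so a directive like ".word x@TPREL" yields R_HEX_TPREL_32 instead of plain R_HEX_32.

#include <cassert>

enum Reloc { R_HEX_32, R_HEX_32_PCREL, R_HEX_TPREL_32, R_HEX_GOT_32 };
enum Variant { VK_None, VK_TPREL, VK_GOT };

// Mirrors the shape of the FK_Data_4 case in getRelocType above.
static Reloc relocForData4(Variant V, bool IsPCRel) {
  switch (V) {
  case VK_TPREL: return R_HEX_TPREL_32;
  case VK_GOT:   return R_HEX_GOT_32;
  case VK_None:  return IsPCRel ? R_HEX_32_PCREL : R_HEX_32;
  }
  return R_HEX_32;
}

int main() {
  assert(relocForData4(VK_TPREL, false) == R_HEX_TPREL_32);
  assert(relocForData4(VK_None, true) == R_HEX_32_PCREL);
  return 0;
}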
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonFixupKinds.h b/lib/Target/Hexagon/MCTargetDesc/HexagonFixupKinds.h
index 4bbfbec883c4..4c97ebbdd346 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonFixupKinds.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonFixupKinds.h
@@ -110,6 +110,7 @@ enum Fixups {
fixup_Hexagon_TPREL_32_6_X,
fixup_Hexagon_TPREL_16_X,
fixup_Hexagon_TPREL_11_X,
+ fixup_Hexagon_23_REG,
LastTargetFixupKind,
NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.cpp
index 06ccec532211..42fcc5a6aa89 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.cpp
@@ -79,7 +79,6 @@ void HexagonInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
}
if (HexagonMCInstrInfo::isOuterLoop(*MI)) {
OS << Separator;
- Separator = " ";
MCInst ME;
ME.setOpcode(Hexagon::ENDLOOP1);
printInstruction(&ME, OS);
@@ -203,16 +202,11 @@ void HexagonInstPrinter::printPredicateOperand(MCInst const *MI, unsigned OpNo,
void HexagonInstPrinter::printSymbol(MCInst const *MI, unsigned OpNo,
raw_ostream &O, bool hi) const {
- MCOperand const &MO = MI->getOperand(OpNo);
+ assert(MI->getOperand(OpNo).isImm() && "Unknown symbol operand");
O << '#' << (hi ? "HI" : "LO") << '(';
- if (MO.isImm()) {
- O << '#';
- printOperand(MI, OpNo, O);
- } else {
- printOperand(MI, OpNo, O);
- assert("Unknown symbol operand");
- }
+ O << '#';
+ printOperand(MI, OpNo, O);
O << ')';
}
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
index 51d2f1c878dc..9e2c28076432 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
@@ -32,6 +32,7 @@ HexagonMCAsmInfo::HexagonMCAsmInfo(const Triple &TT) {
AscizDirective = "\t.string\t";
SupportsDebugInformation = true;
+ MinInstAlignment = 4;
UsesELFSectionDirectiveForBSS = true;
ExceptionsType = ExceptionHandling::DwarfCFI;
}
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h
index a8456b4ead9c..efeff2436234 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h
@@ -14,7 +14,6 @@
#ifndef LLVM_LIB_TARGET_HEXAGON_MCTARGETDESC_HEXAGONMCASMINFO_H
#define LLVM_LIB_TARGET_HEXAGON_MCTARGETDESC_HEXAGONMCASMINFO_H
-#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp
index 46b7b41fec3b..07c9ad96a0d7 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp
@@ -16,7 +16,6 @@
#include "HexagonBaseInfo.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/CommandLine.h"
@@ -117,6 +116,11 @@ void HexagonMCChecker::init(MCInst const& MCI) {
for (unsigned i = 0; i < MCID.getNumDefs(); ++i) {
unsigned R = MCI.getOperand(i).getReg(),
S = Hexagon::NoRegister;
+ // USR has subregisters (while C8 does not for technical reasons), so
+ // reset R to USR, since we know how to handle multiple defs of USR,
+ // taking into account its subregisters.
+ if (R == Hexagon::C8)
+ R = Hexagon::USR;
// Note register definitions, direct ones as well as indirect side-effects.
// Super-registers are not tracked directly, but their components.
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h b/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h
index 5fc0bdeaccbb..33e22798c954 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h
@@ -15,10 +15,9 @@
#ifndef HEXAGONMCCHECKER_H
#define HEXAGONMCCHECKER_H
-#include <map>
-#include <set>
-#include <queue>
#include "MCTargetDesc/HexagonMCShuffler.h"
+#include <queue>
+#include <set>
using namespace llvm;
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
index 4b07ca7490a8..39b828d8a03a 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
@@ -88,6 +88,19 @@ void HexagonMCCodeEmitter::encodeInstruction(MCInst const &MI, raw_ostream &OS,
return;
}
+static bool RegisterMatches(unsigned Consumer, unsigned Producer,
+ unsigned Producer2) {
+ if (Consumer == Producer)
+ return true;
+ if (Consumer == Producer2)
+ return true;
+ // Determine whether a single-vector consumer references a double-vector producer
+ if (Producer >= Hexagon::W0 && Producer <= Hexagon::W15)
+ if (Consumer >= Hexagon::V0 && Consumer <= Hexagon::V31)
+ return ((Consumer - Hexagon::V0) >> 1) == (Producer - Hexagon::W0);
+ return false;
+}
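[Editor's note, not part of the patch] A tiny standalone check (plain indices stand in for the Hexagon::V0/W0 enum values) of the pairing rule above: Wn aliases the vector pair V(2n+1):V(2n), so a single-vector consumer of V4 or V5 matches a double-vector producer of W2.

#include <cassert>

int main() {
  unsigned ConsumerIdx = 5; // V5, i.e. Consumer - Hexagon::V0
  unsigned ProducerIdx = 2; // W2, i.e. Producer - Hexagon::W0
  assert((ConsumerIdx >> 1) == ProducerIdx); // V5 lives in the W2 pair
  assert((4u >> 1) == ProducerIdx);          // and so does V4
  return 0;
}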
+
/// EncodeSingleInstruction - Emit a single instruction.
void HexagonMCCodeEmitter::EncodeSingleInstruction(
const MCInst &MI, raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups,
@@ -125,8 +138,10 @@ void HexagonMCCodeEmitter::EncodeSingleInstruction(
MCOperand &MCO =
HMB.getOperand(HexagonMCInstrInfo::getNewValueOp(MCII, HMB));
unsigned SOffset = 0;
+ unsigned VOffset = 0;
unsigned Register = MCO.getReg();
unsigned Register1;
+ unsigned Register2;
auto Instructions = HexagonMCInstrInfo::bundleInstructions(**CurrentBundle);
auto i = Instructions.begin() + Index - 1;
for (;; --i) {
@@ -135,11 +150,18 @@ void HexagonMCCodeEmitter::EncodeSingleInstruction(
if (HexagonMCInstrInfo::isImmext(Inst))
continue;
++SOffset;
+ if (HexagonMCInstrInfo::isVector(MCII, Inst))
+ // The vector distance counts only vector instructions, not scalars
+ ++VOffset;
Register1 =
HexagonMCInstrInfo::hasNewValue(MCII, Inst)
? HexagonMCInstrInfo::getNewValueOperand(MCII, Inst).getReg()
: static_cast<unsigned>(Hexagon::NoRegister);
- if (Register != Register1)
+ Register2 =
+ HexagonMCInstrInfo::hasNewValue2(MCII, Inst)
+ ? HexagonMCInstrInfo::getNewValueOperand2(MCII, Inst).getReg()
+ : static_cast<unsigned>(Hexagon::NoRegister);
+ if (!RegisterMatches(Register, Register1, Register2))
// This isn't the register we're looking for
continue;
if (!HexagonMCInstrInfo::isPredicated(MCII, Inst))
@@ -153,8 +175,11 @@ void HexagonMCCodeEmitter::EncodeSingleInstruction(
break;
}
// Hexagon PRM 10.11 Construct Nt from distance
- unsigned Offset = SOffset;
+ unsigned Offset =
+ HexagonMCInstrInfo::isVector(MCII, HMB) ? VOffset : SOffset;
Offset <<= 1;
+ Offset |=
+ HexagonMCInstrInfo::SubregisterBit(Register, Register1, Register2);
MCO.setReg(Offset + Hexagon::R0);
}
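[Editor's note, not part of the patch] For intuition, a standalone sketch (values hypothetical) of the Nt construction above: the producer distance lands in the upper bits and, for a double-vector producer, the subregister bit selects which half of the W pair is consumed; the result is then carried through as a register number relative to R0.

#include <cassert>

int main() {
  unsigned Offset = 2;         // producer is two instructions back
  unsigned SubregisterBit = 1; // consuming the odd half of a W pair
  Offset <<= 1;
  Offset |= SubregisterBit;
  assert(Offset == 5);         // later stored as Hexagon::R0 + 5
  return 0;
}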
@@ -165,7 +190,6 @@ void HexagonMCCodeEmitter::EncodeSingleInstruction(
((HMB.getOpcode() != DuplexIClass0) && (HMB.getOpcode() != A4_ext) &&
(HMB.getOpcode() != A4_ext_b) && (HMB.getOpcode() != A4_ext_c) &&
(HMB.getOpcode() != A4_ext_g))) {
- // Use a A2_nop for unimplemented instructions.
DEBUG(dbgs() << "Unimplemented inst: "
" `" << HexagonMCInstrInfo::getName(MCII, HMB) << "'"
"\n");
@@ -251,7 +275,23 @@ void HexagonMCCodeEmitter::EncodeSingleInstruction(
++MCNumEmitted;
}
-static Hexagon::Fixups getFixupNoBits(MCInstrInfo const &MCII, const MCInst &MI,
+namespace {
+void raise_relocation_error(unsigned bits, unsigned kind) {
+ std::string Text;
+ {
+ llvm::raw_string_ostream Stream(Text);
+ Stream << "Unrecognized relocation combination bits: " << bits
+ << " kind: " << kind;
+ }
+ report_fatal_error(Text);
+}
+}
+
+/// getFixupNoBits - Some insns are not extended and thus have no
+/// bits. These cases require a more brute-force method for determining
+/// the correct relocation.
+namespace {
+Hexagon::Fixups getFixupNoBits(MCInstrInfo const &MCII, const MCInst &MI,
const MCOperand &MO,
const MCSymbolRefExpr::VariantKind kind) {
const MCInstrDesc &MCID = HexagonMCInstrInfo::getDesc(MCII, MI);
@@ -259,83 +299,90 @@ static Hexagon::Fixups getFixupNoBits(MCInstrInfo const &MCII, const MCInst &MI,
if (insnType == HexagonII::TypePREFIX) {
switch (kind) {
- case llvm::MCSymbolRefExpr::VK_GOTOFF:
+ case MCSymbolRefExpr::VK_GOTREL:
return Hexagon::fixup_Hexagon_GOTREL_32_6_X;
- case llvm::MCSymbolRefExpr::VK_GOT:
+ case MCSymbolRefExpr::VK_GOT:
return Hexagon::fixup_Hexagon_GOT_32_6_X;
- case llvm::MCSymbolRefExpr::VK_TPREL:
+ case MCSymbolRefExpr::VK_TPREL:
return Hexagon::fixup_Hexagon_TPREL_32_6_X;
- case llvm::MCSymbolRefExpr::VK_DTPREL:
+ case MCSymbolRefExpr::VK_DTPREL:
return Hexagon::fixup_Hexagon_DTPREL_32_6_X;
- case llvm::MCSymbolRefExpr::VK_Hexagon_GD_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_GD_GOT:
return Hexagon::fixup_Hexagon_GD_GOT_32_6_X;
- case llvm::MCSymbolRefExpr::VK_Hexagon_LD_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_LD_GOT:
return Hexagon::fixup_Hexagon_LD_GOT_32_6_X;
- case llvm::MCSymbolRefExpr::VK_Hexagon_IE:
+ case MCSymbolRefExpr::VK_Hexagon_IE:
return Hexagon::fixup_Hexagon_IE_32_6_X;
- case llvm::MCSymbolRefExpr::VK_Hexagon_IE_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_IE_GOT:
return Hexagon::fixup_Hexagon_IE_GOT_32_6_X;
- default:
+ case MCSymbolRefExpr::VK_Hexagon_PCREL:
+ case MCSymbolRefExpr::VK_None:
if (MCID.isBranch())
return Hexagon::fixup_Hexagon_B32_PCREL_X;
else
return Hexagon::fixup_Hexagon_32_6_X;
+ default:
+ raise_relocation_error(0, kind);
}
} else if (MCID.isBranch())
- return (Hexagon::fixup_Hexagon_B13_PCREL);
+ return Hexagon::fixup_Hexagon_B13_PCREL;
switch (MCID.getOpcode()) {
case Hexagon::HI:
case Hexagon::A2_tfrih:
switch (kind) {
- case llvm::MCSymbolRefExpr::VK_GOT:
+ case MCSymbolRefExpr::VK_GOT:
return Hexagon::fixup_Hexagon_GOT_HI16;
- case llvm::MCSymbolRefExpr::VK_GOTOFF:
+ case MCSymbolRefExpr::VK_GOTREL:
return Hexagon::fixup_Hexagon_GOTREL_HI16;
- case llvm::MCSymbolRefExpr::VK_Hexagon_GD_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_GD_GOT:
return Hexagon::fixup_Hexagon_GD_GOT_HI16;
- case llvm::MCSymbolRefExpr::VK_Hexagon_LD_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_LD_GOT:
return Hexagon::fixup_Hexagon_LD_GOT_HI16;
- case llvm::MCSymbolRefExpr::VK_Hexagon_IE:
+ case MCSymbolRefExpr::VK_Hexagon_IE:
return Hexagon::fixup_Hexagon_IE_HI16;
- case llvm::MCSymbolRefExpr::VK_Hexagon_IE_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_IE_GOT:
return Hexagon::fixup_Hexagon_IE_GOT_HI16;
- case llvm::MCSymbolRefExpr::VK_TPREL:
+ case MCSymbolRefExpr::VK_TPREL:
return Hexagon::fixup_Hexagon_TPREL_HI16;
- case llvm::MCSymbolRefExpr::VK_DTPREL:
+ case MCSymbolRefExpr::VK_DTPREL:
return Hexagon::fixup_Hexagon_DTPREL_HI16;
- default:
+ case MCSymbolRefExpr::VK_None:
return Hexagon::fixup_Hexagon_HI16;
+ default:
+ raise_relocation_error(0, kind);
}
case Hexagon::LO:
case Hexagon::A2_tfril:
switch (kind) {
- case llvm::MCSymbolRefExpr::VK_GOT:
+ case MCSymbolRefExpr::VK_GOT:
return Hexagon::fixup_Hexagon_GOT_LO16;
- case llvm::MCSymbolRefExpr::VK_GOTOFF:
+ case MCSymbolRefExpr::VK_GOTREL:
return Hexagon::fixup_Hexagon_GOTREL_LO16;
- case llvm::MCSymbolRefExpr::VK_Hexagon_GD_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_GD_GOT:
return Hexagon::fixup_Hexagon_GD_GOT_LO16;
- case llvm::MCSymbolRefExpr::VK_Hexagon_LD_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_LD_GOT:
return Hexagon::fixup_Hexagon_LD_GOT_LO16;
- case llvm::MCSymbolRefExpr::VK_Hexagon_IE:
+ case MCSymbolRefExpr::VK_Hexagon_IE:
return Hexagon::fixup_Hexagon_IE_LO16;
- case llvm::MCSymbolRefExpr::VK_Hexagon_IE_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_IE_GOT:
return Hexagon::fixup_Hexagon_IE_GOT_LO16;
- case llvm::MCSymbolRefExpr::VK_TPREL:
+ case MCSymbolRefExpr::VK_TPREL:
return Hexagon::fixup_Hexagon_TPREL_LO16;
- case llvm::MCSymbolRefExpr::VK_DTPREL:
+ case MCSymbolRefExpr::VK_DTPREL:
return Hexagon::fixup_Hexagon_DTPREL_LO16;
- default:
+ case MCSymbolRefExpr::VK_None:
return Hexagon::fixup_Hexagon_LO16;
+ default:
+ raise_relocation_error(0, kind);
}
// The only relocs left should be GP relative:
default:
if (MCID.mayStore() || MCID.mayLoad()) {
- for (const MCPhysReg *ImpUses = MCID.getImplicitUses();
- ImpUses && *ImpUses; ++ImpUses) {
+ for (const MCPhysReg *ImpUses = MCID.getImplicitUses(); *ImpUses;
+ ++ImpUses) {
if (*ImpUses != Hexagon::GP)
continue;
switch (HexagonMCInstrInfo::getAccessSize(MCII, MI)) {
@@ -348,14 +395,14 @@ static Hexagon::Fixups getFixupNoBits(MCInstrInfo const &MCII, const MCInst &MI,
case HexagonII::MemAccessSize::DoubleWordAccess:
return fixup_Hexagon_GPREL16_3;
default:
- llvm_unreachable("unhandled fixup");
+ raise_relocation_error(0, kind);
}
}
- } else
- llvm_unreachable("unhandled fixup");
+ }
+ raise_relocation_error(0, kind);
}
-
- return LastTargetFixupKind;
+ llvm_unreachable("Relocation exit not taken");
+}
}
namespace llvm {
@@ -395,23 +442,18 @@ unsigned HexagonMCCodeEmitter::getExprOpValue(const MCInst &MI,
const MCSubtargetInfo &STI) const
{
- int64_t Res;
-
- if (ME->evaluateAsAbsolute(Res))
- return Res;
-
- MCExpr::ExprKind MK = ME->getKind();
- if (MK == MCExpr::Constant) {
- return cast<MCConstantExpr>(ME)->getValue();
- }
- if (MK == MCExpr::Binary) {
- getExprOpValue(MI, MO, cast<MCBinaryExpr>(ME)->getLHS(), Fixups, STI);
- getExprOpValue(MI, MO, cast<MCBinaryExpr>(ME)->getRHS(), Fixups, STI);
+ if (isa<HexagonMCExpr>(ME))
+ ME = &HexagonMCInstrInfo::getExpr(*ME);
+ int64_t Value;
+ if (ME->evaluateAsAbsolute(Value))
+ return Value;
+ assert(ME->getKind() == MCExpr::SymbolRef || ME->getKind() == MCExpr::Binary);
+ if (ME->getKind() == MCExpr::Binary) {
+ MCBinaryExpr const *Binary = cast<MCBinaryExpr>(ME);
+ getExprOpValue(MI, MO, Binary->getLHS(), Fixups, STI);
+ getExprOpValue(MI, MO, Binary->getRHS(), Fixups, STI);
return 0;
}
-
- assert(MK == MCExpr::SymbolRef);
-
Hexagon::Fixups FixupKind =
Hexagon::Fixups(Hexagon::fixup_Hexagon_TPREL_LO16);
const MCSymbolRefExpr *MCSRE = static_cast<const MCSymbolRefExpr *>(ME);
@@ -430,275 +472,302 @@ unsigned HexagonMCCodeEmitter::getExprOpValue(const MCInst &MI,
switch (bits) {
default:
- DEBUG(dbgs() << "unrecognized bit count of " << bits << '\n');
- break;
-
+ raise_relocation_error(bits, kind);
case 32:
switch (kind) {
- case llvm::MCSymbolRefExpr::VK_Hexagon_PCREL:
- FixupKind = Hexagon::fixup_Hexagon_32_PCREL;
+ case MCSymbolRefExpr::VK_DTPREL:
+ FixupKind = *Extended ? Hexagon::fixup_Hexagon_DTPREL_32_6_X
+ : Hexagon::fixup_Hexagon_DTPREL_32;
break;
- case llvm::MCSymbolRefExpr::VK_GOT:
+ case MCSymbolRefExpr::VK_GOT:
FixupKind = *Extended ? Hexagon::fixup_Hexagon_GOT_32_6_X
: Hexagon::fixup_Hexagon_GOT_32;
break;
- case llvm::MCSymbolRefExpr::VK_GOTOFF:
+ case MCSymbolRefExpr::VK_GOTREL:
FixupKind = *Extended ? Hexagon::fixup_Hexagon_GOTREL_32_6_X
: Hexagon::fixup_Hexagon_GOTREL_32;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_GD_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_GD_GOT:
FixupKind = *Extended ? Hexagon::fixup_Hexagon_GD_GOT_32_6_X
: Hexagon::fixup_Hexagon_GD_GOT_32;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_LD_GOT:
- FixupKind = *Extended ? Hexagon::fixup_Hexagon_LD_GOT_32_6_X
- : Hexagon::fixup_Hexagon_LD_GOT_32;
- break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_IE:
+ case MCSymbolRefExpr::VK_Hexagon_IE:
FixupKind = *Extended ? Hexagon::fixup_Hexagon_IE_32_6_X
: Hexagon::fixup_Hexagon_IE_32;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_IE_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_IE_GOT:
FixupKind = *Extended ? Hexagon::fixup_Hexagon_IE_GOT_32_6_X
: Hexagon::fixup_Hexagon_IE_GOT_32;
break;
- case llvm::MCSymbolRefExpr::VK_TPREL:
- FixupKind = *Extended ? Hexagon::fixup_Hexagon_TPREL_32_6_X
- : Hexagon::fixup_Hexagon_TPREL_32;
+ case MCSymbolRefExpr::VK_Hexagon_LD_GOT:
+ FixupKind = *Extended ? Hexagon::fixup_Hexagon_LD_GOT_32_6_X
+ : Hexagon::fixup_Hexagon_LD_GOT_32;
break;
- case llvm::MCSymbolRefExpr::VK_DTPREL:
- FixupKind = *Extended ? Hexagon::fixup_Hexagon_DTPREL_32_6_X
- : Hexagon::fixup_Hexagon_DTPREL_32;
+ case MCSymbolRefExpr::VK_Hexagon_PCREL:
+ FixupKind = Hexagon::fixup_Hexagon_32_PCREL;
break;
- default:
+ case MCSymbolRefExpr::VK_None:
FixupKind =
*Extended ? Hexagon::fixup_Hexagon_32_6_X : Hexagon::fixup_Hexagon_32;
break;
+ case MCSymbolRefExpr::VK_TPREL:
+ FixupKind = *Extended ? Hexagon::fixup_Hexagon_TPREL_32_6_X
+ : Hexagon::fixup_Hexagon_TPREL_32;
+ break;
+ default:
+ raise_relocation_error(bits, kind);
}
break;
case 22:
switch (kind) {
- case llvm::MCSymbolRefExpr::VK_Hexagon_GD_PLT:
+ case MCSymbolRefExpr::VK_Hexagon_GD_PLT:
FixupKind = Hexagon::fixup_Hexagon_GD_PLT_B22_PCREL;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_LD_PLT:
+ case MCSymbolRefExpr::VK_Hexagon_LD_PLT:
FixupKind = Hexagon::fixup_Hexagon_LD_PLT_B22_PCREL;
break;
- default:
- if (MCID.isBranch() || MCID.isCall()) {
- FixupKind = *Extended ? Hexagon::fixup_Hexagon_B22_PCREL_X
- : Hexagon::fixup_Hexagon_B22_PCREL;
- } else {
- errs() << "unrecognized relocation, bits: " << bits << "\n";
- errs() << "name = " << HexagonMCInstrInfo::getName(MCII, MI) << "\n";
- }
+ case MCSymbolRefExpr::VK_None:
+ FixupKind = *Extended ? Hexagon::fixup_Hexagon_B22_PCREL_X
+ : Hexagon::fixup_Hexagon_B22_PCREL;
+ break;
+ case MCSymbolRefExpr::VK_PLT:
+ FixupKind = Hexagon::fixup_Hexagon_PLT_B22_PCREL;
break;
+ default:
+ raise_relocation_error(bits, kind);
}
break;
case 16:
if (*Extended) {
switch (kind) {
- default:
- FixupKind = Hexagon::fixup_Hexagon_16_X;
+ case MCSymbolRefExpr::VK_DTPREL:
+ FixupKind = Hexagon::fixup_Hexagon_DTPREL_16_X;
break;
- case llvm::MCSymbolRefExpr::VK_GOT:
+ case MCSymbolRefExpr::VK_GOT:
FixupKind = Hexagon::fixup_Hexagon_GOT_16_X;
break;
- case llvm::MCSymbolRefExpr::VK_GOTOFF:
+ case MCSymbolRefExpr::VK_GOTREL:
FixupKind = Hexagon::fixup_Hexagon_GOTREL_16_X;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_GD_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_GD_GOT:
FixupKind = Hexagon::fixup_Hexagon_GD_GOT_16_X;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_LD_GOT:
- FixupKind = Hexagon::fixup_Hexagon_LD_GOT_16_X;
- break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_IE:
+ case MCSymbolRefExpr::VK_Hexagon_IE:
FixupKind = Hexagon::fixup_Hexagon_IE_16_X;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_IE_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_IE_GOT:
FixupKind = Hexagon::fixup_Hexagon_IE_GOT_16_X;
break;
- case llvm::MCSymbolRefExpr::VK_TPREL:
- FixupKind = Hexagon::fixup_Hexagon_TPREL_16_X;
+ case MCSymbolRefExpr::VK_Hexagon_LD_GOT:
+ FixupKind = Hexagon::fixup_Hexagon_LD_GOT_16_X;
break;
- case llvm::MCSymbolRefExpr::VK_DTPREL:
- FixupKind = Hexagon::fixup_Hexagon_DTPREL_16_X;
+ case MCSymbolRefExpr::VK_None:
+ FixupKind = Hexagon::fixup_Hexagon_16_X;
+ break;
+ case MCSymbolRefExpr::VK_TPREL:
+ FixupKind = Hexagon::fixup_Hexagon_TPREL_16_X;
break;
+ default:
+ raise_relocation_error(bits, kind);
}
} else
switch (kind) {
- default:
- errs() << "unrecognized relocation, bits " << bits << "\n";
- errs() << "name = " << HexagonMCInstrInfo::getName(MCII, MI) << "\n";
+ case MCSymbolRefExpr::VK_None: {
+ if (HexagonMCInstrInfo::s23_2_reloc(*MO.getExpr()))
+ FixupKind = Hexagon::fixup_Hexagon_23_REG;
+ else
+ raise_relocation_error(bits, kind);
+ break;
+ }
+ case MCSymbolRefExpr::VK_DTPREL:
+ FixupKind = Hexagon::fixup_Hexagon_DTPREL_16;
break;
- case llvm::MCSymbolRefExpr::VK_GOTOFF:
- if ((MCID.getOpcode() == Hexagon::HI) ||
- (MCID.getOpcode() == Hexagon::LO_H))
+ case MCSymbolRefExpr::VK_GOTREL:
+ if (MCID.getOpcode() == Hexagon::HI)
FixupKind = Hexagon::fixup_Hexagon_GOTREL_HI16;
else
FixupKind = Hexagon::fixup_Hexagon_GOTREL_LO16;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_GPREL:
- FixupKind = Hexagon::fixup_Hexagon_GPREL16_0;
+ case MCSymbolRefExpr::VK_Hexagon_GD_GOT:
+ FixupKind = Hexagon::fixup_Hexagon_GD_GOT_16;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_LO16:
- FixupKind = Hexagon::fixup_Hexagon_LO16;
+ case MCSymbolRefExpr::VK_Hexagon_GPREL:
+ FixupKind = Hexagon::fixup_Hexagon_GPREL16_0;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_HI16:
+ case MCSymbolRefExpr::VK_Hexagon_HI16:
FixupKind = Hexagon::fixup_Hexagon_HI16;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_GD_GOT:
- FixupKind = Hexagon::fixup_Hexagon_GD_GOT_16;
+ case MCSymbolRefExpr::VK_Hexagon_IE_GOT:
+ FixupKind = Hexagon::fixup_Hexagon_IE_GOT_16;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_LD_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_LD_GOT:
FixupKind = Hexagon::fixup_Hexagon_LD_GOT_16;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_IE_GOT:
- FixupKind = Hexagon::fixup_Hexagon_IE_GOT_16;
+ case MCSymbolRefExpr::VK_Hexagon_LO16:
+ FixupKind = Hexagon::fixup_Hexagon_LO16;
break;
- case llvm::MCSymbolRefExpr::VK_TPREL:
+ case MCSymbolRefExpr::VK_TPREL:
FixupKind = Hexagon::fixup_Hexagon_TPREL_16;
break;
- case llvm::MCSymbolRefExpr::VK_DTPREL:
- FixupKind = Hexagon::fixup_Hexagon_DTPREL_16;
- break;
+ default:
+ raise_relocation_error(bits, kind);
}
break;
case 15:
- if (MCID.isBranch() || MCID.isCall())
+ switch (kind) {
+ case MCSymbolRefExpr::VK_None:
FixupKind = *Extended ? Hexagon::fixup_Hexagon_B15_PCREL_X
: Hexagon::fixup_Hexagon_B15_PCREL;
+ break;
+ default:
+ raise_relocation_error(bits, kind);
+ }
break;
case 13:
- if (MCID.isBranch())
+ switch (kind) {
+ case MCSymbolRefExpr::VK_None:
FixupKind = Hexagon::fixup_Hexagon_B13_PCREL;
- else {
- errs() << "unrecognized relocation, bits " << bits << "\n";
- errs() << "name = " << HexagonMCInstrInfo::getName(MCII, MI) << "\n";
+ break;
+ default:
+ raise_relocation_error(bits, kind);
}
break;
case 12:
if (*Extended)
switch (kind) {
- default:
- FixupKind = Hexagon::fixup_Hexagon_12_X;
- break;
      // There isn't a GOT_12_X; both 11_X and 16_X resolve to 6/26.
- case llvm::MCSymbolRefExpr::VK_GOT:
+ case MCSymbolRefExpr::VK_GOT:
FixupKind = Hexagon::fixup_Hexagon_GOT_16_X;
break;
- case llvm::MCSymbolRefExpr::VK_GOTOFF:
+ case MCSymbolRefExpr::VK_GOTREL:
FixupKind = Hexagon::fixup_Hexagon_GOTREL_16_X;
break;
+ case MCSymbolRefExpr::VK_None:
+ FixupKind = Hexagon::fixup_Hexagon_12_X;
+ break;
+ default:
+ raise_relocation_error(bits, kind);
}
- else {
- errs() << "unrecognized relocation, bits " << bits << "\n";
- errs() << "name = " << HexagonMCInstrInfo::getName(MCII, MI) << "\n";
- }
+ else
+ raise_relocation_error(bits, kind);
break;
case 11:
if (*Extended)
switch (kind) {
- default:
- FixupKind = Hexagon::fixup_Hexagon_11_X;
+ case MCSymbolRefExpr::VK_DTPREL:
+ FixupKind = Hexagon::fixup_Hexagon_DTPREL_11_X;
break;
- case llvm::MCSymbolRefExpr::VK_GOT:
+ case MCSymbolRefExpr::VK_GOT:
FixupKind = Hexagon::fixup_Hexagon_GOT_11_X;
break;
- case llvm::MCSymbolRefExpr::VK_GOTOFF:
+ case MCSymbolRefExpr::VK_GOTREL:
FixupKind = Hexagon::fixup_Hexagon_GOTREL_11_X;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_GD_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_GD_GOT:
FixupKind = Hexagon::fixup_Hexagon_GD_GOT_11_X;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_LD_GOT:
+ case MCSymbolRefExpr::VK_Hexagon_IE_GOT:
+ FixupKind = Hexagon::fixup_Hexagon_IE_GOT_11_X;
+ break;
+ case MCSymbolRefExpr::VK_Hexagon_LD_GOT:
FixupKind = Hexagon::fixup_Hexagon_LD_GOT_11_X;
break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_IE_GOT:
- FixupKind = Hexagon::fixup_Hexagon_IE_GOT_11_X;
+ case MCSymbolRefExpr::VK_None:
+ FixupKind = Hexagon::fixup_Hexagon_11_X;
break;
- case llvm::MCSymbolRefExpr::VK_TPREL:
+ case MCSymbolRefExpr::VK_TPREL:
FixupKind = Hexagon::fixup_Hexagon_TPREL_11_X;
break;
- case llvm::MCSymbolRefExpr::VK_DTPREL:
- FixupKind = Hexagon::fixup_Hexagon_DTPREL_11_X;
- break;
+ default:
+ raise_relocation_error(bits, kind);
}
else {
- errs() << "unrecognized relocation, bits " << bits << "\n";
- errs() << "name = " << HexagonMCInstrInfo::getName(MCII, MI) << "\n";
+ switch (kind) {
+ case MCSymbolRefExpr::VK_TPREL:
+ FixupKind = Hexagon::fixup_Hexagon_TPREL_11_X;
+ break;
+ default:
+ raise_relocation_error(bits, kind);
+ }
}
break;
case 10:
- if (*Extended)
- FixupKind = Hexagon::fixup_Hexagon_10_X;
+ if (*Extended) {
+ switch (kind) {
+ case MCSymbolRefExpr::VK_None:
+ FixupKind = Hexagon::fixup_Hexagon_10_X;
+ break;
+ default:
+ raise_relocation_error(bits, kind);
+ }
+ } else
+ raise_relocation_error(bits, kind);
break;
case 9:
if (MCID.isBranch() ||
- (llvm::HexagonMCInstrInfo::getType(MCII, MI) == HexagonII::TypeCR))
+ (HexagonMCInstrInfo::getType(MCII, MI) == HexagonII::TypeCR))
FixupKind = *Extended ? Hexagon::fixup_Hexagon_B9_PCREL_X
: Hexagon::fixup_Hexagon_B9_PCREL;
else if (*Extended)
FixupKind = Hexagon::fixup_Hexagon_9_X;
- else {
- errs() << "unrecognized relocation, bits " << bits << "\n";
- errs() << "name = " << HexagonMCInstrInfo::getName(MCII, MI) << "\n";
- }
+ else
+ raise_relocation_error(bits, kind);
break;
case 8:
if (*Extended)
FixupKind = Hexagon::fixup_Hexagon_8_X;
- else {
- errs() << "unrecognized relocation, bits " << bits << "\n";
- errs() << "name = " << HexagonMCInstrInfo::getName(MCII, MI) << "\n";
- }
+ else
+ raise_relocation_error(bits, kind);
break;
case 7:
if (MCID.isBranch() ||
- (llvm::HexagonMCInstrInfo::getType(MCII, MI) == HexagonII::TypeCR))
+ (HexagonMCInstrInfo::getType(MCII, MI) == HexagonII::TypeCR))
FixupKind = *Extended ? Hexagon::fixup_Hexagon_B7_PCREL_X
: Hexagon::fixup_Hexagon_B7_PCREL;
else if (*Extended)
FixupKind = Hexagon::fixup_Hexagon_7_X;
- else {
- errs() << "unrecognized relocation, bits " << bits << "\n";
- errs() << "name = " << HexagonMCInstrInfo::getName(MCII, MI) << "\n";
- }
+ else
+ raise_relocation_error(bits, kind);
break;
case 6:
if (*Extended) {
switch (kind) {
- default:
- FixupKind = Hexagon::fixup_Hexagon_6_X;
- break;
- case llvm::MCSymbolRefExpr::VK_Hexagon_PCREL:
- FixupKind = Hexagon::fixup_Hexagon_6_PCREL_X;
+ case MCSymbolRefExpr::VK_DTPREL:
+ FixupKind = Hexagon::fixup_Hexagon_DTPREL_16_X;
break;
      // This is part of an extender; GOT_11 is a
      // Word32_U6 unsigned/truncated reloc.
- case llvm::MCSymbolRefExpr::VK_GOT:
+ case MCSymbolRefExpr::VK_GOT:
FixupKind = Hexagon::fixup_Hexagon_GOT_11_X;
break;
- case llvm::MCSymbolRefExpr::VK_GOTOFF:
+ case MCSymbolRefExpr::VK_GOTREL:
FixupKind = Hexagon::fixup_Hexagon_GOTREL_11_X;
break;
+ case MCSymbolRefExpr::VK_Hexagon_PCREL:
+ FixupKind = Hexagon::fixup_Hexagon_6_PCREL_X;
+ break;
+ case MCSymbolRefExpr::VK_TPREL:
+ FixupKind = Hexagon::fixup_Hexagon_TPREL_16_X;
+ break;
+ case MCSymbolRefExpr::VK_None:
+ FixupKind = Hexagon::fixup_Hexagon_6_X;
+ break;
+ default:
+ raise_relocation_error(bits, kind);
}
- } else {
- errs() << "unrecognized relocation, bits " << bits << "\n";
- errs() << "name = " << HexagonMCInstrInfo::getName(MCII, MI) << "\n";
- }
+ } else
+ raise_relocation_error(bits, kind);
break;
case 0:
@@ -706,29 +775,39 @@ unsigned HexagonMCCodeEmitter::getExprOpValue(const MCInst &MI,
break;
}
- MCExpr const *FixupExpression = (*Addend > 0 && isPCRel(FixupKind)) ?
- MCBinaryExpr::createAdd(MO.getExpr(),
- MCConstantExpr::create(*Addend, MCT), MCT) :
- MO.getExpr();
+ MCExpr const *FixupExpression =
+ (*Addend > 0 && isPCRel(FixupKind))
+ ? MCBinaryExpr::createAdd(MO.getExpr(),
+ MCConstantExpr::create(*Addend, MCT), MCT)
+ : MO.getExpr();
- MCFixup fixup = MCFixup::create(*Addend, FixupExpression,
+ MCFixup fixup = MCFixup::create(*Addend, FixupExpression,
MCFixupKind(FixupKind), MI.getLoc());
Fixups.push_back(fixup);
// All of the information is in the fixup.
- return (0);
+ return 0;
}
unsigned
HexagonMCCodeEmitter::getMachineOpValue(MCInst const &MI, MCOperand const &MO,
SmallVectorImpl<MCFixup> &Fixups,
MCSubtargetInfo const &STI) const {
- if (MO.isReg())
- return MCT.getRegisterInfo()->getEncodingValue(MO.getReg());
- if (MO.isImm())
- return static_cast<unsigned>(MO.getImm());
+ assert(!MO.isImm());
+ if (MO.isReg()) {
+ unsigned Reg = MO.getReg();
+ if (HexagonMCInstrInfo::isSubInstruction(MI))
+ return HexagonMCInstrInfo::getDuplexRegisterNumbering(Reg);
+    switch (MI.getOpcode()) {
+    case Hexagon::A2_tfrrcr:
+    case Hexagon::A2_tfrcrr:
+      if (Reg == Hexagon::M0)
+        Reg = Hexagon::C6;
+      if (Reg == Hexagon::M1)
+        Reg = Hexagon::C7;
+    }
+ return MCT.getRegisterInfo()->getEncodingValue(Reg);
+ }
- // MO must be an ME.
- assert(MO.isExpr());
return getExprOpValue(MI, MO, MO.getExpr(), Fixups, STI);
}
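
Note: the recurring change in the hunks above replaces the old catch-all defaults, which silently picked a generic fixup or merely printed to errs() and fell through, with an explicit VK_None case plus a default that calls raise_relocation_error(), so an unsupported variant kind now fails loudly at encode time. A minimal standalone sketch of the new shape (hypothetical names and fixup values, not the actual emitter):

    #include <cstdio>
    #include <cstdlib>

    enum VariantKind { VK_None, VK_TPREL, VK_Other };

    // Known kinds map explicitly; anything else is a hard error rather than
    // a silently chosen default fixup.
    static unsigned selectFixup32(VariantKind Kind, bool Extended) {
      switch (Kind) {
      case VK_None:  return Extended ? 1u /*32_6_X*/ : 0u /*32*/;
      case VK_TPREL: return Extended ? 3u /*TPREL_32_6_X*/ : 2u /*TPREL_32*/;
      default:
        std::fprintf(stderr, "unrecognized relocation combination\n");
        std::abort(); // models raise_relocation_error(), which does not return
      }
    }
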
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
index e6194f61a6ba..88336217cc8d 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
@@ -80,9 +80,6 @@ static const std::pair<unsigned, unsigned> opcodeData[] = {
std::make_pair((unsigned)V4_SS2_storewi0, 4096),
std::make_pair((unsigned)V4_SS2_storewi1, 4352)};
-static std::map<unsigned, unsigned>
- subinstOpcodeMap(std::begin(opcodeData), std::end(opcodeData));
-
bool HexagonMCInstrInfo::isDuplexPairMatch(unsigned Ga, unsigned Gb) {
switch (Ga) {
case HexagonII::HSIG_None:
@@ -587,6 +584,9 @@ bool HexagonMCInstrInfo::isOrderedDuplexPair(MCInstrInfo const &MCII,
unsigned MIaG = HexagonMCInstrInfo::getDuplexCandidateGroup(MIa),
MIbG = HexagonMCInstrInfo::getDuplexCandidateGroup(MIb);
+ static std::map<unsigned, unsigned> subinstOpcodeMap(std::begin(opcodeData),
+ std::end(opcodeData));
+
// If a duplex contains 2 insns in the same group, the insns must be
// ordered such that the numerically smaller opcode is in slot 1.
if ((MIaG != HexagonII::HSIG_None) && (MIaG == MIbG) && bisReversable) {
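
Note: moving subinstOpcodeMap from file scope into isOrderedDuplexPair() defers the std::map construction from program startup to the first call (C++11 function-local statics are initialized once, thread-safely), so the table costs nothing unless duplexing is actually queried. The idiom in isolation:

    #include <map>

    static int lookupEncoding(unsigned Opc) {
      // Built once, on first use; no global constructor at program start.
      static const std::map<unsigned, unsigned> OpcodeMap = {
          {10u, 4096u}, {11u, 4352u}}; // illustrative entries
      auto It = OpcodeMap.find(Opc);
      return It == OpcodeMap.end() ? -1 : static_cast<int>(It->second);
    }
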
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp
index eaa3550d07f6..67dcb8fea739 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp
@@ -107,15 +107,20 @@ void HexagonMCELFStreamer::HexagonMCEmitCommonSymbol(MCSymbol *Symbol,
((AccessSize == 0) || (Size == 0) || (Size > GPSize))
? ".bss"
: sbss[(Log2_64(AccessSize))];
-
- MCSection *CrntSection = getCurrentSection().first;
- MCSection *Section = getAssembler().getContext().getELFSection(
+ MCSection &Section = *getAssembler().getContext().getELFSection(
SectionName, ELF::SHT_NOBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC);
- SwitchSection(Section);
- AssignFragment(Symbol, getCurrentFragment());
+ MCSectionSubPair P = getCurrentSection();
+ SwitchSection(&Section);
+
+ EmitValueToAlignment(ByteAlignment, 0, 1, 0);
+ EmitLabel(Symbol);
+ EmitZeros(Size);
+
+ // Update the maximum alignment of the section if necessary.
+ if (ByteAlignment > Section.getAlignment())
+ Section.setAlignment(ByteAlignment);
- MCELFStreamer::EmitCommonSymbol(Symbol, Size, ByteAlignment);
- SwitchSection(CrntSection);
+ SwitchSection(P.first, P.second);
} else {
if (ELFSymbol->declareCommon(Size, ByteAlignment))
report_fatal_error("Symbol: " + Symbol->getName() +
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCExpr.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCExpr.cpp
index fc6262657514..e93906a0a396 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCExpr.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCExpr.cpp
@@ -10,6 +10,7 @@
#include "HexagonMCExpr.h"
#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/raw_ostream.h"
@@ -17,33 +18,61 @@ using namespace llvm;
#define DEBUG_TYPE "hexagon-mcexpr"
-HexagonNoExtendOperand *HexagonNoExtendOperand::Create(MCExpr const *Expr,
- MCContext &Ctx) {
- return new (Ctx) HexagonNoExtendOperand(Expr);
+HexagonMCExpr *HexagonMCExpr::create(MCExpr const *Expr, MCContext &Ctx) {
+ return new (Ctx) HexagonMCExpr(Expr);
}
-bool HexagonNoExtendOperand::evaluateAsRelocatableImpl(
- MCValue &Res, MCAsmLayout const *Layout, MCFixup const *Fixup) const {
+bool HexagonMCExpr::evaluateAsRelocatableImpl(MCValue &Res,
+ MCAsmLayout const *Layout,
+ MCFixup const *Fixup) const {
return Expr->evaluateAsRelocatable(Res, Layout, Fixup);
}
-void HexagonNoExtendOperand::visitUsedExpr(MCStreamer &Streamer) const {}
+void HexagonMCExpr::visitUsedExpr(MCStreamer &Streamer) const {
+ Streamer.visitUsedExpr(*Expr);
+}
-MCFragment *llvm::HexagonNoExtendOperand::findAssociatedFragment() const {
+MCFragment *llvm::HexagonMCExpr::findAssociatedFragment() const {
return Expr->findAssociatedFragment();
}
-void HexagonNoExtendOperand::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {}
+void HexagonMCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {}
+
+MCExpr const *HexagonMCExpr::getExpr() const { return Expr; }
+
+void HexagonMCExpr::setMustExtend(bool Val) {
+ assert((!Val || !MustNotExtend) && "Extension contradiction");
+ MustExtend = Val;
+}
+
+bool HexagonMCExpr::mustExtend() const { return MustExtend; }
+void HexagonMCExpr::setMustNotExtend(bool Val) {
+ assert((!Val || !MustExtend) && "Extension contradiction");
+ MustNotExtend = Val;
+}
+bool HexagonMCExpr::mustNotExtend() const { return MustNotExtend; }
-MCExpr const *HexagonNoExtendOperand::getExpr() const { return Expr; }
+bool HexagonMCExpr::s23_2_reloc() const { return S23_2_reloc; }
+void HexagonMCExpr::setS23_2_reloc(bool Val) {
+ S23_2_reloc = Val;
+}
-bool HexagonNoExtendOperand::classof(MCExpr const *E) {
+bool HexagonMCExpr::classof(MCExpr const *E) {
return E->getKind() == MCExpr::Target;
}
-HexagonNoExtendOperand::HexagonNoExtendOperand(MCExpr const *Expr)
- : Expr(Expr) {}
+HexagonMCExpr::HexagonMCExpr(MCExpr const *Expr)
+ : Expr(Expr), MustNotExtend(false), MustExtend(false), S23_2_reloc(false),
+ SignMismatch(false) {}
-void HexagonNoExtendOperand::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
+void HexagonMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
Expr->print(OS, MAI);
}
+
+void HexagonMCExpr::setSignMismatch(bool Val) {
+ SignMismatch = Val;
+}
+
+bool HexagonMCExpr::signMismatch() const {
+ return SignMismatch;
+}
\ No newline at end of file
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCExpr.h b/lib/Target/Hexagon/MCTargetDesc/HexagonMCExpr.h
index 60f180fb2bc4..bca40cfaf6f4 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCExpr.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCExpr.h
@@ -14,9 +14,9 @@
namespace llvm {
class MCInst;
-class HexagonNoExtendOperand : public MCTargetExpr {
+class HexagonMCExpr : public MCTargetExpr {
public:
- static HexagonNoExtendOperand *Create(MCExpr const *Expr, MCContext &Ctx);
+ static HexagonMCExpr *create(MCExpr const *Expr, MCContext &Ctx);
void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const override;
bool evaluateAsRelocatableImpl(MCValue &Res, const MCAsmLayout *Layout,
const MCFixup *Fixup) const override;
@@ -25,10 +25,22 @@ public:
void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override;
static bool classof(MCExpr const *E);
MCExpr const *getExpr() const;
+ void setMustExtend(bool Val = true);
+ bool mustExtend() const;
+ void setMustNotExtend(bool Val = true);
+ bool mustNotExtend() const;
+ void setS23_2_reloc(bool Val = true);
+ bool s23_2_reloc() const;
+ void setSignMismatch(bool Val = true);
+ bool signMismatch() const;
private:
- HexagonNoExtendOperand(MCExpr const *Expr);
+ HexagonMCExpr(MCExpr const *Expr);
MCExpr const *Expr;
+ bool MustNotExtend;
+ bool MustExtend;
+ bool S23_2_reloc;
+ bool SignMismatch;
};
} // end namespace llvm
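
Note: the rename from HexagonNoExtendOperand to HexagonMCExpr reflects the wider role: the wrapper now carries per-operand encoding state (MustExtend, MustNotExtend, S23_2_reloc, SignMismatch) consulted by the code emitter, and visitUsedExpr() now forwards to the wrapped expression instead of doing nothing, so used symbols are registered with the streamer. A hedged usage sketch (Ctx and Sym assumed in scope):

    HexagonMCExpr *HE =
        HexagonMCExpr::create(MCSymbolRefExpr::create(Sym, Ctx), Ctx);
    HE->setMustExtend();     // request a constant extender for this operand
    assert(HE->mustExtend() && !HE->mustNotExtend());
    // Calling HE->setMustNotExtend() here would trip the "Extension
    // contradiction" assertion: the two flags are mutually exclusive.
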
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp
index e6842076db2a..941cbd6dc35d 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp
@@ -99,7 +99,8 @@ void HexagonMCInstrInfo::clampExtended(MCInstrInfo const &MCII,
int64_t Value;
if (exOp.getExpr()->evaluateAsAbsolute(Value)) {
unsigned Shift = HexagonMCInstrInfo::getExtentAlignment(MCII, MCI);
- exOp.setExpr(MCConstantExpr::create((Value & 0x3f) << Shift, Context));
+ exOp.setExpr(HexagonMCExpr::create(
+ MCConstantExpr::create((Value & 0x3f) << Shift, Context), Context));
}
}
@@ -159,8 +160,8 @@ MCInst const *HexagonMCInstrInfo::extenderForIndex(MCInst const &MCB,
void HexagonMCInstrInfo::extendIfNeeded(MCContext &Context,
MCInstrInfo const &MCII, MCInst &MCB,
- MCInst const &MCI, bool MustExtend) {
- if (isConstExtended(MCII, MCI) || MustExtend)
+ MCInst const &MCI) {
+ if (isConstExtended(MCII, MCI))
addConstExtender(Context, MCII, MCB, MCI);
}
@@ -190,6 +191,61 @@ MCInstrDesc const &HexagonMCInstrInfo::getDesc(MCInstrInfo const &MCII,
return (MCII.get(MCI.getOpcode()));
}
+unsigned HexagonMCInstrInfo::getDuplexRegisterNumbering(unsigned Reg) {
+ using namespace Hexagon;
+ switch (Reg) {
+ default:
+ llvm_unreachable("unknown duplex register");
+ // Rs Rss
+ case R0:
+ case D0:
+ return 0;
+ case R1:
+ case D1:
+ return 1;
+ case R2:
+ case D2:
+ return 2;
+ case R3:
+ case D3:
+ return 3;
+ case R4:
+ case D8:
+ return 4;
+ case R5:
+ case D9:
+ return 5;
+ case R6:
+ case D10:
+ return 6;
+ case R7:
+ case D11:
+ return 7;
+ case R16:
+ return 8;
+ case R17:
+ return 9;
+ case R18:
+ return 10;
+ case R19:
+ return 11;
+ case R20:
+ return 12;
+ case R21:
+ return 13;
+ case R22:
+ return 14;
+ case R23:
+ return 15;
+ }
+}
+
+MCExpr const &HexagonMCInstrInfo::getExpr(MCExpr const &Expr) {
+ const auto &HExpr = cast<HexagonMCExpr>(Expr);
+ assert(HExpr.getExpr());
+ return *HExpr.getExpr();
+}
+
unsigned short HexagonMCInstrInfo::getExtendableOp(MCInstrInfo const &MCII,
MCInst const &MCI) {
const uint64_t F = HexagonMCInstrInfo::getDesc(MCII, MCI).TSFlags;
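
Note: getDuplexRegisterNumbering encodes the sub-instruction register set: the 4-bit field covers r0-r7 (encodings 0-7) and r16-r23 (encodings 8-15), and the double registers follow the same split (d0-d3 alias r1:r0 through r7:r6, d8-d11 alias r17:r16 through r23:r22). The table reduces to a single comparison; a standalone sketch over plain register numbers (not the Hexagon:: enum):

    #include <cassert>

    static unsigned duplexRegEncoding(unsigned RegNum) {
      assert((RegNum <= 7) || (RegNum >= 16 && RegNum <= 23));
      return RegNum <= 7 ? RegNum : RegNum - 8;
    }
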
@@ -401,6 +457,12 @@ bool HexagonMCInstrInfo::isConstExtended(MCInstrInfo const &MCII,
MCInst const &MCI) {
if (HexagonMCInstrInfo::isExtended(MCII, MCI))
return true;
+ if (!HexagonMCInstrInfo::isExtendable(MCII, MCI))
+ return false;
+ MCOperand const &MO = HexagonMCInstrInfo::getExtendableOperand(MCII, MCI);
+ if (isa<HexagonMCExpr>(MO.getExpr()) &&
+ HexagonMCInstrInfo::mustExtend(*MO.getExpr()))
+ return true;
// Branch insns are handled as necessary by relaxation.
if ((HexagonMCInstrInfo::getType(MCII, MCI) == HexagonII::TypeJ) ||
(HexagonMCInstrInfo::getType(MCII, MCI) == HexagonII::TypeCOMPOUND &&
@@ -412,18 +474,11 @@ bool HexagonMCInstrInfo::isConstExtended(MCInstrInfo const &MCII,
else if ((HexagonMCInstrInfo::getType(MCII, MCI) == HexagonII::TypeCR) &&
(MCI.getOpcode() != Hexagon::C4_addipc))
return false;
- else if (!HexagonMCInstrInfo::isExtendable(MCII, MCI))
- return false;
- MCOperand const &MO = HexagonMCInstrInfo::getExtendableOperand(MCII, MCI);
-
- // We could be using an instruction with an extendable immediate and shoehorn
- // a global address into it. If it is a global address it will be constant
- // extended. We do this for COMBINE.
- // We currently only handle isGlobal() because it is the only kind of
- // object we are going to end up with here for now.
- // In the future we probably should add isSymbol(), etc.
assert(!MO.isImm());
+ if (isa<HexagonMCExpr>(MO.getExpr()) &&
+ HexagonMCInstrInfo::mustNotExtend(*MO.getExpr()))
+ return false;
int64_t Value;
if (!MO.getExpr()->evaluateAsAbsolute(Value))
return true;
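
Note: with the two inserted checks, isConstExtended() resolves in a fixed order: an instruction marked extended is always constant-extended; a non-extendable one never is; an operand expression flagged mustExtend forces extension; the branch/compound/new-value and CR special cases apply next; mustNotExtend then forces the negative; and only for the remaining cases does the evaluated absolute value (when it can be computed) decide.
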
@@ -543,6 +598,66 @@ bool HexagonMCInstrInfo::isMemStoreReorderEnabled(MCInst const &MCI) {
return (Flags & memStoreReorderEnabledMask) != 0;
}
+bool HexagonMCInstrInfo::isSubInstruction(MCInst const &MCI) {
+ switch (MCI.getOpcode()) {
+ default:
+ return false;
+ case Hexagon::V4_SA1_addi:
+ case Hexagon::V4_SA1_addrx:
+ case Hexagon::V4_SA1_addsp:
+ case Hexagon::V4_SA1_and1:
+ case Hexagon::V4_SA1_clrf:
+ case Hexagon::V4_SA1_clrfnew:
+ case Hexagon::V4_SA1_clrt:
+ case Hexagon::V4_SA1_clrtnew:
+ case Hexagon::V4_SA1_cmpeqi:
+ case Hexagon::V4_SA1_combine0i:
+ case Hexagon::V4_SA1_combine1i:
+ case Hexagon::V4_SA1_combine2i:
+ case Hexagon::V4_SA1_combine3i:
+ case Hexagon::V4_SA1_combinerz:
+ case Hexagon::V4_SA1_combinezr:
+ case Hexagon::V4_SA1_dec:
+ case Hexagon::V4_SA1_inc:
+ case Hexagon::V4_SA1_seti:
+ case Hexagon::V4_SA1_setin1:
+ case Hexagon::V4_SA1_sxtb:
+ case Hexagon::V4_SA1_sxth:
+ case Hexagon::V4_SA1_tfr:
+ case Hexagon::V4_SA1_zxtb:
+ case Hexagon::V4_SA1_zxth:
+ case Hexagon::V4_SL1_loadri_io:
+ case Hexagon::V4_SL1_loadrub_io:
+ case Hexagon::V4_SL2_deallocframe:
+ case Hexagon::V4_SL2_jumpr31:
+ case Hexagon::V4_SL2_jumpr31_f:
+ case Hexagon::V4_SL2_jumpr31_fnew:
+ case Hexagon::V4_SL2_jumpr31_t:
+ case Hexagon::V4_SL2_jumpr31_tnew:
+ case Hexagon::V4_SL2_loadrb_io:
+ case Hexagon::V4_SL2_loadrd_sp:
+ case Hexagon::V4_SL2_loadrh_io:
+ case Hexagon::V4_SL2_loadri_sp:
+ case Hexagon::V4_SL2_loadruh_io:
+ case Hexagon::V4_SL2_return:
+ case Hexagon::V4_SL2_return_f:
+ case Hexagon::V4_SL2_return_fnew:
+ case Hexagon::V4_SL2_return_t:
+ case Hexagon::V4_SL2_return_tnew:
+ case Hexagon::V4_SS1_storeb_io:
+ case Hexagon::V4_SS1_storew_io:
+ case Hexagon::V4_SS2_allocframe:
+ case Hexagon::V4_SS2_storebi0:
+ case Hexagon::V4_SS2_storebi1:
+ case Hexagon::V4_SS2_stored_sp:
+ case Hexagon::V4_SS2_storeh_io:
+ case Hexagon::V4_SS2_storew_sp:
+ case Hexagon::V4_SS2_storewi0:
+ case Hexagon::V4_SS2_storewi1:
+ return true;
+ }
+}
+
bool HexagonMCInstrInfo::isSoloAX(MCInstrInfo const &MCII, MCInst const &MCI) {
const uint64_t F = HexagonMCInstrInfo::getDesc(MCII, MCI).TSFlags;
return ((F >> HexagonII::SoloAXPos) & HexagonII::SoloAXMask);
@@ -575,6 +690,25 @@ int64_t HexagonMCInstrInfo::minConstant(MCInst const &MCI, size_t Index) {
return Value;
}
+void HexagonMCInstrInfo::setMustExtend(MCExpr const &Expr, bool Val) {
+ HexagonMCExpr &HExpr = const_cast<HexagonMCExpr &>(cast<HexagonMCExpr>(Expr));
+ HExpr.setMustExtend(Val);
+}
+
+bool HexagonMCInstrInfo::mustExtend(MCExpr const &Expr) {
+ HexagonMCExpr const &HExpr = cast<HexagonMCExpr>(Expr);
+ return HExpr.mustExtend();
+}
+void HexagonMCInstrInfo::setMustNotExtend(MCExpr const &Expr, bool Val) {
+ HexagonMCExpr &HExpr =
+ const_cast<HexagonMCExpr &>(cast<HexagonMCExpr>(Expr));
+ HExpr.setMustNotExtend(Val);
+}
+bool HexagonMCInstrInfo::mustNotExtend(MCExpr const &Expr) {
+ HexagonMCExpr const &HExpr = cast<HexagonMCExpr>(Expr);
+ return HExpr.mustNotExtend();
+}
+
void HexagonMCInstrInfo::padEndloop(MCContext &Context, MCInst &MCB) {
MCInst Nop;
Nop.setOpcode(Hexagon::A2_nop);
@@ -639,10 +773,32 @@ void HexagonMCInstrInfo::setMemStoreReorderEnabled(MCInst &MCI) {
Operand.setImm(Operand.getImm() | memStoreReorderEnabledMask);
assert(isMemStoreReorderEnabled(MCI));
}
+void HexagonMCInstrInfo::setS23_2_reloc(MCExpr const &Expr, bool Val) {
+ HexagonMCExpr &HExpr =
+ const_cast<HexagonMCExpr &>(*llvm::cast<HexagonMCExpr>(&Expr));
+ HExpr.setS23_2_reloc(Val);
+}
+bool HexagonMCInstrInfo::s23_2_reloc(MCExpr const &Expr) {
+ HexagonMCExpr const &HExpr = *llvm::cast<HexagonMCExpr>(&Expr);
+ return HExpr.s23_2_reloc();
+}
void HexagonMCInstrInfo::setOuterLoop(MCInst &MCI) {
assert(isBundle(MCI));
MCOperand &Operand = MCI.getOperand(0);
Operand.setImm(Operand.getImm() | outerLoopMask);
}
+
+unsigned HexagonMCInstrInfo::SubregisterBit(unsigned Consumer,
+ unsigned Producer,
+ unsigned Producer2) {
+  // If we're a single vector consumer of a double producer, set the subreg
+  // bit based on whether we're accessing the low or high half of the pair.
+ if (Producer >= Hexagon::W0 && Producer <= Hexagon::W15)
+ if (Consumer >= Hexagon::V0 && Consumer <= Hexagon::V31)
+ return (Consumer - Hexagon::V0) & 0x1;
+ if (Consumer == Producer2)
+ return 0x1;
+ return 0;
+}
}
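
Note: SubregisterBit() relies on the (assumed) HVX register layout in which each vector pair Wn aliases V(2n+1):V(2n), so for a single-vector consumer of a double producer the consumer's index parity selects the half directly:

    // V6 read from W3 (= V7:V6) -> 0 (low half); V7 from W3 -> 1 (high half)
    unsigned Bit = ConsumerVRegIndex & 0x1; // hypothetical index, Consumer - V0

The trailing Producer2 comparison covers the case where the consumer reads the second producer register of a double-producing instruction, which is always the high half.
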
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h
index 0237b2884a3b..58a8f68b9847 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h
@@ -75,7 +75,7 @@ MCInst createBundle();
// Return the extender for instruction at Index or nullptr if none
MCInst const *extenderForIndex(MCInst const &MCB, size_t Index);
void extendIfNeeded(MCContext &Context, MCInstrInfo const &MCII, MCInst &MCB,
- MCInst const &MCI, bool MustExtend);
+ MCInst const &MCI);
// Create a duplex instruction given the two subinsts
MCInst *deriveDuplex(MCContext &Context, unsigned iClass, MCInst const &inst0,
@@ -107,6 +107,9 @@ unsigned getDuplexCandidateGroup(MCInst const &MI);
// Return a list of all possible instruction duplex combinations
SmallVector<DuplexCandidate, 8> getDuplexPossibilties(MCInstrInfo const &MCII,
MCInst const &MCB);
+unsigned getDuplexRegisterNumbering(unsigned Reg);
+
+MCExpr const &getExpr(MCExpr const &Expr);
// Return the index of the extendable operand
unsigned short getExtendableOp(MCInstrInfo const &MCII, MCInst const &MCI);
@@ -260,7 +263,10 @@ bool isSoloAX(MCInstrInfo const &MCII, MCInst const &MCI);
/// Return whether the insn can be packaged only with an A-type insn in slot #1.
bool isSoloAin1(MCInstrInfo const &MCII, MCInst const &MCI);
+bool isSubInstruction(MCInst const &MCI);
bool isVector(MCInstrInfo const &MCII, MCInst const &MCI);
+bool mustExtend(MCExpr const &Expr);
+bool mustNotExtend(MCExpr const &Expr);
// Pad the bundle with nops to satisfy endloop requirements
void padEndloop(MCContext &Context, MCInst &MCI);
@@ -270,16 +276,22 @@ bool prefersSlot3(MCInstrInfo const &MCII, MCInst const &MCI);
// Replace the instructions inside MCB, represented by Candidate
void replaceDuplex(MCContext &Context, MCInst &MCB, DuplexCandidate Candidate);
+bool s23_2_reloc(MCExpr const &Expr);
// Marks a bundle as endloop0
void setInnerLoop(MCInst &MCI);
void setMemReorderDisabled(MCInst &MCI);
void setMemStoreReorderEnabled(MCInst &MCI);
+void setMustExtend(MCExpr const &Expr, bool Val = true);
+void setMustNotExtend(MCExpr const &Expr, bool Val = true);
+void setS23_2_reloc(MCExpr const &Expr, bool Val = true);
// Marks a bundle as endloop1
void setOuterLoop(MCInst &MCI);
// Would duplexing this instruction create a requirement to extend
bool subInstWouldBeExtended(MCInst const &potentialDuplex);
+unsigned SubregisterBit(unsigned Consumer, unsigned Producer,
+ unsigned Producer2);
// Attempt to find and replace compound pairs
void tryCompound(MCInstrInfo const &MCII, MCContext &Context, MCInst &MCI);
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
index 8e70280c1a0d..7f8e7a4edb0c 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
@@ -180,7 +180,6 @@ llvm::HexagonMCShuffle(MCInstrInfo const &MCII, MCSubtargetInfo const &STI,
if (MCS.size() == 1) { // case of one duplex
 // copy the duplex created in the shuffler to the bundle
MCS.copyTo(MCB);
- doneShuffling = true;
return HexagonShuffler::SHUFFLE_SUCCESS;
}
// try shuffle with this duplex
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
index 9a292577a8f3..35a1a23a8892 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
@@ -16,7 +16,6 @@
#include "HexagonMCAsmInfo.h"
#include "HexagonMCELFStreamer.h"
#include "MCTargetDesc/HexagonInstPrinter.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCInstrInfo.h"
@@ -48,10 +47,46 @@ cl::opt<bool> llvm::HexagonDisableDuplex
("mno-pairing",
cl::desc("Disable looking for duplex instructions for Hexagon"));
+static cl::opt<bool> HexagonV4ArchVariant("mv4", cl::Hidden, cl::init(false),
+ cl::desc("Build for Hexagon V4"));
+
+static cl::opt<bool> HexagonV5ArchVariant("mv5", cl::Hidden, cl::init(false),
+ cl::desc("Build for Hexagon V5"));
+
+static cl::opt<bool> HexagonV55ArchVariant("mv55", cl::Hidden, cl::init(false),
+ cl::desc("Build for Hexagon V55"));
+
+static cl::opt<bool> HexagonV60ArchVariant("mv60", cl::Hidden, cl::init(false),
+ cl::desc("Build for Hexagon V60"));
+
+static StringRef DefaultArch = "hexagonv60";
+
+static StringRef HexagonGetArchVariant() {
+ if (HexagonV4ArchVariant)
+ return "hexagonv4";
+ if (HexagonV5ArchVariant)
+ return "hexagonv5";
+ if (HexagonV55ArchVariant)
+ return "hexagonv55";
+ if (HexagonV60ArchVariant)
+ return "hexagonv60";
+ return "";
+}
+
StringRef HEXAGON_MC::selectHexagonCPU(const Triple &TT, StringRef CPU) {
- if (CPU.empty())
- CPU = "hexagonv60";
- return CPU;
+ StringRef ArchV = HexagonGetArchVariant();
+ if (!ArchV.empty() && !CPU.empty()) {
+ if (ArchV != CPU)
+ report_fatal_error("conflicting architectures specified.");
+ return CPU;
+ }
+ if (ArchV.empty()) {
+ if (CPU.empty())
+ CPU = DefaultArch;
+ return CPU;
+ }
+ return ArchV;
}
MCInstrInfo *llvm::createHexagonMCInstrInfo() {
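
Note: selectHexagonCPU() now reconciles the new hidden -mvN flags with an explicit CPU string. The four outcomes (illustrative):

    -mv60 and CPU "hexagonv60"  -> "hexagonv60" (consistent; CPU string wins)
    -mv60 and CPU "hexagonv5"   -> report_fatal_error: conflicting architectures
    -mv55 and no CPU            -> "hexagonv55"
    neither                     -> DefaultArch ("hexagonv60")
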
@@ -62,7 +97,7 @@ MCInstrInfo *llvm::createHexagonMCInstrInfo() {
static MCRegisterInfo *createHexagonMCRegisterInfo(const Triple &TT) {
MCRegisterInfo *X = new MCRegisterInfo();
- InitHexagonMCRegisterInfo(X, Hexagon::R0);
+ InitHexagonMCRegisterInfo(X, Hexagon::R31);
return X;
}
@@ -121,10 +156,14 @@ public:
HexagonTargetELFStreamer(MCStreamer &S, MCSubtargetInfo const &STI)
: HexagonTargetStreamer(S) {
auto Bits = STI.getFeatureBits();
- unsigned Flags;
- if (Bits.to_ullong() & llvm::Hexagon::ArchV5)
+ unsigned Flags = 0;
+ if (Bits[Hexagon::ArchV60])
+ Flags = ELF::EF_HEXAGON_MACH_V60;
+ else if (Bits[Hexagon::ArchV55])
+ Flags = ELF::EF_HEXAGON_MACH_V55;
+ else if (Bits[Hexagon::ArchV5])
Flags = ELF::EF_HEXAGON_MACH_V5;
- else
+ else if (Bits[Hexagon::ArchV4])
Flags = ELF::EF_HEXAGON_MACH_V4;
getStreamer().getAssembler().setELFHeaderEFlags(Flags);
}
@@ -159,17 +198,6 @@ static MCAsmInfo *createHexagonMCAsmInfo(const MCRegisterInfo &MRI,
return MAI;
}
-static MCCodeGenInfo *createHexagonMCCodeGenInfo(const Triple &TT,
- Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
- if (RM == Reloc::Default)
- RM = Reloc::Static;
- X->initMCCodeGenInfo(RM, CM, OL);
- return X;
-}
-
static MCInstPrinter *createHexagonMCInstPrinter(const Triple &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
@@ -204,10 +232,6 @@ extern "C" void LLVMInitializeHexagonTargetMC() {
// Register the MC asm info.
RegisterMCAsmInfoFn X(TheHexagonTarget, createHexagonMCAsmInfo);
- // Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(TheHexagonTarget,
- createHexagonMCCodeGenInfo);
-
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(TheHexagonTarget,
createHexagonMCInstrInfo);
diff --git a/lib/Target/Hexagon/MCTargetDesc/Makefile b/lib/Target/Hexagon/MCTargetDesc/Makefile
deleted file mode 100644
index 885be2ddbd88..000000000000
--- a/lib/Target/Hexagon/MCTargetDesc/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/Hexagon/TargetDesc/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMHexagonDesc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/Hexagon/Makefile b/lib/Target/Hexagon/Makefile
deleted file mode 100644
index c53b8e56aafc..000000000000
--- a/lib/Target/Hexagon/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-##===- lib/Target/Hexagon/Makefile -------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../..
-LIBRARYNAME = LLVMHexagonCodeGen
-TARGET = Hexagon
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = HexagonGenRegisterInfo.inc \
- HexagonGenInstrInfo.inc \
- HexagonGenAsmMatcher.inc \
- HexagonGenAsmWriter.inc \
- HexagonGenDAGISel.inc HexagonGenSubtargetInfo.inc \
- HexagonGenCallingConv.inc \
- HexagonGenDFAPacketizer.inc \
- HexagonGenMCCodeEmitter.inc \
- HexagonGenDisassemblerTables.inc
-
-DIRS = TargetInfo MCTargetDesc Disassembler AsmParser
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/Hexagon/RDFCopy.cpp b/lib/Target/Hexagon/RDFCopy.cpp
index c547c7195075..61a83dada218 100644
--- a/lib/Target/Hexagon/RDFCopy.cpp
+++ b/lib/Target/Hexagon/RDFCopy.cpp
@@ -7,37 +7,85 @@
//
//===----------------------------------------------------------------------===//
//
-// Simplistic RDF-based copy propagation.
+// RDF-based copy propagation.
#include "RDFCopy.h"
#include "RDFGraph.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
-
-#include <atomic>
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+using namespace llvm;
+using namespace rdf;
#ifndef NDEBUG
static cl::opt<unsigned> CpLimit("rdf-cp-limit", cl::init(0), cl::Hidden);
static unsigned CpCount = 0;
#endif
-using namespace llvm;
-using namespace rdf;
+bool CopyPropagation::interpretAsCopy(const MachineInstr *MI, EqualityMap &EM) {
+ unsigned Opc = MI->getOpcode();
+ switch (Opc) {
+ case TargetOpcode::COPY: {
+ const MachineOperand &Dst = MI->getOperand(0);
+ const MachineOperand &Src = MI->getOperand(1);
+ RegisterRef DstR = { Dst.getReg(), Dst.getSubReg() };
+ RegisterRef SrcR = { Src.getReg(), Src.getSubReg() };
+ if (TargetRegisterInfo::isVirtualRegister(DstR.Reg)) {
+ if (!TargetRegisterInfo::isVirtualRegister(SrcR.Reg))
+ return false;
+ MachineRegisterInfo &MRI = DFG.getMF().getRegInfo();
+ if (MRI.getRegClass(DstR.Reg) != MRI.getRegClass(SrcR.Reg))
+ return false;
+ } else if (TargetRegisterInfo::isPhysicalRegister(DstR.Reg)) {
+ if (!TargetRegisterInfo::isPhysicalRegister(SrcR.Reg))
+ return false;
+ const TargetRegisterInfo &TRI = DFG.getTRI();
+ if (TRI.getMinimalPhysRegClass(DstR.Reg) !=
+ TRI.getMinimalPhysRegClass(SrcR.Reg))
+ return false;
+ } else {
+ // Copy between some unknown objects.
+ return false;
+ }
+ EM.insert(std::make_pair(DstR, SrcR));
+ return true;
+ }
+ case TargetOpcode::REG_SEQUENCE: {
+ const MachineOperand &Dst = MI->getOperand(0);
+ RegisterRef DefR = { Dst.getReg(), Dst.getSubReg() };
+ SmallVector<TargetInstrInfo::RegSubRegPairAndIdx,2> Inputs;
+ const TargetInstrInfo &TII = DFG.getTII();
+ if (!TII.getRegSequenceInputs(*MI, 0, Inputs))
+ return false;
+ for (auto I : Inputs) {
+ unsigned S = DFG.getTRI().composeSubRegIndices(DefR.Sub, I.SubIdx);
+ RegisterRef DR = { DefR.Reg, S };
+ RegisterRef SR = { I.Reg, I.SubReg };
+ EM.insert(std::make_pair(DR, SR));
+ }
+ return true;
+ }
+ }
+ return false;
+}
-void CopyPropagation::recordCopy(NodeAddr<StmtNode*> SA, MachineInstr *MI) {
- assert(MI->getOpcode() == TargetOpcode::COPY);
- const MachineOperand &Op0 = MI->getOperand(0), &Op1 = MI->getOperand(1);
- RegisterRef DstR = { Op0.getReg(), Op0.getSubReg() };
- RegisterRef SrcR = { Op1.getReg(), Op1.getSubReg() };
- auto FS = DefM.find(SrcR);
- if (FS == DefM.end() || FS->second.empty())
- return;
+
+void CopyPropagation::recordCopy(NodeAddr<StmtNode*> SA, EqualityMap &EM) {
+ CopyMap.insert(std::make_pair(SA.Id, EM));
Copies.push_back(SA.Id);
- RDefMap[SrcR][SA.Id] = FS->second.top()->Id;
- // Insert DstR into the map.
- RDefMap[DstR];
+
+ for (auto I : EM) {
+ auto FS = DefM.find(I.second);
+ if (FS == DefM.end() || FS->second.empty())
+ continue; // Undefined source
+ RDefMap[I.second][SA.Id] = FS->second.top()->Id;
+ // Insert DstR into the map.
+ RDefMap[I.first];
+ }
}
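
Note: interpretAsCopy() generalizes the old MI->isCopy() test: each copy-like instruction is abstracted into an EqualityMap of (destination, source) register-reference pairs, after verifying that both sides live in the same register class. A COPY contributes one pair; a REG_SEQUENCE contributes one pair per input, with the destination subregister composed from the sequence index. An illustrative (hypothetical) instance:

    %vreg3 = REG_SEQUENCE %vreg1, isub_lo, %vreg2, isub_hi
      => EM = { vreg3:isub_lo -> vreg1, vreg3:isub_hi -> vreg2 }

recordCopy() then stores the map keyed by the statement node and caches, per source register, the reaching def at the copy so later uses can be validated against it.
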
@@ -74,9 +122,9 @@ bool CopyPropagation::scanBlock(MachineBasicBlock *B) {
for (NodeAddr<InstrNode*> IA : BA.Addr->members(DFG)) {
if (DFG.IsCode<NodeAttrs::Stmt>(IA)) {
NodeAddr<StmtNode*> SA = IA;
- MachineInstr *MI = SA.Addr->getCode();
- if (MI->isCopy())
- recordCopy(SA, MI);
+ EqualityMap EM;
+ if (interpretAsCopy(SA.Addr->getCode(), EM))
+ recordCopy(SA, EM);
}
updateMap(IA);
@@ -97,8 +145,14 @@ bool CopyPropagation::run() {
if (trace()) {
dbgs() << "Copies:\n";
- for (auto I : Copies)
- dbgs() << *DFG.addr<StmtNode*>(I).Addr->getCode();
+ for (auto I : Copies) {
+ dbgs() << "Instr: " << *DFG.addr<StmtNode*>(I).Addr->getCode();
+ dbgs() << " eq: {";
+ for (auto J : CopyMap[I])
+ dbgs() << ' ' << Print<RegisterRef>(J.first, DFG) << '='
+ << Print<RegisterRef>(J.second, DFG);
+ dbgs() << " }\n";
+ }
dbgs() << "\nRDef map:\n";
for (auto R : RDefMap) {
dbgs() << Print<RegisterRef>(R.first, DFG) << " -> {";
@@ -110,70 +164,87 @@ bool CopyPropagation::run() {
}
bool Changed = false;
- NodeSet Deleted;
#ifndef NDEBUG
bool HasLimit = CpLimit.getNumOccurrences() > 0;
#endif
- for (auto I : Copies) {
+ for (auto C : Copies) {
#ifndef NDEBUG
if (HasLimit && CpCount >= CpLimit)
break;
#endif
- if (Deleted.count(I))
- continue;
- auto SA = DFG.addr<InstrNode*>(I);
- NodeList Ds = SA.Addr->members_if(DFG.IsDef, DFG);
- if (Ds.size() != 1)
- continue;
- NodeAddr<DefNode*> DA = Ds[0];
- RegisterRef DR0 = DA.Addr->getRegRef();
- NodeList Us = SA.Addr->members_if(DFG.IsUse, DFG);
- if (Us.size() != 1)
+ auto SA = DFG.addr<InstrNode*>(C);
+ auto FS = CopyMap.find(SA.Id);
+ if (FS == CopyMap.end())
continue;
- NodeAddr<UseNode*> UA0 = Us[0];
- RegisterRef UR0 = UA0.Addr->getRegRef();
- NodeId RD0 = UA0.Addr->getReachingDef();
-
- for (NodeId N = DA.Addr->getReachedUse(), NextN; N; N = NextN) {
- auto UA = DFG.addr<UseNode*>(N);
- NextN = UA.Addr->getSibling();
- uint16_t F = UA.Addr->getFlags();
- if ((F & NodeAttrs::PhiRef) || (F & NodeAttrs::Fixed))
- continue;
- if (UA.Addr->getRegRef() != DR0)
- continue;
- NodeAddr<InstrNode*> IA = UA.Addr->getOwner(DFG);
- assert(DFG.IsCode<NodeAttrs::Stmt>(IA));
- MachineInstr *MI = NodeAddr<StmtNode*>(IA).Addr->getCode();
- if (RDefMap[UR0][IA.Id] != RD0)
+
+ EqualityMap &EM = FS->second;
+ for (NodeAddr<DefNode*> DA : SA.Addr->members_if(DFG.IsDef, DFG)) {
+ RegisterRef DR = DA.Addr->getRegRef();
+ auto FR = EM.find(DR);
+ if (FR == EM.end())
continue;
- MachineOperand &Op = UA.Addr->getOp();
- if (Op.isTied())
+ RegisterRef SR = FR->second;
+ if (DR == SR)
continue;
- if (trace()) {
- dbgs() << "can replace " << Print<RegisterRef>(DR0, DFG)
- << " with " << Print<RegisterRef>(UR0, DFG) << " in "
- << *NodeAddr<StmtNode*>(IA).Addr->getCode();
- }
-
- Op.setReg(UR0.Reg);
- Op.setSubReg(UR0.Sub);
- Changed = true;
-#ifndef NDEBUG
- if (HasLimit && CpCount >= CpLimit)
- break;
- CpCount++;
-#endif
- if (MI->isCopy()) {
- MachineOperand &Op0 = MI->getOperand(0), &Op1 = MI->getOperand(1);
- if (Op0.getReg() == Op1.getReg() && Op0.getSubReg() == Op1.getSubReg())
- MI->eraseFromParent();
- Deleted.insert(IA.Id);
- }
- }
- }
+ auto &RDefSR = RDefMap[SR];
+ NodeId RDefSR_SA = RDefSR[SA.Id];
+
+ for (NodeId N = DA.Addr->getReachedUse(), NextN; N; N = NextN) {
+ auto UA = DFG.addr<UseNode*>(N);
+ NextN = UA.Addr->getSibling();
+ uint16_t F = UA.Addr->getFlags();
+ if ((F & NodeAttrs::PhiRef) || (F & NodeAttrs::Fixed))
+ continue;
+ if (UA.Addr->getRegRef() != DR)
+ continue;
+
+ NodeAddr<InstrNode*> IA = UA.Addr->getOwner(DFG);
+ assert(DFG.IsCode<NodeAttrs::Stmt>(IA));
+ if (RDefSR[IA.Id] != RDefSR_SA)
+ continue;
+
+ MachineOperand &Op = UA.Addr->getOp();
+ if (Op.isTied())
+ continue;
+ if (trace()) {
+ dbgs() << "Can replace " << Print<RegisterRef>(DR, DFG)
+ << " with " << Print<RegisterRef>(SR, DFG) << " in "
+ << *NodeAddr<StmtNode*>(IA).Addr->getCode();
+ }
+
+ Op.setReg(SR.Reg);
+ Op.setSubReg(SR.Sub);
+ DFG.unlinkUse(UA, false);
+ if (RDefSR_SA != 0) {
+ UA.Addr->linkToDef(UA.Id, DFG.addr<DefNode*>(RDefSR_SA));
+ } else {
+ UA.Addr->setReachingDef(0);
+ UA.Addr->setSibling(0);
+ }
+
+ Changed = true;
+ #ifndef NDEBUG
+ if (HasLimit && CpCount >= CpLimit)
+ break;
+ CpCount++;
+ #endif
+
+ auto FC = CopyMap.find(IA.Id);
+ if (FC != CopyMap.end()) {
+ // Update the EM map in the copy's entry.
+ auto &M = FC->second;
+ for (auto &J : M) {
+ if (J.second != DR)
+ continue;
+ J.second = SR;
+ break;
+ }
+ }
+ } // for (N in reached-uses)
+ } // for (DA in defs)
+ } // for (C in Copies)
return Changed;
}
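
Note: the main loop no longer erases trivial copies itself (the old Deleted set is gone). Instead, after rewriting a use from DR to SR it repairs the graph in place: the use is unlinked from DR's def chain and relinked to SR's reaching def at the copy (RDefSR_SA), or left detached when that def is node 0. Entries in later copies' equality maps that referred to DR are retargeted to SR, so propagation can chain through copy-of-copy sequences within a single run.
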
diff --git a/lib/Target/Hexagon/RDFCopy.h b/lib/Target/Hexagon/RDFCopy.h
index 02531b94c9b0..e8a576cf57a3 100644
--- a/lib/Target/Hexagon/RDFCopy.h
+++ b/lib/Target/Hexagon/RDFCopy.h
@@ -18,17 +18,20 @@ namespace llvm {
class MachineBasicBlock;
class MachineDominatorTree;
class MachineInstr;
-}
namespace rdf {
struct CopyPropagation {
CopyPropagation(DataFlowGraph &dfg) : MDT(dfg.getDT()), DFG(dfg),
Trace(false) {}
+ virtual ~CopyPropagation() {}
bool run();
void trace(bool On) { Trace = On; }
bool trace() const { return Trace; }
+ typedef std::map<RegisterRef, RegisterRef> EqualityMap;
+ virtual bool interpretAsCopy(const MachineInstr *MI, EqualityMap &EM);
+
private:
const MachineDominatorTree &MDT;
DataFlowGraph &DFG;
@@ -37,12 +40,15 @@ namespace rdf {
// map: register -> (map: stmt -> reaching def)
std::map<RegisterRef,std::map<NodeId,NodeId>> RDefMap;
+ // map: statement -> (map: dst reg -> src reg)
+ std::map<NodeId, EqualityMap> CopyMap;
std::vector<NodeId> Copies;
- void recordCopy(NodeAddr<StmtNode*> SA, MachineInstr *MI);
+ void recordCopy(NodeAddr<StmtNode*> SA, EqualityMap &EM);
void updateMap(NodeAddr<InstrNode*> IA);
bool scanBlock(MachineBasicBlock *B);
};
-}
+} // namespace rdf
+} // namespace llvm
#endif
diff --git a/lib/Target/Hexagon/RDFDeadCode.cpp b/lib/Target/Hexagon/RDFDeadCode.cpp
index 95668577bd50..63177d51cada 100644
--- a/lib/Target/Hexagon/RDFDeadCode.cpp
+++ b/lib/Target/Hexagon/RDFDeadCode.cpp
@@ -18,9 +18,38 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include <queue>
+
using namespace llvm;
using namespace rdf;
+// This drastically improves execution time in "collect" compared with using
+// SetVector as a work queue and popping its first element.
+template<typename T> struct DeadCodeElimination::SetQueue {
+ SetQueue() : Set(), Queue() {}
+
+ bool empty() const {
+ return Queue.empty();
+ }
+ T pop_front() {
+ T V = Queue.front();
+ Queue.pop();
+ Set.erase(V);
+ return V;
+ }
+ void push_back(T V) {
+ if (Set.count(V))
+ return;
+ Queue.push(V);
+ Set.insert(V);
+ }
+
+private:
+ DenseSet<T> Set;
+ std::queue<T> Queue;
+};
+
+
// Check if the given instruction has observable side-effects, i.e. if
// it should be considered "live". It is safe for this function to be
// overly conservative (i.e. return "true" for all instructions), but it
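
Note: SetQueue pairs a DenseSet (O(1) membership) with a std::queue (O(1) pop), replacing SetVector, whose remove-from-front is linear in the container size. A usage sketch (the template is private to DeadCodeElimination in the tree; shown standalone):

    SetQueue<unsigned> WorkQ;
    WorkQ.push_back(7);
    WorkQ.push_back(7);              // ignored: 7 is already queued
    WorkQ.push_back(9);
    unsigned N = WorkQ.pop_front();  // N == 7; 9 remains queued
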
@@ -40,33 +69,33 @@ bool DeadCodeElimination::isLiveInstr(const MachineInstr *MI) const {
}
void DeadCodeElimination::scanInstr(NodeAddr<InstrNode*> IA,
- SetVector<NodeId> &WorkQ) {
+ SetQueue<NodeId> &WorkQ) {
if (!DFG.IsCode<NodeAttrs::Stmt>(IA))
return;
if (!isLiveInstr(NodeAddr<StmtNode*>(IA).Addr->getCode()))
return;
for (NodeAddr<RefNode*> RA : IA.Addr->members(DFG)) {
if (!LiveNodes.count(RA.Id))
- WorkQ.insert(RA.Id);
+ WorkQ.push_back(RA.Id);
}
}
void DeadCodeElimination::processDef(NodeAddr<DefNode*> DA,
- SetVector<NodeId> &WorkQ) {
+ SetQueue<NodeId> &WorkQ) {
NodeAddr<InstrNode*> IA = DA.Addr->getOwner(DFG);
for (NodeAddr<UseNode*> UA : IA.Addr->members_if(DFG.IsUse, DFG)) {
if (!LiveNodes.count(UA.Id))
- WorkQ.insert(UA.Id);
+ WorkQ.push_back(UA.Id);
}
for (NodeAddr<DefNode*> TA : DFG.getRelatedRefs(IA, DA))
LiveNodes.insert(TA.Id);
}
void DeadCodeElimination::processUse(NodeAddr<UseNode*> UA,
- SetVector<NodeId> &WorkQ) {
+ SetQueue<NodeId> &WorkQ) {
for (NodeAddr<DefNode*> DA : LV.getAllReachingDefs(UA)) {
if (!LiveNodes.count(DA.Id))
- WorkQ.insert(DA.Id);
+ WorkQ.push_back(DA.Id);
}
}
@@ -84,14 +113,13 @@ bool DeadCodeElimination::collect() {
// instruction are considered live. For each live use, all its reaching
// defs are considered live.
LiveNodes.clear();
- SetVector<NodeId> WorkQ;
+ SetQueue<NodeId> WorkQ;
for (NodeAddr<BlockNode*> BA : DFG.getFunc().Addr->members(DFG))
for (NodeAddr<InstrNode*> IA : BA.Addr->members(DFG))
scanInstr(IA, WorkQ);
while (!WorkQ.empty()) {
- NodeId N = *WorkQ.begin();
- WorkQ.remove(N);
+ NodeId N = WorkQ.pop_front();
LiveNodes.insert(N);
auto RA = DFG.addr<RefNode*>(N);
if (DFG.IsDef(RA))
@@ -183,9 +211,9 @@ bool DeadCodeElimination::erase(const SetVector<NodeId> &Nodes) {
if (trace())
dbgs() << " " << PrintNode<RefNode*>(RA, DFG) << '\n';
if (DFG.IsUse(RA))
- DFG.unlinkUse(RA);
+ DFG.unlinkUse(RA, true);
else if (DFG.IsDef(RA))
- DFG.unlinkDef(RA);
+ DFG.unlinkDef(RA, true);
}
// Now, remove all dead instruction nodes.
diff --git a/lib/Target/Hexagon/RDFDeadCode.h b/lib/Target/Hexagon/RDFDeadCode.h
index f4373fb5007d..8977e730b855 100644
--- a/lib/Target/Hexagon/RDFDeadCode.h
+++ b/lib/Target/Hexagon/RDFDeadCode.h
@@ -30,7 +30,6 @@
namespace llvm {
class MachineRegisterInfo;
-}
namespace rdf {
struct DeadCodeElimination {
@@ -55,11 +54,14 @@ namespace rdf {
MachineRegisterInfo &MRI;
Liveness LV;
+ template<typename T> struct SetQueue;
+
bool isLiveInstr(const MachineInstr *MI) const;
- void scanInstr(NodeAddr<InstrNode*> IA, SetVector<NodeId> &WorkQ);
- void processDef(NodeAddr<DefNode*> DA, SetVector<NodeId> &WorkQ);
- void processUse(NodeAddr<UseNode*> UA, SetVector<NodeId> &WorkQ);
+ void scanInstr(NodeAddr<InstrNode*> IA, SetQueue<NodeId> &WorkQ);
+ void processDef(NodeAddr<DefNode*> DA, SetQueue<NodeId> &WorkQ);
+ void processUse(NodeAddr<UseNode*> UA, SetQueue<NodeId> &WorkQ);
};
-}
+} // namespace rdf
+} // namespace llvm
#endif
diff --git a/lib/Target/Hexagon/RDFGraph.cpp b/lib/Target/Hexagon/RDFGraph.cpp
index 9b47422153bb..273d6b7cb0c8 100644
--- a/lib/Target/Hexagon/RDFGraph.cpp
+++ b/lib/Target/Hexagon/RDFGraph.cpp
@@ -25,6 +25,7 @@ using namespace rdf;
// Printing functions. Have them here first, so that the rest of the code
// can use them.
+namespace llvm {
namespace rdf {
template<>
@@ -298,6 +299,7 @@ raw_ostream &operator<< (raw_ostream &OS,
}
} // namespace rdf
+} // namespace llvm
// Node allocation functions.
//
@@ -315,7 +317,7 @@ void NodeAllocator::startNewBlock() {
// Check if the block index is still within the allowed range, i.e. less
// than 2^N, where N is the number of bits in NodeId for the block index.
// BitsPerIndex is the number of bits per node index.
- assert((Blocks.size() < (1U << (8*sizeof(NodeId)-BitsPerIndex))) &&
+ assert((Blocks.size() < ((size_t)1 << (8*sizeof(NodeId)-BitsPerIndex))) &&
"Out of bits for block index");
ActiveEnd = P;
}
@@ -674,7 +676,7 @@ bool RegisterAliasInfo::alias(RegisterRef RA, RegisterRef RB) const {
// unchanged across this def.
bool TargetOperandInfo::isPreserving(const MachineInstr &In, unsigned OpNum)
const {
- return TII.isPredicated(&In);
+ return TII.isPredicated(In);
}
// Check if the definition of RR produces an unspecified value.
@@ -686,11 +688,17 @@ bool TargetOperandInfo::isClobbering(const MachineInstr &In, unsigned OpNum)
return false;
}
-// Check if the given instruction specifically requires
+// Check if the given instruction specifically requires
bool TargetOperandInfo::isFixedReg(const MachineInstr &In, unsigned OpNum)
const {
- if (In.isCall() || In.isReturn())
+ if (In.isCall() || In.isReturn() || In.isInlineAsm())
return true;
+ // Check for a tail call.
+ if (In.isBranch())
+ for (auto &O : In.operands())
+ if (O.isGlobal() || O.isSymbol())
+ return true;
+
const MCInstrDesc &D = In.getDesc();
if (!D.getImplicitDefs() && !D.getImplicitUses())
return false;
@@ -919,7 +927,7 @@ NodeAddr<FuncNode*> DataFlowGraph::newFunc(MachineFunction *MF) {
}
// Build the data flow graph.
-void DataFlowGraph::build() {
+void DataFlowGraph::build(unsigned Options) {
reset();
Func = newFunc(&MF);
@@ -964,7 +972,8 @@ void DataFlowGraph::build() {
linkBlockRefs(DM, EA);
// Finally, remove all unused phi nodes.
- removeUnusedPhis();
+ if (!(Options & BuildOptions::KeepDeadPhis))
+ removeUnusedPhis();
}
// For each stack in the map DefM, push the delimiter for block B on it.
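
Note: build() now takes a BuildOptions mask; BuildOptions::KeepDeadPhis suppresses the final removeUnusedPhis() sweep, for clients that need to inspect or extend phi nodes that have no live uses yet:

    DFG.build(BuildOptions::KeepDeadPhis);  // graph retains dead phi nodes
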
@@ -1167,6 +1176,17 @@ NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA,
void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) {
auto SA = newStmt(BA, &In);
+ auto isCall = [] (const MachineInstr &In) -> bool {
+ if (In.isCall())
+ return true;
+ // Is tail call?
+ if (In.isBranch())
+ for (auto &Op : In.operands())
+ if (Op.isGlobal() || Op.isSymbol())
+ return true;
+ return false;
+ };
+
// Collect a set of registers that this instruction implicitly uses
// or defines. Implicit operands from an instruction will be ignored
// unless they are listed here.
@@ -1178,8 +1198,8 @@ void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) {
while (uint16_t R = *ImpU++)
ImpUses.insert({R, 0});
- bool IsCall = In.isCall(), IsReturn = In.isReturn();
- bool IsPredicated = TII.isPredicated(&In);
+ bool NeedsImplicit = isCall(In) || In.isInlineAsm() || In.isReturn();
+ bool IsPredicated = TII.isPredicated(In);
unsigned NumOps = In.getNumOperands();
// Avoid duplicate implicit defs. This will not detect cases of implicit
@@ -1212,7 +1232,7 @@ void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) {
if (!Op.isReg() || !Op.isDef() || !Op.isImplicit())
continue;
RegisterRef RR = { Op.getReg(), Op.getSubReg() };
- if (!IsCall && !ImpDefs.count(RR))
+ if (!NeedsImplicit && !ImpDefs.count(RR))
continue;
if (DoneDefs.count(RR))
continue;
@@ -1237,7 +1257,7 @@ void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) {
// instructions regardless of whether or not they appear in the instruction
// descriptor's list.
bool Implicit = Op.isImplicit();
- bool TakeImplicit = IsReturn || IsCall || IsPredicated;
+ bool TakeImplicit = NeedsImplicit || IsPredicated;
if (Implicit && !TakeImplicit && !ImpUses.count(RR))
continue;
uint16_t Flags = NodeAttrs::None;
@@ -1456,9 +1476,9 @@ void DataFlowGraph::removeUnusedPhis() {
PhiQ.insert(OA.Id);
}
if (RA.Addr->isDef())
- unlinkDef(RA);
+ unlinkDef(RA, true);
else
- unlinkUse(RA);
+ unlinkUse(RA, true);
}
NodeAddr<BlockNode*> BA = PA.Addr->getOwner(*this);
BA.Addr->removeMember(PA, *this);
@@ -1546,6 +1566,7 @@ void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, NodeAddr<BlockNode*> BA) {
// Push block delimiters.
markBlock(BA.Id, DefM);
+ assert(BA.Addr && "block node address is needed to create a data-flow link");
// For each non-phi instruction in the block, link all the defs and uses
// to their reaching defs. For any member of the block (including phis),
// push the defs on the corresponding stacks.
@@ -1593,13 +1614,10 @@ void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, NodeAddr<BlockNode*> BA) {
}
// Remove the use node UA from any data-flow and structural links.
-void DataFlowGraph::unlinkUse(NodeAddr<UseNode*> UA) {
+void DataFlowGraph::unlinkUseDF(NodeAddr<UseNode*> UA) {
NodeId RD = UA.Addr->getReachingDef();
NodeId Sib = UA.Addr->getSibling();
- NodeAddr<InstrNode*> IA = UA.Addr->getOwner(*this);
- IA.Addr->removeMember(UA, *this);
-
if (RD == 0) {
assert(Sib == 0);
return;
@@ -1623,7 +1641,7 @@ void DataFlowGraph::unlinkUse(NodeAddr<UseNode*> UA) {
}
// Remove the def node DA from any data-flow and structural links.
-void DataFlowGraph::unlinkDef(NodeAddr<DefNode*> DA) {
+void DataFlowGraph::unlinkDefDF(NodeAddr<DefNode*> DA) {
//
// RD
// | reached
@@ -1710,7 +1728,4 @@ void DataFlowGraph::unlinkDef(NodeAddr<DefNode*> DA) {
Last.Addr->setSibling(RDA.Addr->getReachedUse());
RDA.Addr->setReachedUse(ReachedUses.front().Id);
}
-
- NodeAddr<InstrNode*> IA = DA.Addr->getOwner(*this);
- IA.Addr->removeMember(DA, *this);
}
diff --git a/lib/Target/Hexagon/RDFGraph.h b/lib/Target/Hexagon/RDFGraph.h
index 7da7bb5973cf..49b053741263 100644
--- a/lib/Target/Hexagon/RDFGraph.h
+++ b/lib/Target/Hexagon/RDFGraph.h
@@ -202,7 +202,6 @@
#ifndef RDF_GRAPH_H
#define RDF_GRAPH_H
-#include "llvm/ADT/BitVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -213,8 +212,6 @@
#include <set>
#include <vector>
-using namespace llvm;
-
namespace llvm {
class MachineBasicBlock;
class MachineFunction;
@@ -224,7 +221,6 @@ namespace llvm {
class MachineDominatorTree;
class TargetInstrInfo;
class TargetRegisterInfo;
-}
namespace rdf {
typedef uint32_t NodeId;
@@ -288,6 +284,13 @@ namespace rdf {
}
};
+ struct BuildOptions {
+ enum : unsigned {
+ None = 0x00,
+ KeepDeadPhis = 0x01, // Do not remove dead phis during build.
+ };
+ };
+
template <typename T> struct NodeAddr {
NodeAddr() : Addr(nullptr), Id(0) {}
NodeAddr(T A, NodeId I) : Addr(A), Id(I) {}
@@ -678,7 +681,7 @@ namespace rdf {
typedef std::map<RegisterRef,DefStack> DefStackMap;
- void build();
+ void build(unsigned Options = BuildOptions::None);
void pushDefs(NodeAddr<InstrNode*> IA, DefStackMap &DM);
void markBlock(NodeId B, DefStackMap &DefM);
void releaseBlock(NodeId B, DefStackMap &DefM);
@@ -697,8 +700,16 @@ namespace rdf {
NodeList getRelatedRefs(NodeAddr<InstrNode*> IA,
NodeAddr<RefNode*> RA) const;
- void unlinkUse(NodeAddr<UseNode*> UA);
- void unlinkDef(NodeAddr<DefNode*> DA);
+ void unlinkUse(NodeAddr<UseNode*> UA, bool RemoveFromOwner) {
+ unlinkUseDF(UA);
+ if (RemoveFromOwner)
+ removeFromOwner(UA);
+ }
+ void unlinkDef(NodeAddr<DefNode*> DA, bool RemoveFromOwner) {
+ unlinkDefDF(DA);
+ if (RemoveFromOwner)
+ removeFromOwner(DA);
+ }
// Some useful filters.
template <uint16_t Kind>
@@ -765,6 +776,13 @@ namespace rdf {
void linkStmtRefs(DefStackMap &DefM, NodeAddr<StmtNode*> SA);
void linkBlockRefs(DefStackMap &DefM, NodeAddr<BlockNode*> BA);
+ void unlinkUseDF(NodeAddr<UseNode*> UA);
+ void unlinkDefDF(NodeAddr<DefNode*> DA);
+ void removeFromOwner(NodeAddr<RefNode*> RA) {
+ NodeAddr<InstrNode*> IA = RA.Addr->getOwner(*this);
+ IA.Addr->removeMember(RA, *this);
+ }
+
TimerGroup TimeG;
NodeAddr<FuncNode*> Func;
NodeAllocator Memory;
@@ -837,5 +855,6 @@ namespace rdf {
: Print<NodeAddr<T>>(x, g) {}
};
} // namespace rdf
+} // namespace llvm
#endif // RDF_GRAPH_H
diff --git a/lib/Target/Hexagon/RDFLiveness.cpp b/lib/Target/Hexagon/RDFLiveness.cpp
index 1d9bd372ff4e..641f01423176 100644
--- a/lib/Target/Hexagon/RDFLiveness.cpp
+++ b/lib/Target/Hexagon/RDFLiveness.cpp
@@ -36,6 +36,7 @@
using namespace llvm;
using namespace rdf;
+namespace llvm {
namespace rdf {
template<>
raw_ostream &operator<< (raw_ostream &OS, const Print<Liveness::RefMap> &P) {
@@ -52,7 +53,8 @@ namespace rdf {
OS << " }";
return OS;
}
-}
+} // namespace rdf
+} // namespace llvm
// The order in the returned sequence is the order of reaching defs in the
// upward traversal: the first def is the closest to the given reference RefA,
@@ -235,7 +237,93 @@ NodeList Liveness::getAllReachingDefs(NodeAddr<RefNode*> RefA) {
}
+NodeSet Liveness::getAllReachingDefsRec(RegisterRef RefRR,
+ NodeAddr<RefNode*> RefA, NodeSet &Visited, const NodeSet &Defs) {
+ // Collect all defined registers. Do not consider phis to be defining
+ // anything, only collect "real" definitions.
+ RegisterSet DefRRs;
+ for (const auto D : Defs) {
+ const auto DA = DFG.addr<const DefNode*>(D);
+ if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef))
+ DefRRs.insert(DA.Addr->getRegRef());
+ }
+
+ auto RDs = getAllReachingDefs(RefRR, RefA, true, DefRRs);
+ if (RDs.empty())
+ return Defs;
+
+ // Make a copy of the preexisting definitions and add the newly found ones.
+ NodeSet TmpDefs = Defs;
+ for (auto R : RDs)
+ TmpDefs.insert(R.Id);
+
+ NodeSet Result = Defs;
+
+ for (NodeAddr<DefNode*> DA : RDs) {
+ Result.insert(DA.Id);
+ if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef))
+ continue;
+ NodeAddr<PhiNode*> PA = DA.Addr->getOwner(DFG);
+ if (Visited.count(PA.Id))
+ continue;
+ Visited.insert(PA.Id);
+ // Go over all phi uses and get the reaching defs for each use.
+ for (auto U : PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG)) {
+ const auto &T = getAllReachingDefsRec(RefRR, U, Visited, TmpDefs);
+ Result.insert(T.begin(), T.end());
+ }
+ }
+
+ return Result;
+}
+
+
+NodeSet Liveness::getAllReachedUses(RegisterRef RefRR,
+ NodeAddr<DefNode*> DefA, const RegisterSet &DefRRs) {
+ NodeSet Uses;
+
+ // If the original register is already covered by all the intervening
+ // defs, no more uses can be reached.
+ if (RAI.covers(DefRRs, RefRR))
+ return Uses;
+
+ // Add all directly reached uses.
+ NodeId U = DefA.Addr->getReachedUse();
+ while (U != 0) {
+ auto UA = DFG.addr<UseNode*>(U);
+ auto UR = UA.Addr->getRegRef();
+ if (RAI.alias(RefRR, UR) && !RAI.covers(DefRRs, UR))
+ Uses.insert(U);
+ U = UA.Addr->getSibling();
+ }
+
+ // Traverse all reached defs.
+ for (NodeId D = DefA.Addr->getReachedDef(), NextD; D != 0; D = NextD) {
+ auto DA = DFG.addr<DefNode*>(D);
+ NextD = DA.Addr->getSibling();
+ auto DR = DA.Addr->getRegRef();
+ // If this def is already covered, it cannot reach anything new.
+ // Similarly, skip it if it is not aliased to the interesting register.
+ if (RAI.covers(DefRRs, DR) || !RAI.alias(RefRR, DR))
+ continue;
+ NodeSet T;
+ if (DA.Addr->getFlags() & NodeAttrs::Preserving) {
+ // If it is a preserving def, do not update the set of intervening defs.
+ T = getAllReachedUses(RefRR, DA, DefRRs);
+ } else {
+ RegisterSet NewDefRRs = DefRRs;
+ NewDefRRs.insert(DR);
+ T = getAllReachedUses(RefRR, DA, NewDefRRs);
+ }
+ Uses.insert(T.begin(), T.end());
+ }
+ return Uses;
+}
+
+
void Liveness::computePhiInfo() {
+ RealUseMap.clear();
+
NodeList Phis;
NodeAddr<FuncNode*> FA = DFG.getFunc();
auto Blocks = FA.Addr->members(DFG);
@@ -601,7 +689,11 @@ void Liveness::resetKills(MachineBasicBlock *B) {
MI->clearKillInfo();
for (auto &Op : MI->operands()) {
- if (!Op.isReg() || !Op.isDef())
+ // An implicit def of a super-register does not necessarily start a
+ // live range of it, since an implicit use may keep parts of it live.
+ // Instead of analyzing the implicit operands, ignore implicit defs.
+ if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
continue;
unsigned R = Op.getReg();
if (!TargetRegisterInfo::isPhysicalRegister(R))
@@ -616,8 +708,8 @@ void Liveness::resetKills(MachineBasicBlock *B) {
if (!TargetRegisterInfo::isPhysicalRegister(R))
continue;
bool IsLive = false;
- for (MCSubRegIterator SR(R, &TRI, true); SR.isValid(); ++SR) {
- if (!Live[*SR])
+ for (MCRegAliasIterator AR(R, &TRI, true); AR.isValid(); ++AR) {
+ if (!Live[*AR])
continue;
IsLive = true;
break;
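The iterator change above widens the liveness test: MCSubRegIterator visits only R and its sub-registers, while MCRegAliasIterator also visits super-registers and any other overlapping registers, so a live super-register now correctly suppresses the kill flag. A hedged sketch of the resulting predicate (the helper name and the BitVector parameter are assumptions):

    static bool anyAliasLive(unsigned R, const TargetRegisterInfo &TRI,
                             const BitVector &Live) {
      // True if R or any register overlapping R is currently live.
      for (MCRegAliasIterator AR(R, &TRI, /*IncludeSelf=*/true); AR.isValid(); ++AR)
        if (Live[*AR])
          return true;
      return false;
    }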
diff --git a/lib/Target/Hexagon/RDFLiveness.h b/lib/Target/Hexagon/RDFLiveness.h
index 4c1e8f3ee838..2b49c7488ce3 100644
--- a/lib/Target/Hexagon/RDFLiveness.h
+++ b/lib/Target/Hexagon/RDFLiveness.h
@@ -26,7 +26,6 @@ namespace llvm {
class TargetRegisterInfo;
class MachineDominatorTree;
class MachineDominanceFrontier;
-}
namespace rdf {
struct Liveness {
@@ -41,6 +40,10 @@ namespace rdf {
NodeList getAllReachingDefs(RegisterRef RefRR, NodeAddr<RefNode*> RefA,
bool FullChain = false, const RegisterSet &DefRRs = RegisterSet());
NodeList getAllReachingDefs(NodeAddr<RefNode*> RefA);
+ NodeSet getAllReachingDefsRec(RegisterRef RefRR, NodeAddr<RefNode*> RefA,
+ NodeSet &Visited, const NodeSet &Defs);
+ NodeSet getAllReachedUses(RegisterRef RefRR, NodeAddr<DefNode*> DefA,
+ const RegisterSet &DefRRs = RegisterSet());
LiveMapType &getLiveMap() { return LiveMap; }
const LiveMapType &getLiveMap() const { return LiveMap; }
@@ -101,6 +104,7 @@ namespace rdf {
void traverse(MachineBasicBlock *B, RefMap &LiveIn);
void emptify(RefMap &M);
};
-}
+} // namespace rdf
+} // namespace llvm
#endif // RDF_LIVENESS_H
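A sketch of how the two new Liveness queries compose (hypothetical client code; L, RR, RefA and DefA are assumed to come from a pass):

    // From a reference, gather every reaching def, following the reaching
    // defs of phi uses recursively; Visited guards against phi cycles.
    rdf::NodeSet Visited, Defs;
    rdf::NodeSet AllDefs = L.getAllReachingDefsRec(RR, RefA, Visited, Defs);

    // From a def, gather every use it can reach, stopping where the
    // intervening defs fully cover the register of interest.
    rdf::NodeSet AllUses = L.getAllReachedUses(RR, DefA);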
diff --git a/lib/Target/Hexagon/TargetInfo/Makefile b/lib/Target/Hexagon/TargetInfo/Makefile
deleted file mode 100644
index 494cca112249..000000000000
--- a/lib/Target/Hexagon/TargetInfo/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/Hexagon/TargetInfo/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMHexagonInfo
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/LLVMBuild.txt b/lib/Target/LLVMBuild.txt
index eb794ebc7216..43621629dd25 100644
--- a/lib/Target/LLVMBuild.txt
+++ b/lib/Target/LLVMBuild.txt
@@ -24,7 +24,7 @@ subdirectories =
AArch64
AVR
BPF
- CppBackend
+ Lanai
Hexagon
MSP430
NVPTX
diff --git a/lib/Target/Lanai/AsmParser/CMakeLists.txt b/lib/Target/Lanai/AsmParser/CMakeLists.txt
new file mode 100644
index 000000000000..3c88192ea6f0
--- /dev/null
+++ b/lib/Target/Lanai/AsmParser/CMakeLists.txt
@@ -0,0 +1,7 @@
+include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
+
+add_llvm_library(LLVMLanaiAsmParser
+ LanaiAsmParser.cpp
+ )
+
+add_dependencies( LLVMLanaiAsmParser LanaiCommonTableGen )
diff --git a/lib/Target/Lanai/AsmParser/LLVMBuild.txt b/lib/Target/Lanai/AsmParser/LLVMBuild.txt
new file mode 100644
index 000000000000..08cf4033087d
--- /dev/null
+++ b/lib/Target/Lanai/AsmParser/LLVMBuild.txt
@@ -0,0 +1,23 @@
+;===- ./lib/Target/Lanai/AsmParser/LLVMBuild.txt ----------------*- Conf -*-===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = LanaiAsmParser
+parent = Lanai
+required_libraries = MC MCParser Support LanaiMCTargetDesc LanaiInfo
+add_to_library_groups = Lanai
diff --git a/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp b/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
new file mode 100644
index 000000000000..cbb96d8b05b2
--- /dev/null
+++ b/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
@@ -0,0 +1,1213 @@
+//===-- LanaiAsmParser.cpp - Parse Lanai assembly to MCInst instructions --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Lanai.h"
+#include "MCTargetDesc/LanaiMCExpr.h"
+#include "MCTargetDesc/LanaiMCTargetDesc.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/TargetRegistry.h"
+
+namespace llvm {
+namespace {
+struct LanaiOperand;
+
+class LanaiAsmParser : public MCTargetAsmParser {
+ // Parse operands
+ std::unique_ptr<LanaiOperand> parseRegister();
+
+ std::unique_ptr<LanaiOperand> parseImmediate();
+
+ std::unique_ptr<LanaiOperand> parseIdentifier();
+
+ unsigned parseAluOperator(bool PreOp, bool PostOp);
+
+ // Split the mnemonic stripping conditional code and quantifiers
+ StringRef splitMnemonic(StringRef Name, SMLoc NameLoc,
+ OperandVector *Operands);
+
+ bool parsePrePost(StringRef Type, int *OffsetValue);
+
+ bool ParseDirective(AsmToken DirectiveID) override;
+
+ bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc, OperandVector &Operands) override;
+
+ bool ParseRegister(unsigned &RegNum, SMLoc &StartLoc, SMLoc &EndLoc) override;
+
+ bool MatchAndEmitInstruction(SMLoc IdLoc, unsigned &Opcode,
+ OperandVector &Operands, MCStreamer &Out,
+ uint64_t &ErrorInfo,
+ bool MatchingInlineAsm) override;
+
+// Auto-generated instruction matching functions
+#define GET_ASSEMBLER_HEADER
+#include "LanaiGenAsmMatcher.inc"
+
+ OperandMatchResultTy parseOperand(OperandVector *Operands,
+ StringRef Mnemonic);
+
+ OperandMatchResultTy parseMemoryOperand(OperandVector &Operands);
+
+public:
+ LanaiAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
+ const MCInstrInfo &MII, const MCTargetOptions &Options)
+ : MCTargetAsmParser(Options, STI), Parser(Parser),
+ Lexer(Parser.getLexer()), SubtargetInfo(STI) {
+ setAvailableFeatures(
+ ComputeAvailableFeatures(SubtargetInfo.getFeatureBits()));
+ }
+
+private:
+ MCAsmParser &Parser;
+ MCAsmLexer &Lexer;
+
+ const MCSubtargetInfo &SubtargetInfo;
+};
+
+// Auto-generated by TableGen
+static unsigned MatchRegisterName(llvm::StringRef Name);
+
+// LanaiOperand - Instances of this class represent a parsed machine
+// instruction.
+struct LanaiOperand : public MCParsedAsmOperand {
+ enum KindTy {
+ TOKEN,
+ REGISTER,
+ IMMEDIATE,
+ MEMORY_IMM,
+ MEMORY_REG_IMM,
+ MEMORY_REG_REG,
+ } Kind;
+
+ SMLoc StartLoc, EndLoc;
+
+ struct Token {
+ const char *Data;
+ unsigned Length;
+ };
+
+ struct RegOp {
+ unsigned RegNum;
+ };
+
+ struct ImmOp {
+ const MCExpr *Value;
+ };
+
+ struct MemOp {
+ unsigned BaseReg;
+ unsigned OffsetReg;
+ unsigned AluOp;
+ const MCExpr *Offset;
+ };
+
+ union {
+ struct Token Tok;
+ struct RegOp Reg;
+ struct ImmOp Imm;
+ struct MemOp Mem;
+ };
+
+ explicit LanaiOperand(KindTy Kind) : MCParsedAsmOperand(), Kind(Kind) {}
+
+public:
+ // The functions below are used by the autogenerated ASM matcher and hence
+ // need to be of the expected form.
+
+ // getStartLoc - Gets location of the first token of this operand
+ SMLoc getStartLoc() const override { return StartLoc; }
+
+ // getEndLoc - Gets location of the last token of this operand
+ SMLoc getEndLoc() const override { return EndLoc; }
+
+ unsigned getReg() const override {
+ assert(isReg() && "Invalid type access!");
+ return Reg.RegNum;
+ }
+
+ const MCExpr *getImm() const {
+ assert(isImm() && "Invalid type access!");
+ return Imm.Value;
+ }
+
+ StringRef getToken() const {
+ assert(isToken() && "Invalid type access!");
+ return StringRef(Tok.Data, Tok.Length);
+ }
+
+ unsigned getMemBaseReg() const {
+ assert(isMem() && "Invalid type access!");
+ return Mem.BaseReg;
+ }
+
+ unsigned getMemOffsetReg() const {
+ assert(isMem() && "Invalid type access!");
+ return Mem.OffsetReg;
+ }
+
+ const MCExpr *getMemOffset() const {
+ assert(isMem() && "Invalid type access!");
+ return Mem.Offset;
+ }
+
+ unsigned getMemOp() const {
+ assert(isMem() && "Invalid type access!");
+ return Mem.AluOp;
+ }
+
+ // Functions for testing operand type
+ bool isReg() const override { return Kind == REGISTER; }
+
+ bool isImm() const override { return Kind == IMMEDIATE; }
+
+ bool isMem() const override {
+ return isMemImm() || isMemRegImm() || isMemRegReg();
+ }
+
+ bool isMemImm() const { return Kind == MEMORY_IMM; }
+
+ bool isMemRegImm() const { return Kind == MEMORY_REG_IMM; }
+
+ bool isMemRegReg() const { return Kind == MEMORY_REG_REG; }
+
+ bool isMemSpls() const { return isMemRegImm() || isMemRegReg(); }
+
+ bool isToken() const override { return Kind == TOKEN; }
+
+ bool isBrImm() {
+ if (!isImm())
+ return false;
+
+ // Constant case
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm.Value);
+ if (!MCE)
+ return true;
+ int64_t Value = MCE->getValue();
+ // Check if value fits in 25 bits with 2 least significant bits 0.
+ return isShiftedUInt<23, 2>(static_cast<int32_t>(Value));
+ }
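+ // Note on the check above (illustrative, not from the import):
+ // isShiftedUInt<23, 2> accepts a 25-bit, 4-byte-aligned offset, so
+ // 0x1fffffc passes while 0x2000000 (26 bits) and 0x6 (misaligned) fail.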
+
+ bool isBrTarget() { return isBrImm() || isToken(); }
+
+ bool isCallTarget() { return isImm() || isToken(); }
+
+ bool isHiImm16() {
+ if (!isImm())
+ return false;
+
+ // Constant case
+ if (const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(Imm.Value)) {
+ int64_t Value = ConstExpr->getValue();
+ return Value != 0 && isShiftedUInt<16, 16>(Value);
+ }
+
+ // Symbolic reference expression
+ if (const LanaiMCExpr *SymbolRefExpr = dyn_cast<LanaiMCExpr>(Imm.Value))
+ return SymbolRefExpr->getKind() == LanaiMCExpr::VK_Lanai_ABS_HI;
+
+ // Binary expression
+ if (const MCBinaryExpr *BinaryExpr = dyn_cast<MCBinaryExpr>(Imm.Value))
+ if (const LanaiMCExpr *SymbolRefExpr =
+ dyn_cast<LanaiMCExpr>(BinaryExpr->getLHS()))
+ return SymbolRefExpr->getKind() == LanaiMCExpr::VK_Lanai_ABS_HI;
+
+ return false;
+ }
+
+ bool isHiImm16And() {
+ if (!isImm())
+ return false;
+
+ const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(Imm.Value);
+ if (ConstExpr) {
+ int64_t Value = ConstExpr->getValue();
+ // Check if in the form 0xXYZWffff
+ return (Value != 0) && ((Value & ~0xffff0000) == 0xffff);
+ }
+ return false;
+ }
+
+ bool isLoImm16() {
+ if (!isImm())
+ return false;
+
+ // Constant case
+ if (const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(Imm.Value)) {
+ int64_t Value = ConstExpr->getValue();
+ // Check if value fits in 16 bits
+ return isUInt<16>(static_cast<int32_t>(Value));
+ }
+
+ // Symbolic reference expression
+ if (const LanaiMCExpr *SymbolRefExpr = dyn_cast<LanaiMCExpr>(Imm.Value))
+ return SymbolRefExpr->getKind() == LanaiMCExpr::VK_Lanai_ABS_LO;
+
+ // Binary expression
+ if (const MCBinaryExpr *BinaryExpr = dyn_cast<MCBinaryExpr>(Imm.Value))
+ if (const LanaiMCExpr *SymbolRefExpr =
+ dyn_cast<LanaiMCExpr>(BinaryExpr->getLHS()))
+ return SymbolRefExpr->getKind() == LanaiMCExpr::VK_Lanai_ABS_LO;
+
+ return false;
+ }
+
+ bool isLoImm16Signed() {
+ if (!isImm())
+ return false;
+
+ // Constant case
+ if (const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(Imm.Value)) {
+ int64_t Value = ConstExpr->getValue();
+ // Check if value fits in 16 bits or value of the form 0xffffxyzw
+ return isInt<16>(static_cast<int32_t>(Value));
+ }
+
+ // Symbolic reference expression
+ if (const LanaiMCExpr *SymbolRefExpr = dyn_cast<LanaiMCExpr>(Imm.Value))
+ return SymbolRefExpr->getKind() == LanaiMCExpr::VK_Lanai_ABS_LO;
+
+ // Binary expression
+ if (const MCBinaryExpr *BinaryExpr = dyn_cast<MCBinaryExpr>(Imm.Value))
+ if (const LanaiMCExpr *SymbolRefExpr =
+ dyn_cast<LanaiMCExpr>(BinaryExpr->getLHS()))
+ return SymbolRefExpr->getKind() == LanaiMCExpr::VK_Lanai_ABS_LO;
+
+ return false;
+ }
+
+ bool isLoImm16And() {
+ if (!isImm())
+ return false;
+
+ const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(Imm.Value);
+ if (ConstExpr) {
+ int64_t Value = ConstExpr->getValue();
+ // Check if in the form 0xffffXYZW
+ return ((Value & ~0xffff) == 0xffff0000);
+ }
+ return false;
+ }
+
+ bool isImmShift() {
+ if (!isImm())
+ return false;
+
+ const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(Imm.Value);
+ if (!ConstExpr)
+ return false;
+ int64_t Value = ConstExpr->getValue();
+ return (Value >= -31) && (Value <= 31);
+ }
+
+ bool isLoImm21() {
+ if (!isImm())
+ return false;
+
+ // Constant case
+ if (const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(Imm.Value)) {
+ int64_t Value = ConstExpr->getValue();
+ return isUInt<21>(Value);
+ }
+
+ // Symbolic reference expression
+ if (const LanaiMCExpr *SymbolRefExpr = dyn_cast<LanaiMCExpr>(Imm.Value))
+ return SymbolRefExpr->getKind() == LanaiMCExpr::VK_Lanai_None;
+ if (const MCSymbolRefExpr *SymbolRefExpr =
+ dyn_cast<MCSymbolRefExpr>(Imm.Value)) {
+ return SymbolRefExpr->getKind() == MCSymbolRefExpr::VK_None;
+ }
+
+ // Binary expression
+ if (const MCBinaryExpr *BinaryExpr = dyn_cast<MCBinaryExpr>(Imm.Value)) {
+ if (const LanaiMCExpr *SymbolRefExpr =
+ dyn_cast<LanaiMCExpr>(BinaryExpr->getLHS()))
+ return SymbolRefExpr->getKind() == LanaiMCExpr::VK_Lanai_None;
+ if (const MCSymbolRefExpr *SymbolRefExpr =
+ dyn_cast<MCSymbolRefExpr>(BinaryExpr->getLHS()))
+ return SymbolRefExpr->getKind() == MCSymbolRefExpr::VK_None;
+ }
+
+ return false;
+ }
+
+ bool isImm10() {
+ if (!isImm())
+ return false;
+
+ const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(Imm.Value);
+ if (!ConstExpr)
+ return false;
+ int64_t Value = ConstExpr->getValue();
+ return isInt<10>(Value);
+ }
+
+ bool isCondCode() {
+ if (!isImm())
+ return false;
+
+ const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(Imm.Value);
+ if (!ConstExpr)
+ return false;
+ uint64_t Value = ConstExpr->getValue();
+ // The condition codes are between 0 (ICC_T) and 15 (ICC_LE). If the
+ // unsigned value of the immediate is less than LPCC::UNKNOWN (16), then
+ // the value corresponds to a valid condition code.
+ return Value < LPCC::UNKNOWN;
+ }
+
+ void addExpr(MCInst &Inst, const MCExpr *Expr) const {
+ // Add as immediates where possible. Null MCExpr = 0
+ if (Expr == nullptr)
+ Inst.addOperand(MCOperand::createImm(0));
+ else if (const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(Expr))
+ Inst.addOperand(
+ MCOperand::createImm(static_cast<int32_t>(ConstExpr->getValue())));
+ else
+ Inst.addOperand(MCOperand::createExpr(Expr));
+ }
+
+ void addRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createReg(getReg()));
+ }
+
+ void addImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ addExpr(Inst, getImm());
+ }
+
+ void addBrTargetOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ addExpr(Inst, getImm());
+ }
+
+ void addCallTargetOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ addExpr(Inst, getImm());
+ }
+
+ void addCondCodeOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ addExpr(Inst, getImm());
+ }
+
+ void addMemImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCExpr *Expr = getMemOffset();
+ addExpr(Inst, Expr);
+ }
+
+ void addMemRegImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 3 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
+ const MCExpr *Expr = getMemOffset();
+ addExpr(Inst, Expr);
+ Inst.addOperand(MCOperand::createImm(getMemOp()));
+ }
+
+ void addMemRegRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 3 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
+ assert(getMemOffsetReg() != 0 && "Invalid offset");
+ Inst.addOperand(MCOperand::createReg(getMemOffsetReg()));
+ Inst.addOperand(MCOperand::createImm(getMemOp()));
+ }
+
+ void addMemSplsOperands(MCInst &Inst, unsigned N) const {
+ if (isMemRegImm())
+ addMemRegImmOperands(Inst, N);
+ if (isMemRegReg())
+ addMemRegRegOperands(Inst, N);
+ }
+
+ void addImmShiftOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ addExpr(Inst, getImm());
+ }
+
+ void addImm10Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ addExpr(Inst, getImm());
+ }
+
+ void addLoImm16Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ if (const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(getImm()))
+ Inst.addOperand(
+ MCOperand::createImm(static_cast<int32_t>(ConstExpr->getValue())));
+ else if (isa<LanaiMCExpr>(getImm())) {
+#ifndef NDEBUG
+ const LanaiMCExpr *SymbolRefExpr = dyn_cast<LanaiMCExpr>(getImm());
+ assert(SymbolRefExpr->getKind() == LanaiMCExpr::VK_Lanai_ABS_LO);
+#endif
+ Inst.addOperand(MCOperand::createExpr(getImm()));
+ } else if (isa<MCBinaryExpr>(getImm())) {
+#ifndef NDEBUG
+ const MCBinaryExpr *BinaryExpr = dyn_cast<MCBinaryExpr>(getImm());
+ assert(dyn_cast<LanaiMCExpr>(BinaryExpr->getLHS()) &&
+ dyn_cast<LanaiMCExpr>(BinaryExpr->getLHS())->getKind() ==
+ LanaiMCExpr::VK_Lanai_ABS_LO);
+#endif
+ Inst.addOperand(MCOperand::createExpr(getImm()));
+ } else
+ assert(false && "Operand type not supported.");
+ }
+
+ void addLoImm16AndOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ if (const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(getImm()))
+ Inst.addOperand(MCOperand::createImm(ConstExpr->getValue() & 0xffff));
+ else
+ assert(false && "Operand type not supported.");
+ }
+
+ void addHiImm16Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ if (const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(getImm()))
+ Inst.addOperand(MCOperand::createImm(ConstExpr->getValue() >> 16));
+ else if (isa<LanaiMCExpr>(getImm())) {
+#ifndef NDEBUG
+ const LanaiMCExpr *SymbolRefExpr = dyn_cast<LanaiMCExpr>(getImm());
+ assert(SymbolRefExpr->getKind() == LanaiMCExpr::VK_Lanai_ABS_HI);
+#endif
+ Inst.addOperand(MCOperand::createExpr(getImm()));
+ } else if (isa<MCBinaryExpr>(getImm())) {
+#ifndef NDEBUG
+ const MCBinaryExpr *BinaryExpr = dyn_cast<MCBinaryExpr>(getImm());
+ assert(dyn_cast<LanaiMCExpr>(BinaryExpr->getLHS()) &&
+ dyn_cast<LanaiMCExpr>(BinaryExpr->getLHS())->getKind() ==
+ LanaiMCExpr::VK_Lanai_ABS_HI);
+#endif
+ Inst.addOperand(MCOperand::createExpr(getImm()));
+ } else
+ assert(false && "Operand type not supported.");
+ }
+
+ void addHiImm16AndOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ if (const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(getImm()))
+ Inst.addOperand(MCOperand::createImm(ConstExpr->getValue() >> 16));
+ else
+ assert(false && "Operand type not supported.");
+ }
+
+ void addLoImm21Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ if (const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(getImm()))
+ Inst.addOperand(MCOperand::createImm(ConstExpr->getValue() & 0x1fffff));
+ else if (isa<LanaiMCExpr>(getImm())) {
+#ifndef NDEBUG
+ const LanaiMCExpr *SymbolRefExpr = dyn_cast<LanaiMCExpr>(getImm());
+ assert(SymbolRefExpr &&
+ SymbolRefExpr->getKind() == LanaiMCExpr::VK_Lanai_None);
+#endif
+ Inst.addOperand(MCOperand::createExpr(getImm()));
+ } else if (isa<MCSymbolRefExpr>(getImm())) {
+#ifndef NDEBUG
+ const MCSymbolRefExpr *SymbolRefExpr =
+ dyn_cast<MCSymbolRefExpr>(getImm());
+ assert(SymbolRefExpr &&
+ SymbolRefExpr->getKind() == MCSymbolRefExpr::VK_None);
+#endif
+ Inst.addOperand(MCOperand::createExpr(getImm()));
+ } else if (isa<MCBinaryExpr>(getImm())) {
+#ifndef NDEBUG
+ const MCBinaryExpr *BinaryExpr = dyn_cast<MCBinaryExpr>(getImm());
+ const LanaiMCExpr *SymbolRefExpr =
+ dyn_cast<LanaiMCExpr>(BinaryExpr->getLHS());
+ assert(SymbolRefExpr &&
+ SymbolRefExpr->getKind() == LanaiMCExpr::VK_Lanai_None);
+#endif
+ Inst.addOperand(MCOperand::createExpr(getImm()));
+ } else
+ assert(false && "Operand type not supported.");
+ }
+
+ void print(raw_ostream &OS) const override {
+ switch (Kind) {
+ case IMMEDIATE:
+ OS << "Imm: " << getImm() << "\n";
+ break;
+ case TOKEN:
+ OS << "Token: " << getToken() << "\n";
+ break;
+ case REGISTER:
+ OS << "Reg: %r" << getReg() << "\n";
+ break;
+ case MEMORY_IMM:
+ OS << "MemImm: " << *getMemOffset() << "\n";
+ break;
+ case MEMORY_REG_IMM:
+ OS << "MemRegImm: " << getMemBaseReg() << "+" << *getMemOffset() << "\n";
+ break;
+ case MEMORY_REG_REG:
+ assert(getMemOffset() == nullptr);
+ OS << "MemRegReg: " << getMemBaseReg() << "+"
+ << "%r" << getMemOffsetReg() << "\n";
+ break;
+ }
+ }
+
+ static std::unique_ptr<LanaiOperand> CreateToken(StringRef Str, SMLoc Start) {
+ auto Op = make_unique<LanaiOperand>(TOKEN);
+ Op->Tok.Data = Str.data();
+ Op->Tok.Length = Str.size();
+ Op->StartLoc = Start;
+ Op->EndLoc = Start;
+ return Op;
+ }
+
+ static std::unique_ptr<LanaiOperand> createReg(unsigned RegNum, SMLoc Start,
+ SMLoc End) {
+ auto Op = make_unique<LanaiOperand>(REGISTER);
+ Op->Reg.RegNum = RegNum;
+ Op->StartLoc = Start;
+ Op->EndLoc = End;
+ return Op;
+ }
+
+ static std::unique_ptr<LanaiOperand> createImm(const MCExpr *Value,
+ SMLoc Start, SMLoc End) {
+ auto Op = make_unique<LanaiOperand>(IMMEDIATE);
+ Op->Imm.Value = Value;
+ Op->StartLoc = Start;
+ Op->EndLoc = End;
+ return Op;
+ }
+
+ static std::unique_ptr<LanaiOperand>
+ MorphToMemImm(std::unique_ptr<LanaiOperand> Op) {
+ const MCExpr *Imm = Op->getImm();
+ Op->Kind = MEMORY_IMM;
+ Op->Mem.BaseReg = 0;
+ Op->Mem.AluOp = LPAC::ADD;
+ Op->Mem.OffsetReg = 0;
+ Op->Mem.Offset = Imm;
+ return Op;
+ }
+
+ static std::unique_ptr<LanaiOperand>
+ MorphToMemRegReg(unsigned BaseReg, std::unique_ptr<LanaiOperand> Op,
+ unsigned AluOp) {
+ unsigned OffsetReg = Op->getReg();
+ Op->Kind = MEMORY_REG_REG;
+ Op->Mem.BaseReg = BaseReg;
+ Op->Mem.AluOp = AluOp;
+ Op->Mem.OffsetReg = OffsetReg;
+ Op->Mem.Offset = nullptr;
+ return Op;
+ }
+
+ static std::unique_ptr<LanaiOperand>
+ MorphToMemRegImm(unsigned BaseReg, std::unique_ptr<LanaiOperand> Op,
+ unsigned AluOp) {
+ const MCExpr *Imm = Op->getImm();
+ Op->Kind = MEMORY_REG_IMM;
+ Op->Mem.BaseReg = BaseReg;
+ Op->Mem.AluOp = AluOp;
+ Op->Mem.OffsetReg = 0;
+ Op->Mem.Offset = Imm;
+ return Op;
+ }
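+ // The MorphTo* helpers above retag an already-allocated operand in place
+ // (setting Kind and filling Mem) rather than allocating a new one, so the
+ // same unique_ptr is handed back to the caller.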
+};
+
+bool LanaiAsmParser::ParseDirective(AsmToken /*DirectiveID*/) { return true; }
+
+bool LanaiAsmParser::MatchAndEmitInstruction(SMLoc IdLoc, unsigned &Opcode,
+ OperandVector &Operands,
+ MCStreamer &Out,
+ uint64_t &ErrorInfo,
+ bool MatchingInlineAsm) {
+ MCInst Inst;
+ SMLoc ErrorLoc;
+
+ switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
+ case Match_Success:
+ Out.EmitInstruction(Inst, SubtargetInfo);
+ Opcode = Inst.getOpcode();
+ return false;
+ case Match_MissingFeature:
+ return Error(IdLoc, "Instruction use requires option to be enabled");
+ case Match_MnemonicFail:
+ return Error(IdLoc, "Unrecognized instruction mnemonic");
+ case Match_InvalidOperand: {
+ ErrorLoc = IdLoc;
+ if (ErrorInfo != ~0U) {
+ if (ErrorInfo >= Operands.size())
+ return Error(IdLoc, "Too few operands for instruction");
+
+ ErrorLoc = ((LanaiOperand &)*Operands[ErrorInfo]).getStartLoc();
+ if (ErrorLoc == SMLoc())
+ ErrorLoc = IdLoc;
+ }
+ return Error(ErrorLoc, "Invalid operand for instruction");
+ }
+ default:
+ break;
+ }
+
+ llvm_unreachable("Unknown match type detected!");
+}
+
+// Both '%rN' and 'rN' are parsed as valid registers. This was done to remain
+// backward compatible with GCC and with the different ways inline assembly
+// is handled.
+// TODO: see if there isn't a better way to do this.
+std::unique_ptr<LanaiOperand> LanaiAsmParser::parseRegister() {
+ SMLoc Start = Parser.getTok().getLoc();
+ SMLoc End = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+
+ unsigned RegNum;
+ // Eat the '%'.
+ if (Lexer.getKind() == AsmToken::Percent)
+ Parser.Lex();
+ if (Lexer.getKind() == AsmToken::Identifier) {
+ RegNum = MatchRegisterName(Lexer.getTok().getIdentifier());
+ if (RegNum == 0)
+ return 0;
+ Parser.Lex(); // Eat identifier token
+ return LanaiOperand::createReg(RegNum, Start, End);
+ }
+ return 0;
+}
+
+bool LanaiAsmParser::ParseRegister(unsigned &RegNum, SMLoc &StartLoc,
+ SMLoc &EndLoc) {
+ const AsmToken &Tok = getParser().getTok();
+ StartLoc = Tok.getLoc();
+ EndLoc = Tok.getEndLoc();
+ std::unique_ptr<LanaiOperand> Op = parseRegister();
+ if (Op != nullptr)
+ RegNum = Op->getReg();
+ return (Op == nullptr);
+}
+
+std::unique_ptr<LanaiOperand> LanaiAsmParser::parseIdentifier() {
+ SMLoc Start = Parser.getTok().getLoc();
+ SMLoc End = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ const MCExpr *Res, *RHS = 0;
+ LanaiMCExpr::VariantKind Kind = LanaiMCExpr::VK_Lanai_None;
+
+ if (Lexer.getKind() != AsmToken::Identifier)
+ return 0;
+
+ StringRef Identifier;
+ if (Parser.parseIdentifier(Identifier))
+ return 0;
+
+ // Check if identifier has a modifier
+ if (Identifier.equals_lower("hi"))
+ Kind = LanaiMCExpr::VK_Lanai_ABS_HI;
+ else if (Identifier.equals_lower("lo"))
+ Kind = LanaiMCExpr::VK_Lanai_ABS_LO;
+
+ // If the identifier corresponds to a variant then extract the real
+ // identifier.
+ if (Kind != LanaiMCExpr::VK_Lanai_None) {
+ if (Lexer.getKind() != AsmToken::LParen) {
+ Error(Lexer.getLoc(), "Expected '('");
+ return 0;
+ }
+ Lexer.Lex(); // lex '('
+
+ // Parse identifier
+ if (Parser.parseIdentifier(Identifier))
+ return 0;
+ }
+
+ // If this is an addition, parse the RHS.
+ if (Lexer.getKind() == AsmToken::Plus && Parser.parseExpression(RHS))
+ return 0;
+
+ // For variants parse the final ')'
+ if (Kind != LanaiMCExpr::VK_Lanai_None) {
+ if (Lexer.getKind() != AsmToken::RParen) {
+ Error(Lexer.getLoc(), "Expected ')'");
+ return 0;
+ }
+ Lexer.Lex(); // lex ')'
+ }
+
+ End = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
+ const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
+ Res = LanaiMCExpr::create(Kind, Expr, getContext());
+
+ // Nest if this was an addition
+ if (RHS)
+ Res = MCBinaryExpr::createAdd(Res, RHS, getContext());
+
+ return LanaiOperand::createImm(Res, Start, End);
+}
+
+std::unique_ptr<LanaiOperand> LanaiAsmParser::parseImmediate() {
+ SMLoc Start = Parser.getTok().getLoc();
+ SMLoc End = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+
+ const MCExpr *ExprVal;
+ switch (Lexer.getKind()) {
+ case AsmToken::Identifier:
+ return parseIdentifier();
+ case AsmToken::Plus:
+ case AsmToken::Minus:
+ case AsmToken::Integer:
+ case AsmToken::Dot:
+ if (!Parser.parseExpression(ExprVal))
+ return LanaiOperand::createImm(ExprVal, Start, End);
+ default:
+ return 0;
+ }
+}
+
+static unsigned AluWithPrePost(unsigned AluCode, bool PreOp, bool PostOp) {
+ if (PreOp)
+ return LPAC::makePreOp(AluCode);
+ if (PostOp)
+ return LPAC::makePostOp(AluCode);
+ return AluCode;
+}
+
+unsigned LanaiAsmParser::parseAluOperator(bool PreOp, bool PostOp) {
+ StringRef IdString;
+ Parser.parseIdentifier(IdString);
+ unsigned AluCode = LPAC::stringToLanaiAluCode(IdString);
+ if (AluCode == LPAC::UNKNOWN) {
+ Error(Parser.getTok().getLoc(), "Can't parse ALU operator");
+ return 0;
+ }
+ return AluCode;
+}
+
+static int SizeForSuffix(StringRef T) {
+ return StringSwitch<int>(T).EndsWith(".h", 2).EndsWith(".b", 1).Default(4);
+}
+
+bool LanaiAsmParser::parsePrePost(StringRef Type, int *OffsetValue) {
+ bool PreOrPost = false;
+ if (Lexer.getKind() == Lexer.peekTok(true).getKind()) {
+ PreOrPost = true;
+ if (Lexer.is(AsmToken::Minus))
+ *OffsetValue = -SizeForSuffix(Type);
+ else if (Lexer.is(AsmToken::Plus))
+ *OffsetValue = SizeForSuffix(Type);
+ else
+ return false;
+
+ // Eat the '-' '-' or '+' '+'
+ Parser.Lex();
+ Parser.Lex();
+ } else if (Lexer.is(AsmToken::Star)) {
+ Parser.Lex(); // Eat the '*'
+ PreOrPost = true;
+ }
+
+ return PreOrPost;
+}
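+// Illustrative forms handled above (examples assumed, not from the import):
+// '[--%r6]' and '[%r6++]' select a pre-decrement/post-increment by the
+// access size (4 bytes, 2 for '.h', 1 for '.b'), while '*' only marks the
+// access as a pre/post operation and leaves the offset to the rest of the
+// memory operand.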
+
+bool shouldBeSls(const LanaiOperand &Op) {
+ // The instruction should be encoded as an SLS if the constant is word
+ // aligned and will fit in 21 bits
+ if (const MCConstantExpr *ConstExpr = dyn_cast<MCConstantExpr>(Op.getImm())) {
+ int64_t Value = ConstExpr->getValue();
+ return (Value % 4 == 0) && (Value >= 0) && (Value <= 0x1fffff);
+ }
+ // The instruction should be encoded as an SLS if the operand is a symbolic
+ // reference with no variant.
+ if (const LanaiMCExpr *SymbolRefExpr = dyn_cast<LanaiMCExpr>(Op.getImm()))
+ return SymbolRefExpr->getKind() == LanaiMCExpr::VK_Lanai_None;
+ // The instruction should be encoded as an SLS if the operand is a binary
+ // expression with the left-hand side being a symbolic reference with no
+ // variant.
+ if (const MCBinaryExpr *BinaryExpr = dyn_cast<MCBinaryExpr>(Op.getImm())) {
+ const LanaiMCExpr *LHSSymbolRefExpr =
+ dyn_cast<LanaiMCExpr>(BinaryExpr->getLHS());
+ return (LHSSymbolRefExpr &&
+ LHSSymbolRefExpr->getKind() == LanaiMCExpr::VK_Lanai_None);
+ }
+ return false;
+}
+
+// Matches a memory operand; returns MatchOperand_ParseFail if an error is
+// encountered.
+LanaiAsmParser::OperandMatchResultTy
+LanaiAsmParser::parseMemoryOperand(OperandVector &Operands) {
+ // Try to match a memory operand.
+ // The memory operands are of the form:
+ // (1) Register|Immediate|'' '[' '*'? Register '*'? ']' or
+ // ^
+ // (2) '[' '*'? Register '*'? AluOperator Register ']'
+ // ^
+ // (3) '[' '--'|'++' Register '--'|'++' ']'
+ //
+ // (4) '[' Immediate ']' (for SLS)
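+ // Illustrative examples of the four forms (assumed, not from the import):
+ // (1) 'ld 4[%r6], %r7' (2) 'ld [%r6 add %r9], %r7'
+ // (3) 'ld [--%r6], %r7' (4) 'ld [0x1000], %r7'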
+
+ // Store the type for use in parsing pre/post increment/decrement operators
+ StringRef Type;
+ if (Operands[0]->isToken())
+ Type = static_cast<LanaiOperand *>(Operands[0].get())->getToken();
+
+ // Use 0 if no offset given
+ int OffsetValue = 0;
+ unsigned BaseReg = 0;
+ unsigned AluOp = LPAC::ADD;
+ bool PostOp = false, PreOp = false;
+
+ // Try to parse the offset
+ std::unique_ptr<LanaiOperand> Op = parseRegister();
+ if (!Op)
+ Op = parseImmediate();
+
+ // Only continue if next token is '['
+ if (Lexer.isNot(AsmToken::LBrac)) {
+ if (!Op)
+ return MatchOperand_NoMatch;
+
+ // The start of this custom parsing overlaps with register/immediate so
+ // consider this as a successful match of an operand of that type as the
+ // token stream can't be rewound to allow them to match separately.
+ Operands.push_back(std::move(Op));
+ return MatchOperand_Success;
+ }
+
+ Parser.Lex(); // Eat the '['.
+ std::unique_ptr<LanaiOperand> Offset = nullptr;
+ if (Op)
+ Offset.swap(Op);
+
+ // Determine if a pre operation
+ PreOp = parsePrePost(Type, &OffsetValue);
+
+ Op = parseRegister();
+ if (!Op) {
+ if (!Offset) {
+ if ((Op = parseImmediate()) && Lexer.is(AsmToken::RBrac)) {
+ Parser.Lex(); // Eat the ']'
+
+ // Memory address operations aligned to word boundary are encoded as
+ // SLS, the rest as RM.
+ if (shouldBeSls(*Op)) {
+ Operands.push_back(LanaiOperand::MorphToMemImm(std::move(Op)));
+ } else {
+ if (!Op->isLoImm16Signed()) {
+ Error(Parser.getTok().getLoc(),
+ "Memory address is not word "
+ "aligned and larger than class RM can handle");
+ return MatchOperand_ParseFail;
+ }
+ Operands.push_back(LanaiOperand::MorphToMemRegImm(
+ Lanai::R0, std::move(Op), LPAC::ADD));
+ }
+ return MatchOperand_Success;
+ }
+ }
+
+ Error(Parser.getTok().getLoc(),
+ "Unknown operand, expected register or immediate");
+ return MatchOperand_ParseFail;
+ }
+ BaseReg = Op->getReg();
+
+ // Determine if a post operation
+ if (!PreOp)
+ PostOp = parsePrePost(Type, &OffsetValue);
+
+ // If ] match form (1) else match form (2)
+ if (Lexer.is(AsmToken::RBrac)) {
+ Parser.Lex(); // Eat the ']'.
+ if (!Offset) {
+ SMLoc Start = Parser.getTok().getLoc();
+ SMLoc End =
+ SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ const MCConstantExpr *OffsetConstExpr =
+ MCConstantExpr::create(OffsetValue, getContext());
+ Offset = LanaiOperand::createImm(OffsetConstExpr, Start, End);
+ }
+ } else {
+ if (Offset || OffsetValue != 0) {
+ Error(Parser.getTok().getLoc(), "Expected ']'");
+ return MatchOperand_ParseFail;
+ }
+
+ // Parse operator
+ AluOp = parseAluOperator(PreOp, PostOp);
+
+ // Second form requires offset register
+ Offset = parseRegister();
+ if (!BaseReg || Lexer.isNot(AsmToken::RBrac)) {
+ Error(Parser.getTok().getLoc(), "Expected ']'");
+ return MatchOperand_ParseFail;
+ }
+ Parser.Lex(); // Eat the ']'.
+ }
+
+ // First form has addition as operator. Add pre- or post-op indicator as
+ // needed.
+ AluOp = AluWithPrePost(AluOp, PreOp, PostOp);
+
+ // Ensure immediate offset is not too large
+ if (Offset->isImm() && !Offset->isLoImm16Signed()) {
+ Error(Parser.getTok().getLoc(),
+ "Memory address is not word "
+ "aligned and larger than class RM can handle");
+ return MatchOperand_ParseFail;
+ }
+
+ Operands.push_back(
+ Offset->isImm()
+ ? LanaiOperand::MorphToMemRegImm(BaseReg, std::move(Offset), AluOp)
+ : LanaiOperand::MorphToMemRegReg(BaseReg, std::move(Offset), AluOp));
+
+ return MatchOperand_Success;
+}
+
+// Looks at a token type and creates the relevant operand from this
+// information, adding it to Operands.
+// Returns MatchOperand_Success if an operand was parsed and
+// MatchOperand_ParseFail otherwise.
+LanaiAsmParser::OperandMatchResultTy
+LanaiAsmParser::parseOperand(OperandVector *Operands, StringRef Mnemonic) {
+ // Check if the current operand has a custom associated parser, if so, try to
+ // custom parse the operand, or fallback to the general approach.
+ OperandMatchResultTy Result = MatchOperandParserImpl(*Operands, Mnemonic);
+
+ if (Result == MatchOperand_Success)
+ return Result;
+ if (Result == MatchOperand_ParseFail) {
+ Parser.eatToEndOfStatement();
+ return Result;
+ }
+
+ // Attempt to parse token as register
+ std::unique_ptr<LanaiOperand> Op = parseRegister();
+
+ // Attempt to parse token as immediate
+ if (!Op)
+ Op = parseImmediate();
+
+ // If the token could not be parsed then fail
+ if (!Op) {
+ Error(Parser.getTok().getLoc(), "Unknown operand");
+ Parser.eatToEndOfStatement();
+ return MatchOperand_ParseFail;
+ }
+
+ // Push back parsed operand into list of operands
+ Operands->push_back(std::move(Op));
+
+ return MatchOperand_Success;
+}
+
+// Split the mnemonic into ASM operand, conditional code and instruction
+// qualifier (half-word, byte).
+StringRef LanaiAsmParser::splitMnemonic(StringRef Name, SMLoc NameLoc,
+ OperandVector *Operands) {
+ size_t Next = Name.find('.');
+
+ StringRef Mnemonic = Name;
+
+ bool IsBRR = false;
+ if (Name.endswith(".r")) {
+ Mnemonic = Name.substr(0, Name.size() - 2);
+ IsBRR = true;
+ }
+
+ // Match b?? and s?? (BR, BRR, and SCC instruction classes).
+ if (Mnemonic[0] == 'b' ||
+ (Mnemonic[0] == 's' && !Mnemonic.startswith("sel") &&
+ !Mnemonic.startswith("st"))) {
+ // Parse instructions with a conditional code. For example, 'bne' is
+ // converted into two operands 'b' and 'ne'.
+ LPCC::CondCode CondCode =
+ LPCC::suffixToLanaiCondCode(Mnemonic.substr(1, Next));
+ if (CondCode != LPCC::UNKNOWN) {
+ Mnemonic = Mnemonic.slice(0, 1);
+ Operands->push_back(LanaiOperand::CreateToken(Mnemonic, NameLoc));
+ Operands->push_back(LanaiOperand::createImm(
+ MCConstantExpr::create(CondCode, getContext()), NameLoc, NameLoc));
+ if (IsBRR) {
+ Operands->push_back(LanaiOperand::CreateToken(".r", NameLoc));
+ }
+ return Mnemonic;
+ }
+ }
+
+ // Parse other instructions with condition codes (RR instructions).
+ // We ignore .f here and assume they are flag-setting operations, not
+ // conditional codes (except for select instructions where flag-setting
+ // variants are not yet implemented).
+ if (Mnemonic.startswith("sel") ||
+ (!Mnemonic.endswith(".f") && !Mnemonic.startswith("st"))) {
+ LPCC::CondCode CondCode = LPCC::suffixToLanaiCondCode(Mnemonic);
+ if (CondCode != LPCC::UNKNOWN) {
+ size_t Next = Mnemonic.rfind('.', Name.size());
+ // 'sel' doesn't use a predicate operand whose printer adds the period,
+ // but instead has the period as part of the identifier (i.e., 'sel.' is
+ // expected by the generated matcher). If the mnemonic starts with 'sel'
+ // then include the period as part of the mnemonic, else don't include it
+ // as part of the mnemonic.
+ if (Mnemonic.startswith("sel")) {
+ Mnemonic = Mnemonic.substr(0, Next + 1);
+ } else {
+ Mnemonic = Mnemonic.substr(0, Next);
+ }
+ Operands->push_back(LanaiOperand::CreateToken(Mnemonic, NameLoc));
+ Operands->push_back(LanaiOperand::createImm(
+ MCConstantExpr::create(CondCode, getContext()), NameLoc, NameLoc));
+ return Mnemonic;
+ }
+ }
+
+ Operands->push_back(LanaiOperand::CreateToken(Mnemonic, NameLoc));
+ if (IsBRR) {
+ Operands->push_back(LanaiOperand::CreateToken(".r", NameLoc));
+ }
+
+ return Mnemonic;
+}
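+// Example (illustrative): 'bne.r' is split into the token 'b', an ICC_NE
+// condition-code immediate, and the token '.r'; 'sel.eq' keeps its period
+// and becomes the token 'sel.' plus an ICC_EQ immediate.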
+
+bool IsMemoryAssignmentError(const OperandVector &Operands) {
+ // Detects if a memory operation has an erroneous base register modification.
+ // Memory operations are detected by matching the types of operands.
+ //
+ // TODO: This test is focused on one specific instance (ld/st).
+ // Extend it to handle more cases or be more robust.
+ bool Modifies = false;
+
+ int Offset = 0;
+
+ if (Operands.size() < 5)
+ return false;
+ else if (Operands[0]->isToken() && Operands[1]->isReg() &&
+ Operands[2]->isImm() && Operands[3]->isImm() && Operands[4]->isReg())
+ Offset = 0;
+ else if (Operands.size() > 5 && Operands[0]->isToken() &&
+ Operands[1]->isToken() && Operands[2]->isReg() &&
+ Operands[3]->isImm() && Operands[4]->isImm() && Operands[5]->isReg())
+ Offset = 1;
+ else
+ return false;
+
+ int PossibleAluOpIdx = Offset + 3;
+ int PossibleBaseIdx = Offset + 1;
+ int PossibleDestIdx = Offset + 4;
+ if (LanaiOperand *PossibleAluOp =
+ static_cast<LanaiOperand *>(Operands[PossibleAluOpIdx].get()))
+ if (PossibleAluOp->isImm())
+ if (const MCConstantExpr *ConstExpr =
+ dyn_cast<MCConstantExpr>(PossibleAluOp->getImm()))
+ Modifies = LPAC::modifiesOp(ConstExpr->getValue());
+ return Modifies && Operands[PossibleBaseIdx]->isReg() &&
+ Operands[PossibleDestIdx]->isReg() &&
+ Operands[PossibleBaseIdx]->getReg() ==
+ Operands[PossibleDestIdx]->getReg();
+}
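+// Example of the rejected pattern (illustrative): 'ld [%r6++], %r6' both
+// post-increments %r6 and loads into it, so the base register's final
+// value would be ambiguous.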
+
+static bool IsRegister(const MCParsedAsmOperand &op) {
+ return static_cast<const LanaiOperand &>(op).isReg();
+}
+
+static bool MaybePredicatedInst(const OperandVector &Operands) {
+ if (Operands.size() < 4 || !IsRegister(*Operands[1]) ||
+ !IsRegister(*Operands[2]))
+ return false;
+ return StringSwitch<bool>(
+ static_cast<const LanaiOperand &>(*Operands[0]).getToken())
+ .StartsWith("addc", true)
+ .StartsWith("add", true)
+ .StartsWith("and", true)
+ .StartsWith("sh", true)
+ .StartsWith("subb", true)
+ .StartsWith("sub", true)
+ .StartsWith("or", true)
+ .StartsWith("xor", true)
+ .Default(false);
+}
+
+bool LanaiAsmParser::ParseInstruction(ParseInstructionInfo & /*Info*/,
+ StringRef Name, SMLoc NameLoc,
+ OperandVector &Operands) {
+ // First operand is token for instruction
+ StringRef Mnemonic = splitMnemonic(Name, NameLoc, &Operands);
+
+ // If there are no more operands, then finish
+ if (Lexer.is(AsmToken::EndOfStatement))
+ return false;
+
+ // Parse first operand
+ if (parseOperand(&Operands, Mnemonic) != MatchOperand_Success)
+ return true;
+
+ // If it is a st instruction with one operand, then it is a "store true".
+ // Transform <"st"> to <"s">, <LPCC::ICC_T>.
+ if (Lexer.is(AsmToken::EndOfStatement) && Name == "st" &&
+ Operands.size() == 2) {
+ Operands.erase(Operands.begin(), Operands.begin() + 1);
+ Operands.insert(Operands.begin(), LanaiOperand::CreateToken("s", NameLoc));
+ Operands.insert(Operands.begin() + 1,
+ LanaiOperand::createImm(
+ MCConstantExpr::create(LPCC::ICC_T, getContext()),
+ NameLoc, NameLoc));
+ }
+
+ // If the instruction is a bt instruction with one operand (in assembly),
+ // then it is an unconditional branch instruction and the first two
+ // elements of Operands need to be merged.
+ if (Lexer.is(AsmToken::EndOfStatement) && Name.startswith("bt") &&
+ Operands.size() == 3) {
+ Operands.erase(Operands.begin(), Operands.begin() + 2);
+ Operands.insert(Operands.begin(), LanaiOperand::CreateToken("bt", NameLoc));
+ }
+
+ // Parse until end of statement, consuming commas between operands
+ while (Lexer.isNot(AsmToken::EndOfStatement) && Lexer.is(AsmToken::Comma)) {
+ // Consume comma token
+ Lex();
+
+ // Parse next operand
+ if (parseOperand(&Operands, Mnemonic) != MatchOperand_Success)
+ return true;
+ }
+
+ if (IsMemoryAssignmentError(Operands)) {
+ Error(Parser.getTok().getLoc(),
+ "the destination register can't equal the base register in an "
+ "instruction that modifies the base register.");
+ return true;
+ }
+
+ // Insert an always-true predicate operand for instructions that may be
+ // predicated but are not; the autogenerated parser currently always
+ // expects a predicate.
+ if (MaybePredicatedInst(Operands)) {
+ Operands.insert(Operands.begin() + 1,
+ LanaiOperand::createImm(
+ MCConstantExpr::create(LPCC::ICC_T, getContext()),
+ NameLoc, NameLoc));
+ }
+
+ return false;
+}
+
+#define GET_REGISTER_MATCHER
+#define GET_MATCHER_IMPLEMENTATION
+#include "LanaiGenAsmMatcher.inc"
+} // namespace
+
+extern "C" void LLVMInitializeLanaiAsmParser() {
+ RegisterMCAsmParser<LanaiAsmParser> x(TheLanaiTarget);
+}
+
+} // namespace llvm
diff --git a/lib/Target/Lanai/CMakeLists.txt b/lib/Target/Lanai/CMakeLists.txt
new file mode 100644
index 000000000000..867f6165c253
--- /dev/null
+++ b/lib/Target/Lanai/CMakeLists.txt
@@ -0,0 +1,35 @@
+set(LLVM_TARGET_DEFINITIONS Lanai.td)
+
+tablegen(LLVM LanaiGenAsmMatcher.inc -gen-asm-matcher)
+tablegen(LLVM LanaiGenAsmWriter.inc -gen-asm-writer)
+tablegen(LLVM LanaiGenCallingConv.inc -gen-callingconv)
+tablegen(LLVM LanaiGenDAGISel.inc -gen-dag-isel)
+tablegen(LLVM LanaiGenDisassemblerTables.inc -gen-disassembler)
+tablegen(LLVM LanaiGenInstrInfo.inc -gen-instr-info)
+tablegen(LLVM LanaiGenMCCodeEmitter.inc -gen-emitter)
+tablegen(LLVM LanaiGenRegisterInfo.inc -gen-register-info)
+tablegen(LLVM LanaiGenSubtargetInfo.inc -gen-subtarget)
+add_public_tablegen_target(LanaiCommonTableGen)
+
+add_llvm_target(LanaiCodeGen
+ LanaiAsmPrinter.cpp
+ LanaiDelaySlotFiller.cpp
+ LanaiFrameLowering.cpp
+ LanaiInstrInfo.cpp
+ LanaiISelDAGToDAG.cpp
+ LanaiISelLowering.cpp
+ LanaiMachineFunctionInfo.cpp
+ LanaiMCInstLower.cpp
+ LanaiMemAluCombiner.cpp
+ LanaiRegisterInfo.cpp
+ LanaiSelectionDAGInfo.cpp
+ LanaiSubtarget.cpp
+ LanaiTargetMachine.cpp
+ LanaiTargetObjectFile.cpp
+)
+
+add_subdirectory(AsmParser)
+add_subdirectory(TargetInfo)
+add_subdirectory(MCTargetDesc)
+add_subdirectory(InstPrinter)
+add_subdirectory(Disassembler)
diff --git a/lib/Target/Lanai/Disassembler/CMakeLists.txt b/lib/Target/Lanai/Disassembler/CMakeLists.txt
new file mode 100644
index 000000000000..785c98d8dff1
--- /dev/null
+++ b/lib/Target/Lanai/Disassembler/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_llvm_library(LLVMLanaiDisassembler
+ LanaiDisassembler.cpp
+ )
diff --git a/lib/Target/Lanai/Disassembler/LLVMBuild.txt b/lib/Target/Lanai/Disassembler/LLVMBuild.txt
new file mode 100644
index 000000000000..fe789f90f2e3
--- /dev/null
+++ b/lib/Target/Lanai/Disassembler/LLVMBuild.txt
@@ -0,0 +1,23 @@
+;===-- ./lib/Target/Lanai/Disassembler/LLVMBuild.txt -----------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = LanaiDisassembler
+parent = Lanai
+required_libraries = LanaiMCTargetDesc LanaiInfo MC MCDisassembler Support
+add_to_library_groups = Lanai
diff --git a/lib/Target/Lanai/Disassembler/LanaiDisassembler.cpp b/lib/Target/Lanai/Disassembler/LanaiDisassembler.cpp
new file mode 100644
index 000000000000..744441bc90dd
--- /dev/null
+++ b/lib/Target/Lanai/Disassembler/LanaiDisassembler.cpp
@@ -0,0 +1,240 @@
+//===- LanaiDisassembler.cpp - Disassembler for Lanai -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the Lanai Disassembler.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LanaiDisassembler.h"
+
+#include "Lanai.h"
+#include "LanaiSubtarget.h"
+#include "llvm/MC/MCFixedLenDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MemoryObject.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+typedef MCDisassembler::DecodeStatus DecodeStatus;
+
+namespace llvm {
+extern Target TheLanaiTarget;
+}
+
+static MCDisassembler *createLanaiDisassembler(const Target & /*T*/,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new LanaiDisassembler(STI, Ctx);
+}
+
+extern "C" void LLVMInitializeLanaiDisassembler() {
+ // Register the disassembler
+ TargetRegistry::RegisterMCDisassembler(TheLanaiTarget,
+ createLanaiDisassembler);
+}
+
+LanaiDisassembler::LanaiDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx)
+ : MCDisassembler(STI, Ctx) {}
+
+// Forward declare because the autogenerated code will reference this.
+// Definition is further down.
+DecodeStatus DecodeGPRRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address, const void *Decoder);
+
+static DecodeStatus decodeRiMemoryValue(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
+
+static DecodeStatus decodeRrMemoryValue(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
+
+static DecodeStatus decodeSplsValue(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
+
+static DecodeStatus decodeBranch(MCInst &Inst, unsigned Insn, uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus decodePredicateOperand(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus decodeShiftImm(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
+
+#include "LanaiGenDisassemblerTables.inc"
+
+static DecodeStatus readInstruction32(ArrayRef<uint8_t> Bytes, uint64_t &Size,
+ uint32_t &Insn) {
+ // We want to read exactly 4 bytes of data.
+ if (Bytes.size() < 4) {
+ Size = 0;
+ return MCDisassembler::Fail;
+ }
+
+ // Encoded as big-endian 32-bit word in the stream.
+ Insn =
+ (Bytes[0] << 24) | (Bytes[1] << 16) | (Bytes[2] << 8) | (Bytes[3] << 0);
+
+ return MCDisassembler::Success;
+}
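+// Example (illustrative): the byte sequence 0x12 0x34 0x56 0x78 yields the
+// big-endian instruction word 0x12345678.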
+
+static void PostOperandDecodeAdjust(MCInst &Instr, uint32_t Insn) {
+ unsigned AluOp = LPAC::ADD;
+ // Fix up for pre and post operations.
+ int PqShift = -1;
+ if (isRMOpcode(Instr.getOpcode()))
+ PqShift = 16;
+ else if (isSPLSOpcode(Instr.getOpcode()))
+ PqShift = 10;
+ else if (isRRMOpcode(Instr.getOpcode())) {
+ PqShift = 16;
+ // Determine RRM ALU op.
+ AluOp = (Insn >> 8) & 0x7;
+ if (AluOp == 7)
+ // Handle JJJJJ
+ // 0b10000 or 0b11000
+ AluOp |= 0x20 | (((Insn >> 3) & 0xf) << 1);
+ }
+
+ if (PqShift != -1) {
+ unsigned PQ = (Insn >> PqShift) & 0x3;
+ switch (PQ) {
+ case 0x0:
+ if (Instr.getOperand(2).isReg()) {
+ Instr.getOperand(2).setReg(Lanai::R0);
+ }
+ if (Instr.getOperand(2).isImm())
+ Instr.getOperand(2).setImm(0);
+ break;
+ case 0x1:
+ AluOp = LPAC::makePostOp(AluOp);
+ break;
+ case 0x2:
+ break;
+ case 0x3:
+ AluOp = LPAC::makePreOp(AluOp);
+ break;
+ }
+ Instr.addOperand(MCOperand::createImm(AluOp));
+ }
+}
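+// PQ field handling above (summary): 0 zeroes the offset operand, 1 marks a
+// post operation, 2 keeps the plain ALU op, and 3 marks a pre operation.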
+
+DecodeStatus LanaiDisassembler::getInstruction(
+ MCInst &Instr, uint64_t &Size, ArrayRef<uint8_t> Bytes, uint64_t Address,
+ raw_ostream & /*VStream*/, raw_ostream & /*CStream*/) const {
+ uint32_t Insn;
+
+ DecodeStatus Result = readInstruction32(Bytes, Size, Insn);
+
+ if (Result == MCDisassembler::Fail)
+ return MCDisassembler::Fail;
+
+ // Call auto-generated decoder function
+ Result =
+ decodeInstruction(DecoderTableLanai32, Instr, Insn, Address, this, STI);
+
+ if (Result != MCDisassembler::Fail) {
+ PostOperandDecodeAdjust(Instr, Insn);
+ Size = 4;
+ return Result;
+ }
+
+ return MCDisassembler::Fail;
+}
+
+static const unsigned GPRDecoderTable[] = {
+ Lanai::R0, Lanai::R1, Lanai::PC, Lanai::R3, Lanai::SP, Lanai::FP,
+ Lanai::R6, Lanai::R7, Lanai::RV, Lanai::R9, Lanai::RR1, Lanai::RR2,
+ Lanai::R12, Lanai::R13, Lanai::R14, Lanai::RCA, Lanai::R16, Lanai::R17,
+ Lanai::R18, Lanai::R19, Lanai::R20, Lanai::R21, Lanai::R22, Lanai::R23,
+ Lanai::R24, Lanai::R25, Lanai::R26, Lanai::R27, Lanai::R28, Lanai::R29,
+ Lanai::R30, Lanai::R31};
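+// The table index is the encoded register number, so the aliased names map
+// as r2=PC, r4=SP, r5=FP, r8=RV, r10=RR1, r11=RR2 and r15=RCA.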
+
+DecodeStatus DecodeGPRRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t /*Address*/,
+ const void * /*Decoder*/) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = GPRDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeRiMemoryValue(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ // RI memory values encoded using 23 bits:
+ // 5 bit register, 16 bit constant
+ unsigned Register = (Insn >> 18) & 0x1f;
+ Inst.addOperand(MCOperand::createReg(GPRDecoderTable[Register]));
+ unsigned Offset = (Insn & 0xffff);
+ Inst.addOperand(MCOperand::createImm(SignExtend32<16>(Offset)));
+
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeRrMemoryValue(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ // RR memory values encoded using 20 bits:
+ // 5 bit register, 5 bit register, 2 bit PQ, 3 bit ALU operator, 5 bit JJJJJ
+ unsigned Register = (Insn >> 15) & 0x1f;
+ Inst.addOperand(MCOperand::createReg(GPRDecoderTable[Register]));
+ Register = (Insn >> 10) & 0x1f;
+ Inst.addOperand(MCOperand::createReg(GPRDecoderTable[Register]));
+
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeSplsValue(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ // RI memory values encoded using 17 bits:
+ // 5 bit register, 10 bit constant
+ unsigned Register = (Insn >> 12) & 0x1f;
+ Inst.addOperand(MCOperand::createReg(GPRDecoderTable[Register]));
+ unsigned Offset = (Insn & 0x3ff);
+ Inst.addOperand(MCOperand::createImm(SignExtend32<10>(Offset)));
+
+ return MCDisassembler::Success;
+}
+
+static bool tryAddingSymbolicOperand(int64_t Value, bool IsBranch,
+ uint64_t Address, uint64_t Offset,
+ uint64_t Width, MCInst &MI,
+ const void *Decoder) {
+ const MCDisassembler *Dis = static_cast<const MCDisassembler *>(Decoder);
+ return Dis->tryAddingSymbolicOperand(MI, Value, Address, IsBranch, Offset,
+ Width);
+}
+
+static DecodeStatus decodeBranch(MCInst &MI, unsigned Insn, uint64_t Address,
+ const void *Decoder) {
+ if (!tryAddingSymbolicOperand(Insn + Address, false, Address, 2, 23, MI,
+ Decoder))
+ MI.addOperand(MCOperand::createImm(Insn));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeShiftImm(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ unsigned Offset = (Insn & 0xffff);
+ Inst.addOperand(MCOperand::createImm(SignExtend32<16>(Offset)));
+
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodePredicateOperand(MCInst &Inst, unsigned Val,
+ uint64_t Address,
+ const void *Decoder) {
+ if (Val >= LPCC::UNKNOWN)
+ return MCDisassembler::Fail;
+ Inst.addOperand(MCOperand::createImm(Val));
+ return MCDisassembler::Success;
+}
diff --git a/lib/Target/Lanai/Disassembler/LanaiDisassembler.h b/lib/Target/Lanai/Disassembler/LanaiDisassembler.h
new file mode 100644
index 000000000000..a317cd88ad63
--- /dev/null
+++ b/lib/Target/Lanai/Disassembler/LanaiDisassembler.h
@@ -0,0 +1,41 @@
+//===- LanaiDisassembler.cpp - Disassembler for Lanai -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the Lanai Disassembler.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_DISASSEMBLER_LANAIDISASSEMBLER_H
+#define LLVM_LIB_TARGET_LANAI_DISASSEMBLER_LANAIDISASSEMBLER_H
+
+#define DEBUG_TYPE "lanai-disassembler"
+
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
+
+namespace llvm {
+
+class MCInst;
+class raw_ostream;
+
+class LanaiDisassembler : public MCDisassembler {
+public:
+ LanaiDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx);
+
+ ~LanaiDisassembler() override {}
+
+ // getInstruction - See MCDisassembler.
+ MCDisassembler::DecodeStatus
+ getInstruction(MCInst &Instr, uint64_t &Size, ArrayRef<uint8_t> Bytes,
+ uint64_t Address, raw_ostream &VStream,
+ raw_ostream &CStream) const override;
+};
+
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_DISASSEMBLER_LANAIDISASSEMBLER_H
diff --git a/lib/Target/Lanai/InstPrinter/CMakeLists.txt b/lib/Target/Lanai/InstPrinter/CMakeLists.txt
new file mode 100644
index 000000000000..6badb1c98a6d
--- /dev/null
+++ b/lib/Target/Lanai/InstPrinter/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_llvm_library(LLVMLanaiInstPrinter
+ LanaiInstPrinter.cpp
+ )
diff --git a/lib/Target/Lanai/InstPrinter/LLVMBuild.txt b/lib/Target/Lanai/InstPrinter/LLVMBuild.txt
new file mode 100644
index 000000000000..6366d7eded8f
--- /dev/null
+++ b/lib/Target/Lanai/InstPrinter/LLVMBuild.txt
@@ -0,0 +1,23 @@
+;===-- ./lib/Target/Lanai/InstPrinter/LLVMBuild.txt ------------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = LanaiInstPrinter
+parent = Lanai
+required_libraries = LanaiInfo MC Support
+add_to_library_groups = Lanai
diff --git a/lib/Target/Lanai/InstPrinter/LanaiInstPrinter.cpp b/lib/Target/Lanai/InstPrinter/LanaiInstPrinter.cpp
new file mode 100644
index 000000000000..2fa411fcfd87
--- /dev/null
+++ b/lib/Target/Lanai/InstPrinter/LanaiInstPrinter.cpp
@@ -0,0 +1,305 @@
+//===-- LanaiInstPrinter.cpp - Convert Lanai MCInst to asm syntax ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints a Lanai MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LanaiInstPrinter.h"
+#include "Lanai.h"
+#include "MCTargetDesc/LanaiMCExpr.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "asm-printer"
+
+// Include the auto-generated portion of the assembly writer.
+#define PRINT_ALIAS_INSTR
+#include "LanaiGenAsmWriter.inc"
+
+void LanaiInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
+ OS << StringRef(getRegisterName(RegNo)).lower();
+}
+
+bool LanaiInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
+ StringRef Alias, unsigned OpNo0,
+ unsigned OpNo1) {
+ OS << "\t" << Alias << " ";
+ printOperand(MI, OpNo0, OS);
+ OS << ", ";
+ printOperand(MI, OpNo1, OS);
+ return true;
+}
+
+static bool usesGivenOffset(const MCInst *MI, int AddOffset) {
+ unsigned AluCode = MI->getOperand(3).getImm();
+ return LPAC::encodeLanaiAluCode(AluCode) == LPAC::ADD &&
+ (MI->getOperand(2).getImm() == AddOffset ||
+ MI->getOperand(2).getImm() == -AddOffset);
+}
+
+static bool isPreIncrementForm(const MCInst *MI, int AddOffset) {
+ unsigned AluCode = MI->getOperand(3).getImm();
+ return LPAC::isPreOp(AluCode) && usesGivenOffset(MI, AddOffset);
+}
+
+static bool isPostIncrementForm(const MCInst *MI, int AddOffset) {
+ unsigned AluCode = MI->getOperand(3).getImm();
+ return LPAC::isPostOp(AluCode) && usesGivenOffset(MI, AddOffset);
+}
+
+static StringRef decIncOperator(const MCInst *MI) {
+ if (MI->getOperand(2).getImm() < 0)
+ return "--";
+ return "++";
+}
+
+bool LanaiInstPrinter::printMemoryLoadIncrement(const MCInst *MI,
+ raw_ostream &OS,
+ StringRef Opcode,
+ int AddOffset) {
+ if (isPreIncrementForm(MI, AddOffset)) {
+ OS << "\t" << Opcode << "\t[" << decIncOperator(MI) << "%"
+ << getRegisterName(MI->getOperand(1).getReg()) << "], %"
+ << getRegisterName(MI->getOperand(0).getReg());
+ return true;
+ }
+ if (isPostIncrementForm(MI, AddOffset)) {
+ OS << "\t" << Opcode << "\t[%"
+ << getRegisterName(MI->getOperand(1).getReg()) << decIncOperator(MI)
+ << "], %" << getRegisterName(MI->getOperand(0).getReg());
+ return true;
+ }
+ return false;
+}
+
+bool LanaiInstPrinter::printMemoryStoreIncrement(const MCInst *MI,
+ raw_ostream &OS,
+ StringRef Opcode,
+ int AddOffset) {
+ if (isPreIncrementForm(MI, AddOffset)) {
+ OS << "\t" << Opcode << "\t%" << getRegisterName(MI->getOperand(0).getReg())
+ << ", [" << decIncOperator(MI) << "%"
+ << getRegisterName(MI->getOperand(1).getReg()) << "]";
+ return true;
+ }
+ if (isPostIncrementForm(MI, AddOffset)) {
+ OS << "\t" << Opcode << "\t%" << getRegisterName(MI->getOperand(0).getReg())
+ << ", [%" << getRegisterName(MI->getOperand(1).getReg())
+ << decIncOperator(MI) << "]";
+ return true;
+ }
+ return false;
+}
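+// Examples of the forms printed above, for a base of %sp and an offset of -4
+// with a pre-op ALU code: "ld [--%sp], %r7" for a load and "st %r7, [--%sp]"
+// for a store (register choices are illustrative).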
+
+bool LanaiInstPrinter::printAlias(const MCInst *MI, raw_ostream &OS) {
+ switch (MI->getOpcode()) {
+ case Lanai::LDW_RI:
+    // ld 4[*%rN], %rX  => ld [++%rN], %rX
+    // ld -4[*%rN], %rX => ld [--%rN], %rX
+    // ld 4[%rN*], %rX  => ld [%rN++], %rX
+    // ld -4[%rN*], %rX => ld [%rN--], %rX
+ return printMemoryLoadIncrement(MI, OS, "ld", 4);
+ case Lanai::LDHs_RI:
+ return printMemoryLoadIncrement(MI, OS, "ld.h", 2);
+ case Lanai::LDHz_RI:
+ return printMemoryLoadIncrement(MI, OS, "uld.h", 2);
+ case Lanai::LDBs_RI:
+ return printMemoryLoadIncrement(MI, OS, "ld.b", 1);
+ case Lanai::LDBz_RI:
+ return printMemoryLoadIncrement(MI, OS, "uld.b", 1);
+ case Lanai::SW_RI:
+    // st %rX, 4[*%rN]  => st %rX, [++%rN]
+    // st %rX, -4[*%rN] => st %rX, [--%rN]
+    // st %rX, 4[%rN*]  => st %rX, [%rN++]
+    // st %rX, -4[%rN*] => st %rX, [%rN--]
+ return printMemoryStoreIncrement(MI, OS, "st", 4);
+ case Lanai::STH_RI:
+ return printMemoryStoreIncrement(MI, OS, "st.h", 2);
+ case Lanai::STB_RI:
+ return printMemoryStoreIncrement(MI, OS, "st.b", 1);
+ default:
+ return false;
+ }
+}
+
+void LanaiInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
+ StringRef Annotation,
+ const MCSubtargetInfo & /*STI*/) {
+ if (!printAlias(MI, OS) && !printAliasInstr(MI, OS))
+ printInstruction(MI, OS);
+ printAnnotation(OS, Annotation);
+}
+
+void LanaiInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &OS, const char *Modifier) {
+ assert((Modifier == 0 || Modifier[0] == 0) && "No modifiers supported");
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isReg())
+ OS << "%" << getRegisterName(Op.getReg());
+ else if (Op.isImm())
+ OS << formatHex(Op.getImm());
+ else {
+ assert(Op.isExpr() && "Expected an expression");
+ Op.getExpr()->print(OS, &MAI);
+ }
+}
+
+void LanaiInstPrinter::printMemImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &OS) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isImm()) {
+ OS << '[' << formatHex(Op.getImm()) << ']';
+ } else {
+    // A symbolic operand will be lowered to an immediate value by the linker.
+ assert(Op.isExpr() && "Expected an expression");
+ OS << '[';
+ Op.getExpr()->print(OS, &MAI);
+ OS << ']';
+ }
+}
+
+void LanaiInstPrinter::printHi16ImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &OS) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isImm()) {
+ OS << formatHex(Op.getImm() << 16);
+ } else {
+    // A symbolic operand will be lowered to an immediate value by the linker.
+ assert(Op.isExpr() && "Expected an expression");
+ Op.getExpr()->print(OS, &MAI);
+ }
+}
+
+void LanaiInstPrinter::printHi16AndImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &OS) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isImm()) {
+ OS << formatHex((Op.getImm() << 16) | 0xffff);
+ } else {
+    // A symbolic operand will be lowered to an immediate value by the linker.
+ assert(Op.isExpr() && "Expected an expression");
+ Op.getExpr()->print(OS, &MAI);
+ }
+}
+
+void LanaiInstPrinter::printLo16AndImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &OS) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isImm()) {
+ OS << formatHex(0xffff0000 | Op.getImm());
+ } else {
+    // A symbolic operand will be lowered to an immediate value by the linker.
+ assert(Op.isExpr() && "Expected an expression");
+ Op.getExpr()->print(OS, &MAI);
+ }
+}
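+// Worked examples for the three printers above, given an immediate of 0x12:
+// printHi16ImmOperand prints 0x120000, printHi16AndImmOperand prints
+// 0x12ffff, and printLo16AndImmOperand prints 0xffff0012.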
+
+static void printMemoryBaseRegister(raw_ostream &OS, const unsigned AluCode,
+ const MCOperand &RegOp) {
+ assert(RegOp.isReg() && "Register operand expected");
+ OS << "[";
+ if (LPAC::isPreOp(AluCode))
+ OS << "*";
+ OS << "%" << LanaiInstPrinter::getRegisterName(RegOp.getReg());
+ if (LPAC::isPostOp(AluCode))
+ OS << "*";
+ OS << "]";
+}
+
+template <unsigned SizeInBits>
+static void printMemoryImmediateOffset(const MCAsmInfo &MAI,
+ const MCOperand &OffsetOp,
+ raw_ostream &OS) {
+ assert((OffsetOp.isImm() || OffsetOp.isExpr()) && "Immediate expected");
+ if (OffsetOp.isImm()) {
+ assert(isInt<SizeInBits>(OffsetOp.getImm()) && "Constant value truncated");
+ OS << OffsetOp.getImm();
+ } else
+ OffsetOp.getExpr()->print(OS, &MAI);
+}
+
+void LanaiInstPrinter::printMemRiOperand(const MCInst *MI, int OpNo,
+ raw_ostream &OS,
+ const char * /*Modifier*/) {
+ const MCOperand &RegOp = MI->getOperand(OpNo);
+ const MCOperand &OffsetOp = MI->getOperand(OpNo + 1);
+ const MCOperand &AluOp = MI->getOperand(OpNo + 2);
+ const unsigned AluCode = AluOp.getImm();
+
+ // Offset
+ printMemoryImmediateOffset<16>(MAI, OffsetOp, OS);
+
+ // Register
+ printMemoryBaseRegister(OS, AluCode, RegOp);
+}
+
+void LanaiInstPrinter::printMemRrOperand(const MCInst *MI, int OpNo,
+ raw_ostream &OS,
+ const char * /*Modifier*/) {
+ const MCOperand &RegOp = MI->getOperand(OpNo);
+ const MCOperand &OffsetOp = MI->getOperand(OpNo + 1);
+ const MCOperand &AluOp = MI->getOperand(OpNo + 2);
+ const unsigned AluCode = AluOp.getImm();
+ assert(OffsetOp.isReg() && RegOp.isReg() && "Registers expected.");
+
+ // [ Base OP Offset ]
+ OS << "[";
+ if (LPAC::isPreOp(AluCode))
+ OS << "*";
+ OS << "%" << getRegisterName(RegOp.getReg());
+ if (LPAC::isPostOp(AluCode))
+ OS << "*";
+ OS << " " << LPAC::lanaiAluCodeToString(AluCode) << " ";
+ OS << "%" << getRegisterName(OffsetOp.getReg());
+ OS << "]";
+}
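+// For example, a register-register address using ADD with the pre-op bit set
+// prints as "[*%r6 add %r7]"; without a pre/post modifier it prints as
+// "[%r6 add %r7]" (register choices are illustrative).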
+
+void LanaiInstPrinter::printMemSplsOperand(const MCInst *MI, int OpNo,
+ raw_ostream &OS,
+ const char * /*Modifier*/) {
+ const MCOperand &RegOp = MI->getOperand(OpNo);
+ const MCOperand &OffsetOp = MI->getOperand(OpNo + 1);
+ const MCOperand &AluOp = MI->getOperand(OpNo + 2);
+ const unsigned AluCode = AluOp.getImm();
+
+ // Offset
+ printMemoryImmediateOffset<10>(MAI, OffsetOp, OS);
+
+ // Register
+ printMemoryBaseRegister(OS, AluCode, RegOp);
+}
+
+void LanaiInstPrinter::printCCOperand(const MCInst *MI, int OpNo,
+ raw_ostream &OS) {
+ LPCC::CondCode CC =
+ static_cast<LPCC::CondCode>(MI->getOperand(OpNo).getImm());
+ // Handle the undefined value here for printing so we don't abort().
+ if (CC >= LPCC::UNKNOWN)
+ OS << "<und>";
+ else
+ OS << lanaiCondCodeToString(CC);
+}
+
+void LanaiInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &OS) {
+ LPCC::CondCode CC =
+ static_cast<LPCC::CondCode>(MI->getOperand(OpNo).getImm());
+ // Handle the undefined value here for printing so we don't abort().
+ if (CC >= LPCC::UNKNOWN)
+ OS << "<und>";
+ else if (CC != LPCC::ICC_T)
+ OS << "." << lanaiCondCodeToString(CC);
+}
diff --git a/lib/Target/Lanai/InstPrinter/LanaiInstPrinter.h b/lib/Target/Lanai/InstPrinter/LanaiInstPrinter.h
new file mode 100644
index 000000000000..1c9d186ad819
--- /dev/null
+++ b/lib/Target/Lanai/InstPrinter/LanaiInstPrinter.h
@@ -0,0 +1,65 @@
+//= LanaiInstPrinter.h - Convert Lanai MCInst to asm syntax -------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints a Lanai MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_INSTPRINTER_LANAIINSTPRINTER_H
+#define LLVM_LIB_TARGET_LANAI_INSTPRINTER_LANAIINSTPRINTER_H
+
+#include "llvm/MC/MCInstPrinter.h"
+
+namespace llvm {
+class MCOperand;
+
+class LanaiInstPrinter : public MCInstPrinter {
+public:
+ LanaiInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI)
+ : MCInstPrinter(MAI, MII, MRI) {}
+
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot,
+ const MCSubtargetInfo &STI) override;
+ void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O,
+ const char *Modifier = 0);
+ void printPredicateOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printMemRiOperand(const MCInst *MI, int OpNo, raw_ostream &O,
+ const char *Modifier = 0);
+ void printMemRrOperand(const MCInst *MI, int OpNo, raw_ostream &O,
+ const char *Modifier = 0);
+ void printMemSplsOperand(const MCInst *MI, int OpNo, raw_ostream &O,
+ const char *Modifier = 0);
+ void printCCOperand(const MCInst *MI, int OpNo, raw_ostream &O);
+ void printAluOperand(const MCInst *MI, int OpNo, raw_ostream &O);
+ void printHi16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printHi16AndImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printLo16AndImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printMemImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+
+ // Autogenerated by tblgen.
+ void printInstruction(const MCInst *MI, raw_ostream &O);
+ bool printAliasInstr(const MCInst *MI, raw_ostream &OS);
+ void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx,
+ unsigned PrintMethodIdx, raw_ostream &O);
+ static const char *getRegisterName(unsigned RegNo);
+ void printRegName(raw_ostream &OS, unsigned RegNo) const override;
+
+private:
+ bool printAlias(const MCInst *MI, raw_ostream &Ostream);
+  bool printInst(const MCInst *MI, raw_ostream &Ostream, StringRef Alias,
+                 unsigned OpNo0, unsigned OpNo1);
+ bool printMemoryLoadIncrement(const MCInst *MI, raw_ostream &Ostream,
+ StringRef Opcode, int AddOffset);
+ bool printMemoryStoreIncrement(const MCInst *MI, raw_ostream &Ostream,
+ StringRef Opcode, int AddOffset);
+};
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_INSTPRINTER_LANAIINSTPRINTER_H
diff --git a/lib/Target/Lanai/LLVMBuild.txt b/lib/Target/Lanai/LLVMBuild.txt
new file mode 100644
index 000000000000..91accbbfb762
--- /dev/null
+++ b/lib/Target/Lanai/LLVMBuild.txt
@@ -0,0 +1,45 @@
+;===- ./lib/Target/Lanai/LLVMBuild.txt -------------------------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[common]
+subdirectories = AsmParser Disassembler InstPrinter MCTargetDesc TargetInfo
+
+[component_0]
+type = TargetGroup
+name = Lanai
+parent = Target
+has_asmprinter = 1
+
+[component_1]
+type = Library
+name = LanaiCodeGen
+parent = Lanai
+required_libraries =
+ Analysis
+ AsmPrinter
+ CodeGen
+ Core
+ LanaiAsmParser
+ LanaiMCTargetDesc
+ LanaiInfo
+ LanaiInstPrinter
+ MC
+ SelectionDAG
+ Support
+ Target
+ TransformUtils
+add_to_library_groups = Lanai
diff --git a/lib/Target/Lanai/Lanai.h b/lib/Target/Lanai/Lanai.h
new file mode 100644
index 000000000000..47bd498c579c
--- /dev/null
+++ b/lib/Target/Lanai/Lanai.h
@@ -0,0 +1,51 @@
+//===-- Lanai.h - Top-level interface for Lanai representation --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the entry points for global functions defined in the LLVM
+// Lanai back-end.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_LANAI_H
+#define LLVM_LIB_TARGET_LANAI_LANAI_H
+
+#include "LanaiAluCode.h"
+#include "LanaiCondCode.h"
+#include "MCTargetDesc/LanaiBaseInfo.h"
+#include "MCTargetDesc/LanaiMCTargetDesc.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+class FunctionPass;
+class LanaiTargetMachine;
+class MachineFunctionPass;
+class TargetMachine;
+class formatted_raw_ostream;
+
+// createLanaiISelDag - This pass converts a legalized DAG into a
+// Lanai-specific DAG, ready for instruction scheduling.
+FunctionPass *createLanaiISelDag(LanaiTargetMachine &TM);
+
+// createLanaiDelaySlotFillerPass - This pass fills delay slots
+// with useful instructions or NOPs.
+FunctionPass *createLanaiDelaySlotFillerPass(const LanaiTargetMachine &TM);
+
+// createLanaiMemAluCombinerPass - This pass combines loads/stores and
+// arithmetic operations.
+FunctionPass *createLanaiMemAluCombinerPass();
+
+// createLanaiSetflagAluCombinerPass - This pass combines SET_FLAG and ALU
+// operations.
+FunctionPass *createLanaiSetflagAluCombinerPass();
+
+extern Target TheLanaiTarget;
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_LANAI_H
diff --git a/lib/Target/Lanai/Lanai.td b/lib/Target/Lanai/Lanai.td
new file mode 100644
index 000000000000..73d080457034
--- /dev/null
+++ b/lib/Target/Lanai/Lanai.td
@@ -0,0 +1,47 @@
+//===- Lanai.td - Describe the Lanai Target Machine --------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Target-independent interfaces which we are implementing
+//===----------------------------------------------------------------------===//
+
+include "llvm/Target/Target.td"
+
+//===----------------------------------------------------------------------===//
+// Register File, Calling Conv, Instruction Descriptions
+//===----------------------------------------------------------------------===//
+
+include "LanaiSchedule.td"
+include "LanaiRegisterInfo.td"
+include "LanaiCallingConv.td"
+include "LanaiInstrInfo.td"
+
+def LanaiInstrInfo : InstrInfo;
+
+//===----------------------------------------------------------------------===//
+// Lanai processors supported.
+//===----------------------------------------------------------------------===//
+
+def : ProcessorModel<"generic", LanaiSchedModel, []>;
+def : ProcessorModel<"v11", LanaiSchedModel, []>;
+
+def LanaiInstPrinter : AsmWriter {
+ string AsmWriterClassName = "InstPrinter";
+ bit isMCAsmWriter = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Declare the target which we are implementing
+//===----------------------------------------------------------------------===//
+
+def Lanai : Target {
+ // Pull in Instruction Info:
+ let InstructionSet = LanaiInstrInfo;
+ let AssemblyWriters = [LanaiInstPrinter];
+}
diff --git a/lib/Target/Lanai/LanaiAluCode.h b/lib/Target/Lanai/LanaiAluCode.h
new file mode 100644
index 000000000000..b6ceedef6651
--- /dev/null
+++ b/lib/Target/Lanai/LanaiAluCode.h
@@ -0,0 +1,148 @@
+//===-- LanaiAluCode.h - ALU operator encoding ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The encoding for ALU operators used in RM and RRM operands
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_LANAIALUCODE_H
+#define LLVM_LIB_TARGET_LANAI_LANAIALUCODE_H
+
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+namespace LPAC {
+enum AluCode {
+ ADD = 0x00,
+ ADDC = 0x01,
+ SUB = 0x02,
+ SUBB = 0x03,
+ AND = 0x04,
+ OR = 0x05,
+ XOR = 0x06,
+ SPECIAL = 0x07,
+
+ // Shift instructions are treated as SPECIAL when encoding the machine
+ // instruction, but kept distinct until lowering. The constant values are
+ // chosen to ease lowering.
+ SHL = 0x17,
+ SRL = 0x27,
+ SRA = 0x37,
+
+ // Indicates an unknown/unsupported operator
+ UNKNOWN = 0xFF,
+};
+
+// Bits indicating post- and pre-operators should be tested and set using the
+// is* and make* utility functions.
+constexpr int Lanai_PRE_OP = 0x40;
+constexpr int Lanai_POST_OP = 0x80;
+
+inline static unsigned encodeLanaiAluCode(unsigned AluOp) {
+ unsigned const OP_ENCODING_MASK = 0x07;
+ return AluOp & OP_ENCODING_MASK;
+}
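+// Example: encodeLanaiAluCode(SHL) == SPECIAL, since 0x17 & 0x07 == 0x07; all
+// three shift operators share the SPECIAL encoding in the machine instruction.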
+
+inline static unsigned getAluOp(unsigned AluOp) {
+ unsigned const ALU_MASK = 0x3F;
+ return AluOp & ALU_MASK;
+}
+
+inline static bool isPreOp(unsigned AluOp) { return AluOp & Lanai_PRE_OP; }
+
+inline static bool isPostOp(unsigned AluOp) { return AluOp & Lanai_POST_OP; }
+
+inline static unsigned makePreOp(unsigned AluOp) {
+ assert(!isPostOp(AluOp) && "Operator can't be a post- and pre-op");
+ return AluOp | Lanai_PRE_OP;
+}
+
+inline static unsigned makePostOp(unsigned AluOp) {
+ assert(!isPreOp(AluOp) && "Operator can't be a post- and pre-op");
+ return AluOp | Lanai_POST_OP;
+}
+
+inline static bool modifiesOp(unsigned AluOp) {
+  return isPreOp(AluOp) || isPostOp(AluOp);
+}
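+// Example: makePreOp(ADD) == 0x40 and makePostOp(ADD) == 0x80; both satisfy
+// modifiesOp(), while the plain ADD (0x00) does not.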
+
+inline static const char *lanaiAluCodeToString(unsigned AluOp) {
+ switch (getAluOp(AluOp)) {
+ case ADD:
+ return "add";
+ case ADDC:
+ return "addc";
+ case SUB:
+ return "sub";
+ case SUBB:
+ return "subb";
+ case AND:
+ return "and";
+ case OR:
+ return "or";
+ case XOR:
+ return "xor";
+ case SHL:
+ return "sh";
+ case SRL:
+ return "sh";
+ case SRA:
+ return "sha";
+ default:
+ llvm_unreachable("Invalid ALU code.");
+ }
+}
+
+inline static AluCode stringToLanaiAluCode(StringRef S) {
+ return StringSwitch<AluCode>(S)
+ .Case("add", ADD)
+ .Case("addc", ADDC)
+ .Case("sub", SUB)
+ .Case("subb", SUBB)
+ .Case("and", AND)
+ .Case("or", OR)
+ .Case("xor", XOR)
+ .Case("sh", SHL)
+ .Case("srl", SRL)
+ .Case("sha", SRA)
+ .Default(UNKNOWN);
+}
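+// Note the asymmetry with lanaiAluCodeToString above: SHL and SRL both print
+// as "sh", but "sh" parses back to SHL only.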
+
+inline static AluCode isdToLanaiAluCode(ISD::NodeType Opcode) {
+  switch (Opcode) {
+ case ISD::ADD:
+ return AluCode::ADD;
+ case ISD::ADDE:
+ return AluCode::ADDC;
+ case ISD::SUB:
+ return AluCode::SUB;
+ case ISD::SUBE:
+ return AluCode::SUBB;
+ case ISD::AND:
+ return AluCode::AND;
+ case ISD::OR:
+ return AluCode::OR;
+ case ISD::XOR:
+ return AluCode::XOR;
+ case ISD::SHL:
+ return AluCode::SHL;
+ case ISD::SRL:
+ return AluCode::SRL;
+ case ISD::SRA:
+ return AluCode::SRA;
+ default:
+ return AluCode::UNKNOWN;
+ }
+}
+} // namespace LPAC
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_LANAIALUCODE_H
diff --git a/lib/Target/Lanai/LanaiAsmPrinter.cpp b/lib/Target/Lanai/LanaiAsmPrinter.cpp
new file mode 100644
index 000000000000..9d39cef9f8ed
--- /dev/null
+++ b/lib/Target/Lanai/LanaiAsmPrinter.cpp
@@ -0,0 +1,243 @@
+//===-- LanaiAsmPrinter.cpp - Lanai LLVM assembly writer ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a printer that converts from our internal representation
+// of machine-dependent LLVM code to the Lanai assembly language.
+//
+//===----------------------------------------------------------------------===//
+
+#include "InstPrinter/LanaiInstPrinter.h"
+#include "Lanai.h"
+#include "LanaiInstrInfo.h"
+#include "LanaiMCInstLower.h"
+#include "LanaiTargetMachine.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstBuilder.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "asm-printer"
+
+using namespace llvm;
+
+namespace {
+class LanaiAsmPrinter : public AsmPrinter {
+public:
+ explicit LanaiAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)) {}
+
+ const char *getPassName() const override { return "Lanai Assembly Printer"; }
+
+ void printOperand(const MachineInstr *MI, int OpNum, raw_ostream &O);
+ bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &O) override;
+ void EmitInstruction(const MachineInstr *MI) override;
+ bool isBlockOnlyReachableByFallthrough(
+ const MachineBasicBlock *MBB) const override;
+
+private:
+ void customEmitInstruction(const MachineInstr *MI);
+ void emitCallInstruction(const MachineInstr *MI);
+};
+} // end of anonymous namespace
+
+void LanaiAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
+ raw_ostream &O) {
+ const MachineOperand &MO = MI->getOperand(OpNum);
+
+ switch (MO.getType()) {
+ case MachineOperand::MO_Register:
+ O << LanaiInstPrinter::getRegisterName(MO.getReg());
+ break;
+
+ case MachineOperand::MO_Immediate:
+ O << MO.getImm();
+ break;
+
+ case MachineOperand::MO_MachineBasicBlock:
+ O << *MO.getMBB()->getSymbol();
+ break;
+
+ case MachineOperand::MO_GlobalAddress:
+ O << *getSymbol(MO.getGlobal());
+ break;
+
+ case MachineOperand::MO_BlockAddress: {
+ MCSymbol *BA = GetBlockAddressSymbol(MO.getBlockAddress());
+ O << BA->getName();
+ break;
+ }
+
+ case MachineOperand::MO_ExternalSymbol:
+ O << *GetExternalSymbolSymbol(MO.getSymbolName());
+ break;
+
+ case MachineOperand::MO_JumpTableIndex:
+ O << MAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber() << '_'
+ << MO.getIndex();
+ break;
+
+ case MachineOperand::MO_ConstantPoolIndex:
+ O << MAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << '_'
+ << MO.getIndex();
+ return;
+
+ default:
+ llvm_unreachable("<unknown operand type>");
+ }
+}
+
+// PrintAsmOperand - Print out an operand for an inline asm expression.
+bool LanaiAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned /*AsmVariant*/,
+ const char *ExtraCode, raw_ostream &O) {
+ // Does this asm operand have a single letter operand modifier?
+ if (ExtraCode && ExtraCode[0]) {
+ if (ExtraCode[1])
+ return true; // Unknown modifier.
+
+ switch (ExtraCode[0]) {
+ // The highest-numbered register of a pair.
+ case 'H': {
+ if (OpNo == 0)
+ return true;
+ const MachineOperand &FlagsOP = MI->getOperand(OpNo - 1);
+ if (!FlagsOP.isImm())
+ return true;
+ unsigned Flags = FlagsOP.getImm();
+ unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
+ if (NumVals != 2)
+ return true;
+ unsigned RegOp = OpNo + 1;
+ if (RegOp >= MI->getNumOperands())
+ return true;
+ const MachineOperand &MO = MI->getOperand(RegOp);
+ if (!MO.isReg())
+ return true;
+ unsigned Reg = MO.getReg();
+ O << LanaiInstPrinter::getRegisterName(Reg);
+ return false;
+ }
+ default:
+ return true; // Unknown modifier.
+ }
+ }
+ printOperand(MI, OpNo, O);
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+void LanaiAsmPrinter::emitCallInstruction(const MachineInstr *MI) {
+ assert((MI->getOpcode() == Lanai::CALL || MI->getOpcode() == Lanai::CALLR) &&
+ "Unsupported call function");
+
+ LanaiMCInstLower MCInstLowering(OutContext, *Mang, *this);
+ MCSubtargetInfo STI = getSubtargetInfo();
+ // Insert save rca instruction immediately before the call.
+ // TODO: We should generate a pc-relative mov instruction here instead
+ // of pc + 16 (should be mov .+16 %rca).
+ OutStreamer->EmitInstruction(MCInstBuilder(Lanai::ADD_I_LO)
+ .addReg(Lanai::RCA)
+ .addReg(Lanai::PC)
+ .addImm(16),
+ STI);
+
+ // Push rca onto the stack.
+ // st %rca, [--%sp]
+ OutStreamer->EmitInstruction(MCInstBuilder(Lanai::SW_RI)
+ .addReg(Lanai::RCA)
+ .addReg(Lanai::SP)
+ .addImm(-4)
+ .addImm(LPAC::makePreOp(LPAC::ADD)),
+ STI);
+
+ // Lower the call instruction.
+ if (MI->getOpcode() == Lanai::CALL) {
+ MCInst TmpInst;
+ MCInstLowering.Lower(MI, TmpInst);
+ TmpInst.setOpcode(Lanai::BT);
+ OutStreamer->EmitInstruction(TmpInst, STI);
+ } else {
+ OutStreamer->EmitInstruction(MCInstBuilder(Lanai::ADD_R)
+ .addReg(Lanai::PC)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(Lanai::R0)
+ .addImm(LPCC::ICC_T),
+ STI);
+ }
+}
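+// Net effect for a direct call: the return address (%pc + 16) is materialized
+// into %rca, %rca is pushed with a pre-decrement store, and the call itself is
+// emitted as an unconditional branch (bt).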
+
+void LanaiAsmPrinter::customEmitInstruction(const MachineInstr *MI) {
+ LanaiMCInstLower MCInstLowering(OutContext, *Mang, *this);
+ MCSubtargetInfo STI = getSubtargetInfo();
+ MCInst TmpInst;
+ MCInstLowering.Lower(MI, TmpInst);
+ OutStreamer->EmitInstruction(TmpInst, STI);
+}
+
+void LanaiAsmPrinter::EmitInstruction(const MachineInstr *MI) {
+ MachineBasicBlock::const_instr_iterator I = MI->getIterator();
+ MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
+
+ do {
+ if (I->isCall()) {
+ emitCallInstruction(&*I);
+ continue;
+ }
+
+ customEmitInstruction(&*I);
+ } while ((++I != E) && I->isInsideBundle());
+}
+
+// isBlockOnlyReachableByFallthrough - Return true if the basic block has
+// exactly one predecessor and the control transfer mechanism between
+// the predecessor and this block is a fall-through.
+// FIXME: could the overridden cases be handled in AnalyzeBranch?
+bool LanaiAsmPrinter::isBlockOnlyReachableByFallthrough(
+ const MachineBasicBlock *MBB) const {
+ // The predecessor has to be immediately before this block.
+ const MachineBasicBlock *Pred = *MBB->pred_begin();
+
+ // If the predecessor is a switch statement, assume a jump table
+ // implementation, so it is not a fall through.
+ if (const BasicBlock *B = Pred->getBasicBlock())
+ if (isa<SwitchInst>(B->getTerminator()))
+ return false;
+
+ // Check default implementation
+ if (!AsmPrinter::isBlockOnlyReachableByFallthrough(MBB))
+ return false;
+
+ // Otherwise, check the last instruction.
+ // Check if the last terminator is an unconditional branch.
+ MachineBasicBlock::const_iterator I = Pred->end();
+ while (I != Pred->begin() && !(--I)->isTerminator()) {
+ }
+
+ return !I->isBarrier();
+}
+
+// Force static initialization.
+extern "C" void LLVMInitializeLanaiAsmPrinter() {
+ RegisterAsmPrinter<LanaiAsmPrinter> X(TheLanaiTarget);
+}
diff --git a/lib/Target/Lanai/LanaiCallingConv.td b/lib/Target/Lanai/LanaiCallingConv.td
new file mode 100644
index 000000000000..056b329c33c5
--- /dev/null
+++ b/lib/Target/Lanai/LanaiCallingConv.td
@@ -0,0 +1,50 @@
+//===- LanaiCallingConv.td - Calling Conventions for Lanai -*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This describes the calling conventions for the Lanai architectures.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Return Value Calling Conventions
+//===----------------------------------------------------------------------===//
+
+// Lanai 32-bit C Calling convention.
+def CC_Lanai32 : CallingConv<[
+ // Promote i8/i16 args to i32
+ CCIfType<[i8, i16], CCPromoteToType<i32>>,
+
+ // Put argument in registers if marked 'inreg' and not a vararg call.
+ CCIfNotVarArg<CCIfInReg<CCIfType<[i32],
+ CCAssignToReg<[R6, R7, R18, R19]>>>>,
+
+ // Otherwise they are assigned to the stack in 4-byte aligned units.
+ CCAssignToStack<4, 4>
+]>;
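+// For example, in a non-vararg call the first four i32 arguments marked
+// 'inreg' land in R6, R7, R18 and R19 (an i8 or i16 argument is promoted to
+// i32 first); all remaining arguments are passed in 4-byte stack slots.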
+
+// Lanai 32-bit Fast Calling convention.
+def CC_Lanai32_Fast : CallingConv<[
+ // Promote i8/i16 args to i32
+ CCIfType<[ i8, i16 ], CCPromoteToType<i32>>,
+
+ // Put arguments in registers.
+ CCIfNotVarArg<CCIfType<[i32], CCAssignToReg<[ R6, R7, R18, R19 ]>>>,
+
+ // Otherwise they are assigned to the stack in 4-byte aligned units.
+ CCAssignToStack<4, 4>
+]>;
+
+// Lanai 32-bit C return-value convention.
+def RetCC_Lanai32 : CallingConv<[
+ // Specify two registers to allow returning 64-bit results that have already
+ // been lowered to 2 32-bit values.
+ CCIfType<[i32], CCAssignToReg<[RV, R9]>>
+]>;
+
+def CSR: CalleeSavedRegs<(add)>;
diff --git a/lib/Target/Lanai/LanaiCondCode.h b/lib/Target/Lanai/LanaiCondCode.h
new file mode 100644
index 000000000000..6c5bdefc83dc
--- /dev/null
+++ b/lib/Target/Lanai/LanaiCondCode.h
@@ -0,0 +1,100 @@
+// The encoding used for condition codes in BR instructions.
+
+#ifndef LLVM_LIB_TARGET_LANAI_LANAICONDCODE_H
+#define LLVM_LIB_TARGET_LANAI_LANAICONDCODE_H
+
+#include "llvm/ADT/StringSwitch.h"
+
+namespace llvm {
+namespace LPCC {
+enum CondCode {
+ ICC_T = 0, // true
+ ICC_F = 1, // false
+ ICC_HI = 2, // high
+ ICC_UGT = 2, // unsigned greater than
+ ICC_LS = 3, // low or same
+ ICC_ULE = 3, // unsigned less than or equal
+ ICC_CC = 4, // carry cleared
+ ICC_ULT = 4, // unsigned less than
+ ICC_CS = 5, // carry set
+ ICC_UGE = 5, // unsigned greater than or equal
+ ICC_NE = 6, // not equal
+ ICC_EQ = 7, // equal
+ ICC_VC = 8, // oVerflow cleared
+ ICC_VS = 9, // oVerflow set
+ ICC_PL = 10, // plus
+ ICC_MI = 11, // minus
+ ICC_GE = 12, // greater than or equal
+ ICC_LT = 13, // less than
+ ICC_GT = 14, // greater than
+ ICC_LE = 15, // less than or equal
+ UNKNOWN
+};
+
+inline static StringRef lanaiCondCodeToString(LPCC::CondCode CC) {
+ switch (CC) {
+ case LPCC::ICC_T:
+ return "t"; // true
+ case LPCC::ICC_F:
+ return "f"; // false
+ case LPCC::ICC_NE:
+ return "ne"; // not equal
+ case LPCC::ICC_EQ:
+ return "eq"; // equal
+ case LPCC::ICC_VC:
+ return "vc"; // oVerflow cleared
+ case LPCC::ICC_VS:
+ return "vs"; // oVerflow set
+ case LPCC::ICC_PL:
+ return "pl"; // plus
+ case LPCC::ICC_MI:
+ return "mi"; // minus
+ case LPCC::ICC_GE:
+ return "ge"; // greater than or equal
+ case LPCC::ICC_LT:
+ return "lt"; // less than
+ case LPCC::ICC_GT:
+ return "gt"; // greater than
+ case LPCC::ICC_LE:
+ return "le"; // less than or equal
+ case LPCC::ICC_UGT:
+ return "ugt"; // high | unsigned greater than
+ case LPCC::ICC_ULE:
+ return "ule"; // low or same | unsigned less or equal
+ case LPCC::ICC_ULT:
+ return "ult"; // carry cleared | unsigned less than
+ case LPCC::ICC_UGE:
+ return "uge"; // carry set | unsigned than or equal
+ default:
+ llvm_unreachable("Invalid cond code");
+ }
+}
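+// ICC_HI/ICC_UGT, ICC_LS/ICC_ULE, ICC_CC/ICC_ULT and ICC_CS/ICC_UGE share
+// encodings, so only the unsigned mnemonic is printed for each such pair.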
+
+inline static CondCode suffixToLanaiCondCode(StringRef S) {
+ return StringSwitch<CondCode>(S)
+ .EndsWith("f", LPCC::ICC_F)
+ .EndsWith("hi", LPCC::ICC_HI)
+ .EndsWith("ugt", LPCC::ICC_UGT)
+ .EndsWith("ls", LPCC::ICC_LS)
+ .EndsWith("ule", LPCC::ICC_ULE)
+ .EndsWith("cc", LPCC::ICC_CC)
+ .EndsWith("ult", LPCC::ICC_ULT)
+ .EndsWith("cs", LPCC::ICC_CS)
+ .EndsWith("uge", LPCC::ICC_UGE)
+ .EndsWith("ne", LPCC::ICC_NE)
+ .EndsWith("eq", LPCC::ICC_EQ)
+ .EndsWith("vc", LPCC::ICC_VC)
+ .EndsWith("vs", LPCC::ICC_VS)
+ .EndsWith("pl", LPCC::ICC_PL)
+ .EndsWith("mi", LPCC::ICC_MI)
+ .EndsWith("ge", LPCC::ICC_GE)
+ .EndsWith("lt", LPCC::ICC_LT)
+ .EndsWith("gt", LPCC::ICC_GT)
+ .EndsWith("le", LPCC::ICC_LE)
+ .EndsWith("t", LPCC::ICC_T) // Has to be after others with suffix t
+ .Default(LPCC::UNKNOWN);
+}
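+// Example: suffixToLanaiCondCode("bne") yields ICC_NE and
+// suffixToLanaiCondCode("bt") yields ICC_T; "t" is matched last so that
+// suffixes such as "ugt" and "lt" take precedence.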
+} // namespace LPCC
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_LANAICONDCODE_H
diff --git a/lib/Target/Lanai/LanaiDelaySlotFiller.cpp b/lib/Target/Lanai/LanaiDelaySlotFiller.cpp
new file mode 100644
index 000000000000..7b106547d60b
--- /dev/null
+++ b/lib/Target/Lanai/LanaiDelaySlotFiller.cpp
@@ -0,0 +1,263 @@
+//===-- LanaiDelaySlotFiller.cpp - Lanai delay slot filler ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A simple pass that fills delay slots with useful instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Lanai.h"
+#include "LanaiTargetMachine.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "delay-slot-filler"
+
+STATISTIC(FilledSlots, "Number of delay slots filled");
+
+static cl::opt<bool>
+ NopDelaySlotFiller("lanai-nop-delay-filler", cl::init(false),
+ cl::desc("Fill Lanai delay slots with NOPs."),
+ cl::Hidden);
+
+namespace {
+struct Filler : public MachineFunctionPass {
+ // Target machine description which we query for reg. names, data
+ // layout, etc.
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ MachineBasicBlock::instr_iterator LastFiller;
+
+ static char ID;
+ explicit Filler() : MachineFunctionPass(ID) {}
+
+ const char *getPassName() const override { return "Lanai Delay Slot Filler"; }
+
+ bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
+
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ const LanaiSubtarget &Subtarget = MF.getSubtarget<LanaiSubtarget>();
+ TII = Subtarget.getInstrInfo();
+ TRI = Subtarget.getRegisterInfo();
+
+ bool Changed = false;
+ for (MachineFunction::iterator FI = MF.begin(), FE = MF.end(); FI != FE;
+ ++FI)
+ Changed |= runOnMachineBasicBlock(*FI);
+ return Changed;
+ }
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
+ void insertDefsUses(MachineBasicBlock::instr_iterator MI,
+ SmallSet<unsigned, 32> &RegDefs,
+ SmallSet<unsigned, 32> &RegUses);
+
+ bool isRegInSet(SmallSet<unsigned, 32> &RegSet, unsigned Reg);
+
+ bool delayHasHazard(MachineBasicBlock::instr_iterator MI, bool &SawLoad,
+ bool &SawStore, SmallSet<unsigned, 32> &RegDefs,
+ SmallSet<unsigned, 32> &RegUses);
+
+ bool findDelayInstr(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator Slot,
+ MachineBasicBlock::instr_iterator &Filler);
+};
+char Filler::ID = 0;
+} // end of anonymous namespace
+
+// createLanaiDelaySlotFillerPass - Returns a pass that fills in delay
+// slots in Lanai MachineFunctions
+FunctionPass *
+llvm::createLanaiDelaySlotFillerPass(const LanaiTargetMachine & /*tm*/) {
+ return new Filler();
+}
+
+// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
+// There are one or two delay slots per delayed instruction.
+bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
+ bool Changed = false;
+ LastFiller = MBB.instr_end();
+
+ for (MachineBasicBlock::instr_iterator I = MBB.instr_begin();
+ I != MBB.instr_end(); ++I) {
+ if (I->getDesc().hasDelaySlot()) {
+ MachineBasicBlock::instr_iterator InstrWithSlot = I;
+ MachineBasicBlock::instr_iterator J = I;
+
+      // Treat RET specially as it is the only instruction generated with two
+      // delay slots; all other delayed instructions have one.
+ if (I->getOpcode() == Lanai::RET) {
+ // RET is generated as part of epilogue generation and hence we know
+ // what the two instructions preceding it are and that it is safe to
+ // insert RET above them.
+ MachineBasicBlock::reverse_instr_iterator RI(I);
+ assert(RI->getOpcode() == Lanai::LDW_RI && RI->getOperand(0).isReg() &&
+ RI->getOperand(0).getReg() == Lanai::FP &&
+ RI->getOperand(1).isReg() &&
+ RI->getOperand(1).getReg() == Lanai::FP &&
+ RI->getOperand(2).isImm() && RI->getOperand(2).getImm() == -8);
+ ++RI;
+ assert(RI->getOpcode() == Lanai::ADD_I_LO &&
+ RI->getOperand(0).isReg() &&
+ RI->getOperand(0).getReg() == Lanai::SP &&
+ RI->getOperand(1).isReg() &&
+ RI->getOperand(1).getReg() == Lanai::FP);
+ ++RI;
+ MachineBasicBlock::instr_iterator FI(RI.base());
+ MBB.splice(std::next(I), &MBB, FI, I);
+ FilledSlots += 2;
+ } else {
+ if (!NopDelaySlotFiller && findDelayInstr(MBB, I, J)) {
+ MBB.splice(std::next(I), &MBB, J);
+ } else {
+ BuildMI(MBB, std::next(I), DebugLoc(), TII->get(Lanai::NOP));
+ }
+ ++FilledSlots;
+ }
+
+ Changed = true;
+ // Record the filler instruction that filled the delay slot.
+ // The instruction after it will be visited in the next iteration.
+ LastFiller = ++I;
+
+ // Bundle the delay slot filler to InstrWithSlot so that the machine
+ // verifier doesn't expect this instruction to be a terminator.
+ MIBundleBuilder(MBB, InstrWithSlot, std::next(LastFiller));
+ }
+ }
+ return Changed;
+}
+
+bool Filler::findDelayInstr(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator Slot,
+ MachineBasicBlock::instr_iterator &Filler) {
+ SmallSet<unsigned, 32> RegDefs;
+ SmallSet<unsigned, 32> RegUses;
+
+ insertDefsUses(Slot, RegDefs, RegUses);
+
+ bool SawLoad = false;
+ bool SawStore = false;
+
+ for (MachineBasicBlock::reverse_instr_iterator I(Slot); I != MBB.instr_rend();
+ ++I) {
+ // skip debug value
+ if (I->isDebugValue())
+ continue;
+
+ // Convert to forward iterator.
+ MachineBasicBlock::instr_iterator FI(std::next(I).base());
+
+ if (I->hasUnmodeledSideEffects() || I->isInlineAsm() || I->isLabel() ||
+ FI == LastFiller || I->isPseudo())
+ break;
+
+ if (delayHasHazard(FI, SawLoad, SawStore, RegDefs, RegUses)) {
+ insertDefsUses(FI, RegDefs, RegUses);
+ continue;
+ }
+ Filler = FI;
+ return true;
+ }
+ return false;
+}
+
+bool Filler::delayHasHazard(MachineBasicBlock::instr_iterator MI, bool &SawLoad,
+ bool &SawStore, SmallSet<unsigned, 32> &RegDefs,
+ SmallSet<unsigned, 32> &RegUses) {
+ if (MI->isImplicitDef() || MI->isKill())
+ return true;
+
+ // Loads or stores cannot be moved past a store to the delay slot
+ // and stores cannot be moved past a load.
+ if (MI->mayLoad()) {
+ if (SawStore)
+ return true;
+ SawLoad = true;
+ }
+
+ if (MI->mayStore()) {
+ if (SawStore)
+ return true;
+ SawStore = true;
+ if (SawLoad)
+ return true;
+ }
+
+ assert((!MI->isCall() && !MI->isReturn()) &&
+ "Cannot put calls or returns in delay slot.");
+
+ for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
+ const MachineOperand &MO = MI->getOperand(I);
+ unsigned Reg;
+
+ if (!MO.isReg() || !(Reg = MO.getReg()))
+ continue; // skip
+
+ if (MO.isDef()) {
+ // check whether Reg is defined or used before delay slot.
+ if (isRegInSet(RegDefs, Reg) || isRegInSet(RegUses, Reg))
+ return true;
+ }
+ if (MO.isUse()) {
+ // check whether Reg is defined before delay slot.
+ if (isRegInSet(RegDefs, Reg))
+ return true;
+ }
+ }
+ return false;
+}
+
+// Insert Defs and Uses of MI into the sets RegDefs and RegUses.
+void Filler::insertDefsUses(MachineBasicBlock::instr_iterator MI,
+ SmallSet<unsigned, 32> &RegDefs,
+ SmallSet<unsigned, 32> &RegUses) {
+ // If MI is a call or return, just examine the explicit non-variadic operands.
+ MCInstrDesc MCID = MI->getDesc();
+ unsigned E = MI->isCall() || MI->isReturn() ? MCID.getNumOperands()
+ : MI->getNumOperands();
+ for (unsigned I = 0; I != E; ++I) {
+ const MachineOperand &MO = MI->getOperand(I);
+ unsigned Reg;
+
+ if (!MO.isReg() || !(Reg = MO.getReg()))
+ continue;
+
+ if (MO.isDef())
+ RegDefs.insert(Reg);
+ else if (MO.isUse())
+ RegUses.insert(Reg);
+ }
+
+ // Call & return instructions defines SP implicitly. Implicit defines are not
+ // included in the RegDefs set of calls but instructions modifying SP cannot
+ // be inserted in the delay slot of a call/return as these instructions are
+ // expanded to multiple instructions with SP modified before the branch that
+ // has the delay slot.
+ if (MI->isCall() || MI->isReturn())
+ RegDefs.insert(Lanai::SP);
+}
+
+// Returns true if the Reg or its alias is in the RegSet.
+bool Filler::isRegInSet(SmallSet<unsigned, 32> &RegSet, unsigned Reg) {
+ // Check Reg and all aliased Registers.
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ if (RegSet.count(*AI))
+ return true;
+ return false;
+}
diff --git a/lib/Target/Lanai/LanaiFrameLowering.cpp b/lib/Target/Lanai/LanaiFrameLowering.cpp
new file mode 100644
index 000000000000..cb048d568df7
--- /dev/null
+++ b/lib/Target/Lanai/LanaiFrameLowering.cpp
@@ -0,0 +1,220 @@
+//===-- LanaiFrameLowering.cpp - Lanai Frame Information ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Lanai implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LanaiFrameLowering.h"
+
+#include "LanaiInstrInfo.h"
+#include "LanaiMachineFunctionInfo.h"
+#include "LanaiSubtarget.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Function.h"
+
+using namespace llvm;
+
+// Determines the size of the frame and maximum call frame size.
+void LanaiFrameLowering::determineFrameLayout(MachineFunction &MF) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const LanaiRegisterInfo *LRI = STI.getRegisterInfo();
+
+ // Get the number of bytes to allocate from the FrameInfo.
+ unsigned FrameSize = MFI->getStackSize();
+
+ // Get the alignment.
+ unsigned StackAlign = LRI->needsStackRealignment(MF) ? MFI->getMaxAlignment()
+ : getStackAlignment();
+
+ // Get the maximum call frame size of all the calls.
+ unsigned MaxCallFrameSize = MFI->getMaxCallFrameSize();
+
+ // If we have dynamic alloca then MaxCallFrameSize needs to be aligned so
+ // that allocations will be aligned.
+ if (MFI->hasVarSizedObjects())
+ MaxCallFrameSize = alignTo(MaxCallFrameSize, StackAlign);
+
+ // Update maximum call frame size.
+ MFI->setMaxCallFrameSize(MaxCallFrameSize);
+
+ // Include call frame size in total.
+ if (!(hasReservedCallFrame(MF) && MFI->adjustsStack()))
+ FrameSize += MaxCallFrameSize;
+
+ // Make sure the frame is aligned.
+ FrameSize = alignTo(FrameSize, StackAlign);
+
+ // Update frame info.
+ MFI->setStackSize(FrameSize);
+}
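+// Worked example: with a stack size of 20, a maximum call frame size of 10,
+// an alignment of 8 and var-sized objects present, MaxCallFrameSize is first
+// aligned up to 16 and the final frame size is alignTo(20 + 16, 8) == 40.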
+
+// Iterates through each basic block in a machine function and replaces
+// ADJDYNALLOC pseudo instructions with a Lanai::ADD_I_LO that uses the
+// maximum call frame size as its immediate.
+void LanaiFrameLowering::replaceAdjDynAllocPseudo(MachineFunction &MF) const {
+ const LanaiInstrInfo &LII =
+ *static_cast<const LanaiInstrInfo *>(STI.getInstrInfo());
+ unsigned MaxCallFrameSize = MF.getFrameInfo()->getMaxCallFrameSize();
+
+ for (MachineFunction::iterator MBB = MF.begin(), E = MF.end(); MBB != E;
+ ++MBB) {
+ MachineBasicBlock::iterator MBBI = MBB->begin();
+ while (MBBI != MBB->end()) {
+ MachineInstr &MI = *MBBI++;
+ if (MI.getOpcode() == Lanai::ADJDYNALLOC) {
+ DebugLoc DL = MI.getDebugLoc();
+ unsigned Dst = MI.getOperand(0).getReg();
+ unsigned Src = MI.getOperand(1).getReg();
+
+ BuildMI(*MBB, MI, DL, LII.get(Lanai::ADD_I_LO), Dst)
+ .addReg(Src)
+ .addImm(MaxCallFrameSize);
+ MI.eraseFromParent();
+ }
+ }
+ }
+}
+
+// Generates the following sequence for function entry:
+// st %fp,-4[*%sp] !push old FP
+// add %sp,8,%fp !generate new FP
+// sub %sp,0x4,%sp !allocate stack space (as needed)
+void LanaiFrameLowering::emitPrologue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
+
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const LanaiInstrInfo &LII =
+ *static_cast<const LanaiInstrInfo *>(STI.getInstrInfo());
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+
+ // Debug location must be unknown since the first debug location is used
+ // to determine the end of the prologue.
+ DebugLoc DL;
+
+ // Determine the correct frame layout
+ determineFrameLayout(MF);
+
+ // FIXME: This appears to be overallocating. Needs investigation.
+ // Get the number of bytes to allocate from the FrameInfo.
+ unsigned StackSize = MFI->getStackSize();
+
+ // Push old FP
+ // st %fp,-4[*%sp]
+ BuildMI(MBB, MBBI, DL, LII.get(Lanai::SW_RI))
+ .addReg(Lanai::FP)
+ .addReg(Lanai::SP)
+ .addImm(-4)
+ .addImm(LPAC::makePreOp(LPAC::ADD))
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ // Generate new FP
+ // add %sp,8,%fp
+ BuildMI(MBB, MBBI, DL, LII.get(Lanai::ADD_I_LO), Lanai::FP)
+ .addReg(Lanai::SP)
+ .addImm(8)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ // Allocate space on the stack if needed
+ // sub %sp,StackSize,%sp
+ if (StackSize != 0) {
+ BuildMI(MBB, MBBI, DL, LII.get(Lanai::SUB_I_LO), Lanai::SP)
+ .addReg(Lanai::SP)
+ .addImm(StackSize)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+  // Replace ADJDYNALLOC pseudo instructions.
+ if (MFI->hasVarSizedObjects())
+ replaceAdjDynAllocPseudo(MF);
+}
+
+MachineBasicBlock::iterator LanaiFrameLowering::eliminateCallFramePseudoInstr(
+ MachineFunction & /*MF*/, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ // Discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions.
+ return MBB.erase(I);
+}
+
+// The function epilogue should not depend on the current stack pointer!
+// It should use the frame pointer only. This is mandatory because
+// of alloca; we also take advantage of it to omit stack adjustments
+// before returning.
+//
+// Note that when we go to restore the preserved register values we must
+// not try to address their slots by using offsets from the stack pointer.
+// That's because the stack pointer may have been moved during the function
+// execution due to a call to alloca(). Rather, we must restore all
+// preserved registers via offsets from the frame pointer value.
+//
+// Note also that when the current frame is being "popped" (by adjusting
+// the value of the stack pointer) on function exit, we must (for the
+// sake of alloca) set the new value of the stack pointer based upon
+// the current value of the frame pointer. We can't just add what we
+// believe to be the (static) frame size to the stack pointer because
+// if we did that, and alloca() had been called during this function,
+// we would end up returning *without* having fully deallocated all of
+// the space grabbed by alloca. If that happened, and a function
+// containing one or more alloca() calls was called over and over again,
+// then the stack would grow without limit!
+//
+// RET is lowered to
+// ld -4[%fp],%pc # modify %pc (two delay slots)
+// as the return address is in the stack frame and mov to pc is allowed.
+// emitEpilogue emits
+// mov %fp,%sp # restore the stack pointer
+// ld -8[%fp],%fp # restore the caller's frame pointer
+// before RET and the delay slot filler will move RET such that these
+// instructions execute in the delay slots of the load to PC.
+void LanaiFrameLowering::emitEpilogue(MachineFunction & /*MF*/,
+ MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ const LanaiInstrInfo &LII =
+ *static_cast<const LanaiInstrInfo *>(STI.getInstrInfo());
+ DebugLoc DL = MBBI->getDebugLoc();
+
+ // Restore the stack pointer using the callee's frame pointer value.
+ BuildMI(MBB, MBBI, DL, LII.get(Lanai::ADD_I_LO), Lanai::SP)
+ .addReg(Lanai::FP)
+ .addImm(0);
+
+ // Restore the frame pointer from the stack.
+ BuildMI(MBB, MBBI, DL, LII.get(Lanai::LDW_RI), Lanai::FP)
+ .addReg(Lanai::FP)
+ .addImm(-8)
+ .addImm(LPAC::ADD);
+}
+
+void LanaiFrameLowering::determineCalleeSaves(MachineFunction &MF,
+ BitVector &SavedRegs,
+ RegScavenger *RS) const {
+ TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
+
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const LanaiRegisterInfo *LRI =
+ static_cast<const LanaiRegisterInfo *>(STI.getRegisterInfo());
+ int Offset = -4;
+
+ // Reserve 4 bytes for the saved RCA
+ MFI->CreateFixedObject(4, Offset, true);
+ Offset -= 4;
+
+ // Reserve 4 bytes for the saved FP
+ MFI->CreateFixedObject(4, Offset, true);
+ Offset -= 4;
+
+ if (LRI->hasBasePointer(MF)) {
+ MFI->CreateFixedObject(4, Offset, true);
+ SavedRegs.reset(LRI->getBaseRegister());
+ }
+}
diff --git a/lib/Target/Lanai/LanaiFrameLowering.h b/lib/Target/Lanai/LanaiFrameLowering.h
new file mode 100644
index 000000000000..2f9b6c3c158f
--- /dev/null
+++ b/lib/Target/Lanai/LanaiFrameLowering.h
@@ -0,0 +1,57 @@
+//===-- LanaiFrameLowering.h - Define frame lowering for Lanai --*- C++-*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class implements Lanai-specific bits of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_LANAIFRAMELOWERING_H
+#define LLVM_LIB_TARGET_LANAI_LANAIFRAMELOWERING_H
+
+#include "Lanai.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+
+class BitVector;
+class LanaiSubtarget;
+
+class LanaiFrameLowering : public TargetFrameLowering {
+private:
+ void determineFrameLayout(MachineFunction &MF) const;
+ void replaceAdjDynAllocPseudo(MachineFunction &MF) const;
+
+protected:
+ const LanaiSubtarget &STI;
+
+public:
+ explicit LanaiFrameLowering(const LanaiSubtarget &Subtarget)
+ : TargetFrameLowering(StackGrowsDown,
+ /*StackAlignment=*/8,
+ /*LocalAreaOffset=*/0),
+ STI(Subtarget) {}
+
+ // emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ // the function.
+ void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+
+ MachineBasicBlock::iterator
+ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
+
+ bool hasFP(const MachineFunction & /*MF*/) const override { return true; }
+
+ void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
+ RegScavenger *RS = nullptr) const override;
+};
+
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_LANAIFRAMELOWERING_H
diff --git a/lib/Target/Lanai/LanaiISelDAGToDAG.cpp b/lib/Target/Lanai/LanaiISelDAGToDAG.cpp
new file mode 100644
index 000000000000..29bc6e8a6c56
--- /dev/null
+++ b/lib/Target/Lanai/LanaiISelDAGToDAG.cpp
@@ -0,0 +1,317 @@
+//===-- LanaiISelDAGToDAG.cpp - A dag to dag inst selector for Lanai ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an instruction selector for the Lanai target.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Lanai.h"
+#include "LanaiMachineFunctionInfo.h"
+#include "LanaiRegisterInfo.h"
+#include "LanaiSubtarget.h"
+#include "LanaiTargetMachine.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "lanai-isel"
+
+//===----------------------------------------------------------------------===//
+// Instruction Selector Implementation
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// LanaiDAGToDAGISel - Lanai specific code to select Lanai machine
+// instructions for SelectionDAG operations.
+//===----------------------------------------------------------------------===//
+namespace {
+
+class LanaiDAGToDAGISel : public SelectionDAGISel {
+public:
+ explicit LanaiDAGToDAGISel(LanaiTargetMachine &TargetMachine)
+ : SelectionDAGISel(TargetMachine) {}
+
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ return SelectionDAGISel::runOnMachineFunction(MF);
+ }
+
+ // Pass Name
+ const char *getPassName() const override {
+ return "Lanai DAG->DAG Pattern Instruction Selection";
+ }
+
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintCode,
+ std::vector<SDValue> &OutOps) override;
+
+private:
+// Include the pieces autogenerated from the target description.
+#include "LanaiGenDAGISel.inc"
+
+  // Instruction Selection not handled by the auto-generated tablegen.
+ void Select(SDNode *N) override;
+
+ // Support functions for the opcodes of Instruction Selection
+  // not handled by the auto-generated tablegen.
+ void selectFrameIndex(SDNode *N);
+
+ // Complex Pattern for address selection.
+ bool selectAddrRi(SDValue Addr, SDValue &Base, SDValue &Offset,
+ SDValue &AluOp);
+ bool selectAddrRr(SDValue Addr, SDValue &R1, SDValue &R2, SDValue &AluOp);
+ bool selectAddrSls(SDValue Addr, SDValue &Offset);
+ bool selectAddrSpls(SDValue Addr, SDValue &Base, SDValue &Offset,
+ SDValue &AluOp);
+
+  // getI32Imm - Return a target constant with the specified value of type i32.
+ inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
+ return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
+ }
+
+private:
+ bool selectAddrRiSpls(SDValue Addr, SDValue &Base, SDValue &Offset,
+ SDValue &AluOp, bool RiMode);
+};
+
+bool canBeRepresentedAsSls(const ConstantSDNode &CN) {
+ // Fits in 21-bit signed immediate and two low-order bits are zero.
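+  // As an illustrative check (example values chosen here, not from the
+  // original source): 0x7FFFC qualifies (in the signed 21-bit range with the
+  // low two bits clear), 0x7FFFE fails the alignment test, and 0x100000
+  // needs 22 signed bits.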
+ return isInt<21>(CN.getSExtValue()) && ((CN.getSExtValue() & 0x3) == 0);
+}
+
+} // namespace
+
+// Helper functions for the ComplexPatterns used in LanaiInstrInfo,
+// i.e. on Lanai load/store instructions.
+bool LanaiDAGToDAGISel::selectAddrSls(SDValue Addr, SDValue &Offset) {
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr)) {
+ SDLoc DL(Addr);
+ // Loading from a constant address.
+ if (canBeRepresentedAsSls(*CN)) {
+ int32_t Imm = CN->getSExtValue();
+ Offset = CurDAG->getTargetConstant(Imm, DL, CN->getValueType(0));
+ return true;
+ }
+ }
+ if (Addr.getOpcode() == ISD::OR &&
+ Addr.getOperand(1).getOpcode() == LanaiISD::SMALL) {
+ Offset = Addr.getOperand(1).getOperand(0);
+ return true;
+ }
+ return false;
+}
+
+bool LanaiDAGToDAGISel::selectAddrRiSpls(SDValue Addr, SDValue &Base,
+ SDValue &Offset, SDValue &AluOp,
+ bool RiMode) {
+ SDLoc DL(Addr);
+
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr)) {
+ if (RiMode) {
+ // Fits in 16-bit signed immediate.
+ if (isInt<16>(CN->getSExtValue())) {
+ int16_t Imm = CN->getSExtValue();
+ Offset = CurDAG->getTargetConstant(Imm, DL, CN->getValueType(0));
+ Base = CurDAG->getRegister(Lanai::R0, CN->getValueType(0));
+ AluOp = CurDAG->getTargetConstant(LPAC::ADD, DL, MVT::i32);
+ return true;
+ }
+ // Allow SLS to match if the constant doesn't fit in 16 bits but can be
+ // represented as an SLS.
+ if (canBeRepresentedAsSls(*CN))
+ return false;
+ } else {
+ // Fits in 10-bit signed immediate.
+ if (isInt<10>(CN->getSExtValue())) {
+ int16_t Imm = CN->getSExtValue();
+ Offset = CurDAG->getTargetConstant(Imm, DL, CN->getValueType(0));
+ Base = CurDAG->getRegister(Lanai::R0, CN->getValueType(0));
+ AluOp = CurDAG->getTargetConstant(LPAC::ADD, DL, MVT::i32);
+ return true;
+ }
+ }
+ }
+
+  // If the address is a frame index, get the TargetFrameIndex.
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(
+ FIN->getIndex(),
+ getTargetLowering()->getPointerTy(CurDAG->getDataLayout()));
+ Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
+ AluOp = CurDAG->getTargetConstant(LPAC::ADD, DL, MVT::i32);
+ return true;
+ }
+
+  // Skip direct calls.
+  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+      Addr.getOpcode() == ISD::TargetGlobalAddress)
+    return false;
+
+ // Address of the form imm + reg
+ ISD::NodeType AluOperator = static_cast<ISD::NodeType>(Addr.getOpcode());
+ if (AluOperator == ISD::ADD) {
+ AluOp = CurDAG->getTargetConstant(LPAC::ADD, DL, MVT::i32);
+ // Addresses of the form FI+const
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
+ if ((RiMode && isInt<16>(CN->getSExtValue())) ||
+ (!RiMode && isInt<10>(CN->getSExtValue()))) {
+ // If the first operand is a FI, get the TargetFI Node
+ if (FrameIndexSDNode *FIN =
+ dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
+ Base = CurDAG->getTargetFrameIndex(
+ FIN->getIndex(),
+ getTargetLowering()->getPointerTy(CurDAG->getDataLayout()));
+ } else {
+ Base = Addr.getOperand(0);
+ }
+
+ Offset = CurDAG->getTargetConstant(CN->getSExtValue(), DL, MVT::i32);
+ return true;
+ }
+ }
+
+ // Let SLS match SMALL instead of RI.
+ if (AluOperator == ISD::OR && RiMode &&
+ Addr.getOperand(1).getOpcode() == LanaiISD::SMALL)
+ return false;
+
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
+ AluOp = CurDAG->getTargetConstant(LPAC::ADD, DL, MVT::i32);
+ return true;
+}
+
+bool LanaiDAGToDAGISel::selectAddrRi(SDValue Addr, SDValue &Base,
+ SDValue &Offset, SDValue &AluOp) {
+ return selectAddrRiSpls(Addr, Base, Offset, AluOp, /*RiMode=*/true);
+}
+
+bool LanaiDAGToDAGISel::selectAddrSpls(SDValue Addr, SDValue &Base,
+ SDValue &Offset, SDValue &AluOp) {
+ return selectAddrRiSpls(Addr, Base, Offset, AluOp, /*RiMode=*/false);
+}
+
+bool LanaiDAGToDAGISel::selectAddrRr(SDValue Addr, SDValue &R1, SDValue &R2,
+ SDValue &AluOp) {
+  // If the address is a frame index, skip it here; frame indices are
+  // matched by the RI addressing mode instead.
+ if (Addr.getOpcode() == ISD::FrameIndex)
+ return false;
+
+  // Skip direct calls.
+  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+      Addr.getOpcode() == ISD::TargetGlobalAddress)
+    return false;
+
+ // Address of the form OP + OP
+ ISD::NodeType AluOperator = static_cast<ISD::NodeType>(Addr.getOpcode());
+ LPAC::AluCode AluCode = LPAC::isdToLanaiAluCode(AluOperator);
+ if (AluCode != LPAC::UNKNOWN) {
+ // Skip addresses of the form FI OP const
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
+ if (isInt<16>(CN->getSExtValue()))
+ return false;
+
+ // Skip addresses with hi/lo operands
+ if (Addr.getOperand(0).getOpcode() == LanaiISD::HI ||
+ Addr.getOperand(0).getOpcode() == LanaiISD::LO ||
+ Addr.getOperand(0).getOpcode() == LanaiISD::SMALL ||
+ Addr.getOperand(1).getOpcode() == LanaiISD::HI ||
+ Addr.getOperand(1).getOpcode() == LanaiISD::LO ||
+ Addr.getOperand(1).getOpcode() == LanaiISD::SMALL)
+ return false;
+
+ // Addresses of the form register OP register
+ R1 = Addr.getOperand(0);
+ R2 = Addr.getOperand(1);
+ AluOp = CurDAG->getTargetConstant(AluCode, SDLoc(Addr), MVT::i32);
+ return true;
+ }
+
+ // Skip addresses with zero offset
+ return false;
+}
+
+bool LanaiDAGToDAGISel::SelectInlineAsmMemoryOperand(
+ const SDValue &Op, unsigned ConstraintCode, std::vector<SDValue> &OutOps) {
+ SDValue Op0, Op1, AluOp;
+ switch (ConstraintCode) {
+ default:
+ return true;
+ case InlineAsm::Constraint_m: // memory
+ if (!selectAddrRr(Op, Op0, Op1, AluOp) &&
+ !selectAddrRi(Op, Op0, Op1, AluOp))
+ return true;
+ break;
+ }
+
+ OutOps.push_back(Op0);
+ OutOps.push_back(Op1);
+ OutOps.push_back(AluOp);
+ return false;
+}
+
+// Select instructions that are not custom-lowered. Used for
+// expanded, promoted, and normal instructions.
+void LanaiDAGToDAGISel::Select(SDNode *Node) {
+ unsigned Opcode = Node->getOpcode();
+
+ // Dump information about the Node being selected
+ DEBUG(errs() << "Selecting: "; Node->dump(CurDAG); errs() << "\n");
+
+ // If we have a custom node, we already have selected!
+ if (Node->isMachineOpcode()) {
+ DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
+ return;
+ }
+
+  // Instruction selection not handled by the auto-generated tablegen
+  // selector should be handled here.
+ switch (Opcode) {
+ case ISD::FrameIndex:
+ selectFrameIndex(Node);
+ return;
+ default:
+ break;
+ }
+
+ // Select the default instruction
+ SelectCode(Node);
+}
+
+void LanaiDAGToDAGISel::selectFrameIndex(SDNode *Node) {
+ SDLoc DL(Node);
+ SDValue Imm = CurDAG->getTargetConstant(0, DL, MVT::i32);
+  int FI = cast<FrameIndexSDNode>(Node)->getIndex();
+ EVT VT = Node->getValueType(0);
+ SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
+ unsigned Opc = Lanai::ADD_I_LO;
+ if (Node->hasOneUse()) {
+ CurDAG->SelectNodeTo(Node, Opc, VT, TFI, Imm);
+ return;
+ }
+ ReplaceNode(Node, CurDAG->getMachineNode(Opc, DL, VT, TFI, Imm));
+}
+
+// createLanaiISelDag - This pass converts a legalized DAG into a
+// Lanai-specific DAG, ready for instruction scheduling.
+FunctionPass *llvm::createLanaiISelDag(LanaiTargetMachine &TM) {
+ return new LanaiDAGToDAGISel(TM);
+}
diff --git a/lib/Target/Lanai/LanaiISelLowering.cpp b/lib/Target/Lanai/LanaiISelLowering.cpp
new file mode 100644
index 000000000000..66416b38e812
--- /dev/null
+++ b/lib/Target/Lanai/LanaiISelLowering.cpp
@@ -0,0 +1,1437 @@
+//===-- LanaiISelLowering.cpp - Lanai DAG Lowering Implementation ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LanaiTargetLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LanaiISelLowering.h"
+
+#include "Lanai.h"
+#include "LanaiMachineFunctionInfo.h"
+#include "LanaiSubtarget.h"
+#include "LanaiTargetMachine.h"
+#include "LanaiTargetObjectFile.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "lanai-lower"
+
+using namespace llvm;
+
+// Limit on number of instructions the lowered multiplication may have before a
+// call to the library function should be generated instead. The threshold is
+// currently set to 14 as this was the smallest threshold that resulted in all
+// constant multiplications being lowered. A threshold of 5 covered all cases
+// except for one multiplication which required 14. mulsi3 requires 16
+// instructions (including the prologue and epilogue but excluding instructions
+// at the call site). Until we can inline mulsi3, generating at most 14 instructions
+// will be faster than invoking mulsi3.
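+// To make the threshold concrete (an illustrative note based on the cost
+// model in LowerMUL below): a NAF form with k non-zero digits costs roughly
+// 2*k - 1 instructions, so the default of 14 admits multipliers whose NAF has
+// up to 7 non-zero digits (8 for odd multipliers, where the shift by 0 is
+// free).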
+static cl::opt<int> LanaiLowerConstantMulThreshold(
+ "lanai-constant-mul-threshold", cl::Hidden,
+ cl::desc("Maximum number of instruction to generate when lowering constant "
+ "multiplication instead of calling library function [default=14]"),
+ cl::init(14));
+
+LanaiTargetLowering::LanaiTargetLowering(const TargetMachine &TM,
+ const LanaiSubtarget &STI)
+ : TargetLowering(TM) {
+ // Set up the register classes.
+ addRegisterClass(MVT::i32, &Lanai::GPRRegClass);
+
+ // Compute derived properties from the register classes
+ TRI = STI.getRegisterInfo();
+ computeRegisterProperties(TRI);
+
+ setStackPointerRegisterToSaveRestore(Lanai::SP);
+
+ setOperationAction(ISD::BR_CC, MVT::i32, Custom);
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ setOperationAction(ISD::BRCOND, MVT::Other, Expand);
+ setOperationAction(ISD::SETCC, MVT::i32, Custom);
+ setOperationAction(ISD::SETCCE, MVT::i32, Custom);
+ setOperationAction(ISD::SELECT, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
+
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+ setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
+ setOperationAction(ISD::JumpTable, MVT::i32, Custom);
+ setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
+
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
+ setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
+ setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
+
+ setOperationAction(ISD::VASTART, MVT::Other, Custom);
+ setOperationAction(ISD::VAARG, MVT::Other, Expand);
+ setOperationAction(ISD::VACOPY, MVT::Other, Expand);
+ setOperationAction(ISD::VAEND, MVT::Other, Expand);
+
+ setOperationAction(ISD::SDIV, MVT::i32, Expand);
+ setOperationAction(ISD::UDIV, MVT::i32, Expand);
+ setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::SREM, MVT::i32, Expand);
+ setOperationAction(ISD::UREM, MVT::i32, Expand);
+
+ setOperationAction(ISD::MUL, MVT::i32, Custom);
+ setOperationAction(ISD::MULHU, MVT::i32, Expand);
+ setOperationAction(ISD::MULHS, MVT::i32, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
+ setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
+
+ setOperationAction(ISD::ROTR, MVT::i32, Expand);
+ setOperationAction(ISD::ROTL, MVT::i32, Expand);
+ setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
+ setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
+
+ setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ setOperationAction(ISD::CTPOP, MVT::i32, Legal);
+ setOperationAction(ISD::CTLZ, MVT::i32, Legal);
+ setOperationAction(ISD::CTTZ, MVT::i32, Legal);
+
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
+
+ // Extended load operations for i1 types must be promoted
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ }
+
+ setTargetDAGCombine(ISD::ADD);
+ setTargetDAGCombine(ISD::SUB);
+ setTargetDAGCombine(ISD::AND);
+ setTargetDAGCombine(ISD::OR);
+ setTargetDAGCombine(ISD::XOR);
+
+ // Function alignments (log2)
+ setMinFunctionAlignment(2);
+ setPrefFunctionAlignment(2);
+
+ setJumpIsExpensive(true);
+
+  // The minimum number of jump table entries before a switch is transformed
+  // into a jump table is set to 100 to avoid creating jump tables, as they
+  // were causing bad performance compared to a large group of if statements.
+  // TODO: Re-evaluate this on new benchmarks.
+ setMinimumJumpTableEntries(100);
+
+ // Use fast calling convention for library functions.
+ for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I) {
+ setLibcallCallingConv(static_cast<RTLIB::Libcall>(I), CallingConv::Fast);
+ }
+
+ MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
+ MaxStoresPerMemsetOptSize = 8;
+ MaxStoresPerMemcpy = 16; // For @llvm.memcpy -> sequence of stores
+ MaxStoresPerMemcpyOptSize = 8;
+ MaxStoresPerMemmove = 16; // For @llvm.memmove -> sequence of stores
+ MaxStoresPerMemmoveOptSize = 8;
+
+ // Booleans always contain 0 or 1.
+ setBooleanContents(ZeroOrOneBooleanContent);
+}
+
+SDValue LanaiTargetLowering::LowerOperation(SDValue Op,
+ SelectionDAG &DAG) const {
+ switch (Op.getOpcode()) {
+ case ISD::MUL:
+ return LowerMUL(Op, DAG);
+ case ISD::BR_CC:
+ return LowerBR_CC(Op, DAG);
+ case ISD::ConstantPool:
+ return LowerConstantPool(Op, DAG);
+ case ISD::GlobalAddress:
+ return LowerGlobalAddress(Op, DAG);
+ case ISD::BlockAddress:
+ return LowerBlockAddress(Op, DAG);
+ case ISD::JumpTable:
+ return LowerJumpTable(Op, DAG);
+ case ISD::SELECT_CC:
+ return LowerSELECT_CC(Op, DAG);
+ case ISD::SETCC:
+ return LowerSETCC(Op, DAG);
+ case ISD::SETCCE:
+ return LowerSETCCE(Op, DAG);
+ case ISD::SRL_PARTS:
+ return LowerSRL_PARTS(Op, DAG);
+ case ISD::VASTART:
+ return LowerVASTART(Op, DAG);
+ case ISD::DYNAMIC_STACKALLOC:
+ return LowerDYNAMIC_STACKALLOC(Op, DAG);
+ case ISD::RETURNADDR:
+ return LowerRETURNADDR(Op, DAG);
+ case ISD::FRAMEADDR:
+ return LowerFRAMEADDR(Op, DAG);
+ default:
+ llvm_unreachable("unimplemented operand");
+ }
+}
+//===----------------------------------------------------------------------===//
+// Lanai Inline Assembly Support
+//===----------------------------------------------------------------------===//
+
+unsigned LanaiTargetLowering::getRegisterByName(const char *RegName, EVT /*VT*/,
+ SelectionDAG & /*DAG*/) const {
+ // Only unallocatable registers should be matched here.
+ unsigned Reg = StringSwitch<unsigned>(RegName)
+ .Case("pc", Lanai::PC)
+ .Case("sp", Lanai::SP)
+ .Case("fp", Lanai::FP)
+ .Case("rr1", Lanai::RR1)
+ .Case("r10", Lanai::R10)
+ .Case("rr2", Lanai::RR2)
+ .Case("r11", Lanai::R11)
+ .Case("rca", Lanai::RCA)
+ .Default(0);
+
+ if (Reg)
+ return Reg;
+ report_fatal_error("Invalid register name global variable");
+}
+
+std::pair<unsigned, const TargetRegisterClass *>
+LanaiTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ StringRef Constraint,
+ MVT VT) const {
+ if (Constraint.size() == 1)
+ // GCC Constraint Letters
+ switch (Constraint[0]) {
+ case 'r': // GENERAL_REGS
+ return std::make_pair(0U, &Lanai::GPRRegClass);
+ default:
+ break;
+ }
+
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+}
+
+// Examine constraint type and operand type and determine a weight value.
+// This object must already have been set up with the operand type
+// and the current alternative constraint selected.
+TargetLowering::ConstraintWeight
+LanaiTargetLowering::getSingleConstraintMatchWeight(
+ AsmOperandInfo &Info, const char *Constraint) const {
+ ConstraintWeight Weight = CW_Invalid;
+ Value *CallOperandVal = Info.CallOperandVal;
+ // If we don't have a value, we can't do a match,
+ // but allow it at the lowest weight.
+  if (CallOperandVal == nullptr)
+ return CW_Default;
+ // Look at the constraint type.
+ switch (*Constraint) {
+ case 'I': // signed 16 bit immediate
+ case 'J': // integer zero
+ case 'K': // unsigned 16 bit immediate
+ case 'L': // immediate in the range 0 to 31
+ case 'M': // signed 32 bit immediate where lower 16 bits are 0
+ case 'N': // signed 26 bit immediate
+ case 'O': // integer zero
+ if (isa<ConstantInt>(CallOperandVal))
+ Weight = CW_Constant;
+ break;
+ default:
+ Weight = TargetLowering::getSingleConstraintMatchWeight(Info, Constraint);
+ break;
+ }
+ return Weight;
+}
+
+// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
+// vector. If it is invalid, don't add anything to Ops.
+void LanaiTargetLowering::LowerAsmOperandForConstraint(
+ SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const {
+  SDValue Result(nullptr, 0);
+
+ // Only support length 1 constraints for now.
+ if (Constraint.length() > 1)
+ return;
+
+ char ConstraintLetter = Constraint[0];
+ switch (ConstraintLetter) {
+ case 'I': // Signed 16 bit constant
+ // If this fails, the parent routine will give an error
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ if (isInt<16>(C->getSExtValue())) {
+ Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(C),
+ Op.getValueType());
+ break;
+ }
+ }
+ return;
+ case 'J': // integer zero
+ case 'O':
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ if (C->getZExtValue() == 0) {
+ Result = DAG.getTargetConstant(0, SDLoc(C), Op.getValueType());
+ break;
+ }
+ }
+ return;
+ case 'K': // unsigned 16 bit immediate
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ if (isUInt<16>(C->getZExtValue())) {
+ Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(C),
+ Op.getValueType());
+ break;
+ }
+ }
+ return;
+ case 'L': // immediate in the range 0 to 31
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ if (C->getZExtValue() <= 31) {
+ Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(C),
+ Op.getValueType());
+ break;
+ }
+ }
+ return;
+ case 'M': // signed 32 bit immediate where lower 16 bits are 0
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ int64_t Val = C->getSExtValue();
+ if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)) {
+ Result = DAG.getTargetConstant(Val, SDLoc(C), Op.getValueType());
+ break;
+ }
+ }
+ return;
+ case 'N': // signed 26 bit immediate
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ int64_t Val = C->getSExtValue();
+ if ((Val >= -33554432) && (Val <= 33554431)) {
+ Result = DAG.getTargetConstant(Val, SDLoc(C), Op.getValueType());
+ break;
+ }
+ }
+ return;
+ default:
+ break; // This will fall through to the generic implementation
+ }
+
+ if (Result.getNode()) {
+ Ops.push_back(Result);
+ return;
+ }
+
+ TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+}
+
+//===----------------------------------------------------------------------===//
+// Calling Convention Implementation
+//===----------------------------------------------------------------------===//
+
+#include "LanaiGenCallingConv.inc"
+
+static unsigned NumFixedArgs;
+static bool CC_Lanai32_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ // Handle fixed arguments with default CC.
+ // Note: Both the default and fast CC handle VarArg the same and hence the
+ // calling convention of the function is not considered here.
+ if (ValNo < NumFixedArgs) {
+ return CC_Lanai32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
+ }
+
+ // Promote i8/i16 args to i32
+ if (LocVT == MVT::i8 || LocVT == MVT::i16) {
+ LocVT = MVT::i32;
+ if (ArgFlags.isSExt())
+ LocInfo = CCValAssign::SExt;
+ else if (ArgFlags.isZExt())
+ LocInfo = CCValAssign::ZExt;
+ else
+ LocInfo = CCValAssign::AExt;
+ }
+
+ // VarArgs get passed on stack
+ unsigned Offset = State.AllocateStack(4, 4);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ return false;
+}
+
+SDValue LanaiTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+ switch (CallConv) {
+ case CallingConv::C:
+ case CallingConv::Fast:
+ return LowerCCCArguments(Chain, CallConv, IsVarArg, Ins, DL, DAG, InVals);
+ default:
+ llvm_unreachable("Unsupported calling convention");
+ }
+}
+
+SDValue LanaiTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ SDLoc &DL = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &IsTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool IsVarArg = CLI.IsVarArg;
+
+ // Lanai target does not yet support tail call optimization.
+ IsTailCall = false;
+
+ switch (CallConv) {
+ case CallingConv::Fast:
+ case CallingConv::C:
+ return LowerCCCCallTo(Chain, Callee, CallConv, IsVarArg, IsTailCall, Outs,
+ OutVals, Ins, DL, DAG, InVals);
+ default:
+ llvm_unreachable("Unsupported calling convention");
+ }
+}
+
+// LowerCCCArguments - transform physical registers into virtual registers and
+// generate load operations for arguments placed on the stack.
+SDValue LanaiTargetLowering::LowerCCCArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+ LanaiMachineFunctionInfo *LanaiMFI = MF.getInfo<LanaiMachineFunctionInfo>();
+
+ // Assign locations to all of the incoming arguments.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
+ *DAG.getContext());
+ if (CallConv == CallingConv::Fast) {
+ CCInfo.AnalyzeFormalArguments(Ins, CC_Lanai32_Fast);
+ } else {
+ CCInfo.AnalyzeFormalArguments(Ins, CC_Lanai32);
+ }
+
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ if (VA.isRegLoc()) {
+ // Arguments passed in registers
+ EVT RegVT = VA.getLocVT();
+ switch (RegVT.getSimpleVT().SimpleTy) {
+ case MVT::i32: {
+ unsigned VReg = RegInfo.createVirtualRegister(&Lanai::GPRRegClass);
+ RegInfo.addLiveIn(VA.getLocReg(), VReg);
+ SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT);
+
+ // If this is an 8/16-bit value, it is really passed promoted to 32
+ // bits. Insert an assert[sz]ext to capture this, then truncate to the
+ // right size.
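+        // For example (illustrative): an i8 argument arrives sign-extended
+        // in a 32-bit register; it is wrapped as (AssertSext ... i8) and then
+        // truncated back to i8.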
+ if (VA.getLocInfo() == CCValAssign::SExt)
+ ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
+ DAG.getValueType(VA.getValVT()));
+ else if (VA.getLocInfo() == CCValAssign::ZExt)
+ ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
+ DAG.getValueType(VA.getValVT()));
+
+ if (VA.getLocInfo() != CCValAssign::Full)
+ ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
+
+ InVals.push_back(ArgValue);
+ break;
+ }
+ default:
+ DEBUG(dbgs() << "LowerFormalArguments Unhandled argument type: "
+ << RegVT.getEVTString() << "\n");
+ llvm_unreachable("unhandled argument type");
+ }
+ } else {
+ // Sanity check
+ assert(VA.isMemLoc());
+ // Load the argument to a virtual register
+ unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
+ // Check that the argument fits in stack slot
+ if (ObjSize > 4) {
+ errs() << "LowerFormalArguments Unhandled argument type: "
+ << EVT(VA.getLocVT()).getEVTString() << "\n";
+ }
+ // Create the frame index object for this incoming parameter...
+ int FI = MFI->CreateFixedObject(ObjSize, VA.getLocMemOffset(), true);
+
+ // Create the SelectionDAG nodes corresponding to a load
+ // from this parameter
+ SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+ InVals.push_back(DAG.getLoad(
+ VA.getLocVT(), DL, Chain, FIN,
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
+ }
+ }
+
+ // The Lanai ABI for returning structs by value requires that we copy
+ // the sret argument into rv for the return. Save the argument into
+ // a virtual register so that we can access it from the return points.
+ if (MF.getFunction()->hasStructRetAttr()) {
+ unsigned Reg = LanaiMFI->getSRetReturnReg();
+ if (!Reg) {
+ Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32));
+ LanaiMFI->setSRetReturnReg(Reg);
+ }
+ SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[0]);
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
+ }
+
+ if (IsVarArg) {
+    // Record the frame index of the first variable argument,
+    // which is needed by VASTART.
+ int FI = MFI->CreateFixedObject(4, CCInfo.getNextStackOffset(), true);
+ LanaiMFI->setVarArgsFrameIndex(FI);
+ }
+
+ return Chain;
+}
+
+SDValue
+LanaiTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &DL, SelectionDAG &DAG) const {
+ // CCValAssign - represent the assignment of the return value to a location
+ SmallVector<CCValAssign, 16> RVLocs;
+
+ // CCState - Info about the registers and stack slot.
+ CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
+ *DAG.getContext());
+
+  // Analyze return values.
+ CCInfo.AnalyzeReturn(Outs, RetCC_Lanai32);
+
+ SDValue Flag;
+ SmallVector<SDValue, 4> RetOps(1, Chain);
+
+ // Copy the result values into the output registers.
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ CCValAssign &VA = RVLocs[i];
+ assert(VA.isRegLoc() && "Can only return in registers!");
+
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Flag);
+
+ // Guarantee that all emitted copies are stuck together with flags.
+ Flag = Chain.getValue(1);
+ RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
+ }
+
+ // The Lanai ABI for returning structs by value requires that we copy
+ // the sret argument into rv for the return. We saved the argument into
+ // a virtual register in the entry block, so now we copy the value out
+ // and into rv.
+ if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ LanaiMachineFunctionInfo *LanaiMFI = MF.getInfo<LanaiMachineFunctionInfo>();
+ unsigned Reg = LanaiMFI->getSRetReturnReg();
+ assert(Reg &&
+ "SRetReturnReg should have been set in LowerFormalArguments().");
+ SDValue Val =
+ DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(DAG.getDataLayout()));
+
+ Chain = DAG.getCopyToReg(Chain, DL, Lanai::RV, Val, Flag);
+ Flag = Chain.getValue(1);
+ RetOps.push_back(
+ DAG.getRegister(Lanai::RV, getPointerTy(DAG.getDataLayout())));
+ }
+
+ RetOps[0] = Chain; // Update chain
+
+ unsigned Opc = LanaiISD::RET_FLAG;
+ if (Flag.getNode())
+ RetOps.push_back(Flag);
+
+  // Emit the return node.
+  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
+}
+
+// LowerCCCCallTo - Function arguments are copied from virtual registers to
+// physical registers or the stack frame; CALLSEQ_START and CALLSEQ_END are
+// emitted.
+SDValue LanaiTargetLowering::LowerCCCCallTo(
+ SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool IsVarArg,
+ bool /*IsTailCall*/, const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
+ *DAG.getContext());
+ GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+
+ NumFixedArgs = 0;
+ if (IsVarArg && G) {
+ const Function *CalleeFn = dyn_cast<Function>(G->getGlobal());
+ if (CalleeFn)
+ NumFixedArgs = CalleeFn->getFunctionType()->getNumParams();
+ }
+ if (NumFixedArgs)
+ CCInfo.AnalyzeCallOperands(Outs, CC_Lanai32_VarArg);
+ else {
+ if (CallConv == CallingConv::Fast)
+ CCInfo.AnalyzeCallOperands(Outs, CC_Lanai32_Fast);
+ else
+ CCInfo.AnalyzeCallOperands(Outs, CC_Lanai32);
+ }
+
+ // Get a count of how many bytes are to be pushed on the stack.
+ unsigned NumBytes = CCInfo.getNextStackOffset();
+
+ // Create local copies for byval args.
+ SmallVector<SDValue, 8> ByValArgs;
+ for (unsigned I = 0, E = Outs.size(); I != E; ++I) {
+ ISD::ArgFlagsTy Flags = Outs[I].Flags;
+ if (!Flags.isByVal())
+ continue;
+
+ SDValue Arg = OutVals[I];
+ unsigned Size = Flags.getByValSize();
+ unsigned Align = Flags.getByValAlign();
+
+ int FI = MFI->CreateStackObject(Size, Align, false);
+ SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+ SDValue SizeNode = DAG.getConstant(Size, DL, MVT::i32);
+
+ Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Align,
+ /*IsVolatile=*/false,
+ /*AlwaysInline=*/false,
+ /*isTailCall=*/false, MachinePointerInfo(),
+ MachinePointerInfo());
+ ByValArgs.push_back(FIPtr);
+ }
+
+ Chain = DAG.getCALLSEQ_START(
+ Chain,
+ DAG.getConstant(NumBytes, DL, getPointerTy(DAG.getDataLayout()), true),
+ DL);
+
+ SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
+ SmallVector<SDValue, 12> MemOpChains;
+ SDValue StackPtr;
+
+ // Walk the register/memloc assignments, inserting copies/loads.
+ for (unsigned I = 0, J = 0, E = ArgLocs.size(); I != E; ++I) {
+ CCValAssign &VA = ArgLocs[I];
+ SDValue Arg = OutVals[I];
+ ISD::ArgFlagsTy Flags = Outs[I].Flags;
+
+ // Promote the value if needed.
+ switch (VA.getLocInfo()) {
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::AExt:
+ Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
+ break;
+ default:
+ llvm_unreachable("Unknown loc info!");
+ }
+
+ // Use local copy if it is a byval arg.
+ if (Flags.isByVal())
+ Arg = ByValArgs[J++];
+
+    // Arguments passed in registers must be collected in the RegsToPass
+    // vector.
+ if (VA.isRegLoc()) {
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ } else {
+ assert(VA.isMemLoc());
+
+      if (StackPtr.getNode() == nullptr)
+ StackPtr = DAG.getCopyFromReg(Chain, DL, Lanai::SP,
+ getPointerTy(DAG.getDataLayout()));
+
+ SDValue PtrOff =
+ DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()), StackPtr,
+ DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
+
+ MemOpChains.push_back(
+ DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
+ }
+ }
+
+ // Transform all store nodes into one single node because all store nodes are
+ // independent of each other.
+ if (!MemOpChains.empty())
+    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
+
+ SDValue InFlag;
+
+  // Build a sequence of copy-to-reg nodes chained together with token chain
+  // and flag operands which copy the outgoing args into registers. The InFlag
+  // is necessary since all emitted instructions must be stuck together.
+ for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
+ Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
+ RegsToPass[I].second, InFlag);
+ InFlag = Chain.getValue(1);
+ }
+
+ // If the callee is a GlobalAddress node (quite common, every direct call is)
+ // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
+ // Likewise ExternalSymbol -> TargetExternalSymbol.
+ uint8_t OpFlag = LanaiII::MO_NO_FLAG;
+ if (G) {
+ Callee = DAG.getTargetGlobalAddress(
+ G->getGlobal(), DL, getPointerTy(DAG.getDataLayout()), 0, OpFlag);
+ } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ Callee = DAG.getTargetExternalSymbol(
+ E->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlag);
+ }
+
+ // Returns a chain & a flag for retval copy to use.
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+
+ // Add a register mask operand representing the call-preserved registers.
+ // TODO: Should return-twice functions be handled?
+ const uint32_t *Mask =
+ TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+
+ // Add argument registers to the end of the list so that they are
+ // known live into the call.
+ for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
+ Ops.push_back(DAG.getRegister(RegsToPass[I].first,
+ RegsToPass[I].second.getValueType()));
+
+ if (InFlag.getNode())
+ Ops.push_back(InFlag);
+
+  Chain = DAG.getNode(LanaiISD::CALL, DL, NodeTys, Ops);
+ InFlag = Chain.getValue(1);
+
+ // Create the CALLSEQ_END node.
+ Chain = DAG.getCALLSEQ_END(
+ Chain,
+ DAG.getConstant(NumBytes, DL, getPointerTy(DAG.getDataLayout()), true),
+ DAG.getConstant(0, DL, getPointerTy(DAG.getDataLayout()), true), InFlag,
+ DL);
+ InFlag = Chain.getValue(1);
+
+ // Handle result values, copying them out of physregs into vregs that we
+ // return.
+ return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
+ InVals);
+}
+
+// LowerCallResult - Lower the result values of a call into the
+// appropriate copies out of appropriate physical registers.
+SDValue LanaiTargetLowering::LowerCallResult(
+ SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+ // Assign locations to each value returned by this call.
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
+ *DAG.getContext());
+
+ CCInfo.AnalyzeCallResult(Ins, RetCC_Lanai32);
+
+ // Copy all of the result registers out of their specified physreg.
+ for (unsigned I = 0; I != RVLocs.size(); ++I) {
+ Chain = DAG.getCopyFromReg(Chain, DL, RVLocs[I].getLocReg(),
+ RVLocs[I].getValVT(), InFlag)
+ .getValue(1);
+ InFlag = Chain.getValue(2);
+ InVals.push_back(Chain.getValue(0));
+ }
+
+ return Chain;
+}
+
+//===----------------------------------------------------------------------===//
+// Custom Lowerings
+//===----------------------------------------------------------------------===//
+
+static LPCC::CondCode IntCondCCodeToICC(SDValue CC, const SDLoc &DL,
+ SDValue &RHS, SelectionDAG &DAG) {
+ ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
+
+ // For integer, only the SETEQ, SETNE, SETLT, SETLE, SETGT, SETGE, SETULT,
+ // SETULE, SETUGT, and SETUGE opcodes are used (see CodeGen/ISDOpcodes.h)
+ // and Lanai only supports integer comparisons, so only provide definitions
+ // for them.
+ switch (SetCCOpcode) {
+ case ISD::SETEQ:
+ return LPCC::ICC_EQ;
+ case ISD::SETGT:
+ if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS))
+ if (RHSC->getZExtValue() == 0xFFFFFFFF) {
+ // X > -1 -> X >= 0 -> is_plus(X)
+ RHS = DAG.getConstant(0, DL, RHS.getValueType());
+ return LPCC::ICC_PL;
+ }
+ return LPCC::ICC_GT;
+ case ISD::SETUGT:
+ return LPCC::ICC_UGT;
+ case ISD::SETLT:
+ if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS))
+ if (RHSC->getZExtValue() == 0)
+ // X < 0 -> is_minus(X)
+ return LPCC::ICC_MI;
+ return LPCC::ICC_LT;
+ case ISD::SETULT:
+ return LPCC::ICC_ULT;
+ case ISD::SETLE:
+ if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS))
+ if (RHSC->getZExtValue() == 0xFFFFFFFF) {
+ // X <= -1 -> X < 0 -> is_minus(X)
+ RHS = DAG.getConstant(0, DL, RHS.getValueType());
+ return LPCC::ICC_MI;
+ }
+ return LPCC::ICC_LE;
+ case ISD::SETULE:
+ return LPCC::ICC_ULE;
+ case ISD::SETGE:
+ if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS))
+ if (RHSC->getZExtValue() == 0)
+ // X >= 0 -> is_plus(X)
+ return LPCC::ICC_PL;
+ return LPCC::ICC_GE;
+ case ISD::SETUGE:
+ return LPCC::ICC_UGE;
+ case ISD::SETNE:
+ return LPCC::ICC_NE;
+ case ISD::SETONE:
+ case ISD::SETUNE:
+ case ISD::SETOGE:
+ case ISD::SETOLE:
+ case ISD::SETOLT:
+ case ISD::SETOGT:
+ case ISD::SETOEQ:
+ case ISD::SETUEQ:
+ case ISD::SETO:
+ case ISD::SETUO:
+ llvm_unreachable("Unsupported comparison.");
+ default:
+ llvm_unreachable("Unknown integer condition code!");
+ }
+}
+
+SDValue LanaiTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Chain = Op.getOperand(0);
+ SDValue Cond = Op.getOperand(1);
+ SDValue LHS = Op.getOperand(2);
+ SDValue RHS = Op.getOperand(3);
+ SDValue Dest = Op.getOperand(4);
+ SDLoc DL(Op);
+
+ LPCC::CondCode CC = IntCondCCodeToICC(Cond, DL, RHS, DAG);
+ SDValue TargetCC = DAG.getConstant(CC, DL, MVT::i32);
+ SDValue Flag =
+ DAG.getNode(LanaiISD::SET_FLAG, DL, MVT::Glue, LHS, RHS, TargetCC);
+
+ return DAG.getNode(LanaiISD::BR_CC, DL, Op.getValueType(), Chain, Dest,
+ TargetCC, Flag);
+}
+
+SDValue LanaiTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
+ EVT VT = Op->getValueType(0);
+ if (VT != MVT::i32)
+ return SDValue();
+
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op->getOperand(1));
+ if (!C)
+ return SDValue();
+
+ int64_t MulAmt = C->getSExtValue();
+ int32_t HighestOne = -1;
+ uint32_t NonzeroEntries = 0;
+ int SignedDigit[32] = {0};
+
+  // Convert the multiplier to non-adjacent form (NAF), a signed-digit
+  // representation in which no two adjacent digits are non-zero. NAF is the
+  // minimal Hamming-weight representation of a number (on average 1/3 of the
+  // digits are non-zero, versus 1/2 for plain binary), and since only the
+  // non-zero digits contribute to the instruction count, this is desirable.
+  // The loop below performs the conversion (following the approach in 'Guide
+  // to Elliptic Curve Cryptography' [ISBN: 038795273X]) by choosing each
+  // non-zero coefficient such that the resulting quotient is divisible by 2,
+  // which forces the next coefficient to be zero.
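+  // Worked example (illustrative, not from the original source): MulAmt = 7
+  // = 0b111 has NAF digits (1 0 0 -1), i.e. 7 = 8 - 1, so 7*V lowers to
+  // (V << 3) - V: one shift plus one subtract instead of the three shifted
+  // add terms of the plain binary form.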
+ int64_t E = std::abs(MulAmt);
+ int S = (MulAmt < 0 ? -1 : 1);
+ int I = 0;
+ while (E > 0) {
+ int ZI = 0;
+ if (E % 2 == 1) {
+ ZI = 2 - (E % 4);
+ if (ZI != 0)
+ ++NonzeroEntries;
+ }
+ SignedDigit[I] = S * ZI;
+ if (SignedDigit[I] == 1)
+ HighestOne = I;
+ E = (E - ZI) / 2;
+ ++I;
+ }
+
+  // Compute the number of instructions required. Due to differences in
+  // lowering between the different processors this count is not exact.
+  // Assume one shift and one add/sub for every non-zero entry, except for
+  // the first entry, which needs only the shift.
+ int32_t InstrRequired = 2 * NonzeroEntries - 1;
+ // Correct possible over-adding due to shift by 0 (which is not emitted).
+ if (std::abs(MulAmt) % 2 == 1)
+ --InstrRequired;
+ // Return if the form generated would exceed the instruction threshold.
+ if (InstrRequired > LanaiLowerConstantMulThreshold)
+ return SDValue();
+
+ SDValue Res;
+ SDLoc DL(Op);
+ SDValue V = Op->getOperand(0);
+
+  // Initialize the running sum to the maximal shifted positive value, i.e.,
+  // V shifted by the largest i whose NAF digit zi == 1 (so that MulAmt has
+  // V<<i as a term of its NAF).
+ if (HighestOne == -1)
+ Res = DAG.getConstant(0, DL, MVT::i32);
+ else {
+ Res = DAG.getNode(ISD::SHL, DL, VT, V,
+ DAG.getConstant(HighestOne, DL, MVT::i32));
+ SignedDigit[HighestOne] = 0;
+ }
+
+ // Assemble multiplication from shift, add, sub using NAF form and running
+ // sum.
+ for (unsigned int I = 0; I < sizeof(SignedDigit) / sizeof(SignedDigit[0]);
+ ++I) {
+ if (SignedDigit[I] == 0)
+ continue;
+
+    // Shifted multiplicand (V << I); renamed from Op to avoid shadowing the
+    // function parameter.
+    SDValue ShiftedVal =
+        DAG.getNode(ISD::SHL, DL, VT, V, DAG.getConstant(I, DL, MVT::i32));
+    if (SignedDigit[I] == 1)
+      Res = DAG.getNode(ISD::ADD, DL, VT, Res, ShiftedVal);
+    else if (SignedDigit[I] == -1)
+      Res = DAG.getNode(ISD::SUB, DL, VT, Res, ShiftedVal);
+ }
+ return Res;
+}
+
+SDValue LanaiTargetLowering::LowerSETCCE(SDValue Op, SelectionDAG &DAG) const {
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ SDValue Carry = Op.getOperand(2);
+ SDValue Cond = Op.getOperand(3);
+ SDLoc DL(Op);
+
+ LPCC::CondCode CC = IntCondCCodeToICC(Cond, DL, RHS, DAG);
+ SDValue TargetCC = DAG.getConstant(CC, DL, MVT::i32);
+ SDValue Flag = DAG.getNode(LanaiISD::SUBBF, DL, MVT::Glue, LHS, RHS, Carry);
+ return DAG.getNode(LanaiISD::SETCC, DL, Op.getValueType(), TargetCC, Flag);
+}
+
+SDValue LanaiTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ SDValue Cond = Op.getOperand(2);
+ SDLoc DL(Op);
+
+ LPCC::CondCode CC = IntCondCCodeToICC(Cond, DL, RHS, DAG);
+ SDValue TargetCC = DAG.getConstant(CC, DL, MVT::i32);
+ SDValue Flag =
+ DAG.getNode(LanaiISD::SET_FLAG, DL, MVT::Glue, LHS, RHS, TargetCC);
+
+ return DAG.getNode(LanaiISD::SETCC, DL, Op.getValueType(), TargetCC, Flag);
+}
+
+SDValue LanaiTargetLowering::LowerSELECT_CC(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ SDValue TrueV = Op.getOperand(2);
+ SDValue FalseV = Op.getOperand(3);
+ SDValue Cond = Op.getOperand(4);
+ SDLoc DL(Op);
+
+ LPCC::CondCode CC = IntCondCCodeToICC(Cond, DL, RHS, DAG);
+ SDValue TargetCC = DAG.getConstant(CC, DL, MVT::i32);
+ SDValue Flag =
+ DAG.getNode(LanaiISD::SET_FLAG, DL, MVT::Glue, LHS, RHS, TargetCC);
+
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
+ return DAG.getNode(LanaiISD::SELECT_CC, DL, VTs, TrueV, FalseV, TargetCC,
+ Flag);
+}
+
+SDValue LanaiTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ LanaiMachineFunctionInfo *FuncInfo = MF.getInfo<LanaiMachineFunctionInfo>();
+
+ SDLoc DL(Op);
+ SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
+ getPointerTy(DAG.getDataLayout()));
+
+ // vastart just stores the address of the VarArgsFrameIndex slot into the
+ // memory location argument.
+ const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+ return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
+ MachinePointerInfo(SV));
+}
+
+SDValue LanaiTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Chain = Op.getOperand(0);
+ SDValue Size = Op.getOperand(1);
+ SDLoc DL(Op);
+
+ unsigned SPReg = getStackPointerRegisterToSaveRestore();
+
+ // Get a reference to the stack pointer.
+ SDValue StackPointer = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i32);
+
+ // Subtract the dynamic size from the actual stack size to
+ // obtain the new stack size.
+ SDValue Sub = DAG.getNode(ISD::SUB, DL, MVT::i32, StackPointer, Size);
+
+ // For Lanai, the outgoing memory arguments area should be on top of the
+ // alloca area on the stack i.e., the outgoing memory arguments should be
+ // at a lower address than the alloca area. Move the alloca area down the
+ // stack by adding back the space reserved for outgoing arguments to SP
+ // here.
+ //
+ // We do not know what the size of the outgoing args is at this point.
+ // So, we add a pseudo instruction ADJDYNALLOC that will adjust the
+  // stack pointer. We replace this instruction with one that has the correct,
+  // known offset in emitPrologue().
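+  // Illustrative layout after the adjustment (stack grows downward; this
+  // sketch is not from the original source):
+  //   ... caller frame | alloca area | outgoing args area | <- SP
+  // ADJDYNALLOC yields SP plus the (eventually known) outgoing-args size,
+  // i.e. the start of the alloca area.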
+ SDValue ArgAdjust = DAG.getNode(LanaiISD::ADJDYNALLOC, DL, MVT::i32, Sub);
+
+ // The Sub result contains the new stack start address, so it
+ // must be placed in the stack pointer register.
+ SDValue CopyChain = DAG.getCopyToReg(Chain, DL, SPReg, Sub);
+
+ SDValue Ops[2] = {ArgAdjust, CopyChain};
+ return DAG.getMergeValues(Ops, DL);
+}
+
+SDValue LanaiTargetLowering::LowerRETURNADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MFI->setReturnAddressIsTaken(true);
+
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ if (Depth) {
+ SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
+ const unsigned Offset = -4;
+ SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
+ DAG.getIntPtrConstant(Offset, DL));
+ return DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
+ }
+
+ // Return the link register, which contains the return address.
+ // Mark it an implicit live-in.
+ unsigned Reg = MF.addLiveIn(TRI->getRARegister(), getRegClassFor(MVT::i32));
+ return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
+}
+
+SDValue LanaiTargetLowering::LowerFRAMEADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI->setFrameAddressIsTaken(true);
+
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+ SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, Lanai::FP, VT);
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ while (Depth--) {
+ const unsigned Offset = -8;
+ SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
+ DAG.getIntPtrConstant(Offset, DL));
+ FrameAddr =
+ DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
+ }
+ return FrameAddr;
+}
+
+const char *LanaiTargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch (Opcode) {
+ case LanaiISD::ADJDYNALLOC:
+ return "LanaiISD::ADJDYNALLOC";
+ case LanaiISD::RET_FLAG:
+ return "LanaiISD::RET_FLAG";
+ case LanaiISD::CALL:
+ return "LanaiISD::CALL";
+ case LanaiISD::SELECT_CC:
+ return "LanaiISD::SELECT_CC";
+ case LanaiISD::SETCC:
+ return "LanaiISD::SETCC";
+ case LanaiISD::SUBBF:
+ return "LanaiISD::SUBBF";
+ case LanaiISD::SET_FLAG:
+ return "LanaiISD::SET_FLAG";
+ case LanaiISD::BR_CC:
+ return "LanaiISD::BR_CC";
+ case LanaiISD::Wrapper:
+ return "LanaiISD::Wrapper";
+ case LanaiISD::HI:
+ return "LanaiISD::HI";
+ case LanaiISD::LO:
+ return "LanaiISD::LO";
+ case LanaiISD::SMALL:
+ return "LanaiISD::SMALL";
+ default:
+    return nullptr;
+ }
+}
+
+SDValue LanaiTargetLowering::LowerConstantPool(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
+ const Constant *C = N->getConstVal();
+ const LanaiTargetObjectFile *TLOF =
+ static_cast<const LanaiTargetObjectFile *>(
+ getTargetMachine().getObjFileLowering());
+
+  // If the code model is small or the constant will be placed in the small
+  // section, then assume the address will fit in 21 bits.
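+  // Schematically (illustrative): the small case materializes the address as
+  // (or R0, SMALL(cp)), while the general case assembles it from its 16-bit
+  // halves as (or HI(cp), LO(cp)).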
+ if (getTargetMachine().getCodeModel() == CodeModel::Small ||
+ TLOF->isConstantInSmallSection(DAG.getDataLayout(), C)) {
+ SDValue Small = DAG.getTargetConstantPool(
+ C, MVT::i32, N->getAlignment(), N->getOffset(), LanaiII::MO_NO_FLAG);
+ return DAG.getNode(ISD::OR, DL, MVT::i32,
+ DAG.getRegister(Lanai::R0, MVT::i32),
+ DAG.getNode(LanaiISD::SMALL, DL, MVT::i32, Small));
+ } else {
+ uint8_t OpFlagHi = LanaiII::MO_ABS_HI;
+ uint8_t OpFlagLo = LanaiII::MO_ABS_LO;
+
+ SDValue Hi = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
+ N->getOffset(), OpFlagHi);
+ SDValue Lo = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
+ N->getOffset(), OpFlagLo);
+ Hi = DAG.getNode(LanaiISD::HI, DL, MVT::i32, Hi);
+ Lo = DAG.getNode(LanaiISD::LO, DL, MVT::i32, Lo);
+ SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Hi, Lo);
+ return Result;
+ }
+}
+
+SDValue LanaiTargetLowering::LowerGlobalAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+ int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
+
+ const LanaiTargetObjectFile *TLOF =
+ static_cast<const LanaiTargetObjectFile *>(
+ getTargetMachine().getObjFileLowering());
+
+  // If the code model is small or the global variable will be placed in the
+  // small section, then assume the address will fit in 21 bits.
+ if (getTargetMachine().getCodeModel() == CodeModel::Small ||
+ TLOF->isGlobalInSmallSection(GV, getTargetMachine())) {
+ SDValue Small = DAG.getTargetGlobalAddress(
+ GV, DL, getPointerTy(DAG.getDataLayout()), Offset, LanaiII::MO_NO_FLAG);
+ return DAG.getNode(ISD::OR, DL, MVT::i32,
+ DAG.getRegister(Lanai::R0, MVT::i32),
+ DAG.getNode(LanaiISD::SMALL, DL, MVT::i32, Small));
+ } else {
+ uint8_t OpFlagHi = LanaiII::MO_ABS_HI;
+ uint8_t OpFlagLo = LanaiII::MO_ABS_LO;
+
+ // Create the TargetGlobalAddress node, folding in the constant offset.
+ SDValue Hi = DAG.getTargetGlobalAddress(
+ GV, DL, getPointerTy(DAG.getDataLayout()), Offset, OpFlagHi);
+ SDValue Lo = DAG.getTargetGlobalAddress(
+ GV, DL, getPointerTy(DAG.getDataLayout()), Offset, OpFlagLo);
+ Hi = DAG.getNode(LanaiISD::HI, DL, MVT::i32, Hi);
+ Lo = DAG.getNode(LanaiISD::LO, DL, MVT::i32, Lo);
+ return DAG.getNode(ISD::OR, DL, MVT::i32, Hi, Lo);
+ }
+}
+
+SDValue LanaiTargetLowering::LowerBlockAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
+
+ uint8_t OpFlagHi = LanaiII::MO_ABS_HI;
+ uint8_t OpFlagLo = LanaiII::MO_ABS_LO;
+
+ SDValue Hi = DAG.getBlockAddress(BA, MVT::i32, true, OpFlagHi);
+ SDValue Lo = DAG.getBlockAddress(BA, MVT::i32, true, OpFlagLo);
+ Hi = DAG.getNode(LanaiISD::HI, DL, MVT::i32, Hi);
+ Lo = DAG.getNode(LanaiISD::LO, DL, MVT::i32, Lo);
+ SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Hi, Lo);
+ return Result;
+}
+
+SDValue LanaiTargetLowering::LowerJumpTable(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
+
+  // If the code model is small, assume the address will fit in 21 bits.
+ if (getTargetMachine().getCodeModel() == CodeModel::Small) {
+ SDValue Small = DAG.getTargetJumpTable(
+ JT->getIndex(), getPointerTy(DAG.getDataLayout()), LanaiII::MO_NO_FLAG);
+ return DAG.getNode(ISD::OR, DL, MVT::i32,
+ DAG.getRegister(Lanai::R0, MVT::i32),
+ DAG.getNode(LanaiISD::SMALL, DL, MVT::i32, Small));
+ } else {
+ uint8_t OpFlagHi = LanaiII::MO_ABS_HI;
+ uint8_t OpFlagLo = LanaiII::MO_ABS_LO;
+
+ SDValue Hi = DAG.getTargetJumpTable(
+ JT->getIndex(), getPointerTy(DAG.getDataLayout()), OpFlagHi);
+ SDValue Lo = DAG.getTargetJumpTable(
+ JT->getIndex(), getPointerTy(DAG.getDataLayout()), OpFlagLo);
+ Hi = DAG.getNode(LanaiISD::HI, DL, MVT::i32, Hi);
+ Lo = DAG.getNode(LanaiISD::LO, DL, MVT::i32, Lo);
+ SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Hi, Lo);
+ return Result;
+ }
+}
+
+SDValue LanaiTargetLowering::LowerSRL_PARTS(SDValue Op,
+ SelectionDAG &DAG) const {
+ MVT VT = Op.getSimpleValueType();
+ unsigned VTBits = VT.getSizeInBits();
+ SDLoc dl(Op);
+ SDValue ShOpLo = Op.getOperand(0);
+ SDValue ShOpHi = Op.getOperand(1);
+ SDValue ShAmt = Op.getOperand(2);
+
+ // Performs the following for a >> b:
+ // unsigned r_high = a_high >> b;
+ // r_high = (32 - b <= 0) ? 0 : r_high;
+ //
+ // unsigned r_low = a_low >> b;
+ // r_low = (32 - b <= 0) ? r_high : r_low;
+ // r_low = (b == 0) ? r_low : r_low | (a_high << (32 - b));
+ // return (unsigned long long)r_high << 32 | r_low;
+ // Note: This takes advantage of Lanai's shift behavior to avoid needing to
+ // mask the shift amount.
+
+ SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
+ SDValue NegatedPlus32 = DAG.getNode(
+ ISD::SUB, dl, MVT::i32, DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
+ SDValue SetCC = DAG.getSetCC(dl, MVT::i32, NegatedPlus32, Zero, ISD::SETLE);
+
+ SDValue Hi = DAG.getNode(ISD::SRL, dl, MVT::i32, ShOpHi, ShAmt);
+ Hi = DAG.getSelect(dl, MVT::i32, SetCC, Zero, Hi);
+
+ SDValue Lo = DAG.getNode(ISD::SRL, dl, MVT::i32, ShOpLo, ShAmt);
+ Lo = DAG.getSelect(dl, MVT::i32, SetCC, Hi, Lo);
+ SDValue CarryBits =
+ DAG.getNode(ISD::SHL, dl, MVT::i32, ShOpHi, NegatedPlus32);
+ SDValue ShiftIsZero = DAG.getSetCC(dl, MVT::i32, ShAmt, Zero, ISD::SETEQ);
+ Lo = DAG.getSelect(dl, MVT::i32, ShiftIsZero, Lo,
+ DAG.getNode(ISD::OR, dl, MVT::i32, Lo, CarryBits));
+
+ SDValue Ops[2] = {Lo, Hi};
+ return DAG.getMergeValues(Ops, dl);
+}
+
+// Helper function that checks if N is a null or all ones constant.
+static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
+ return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
+}
+
+// Return true if N is conditionally 0 or all ones.
+// Detects these expressions where cc is an i1 value:
+//
+// (select cc 0, y) [AllOnes=0]
+// (select cc y, 0) [AllOnes=0]
+// (zext cc) [AllOnes=0]
+// (sext cc) [AllOnes=0/1]
+// (select cc -1, y) [AllOnes=1]
+// (select cc y, -1) [AllOnes=1]
+//
+// * AllOnes determines whether to check for an all zero (AllOnes false) or an
+// all ones operand (AllOnes true).
+// * Invert is set when N is the all zero/ones constant when CC is false.
+// * OtherOp is set to the alternative value of N.
+//
+// For example, for (select cc X, Y) and AllOnes = 0 if:
+// * X = 0, Invert = False and OtherOp = Y
+// * Y = 0, Invert = True and OtherOp = X
+static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, SDValue &CC,
+ bool &Invert, SDValue &OtherOp,
+ SelectionDAG &DAG) {
+ switch (N->getOpcode()) {
+ default:
+ return false;
+ case ISD::SELECT: {
+ CC = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDValue N2 = N->getOperand(2);
+ if (isZeroOrAllOnes(N1, AllOnes)) {
+ Invert = false;
+ OtherOp = N2;
+ return true;
+ }
+ if (isZeroOrAllOnes(N2, AllOnes)) {
+ Invert = true;
+ OtherOp = N1;
+ return true;
+ }
+ return false;
+ }
+ case ISD::ZERO_EXTEND: {
+ // (zext cc) can never be the all ones value.
+ if (AllOnes)
+ return false;
+ CC = N->getOperand(0);
+ if (CC.getValueType() != MVT::i1)
+ return false;
+ SDLoc dl(N);
+ EVT VT = N->getValueType(0);
+ OtherOp = DAG.getConstant(1, dl, VT);
+ Invert = true;
+ return true;
+ }
+ case ISD::SIGN_EXTEND: {
+ CC = N->getOperand(0);
+ if (CC.getValueType() != MVT::i1)
+ return false;
+ SDLoc dl(N);
+ EVT VT = N->getValueType(0);
+ Invert = !AllOnes;
+ if (AllOnes)
+ // When looking for an AllOnes constant, N is an sext, and the 'other'
+ // value is 0.
+ OtherOp = DAG.getConstant(0, dl, VT);
+ else
+ OtherOp =
+ DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl, VT);
+ return true;
+ }
+ }
+}
+
+// Combine a constant select operand into its use:
+//
+// (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
+// (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
+// (and (select cc, -1, c), x) -> (select cc, x, (and x, c)) [AllOnes=1]
+// (or  (select cc, 0, c), x) -> (select cc, x, (or x, c))
+// (xor (select cc, 0, c), x) -> (select cc, x, (xor x, c))
+//
+// The transform is rejected if the select doesn't have a constant operand that
+// is null, or all ones when AllOnes is set.
+//
+// Also recognize sext/zext from i1:
+//
+// (add (zext cc), x) -> (select cc (add x, 1), x)
+// (add (sext cc), x) -> (select cc (add x, -1), x)
+//
+// These transformations eventually create predicated instructions.
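+// As a concrete instance of the first pattern (constant chosen here for
+// illustration): (add (select cc, 0, 3), x) becomes (select cc, x, (add x, 3)),
+// so no add is executed when cc selects the 0.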
+static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
+ TargetLowering::DAGCombinerInfo &DCI,
+ bool AllOnes) {
+ SelectionDAG &DAG = DCI.DAG;
+ EVT VT = N->getValueType(0);
+ SDValue NonConstantVal;
+ SDValue CCOp;
+ bool SwapSelectOps;
+ if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
+ NonConstantVal, DAG))
+ return SDValue();
+
+  // Slct is now known to be the desired identity constant when CC is true.
+ SDValue TrueVal = OtherOp;
+ SDValue FalseVal =
+ DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
+ // Unless SwapSelectOps says CC should be false.
+ if (SwapSelectOps)
+ std::swap(TrueVal, FalseVal);
+
+ return DAG.getNode(ISD::SELECT, SDLoc(N), VT, CCOp, TrueVal, FalseVal);
+}
+
+// Attempt combineSelectAndUse on each operand of a commutative operator N.
+static SDValue
+combineSelectAndUseCommutative(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
+ bool AllOnes) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ if (N0.getNode()->hasOneUse())
+ if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
+ return Result;
+ if (N1.getNode()->hasOneUse())
+ if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
+ return Result;
+ return SDValue();
+}
+
+// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
+static SDValue PerformSUBCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+
+  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
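+  // Only the second operand is combined: sub is not commutative, and the
+  // rewrite relies on 0 being a right identity (x - 0 == x), so a select in
+  // the first operand does not match this pattern.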
+ if (N1.getNode()->hasOneUse())
+ if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, /*AllOnes=*/false))
+ return Result;
+
+ return SDValue();
+}
+
+SDValue LanaiTargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ switch (N->getOpcode()) {
+ default:
+ break;
+ case ISD::ADD:
+ case ISD::OR:
+ case ISD::XOR:
+ return combineSelectAndUseCommutative(N, DCI, /*AllOnes=*/false);
+ case ISD::AND:
+ return combineSelectAndUseCommutative(N, DCI, /*AllOnes=*/true);
+ case ISD::SUB:
+ return PerformSUBCombine(N, DCI);
+ }
+
+ return SDValue();
+}
diff --git a/lib/Target/Lanai/LanaiISelLowering.h b/lib/Target/Lanai/LanaiISelLowering.h
new file mode 100644
index 000000000000..16ce8edd27c5
--- /dev/null
+++ b/lib/Target/Lanai/LanaiISelLowering.h
@@ -0,0 +1,148 @@
+//===-- LanaiISelLowering.h - Lanai DAG Lowering Interface ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that Lanai uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_LANAIISELLOWERING_H
+#define LLVM_LIB_TARGET_LANAI_LANAIISELLOWERING_H
+
+#include "Lanai.h"
+#include "LanaiRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+namespace LanaiISD {
+enum {
+ FIRST_NUMBER = ISD::BUILTIN_OP_END,
+
+ ADJDYNALLOC,
+
+ // Return with a flag operand. Operand 0 is the chain operand.
+ RET_FLAG,
+
+  // CALL - These operations represent an abstract call instruction, which
+  // includes the callee and its arguments.
+ CALL,
+
+  // SELECT_CC - Operands 0 and 1 are the selection values, operand 2 is the
+  // condition code and operand 3 is the flag operand.
+ SELECT_CC,
+
+  // SETCC - Store the condition code to a register.
+ SETCC,
+
+ // SET_FLAG - Set flag compare.
+ SET_FLAG,
+
+ // SUBBF - Subtract with borrow that sets flags.
+ SUBBF,
+
+  // BR_CC - Used to glue together a conditional branch and comparison.
+ BR_CC,
+
+ // Wrapper - A wrapper node for TargetConstantPool, TargetExternalSymbol,
+ // and TargetGlobalAddress.
+ Wrapper,
+
+ // Get the Higher/Lower 16 bits from a 32-bit immediate.
+ HI,
+ LO,
+
+ // Small 21-bit immediate in global memory.
+ SMALL
+};
+} // namespace LanaiISD
+
+class LanaiSubtarget;
+
+class LanaiTargetLowering : public TargetLowering {
+public:
+ LanaiTargetLowering(const TargetMachine &TM, const LanaiSubtarget &STI);
+
+ // LowerOperation - Provide custom lowering hooks for some operations.
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+
+ // getTargetNodeName - This method returns the name of a target specific
+ // DAG node.
+ const char *getTargetNodeName(unsigned Opcode) const override;
+
+ SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSETCCE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+
+ unsigned getRegisterByName(const char *RegName, EVT VT,
+ SelectionDAG &DAG) const override;
+ std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ StringRef Constraint, MVT VT) const override;
+ ConstraintWeight
+ getSingleConstraintMatchWeight(AsmOperandInfo &Info,
+ const char *Constraint) const override;
+ void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const override;
+
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
+
+private:
+ SDValue LowerCCCCallTo(SDValue Chain, SDValue Callee,
+ CallingConv::ID CallConv, bool IsVarArg,
+ bool IsTailCall,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+
+ SDValue LowerCCCArguments(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+
+ SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
+ CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+
+ SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const override;
+
+ SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
+
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
+ SelectionDAG &DAG) const override;
+
+ const LanaiRegisterInfo *TRI;
+};
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_LANAIISELLOWERING_H
diff --git a/lib/Target/Lanai/LanaiInstrFormats.td b/lib/Target/Lanai/LanaiInstrFormats.td
new file mode 100644
index 000000000000..30289ea4ac0b
--- /dev/null
+++ b/lib/Target/Lanai/LanaiInstrFormats.td
@@ -0,0 +1,561 @@
+//===- LanaiInstrFormats.td - Lanai Instruction Formats ----*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+class InstLanai<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : Instruction {
+ field bits<32> Inst;
+ field bits<32> SoftFail = 0;
+ let Size = 4;
+
+ let Namespace = "Lanai";
+ let DecoderNamespace = "Lanai";
+
+ bits<4> Opcode;
+ let Inst{31 - 28} = Opcode;
+
+ dag OutOperandList = outs;
+ dag InOperandList = ins;
+ let AsmString = asmstr;
+ let Pattern = pattern;
+}
+
+//------------------------------------------------------------------------------
+// Register Immediate (RI)
+//------------------------------------------------------------------------------
+// Encoding:
+// -----------------------------------------------------------------
+// |0.A.A.A| . . . . | . . . . |F.H| . . . . . . . . . . . . . . . |
+// -----------------------------------------------------------------
+// opcode Rd Rs1 constant (16)
+//
+// Action:
+// Rd <- Rs1 op constant
+//
+// Except for shift instructions, `H' determines whether the constant
+// is in the high (1) or low (0) word. The other halfword is 0x0000,
+// except for the `AND' instruction (`AAA' = 100), for which the other
+// halfword is 0xFFFF, and shifts (`AAA' = 111), for which the constant is
+// sign extended.
+//
+// `F' determines whether the instruction modifies (1) or does not
+// modify (0) the program flags.
+//
+// `AAA' specifies the operation: `add' (000), `addc' (001), `sub'
+// (010), `subb' (011), `and' (100), `or' (101), `xor' (110), or `shift'
+// (111). For the shift, `H' specifies a logical (0) or arithmetic (1)
+// shift. The amount and direction of the shift are determined by the
+// sign extended constant interpreted as a two's complement number. The
+// shift operation is defined only for the range of:
+//      31 ... 0    -1 ... -31
+//      \       /   \        /
+//        left        right
+//        shift       shift
+//
+// If and only if the `F' bit is 1, RI instructions modify the
+// condition bits, `Z' (Zero), `N' (Negative), `V' (oVerflow), and `C'
+// (Carry), according to the result. If the flags are updated, they are
+// updated as follows:
+// `Z'
+// is set if the result is zero and cleared otherwise.
+//
+// `N'
+// is set to the most significant bit of the result.
+//
+// `V'
+// For arithmetic instructions (`add', `addc', `sub', `subb') `V' is
+// set if the sign (most significant) bits of the input operands are
+// the same but different from the sign bit of the result and cleared
+// otherwise. For other RI instructions, `V' is cleared.
+//
+// `C'
+// For arithmetic instructions, `C' is set/cleared if there is/is not
+// a carry generated out of the most significant bit when performing the
+// two's-complement addition (`sub(a,b) == a + ~b + 1', `subb(a,b) ==
+// a + ~b + `C''). For left shifts, `C' is set to the least
+// significant bit discarded by the shift operation. For all other
+// operations, `C' is cleared.
+//
+// A Jump is accomplished by `Rd' being `pc', and it has one shadow.
+//
+// The all-0s word is the instruction `R0 <- R0 + 0', which is a no-op.
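+//
+// For illustration (worked example from the field layout below): an `add'
+// (`AAA' = 000) with Rd = 4, Rs1 = 5, F = 0, H = 0 and constant 7 encodes as
+// 0000 00100 00101 0 0 0000000000000111, i.e. 0x02140007.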
+class InstRI<bits<3> op, dag outs, dag ins, string asmstr,
+ list<dag> pattern>
+ : InstLanai<outs, ins, asmstr, pattern>, Sched<[WriteALU]> {
+ let Itinerary = IIC_ALU;
+ bits<5> Rd;
+ bits<5> Rs1;
+ bit F;
+ bit H;
+ bits<16> imm16;
+
+ let Opcode{3} = 0;
+ let Opcode{2 - 0} = op;
+ let Inst{27 - 23} = Rd;
+ let Inst{22 - 18} = Rs1;
+ let Inst{17} = F;
+ let Inst{16} = H;
+ let Inst{15 - 0} = imm16;
+}
+
+//------------------------------------------------------------------------------
+// Register Register (RR)
+//------------------------------------------------------------------------------
+// Encoding:
+// -----------------------------------------------------------------
+// |1.1.0.0| . . . . | . . . . |F.I| . . . . |B.B.B|J.J.J.J.J|D.D.D|
+// -----------------------------------------------------------------
+// opcode Rd Rs1 Rs2 \ operation /
+//
+// Action:
+// `Rd <- Rs1 op Rs2' iff condition DDDI is true.
+//
+// `DDDI' is as described for the BR instruction.
+//
+// `F' determines whether the instruction modifies (1) or does not
+// modify (0) the program flags.
+//
+// `BBB' determines the operation: `add' (000), `addc' (001), `sub'
+// (010), `subb' (011), `and' (100), `or' (101), `xor' (110), or "special"
+// (111). The `JJJJJ' field is irrelevant except for special.
+//
+// `JJJJJ' determines which special operation is performed. `10---'
+// is a logical shift, `11---' is an arithmetic shift, and `00000' is
+// the SELECT operation. The amount and direction of the shift are
+// determined by the contents of `Rs2' interpreted as a two's complement
+// number (in the same way as shifts in the Register-Immediate
+// instructions in *Note RI::). For the SELECT operation, Rd gets Rs1 if
+// condition DDDI is true, Rs2 otherwise. All other `JJJJJ' combinations
+// are reserved for instructions that may be defined in the future.
+//
+// If the `F' bit is 1, RR instructions modify the condition bits, `Z'
+// (Zero), `N' (Negative), `V' (oVerflow), and `C' (Carry), according to
+// the result. All RR instructions modify the `Z', `N', and `V' flags.
+// Except for arithmetic instructions (`add', `addc', `sub', `subb'), `V'
+// is cleared. Only arithmetic instructions and shifts modify `C'. Right
+// shifts clear C.
+//
+// DDDI is as described in the table for the BR instruction and is only used
+// for the select instruction.
+//
+// A Jump is accomplished by `Rd' being `pc', and it has one shadow.
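+//
+// For example, a conditional move `Rd <- Rs1 if EQ, else Rs2' uses
+// `BBB' = 111, `JJJJJ' = 00000 and `DDDI' = 0111 (EQ, from the BR table).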
+class InstRR<bits<3> op, dag outs, dag ins, string asmstr,
+ list<dag> pattern>
+ : InstLanai<outs, ins, asmstr, pattern>, Sched<[WriteALU]> {
+ let Itinerary = IIC_ALU;
+ bits<5> Rd;
+ bits<5> Rs1;
+ bits<5> Rs2;
+ bit F;
+ bits<4> DDDI;
+ bits<5> JJJJJ;
+
+ let Opcode = 0b1100;
+ let Inst{27 - 23} = Rd;
+ let Inst{22 - 18} = Rs1;
+ let Inst{17} = F;
+ let Inst{16} = DDDI{0};
+ let Inst{15 - 11} = Rs2;
+ let Inst{10 - 8} = op;
+ let Inst{7 - 3} = JJJJJ;
+ let Inst{2 - 0} = DDDI{3 - 1};
+}
+
+//------------------------------------------------------------------------------
+// Register Memory (RM)
+//------------------------------------------------------------------------------
+// Encoding:
+// -----------------------------------------------------------------
+// |1.0.0.S| . . . . | . . . . |P.Q| . . . . . . . . . . . . . . . |
+// -----------------------------------------------------------------
+// opcode Rd Rs1 constant (16)
+//
+// Action:
+// Rd <- Memory(ea) (Load) see below for the
+// Memory(ea) <- Rd (Store) definition of ea.
+//
+// `S' determines whether the instruction is a Load (0) or a Store (1).
+// Loads appear in Rd one cycle after this instruction executes. If the
+// following instruction reads Rd, that instruction will be delayed by 1
+// clock cycle.
+//
+// PQ operation
+// -- ------------------------------------------
+// 00 ea = Rs1
+// 01 ea = Rs1, Rs1 <- Rs1 + constant
+// 10 ea = Rs1 + constant
+// 11 ea = Rs1 + constant, Rs1 <- Rs1 + constant
+//
+// The constant is sign-extended for this instruction.
+//
+// A Jump is accomplished by `Rd' being `pc', and it has *two* delay slots.
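+//
+// For example, a base-plus-offset load `Rd <- Memory(Rs1 + 8)' uses
+// `PQ' = 10 with constant 8; with `PQ' = 11 the effective address is the
+// same, but Rs1 is also updated to Rs1 + 8.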
+class InstRM<bit S, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstLanai<outs, ins, asmstr, pattern> {
+ bits<5> Rd;
+ bits<5> Rs1;
+ bit P;
+ bit Q;
+ bits<16> imm16;
+ // Dummy variables to allow multiclass definition of RM and RRM
+ bits<2> YL;
+ bit E;
+
+ let Opcode{3 - 1} = 0b100;
+ let Opcode{0} = S;
+ let Inst{27 - 23} = Rd;
+ let Inst{22 - 18} = Rs1;
+ let Inst{17} = P;
+ let Inst{16} = Q;
+ let Inst{15 - 0} = imm16;
+
+ let PostEncoderMethod = "adjustPqBitsRmAndRrm";
+}
+
+//------------------------------------------------------------------------------
+// Register Register Memory (RRM)
+//------------------------------------------------------------------------------
+// Encoding:
+// -----------------------------------------------------------------
+// |1.0.1.S| . . . . | . . . . |P.Q| . . . . |B.B.B|J.J.J.J.J|Y.L.E|
+// -----------------------------------------------------------------
+// opcode Rd Rs1 Rs2 \ operation /
+//
+// Action:
+// Rd <- Memory(ea) (Load) see below for the
+// Memory(ea) <- Rd (Store) definition of ea.
+//
+// The RRM instruction is identical to the RM (*note RM::.) instruction
+// except that:
+//
+// 1. `Rs1 + constant' is replaced with `Rs1 op Rs2', where `op' is
+// determined in the same way as in the RR instruction (*note RR::.)
+// and
+//
+// 2. part-word memory accesses are allowed as specified below.
+//
+// If `BBB' != 111 (i.e.: For all but shift operations):
+// If `YLE' = 01- => fuLl-word memory access
+// If `YLE' = 00- => half-word memory access
+// If `YLE' = 10- => bYte memory access
+// If `YLE' = --1 => loads are zEro extended
+// If `YLE' = --0 => loads are sign extended
+//
+// If `BBB' = 111 (for shift operations):
+// fullword memory accesses are performed.
+//
+// All part-word loads write the least significant part of the
+// destination register with the higher-order bits zero- or sign-extended.
+// All part-word stores store the least significant part-word of the
+// source register in the destination memory location.
+//
+// A Jump is accomplished by `Rd' being `pc', and it has *two* delay slots.
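+//
+// For example, a sign-extended byte load uses `YLE' = 100 (bYte access,
+// sign extended), while a zero-extended half-word load uses `YLE' = 001.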
+class InstRRM<bit S, dag outs, dag ins, string asmstr,
+ list<dag> pattern>
+ : InstLanai<outs, ins, asmstr, pattern> {
+ bits<5> Rd;
+ bits<5> Rs1;
+ bits<5> Rs2;
+ bit P;
+ bit Q;
+ bits<3> BBB;
+ bits<5> JJJJJ;
+ bits<2> YL;
+ bit E;
+
+ let Opcode{3 - 1} = 0b101;
+ let Opcode{0} = S;
+ let Inst{27 - 23} = Rd;
+ let Inst{22 - 18} = Rs1;
+ let Inst{17} = P;
+ let Inst{16} = Q;
+ let Inst{15 - 11} = Rs2;
+ let Inst{10 - 8} = BBB;
+ let Inst{7 - 3} = JJJJJ;
+ let Inst{2 - 1} = YL;
+ let Inst{0} = E;
+
+ let PostEncoderMethod = "adjustPqBitsRmAndRrm";
+}
+
+//------------------------------------------------------------------------------
+// Conditional Branch (BR)
+//------------------------------------------------------------------------------
+// Encoding:
+// -----------------------------------------------------------------
+// |1.1.1.0|D.D.D| . . . . . . . . . . . . . . . . . . . . . . |0.I|
+// -----------------------------------------------------------------
+// opcode condition constant (23)
+//
+// Action:
+// if (condition) { `pc' <- 4*(zero-extended constant) }
+//
+// The BR instruction is an absolute branch.
+// The constant is scaled as shown by its position in the instruction word such
+// that it specifies word-aligned addresses in the range [0, 2^25 - 4].
+//
+// The `DDDI' field selects the condition that causes the branch to be taken.
+// (the `I' (Invert sense) bit inverts the sense of the condition):
+//
+// DDDI logical function [code, used for...]
+// ---- -------------------------------------- ------------------------
+// 0000 1 [T, true]
+// 0001 0 [F, false]
+// 0010 C AND Z' [HI, high]
+// 0011 C' OR Z [LS, low or same]
+// 0100 C' [CC, carry cleared]
+// 0101 C [CS, carry set]
+// 0110 Z' [NE, not equal]
+// 0111 Z [EQ, equal]
+// 1000 V' [VC, oVerflow cleared]
+// 1001 V [VS, oVerflow set]
+// 1010 N' [PL, plus]
+// 1011 N [MI, minus]
+// 1100 (N AND V) OR (N' AND V') [GE, greater than or equal]
+// 1101 (N AND V') OR (N' AND V) [LT, less than]
+// 1110 (N AND V AND Z') OR (N' AND V' AND Z') [GT, greater than]
+// 1111 (Z) OR (N AND V') OR (N' AND V) [LE, less than or equal]
+//
+// If the branch is not taken, the BR instruction is a no-op. If the branch is
+// taken, the processor starts executing instructions at the branch target
+// address *after* the processor has executed one more instruction. That is,
+// the branch has one "branch delay slot". Be very careful if you find yourself
+// wanting to put a branch in a branch delay slot!
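+//
+// For example, a branch taken on EQ uses `DDDI' = 0111 from the table above:
+// Inst{27-25} = 0b011 and the `I' bit, Inst{0}, is 1.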
+class InstBR<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstLanai<outs, ins, asmstr, pattern> {
+ let Itinerary = IIC_ALU;
+ bits<25> addr;
+ bits<4> DDDI;
+
+ let Opcode = 0b1110;
+ let Inst{27 - 25} = DDDI{3 - 1};
+ let Inst{24 - 0} = addr;
+ // These instructions overwrite the last two address bits (which are assumed
+ // and ensured to be 0).
+ let Inst{1} = 0;
+ let Inst{0} = DDDI{0};
+}
+
+//------------------------------------------------------------------------------
+// Conditional Branch Relative (BRR)
+//------------------------------------------------------------------------------
+// Encoding:
+// -----------------------------------------------------------------
+// |1.1.1.0|D.D.D|1|-| . . . . |-.-| . . . . . . . . . . . . . |1.I|
+// -----------------------------------------------------------------
+// opcode condition Rs1 constant (14)
+// Action:
+// if (condition) { `pc' <- Rs1 + 4*(sign-extended constant) }
+//
+// BRR behaves like BR, except the branch target is register-relative: `pc'
+// receives Rs1 plus four times the sign-extended 16-bit constant.
+class InstBRR<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstLanai<outs, ins, asmstr, pattern> {
+ bits<4> DDDI;
+ bits<5> Rs1;
+ bits<16> imm16;
+
+ let Opcode = 0b1110;
+ let Inst{27 - 25} = DDDI{3 - 1};
+ let Inst{24} = 1;
+ let Inst{22 - 18} = Rs1;
+ let Inst{17 - 16} = 0;
+ let Inst{15 - 0} = imm16;
+ // Overwrite last two bits which have to be zero
+ let Inst{1} = 1;
+ let Inst{0} = DDDI{0};
+
+ // Set don't cares to zero
+ let Inst{23} = 0;
+}
+
+//------------------------------------------------------------------------------
+// Conditional Set (SCC)
+//------------------------------------------------------------------------------
+// Encoding:
+// -----------------------------------------------------------------
+// |1.1.1.0|D.D.D|0.-| . . . . |-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-|1.I|
+// -----------------------------------------------------------------
+// opcode condition Rs1
+//
+// Action:
+// Rs1 <- logical function result
+//
+// SCC sets dst_reg to the boolean result of computing the logical function
+// specified by DDDI, as described in the table for the BR instruction.
+class InstSCC<dag outs, dag ins, string asmstr,
+ list<dag> pattern>
+ : InstLanai<outs, ins, asmstr, pattern> {
+ let Itinerary = IIC_ALU;
+ bits<5> Rs1; // dst_reg in documentation
+ bits<4> DDDI;
+
+ let Opcode = 0b1110;
+ let Inst{27 - 25} = DDDI{3 - 1};
+ let Inst{24} = 0;
+ let Inst{22 - 18} = Rs1;
+ let Inst{1} = 1;
+ let Inst{0} = DDDI{0};
+
+ // Set don't cares to zero
+ let Inst{23} = 0;
+ let Inst{17 - 2} = 0;
+}
+
+//------------------------------------------------------------------------------
+// Special Load/Store (SLS)
+//------------------------------------------------------------------------------
+//
+// Encoding:
+// -----------------------------------------------------------------
+// |1.1.1.1| . . . . | . . . . |0.S| . . . . . . . . . . . . . . . |
+// -----------------------------------------------------------------
+// opcode Rd addr 5msb's address 16 lsb's
+//
+// Action:
+// If S = 0 (LOAD): Rd <- Memory(address);
+// If S = 1 (STORE): Memory(address) <- Rd
+//
+// The timing is the same as for RM (*note RM::.) and RRM (*note
+// RRM::.) instructions. The two low-order bits of the 21-bit address are
+// ignored. The address is zero extended. Fullword memory accesses are
+// performed.
+class InstSLS<bit S, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstLanai<outs, ins, asmstr, pattern> {
+ bits<5> Rd;
+ bits<5> msb;
+ bits<16> lsb;
+
+ let Opcode = 0b1111;
+ let Inst{27 - 23} = Rd;
+ let Inst{22 - 18} = msb;
+ let Inst{17} = 0;
+ let Inst{16} = S;
+ let Inst{15 - 0} = lsb;
+}
+
+//------------------------------------------------------------------------------
+// Special Load Immediate (SLI)
+//------------------------------------------------------------------------------
+// Encoding:
+// -----------------------------------------------------------------
+// |1.1.1.1| . . . . | . . . . |1.0| . . . . . . . . . . . . . . . |
+// -----------------------------------------------------------------
+// opcode Rd const 5msb's constant 16 lsb's
+//
+// Action:
+// Rd <- constant
+//
+// The 21-bit constant is zero-extended. The timing is the same as the
+// RM instruction (*note RM::.).
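+//
+// For example, the 21-bit constant 0x12345 is encoded as msb = 0b00001 and
+// lsb = 0x2345.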
+class InstSLI<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstLanai<outs, ins, asmstr, pattern> {
+ bits<5> Rd;
+ bits<5> msb;
+ bits<16> lsb;
+
+ let Opcode = 0b1111;
+ let Inst{27 - 23} = Rd;
+ let Inst{22 - 18} = msb;
+ let Inst{17} = 1;
+ let Inst{16} = 0;
+ let Inst{15 - 0} = lsb;
+}
+
+//------------------------------------------------------------------------------
+// Special Part-Word Load/Store (SPLS)
+//------------------------------------------------------------------------------
+// Encoding:
+// -----------------------------------------------------------------
+// |1.1.1.1| . . . . | . . . . |1.1.0.Y.S.E.P.Q| . . . . . . . . . |
+// -----------------------------------------------------------------
+// opcode Rd Rs1 constant (10)
+//
+// Action:
+// If `YS' = 11 (bYte Store):
+// Memory(ea) <- (least significant byte of Rd)
+// If `YS' = 01 (halfword Store):
+// Memory(ea) <- (least significant half-word of Rd)
+// If `YS' = 10 (bYte load): Rd <- Memory(ea)
+// If `YS' = 00 (halfword load): Rd <- Memory(ea)
+// [Note: here ea is determined as in the RM instruction.]
+// If `SE' = 01 then the value is zEro extended
+// before being loaded into Rd.
+// If `SE' = 00 then the value is sign extended
+// before being loaded into Rd.
+//
+// `P' and `Q' are used to determine `ea' as in the RM instruction. The
+// constant is sign extended. The timing is the same as the RM and RRM
+// instructions. *Note RM:: and *Note RRM::.
+//
+// All part-word loads write the part-word into the least significant
+// part of the destination register, with the higher-order bits zero- or
+// sign-extended. All part-word stores store the least significant
+// part-word of the source register into the destination memory location.
+class InstSPLS<dag outs, dag ins, string asmstr,
+ list<dag> pattern>
+ : InstLanai<outs, ins, asmstr, pattern> {
+ bits<5> Rd;
+ bits<5> Rs1;
+ bits<5> msb;
+ bit Y;
+ bit S;
+ bit E;
+ bit P;
+ bit Q;
+ bits<10> imm10;
+
+ let Opcode = 0b1111;
+ let Inst{27 - 23} = Rd;
+ let Inst{22 - 18} = Rs1;
+ let Inst{17 - 15} = 0b110;
+ let Inst{14} = Y;
+ let Inst{13} = S;
+ let Inst{12} = E;
+ let Inst{11} = P;
+ let Inst{10} = Q;
+ let Inst{9 - 0} = imm10;
+
+ let PostEncoderMethod = "adjustPqBitsSpls";
+}
+
+//------------------------------------------------------------------------------
+// Special instructions (popc, leadz, trailz)
+//------------------------------------------------------------------------------
+// Encoding:
+// -----------------------------------------------------------------
+// |1.1.0.1| Rd | Rs1 |F.-| . . . . | . . | . . . . | OP |
+// -----------------------------------------------------------------
+// opcode Rd Rs1
+// Action:
+// Rd <- Perform action encoded in OP on Rs1
+// OP is one of:
+// 0b001 POPC Population count;
+// 0b010 LEADZ Count number of leading zeros;
+// 0b011 TRAILZ Count number of trailing zeros;
+class InstSpecial<bits<3> op, dag outs, dag ins, string asmstr,
+ list<dag> pattern> : InstLanai<outs, ins, asmstr,
+ pattern>, Sched<[WriteALU]> {
+ let Itinerary = IIC_ALU;
+ bit F;
+ bits<5> Rd;
+ bits<5> Rs1;
+
+ let Opcode = 0b1101;
+ let Inst{27 - 23} = Rd;
+ let Inst{22 - 18} = Rs1;
+ let Inst{17} = F;
+ let Inst{16 - 3} = 0;
+ let Inst{2 - 0} = op;
+}
+
+// Pseudo instructions
+class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstLanai<outs, ins, asmstr, pattern> {
+ let Inst{15 - 0} = 0;
+ let isPseudo = 1;
+}
diff --git a/lib/Target/Lanai/LanaiInstrInfo.cpp b/lib/Target/Lanai/LanaiInstrInfo.cpp
new file mode 100644
index 000000000000..673d23daf886
--- /dev/null
+++ b/lib/Target/Lanai/LanaiInstrInfo.cpp
@@ -0,0 +1,803 @@
+//===-- LanaiInstrInfo.cpp - Lanai Instruction Information ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Lanai implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Lanai.h"
+#include "LanaiInstrInfo.h"
+#include "LanaiMachineFunctionInfo.h"
+#include "LanaiTargetMachine.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+#define GET_INSTRINFO_CTOR_DTOR
+#include "LanaiGenInstrInfo.inc"
+
+LanaiInstrInfo::LanaiInstrInfo()
+ : LanaiGenInstrInfo(Lanai::ADJCALLSTACKDOWN, Lanai::ADJCALLSTACKUP),
+ RegisterInfo() {}
+
+void LanaiInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator Position,
+ const DebugLoc &DL,
+ unsigned DestinationRegister,
+ unsigned SourceRegister,
+ bool KillSource) const {
+ if (!Lanai::GPRRegClass.contains(DestinationRegister, SourceRegister)) {
+ llvm_unreachable("Impossible reg-to-reg copy");
+ }
+
+ BuildMI(MBB, Position, DL, get(Lanai::OR_I_LO), DestinationRegister)
+ .addReg(SourceRegister, getKillRegState(KillSource))
+ .addImm(0);
+}
+
+void LanaiInstrInfo::storeRegToStackSlot(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
+ unsigned SourceRegister, bool IsKill, int FrameIndex,
+ const TargetRegisterClass *RegisterClass,
+ const TargetRegisterInfo * /*RegisterInfo*/) const {
+ DebugLoc DL;
+ if (Position != MBB.end()) {
+ DL = Position->getDebugLoc();
+ }
+
+ if (!Lanai::GPRRegClass.hasSubClassEq(RegisterClass)) {
+ llvm_unreachable("Can't store this register to stack slot");
+ }
+ BuildMI(MBB, Position, DL, get(Lanai::SW_RI))
+ .addReg(SourceRegister, getKillRegState(IsKill))
+ .addFrameIndex(FrameIndex)
+ .addImm(0)
+ .addImm(LPAC::ADD);
+}
+
+void LanaiInstrInfo::loadRegFromStackSlot(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
+ unsigned DestinationRegister, int FrameIndex,
+ const TargetRegisterClass *RegisterClass,
+ const TargetRegisterInfo * /*RegisterInfo*/) const {
+ DebugLoc DL;
+ if (Position != MBB.end()) {
+ DL = Position->getDebugLoc();
+ }
+
+ if (!Lanai::GPRRegClass.hasSubClassEq(RegisterClass)) {
+ llvm_unreachable("Can't load this register from stack slot");
+ }
+ BuildMI(MBB, Position, DL, get(Lanai::LDW_RI), DestinationRegister)
+ .addFrameIndex(FrameIndex)
+ .addImm(0)
+ .addImm(LPAC::ADD);
+}
+
+bool LanaiInstrInfo::areMemAccessesTriviallyDisjoint(
+ MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis * /*AA*/) const {
+ assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
+ assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
+
+ if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
+ MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
+ return false;
+
+  // Retrieve the base register, the offset from the base register, and the
+  // width of the access (e.g. 1, 2 or 4 bytes). If the base registers are
+  // identical and the lower access's offset plus its width does not extend
+  // past the higher access's offset, the accesses do not overlap.
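+  // For example, two width-4 accesses at offsets 0 and 4 off the same base
+  // register are disjoint, since 0 + 4 <= 4.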
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+ unsigned BaseRegA = 0, BaseRegB = 0;
+ int64_t OffsetA = 0, OffsetB = 0;
+  unsigned WidthA = 0, WidthB = 0;
+ if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
+ getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
+ if (BaseRegA == BaseRegB) {
+ int LowOffset = std::min(OffsetA, OffsetB);
+ int HighOffset = std::max(OffsetA, OffsetB);
+ int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
+ if (LowOffset + LowWidth <= HighOffset)
+ return true;
+ }
+ }
+ return false;
+}
+
+bool LanaiInstrInfo::expandPostRAPseudo(MachineInstr & /*MI*/) const {
+ return false;
+}
+
+static LPCC::CondCode getOppositeCondition(LPCC::CondCode CC) {
+ switch (CC) {
+ case LPCC::ICC_T: // true
+ return LPCC::ICC_F;
+ case LPCC::ICC_F: // false
+ return LPCC::ICC_T;
+ case LPCC::ICC_HI: // high
+ return LPCC::ICC_LS;
+ case LPCC::ICC_LS: // low or same
+ return LPCC::ICC_HI;
+ case LPCC::ICC_CC: // carry cleared
+ return LPCC::ICC_CS;
+ case LPCC::ICC_CS: // carry set
+ return LPCC::ICC_CC;
+ case LPCC::ICC_NE: // not equal
+ return LPCC::ICC_EQ;
+ case LPCC::ICC_EQ: // equal
+ return LPCC::ICC_NE;
+ case LPCC::ICC_VC: // oVerflow cleared
+ return LPCC::ICC_VS;
+ case LPCC::ICC_VS: // oVerflow set
+ return LPCC::ICC_VC;
+ case LPCC::ICC_PL: // plus (note: 0 is "minus" too here)
+ return LPCC::ICC_MI;
+ case LPCC::ICC_MI: // minus
+ return LPCC::ICC_PL;
+ case LPCC::ICC_GE: // greater than or equal
+ return LPCC::ICC_LT;
+ case LPCC::ICC_LT: // less than
+ return LPCC::ICC_GE;
+ case LPCC::ICC_GT: // greater than
+ return LPCC::ICC_LE;
+ case LPCC::ICC_LE: // less than or equal
+ return LPCC::ICC_GT;
+ default:
+ llvm_unreachable("Invalid condtional code");
+ }
+}
+
+std::pair<unsigned, unsigned>
+LanaiInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
+ return std::make_pair(TF, 0u);
+}
+
+ArrayRef<std::pair<unsigned, const char *>>
+LanaiInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
+ using namespace LanaiII;
+ static const std::pair<unsigned, const char *> TargetFlags[] = {
+ {MO_ABS_HI, "lanai-hi"},
+ {MO_ABS_LO, "lanai-lo"},
+ {MO_NO_FLAG, "lanai-nf"}};
+ return makeArrayRef(TargetFlags);
+}
+
+bool LanaiInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &CmpMask,
+ int &CmpValue) const {
+ switch (MI.getOpcode()) {
+ default:
+ break;
+ case Lanai::SFSUB_F_RI_LO:
+ case Lanai::SFSUB_F_RI_HI:
+ SrcReg = MI.getOperand(0).getReg();
+ SrcReg2 = 0;
+ CmpMask = ~0;
+ CmpValue = MI.getOperand(1).getImm();
+ return true;
+ case Lanai::SFSUB_F_RR:
+ SrcReg = MI.getOperand(0).getReg();
+ SrcReg2 = MI.getOperand(1).getReg();
+ CmpMask = ~0;
+ CmpValue = 0;
+ return true;
+ }
+
+ return false;
+}
+
+// isRedundantFlagInstr - check whether the first instruction, whose only
+// purpose is to update flags, can be made redundant.
+// * SFSUB_F_RR can be made redundant by SUB_RI if the operands are the same.
+// * SFSUB_F_RI can be made redundant by SUB_I if the operands are the same.
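+//
+// For example, given
+//   %r1 = SUB_R %r2, %r3
+//   SFSUB_F_RR %r2, %r3
+// the compare is redundant; optimizeCompareInstr below can turn the SUB_R
+// into the flag-setting SUB_F_R and erase the compare.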
+inline static bool isRedundantFlagInstr(MachineInstr *CmpI, unsigned SrcReg,
+ unsigned SrcReg2, int ImmValue,
+ MachineInstr *OI) {
+ if (CmpI->getOpcode() == Lanai::SFSUB_F_RR &&
+ OI->getOpcode() == Lanai::SUB_R &&
+ ((OI->getOperand(1).getReg() == SrcReg &&
+ OI->getOperand(2).getReg() == SrcReg2) ||
+ (OI->getOperand(1).getReg() == SrcReg2 &&
+ OI->getOperand(2).getReg() == SrcReg)))
+ return true;
+
+ if (((CmpI->getOpcode() == Lanai::SFSUB_F_RI_LO &&
+ OI->getOpcode() == Lanai::SUB_I_LO) ||
+ (CmpI->getOpcode() == Lanai::SFSUB_F_RI_HI &&
+ OI->getOpcode() == Lanai::SUB_I_HI)) &&
+ OI->getOperand(1).getReg() == SrcReg &&
+ OI->getOperand(2).getImm() == ImmValue)
+ return true;
+ return false;
+}
+
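+// Map an opcode to its flag-setting variant (e.g. SUB_R to SUB_F_R); returns
+// Lanai::NOP when no flag-setting variant exists, which callers treat as a
+// sentinel.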
+inline static unsigned flagSettingOpcodeVariant(unsigned OldOpcode) {
+ switch (OldOpcode) {
+ case Lanai::ADD_I_HI:
+ return Lanai::ADD_F_I_HI;
+ case Lanai::ADD_I_LO:
+ return Lanai::ADD_F_I_LO;
+ case Lanai::ADD_R:
+ return Lanai::ADD_F_R;
+ case Lanai::ADDC_I_HI:
+ return Lanai::ADDC_F_I_HI;
+ case Lanai::ADDC_I_LO:
+ return Lanai::ADDC_F_I_LO;
+ case Lanai::ADDC_R:
+ return Lanai::ADDC_F_R;
+ case Lanai::AND_I_HI:
+ return Lanai::AND_F_I_HI;
+ case Lanai::AND_I_LO:
+ return Lanai::AND_F_I_LO;
+ case Lanai::AND_R:
+ return Lanai::AND_F_R;
+ case Lanai::OR_I_HI:
+ return Lanai::OR_F_I_HI;
+ case Lanai::OR_I_LO:
+ return Lanai::OR_F_I_LO;
+ case Lanai::OR_R:
+ return Lanai::OR_F_R;
+ case Lanai::SL_I:
+ return Lanai::SL_F_I;
+ case Lanai::SRL_R:
+ return Lanai::SRL_F_R;
+ case Lanai::SA_I:
+ return Lanai::SA_F_I;
+ case Lanai::SRA_R:
+ return Lanai::SRA_F_R;
+ case Lanai::SUB_I_HI:
+ return Lanai::SUB_F_I_HI;
+ case Lanai::SUB_I_LO:
+ return Lanai::SUB_F_I_LO;
+ case Lanai::SUB_R:
+ return Lanai::SUB_F_R;
+ case Lanai::SUBB_I_HI:
+ return Lanai::SUBB_F_I_HI;
+ case Lanai::SUBB_I_LO:
+ return Lanai::SUBB_F_I_LO;
+ case Lanai::SUBB_R:
+ return Lanai::SUBB_F_R;
+ case Lanai::XOR_I_HI:
+ return Lanai::XOR_F_I_HI;
+ case Lanai::XOR_I_LO:
+ return Lanai::XOR_F_I_LO;
+ case Lanai::XOR_R:
+ return Lanai::XOR_F_R;
+ default:
+ return Lanai::NOP;
+ }
+}
+
+bool LanaiInstrInfo::optimizeCompareInstr(
+ MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int /*CmpMask*/,
+ int CmpValue, const MachineRegisterInfo *MRI) const {
+ // Get the unique definition of SrcReg.
+ MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
+ if (!MI)
+ return false;
+
+ // Get ready to iterate backward from CmpInstr.
+ MachineBasicBlock::iterator I = CmpInstr, E = MI,
+ B = CmpInstr.getParent()->begin();
+
+ // Early exit if CmpInstr is at the beginning of the BB.
+ if (I == B)
+ return false;
+
+ // There are two possible candidates which can be changed to set SR:
+ // One is MI, the other is a SUB instruction.
+ // * For SFSUB_F_RR(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
+ // * For SFSUB_F_RI(r1, CmpValue), we are looking for SUB(r1, CmpValue).
+ MachineInstr *Sub = nullptr;
+ if (SrcReg2 != 0)
+ // MI is not a candidate to transform into a flag setting instruction.
+ MI = nullptr;
+ else if (MI->getParent() != CmpInstr.getParent() || CmpValue != 0) {
+ // Conservatively refuse to convert an instruction which isn't in the same
+ // BB as the comparison. Don't return if SFSUB_F_RI and CmpValue != 0 as Sub
+ // may still be a candidate.
+ if (CmpInstr.getOpcode() == Lanai::SFSUB_F_RI_LO)
+ MI = nullptr;
+ else
+ return false;
+ }
+
+ // Check that SR isn't set between the comparison instruction and the
+ // instruction we want to change while searching for Sub.
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+ for (--I; I != E; --I) {
+ const MachineInstr &Instr = *I;
+
+ if (Instr.modifiesRegister(Lanai::SR, TRI) ||
+ Instr.readsRegister(Lanai::SR, TRI))
+ // This instruction modifies or uses SR after the one we want to change.
+ // We can't do this transformation.
+ return false;
+
+ // Check whether CmpInstr can be made redundant by the current instruction.
+ if (isRedundantFlagInstr(&CmpInstr, SrcReg, SrcReg2, CmpValue, &*I)) {
+ Sub = &*I;
+ break;
+ }
+
+ // Don't search outside the containing basic block.
+ if (I == B)
+ return false;
+ }
+
+ // Return false if no candidates exist.
+ if (!MI && !Sub)
+ return false;
+
+ // The single candidate is called MI.
+ if (!MI)
+ MI = Sub;
+
+ if (flagSettingOpcodeVariant(MI->getOpcode()) != Lanai::NOP) {
+ bool isSafe = false;
+
+ SmallVector<std::pair<MachineOperand *, LPCC::CondCode>, 4>
+ OperandsToUpdate;
+ I = CmpInstr;
+ E = CmpInstr.getParent()->end();
+ while (!isSafe && ++I != E) {
+ const MachineInstr &Instr = *I;
+ for (unsigned IO = 0, EO = Instr.getNumOperands(); !isSafe && IO != EO;
+ ++IO) {
+ const MachineOperand &MO = Instr.getOperand(IO);
+ if (MO.isRegMask() && MO.clobbersPhysReg(Lanai::SR)) {
+ isSafe = true;
+ break;
+ }
+ if (!MO.isReg() || MO.getReg() != Lanai::SR)
+ continue;
+ if (MO.isDef()) {
+ isSafe = true;
+ break;
+ }
+        // The condition code operand immediately precedes the SR operand.
+        LPCC::CondCode CC =
+            (LPCC::CondCode)Instr.getOperand(IO - 1).getImm();
+
+ if (Sub) {
+ LPCC::CondCode NewCC = getOppositeCondition(CC);
+ if (NewCC == LPCC::ICC_T)
+ return false;
+ // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based on
+ // CMP needs to be updated to be based on SUB. Push the condition
+ // code operands to OperandsToUpdate. If it is safe to remove
+ // CmpInstr, the condition code of these operands will be modified.
+ if (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
+ Sub->getOperand(2).getReg() == SrcReg) {
+ OperandsToUpdate.push_back(
+ std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
+ }
+ } else {
+ // No Sub, so this is x = <op> y, z; cmp x, 0.
+ switch (CC) {
+ case LPCC::ICC_EQ: // Z
+ case LPCC::ICC_NE: // Z
+ case LPCC::ICC_MI: // N
+ case LPCC::ICC_PL: // N
+ case LPCC::ICC_F: // none
+ case LPCC::ICC_T: // none
+ // SR can be used multiple times, we should continue.
+ break;
+ case LPCC::ICC_CS: // C
+ case LPCC::ICC_CC: // C
+ case LPCC::ICC_VS: // V
+ case LPCC::ICC_VC: // V
+ case LPCC::ICC_HI: // C Z
+ case LPCC::ICC_LS: // C Z
+ case LPCC::ICC_GE: // N V
+ case LPCC::ICC_LT: // N V
+ case LPCC::ICC_GT: // Z N V
+ case LPCC::ICC_LE: // Z N V
+            // The instruction uses the C or V bit, which is not safe.
+ return false;
+ case LPCC::UNKNOWN:
+ return false;
+ }
+ }
+ }
+ }
+
+ // If SR is not killed nor re-defined, we should check whether it is
+ // live-out. If it is live-out, do not optimize.
+ if (!isSafe) {
+ MachineBasicBlock *MBB = CmpInstr.getParent();
+ for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
+ SE = MBB->succ_end();
+ SI != SE; ++SI)
+ if ((*SI)->isLiveIn(Lanai::SR))
+ return false;
+ }
+
+ // Toggle the optional operand to SR.
+ MI->setDesc(get(flagSettingOpcodeVariant(MI->getOpcode())));
+ MI->addRegisterDefined(Lanai::SR);
+ CmpInstr.eraseFromParent();
+ return true;
+ }
+
+ return false;
+}
+
+bool LanaiInstrInfo::analyzeSelect(const MachineInstr &MI,
+ SmallVectorImpl<MachineOperand> &Cond,
+ unsigned &TrueOp, unsigned &FalseOp,
+ bool &Optimizable) const {
+ assert(MI.getOpcode() == Lanai::SELECT && "unknown select instruction");
+ // Select operands:
+ // 0: Def.
+ // 1: True use.
+ // 2: False use.
+ // 3: Condition code.
+ TrueOp = 1;
+ FalseOp = 2;
+ Cond.push_back(MI.getOperand(3));
+ Optimizable = true;
+ return false;
+}
+
+// Identify instructions that can be folded into a SELECT instruction, and
+// return the defining instruction.
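+//
+// For instance, when the only use of `%t = ADD_R %a, %b' is the true operand
+// of a SELECT, the ADD_R can be predicated on the SELECT's condition and the
+// SELECT folded away (see optimizeSelect below).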
+static MachineInstr *canFoldIntoSelect(unsigned Reg,
+ const MachineRegisterInfo &MRI) {
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ return nullptr;
+ if (!MRI.hasOneNonDBGUse(Reg))
+ return nullptr;
+ MachineInstr *MI = MRI.getVRegDef(Reg);
+ if (!MI)
+ return nullptr;
+ // MI is folded into the SELECT by predicating it.
+ if (!MI->isPredicable())
+ return nullptr;
+ // Check if MI has any non-dead defs or physreg uses. This also detects
+ // predicated instructions which will be reading SR.
+ for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ // Reject frame index operands.
+ if (MO.isFI() || MO.isCPI() || MO.isJTI())
+ return nullptr;
+ if (!MO.isReg())
+ continue;
+ // MI can't have any tied operands, that would conflict with predication.
+ if (MO.isTied())
+ return nullptr;
+ if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+ return nullptr;
+ if (MO.isDef() && !MO.isDead())
+ return nullptr;
+ }
+ bool DontMoveAcrossStores = true;
+ if (!MI->isSafeToMove(/*AliasAnalysis=*/nullptr, DontMoveAcrossStores))
+ return nullptr;
+ return MI;
+}
+
+MachineInstr *
+LanaiInstrInfo::optimizeSelect(MachineInstr &MI,
+ SmallPtrSetImpl<MachineInstr *> &SeenMIs,
+ bool /*PreferFalse*/) const {
+ assert(MI.getOpcode() == Lanai::SELECT && "unknown select instruction");
+ MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
+ MachineInstr *DefMI = canFoldIntoSelect(MI.getOperand(1).getReg(), MRI);
+ bool Invert = !DefMI;
+ if (!DefMI)
+ DefMI = canFoldIntoSelect(MI.getOperand(2).getReg(), MRI);
+ if (!DefMI)
+ return nullptr;
+
+ // Find new register class to use.
+ MachineOperand FalseReg = MI.getOperand(Invert ? 1 : 2);
+ unsigned DestReg = MI.getOperand(0).getReg();
+ const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
+ if (!MRI.constrainRegClass(DestReg, PreviousClass))
+ return nullptr;
+
+ // Create a new predicated version of DefMI.
+ MachineInstrBuilder NewMI =
+ BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), DefMI->getDesc(), DestReg);
+
+ // Copy all the DefMI operands, excluding its (null) predicate.
+ const MCInstrDesc &DefDesc = DefMI->getDesc();
+ for (unsigned i = 1, e = DefDesc.getNumOperands();
+ i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
+ NewMI.addOperand(DefMI->getOperand(i));
+
+ unsigned CondCode = MI.getOperand(3).getImm();
+ if (Invert)
+ NewMI.addImm(getOppositeCondition(LPCC::CondCode(CondCode)));
+ else
+ NewMI.addImm(CondCode);
+ NewMI.copyImplicitOps(MI);
+
+ // The output register value when the predicate is false is an implicit
+ // register operand tied to the first def. The tie makes the register
+ // allocator ensure the FalseReg is allocated the same register as operand 0.
+ FalseReg.setImplicit();
+ NewMI.addOperand(FalseReg);
+ NewMI->tieOperands(0, NewMI->getNumOperands() - 1);
+
+ // Update SeenMIs set: register newly created MI and erase removed DefMI.
+ SeenMIs.insert(NewMI);
+ SeenMIs.erase(DefMI);
+
+ // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
+ // DefMI would be invalid when transferred inside the loop. Checking for a
+ // loop is expensive, but at least remove kill flags if they are in different
+ // BBs.
+ if (DefMI->getParent() != MI.getParent())
+ NewMI->clearKillInfo();
+
+ // The caller will erase MI, but not DefMI.
+ DefMI->eraseFromParent();
+ return NewMI;
+}
+
+// The analyzeBranch function is used to examine conditional instructions and
+// remove unnecessary instructions. This method is used by BranchFolder and
+// IfConverter machine function passes to improve the CFG.
+// - TrueBlock is set to the destination if the condition evaluates to true
+// (it is nullptr if the destination is the fall-through block);
+// - FalseBlock is set to the destination if the condition evaluates to false
+// (it is nullptr if the branch is unconditional);
+// - Condition is populated with the machine operands needed to generate the
+// branch to insert in InsertBranch;
+// Returns: false if the branch could successfully be analyzed.
+bool LanaiInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TrueBlock,
+ MachineBasicBlock *&FalseBlock,
+ SmallVectorImpl<MachineOperand> &Condition,
+ bool AllowModify) const {
+ // Iterator to current instruction being considered.
+ MachineBasicBlock::iterator Instruction = MBB.end();
+
+ // Start from the bottom of the block and work up, examining the
+ // terminator instructions.
+ while (Instruction != MBB.begin()) {
+ --Instruction;
+
+ // Skip over debug values.
+ if (Instruction->isDebugValue())
+ continue;
+
+ // Working from the bottom, when we see a non-terminator
+ // instruction, we're done.
+ if (!isUnpredicatedTerminator(*Instruction))
+ break;
+
+ // A terminator that isn't a branch can't easily be handled
+ // by this analysis.
+ if (!Instruction->isBranch())
+ return true;
+
+ // Handle unconditional branches.
+ if (Instruction->getOpcode() == Lanai::BT) {
+ if (!AllowModify) {
+ TrueBlock = Instruction->getOperand(0).getMBB();
+ continue;
+ }
+
+ // If the block has any instructions after a branch, delete them.
+ while (std::next(Instruction) != MBB.end()) {
+ std::next(Instruction)->eraseFromParent();
+ }
+
+ Condition.clear();
+ FalseBlock = nullptr;
+
+ // Delete the jump if it's equivalent to a fall-through.
+ if (MBB.isLayoutSuccessor(Instruction->getOperand(0).getMBB())) {
+ TrueBlock = nullptr;
+ Instruction->eraseFromParent();
+ Instruction = MBB.end();
+ continue;
+ }
+
+ // TrueBlock is used to indicate the unconditional destination.
+ TrueBlock = Instruction->getOperand(0).getMBB();
+ continue;
+ }
+
+ // Handle conditional branches
+ unsigned Opcode = Instruction->getOpcode();
+ if (Opcode != Lanai::BRCC)
+ return true; // Unknown opcode.
+
+ // Multiple conditional branches are not handled here so only proceed if
+ // there are no conditions enqueued.
+ if (Condition.empty()) {
+ LPCC::CondCode BranchCond =
+ static_cast<LPCC::CondCode>(Instruction->getOperand(1).getImm());
+
+ // TrueBlock is the target of the previously seen unconditional branch.
+ FalseBlock = TrueBlock;
+ TrueBlock = Instruction->getOperand(0).getMBB();
+ Condition.push_back(MachineOperand::CreateImm(BranchCond));
+ continue;
+ }
+
+ // Multiple conditional branches are not handled.
+ return true;
+ }
+
+ // Return false indicating branch successfully analyzed.
+ return false;
+}
+
+// ReverseBranchCondition - Reverses the branch condition of the specified
+// condition list, returning false on success and true if it cannot be
+// reversed.
+bool LanaiInstrInfo::ReverseBranchCondition(
+ SmallVectorImpl<llvm::MachineOperand> &Condition) const {
+ assert((Condition.size() == 1) &&
+ "Lanai branch conditions should have one component.");
+
+ LPCC::CondCode BranchCond =
+ static_cast<LPCC::CondCode>(Condition[0].getImm());
+ Condition[0].setImm(getOppositeCondition(BranchCond));
+ return false;
+}
+
+// Insert the branch with condition specified in condition and given targets
+// (TrueBlock and FalseBlock). This function returns the number of machine
+// instructions inserted.
+unsigned LanaiInstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TrueBlock,
+ MachineBasicBlock *FalseBlock,
+ ArrayRef<MachineOperand> Condition,
+ const DebugLoc &DL) const {
+ // Shouldn't be a fall through.
+ assert(TrueBlock && "InsertBranch must not be told to insert a fallthrough");
+
+ // If condition is empty then an unconditional branch is being inserted.
+ if (Condition.empty()) {
+ assert(!FalseBlock && "Unconditional branch with multiple successors!");
+ BuildMI(&MBB, DL, get(Lanai::BT)).addMBB(TrueBlock);
+ return 1;
+ }
+
+ // Else a conditional branch is inserted.
+ assert((Condition.size() == 1) &&
+ "Lanai branch conditions should have one component.");
+ unsigned ConditionalCode = Condition[0].getImm();
+ BuildMI(&MBB, DL, get(Lanai::BRCC)).addMBB(TrueBlock).addImm(ConditionalCode);
+
+ // If no false block, then false behavior is fall through and no branch needs
+ // to be inserted.
+ if (!FalseBlock)
+ return 1;
+
+ BuildMI(&MBB, DL, get(Lanai::BT)).addMBB(FalseBlock);
+ return 2;
+}
+
+unsigned LanaiInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator Instruction = MBB.end();
+ unsigned Count = 0;
+
+ while (Instruction != MBB.begin()) {
+ --Instruction;
+ if (Instruction->isDebugValue())
+ continue;
+ if (Instruction->getOpcode() != Lanai::BT &&
+ Instruction->getOpcode() != Lanai::BRCC) {
+ break;
+ }
+
+ // Remove the branch.
+ Instruction->eraseFromParent();
+ Instruction = MBB.end();
+ ++Count;
+ }
+
+ return Count;
+}
+
+unsigned LanaiInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const {
+ if (MI.getOpcode() == Lanai::LDW_RI)
+ if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
+ MI.getOperand(2).getImm() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
+ }
+ return 0;
+}
+
+unsigned LanaiInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
+ int &FrameIndex) const {
+ if (MI.getOpcode() == Lanai::LDW_RI) {
+ unsigned Reg;
+ if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
+ return Reg;
+ // Check for post-frame index elimination operations
+ const MachineMemOperand *Dummy;
+ return hasLoadFromStackSlot(MI, Dummy, FrameIndex);
+ }
+ return 0;
+}
+
+unsigned LanaiInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const {
+ if (MI.getOpcode() == Lanai::SW_RI)
+ if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
+ MI.getOperand(1).getImm() == 0) {
+ FrameIndex = MI.getOperand(0).getIndex();
+ return MI.getOperand(2).getReg();
+ }
+ return 0;
+}
+
+bool LanaiInstrInfo::getMemOpBaseRegImmOfsWidth(
+ MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,
+ const TargetRegisterInfo * /*TRI*/) const {
+ // Handle only loads/stores with base register followed by immediate offset
+ // and with add as ALU op.
+ if (LdSt.getNumOperands() != 4)
+ return false;
+ if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm() ||
+ !(LdSt.getOperand(3).isImm() && LdSt.getOperand(3).getImm() == LPAC::ADD))
+ return false;
+
+ switch (LdSt.getOpcode()) {
+ default:
+ return false;
+ case Lanai::LDW_RI:
+ case Lanai::LDW_RR:
+ case Lanai::SW_RR:
+ case Lanai::SW_RI:
+ Width = 4;
+ break;
+ case Lanai::LDHs_RI:
+ case Lanai::LDHz_RI:
+ case Lanai::STH_RI:
+ Width = 2;
+ break;
+ case Lanai::LDBs_RI:
+ case Lanai::LDBz_RI:
+ case Lanai::STB_RI:
+ Width = 1;
+ break;
+ }
+
+ BaseReg = LdSt.getOperand(1).getReg();
+ Offset = LdSt.getOperand(2).getImm();
+ return true;
+}
+
+bool LanaiInstrInfo::getMemOpBaseRegImmOfs(
+ MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset,
+ const TargetRegisterInfo *TRI) const {
+ switch (LdSt.getOpcode()) {
+ default:
+ return false;
+ case Lanai::LDW_RI:
+ case Lanai::LDW_RR:
+ case Lanai::SW_RR:
+ case Lanai::SW_RI:
+ case Lanai::LDHs_RI:
+ case Lanai::LDHz_RI:
+ case Lanai::STH_RI:
+ case Lanai::LDBs_RI:
+ case Lanai::LDBz_RI:
+ unsigned Width;
+ return getMemOpBaseRegImmOfsWidth(LdSt, BaseReg, Offset, Width, TRI);
+ }
+}
diff --git a/lib/Target/Lanai/LanaiInstrInfo.h b/lib/Target/Lanai/LanaiInstrInfo.h
new file mode 100644
index 000000000000..51f6c6ea436d
--- /dev/null
+++ b/lib/Target/Lanai/LanaiInstrInfo.h
@@ -0,0 +1,184 @@
+//===- LanaiInstrInfo.h - Lanai Instruction Information ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Lanai implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_LANAIINSTRINFO_H
+#define LLVM_LIB_TARGET_LANAI_LANAIINSTRINFO_H
+
+#include "LanaiRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+#define GET_INSTRINFO_HEADER
+#include "LanaiGenInstrInfo.inc"
+
+namespace llvm {
+
+class LanaiInstrInfo : public LanaiGenInstrInfo {
+ const LanaiRegisterInfo RegisterInfo;
+
+public:
+ LanaiInstrInfo();
+
+ // getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
+ // such, whenever a client has an instance of instruction info, it should
+ // always be able to get register info as well (through this method).
+ virtual const LanaiRegisterInfo &getRegisterInfo() const {
+ return RegisterInfo;
+ }
+
+ bool areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
+ AliasAnalysis *AA) const override;
+
+ unsigned isLoadFromStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const override;
+
+ unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
+ int &FrameIndex) const override;
+
+ unsigned isStoreToStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const override;
+
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
+ const DebugLoc &DL, unsigned DestinationRegister,
+ unsigned SourceRegister, bool KillSource) const override;
+
+ void
+ storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator Position,
+ unsigned SourceRegister, bool IsKill, int FrameIndex,
+ const TargetRegisterClass *RegisterClass,
+ const TargetRegisterInfo *RegisterInfo) const override;
+
+ void
+ loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator Position,
+ unsigned DestinationRegister, int FrameIndex,
+ const TargetRegisterClass *RegisterClass,
+ const TargetRegisterInfo *RegisterInfo) const override;
+
+ bool expandPostRAPseudo(MachineInstr &MI) const override;
+
+ bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
+ int64_t &Offset,
+ const TargetRegisterInfo *TRI) const override;
+
+ bool getMemOpBaseRegImmOfsWidth(MachineInstr &LdSt, unsigned &BaseReg,
+ int64_t &Offset, unsigned &Width,
+ const TargetRegisterInfo *TRI) const;
+
+ std::pair<unsigned, unsigned>
+ decomposeMachineOperandsTargetFlags(unsigned TF) const override;
+
+ ArrayRef<std::pair<unsigned, const char *>>
+ getSerializableDirectMachineOperandTargetFlags() const override;
+
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TrueBlock,
+ MachineBasicBlock *&FalseBlock,
+ SmallVectorImpl<MachineOperand> &Condition,
+ bool AllowModify) const override;
+
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+
+  // For a comparison instruction, return the source registers in SrcReg and
+  // SrcReg2 if it has two register operands, and the value it compares
+  // against in CmpValue. Return true if the comparison instruction can be
+  // analyzed.
+ bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &CmpMask,
+ int &CmpValue) const override;
+
+ // See if the comparison instruction can be converted into something more
+ // efficient. E.g., on Lanai register-register instructions can set the flag
+ // register, obviating the need for a separate compare.
+ bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
+ unsigned SrcReg2, int CmpMask, int CmpValue,
+ const MachineRegisterInfo *MRI) const override;
+
+ // Analyze the given select instruction, returning true if it cannot be
+ // understood. It is assumed that MI->isSelect() is true.
+ //
+ // When successful, return the controlling condition and the operands that
+ // determine the true and false result values.
+ //
+ // Result = SELECT Cond, TrueOp, FalseOp
+ //
+  // Lanai can optimize certain select instructions, for example by predicating
+  // the instruction defining one of the operands; Optimizable is set to true
+  // in that case.
+ bool analyzeSelect(const MachineInstr &MI,
+ SmallVectorImpl<MachineOperand> &Cond, unsigned &TrueOp,
+ unsigned &FalseOp, bool &Optimizable) const override;
+
+ // Given a select instruction that was understood by analyzeSelect and
+ // returned Optimizable = true, attempt to optimize MI by merging it with one
+ // of its operands. Returns NULL on failure.
+ //
+ // When successful, returns the new select instruction. The client is
+ // responsible for deleting MI.
+ //
+  // If both sides of the select can be optimized, the TrueOp is modified.
+ // PreferFalse is not used.
+ MachineInstr *optimizeSelect(MachineInstr &MI,
+ SmallPtrSetImpl<MachineInstr *> &SeenMIs,
+ bool PreferFalse) const override;
+
+ bool ReverseBranchCondition(
+ SmallVectorImpl<MachineOperand> &Condition) const override;
+
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TrueBlock,
+ MachineBasicBlock *FalseBlock,
+ ArrayRef<MachineOperand> Condition,
+ const DebugLoc &DL) const override;
+};
+
+static inline bool isSPLSOpcode(unsigned Opcode) {
+ switch (Opcode) {
+ case Lanai::LDBs_RI:
+ case Lanai::LDBz_RI:
+ case Lanai::LDHs_RI:
+ case Lanai::LDHz_RI:
+ case Lanai::STB_RI:
+ case Lanai::STH_RI:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool isRMOpcode(unsigned Opcode) {
+ switch (Opcode) {
+ case Lanai::LDW_RI:
+ case Lanai::SW_RI:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool isRRMOpcode(unsigned Opcode) {
+ switch (Opcode) {
+ case Lanai::LDBs_RR:
+ case Lanai::LDBz_RR:
+ case Lanai::LDHs_RR:
+ case Lanai::LDHz_RR:
+ case Lanai::LDWz_RR:
+ case Lanai::LDW_RR:
+ case Lanai::STB_RR:
+ case Lanai::STH_RR:
+ case Lanai::SW_RR:
+ return true;
+ default:
+ return false;
+ }
+}
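+
+// A minimal sketch (not code from this backend) of how these classifiers are
+// typically combined: SPLS memory operations carry a 10-bit signed immediate,
+// RM operations a 16-bit one, and RRM forms take a register offset, so an
+// offset-legality check could be written as:
+//
+//   bool offsetFitsOpcode(unsigned Opcode, int64_t Offset) {
+//     if (isSPLSOpcode(Opcode))
+//       return isInt<10>(Offset);
+//     if (isRMOpcode(Opcode))
+//       return isInt<16>(Offset);
+//     return false; // RRM forms encode a register, not an immediate.
+//   }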
+
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_LANAIINSTRINFO_H
diff --git a/lib/Target/Lanai/LanaiInstrInfo.td b/lib/Target/Lanai/LanaiInstrInfo.td
new file mode 100644
index 000000000000..cd1abc1f3359
--- /dev/null
+++ b/lib/Target/Lanai/LanaiInstrInfo.td
@@ -0,0 +1,892 @@
+//===-- LanaiInstrInfo.td - Target Description for Lanai Target -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the Lanai instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction format superclass
+//===----------------------------------------------------------------------===//
+
+include "LanaiInstrFormats.td"
+
+// -------------------------------------------------- //
+// Instruction Operands and Patterns
+// -------------------------------------------------- //
+
+// These are target-independent nodes, but have target-specific formats.
+def SDT_LanaiCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
+def SDT_LanaiCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
+ SDTCisVT<1, i32>]>;
+def SDT_LanaiCall : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;
+def SDT_LanaiSetFlag : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
+def SDT_LanaiSelectCC : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
+ SDTCisSameAs<1, 2>]>;
+def SDT_LanaiSetCC : SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
+ SDTCisVT<1, i32>]>;
+def SDT_LanaiBrCC : SDTypeProfile<0, 2, [SDTCisVT<0, OtherVT>,
+ SDTCisVT<1, i32>]>;
+def SDT_LanaiAdjDynAlloc : SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
+ SDTCisVT<1, i32>]>;
+
+def Call : SDNode<"LanaiISD::CALL", SDT_LanaiCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPVariadic]>;
+def RetFlag : SDNode<"LanaiISD::RET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+def CallSeqStart : SDNode<"ISD::CALLSEQ_START", SDT_LanaiCallSeqStart,
+ [SDNPHasChain, SDNPOutGlue]>;
+def CallSeqEnd : SDNode<"ISD::CALLSEQ_END", SDT_LanaiCallSeqEnd,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+def LanaiSetFlag : SDNode<"LanaiISD::SET_FLAG", SDT_LanaiSetFlag,
+ [SDNPOutGlue]>;
+def LanaiSubbF : SDNode<"LanaiISD::SUBBF", SDT_LanaiSetFlag,
+ [SDNPOutGlue, SDNPInGlue]>;
+def LanaiBrCC : SDNode<"LanaiISD::BR_CC", SDT_LanaiBrCC,
+ [SDNPHasChain, SDNPInGlue]>;
+def LanaiSelectCC : SDNode<"LanaiISD::SELECT_CC", SDT_LanaiSelectCC,
+ [SDNPInGlue]>;
+def LanaiSetCC : SDNode<"LanaiISD::SETCC", SDT_LanaiSetCC,
+ [SDNPInGlue]>;
+def LanaiHi : SDNode<"LanaiISD::HI", SDTIntUnaryOp>;
+def LanaiLo : SDNode<"LanaiISD::LO", SDTIntUnaryOp>;
+def LanaiSmall : SDNode<"LanaiISD::SMALL", SDTIntUnaryOp>;
+def LanaiAdjDynAlloc : SDNode<"LanaiISD::ADJDYNALLOC", SDT_LanaiAdjDynAlloc>;
+
+// Extract bits 0-15 (low-end) of an immediate value.
+def LO16 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant((uint64_t)N->getZExtValue() & 0xffff,
+ SDLoc(N), MVT::i32);
+}]>;
+
+// Extract bits 16-31 (high-end) of an immediate value.
+// Transformation function: shift the immediate value down into the low bits.
+def HI16 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant((uint64_t)N->getZExtValue() >> 16, SDLoc(N),
+ MVT::i32);
+}]>;
+
+def NEG : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(-N->getSExtValue(), SDLoc(N), MVT::i32);
+}]>;
+
+def LO21 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant((uint64_t)N->getZExtValue() & 0x1fffff,
+ SDLoc(N), MVT::i32);
+}]>;
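+
+// Worked example with an illustrative immediate: for N = 0x12345678, LO16
+// yields 0x5678, HI16 yields 0x1234, and LO21 yields 0x145678; NEG maps 5 to
+// -5. These transforms feed the immediate-materialization and shift patterns
+// defined later in this file.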
+
+// Branch targets
+def BrTargetAsmOperand : AsmOperandClass {
+ let Name = "BrTarget";
+}
+def BrTarget : Operand<OtherVT> {
+ let ParserMatchClass = BrTargetAsmOperand;
+ let EncoderMethod = "getBranchTargetOpValue";
+ let DecoderMethod = "decodeBranch";
+}
+
+def CallTargetAsmOperand : AsmOperandClass {
+ let Name = "CallTarget";
+}
+def CallTarget : Operand<i32> {
+ let ParserMatchClass = CallTargetAsmOperand;
+ let EncoderMethod = "getBranchTargetOpValue";
+ let DecoderMethod = "decodeBranch";
+}
+
+def ImmShiftAsmOperand : AsmOperandClass { let Name = "ImmShift"; }
+def immShift : Operand<i32>, PatLeaf<(imm), [{
+ int Imm = N->getSExtValue();
+ return Imm >= -31 && Imm <= 31;}]> {
+ let ParserMatchClass = ImmShiftAsmOperand;
+ let DecoderMethod = "decodeShiftImm";
+}
+
+def Imm10AsmOperand : AsmOperandClass { let Name = "Imm10"; }
+def imm10 : Operand<i32>, PatLeaf<(imm), [{
+ return isInt<10>(N->getSExtValue()); }]> {
+ let ParserMatchClass = Imm10AsmOperand;
+}
+
+def immZExt21 : PatLeaf<(imm),
+ [{return isUInt<21>(N->getZExtValue()); }], LO21>;
+
+def LoImm16AsmOperand : AsmOperandClass { let Name = "LoImm16"; }
+def i32lo16z : Operand<i32>, PatLeaf<(i32 imm), [{
+  // i32lo16z predicate - true if the 32-bit immediate has only the rightmost
+  // 16 bits set.
+ return ((N->getZExtValue() & 0xFFFFUL) == N->getZExtValue());}], LO16> {
+ let ParserMatchClass = LoImm16AsmOperand;
+}
+def i32neg16 : Operand<i32>, PatLeaf<(i32 imm), [{
+ // i32neg16 predicate - true if the 32-bit immediate is negative and can
+ // be represented by a 16 bit integer.
+ int Imm = N->getSExtValue();
+ return (Imm < 0) && (isInt<16>(Imm));}], LO16> {
+ let ParserMatchClass = LoImm16AsmOperand;
+}
+def i32lo16s : Operand<i32>, PatLeaf<(i32 imm), [{
+  // i32lo16s predicate - true if the sign-extended 32-bit immediate has only
+  // the rightmost 16 bits set.
+ return ((int64_t)(N->getSExtValue() & 0xFFFFUL) == N->getSExtValue());}], LO16> {
+ let ParserMatchClass = LoImm16AsmOperand;
+}
+
+def LoImm16AndAsmOperand : AsmOperandClass { let Name = "LoImm16And"; }
+def i32lo16and : Operand<i32>, PatLeaf<(i32 imm), [{
+  // i32lo16and predicate - true if the leftmost 16 bits of the 32-bit
+  // immediate are all ones, so an AND with it only affects the rightmost 16
+  // bits.
+ return (N->getZExtValue() >= 0xFFFF0000UL);}], LO16> {
+ let ParserMatchClass = LoImm16AndAsmOperand;
+ let PrintMethod = "printLo16AndImmOperand";
+}
+
+def HiImm16AsmOperand : AsmOperandClass { let Name = "HiImm16"; }
+def i32hi16 : Operand<i32>, PatLeaf<(i32 imm), [{
+ // i32hi16 predicate - true if the 32-bit immediate has only leftmost 16
+ // bits set.
+ return ((N->getZExtValue() & 0xFFFF0000UL) == N->getZExtValue());}], HI16> {
+ let ParserMatchClass = HiImm16AsmOperand;
+ let PrintMethod = "printHi16ImmOperand";
+}
+
+def HiImm16AndAsmOperand : AsmOperandClass { let Name = "HiImm16And"; }
+def i32hi16and : Operand<i32>, PatLeaf<(i32 imm), [{
+  // i32hi16and predicate - true if the rightmost 16 bits of the 32-bit
+  // immediate are all ones, so an AND with it only affects the leftmost 16
+  // bits.
+ return ((N->getZExtValue() & 0xFFFFUL) == 0xFFFFUL);}], HI16> {
+ let ParserMatchClass = HiImm16AndAsmOperand;
+ let PrintMethod = "printHi16AndImmOperand";
+}
+
+def LoImm21AsmOperand : AsmOperandClass { let Name = "LoImm21"; }
+def i32lo21 : Operand<i32>, PatLeaf<(i32 imm), [{
+ // i32lo21 predicate - true if the 32-bit immediate has only rightmost 21
+ // bits set.
+ return ((N->getZExtValue() & 0x1FFFFFUL) == N->getZExtValue());}], LO21> {
+ let ParserMatchClass = LoImm21AsmOperand;
+}
+
+def AluOp : Operand<i32> {
+ let PrintMethod = "printAluOperand";
+}
+
+// Addressing modes.
+def ADDRrr : ComplexPattern<i32, 3, "selectAddrRr", [], []>;
+def ADDRri : ComplexPattern<i32, 3, "selectAddrRi", [frameindex], []>;
+def ADDRsls : ComplexPattern<i32, 1, "selectAddrSls", [frameindex], []>;
+def ADDRspls : ComplexPattern<i32, 3, "selectAddrSpls", [frameindex], []>;
+
+// Address operands
+def MemRegImmAsmOperand : AsmOperandClass {
+ let Name = "MemRegImm";
+ let ParserMethod = "parseMemoryOperand";
+}
+def MEMri : Operand<i32> {
+ let DecoderMethod = "decodeRiMemoryValue";
+ let EncoderMethod = "getRiMemoryOpValue";
+ let MIOperandInfo = (ops GPR:$base, i32lo16s:$offset, AluOp:$Opcode);
+ let ParserMatchClass = MemRegImmAsmOperand;
+ let PrintMethod = "printMemRiOperand";
+}
+
+def MemRegRegAsmOperand : AsmOperandClass {
+ let Name = "MemRegReg";
+ let ParserMethod = "parseMemoryOperand";
+}
+def MEMrr : Operand<i32> {
+ let DecoderMethod = "decodeRrMemoryValue";
+ let EncoderMethod = "getRrMemoryOpValue";
+ let MIOperandInfo = (ops GPR:$Op1, GPR:$Op2, AluOp:$Opcode);
+ let ParserMatchClass = MemRegRegAsmOperand;
+ let PrintMethod = "printMemRrOperand";
+}
+
+def MemImmAsmOperand : AsmOperandClass {
+ let Name = "MemImm";
+ let ParserMethod = "parseMemoryOperand";
+}
+def MEMi : Operand<i32> {
+ let MIOperandInfo = (ops i32lo21:$offset);
+ let ParserMatchClass = MemImmAsmOperand;
+ let PrintMethod = "printMemImmOperand";
+}
+
+def MemSplsAsmOperand : AsmOperandClass {
+ let Name = "MemSpls";
+ let ParserMethod = "parseMemoryOperand";
+}
+def MEMspls : Operand<i32> {
+ let DecoderMethod = "decodeSplsValue";
+ let EncoderMethod = "getSplsOpValue";
+ let MIOperandInfo = (ops GPR:$base, imm10:$offset, AluOp:$Opcode);
+ let ParserMatchClass = MemSplsAsmOperand;
+ let PrintMethod = "printMemSplsOperand";
+}
+
+def CCOp : Operand<i32> {
+ let PrintMethod = "printCCOperand";
+}
+
+// Predicate operand. Default to 0 = true.
+def CondCodeOperand : AsmOperandClass { let Name = "CondCode"; }
+
+def pred : PredicateOperand<i32, (ops i32imm), (ops (i32 0))> {
+ let PrintMethod = "printPredicateOperand";
+ let ParserMatchClass = CondCodeOperand;
+ let DecoderMethod = "decodePredicateOperand";
+}
+
+let hasSideEffects = 0, Inst = 0x00000001 in
+ def NOP : InstLanai<(outs), (ins), "nop", []>;
+
+// Special NOPs to change logging level in vlanai.
+let hasSideEffects = 0, Inst = 0x00000002 in
+ def LOG0 : InstLanai<(outs), (ins), "log_0", []>;
+let hasSideEffects = 0, Inst = 0x00000003 in
+ def LOG1 : InstLanai<(outs), (ins), "log_1", []>;
+let hasSideEffects = 0, Inst = 0x00000004 in
+ def LOG2 : InstLanai<(outs), (ins), "log_2", []>;
+let hasSideEffects = 0, Inst = 0x00000005 in
+ def LOG3 : InstLanai<(outs), (ins), "log_3", []>;
+let hasSideEffects = 0, Inst = 0x00000006 in
+ def LOG4 : InstLanai<(outs), (ins), "log_4", []>;
+
+// Map an SPLS instruction onto itself. All other instructions will be mapped
+// onto -1. Used to identify SPLS instructions.
+def splsIdempotent : InstrMapping {
+ let FilterClass = "InstSPLS";
+ let RowFields = ["AsmString"];
+ let ColFields = ["PostEncoderMethod"];
+ let KeyCol = ["adjustPqBitsSpls"];
+ let ValueCols = [["adjustPqBitsSpls"]];
+}
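+
+// The generated Lanai::splsIdempotent(Opcode) therefore returns the opcode
+// itself for SPLS instructions and -1 for everything else, which is how the
+// MemAluCombiner pass recognizes SPLS forms:
+//   bool isSpls(uint16_t Opcode) {
+//     return Lanai::splsIdempotent(Opcode) == Opcode;
+//   }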
+
+// -------------------------------------------------- //
+// ALU instructions
+// -------------------------------------------------- //
+multiclass ALUbase<bits<3> subOp, string AsmStr, SDNode OpNode,
+ PatLeaf LoExt, PatLeaf HiExt,
+ list<dag> loPattern, list<dag> hiPattern> {
+ // Register Immediate
+ let H = 0 in
+ def LO : InstRI<subOp, (outs GPR:$Rd), (ins GPR:$Rs1, LoExt:$imm16),
+ !strconcat(AsmStr, "\t$Rs1, $imm16, $Rd"),
+ loPattern>;
+ let H = 1 in
+ def HI : InstRI<subOp, (outs GPR:$Rd), (ins GPR:$Rs1, HiExt:$imm16),
+ !strconcat(AsmStr, "\t$Rs1, $imm16, $Rd"),
+ hiPattern>;
+
+}
+
+multiclass ALUarith<bits<3> subOp, string AsmStr, SDNode OpNode,
+ PatLeaf LoExt, PatLeaf HiExt> {
+ defm I_ : ALUbase<subOp, AsmStr, OpNode, LoExt, HiExt, [], []>;
+
+ // Register Register
+ let JJJJJ = 0 in
+ def R : InstRR<subOp, (outs GPR:$Rd), (ins GPR:$Rs1, GPR:$Rs2, pred:$DDDI),
+ !strconcat(AsmStr, "$DDDI\t$Rs1, $Rs2, $Rd"),
+ [(set GPR:$Rd, (OpNode GPR:$Rs1, GPR:$Rs2))]>;
+}
+
+multiclass ALUlogic<bits<3> subOp, string AsmStr, SDNode OpNode,
+ PatLeaf LoExt, PatLeaf HiExt> {
+ defm I_ : ALUbase<subOp, AsmStr, OpNode, LoExt, HiExt,
+ [(set GPR:$Rd, (OpNode GPR:$Rs1, LoExt:$imm16))],
+ [(set GPR:$Rd, (OpNode GPR:$Rs1, HiExt:$imm16))]>;
+
+ // Register Register
+ let JJJJJ = 0 in
+ def R : InstRR<subOp, (outs GPR:$Rd), (ins GPR:$Rs1, GPR:$Rs2, pred:$DDDI),
+ !strconcat(AsmStr, "$DDDI\t$Rs1, $Rs2, $Rd"),
+ [(set GPR:$Rd, (OpNode GPR:$Rs1, GPR:$Rs2))]>;
+}
+
+// Non flag setting ALU operations
+let isAsCheapAsAMove = 1, F = 0 in {
+ let isCommutable = 1 in {
+ defm ADD_ : ALUarith<0b000, "add", add, i32lo16z, i32hi16>;
+ }
+ defm SUB_ : ALUarith<0b010, "sub", sub, i32lo16z, i32hi16>;
+ let isCommutable = 1 in {
+ defm AND_ : ALUlogic<0b100, "and", and, i32lo16and, i32hi16and>;
+ defm OR_ : ALUlogic<0b101, "or", or, i32lo16z, i32hi16>;
+ defm XOR_ : ALUlogic<0b110, "xor", xor, i32lo16z, i32hi16>;
+ }
+}
+
+def : Pat<(add GPR:$Rs1, i32lo16z:$imm),
+ (ADD_I_LO GPR:$Rs1, i32lo16z:$imm)>;
+
+def : Pat<(sub GPR:$Rs1, i32lo16z:$imm),
+ (SUB_I_LO GPR:$Rs1, i32lo16z:$imm)>;
+
+def : Pat<(add GPR:$Rs1, i32hi16:$imm),
+ (ADD_I_HI GPR:$Rs1, i32hi16:$imm)>;
+
+def : Pat<(sub GPR:$Rs1, i32hi16:$imm),
+ (SUB_I_HI GPR:$Rs1, i32hi16:$imm)>;
+
+def : Pat<(i32 i32lo16and:$imm), (AND_I_LO (i32 R1), i32lo16and:$imm)>;
+def : Pat<(i32 i32hi16and:$imm), (AND_I_HI (i32 R1), i32hi16and:$imm)>;
+
+// Change add/sub with negative number to sub/add
+def : Pat<(add GPR:$Rs1, i32neg16:$imm),
+ (SUB_I_LO GPR:$Rs1, (NEG $imm))>;
+def : Pat<(sub GPR:$Rs1, i32neg16:$imm),
+ (ADD_I_LO GPR:$Rs1, (NEG $imm))>;
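+
+// For example, (add GPR:$Rs1, -5) matches i32neg16 and is rewritten to
+// (SUB_I_LO GPR:$Rs1, 5), keeping the encoded immediate non-negative.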
+
+// Flag (incl. carry) setting addition and subtraction
+let F = 1, Defs = [SR] in {
+ defm ADD_F_ : ALUarith<0b000, "add.f", addc, i32lo16z, i32hi16>;
+ defm SUB_F_ : ALUarith<0b010, "sub.f", subc, i32lo16z, i32hi16>;
+}
+
+def : Pat<(addc GPR:$Rs1, i32lo16z:$imm),
+ (ADD_F_I_LO GPR:$Rs1, i32lo16z:$imm)>;
+
+def : Pat<(subc GPR:$Rs1, i32lo16z:$imm),
+ (SUB_F_I_LO GPR:$Rs1, i32lo16z:$imm)>;
+
+def : Pat<(addc GPR:$Rs1, i32hi16:$imm),
+ (ADD_F_I_HI GPR:$Rs1, i32hi16:$imm)>;
+
+def : Pat<(subc GPR:$Rs1, i32hi16:$imm),
+ (SUB_F_I_HI GPR:$Rs1, i32hi16:$imm)>;
+
+// Carry using addition and subtraction
+let F = 0, Uses = [SR] in {
+ defm ADDC_ : ALUarith<0b001, "addc", adde, i32lo16z, i32hi16>;
+ defm SUBB_ : ALUarith<0b011, "subb", sube, i32lo16z, i32hi16>;
+}
+
+def : Pat<(adde GPR:$Rs1, i32lo16z:$imm),
+ (ADDC_I_LO GPR:$Rs1, i32lo16z:$imm)>;
+
+def : Pat<(sube GPR:$Rs1, i32lo16z:$imm),
+ (SUBB_I_LO GPR:$Rs1, i32lo16z:$imm)>;
+
+def : Pat<(adde GPR:$Rs1, i32hi16:$imm),
+ (ADDC_I_HI GPR:$Rs1, i32hi16:$imm)>;
+
+def : Pat<(sube GPR:$Rs1, i32hi16:$imm),
+ (SUBB_I_HI GPR:$Rs1, i32hi16:$imm)>;
+
+// Flag setting ALU operations
+let isAsCheapAsAMove = 1, F = 1, Defs = [SR] in {
+ let isCommutable = 1 in {
+ defm AND_F_ : ALUlogic<0b100, "and.f", and, i32lo16and, i32hi16and>;
+ defm OR_F_ : ALUlogic<0b101, "or.f", or, i32lo16z, i32hi16>;
+ defm XOR_F_ : ALUlogic<0b110, "xor.f", xor, i32lo16z, i32hi16>;
+ }
+}
+
+let isAsCheapAsAMove = 1, F = 1, Defs = [SR], Uses = [SR] in {
+ defm ADDC_F_ : ALUarith<0b001, "addc.f", adde, i32lo16z, i32hi16>;
+ defm SUBB_F_ : ALUarith<0b011, "subb.f", sube, i32lo16z, i32hi16>;
+}
+
+def : Pat<(LanaiSubbF GPR:$Rs1, GPR:$Rs2),
+ (SUBB_F_R GPR:$Rs1, GPR:$Rs2)>;
+
+def : Pat<(LanaiSubbF GPR:$Rs1, i32lo16z:$imm),
+ (SUBB_F_I_LO GPR:$Rs1, i32lo16z:$imm)>;
+
+def : Pat<(LanaiSubbF GPR:$Rs1, i32hi16:$imm),
+ (SUBB_F_I_HI GPR:$Rs1, i32hi16:$imm)>;
+
+def : InstAlias<"mov $src, $dst", (ADD_R GPR:$dst, GPR:$src, R0, 0)>;
+
+let isAsCheapAsAMove = 1, Rs1 = R0.Num, isCodeGenOnly = 1, H = 1, F = 0,
+ isReMaterializable = 1 in
+ def MOVHI : InstRI<0b000, (outs GPR:$Rd), (ins i32hi16:$imm16),
+ "mov\t$imm16, $Rd",
+ [(set GPR:$Rd, i32hi16:$imm16)]>;
+
+def : InstAlias<"mov $imm16, $dst", (ADD_I_LO GPR:$dst, R0, i32lo16z:$imm16)>;
+def : InstAlias<"mov $imm16, $dst", (ADD_I_HI GPR:$dst, R0, i32hi16:$imm16)>;
+def : InstAlias<"mov $imm16, $dst",
+ (AND_I_LO GPR:$dst, R1, i32lo16and:$imm16)>;
+def : InstAlias<"mov $imm16, $dst",
+ (AND_I_HI GPR:$dst, R1, i32hi16and:$imm16)>;
+
+// Shift instructions
+class ShiftRI<string AsmStr, list<dag> Pattern>
+ : InstRI<0b111, (outs GPR:$Rd), (ins GPR:$Rs1, immShift:$imm16),
+ !strconcat(AsmStr, "\t$Rs1, $imm16, $Rd"), Pattern> {
+ let isReMaterializable = 1;
+}
+
+let F = 0 in {
+ let H = 0 in
+ def SL_I : ShiftRI<"sh", [(set GPR:$Rd, (shl GPR:$Rs1, immShift:$imm16))]>;
+ let H = 1 in
+ def SA_I : ShiftRI<"sha", []>;
+}
+def : Pat<(srl GPR:$Rs1, immShift:$imm), (SL_I GPR:$Rs1, (NEG $imm))>;
+def : Pat<(sra GPR:$Rs1, immShift:$imm), (SA_I GPR:$Rs1, (NEG $imm))>;
+
+let F = 1, Defs = [SR] in {
+ let H = 0 in
+ def SL_F_I : ShiftRI<"sh.f", []>;
+ let H = 1 in
+ def SA_F_I : ShiftRI<"sha.f", []>;
+}
+
+class ShiftRR<string AsmStr, list<dag> Pattern>
+ : InstRR<0b111, (outs GPR:$Rd), (ins GPR:$Rs1, GPR:$Rs2, pred:$DDDI), AsmStr,
+ Pattern>;
+
+let F = 0 in {
+ let JJJJJ = 0b10000 in
+ def SHL_R : ShiftRR<"sh$DDDI\t$Rs1, $Rs2, $Rd",
+ [(set GPR:$Rd, (shl GPR:$Rs1, GPR:$Rs2))]>;
+ let isCodeGenOnly = 1 in {
+ let JJJJJ = 0b10000 in
+ def SRL_R : ShiftRR<"sh$DDDI\t$Rs1, $Rs2, $Rd", []>;
+ }
+ let JJJJJ = 0b11000 in
+ def SRA_R : ShiftRR<"sha$DDDI\t$Rs1, $Rs2, $Rd", []>;
+}
+
+let F = 1, Defs = [SR] in {
+ let JJJJJ = 0b10000 in
+ def SHL_F_R : ShiftRR<"sh.f$DDDI\t$Rs1, $Rs2, $Rd", []>;
+ let isCodeGenOnly = 1 in {
+ let JJJJJ = 0b10000 in
+ def SRL_F_R : ShiftRR<"sh.f$DDDI\t$Rs1, $Rs2, $Rd", []>;
+ }
+ let JJJJJ = 0b11000 in
+ def SRA_F_R : ShiftRR<"sha.f$DDDI\t$Rs1, $Rs2, $Rd", []>;
+}
+
+// Expand shift-right operations
+def : Pat<(srl GPR:$Rs1, GPR:$Rs2),
+ (SRL_R GPR:$Rs1, (SUB_R R0, GPR:$Rs2))>;
+def : Pat<(sra GPR:$Rs1, GPR:$Rs2),
+ (SRA_R GPR:$Rs1, (SUB_R R0, GPR:$Rs2))>;
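+
+// Right shifts are thus encoded as left shifts by a negative amount: an
+// immediate (srl GPR:$Rs1, 3) becomes (SL_I GPR:$Rs1, -3) via the NEG
+// transform above, while a variable shift first negates the amount with
+// SUB_R.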
+
+// -------------------------------------------------- //
+// LOAD instructions
+// -------------------------------------------------- //
+
+class LoadRR<string OpcString, PatFrag OpNode, ValueType Ty>
+ : InstRRM<0b0, (outs GPR:$Rd), (ins MEMrr:$src),
+ !strconcat(OpcString, "\t$src, $Rd"),
+ [(set (Ty GPR:$Rd), (OpNode ADDRrr:$src))]>,
+ Sched<[WriteLD]> {
+ bits<20> src;
+
+ let Rs1 = src{19-15};
+ let Rs2 = src{14-10};
+ let P = src{9};
+ let Q = src{8};
+ let BBB = src{7-5};
+ let JJJJJ = src{4-0};
+ let mayLoad = 1;
+}
+
+class LoadRI<string OpcString, PatFrag OpNode, ValueType Ty>
+ : InstRM<0b0, (outs GPR:$Rd), (ins MEMri:$src),
+ !strconcat(OpcString, "\t$src, $Rd"),
+ [(set (Ty GPR:$Rd), (OpNode ADDRri:$src))]>,
+ Sched<[WriteLD]> {
+ bits<23> src;
+
+ let Itinerary = IIC_LD;
+ let Rs1 = src{22-18};
+ let P = src{17};
+ let Q = src{16};
+ let imm16 = src{15-0};
+ let isReMaterializable = 1;
+ let mayLoad = 1;
+}
+
+let E = 0 in {
+ let YL = 0b01 in {
+    // uld is used here and ld in the alias, since the alias is printed first
+    // when one exists.
+ def LDW_RI : LoadRI<"uld", load, i32>;
+ def LDW_RR : LoadRR<"ld", load, i32>;
+ }
+}
+
+def : InstAlias<"ld $src, $dst", (LDW_RI GPR:$dst, MEMri:$src)>;
+
+let E = 1 in {
+ let YL = 0b01 in {
+ def LDWz_RR : LoadRR<"uld", zextloadi32, i32>;
+ }
+}
+
+let E = 1 in {
+ let YL = 0b00 in
+ def LDHz_RR : LoadRR<"uld.h", zextloadi16, i32>;
+ let YL = 0b10 in
+ def LDBz_RR : LoadRR<"uld.b", zextloadi8, i32>;
+}
+
+let E = 0 in {
+ let YL = 0b00 in
+ def LDHs_RR : LoadRR<"ld.h", sextloadi16, i32>;
+ let YL = 0b10 in
+ def LDBs_RR : LoadRR<"ld.b", sextloadi8, i32>;
+}
+
+def LDADDR : InstSLS<0x0, (outs GPR:$Rd), (ins MEMi:$src),
+ "ld\t$src, $Rd",
+ [(set (i32 GPR:$Rd), (load ADDRsls:$src))]>,
+ Sched<[WriteLD]> {
+ bits<21> src;
+
+ let Itinerary = IIC_LD;
+ let msb = src{20-16};
+ let lsb = src{15-0};
+ let isReMaterializable = 1;
+ let mayLoad = 1;
+}
+
+class LoadSPLS<string asmstring, PatFrag opNode>
+ : InstSPLS<(outs GPR:$Rd), (ins MEMspls:$src),
+ !strconcat(asmstring, "\t$src, $Rd"),
+ [(set (i32 GPR:$Rd), (opNode ADDRspls:$src))]>,
+ Sched<[WriteLDSW]> {
+ bits<17> src;
+ let Itinerary = IIC_LDSW;
+ let Rs1 = src{16-12};
+ let P = src{11};
+ let Q = src{10};
+ let imm10 = src{9-0};
+ let mayLoad = 1;
+ let isReMaterializable = 1;
+}
+
+let Y = 0, S = 0, E = 1 in
+ def LDHz_RI : LoadSPLS<"uld.h", zextloadi16>;
+
+let Y = 0, S = 0, E = 0 in
+ def LDHs_RI : LoadSPLS<"ld.h", sextloadi16>;
+
+let Y = 1, S = 0, E = 1 in
+ def LDBz_RI : LoadSPLS<"uld.b", zextloadi8>;
+
+let Y = 1, S = 0, E = 0 in
+ def LDBs_RI : LoadSPLS<"ld.b", sextloadi8>;
+
+def SLI : InstSLI<(outs GPR:$Rd), (ins i32lo21:$imm),
+ "mov\t$imm, $Rd",
+ [(set GPR:$Rd, i32lo21:$imm)]> {
+ bits<21> imm;
+
+ let msb = imm{20-16};
+ let lsb = imm{15-0};
+ let isReMaterializable = 1;
+ let isAsCheapAsAMove = 1;
+}
+
+// -------------------------------------------------- //
+// STORE instructions
+// -------------------------------------------------- //
+
+class StoreRR<string OpcString, PatFrag OpNode, ValueType Ty>
+ : InstRRM<0b1, (outs), (ins GPR:$Rd, MEMrr:$dst),
+ !strconcat(OpcString, "\t$Rd, $dst"),
+ [(OpNode (Ty GPR:$Rd), ADDRrr:$dst)]>,
+ Sched<[WriteST]> {
+ bits<20> dst;
+
+ let Itinerary = IIC_ST;
+ let Rs1 = dst{19-15};
+ let Rs2 = dst{14-10};
+ let P = dst{9};
+ let Q = dst{8};
+ let BBB = dst{7-5};
+ let JJJJJ = dst{4-0};
+ let mayStore = 1;
+}
+
+class StoreRI<string OpcString, PatFrag OpNode, ValueType Ty>
+ : InstRM<0b1, (outs), (ins GPR:$Rd, MEMri:$dst),
+ !strconcat(OpcString, "\t$Rd, $dst"),
+ [(OpNode (Ty GPR:$Rd), ADDRri:$dst)]>,
+ Sched<[WriteST]> {
+ bits<23> dst;
+
+ let Itinerary = IIC_ST;
+ let Rs1 = dst{22-18};
+ let P = dst{17};
+ let Q = dst{16};
+ let imm16 = dst{15-0};
+ let mayStore = 1;
+}
+
+let YL = 0b01, E = 0 in {
+ def SW_RR : StoreRR<"st", store, i32>;
+ def SW_RI : StoreRI<"st", store, i32>;
+}
+
+let E = 0 in {
+ let YL = 0b00 in
+ def STH_RR : StoreRR<"st.h", truncstorei16, i32>;
+ let YL = 0b10 in
+ def STB_RR : StoreRR<"st.b", truncstorei8, i32>;
+}
+
+def STADDR : InstSLS<0x1, (outs), (ins GPR:$Rd, MEMi:$dst),
+ "st\t$Rd, $dst",
+ [(store (i32 GPR:$Rd), ADDRsls:$dst)]>,
+ Sched<[WriteST]> {
+ bits<21> dst;
+
+ let Itinerary = IIC_ST;
+ let msb = dst{20-16};
+ let lsb = dst{15-0};
+ let mayStore = 1;
+}
+
+class StoreSPLS<string asmstring, PatFrag opNode>
+ : InstSPLS<(outs), (ins GPR:$Rd, MEMspls:$dst),
+ !strconcat(asmstring, "\t$Rd, $dst"),
+ [(opNode (i32 GPR:$Rd), ADDRspls:$dst)]>,
+ Sched<[WriteSTSW]> {
+ bits<17> dst;
+
+ let Itinerary = IIC_STSW;
+ let Rs1 = dst{16-12};
+ let P = dst{11};
+ let Q = dst{10};
+ let imm10 = dst{9-0};
+ let mayStore = 1;
+}
+
+let Y = 0, S = 1, E = 0 in
+ def STH_RI : StoreSPLS<"st.h", truncstorei16>;
+
+let Y = 1, S = 1, E = 0 in
+ def STB_RI : StoreSPLS<"st.b", truncstorei8>;
+
+// -------------------------------------------------- //
+// BRANCH instructions
+// -------------------------------------------------- //
+
+let isBranch = 1, isBarrier = 1, isTerminator = 1, hasDelaySlot = 1 in {
+ def BT : InstBR<(outs), (ins BrTarget:$addr),
+ "bt\t$addr",
+ [(br bb:$addr)]> {
+ let DDDI = 0b0000;
+ }
+ let Uses = [SR] in
+ def BRCC : InstBR<(outs), (ins BrTarget:$addr, CCOp:$DDDI),
+ "b$DDDI\t$addr",
+ [(LanaiBrCC bb:$addr, imm:$DDDI)]>;
+
+ let isIndirectBranch = 1 in {
+ def JR : InstRR<0b101, (outs), (ins GPR:$Rs2), "bt\t$Rs2",
+ [(brind GPR:$Rs2)]> {
+ let Rs1 = R0.Num;
+ let Rd = R2.Num;
+ let F = 0;
+ let JJJJJ = 0;
+ let DDDI = 0;
+ }
+ }
+}
+
+// -------------------------------------------------- //
+// Condition/SF instructions
+// -------------------------------------------------- //
+
+// Instructions to set flags used in lowering comparisons.
+multiclass SF<bits<3> op2Val, string AsmStr> {
+ let F = 1, Rd = R0.Num, JJJJJ = 0, Defs = [SR], DDDI = 0 in
+ def _RR : InstRR<op2Val, (outs), (ins GPR:$Rs1, GPR:$Rs2),
+ !strconcat(AsmStr, "\t$Rs1, $Rs2, %r0"),
+ [(LanaiSetFlag (i32 GPR:$Rs1), (i32 GPR:$Rs2))]>;
+ let F = 1, Rd = R0.Num, H = 0, Defs = [SR] in
+ def _RI_LO : InstRI<op2Val, (outs), (ins GPR:$Rs1, i32lo16z:$imm16),
+ !strconcat(AsmStr, "\t$Rs1, $imm16, %r0"),
+ [(LanaiSetFlag (i32 GPR:$Rs1), i32lo16z:$imm16)]>;
+ let F = 1, Rd = R0.Num, H = 1, Defs = [SR] in
+ def _RI_HI : InstRI<op2Val, (outs), (ins GPR:$Rs1, i32hi16:$imm16),
+ !strconcat(AsmStr, "\t$Rs1, $imm16, %r0"),
+ [(LanaiSetFlag (i32 GPR:$Rs1), i32hi16:$imm16)]>;
+}
+let isCodeGenOnly = 1, isCompare = 1 in {
+ defm SFSUB_F : SF<0b010, "sub.f">;
+}
+
+// Jump and link
+let isCall = 1, hasDelaySlot = 1, isCodeGenOnly = 1, Uses = [SP],
+ Defs = [RCA] in {
+ def CALL : Pseudo<(outs), (ins CallTarget:$addr), "", []>;
+ def CALLR : Pseudo<(outs), (ins GPR:$Rs1), "", [(Call GPR:$Rs1)]>;
+}
+
+let isReturn = 1, isTerminator = 1, hasDelaySlot = 1, isBarrier = 1,
+ Uses = [RCA] in {
+ def RET : InstRM<0b0, (outs), (ins),
+ "ld\t-4[%fp], %pc ! return",
+ [(RetFlag)]> {
+ let Rd = PC.Num;
+ let Rs1 = FP.Num;
+ let P = 1;
+ let Q = 0;
+ let imm16 = -4;
+
+ // Post encoding is not needed for RET.
+ let PostEncoderMethod = "";
+ }
+}
+
+// ADJCALLSTACKDOWN/UP implicitly use/def SP because they may be expanded into
+// a stack adjustment and the codegen must know that they may modify the stack
+// pointer before prolog-epilog rewriting occurs.
+// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
+// sub / add which can clobber SP.
+let Defs = [SP], Uses = [SP] in {
+ def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt),
+ "#ADJCALLSTACKDOWN $amt",
+ [(CallSeqStart timm:$amt)]>;
+ def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "#ADJCALLSTACKUP $amt1 $amt2",
+ [(CallSeqEnd timm:$amt1, timm:$amt2)]>;
+}
+
+let Defs = [SP], Uses = [SP] in {
+ def ADJDYNALLOC : Pseudo<(outs GPR:$dst), (ins GPR:$src),
+ "#ADJDYNALLOC $dst $src",
+ [(set GPR:$dst, (LanaiAdjDynAlloc GPR:$src))]>;
+}
+
+let Uses = [SR] in {
+ def SCC : InstSCC<(outs GPR:$Rs1), (ins CCOp:$DDDI),
+ "s$DDDI\t$Rs1",
+ [(set (i32 GPR:$Rs1), (LanaiSetCC imm:$DDDI))]>;
+}
+
+// SCC's output is already 1-bit so and'ing with 1 is redundant.
+def : Pat<(and (LanaiSetCC imm:$DDDI), 1), (SCC imm:$DDDI)>;
+
+// Select with hardware support
+let Uses = [SR], isSelect = 1 in {
+ def SELECT : InstRR<0b111, (outs GPR:$Rd),
+ (ins GPR:$Rs1, GPR:$Rs2, CCOp:$DDDI),
+ "sel.$DDDI $Rs1, $Rs2, $Rd",
+ [(set (i32 GPR:$Rd),
+ (LanaiSelectCC (i32 GPR:$Rs1), (i32 GPR:$Rs2),
+ (imm:$DDDI)))]> {
+ let JJJJJ = 0;
+ let F = 0;
+ }
+}
+
+let isBranch = 1, isBarrier = 1, isTerminator = 1, hasDelaySlot = 1,
+ isIndirectBranch = 1, Uses = [SR] in {
+ def BRIND_CC : InstRR<0b101, (outs), (ins GPR:$Rs1, CCOp:$DDDI),
+ "b$DDDI\t$Rs1", []> {
+ let F = 0;
+ let JJJJJ = 0;
+ let Rd = PC.Num;
+ let Rs2 = R0.Num;
+ }
+
+ def BRIND_CCA : InstRR<0b101, (outs), (ins GPR:$Rs1, GPR:$Rs2, CCOp:$DDDI),
+ "b${DDDI}\t$Rs1 add $Rs2", []> {
+ let F = 0;
+ let Rd = PC.Num;
+ let JJJJJ = 0;
+ }
+}
+
+// TODO: This only considers the case where BROFF is an immediate and not where
+// it is a register. Add support for register relative branching.
+let isBranch = 1, isBarrier = 1, isTerminator = 1, hasDelaySlot = 1, Rs1 = 0,
+ Uses = [SR] in
+ def BRR : InstBRR<(outs), (ins i16imm:$imm16, CCOp:$DDDI),
+ "b${DDDI}.r\t$imm16", []>;
+
+let F = 0 in {
+// Population Count (POPC)
+def POPC: InstSpecial<0b001, (outs GPR:$Rd), (ins GPR:$Rs1),
+ "popc\t$Rs1, $Rd",
+ [(set GPR:$Rd, (ctpop GPR:$Rs1))]>;
+
+// Count Leading Zeros (LEADZ)
+def LEADZ: InstSpecial<0b010, (outs GPR:$Rd), (ins GPR:$Rs1),
+ "leadz\t$Rs1, $Rd", [(set GPR:$Rd, (ctlz GPR:$Rs1))]>;
+
+// Count Trailing Zeros (TRAILZ)
+def TRAILZ : InstSpecial<0b011, (outs GPR:$Rd), (ins GPR:$Rs1),
+ "trailz\t$Rs1, $Rd",
+ [(set GPR:$Rd, (cttz GPR:$Rs1))]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Non-Instruction Patterns
+//===----------------------------------------------------------------------===//
+
+// i32 0 and R0 can be used interchangeably.
+def : Pat<(i32 0), (i32 R0)>;
+// i32 -1 and R1 can be used interchangeably.
+def : Pat<(i32 -1), (i32 R1)>;
+
+// unsigned 16-bit immediate
+def : Pat<(i32 i32lo16z:$imm), (OR_I_LO (i32 R0), imm:$imm)>;
+
+// arbitrary immediate
+def : Pat<(i32 imm:$imm), (OR_I_LO (MOVHI (HI16 imm:$imm)), (LO16 imm:$imm))>;
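+
+// For example, materializing the (illustrative) constant 0x12345678 selects
+// (OR_I_LO (MOVHI 0x1234), 0x5678): a mov of the high half followed by an or
+// of the low half.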
+
+// Calls
+def : Pat<(Call tglobaladdr:$dst), (CALL tglobaladdr:$dst)>;
+def : Pat<(Call texternalsym:$dst), (CALL texternalsym:$dst)>;
+
+// Loads
+def : Pat<(extloadi8 ADDRspls:$src), (i32 (LDBz_RI ADDRspls:$src))>;
+def : Pat<(extloadi16 ADDRspls:$src), (i32 (LDHz_RI ADDRspls:$src))>;
+
+// GlobalAddress, ExternalSymbol, Jumptable, ConstantPool
+def : Pat<(LanaiHi tglobaladdr:$dst), (MOVHI tglobaladdr:$dst)>;
+def : Pat<(LanaiLo tglobaladdr:$dst), (OR_I_LO (i32 R0), tglobaladdr:$dst)>;
+def : Pat<(LanaiSmall tglobaladdr:$dst), (SLI tglobaladdr:$dst)>;
+def : Pat<(LanaiHi texternalsym:$dst), (MOVHI texternalsym:$dst)>;
+def : Pat<(LanaiLo texternalsym:$dst), (OR_I_LO (i32 R0), texternalsym:$dst)>;
+def : Pat<(LanaiSmall texternalsym:$dst), (SLI texternalsym:$dst)>;
+def : Pat<(LanaiHi tblockaddress:$dst), (MOVHI tblockaddress:$dst)>;
+def : Pat<(LanaiLo tblockaddress:$dst), (OR_I_LO (i32 R0), tblockaddress:$dst)>;
+def : Pat<(LanaiSmall tblockaddress:$dst), (SLI tblockaddress:$dst)>;
+def : Pat<(LanaiHi tjumptable:$dst), (MOVHI tjumptable:$dst)>;
+def : Pat<(LanaiLo tjumptable:$dst), (OR_I_LO (i32 R0), tjumptable:$dst)>;
+def : Pat<(LanaiSmall tjumptable:$dst), (SLI tjumptable:$dst)>;
+def : Pat<(LanaiHi tconstpool:$dst), (MOVHI tconstpool:$dst)>;
+def : Pat<(LanaiLo tconstpool:$dst), (OR_I_LO (i32 R0), tconstpool:$dst)>;
+def : Pat<(LanaiSmall tconstpool:$dst), (SLI tconstpool:$dst)>;
+
+def : Pat<(or GPR:$hi, (LanaiLo tglobaladdr:$lo)),
+ (OR_I_LO GPR:$hi, tglobaladdr:$lo)>;
+def : Pat<(or R0, (LanaiSmall tglobaladdr:$small)),
+ (SLI tglobaladdr:$small)>;
+def : Pat<(or GPR:$hi, (LanaiLo texternalsym:$lo)),
+ (OR_I_LO GPR:$hi, texternalsym:$lo)>;
+def : Pat<(or R0, (LanaiSmall texternalsym:$small)),
+ (SLI texternalsym:$small)>;
+def : Pat<(or GPR:$hi, (LanaiLo tblockaddress:$lo)),
+ (OR_I_LO GPR:$hi, tblockaddress:$lo)>;
+def : Pat<(or R0, (LanaiSmall tblockaddress:$small)),
+ (SLI tblockaddress:$small)>;
+def : Pat<(or GPR:$hi, (LanaiLo tjumptable:$lo)),
+ (OR_I_LO GPR:$hi, tjumptable:$lo)>;
+def : Pat<(or R0, (LanaiSmall tjumptable:$small)),
+ (SLI tjumptable:$small)>;
+def : Pat<(or GPR:$hi, (LanaiLo tconstpool:$lo)),
+ (OR_I_LO GPR:$hi, tconstpool:$lo)>;
+def : Pat<(or R0, (LanaiSmall tconstpool:$small)),
+ (SLI tconstpool:$small)>;
diff --git a/lib/Target/Lanai/LanaiMCInstLower.cpp b/lib/Target/Lanai/LanaiMCInstLower.cpp
new file mode 100644
index 000000000000..6c809b43f7ed
--- /dev/null
+++ b/lib/Target/Lanai/LanaiMCInstLower.cpp
@@ -0,0 +1,140 @@
+//===-- LanaiMCInstLower.cpp - Convert Lanai MachineInstr to an MCInst ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to lower Lanai MachineInstrs to their corresponding
+// MCInst records.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LanaiMCInstLower.h"
+
+#include "MCTargetDesc/LanaiBaseInfo.h"
+#include "MCTargetDesc/LanaiMCExpr.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+MCSymbol *
+LanaiMCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const {
+ return Printer.getSymbol(MO.getGlobal());
+}
+
+MCSymbol *
+LanaiMCInstLower::GetBlockAddressSymbol(const MachineOperand &MO) const {
+ return Printer.GetBlockAddressSymbol(MO.getBlockAddress());
+}
+
+MCSymbol *
+LanaiMCInstLower::GetExternalSymbolSymbol(const MachineOperand &MO) const {
+ return Printer.GetExternalSymbolSymbol(MO.getSymbolName());
+}
+
+MCSymbol *LanaiMCInstLower::GetJumpTableSymbol(const MachineOperand &MO) const {
+ SmallString<256> Name;
+ raw_svector_ostream(Name) << Printer.MAI->getPrivateGlobalPrefix() << "JTI"
+ << Printer.getFunctionNumber() << '_'
+ << MO.getIndex();
+ // Create a symbol for the name.
+ return Ctx.getOrCreateSymbol(Name.str());
+}
+
+MCSymbol *
+LanaiMCInstLower::GetConstantPoolIndexSymbol(const MachineOperand &MO) const {
+ SmallString<256> Name;
+ raw_svector_ostream(Name) << Printer.MAI->getPrivateGlobalPrefix() << "CPI"
+ << Printer.getFunctionNumber() << '_'
+ << MO.getIndex();
+ // Create a symbol for the name.
+ return Ctx.getOrCreateSymbol(Name.str());
+}
+
+MCOperand LanaiMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
+ MCSymbol *Sym) const {
+ LanaiMCExpr::VariantKind Kind;
+
+ switch (MO.getTargetFlags()) {
+ case LanaiII::MO_NO_FLAG:
+ Kind = LanaiMCExpr::VK_Lanai_None;
+ break;
+ case LanaiII::MO_ABS_HI:
+ Kind = LanaiMCExpr::VK_Lanai_ABS_HI;
+ break;
+ case LanaiII::MO_ABS_LO:
+ Kind = LanaiMCExpr::VK_Lanai_ABS_LO;
+ break;
+ default:
+ llvm_unreachable("Unknown target flag on GV operand");
+ }
+
+ const MCExpr *Expr =
+ MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, Ctx);
+ if (!MO.isJTI() && MO.getOffset())
+ Expr = MCBinaryExpr::createAdd(
+ Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
+ Expr = LanaiMCExpr::create(Kind, Expr, Ctx);
+ return MCOperand::createExpr(Expr);
+}
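+
+// For example, a GlobalAddress operand carrying target flag MO_ABS_HI and a
+// non-zero offset lowers to the LanaiMCExpr VK_Lanai_ABS_HI applied to
+// (Sym + offset); jump-table operands never have an offset added.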
+
+void LanaiMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
+ OutMI.setOpcode(MI->getOpcode());
+
+ for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
+ const MachineOperand &MO = MI->getOperand(I);
+
+ MCOperand MCOp;
+ switch (MO.getType()) {
+ case MachineOperand::MO_Register:
+ // Ignore all implicit register operands.
+ if (MO.isImplicit())
+ continue;
+ MCOp = MCOperand::createReg(MO.getReg());
+ break;
+ case MachineOperand::MO_Immediate:
+ MCOp = MCOperand::createImm(MO.getImm());
+ break;
+ case MachineOperand::MO_MachineBasicBlock:
+ MCOp = MCOperand::createExpr(
+ MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), Ctx));
+ break;
+ case MachineOperand::MO_RegisterMask:
+ continue;
+ case MachineOperand::MO_GlobalAddress:
+ MCOp = LowerSymbolOperand(MO, GetGlobalAddressSymbol(MO));
+ break;
+ case MachineOperand::MO_BlockAddress:
+ MCOp = LowerSymbolOperand(MO, GetBlockAddressSymbol(MO));
+ break;
+ case MachineOperand::MO_ExternalSymbol:
+ MCOp = LowerSymbolOperand(MO, GetExternalSymbolSymbol(MO));
+ break;
+ case MachineOperand::MO_JumpTableIndex:
+ MCOp = LowerSymbolOperand(MO, GetJumpTableSymbol(MO));
+ break;
+ case MachineOperand::MO_ConstantPoolIndex:
+ MCOp = LowerSymbolOperand(MO, GetConstantPoolIndexSymbol(MO));
+ break;
+ default:
+ MI->dump();
+ llvm_unreachable("unknown operand type");
+ }
+
+ OutMI.addOperand(MCOp);
+ }
+}
diff --git a/lib/Target/Lanai/LanaiMCInstLower.h b/lib/Target/Lanai/LanaiMCInstLower.h
new file mode 100644
index 000000000000..41c0766e86da
--- /dev/null
+++ b/lib/Target/Lanai/LanaiMCInstLower.h
@@ -0,0 +1,48 @@
+//===-- LanaiMCInstLower.h - Lower MachineInstr to MCInst -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_LANAIMCINSTLOWER_H
+#define LLVM_LIB_TARGET_LANAI_LANAIMCINSTLOWER_H
+
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+class AsmPrinter;
+class MCContext;
+class MCInst;
+class MCOperand;
+class MCSymbol;
+class MachineInstr;
+class MachineModuleInfoMachO;
+class MachineOperand;
+class Mangler;
+
+// LanaiMCInstLower - This class is used to lower a MachineInstr into an
+// MCInst.
+class LLVM_LIBRARY_VISIBILITY LanaiMCInstLower {
+ MCContext &Ctx;
+
+ AsmPrinter &Printer;
+
+public:
+ LanaiMCInstLower(MCContext &CTX, Mangler & /*Mang*/, AsmPrinter &AP)
+ : Ctx(CTX), Printer(AP) {}
+ void Lower(const MachineInstr *MI, MCInst &OutMI) const;
+
+ MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
+
+ MCSymbol *GetGlobalAddressSymbol(const MachineOperand &MO) const;
+ MCSymbol *GetBlockAddressSymbol(const MachineOperand &MO) const;
+ MCSymbol *GetExternalSymbolSymbol(const MachineOperand &MO) const;
+ MCSymbol *GetJumpTableSymbol(const MachineOperand &MO) const;
+ MCSymbol *GetConstantPoolIndexSymbol(const MachineOperand &MO) const;
+};
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_LANAIMCINSTLOWER_H
diff --git a/lib/Target/Lanai/LanaiMachineFunctionInfo.cpp b/lib/Target/Lanai/LanaiMachineFunctionInfo.cpp
new file mode 100644
index 000000000000..c72271b67790
--- /dev/null
+++ b/lib/Target/Lanai/LanaiMachineFunctionInfo.cpp
@@ -0,0 +1,23 @@
+//===-- LanaiMachineFunctionInfo.cpp - Lanai machine function info --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LanaiMachineFunctionInfo.h"
+
+using namespace llvm;
+
+void LanaiMachineFunctionInfo::anchor() {}
+
+unsigned LanaiMachineFunctionInfo::getGlobalBaseReg() {
+ // Return if it has already been initialized.
+ if (GlobalBaseReg)
+ return GlobalBaseReg;
+
+ return GlobalBaseReg =
+ MF.getRegInfo().createVirtualRegister(&Lanai::GPRRegClass);
+}
diff --git a/lib/Target/Lanai/LanaiMachineFunctionInfo.h b/lib/Target/Lanai/LanaiMachineFunctionInfo.h
new file mode 100644
index 000000000000..3bd9112a9e13
--- /dev/null
+++ b/lib/Target/Lanai/LanaiMachineFunctionInfo.h
@@ -0,0 +1,58 @@
+//===- LanaiMachineFunctionInfo.h - Lanai machine func info ------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares Lanai-specific per-machine-function information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_LANAIMACHINEFUNCTIONINFO_H
+#define LLVM_LIB_TARGET_LANAI_LANAIMACHINEFUNCTIONINFO_H
+
+#include "LanaiRegisterInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+namespace llvm {
+
+// LanaiMachineFunctionInfo - This class is derived from MachineFunction and
+// contains private Lanai target-specific information for each MachineFunction.
+class LanaiMachineFunctionInfo : public MachineFunctionInfo {
+ virtual void anchor();
+
+ MachineFunction &MF;
+
+  // SRetReturnReg - the Lanai ABI requires that sret lowering include
+  // returning the value of the returned struct in a register. This field
+  // holds the virtual register into which the sret argument is passed.
+ unsigned SRetReturnReg;
+
+  // GlobalBaseReg - keeps track of the virtual register initialized for
+  // use as the global base register. This is used in some PIC relocation
+  // models.
+ unsigned GlobalBaseReg;
+
+ // VarArgsFrameIndex - FrameIndex for start of varargs area.
+ int VarArgsFrameIndex;
+
+public:
+ explicit LanaiMachineFunctionInfo(MachineFunction &MF)
+ : MF(MF), SRetReturnReg(0), GlobalBaseReg(0), VarArgsFrameIndex(0) {}
+
+ unsigned getSRetReturnReg() const { return SRetReturnReg; }
+ void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
+
+ unsigned getGlobalBaseReg();
+
+ int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
+ void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; }
+};
+
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_LANAIMACHINEFUNCTIONINFO_H
diff --git a/lib/Target/Lanai/LanaiMemAluCombiner.cpp b/lib/Target/Lanai/LanaiMemAluCombiner.cpp
new file mode 100644
index 000000000000..c5a46143ee56
--- /dev/null
+++ b/lib/Target/Lanai/LanaiMemAluCombiner.cpp
@@ -0,0 +1,422 @@
+//===-- LanaiMemAluCombiner.cpp - Pass to combine memory & ALU operations -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Simple pass to combine memory and ALU operations
+//
+// The Lanai ISA supports instructions where a load/store modifies the base
+// register used in the load/store operation. This pass finds suitable
+// load/store and ALU instructions and combines them into one instruction.
+//
+// For example,
+// ld [ %r6 -- ], %r12
+// is a supported instruction that is not currently generated by the instruction
+// selection pass of this backend. This pass generates these instructions by
+// merging
+// add %r6, -4, %r6
+// followed by
+// ld [ %r6 ], %r12
+// in the same machine basic block into one machine instruction.
+//===----------------------------------------------------------------------===//
+
+#include "Lanai.h"
+#include "LanaiTargetMachine.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetInstrInfo.h"
+using namespace llvm;
+
+#define GET_INSTRMAP_INFO
+#include "LanaiGenInstrInfo.inc"
+
+#define DEBUG_TYPE "lanai-mem-alu-combiner"
+
+STATISTIC(NumLdStAluCombined, "Number of memory and ALU instructions combined");
+
+static llvm::cl::opt<bool> DisableMemAluCombiner(
+ "disable-lanai-mem-alu-combiner", llvm::cl::init(false),
+ llvm::cl::desc("Do not combine ALU and memory operators"),
+ llvm::cl::Hidden);
+
+namespace llvm {
+void initializeLanaiMemAluCombinerPass(PassRegistry &);
+} // namespace llvm
+
+namespace {
+typedef MachineBasicBlock::iterator MbbIterator;
+typedef MachineFunction::iterator MfIterator;
+
+class LanaiMemAluCombiner : public MachineFunctionPass {
+public:
+ static char ID;
+ explicit LanaiMemAluCombiner() : MachineFunctionPass(ID) {
+ initializeLanaiMemAluCombinerPass(*PassRegistry::getPassRegistry());
+ }
+
+ const char *getPassName() const override {
+ return "Lanai load / store optimization pass";
+ }
+
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
+private:
+ MbbIterator findClosestSuitableAluInstr(MachineBasicBlock *BB,
+ const MbbIterator &MemInstr,
+ bool Decrement);
+ void insertMergedInstruction(MachineBasicBlock *BB,
+ const MbbIterator &MemInstr,
+ const MbbIterator &AluInstr, bool Before);
+ bool combineMemAluInBasicBlock(MachineBasicBlock *BB);
+
+ // Target machine description which we query for register names, data
+ // layout, etc.
+ const TargetInstrInfo *TII;
+};
+} // namespace
+
+char LanaiMemAluCombiner::ID = 0;
+
+INITIALIZE_PASS(LanaiMemAluCombiner, DEBUG_TYPE,
+ "Lanai memory ALU combiner pass", false, false)
+
+namespace {
+bool isSpls(uint16_t Opcode) { return Lanai::splsIdempotent(Opcode) == Opcode; }
+
+// Determine the opcode of the merged instruction, given the old memory
+// operation's opcode and whether the merged instruction will take an
+// immediate offset.
+unsigned mergedOpcode(unsigned OldOpcode, bool ImmediateOffset) {
+ switch (OldOpcode) {
+ case Lanai::LDW_RI:
+ case Lanai::LDW_RR:
+ if (ImmediateOffset)
+ return Lanai::LDW_RI;
+ return Lanai::LDW_RR;
+ case Lanai::LDHs_RI:
+ case Lanai::LDHs_RR:
+ if (ImmediateOffset)
+ return Lanai::LDHs_RI;
+ return Lanai::LDHs_RR;
+ case Lanai::LDHz_RI:
+ case Lanai::LDHz_RR:
+ if (ImmediateOffset)
+ return Lanai::LDHz_RI;
+ return Lanai::LDHz_RR;
+ case Lanai::LDBs_RI:
+ case Lanai::LDBs_RR:
+ if (ImmediateOffset)
+ return Lanai::LDBs_RI;
+ return Lanai::LDBs_RR;
+ case Lanai::LDBz_RI:
+ case Lanai::LDBz_RR:
+ if (ImmediateOffset)
+ return Lanai::LDBz_RI;
+ return Lanai::LDBz_RR;
+ case Lanai::SW_RI:
+ case Lanai::SW_RR:
+ if (ImmediateOffset)
+ return Lanai::SW_RI;
+ return Lanai::SW_RR;
+ case Lanai::STB_RI:
+ case Lanai::STB_RR:
+ if (ImmediateOffset)
+ return Lanai::STB_RI;
+ return Lanai::STB_RR;
+ case Lanai::STH_RI:
+ case Lanai::STH_RR:
+ if (ImmediateOffset)
+ return Lanai::STH_RI;
+ return Lanai::STH_RR;
+ default:
+ return 0;
+ }
+}
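+
+// For example, mergedOpcode(Lanai::LDW_RR, /*ImmediateOffset=*/true) returns
+// Lanai::LDW_RI, while any opcode outside the table returns 0, which callers
+// treat as "not a combinable memory operation".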
+
+// Check if the machine instruction has a single, non-volatile memory operand
+// of a type supported for combining with ALU instructions.
+bool isNonVolatileMemoryOp(const MachineInstr &MI) {
+ if (!MI.hasOneMemOperand())
+ return false;
+
+ // Determine if the machine instruction is a supported memory operation by
+ // testing if the computed merge opcode is a valid memory operation opcode.
+ if (mergedOpcode(MI.getOpcode(), false) == 0)
+ return false;
+
+ const MachineMemOperand *MemOperand = *MI.memoperands_begin();
+
+ // Don't move volatile memory accesses
+ if (MemOperand->isVolatile())
+ return false;
+
+ return true;
+}
+
+// Test to see if two machine operands are of the same type. This test is less
+// strict than the MachineOperand::isIdenticalTo function.
+bool isSameOperand(const MachineOperand &Op1, const MachineOperand &Op2) {
+ if (Op1.getType() != Op2.getType())
+ return false;
+
+ switch (Op1.getType()) {
+ case MachineOperand::MO_Register:
+ return Op1.getReg() == Op2.getReg();
+ case MachineOperand::MO_Immediate:
+ return Op1.getImm() == Op2.getImm();
+ default:
+ return false;
+ }
+}
+
+bool isZeroOperand(const MachineOperand &Op) {
+ return ((Op.isReg() && Op.getReg() == Lanai::R0) ||
+ (Op.isImm() && Op.getImm() == 0));
+}
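+
+// Note that R0 always reads as zero (see the (i32 0) -> R0 pattern in
+// LanaiInstrInfo.td), so a zero offset can appear either as the immediate 0
+// or as the register R0.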
+
+// Determines whether a register is used by an instruction.
+bool InstrUsesReg(const MbbIterator &Instr, const MachineOperand *Reg) {
+ for (MachineInstr::const_mop_iterator Mop = Instr->operands_begin();
+ Mop != Instr->operands_end(); ++Mop) {
+ if (isSameOperand(*Mop, *Reg))
+ return true;
+ }
+ return false;
+}
+
+// Converts a machine opcode to the corresponding AluCode.
+// ALU operations that use or modify flags should not be considered for
+// merging and are omitted from this list.
+LPAC::AluCode mergedAluCode(unsigned AluOpcode) {
+ switch (AluOpcode) {
+ case Lanai::ADD_I_LO:
+ case Lanai::ADD_R:
+ return LPAC::ADD;
+ case Lanai::SUB_I_LO:
+ case Lanai::SUB_R:
+ return LPAC::SUB;
+ case Lanai::AND_I_LO:
+ case Lanai::AND_R:
+ return LPAC::AND;
+ case Lanai::OR_I_LO:
+ case Lanai::OR_R:
+ return LPAC::OR;
+ case Lanai::XOR_I_LO:
+ case Lanai::XOR_R:
+ return LPAC::XOR;
+ case Lanai::SHL_R:
+ return LPAC::SHL;
+ case Lanai::SRL_R:
+ return LPAC::SRL;
+ case Lanai::SRA_R:
+ return LPAC::SRA;
+ case Lanai::SA_I:
+ case Lanai::SL_I:
+ default:
+ return LPAC::UNKNOWN;
+ }
+}
+
+// Insert a new combined memory and ALU operation instruction.
+//
+// This function builds a new machine instruction using the MachineInstrBuilder
+// class and inserts it before the memory instruction.
+void LanaiMemAluCombiner::insertMergedInstruction(MachineBasicBlock *BB,
+ const MbbIterator &MemInstr,
+ const MbbIterator &AluInstr,
+ bool Before) {
+ // Insert new combined load/store + alu operation
+ MachineOperand Dest = MemInstr->getOperand(0);
+ MachineOperand Base = MemInstr->getOperand(1);
+ MachineOperand MemOffset = MemInstr->getOperand(2);
+ MachineOperand AluOffset = AluInstr->getOperand(2);
+
+ // Abort if ALU offset is not a register or immediate
+ assert((AluOffset.isReg() || AluOffset.isImm()) &&
+ "Unsupported operand type in merge");
+
+  // Determine the merged instruction's opcode and ALU code
+ LPAC::AluCode AluOpcode = mergedAluCode(AluInstr->getOpcode());
+ unsigned NewOpc = mergedOpcode(MemInstr->getOpcode(), AluOffset.isImm());
+
+ assert(AluOpcode != LPAC::UNKNOWN && "Unknown ALU code in merging");
+ assert(NewOpc != 0 && "Unknown merged node opcode");
+
+ // Build and insert new machine instruction
+ MachineInstrBuilder InstrBuilder =
+ BuildMI(*BB, MemInstr, MemInstr->getDebugLoc(), TII->get(NewOpc));
+ InstrBuilder.addReg(Dest.getReg(), getDefRegState(true));
+ InstrBuilder.addReg(Base.getReg(), getKillRegState(true));
+
+ // Add offset to machine instruction
+ if (AluOffset.isReg())
+ InstrBuilder.addReg(AluOffset.getReg());
+ else if (AluOffset.isImm())
+ InstrBuilder.addImm(AluOffset.getImm());
+ else
+ llvm_unreachable("Unsupported ld/st ALU merge.");
+
+  // Create a pre-op if the ALU operation preceded the memory operation or if
+  // the MemOffset is non-zero (i.e. the base address should be adjusted
+  // before the access), else create a post-op.
+ if (Before || !isZeroOperand(MemOffset))
+ InstrBuilder.addImm(LPAC::makePreOp(AluOpcode));
+ else
+ InstrBuilder.addImm(LPAC::makePostOp(AluOpcode));
+
+ // Transfer memory operands.
+ InstrBuilder->setMemRefs(MemInstr->memoperands_begin(),
+ MemInstr->memoperands_end());
+}
+
+// Determines whether the ALU operation (at AluIter) can be combined with a
+// load/store using the given base and offset.
+bool isSuitableAluInstr(bool IsSpls, const MbbIterator &AluIter,
+ const MachineOperand &Base,
+ const MachineOperand &Offset) {
+ // ALU operations have 3 operands
+ if (AluIter->getNumOperands() != 3)
+ return false;
+
+ MachineOperand &Dest = AluIter->getOperand(0);
+ MachineOperand &Op1 = AluIter->getOperand(1);
+ MachineOperand &Op2 = AluIter->getOperand(2);
+
+  // Only match instructions that use the base register as the destination
+  // and whose base and first operand are equal.
+ if (!isSameOperand(Dest, Base) || !isSameOperand(Dest, Op1))
+ return false;
+
+ if (Op2.isImm()) {
+ // It is not a match if the 2nd operand in the ALU operation is an
+ // immediate but the ALU operation is not an addition.
+ if (AluIter->getOpcode() != Lanai::ADD_I_LO)
+ return false;
+
+ if (Offset.isReg() && Offset.getReg() == Lanai::R0)
+ return true;
+
+ if (Offset.isImm() &&
+ ((Offset.getImm() == 0 &&
+ // Check that the Op2 would fit in the immediate field of the
+ // memory operation.
+ ((IsSpls && isInt<10>(Op2.getImm())) ||
+ (!IsSpls && isInt<16>(Op2.getImm())))) ||
+ Offset.getImm() == Op2.getImm()))
+ return true;
+ } else if (Op2.isReg()) {
+ // The Offset and 2nd operand are both registers and equal
+ if (Offset.isReg() && Op2.getReg() == Offset.getReg())
+ return true;
+ } else
+ // Only consider operations with register or immediate values
+ return false;
+
+ return false;
+}
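+
+// In the running example from the file header, "add %r6, -4, %r6" is suitable
+// for merging with "ld [ %r6 ], %r12": the ALU destination %r6 equals both
+// the memory base and the first ALU operand, the opcode is an immediate add,
+// and the offset -4 fits the immediate field of the word load.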
+
+MbbIterator LanaiMemAluCombiner::findClosestSuitableAluInstr(
+ MachineBasicBlock *BB, const MbbIterator &MemInstr, const bool Decrement) {
+ MachineOperand *Base = &MemInstr->getOperand(1);
+ MachineOperand *Offset = &MemInstr->getOperand(2);
+ bool IsSpls = isSpls(MemInstr->getOpcode());
+
+ MbbIterator First = MemInstr;
+ MbbIterator Last = Decrement ? BB->begin() : BB->end();
+
+ while (First != Last) {
+ Decrement ? --First : ++First;
+
+ // Skip over debug instructions
+ if (First->isDebugValue())
+ continue;
+
+ if (isSuitableAluInstr(IsSpls, First, *Base, *Offset)) {
+ return First;
+ }
+
+    // Any other use of the base or offset register makes the sequence
+    // unsuitable for merging.
+ if (First != Last) {
+ if (InstrUsesReg(First, Base))
+ break;
+ if (Offset->isReg() && InstrUsesReg(First, Offset))
+ break;
+ }
+ }
+
+ return MemInstr;
+}
+
+bool LanaiMemAluCombiner::combineMemAluInBasicBlock(MachineBasicBlock *BB) {
+ bool Modified = false;
+
+ MbbIterator MBBIter = BB->begin(), End = BB->end();
+ while (MBBIter != End) {
+ bool IsMemOp = isNonVolatileMemoryOp(*MBBIter);
+
+ if (IsMemOp) {
+ MachineOperand AluOperand = MBBIter->getOperand(3);
+ unsigned int DestReg = MBBIter->getOperand(0).getReg(),
+ BaseReg = MBBIter->getOperand(1).getReg();
+ assert(AluOperand.isImm() && "Unexpected memory operator type");
+ LPAC::AluCode AluOpcode = static_cast<LPAC::AluCode>(AluOperand.getImm());
+
+      // Skip memory operations that already modify the base register, or
+      // whose destination and base register are the same.
+ if (!LPAC::modifiesOp(AluOpcode) && DestReg != BaseReg) {
+ for (int Inc = 0; Inc <= 1; ++Inc) {
+ MbbIterator AluIter =
+ findClosestSuitableAluInstr(BB, MBBIter, Inc == 0);
+ if (AluIter != MBBIter) {
+ insertMergedInstruction(BB, MBBIter, AluIter, Inc == 0);
+
+ ++NumLdStAluCombined;
+ Modified = true;
+
+ // Erase the matching ALU instruction
+ BB->erase(AluIter);
+ // Erase old load/store instruction
+ BB->erase(MBBIter++);
+ break;
+ }
+ }
+ }
+ }
+ if (MBBIter == End)
+ break;
+ ++MBBIter;
+ }
+
+ return Modified;
+}
+
+// Driver function that iterates over the machine basic blocks of a machine
+// function.
+bool LanaiMemAluCombiner::runOnMachineFunction(MachineFunction &MF) {
+ if (DisableMemAluCombiner)
+ return false;
+
+ TII = MF.getSubtarget<LanaiSubtarget>().getInstrInfo();
+ bool Modified = false;
+ for (MfIterator MFI = MF.begin(); MFI != MF.end(); ++MFI) {
+ Modified |= combineMemAluInBasicBlock(&*MFI);
+ }
+ return Modified;
+}
+} // namespace
+
+FunctionPass *llvm::createLanaiMemAluCombinerPass() {
+ return new LanaiMemAluCombiner();
+}
diff --git a/lib/Target/Lanai/LanaiRegisterInfo.cpp b/lib/Target/Lanai/LanaiRegisterInfo.cpp
new file mode 100644
index 000000000000..a4c612258e7b
--- /dev/null
+++ b/lib/Target/Lanai/LanaiRegisterInfo.cpp
@@ -0,0 +1,287 @@
+//===-- LanaiRegisterInfo.cpp - Lanai Register Information ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Lanai implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LanaiRegisterInfo.h"
+#include "Lanai.h"
+#include "LanaiSubtarget.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+#define GET_REGINFO_TARGET_DESC
+#include "LanaiGenRegisterInfo.inc"
+
+using namespace llvm;
+
+LanaiRegisterInfo::LanaiRegisterInfo() : LanaiGenRegisterInfo(Lanai::RCA) {}
+
+const uint16_t *
+LanaiRegisterInfo::getCalleeSavedRegs(const MachineFunction * /*MF*/) const {
+ return CSR_SaveList;
+}
+
+BitVector LanaiRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
+ BitVector Reserved(getNumRegs());
+
+ Reserved.set(Lanai::R0);
+ Reserved.set(Lanai::R1);
+ Reserved.set(Lanai::PC);
+ Reserved.set(Lanai::R2);
+ Reserved.set(Lanai::SP);
+ Reserved.set(Lanai::R4);
+ Reserved.set(Lanai::FP);
+ Reserved.set(Lanai::R5);
+ Reserved.set(Lanai::RR1);
+ Reserved.set(Lanai::R10);
+ Reserved.set(Lanai::RR2);
+ Reserved.set(Lanai::R11);
+ Reserved.set(Lanai::RCA);
+ Reserved.set(Lanai::R15);
+ if (hasBasePointer(MF))
+ Reserved.set(getBaseRegister());
+ return Reserved;
+}
+
+bool LanaiRegisterInfo::requiresRegisterScavenging(
+ const MachineFunction & /*MF*/) const {
+ return true;
+}
+
+bool LanaiRegisterInfo::trackLivenessAfterRegAlloc(
+ const MachineFunction & /*MF*/) const {
+ return true;
+}
+
+static bool isALUArithLoOpcode(unsigned Opcode) {
+ switch (Opcode) {
+ case Lanai::ADD_I_LO:
+ case Lanai::SUB_I_LO:
+ case Lanai::ADD_F_I_LO:
+ case Lanai::SUB_F_I_LO:
+ case Lanai::ADDC_I_LO:
+ case Lanai::SUBB_I_LO:
+ case Lanai::ADDC_F_I_LO:
+ case Lanai::SUBB_F_I_LO:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static unsigned getOppositeALULoOpcode(unsigned Opcode) {
+ switch (Opcode) {
+ case Lanai::ADD_I_LO:
+ return Lanai::SUB_I_LO;
+ case Lanai::SUB_I_LO:
+ return Lanai::ADD_I_LO;
+ case Lanai::ADD_F_I_LO:
+ return Lanai::SUB_F_I_LO;
+ case Lanai::SUB_F_I_LO:
+ return Lanai::ADD_F_I_LO;
+ case Lanai::ADDC_I_LO:
+ return Lanai::SUBB_I_LO;
+ case Lanai::SUBB_I_LO:
+ return Lanai::ADDC_I_LO;
+ case Lanai::ADDC_F_I_LO:
+ return Lanai::SUBB_F_I_LO;
+ case Lanai::SUBB_F_I_LO:
+ return Lanai::ADDC_F_I_LO;
+ default:
+ llvm_unreachable("Invalid ALU lo opcode");
+ }
+}
+
+static unsigned getRRMOpcodeVariant(unsigned Opcode) {
+ switch (Opcode) {
+ case Lanai::LDBs_RI:
+ return Lanai::LDBs_RR;
+ case Lanai::LDBz_RI:
+ return Lanai::LDBz_RR;
+ case Lanai::LDHs_RI:
+ return Lanai::LDHs_RR;
+ case Lanai::LDHz_RI:
+ return Lanai::LDHz_RR;
+ case Lanai::LDW_RI:
+ return Lanai::LDW_RR;
+ case Lanai::STB_RI:
+ return Lanai::STB_RR;
+ case Lanai::STH_RI:
+ return Lanai::STH_RR;
+ case Lanai::SW_RI:
+ return Lanai::SW_RR;
+ default:
+ llvm_unreachable("Opcode has no RRM variant");
+ }
+}
+
+void LanaiRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
+ int SPAdj, unsigned FIOperandNum,
+ RegScavenger *RS) const {
+ assert(SPAdj == 0 && "Unexpected");
+
+ MachineInstr &MI = *II;
+ MachineFunction &MF = *MI.getParent()->getParent();
+ const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+ const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
+ bool HasFP = TFI->hasFP(MF);
+ DebugLoc DL = MI.getDebugLoc();
+
+ int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
+
+ int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
+ MI.getOperand(FIOperandNum + 1).getImm();
+
+ // Addressable stack objects are addressed using negative offsets from the
+ // frame pointer, or positive offsets from the stack pointer or base pointer.
+ if (!HasFP || (needsStackRealignment(MF) && FrameIndex >= 0))
+ Offset += MF.getFrameInfo()->getStackSize();
+
+ unsigned FrameReg = getFrameRegister(MF);
+ if (FrameIndex >= 0) {
+ if (hasBasePointer(MF))
+ FrameReg = getBaseRegister();
+ else if (needsStackRealignment(MF))
+ FrameReg = Lanai::SP;
+ }
+
+ // Replace frame index with a frame pointer reference.
+ // If the offset is small enough to fit in the immediate field, directly
+ // encode it.
+ // Otherwise scavenge a register and encode it into a MOVHI, OR_I_LO sequence.
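+ // For example, an offset of 0x12345 does not fit in 16 bits and would be
+ // materialized as MOVHI Reg, 0x1 followed by OR_I_LO Reg, Reg, 0x2345,
+ // which is then combined with the frame register below.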
+ if ((isSPLSOpcode(MI.getOpcode()) && !isInt<10>(Offset)) ||
+ !isInt<16>(Offset)) {
+ assert(RS && "Register scavenging must be on");
+ unsigned Reg = RS->FindUnusedReg(&Lanai::GPRRegClass);
+ if (!Reg)
+ Reg = RS->scavengeRegister(&Lanai::GPRRegClass, II, SPAdj);
+ assert(Reg && "Register scavenger failed");
+
+ bool HasNegOffset = false;
+ // ALU ops have unsigned immediate values. If the Offset is negative, we
+ // negate it here and reverse the opcode later.
+ if (Offset < 0) {
+ HasNegOffset = true;
+ Offset = -Offset;
+ }
+
+ if (!isInt<16>(Offset)) {
+ // Reg = hi(offset) | lo(offset)
+ BuildMI(*MI.getParent(), II, DL, TII->get(Lanai::MOVHI), Reg)
+ .addImm(static_cast<uint32_t>(Offset) >> 16);
+ BuildMI(*MI.getParent(), II, DL, TII->get(Lanai::OR_I_LO), Reg)
+ .addReg(Reg)
+ .addImm(Offset & 0xffffU);
+ } else {
+ // Reg = mov(offset)
+ BuildMI(*MI.getParent(), II, DL, TII->get(Lanai::ADD_I_LO), Reg)
+ .addImm(0)
+ .addImm(Offset);
+ }
+ // Reg = FrameReg OP Reg
+ if (MI.getOpcode() == Lanai::ADD_I_LO) {
+ BuildMI(*MI.getParent(), II, DL,
+ HasNegOffset ? TII->get(Lanai::SUB_R) : TII->get(Lanai::ADD_R),
+ MI.getOperand(0).getReg())
+ .addReg(FrameReg)
+ .addReg(Reg)
+ .addImm(LPCC::ICC_T);
+ MI.eraseFromParent();
+ return;
+ }
+ if (isSPLSOpcode(MI.getOpcode()) || isRMOpcode(MI.getOpcode())) {
+ MI.setDesc(TII->get(getRRMOpcodeVariant(MI.getOpcode())));
+ if (HasNegOffset) {
+ // Change the ALU op (operand 3) from LPAC::ADD (the default) to
+ // LPAC::SUB with the already negated offset.
+ assert((MI.getOperand(3).getImm() == LPAC::ADD) &&
+ "Unexpected ALU op in RRM instruction");
+ MI.getOperand(3).setImm(LPAC::SUB);
+ }
+ } else
+ llvm_unreachable("Unexpected opcode in frame index operation");
+
+ MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*isDef=*/false);
+ MI.getOperand(FIOperandNum + 1)
+ .ChangeToRegister(Reg, /*isDef=*/false, /*isImp=*/false,
+ /*isKill=*/true);
+ return;
+ }
+
+ // ALU arithmetic ops take unsigned immediates. If the offset is negative,
+ // we replace the instruction with one that inverts the opcode and negates
+ // the immediate.
+ if ((Offset < 0) && isALUArithLoOpcode(MI.getOpcode())) {
+ unsigned NewOpcode = getOppositeALULoOpcode(MI.getOpcode());
+ // We know this is an ALU op, so we know the operands are as follows:
+ // 0: destination register
+ // 1: source register (frame register)
+ // 2: immediate
+ BuildMI(*MI.getParent(), II, DL, TII->get(NewOpcode),
+ MI.getOperand(0).getReg())
+ .addReg(FrameReg)
+ .addImm(-Offset);
+ MI.eraseFromParent();
+ } else {
+ MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*isDef=*/false);
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
+ }
+}
+
+bool LanaiRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ // When we need stack realignment and there are dynamic allocas, we can't
+ // reference off of the stack pointer, so we reserve a base pointer.
+ if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
+ return true;
+
+ return false;
+}
+
+unsigned LanaiRegisterInfo::getRARegister() const { return Lanai::RCA; }
+
+unsigned
+LanaiRegisterInfo::getFrameRegister(const MachineFunction & /*MF*/) const {
+ return Lanai::FP;
+}
+
+unsigned LanaiRegisterInfo::getBaseRegister() const { return Lanai::R14; }
+
+bool LanaiRegisterInfo::canRealignStack(const MachineFunction &MF) const {
+ if (!TargetRegisterInfo::canRealignStack(MF))
+ return false;
+ return true;
+}
+
+unsigned LanaiRegisterInfo::getEHExceptionRegister() const {
+ llvm_unreachable("no exception support");
+ return 0;
+}
+
+unsigned LanaiRegisterInfo::getEHHandlerRegister() const {
+ llvm_unreachable("no exception support");
+ return 0;
+}
+
+const uint32_t *
+LanaiRegisterInfo::getCallPreservedMask(const MachineFunction & /*MF*/,
+ CallingConv::ID /*CC*/) const {
+ return CSR_RegMask;
+}
diff --git a/lib/Target/Lanai/LanaiRegisterInfo.h b/lib/Target/Lanai/LanaiRegisterInfo.h
new file mode 100644
index 000000000000..8b84bbc460e8
--- /dev/null
+++ b/lib/Target/Lanai/LanaiRegisterInfo.h
@@ -0,0 +1,63 @@
+//===- LanaiRegisterInfo.h - Lanai Register Information Impl ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Lanai implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_LANAIREGISTERINFO_H
+#define LLVM_LIB_TARGET_LANAI_LANAIREGISTERINFO_H
+
+#include "llvm/Target/TargetRegisterInfo.h"
+
+#define GET_REGINFO_HEADER
+#include "LanaiGenRegisterInfo.inc"
+
+namespace llvm {
+
+class TargetInstrInfo;
+class Type;
+
+struct LanaiRegisterInfo : public LanaiGenRegisterInfo {
+ LanaiRegisterInfo();
+
+ const uint32_t *getCallPreservedMask(const MachineFunction &MF,
+ CallingConv::ID) const override;
+
+ // Code Generation virtual methods.
+ const uint16_t *
+ getCalleeSavedRegs(const MachineFunction *MF = nullptr) const override;
+
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
+
+ bool requiresRegisterScavenging(const MachineFunction &MF) const override;
+
+ bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override;
+
+ void eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
+ unsigned FIOperandNum,
+ RegScavenger *RS = nullptr) const override;
+
+ bool canRealignStack(const MachineFunction &MF) const override;
+
+ // Debug information queries.
+ unsigned getRARegister() const;
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
+ unsigned getBaseRegister() const;
+ bool hasBasePointer(const MachineFunction &MF) const;
+
+ // Exception handling queries.
+ unsigned getEHExceptionRegister() const;
+ unsigned getEHHandlerRegister() const;
+ int getDwarfRegNum(unsigned RegNum, bool IsEH) const;
+};
+
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_LANAIREGISTERINFO_H
diff --git a/lib/Target/Lanai/LanaiRegisterInfo.td b/lib/Target/Lanai/LanaiRegisterInfo.td
new file mode 100644
index 000000000000..cf8cfe30cce9
--- /dev/null
+++ b/lib/Target/Lanai/LanaiRegisterInfo.td
@@ -0,0 +1,64 @@
+//===- LanaiRegisterInfo.td - Lanai Register defs ------------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Declarations that describe the Lanai register file
+//===----------------------------------------------------------------------===//
+
+// Registers are identified with 5-bit ID numbers.
+class LanaiReg<bits<5> num, string n, list<Register> subregs = [],
+ list<string> altNames = []> : Register<n, altNames> {
+ field bits<5> Num;
+ let Num = num;
+ let Namespace = "Lanai";
+ let SubRegs = subregs;
+}
+
+let Namespace = "Lanai" in {
+ def sub_32 : SubRegIndex<32>;
+}
+
+// Integer registers
+foreach i = 0-31 in {
+ def R#i : LanaiReg<i, "r"#i>, DwarfRegNum<[i]>;
+}
+
+// Register aliases
+let SubRegIndices = [sub_32] in {
+ def PC : LanaiReg< 2, "pc", [R2]>, DwarfRegAlias<R2>;
+ def SP : LanaiReg< 4, "sp", [R4]>, DwarfRegAlias<R4>;
+ def FP : LanaiReg< 5, "fp", [R5]>, DwarfRegAlias<R5>;
+ def RV : LanaiReg< 8, "rv", [R8]>, DwarfRegAlias<R8>;
+ def RR1 : LanaiReg<10, "rr1", [R10]>, DwarfRegAlias<R10>;
+ def RR2 : LanaiReg<11, "rr2", [R11]>, DwarfRegAlias<R11>;
+ def RCA : LanaiReg<15, "rca", [R15]>, DwarfRegAlias<R15>;
+}
+
+// Define a status register to capture the dependencies between the set flag
+// and setcc instructions
+def SR : LanaiReg< 0, "sw">;
+
+// Register classes.
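+// The order of the add list below also serves as the default allocation
+// order: plain temporaries first, registers with dedicated roles last.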
+def GPR : RegisterClass<"Lanai", [i32], 32,
+ (add R3, R9, R12, R13, R14, R16, R17,
+ (sequence "R%i", 20, 31),
+ R6, R7, R18, R19, // registers for passing arguments
+ R15, RCA, // register for constant addresses
+ R10, RR1, R11, RR2, // programmer controlled registers
+ R8, RV, // return value
+ R5, FP, // frame pointer
+ R4, SP, // stack pointer
+ R2, PC, // program counter
+ R1, // all 1s (0xffffffff)
+ R0 // constant 0
+ )>;
+
+// Condition code register class
+def CCR : RegisterClass<"Lanai", [i32], 32, (add SR)> {
+ let CopyCost = -1; // Don't allow copying of status registers
+ let isAllocatable = 0;
+}
diff --git a/lib/Target/Lanai/LanaiSchedule.td b/lib/Target/Lanai/LanaiSchedule.td
new file mode 100644
index 000000000000..7f931c4be8bb
--- /dev/null
+++ b/lib/Target/Lanai/LanaiSchedule.td
@@ -0,0 +1,70 @@
+//===-- LanaiSchedule.td - Lanai Scheduling Definitions ----*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+def ALU_FU : FuncUnit;
+def LDST_FU : FuncUnit;
+
+def IIC_ALU : InstrItinClass;
+def IIC_LD : InstrItinClass;
+def IIC_ST : InstrItinClass;
+def IIC_LDSW : InstrItinClass;
+def IIC_STSW : InstrItinClass;
+
+def LanaiItinerary : ProcessorItineraries<[ALU_FU, LDST_FU],[],[
+ InstrItinData<IIC_LD, [InstrStage<1, [LDST_FU]>]>,
+ InstrItinData<IIC_ST, [InstrStage<1, [LDST_FU]>]>,
+ InstrItinData<IIC_LDSW, [InstrStage<2, [LDST_FU]>]>,
+ InstrItinData<IIC_STSW, [InstrStage<2, [LDST_FU]>]>,
+ InstrItinData<IIC_ALU, [InstrStage<1, [ALU_FU]>]>
+]>;
+
+def LanaiSchedModel : SchedMachineModel {
+ // Cycles for loads to access the cache [default = -1]
+ let LoadLatency = 2;
+
+ // Max micro-ops that can be buffered for optimized loop dispatch/execution.
+ // [default = -1]
+ let LoopMicroOpBufferSize = 0;
+
+ // Allow scheduler to assign default model to any unrecognized opcodes.
+ // [default = 1]
+ let CompleteModel = 0;
+
+ // Max micro-ops that may be scheduled per cycle. [default = 1]
+ let IssueWidth = 1;
+
+ // Extra cycles for a mispredicted branch. [default = -1]
+ let MispredictPenalty = 10;
+
+ // Enable Post RegAlloc Scheduler pass. [default = 0]
+ let PostRAScheduler = 0;
+
+ // Max micro-ops that can be buffered. [default = -1]
+ let MicroOpBufferSize = 0;
+
+ // Per-cycle resources tables. [default = NoItineraries]
+ let Itineraries = LanaiItinerary;
+}
+
+def ALU : ProcResource<1> { let BufferSize = 0; }
+def LdSt : ProcResource<1> { let BufferSize = 0; }
+
+def WriteLD : SchedWrite;
+def WriteST : SchedWrite;
+def WriteLDSW : SchedWrite;
+def WriteSTSW : SchedWrite;
+def WriteALU : SchedWrite;
+
+let SchedModel = LanaiSchedModel in {
+ def : WriteRes<WriteLD, [LdSt]> { let Latency = 2; }
+ def : WriteRes<WriteST, [LdSt]> { let Latency = 2; }
+ def : WriteRes<WriteLDSW, [LdSt]> { let Latency = 2; }
+ def : WriteRes<WriteSTSW, [LdSt]> { let Latency = 4; }
+ def : WriteRes<WriteALU, [ALU]> { let Latency = 1; }
+}
diff --git a/lib/Target/Lanai/LanaiSelectionDAGInfo.cpp b/lib/Target/Lanai/LanaiSelectionDAGInfo.cpp
new file mode 100644
index 000000000000..b71c30fe3e05
--- /dev/null
+++ b/lib/Target/Lanai/LanaiSelectionDAGInfo.cpp
@@ -0,0 +1,35 @@
+//===-- LanaiSelectionDAGInfo.cpp - Lanai SelectionDAG Info -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LanaiSelectionDAGInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LanaiSelectionDAGInfo.h"
+
+#include "LanaiTargetMachine.h"
+
+#define DEBUG_TYPE "lanai-selectiondag-info"
+
+namespace llvm {
+
+SDValue LanaiSelectionDAGInfo::EmitTargetCodeForMemcpy(
+ SelectionDAG & /*DAG*/, const SDLoc & /*dl*/, SDValue /*Chain*/,
+ SDValue /*Dst*/, SDValue /*Src*/, SDValue Size, unsigned /*Align*/,
+ bool /*isVolatile*/, bool /*AlwaysInline*/,
+ MachinePointerInfo /*DstPtrInfo*/,
+ MachinePointerInfo /*SrcPtrInfo*/) const {
+ ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
+ if (!ConstantSize)
+ return SDValue();
+
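+ // Returning an empty SDValue in both cases defers to the generic memcpy
+ // expansion; the constant-size check presumably reserves a hook for a
+ // future specialized lowering.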
+ return SDValue();
+}
+
+} // namespace llvm
diff --git a/lib/Target/Lanai/LanaiSelectionDAGInfo.h b/lib/Target/Lanai/LanaiSelectionDAGInfo.h
new file mode 100644
index 000000000000..bfd2be2ede09
--- /dev/null
+++ b/lib/Target/Lanai/LanaiSelectionDAGInfo.h
@@ -0,0 +1,36 @@
+//===-- LanaiSelectionDAGInfo.h - Lanai SelectionDAG Info -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Lanai subclass for TargetSelectionDAGInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_LANAISELECTIONDAGINFO_H
+#define LLVM_LIB_TARGET_LANAI_LANAISELECTIONDAGINFO_H
+
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+
+class LanaiSelectionDAGInfo : public SelectionDAGTargetInfo {
+public:
+ LanaiSelectionDAGInfo() = default;
+
+ SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
+ SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVolatile,
+ bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const override;
+};
+
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_LANAISELECTIONDAGINFO_H
diff --git a/lib/Target/Lanai/LanaiSubtarget.cpp b/lib/Target/Lanai/LanaiSubtarget.cpp
new file mode 100644
index 000000000000..0fa5e82a7a66
--- /dev/null
+++ b/lib/Target/Lanai/LanaiSubtarget.cpp
@@ -0,0 +1,47 @@
+//===- LanaiSubtarget.cpp - Lanai Subtarget Information -----------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Lanai specific subclass of TargetSubtarget.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LanaiSubtarget.h"
+
+#include "Lanai.h"
+
+#define DEBUG_TYPE "lanai-subtarget"
+
+#define GET_SUBTARGETINFO_TARGET_DESC
+#define GET_SUBTARGETINFO_CTOR
+#include "LanaiGenSubtargetInfo.inc"
+
+using namespace llvm;
+
+void LanaiSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
+ std::string CPUName = CPU;
+ if (CPUName.empty())
+ CPUName = "generic";
+
+ ParseSubtargetFeatures(CPUName, FS);
+}
+
+LanaiSubtarget &LanaiSubtarget::initializeSubtargetDependencies(StringRef CPU,
+ StringRef FS) {
+ initSubtargetFeatures(CPU, FS);
+ return *this;
+}
+
+LanaiSubtarget::LanaiSubtarget(const Triple &TargetTriple, StringRef Cpu,
+ StringRef FeatureString, const TargetMachine &TM,
+ const TargetOptions & /*Options*/,
+ CodeModel::Model /*CodeModel*/,
+ CodeGenOpt::Level /*OptLevel*/)
+ : LanaiGenSubtargetInfo(TargetTriple, Cpu, FeatureString),
+ FrameLowering(initializeSubtargetDependencies(Cpu, FeatureString)),
+ InstrInfo(), TLInfo(TM, *this), TSInfo() {}
diff --git a/lib/Target/Lanai/LanaiSubtarget.h b/lib/Target/Lanai/LanaiSubtarget.h
new file mode 100644
index 000000000000..2732ef3097ec
--- /dev/null
+++ b/lib/Target/Lanai/LanaiSubtarget.h
@@ -0,0 +1,76 @@
+//=====-- LanaiSubtarget.h - Define Subtarget for the Lanai -----*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Lanai specific subclass of TargetSubtarget.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_LANAISUBTARGET_H
+#define LLVM_LIB_TARGET_LANAI_LANAISUBTARGET_H
+
+#include "LanaiFrameLowering.h"
+#include "LanaiISelLowering.h"
+#include "LanaiInstrInfo.h"
+#include "LanaiSelectionDAGInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+
+#define GET_SUBTARGETINFO_HEADER
+#include "LanaiGenSubtargetInfo.inc"
+
+namespace llvm {
+
+class LanaiSubtarget : public LanaiGenSubtargetInfo {
+public:
+ // This constructor initializes the data members to match those of the
+ // specified triple.
+ LanaiSubtarget(const Triple &TargetTriple, StringRef Cpu,
+ StringRef FeatureString, const TargetMachine &TM,
+ const TargetOptions &Options, CodeModel::Model CodeModel,
+ CodeGenOpt::Level OptLevel);
+
+ // ParseSubtargetFeatures - Parses features string setting specified
+ // subtarget options. Definition of function is auto generated by tblgen.
+ void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+
+ LanaiSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
+
+ void initSubtargetFeatures(StringRef CPU, StringRef FS);
+
+ bool enableMachineScheduler() const override { return true; }
+
+ const LanaiInstrInfo *getInstrInfo() const override { return &InstrInfo; }
+
+ const TargetFrameLowering *getFrameLowering() const override {
+ return &FrameLowering;
+ }
+
+ const LanaiRegisterInfo *getRegisterInfo() const override {
+ return &InstrInfo.getRegisterInfo();
+ }
+
+ const LanaiTargetLowering *getTargetLowering() const override {
+ return &TLInfo;
+ }
+
+ const LanaiSelectionDAGInfo *getSelectionDAGInfo() const override {
+ return &TSInfo;
+ }
+
+private:
+ LanaiFrameLowering FrameLowering;
+ LanaiInstrInfo InstrInfo;
+ LanaiTargetLowering TLInfo;
+ LanaiSelectionDAGInfo TSInfo;
+};
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_LANAISUBTARGET_H
diff --git a/lib/Target/Lanai/LanaiTargetMachine.cpp b/lib/Target/Lanai/LanaiTargetMachine.cpp
new file mode 100644
index 000000000000..b1f4b496eb9e
--- /dev/null
+++ b/lib/Target/Lanai/LanaiTargetMachine.cpp
@@ -0,0 +1,112 @@
+//===-- LanaiTargetMachine.cpp - Define TargetMachine for Lanai ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements the info about Lanai target spec.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LanaiTargetMachine.h"
+
+#include "Lanai.h"
+#include "LanaiTargetObjectFile.h"
+#include "LanaiTargetTransformInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Target/TargetOptions.h"
+
+using namespace llvm;
+
+namespace llvm {
+void initializeLanaiMemAluCombinerPass(PassRegistry &);
+} // namespace llvm
+
+extern "C" void LLVMInitializeLanaiTarget() {
+ // Register the target.
+ RegisterTargetMachine<LanaiTargetMachine> registered_target(TheLanaiTarget);
+}
+
+static std::string computeDataLayout() {
+ // Data layout (keep in sync with clang/lib/Basic/Targets.cpp)
+ return "E" // Big endian
+ "-m:e" // ELF name manging
+ "-p:32:32" // 32-bit pointers, 32 bit aligned
+ "-i64:64" // 64 bit integers, 64 bit aligned
+ "-a:0:32" // 32 bit alignment of objects of aggregate type
+ "-n32" // 32 bit native integer width
+ "-S64"; // 64 bit natural stack alignment
+}
+
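+// Default to position-independent code when no relocation model is
+// explicitly requested.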
+static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
+ if (!RM.hasValue())
+ return Reloc::PIC_;
+ return *RM;
+}
+
+LanaiTargetMachine::LanaiTargetMachine(const Target &T, const Triple &TT,
+ StringRef Cpu, StringRef FeatureString,
+ const TargetOptions &Options,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CodeModel,
+ CodeGenOpt::Level OptLevel)
+ : LLVMTargetMachine(T, computeDataLayout(), TT, Cpu, FeatureString, Options,
+ getEffectiveRelocModel(RM), CodeModel, OptLevel),
+ Subtarget(TT, Cpu, FeatureString, *this, Options, CodeModel, OptLevel),
+ TLOF(new LanaiTargetObjectFile()) {
+ initAsmInfo();
+}
+
+TargetIRAnalysis LanaiTargetMachine::getTargetIRAnalysis() {
+ return TargetIRAnalysis([this](const Function &F) {
+ return TargetTransformInfo(LanaiTTIImpl(this, F));
+ });
+}
+
+namespace {
+// Lanai Code Generator Pass Configuration Options.
+class LanaiPassConfig : public TargetPassConfig {
+public:
+ LanaiPassConfig(LanaiTargetMachine *TM, PassManagerBase *PassManager)
+ : TargetPassConfig(TM, *PassManager) {}
+
+ LanaiTargetMachine &getLanaiTargetMachine() const {
+ return getTM<LanaiTargetMachine>();
+ }
+
+ bool addInstSelector() override;
+ void addPreSched2() override;
+ void addPreEmitPass() override;
+};
+} // namespace
+
+TargetPassConfig *
+LanaiTargetMachine::createPassConfig(PassManagerBase &PassManager) {
+ return new LanaiPassConfig(this, &PassManager);
+}
+
+// Install an instruction selector pass.
+bool LanaiPassConfig::addInstSelector() {
+ addPass(createLanaiISelDag(getLanaiTargetMachine()));
+ return false;
+}
+
+// Implemented by targets that want to run passes immediately before
+// machine code is emitted.
+void LanaiPassConfig::addPreEmitPass() {
+ addPass(createLanaiDelaySlotFillerPass(getLanaiTargetMachine()));
+}
+
+// Run passes after prolog-epilog insertion and before the second instruction
+// scheduling pass.
+void LanaiPassConfig::addPreSched2() {
+ addPass(createLanaiMemAluCombinerPass());
+}
diff --git a/lib/Target/Lanai/LanaiTargetMachine.h b/lib/Target/Lanai/LanaiTargetMachine.h
new file mode 100644
index 000000000000..5278c70d909d
--- /dev/null
+++ b/lib/Target/Lanai/LanaiTargetMachine.h
@@ -0,0 +1,55 @@
+//===-- LanaiTargetMachine.h - Define TargetMachine for Lanai --- C++ ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Lanai specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_LANAITARGETMACHINE_H
+#define LLVM_LIB_TARGET_LANAI_LANAITARGETMACHINE_H
+
+#include "LanaiFrameLowering.h"
+#include "LanaiISelLowering.h"
+#include "LanaiInstrInfo.h"
+#include "LanaiSelectionDAGInfo.h"
+#include "LanaiSubtarget.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+class formatted_raw_ostream;
+
+class LanaiTargetMachine : public LLVMTargetMachine {
+ LanaiSubtarget Subtarget;
+ std::unique_ptr<TargetLoweringObjectFile> TLOF;
+
+public:
+ LanaiTargetMachine(const Target &TheTarget, const Triple &TargetTriple,
+ StringRef Cpu, StringRef FeatureString,
+ const TargetOptions &Options,
+ Optional<Reloc::Model> RelocationModel,
+ CodeModel::Model CodeModel, CodeGenOpt::Level OptLevel);
+
+ const LanaiSubtarget *
+ getSubtargetImpl(const llvm::Function & /*Fn*/) const override {
+ return &Subtarget;
+ }
+
+ TargetIRAnalysis getTargetIRAnalysis() override;
+
+ // Pass Pipeline Configuration
+ TargetPassConfig *createPassConfig(PassManagerBase &pass_manager) override;
+
+ TargetLoweringObjectFile *getObjFileLowering() const override {
+ return TLOF.get();
+ }
+};
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_LANAITARGETMACHINE_H
diff --git a/lib/Target/Lanai/LanaiTargetObjectFile.cpp b/lib/Target/Lanai/LanaiTargetObjectFile.cpp
new file mode 100644
index 000000000000..4048c8535215
--- /dev/null
+++ b/lib/Target/Lanai/LanaiTargetObjectFile.cpp
@@ -0,0 +1,123 @@
+//===-- LanaiTargetObjectFile.cpp - Lanai Object Info --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LanaiTargetObjectFile.h"
+
+#include "LanaiSubtarget.h"
+#include "LanaiTargetMachine.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+static cl::opt<unsigned> SSThreshold(
+ "lanai-ssection-threshold", cl::Hidden,
+ cl::desc("Small data and bss section threshold size (default=0)"),
+ cl::init(0));
+
+void LanaiTargetObjectFile::Initialize(MCContext &Ctx,
+ const TargetMachine &TM) {
+ TargetLoweringObjectFileELF::Initialize(Ctx, TM);
+ InitializeELF(TM.Options.UseInitArray);
+
+ SmallDataSection = getContext().getELFSection(
+ ".sdata", ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC);
+ SmallBSSSection = getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
+ ELF::SHF_WRITE | ELF::SHF_ALLOC);
+}
+
+// An object must be placed in a small section if its size is less than the
+// small-section size threshold. Data in such a section must be addressed
+// using the gp_rel operator.
+static bool isInSmallSection(uint64_t Size) {
+ // gcc has traditionally not treated zero-sized objects as small data, so this
+ // is effectively part of the ABI.
+ return Size > 0 && Size <= SSThreshold;
+}
+
+// Return true if this global address should be placed into small data/bss
+// section.
+bool LanaiTargetObjectFile::isGlobalInSmallSection(
+ const GlobalValue *GV, const TargetMachine &TM) const {
+ // We first check the case where global is a declaration, because finding
+ // section kind using getKindForGlobal() is only allowed for global
+ // definitions.
+ if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
+ return isGlobalInSmallSectionImpl(GV, TM);
+
+ return isGlobalInSmallSection(GV, TM, getKindForGlobal(GV, TM));
+}
+
+// Return true if this global address should be placed into small data/bss
+// section.
+bool LanaiTargetObjectFile::isGlobalInSmallSection(const GlobalValue *GV,
+ const TargetMachine &TM,
+ SectionKind Kind) const {
+ return (isGlobalInSmallSectionImpl(GV, TM) &&
+ (Kind.isData() || Kind.isBSS() || Kind.isCommon()));
+}
+
+// Return true if this global address should be placed into small data/bss
+// section. This method does all the work, except for checking the section
+// kind.
+bool LanaiTargetObjectFile::isGlobalInSmallSectionImpl(
+ const GlobalValue *GV, const TargetMachine & /*TM*/) const {
+ // Only global variables, not functions.
+ const GlobalVariable *GVA = dyn_cast<GlobalVariable>(GV);
+ if (!GVA)
+ return false;
+
+ if (GV->hasLocalLinkage())
+ return false;
+
+ if (((GV->hasExternalLinkage() && GV->isDeclaration()) ||
+ GV->hasCommonLinkage()))
+ return false;
+
+ Type *Ty = GV->getType()->getElementType();
+ return isInSmallSection(
+ GV->getParent()->getDataLayout().getTypeAllocSize(Ty));
+}
+
+MCSection *
+LanaiTargetObjectFile::SelectSectionForGlobal(const GlobalValue *GV,
+ SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const {
+ // Handle Small Section classification here.
+ if (Kind.isBSS() && isGlobalInSmallSection(GV, TM, Kind))
+ return SmallBSSSection;
+ if (Kind.isData() && isGlobalInSmallSection(GV, TM, Kind))
+ return SmallDataSection;
+
+ // Otherwise, we work the same as ELF.
+ return TargetLoweringObjectFileELF::SelectSectionForGlobal(GV, Kind, Mang,
+ TM);
+}
+
+/// Return true if this constant should be placed into small data section.
+bool LanaiTargetObjectFile::isConstantInSmallSection(const DataLayout &DL,
+ const Constant *CN) const {
+ return isInSmallSection(DL.getTypeAllocSize(CN->getType()));
+}
+
+MCSection *LanaiTargetObjectFile::getSectionForConstant(const DataLayout &DL,
+ SectionKind Kind,
+ const Constant *C,
+ unsigned &Align) const {
+ if (isConstantInSmallSection(DL, C))
+ return SmallDataSection;
+
+ // Otherwise, we work the same as ELF.
+ return TargetLoweringObjectFileELF::getSectionForConstant(DL, Kind, C, Align);
+}
diff --git a/lib/Target/Lanai/LanaiTargetObjectFile.h b/lib/Target/Lanai/LanaiTargetObjectFile.h
new file mode 100644
index 000000000000..eb5195469f55
--- /dev/null
+++ b/lib/Target/Lanai/LanaiTargetObjectFile.h
@@ -0,0 +1,46 @@
+//===-- LanaiTargetObjectFile.h - Lanai Object Info -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_LANAITARGETOBJECTFILE_H
+#define LLVM_LIB_TARGET_LANAI_LANAITARGETOBJECTFILE_H
+
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+
+namespace llvm {
+class LanaiTargetMachine;
+class LanaiTargetObjectFile : public TargetLoweringObjectFileELF {
+ MCSection *SmallDataSection;
+ MCSection *SmallBSSSection;
+
+public:
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+
+ /// Return true if this global address should be placed into small data/bss
+ /// section.
+ bool isGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM,
+ SectionKind Kind) const;
+ bool isGlobalInSmallSection(const GlobalValue *GV,
+ const TargetMachine &TM) const;
+ bool isGlobalInSmallSectionImpl(const GlobalValue *GV,
+ const TargetMachine &TM) const;
+
+ MCSection *SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler &Mang,
+ const TargetMachine &TM) const override;
+
+ /// Return true if this constant should be placed into small data section.
+ bool isConstantInSmallSection(const DataLayout &DL, const Constant *CN) const;
+
+ MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
+ const Constant *C,
+ unsigned &Align) const override;
+};
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_LANAITARGETOBJECTFILE_H
diff --git a/lib/Target/Lanai/LanaiTargetTransformInfo.h b/lib/Target/Lanai/LanaiTargetTransformInfo.h
new file mode 100644
index 000000000000..6300d2502d67
--- /dev/null
+++ b/lib/Target/Lanai/LanaiTargetTransformInfo.h
@@ -0,0 +1,86 @@
+//===-- LanaiTargetTransformInfo.h - Lanai specific TTI ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a TargetTransformInfo::Concept conforming object specific
+// to the Lanai target machine. It uses the target's detailed information to
+// provide more precise answers to certain TTI queries, while letting the
+// target independent and default TTI implementations handle the rest.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_LANAITARGETTRANSFORMINFO_H
+#define LLVM_LIB_TARGET_LANAI_LANAITARGETTRANSFORMINFO_H
+
+#include "Lanai.h"
+#include "LanaiSubtarget.h"
+#include "LanaiTargetMachine.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+class LanaiTTIImpl : public BasicTTIImplBase<LanaiTTIImpl> {
+ typedef BasicTTIImplBase<LanaiTTIImpl> BaseT;
+ typedef TargetTransformInfo TTI;
+ friend BaseT;
+
+ const LanaiSubtarget *ST;
+ const LanaiTargetLowering *TLI;
+
+ const LanaiSubtarget *getST() const { return ST; }
+ const LanaiTargetLowering *getTLI() const { return TLI; }
+
+public:
+ explicit LanaiTTIImpl(const LanaiTargetMachine *TM, const Function &F)
+ : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
+ TLI(ST->getTargetLowering()) {}
+
+ LanaiTTIImpl(const LanaiTTIImpl &Arg)
+ : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
+ LanaiTTIImpl(LanaiTTIImpl &&Arg)
+ : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(Arg.ST), TLI(Arg.TLI) {}
+
+ bool shouldBuildLookupTables() const { return false; }
+
+ TargetTransformInfo::PopcntSupportKind getPopcntSupport(unsigned TyWidth) {
+ if (TyWidth == 32)
+ return TTI::PSK_FastHardware;
+ return TTI::PSK_Software;
+ }
+
+ unsigned getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty,
+ TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
+ TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
+ TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
+ TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None) {
+ int ISD = TLI->InstructionOpcodeToISD(Opcode);
+
+ switch (ISD) {
+ default:
+ return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
+ Opd1PropInfo, Opd2PropInfo);
+ case ISD::MUL:
+ case ISD::SDIV:
+ case ISD::UDIV:
+ case ISD::UREM:
+ // Increase the cost of multiplication and division to 64 times the
+ // baseline arithmetic cost. The factor was chosen arbitrarily to
+ // discourage emitting these operations, which are emulated in software.
+ // TODO: Investigate the performance impact given specialized lowerings.
+ return 64 * BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
+ Opd1PropInfo, Opd2PropInfo);
+ }
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_LANAITARGETTRANSFORMINFO_H
diff --git a/lib/Target/Lanai/MCTargetDesc/CMakeLists.txt b/lib/Target/Lanai/MCTargetDesc/CMakeLists.txt
new file mode 100644
index 000000000000..d65a1fd58901
--- /dev/null
+++ b/lib/Target/Lanai/MCTargetDesc/CMakeLists.txt
@@ -0,0 +1,8 @@
+add_llvm_library(LLVMLanaiMCTargetDesc
+ LanaiAsmBackend.cpp
+ LanaiELFObjectWriter.cpp
+ LanaiMCAsmInfo.cpp
+ LanaiMCCodeEmitter.cpp
+ LanaiMCExpr.cpp
+ LanaiMCTargetDesc.cpp
+)
diff --git a/lib/Target/Lanai/MCTargetDesc/LLVMBuild.txt b/lib/Target/Lanai/MCTargetDesc/LLVMBuild.txt
new file mode 100644
index 000000000000..7dc2a7694ab1
--- /dev/null
+++ b/lib/Target/Lanai/MCTargetDesc/LLVMBuild.txt
@@ -0,0 +1,23 @@
+;===-- ./lib/Target/Lanai/MCTargetDesc/LLVMBuild.txt -----------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = LanaiMCTargetDesc
+parent = Lanai
+required_libraries = LanaiInfo LanaiInstPrinter MC Support
+add_to_library_groups = Lanai
diff --git a/lib/Target/Lanai/MCTargetDesc/LanaiAsmBackend.cpp b/lib/Target/Lanai/MCTargetDesc/LanaiAsmBackend.cpp
new file mode 100644
index 000000000000..a3d8699f1317
--- /dev/null
+++ b/lib/Target/Lanai/MCTargetDesc/LanaiAsmBackend.cpp
@@ -0,0 +1,172 @@
+//===-- LanaiAsmBackend.cpp - Lanai Assembler Backend ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LanaiFixupKinds.h"
+#include "MCTargetDesc/LanaiMCTargetDesc.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+// Prepare value for the target space
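+// (all currently supported fixup kinds pass the value through unchanged).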
+static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
+ switch (Kind) {
+ case FK_Data_1:
+ case FK_Data_2:
+ case FK_Data_4:
+ case FK_Data_8:
+ return Value;
+ case Lanai::FIXUP_LANAI_21:
+ case Lanai::FIXUP_LANAI_21_F:
+ case Lanai::FIXUP_LANAI_25:
+ case Lanai::FIXUP_LANAI_32:
+ case Lanai::FIXUP_LANAI_HI16:
+ case Lanai::FIXUP_LANAI_LO16:
+ return Value;
+ default:
+ llvm_unreachable("Unknown fixup kind!");
+ }
+}
+
+namespace {
+class LanaiAsmBackend : public MCAsmBackend {
+ Triple::OSType OSType;
+
+public:
+ LanaiAsmBackend(const Target &T, Triple::OSType OST)
+ : MCAsmBackend(), OSType(OST) {}
+
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ uint64_t Value, bool IsPCRel) const override;
+
+ MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override;
+
+ // No instruction requires relaxation
+ bool fixupNeedsRelaxation(const MCFixup & /*Fixup*/, uint64_t /*Value*/,
+ const MCRelaxableFragment * /*DF*/,
+ const MCAsmLayout & /*Layout*/) const override {
+ return false;
+ }
+
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
+
+ unsigned getNumFixupKinds() const override {
+ return Lanai::NumTargetFixupKinds;
+ }
+
+ bool mayNeedRelaxation(const MCInst & /*Inst*/) const override {
+ return false;
+ }
+
+ void relaxInstruction(const MCInst & /*Inst*/,
+ const MCSubtargetInfo & /*STI*/,
+ MCInst & /*Res*/) const override {}
+
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
+};
+
+bool LanaiAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
+ if ((Count % 4) != 0)
+ return false;
+
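+ // Pad in 4-byte units with 0x15000000, the instruction word used here as a
+ // NOP.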
+ for (uint64_t i = 0; i < Count; i += 4)
+ OW->write32(0x15000000);
+
+ return true;
+}
+
+void LanaiAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
+ unsigned /*DataSize*/, uint64_t Value,
+ bool /*IsPCRel*/) const {
+ MCFixupKind Kind = Fixup.getKind();
+ Value = adjustFixupValue(static_cast<unsigned>(Kind), Value);
+
+ if (!Value)
+ return; // This value doesn't change the encoding
+
+ // Determine the offset into the object and the number of bytes that need
+ // fixing up.
+ unsigned Offset = Fixup.getOffset();
+ unsigned NumBytes = (getFixupKindInfo(Kind).TargetSize + 7) / 8;
+ unsigned FullSize = 4;
+
+ // Grab current value, if any, from bits.
+ uint64_t CurVal = 0;
+
+ // Load instruction and apply value
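+ // Instructions are 4 bytes wide and big-endian: index (FullSize - 1 - i)
+ // maps the i-th least-significant byte of the value onto its position in
+ // the instruction word.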
+ for (unsigned i = 0; i != NumBytes; ++i) {
+ unsigned Idx = (FullSize - 1 - i);
+ CurVal |= static_cast<uint64_t>(static_cast<uint8_t>(Data[Offset + Idx]))
+ << (i * 8);
+ }
+
+ uint64_t Mask =
+ (static_cast<uint64_t>(-1) >> (64 - getFixupKindInfo(Kind).TargetSize));
+ CurVal |= Value & Mask;
+
+ // Write out the fixed up bytes back to the code/data bits.
+ for (unsigned i = 0; i != NumBytes; ++i) {
+ unsigned Idx = (FullSize - 1 - i);
+ Data[Offset + Idx] = static_cast<uint8_t>((CurVal >> (i * 8)) & 0xff);
+ }
+}
+
+MCObjectWriter *
+LanaiAsmBackend::createObjectWriter(raw_pwrite_stream &OS) const {
+ return createLanaiELFObjectWriter(OS,
+ MCELFObjectTargetWriter::getOSABI(OSType));
+}
+
+const MCFixupKindInfo &
+LanaiAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
+ static const MCFixupKindInfo Infos[Lanai::NumTargetFixupKinds] = {
+ // This table *must* be in the same order as the fixup_* kinds in
+ // LanaiFixupKinds.h.
+ // Note: The number of bits indicated here are assumed to be contiguous.
+ // This does not hold true for LANAI_21 and LANAI_21_F which are applied
+ // to bits 0x7cffff and 0x7cfffc, respectively. Since the 'bits' counts
+ // here are used only for cosmetic purposes, we set the size to 16 bits
+ // for these 21-bit relocations, as llvm/lib/MC/MCAsmStreamer.cpp checks
+ // that no bits are set in the fixup range.
+ //
+ // name offset bits flags
+ {"FIXUP_LANAI_NONE", 0, 32, 0},
+ {"FIXUP_LANAI_21", 16, 16 /*21*/, 0},
+ {"FIXUP_LANAI_21_F", 16, 16 /*21*/, 0},
+ {"FIXUP_LANAI_25", 7, 25, 0},
+ {"FIXUP_LANAI_32", 0, 32, 0},
+ {"FIXUP_LANAI_HI16", 16, 16, 0},
+ {"FIXUP_LANAI_LO16", 16, 16, 0}};
+
+ if (Kind < FirstTargetFixupKind)
+ return MCAsmBackend::getFixupKindInfo(Kind);
+
+ assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
+ "Invalid kind!");
+ return Infos[Kind - FirstTargetFixupKind];
+}
+
+} // namespace
+
+MCAsmBackend *llvm::createLanaiAsmBackend(const Target &T,
+ const MCRegisterInfo & /*MRI*/,
+ const Triple &TheTriple,
+ StringRef /*CPU*/) {
+ if (!TheTriple.isOSBinFormatELF())
+ llvm_unreachable("OS not supported");
+
+ return new LanaiAsmBackend(T, TheTriple.getOS());
+}
diff --git a/lib/Target/Lanai/MCTargetDesc/LanaiBaseInfo.h b/lib/Target/Lanai/MCTargetDesc/LanaiBaseInfo.h
new file mode 100644
index 000000000000..ce7f83509c9b
--- /dev/null
+++ b/lib/Target/Lanai/MCTargetDesc/LanaiBaseInfo.h
@@ -0,0 +1,119 @@
+//===-- LanaiBaseInfo.h - Top level definitions for Lanai MC ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains small standalone helper functions and enum definitions for
+// the Lanai target useful for the compiler back-end and the MC libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_MCTARGETDESC_LANAIBASEINFO_H
+#define LLVM_LIB_TARGET_LANAI_MCTARGETDESC_LANAIBASEINFO_H
+
+#include "LanaiMCTargetDesc.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+
+// LanaiII - This namespace holds all of the target specific flags that
+// instruction info tracks.
+namespace LanaiII {
+// Target Operand Flag enum.
+enum TOF {
+ //===------------------------------------------------------------------===//
+ // Lanai Specific MachineOperand flags.
+ MO_NO_FLAG,
+
+ // MO_ABS_HI/LO - Represents the hi or low part of an absolute symbol
+ // address.
+ MO_ABS_HI,
+ MO_ABS_LO,
+};
+} // namespace LanaiII
+
+static inline unsigned getLanaiRegisterNumbering(unsigned Reg) {
+ switch (Reg) {
+ case Lanai::R0:
+ return 0;
+ case Lanai::R1:
+ return 1;
+ case Lanai::R2:
+ case Lanai::PC:
+ return 2;
+ case Lanai::R3:
+ return 3;
+ case Lanai::R4:
+ case Lanai::SP:
+ return 4;
+ case Lanai::R5:
+ case Lanai::FP:
+ return 5;
+ case Lanai::R6:
+ return 6;
+ case Lanai::R7:
+ return 7;
+ case Lanai::R8:
+ case Lanai::RV:
+ return 8;
+ case Lanai::R9:
+ return 9;
+ case Lanai::R10:
+ case Lanai::RR1:
+ return 10;
+ case Lanai::R11:
+ case Lanai::RR2:
+ return 11;
+ case Lanai::R12:
+ return 12;
+ case Lanai::R13:
+ return 13;
+ case Lanai::R14:
+ return 14;
+ case Lanai::R15:
+ case Lanai::RCA:
+ return 15;
+ case Lanai::R16:
+ return 16;
+ case Lanai::R17:
+ return 17;
+ case Lanai::R18:
+ return 18;
+ case Lanai::R19:
+ return 19;
+ case Lanai::R20:
+ return 20;
+ case Lanai::R21:
+ return 21;
+ case Lanai::R22:
+ return 22;
+ case Lanai::R23:
+ return 23;
+ case Lanai::R24:
+ return 24;
+ case Lanai::R25:
+ return 25;
+ case Lanai::R26:
+ return 26;
+ case Lanai::R27:
+ return 27;
+ case Lanai::R28:
+ return 28;
+ case Lanai::R29:
+ return 29;
+ case Lanai::R30:
+ return 30;
+ case Lanai::R31:
+ return 31;
+ default:
+ llvm_unreachable("Unknown register number!");
+ }
+}
+} // namespace llvm
+#endif // LLVM_LIB_TARGET_LANAI_MCTARGETDESC_LANAIBASEINFO_H
diff --git a/lib/Target/Lanai/MCTargetDesc/LanaiELFObjectWriter.cpp b/lib/Target/Lanai/MCTargetDesc/LanaiELFObjectWriter.cpp
new file mode 100644
index 000000000000..e30d5e9a18eb
--- /dev/null
+++ b/lib/Target/Lanai/MCTargetDesc/LanaiELFObjectWriter.cpp
@@ -0,0 +1,95 @@
+//===-- LanaiELFObjectWriter.cpp - Lanai ELF Writer -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/LanaiBaseInfo.h"
+#include "MCTargetDesc/LanaiFixupKinds.h"
+#include "MCTargetDesc/LanaiMCTargetDesc.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+namespace {
+class LanaiELFObjectWriter : public MCELFObjectTargetWriter {
+public:
+ explicit LanaiELFObjectWriter(uint8_t OSABI);
+
+ ~LanaiELFObjectWriter() override;
+
+protected:
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
+ bool needsRelocateWithSymbol(const MCSymbol &SD,
+ unsigned Type) const override;
+};
+} // namespace
+
+LanaiELFObjectWriter::LanaiELFObjectWriter(uint8_t OSABI)
+ : MCELFObjectTargetWriter(/*Is64Bit_=*/false, OSABI, ELF::EM_LANAI,
+ /*HasRelocationAddend=*/true) {}
+
+LanaiELFObjectWriter::~LanaiELFObjectWriter() {}
+
+unsigned LanaiELFObjectWriter::getRelocType(MCContext & /*Ctx*/,
+ const MCValue & /*Target*/,
+ const MCFixup &Fixup,
+ bool /*IsPCRel*/) const {
+ unsigned Type;
+ unsigned Kind = static_cast<unsigned>(Fixup.getKind());
+ switch (Kind) {
+ case Lanai::FIXUP_LANAI_21:
+ Type = ELF::R_LANAI_21;
+ break;
+ case Lanai::FIXUP_LANAI_21_F:
+ Type = ELF::R_LANAI_21_F;
+ break;
+ case Lanai::FIXUP_LANAI_25:
+ Type = ELF::R_LANAI_25;
+ break;
+ case Lanai::FIXUP_LANAI_32:
+ case FK_Data_4:
+ Type = ELF::R_LANAI_32;
+ break;
+ case Lanai::FIXUP_LANAI_HI16:
+ Type = ELF::R_LANAI_HI16;
+ break;
+ case Lanai::FIXUP_LANAI_LO16:
+ Type = ELF::R_LANAI_LO16;
+ break;
+ case Lanai::FIXUP_LANAI_NONE:
+ Type = ELF::R_LANAI_NONE;
+ break;
+
+ default:
+ llvm_unreachable("Invalid fixup kind!");
+ }
+ return Type;
+}
+
+bool LanaiELFObjectWriter::needsRelocateWithSymbol(const MCSymbol & /*SD*/,
+ unsigned Type) const {
+ switch (Type) {
+ case ELF::R_LANAI_21:
+ case ELF::R_LANAI_21_F:
+ case ELF::R_LANAI_25:
+ case ELF::R_LANAI_32:
+ case ELF::R_LANAI_HI16:
+ return true;
+ default:
+ return false;
+ }
+}
+
+MCObjectWriter *llvm::createLanaiELFObjectWriter(raw_pwrite_stream &OS,
+ uint8_t OSABI) {
+ MCELFObjectTargetWriter *MOTW = new LanaiELFObjectWriter(OSABI);
+ return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/false);
+}
diff --git a/lib/Target/Lanai/MCTargetDesc/LanaiFixupKinds.h b/lib/Target/Lanai/MCTargetDesc/LanaiFixupKinds.h
new file mode 100644
index 000000000000..9ff8340d2922
--- /dev/null
+++ b/lib/Target/Lanai/MCTargetDesc/LanaiFixupKinds.h
@@ -0,0 +1,43 @@
+//===-- LanaiFixupKinds.h - Lanai Specific Fixup Entries --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_MCTARGETDESC_LANAIFIXUPKINDS_H
+#define LLVM_LIB_TARGET_LANAI_MCTARGETDESC_LANAIFIXUPKINDS_H
+
+#include "llvm/MC/MCFixup.h"
+
+namespace llvm {
+namespace Lanai {
+// Although most of the current fixup types reflect a unique relocation,
+// one can have multiple fixup types for a given relocation, and thus they
+// need to be uniquely named.
+//
+// This table *must* be in the same order as
+// MCFixupKindInfo Infos[Lanai::NumTargetFixupKinds]
+// in LanaiAsmBackend.cpp.
+//
+enum Fixups {
+ // Results in R_Lanai_NONE
+ FIXUP_LANAI_NONE = FirstTargetFixupKind,
+
+ FIXUP_LANAI_21, // 21-bit symbol relocation
+ FIXUP_LANAI_21_F, // 21-bit symbol relocation, last two bits masked to 0
+ FIXUP_LANAI_25, // 25-bit branch targets
+ FIXUP_LANAI_32, // general 32-bit relocation
+ FIXUP_LANAI_HI16, // upper 16-bits of a symbolic relocation
+ FIXUP_LANAI_LO16, // lower 16-bits of a symbolic relocation
+
+ // Marker
+ LastTargetFixupKind,
+ NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
+};
+} // namespace Lanai
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_MCTARGETDESC_LANAIFIXUPKINDS_H
diff --git a/lib/Target/Lanai/MCTargetDesc/LanaiMCAsmInfo.cpp b/lib/Target/Lanai/MCTargetDesc/LanaiMCAsmInfo.cpp
new file mode 100644
index 000000000000..7e2705e67b6d
--- /dev/null
+++ b/lib/Target/Lanai/MCTargetDesc/LanaiMCAsmInfo.cpp
@@ -0,0 +1,43 @@
+//===-- LanaiMCAsmInfo.cpp - Lanai asm properties -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declarations of the LanaiMCAsmInfo properties.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LanaiMCAsmInfo.h"
+
+#include "llvm/ADT/Triple.h"
+
+using namespace llvm;
+
+void LanaiMCAsmInfo::anchor() {}
+
+LanaiMCAsmInfo::LanaiMCAsmInfo(const Triple & /*TheTriple*/) {
+ IsLittleEndian = false;
+ PrivateGlobalPrefix = ".L";
+ WeakRefDirective = "\t.weak\t";
+ ExceptionsType = ExceptionHandling::DwarfCFI;
+
+ // Lanai assembly requires ".section" before ".bss"
+ UsesELFSectionDirectiveForBSS = true;
+
+ // Use the integrated assembler instead of system one.
+ UseIntegratedAssembler = true;
+
+ // Use '!' as comment string to correspond with old toolchain.
+ CommentString = "!";
+
+ // Target supports emission of debugging information.
+ SupportsDebugInformation = true;
+
+ // Set the instruction alignment. Currently used only for address adjustment
+ // in dwarf generation.
+ MinInstAlignment = 4;
+}
diff --git a/lib/Target/Lanai/MCTargetDesc/LanaiMCAsmInfo.h b/lib/Target/Lanai/MCTargetDesc/LanaiMCAsmInfo.h
new file mode 100644
index 000000000000..3eef0592d2fa
--- /dev/null
+++ b/lib/Target/Lanai/MCTargetDesc/LanaiMCAsmInfo.h
@@ -0,0 +1,31 @@
+//=====-- LanaiMCAsmInfo.h - Lanai asm properties -----------*- C++ -*--====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the LanaiMCAsmInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_MCTARGETDESC_LANAIMCASMINFO_H
+#define LLVM_LIB_TARGET_LANAI_MCTARGETDESC_LANAIMCASMINFO_H
+
+#include "llvm/MC/MCAsmInfoELF.h"
+
+namespace llvm {
+class Triple;
+
+class LanaiMCAsmInfo : public MCAsmInfoELF {
+ void anchor() override;
+
+public:
+ explicit LanaiMCAsmInfo(const Triple &TheTriple);
+};
+
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_LANAI_MCTARGETDESC_LANAIMCASMINFO_H
diff --git a/lib/Target/Lanai/MCTargetDesc/LanaiMCCodeEmitter.cpp b/lib/Target/Lanai/MCTargetDesc/LanaiMCCodeEmitter.cpp
new file mode 100644
index 000000000000..f14adc27dd45
--- /dev/null
+++ b/lib/Target/Lanai/MCTargetDesc/LanaiMCCodeEmitter.cpp
@@ -0,0 +1,326 @@
+//===-- LanaiMCCodeEmitter.cpp - Convert Lanai code to machine code -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LanaiMCCodeEmitter class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Lanai.h"
+#include "MCTargetDesc/LanaiBaseInfo.h"
+#include "MCTargetDesc/LanaiFixupKinds.h"
+#include "MCTargetDesc/LanaiMCExpr.h"
+#include "MCTargetDesc/LanaiMCTargetDesc.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "mccodeemitter"
+
+STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
+
+namespace llvm {
+namespace {
+class LanaiMCCodeEmitter : public MCCodeEmitter {
+ LanaiMCCodeEmitter(const LanaiMCCodeEmitter &); // DO NOT IMPLEMENT
+ void operator=(const LanaiMCCodeEmitter &); // DO NOT IMPLEMENT
+ const MCInstrInfo &InstrInfo;
+ MCContext &Context;
+
+public:
+ LanaiMCCodeEmitter(const MCInstrInfo &MCII, MCContext &C)
+ : InstrInfo(MCII), Context(C) {}
+
+ ~LanaiMCCodeEmitter() override {}
+
+ // The functions below are called by TableGen generated functions for getting
+ // the binary encoding of instructions/operands.
+
+ // getBinaryCodeForInstr - TableGen'erated function for getting the
+ // binary encoding for an instruction.
+ uint64_t getBinaryCodeForInstr(const MCInst &Inst,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &SubtargetInfo) const;
+
+ // getMachineOpValue - Return binary encoding of operand. If the machine
+ // operand requires relocation, record the relocation and return zero.
+ unsigned getMachineOpValue(const MCInst &Inst, const MCOperand &MCOp,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &SubtargetInfo) const;
+
+ unsigned getRiMemoryOpValue(const MCInst &Inst, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &SubtargetInfo) const;
+
+ unsigned getRrMemoryOpValue(const MCInst &Inst, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &SubtargetInfo) const;
+
+ unsigned getSplsOpValue(const MCInst &Inst, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &SubtargetInfo) const;
+
+ unsigned getBranchTargetOpValue(const MCInst &Inst, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &SubtargetInfo) const;
+
+ unsigned getCallTargetOpValue(const MCInst &Inst, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &SubtargetInfo) const;
+
+ void encodeInstruction(const MCInst &Inst, raw_ostream &Ostream,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &SubtargetInfo) const override;
+
+ unsigned adjustPqBitsRmAndRrm(const MCInst &Inst, unsigned Value,
+ const MCSubtargetInfo &STI) const;
+
+ unsigned adjustPqBitsSpls(const MCInst &Inst, unsigned Value,
+ const MCSubtargetInfo &STI) const;
+};
+
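+// Map a symbolic operand to its fixup kind: plain symbol references use the
+// 21-bit fixup, while LanaiMCExpr variants select the hi/lo 16-bit fixups.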
+Lanai::Fixups FixupKind(const MCExpr *Expr) {
+ if (isa<MCSymbolRefExpr>(Expr))
+ return Lanai::FIXUP_LANAI_21;
+ if (const LanaiMCExpr *McExpr = dyn_cast<LanaiMCExpr>(Expr)) {
+ LanaiMCExpr::VariantKind ExprKind = McExpr->getKind();
+ switch (ExprKind) {
+ case LanaiMCExpr::VK_Lanai_None:
+ return Lanai::FIXUP_LANAI_21;
+ case LanaiMCExpr::VK_Lanai_ABS_HI:
+ return Lanai::FIXUP_LANAI_HI16;
+ case LanaiMCExpr::VK_Lanai_ABS_LO:
+ return Lanai::FIXUP_LANAI_LO16;
+ }
+ }
+ return Lanai::Fixups(0);
+}
+
+// getMachineOpValue - Return binary encoding of operand. If the machine
+// operand requires relocation, record the relocation and return zero.
+unsigned LanaiMCCodeEmitter::getMachineOpValue(
+ const MCInst &Inst, const MCOperand &MCOp, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &SubtargetInfo) const {
+ if (MCOp.isReg())
+ return getLanaiRegisterNumbering(MCOp.getReg());
+ if (MCOp.isImm())
+ return static_cast<unsigned>(MCOp.getImm());
+
+ // MCOp must be an expression
+ assert(MCOp.isExpr());
+ const MCExpr *Expr = MCOp.getExpr();
+
+ // Extract the symbolic reference side of a binary expression.
+ if (Expr->getKind() == MCExpr::Binary) {
+ const MCBinaryExpr *BinaryExpr = static_cast<const MCBinaryExpr *>(Expr);
+ Expr = BinaryExpr->getLHS();
+ }
+
+ assert(isa<LanaiMCExpr>(Expr) || Expr->getKind() == MCExpr::SymbolRef);
+ // Push fixup (all info is contained within)
+ Fixups.push_back(
+ MCFixup::create(0, MCOp.getExpr(), MCFixupKind(FixupKind(Expr))));
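+ // For example, a bare symbol reference records a FIXUP_LANAI_21 here and
+ // the operand field itself encodes as zero.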
+ return 0;
+}
+
+// Helper function to adjust P and Q bits on load and store instructions.
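+// For the RM/RRM forms PBitShift is 17 and QBitShift is 16 (see
+// adjustPqBitsRmAndRrm below), so e.g. a pre-modifying load with a nonzero
+// immediate offset has both bit 17 (P) and bit 16 (Q) set.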
+unsigned adjustPqBits(const MCInst &Inst, unsigned Value, unsigned PBitShift,
+ unsigned QBitShift) {
+ const MCOperand AluOp = Inst.getOperand(3);
+ unsigned AluCode = AluOp.getImm();
+
+ // Set the P bit iff the operand is a nonzero immediate, a register other
+ // than R0, or an expression, and the ALU op is not a post-op.
+ const MCOperand Op2 = Inst.getOperand(2);
+ Value &= ~(1 << PBitShift);
+ if (!LPAC::isPostOp(AluCode) &&
+ ((Op2.isImm() && Op2.getImm() != 0) ||
+ (Op2.isReg() && Op2.getReg() != Lanai::R0) || (Op2.isExpr())))
+ Value |= (1 << PBitShift);
+
+ // Set the Q bit iff the ALU op modifies the base register (pre- or
+ // post-op) and the operand is a nonzero immediate or a register other
+ // than R0.
+ assert(Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg() &&
+ "Expected register operand.");
+ Value &= ~(1 << QBitShift);
+ if (LPAC::modifiesOp(AluCode) && ((Op2.isImm() && Op2.getImm() != 0) ||
+ (Op2.isReg() && Op2.getReg() != Lanai::R0)))
+ Value |= (1 << QBitShift);
+
+ return Value;
+}
+
+unsigned
+LanaiMCCodeEmitter::adjustPqBitsRmAndRrm(const MCInst &Inst, unsigned Value,
+ const MCSubtargetInfo &STI) const {
+ return adjustPqBits(Inst, Value, 17, 16);
+}
+
+unsigned
+LanaiMCCodeEmitter::adjustPqBitsSpls(const MCInst &Inst, unsigned Value,
+ const MCSubtargetInfo &STI) const {
+ return adjustPqBits(Inst, Value, 11, 10);
+}
+
+void LanaiMCCodeEmitter::encodeInstruction(
+ const MCInst &Inst, raw_ostream &Ostream, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &SubtargetInfo) const {
+ // Get instruction encoding and emit it
+ unsigned Value = getBinaryCodeForInstr(Inst, Fixups, SubtargetInfo);
+ ++MCNumEmitted; // Keep track of the number of emitted insns.
+
+ // Emit the 32-bit instruction word in big-endian byte order.
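+ // For example, Value = 0x12345678 goes out as the bytes 0x12, 0x34, 0x56,
+ // 0x78 (most-significant byte first).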
+ for (int i = (4 - 1) * 8; i >= 0; i -= 8)
+ Ostream << static_cast<char>((Value >> i) & 0xff);
+}
+
+// Encode a Lanai RI-format memory operand.
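+// Illustrative layout, assuming a base register numbered 5, an immediate of
+// 8 and a pre-modifying add: (5 << 18) | (0x3 << 16) | 8 == 0x170008, i.e.
+// the register in bits 22:18, P/Q in bits 17:16 and the immediate in bits
+// 15:0.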
+unsigned LanaiMCCodeEmitter::getRiMemoryOpValue(
+ const MCInst &Inst, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &SubtargetInfo) const {
+ unsigned Encoding;
+ const MCOperand Op1 = Inst.getOperand(OpNo + 0);
+ const MCOperand Op2 = Inst.getOperand(OpNo + 1);
+ const MCOperand AluOp = Inst.getOperand(OpNo + 2);
+
+ assert(Op1.isReg() && "First operand is not register.");
+ assert((Op2.isImm() || Op2.isExpr()) &&
+ "Second operand is neither an immediate nor an expression.");
+ assert((LPAC::getAluOp(AluOp.getImm()) == LPAC::ADD) &&
+ "Register immediate only supports addition operator");
+
+ Encoding = (getLanaiRegisterNumbering(Op1.getReg()) << 18);
+ if (Op2.isImm()) {
+ assert(isInt<16>(Op2.getImm()) &&
+ "Constant value truncated (limited to 16-bit)");
+
+ Encoding |= (Op2.getImm() & 0xffff);
+ if (Op2.getImm() != 0) {
+ if (LPAC::isPreOp(AluOp.getImm()))
+ Encoding |= (0x3 << 16);
+ if (LPAC::isPostOp(AluOp.getImm()))
+ Encoding |= (0x1 << 16);
+ }
+ } else
+ getMachineOpValue(Inst, Op2, Fixups, SubtargetInfo);
+
+ return Encoding;
+}
+
+unsigned LanaiMCCodeEmitter::getRrMemoryOpValue(
+ const MCInst &Inst, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &SubtargetInfo) const {
+ unsigned Encoding;
+ const MCOperand Op1 = Inst.getOperand(OpNo + 0);
+ const MCOperand Op2 = Inst.getOperand(OpNo + 1);
+ const MCOperand AluMCOp = Inst.getOperand(OpNo + 2);
+
+ assert(Op1.isReg() && "First operand is not register.");
+ Encoding = (getLanaiRegisterNumbering(Op1.getReg()) << 15);
+ assert(Op2.isReg() && "Second operand is not register.");
+ Encoding |= (getLanaiRegisterNumbering(Op2.getReg()) << 10);
+
+ assert(AluMCOp.isImm() && "Third operand is not an immediate.");
+ // Set BBB
+ unsigned AluOp = AluMCOp.getImm();
+ Encoding |= LPAC::encodeLanaiAluCode(AluOp) << 5;
+ // Set P and Q
+ if (LPAC::isPreOp(AluOp))
+ Encoding |= (0x3 << 8);
+ if (LPAC::isPostOp(AluOp))
+ Encoding |= (0x1 << 8);
+ // Set JJJJ
+ switch (LPAC::getAluOp(AluOp)) {
+ case LPAC::SHL:
+ case LPAC::SRL:
+ Encoding |= 0x10;
+ break;
+ case LPAC::SRA:
+ Encoding |= 0x18;
+ break;
+ default:
+ break;
+ }
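+ // At this point Encoding packs Op1 in bits 19:15, Op2 in bits 14:10, P/Q
+ // in bits 9:8, the ALU BBB field in bits 7:5 and the shift selector (0x10
+ // for SHL/SRL, 0x18 for SRA) in the low bits.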
+
+ return Encoding;
+}
+
+unsigned
+LanaiMCCodeEmitter::getSplsOpValue(const MCInst &Inst, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &SubtargetInfo) const {
+ unsigned Encoding;
+ const MCOperand Op1 = Inst.getOperand(OpNo + 0);
+ const MCOperand Op2 = Inst.getOperand(OpNo + 1);
+ const MCOperand AluOp = Inst.getOperand(OpNo + 2);
+
+ assert(Op1.isReg() && "First operand is not register.");
+ assert((Op2.isImm() || Op2.isExpr()) &&
+ "Second operand is neither an immediate nor an expression.");
+ assert((LPAC::getAluOp(AluOp.getImm()) == LPAC::ADD) &&
+ "Register immediate only supports addition operator");
+
+ Encoding = (getLanaiRegisterNumbering(Op1.getReg()) << 12);
+ if (Op2.isImm()) {
+ assert(isInt<10>(Op2.getImm()) &&
+ "Constant value truncated (limited to 10-bit)");
+
+ Encoding |= (Op2.getImm() & 0x3ff);
+ if (Op2.getImm() != 0) {
+ if (LPAC::isPreOp(AluOp.getImm()))
+ Encoding |= (0x3 << 10);
+ if (LPAC::isPostOp(AluOp.getImm()))
+ Encoding |= (0x1 << 10);
+ }
+ } else
+ getMachineOpValue(Inst, Op2, Fixups, SubtargetInfo);
+
+ return Encoding;
+}
+
+unsigned LanaiMCCodeEmitter::getCallTargetOpValue(
+ const MCInst &Inst, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &SubtargetInfo) const {
+ const MCOperand &MCOp = Inst.getOperand(OpNo);
+ if (MCOp.isReg() || MCOp.isImm())
+ return getMachineOpValue(Inst, MCOp, Fixups, SubtargetInfo);
+
+ Fixups.push_back(MCFixup::create(
+ 0, MCOp.getExpr(), static_cast<MCFixupKind>(Lanai::FIXUP_LANAI_25)));
+
+ return 0;
+}
+
+unsigned LanaiMCCodeEmitter::getBranchTargetOpValue(
+ const MCInst &Inst, unsigned OpNo, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &SubtargetInfo) const {
+ const MCOperand &MCOp = Inst.getOperand(OpNo);
+ if (MCOp.isReg() || MCOp.isImm())
+ return getMachineOpValue(Inst, MCOp, Fixups, SubtargetInfo);
+
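+ // As with call targets above, a symbolic branch target records a 25-bit
+ // fixup (FIXUP_LANAI_25) and encodes as zero here.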
+ Fixups.push_back(MCFixup::create(
+ 0, MCOp.getExpr(), static_cast<MCFixupKind>(Lanai::FIXUP_LANAI_25)));
+
+ return 0;
+}
+
+#include "LanaiGenMCCodeEmitter.inc"
+} // namespace
+} // namespace llvm
+
+llvm::MCCodeEmitter *
+llvm::createLanaiMCCodeEmitter(const MCInstrInfo &InstrInfo,
+ const MCRegisterInfo & /*MRI*/,
+ MCContext &context) {
+ return new LanaiMCCodeEmitter(InstrInfo, context);
+}
diff --git a/lib/Target/Lanai/MCTargetDesc/LanaiMCExpr.cpp b/lib/Target/Lanai/MCTargetDesc/LanaiMCExpr.cpp
new file mode 100644
index 000000000000..201c95de07f4
--- /dev/null
+++ b/lib/Target/Lanai/MCTargetDesc/LanaiMCExpr.cpp
@@ -0,0 +1,60 @@
+//===-- LanaiMCExpr.cpp - Lanai specific MC expression classes ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LanaiMCExpr.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCStreamer.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "lanaimcexpr"
+
+const LanaiMCExpr *LanaiMCExpr::create(VariantKind Kind, const MCExpr *Expr,
+ MCContext &Ctx) {
+ return new (Ctx) LanaiMCExpr(Kind, Expr);
+}
+
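+// For example, an ABS_HI reference to a symbol foo prints as "hi(foo)" and
+// an ABS_LO reference as "lo(foo)".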
+void LanaiMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
+ if (Kind == VK_Lanai_None) {
+ Expr->print(OS, MAI);
+ return;
+ }
+
+ switch (Kind) {
+ default:
+ llvm_unreachable("Invalid kind!");
+ case VK_Lanai_ABS_HI:
+ OS << "hi";
+ break;
+ case VK_Lanai_ABS_LO:
+ OS << "lo";
+ break;
+ }
+
+ OS << '(';
+ const MCExpr *Expr = getSubExpr();
+ Expr->print(OS, MAI);
+ OS << ')';
+}
+
+void LanaiMCExpr::visitUsedExpr(MCStreamer &Streamer) const {
+ Streamer.visitUsedExpr(*getSubExpr());
+}
+
+bool LanaiMCExpr::evaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const {
+ if (!getSubExpr()->evaluateAsRelocatable(Res, Layout, Fixup))
+ return false;
+
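+ // Fold the variant kind into the MCValue's RefKind; presumably this is
+ // how the ELF writer later selects the matching hi()/lo() relocation.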
+ Res =
+ MCValue::get(Res.getSymA(), Res.getSymB(), Res.getConstant(), getKind());
+
+ return true;
+}
diff --git a/lib/Target/Lanai/MCTargetDesc/LanaiMCExpr.h b/lib/Target/Lanai/MCTargetDesc/LanaiMCExpr.h
new file mode 100644
index 000000000000..5004d541ff70
--- /dev/null
+++ b/lib/Target/Lanai/MCTargetDesc/LanaiMCExpr.h
@@ -0,0 +1,56 @@
+//===-- LanaiMCExpr.h - Lanai specific MC expression classes ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_MCTARGETDESC_LANAIMCEXPR_H
+#define LLVM_LIB_TARGET_LANAI_MCTARGETDESC_LANAIMCEXPR_H
+
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCValue.h"
+
+namespace llvm {
+
+class LanaiMCExpr : public MCTargetExpr {
+public:
+ enum VariantKind { VK_Lanai_None, VK_Lanai_ABS_HI, VK_Lanai_ABS_LO };
+
+private:
+ const VariantKind Kind;
+ const MCExpr *Expr;
+
+ explicit LanaiMCExpr(VariantKind Kind, const MCExpr *Expr)
+ : Kind(Kind), Expr(Expr) {}
+
+public:
+ static const LanaiMCExpr *create(VariantKind Kind, const MCExpr *Expr,
+ MCContext &Ctx);
+
+ // Returns the kind of this expression.
+ VariantKind getKind() const { return Kind; }
+
+ // Returns the child of this expression.
+ const MCExpr *getSubExpr() const { return Expr; }
+
+ void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const override;
+ bool evaluateAsRelocatableImpl(MCValue &Res, const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const override;
+ void visitUsedExpr(MCStreamer &Streamer) const override;
+ MCFragment *findAssociatedFragment() const override {
+ return getSubExpr()->findAssociatedFragment();
+ }
+
+ // There are no TLS LanaiMCExprs at the moment.
+ void fixELFSymbolsInTLSFixups(MCAssembler & /*Asm*/) const override {}
+
+ static bool classof(const MCExpr *E) {
+ return E->getKind() == MCExpr::Target;
+ }
+};
+} // end namespace llvm
+
+#endif
diff --git a/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp b/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp
new file mode 100644
index 000000000000..04bedfb7fba7
--- /dev/null
+++ b/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp
@@ -0,0 +1,149 @@
+//===-- LanaiMCTargetDesc.cpp - Lanai Target Descriptions -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Lanai specific target descriptions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LanaiMCTargetDesc.h"
+
+#include "InstPrinter/LanaiInstPrinter.h"
+#include "LanaiMCAsmInfo.h"
+#include "llvm/MC/MCInstrAnalysis.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+
+#define GET_INSTRINFO_MC_DESC
+#include "LanaiGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_MC_DESC
+#include "LanaiGenSubtargetInfo.inc"
+
+#define GET_REGINFO_MC_DESC
+#include "LanaiGenRegisterInfo.inc"
+
+using namespace llvm;
+
+static MCInstrInfo *createLanaiMCInstrInfo() {
+ MCInstrInfo *X = new MCInstrInfo();
+ InitLanaiMCInstrInfo(X);
+ return X;
+}
+
+static MCRegisterInfo *createLanaiMCRegisterInfo(const Triple & /*TT*/) {
+ MCRegisterInfo *X = new MCRegisterInfo();
+ InitLanaiMCRegisterInfo(X, Lanai::RCA, 0, 0, Lanai::PC);
+ return X;
+}
+
+static MCSubtargetInfo *
+createLanaiMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
+ std::string CPUName = CPU;
+ if (CPUName.empty())
+ CPUName = "generic";
+
+ return createLanaiMCSubtargetInfoImpl(TT, CPUName, FS);
+}
+
+static MCStreamer *createMCStreamer(const Triple &T, MCContext &Context,
+ MCAsmBackend &MAB, raw_pwrite_stream &OS,
+ MCCodeEmitter *Emitter, bool RelaxAll) {
+ if (!T.isOSBinFormatELF())
+ llvm_unreachable("OS not supported");
+
+ return createELFStreamer(Context, MAB, OS, Emitter, RelaxAll);
+}
+
+static MCInstPrinter *createLanaiMCInstPrinter(const Triple & /*T*/,
+ unsigned SyntaxVariant,
+ const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI) {
+ if (SyntaxVariant == 0)
+ return new LanaiInstPrinter(MAI, MII, MRI);
+ return nullptr;
+}
+
+MCRelocationInfo *createLanaiElfRelocation(const Triple &TheTriple,
+ MCContext &Ctx) {
+ return createMCRelocationInfo(TheTriple, Ctx);
+}
+
+class LanaiMCInstrAnalysis : public MCInstrAnalysis {
+public:
+ explicit LanaiMCInstrAnalysis(const MCInstrInfo *Info)
+ : MCInstrAnalysis(Info) {}
+
+ bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
+ uint64_t &Target) const override {
+ if (Inst.getNumOperands() == 0)
+ return false;
+
+ if (Info->get(Inst.getOpcode()).OpInfo[0].OperandType ==
+ MCOI::OPERAND_PCREL) {
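+ // PC-relative: e.g. a branch at Addr 0x100 with Size 4 and an
+ // immediate of 8 targets 0x10C.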
+ int64_t Imm = Inst.getOperand(0).getImm();
+ Target = Addr + Size + Imm;
+ return true;
+ } else {
+ int64_t Imm = Inst.getOperand(0).getImm();
+
+ // Skip the case where the immediate is 0, as that occurs in files that
+ // haven't been linked and the inferred branch target would be wrong.
+ if (Imm == 0)
+ return false;
+
+ Target = Imm;
+ return true;
+ }
+ }
+};
+
+static MCInstrAnalysis *createLanaiInstrAnalysis(const MCInstrInfo *Info) {
+ return new LanaiMCInstrAnalysis(Info);
+}
+
+extern "C" void LLVMInitializeLanaiTargetMC() {
+ // Register the MC asm info.
+ RegisterMCAsmInfo<LanaiMCAsmInfo> X(TheLanaiTarget);
+
+ // Register the MC instruction info.
+ TargetRegistry::RegisterMCInstrInfo(TheLanaiTarget, createLanaiMCInstrInfo);
+
+ // Register the MC register info.
+ TargetRegistry::RegisterMCRegInfo(TheLanaiTarget, createLanaiMCRegisterInfo);
+
+ // Register the MC subtarget info.
+ TargetRegistry::RegisterMCSubtargetInfo(TheLanaiTarget,
+ createLanaiMCSubtargetInfo);
+
+ // Register the MC code emitter
+ TargetRegistry::RegisterMCCodeEmitter(TheLanaiTarget,
+ llvm::createLanaiMCCodeEmitter);
+
+ // Register the ASM Backend
+ TargetRegistry::RegisterMCAsmBackend(TheLanaiTarget, createLanaiAsmBackend);
+
+ // Register the MCInstPrinter.
+ TargetRegistry::RegisterMCInstPrinter(TheLanaiTarget,
+ createLanaiMCInstPrinter);
+
+ // Register the ELF streamer.
+ TargetRegistry::RegisterELFStreamer(TheLanaiTarget, createMCStreamer);
+
+ // Register the MC relocation info.
+ TargetRegistry::RegisterMCRelocationInfo(TheLanaiTarget,
+ createLanaiElfRelocation);
+
+ // Register the MC instruction analyzer.
+ TargetRegistry::RegisterMCInstrAnalysis(TheLanaiTarget,
+ createLanaiInstrAnalysis);
+}
diff --git a/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.h b/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.h
new file mode 100644
index 000000000000..e117ed7a500f
--- /dev/null
+++ b/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.h
@@ -0,0 +1,59 @@
+//===-- LanaiMCTargetDesc.h - Lanai Target Descriptions ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Lanai specific target descriptions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_LANAI_MCTARGETDESC_LANAIMCTARGETDESC_H
+#define LLVM_LIB_TARGET_LANAI_MCTARGETDESC_LANAIMCTARGETDESC_H
+
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+class MCAsmBackend;
+class MCCodeEmitter;
+class MCContext;
+class MCInstrInfo;
+class MCInstrAnalysis;
+class MCObjectWriter;
+class MCRelocationInfo;
+class MCSubtargetInfo;
+class Target;
+class Triple;
+class StringRef;
+class raw_pwrite_stream;
+
+extern Target TheLanaiTarget;
+
+MCCodeEmitter *createLanaiMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ MCContext &Ctx);
+
+MCAsmBackend *createLanaiAsmBackend(const Target &T, const MCRegisterInfo &MRI,
+ const Triple &TheTriple, StringRef CPU);
+
+MCObjectWriter *createLanaiELFObjectWriter(raw_pwrite_stream &OS,
+ uint8_t OSABI);
+} // namespace llvm
+
+// Defines symbolic names for the Lanai registers, i.e. a mapping from
+// register name to register number.
+#define GET_REGINFO_ENUM
+#include "LanaiGenRegisterInfo.inc"
+
+// Defines symbolic names for the Lanai instructions.
+#define GET_INSTRINFO_ENUM
+#include "LanaiGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_ENUM
+#include "LanaiGenSubtargetInfo.inc"
+
+#endif // LLVM_LIB_TARGET_LANAI_MCTARGETDESC_LANAIMCTARGETDESC_H
diff --git a/lib/Target/Lanai/TargetInfo/CMakeLists.txt b/lib/Target/Lanai/TargetInfo/CMakeLists.txt
new file mode 100644
index 000000000000..01611b54b2d8
--- /dev/null
+++ b/lib/Target/Lanai/TargetInfo/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_llvm_library(LLVMLanaiInfo
+ LanaiTargetInfo.cpp
+ )
diff --git a/lib/Target/CppBackend/TargetInfo/LLVMBuild.txt b/lib/Target/Lanai/TargetInfo/LLVMBuild.txt
index 9c186a52f4fa..9922ec36daae 100644
--- a/lib/Target/CppBackend/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/Lanai/TargetInfo/LLVMBuild.txt
@@ -1,4 +1,4 @@
-;===- ./lib/Target/CppBackend/TargetInfo/LLVMBuild.txt ---------*- Conf -*--===;
+;===- ./lib/Target/Lanai/TargetInfo/LLVMBuild.txt --------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
@@ -17,7 +17,7 @@
[component_0]
type = Library
-name = CppBackendInfo
-parent = CppBackend
+name = LanaiInfo
+parent = Lanai
required_libraries = Support
-add_to_library_groups = CppBackend
+add_to_library_groups = Lanai
diff --git a/lib/Target/Lanai/TargetInfo/LanaiTargetInfo.cpp b/lib/Target/Lanai/TargetInfo/LanaiTargetInfo.cpp
new file mode 100644
index 000000000000..bd615d6ad3ac
--- /dev/null
+++ b/lib/Target/Lanai/TargetInfo/LanaiTargetInfo.cpp
@@ -0,0 +1,20 @@
+//===-- LanaiTargetInfo.cpp - Lanai Target Implementation -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Lanai.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+Target llvm::TheLanaiTarget;
+
+extern "C" void LLVMInitializeLanaiTargetInfo() {
+ RegisterTarget<Triple::lanai> X(TheLanaiTarget, "lanai", "Lanai");
+}
diff --git a/lib/Target/MSP430/InstPrinter/Makefile b/lib/Target/MSP430/InstPrinter/Makefile
deleted file mode 100644
index a5293ab8a234..000000000000
--- a/lib/Target/MSP430/InstPrinter/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/MSP430/AsmPrinter/Makefile ---------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMMSP430AsmPrinter
-
-# Hack: we need to include 'main' MSP430 target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp b/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp
index 807d1129b5fc..b3631caca952 100644
--- a/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp
+++ b/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp
@@ -14,7 +14,6 @@
#include "MSP430MCTargetDesc.h"
#include "InstPrinter/MSP430InstPrinter.h"
#include "MSP430MCAsmInfo.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
@@ -48,15 +47,6 @@ createMSP430MCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
return createMSP430MCSubtargetInfoImpl(TT, CPU, FS);
}
-static MCCodeGenInfo *createMSP430MCCodeGenInfo(const Triple &TT,
- Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
- X->initMCCodeGenInfo(RM, CM, OL);
- return X;
-}
-
static MCInstPrinter *createMSP430MCInstPrinter(const Triple &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
@@ -71,10 +61,6 @@ extern "C" void LLVMInitializeMSP430TargetMC() {
// Register the MC asm info.
RegisterMCAsmInfo<MSP430MCAsmInfo> X(TheMSP430Target);
- // Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(TheMSP430Target,
- createMSP430MCCodeGenInfo);
-
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(TheMSP430Target, createMSP430MCInstrInfo);
diff --git a/lib/Target/MSP430/MCTargetDesc/Makefile b/lib/Target/MSP430/MCTargetDesc/Makefile
deleted file mode 100644
index bb857998eef9..000000000000
--- a/lib/Target/MSP430/MCTargetDesc/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/MSP430/TargetDesc/Makefile ---------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMMSP430Desc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/MSP430/MSP430BranchSelector.cpp b/lib/Target/MSP430/MSP430BranchSelector.cpp
index 606abc250d98..511e5bcdec0d 100644
--- a/lib/Target/MSP430/MSP430BranchSelector.cpp
+++ b/lib/Target/MSP430/MSP430BranchSelector.cpp
@@ -39,6 +39,11 @@ namespace {
bool runOnMachineFunction(MachineFunction &Fn) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return "MSP430 Branch Selector";
}
@@ -62,16 +67,12 @@ bool MSP430BSel::runOnMachineFunction(MachineFunction &Fn) {
// Measure each MBB and compute a size for the entire function.
unsigned FuncSize = 0;
- for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
- ++MFI) {
- MachineBasicBlock *MBB = &*MFI;
-
+ for (MachineBasicBlock &MBB : Fn) {
unsigned BlockSize = 0;
- for (MachineBasicBlock::iterator MBBI = MBB->begin(), EE = MBB->end();
- MBBI != EE; ++MBBI)
- BlockSize += TII->GetInstSizeInBytes(MBBI);
+ for (MachineInstr &MI : MBB)
+ BlockSize += TII->GetInstSizeInBytes(MI);
- BlockSizes[MBB->getNumber()] = BlockSize;
+ BlockSizes[MBB.getNumber()] = BlockSize;
FuncSize += BlockSize;
}
@@ -106,7 +107,7 @@ bool MSP430BSel::runOnMachineFunction(MachineFunction &Fn) {
I != E; ++I) {
if ((I->getOpcode() != MSP430::JCC || I->getOperand(0).isImm()) &&
I->getOpcode() != MSP430::JMP) {
- MBBStartOffset += TII->GetInstSizeInBytes(I);
+ MBBStartOffset += TII->GetInstSizeInBytes(*I);
continue;
}
@@ -140,8 +141,8 @@ bool MSP430BSel::runOnMachineFunction(MachineFunction &Fn) {
// Otherwise, we have to expand it to a long branch.
unsigned NewSize;
- MachineInstr *OldBranch = I;
- DebugLoc dl = OldBranch->getDebugLoc();
+ MachineInstr &OldBranch = *I;
+ DebugLoc dl = OldBranch.getDebugLoc();
if (I->getOpcode() == MSP430::JMP) {
NewSize = 4;
@@ -163,7 +164,7 @@ bool MSP430BSel::runOnMachineFunction(MachineFunction &Fn) {
I = BuildMI(MBB, I, dl, TII->get(MSP430::Bi)).addMBB(Dest);
// Remove the old branch from the function.
- OldBranch->eraseFromParent();
+ OldBranch.eraseFromParent();
// Remember that this instruction is NewSize bytes, increase the size of the
// block by NewSize-2, remember to iterate.
diff --git a/lib/Target/MSP430/MSP430FrameLowering.cpp b/lib/Target/MSP430/MSP430FrameLowering.cpp
index eb720809e47c..29555f99e23d 100644
--- a/lib/Target/MSP430/MSP430FrameLowering.cpp
+++ b/lib/Target/MSP430/MSP430FrameLowering.cpp
@@ -22,7 +22,6 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
@@ -224,9 +223,9 @@ MSP430FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
return true;
}
-void MSP430FrameLowering::
-eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
+MachineBasicBlock::iterator MSP430FrameLowering::eliminateCallFramePseudoInstr(
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
const MSP430InstrInfo &TII =
*static_cast<const MSP430InstrInfo *>(MF.getSubtarget().getInstrInfo());
unsigned StackAlign = getStackAlignment();
@@ -236,8 +235,8 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// adjcallstackup instruction into a 'sub SP, <amt>' and the
// adjcallstackdown instruction into 'add SP, <amt>'
// TODO: consider using push / pop instead of sub + store / add
- MachineInstr *Old = I;
- uint64_t Amount = Old->getOperand(0).getImm();
+ MachineInstr &Old = *I;
+ uint64_t Amount = Old.getOperand(0).getImm();
if (Amount != 0) {
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
@@ -245,19 +244,21 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
Amount = (Amount+StackAlign-1)/StackAlign*StackAlign;
MachineInstr *New = nullptr;
- if (Old->getOpcode() == TII.getCallFrameSetupOpcode()) {
- New = BuildMI(MF, Old->getDebugLoc(),
- TII.get(MSP430::SUB16ri), MSP430::SP)
- .addReg(MSP430::SP).addImm(Amount);
+ if (Old.getOpcode() == TII.getCallFrameSetupOpcode()) {
+ New =
+ BuildMI(MF, Old.getDebugLoc(), TII.get(MSP430::SUB16ri), MSP430::SP)
+ .addReg(MSP430::SP)
+ .addImm(Amount);
} else {
- assert(Old->getOpcode() == TII.getCallFrameDestroyOpcode());
+ assert(Old.getOpcode() == TII.getCallFrameDestroyOpcode());
// factor out the amount the callee already popped.
- uint64_t CalleeAmt = Old->getOperand(1).getImm();
+ uint64_t CalleeAmt = Old.getOperand(1).getImm();
Amount -= CalleeAmt;
if (Amount)
- New = BuildMI(MF, Old->getDebugLoc(),
- TII.get(MSP430::ADD16ri), MSP430::SP)
- .addReg(MSP430::SP).addImm(Amount);
+ New = BuildMI(MF, Old.getDebugLoc(), TII.get(MSP430::ADD16ri),
+ MSP430::SP)
+ .addReg(MSP430::SP)
+ .addImm(Amount);
}
if (New) {
@@ -272,10 +273,11 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// If we are performing frame pointer elimination and if the callee pops
// something off the stack pointer, add it back.
if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
- MachineInstr *Old = I;
+ MachineInstr &Old = *I;
MachineInstr *New =
- BuildMI(MF, Old->getDebugLoc(), TII.get(MSP430::SUB16ri),
- MSP430::SP).addReg(MSP430::SP).addImm(CalleeAmt);
+ BuildMI(MF, Old.getDebugLoc(), TII.get(MSP430::SUB16ri), MSP430::SP)
+ .addReg(MSP430::SP)
+ .addImm(CalleeAmt);
// The SRW implicit def is dead.
New->getOperand(3).setIsDead();
@@ -283,7 +285,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
}
}
- MBB.erase(I);
+ return MBB.erase(I);
}
void
diff --git a/lib/Target/MSP430/MSP430FrameLowering.h b/lib/Target/MSP430/MSP430FrameLowering.h
index 48c4dc866a63..f77de18b4d16 100644
--- a/lib/Target/MSP430/MSP430FrameLowering.h
+++ b/lib/Target/MSP430/MSP430FrameLowering.h
@@ -30,9 +30,9 @@ public:
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const override;
+ MachineBasicBlock::iterator
+ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
diff --git a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index 8a01334ee2dd..69c609d04b5e 100644
--- a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -24,7 +24,6 @@
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -111,10 +110,10 @@ namespace {
#include "MSP430GenDAGISel.inc"
private:
- SDNode *Select(SDNode *N) override;
- SDNode *SelectIndexedLoad(SDNode *Op);
- SDNode *SelectIndexedBinOp(SDNode *Op, SDValue N1, SDValue N2,
- unsigned Opc8, unsigned Opc16);
+ void Select(SDNode *N) override;
+ bool tryIndexedLoad(SDNode *Op);
+ bool tryIndexedBinOp(SDNode *Op, SDValue N1, SDValue N2, unsigned Opc8,
+ unsigned Opc16);
bool SelectAddr(SDValue Addr, SDValue &Base, SDValue &Disp);
};
@@ -324,10 +323,10 @@ static bool isValidIndexedLoad(const LoadSDNode *LD) {
return true;
}
-SDNode *MSP430DAGToDAGISel::SelectIndexedLoad(SDNode *N) {
+bool MSP430DAGToDAGISel::tryIndexedLoad(SDNode *N) {
LoadSDNode *LD = cast<LoadSDNode>(N);
if (!isValidIndexedLoad(LD))
- return nullptr;
+ return false;
MVT VT = LD->getMemoryVT().getSimpleVT();
@@ -340,23 +339,23 @@ SDNode *MSP430DAGToDAGISel::SelectIndexedLoad(SDNode *N) {
Opcode = MSP430::MOV16rm_POST;
break;
default:
- return nullptr;
+ return false;
}
- return CurDAG->getMachineNode(Opcode, SDLoc(N),
- VT, MVT::i16, MVT::Other,
- LD->getBasePtr(), LD->getChain());
+ ReplaceNode(N,
+ CurDAG->getMachineNode(Opcode, SDLoc(N), VT, MVT::i16, MVT::Other,
+ LD->getBasePtr(), LD->getChain()));
+ return true;
}
-SDNode *MSP430DAGToDAGISel::SelectIndexedBinOp(SDNode *Op,
- SDValue N1, SDValue N2,
- unsigned Opc8, unsigned Opc16) {
+bool MSP430DAGToDAGISel::tryIndexedBinOp(SDNode *Op, SDValue N1, SDValue N2,
+ unsigned Opc8, unsigned Opc16) {
if (N1.getOpcode() == ISD::LOAD &&
N1.hasOneUse() &&
IsLegalToFold(N1, Op, Op, OptLevel)) {
LoadSDNode *LD = cast<LoadSDNode>(N1);
if (!isValidIndexedLoad(LD))
- return nullptr;
+ return false;
MVT VT = LD->getMemoryVT().getSimpleVT();
unsigned Opc = (VT == MVT::i16 ? Opc16 : Opc8);
@@ -370,14 +369,14 @@ SDNode *MSP430DAGToDAGISel::SelectIndexedBinOp(SDNode *Op,
ReplaceUses(SDValue(N1.getNode(), 2), SDValue(ResNode, 2));
// Transfer writeback.
ReplaceUses(SDValue(N1.getNode(), 1), SDValue(ResNode, 1));
- return ResNode;
+ return true;
}
- return nullptr;
+ return false;
}
-SDNode *MSP430DAGToDAGISel::Select(SDNode *Node) {
+void MSP430DAGToDAGISel::Select(SDNode *Node) {
SDLoc dl(Node);
// Dump information about the Node being selected
@@ -391,7 +390,7 @@ SDNode *MSP430DAGToDAGISel::Select(SDNode *Node) {
Node->dump(CurDAG);
errs() << "\n");
Node->setNodeId(-1);
- return nullptr;
+ return;
}
// Few custom selection stuff.
@@ -401,89 +400,70 @@ SDNode *MSP430DAGToDAGISel::Select(SDNode *Node) {
assert(Node->getValueType(0) == MVT::i16);
int FI = cast<FrameIndexSDNode>(Node)->getIndex();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, MVT::i16);
- if (Node->hasOneUse())
- return CurDAG->SelectNodeTo(Node, MSP430::ADD16ri, MVT::i16, TFI,
- CurDAG->getTargetConstant(0, dl, MVT::i16));
- return CurDAG->getMachineNode(MSP430::ADD16ri, dl, MVT::i16, TFI,
- CurDAG->getTargetConstant(0, dl, MVT::i16));
+ if (Node->hasOneUse()) {
+ CurDAG->SelectNodeTo(Node, MSP430::ADD16ri, MVT::i16, TFI,
+ CurDAG->getTargetConstant(0, dl, MVT::i16));
+ return;
+ }
+ ReplaceNode(Node, CurDAG->getMachineNode(
+ MSP430::ADD16ri, dl, MVT::i16, TFI,
+ CurDAG->getTargetConstant(0, dl, MVT::i16)));
+ return;
}
case ISD::LOAD:
- if (SDNode *ResNode = SelectIndexedLoad(Node))
- return ResNode;
+ if (tryIndexedLoad(Node))
+ return;
// Other cases are autogenerated.
break;
case ISD::ADD:
- if (SDNode *ResNode =
- SelectIndexedBinOp(Node,
- Node->getOperand(0), Node->getOperand(1),
- MSP430::ADD8rm_POST, MSP430::ADD16rm_POST))
- return ResNode;
- else if (SDNode *ResNode =
- SelectIndexedBinOp(Node, Node->getOperand(1), Node->getOperand(0),
- MSP430::ADD8rm_POST, MSP430::ADD16rm_POST))
- return ResNode;
+ if (tryIndexedBinOp(Node, Node->getOperand(0), Node->getOperand(1),
+ MSP430::ADD8rm_POST, MSP430::ADD16rm_POST))
+ return;
+ else if (tryIndexedBinOp(Node, Node->getOperand(1), Node->getOperand(0),
+ MSP430::ADD8rm_POST, MSP430::ADD16rm_POST))
+ return;
// Other cases are autogenerated.
break;
case ISD::SUB:
- if (SDNode *ResNode =
- SelectIndexedBinOp(Node,
- Node->getOperand(0), Node->getOperand(1),
- MSP430::SUB8rm_POST, MSP430::SUB16rm_POST))
- return ResNode;
+ if (tryIndexedBinOp(Node, Node->getOperand(0), Node->getOperand(1),
+ MSP430::SUB8rm_POST, MSP430::SUB16rm_POST))
+ return;
// Other cases are autogenerated.
break;
case ISD::AND:
- if (SDNode *ResNode =
- SelectIndexedBinOp(Node,
- Node->getOperand(0), Node->getOperand(1),
- MSP430::AND8rm_POST, MSP430::AND16rm_POST))
- return ResNode;
- else if (SDNode *ResNode =
- SelectIndexedBinOp(Node, Node->getOperand(1), Node->getOperand(0),
- MSP430::AND8rm_POST, MSP430::AND16rm_POST))
- return ResNode;
+ if (tryIndexedBinOp(Node, Node->getOperand(0), Node->getOperand(1),
+ MSP430::AND8rm_POST, MSP430::AND16rm_POST))
+ return;
+ else if (tryIndexedBinOp(Node, Node->getOperand(1), Node->getOperand(0),
+ MSP430::AND8rm_POST, MSP430::AND16rm_POST))
+ return;
// Other cases are autogenerated.
break;
case ISD::OR:
- if (SDNode *ResNode =
- SelectIndexedBinOp(Node,
- Node->getOperand(0), Node->getOperand(1),
- MSP430::OR8rm_POST, MSP430::OR16rm_POST))
- return ResNode;
- else if (SDNode *ResNode =
- SelectIndexedBinOp(Node, Node->getOperand(1), Node->getOperand(0),
- MSP430::OR8rm_POST, MSP430::OR16rm_POST))
- return ResNode;
+ if (tryIndexedBinOp(Node, Node->getOperand(0), Node->getOperand(1),
+ MSP430::OR8rm_POST, MSP430::OR16rm_POST))
+ return;
+ else if (tryIndexedBinOp(Node, Node->getOperand(1), Node->getOperand(0),
+ MSP430::OR8rm_POST, MSP430::OR16rm_POST))
+ return;
// Other cases are autogenerated.
break;
case ISD::XOR:
- if (SDNode *ResNode =
- SelectIndexedBinOp(Node,
- Node->getOperand(0), Node->getOperand(1),
- MSP430::XOR8rm_POST, MSP430::XOR16rm_POST))
- return ResNode;
- else if (SDNode *ResNode =
- SelectIndexedBinOp(Node, Node->getOperand(1), Node->getOperand(0),
- MSP430::XOR8rm_POST, MSP430::XOR16rm_POST))
- return ResNode;
+ if (tryIndexedBinOp(Node, Node->getOperand(0), Node->getOperand(1),
+ MSP430::XOR8rm_POST, MSP430::XOR16rm_POST))
+ return;
+ else if (tryIndexedBinOp(Node, Node->getOperand(1), Node->getOperand(0),
+ MSP430::XOR8rm_POST, MSP430::XOR16rm_POST))
+ return;
// Other cases are autogenerated.
break;
}
// Select the default instruction
- SDNode *ResNode = SelectCode(Node);
-
- DEBUG(errs() << "=> ");
- if (ResNode == nullptr || ResNode == Node)
- DEBUG(Node->dump(CurDAG));
- else
- DEBUG(ResNode->dump(CurDAG));
- DEBUG(errs() << "\n");
-
- return ResNode;
+ SelectCode(Node);
}
diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp
index 18f38b7e90da..cb2c62029454 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -117,12 +117,8 @@ MSP430TargetLowering::MSP430TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::CTTZ, MVT::i8, Expand);
setOperationAction(ISD::CTTZ, MVT::i16, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
setOperationAction(ISD::CTLZ, MVT::i8, Expand);
setOperationAction(ISD::CTLZ, MVT::i16, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
setOperationAction(ISD::CTPOP, MVT::i8, Expand);
setOperationAction(ISD::CTPOP, MVT::i16, Expand);
@@ -362,16 +358,10 @@ static void AnalyzeReturnValues(CCState &State,
std::reverse(RVLocs.begin(), RVLocs.end());
}
-SDValue
-MSP430TargetLowering::LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
- SDLoc dl,
- SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals)
- const {
+SDValue MSP430TargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
switch (CallConv) {
default:
@@ -418,16 +408,10 @@ MSP430TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
/// LowerCCCArguments - transform physical registers into virtual registers and
/// generate load operations for arguments places on the stack.
// FIXME: struct return stuff
-SDValue
-MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
- SDLoc dl,
- SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals)
- const {
+SDValue MSP430TargetLowering::LowerCCCArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
@@ -455,7 +439,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
{
#ifndef NDEBUG
errs() << "LowerFormalArguments Unhandled argument type: "
- << RegVT.getSimpleVT().SimpleTy << "\n";
+ << RegVT.getEVTString() << "\n";
#endif
llvm_unreachable(nullptr);
}
@@ -506,8 +490,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
SDValue FIN = DAG.getFrameIndex(FI, MVT::i16);
InVal = DAG.getLoad(
VA.getLocVT(), dl, Chain, FIN,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
- false, false, false, 0);
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
}
InVals.push_back(InVal);
@@ -518,11 +501,11 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
}
SDValue
-MSP430TargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
+MSP430TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const {
+ const SDLoc &dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to a location
SmallVector<CCValAssign, 16> RVLocs;
@@ -570,16 +553,12 @@ MSP430TargetLowering::LowerReturn(SDValue Chain,
/// LowerCCCCallTo - functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
// TODO: sret.
-SDValue
-MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool isTailCall,
- const SmallVectorImpl<ISD::OutputArg>
- &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue MSP430TargetLowering::LowerCCCCallTo(
+ SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
+ bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
@@ -645,8 +624,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
MachinePointerInfo(),
MachinePointerInfo());
} else {
- MemOp = DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo(),
- false, false, 0);
+ MemOp = DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
}
MemOpChains.push_back(MemOp);
@@ -708,12 +686,10 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
-SDValue
-MSP430TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue MSP430TargetLowering::LowerCallResult(
+ SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
@@ -808,8 +784,7 @@ SDValue MSP430TargetLowering::LowerBlockAddress(SDValue Op,
}
static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
- ISD::CondCode CC,
- SDLoc dl, SelectionDAG &DAG) {
+ ISD::CondCode CC, const SDLoc &dl, SelectionDAG &DAG) {
// FIXME: Handle bittests someday
assert(!LHS.getValueType().isFloatingPoint() && "We don't handle FP yet");
@@ -1048,13 +1023,13 @@ SDValue MSP430TargetLowering::LowerRETURNADDR(SDValue Op,
DAG.getConstant(DAG.getDataLayout().getPointerSize(), dl, MVT::i16);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
- MachinePointerInfo(), false, false, false, 0);
+ MachinePointerInfo());
}
// Just load the return address.
SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
- MachinePointerInfo(), false, false, false, 0);
+ MachinePointerInfo());
}
SDValue MSP430TargetLowering::LowerFRAMEADDR(SDValue Op,
@@ -1069,8 +1044,7 @@ SDValue MSP430TargetLowering::LowerFRAMEADDR(SDValue Op,
MSP430::FP, VT);
while (Depth--)
FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
- MachinePointerInfo(),
- false, false, false, 0);
+ MachinePointerInfo());
return FrameAddr;
}
@@ -1086,9 +1060,8 @@ SDValue MSP430TargetLowering::LowerVASTART(SDValue Op,
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
// Create a store of the frame index to the location operand
- return DAG.getStore(Op.getOperand(0), SDLoc(Op), FrameIndex,
- Op.getOperand(1), MachinePointerInfo(SV),
- false, false, 0);
+ return DAG.getStore(Op.getOperand(0), SDLoc(Op), FrameIndex, Op.getOperand(1),
+ MachinePointerInfo(SV));
}
SDValue MSP430TargetLowering::LowerJumpTable(SDValue Op,
@@ -1189,17 +1162,17 @@ bool MSP430TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
// Other Lowering Code
//===----------------------------------------------------------------------===//
-MachineBasicBlock*
-MSP430TargetLowering::EmitShiftInstr(MachineInstr *MI,
+MachineBasicBlock *
+MSP430TargetLowering::EmitShiftInstr(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *F = BB->getParent();
MachineRegisterInfo &RI = F->getRegInfo();
- DebugLoc dl = MI->getDebugLoc();
+ DebugLoc dl = MI.getDebugLoc();
const TargetInstrInfo &TII = *F->getSubtarget().getInstrInfo();
unsigned Opc;
const TargetRegisterClass * RC;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("Invalid shift opcode!");
case MSP430::Shl8:
Opc = MSP430::SHL8r1;
@@ -1253,9 +1226,9 @@ MSP430TargetLowering::EmitShiftInstr(MachineInstr *MI,
unsigned ShiftAmtReg2 = RI.createVirtualRegister(&MSP430::GR8RegClass);
unsigned ShiftReg = RI.createVirtualRegister(RC);
unsigned ShiftReg2 = RI.createVirtualRegister(RC);
- unsigned ShiftAmtSrcReg = MI->getOperand(2).getReg();
- unsigned SrcReg = MI->getOperand(1).getReg();
- unsigned DstReg = MI->getOperand(0).getReg();
+ unsigned ShiftAmtSrcReg = MI.getOperand(2).getReg();
+ unsigned SrcReg = MI.getOperand(1).getReg();
+ unsigned DstReg = MI.getOperand(0).getReg();
// BB:
// cmp 0, N
@@ -1291,14 +1264,14 @@ MSP430TargetLowering::EmitShiftInstr(MachineInstr *MI,
.addReg(SrcReg).addMBB(BB)
.addReg(ShiftReg2).addMBB(LoopBB);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return RemBB;
}
-MachineBasicBlock*
-MSP430TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+MachineBasicBlock *
+MSP430TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
- unsigned Opc = MI->getOpcode();
+ unsigned Opc = MI.getOpcode();
if (Opc == MSP430::Shl8 || Opc == MSP430::Shl16 ||
Opc == MSP430::Sra8 || Opc == MSP430::Sra16 ||
@@ -1306,7 +1279,7 @@ MSP430TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
return EmitShiftInstr(MI, BB);
const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
- DebugLoc dl = MI->getDebugLoc();
+ DebugLoc dl = MI.getDebugLoc();
assert((Opc == MSP430::Select16 || Opc == MSP430::Select8) &&
"Unexpected instr type to insert");
@@ -1340,8 +1313,8 @@ MSP430TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
BB->addSuccessor(copy1MBB);
BuildMI(BB, dl, TII.get(MSP430::JCC))
- .addMBB(copy1MBB)
- .addImm(MI->getOperand(3).getImm());
+ .addMBB(copy1MBB)
+ .addImm(MI.getOperand(3).getImm());
// copy0MBB:
// %FalseValue = ...
@@ -1355,11 +1328,12 @@ MSP430TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = copy1MBB;
- BuildMI(*BB, BB->begin(), dl, TII.get(MSP430::PHI),
- MI->getOperand(0).getReg())
- .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
- .addReg(MI->getOperand(1).getReg()).addMBB(thisMBB);
+ BuildMI(*BB, BB->begin(), dl, TII.get(MSP430::PHI), MI.getOperand(0).getReg())
+ .addReg(MI.getOperand(2).getReg())
+ .addMBB(copy0MBB)
+ .addReg(MI.getOperand(1).getReg())
+ .addMBB(thisMBB);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
diff --git a/lib/Target/MSP430/MSP430ISelLowering.h b/lib/Target/MSP430/MSP430ISelLowering.h
index 2d63852c185b..8864807e999e 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.h
+++ b/lib/Target/MSP430/MSP430ISelLowering.h
@@ -121,9 +121,10 @@ namespace llvm {
bool isZExtFree(EVT VT1, EVT VT2) const override;
bool isZExtFree(SDValue Val, EVT VT2) const override;
- MachineBasicBlock* EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB) const override;
- MachineBasicBlock* EmitShiftInstr(MachineInstr *MI,
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *BB) const override;
+ MachineBasicBlock *EmitShiftInstr(MachineInstr &MI,
MachineBasicBlock *BB) const;
private:
@@ -133,38 +134,34 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
- SDValue LowerCCCArguments(SDValue Chain,
- CallingConv::ID CallConv,
+ SDValue LowerCCCArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl,
- SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const override;
+ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
SDValue
LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
- SDValue LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const override;
+ const SDLoc &dl, SelectionDAG &DAG) const override;
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
SDValue &Base,
diff --git a/lib/Target/MSP430/MSP430InstrInfo.cpp b/lib/Target/MSP430/MSP430InstrInfo.cpp
index d4f82bda1ec9..c834da3a11cd 100644
--- a/lib/Target/MSP430/MSP430InstrInfo.cpp
+++ b/lib/Target/MSP430/MSP430InstrInfo.cpp
@@ -78,18 +78,20 @@ void MSP430InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
if (RC == &MSP430::GR16RegClass)
BuildMI(MBB, MI, DL, get(MSP430::MOV16rm))
- .addReg(DestReg).addFrameIndex(FrameIdx).addImm(0).addMemOperand(MMO);
+ .addReg(DestReg, getDefRegState(true)).addFrameIndex(FrameIdx)
+ .addImm(0).addMemOperand(MMO);
else if (RC == &MSP430::GR8RegClass)
BuildMI(MBB, MI, DL, get(MSP430::MOV8rm))
- .addReg(DestReg).addFrameIndex(FrameIdx).addImm(0).addMemOperand(MMO);
+ .addReg(DestReg, getDefRegState(true)).addFrameIndex(FrameIdx)
+ .addImm(0).addMemOperand(MMO);
else
llvm_unreachable("Cannot store this register to stack slot!");
}
void MSP430InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
unsigned Opc;
if (MSP430::GR16RegClass.contains(DestReg, SrcReg))
Opc = MSP430::MOV16rr;
@@ -156,18 +158,19 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
return false;
}
-bool MSP430InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
- if (!MI->isTerminator()) return false;
+bool MSP430InstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
+ if (!MI.isTerminator())
+ return false;
// Conditional branch is a special case.
- if (MI->isBranch() && !MI->isBarrier())
+ if (MI.isBranch() && !MI.isBarrier())
return true;
- if (!MI->isPredicable())
+ if (!MI.isPredicable())
return true;
return !isPredicated(MI);
}
-bool MSP430InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+bool MSP430InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
@@ -182,7 +185,7 @@ bool MSP430InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// Working from the bottom, when we see a non-terminator
// instruction, we're done.
- if (!isUnpredicatedTerminator(I))
+ if (!isUnpredicatedTerminator(*I))
break;
// A terminator that isn't a branch can't easily be handled
@@ -257,11 +260,11 @@ bool MSP430InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
return false;
}
-unsigned
-MSP430InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const {
+unsigned MSP430InstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
@@ -290,8 +293,8 @@ MSP430InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
///
-unsigned MSP430InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
- const MCInstrDesc &Desc = MI->getDesc();
+unsigned MSP430InstrInfo::GetInstSizeInBytes(const MachineInstr &MI) const {
+ const MCInstrDesc &Desc = MI.getDesc();
switch (Desc.TSFlags & MSP430II::SizeMask) {
default:
@@ -304,14 +307,14 @@ unsigned MSP430InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
case TargetOpcode::DBG_VALUE:
return 0;
case TargetOpcode::INLINEASM: {
- const MachineFunction *MF = MI->getParent()->getParent();
+ const MachineFunction *MF = MI.getParent()->getParent();
const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
- return TII.getInlineAsmLength(MI->getOperand(0).getSymbolName(),
+ return TII.getInlineAsmLength(MI.getOperand(0).getSymbolName(),
*MF->getTarget().getMCAsmInfo());
}
}
case MSP430II::SizeSpecial:
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("Unknown instruction size!");
case MSP430::SAR8r1c:
case MSP430::SAR16r1c:
diff --git a/lib/Target/MSP430/MSP430InstrInfo.h b/lib/Target/MSP430/MSP430InstrInfo.h
index 3cf3b1bb8ab2..46d4738d89af 100644
--- a/lib/Target/MSP430/MSP430InstrInfo.h
+++ b/lib/Target/MSP430/MSP430InstrInfo.h
@@ -52,9 +52,8 @@ public:
///
const TargetRegisterInfo &getRegisterInfo() const { return RI; }
- void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -69,22 +68,21 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
- unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
+ unsigned GetInstSizeInBytes(const MachineInstr &MI) const;
// Branch folding goodness
bool
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
- bool isUnpredicatedTerminator(const MachineInstr *MI) const override;
- bool AnalyzeBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
+ bool isUnpredicatedTerminator(const MachineInstr &MI) const override;
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const override;
-
+ const DebugLoc &DL) const override;
};
}
diff --git a/lib/Target/MSP430/MSP430Subtarget.h b/lib/Target/MSP430/MSP430Subtarget.h
index ff2656d26dd2..1a00d85e01cb 100644
--- a/lib/Target/MSP430/MSP430Subtarget.h
+++ b/lib/Target/MSP430/MSP430Subtarget.h
@@ -18,8 +18,8 @@
#include "MSP430ISelLowering.h"
#include "MSP430InstrInfo.h"
#include "MSP430RegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/IR/DataLayout.h"
-#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <string>
@@ -35,7 +35,7 @@ class MSP430Subtarget : public MSP430GenSubtargetInfo {
MSP430FrameLowering FrameLowering;
MSP430InstrInfo InstrInfo;
MSP430TargetLowering TLInfo;
- TargetSelectionDAGInfo TSInfo;
+ SelectionDAGTargetInfo TSInfo;
public:
/// This constructor initializes the data members to match that
@@ -60,7 +60,7 @@ public:
const MSP430TargetLowering *getTargetLowering() const override {
return &TLInfo;
}
- const TargetSelectionDAGInfo *getSelectionDAGInfo() const override {
+ const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
return &TSInfo;
}
};
diff --git a/lib/Target/MSP430/MSP430TargetMachine.cpp b/lib/Target/MSP430/MSP430TargetMachine.cpp
index 97a4047d1d63..b2e698ca5548 100644
--- a/lib/Target/MSP430/MSP430TargetMachine.cpp
+++ b/lib/Target/MSP430/MSP430TargetMachine.cpp
@@ -15,6 +15,7 @@
#include "MSP430.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/TargetRegistry.h"
@@ -25,13 +26,20 @@ extern "C" void LLVMInitializeMSP430Target() {
RegisterTargetMachine<MSP430TargetMachine> X(TheMSP430Target);
}
+static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
+ if (!RM.hasValue())
+ return Reloc::Static;
+ return *RM;
+}
+
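A note on the hunk above: LLVM 3.9 threads an Optional<Reloc::Model> from the driver down to each target so the backend can pick its own default, and MSP430 defaults to Reloc::Static. Below is a minimal standalone sketch of the same defaulting pattern; resolveRelocModel and its Fallback parameter are illustrative names, not part of the patch:

    #include "llvm/ADT/Optional.h"
    #include "llvm/Support/CodeGen.h"

    // Resolve a user-supplied (possibly absent) relocation model against a
    // target-chosen fallback, as getEffectiveRelocModel() does above.
    static llvm::Reloc::Model
    resolveRelocModel(llvm::Optional<llvm::Reloc::Model> RM,
                      llvm::Reloc::Model Fallback = llvm::Reloc::Static) {
      return RM.hasValue() ? *RM : Fallback;
    }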
MSP430TargetMachine::MSP430TargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: LLVMTargetMachine(T, "e-m:e-p:16:16-i32:16:32-a:16-n8:16", TT, CPU, FS,
- Options, RM, CM, OL),
+ Options, getEffectiveRelocModel(RM), CM, OL),
TLOF(make_unique<TargetLoweringObjectFileELF>()),
// FIXME: Check DataLayout string.
Subtarget(TT, CPU, FS, *this) {
diff --git a/lib/Target/MSP430/MSP430TargetMachine.h b/lib/Target/MSP430/MSP430TargetMachine.h
index 4f955a8049c7..de8f06e71dee 100644
--- a/lib/Target/MSP430/MSP430TargetMachine.h
+++ b/lib/Target/MSP430/MSP430TargetMachine.h
@@ -30,7 +30,7 @@ class MSP430TargetMachine : public LLVMTargetMachine {
public:
MSP430TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
~MSP430TargetMachine() override;
diff --git a/lib/Target/MSP430/Makefile b/lib/Target/MSP430/Makefile
deleted file mode 100644
index 82216edd81e4..000000000000
--- a/lib/Target/MSP430/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-##===- lib/Target/MSP430/Makefile --------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMMSP430CodeGen
-TARGET = MSP430
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = MSP430GenRegisterInfo.inc MSP430GenInstrInfo.inc \
- MSP430GenAsmWriter.inc \
- MSP430GenDAGISel.inc MSP430GenCallingConv.inc \
- MSP430GenSubtargetInfo.inc
-
-DIRS = InstPrinter TargetInfo MCTargetDesc
-
-include $(LEVEL)/Makefile.common
-
diff --git a/lib/Target/MSP430/TargetInfo/Makefile b/lib/Target/MSP430/TargetInfo/Makefile
deleted file mode 100644
index abb08f2548ee..000000000000
--- a/lib/Target/MSP430/TargetInfo/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/MSP430/TargetInfo/Makefile ---------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMMSP430Info
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/Makefile b/lib/Target/Makefile
deleted file mode 100644
index 50a360f1f868..000000000000
--- a/lib/Target/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-#===- lib/Target/Makefile ----------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-LIBRARYNAME = LLVMTarget
-BUILD_ARCHIVE = 1
-
-# We include this early so we can access the value of TARGETS_TO_BUILD as the
-# value for PARALLEL_DIRS which must be set before Makefile.rules is included
-include $(LEVEL)/Makefile.config
-
-PARALLEL_DIRS := $(TARGETS_TO_BUILD)
-
-include $(LLVM_SRC_ROOT)/Makefile.rules
diff --git a/lib/Target/Mips/AsmParser/Makefile b/lib/Target/Mips/AsmParser/Makefile
deleted file mode 100644
index 679acee9fe72..000000000000
--- a/lib/Target/Mips/AsmParser/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/Mips/AsmParser/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMMipsAsmParser
-
-# Hack: we need to include 'main' mips target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index d4e061f00d3a..b51d0200b0b1 100644
--- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -13,7 +13,6 @@
#include "MipsRegisterInfo.h"
#include "MipsTargetObjectFile.h"
#include "MipsTargetStreamer.h"
-#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCContext.h"
@@ -22,11 +21,13 @@
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
+#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ELF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
@@ -157,7 +158,6 @@ class MipsAsmParser : public MCTargetAsmParser {
OperandMatchResultTy parseImm(OperandVector &Operands);
OperandMatchResultTy parseJumpTarget(OperandVector &Operands);
OperandMatchResultTy parseInvNum(OperandVector &Operands);
- OperandMatchResultTy parseLSAImm(OperandVector &Operands);
OperandMatchResultTy parseRegisterPair(OperandVector &Operands);
OperandMatchResultTy parseMovePRegPair(OperandVector &Operands);
OperandMatchResultTy parseRegisterList(OperandVector &Operands);
@@ -173,74 +173,77 @@ class MipsAsmParser : public MCTargetAsmParser {
};
// Expands assembly pseudo instructions.
- MacroExpanderResultTy
- tryExpandInstruction(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ MacroExpanderResultTy tryExpandInstruction(MCInst &Inst, SMLoc IDLoc,
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI);
- bool expandJalWithRegs(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ bool expandJalWithRegs(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
bool loadImmediate(int64_t ImmValue, unsigned DstReg, unsigned SrcReg,
bool Is32BitImm, bool IsAddress, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ MCStreamer &Out, const MCSubtargetInfo *STI);
bool loadAndAddSymbolAddress(const MCExpr *SymExpr, unsigned DstReg,
unsigned SrcReg, bool Is32BitSym, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ MCStreamer &Out, const MCSubtargetInfo *STI);
bool expandLoadImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ MCStreamer &Out, const MCSubtargetInfo *STI);
bool expandLoadAddress(unsigned DstReg, unsigned BaseReg,
const MCOperand &Offset, bool Is32BitAddress,
- SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions);
+ SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
- bool expandUncondBranchMMPseudo(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ bool expandUncondBranchMMPseudo(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
- void expandMemInst(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions, bool isLoad,
- bool isImmOpnd);
+ void expandMemInst(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI, bool IsLoad, bool IsImmOpnd);
- bool expandLoadStoreMultiple(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ void expandLoadInst(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI, bool IsImmOpnd);
- bool expandAliasImmediate(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ void expandStoreInst(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI, bool IsImmOpnd);
- bool expandBranchImm(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ bool expandLoadStoreMultiple(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
- bool expandCondBranches(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ bool expandAliasImmediate(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
- bool expandDiv(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions, const bool IsMips64,
- const bool Signed);
+ bool expandBranchImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
- bool expandUlh(MCInst &Inst, bool Signed, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ bool expandCondBranches(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
- bool expandUlw(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ bool expandDiv(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI, const bool IsMips64,
+ const bool Signed);
- bool expandRotation(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
- bool expandRotationImm(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
- bool expandDRotation(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
- bool expandDRotationImm(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ bool expandTrunc(MCInst &Inst, bool IsDouble, bool Is64FPU, SMLoc IDLoc,
+ MCStreamer &Out, const MCSubtargetInfo *STI);
- void createNop(bool hasShortDelaySlot, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ bool expandUlh(MCInst &Inst, bool Signed, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
- void createAddu(unsigned DstReg, unsigned SrcReg, unsigned TrgReg,
- bool Is64Bit, SmallVectorImpl<MCInst> &Instructions);
+ bool expandUlw(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
- void createCpRestoreMemOp(bool IsLoad, int StackOffset, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ bool expandRotation(MCInst &Inst, SMLoc IDLoc,
+ MCStreamer &Out, const MCSubtargetInfo *STI);
+ bool expandRotationImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
+ bool expandDRotation(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
+ bool expandDRotationImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
+
+ bool expandAbs(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
bool reportParseError(Twine ErrorMsg);
bool reportParseError(SMLoc Loc, Twine ErrorMsg);
@@ -263,6 +266,7 @@ class MipsAsmParser : public MCTargetAsmParser {
bool parseDirectiveSet();
bool parseDirectiveOption();
bool parseInsnDirective();
+ bool parseSSectionDirective(StringRef Section, unsigned Type);
bool parseSetAtDirective();
bool parseSetNoAtDirective();
@@ -295,16 +299,12 @@ class MipsAsmParser : public MCTargetAsmParser {
bool parseInternalDirectiveReallowModule();
- MCSymbolRefExpr::VariantKind getVariantKind(StringRef Symbol);
-
bool eatComma(StringRef ErrorStr);
int matchCPURegisterName(StringRef Symbol);
int matchHWRegsRegisterName(StringRef Symbol);
- int matchRegisterByNumber(unsigned RegNum, unsigned RegClass);
-
int matchFPURegisterName(StringRef Name);
int matchFCCRegisterName(StringRef Name);
@@ -317,15 +317,13 @@ class MipsAsmParser : public MCTargetAsmParser {
unsigned getReg(int RC, int RegNo);
- unsigned getGPR(int RegNo);
-
/// Returns the internal register number for the current AT. Also checks if
/// the current AT is unavailable (set to $0) and gives an error if it is.
/// This should be used in pseudo-instruction expansions which need AT.
unsigned getATReg(SMLoc Loc);
- bool processInstruction(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ bool processInstruction(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
// Helper function that checks if the value of a vector index is within the
// boundaries of accepted values for each RegisterKind
@@ -395,6 +393,8 @@ class MipsAsmParser : public MCTargetAsmParser {
public:
enum MipsMatchResultTy {
Match_RequiresDifferentSrcAndDst = FIRST_TARGET_MATCH_RESULT_TY,
+ Match_RequiresDifferentOperands,
+ Match_RequiresNoZeroRegister,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "MipsGenAsmMatcher.inc"
#undef GET_OPERAND_DIAGNOSTIC_TYPES
@@ -427,13 +427,12 @@ public:
CurrentFn = nullptr;
- IsPicEnabled =
- (getContext().getObjectFileInfo()->getRelocM() == Reloc::PIC_);
+ IsPicEnabled = getContext().getObjectFileInfo()->isPositionIndependent();
IsCpRestoreSet = false;
CpRestoreOffset = -1;
- Triple TheTriple(sti.getTargetTriple());
+ const Triple &TheTriple = sti.getTargetTriple();
if ((TheTriple.getArch() == Triple::mips) ||
(TheTriple.getArch() == Triple::mips64))
IsLittleEndian = false;
@@ -584,7 +583,6 @@ private:
enum KindTy {
k_Immediate, /// An immediate (possibly involving symbol references)
k_Memory, /// Base + Offset Memory Address
- k_PhysRegister, /// A physical register from the Mips namespace
k_RegisterIndex, /// A register index in one or more RegKind.
k_Token, /// A simple token
k_RegList, /// A physical register list
@@ -604,10 +602,6 @@ private:
unsigned Length;
};
- struct PhysRegOp {
- unsigned Num; /// Register Number
- };
-
struct RegIdxOp {
unsigned Index; /// Index into the register class
RegKind Kind; /// Bitfield of the kinds it could possibly be
@@ -629,7 +623,6 @@ private:
union {
struct Token Tok;
- struct PhysRegOp PhysReg;
struct RegIdxOp RegIdx;
struct ImmOp Imm;
struct MemOp Mem;
@@ -938,6 +931,34 @@ public:
Inst.addOperand(MCOperand::createImm(Imm));
}
+ template <unsigned Bits>
+ void addSImmOperands(MCInst &Inst, unsigned N) const {
+ if (isImm() && !isConstantImm()) {
+ addExpr(Inst, getImm());
+ return;
+ }
+ addConstantSImmOperands<Bits, 0, 0>(Inst, N);
+ }
+
+ template <unsigned Bits>
+ void addUImmOperands(MCInst &Inst, unsigned N) const {
+ if (isImm() && !isConstantImm()) {
+ addExpr(Inst, getImm());
+ return;
+ }
+ addConstantUImmOperands<Bits, 0, 0>(Inst, N);
+ }
+
+ template <unsigned Bits, int Offset = 0, int AdjustOffset = 0>
+ void addConstantSImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ int64_t Imm = getConstantImm() - Offset;
+ Imm = SignExtend64<Bits>(Imm);
+ Imm += Offset;
+ Imm += AdjustOffset;
+ Inst.addOperand(MCOperand::createImm(Imm));
+ }
+
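The addSImmOperands/addConstantSImmOperands pair added above wraps an out-of-range constant into the instruction's Bits-wide signed field (around an optional bias) rather than rejecting it. A self-contained model of the arithmetic follows; wrapSImm and the sample values are illustrative, and only SignExtend64 from llvm/Support/MathExtras.h is assumed:

    #include "llvm/Support/MathExtras.h"
    #include <cassert>
    #include <cstdint>

    // Re-center Imm around Offset, wrap it into a Bits-wide signed range,
    // then undo the bias and apply the encoding fixup, as the template
    // above does.
    static int64_t wrapSImm(int64_t Imm, unsigned Bits, int Offset,
                            int Adjust) {
      int64_t V = llvm::SignExtend64(Imm - Offset, Bits);
      return V + Offset + Adjust;
    }

    int main() {
      assert(wrapSImm(5, 4, 0, 0) == 5);  // in range: unchanged
      assert(wrapSImm(9, 4, 1, 0) == -7); // 9-1=8 wraps to -8 in 4 bits
      return 0;
    }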
void addImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCExpr *Expr = getImm();
@@ -973,9 +994,19 @@ public:
void addRegPairOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
+ assert((RegIdx.Kind & RegKind_GPR) && "Invalid access!");
unsigned RegNo = getRegPair();
- Inst.addOperand(MCOperand::createReg(RegNo++));
- Inst.addOperand(MCOperand::createReg(RegNo));
+ AsmParser.warnIfRegIndexIsAT(RegNo, StartLoc);
+ Inst.addOperand(MCOperand::createReg(
+ RegIdx.RegInfo->getRegClass(
+ AsmParser.getABI().AreGprs64bit()
+ ? Mips::GPR64RegClassID
+ : Mips::GPR32RegClassID).getRegister(RegNo++)));
+ Inst.addOperand(MCOperand::createReg(
+ RegIdx.RegInfo->getRegClass(
+ AsmParser.getABI().AreGprs64bit()
+ ? Mips::GPR64RegClassID
+ : Mips::GPR32RegClassID).getRegister(RegNo)));
}
void addMovePRegPairOperands(MCInst &Inst, unsigned N) const {
@@ -985,12 +1016,9 @@ public:
}
bool isReg() const override {
- // As a special case until we sort out the definition of div/divu, pretend
- // that $0/$zero are k_PhysRegister so that MCK_ZERO works correctly.
- if (isGPRAsmReg() && RegIdx.Index == 0)
- return true;
-
- return Kind == k_PhysRegister;
+ // As a special case until we sort out the definition of div/divu, accept
+ // $0/$zero here so that MCK_ZERO works correctly.
+ return isGPRAsmReg() && RegIdx.Index == 0;
}
bool isRegIdx() const { return Kind == k_RegisterIndex; }
bool isImm() const override { return Kind == k_Immediate; }
@@ -1003,8 +1031,23 @@ public:
template <unsigned Bits, int Offset = 0> bool isConstantUImm() const {
return isConstantImm() && isUInt<Bits>(getConstantImm() - Offset);
}
- template <unsigned Bits> bool isConstantSImm() const {
- return isConstantImm() && isInt<Bits>(getConstantImm());
+ template <unsigned Bits> bool isSImm() const {
+ return isConstantImm() ? isInt<Bits>(getConstantImm()) : isImm();
+ }
+ template <unsigned Bits> bool isUImm() const {
+ return isConstantImm() ? isUInt<Bits>(getConstantImm()) : isImm();
+ }
+ template <unsigned Bits> bool isAnyImm() const {
+ return isConstantImm() ? (isInt<Bits>(getConstantImm()) ||
+ isUInt<Bits>(getConstantImm()))
+ : isImm();
+ }
+ template <unsigned Bits, int Offset = 0> bool isConstantSImm() const {
+ return isConstantImm() && isInt<Bits>(getConstantImm() - Offset);
+ }
+ template <unsigned Bottom, unsigned Top> bool isConstantUImmRange() const {
+ return isConstantImm() && getConstantImm() >= Bottom &&
+ getConstantImm() <= Top;
}
bool isToken() const override {
// Note: It's not possible to pretend that other operand kinds are tokens.
@@ -1015,13 +1058,22 @@ public:
bool isConstantMemOff() const {
return isMem() && isa<MCConstantExpr>(getMemOff());
}
- template <unsigned Bits> bool isMemWithSimmOffset() const {
- return isMem() && isConstantMemOff() && isInt<Bits>(getConstantMemOff())
- && getMemBase()->isGPRAsmReg();
- }
- template <unsigned Bits> bool isMemWithSimmOffsetGPR() const {
- return isMem() && isConstantMemOff() && isInt<Bits>(getConstantMemOff()) &&
- getMemBase()->isGPRAsmReg();
+ // Allow relocation operators.
+ // FIXME: This predicate and others need to look through binary expressions
+ // and determine whether a Value is a constant or not.
+ template <unsigned Bits, unsigned ShiftAmount = 0>
+ bool isMemWithSimmOffset() const {
+ if (!isMem())
+ return false;
+ if (!getMemBase()->isGPRAsmReg())
+ return false;
+ if (isa<MCTargetExpr>(getMemOff()) ||
+ (isConstantMemOff() &&
+ isShiftedInt<Bits, ShiftAmount>(getConstantMemOff())))
+ return true;
+ MCValue Res;
+ bool IsReloc = getMemOff()->evaluateAsRelocatable(Res, nullptr, nullptr);
+ return IsReloc && isShiftedInt<Bits, ShiftAmount>(Res.getConstant());
}
bool isMemWithGRPMM16Base() const {
return isMem() && getMemBase()->isMM16AsmReg();
@@ -1035,11 +1087,21 @@ public:
&& (getConstantMemOff() % 4 == 0) && getMemBase()->isRegIdx()
&& (getMemBase()->getGPR32Reg() == Mips::SP);
}
+ template <unsigned Bits> bool isMemWithSimmWordAlignedOffsetGP() const {
+ return isMem() && isConstantMemOff() && isInt<Bits>(getConstantMemOff())
+ && (getConstantMemOff() % 4 == 0) && getMemBase()->isRegIdx()
+ && (getMemBase()->getGPR32Reg() == Mips::GP);
+ }
template <unsigned Bits, unsigned ShiftLeftAmount>
bool isScaledUImm() const {
return isConstantImm() &&
isShiftedUInt<Bits, ShiftLeftAmount>(getConstantImm());
}
+ template <unsigned Bits, unsigned ShiftLeftAmount>
+ bool isScaledSImm() const {
+ return isConstantImm() &&
+ isShiftedInt<Bits, ShiftLeftAmount>(getConstantImm());
+ }
bool isRegList16() const {
if (!isRegList())
return false;
@@ -1086,7 +1148,15 @@ public:
(R0 == Mips::A0 && R1 == Mips::S6) ||
(R0 == Mips::A0 && R1 == Mips::A1) ||
(R0 == Mips::A0 && R1 == Mips::A2) ||
- (R0 == Mips::A0 && R1 == Mips::A3))
+ (R0 == Mips::A0 && R1 == Mips::A3) ||
+ (R0 == Mips::A1_64 && R1 == Mips::A2_64) ||
+ (R0 == Mips::A1_64 && R1 == Mips::A3_64) ||
+ (R0 == Mips::A2_64 && R1 == Mips::A3_64) ||
+ (R0 == Mips::A0_64 && R1 == Mips::S5_64) ||
+ (R0 == Mips::A0_64 && R1 == Mips::S6_64) ||
+ (R0 == Mips::A0_64 && R1 == Mips::A1_64) ||
+ (R0 == Mips::A0_64 && R1 == Mips::A2_64) ||
+ (R0 == Mips::A0_64 && R1 == Mips::A3_64))
return true;
return false;
@@ -1096,17 +1166,19 @@ public:
assert(Kind == k_Token && "Invalid access!");
return StringRef(Tok.Data, Tok.Length);
}
- bool isRegPair() const { return Kind == k_RegPair; }
+ bool isRegPair() const {
+ return Kind == k_RegPair && RegIdx.Index <= 30;
+ }
unsigned getReg() const override {
- // As a special case until we sort out the definition of div/divu, pretend
- // that $0/$zero are k_PhysRegister so that MCK_ZERO works correctly.
+ // As a special case until we sort out the definition of div/divu, accept
+ // $0/$zero here so that MCK_ZERO works correctly.
if (Kind == k_RegisterIndex && RegIdx.Index == 0 &&
RegIdx.Kind & RegKind_GPR)
return getGPR32Reg(); // FIXME: GPR64 too
- assert(Kind == k_PhysRegister && "Invalid access!");
- return PhysReg.Num;
+ llvm_unreachable("Invalid access!");
+ return 0;
}
const MCExpr *getImm() const {
@@ -1250,10 +1322,13 @@ public:
return Op;
}
- static std::unique_ptr<MipsOperand>
- CreateRegPair(unsigned RegNo, SMLoc S, SMLoc E, MipsAsmParser &Parser) {
+ static std::unique_ptr<MipsOperand> CreateRegPair(const MipsOperand &MOP,
+ SMLoc S, SMLoc E,
+ MipsAsmParser &Parser) {
auto Op = make_unique<MipsOperand>(k_RegPair, Parser);
- Op->RegIdx.Index = RegNo;
+ Op->RegIdx.Index = MOP.RegIdx.Index;
+ Op->RegIdx.RegInfo = MOP.RegIdx.RegInfo;
+ Op->RegIdx.Kind = MOP.RegIdx.Kind;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
@@ -1331,7 +1406,6 @@ public:
break;
case k_RegList:
delete RegList.List;
- case k_PhysRegister:
case k_RegisterIndex:
case k_Token:
case k_RegPair:
@@ -1353,9 +1427,6 @@ public:
OS << *Mem.Off;
OS << ">";
break;
- case k_PhysRegister:
- OS << "PhysReg<" << PhysReg.Num << ">";
- break;
case k_RegisterIndex:
OS << "RegIdx<" << RegIdx.Index << ":" << RegIdx.Kind << ">";
break;
@@ -1434,83 +1505,10 @@ static unsigned countMCSymbolRefExpr(const MCExpr *Expr) {
return 0;
}
-namespace {
-void emitRX(unsigned Opcode, unsigned Reg0, MCOperand Op1, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
- MCInst tmpInst;
- tmpInst.setOpcode(Opcode);
- tmpInst.addOperand(MCOperand::createReg(Reg0));
- tmpInst.addOperand(Op1);
- tmpInst.setLoc(IDLoc);
- Instructions.push_back(tmpInst);
-}
-
-void emitRI(unsigned Opcode, unsigned Reg0, int32_t Imm, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
- emitRX(Opcode, Reg0, MCOperand::createImm(Imm), IDLoc, Instructions);
-}
-
-void emitRR(unsigned Opcode, unsigned Reg0, unsigned Reg1, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
- emitRX(Opcode, Reg0, MCOperand::createReg(Reg1), IDLoc, Instructions);
-}
-
-void emitII(unsigned Opcode, int16_t Imm1, int16_t Imm2, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
- MCInst tmpInst;
- tmpInst.setOpcode(Opcode);
- tmpInst.addOperand(MCOperand::createImm(Imm1));
- tmpInst.addOperand(MCOperand::createImm(Imm2));
- tmpInst.setLoc(IDLoc);
- Instructions.push_back(tmpInst);
-}
-
-void emitR(unsigned Opcode, unsigned Reg0, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
- MCInst tmpInst;
- tmpInst.setOpcode(Opcode);
- tmpInst.addOperand(MCOperand::createReg(Reg0));
- tmpInst.setLoc(IDLoc);
- Instructions.push_back(tmpInst);
-}
-
-void emitRRX(unsigned Opcode, unsigned Reg0, unsigned Reg1, MCOperand Op2,
- SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions) {
- MCInst tmpInst;
- tmpInst.setOpcode(Opcode);
- tmpInst.addOperand(MCOperand::createReg(Reg0));
- tmpInst.addOperand(MCOperand::createReg(Reg1));
- tmpInst.addOperand(Op2);
- tmpInst.setLoc(IDLoc);
- Instructions.push_back(tmpInst);
-}
-
-void emitRRR(unsigned Opcode, unsigned Reg0, unsigned Reg1, unsigned Reg2,
- SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions) {
- emitRRX(Opcode, Reg0, Reg1, MCOperand::createReg(Reg2), IDLoc,
- Instructions);
-}
-
-void emitRRI(unsigned Opcode, unsigned Reg0, unsigned Reg1, int16_t Imm,
- SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions) {
- emitRRX(Opcode, Reg0, Reg1, MCOperand::createImm(Imm), IDLoc,
- Instructions);
-}
-
-void emitAppropriateDSLL(unsigned DstReg, unsigned SrcReg, int16_t ShiftAmount,
- SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions) {
- if (ShiftAmount >= 32) {
- emitRRI(Mips::DSLL32, DstReg, SrcReg, ShiftAmount - 32, IDLoc,
- Instructions);
- return;
- }
-
- emitRRI(Mips::DSLL, DstReg, SrcReg, ShiftAmount, IDLoc, Instructions);
-}
-} // end anonymous namespace.
-
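The anonymous-namespace emitRX/emitRI/emitRR helpers deleted above buffered MCInsts into a SmallVector that the caller flushed later; this commit moves them onto MipsTargetStreamer so expansions emit through the streamer directly. The streamer side is not part of this hunk, so the following is only a sketch inferred from the TOut.emitRRX() call sites below, not the verbatim upstream implementation:

    // Inferred shape of the MipsTargetStreamer replacement for emitRRX().
    void MipsTargetStreamer::emitRRX(unsigned Opcode, unsigned Reg0,
                                     unsigned Reg1, MCOperand Op2,
                                     SMLoc IDLoc,
                                     const MCSubtargetInfo *STI) {
      MCInst TmpInst;
      TmpInst.setOpcode(Opcode);
      TmpInst.addOperand(MCOperand::createReg(Reg0));
      TmpInst.addOperand(MCOperand::createReg(Reg1));
      TmpInst.addOperand(Op2);
      TmpInst.setLoc(IDLoc);
      // MCTargetStreamer::getStreamer() returns the wrapped MCStreamer.
      getStreamer().EmitInstruction(TmpInst, *STI);
    }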
bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
bool ExpandedJalSym = false;
@@ -1560,6 +1558,10 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
case Mips::BLTZAL_MM:
case Mips::BC1F_MM:
case Mips::BC1T_MM:
+ case Mips::BC1EQZC_MMR6:
+ case Mips::BC1NEZC_MMR6:
+ case Mips::BC2EQZC_MMR6:
+ case Mips::BC2NEZC_MMR6:
assert(MCID.getNumOperands() == 2 && "unexpected number of operands");
Offset = Inst.getOperand(1);
if (!Offset.isImm())
@@ -1650,9 +1652,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
return Error(IDLoc, "jal doesn't support multiple symbols in PIC mode");
// FIXME: This is checking the expression can be handled by the later stages
- // of the assembler. We ought to leave it to those later stages but
- // we can't do that until we stop evaluateRelocExpr() rewriting the
- // expressions into non-equivalent forms.
+ // of the assembler. We ought to leave it to those later stages.
const MCSymbol *JalSym = getSingleMCSymbol(JalExpr);
// FIXME: Add support for label+offset operands (currently causes an error).
@@ -1666,33 +1666,38 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
// addiu $25, $25, 0
// R_(MICRO)MIPS_LO16 label
// jalr $25
- const MCExpr *Got16RelocExpr = evaluateRelocExpr(JalExpr, "got");
- const MCExpr *Lo16RelocExpr = evaluateRelocExpr(JalExpr, "lo");
-
- emitRRX(Mips::LW, Mips::T9, Mips::GP,
- MCOperand::createExpr(Got16RelocExpr), IDLoc, Instructions);
- emitRRX(Mips::ADDiu, Mips::T9, Mips::T9,
- MCOperand::createExpr(Lo16RelocExpr), IDLoc, Instructions);
+ const MCExpr *Got16RelocExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_GOT, JalExpr, getContext());
+ const MCExpr *Lo16RelocExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_LO, JalExpr, getContext());
+
+ TOut.emitRRX(Mips::LW, Mips::T9, Mips::GP,
+ MCOperand::createExpr(Got16RelocExpr), IDLoc, STI);
+ TOut.emitRRX(Mips::ADDiu, Mips::T9, Mips::T9,
+ MCOperand::createExpr(Lo16RelocExpr), IDLoc, STI);
} else if (isABI_N32() || isABI_N64()) {
// If it's a local symbol and the N32/N64 ABIs are being used,
// we expand to:
// lw/ld $25, 0($gp)
// R_(MICRO)MIPS_GOT_DISP label
// jalr $25
- const MCExpr *GotDispRelocExpr = evaluateRelocExpr(JalExpr, "got_disp");
+ const MCExpr *GotDispRelocExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_GOT_DISP, JalExpr, getContext());
- emitRRX(ABI.ArePtrs64bit() ? Mips::LD : Mips::LW, Mips::T9, Mips::GP,
- MCOperand::createExpr(GotDispRelocExpr), IDLoc, Instructions);
+ TOut.emitRRX(ABI.ArePtrs64bit() ? Mips::LD : Mips::LW, Mips::T9,
+ Mips::GP, MCOperand::createExpr(GotDispRelocExpr), IDLoc,
+ STI);
}
} else {
// If it's an external/weak symbol, we expand to:
// lw/ld $25, 0($gp)
// R_(MICRO)MIPS_CALL16 label
// jalr $25
- const MCExpr *Call16RelocExpr = evaluateRelocExpr(JalExpr, "call16");
+ const MCExpr *Call16RelocExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_GOT_CALL, JalExpr, getContext());
- emitRRX(ABI.ArePtrs64bit() ? Mips::LD : Mips::LW, Mips::T9, Mips::GP,
- MCOperand::createExpr(Call16RelocExpr), IDLoc, Instructions);
+ TOut.emitRRX(ABI.ArePtrs64bit() ? Mips::LD : Mips::LW, Mips::T9, Mips::GP,
+ MCOperand::createExpr(Call16RelocExpr), IDLoc, STI);
}
MCInst JalrInst;
@@ -1723,7 +1728,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
int MemOffset = Op.getImm();
if (MemOffset < -32768 || MemOffset > 32767) {
        // Offset can't exceed a signed 16-bit value.
- expandMemInst(Inst, IDLoc, Instructions, MCID.mayLoad(), true);
+ expandMemInst(Inst, IDLoc, Out, STI, MCID.mayLoad(), true);
return false;
}
} else if (Op.isExpr()) {
@@ -1733,11 +1738,11 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
static_cast<const MCSymbolRefExpr *>(Expr);
if (SR->getKind() == MCSymbolRefExpr::VK_None) {
// Expand symbol.
- expandMemInst(Inst, IDLoc, Instructions, MCID.mayLoad(), false);
+ expandMemInst(Inst, IDLoc, Out, STI, MCID.mayLoad(), false);
return false;
}
} else if (!isEvaluated(Expr)) {
- expandMemInst(Inst, IDLoc, Instructions, MCID.mayLoad(), false);
+ expandMemInst(Inst, IDLoc, Out, STI, MCID.mayLoad(), false);
return false;
}
}
@@ -1763,8 +1768,8 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
(BaseReg.getReg() == Mips::GP ||
BaseReg.getReg() == Mips::GP_64)) {
- emitRRI(Mips::LWGP_MM, DstReg.getReg(), Mips::GP, MemOffset,
- IDLoc, Instructions);
+ TOut.emitRRI(Mips::LWGP_MM, DstReg.getReg(), Mips::GP, MemOffset,
+ IDLoc, STI);
return false;
}
}
@@ -1780,14 +1785,6 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
switch (Inst.getOpcode()) {
default:
break;
- case Mips::ADDIUS5_MM:
- Opnd = Inst.getOperand(2);
- if (!Opnd.isImm())
- return Error(IDLoc, "expected immediate operand kind");
- Imm = Opnd.getImm();
- if (Imm < -8 || Imm > 7)
- return Error(IDLoc, "immediate operand value out of range");
- break;
case Mips::ADDIUSP_MM:
Opnd = Inst.getOperand(0);
if (!Opnd.isImm())
@@ -1823,16 +1820,6 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
((Imm % 4 == 0) && Imm < 28 && Imm > 0)))
return Error(IDLoc, "immediate operand value out of range");
break;
- case Mips::ADDIUR1SP_MM:
- Opnd = Inst.getOperand(1);
- if (!Opnd.isImm())
- return Error(IDLoc, "expected immediate operand kind");
- Imm = Opnd.getImm();
- if (OffsetToAlignment(Imm, 4LL))
- return Error(IDLoc, "misaligned immediate operand value");
- if (Imm < 0 || Imm > 255)
- return Error(IDLoc, "immediate operand value out of range");
- break;
case Mips::ANDI16_MM:
Opnd = Inst.getOperand(2);
if (!Opnd.isImm())
@@ -1851,12 +1838,6 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
if (Imm < -1 || Imm > 14)
return Error(IDLoc, "immediate operand value out of range");
break;
- case Mips::TEQ_MM:
- case Mips::TGE_MM:
- case Mips::TGEU_MM:
- case Mips::TLT_MM:
- case Mips::TLTU_MM:
- case Mips::TNE_MM:
case Mips::SB16_MM:
case Mips::SB16_MMR6:
Opnd = Inst.getOperand(2);
@@ -1897,11 +1878,16 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
}
}
+ bool FillDelaySlot =
+ MCID.hasDelaySlot() && AssemblerOptions.back()->isReorder();
+ if (FillDelaySlot)
+ TOut.emitDirectiveSetNoReorder();
+
MacroExpanderResultTy ExpandResult =
- tryExpandInstruction(Inst, IDLoc, Instructions);
+ tryExpandInstruction(Inst, IDLoc, Out, STI);
switch (ExpandResult) {
case MER_NotAMacro:
- Instructions.push_back(Inst);
+ Out.EmitInstruction(Inst, *STI);
break;
case MER_Success:
break;
@@ -1909,10 +1895,17 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
return true;
}
+ // We know we emitted an instruction on the MER_NotAMacro or MER_Success path.
+ // If we're in microMIPS mode then we must also set EF_MIPS_MICROMIPS.
+ if (inMicroMipsMode())
+ TOut.setUsesMicroMips();
+
// If this instruction has a delay slot and .set reorder is active,
// emit a NOP after it.
- if (MCID.hasDelaySlot() && AssemblerOptions.back()->isReorder())
- createNop(hasShortDelaySlot(Inst.getOpcode()), IDLoc, Instructions);
+ if (FillDelaySlot) {
+ TOut.emitEmptyDelaySlot(hasShortDelaySlot(Inst.getOpcode()), IDLoc, STI);
+ TOut.emitDirectiveSetReorder();
+ }
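Design note on the FillDelaySlot logic above: under '.set reorder' the expansion now brackets a delay-slot instruction with '.set noreorder' / '.set reorder' and plants the filling NOP itself, rather than pushing a NOP into the caller's buffer. A condensed sketch using only streamer calls that appear in this diff; the wrapper function itself is hypothetical:

    // Emit Inst with its delay slot explicitly filled, then restore the
    // user's reorder mode.
    static void emitWithFilledDelaySlot(MipsTargetStreamer &TOut,
                                        MCStreamer &Out, const MCInst &Inst,
                                        bool HasShortDelaySlot, SMLoc IDLoc,
                                        const MCSubtargetInfo *STI) {
      TOut.emitDirectiveSetNoReorder();     // stop assembler rescheduling
      Out.EmitInstruction(Inst, *STI);      // the branch or jump itself
      TOut.emitEmptyDelaySlot(HasShortDelaySlot, IDLoc, STI); // the NOP
      TOut.emitDirectiveSetReorder();       // back to the user's mode
    }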
if ((Inst.getOpcode() == Mips::JalOneReg ||
Inst.getOpcode() == Mips::JalTwoReg || ExpandedJalSym) &&
@@ -1922,16 +1915,11 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
// If .set reorder has been used, we've already emitted a NOP.
// If .set noreorder has been used, we need to emit a NOP at this point.
if (!AssemblerOptions.back()->isReorder())
- createNop(hasShortDelaySlot(Inst.getOpcode()), IDLoc, Instructions);
+ TOut.emitEmptyDelaySlot(hasShortDelaySlot(Inst.getOpcode()), IDLoc,
+ STI);
// Load the $gp from the stack.
- SmallVector<MCInst, 3> LoadInsts;
- createCpRestoreMemOp(true /*IsLoad*/, CpRestoreOffset /*StackOffset*/,
- IDLoc, LoadInsts);
-
- for (const MCInst &Inst : LoadInsts)
- Instructions.push_back(Inst);
-
+ TOut.emitGPRestore(CpRestoreOffset, IDLoc, STI);
} else
Warning(IDLoc, "no .cprestore used in PIC mode");
}
@@ -1940,17 +1928,15 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
}
MipsAsmParser::MacroExpanderResultTy
-MipsAsmParser::tryExpandInstruction(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+MipsAsmParser::tryExpandInstruction(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
switch (Inst.getOpcode()) {
default:
return MER_NotAMacro;
case Mips::LoadImm32:
- return expandLoadImm(Inst, true, IDLoc, Instructions) ? MER_Fail
- : MER_Success;
+ return expandLoadImm(Inst, true, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::LoadImm64:
- return expandLoadImm(Inst, false, IDLoc, Instructions) ? MER_Fail
- : MER_Success;
+ return expandLoadImm(Inst, false, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::LoadAddrImm32:
case Mips::LoadAddrImm64:
assert(Inst.getOperand(0).isReg() && "expected register operand kind");
@@ -1960,7 +1946,7 @@ MipsAsmParser::tryExpandInstruction(MCInst &Inst, SMLoc IDLoc,
return expandLoadAddress(Inst.getOperand(0).getReg(), Mips::NoRegister,
Inst.getOperand(1),
Inst.getOpcode() == Mips::LoadAddrImm32, IDLoc,
- Instructions)
+ Out, STI)
? MER_Fail
: MER_Success;
case Mips::LoadAddrReg32:
@@ -1973,24 +1959,23 @@ MipsAsmParser::tryExpandInstruction(MCInst &Inst, SMLoc IDLoc,
return expandLoadAddress(Inst.getOperand(0).getReg(),
Inst.getOperand(1).getReg(), Inst.getOperand(2),
Inst.getOpcode() == Mips::LoadAddrReg32, IDLoc,
- Instructions)
+ Out, STI)
? MER_Fail
: MER_Success;
case Mips::B_MM_Pseudo:
case Mips::B_MMR6_Pseudo:
- return expandUncondBranchMMPseudo(Inst, IDLoc, Instructions) ? MER_Fail
- : MER_Success;
+ return expandUncondBranchMMPseudo(Inst, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
case Mips::SWM_MM:
case Mips::LWM_MM:
- return expandLoadStoreMultiple(Inst, IDLoc, Instructions) ? MER_Fail
- : MER_Success;
+ return expandLoadStoreMultiple(Inst, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
case Mips::JalOneReg:
case Mips::JalTwoReg:
- return expandJalWithRegs(Inst, IDLoc, Instructions) ? MER_Fail
- : MER_Success;
+ return expandJalWithRegs(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::BneImm:
case Mips::BeqImm:
- return expandBranchImm(Inst, IDLoc, Instructions) ? MER_Fail : MER_Success;
+ return expandBranchImm(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::BLT:
case Mips::BLE:
case Mips::BGE:
@@ -2023,29 +2008,36 @@ MipsAsmParser::tryExpandInstruction(MCInst &Inst, SMLoc IDLoc,
case Mips::BLEULImmMacro:
case Mips::BGEULImmMacro:
case Mips::BGTULImmMacro:
- return expandCondBranches(Inst, IDLoc, Instructions) ? MER_Fail
- : MER_Success;
+ return expandCondBranches(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::SDivMacro:
- return expandDiv(Inst, IDLoc, Instructions, false, true) ? MER_Fail
- : MER_Success;
+ return expandDiv(Inst, IDLoc, Out, STI, false, true) ? MER_Fail
+ : MER_Success;
case Mips::DSDivMacro:
- return expandDiv(Inst, IDLoc, Instructions, true, true) ? MER_Fail
- : MER_Success;
+ return expandDiv(Inst, IDLoc, Out, STI, true, true) ? MER_Fail
+ : MER_Success;
case Mips::UDivMacro:
- return expandDiv(Inst, IDLoc, Instructions, false, false) ? MER_Fail
- : MER_Success;
+ return expandDiv(Inst, IDLoc, Out, STI, false, false) ? MER_Fail
+ : MER_Success;
case Mips::DUDivMacro:
- return expandDiv(Inst, IDLoc, Instructions, true, false) ? MER_Fail
- : MER_Success;
+ return expandDiv(Inst, IDLoc, Out, STI, true, false) ? MER_Fail
+ : MER_Success;
+ case Mips::PseudoTRUNC_W_S:
+ return expandTrunc(Inst, false, false, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
+ case Mips::PseudoTRUNC_W_D32:
+ return expandTrunc(Inst, true, false, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
+ case Mips::PseudoTRUNC_W_D:
+ return expandTrunc(Inst, true, true, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
case Mips::Ulh:
- return expandUlh(Inst, true, IDLoc, Instructions) ? MER_Fail : MER_Success;
+ return expandUlh(Inst, true, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::Ulhu:
- return expandUlh(Inst, false, IDLoc, Instructions) ? MER_Fail : MER_Success;
+ return expandUlh(Inst, false, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::Ulw:
- return expandUlw(Inst, IDLoc, Instructions) ? MER_Fail : MER_Success;
+ return expandUlw(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::NORImm:
- return expandAliasImmediate(Inst, IDLoc, Instructions) ? MER_Fail
- : MER_Success;
+ return expandAliasImmediate(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::ADDi:
case Mips::ADDiu:
case Mips::SLTi:
@@ -2055,8 +2047,8 @@ MipsAsmParser::tryExpandInstruction(MCInst &Inst, SMLoc IDLoc,
int64_t ImmValue = Inst.getOperand(2).getImm();
if (isInt<16>(ImmValue))
return MER_NotAMacro;
- return expandAliasImmediate(Inst, IDLoc, Instructions) ? MER_Fail
- : MER_Success;
+ return expandAliasImmediate(Inst, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
}
return MER_NotAMacro;
case Mips::ANDi:
@@ -2067,31 +2059,32 @@ MipsAsmParser::tryExpandInstruction(MCInst &Inst, SMLoc IDLoc,
int64_t ImmValue = Inst.getOperand(2).getImm();
if (isUInt<16>(ImmValue))
return MER_NotAMacro;
- return expandAliasImmediate(Inst, IDLoc, Instructions) ? MER_Fail
- : MER_Success;
+ return expandAliasImmediate(Inst, IDLoc, Out, STI) ? MER_Fail
+ : MER_Success;
}
return MER_NotAMacro;
case Mips::ROL:
case Mips::ROR:
- return expandRotation(Inst, IDLoc, Instructions) ? MER_Fail
- : MER_Success;
+ return expandRotation(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::ROLImm:
case Mips::RORImm:
- return expandRotationImm(Inst, IDLoc, Instructions) ? MER_Fail
- : MER_Success;
+ return expandRotationImm(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::DROL:
case Mips::DROR:
- return expandDRotation(Inst, IDLoc, Instructions) ? MER_Fail
- : MER_Success;
+ return expandDRotation(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::DROLImm:
case Mips::DRORImm:
- return expandDRotationImm(Inst, IDLoc, Instructions) ? MER_Fail
- : MER_Success;
+ return expandDRotationImm(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
+ case Mips::ABSMacro:
+ return expandAbs(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
}
}
bool MipsAsmParser::expandJalWithRegs(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+
// Create a JALR instruction which is going to replace the pseudo-JAL.
MCInst JalrInst;
JalrInst.setLoc(IDLoc);
@@ -2121,14 +2114,14 @@ bool MipsAsmParser::expandJalWithRegs(MCInst &Inst, SMLoc IDLoc,
const MCOperand SecondRegOp = Inst.getOperand(1);
JalrInst.addOperand(SecondRegOp);
}
- Instructions.push_back(JalrInst);
+ Out.EmitInstruction(JalrInst, *STI);
// If .set reorder is active and branch instruction has a delay slot,
// emit a NOP after it.
const MCInstrDesc &MCID = getInstDesc(JalrInst.getOpcode());
- if (MCID.hasDelaySlot() && AssemblerOptions.back()->isReorder()) {
- createNop(hasShortDelaySlot(JalrInst.getOpcode()), IDLoc, Instructions);
- }
+ if (MCID.hasDelaySlot() && AssemblerOptions.back()->isReorder())
+ TOut.emitEmptyDelaySlot(hasShortDelaySlot(JalrInst.getOpcode()), IDLoc,
+ STI);
return false;
}
@@ -2150,11 +2143,12 @@ template <unsigned N> static bool isShiftedUIntAtAnyPosition(uint64_t x) {
/// @param IsAddress True if the immediate represents an address. False if it
/// is an integer.
/// @param IDLoc Location of the immediate in the source file.
-/// @param Instructions The instructions emitted by this expansion.
+/// @param Out The MCStreamer that the expanded instructions are emitted to.
+/// @param STI The MCSubtargetInfo in effect for the emitted instructions.
bool MipsAsmParser::loadImmediate(int64_t ImmValue, unsigned DstReg,
unsigned SrcReg, bool Is32BitImm,
- bool IsAddress, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+ bool IsAddress, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+
if (!Is32BitImm && !isGP64bit()) {
Error(IDLoc, "instruction requires a 64-bit architecture");
return true;
@@ -2180,7 +2174,8 @@ bool MipsAsmParser::loadImmediate(int64_t ImmValue, unsigned DstReg,
UseSrcReg = true;
unsigned TmpReg = DstReg;
- if (UseSrcReg && (DstReg == SrcReg)) {
+ if (UseSrcReg &&
+ getContext().getRegisterInfo()->isSuperOrSubRegisterEq(DstReg, SrcReg)) {
// At this point we need AT to perform the expansions and we exit if it is
// not available.
unsigned ATReg = getATReg(IDLoc);
@@ -2197,11 +2192,11 @@ bool MipsAsmParser::loadImmediate(int64_t ImmValue, unsigned DstReg,
// traditional assembler behaviour. N32 would normally use addiu for both
// integers and addresses.
if (IsAddress && !Is32BitImm) {
- emitRRI(Mips::DADDiu, DstReg, SrcReg, ImmValue, IDLoc, Instructions);
+ TOut.emitRRI(Mips::DADDiu, DstReg, SrcReg, ImmValue, IDLoc, STI);
return false;
}
- emitRRI(Mips::ADDiu, DstReg, SrcReg, ImmValue, IDLoc, Instructions);
+ TOut.emitRRI(Mips::ADDiu, DstReg, SrcReg, ImmValue, IDLoc, STI);
return false;
}
@@ -2213,9 +2208,9 @@ bool MipsAsmParser::loadImmediate(int64_t ImmValue, unsigned DstReg,
return true;
}
- emitRRI(Mips::ORi, TmpReg, ZeroReg, ImmValue, IDLoc, Instructions);
+ TOut.emitRRI(Mips::ORi, TmpReg, ZeroReg, ImmValue, IDLoc, STI);
if (UseSrcReg)
- emitRRR(ABI.GetPtrAdduOp(), DstReg, TmpReg, SrcReg, IDLoc, Instructions);
+ TOut.emitRRR(ABI.GetPtrAdduOp(), DstReg, TmpReg, SrcReg, IDLoc, STI);
return false;
}
@@ -2229,29 +2224,29 @@ bool MipsAsmParser::loadImmediate(int64_t ImmValue, unsigned DstReg,
// Traditional behaviour seems to special case this particular value. It's
// not clear why other masks are handled differently.
if (ImmValue == 0xffffffff) {
- emitRI(Mips::LUi, TmpReg, 0xffff, IDLoc, Instructions);
- emitRRI(Mips::DSRL32, TmpReg, TmpReg, 0, IDLoc, Instructions);
+ TOut.emitRI(Mips::LUi, TmpReg, 0xffff, IDLoc, STI);
+ TOut.emitRRI(Mips::DSRL32, TmpReg, TmpReg, 0, IDLoc, STI);
if (UseSrcReg)
- emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, Instructions);
+ TOut.emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, STI);
return false;
}
// Expand to an ORi instead of a LUi to avoid sign-extending into the
// upper 32 bits.
- emitRRI(Mips::ORi, TmpReg, ZeroReg, Bits31To16, IDLoc, Instructions);
- emitRRI(Mips::DSLL, TmpReg, TmpReg, 16, IDLoc, Instructions);
+ TOut.emitRRI(Mips::ORi, TmpReg, ZeroReg, Bits31To16, IDLoc, STI);
+ TOut.emitRRI(Mips::DSLL, TmpReg, TmpReg, 16, IDLoc, STI);
if (Bits15To0)
- emitRRI(Mips::ORi, TmpReg, TmpReg, Bits15To0, IDLoc, Instructions);
+ TOut.emitRRI(Mips::ORi, TmpReg, TmpReg, Bits15To0, IDLoc, STI);
if (UseSrcReg)
- emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, Instructions);
+ TOut.emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, STI);
return false;
}
- emitRI(Mips::LUi, TmpReg, Bits31To16, IDLoc, Instructions);
+ TOut.emitRI(Mips::LUi, TmpReg, Bits31To16, IDLoc, STI);
if (Bits15To0)
- emitRRI(Mips::ORi, TmpReg, TmpReg, Bits15To0, IDLoc, Instructions);
+ TOut.emitRRI(Mips::ORi, TmpReg, TmpReg, Bits15To0, IDLoc, STI);
if (UseSrcReg)
- emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, Instructions);
+ TOut.emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, STI);
return false;
}
@@ -2267,11 +2262,11 @@ bool MipsAsmParser::loadImmediate(int64_t ImmValue, unsigned DstReg,
unsigned LastSet = findLastSet((uint64_t)ImmValue);
unsigned ShiftAmount = FirstSet - (15 - (LastSet - FirstSet));
uint16_t Bits = (ImmValue >> ShiftAmount) & 0xffff;
- emitRRI(Mips::ORi, TmpReg, ZeroReg, Bits, IDLoc, Instructions);
- emitRRI(Mips::DSLL, TmpReg, TmpReg, ShiftAmount, IDLoc, Instructions);
+ TOut.emitRRI(Mips::ORi, TmpReg, ZeroReg, Bits, IDLoc, STI);
+ TOut.emitRRI(Mips::DSLL, TmpReg, TmpReg, ShiftAmount, IDLoc, STI);
if (UseSrcReg)
- emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, Instructions);
+ TOut.emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, STI);
return false;
}
@@ -2284,7 +2279,7 @@ bool MipsAsmParser::loadImmediate(int64_t ImmValue, unsigned DstReg,
// Load bits 32-63 of ImmValue into bits 0-31 of the temporary register.
if (loadImmediate(ImmValue >> 32, TmpReg, Mips::NoRegister, true, false,
- IDLoc, Instructions))
+ IDLoc, Out, STI))
return false;
// Shift and accumulate into the register. If a 16-bit chunk is zero, then
@@ -2294,9 +2289,8 @@ bool MipsAsmParser::loadImmediate(int64_t ImmValue, unsigned DstReg,
uint16_t ImmChunk = (ImmValue >> BitNum) & 0xffff;
if (ImmChunk != 0) {
- emitAppropriateDSLL(TmpReg, TmpReg, ShiftCarriedForwards, IDLoc,
- Instructions);
- emitRRI(Mips::ORi, TmpReg, TmpReg, ImmChunk, IDLoc, Instructions);
+ TOut.emitDSLL(TmpReg, TmpReg, ShiftCarriedForwards, IDLoc, STI);
+ TOut.emitRRI(Mips::ORi, TmpReg, TmpReg, ImmChunk, IDLoc, STI);
ShiftCarriedForwards = 0;
}
@@ -2306,24 +2300,23 @@ bool MipsAsmParser::loadImmediate(int64_t ImmValue, unsigned DstReg,
// Finish any remaining shifts left by trailing zeros.
if (ShiftCarriedForwards)
- emitAppropriateDSLL(TmpReg, TmpReg, ShiftCarriedForwards, IDLoc,
- Instructions);
+ TOut.emitDSLL(TmpReg, TmpReg, ShiftCarriedForwards, IDLoc, STI);
if (UseSrcReg)
- emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, Instructions);
+ TOut.emitRRR(AdduOp, DstReg, TmpReg, SrcReg, IDLoc, STI);
return false;
}
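For the 64-bit tail of loadImmediate() above, the expansion ORs each non-zero 16-bit chunk into the register and defers shifts across zero chunks so they can be merged into one DSLL. A standalone model of that walk follows; the value and printed mnemonics are illustrative, and the real code first materializes bits 32-63 recursively and switches to DSLL32 once a merged shift reaches 32, as the deleted emitAppropriateDSLL() showed:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Assume bits 32-63 are already in $tmp's low half; walk bits 16-31
      // and 0-15, merging shifts across zero chunks.
      uint64_t Imm = 0x0123456700450000ULL;
      unsigned Deferred = 16; // pending left-shift in bits
      for (int BitNum = 16; BitNum >= 0; BitNum -= 16) {
        uint16_t Chunk = (Imm >> BitNum) & 0xffff;
        if (Chunk != 0) {
          printf("dsll $tmp, $tmp, %u\n", Deferred); // flush pending shift
          printf("ori  $tmp, $tmp, 0x%x\n", (unsigned)Chunk);
          Deferred = 0;
        }
        Deferred += 16;
      }
      Deferred -= 16;
      if (Deferred) // shifts still owed for trailing zero chunks
        printf("dsll $tmp, $tmp, %u\n", Deferred);
      return 0;
    }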
bool MipsAsmParser::expandLoadImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+ MCStreamer &Out, const MCSubtargetInfo *STI) {
const MCOperand &ImmOp = Inst.getOperand(1);
assert(ImmOp.isImm() && "expected immediate operand kind");
const MCOperand &DstRegOp = Inst.getOperand(0);
assert(DstRegOp.isReg() && "expected register operand kind");
if (loadImmediate(ImmOp.getImm(), DstRegOp.getReg(), Mips::NoRegister,
- Is32BitImm, false, IDLoc, Instructions))
+ Is32BitImm, false, IDLoc, Out, STI))
return true;
return false;
@@ -2332,7 +2325,8 @@ bool MipsAsmParser::expandLoadImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc,
bool MipsAsmParser::expandLoadAddress(unsigned DstReg, unsigned BaseReg,
const MCOperand &Offset,
bool Is32BitAddress, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
// la can't produce a usable address when addresses are 64-bit.
if (Is32BitAddress && ABI.ArePtrs64bit()) {
// FIXME: Demote this to a warning and continue as if we had 'dla' instead.
@@ -2344,31 +2338,109 @@ bool MipsAsmParser::expandLoadAddress(unsigned DstReg, unsigned BaseReg,
}
// dla requires 64-bit addresses.
- if (!Is32BitAddress && !ABI.ArePtrs64bit()) {
+ if (!Is32BitAddress && !hasMips3()) {
Error(IDLoc, "instruction requires a 64-bit architecture");
return true;
}
if (!Offset.isImm())
return loadAndAddSymbolAddress(Offset.getExpr(), DstReg, BaseReg,
- Is32BitAddress, IDLoc, Instructions);
+ Is32BitAddress, IDLoc, Out, STI);
+
+ if (!ABI.ArePtrs64bit()) {
+ // Continue as if we had 'la' whether we had 'la' or 'dla'.
+ Is32BitAddress = true;
+ }
return loadImmediate(Offset.getImm(), DstReg, BaseReg, Is32BitAddress, true,
- IDLoc, Instructions);
+ IDLoc, Out, STI);
}
-bool MipsAsmParser::loadAndAddSymbolAddress(
- const MCExpr *SymExpr, unsigned DstReg, unsigned SrcReg, bool Is32BitSym,
- SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions) {
+bool MipsAsmParser::loadAndAddSymbolAddress(const MCExpr *SymExpr,
+ unsigned DstReg, unsigned SrcReg,
+ bool Is32BitSym, SMLoc IDLoc,
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+ bool UseSrcReg = SrcReg != Mips::NoRegister;
warnIfNoMacro(IDLoc);
- const MCExpr *Symbol = cast<MCExpr>(SymExpr);
- const MipsMCExpr *HiExpr = MipsMCExpr::create(
- MCSymbolRefExpr::VK_Mips_ABS_HI, Symbol, getContext());
- const MipsMCExpr *LoExpr = MipsMCExpr::create(
- MCSymbolRefExpr::VK_Mips_ABS_LO, Symbol, getContext());
+ if (inPicMode() && ABI.IsO32()) {
+ MCValue Res;
+ if (!SymExpr->evaluateAsRelocatable(Res, nullptr, nullptr)) {
+ Error(IDLoc, "expected relocatable expression");
+ return true;
+ }
+ if (Res.getSymB() != nullptr) {
+ Error(IDLoc, "expected relocatable expression with only one symbol");
+ return true;
+ }
+
+ // The case where the result register is $25 is somewhat special. If the
+ // symbol in the final relocation is external and not modified with a
+ // constant then we must use R_MIPS_CALL16 instead of R_MIPS_GOT16.
+ if ((DstReg == Mips::T9 || DstReg == Mips::T9_64) && !UseSrcReg &&
+ Res.getConstant() == 0 && !Res.getSymA()->getSymbol().isInSection() &&
+ !Res.getSymA()->getSymbol().isTemporary()) {
+ const MCExpr *CallExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_GOT_CALL, SymExpr, getContext());
+ TOut.emitRRX(Mips::LW, DstReg, ABI.GetGlobalPtr(),
+ MCOperand::createExpr(CallExpr), IDLoc, STI);
+ return false;
+ }
- bool UseSrcReg = SrcReg != Mips::NoRegister;
+ // The remaining cases are:
+ // External GOT: lw $tmp, %got(symbol+offset)($gp)
+  //               >addiu $tmp, $tmp, offset
+ // >addiu $rd, $tmp, $rs
+ // Local GOT: lw $tmp, %got(symbol+offset)($gp)
+  //                 addiu $tmp, $tmp, %lo(symbol+offset)
+ // >addiu $rd, $tmp, $rs
+ // The addiu's marked with a '>' may be omitted if they are redundant. If
+ // this happens then the last instruction must use $rd as the result
+ // register.
+  bool expandRotation(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+                      const MCSubtargetInfo *STI);
+ const MCExpr *LoExpr = nullptr;
+ if (Res.getSymA()->getSymbol().isInSection() ||
+ Res.getSymA()->getSymbol().isTemporary())
+ LoExpr = MipsMCExpr::create(MipsMCExpr::MEK_LO, SymExpr, getContext());
+ else if (Res.getConstant() != 0) {
+      // For an external symbol, %got(symbol) on its own already resolves
+      // the symbol, but we must still account for any addend in
+      // expressions like symbol+8.
+ LoExpr = MCConstantExpr::create(Res.getConstant(), getContext());
+ }
+
+ unsigned TmpReg = DstReg;
+ if (UseSrcReg &&
+ getContext().getRegisterInfo()->isSuperOrSubRegisterEq(DstReg,
+ SrcReg)) {
+ // If $rs is the same as $rd, we need to use AT.
+ // If it is not available we exit.
+ unsigned ATReg = getATReg(IDLoc);
+ if (!ATReg)
+ return true;
+ TmpReg = ATReg;
+ }
+
+ TOut.emitRRX(Mips::LW, TmpReg, ABI.GetGlobalPtr(),
+ MCOperand::createExpr(GotExpr), IDLoc, STI);
+
+ if (LoExpr)
+ TOut.emitRRX(Mips::ADDiu, TmpReg, TmpReg, MCOperand::createExpr(LoExpr),
+ IDLoc, STI);
+
+ if (UseSrcReg)
+ TOut.emitRRR(Mips::ADDu, DstReg, TmpReg, SrcReg, IDLoc, STI);
+
+ return false;
+ }
+
+ const MipsMCExpr *HiExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_HI, SymExpr, getContext());
+ const MipsMCExpr *LoExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_LO, SymExpr, getContext());
// This is the 64-bit symbol address expansion.
if (ABI.ArePtrs64bit() && isGP64bit()) {
@@ -2378,12 +2450,14 @@ bool MipsAsmParser::loadAndAddSymbolAddress(
if (!ATReg)
return true;
- const MipsMCExpr *HighestExpr = MipsMCExpr::create(
- MCSymbolRefExpr::VK_Mips_HIGHEST, Symbol, getContext());
- const MipsMCExpr *HigherExpr = MipsMCExpr::create(
- MCSymbolRefExpr::VK_Mips_HIGHER, Symbol, getContext());
+ const MipsMCExpr *HighestExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_HIGHEST, SymExpr, getContext());
+ const MipsMCExpr *HigherExpr =
+ MipsMCExpr::create(MipsMCExpr::MEK_HIGHER, SymExpr, getContext());
- if (UseSrcReg && (DstReg == SrcReg)) {
+ if (UseSrcReg &&
+ getContext().getRegisterInfo()->isSuperOrSubRegisterEq(DstReg,
+ SrcReg)) {
// If $rs is the same as $rd:
// (d)la $rd, sym($rd) => lui $at, %highest(sym)
// daddiu $at, $at, %higher(sym)
@@ -2392,17 +2466,17 @@ bool MipsAsmParser::loadAndAddSymbolAddress(
// dsll $at, $at, 16
// daddiu $at, $at, %lo(sym)
// daddu $rd, $at, $rd
- emitRX(Mips::LUi, ATReg, MCOperand::createExpr(HighestExpr), IDLoc,
- Instructions);
- emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(HigherExpr),
- IDLoc, Instructions);
- emitRRI(Mips::DSLL, ATReg, ATReg, 16, IDLoc, Instructions);
- emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(HiExpr), IDLoc,
- Instructions);
- emitRRI(Mips::DSLL, ATReg, ATReg, 16, IDLoc, Instructions);
- emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(LoExpr), IDLoc,
- Instructions);
- emitRRR(Mips::DADDu, DstReg, ATReg, SrcReg, IDLoc, Instructions);
+ TOut.emitRX(Mips::LUi, ATReg, MCOperand::createExpr(HighestExpr), IDLoc,
+ STI);
+ TOut.emitRRX(Mips::DADDiu, ATReg, ATReg,
+ MCOperand::createExpr(HigherExpr), IDLoc, STI);
+ TOut.emitRRI(Mips::DSLL, ATReg, ATReg, 16, IDLoc, STI);
+ TOut.emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(HiExpr),
+ IDLoc, STI);
+ TOut.emitRRI(Mips::DSLL, ATReg, ATReg, 16, IDLoc, STI);
+ TOut.emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(LoExpr),
+ IDLoc, STI);
+ TOut.emitRRR(Mips::DADDu, DstReg, ATReg, SrcReg, IDLoc, STI);
return false;
}
@@ -2415,18 +2489,17 @@ bool MipsAsmParser::loadAndAddSymbolAddress(
// dsll32 $rd, $rd, 0
// daddu $rd, $rd, $at
// (daddu $rd, $rd, $rs)
- emitRX(Mips::LUi, DstReg, MCOperand::createExpr(HighestExpr), IDLoc,
- Instructions);
- emitRX(Mips::LUi, ATReg, MCOperand::createExpr(HiExpr), IDLoc,
- Instructions);
- emitRRX(Mips::DADDiu, DstReg, DstReg, MCOperand::createExpr(HigherExpr),
- IDLoc, Instructions);
- emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(LoExpr), IDLoc,
- Instructions);
- emitRRI(Mips::DSLL32, DstReg, DstReg, 0, IDLoc, Instructions);
- emitRRR(Mips::DADDu, DstReg, DstReg, ATReg, IDLoc, Instructions);
+ TOut.emitRX(Mips::LUi, DstReg, MCOperand::createExpr(HighestExpr), IDLoc,
+ STI);
+ TOut.emitRX(Mips::LUi, ATReg, MCOperand::createExpr(HiExpr), IDLoc, STI);
+ TOut.emitRRX(Mips::DADDiu, DstReg, DstReg,
+ MCOperand::createExpr(HigherExpr), IDLoc, STI);
+ TOut.emitRRX(Mips::DADDiu, ATReg, ATReg, MCOperand::createExpr(LoExpr),
+ IDLoc, STI);
+ TOut.emitRRI(Mips::DSLL32, DstReg, DstReg, 0, IDLoc, STI);
+ TOut.emitRRR(Mips::DADDu, DstReg, DstReg, ATReg, IDLoc, STI);
if (UseSrcReg)
- emitRRR(Mips::DADDu, DstReg, DstReg, SrcReg, IDLoc, Instructions);
+ TOut.emitRRR(Mips::DADDu, DstReg, DstReg, SrcReg, IDLoc, STI);
return false;
}
@@ -2441,7 +2514,8 @@ bool MipsAsmParser::loadAndAddSymbolAddress(
// ori $rd, $rd, %lo(sym)
// (addu $rd, $rd, $rs)
unsigned TmpReg = DstReg;
- if (UseSrcReg && (DstReg == SrcReg)) {
+ if (UseSrcReg &&
+ getContext().getRegisterInfo()->isSuperOrSubRegisterEq(DstReg, SrcReg)) {
// If $rs is the same as $rd, we need to use AT.
// If it is not available we exit.
unsigned ATReg = getATReg(IDLoc);
@@ -2450,20 +2524,24 @@ bool MipsAsmParser::loadAndAddSymbolAddress(
TmpReg = ATReg;
}
- emitRX(Mips::LUi, TmpReg, MCOperand::createExpr(HiExpr), IDLoc, Instructions);
- emitRRX(Mips::ADDiu, TmpReg, TmpReg, MCOperand::createExpr(LoExpr), IDLoc,
- Instructions);
+ TOut.emitRX(Mips::LUi, TmpReg, MCOperand::createExpr(HiExpr), IDLoc, STI);
+ TOut.emitRRX(Mips::ADDiu, TmpReg, TmpReg, MCOperand::createExpr(LoExpr),
+ IDLoc, STI);
if (UseSrcReg)
- emitRRR(Mips::ADDu, DstReg, TmpReg, SrcReg, IDLoc, Instructions);
+ TOut.emitRRR(Mips::ADDu, DstReg, TmpReg, SrcReg, IDLoc, STI);
else
- assert(DstReg == TmpReg);
+ assert(
+ getContext().getRegisterInfo()->isSuperOrSubRegisterEq(DstReg, TmpReg));
return false;
}
-bool MipsAsmParser::expandUncondBranchMMPseudo(
- MCInst &Inst, SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions) {
+bool MipsAsmParser::expandUncondBranchMMPseudo(MCInst &Inst, SMLoc IDLoc,
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+
assert(getInstDesc(Inst.getOpcode()).getNumOperands() == 1 &&
"unexpected number of operands");
@@ -2493,19 +2571,20 @@ bool MipsAsmParser::expandUncondBranchMMPseudo(
Inst.addOperand(MCOperand::createImm(Offset.getImm()));
}
}
- Instructions.push_back(Inst);
+ Out.EmitInstruction(Inst, *STI);
// If .set reorder is active and branch instruction has a delay slot,
// emit a NOP after it.
const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
if (MCID.hasDelaySlot() && AssemblerOptions.back()->isReorder())
- createNop(true, IDLoc, Instructions);
+ TOut.emitEmptyDelaySlot(true, IDLoc, STI);
return false;
}
-bool MipsAsmParser::expandBranchImm(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+bool MipsAsmParser::expandBranchImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
const MCOperand &DstRegOp = Inst.getOperand(0);
assert(DstRegOp.isReg() && "expected register operand kind");
@@ -2513,7 +2592,8 @@ bool MipsAsmParser::expandBranchImm(MCInst &Inst, SMLoc IDLoc,
assert(ImmOp.isImm() && "expected immediate operand kind");
const MCOperand &MemOffsetOp = Inst.getOperand(2);
- assert(MemOffsetOp.isImm() && "expected immediate operand kind");
+ assert((MemOffsetOp.isImm() || MemOffsetOp.isExpr()) &&
+ "expected immediate or expression operand");
unsigned OpCode = 0;
switch(Inst.getOpcode()) {
@@ -2530,8 +2610,8 @@ bool MipsAsmParser::expandBranchImm(MCInst &Inst, SMLoc IDLoc,
int64_t ImmValue = ImmOp.getImm();
if (ImmValue == 0)
- emitRRX(OpCode, DstRegOp.getReg(), Mips::ZERO, MemOffsetOp, IDLoc,
- Instructions);
+ TOut.emitRRX(OpCode, DstRegOp.getReg(), Mips::ZERO, MemOffsetOp, IDLoc,
+ STI);
else {
warnIfNoMacro(IDLoc);
@@ -2540,94 +2620,112 @@ bool MipsAsmParser::expandBranchImm(MCInst &Inst, SMLoc IDLoc,
return true;
if (loadImmediate(ImmValue, ATReg, Mips::NoRegister, !isGP64bit(), true,
- IDLoc, Instructions))
+ IDLoc, Out, STI))
return true;
- emitRRX(OpCode, DstRegOp.getReg(), ATReg, MemOffsetOp, IDLoc, Instructions);
+ TOut.emitRRX(OpCode, DstRegOp.getReg(), ATReg, MemOffsetOp, IDLoc, STI);
}
return false;
}
-void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions,
- bool isLoad, bool isImmOpnd) {
- unsigned ImmOffset, HiOffset, LoOffset;
- const MCExpr *ExprOffset;
- unsigned TmpRegNum;
- // 1st operand is either the source or destination register.
- assert(Inst.getOperand(0).isReg() && "expected register operand kind");
- unsigned RegOpNum = Inst.getOperand(0).getReg();
- // 2nd operand is the base register.
- assert(Inst.getOperand(1).isReg() && "expected register operand kind");
- unsigned BaseRegNum = Inst.getOperand(1).getReg();
- // 3rd operand is either an immediate or expression.
- if (isImmOpnd) {
- assert(Inst.getOperand(2).isImm() && "expected immediate operand kind");
- ImmOffset = Inst.getOperand(2).getImm();
- LoOffset = ImmOffset & 0x0000ffff;
- HiOffset = (ImmOffset & 0xffff0000) >> 16;
- // If msb of LoOffset is 1(negative number) we must increment HiOffset.
- if (LoOffset & 0x8000)
- HiOffset++;
- } else
- ExprOffset = Inst.getOperand(2).getExpr();
- // These are some of the types of expansions we perform here:
- // 1) lw $8, sym => lui $8, %hi(sym)
- // lw $8, %lo(sym)($8)
- // 2) lw $8, offset($9) => lui $8, %hi(offset)
- // add $8, $8, $9
- // lw $8, %lo(offset)($9)
- // 3) lw $8, offset($8) => lui $at, %hi(offset)
- // add $at, $at, $8
- // lw $8, %lo(offset)($at)
- // 4) sw $8, sym => lui $at, %hi(sym)
- // sw $8, %lo(sym)($at)
- // 5) sw $8, offset($8) => lui $at, %hi(offset)
- // add $at, $at, $8
- // sw $8, %lo(offset)($at)
- // 6) ldc1 $f0, sym => lui $at, %hi(sym)
- // ldc1 $f0, %lo(sym)($at)
- //
- // For load instructions we can use the destination register as a temporary
- // if base and dst are different (examples 1 and 2) and if the base register
- // is general purpose otherwise we must use $at (example 6) and error if it's
- // not available. For stores we must use $at (examples 4 and 5) because we
- // must not clobber the source register setting up the offset.
+void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI, bool IsLoad,
+ bool IsImmOpnd) {
+ if (IsLoad) {
+ expandLoadInst(Inst, IDLoc, Out, STI, IsImmOpnd);
+ return;
+ }
+ expandStoreInst(Inst, IDLoc, Out, STI, IsImmOpnd);
+}
+
+void MipsAsmParser::expandLoadInst(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI, bool IsImmOpnd) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+
+ unsigned DstReg = Inst.getOperand(0).getReg();
+ unsigned BaseReg = Inst.getOperand(1).getReg();
+
const MCInstrDesc &Desc = getInstDesc(Inst.getOpcode());
- int16_t RegClassOp0 = Desc.OpInfo[0].RegClass;
- unsigned RegClassIDOp0 =
- getContext().getRegisterInfo()->getRegClass(RegClassOp0).getID();
- bool IsGPR = (RegClassIDOp0 == Mips::GPR32RegClassID) ||
- (RegClassIDOp0 == Mips::GPR64RegClassID);
- if (isLoad && IsGPR && (BaseRegNum != RegOpNum))
- TmpRegNum = RegOpNum;
- else {
+ int16_t DstRegClass = Desc.OpInfo[0].RegClass;
+ unsigned DstRegClassID =
+ getContext().getRegisterInfo()->getRegClass(DstRegClass).getID();
+ bool IsGPR = (DstRegClassID == Mips::GPR32RegClassID) ||
+ (DstRegClassID == Mips::GPR64RegClassID);
+
+ if (IsImmOpnd) {
+ // Try to use DstReg as the temporary.
+ if (IsGPR && (BaseReg != DstReg)) {
+ TOut.emitLoadWithImmOffset(Inst.getOpcode(), DstReg, BaseReg,
+ Inst.getOperand(2).getImm(), DstReg, IDLoc,
+ STI);
+ return;
+ }
+
// At this point we need AT to perform the expansions and we exit if it is
// not available.
- TmpRegNum = getATReg(IDLoc);
- if (!TmpRegNum)
+ unsigned ATReg = getATReg(IDLoc);
+ if (!ATReg)
return;
+
+ TOut.emitLoadWithImmOffset(Inst.getOpcode(), DstReg, BaseReg,
+ Inst.getOperand(2).getImm(), ATReg, IDLoc, STI);
+ return;
}
- emitRX(Mips::LUi, TmpRegNum,
- isImmOpnd ? MCOperand::createImm(HiOffset)
- : MCOperand::createExpr(evaluateRelocExpr(ExprOffset, "hi")),
- IDLoc, Instructions);
- // Add temp register to base.
- if (BaseRegNum != Mips::ZERO)
- emitRRR(Mips::ADDu, TmpRegNum, TmpRegNum, BaseRegNum, IDLoc, Instructions);
- // And finally, create original instruction with low part
- // of offset and new base.
- emitRRX(Inst.getOpcode(), RegOpNum, TmpRegNum,
- isImmOpnd
- ? MCOperand::createImm(LoOffset)
- : MCOperand::createExpr(evaluateRelocExpr(ExprOffset, "lo")),
- IDLoc, Instructions);
+ const MCExpr *ExprOffset = Inst.getOperand(2).getExpr();
+ MCOperand LoOperand = MCOperand::createExpr(
+ MipsMCExpr::create(MipsMCExpr::MEK_LO, ExprOffset, getContext()));
+ MCOperand HiOperand = MCOperand::createExpr(
+ MipsMCExpr::create(MipsMCExpr::MEK_HI, ExprOffset, getContext()));
+
+ // Try to use DstReg as the temporary.
+ if (IsGPR && (BaseReg != DstReg)) {
+ TOut.emitLoadWithSymOffset(Inst.getOpcode(), DstReg, BaseReg, HiOperand,
+ LoOperand, DstReg, IDLoc, STI);
+ return;
+ }
+
+ // At this point we need AT to perform the expansions and we exit if it is
+ // not available.
+ unsigned ATReg = getATReg(IDLoc);
+ if (!ATReg)
+ return;
+
+ TOut.emitLoadWithSymOffset(Inst.getOpcode(), DstReg, BaseReg, HiOperand,
+ LoOperand, ATReg, IDLoc, STI);
}
-bool
-MipsAsmParser::expandLoadStoreMultiple(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+void MipsAsmParser::expandStoreInst(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI,
+ bool IsImmOpnd) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+
+ unsigned SrcReg = Inst.getOperand(0).getReg();
+ unsigned BaseReg = Inst.getOperand(1).getReg();
+
+ if (IsImmOpnd) {
+ TOut.emitStoreWithImmOffset(Inst.getOpcode(), SrcReg, BaseReg,
+ Inst.getOperand(2).getImm(),
+ [&]() { return getATReg(IDLoc); }, IDLoc, STI);
+ return;
+ }
+
+ unsigned ATReg = getATReg(IDLoc);
+ if (!ATReg)
+ return;
+
+ const MCExpr *ExprOffset = Inst.getOperand(2).getExpr();
+ MCOperand LoOperand = MCOperand::createExpr(
+ MipsMCExpr::create(MipsMCExpr::MEK_LO, ExprOffset, getContext()));
+ MCOperand HiOperand = MCOperand::createExpr(
+ MipsMCExpr::create(MipsMCExpr::MEK_HI, ExprOffset, getContext()));
+ TOut.emitStoreWithSymOffset(Inst.getOpcode(), SrcReg, BaseReg, HiOperand,
+ LoOperand, ATReg, IDLoc, STI);
+}
+
+bool MipsAsmParser::expandLoadStoreMultiple(MCInst &Inst, SMLoc IDLoc,
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
unsigned OpNum = Inst.getNumOperands();
unsigned Opcode = Inst.getOpcode();
unsigned NewOpcode = Opcode == Mips::SWM_MM ? Mips::SWM32_MM : Mips::LWM32_MM;
@@ -2650,12 +2748,14 @@ MipsAsmParser::expandLoadStoreMultiple(MCInst &Inst, SMLoc IDLoc,
}
Inst.setOpcode(NewOpcode);
- Instructions.push_back(Inst);
+ Out.EmitInstruction(Inst, *STI);
return false;
}
bool MipsAsmParser::expandCondBranches(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
bool EmittedNoMacroWarning = false;
unsigned PseudoOpcode = Inst.getOpcode();
unsigned SrcReg = Inst.getOperand(0).getReg();
@@ -2730,7 +2830,7 @@ bool MipsAsmParser::expandCondBranches(MCInst &Inst, SMLoc IDLoc,
}
if (loadImmediate(TrgOp.getImm(), TrgReg, Mips::NoRegister, !isGP64bit(),
- false, IDLoc, Instructions))
+ false, IDLoc, Out, STI))
return true;
}
@@ -2790,37 +2890,37 @@ bool MipsAsmParser::expandCondBranches(MCInst &Inst, SMLoc IDLoc,
// with GAS' behaviour. However, they may not generate the most efficient
// code in some circumstances.
if (PseudoOpcode == Mips::BLT) {
- emitRX(Mips::BLTZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr), IDLoc,
- Instructions);
+ TOut.emitRX(Mips::BLTZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr),
+ IDLoc, STI);
return false;
}
if (PseudoOpcode == Mips::BLE) {
- emitRX(Mips::BLEZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr), IDLoc,
- Instructions);
+ TOut.emitRX(Mips::BLEZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr),
+ IDLoc, STI);
Warning(IDLoc, "branch is always taken");
return false;
}
if (PseudoOpcode == Mips::BGE) {
- emitRX(Mips::BGEZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr), IDLoc,
- Instructions);
+ TOut.emitRX(Mips::BGEZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr),
+ IDLoc, STI);
Warning(IDLoc, "branch is always taken");
return false;
}
if (PseudoOpcode == Mips::BGT) {
- emitRX(Mips::BGTZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr), IDLoc,
- Instructions);
+ TOut.emitRX(Mips::BGTZ, Mips::ZERO, MCOperand::createExpr(OffsetExpr),
+ IDLoc, STI);
return false;
}
if (PseudoOpcode == Mips::BGTU) {
- emitRRX(Mips::BNE, Mips::ZERO, Mips::ZERO,
- MCOperand::createExpr(OffsetExpr), IDLoc, Instructions);
+ TOut.emitRRX(Mips::BNE, Mips::ZERO, Mips::ZERO,
+ MCOperand::createExpr(OffsetExpr), IDLoc, STI);
return false;
}
if (AcceptsEquality) {
// If both registers are $0 and the pseudo-branch accepts equality, it
// will always be taken, so we emit an unconditional branch.
- emitRRX(Mips::BEQ, Mips::ZERO, Mips::ZERO,
- MCOperand::createExpr(OffsetExpr), IDLoc, Instructions);
+ TOut.emitRRX(Mips::BEQ, Mips::ZERO, Mips::ZERO,
+ MCOperand::createExpr(OffsetExpr), IDLoc, STI);
Warning(IDLoc, "branch is always taken");
return false;
}
@@ -2844,8 +2944,8 @@ bool MipsAsmParser::expandCondBranches(MCInst &Inst, SMLoc IDLoc,
// the pseudo-branch will always be taken, so we emit an unconditional
// branch.
// This only applies to unsigned pseudo-branches.
- emitRRX(Mips::BEQ, Mips::ZERO, Mips::ZERO,
- MCOperand::createExpr(OffsetExpr), IDLoc, Instructions);
+ TOut.emitRRX(Mips::BEQ, Mips::ZERO, Mips::ZERO,
+ MCOperand::createExpr(OffsetExpr), IDLoc, STI);
Warning(IDLoc, "branch is always taken");
return false;
}
@@ -2862,17 +2962,17 @@ bool MipsAsmParser::expandCondBranches(MCInst &Inst, SMLoc IDLoc,
//
// Because only BLEU and BGEU branch on equality, we can use the
// AcceptsEquality variable to decide when to emit the BEQZ.
- emitRRX(AcceptsEquality ? Mips::BEQ : Mips::BNE,
- IsSrcRegZero ? TrgReg : SrcReg, Mips::ZERO,
- MCOperand::createExpr(OffsetExpr), IDLoc, Instructions);
+ TOut.emitRRX(AcceptsEquality ? Mips::BEQ : Mips::BNE,
+ IsSrcRegZero ? TrgReg : SrcReg, Mips::ZERO,
+ MCOperand::createExpr(OffsetExpr), IDLoc, STI);
return false;
}
// If we have a signed pseudo-branch and one of the registers is $0,
// we can use an appropriate compare-to-zero branch. We select which one
// to use in the switch statement above.
- emitRX(IsSrcRegZero ? ZeroSrcOpcode : ZeroTrgOpcode,
- IsSrcRegZero ? TrgReg : SrcReg, MCOperand::createExpr(OffsetExpr),
- IDLoc, Instructions);
+ TOut.emitRX(IsSrcRegZero ? ZeroSrcOpcode : ZeroTrgOpcode,
+ IsSrcRegZero ? TrgReg : SrcReg,
+ MCOperand::createExpr(OffsetExpr), IDLoc, STI);
return false;
}
@@ -2900,32 +3000,33 @@ bool MipsAsmParser::expandCondBranches(MCInst &Inst, SMLoc IDLoc,
//
// The same applies to the unsigned variants, except that SLTu is used
// instead of SLT.
- emitRRR(IsUnsigned ? Mips::SLTu : Mips::SLT, ATRegNum,
- ReverseOrderSLT ? TrgReg : SrcReg, ReverseOrderSLT ? SrcReg : TrgReg,
- IDLoc, Instructions);
-
- emitRRX(IsLikely ? (AcceptsEquality ? Mips::BEQL : Mips::BNEL)
- : (AcceptsEquality ? Mips::BEQ : Mips::BNE),
- ATRegNum, Mips::ZERO, MCOperand::createExpr(OffsetExpr), IDLoc,
- Instructions);
+ TOut.emitRRR(IsUnsigned ? Mips::SLTu : Mips::SLT, ATRegNum,
+ ReverseOrderSLT ? TrgReg : SrcReg,
+ ReverseOrderSLT ? SrcReg : TrgReg, IDLoc, STI);
+
+ TOut.emitRRX(IsLikely ? (AcceptsEquality ? Mips::BEQL : Mips::BNEL)
+ : (AcceptsEquality ? Mips::BEQ : Mips::BNE),
+ ATRegNum, Mips::ZERO, MCOperand::createExpr(OffsetExpr), IDLoc,
+ STI);
return false;
}
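
The catch-all path above lowers every remaining two-register pseudo-branch to an slt/sltu that leaves the comparison in $at, then branches on $at against $zero, choosing beq when the pseudo accepts equality. A plain C++ restatement of two of those reductions (a sketch; the function names are invented):

    #include <cstdint>

    // blt $rs, $rt, off  ==>  slt $at, $rs, $rt ; bne $at, $zero, off
    static bool takesBlt(int32_t Rs, int32_t Rt) {
      int32_t At = (Rs < Rt) ? 1 : 0; // slt
      return At != 0;                 // bne: taken iff rs < rt
    }

    // bgeu $rs, $rt, off ==>  sltu $at, $rs, $rt ; beq $at, $zero, off
    static bool takesBgeu(uint32_t Rs, uint32_t Rt) {
      uint32_t At = (Rs < Rt) ? 1u : 0u; // sltu
      return At == 0;                    // beq: taken iff rs >= rt
    }
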
-bool MipsAsmParser::expandDiv(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions,
- const bool IsMips64, const bool Signed) {
- if (hasMips32r6()) {
- Error(IDLoc, "instruction not supported on mips32r6 or mips64r6");
- return false;
- }
+bool MipsAsmParser::expandDiv(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI, const bool IsMips64,
+ const bool Signed) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
warnIfNoMacro(IDLoc);
- const MCOperand &RsRegOp = Inst.getOperand(0);
+ const MCOperand &RdRegOp = Inst.getOperand(0);
+ assert(RdRegOp.isReg() && "expected register operand kind");
+ unsigned RdReg = RdRegOp.getReg();
+
+ const MCOperand &RsRegOp = Inst.getOperand(1);
assert(RsRegOp.isReg() && "expected register operand kind");
unsigned RsReg = RsRegOp.getReg();
- const MCOperand &RtRegOp = Inst.getOperand(1);
+ const MCOperand &RtRegOp = Inst.getOperand(2);
assert(RtRegOp.isReg() && "expected register operand kind");
unsigned RtReg = RtRegOp.getReg();
unsigned DivOp;
@@ -2947,15 +3048,15 @@ bool MipsAsmParser::expandDiv(MCInst &Inst, SMLoc IDLoc,
if (IsMips64) {
if (Signed && (RtReg == Mips::ZERO || RtReg == Mips::ZERO_64)) {
if (UseTraps) {
- emitRRI(Mips::TEQ, RtReg, ZeroReg, 0x7, IDLoc, Instructions);
+ TOut.emitRRI(Mips::TEQ, RtReg, ZeroReg, 0x7, IDLoc, STI);
return false;
}
- emitII(Mips::BREAK, 0x7, 0, IDLoc, Instructions);
+ TOut.emitII(Mips::BREAK, 0x7, 0, IDLoc, STI);
return false;
}
} else {
- emitRR(DivOp, RsReg, RtReg, IDLoc, Instructions);
+ TOut.emitRR(DivOp, RsReg, RtReg, IDLoc, STI);
return false;
}
}
@@ -2964,11 +3065,11 @@ bool MipsAsmParser::expandDiv(MCInst &Inst, SMLoc IDLoc,
Warning(IDLoc, "division by zero");
if (Signed) {
if (UseTraps) {
- emitRRI(Mips::TEQ, RtReg, ZeroReg, 0x7, IDLoc, Instructions);
+ TOut.emitRRI(Mips::TEQ, RtReg, ZeroReg, 0x7, IDLoc, STI);
return false;
}
- emitII(Mips::BREAK, 0x7, 0, IDLoc, Instructions);
+ TOut.emitII(Mips::BREAK, 0x7, 0, IDLoc, STI);
return false;
}
}
@@ -2980,22 +3081,21 @@ bool MipsAsmParser::expandDiv(MCInst &Inst, SMLoc IDLoc,
if (UseTraps) {
BranchTarget = IsMips64 ? 12 : 8;
- emitRRI(Mips::TEQ, RtReg, ZeroReg, 0x7, IDLoc, Instructions);
+ TOut.emitRRI(Mips::TEQ, RtReg, ZeroReg, 0x7, IDLoc, STI);
} else {
BranchTarget = IsMips64 ? 20 : 16;
BranchTargetNoTraps = 8;
// Branch to the li instruction.
- emitRRI(Mips::BNE, RtReg, ZeroReg, BranchTargetNoTraps, IDLoc,
- Instructions);
+ TOut.emitRRI(Mips::BNE, RtReg, ZeroReg, BranchTargetNoTraps, IDLoc, STI);
}
- emitRR(DivOp, RsReg, RtReg, IDLoc, Instructions);
+ TOut.emitRR(DivOp, RsReg, RtReg, IDLoc, STI);
if (!UseTraps)
- emitII(Mips::BREAK, 0x7, 0, IDLoc, Instructions);
+ TOut.emitII(Mips::BREAK, 0x7, 0, IDLoc, STI);
if (!Signed) {
- emitR(Mips::MFLO, RsReg, IDLoc, Instructions);
+ TOut.emitR(Mips::MFLO, RdReg, IDLoc, STI);
return false;
}
@@ -3003,32 +3103,73 @@ bool MipsAsmParser::expandDiv(MCInst &Inst, SMLoc IDLoc,
if (!ATReg)
return true;
- emitRRI(Mips::ADDiu, ATReg, ZeroReg, -1, IDLoc, Instructions);
+ TOut.emitRRI(Mips::ADDiu, ATReg, ZeroReg, -1, IDLoc, STI);
if (IsMips64) {
// Branch to the mflo instruction.
- emitRRI(Mips::BNE, RtReg, ATReg, BranchTarget, IDLoc, Instructions);
- emitRRI(Mips::ADDiu, ATReg, ZeroReg, 1, IDLoc, Instructions);
- emitRRI(Mips::DSLL32, ATReg, ATReg, 0x1f, IDLoc, Instructions);
+ TOut.emitRRI(Mips::BNE, RtReg, ATReg, BranchTarget, IDLoc, STI);
+ TOut.emitRRI(Mips::ADDiu, ATReg, ZeroReg, 1, IDLoc, STI);
+ TOut.emitRRI(Mips::DSLL32, ATReg, ATReg, 0x1f, IDLoc, STI);
} else {
// Branch to the mflo instruction.
- emitRRI(Mips::BNE, RtReg, ATReg, BranchTarget, IDLoc, Instructions);
- emitRI(Mips::LUi, ATReg, (uint16_t)0x8000, IDLoc, Instructions);
+ TOut.emitRRI(Mips::BNE, RtReg, ATReg, BranchTarget, IDLoc, STI);
+ TOut.emitRI(Mips::LUi, ATReg, (uint16_t)0x8000, IDLoc, STI);
}
if (UseTraps)
- emitRRI(Mips::TEQ, RsReg, ATReg, 0x6, IDLoc, Instructions);
+ TOut.emitRRI(Mips::TEQ, RsReg, ATReg, 0x6, IDLoc, STI);
else {
// Branch to the mflo instruction.
- emitRRI(Mips::BNE, RsReg, ATReg, BranchTargetNoTraps, IDLoc, Instructions);
- emitRRI(Mips::SLL, ZeroReg, ZeroReg, 0, IDLoc, Instructions);
- emitII(Mips::BREAK, 0x6, 0, IDLoc, Instructions);
+ TOut.emitRRI(Mips::BNE, RsReg, ATReg, BranchTargetNoTraps, IDLoc, STI);
+ TOut.emitRRI(Mips::SLL, ZeroReg, ZeroReg, 0, IDLoc, STI);
+ TOut.emitII(Mips::BREAK, 0x6, 0, IDLoc, STI);
+ }
+ TOut.emitR(Mips::MFLO, RdReg, IDLoc, STI);
+ return false;
+}
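
The teq/break pairs and branch offsets above guard the two operand patterns that make a signed divide undefined. As a standalone C++ helper under a 32-bit assumption (a sketch; the name is invented):

    #include <climits>
    #include <cstdint>

    // Mirrors the guards in the div expansion: divide-by-zero traps with
    // code 0x7, INT_MIN / -1 overflow traps with code 0x6; otherwise the
    // quotient comes back through mflo.
    static bool checkedSDiv32(int32_t Rs, int32_t Rt, int32_t &Quot) {
      if (Rt == 0)
        return false;                  // teq/break 0x7
      if (Rs == INT32_MIN && Rt == -1)
        return false;                  // teq/break 0x6
      Quot = Rs / Rt;                  // div $rs, $rt ; mflo $rd
      return true;
    }
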
+
+bool MipsAsmParser::expandTrunc(MCInst &Inst, bool IsDouble, bool Is64FPU,
+ SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+
+ assert(Inst.getNumOperands() == 3 && "Invalid operand count");
+ assert(Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg() &&
+ Inst.getOperand(2).isReg() && "Invalid instruction operand.");
+
+ unsigned FirstReg = Inst.getOperand(0).getReg();
+ unsigned SecondReg = Inst.getOperand(1).getReg();
+ unsigned ThirdReg = Inst.getOperand(2).getReg();
+
+ if (hasMips1() && !hasMips2()) {
+ unsigned ATReg = getATReg(IDLoc);
+ if (!ATReg)
+ return true;
+ TOut.emitRR(Mips::CFC1, ThirdReg, Mips::RA, IDLoc, STI);
+ TOut.emitRR(Mips::CFC1, ThirdReg, Mips::RA, IDLoc, STI);
+ TOut.emitNop(IDLoc, STI);
+ TOut.emitRRI(Mips::ORi, ATReg, ThirdReg, 0x3, IDLoc, STI);
+ TOut.emitRRI(Mips::XORi, ATReg, ATReg, 0x2, IDLoc, STI);
+ TOut.emitRR(Mips::CTC1, Mips::RA, ATReg, IDLoc, STI);
+ TOut.emitNop(IDLoc, STI);
+ TOut.emitRR(IsDouble ? (Is64FPU ? Mips::CVT_W_D64 : Mips::CVT_W_D32)
+ : Mips::CVT_W_S,
+ FirstReg, SecondReg, IDLoc, STI);
+ TOut.emitRR(Mips::CTC1, Mips::RA, ThirdReg, IDLoc, STI);
+ TOut.emitNop(IDLoc, STI);
+ return false;
}
- emitR(Mips::MFLO, RsReg, IDLoc, Instructions);
+
+ TOut.emitRR(IsDouble ? (Is64FPU ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32)
+ : Mips::TRUNC_W_S,
+ FirstReg, SecondReg, IDLoc, STI);
+
return false;
}
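
On MIPS I, where trunc.w.s does not exist, the ori/xori pair above rewrites the FCSR rounding-mode field around a plain cvt.w: (fcsr | 3) ^ 2 forces the low two bits to 0b01, the round-toward-zero mode, and leaves every other bit untouched. A quick check of the bit trick:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t Fcsr = 0; Fcsr < 16; ++Fcsr) {
        uint32_t Forced = (Fcsr | 3) ^ 2;     // ori ..., 0x3 ; xori ..., 0x2
        assert((Forced & 3) == 1);            // rounding mode is now RZ
        assert((Forced >> 2) == (Fcsr >> 2)); // other bits unchanged
      }
      return 0;
    }
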
bool MipsAsmParser::expandUlh(MCInst &Inst, bool Signed, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+ MCStreamer &Out, const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+
if (hasMips32r6() || hasMips64r6()) {
Error(IDLoc, "instruction not supported on mips32r6 or mips64r6");
return false;
@@ -3063,7 +3204,7 @@ bool MipsAsmParser::expandUlh(MCInst &Inst, bool Signed, SMLoc IDLoc,
LoadedOffsetInAT = true;
if (loadImmediate(OffsetValue, ATReg, Mips::NoRegister, !ABI.ArePtrs64bit(),
- true, IDLoc, Instructions))
+ true, IDLoc, Out, STI))
return true;
// NOTE: We do this (D)ADDu here instead of doing it in loadImmediate()
@@ -3073,7 +3214,7 @@ bool MipsAsmParser::expandUlh(MCInst &Inst, bool Signed, SMLoc IDLoc,
// NOTE: If there is no source register specified in the ULHU, the parser
// will interpret it as $0.
if (SrcReg != Mips::ZERO && SrcReg != Mips::ZERO_64)
- createAddu(ATReg, ATReg, SrcReg, ABI.ArePtrs64bit(), Instructions);
+ TOut.emitAddu(ATReg, ATReg, SrcReg, ABI.ArePtrs64bit(), STI);
}
unsigned FirstLbuDstReg = LoadedOffsetInAT ? DstReg : ATReg;
@@ -3091,21 +3232,23 @@ bool MipsAsmParser::expandUlh(MCInst &Inst, bool Signed, SMLoc IDLoc,
unsigned SllReg = LoadedOffsetInAT ? DstReg : ATReg;
- emitRRI(Signed ? Mips::LB : Mips::LBu, FirstLbuDstReg, LbuSrcReg,
- FirstLbuOffset, IDLoc, Instructions);
+ TOut.emitRRI(Signed ? Mips::LB : Mips::LBu, FirstLbuDstReg, LbuSrcReg,
+ FirstLbuOffset, IDLoc, STI);
- emitRRI(Mips::LBu, SecondLbuDstReg, LbuSrcReg, SecondLbuOffset, IDLoc,
- Instructions);
+ TOut.emitRRI(Mips::LBu, SecondLbuDstReg, LbuSrcReg, SecondLbuOffset, IDLoc,
+ STI);
- emitRRI(Mips::SLL, SllReg, SllReg, 8, IDLoc, Instructions);
+ TOut.emitRRI(Mips::SLL, SllReg, SllReg, 8, IDLoc, STI);
- emitRRR(Mips::OR, DstReg, DstReg, ATReg, IDLoc, Instructions);
+ TOut.emitRRR(Mips::OR, DstReg, DstReg, ATReg, IDLoc, STI);
return false;
}
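
The byte loads, shift, and OR above assemble an unaligned halfword without ever issuing lh. A C++ restatement of the big-endian case (a sketch: the little-endian expansion swaps the two byte offsets, and the buffer here is only illustrative):

    #include <cassert>
    #include <cstdint>

    static int32_t ulhSigned(const uint8_t *P) {
      int32_t SignByte = (int8_t)P[0]; // lb  $dst, 0($src): sign-carrying MSB
      uint32_t LowByte = P[1];         // lbu $at, 1($src): zero-extended LSB
      // sll $dst, $dst, 8 ; or $dst, $dst, $at
      return (int32_t)(((uint32_t)SignByte << 8) | LowByte);
    }

    int main() {
      const uint8_t Bytes[] = {0xff, 0x10};
      assert(ulhSigned(Bytes) == -240); // i.e. (int16_t)0xff10
      return 0;
    }
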
-bool MipsAsmParser::expandUlw(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+bool MipsAsmParser::expandUlw(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+
if (hasMips32r6() || hasMips64r6()) {
Error(IDLoc, "instruction not supported on mips32r6 or mips64r6");
return false;
@@ -3137,7 +3280,7 @@ bool MipsAsmParser::expandUlw(MCInst &Inst, SMLoc IDLoc,
warnIfNoMacro(IDLoc);
if (loadImmediate(OffsetValue, ATReg, Mips::NoRegister, !ABI.ArePtrs64bit(),
- true, IDLoc, Instructions))
+ true, IDLoc, Out, STI))
return true;
// NOTE: We do this (D)ADDu here instead of doing it in loadImmediate()
@@ -3147,7 +3290,7 @@ bool MipsAsmParser::expandUlw(MCInst &Inst, SMLoc IDLoc,
// NOTE: If there is no source register specified in the ULW, the parser
// will interpret it as $0.
if (SrcReg != Mips::ZERO && SrcReg != Mips::ZERO_64)
- createAddu(ATReg, ATReg, SrcReg, ABI.ArePtrs64bit(), Instructions);
+ TOut.emitAddu(ATReg, ATReg, SrcReg, ABI.ArePtrs64bit(), STI);
}
unsigned FinalSrcReg = LoadedOffsetInAT ? ATReg : SrcReg;
@@ -3160,17 +3303,19 @@ bool MipsAsmParser::expandUlw(MCInst &Inst, SMLoc IDLoc,
RightLoadOffset = LoadedOffsetInAT ? 3 : (OffsetValue + 3);
}
- emitRRI(Mips::LWL, DstRegOp.getReg(), FinalSrcReg, LeftLoadOffset, IDLoc,
- Instructions);
+ TOut.emitRRI(Mips::LWL, DstRegOp.getReg(), FinalSrcReg, LeftLoadOffset, IDLoc,
+ STI);
- emitRRI(Mips::LWR, DstRegOp.getReg(), FinalSrcReg, RightLoadOffset, IDLoc,
- Instructions);
+ TOut.emitRRI(Mips::LWR, DstRegOp.getReg(), FinalSrcReg, RightLoadOffset,
+ IDLoc, STI);
return false;
}
bool MipsAsmParser::expandAliasImmediate(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
assert (Inst.getNumOperands() == 3 && "Invalid operand count");
assert (Inst.getOperand(0).isReg() &&
@@ -3195,7 +3340,7 @@ bool MipsAsmParser::expandAliasImmediate(MCInst &Inst, SMLoc IDLoc,
DstReg = ATReg;
}
- if (!loadImmediate(ImmValue, DstReg, Mips::NoRegister, Is32Bit, false, Inst.getLoc(), Instructions)) {
+ if (!loadImmediate(ImmValue, DstReg, Mips::NoRegister, Is32Bit, false, Inst.getLoc(), Out, STI)) {
switch (FinalOpcode) {
default:
llvm_unreachable("unimplemented expansion");
@@ -3226,17 +3371,17 @@ bool MipsAsmParser::expandAliasImmediate(MCInst &Inst, SMLoc IDLoc,
}
if (FinalDstReg == Mips::NoRegister)
- emitRRR(FinalOpcode, DstReg, DstReg, SrcReg, IDLoc, Instructions);
+ TOut.emitRRR(FinalOpcode, DstReg, DstReg, SrcReg, IDLoc, STI);
else
- emitRRR(FinalOpcode, FinalDstReg, FinalDstReg, DstReg, IDLoc,
- Instructions);
+ TOut.emitRRR(FinalOpcode, FinalDstReg, FinalDstReg, DstReg, IDLoc, STI);
return false;
}
return true;
}
-bool MipsAsmParser::expandRotation(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+bool MipsAsmParser::expandRotation(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
unsigned ATReg = Mips::NoRegister;
unsigned DReg = Inst.getOperand(0).getReg();
unsigned SReg = Inst.getOperand(1).getReg();
@@ -3255,13 +3400,13 @@ bool MipsAsmParser::expandRotation(MCInst &Inst, SMLoc IDLoc,
}
if (Inst.getOpcode() == Mips::ROL) {
- emitRRR(Mips::SUBu, TmpReg, Mips::ZERO, TReg, Inst.getLoc(), Instructions);
- emitRRR(Mips::ROTRV, DReg, SReg, TmpReg, Inst.getLoc(), Instructions);
+ TOut.emitRRR(Mips::SUBu, TmpReg, Mips::ZERO, TReg, Inst.getLoc(), STI);
+ TOut.emitRRR(Mips::ROTRV, DReg, SReg, TmpReg, Inst.getLoc(), STI);
return false;
}
if (Inst.getOpcode() == Mips::ROR) {
- emitRRR(Mips::ROTRV, DReg, SReg, TReg, Inst.getLoc(), Instructions);
+ TOut.emitRRR(Mips::ROTRV, DReg, SReg, TReg, Inst.getLoc(), STI);
return false;
}
@@ -3287,10 +3432,10 @@ bool MipsAsmParser::expandRotation(MCInst &Inst, SMLoc IDLoc,
if (!ATReg)
return true;
- emitRRR(Mips::SUBu, ATReg, Mips::ZERO, TReg, Inst.getLoc(), Instructions);
- emitRRR(FirstShift, ATReg, SReg, ATReg, Inst.getLoc(), Instructions);
- emitRRR(SecondShift, DReg, SReg, TReg, Inst.getLoc(), Instructions);
- emitRRR(Mips::OR, DReg, DReg, ATReg, Inst.getLoc(), Instructions);
+ TOut.emitRRR(Mips::SUBu, ATReg, Mips::ZERO, TReg, Inst.getLoc(), STI);
+ TOut.emitRRR(FirstShift, ATReg, SReg, ATReg, Inst.getLoc(), STI);
+ TOut.emitRRR(SecondShift, DReg, SReg, TReg, Inst.getLoc(), STI);
+ TOut.emitRRR(Mips::OR, DReg, DReg, ATReg, Inst.getLoc(), STI);
return false;
}
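
Two different lowerings meet here: on cores with ROTRV, rotate-left becomes rotate-right by the negated amount (the shifter reads only the low five bits), and on older cores it becomes two opposite variable shifts ORed together. Both identities, checked standalone (sketch):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t X = 0xdeadbeef;
      for (unsigned N = 0; N < 32; ++N) {
        uint32_t Rol = N ? (X << N) | (X >> (32 - N)) : X; // reference
        // subu $tmp, $zero, $t ; rotrv: -N acts as (32 - N) & 31.
        unsigned NegN = (32 - N) & 31;
        uint32_t ViaRotr = NegN ? (X >> NegN) | (X << (32 - NegN)) : X;
        // srlv/sllv + or synthesis; N == 0 degenerates to x | x.
        uint32_t Synth = (X << (N & 31)) | (X >> ((32 - N) & 31));
        assert(Rol == ViaRotr && Rol == Synth);
      }
      return 0;
    }
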
@@ -3299,8 +3444,9 @@ bool MipsAsmParser::expandRotation(MCInst &Inst, SMLoc IDLoc,
}
bool MipsAsmParser::expandRotationImm(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
-
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
unsigned ATReg = Mips::NoRegister;
unsigned DReg = Inst.getOperand(0).getReg();
unsigned SReg = Inst.getOperand(1).getReg();
@@ -3316,12 +3462,12 @@ bool MipsAsmParser::expandRotationImm(MCInst &Inst, SMLoc IDLoc,
uint64_t ShiftValue = ImmValue;
if (ImmValue != 0)
ShiftValue = MaxShift - ImmValue;
- emitRRI(Mips::ROTR, DReg, SReg, ShiftValue, Inst.getLoc(), Instructions);
+ TOut.emitRRI(Mips::ROTR, DReg, SReg, ShiftValue, Inst.getLoc(), STI);
return false;
}
if (Inst.getOpcode() == Mips::RORImm) {
- emitRRI(Mips::ROTR, DReg, SReg, ImmValue, Inst.getLoc(), Instructions);
+ TOut.emitRRI(Mips::ROTR, DReg, SReg, ImmValue, Inst.getLoc(), STI);
return false;
}
@@ -3331,7 +3477,7 @@ bool MipsAsmParser::expandRotationImm(MCInst &Inst, SMLoc IDLoc,
if (hasMips32()) {
if (ImmValue == 0) {
- emitRRI(Mips::SRL, DReg, SReg, 0, Inst.getLoc(), Instructions);
+ TOut.emitRRI(Mips::SRL, DReg, SReg, 0, Inst.getLoc(), STI);
return false;
}
@@ -3352,9 +3498,9 @@ bool MipsAsmParser::expandRotationImm(MCInst &Inst, SMLoc IDLoc,
if (!ATReg)
return true;
- emitRRI(FirstShift, ATReg, SReg, ImmValue, Inst.getLoc(), Instructions);
- emitRRI(SecondShift, DReg, SReg, 32 - ImmValue, Inst.getLoc(), Instructions);
- emitRRR(Mips::OR, DReg, DReg, ATReg, Inst.getLoc(), Instructions);
+ TOut.emitRRI(FirstShift, ATReg, SReg, ImmValue, Inst.getLoc(), STI);
+ TOut.emitRRI(SecondShift, DReg, SReg, 32 - ImmValue, Inst.getLoc(), STI);
+ TOut.emitRRR(Mips::OR, DReg, DReg, ATReg, Inst.getLoc(), STI);
return false;
}
@@ -3362,9 +3508,9 @@ bool MipsAsmParser::expandRotationImm(MCInst &Inst, SMLoc IDLoc,
return true;
}
-bool MipsAsmParser::expandDRotation(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
-
+bool MipsAsmParser::expandDRotation(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
unsigned ATReg = Mips::NoRegister;
unsigned DReg = Inst.getOperand(0).getReg();
unsigned SReg = Inst.getOperand(1).getReg();
@@ -3383,13 +3529,13 @@ bool MipsAsmParser::expandDRotation(MCInst &Inst, SMLoc IDLoc,
}
if (Inst.getOpcode() == Mips::DROL) {
- emitRRR(Mips::DSUBu, TmpReg, Mips::ZERO, TReg, Inst.getLoc(), Instructions);
- emitRRR(Mips::DROTRV, DReg, SReg, TmpReg, Inst.getLoc(), Instructions);
+ TOut.emitRRR(Mips::DSUBu, TmpReg, Mips::ZERO, TReg, Inst.getLoc(), STI);
+ TOut.emitRRR(Mips::DROTRV, DReg, SReg, TmpReg, Inst.getLoc(), STI);
return false;
}
if (Inst.getOpcode() == Mips::DROR) {
- emitRRR(Mips::DROTRV, DReg, SReg, TReg, Inst.getLoc(), Instructions);
+ TOut.emitRRR(Mips::DROTRV, DReg, SReg, TReg, Inst.getLoc(), STI);
return false;
}
@@ -3415,10 +3561,10 @@ bool MipsAsmParser::expandDRotation(MCInst &Inst, SMLoc IDLoc,
if (!ATReg)
return true;
- emitRRR(Mips::DSUBu, ATReg, Mips::ZERO, TReg, Inst.getLoc(), Instructions);
- emitRRR(FirstShift, ATReg, SReg, ATReg, Inst.getLoc(), Instructions);
- emitRRR(SecondShift, DReg, SReg, TReg, Inst.getLoc(), Instructions);
- emitRRR(Mips::OR, DReg, DReg, ATReg, Inst.getLoc(), Instructions);
+ TOut.emitRRR(Mips::DSUBu, ATReg, Mips::ZERO, TReg, Inst.getLoc(), STI);
+ TOut.emitRRR(FirstShift, ATReg, SReg, ATReg, Inst.getLoc(), STI);
+ TOut.emitRRR(SecondShift, DReg, SReg, TReg, Inst.getLoc(), STI);
+ TOut.emitRRR(Mips::OR, DReg, DReg, ATReg, Inst.getLoc(), STI);
return false;
}
@@ -3427,8 +3573,9 @@ bool MipsAsmParser::expandDRotation(MCInst &Inst, SMLoc IDLoc,
}
bool MipsAsmParser::expandDRotationImm(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
-
+ MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
unsigned ATReg = Mips::NoRegister;
unsigned DReg = Inst.getOperand(0).getReg();
unsigned SReg = Inst.getOperand(1).getReg();
@@ -3462,7 +3609,7 @@ bool MipsAsmParser::expandDRotationImm(MCInst &Inst, SMLoc IDLoc,
if (Inst.getOpcode() == Mips::DROLImm)
ShiftValue = (32 - ImmValue % 32) % 32;
- emitRRI(FinalOpcode, DReg, SReg, ShiftValue, Inst.getLoc(), Instructions);
+ TOut.emitRRI(FinalOpcode, DReg, SReg, ShiftValue, Inst.getLoc(), STI);
return false;
}
@@ -3470,7 +3617,7 @@ bool MipsAsmParser::expandDRotationImm(MCInst &Inst, SMLoc IDLoc,
if (hasMips64()) {
if (ImmValue == 0) {
- emitRRI(Mips::DSRL, DReg, SReg, 0, Inst.getLoc(), Instructions);
+ TOut.emitRRI(Mips::DSRL, DReg, SReg, 0, Inst.getLoc(), STI);
return false;
}
@@ -3511,9 +3658,10 @@ bool MipsAsmParser::expandDRotationImm(MCInst &Inst, SMLoc IDLoc,
if (!ATReg)
return true;
- emitRRI(FirstShift, ATReg, SReg, ImmValue % 32, Inst.getLoc(), Instructions);
- emitRRI(SecondShift, DReg, SReg, (32 - ImmValue % 32) % 32, Inst.getLoc(), Instructions);
- emitRRR(Mips::OR, DReg, DReg, ATReg, Inst.getLoc(), Instructions);
+ TOut.emitRRI(FirstShift, ATReg, SReg, ImmValue % 32, Inst.getLoc(), STI);
+ TOut.emitRRI(SecondShift, DReg, SReg, (32 - ImmValue % 32) % 32,
+ Inst.getLoc(), STI);
+ TOut.emitRRR(Mips::OR, DReg, DReg, ATReg, Inst.getLoc(), STI);
return false;
}
@@ -3521,49 +3669,76 @@ bool MipsAsmParser::expandDRotationImm(MCInst &Inst, SMLoc IDLoc,
return true;
}
-void MipsAsmParser::createNop(bool hasShortDelaySlot, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
- if (hasShortDelaySlot)
- emitRR(Mips::MOVE16_MM, Mips::ZERO, Mips::ZERO, IDLoc, Instructions);
- else
- emitRRI(Mips::SLL, Mips::ZERO, Mips::ZERO, 0, IDLoc, Instructions);
-}
+bool MipsAsmParser::expandAbs(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+ unsigned FirstRegOp = Inst.getOperand(0).getReg();
+ unsigned SecondRegOp = Inst.getOperand(1).getReg();
-void MipsAsmParser::createAddu(unsigned DstReg, unsigned SrcReg,
- unsigned TrgReg, bool Is64Bit,
- SmallVectorImpl<MCInst> &Instructions) {
- emitRRR(Is64Bit ? Mips::DADDu : Mips::ADDu, DstReg, SrcReg, TrgReg, SMLoc(),
- Instructions);
-}
-
-void MipsAsmParser::createCpRestoreMemOp(
- bool IsLoad, int StackOffset, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
- // If the offset can not fit into 16 bits, we need to expand.
- if (!isInt<16>(StackOffset)) {
- MCInst MemInst;
- MemInst.setOpcode(IsLoad ? Mips::LW : Mips::SW);
- MemInst.addOperand(MCOperand::createReg(Mips::GP));
- MemInst.addOperand(MCOperand::createReg(Mips::SP));
- MemInst.addOperand(MCOperand::createImm(StackOffset));
- expandMemInst(MemInst, IDLoc, Instructions, IsLoad, true /*HasImmOpnd*/);
- return;
- }
+ TOut.emitRI(Mips::BGEZ, SecondRegOp, 8, IDLoc, STI);
+ if (FirstRegOp != SecondRegOp)
+ TOut.emitRRR(Mips::ADDu, FirstRegOp, SecondRegOp, Mips::ZERO, IDLoc, STI);
+ else
+ TOut.emitEmptyDelaySlot(false, IDLoc, STI);
+ TOut.emitRRR(Mips::SUB, FirstRegOp, Mips::ZERO, SecondRegOp, IDLoc, STI);
- emitRRI(IsLoad ? Mips::LW : Mips::SW, Mips::GP, Mips::SP, StackOffset, IDLoc,
- Instructions);
+ return false;
}
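
The new abs expansion branches over the negate when the source is already non-negative; the move (or a nop, when $rd and $rs coincide) rides in the delay slot either way. Its net effect, as a sketch:

    #include <cassert>
    #include <cstdint>

    static int32_t absExpansion(int32_t Rs) {
      // bgez $rs, 1f          -- skip the negate when Rs >= 0
      // addu $rd, $rs, $zero  -- delay slot: the move always executes
      int32_t Rd = Rs;
      if (Rs < 0)
        Rd = 0 - Rs; // sub $rd, $zero, $rs
      // 1:
      return Rd;
    }

    int main() {
      assert(absExpansion(-5) == 5 && absExpansion(7) == 7);
      return 0;
    }
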
unsigned MipsAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
+ switch (Inst.getOpcode()) {
// As described by the Mips32r2 spec, the registers Rd and Rs for
// jalr.hb must be different.
- unsigned Opcode = Inst.getOpcode();
-
- if (Opcode == Mips::JALR_HB &&
- (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()))
- return Match_RequiresDifferentSrcAndDst;
-
- return Match_Success;
+  // It also applies to registers Rt and Rs of the microMIPSr6 jalrc.hb
+  // instruction and to registers Rd and Base of the microMIPS lwp instruction.
+ case Mips::JALR_HB:
+ case Mips::JALRC_HB_MMR6:
+ case Mips::JALRC_MMR6:
+ if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg())
+ return Match_RequiresDifferentSrcAndDst;
+ return Match_Success;
+ case Mips::LWP_MM:
+ case Mips::LWP_MMR6:
+ if (Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg())
+ return Match_RequiresDifferentSrcAndDst;
+ return Match_Success;
+  // As described by the MIPSR6 spec, the compact branches that compare registers
+ // must:
+ // a) Not use the zero register.
+ // b) Not use the same register twice.
+ // c) rs < rt for bnec, beqc.
+ // NB: For this case, the encoding will swap the operands as their
+ // ordering doesn't matter. GAS performs this transformation too.
+ // Hence, that constraint does not have to be enforced.
+ //
+ // The compact branches that branch iff the signed addition of two registers
+ // would overflow must have rs >= rt. That can be handled like beqc/bnec with
+  // operand swapping. They have no restriction on using the zero register.
+ case Mips::BLEZC:
+ case Mips::BGEZC:
+ case Mips::BGTZC:
+ case Mips::BLTZC:
+ case Mips::BEQZC:
+ case Mips::BNEZC:
+ if (Inst.getOperand(0).getReg() == Mips::ZERO)
+ return Match_RequiresNoZeroRegister;
+ return Match_Success;
+ case Mips::BGEC:
+ case Mips::BLTC:
+ case Mips::BGEUC:
+ case Mips::BLTUC:
+ case Mips::BEQC:
+ case Mips::BNEC:
+ if (Inst.getOperand(0).getReg() == Mips::ZERO)
+ return Match_RequiresNoZeroRegister;
+ if (Inst.getOperand(1).getReg() == Mips::ZERO)
+ return Match_RequiresNoZeroRegister;
+ if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg())
+ return Match_RequiresDifferentOperands;
+ return Match_Success;
+ default:
+ return Match_Success;
+ }
}
static SMLoc RefineErrorLoc(const SMLoc Loc, const OperandVector &Operands,
@@ -3584,16 +3759,13 @@ bool MipsAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
bool MatchingInlineAsm) {
MCInst Inst;
- SmallVector<MCInst, 8> Instructions;
unsigned MatchResult =
MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm);
switch (MatchResult) {
case Match_Success: {
- if (processInstruction(Inst, IDLoc, Instructions))
+ if (processInstruction(Inst, IDLoc, Out, STI))
return true;
- for (unsigned i = 0; i < Instructions.size(); i++)
- Out.EmitInstruction(Instructions[i], getSTI());
return false;
}
case Match_MissingFeature:
@@ -3616,6 +3788,10 @@ bool MipsAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
return Error(IDLoc, "invalid instruction");
case Match_RequiresDifferentSrcAndDst:
return Error(IDLoc, "source and destination must be different");
+ case Match_RequiresDifferentOperands:
+ return Error(IDLoc, "registers must be different");
+ case Match_RequiresNoZeroRegister:
+ return Error(IDLoc, "invalid operand ($zero) for instruction");
case Match_Immz:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected '0'");
case Match_UImm1_0:
@@ -3633,9 +3809,15 @@ bool MipsAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
case Match_UImm4_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 4-bit unsigned immediate");
+ case Match_SImm4_0:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected 4-bit signed immediate");
case Match_UImm5_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 5-bit unsigned immediate");
+ case Match_SImm5_0:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected 5-bit signed immediate");
case Match_UImm5_1:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected immediate in range 1 .. 32");
@@ -3653,21 +3835,81 @@ bool MipsAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
case Match_UImm5_Lsl2:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected both 7-bit unsigned immediate and multiple of 4");
+ case Match_UImmRange2_64:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected immediate in range 2 .. 64");
case Match_UImm6_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 6-bit unsigned immediate");
- case Match_SImm6:
+ case Match_UImm6_Lsl2:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected both 8-bit unsigned immediate and multiple of 4");
+ case Match_SImm6_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 6-bit signed immediate");
case Match_UImm7_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 7-bit unsigned immediate");
+ case Match_UImm7_N1:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected immediate in range -1 .. 126");
+ case Match_SImm7_Lsl2:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected both 9-bit signed immediate and multiple of 4");
case Match_UImm8_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 8-bit unsigned immediate");
case Match_UImm10_0:
return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
"expected 10-bit unsigned immediate");
+ case Match_SImm10_0:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected 10-bit signed immediate");
+ case Match_SImm11_0:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected 11-bit signed immediate");
+ case Match_UImm16:
+ case Match_UImm16_Relaxed:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected 16-bit unsigned immediate");
+ case Match_SImm16:
+ case Match_SImm16_Relaxed:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected 16-bit signed immediate");
+ case Match_UImm20_0:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected 20-bit unsigned immediate");
+ case Match_UImm26_0:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected 26-bit unsigned immediate");
+ case Match_SImm32:
+ case Match_SImm32_Relaxed:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected 32-bit signed immediate");
+ case Match_MemSImm9:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected memory with 9-bit signed offset");
+ case Match_MemSImm10:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected memory with 10-bit signed offset");
+ case Match_MemSImm10Lsl1:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected memory with 11-bit signed offset and multiple of 2");
+ case Match_MemSImm10Lsl2:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected memory with 12-bit signed offset and multiple of 4");
+ case Match_MemSImm10Lsl3:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected memory with 13-bit signed offset and multiple of 8");
+ case Match_MemSImm11:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected memory with 11-bit signed offset");
+ case Match_MemSImm12:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected memory with 12-bit signed offset");
+ case Match_MemSImm16:
+ return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo),
+ "expected memory with 16-bit signed offset");
}
llvm_unreachable("Implement any new match types added!");
@@ -3871,19 +4113,6 @@ unsigned MipsAsmParser::getReg(int RC, int RegNo) {
return *(getContext().getRegisterInfo()->getRegClass(RC).begin() + RegNo);
}
-unsigned MipsAsmParser::getGPR(int RegNo) {
- return getReg(isGP64bit() ? Mips::GPR64RegClassID : Mips::GPR32RegClassID,
- RegNo);
-}
-
-int MipsAsmParser::matchRegisterByNumber(unsigned RegNum, unsigned RegClass) {
- if (RegNum >
- getContext().getRegisterInfo()->getRegClass(RegClass).getNumRegs() - 1)
- return -1;
-
- return getReg(RegClass, RegNum);
-}
-
bool MipsAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
MCAsmParser &Parser = getParser();
DEBUG(dbgs() << "parseOperand\n");
@@ -3960,63 +4189,41 @@ bool MipsAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
const MCExpr *MipsAsmParser::evaluateRelocExpr(const MCExpr *Expr,
StringRef RelocStr) {
- const MCExpr *Res;
- // Check the type of the expression.
- if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Expr)) {
- // It's a constant, evaluate reloc value.
- int16_t Val;
- switch (getVariantKind(RelocStr)) {
- case MCSymbolRefExpr::VK_Mips_ABS_LO:
- // Get the 1st 16-bits.
- Val = MCE->getValue() & 0xffff;
- break;
- case MCSymbolRefExpr::VK_Mips_ABS_HI:
- // Get the 2nd 16-bits. Also add 1 if bit 15 is 1, to compensate for low
- // 16 bits being negative.
- Val = ((MCE->getValue() + 0x8000) >> 16) & 0xffff;
- break;
- case MCSymbolRefExpr::VK_Mips_HIGHER:
- // Get the 3rd 16-bits.
- Val = ((MCE->getValue() + 0x80008000LL) >> 32) & 0xffff;
- break;
- case MCSymbolRefExpr::VK_Mips_HIGHEST:
- // Get the 4th 16-bits.
- Val = ((MCE->getValue() + 0x800080008000LL) >> 48) & 0xffff;
- break;
- default:
- report_fatal_error("unsupported reloc value");
- }
- return MCConstantExpr::create(Val, getContext());
- }
-
- if (const MCSymbolRefExpr *MSRE = dyn_cast<MCSymbolRefExpr>(Expr)) {
- // It's a symbol, create a symbolic expression from the symbol.
- const MCSymbol *Symbol = &MSRE->getSymbol();
- MCSymbolRefExpr::VariantKind VK = getVariantKind(RelocStr);
- Res = MCSymbolRefExpr::create(Symbol, VK, getContext());
- return Res;
- }
-
- if (const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr)) {
- MCSymbolRefExpr::VariantKind VK = getVariantKind(RelocStr);
-
- // Try to create target expression.
- if (MipsMCExpr::isSupportedBinaryExpr(VK, BE))
- return MipsMCExpr::create(VK, Expr, getContext());
-
- const MCExpr *LExp = evaluateRelocExpr(BE->getLHS(), RelocStr);
- const MCExpr *RExp = evaluateRelocExpr(BE->getRHS(), RelocStr);
- Res = MCBinaryExpr::create(BE->getOpcode(), LExp, RExp, getContext());
- return Res;
- }
-
- if (const MCUnaryExpr *UN = dyn_cast<MCUnaryExpr>(Expr)) {
- const MCExpr *UnExp = evaluateRelocExpr(UN->getSubExpr(), RelocStr);
- Res = MCUnaryExpr::create(UN->getOpcode(), UnExp, getContext());
- return Res;
- }
- // Just return the original expression.
- return Expr;
+ if (RelocStr == "hi(%neg(%gp_rel")
+ return MipsMCExpr::createGpOff(MipsMCExpr::MEK_HI, Expr, getContext());
+ else if (RelocStr == "lo(%neg(%gp_rel")
+ return MipsMCExpr::createGpOff(MipsMCExpr::MEK_LO, Expr, getContext());
+
+ MipsMCExpr::MipsExprKind Kind =
+ StringSwitch<MipsMCExpr::MipsExprKind>(RelocStr)
+ .Case("call16", MipsMCExpr::MEK_GOT_CALL)
+ .Case("call_hi", MipsMCExpr::MEK_CALL_HI16)
+ .Case("call_lo", MipsMCExpr::MEK_CALL_LO16)
+ .Case("dtprel_hi", MipsMCExpr::MEK_DTPREL_HI)
+ .Case("dtprel_lo", MipsMCExpr::MEK_DTPREL_LO)
+ .Case("got", MipsMCExpr::MEK_GOT)
+ .Case("got_disp", MipsMCExpr::MEK_GOT_DISP)
+ .Case("got_hi", MipsMCExpr::MEK_GOT_HI16)
+ .Case("got_lo", MipsMCExpr::MEK_GOT_LO16)
+ .Case("got_ofst", MipsMCExpr::MEK_GOT_OFST)
+ .Case("got_page", MipsMCExpr::MEK_GOT_PAGE)
+ .Case("gottprel", MipsMCExpr::MEK_GOTTPREL)
+ .Case("gp_rel", MipsMCExpr::MEK_GPREL)
+ .Case("hi", MipsMCExpr::MEK_HI)
+ .Case("higher", MipsMCExpr::MEK_HIGHER)
+ .Case("highest", MipsMCExpr::MEK_HIGHEST)
+ .Case("lo", MipsMCExpr::MEK_LO)
+ .Case("neg", MipsMCExpr::MEK_NEG)
+ .Case("pcrel_hi", MipsMCExpr::MEK_PCREL_HI16)
+ .Case("pcrel_lo", MipsMCExpr::MEK_PCREL_LO16)
+ .Case("tlsgd", MipsMCExpr::MEK_TLSGD)
+ .Case("tlsldm", MipsMCExpr::MEK_TLSLDM)
+ .Case("tprel_hi", MipsMCExpr::MEK_TPREL_HI)
+ .Case("tprel_lo", MipsMCExpr::MEK_TPREL_LO)
+ .Default(MipsMCExpr::MEK_None);
+
+ assert(Kind != MipsMCExpr::MEK_None);
+ return MipsMCExpr::create(Kind, Expr, getContext());
}
bool MipsAsmParser::isEvaluated(const MCExpr *Expr) {
@@ -4248,12 +4455,6 @@ bool MipsAsmParser::searchSymbolAlias(OperandVector &Operands) {
llvm_unreachable("Should never ParseFail");
return false;
}
- } else if (Expr->getKind() == MCExpr::Constant) {
- Parser.Lex();
- const MCConstantExpr *Const = static_cast<const MCConstantExpr *>(Expr);
- Operands.push_back(
- MipsOperand::CreateImm(Const, S, Parser.getTok().getLoc(), *this));
- return true;
}
}
return false;
@@ -4439,46 +4640,6 @@ MipsAsmParser::parseInvNum(OperandVector &Operands) {
}
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseLSAImm(OperandVector &Operands) {
- MCAsmParser &Parser = getParser();
- switch (getLexer().getKind()) {
- default:
- return MatchOperand_NoMatch;
- case AsmToken::LParen:
- case AsmToken::Plus:
- case AsmToken::Minus:
- case AsmToken::Integer:
- break;
- }
-
- const MCExpr *Expr;
- SMLoc S = Parser.getTok().getLoc();
-
- if (getParser().parseExpression(Expr))
- return MatchOperand_ParseFail;
-
- int64_t Val;
- if (!Expr->evaluateAsAbsolute(Val)) {
- Error(S, "expected immediate value");
- return MatchOperand_ParseFail;
- }
-
- // The LSA instruction allows a 2-bit unsigned immediate. For this reason
- // and because the CPU always adds one to the immediate field, the allowed
- // range becomes 1..4. We'll only check the range here and will deal
- // with the addition/subtraction when actually decoding/encoding
- // the instruction.
- if (Val < 1 || Val > 4) {
- Error(S, "immediate not in range (1..4)");
- return MatchOperand_ParseFail;
- }
-
- Operands.push_back(
- MipsOperand::CreateImm(Expr, S, Parser.getTok().getLoc(), *this));
- return MatchOperand_Success;
-}
-
-MipsAsmParser::OperandMatchResultTy
MipsAsmParser::parseRegisterList(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
SmallVector<unsigned, 10> Regs;
@@ -4573,10 +4734,10 @@ MipsAsmParser::parseRegisterPair(OperandVector &Operands) {
return MatchOperand_ParseFail;
SMLoc E = Parser.getTok().getLoc();
- MipsOperand &Op = static_cast<MipsOperand &>(*Operands.back());
- unsigned Reg = Op.getGPR32Reg();
+ MipsOperand Op = static_cast<MipsOperand &>(*Operands.back());
+
Operands.pop_back();
- Operands.push_back(MipsOperand::CreateRegPair(Reg, S, E, *this));
+ Operands.push_back(MipsOperand::CreateRegPair(Op, S, E, *this));
return MatchOperand_Success;
}
@@ -4619,42 +4780,6 @@ MipsAsmParser::parseMovePRegPair(OperandVector &Operands) {
return MatchOperand_Success;
}
-MCSymbolRefExpr::VariantKind MipsAsmParser::getVariantKind(StringRef Symbol) {
-
- MCSymbolRefExpr::VariantKind VK =
- StringSwitch<MCSymbolRefExpr::VariantKind>(Symbol)
- .Case("hi", MCSymbolRefExpr::VK_Mips_ABS_HI)
- .Case("lo", MCSymbolRefExpr::VK_Mips_ABS_LO)
- .Case("gp_rel", MCSymbolRefExpr::VK_Mips_GPREL)
- .Case("call16", MCSymbolRefExpr::VK_Mips_GOT_CALL)
- .Case("got", MCSymbolRefExpr::VK_Mips_GOT)
- .Case("tlsgd", MCSymbolRefExpr::VK_Mips_TLSGD)
- .Case("tlsldm", MCSymbolRefExpr::VK_Mips_TLSLDM)
- .Case("dtprel_hi", MCSymbolRefExpr::VK_Mips_DTPREL_HI)
- .Case("dtprel_lo", MCSymbolRefExpr::VK_Mips_DTPREL_LO)
- .Case("gottprel", MCSymbolRefExpr::VK_Mips_GOTTPREL)
- .Case("tprel_hi", MCSymbolRefExpr::VK_Mips_TPREL_HI)
- .Case("tprel_lo", MCSymbolRefExpr::VK_Mips_TPREL_LO)
- .Case("got_disp", MCSymbolRefExpr::VK_Mips_GOT_DISP)
- .Case("got_page", MCSymbolRefExpr::VK_Mips_GOT_PAGE)
- .Case("got_ofst", MCSymbolRefExpr::VK_Mips_GOT_OFST)
- .Case("hi(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_HI)
- .Case("lo(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_LO)
- .Case("got_hi", MCSymbolRefExpr::VK_Mips_GOT_HI16)
- .Case("got_lo", MCSymbolRefExpr::VK_Mips_GOT_LO16)
- .Case("call_hi", MCSymbolRefExpr::VK_Mips_CALL_HI16)
- .Case("call_lo", MCSymbolRefExpr::VK_Mips_CALL_LO16)
- .Case("higher", MCSymbolRefExpr::VK_Mips_HIGHER)
- .Case("highest", MCSymbolRefExpr::VK_Mips_HIGHEST)
- .Case("pcrel_hi", MCSymbolRefExpr::VK_Mips_PCREL_HI16)
- .Case("pcrel_lo", MCSymbolRefExpr::VK_Mips_PCREL_LO16)
- .Default(MCSymbolRefExpr::VK_None);
-
- assert(VK != MCSymbolRefExpr::VK_None);
-
- return VK;
-}
-
/// Sometimes (i.e. load/stores) the operand may be followed immediately by
/// either this.
/// ::= '(', register, ')'
@@ -4767,6 +4892,8 @@ bool MipsAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
return false;
}
+// FIXME: Given that these have the same name, these should both be
+// consistent in how they affect the Parser.
bool MipsAsmParser::reportParseError(Twine ErrorMsg) {
MCAsmParser &Parser = getParser();
SMLoc Loc = getLexer().getLoc();
@@ -5174,7 +5301,7 @@ bool MipsAsmParser::parseSetArchDirective() {
.Case("mips64r3", "mips64r3")
.Case("mips64r5", "mips64r5")
.Case("mips64r6", "mips64r6")
- .Case("cnmips", "cnmips")
+ .Case("octeon", "cnmips")
.Case("r4000", "mips3") // This is an implementation of Mips3.
.Default("");
@@ -5200,6 +5327,7 @@ bool MipsAsmParser::parseSetFeature(uint64_t Feature) {
getTargetStreamer().emitDirectiveSetDsp();
break;
case Mips::FeatureMicroMips:
+ setFeatureBits(Mips::FeatureMicroMips, "micromips");
getTargetStreamer().emitDirectiveSetMicroMips();
break;
case Mips::FeatureMips1:
@@ -5356,12 +5484,9 @@ bool MipsAsmParser::parseDirectiveCpRestore(SMLoc Loc) {
return false;
}
- // Store the $gp on the stack.
- SmallVector<MCInst, 3> StoreInsts;
- createCpRestoreMemOp(false /*IsLoad*/, CpRestoreOffset /*StackOffset*/, Loc,
- StoreInsts);
-
- getTargetStreamer().emitDirectiveCpRestore(StoreInsts, CpRestoreOffset);
+ if (!getTargetStreamer().emitDirectiveCpRestore(
+ CpRestoreOffset, [&]() { return getATReg(Loc); }, Loc, STI))
+ return true;
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
@@ -5376,7 +5501,6 @@ bool MipsAsmParser::parseDirectiveCPSetup() {
OperandMatchResultTy ResTy = parseAnyRegister(TmpReg);
if (ResTy == MatchOperand_NoMatch) {
reportParseError("expected register containing function address");
- Parser.eatToEndOfStatement();
return false;
}
@@ -5502,6 +5626,7 @@ bool MipsAsmParser::parseDirectiveSet() {
} else if (Tok.getString() == "nomips16") {
return parseSetNoMips16Directive();
} else if (Tok.getString() == "nomicromips") {
+ clearFeatureBits(Mips::FeatureMicroMips, "micromips");
getTargetStreamer().emitDirectiveSetNoMicroMips();
Parser.eatToEndOfStatement();
return false;
@@ -5686,6 +5811,24 @@ bool MipsAsmParser::parseInsnDirective() {
return false;
}
+/// parseSSectionDirective
+/// ::= .sbss
+/// ::= .sdata
+bool MipsAsmParser::parseSSectionDirective(StringRef Section, unsigned Type) {
+ // If this is not the end of the statement, report an error.
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token, expected end of statement");
+ return false;
+ }
+
+ MCSection *ELFSection = getContext().getELFSection(
+ Section, Type, ELF::SHF_WRITE | ELF::SHF_ALLOC | ELF::SHF_MIPS_GPREL);
+ getParser().getStreamer().SwitchSection(ELFSection);
+
+ getParser().Lex(); // Eat EndOfStatement token.
+ return false;
+}
+
/// parseDirectiveModule
/// ::= .module oddspreg
/// ::= .module nooddspreg
@@ -5905,13 +6048,22 @@ bool MipsAsmParser::parseFpABIValue(MipsABIFlagsSection::FpABIKind &FpABI,
}
bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
+ // This returns false if this function recognizes the directive
+  // regardless of whether it successfully handles it or reports an
+ // error. Otherwise it returns true to give the generic parser a
+ // chance at recognizing it.
+
MCAsmParser &Parser = getParser();
StringRef IDVal = DirectiveID.getString();
- if (IDVal == ".cpload")
- return parseDirectiveCpLoad(DirectiveID.getLoc());
- if (IDVal == ".cprestore")
- return parseDirectiveCpRestore(DirectiveID.getLoc());
+ if (IDVal == ".cpload") {
+ parseDirectiveCpLoad(DirectiveID.getLoc());
+ return false;
+ }
+ if (IDVal == ".cprestore") {
+ parseDirectiveCpRestore(DirectiveID.getLoc());
+ return false;
+ }
if (IDVal == ".dword") {
parseDataDirective(8, DirectiveID.getLoc());
return false;
@@ -6068,7 +6220,8 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
}
if (IDVal == ".set") {
- return parseDirectiveSet();
+ parseDirectiveSet();
+ return false;
}
if (IDVal == ".mask" || IDVal == ".fmask") {
@@ -6147,8 +6300,15 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
return false;
}
- if (IDVal == ".option")
- return parseDirectiveOption();
+ if (IDVal == ".hword") {
+ parseDataDirective(2, DirectiveID.getLoc());
+ return false;
+ }
+
+ if (IDVal == ".option") {
+ parseDirectiveOption();
+ return false;
+ }
if (IDVal == ".abicalls") {
getTargetStreamer().emitDirectiveAbiCalls();
@@ -6161,20 +6321,34 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
return false;
}
- if (IDVal == ".cpsetup")
- return parseDirectiveCPSetup();
-
- if (IDVal == ".cpreturn")
- return parseDirectiveCPReturn();
-
- if (IDVal == ".module")
- return parseDirectiveModule();
-
- if (IDVal == ".llvm_internal_mips_reallow_module_directive")
- return parseInternalDirectiveReallowModule();
-
- if (IDVal == ".insn")
- return parseInsnDirective();
+ if (IDVal == ".cpsetup") {
+ parseDirectiveCPSetup();
+ return false;
+ }
+ if (IDVal == ".cpreturn") {
+ parseDirectiveCPReturn();
+ return false;
+ }
+ if (IDVal == ".module") {
+ parseDirectiveModule();
+ return false;
+ }
+ if (IDVal == ".llvm_internal_mips_reallow_module_directive") {
+ parseInternalDirectiveReallowModule();
+ return false;
+ }
+ if (IDVal == ".insn") {
+ parseInsnDirective();
+ return false;
+ }
+ if (IDVal == ".sbss") {
+ parseSSectionDirective(IDVal, ELF::SHT_NOBITS);
+ return false;
+ }
+ if (IDVal == ".sdata") {
+ parseSSectionDirective(IDVal, ELF::SHT_PROGBITS);
+ return false;
+ }
return true;
}
diff --git a/lib/Target/Mips/CMakeLists.txt b/lib/Target/Mips/CMakeLists.txt
index bde843afd3d2..3650cc9fe072 100644
--- a/lib/Target/Mips/CMakeLists.txt
+++ b/lib/Target/Mips/CMakeLists.txt
@@ -27,6 +27,7 @@ add_llvm_target(MipsCodeGen
MipsConstantIslandPass.cpp
MipsDelaySlotFiller.cpp
MipsFastISel.cpp
+ MipsHazardSchedule.cpp
MipsInstrInfo.cpp
MipsISelDAGToDAG.cpp
MipsISelLowering.cpp
diff --git a/lib/Target/Mips/Disassembler/Makefile b/lib/Target/Mips/Disassembler/Makefile
deleted file mode 100644
index 7900373dd2b2..000000000000
--- a/lib/Target/Mips/Disassembler/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/Mips/Disassembler/Makefile ---------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMMipsDisassembler
-
-# Hack: we need to include 'main' Mips target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
index 3c1a771f97e9..aebb4ef419d1 100644
--- a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
+++ b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
@@ -15,7 +15,7 @@
#include "MipsRegisterInfo.h"
#include "MipsSubtarget.h"
#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
@@ -39,14 +39,18 @@ public:
IsMicroMips(STI.getFeatureBits()[Mips::FeatureMicroMips]),
IsBigEndian(IsBigEndian) {}
+ bool hasMips2() const { return STI.getFeatureBits()[Mips::FeatureMips2]; }
bool hasMips3() const { return STI.getFeatureBits()[Mips::FeatureMips3]; }
bool hasMips32() const { return STI.getFeatureBits()[Mips::FeatureMips32]; }
bool hasMips32r6() const {
return STI.getFeatureBits()[Mips::FeatureMips32r6];
}
+ bool isFP64() const { return STI.getFeatureBits()[Mips::FeatureFP64Bit]; }
bool isGP64() const { return STI.getFeatureBits()[Mips::FeatureGP64Bit]; }
+ bool isPTR64() const { return STI.getFeatureBits()[Mips::FeaturePTR64Bit]; }
+
bool hasCnMips() const { return STI.getFeatureBits()[Mips::FeatureCnMips]; }
bool hasCOP3() const {
@@ -193,6 +197,11 @@ static DecodeStatus DecodeBranchTarget(MCInst &Inst,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeBranchTarget1SImm16(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeJumpTarget(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -203,6 +212,11 @@ static DecodeStatus DecodeBranchTarget21(MCInst &Inst,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeBranchTarget21MM(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeBranchTarget26(MCInst &Inst,
unsigned Offset,
uint64_t Address,
@@ -340,6 +354,10 @@ static DecodeStatus DecodeFMem(MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeFMemMMR2(MCInst &Inst, unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeFMem2(MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder);
@@ -352,6 +370,10 @@ static DecodeStatus DecodeFMemCop2R6(MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeFMemCop2MMR6(MCInst &Inst, unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeSpecial3LlSc(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -362,12 +384,7 @@ static DecodeStatus DecodeAddiur2Simm7(MCInst &Inst,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeUImm6Lsl2(MCInst &Inst,
- unsigned Value,
- uint64_t Address,
- const void *Decoder);
-
-static DecodeStatus DecodeLiSimm7(MCInst &Inst,
+static DecodeStatus DecodeLi16Imm(MCInst &Inst,
unsigned Value,
uint64_t Address,
const void *Decoder);
@@ -377,19 +394,23 @@ static DecodeStatus DecodePOOL16BEncodedField(MCInst &Inst,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeSimm4(MCInst &Inst,
- unsigned Value,
- uint64_t Address,
- const void *Decoder);
-
-static DecodeStatus DecodeSimm16(MCInst &Inst,
- unsigned Insn,
- uint64_t Address,
- const void *Decoder);
+template <unsigned Bits, int Offset, int Scale>
+static DecodeStatus DecodeUImmWithOffsetAndScale(MCInst &Inst, unsigned Value,
+ uint64_t Address,
+ const void *Decoder);
template <unsigned Bits, int Offset>
static DecodeStatus DecodeUImmWithOffset(MCInst &Inst, unsigned Value,
- uint64_t Address, const void *Decoder);
+ uint64_t Address,
+ const void *Decoder) {
+ return DecodeUImmWithOffsetAndScale<Bits, Offset, 1>(Inst, Value, Address,
+ Decoder);
+}
+
+template <unsigned Bits, int Offset = 0, int ScaleBy = 1>
+static DecodeStatus DecodeSImmWithOffsetAndScale(MCInst &Inst, unsigned Value,
+ uint64_t Address,
+ const void *Decoder);
static DecodeStatus DecodeInsSize(MCInst &Inst,
unsigned Insn,
@@ -408,9 +429,6 @@ static DecodeStatus DecodeSimm9SP(MCInst &Inst, unsigned Insn,
static DecodeStatus DecodeANDI16Imm(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeUImm5lsl2(MCInst &Inst, unsigned Insn,
- uint64_t Address, const void *Decoder);
-
static DecodeStatus DecodeSimm23Lsl2(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
@@ -427,11 +445,21 @@ DecodeAddiGroupBranch(MCInst &MI, InsnType insn, uint64_t Address,
template <typename InsnType>
static DecodeStatus
+DecodePOP35GroupBranchMMR6(MCInst &MI, InsnType insn, uint64_t Address,
+ const void *Decoder);
+
+template <typename InsnType>
+static DecodeStatus
DecodeDaddiGroupBranch(MCInst &MI, InsnType insn, uint64_t Address,
const void *Decoder);
template <typename InsnType>
static DecodeStatus
+DecodePOP37GroupBranchMMR6(MCInst &MI, InsnType insn, uint64_t Address,
+ const void *Decoder);
+
+template <typename InsnType>
+static DecodeStatus
DecodeBlezlGroupBranch(MCInst &MI, InsnType insn, uint64_t Address,
const void *Decoder);
@@ -450,6 +478,16 @@ static DecodeStatus
DecodeBlezGroupBranch(MCInst &MI, InsnType insn, uint64_t Address,
const void *Decoder);
+template <typename InsnType>
+static DecodeStatus
+DecodeBgtzGroupBranchMMR6(MCInst &MI, InsnType insn, uint64_t Address,
+ const void *Decoder);
+
+template <typename InsnType>
+static DecodeStatus
+DecodeBlezGroupBranchMMR6(MCInst &MI, InsnType insn, uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeRegListOperand(MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder);
@@ -563,7 +601,7 @@ static DecodeStatus DecodeAddiGroupBranch(MCInst &MI, InsnType insn,
InsnType Rs = fieldFromInstruction(insn, 21, 5);
InsnType Rt = fieldFromInstruction(insn, 16, 5);
- InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4;
+ int64_t Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4 + 4;
bool HasRs = false;
if (Rs >= Rt) {
@@ -587,6 +625,37 @@ static DecodeStatus DecodeAddiGroupBranch(MCInst &MI, InsnType insn,
}
template <typename InsnType>
+static DecodeStatus DecodePOP35GroupBranchMMR6(MCInst &MI, InsnType insn,
+ uint64_t Address,
+ const void *Decoder) {
+ InsnType Rt = fieldFromInstruction(insn, 21, 5);
+ InsnType Rs = fieldFromInstruction(insn, 16, 5);
+ InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 2;
+
+ if (Rs >= Rt) {
+ MI.setOpcode(Mips::BOVC_MMR6);
+ MI.addOperand(MCOperand::createReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rt)));
+ MI.addOperand(MCOperand::createReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rs)));
+ } else if (Rs != 0 && Rs < Rt) {
+ MI.setOpcode(Mips::BEQC_MMR6);
+ MI.addOperand(MCOperand::createReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rs)));
+ MI.addOperand(MCOperand::createReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rt)));
+ } else {
+ MI.setOpcode(Mips::BEQZALC_MMR6);
+ MI.addOperand(MCOperand::createReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rt)));
+ }
+
+ MI.addOperand(MCOperand::createImm(Imm));
+
+ return MCDisassembler::Success;
+}
+
+template <typename InsnType>
static DecodeStatus DecodeDaddiGroupBranch(MCInst &MI, InsnType insn,
uint64_t Address,
const void *Decoder) {
@@ -602,7 +671,7 @@ static DecodeStatus DecodeDaddiGroupBranch(MCInst &MI, InsnType insn,
InsnType Rs = fieldFromInstruction(insn, 21, 5);
InsnType Rt = fieldFromInstruction(insn, 16, 5);
- InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4;
+ int64_t Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4 + 4;
bool HasRs = false;
if (Rs >= Rt) {
@@ -626,6 +695,37 @@ static DecodeStatus DecodeDaddiGroupBranch(MCInst &MI, InsnType insn,
}
template <typename InsnType>
+static DecodeStatus DecodePOP37GroupBranchMMR6(MCInst &MI, InsnType insn,
+ uint64_t Address,
+ const void *Decoder) {
+ InsnType Rt = fieldFromInstruction(insn, 21, 5);
+ InsnType Rs = fieldFromInstruction(insn, 16, 5);
+ InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 2;
+
+ if (Rs >= Rt) {
+ MI.setOpcode(Mips::BNVC_MMR6);
+ MI.addOperand(MCOperand::createReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rt)));
+ MI.addOperand(MCOperand::createReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rs)));
+ } else if (Rs != 0 && Rs < Rt) {
+ MI.setOpcode(Mips::BNEC_MMR6);
+ MI.addOperand(MCOperand::createReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rs)));
+ MI.addOperand(MCOperand::createReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rt)));
+ } else {
+ MI.setOpcode(Mips::BNEZALC_MMR6);
+ MI.addOperand(MCOperand::createReg(getReg(Decoder, Mips::GPR32RegClassID,
+ Rt)));
+ }
+
+ MI.addOperand(MCOperand::createImm(Imm));
+
+ return MCDisassembler::Success;
+}
+
+template <typename InsnType>
static DecodeStatus DecodeBlezlGroupBranch(MCInst &MI, InsnType insn,
uint64_t Address,
const void *Decoder) {
@@ -642,7 +742,7 @@ static DecodeStatus DecodeBlezlGroupBranch(MCInst &MI, InsnType insn,
InsnType Rs = fieldFromInstruction(insn, 21, 5);
InsnType Rt = fieldFromInstruction(insn, 16, 5);
- InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4;
+ int64_t Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4 + 4;
bool HasRs = false;
if (Rt == 0)
@@ -687,7 +787,7 @@ static DecodeStatus DecodeBgtzlGroupBranch(MCInst &MI, InsnType insn,
InsnType Rs = fieldFromInstruction(insn, 21, 5);
InsnType Rt = fieldFromInstruction(insn, 16, 5);
- InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4;
+ int64_t Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4 + 4;
if (Rt == 0)
return MCDisassembler::Fail;
@@ -729,7 +829,7 @@ static DecodeStatus DecodeBgtzGroupBranch(MCInst &MI, InsnType insn,
InsnType Rs = fieldFromInstruction(insn, 21, 5);
InsnType Rt = fieldFromInstruction(insn, 16, 5);
- InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4;
+ int64_t Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4 + 4;
bool HasRs = false;
bool HasRt = false;
@@ -778,7 +878,7 @@ static DecodeStatus DecodeBlezGroupBranch(MCInst &MI, InsnType insn,
InsnType Rs = fieldFromInstruction(insn, 21, 5);
InsnType Rt = fieldFromInstruction(insn, 16, 5);
- InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4;
+ int64_t Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4 + 4;
bool HasRs = false;
if (Rt == 0)
@@ -917,6 +1017,17 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
Size = 4;
return Result;
}
+
+ if (hasMips32r6() && isFP64()) {
+ DEBUG(dbgs() << "Trying MicroMips32r6FP64 table (32-bit opcodes):\n");
+ Result = decodeInstruction(DecoderTableMicroMips32r6FP6432, Instr, Insn,
+ Address, this, STI);
+ if (Result != MCDisassembler::Fail) {
+ Size = 4;
+ return Result;
+ }
+ }
+
// This is an invalid instruction. Let the disassembler move forward by the
// minimum instruction size.
Size = 2;
@@ -949,6 +1060,16 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
}
}
+ if (hasMips32r6() && isPTR64()) {
+ DEBUG(dbgs() << "Trying Mips32r6_64r6 (PTR64) table (32-bit opcodes):\n");
+ Result = decodeInstruction(DecoderTableMips32r6_64r6_PTR6432, Instr, Insn,
+ Address, this, STI);
+ if (Result != MCDisassembler::Fail) {
+ Size = 4;
+ return Result;
+ }
+ }
+
if (hasMips32r6()) {
DEBUG(dbgs() << "Trying Mips32r6_64r6 table (32-bit opcodes):\n");
Result = decodeInstruction(DecoderTableMips32r6_64r632, Instr, Insn,
@@ -959,6 +1080,16 @@ DecodeStatus MipsDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
}
}
+ if (hasMips2() && isPTR64()) {
+ DEBUG(dbgs() << "Trying Mips32_64 (PTR64) table (32-bit opcodes):\n");
+ Result = decodeInstruction(DecoderTableMips32_64_PTR6432, Instr, Insn,
+ Address, this, STI);
+ if (Result != MCDisassembler::Fail) {
+ Size = 4;
+ return Result;
+ }
+ }
+
if (hasCnMips()) {
DEBUG(dbgs() << "Trying CnMips table (32-bit opcodes):\n");
Result = decodeInstruction(DecoderTableCnMips32, Instr, Insn,
@@ -1534,7 +1665,8 @@ static DecodeStatus DecodeMemMMImm12(MCInst &Inst,
// fallthrough
default:
Inst.addOperand(MCOperand::createReg(Reg));
- if (Inst.getOpcode() == Mips::LWP_MM || Inst.getOpcode() == Mips::SWP_MM)
+ if (Inst.getOpcode() == Mips::LWP_MM || Inst.getOpcode() == Mips::SWP_MM ||
+ Inst.getOpcode() == Mips::LWP_MMR6 || Inst.getOpcode() == Mips::SWP_MMR6)
Inst.addOperand(MCOperand::createReg(Reg+1));
Inst.addOperand(MCOperand::createReg(Base));
@@ -1580,6 +1712,24 @@ static DecodeStatus DecodeFMem(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeFMemMMR2(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ // This function is the same as DecodeFMem but with the Reg and Base fields
+ // swapped according to the microMIPS spec.
+ int Offset = SignExtend32<16>(Insn & 0xffff);
+ unsigned Base = fieldFromInstruction(Insn, 16, 5);
+ unsigned Reg = fieldFromInstruction(Insn, 21, 5);
+
+ Reg = getReg(Decoder, Mips::FGR64RegClassID, Reg);
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
+
+ Inst.addOperand(MCOperand::createReg(Reg));
+ Inst.addOperand(MCOperand::createReg(Base));
+ Inst.addOperand(MCOperand::createImm(Offset));
+
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeFMem2(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -1633,6 +1783,23 @@ static DecodeStatus DecodeFMemCop2R6(MCInst &Inst,
return MCDisassembler::Success;
}
+
+static DecodeStatus DecodeFMemCop2MMR6(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ int Offset = SignExtend32<11>(Insn & 0x07ff);
+ unsigned Reg = fieldFromInstruction(Insn, 21, 5);
+ unsigned Base = fieldFromInstruction(Insn, 16, 5);
+
+ Reg = getReg(Decoder, Mips::COP2RegClassID, Reg);
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
+
+ Inst.addOperand(MCOperand::createReg(Reg));
+ Inst.addOperand(MCOperand::createReg(Base));
+ Inst.addOperand(MCOperand::createImm(Offset));
+
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeSpecial3LlSc(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -1808,6 +1975,15 @@ static DecodeStatus DecodeBranchTarget(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeBranchTarget1SImm16(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder) {
+ int32_t BranchOffset = (SignExtend32<16>(Offset) * 2);
+ Inst.addOperand(MCOperand::createImm(BranchOffset));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeJumpTarget(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -1822,7 +1998,17 @@ static DecodeStatus DecodeBranchTarget21(MCInst &Inst,
unsigned Offset,
uint64_t Address,
const void *Decoder) {
- int32_t BranchOffset = SignExtend32<21>(Offset) * 4;
+ int32_t BranchOffset = SignExtend32<21>(Offset) * 4 + 4;
+
+ Inst.addOperand(MCOperand::createImm(BranchOffset));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeBranchTarget21MM(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder) {
+ int32_t BranchOffset = SignExtend32<21>(Offset) << 1;
Inst.addOperand(MCOperand::createImm(BranchOffset));
return MCDisassembler::Success;
@@ -1832,7 +2018,7 @@ static DecodeStatus DecodeBranchTarget26(MCInst &Inst,
unsigned Offset,
uint64_t Address,
const void *Decoder) {
- int32_t BranchOffset = SignExtend32<26>(Offset) * 4;
+ int32_t BranchOffset = SignExtend32<26>(Offset) * 4 + 4;
Inst.addOperand(MCOperand::createImm(BranchOffset));
return MCDisassembler::Success;
@@ -1897,15 +2083,7 @@ static DecodeStatus DecodeAddiur2Simm7(MCInst &Inst,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeUImm6Lsl2(MCInst &Inst,
- unsigned Value,
- uint64_t Address,
- const void *Decoder) {
- Inst.addOperand(MCOperand::createImm(Value << 2));
- return MCDisassembler::Success;
-}
-
-static DecodeStatus DecodeLiSimm7(MCInst &Inst,
+static DecodeStatus DecodeLi16Imm(MCInst &Inst,
unsigned Value,
uint64_t Address,
const void *Decoder) {
@@ -1924,28 +2102,22 @@ static DecodeStatus DecodePOOL16BEncodedField(MCInst &Inst,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeSimm4(MCInst &Inst,
- unsigned Value,
- uint64_t Address,
- const void *Decoder) {
- Inst.addOperand(MCOperand::createImm(SignExtend32<4>(Value)));
- return MCDisassembler::Success;
-}
-
-static DecodeStatus DecodeSimm16(MCInst &Inst,
- unsigned Insn,
- uint64_t Address,
- const void *Decoder) {
- Inst.addOperand(MCOperand::createImm(SignExtend32<16>(Insn)));
+template <unsigned Bits, int Offset, int Scale>
+static DecodeStatus DecodeUImmWithOffsetAndScale(MCInst &Inst, unsigned Value,
+ uint64_t Address,
+ const void *Decoder) {
+ Value &= ((1 << Bits) - 1);
+ Value *= Scale;
+ Inst.addOperand(MCOperand::createImm(Value + Offset));
return MCDisassembler::Success;
}
-template <unsigned Bits, int Offset>
-static DecodeStatus DecodeUImmWithOffset(MCInst &Inst, unsigned Value,
- uint64_t Address,
- const void *Decoder) {
- Value &= ((1 << Bits) - 1);
- Inst.addOperand(MCOperand::createImm(Value + Offset));
+template <unsigned Bits, int Offset, int ScaleBy>
+static DecodeStatus DecodeSImmWithOffsetAndScale(MCInst &Inst, unsigned Value,
+ uint64_t Address,
+ const void *Decoder) {
+ int32_t Imm = SignExtend32<Bits>(Value) * ScaleBy;
+ Inst.addOperand(MCOperand::createImm(Imm + Offset));
return MCDisassembler::Success;
}
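The two templates above subsume the removed one-off decoders: Bits selects the field width, Scale/ScaleBy the multiplier, and Offset a bias, so the old DecodeSimm16 corresponds to the <16> instantiation and DecodeUImm6Lsl2 to <6, 0, 4>. The actual instantiations live in the TableGen-generated tables; the equivalences here are inferred from the arithmetic. A standalone sketch of the signed variant:

    // Sketch of DecodeSImmWithOffsetAndScale's arithmetic in isolation.
    #include <cassert>
    #include <cstdint>

    template <unsigned Bits, int Offset = 0, int ScaleBy = 1>
    int32_t decodeSImm(uint32_t Value) {
      int32_t Imm = (int32_t)(Value << (32 - Bits)) >> (32 - Bits);
      return Imm * ScaleBy + Offset;
    }

    int main() {
      assert((decodeSImm<16>(0xFFFF) == -1));            // old DecodeSimm16
      assert((decodeSImm<4>(0x8) == -8));                // old DecodeSimm4
      assert((decodeSImm<16, 0, 4>(0x8000) == -131072)); // scaled field
      return 0;
    }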
@@ -1996,12 +2168,6 @@ static DecodeStatus DecodeANDI16Imm(MCInst &Inst, unsigned Insn,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeUImm5lsl2(MCInst &Inst, unsigned Insn,
- uint64_t Address, const void *Decoder) {
- Inst.addOperand(MCOperand::createImm(Insn << 2));
- return MCDisassembler::Success;
-}
-
static DecodeStatus DecodeRegListOperand(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -2105,3 +2271,87 @@ static DecodeStatus DecodeSimm23Lsl2(MCInst &Inst, unsigned Insn,
Inst.addOperand(MCOperand::createImm(SignExtend32<25>(Insn << 2)));
return MCDisassembler::Success;
}
+
+template <typename InsnType>
+static DecodeStatus DecodeBgtzGroupBranchMMR6(MCInst &MI, InsnType insn,
+ uint64_t Address,
+ const void *Decoder) {
+ // We have:
+ // 0b000111 ttttt sssss iiiiiiiiiiiiiiii
+ // Invalid if rt == 0
+ // BGTZALC_MMR6 if rs == 0 && rt != 0
+ // BLTZALC_MMR6 if rs != 0 && rs == rt
+ // BLTUC_MMR6 if rs != 0 && rs != rt
+
+ InsnType Rt = fieldFromInstruction(insn, 21, 5);
+ InsnType Rs = fieldFromInstruction(insn, 16, 5);
+ InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 2;
+ bool HasRs = false;
+ bool HasRt = false;
+
+ if (Rt == 0)
+ return MCDisassembler::Fail;
+ else if (Rs == 0) {
+ MI.setOpcode(Mips::BGTZALC_MMR6);
+ HasRt = true;
+ }
+ else if (Rs == Rt) {
+ MI.setOpcode(Mips::BLTZALC_MMR6);
+ HasRs = true;
+ }
+ else {
+ MI.setOpcode(Mips::BLTUC_MMR6);
+ HasRs = true;
+ HasRt = true;
+ }
+
+ if (HasRs)
+ MI.addOperand(
+ MCOperand::createReg(getReg(Decoder, Mips::GPR32RegClassID, Rs)));
+
+ if (HasRt)
+ MI.addOperand(
+ MCOperand::createReg(getReg(Decoder, Mips::GPR32RegClassID, Rt)));
+
+ MI.addOperand(MCOperand::createImm(Imm));
+
+ return MCDisassembler::Success;
+}
+
+template <typename InsnType>
+static DecodeStatus DecodeBlezGroupBranchMMR6(MCInst &MI, InsnType insn,
+ uint64_t Address,
+ const void *Decoder) {
+ // We have:
+ // 0b000110 ttttt sssss iiiiiiiiiiiiiiii
+ // Invalid if rt == 0
+ // BLEZALC_MMR6 if rs == 0 && rt != 0
+ // BGEZALC_MMR6 if rs == rt && rt != 0
+ // BGEUC_MMR6 if rs != rt && rs != 0 && rt != 0
+
+ InsnType Rt = fieldFromInstruction(insn, 21, 5);
+ InsnType Rs = fieldFromInstruction(insn, 16, 5);
+ InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 2;
+ bool HasRs = false;
+
+ if (Rt == 0)
+ return MCDisassembler::Fail;
+ else if (Rs == 0)
+ MI.setOpcode(Mips::BLEZALC_MMR6);
+ else if (Rs == Rt)
+ MI.setOpcode(Mips::BGEZALC_MMR6);
+ else {
+ HasRs = true;
+ MI.setOpcode(Mips::BGEUC_MMR6);
+ }
+
+ if (HasRs)
+ MI.addOperand(
+ MCOperand::createReg(getReg(Decoder, Mips::GPR32RegClassID, Rs)));
+ MI.addOperand(
+ MCOperand::createReg(getReg(Decoder, Mips::GPR32RegClassID, Rt)));
+
+ MI.addOperand(MCOperand::createImm(Imm));
+
+ return MCDisassembler::Success;
+}
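Both MMR6 group decoders above use the same disambiguation idea as the POP35/POP37 handlers earlier in the file: several instructions share one major opcode, and the relationship between the two register fields selects which one was encoded. Condensed to its decision structure (a model of the logic, not patch code):

    // Sketch: three-way opcode selection driven purely by register fields,
    // mirroring DecodePOP35GroupBranchMMR6.
    enum class Pop35 { BOVC, BEQC, BEQZALC };

    Pop35 classifyPop35(unsigned Rs, unsigned Rt) {
      if (Rs >= Rt)
        return Pop35::BOVC;    // covers Rs == Rt and Rs > Rt
      if (Rs != 0)
        return Pop35::BEQC;    // 0 < Rs < Rt
      return Pop35::BEQZALC;   // Rs == 0, Rt != 0
    }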
diff --git a/lib/Target/Mips/InstPrinter/Makefile b/lib/Target/Mips/InstPrinter/Makefile
deleted file mode 100644
index f07f3ed381ee..000000000000
--- a/lib/Target/Mips/InstPrinter/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/Mips/AsmPrinter/Makefile -----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMMipsAsmPrinter
-
-# Hack: we need to include 'main' mips target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp b/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
index a7b7d2e080ee..0fd593fcfbe1 100644
--- a/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
+++ b/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
@@ -122,70 +122,6 @@ void MipsInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
}
}
-static void printExpr(const MCExpr *Expr, const MCAsmInfo *MAI,
- raw_ostream &OS) {
- int Offset = 0;
- const MCSymbolRefExpr *SRE;
-
- if (const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr)) {
- SRE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(BE->getRHS());
- assert(SRE && CE && "Binary expression must be sym+const.");
- Offset = CE->getValue();
- } else if (const MipsMCExpr *ME = dyn_cast<MipsMCExpr>(Expr)) {
- ME->print(OS, MAI);
- return;
- } else
- SRE = cast<MCSymbolRefExpr>(Expr);
-
- MCSymbolRefExpr::VariantKind Kind = SRE->getKind();
-
- switch (Kind) {
- default: llvm_unreachable("Invalid kind!");
- case MCSymbolRefExpr::VK_None: break;
- case MCSymbolRefExpr::VK_Mips_GPREL: OS << "%gp_rel("; break;
- case MCSymbolRefExpr::VK_Mips_GOT_CALL: OS << "%call16("; break;
- case MCSymbolRefExpr::VK_Mips_GOT16: OS << "%got("; break;
- case MCSymbolRefExpr::VK_Mips_GOT: OS << "%got("; break;
- case MCSymbolRefExpr::VK_Mips_ABS_HI: OS << "%hi("; break;
- case MCSymbolRefExpr::VK_Mips_ABS_LO: OS << "%lo("; break;
- case MCSymbolRefExpr::VK_Mips_TLSGD: OS << "%tlsgd("; break;
- case MCSymbolRefExpr::VK_Mips_TLSLDM: OS << "%tlsldm("; break;
- case MCSymbolRefExpr::VK_Mips_DTPREL_HI: OS << "%dtprel_hi("; break;
- case MCSymbolRefExpr::VK_Mips_DTPREL_LO: OS << "%dtprel_lo("; break;
- case MCSymbolRefExpr::VK_Mips_GOTTPREL: OS << "%gottprel("; break;
- case MCSymbolRefExpr::VK_Mips_TPREL_HI: OS << "%tprel_hi("; break;
- case MCSymbolRefExpr::VK_Mips_TPREL_LO: OS << "%tprel_lo("; break;
- case MCSymbolRefExpr::VK_Mips_GPOFF_HI: OS << "%hi(%neg(%gp_rel("; break;
- case MCSymbolRefExpr::VK_Mips_GPOFF_LO: OS << "%lo(%neg(%gp_rel("; break;
- case MCSymbolRefExpr::VK_Mips_GOT_DISP: OS << "%got_disp("; break;
- case MCSymbolRefExpr::VK_Mips_GOT_PAGE: OS << "%got_page("; break;
- case MCSymbolRefExpr::VK_Mips_GOT_OFST: OS << "%got_ofst("; break;
- case MCSymbolRefExpr::VK_Mips_HIGHER: OS << "%higher("; break;
- case MCSymbolRefExpr::VK_Mips_HIGHEST: OS << "%highest("; break;
- case MCSymbolRefExpr::VK_Mips_GOT_HI16: OS << "%got_hi("; break;
- case MCSymbolRefExpr::VK_Mips_GOT_LO16: OS << "%got_lo("; break;
- case MCSymbolRefExpr::VK_Mips_CALL_HI16: OS << "%call_hi("; break;
- case MCSymbolRefExpr::VK_Mips_CALL_LO16: OS << "%call_lo("; break;
- case MCSymbolRefExpr::VK_Mips_PCREL_HI16: OS << "%pcrel_hi("; break;
- case MCSymbolRefExpr::VK_Mips_PCREL_LO16: OS << "%pcrel_lo("; break;
- }
-
- SRE->getSymbol().print(OS, MAI);
-
- if (Offset) {
- if (Offset > 0)
- OS << '+';
- OS << Offset;
- }
-
- if ((Kind == MCSymbolRefExpr::VK_Mips_GPOFF_HI) ||
- (Kind == MCSymbolRefExpr::VK_Mips_GPOFF_LO))
- OS << ")))";
- else if (Kind != MCSymbolRefExpr::VK_None)
- OS << ')';
-}
-
void MipsInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
@@ -195,30 +131,27 @@ void MipsInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
}
if (Op.isImm()) {
- O << Op.getImm();
+ O << formatImm(Op.getImm());
return;
}
assert(Op.isExpr() && "unknown operand kind in printOperand");
- printExpr(Op.getExpr(), &MAI, O);
+ Op.getExpr()->print(O, &MAI, true);
}
-void MipsInstPrinter::printUnsignedImm(const MCInst *MI, int opNum,
- raw_ostream &O) {
+template <unsigned Bits, unsigned Offset>
+void MipsInstPrinter::printUImm(const MCInst *MI, int opNum, raw_ostream &O) {
const MCOperand &MO = MI->getOperand(opNum);
- if (MO.isImm())
- O << (unsigned short int)MO.getImm();
- else
- printOperand(MI, opNum, O);
-}
+ if (MO.isImm()) {
+ uint64_t Imm = MO.getImm();
+ Imm -= Offset;
+ Imm &= (1 << Bits) - 1;
+ Imm += Offset;
+ O << formatImm(Imm);
+ return;
+ }
-void MipsInstPrinter::printUnsignedImm8(const MCInst *MI, int opNum,
- raw_ostream &O) {
- const MCOperand &MO = MI->getOperand(opNum);
- if (MO.isImm())
- O << (unsigned short int)(unsigned char)MO.getImm();
- else
- printOperand(MI, opNum, O);
+ printOperand(MI, opNum, O);
}
void MipsInstPrinter::
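The subtract/mask/add sequence in printUImm canonicalizes whatever value the MCOperand carries (possibly sign- or zero-extended) into the Bits-wide range starting at Offset; that is what lets one template replace both printUnsignedImm and printUnsignedImm8. A worked example of the arithmetic (a sketch; the <5, 1> parameters are illustrative):

    // Sketch: printUImm<5, 1>-style canonicalization for a field encoding
    // the values 1..32, where 32 is stored as raw field value 0.
    #include <cstdint>

    uint64_t canonicalize(uint64_t Imm, unsigned Bits, uint64_t Offset) {
      Imm -= Offset;                      // shift range down to 0..2^Bits-1
      Imm &= ((uint64_t)1 << Bits) - 1;   // keep the Bits-wide payload
      return Imm + Offset;                // shift back
    }

    // canonicalize(33, 5, 1) == 1 and canonicalize(0, 5, 1) == 32 (wraps).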
@@ -325,6 +258,7 @@ bool MipsInstPrinter::printAlias(const MCInst &MI, raw_ostream &OS) {
return isReg<Mips::RA_64>(MI, 0) && printAlias("jalr", MI, 1, OS);
case Mips::NOR:
case Mips::NOR_MM:
+ case Mips::NOR_MMR6:
// nor $r0, $r1, $zero => not $r0, $r1
return isReg<Mips::ZERO>(MI, 2) && printAlias("not", MI, 0, 1, OS);
case Mips::NOR64:
@@ -343,7 +277,7 @@ void MipsInstPrinter::printSaveRestore(const MCInst *MI, raw_ostream &O) {
if (MI->getOperand(i).isReg())
printRegName(O, MI->getOperand(i).getReg());
else
- printUnsignedImm(MI, i, O);
+ printUImm<16>(MI, i, O);
}
}
diff --git a/lib/Target/Mips/InstPrinter/MipsInstPrinter.h b/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
index 0e61ea61899a..4a76b5acac79 100644
--- a/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
+++ b/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
@@ -93,8 +93,8 @@ public:
private:
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
- void printUnsignedImm(const MCInst *MI, int opNum, raw_ostream &O);
- void printUnsignedImm8(const MCInst *MI, int opNum, raw_ostream &O);
+ template <unsigned Bits, unsigned Offset = 0>
+ void printUImm(const MCInst *MI, int opNum, raw_ostream &O);
void printMemOperand(const MCInst *MI, int opNum, raw_ostream &O);
void printMemOperandEA(const MCInst *MI, int opNum, raw_ostream &O);
void printFCCOperand(const MCInst *MI, int opNum, raw_ostream &O);
diff --git a/lib/Target/Mips/MCTargetDesc/Makefile b/lib/Target/Mips/MCTargetDesc/Makefile
deleted file mode 100644
index 22a27218f28d..000000000000
--- a/lib/Target/Mips/MCTargetDesc/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-##===- lib/Target/Mips/TargetDesc/Makefile -----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMMipsDesc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
-
diff --git a/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.cpp b/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.cpp
index 70b9cca8cf6e..932d38a0b9fe 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.cpp
@@ -53,17 +53,17 @@ uint8_t MipsABIFlagsSection::getCPR1SizeValue() {
namespace llvm {
MCStreamer &operator<<(MCStreamer &OS, MipsABIFlagsSection &ABIFlagsSection) {
// Write out an Elf_Internal_ABIFlags_v0 struct
- OS.EmitIntValue(ABIFlagsSection.getVersionValue(), 2); // version
- OS.EmitIntValue(ABIFlagsSection.getISALevelValue(), 1); // isa_level
- OS.EmitIntValue(ABIFlagsSection.getISARevisionValue(), 1); // isa_rev
- OS.EmitIntValue(ABIFlagsSection.getGPRSizeValue(), 1); // gpr_size
- OS.EmitIntValue(ABIFlagsSection.getCPR1SizeValue(), 1); // cpr1_size
- OS.EmitIntValue(ABIFlagsSection.getCPR2SizeValue(), 1); // cpr2_size
- OS.EmitIntValue(ABIFlagsSection.getFpABIValue(), 1); // fp_abi
- OS.EmitIntValue(ABIFlagsSection.getISAExtensionSetValue(), 4); // isa_ext
- OS.EmitIntValue(ABIFlagsSection.getASESetValue(), 4); // ases
- OS.EmitIntValue(ABIFlagsSection.getFlags1Value(), 4); // flags1
- OS.EmitIntValue(ABIFlagsSection.getFlags2Value(), 4); // flags2
+ OS.EmitIntValue(ABIFlagsSection.getVersionValue(), 2); // version
+ OS.EmitIntValue(ABIFlagsSection.getISALevelValue(), 1); // isa_level
+ OS.EmitIntValue(ABIFlagsSection.getISARevisionValue(), 1); // isa_rev
+ OS.EmitIntValue(ABIFlagsSection.getGPRSizeValue(), 1); // gpr_size
+ OS.EmitIntValue(ABIFlagsSection.getCPR1SizeValue(), 1); // cpr1_size
+ OS.EmitIntValue(ABIFlagsSection.getCPR2SizeValue(), 1); // cpr2_size
+ OS.EmitIntValue(ABIFlagsSection.getFpABIValue(), 1); // fp_abi
+ OS.EmitIntValue(ABIFlagsSection.getISAExtensionValue(), 4); // isa_ext
+ OS.EmitIntValue(ABIFlagsSection.getASESetValue(), 4); // ases
+ OS.EmitIntValue(ABIFlagsSection.getFlags1Value(), 4); // flags1
+ OS.EmitIntValue(ABIFlagsSection.getFlags2Value(), 4); // flags2
return OS;
}
}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h b/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h
index b078cd30a87b..3966cae9fe33 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h
@@ -35,7 +35,7 @@ struct MipsABIFlagsSection {
// The size of co-processor 2 registers.
Mips::AFL_REG CPR2Size;
// Processor-specific extension.
- uint32_t ISAExtensionSet;
+ Mips::AFL_EXT ISAExtension;
// Mask of ASEs used.
uint32_t ASESet;
@@ -51,8 +51,8 @@ public:
MipsABIFlagsSection()
: Version(0), ISALevel(0), ISARevision(0), GPRSize(Mips::AFL_REG_NONE),
CPR1Size(Mips::AFL_REG_NONE), CPR2Size(Mips::AFL_REG_NONE),
- ISAExtensionSet(0), ASESet(0), OddSPReg(false), Is32BitABI(false),
- FpABI(FpABIKind::ANY) {}
+ ISAExtension(Mips::AFL_EXT_NONE), ASESet(0), OddSPReg(false),
+ Is32BitABI(false), FpABI(FpABIKind::ANY) {}
uint16_t getVersionValue() { return (uint16_t)Version; }
uint8_t getISALevelValue() { return (uint8_t)ISALevel; }
@@ -61,7 +61,7 @@ public:
uint8_t getCPR1SizeValue();
uint8_t getCPR2SizeValue() { return (uint8_t)CPR2Size; }
uint8_t getFpABIValue();
- uint32_t getISAExtensionSetValue() { return (uint32_t)ISAExtensionSet; }
+ uint32_t getISAExtensionValue() { return (uint32_t)ISAExtension; }
uint32_t getASESetValue() { return (uint32_t)ASESet; }
uint32_t getFlags1Value() {
@@ -141,6 +141,14 @@ public:
}
template <class PredicateLibrary>
+ void setISAExtensionFromPredicates(const PredicateLibrary &P) {
+ if (P.hasCnMips())
+ ISAExtension = Mips::AFL_EXT_OCTEON;
+ else
+ ISAExtension = Mips::AFL_EXT_NONE;
+ }
+
+ template <class PredicateLibrary>
void setASESetFromPredicates(const PredicateLibrary &P) {
ASESet = 0;
if (P.hasDSP())
@@ -179,6 +187,7 @@ public:
setISALevelAndRevisionFromPredicates(P);
setGPRSizeFromPredicates(P);
setCPR1SizeFromPredicates(P);
+ setISAExtensionFromPredicates(P);
setASESetFromPredicates(P);
setFpAbiFromPredicates(P);
OddSPReg = P.useOddSPReg();
diff --git a/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp b/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp
index cdcc3923b81e..3cf632e789de 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp
@@ -42,7 +42,7 @@ ArrayRef<MCPhysReg> MipsABIInfo::GetVarArgRegs() const {
unsigned MipsABIInfo::GetCalleeAllocdArgSizeInBytes(CallingConv::ID CC) const {
if (IsO32())
return CC != CallingConv::Fast ? 16 : 0;
- if (IsN32() || IsN64() || IsEABI())
+ if (IsN32() || IsN64())
return 0;
llvm_unreachable("Unhandled ABI");
}
@@ -55,39 +55,12 @@ MipsABIInfo MipsABIInfo::computeTargetABI(const Triple &TT, StringRef CPU,
return MipsABIInfo::N32();
else if (Options.getABIName().startswith("n64"))
return MipsABIInfo::N64();
- else if (Options.getABIName().startswith("eabi"))
- return MipsABIInfo::EABI();
else if (!Options.getABIName().empty())
llvm_unreachable("Unknown ABI option for MIPS");
- // FIXME: This shares code with the selectMipsCPU routine that's
- // used and not shared in a couple of other places. This needs unifying
- // at some level.
- if (CPU.empty() || CPU == "generic") {
- if (TT.getArch() == Triple::mips || TT.getArch() == Triple::mipsel)
- CPU = "mips32";
- else
- CPU = "mips64";
- }
-
- return StringSwitch<MipsABIInfo>(CPU)
- .Case("mips1", MipsABIInfo::O32())
- .Case("mips2", MipsABIInfo::O32())
- .Case("mips32", MipsABIInfo::O32())
- .Case("mips32r2", MipsABIInfo::O32())
- .Case("mips32r3", MipsABIInfo::O32())
- .Case("mips32r5", MipsABIInfo::O32())
- .Case("mips32r6", MipsABIInfo::O32())
- .Case("mips3", MipsABIInfo::N64())
- .Case("mips4", MipsABIInfo::N64())
- .Case("mips5", MipsABIInfo::N64())
- .Case("mips64", MipsABIInfo::N64())
- .Case("mips64r2", MipsABIInfo::N64())
- .Case("mips64r3", MipsABIInfo::N64())
- .Case("mips64r5", MipsABIInfo::N64())
- .Case("mips64r6", MipsABIInfo::N64())
- .Case("octeon", MipsABIInfo::N64())
- .Default(MipsABIInfo::Unknown());
+ if (TT.getArch() == Triple::mips64 || TT.getArch() == Triple::mips64el)
+ return MipsABIInfo::N64();
+ return MipsABIInfo::O32();
}
unsigned MipsABIInfo::GetStackPtr() const {
@@ -102,6 +75,10 @@ unsigned MipsABIInfo::GetBasePtr() const {
return ArePtrs64bit() ? Mips::S7_64 : Mips::S7;
}
+unsigned MipsABIInfo::GetGlobalPtr() const {
+ return ArePtrs64bit() ? Mips::GP_64 : Mips::GP;
+}
+
unsigned MipsABIInfo::GetNullPtr() const {
return ArePtrs64bit() ? Mips::ZERO_64 : Mips::ZERO;
}
@@ -118,6 +95,14 @@ unsigned MipsABIInfo::GetPtrAddiuOp() const {
return ArePtrs64bit() ? Mips::DADDiu : Mips::ADDiu;
}
+unsigned MipsABIInfo::GetPtrSubuOp() const {
+ return ArePtrs64bit() ? Mips::DSUBu : Mips::SUBu;
+}
+
+unsigned MipsABIInfo::GetPtrAndOp() const {
+ return ArePtrs64bit() ? Mips::AND64 : Mips::AND;
+}
+
unsigned MipsABIInfo::GetGPRMoveOp() const {
return ArePtrs64bit() ? Mips::OR64 : Mips::OR;
}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsABIInfo.h b/lib/Target/Mips/MCTargetDesc/MipsABIInfo.h
index ffa2c765e79b..9372a3c2bb1f 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsABIInfo.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsABIInfo.h
@@ -10,20 +10,20 @@
#ifndef LLVM_LIB_TARGET_MIPS_MCTARGETDESC_MIPSABIINFO_H
#define LLVM_LIB_TARGET_MIPS_MCTARGETDESC_MIPSABIINFO_H
-#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/MC/MCRegisterInfo.h"
namespace llvm {
+template <typename T> class ArrayRef;
class MCTargetOptions;
class StringRef;
class TargetRegisterClass;
class MipsABIInfo {
public:
- enum class ABI { Unknown, O32, N32, N64, EABI };
+ enum class ABI { Unknown, O32, N32, N64 };
protected:
ABI ThisABI;
@@ -35,7 +35,6 @@ public:
static MipsABIInfo O32() { return MipsABIInfo(ABI::O32); }
static MipsABIInfo N32() { return MipsABIInfo(ABI::N32); }
static MipsABIInfo N64() { return MipsABIInfo(ABI::N64); }
- static MipsABIInfo EABI() { return MipsABIInfo(ABI::EABI); }
static MipsABIInfo computeTargetABI(const Triple &TT, StringRef CPU,
const MCTargetOptions &Options);
@@ -43,7 +42,6 @@ public:
bool IsO32() const { return ThisABI == ABI::O32; }
bool IsN32() const { return ThisABI == ABI::N32; }
bool IsN64() const { return ThisABI == ABI::N64; }
- bool IsEABI() const { return ThisABI == ABI::EABI; }
ABI GetEnumValue() const { return ThisABI; }
/// The registers to use for byval arguments.
@@ -66,10 +64,13 @@ public:
unsigned GetStackPtr() const;
unsigned GetFramePtr() const;
unsigned GetBasePtr() const;
+ unsigned GetGlobalPtr() const;
unsigned GetNullPtr() const;
unsigned GetZeroReg() const;
unsigned GetPtrAdduOp() const;
unsigned GetPtrAddiuOp() const;
+ unsigned GetPtrSubuOp() const;
+ unsigned GetPtrAndOp() const;
unsigned GetGPRMoveOp() const;
inline bool ArePtrs64bit() const { return IsN64(); }
inline bool AreGprs64bit() const { return IsN32() || IsN64(); }
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
index e4865e2455ee..8292d6b4c55a 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -14,6 +14,7 @@
#include "MCTargetDesc/MipsFixupKinds.h"
#include "MCTargetDesc/MipsAsmBackend.h"
+#include "MCTargetDesc/MipsMCExpr.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
@@ -23,7 +24,9 @@
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCValue.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
@@ -40,9 +43,6 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
default:
return 0;
case FK_Data_2:
- case FK_GPRel_4:
- case FK_Data_4:
- case FK_Data_8:
case Mips::fixup_Mips_LO16:
case Mips::fixup_Mips_GPREL16:
case Mips::fixup_Mips_GPOFF_HI:
@@ -57,6 +57,11 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
case Mips::fixup_MICROMIPS_GOT_OFST:
case Mips::fixup_MICROMIPS_GOT_DISP:
case Mips::fixup_MIPS_PCLO16:
+ Value &= 0xffff;
+ break;
+ case FK_GPRel_4:
+ case FK_Data_4:
+ case FK_Data_8:
break;
case Mips::fixup_Mips_PC16:
// The displacement is then divided by 4 to give us an 18 bit
@@ -69,6 +74,7 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
}
break;
case Mips::fixup_MIPS_PC19_S2:
+ case Mips::fixup_MICROMIPS_PC19_S2:
// Forcing a signed division because Value can be negative.
Value = (int64_t)Value / 4;
// We now check if Value can be encoded as a 19-bit signed immediate.
@@ -84,7 +90,8 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
Value >>= 2;
break;
case Mips::fixup_Mips_HI16:
- case Mips::fixup_Mips_GOT_Local:
+ case Mips::fixup_Mips_GOT:
+ case Mips::fixup_MICROMIPS_GOT16:
case Mips::fixup_Mips_GOT_HI16:
case Mips::fixup_Mips_CALL_HI16:
case Mips::fixup_MICROMIPS_HI16:
@@ -142,6 +149,19 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
return 0;
}
break;
+ case Mips::fixup_MICROMIPS_PC18_S3:
+ // Check alignment.
+ if ((Value & 7) && Ctx) {
+ Ctx->reportError(Fixup.getLoc(), "out of range PC18 fixup");
+ }
+ // Forcing a signed division because Value can be negative.
+ Value = (int64_t)Value / 8;
+ // We now check if Value can be encoded as an 18-bit signed immediate.
+ if (!isInt<18>(Value) && Ctx) {
+ Ctx->reportError(Fixup.getLoc(), "out of range PC18 fixup");
+ return 0;
+ }
+ break;
case Mips::fixup_MIPS_PC21_S2:
// Forcing a signed division because Value can be negative.
Value = (int64_t) Value / 4;
@@ -160,6 +180,24 @@ static unsigned adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
return 0;
}
break;
+ case Mips::fixup_MICROMIPS_PC26_S1:
+ // Forcing a signed division because Value can be negative.
+ Value = (int64_t)Value / 2;
+ // We now check if Value can be encoded as a 26-bit signed immediate.
+ if (!isInt<26>(Value) && Ctx) {
+ Ctx->reportFatalError(Fixup.getLoc(), "out of range PC26 fixup");
+ return 0;
+ }
+ break;
+ case Mips::fixup_MICROMIPS_PC21_S1:
+ // Forcing a signed division because Value can be negative.
+ Value = (int64_t)Value / 2;
+ // We now check if Value can be encoded as a 21-bit signed immediate.
+ if (!isInt<21>(Value) && Ctx) {
+ Ctx->reportError(Fixup.getLoc(), "out of range PC21 fixup");
+ return 0;
+ }
+ break;
}
return Value;
@@ -248,16 +286,11 @@ void MipsAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
}
}
-bool MipsAsmBackend::getFixupKind(StringRef Name, MCFixupKind &MappedKind) const {
- if (Name == "R_MIPS_NONE") {
- MappedKind = (MCFixupKind)Mips::fixup_Mips_NONE;
- return true;
- }
- if (Name == "R_MIPS_32") {
- MappedKind = FK_Data_4;
- return true;
- }
- return MCAsmBackend::getFixupKind(Name, MappedKind);
+Optional<MCFixupKind> MipsAsmBackend::getFixupKind(StringRef Name) const {
+ return StringSwitch<Optional<MCFixupKind>>(Name)
+ .Case("R_MIPS_NONE", (MCFixupKind)Mips::fixup_Mips_NONE)
+ .Case("R_MIPS_32", FK_Data_4)
+ .Default(MCAsmBackend::getFixupKind(Name));
}
const MCFixupKindInfo &MipsAsmBackend::
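getFixupKind now returns Optional<MCFixupKind> instead of filling an out-parameter, which lets the mapping and the fall-through to the base class collapse into a single StringSwitch expression. A generic sketch of the idiom (illustrative only; the function name and values here are hypothetical):

    // Sketch: Optional-returning lookup with a chained default.
    #include "llvm/ADT/Optional.h"
    #include "llvm/ADT/StringSwitch.h"

    llvm::Optional<unsigned> lookupRelocNumber(llvm::StringRef Name) {
      return llvm::StringSwitch<llvm::Optional<unsigned>>(Name)
          .Case("R_MIPS_NONE", 0u) // known names map to a value...
          .Case("R_MIPS_32", 2u)
          .Default(llvm::None);    // ...everything else maps to 'no value'
    }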
@@ -276,8 +309,7 @@ getFixupKindInfo(MCFixupKind Kind) const {
{ "fixup_Mips_LO16", 0, 16, 0 },
{ "fixup_Mips_GPREL16", 0, 16, 0 },
{ "fixup_Mips_LITERAL", 0, 16, 0 },
- { "fixup_Mips_GOT_Global", 0, 16, 0 },
- { "fixup_Mips_GOT_Local", 0, 16, 0 },
+ { "fixup_Mips_GOT", 0, 16, 0 },
{ "fixup_Mips_PC16", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_Mips_CALL16", 0, 16, 0 },
{ "fixup_Mips_GPREL32", 0, 32, 0 },
@@ -316,6 +348,10 @@ getFixupKindInfo(MCFixupKind Kind) const {
{ "fixup_MICROMIPS_PC7_S1", 0, 7, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_MICROMIPS_PC10_S1", 0, 10, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_MICROMIPS_PC16_S1", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_PC26_S1", 0, 26, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_PC19_S2", 0, 19, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_PC18_S3", 0, 18, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_PC21_S1", 0, 21, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_MICROMIPS_CALL16", 0, 16, 0 },
{ "fixup_MICROMIPS_GOT_DISP", 0, 16, 0 },
{ "fixup_MICROMIPS_GOT_PAGE", 0, 16, 0 },
@@ -342,8 +378,7 @@ getFixupKindInfo(MCFixupKind Kind) const {
{ "fixup_Mips_LO16", 16, 16, 0 },
{ "fixup_Mips_GPREL16", 16, 16, 0 },
{ "fixup_Mips_LITERAL", 16, 16, 0 },
- { "fixup_Mips_GOT_Global", 16, 16, 0 },
- { "fixup_Mips_GOT_Local", 16, 16, 0 },
+ { "fixup_Mips_GOT", 16, 16, 0 },
{ "fixup_Mips_PC16", 16, 16, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_Mips_CALL16", 16, 16, 0 },
{ "fixup_Mips_GPREL32", 0, 32, 0 },
@@ -382,6 +417,10 @@ getFixupKindInfo(MCFixupKind Kind) const {
{ "fixup_MICROMIPS_PC7_S1", 9, 7, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_MICROMIPS_PC10_S1", 6, 10, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_MICROMIPS_PC16_S1",16, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_PC26_S1", 6, 26, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_PC19_S2",13, 19, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_PC18_S3",14, 18, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_PC21_S1",11, 21, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_MICROMIPS_CALL16", 16, 16, 0 },
{ "fixup_MICROMIPS_GOT_DISP", 16, 16, 0 },
{ "fixup_MICROMIPS_GOT_PAGE", 16, 16, 0 },
@@ -435,6 +474,8 @@ void MipsAsmBackend::processFixupValue(const MCAssembler &Asm,
// we are only checking if the fixup can be applied correctly. We have
// access to MCContext from here which allows us to report a fatal error
// with *possibly* a source code location.
+ // The caller will also ignore any changes we make to Value
+ // (recordRelocation() overwrites it with its own calculation).
(void)adjustFixupValue(Fixup, Value, &Asm.getContext());
}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
index 1c9af9227ffe..f260cfa566c9 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
@@ -41,7 +41,7 @@ public:
void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value, bool IsPCRel) const override;
- bool getFixupKind(StringRef Name, MCFixupKind &MappedKind) const override;
+ Optional<MCFixupKind> getFixupKind(StringRef Name) const override;
const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
unsigned getNumFixupKinds() const override {
@@ -75,7 +75,8 @@ public:
/// \param Inst - The instruction to relax, which may be the same
/// as the output.
/// \param [out] Res On return, the relaxed instruction.
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {}
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override {}
/// @}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h b/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
index ff7779ec1e78..2bcff881788c 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
@@ -33,9 +33,8 @@ namespace MipsII {
MO_NO_FLAG,
- /// MO_GOT16 - Represents the offset into the global offset table at which
+ /// MO_GOT - Represents the offset into the global offset table at which
/// the address of the relocation entry's symbol resides during execution.
- MO_GOT16,
MO_GOT,
/// MO_GOT_CALL - Represents the offset into the global offset table at
@@ -117,7 +116,12 @@ namespace MipsII {
/// FrmOther - This form is for instructions that have no specific format.
FrmOther = 6,
- FormMask = 15
+ FormMask = 15,
+ /// IsCTI - Instruction is a Control Transfer Instruction.
+ IsCTI = 1 << 4,
+ /// HasForbiddenSlot - Instruction has a forbidden slot.
+ HasForbiddenSlot = 1 << 5
+
};
}
}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
index 5b9f02b89be5..cdad7ce1b73a 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
@@ -7,8 +7,11 @@
//
//===----------------------------------------------------------------------===//
+#include <algorithm>
+#include <list>
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MCTargetDesc/MipsFixupKinds.h"
+#include "MCTargetDesc/MipsMCExpr.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCAssembler.h"
@@ -17,42 +20,190 @@
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/MC/MCValue.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include <list>
+
+#define DEBUG_TYPE "mips-elf-object-writer"
using namespace llvm;
namespace {
-// A helper structure based on ELFRelocationEntry, used for sorting entries in
-// the relocation table.
+/// Holds additional information needed by the relocation ordering algorithm.
struct MipsRelocationEntry {
- MipsRelocationEntry(const ELFRelocationEntry &R)
- : R(R), SortOffset(R.Offset), HasMatchingHi(false) {}
- const ELFRelocationEntry R;
- // SortOffset equals R.Offset except for the *HI16 relocations, for which it
- // will be set based on the R.Offset of the matching *LO16 relocation.
- int64_t SortOffset;
- // True when this is a *LO16 relocation chosen as a match for a *HI16
- // relocation.
- bool HasMatchingHi;
+ const ELFRelocationEntry R; ///< The relocation.
+ bool Matched; ///< Is this relocation part of a match.
+
+ MipsRelocationEntry(const ELFRelocationEntry &R) : R(R), Matched(false) {}
+
+ void print(raw_ostream &Out) const {
+ R.print(Out);
+ Out << ", Matched=" << Matched;
+ }
+};
+
+#ifndef NDEBUG
+raw_ostream &operator<<(raw_ostream &OS, const MipsRelocationEntry &RHS) {
+ RHS.print(OS);
+ return OS;
+}
+#endif
+
+class MipsELFObjectWriter : public MCELFObjectTargetWriter {
+public:
+ MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI, bool _isN64,
+ bool IsLittleEndian);
+
+ ~MipsELFObjectWriter() override;
+
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
+ bool needsRelocateWithSymbol(const MCSymbol &Sym,
+ unsigned Type) const override;
+ virtual void sortRelocs(const MCAssembler &Asm,
+ std::vector<ELFRelocationEntry> &Relocs) override;
+};
+
+/// Copy elements in the range [First, Last) to d1 when the predicate is true or
+/// d2 when the predicate is false. This is essentially both std::copy_if and
+/// std::remove_copy_if combined into a single pass.
+template <class InputIt, class OutputIt1, class OutputIt2, class UnaryPredicate>
+std::pair<OutputIt1, OutputIt2> copy_if_else(InputIt First, InputIt Last,
+ OutputIt1 d1, OutputIt2 d2,
+ UnaryPredicate Predicate) {
+ for (InputIt I = First; I != Last; ++I) {
+ if (Predicate(*I)) {
+ *d1 = *I;
+ d1++;
+ } else {
+ *d2 = *I;
+ d2++;
+ }
+ }
+
+ return std::make_pair(d1, d2);
+}
+
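copy_if_else is a single-pass partition-copy. A usage sketch with plain containers (illustrative only, not from the patch):

    // Sketch: split a list into matching/non-matching halves in one pass.
    #include <iterator>
    #include <vector>

    void demo() {
      std::vector<int> In = {1, 2, 3, 4, 5};
      std::vector<int> Evens, Odds;
      copy_if_else(In.begin(), In.end(), std::back_inserter(Evens),
                   std::back_inserter(Odds),
                   [](int X) { return X % 2 == 0; });
      // Evens == {2, 4}, Odds == {1, 3, 5}
    }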
+/// The possible results of the Predicate function used by find_best.
+enum FindBestPredicateResult {
+ FindBest_NoMatch = 0, ///< The current element is not a match.
+ FindBest_Match, ///< The current element is a match but better ones are
+ /// possible.
+ FindBest_PerfectMatch, ///< The current element is an unbeatable match.
};
- class MipsELFObjectWriter : public MCELFObjectTargetWriter {
- public:
- MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI,
- bool _isN64, bool IsLittleEndian);
+/// Find the best match in the range [First, Last).
+///
+/// An element matches when Predicate(X) returns FindBest_Match or
+/// FindBest_PerfectMatch. A value of FindBest_PerfectMatch also terminates
+/// the search. BetterThan(A, B) is a comparator that returns true when A is a
+/// better match than B. The return value is the position of the best match.
+///
+/// This is similar to std::find_if but finds the best of multiple possible
+/// matches.
+template <class InputIt, class UnaryPredicate, class Comparator>
+InputIt find_best(InputIt First, InputIt Last, UnaryPredicate Predicate,
+ Comparator BetterThan) {
+ InputIt Best = Last;
+
+ for (InputIt I = First; I != Last; ++I) {
+ unsigned Matched = Predicate(*I);
+ if (Matched != FindBest_NoMatch) {
+ DEBUG(dbgs() << std::distance(First, I) << " is a match (";
+ I->print(dbgs()); dbgs() << ")\n");
+ if (Best == Last || BetterThan(*I, *Best)) {
+ DEBUG(dbgs() << ".. and it beats the last one\n");
+ Best = I;
+ }
+ }
+ if (Matched == FindBest_PerfectMatch) {
+ DEBUG(dbgs() << ".. and it is unbeatable\n");
+ break;
+ }
+ }
+
+ return Best;
+}
+
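find_best generalizes std::find_if with a "good enough to stop early" signal. Later in this file, sortRelocs applies it to pick the low-part partner for each high-part relocation, roughly as follows (a paraphrase of the call shape, not an exact excerpt; Sorted and R stand for the working list and the relocation being placed):

    // Sketch: choose the best matching *LO16 entry for relocation R.
    auto InsertionPoint = find_best(
        Sorted.begin(), Sorted.end(),
        [&](const MipsRelocationEntry &X) {
          return isMatchingReloc(X, R, getMatchingLoType(R));
        },
        compareMatchingRelocs);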
+/// Determine the low relocation that matches the given relocation.
+/// If the relocation does not need a low relocation then the return value
+/// is ELF::R_MIPS_NONE.
+///
+/// The relocations that need a matching low part are
+/// R_(MIPS|MICROMIPS|MIPS16)_HI16 for all symbols and
+/// R_(MIPS|MICROMIPS|MIPS16)_GOT16 for local symbols only.
+static unsigned getMatchingLoType(const ELFRelocationEntry &Reloc) {
+ unsigned Type = Reloc.Type;
+ if (Type == ELF::R_MIPS_HI16)
+ return ELF::R_MIPS_LO16;
+ if (Type == ELF::R_MICROMIPS_HI16)
+ return ELF::R_MICROMIPS_LO16;
+ if (Type == ELF::R_MIPS16_HI16)
+ return ELF::R_MIPS16_LO16;
+
+ if (Reloc.OriginalSymbol->getBinding() != ELF::STB_LOCAL)
+ return ELF::R_MIPS_NONE;
+
+ if (Type == ELF::R_MIPS_GOT16)
+ return ELF::R_MIPS_LO16;
+ if (Type == ELF::R_MICROMIPS_GOT16)
+ return ELF::R_MICROMIPS_LO16;
+ if (Type == ELF::R_MIPS16_GOT16)
+ return ELF::R_MIPS16_LO16;
+
+ return ELF::R_MIPS_NONE;
+}
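These HI/LO pairs exist because the linker reassembles the full addend as AHL = (AHI << 16) + (short)ALO; the low half is sign-extended, so the high half must pre-compensate for the carry out of bit 15. The standard %hi/%lo split, stated as background (not patch code):

    // Sketch: split a 32-bit value so that (hi << 16) + (int16_t)lo
    // reassembles it; +0x8000 absorbs the low half's sign extension.
    #include <cassert>
    #include <cstdint>

    uint16_t loHalf(uint32_t V) { return (uint16_t)(V & 0xffff); }
    uint16_t hiHalf(uint32_t V) { return (uint16_t)((V + 0x8000) >> 16); }

    int main() {
      uint32_t V = 0x1234ABCD;
      int32_t R = ((int32_t)hiHalf(V) << 16) + (int16_t)loHalf(V);
      assert((uint32_t)R == V);
      return 0;
    }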
- ~MipsELFObjectWriter() override;
+/// Determine whether a relocation (X) matches the one given in R.
+///
+/// A relocation matches if:
+/// - Its type matches that of a corresponding low part. This is provided in
+/// MatchingType for efficiency.
+/// - It's based on the same symbol.
+/// - Its offset is greater than or equal to that of the one given in R.
+/// It should be noted that this rule assumes the programmer does not use
+/// offsets that exceed the alignment of the symbol. The carry-bit will be
+/// incorrect if this is not true.
+///
+/// A matching relocation is unbeatable if:
+/// - It is not already involved in a match.
+/// - Its offset is exactly that of the one given in R.
+static FindBestPredicateResult isMatchingReloc(const MipsRelocationEntry &X,
+ const ELFRelocationEntry &R,
+ unsigned MatchingType) {
+ if (X.R.Type == MatchingType && X.R.OriginalSymbol == R.OriginalSymbol) {
+ if (!X.Matched &&
+ X.R.OriginalAddend == R.OriginalAddend)
+ return FindBest_PerfectMatch;
+ else if (X.R.OriginalAddend >= R.OriginalAddend)
+ return FindBest_Match;
+ }
+ return FindBest_NoMatch;
+}
- unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel) const override;
- bool needsRelocateWithSymbol(const MCSymbol &Sym,
- unsigned Type) const override;
- virtual void sortRelocs(const MCAssembler &Asm,
- std::vector<ELFRelocationEntry> &Relocs) override;
- };
+/// Determine whether Candidate or PreviousBest is the better match.
+/// The return value is true if Candidate is the better match.
+///
+/// A matching relocation is a better match if:
+/// - It has a smaller addend.
+/// - It is not already involved in a match.
+static bool compareMatchingRelocs(const MipsRelocationEntry &Candidate,
+ const MipsRelocationEntry &PreviousBest) {
+ if (Candidate.R.OriginalAddend != PreviousBest.R.OriginalAddend)
+ return Candidate.R.OriginalAddend < PreviousBest.R.OriginalAddend;
+ return PreviousBest.Matched && !Candidate.Matched;
}
+#ifndef NDEBUG
+/// Print all the relocations.
+template <class Container>
+static void dumpRelocs(const char *Prefix, const Container &Relocs) {
+ for (const auto &R : Relocs)
+ dbgs() << Prefix << R << "\n";
+}
+#endif
+
+} // end anonymous namespace
+
MipsELFObjectWriter::MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI,
bool _isN64, bool IsLittleEndian)
: MCELFObjectTargetWriter(_is64Bit, OSABI, ELF::EM_MIPS,
@@ -61,7 +212,8 @@ MipsELFObjectWriter::MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI,
MipsELFObjectWriter::~MipsELFObjectWriter() {}
-unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
+unsigned MipsELFObjectWriter::getRelocType(MCContext &Ctx,
+ const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const {
// Determine the type of the relocation.
@@ -89,6 +241,14 @@ unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
return ELF::R_MICROMIPS_PC10_S1;
case Mips::fixup_MICROMIPS_PC16_S1:
return ELF::R_MICROMIPS_PC16_S1;
+ case Mips::fixup_MICROMIPS_PC26_S1:
+ return ELF::R_MICROMIPS_PC26_S1;
+ case Mips::fixup_MICROMIPS_PC19_S2:
+ return ELF::R_MICROMIPS_PC19_S2;
+ case Mips::fixup_MICROMIPS_PC18_S3:
+ return ELF::R_MICROMIPS_PC18_S3;
+ case Mips::fixup_MICROMIPS_PC21_S1:
+ return ELF::R_MICROMIPS_PC21_S1;
case Mips::fixup_MIPS_PC19_S2:
return ELF::R_MIPS_PC19_S2;
case Mips::fixup_MIPS_PC18_S3:
@@ -125,8 +285,7 @@ unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
return ELF::R_MIPS_26;
case Mips::fixup_Mips_CALL16:
return ELF::R_MIPS_CALL16;
- case Mips::fixup_Mips_GOT_Global:
- case Mips::fixup_Mips_GOT_Local:
+ case Mips::fixup_Mips_GOT:
return ELF::R_MIPS_GOT16;
case Mips::fixup_Mips_HI16:
return ELF::R_MIPS_HI16;
@@ -211,213 +370,266 @@ unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
llvm_unreachable("invalid fixup kind!");
}
-// Sort entries by SortOffset in descending order.
-// When there are more *HI16 relocs paired with one *LO16 reloc, the 2nd rule
-// sorts them in ascending order of R.Offset.
-static int cmpRelMips(const MipsRelocationEntry *AP,
- const MipsRelocationEntry *BP) {
- const MipsRelocationEntry &A = *AP;
- const MipsRelocationEntry &B = *BP;
- if (A.SortOffset != B.SortOffset)
- return B.SortOffset - A.SortOffset;
- if (A.R.Offset != B.R.Offset)
- return A.R.Offset - B.R.Offset;
- if (B.R.Type != A.R.Type)
- return B.R.Type - A.R.Type;
- //llvm_unreachable("ELFRelocs might be unstable!");
- return 0;
-}
-
-// For the given Reloc.Type, return the matching relocation type, as in the
-// table below.
-static unsigned getMatchingLoType(const MCAssembler &Asm,
- const ELFRelocationEntry &Reloc) {
- unsigned Type = Reloc.Type;
- if (Type == ELF::R_MIPS_HI16)
- return ELF::R_MIPS_LO16;
- if (Type == ELF::R_MICROMIPS_HI16)
- return ELF::R_MICROMIPS_LO16;
- if (Type == ELF::R_MIPS16_HI16)
- return ELF::R_MIPS16_LO16;
-
- if (Reloc.Symbol->getBinding() != ELF::STB_LOCAL)
- return ELF::R_MIPS_NONE;
-
- if (Type == ELF::R_MIPS_GOT16)
- return ELF::R_MIPS_LO16;
- if (Type == ELF::R_MICROMIPS_GOT16)
- return ELF::R_MICROMIPS_LO16;
- if (Type == ELF::R_MIPS16_GOT16)
- return ELF::R_MIPS16_LO16;
-
- return ELF::R_MIPS_NONE;
-}
-
-// Return true if First needs a matching *LO16, its matching *LO16 type equals
-// Second's type and both relocations are against the same symbol.
-static bool areMatchingHiAndLo(const MCAssembler &Asm,
- const ELFRelocationEntry &First,
- const ELFRelocationEntry &Second) {
- return getMatchingLoType(Asm, First) != ELF::R_MIPS_NONE &&
- getMatchingLoType(Asm, First) == Second.Type &&
- First.Symbol && First.Symbol == Second.Symbol;
-}
-
-// Return true if MipsRelocs[Index] is a *LO16 preceded by a matching *HI16.
-static bool
-isPrecededByMatchingHi(const MCAssembler &Asm, uint32_t Index,
- std::vector<MipsRelocationEntry> &MipsRelocs) {
- return Index < MipsRelocs.size() - 1 &&
- areMatchingHiAndLo(Asm, MipsRelocs[Index + 1].R, MipsRelocs[Index].R);
-}
-
-// Return true if MipsRelocs[Index] is a *LO16 not preceded by a matching *HI16
-// and not chosen by a *HI16 as a match.
-static bool isFreeLo(const MCAssembler &Asm, uint32_t Index,
- std::vector<MipsRelocationEntry> &MipsRelocs) {
- return Index < MipsRelocs.size() && !MipsRelocs[Index].HasMatchingHi &&
- !isPrecededByMatchingHi(Asm, Index, MipsRelocs);
-}
-
-// Lo is chosen as a match for Hi, set their fields accordingly.
-// Mips instructions have fixed length of at least two bytes (two for
-// micromips/mips16, four for mips32/64), so we can set HI's SortOffset to
-// matching LO's Offset minus one to simplify the sorting function.
-static void setMatch(MipsRelocationEntry &Hi, MipsRelocationEntry &Lo) {
- Lo.HasMatchingHi = true;
- Hi.SortOffset = Lo.R.Offset - 1;
-}
-
-// We sort relocation table entries by offset, except for one additional rule
-// required by MIPS ABI: every *HI16 relocation must be immediately followed by
-// the corresponding *LO16 relocation. We also support a GNU extension that
-// allows more *HI16s paired with one *LO16.
-//
-// *HI16 relocations and their matching *LO16 are:
-//
-// +---------------------------------------------+-------------------+
-// | *HI16 | matching *LO16 |
-// |---------------------------------------------+-------------------|
-// | R_MIPS_HI16, local R_MIPS_GOT16 | R_MIPS_LO16 |
-// | R_MICROMIPS_HI16, local R_MICROMIPS_GOT16 | R_MICROMIPS_LO16 |
-// | R_MIPS16_HI16, local R_MIPS16_GOT16 | R_MIPS16_LO16 |
-// +---------------------------------------------+-------------------+
-//
-// (local R_*_GOT16 meaning R_*_GOT16 against the local symbol.)
-//
-// To handle *HI16 and *LO16 relocations, the linker needs a combined addend
-// ("AHL") calculated from both *HI16 ("AHI") and *LO16 ("ALO") relocations:
-// AHL = (AHI << 16) + (short)ALO;
-//
-// We are reusing gnu as sorting algorithm so we are emitting the relocation
-// table sorted the same way as gnu as would sort it, for easier comparison of
-// the generated .o files.
-//
-// The logic is:
-// search the table (starting from the highest offset and going back to zero)
-// for all *HI16 relocations that don't have a matching *LO16.
-// For every such HI, find a matching LO with highest offset that isn't already
-// matched with another HI. If there are no free LOs, match it with the first
-// found (starting from lowest offset).
-// When there are more HIs matched with one LO, sort them in descending order by
-// offset.
-//
-// In other words, when searching for a matching LO:
-// - don't look for a 'better' match for the HIs that are already followed by a
-// matching LO;
-// - prefer LOs without a pair;
-// - prefer LOs with higher offset;
-
-static int cmpRel(const ELFRelocationEntry *AP, const ELFRelocationEntry *BP) {
- const ELFRelocationEntry &A = *AP;
- const ELFRelocationEntry &B = *BP;
- if (A.Offset != B.Offset)
- return B.Offset - A.Offset;
- if (B.Type != A.Type)
- return A.Type - B.Type;
- return 0;
-}
-
+/// Sort relocation table entries by offset except where another order is
+/// required by the MIPS ABI.
+///
+/// MIPS has a few relocations that have an AHL component in the expression used
+/// to evaluate them. This AHL component is an addend with the same number of
+/// bits as a symbol value but not all of our ABIs are able to supply a
+/// sufficiently sized addend in a single relocation.
+///
+/// The O32 ABI, for example, uses REL relocations which store the addend in
+/// the section data. All the relocations with AHL components affect 16-bit
+/// fields so the addend for a single relocation is limited to 16 bits. This ABI
+/// resolves the limitation by linking relocations (e.g. R_MIPS_HI16 and
+/// R_MIPS_LO16) and distributing the addend between the linked relocations. The
+/// ABI mandates that such relocations must be next to each other in a
+/// particular order (e.g. R_MIPS_HI16 must be immediately followed by a
+/// matching R_MIPS_LO16) but the rule is less strict in practice.
+///
+/// The de facto standard is lenient in the following ways:
+/// - 'Immediately following' does not refer to the next relocation entry but
+/// the next matching relocation.
+/// - There may be multiple high-part relocations for one low-part relocation.
+/// - There may be multiple low-part relocations for one high-part relocation.
+/// - The AHL addends of the paired parts do not have to be exactly equal as
+/// long as the difference does not affect the carry bit from bit 15 into bit
+/// 16. This is to allow, for example, the use of %lo(foo) and %lo(foo+4)
+/// when loading both halves of a long long.
+///
+/// See getMatchingLoType() for a description of which high-part relocations
+/// match which low-part relocations. One particular thing to note is that
+/// R_MIPS_GOT16 and similar only have AHL addends if they refer to local
+/// symbols.
+///
+/// It should also be noted that this function is not affected by whether
+/// the symbol was kept or rewritten into a section-relative equivalent. We
+/// always match using the expressions from the source.
void MipsELFObjectWriter::sortRelocs(const MCAssembler &Asm,
std::vector<ELFRelocationEntry> &Relocs) {
if (Relocs.size() < 2)
return;
- // Sorts entries by Offset in descending order.
- array_pod_sort(Relocs.begin(), Relocs.end(), cmpRel);
-
- // Init MipsRelocs from Relocs.
- std::vector<MipsRelocationEntry> MipsRelocs;
- for (unsigned I = 0, E = Relocs.size(); I != E; ++I)
- MipsRelocs.push_back(MipsRelocationEntry(Relocs[I]));
-
- // Find a matching LO for all HIs that need it.
- for (int32_t I = 0, E = MipsRelocs.size(); I != E; ++I) {
- if (getMatchingLoType(Asm, MipsRelocs[I].R) == ELF::R_MIPS_NONE ||
- (I > 0 && isPrecededByMatchingHi(Asm, I - 1, MipsRelocs)))
- continue;
-
- int32_t MatchedLoIndex = -1;
-
- // Search the list in the ascending order of Offset.
- for (int32_t J = MipsRelocs.size() - 1, N = -1; J != N; --J) {
- // check for a match
- if (areMatchingHiAndLo(Asm, MipsRelocs[I].R, MipsRelocs[J].R) &&
- (MatchedLoIndex == -1 || // first match
- // or we already have a match,
- // but this one is with higher offset and it's free
- (MatchedLoIndex > J && isFreeLo(Asm, J, MipsRelocs))))
- MatchedLoIndex = J;
- }
-
- if (MatchedLoIndex != -1)
- // We have a match.
- setMatch(MipsRelocs[I], MipsRelocs[MatchedLoIndex]);
+ // Sort relocations by the address they are applied to.
+ std::sort(Relocs.begin(), Relocs.end(),
+ [](const ELFRelocationEntry &A, const ELFRelocationEntry &B) {
+ return A.Offset < B.Offset;
+ });
+
+ std::list<MipsRelocationEntry> Sorted;
+ std::list<ELFRelocationEntry> Remainder;
+
+ DEBUG(dumpRelocs("R: ", Relocs));
+
+ // Separate the movable relocations (AHL relocations using the high bits) from
+ // the immobile relocations (everything else). This does not preserve high/low
+ // matches that already existed in the input.
+ copy_if_else(Relocs.begin(), Relocs.end(), std::back_inserter(Remainder),
+ std::back_inserter(Sorted), [](const ELFRelocationEntry &Reloc) {
+ return getMatchingLoType(Reloc) != ELF::R_MIPS_NONE;
+ });
+
+ for (auto &R : Remainder) {
+ DEBUG(dbgs() << "Matching: " << R << "\n");
+
+ unsigned MatchingType = getMatchingLoType(R);
+ assert(MatchingType != ELF::R_MIPS_NONE &&
+ "Wrong list for reloc that doesn't need a match");
+
+ // Find the best matching relocation for the current high part.
+ // See isMatchingReloc for a description of a matching relocation and
+ // compareMatchingRelocs for a description of what 'best' means.
+ auto InsertionPoint =
+ find_best(Sorted.begin(), Sorted.end(),
+ [&R, &MatchingType](const MipsRelocationEntry &X) {
+ return isMatchingReloc(X, R, MatchingType);
+ },
+ compareMatchingRelocs);
+
+ // If we matched then insert the high part in front of the match and mark
+ // both relocations as being involved in a match. We only mark the high
+ // part for cosmetic reasons in the debug output.
+ //
+ // If we failed to find a match then the high part is orphaned. This is not
+ // permitted since the relocation cannot be evaluated without knowing the
+ // carry-in. We can sometimes handle this using a matching low part that is
+ // already used in a match but we already cover that case in
+ // isMatchingReloc and compareMatchingRelocs. For the remaining cases we
+ // should insert the high part at the end of the list. This will cause the
+ // linker to fail but the alternative is to cause the linker to bind the
+ // high part to a semi-matching low part and silently calculate the wrong
+ // value. Unfortunately we have no means to warn the user that we did this
+ // so leave it up to the linker to complain about it.
+ if (InsertionPoint != Sorted.end())
+ InsertionPoint->Matched = true;
+ Sorted.insert(InsertionPoint, R)->Matched = true;
}
- // SortOffsets are calculated, call the sorting function.
- array_pod_sort(MipsRelocs.begin(), MipsRelocs.end(), cmpRelMips);
+ DEBUG(dumpRelocs("S: ", Sorted));
+
+ assert(Relocs.size() == Sorted.size() && "Some relocs were not consumed");
- // Copy sorted MipsRelocs back to Relocs.
- for (unsigned I = 0, E = MipsRelocs.size(); I != E; ++I)
- Relocs[I] = MipsRelocs[I].R;
+ // Overwrite the original vector with the sorted elements. The caller expects
+ // them in reverse order.
+ unsigned CopyTo = 0;
+ for (const auto &R : reverse(Sorted))
+ Relocs[CopyTo++] = R.R;
}
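
// A minimal, self-contained sketch of the AHL arithmetic that makes this
// pairing necessary (the names below are illustrative, not part of the
// patch): the linker computes AHL = (AHI << 16) + (short)ALO, so the high
// half must pre-compensate for the sign-extension of the low half.
#include <cassert>
#include <cstdint>

// Split a 32-bit addend into the %hi/%lo halves an assembler would emit.
static void splitAHL(int32_t AHL, uint16_t &AHI, uint16_t &ALO) {
  ALO = AHL & 0xffff;
  // Adding 0x8000 carries into the high half exactly when (short)ALO < 0.
  AHI = (AHL + 0x8000) >> 16;
}

int main() {
  uint16_t AHI, ALO;
  splitAHL(0x2468A000, AHI, ALO);
  assert(AHI == 0x2469 && ALO == 0xA000);
  // Recombine the way the linker does: (0x2469 << 16) - 0x6000 == 0x2468A000.
  int32_t AHL = ((int32_t)AHI << 16) + (int16_t)ALO;
  assert(AHL == 0x2468A000);
  return 0;
}
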
bool MipsELFObjectWriter::needsRelocateWithSymbol(const MCSymbol &Sym,
unsigned Type) const {
- // FIXME: This is extremely conservative. This really needs to use a
- // whitelist with a clear explanation for why each realocation needs to
- // point to the symbol, not to the section.
+ // If it's a compound relocation for N64 then we need the relocation if any
+ // sub-relocation needs it.
+ if (!isUInt<8>(Type))
+ return needsRelocateWithSymbol(Sym, Type & 0xff) ||
+ needsRelocateWithSymbol(Sym, (Type >> 8) & 0xff) ||
+ needsRelocateWithSymbol(Sym, (Type >> 16) & 0xff);
+
switch (Type) {
default:
+ errs() << Type << "\n";
+ llvm_unreachable("Unexpected relocation");
return true;
+ // This relocation doesn't affect the section data.
+ case ELF::R_MIPS_NONE:
+ return false;
+
+ // On REL ABIs (e.g. O32), these relocations form pairs. The pairing is done
+ // by the static linker by matching the symbol and offset.
+ // We only see one relocation at a time but it's still safe to relocate with
+ // the section so long as both relocations make the same decision.
+ //
+ // Some older linkers may require the symbol for particular cases. Such cases
+ // are not supported yet but can be added as required.
case ELF::R_MIPS_GOT16:
case ELF::R_MIPS16_GOT16:
case ELF::R_MICROMIPS_GOT16:
- llvm_unreachable("Should have been handled already");
-
- // These relocations might be paired with another relocation. The pairing is
- // done by the static linker by matching the symbol. Since we only see one
- // relocation at a time, we have to force them to relocate with a symbol to
- // avoid ending up with a pair where one points to a section and another
- // points to a symbol.
case ELF::R_MIPS_HI16:
case ELF::R_MIPS16_HI16:
case ELF::R_MICROMIPS_HI16:
case ELF::R_MIPS_LO16:
case ELF::R_MIPS16_LO16:
case ELF::R_MICROMIPS_LO16:
- return true;
+ // FIXME: It should be safe to return false for STO_MIPS_MICROMIPS symbols,
+ // but we neglect to handle the adjustment to the LSB of the addend that
+ // it causes in applyFixup() and similar.
+ if (cast<MCSymbolELF>(Sym).getOther() & ELF::STO_MIPS_MICROMIPS)
+ return true;
+ return false;
+ case ELF::R_MIPS_16:
case ELF::R_MIPS_32:
+ case ELF::R_MIPS_GPREL32:
if (cast<MCSymbolELF>(Sym).getOther() & ELF::STO_MIPS_MICROMIPS)
return true;
- // falltrough
+ // fallthrough
case ELF::R_MIPS_26:
case ELF::R_MIPS_64:
case ELF::R_MIPS_GPREL16:
+ case ELF::R_MIPS_PC16:
+ case ELF::R_MIPS_SUB:
return false;
+
+ // FIXME: Many of these relocations should probably return false but this
+ // hasn't been confirmed to be safe yet.
+ case ELF::R_MIPS_REL32:
+ case ELF::R_MIPS_LITERAL:
+ case ELF::R_MIPS_CALL16:
+ case ELF::R_MIPS_SHIFT5:
+ case ELF::R_MIPS_SHIFT6:
+ case ELF::R_MIPS_GOT_DISP:
+ case ELF::R_MIPS_GOT_PAGE:
+ case ELF::R_MIPS_GOT_OFST:
+ case ELF::R_MIPS_GOT_HI16:
+ case ELF::R_MIPS_GOT_LO16:
+ case ELF::R_MIPS_INSERT_A:
+ case ELF::R_MIPS_INSERT_B:
+ case ELF::R_MIPS_DELETE:
+ case ELF::R_MIPS_HIGHER:
+ case ELF::R_MIPS_HIGHEST:
+ case ELF::R_MIPS_CALL_HI16:
+ case ELF::R_MIPS_CALL_LO16:
+ case ELF::R_MIPS_SCN_DISP:
+ case ELF::R_MIPS_REL16:
+ case ELF::R_MIPS_ADD_IMMEDIATE:
+ case ELF::R_MIPS_PJUMP:
+ case ELF::R_MIPS_RELGOT:
+ case ELF::R_MIPS_JALR:
+ case ELF::R_MIPS_TLS_DTPMOD32:
+ case ELF::R_MIPS_TLS_DTPREL32:
+ case ELF::R_MIPS_TLS_DTPMOD64:
+ case ELF::R_MIPS_TLS_DTPREL64:
+ case ELF::R_MIPS_TLS_GD:
+ case ELF::R_MIPS_TLS_LDM:
+ case ELF::R_MIPS_TLS_DTPREL_HI16:
+ case ELF::R_MIPS_TLS_DTPREL_LO16:
+ case ELF::R_MIPS_TLS_GOTTPREL:
+ case ELF::R_MIPS_TLS_TPREL32:
+ case ELF::R_MIPS_TLS_TPREL64:
+ case ELF::R_MIPS_TLS_TPREL_HI16:
+ case ELF::R_MIPS_TLS_TPREL_LO16:
+ case ELF::R_MIPS_GLOB_DAT:
+ case ELF::R_MIPS_PC21_S2:
+ case ELF::R_MIPS_PC26_S2:
+ case ELF::R_MIPS_PC18_S3:
+ case ELF::R_MIPS_PC19_S2:
+ case ELF::R_MIPS_PCHI16:
+ case ELF::R_MIPS_PCLO16:
+ case ELF::R_MIPS_COPY:
+ case ELF::R_MIPS_JUMP_SLOT:
+ case ELF::R_MIPS_NUM:
+ case ELF::R_MIPS_PC32:
+ case ELF::R_MIPS_EH:
+ case ELF::R_MICROMIPS_26_S1:
+ case ELF::R_MICROMIPS_GPREL16:
+ case ELF::R_MICROMIPS_LITERAL:
+ case ELF::R_MICROMIPS_PC7_S1:
+ case ELF::R_MICROMIPS_PC10_S1:
+ case ELF::R_MICROMIPS_PC16_S1:
+ case ELF::R_MICROMIPS_CALL16:
+ case ELF::R_MICROMIPS_GOT_DISP:
+ case ELF::R_MICROMIPS_GOT_PAGE:
+ case ELF::R_MICROMIPS_GOT_OFST:
+ case ELF::R_MICROMIPS_GOT_HI16:
+ case ELF::R_MICROMIPS_GOT_LO16:
+ case ELF::R_MICROMIPS_SUB:
+ case ELF::R_MICROMIPS_HIGHER:
+ case ELF::R_MICROMIPS_HIGHEST:
+ case ELF::R_MICROMIPS_CALL_HI16:
+ case ELF::R_MICROMIPS_CALL_LO16:
+ case ELF::R_MICROMIPS_SCN_DISP:
+ case ELF::R_MICROMIPS_JALR:
+ case ELF::R_MICROMIPS_HI0_LO16:
+ case ELF::R_MICROMIPS_TLS_GD:
+ case ELF::R_MICROMIPS_TLS_LDM:
+ case ELF::R_MICROMIPS_TLS_DTPREL_HI16:
+ case ELF::R_MICROMIPS_TLS_DTPREL_LO16:
+ case ELF::R_MICROMIPS_TLS_GOTTPREL:
+ case ELF::R_MICROMIPS_TLS_TPREL_HI16:
+ case ELF::R_MICROMIPS_TLS_TPREL_LO16:
+ case ELF::R_MICROMIPS_GPREL7_S2:
+ case ELF::R_MICROMIPS_PC23_S2:
+ case ELF::R_MICROMIPS_PC21_S1:
+ case ELF::R_MICROMIPS_PC26_S1:
+ case ELF::R_MICROMIPS_PC18_S3:
+ case ELF::R_MICROMIPS_PC19_S2:
+ return true;
+
+ // FIXME: Many of these should probably return false but MIPS16 isn't
+ // supported by the integrated assembler.
+ case ELF::R_MIPS16_26:
+ case ELF::R_MIPS16_GPREL:
+ case ELF::R_MIPS16_CALL16:
+ case ELF::R_MIPS16_TLS_GD:
+ case ELF::R_MIPS16_TLS_LDM:
+ case ELF::R_MIPS16_TLS_DTPREL_HI16:
+ case ELF::R_MIPS16_TLS_DTPREL_LO16:
+ case ELF::R_MIPS16_TLS_GOTTPREL:
+ case ELF::R_MIPS16_TLS_TPREL_HI16:
+ case ELF::R_MIPS16_TLS_TPREL_LO16:
+ llvm_unreachable("Unsupported MIPS16 relocation");
+ return true;
}
}
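
// A minimal, self-contained sketch of the N64 compound relocation layout
// assumed by the isUInt<8>(Type) check above: up to three 8-bit relocation
// codes packed into one value and applied in sequence. The numbers used
// below are the standard MIPS ELF values (R_MIPS_GPREL16 = 7, R_MIPS_SUB =
// 36, R_MIPS_HI16 = 5), the triple emitted for %hi(%neg(%gp_rel(x))).
#include <cassert>
#include <cstdint>

static uint32_t packN64(uint8_t First, uint8_t Second, uint8_t Third) {
  return First | (unsigned)Second << 8 | (unsigned)Third << 16;
}

int main() {
  uint32_t Type = packN64(7, 36, 5);
  assert((Type & 0xff) == 7);
  assert(((Type >> 8) & 0xff) == 36);
  assert(((Type >> 16) & 0xff) == 5);
  return 0;
}
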
diff --git a/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h b/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
index 3652f4bab0d4..b4d8e9494650 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
@@ -50,11 +50,8 @@ namespace Mips {
// 16 bit literal fixup resulting in - R_MIPS_LITERAL.
fixup_Mips_LITERAL,
- // Global symbol fixup resulting in - R_MIPS_GOT16.
- fixup_Mips_GOT_Global,
-
- // Local symbol fixup resulting in - R_MIPS_GOT16.
- fixup_Mips_GOT_Local,
+ // Symbol fixup resulting in - R_MIPS_GOT16.
+ fixup_Mips_GOT,
// PC relative branch fixup resulting in - R_MIPS_PC16.
fixup_Mips_PC16,
@@ -170,6 +167,18 @@ namespace Mips {
// resulting in - R_MICROMIPS_PC16_S1
fixup_MICROMIPS_PC16_S1,
+ // resulting in - R_MICROMIPS_PC26_S1
+ fixup_MICROMIPS_PC26_S1,
+
+ // resulting in - R_MICROMIPS_PC19_S2
+ fixup_MICROMIPS_PC19_S2,
+
+ // resulting in - R_MICROMIPS_PC18_S3
+ fixup_MICROMIPS_PC18_S3,
+
+ // resulting in - R_MICROMIPS_PC21_S1
+ fixup_MICROMIPS_PC21_S1,
+
// resulting in - R_MICROMIPS_CALL16
fixup_MICROMIPS_CALL16,
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
index 4d554583dc78..1622b2212665 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
@@ -42,4 +42,9 @@ MipsMCAsmInfo::MipsMCAsmInfo(const Triple &TheTriple) {
SupportsDebugInformation = true;
ExceptionsType = ExceptionHandling::DwarfCFI;
DwarfRegNumForCFI = true;
+
+ // Enable IAS by default for O32.
+ if (TheTriple.getArch() == Triple::mips ||
+ TheTriple.getArch() == Triple::mipsel)
+ UseIntegratedAssembler = true;
}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
index 4b030ebfce8c..401c7d42c4ce 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
@@ -78,19 +78,25 @@ static void LowerLargeShift(MCInst& Inst) {
case Mips::DROTR:
Inst.setOpcode(Mips::DROTR32);
return;
+ case Mips::DSLL_MM64R6:
+ Inst.setOpcode(Mips::DSLL32_MM64R6);
+ return;
+ case Mips::DSRL_MM64R6:
+ Inst.setOpcode(Mips::DSRL32_MM64R6);
+ return;
+ case Mips::DSRA_MM64R6:
+ Inst.setOpcode(Mips::DSRA32_MM64R6);
+ return;
+ case Mips::DROTR_MM64R6:
+ Inst.setOpcode(Mips::DROTR32_MM64R6);
+ return;
}
}
-// Pick a DEXT or DINS instruction variant based on the pos and size operands
-static void LowerDextDins(MCInst& InstIn) {
- int Opcode = InstIn.getOpcode();
-
- if (Opcode == Mips::DEXT)
- assert(InstIn.getNumOperands() == 4 &&
- "Invalid no. of machine operands for DEXT!");
- else // Only DEXT and DINS are possible
- assert(InstIn.getNumOperands() == 5 &&
- "Invalid no. of machine operands for DINS!");
+// Pick a DINS instruction variant based on the pos and size operands
+static void LowerDins(MCInst& InstIn) {
+ assert(InstIn.getNumOperands() == 5 &&
+ "Invalid no. of machine operands for DINS!");
assert(InstIn.getOperand(2).isImm());
int64_t pos = InstIn.getOperand(2).getImm();
@@ -98,20 +104,50 @@ static void LowerDextDins(MCInst& InstIn) {
int64_t size = InstIn.getOperand(3).getImm();
if (size <= 32) {
- if (pos < 32) // DEXT/DINS, do nothing
+ if (pos < 32) // DINS, do nothing
return;
- // DEXTU/DINSU
+ // DINSU
InstIn.getOperand(2).setImm(pos - 32);
- InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTU : Mips::DINSU);
+ InstIn.setOpcode(Mips::DINSU);
return;
}
- // DEXTM/DINSM
- assert(pos < 32 && "DEXT/DINS cannot have both size and pos > 32");
+ // DINSM
+ assert(pos < 32 && "DINS cannot have both size and pos > 32");
InstIn.getOperand(3).setImm(size - 32);
- InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTM : Mips::DINSM);
+ InstIn.setOpcode(Mips::DINSM);
return;
}
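
// A minimal, self-contained sketch of the opcode selection LowerDins()
// performs, with the MCInst plumbing stripped out: size <= 32 with pos < 32
// stays DINS, pos >= 32 becomes DINSU with pos - 32, and size > 32 becomes
// DINSM with size - 32.
#include <cassert>
#include <cstdint>

enum DinsVariant { DINS, DINSM, DINSU };

static DinsVariant selectDins(int64_t &Pos, int64_t &Size) {
  if (Size <= 32) {
    if (Pos < 32)
      return DINS; // Encodable as-is.
    Pos -= 32;     // DINSU encodes pos - 32.
    return DINSU;
  }
  assert(Pos < 32 && "DINS cannot have both size and pos > 32");
  Size -= 32;      // DINSM encodes size - 32.
  return DINSM;
}

int main() {
  int64_t Pos = 40, Size = 10;
  assert(selectDins(Pos, Size) == DINSU && Pos == 8);
  return 0;
}
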
+// Fix a bad compact branch encoding for beqc/bnec and bovc/bnvc.
+void MipsMCCodeEmitter::LowerCompactBranch(MCInst& Inst) const {
+ // The encoding may be illegal, i.e. !(rs < rt), but this situation is
+ // easily fixed by swapping the operands.
+ unsigned RegOp0 = Inst.getOperand(0).getReg();
+ unsigned RegOp1 = Inst.getOperand(1).getReg();
+
+ unsigned Reg0 = Ctx.getRegisterInfo()->getEncodingValue(RegOp0);
+ unsigned Reg1 = Ctx.getRegisterInfo()->getEncodingValue(RegOp1);
+
+ if (Inst.getOpcode() == Mips::BNEC || Inst.getOpcode() == Mips::BEQC) {
+ assert(Reg0 != Reg1 && "Instruction has bad operands ($rs == $rt)!");
+ if (Reg0 < Reg1)
+ return;
+ } else if (Inst.getOpcode() == Mips::BNVC || Inst.getOpcode() == Mips::BOVC) {
+ if (Reg0 >= Reg1)
+ return;
+ } else if (Inst.getOpcode() == Mips::BNVC_MMR6 ||
+ Inst.getOpcode() == Mips::BOVC_MMR6) {
+ if (Reg1 >= Reg0)
+ return;
+ } else
+ llvm_unreachable("Cannot rewrite unknown branch!");
+
+ Inst.getOperand(0).setReg(RegOp1);
+ Inst.getOperand(1).setReg(RegOp0);
+}
+
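
// A minimal, self-contained sketch of the swap performed above for the
// commutative cases: BEQC/BNEC require the encoded rs < rt, so an input like
// "beqc $5, $3, L" is re-encoded as "beqc $3, $5, L" with identical
// semantics.
#include <algorithm>
#include <cassert>

int main() {
  unsigned Rs = 5, Rt = 3; // Violates the rs < rt encoding restriction.
  if (Rs > Rt)
    std::swap(Rs, Rt);     // Legal because the comparison is commutative.
  assert(Rs < Rt);
  return 0;
}
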
bool MipsMCCodeEmitter::isMicroMips(const MCSubtargetInfo &STI) const {
return STI.getFeatureBits()[Mips::FeatureMicroMips];
}
@@ -161,12 +197,24 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
case Mips::DSRL:
case Mips::DSRA:
case Mips::DROTR:
+ case Mips::DSLL_MM64R6:
+ case Mips::DSRL_MM64R6:
+ case Mips::DSRA_MM64R6:
+ case Mips::DROTR_MM64R6:
LowerLargeShift(TmpInst);
break;
// Double extract instruction is chosen by pos and size operands
- case Mips::DEXT:
case Mips::DINS:
- LowerDextDins(TmpInst);
+ LowerDins(TmpInst);
+ break;
+ // Compact branches, enforce encoding restrictions.
+ case Mips::BEQC:
+ case Mips::BNEC:
+ case Mips::BOVC:
+ case Mips::BOVC_MMR6:
+ case Mips::BNVC:
+ case Mips::BNVC_MMR6:
+ LowerCompactBranch(TmpInst);
}
unsigned long N = Fixups.size();
@@ -237,6 +285,53 @@ getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
return 0;
}
+/// getBranchTargetOpValue1SImm16 - Return binary encoding of the branch
+/// target operand. If the machine operand requires relocation,
+/// record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getBranchTargetOpValue1SImm16(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+
+ // If the destination is an immediate, divide by 2.
+ if (MO.isImm()) return MO.getImm() >> 1;
+
+ assert(MO.isExpr() &&
+ "getBranchTargetOpValue expects only expressions or immediates");
+
+ const MCExpr *FixupExpression = MCBinaryExpr::createAdd(
+ MO.getExpr(), MCConstantExpr::create(-4, Ctx), Ctx);
+ Fixups.push_back(MCFixup::create(0, FixupExpression,
+ MCFixupKind(Mips::fixup_Mips_PC16)));
+ return 0;
+}
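
// A minimal, self-contained sketch of the immediate path above: the branch
// target is a byte offset into halfword-aligned code, so the encoder stores
// offset >> 1, while the expression path defers to a fixup carrying the PC
// adjustment (-4 here).
#include <cassert>
#include <cstdint>

static unsigned encodeBranchTarget(int64_t ByteOffset) {
  assert((ByteOffset & 1) == 0 && "branch target must be 2-byte aligned");
  return (unsigned)(ByteOffset >> 1);
}

int main() {
  assert(encodeBranchTarget(8) == 4);
  return 0;
}
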
+
+/// getBranchTargetOpValueMMR6 - Return binary encoding of the branch
+/// target operand. If the machine operand requires relocation,
+/// record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getBranchTargetOpValueMMR6(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+
+ // If the destination is an immediate, divide by 2.
+ if (MO.isImm())
+ return MO.getImm() >> 1;
+
+ assert(MO.isExpr() &&
+ "getBranchTargetOpValueMMR6 expects only expressions or immediates");
+
+ const MCExpr *FixupExpression = MCBinaryExpr::createAdd(
+ MO.getExpr(), MCConstantExpr::create(-2, Ctx), Ctx);
+ Fixups.push_back(MCFixup::create(0, FixupExpression,
+ MCFixupKind(Mips::fixup_Mips_PC16)));
+ return 0;
+}
+
/// getBranchTarget7OpValueMM - Return binary encoding of the microMIPS branch
/// target operand. If the machine operand requires relocation,
/// record the relocation and return zero.
@@ -327,6 +422,29 @@ getBranchTarget21OpValue(const MCInst &MI, unsigned OpNo,
return 0;
}
+/// getBranchTarget21OpValueMM - Return binary encoding of the branch
+/// target operand for microMIPS. If the machine operand requires
+/// relocation, record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getBranchTarget21OpValueMM(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+
+ // If the destination is an immediate, divide by 2.
+ if (MO.isImm()) return MO.getImm() >> 1;
+
+ assert(MO.isExpr() &&
+ "getBranchTarget21OpValueMM expects only expressions or immediates");
+
+ const MCExpr *FixupExpression = MCBinaryExpr::createAdd(
+ MO.getExpr(), MCConstantExpr::create(-4, Ctx), Ctx);
+ Fixups.push_back(MCFixup::create(0, FixupExpression,
+ MCFixupKind(Mips::fixup_MICROMIPS_PC21_S1)));
+ return 0;
+}
+
/// getBranchTarget26OpValue - Return binary encoding of the branch
/// target operand. If the machine operand requires relocation,
/// record the relocation and return zero.
@@ -363,7 +481,13 @@ unsigned MipsMCCodeEmitter::getBranchTarget26OpValueMM(
if (MO.isImm())
return MO.getImm() >> 1;
- // TODO: Push 26 PC fixup.
+ assert(MO.isExpr() &&
+ "getBranchTarget26OpValueMM expects only expressions or immediates");
+
+ const MCExpr *FixupExpression = MCBinaryExpr::createAdd(
+ MO.getExpr(), MCConstantExpr::create(-4, Ctx), Ctx);
+ Fixups.push_back(MCFixup::create(0, FixupExpression,
+ MCFixupKind(Mips::fixup_MICROMIPS_PC26_S1)));
return 0;
}
@@ -510,126 +634,117 @@ getExprOpValue(const MCExpr *Expr, SmallVectorImpl<MCFixup> &Fixups,
Mips::Fixups FixupKind = Mips::Fixups(0);
switch (MipsExpr->getKind()) {
- default: llvm_unreachable("Unsupported fixup kind for target expression!");
- case MipsMCExpr::VK_Mips_HIGHEST:
- FixupKind = Mips::fixup_Mips_HIGHEST;
+ case MipsMCExpr::MEK_NEG:
+ case MipsMCExpr::MEK_None:
+ case MipsMCExpr::MEK_Special:
+ llvm_unreachable("Unhandled fixup kind!");
break;
- case MipsMCExpr::VK_Mips_HIGHER:
- FixupKind = Mips::fixup_Mips_HIGHER;
+ case MipsMCExpr::MEK_CALL_HI16:
+ FixupKind = Mips::fixup_Mips_CALL_HI16;
break;
- case MipsMCExpr::VK_Mips_HI:
- FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_HI16
- : Mips::fixup_Mips_HI16;
+ case MipsMCExpr::MEK_CALL_LO16:
+ FixupKind = Mips::fixup_Mips_CALL_LO16;
break;
- case MipsMCExpr::VK_Mips_LO:
- FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_LO16
- : Mips::fixup_Mips_LO16;
+ case MipsMCExpr::MEK_DTPREL_HI:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_TLS_DTPREL_HI16
+ : Mips::fixup_Mips_DTPREL_HI;
break;
- }
- Fixups.push_back(MCFixup::create(0, MipsExpr, MCFixupKind(FixupKind)));
- return 0;
- }
-
- if (Kind == MCExpr::SymbolRef) {
- Mips::Fixups FixupKind = Mips::Fixups(0);
-
- switch(cast<MCSymbolRefExpr>(Expr)->getKind()) {
- default: llvm_unreachable("Unknown fixup kind!");
+ case MipsMCExpr::MEK_DTPREL_LO:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_TLS_DTPREL_LO16
+ : Mips::fixup_Mips_DTPREL_LO;
break;
- case MCSymbolRefExpr::VK_None:
- FixupKind = Mips::fixup_Mips_32; // FIXME: This is ok for O32/N32 but not N64.
+ case MipsMCExpr::MEK_GOTTPREL:
+ FixupKind = Mips::fixup_Mips_GOTTPREL;
+ break;
+ case MipsMCExpr::MEK_GOT:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_GOT16
+ : Mips::fixup_Mips_GOT;
+ break;
+ case MipsMCExpr::MEK_GOT_CALL:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_CALL16
+ : Mips::fixup_Mips_CALL16;
+ break;
+ case MipsMCExpr::MEK_GOT_DISP:
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_GOT_DISP
+ : Mips::fixup_Mips_GOT_DISP;
break;
- case MCSymbolRefExpr::VK_Mips_GPOFF_HI :
- FixupKind = Mips::fixup_Mips_GPOFF_HI;
+ case MipsMCExpr::MEK_GOT_HI16:
+ FixupKind = Mips::fixup_Mips_GOT_HI16;
break;
- case MCSymbolRefExpr::VK_Mips_GPOFF_LO :
- FixupKind = Mips::fixup_Mips_GPOFF_LO;
+ case MipsMCExpr::MEK_GOT_LO16:
+ FixupKind = Mips::fixup_Mips_GOT_LO16;
break;
- case MCSymbolRefExpr::VK_Mips_GOT_PAGE :
+ case MipsMCExpr::MEK_GOT_PAGE:
FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_GOT_PAGE
- : Mips::fixup_Mips_GOT_PAGE;
+ : Mips::fixup_Mips_GOT_PAGE;
break;
- case MCSymbolRefExpr::VK_Mips_GOT_OFST :
+ case MipsMCExpr::MEK_GOT_OFST:
FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_GOT_OFST
- : Mips::fixup_Mips_GOT_OFST;
+ : Mips::fixup_Mips_GOT_OFST;
break;
- case MCSymbolRefExpr::VK_Mips_GOT_DISP :
- FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_GOT_DISP
- : Mips::fixup_Mips_GOT_DISP;
- break;
- case MCSymbolRefExpr::VK_Mips_GPREL:
+ case MipsMCExpr::MEK_GPREL:
FixupKind = Mips::fixup_Mips_GPREL16;
break;
- case MCSymbolRefExpr::VK_Mips_GOT_CALL:
- FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_CALL16
- : Mips::fixup_Mips_CALL16;
+ case MipsMCExpr::MEK_LO: {
+ // Check for %lo(%neg(%gp_rel(X)))
+ if (MipsExpr->isGpOff()) {
+ FixupKind = Mips::fixup_Mips_GPOFF_LO;
+ break;
+ }
+ FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_LO16
+ : Mips::fixup_Mips_LO16;
break;
- case MCSymbolRefExpr::VK_Mips_GOT16:
- FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_GOT16
- : Mips::fixup_Mips_GOT_Global;
+ }
+ case MipsMCExpr::MEK_HIGHEST:
+ FixupKind = Mips::fixup_Mips_HIGHEST;
break;
- case MCSymbolRefExpr::VK_Mips_GOT:
- FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_GOT16
- : Mips::fixup_Mips_GOT_Local;
+ case MipsMCExpr::MEK_HIGHER:
+ FixupKind = Mips::fixup_Mips_HIGHER;
break;
- case MCSymbolRefExpr::VK_Mips_ABS_HI:
+ case MipsMCExpr::MEK_HI:
+ // Check for %hi(%neg(%gp_rel(X)))
+ if (MipsExpr->isGpOff()) {
+ FixupKind = Mips::fixup_Mips_GPOFF_HI;
+ break;
+ }
FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_HI16
- : Mips::fixup_Mips_HI16;
+ : Mips::fixup_Mips_HI16;
break;
- case MCSymbolRefExpr::VK_Mips_ABS_LO:
- FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_LO16
- : Mips::fixup_Mips_LO16;
+ case MipsMCExpr::MEK_PCREL_HI16:
+ FixupKind = Mips::fixup_MIPS_PCHI16;
+ break;
+ case MipsMCExpr::MEK_PCREL_LO16:
+ FixupKind = Mips::fixup_MIPS_PCLO16;
break;
- case MCSymbolRefExpr::VK_Mips_TLSGD:
+ case MipsMCExpr::MEK_TLSGD:
FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_TLS_GD
- : Mips::fixup_Mips_TLSGD;
+ : Mips::fixup_Mips_TLSGD;
break;
- case MCSymbolRefExpr::VK_Mips_TLSLDM:
+ case MipsMCExpr::MEK_TLSLDM:
FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_TLS_LDM
- : Mips::fixup_Mips_TLSLDM;
- break;
- case MCSymbolRefExpr::VK_Mips_DTPREL_HI:
- FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_TLS_DTPREL_HI16
- : Mips::fixup_Mips_DTPREL_HI;
- break;
- case MCSymbolRefExpr::VK_Mips_DTPREL_LO:
- FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_TLS_DTPREL_LO16
- : Mips::fixup_Mips_DTPREL_LO;
- break;
- case MCSymbolRefExpr::VK_Mips_GOTTPREL:
- FixupKind = Mips::fixup_Mips_GOTTPREL;
+ : Mips::fixup_Mips_TLSLDM;
break;
- case MCSymbolRefExpr::VK_Mips_TPREL_HI:
+ case MipsMCExpr::MEK_TPREL_HI:
FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_TLS_TPREL_HI16
- : Mips::fixup_Mips_TPREL_HI;
+ : Mips::fixup_Mips_TPREL_HI;
break;
- case MCSymbolRefExpr::VK_Mips_TPREL_LO:
+ case MipsMCExpr::MEK_TPREL_LO:
FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_TLS_TPREL_LO16
- : Mips::fixup_Mips_TPREL_LO;
+ : Mips::fixup_Mips_TPREL_LO;
break;
- case MCSymbolRefExpr::VK_Mips_HIGHER:
- FixupKind = Mips::fixup_Mips_HIGHER;
- break;
- case MCSymbolRefExpr::VK_Mips_HIGHEST:
- FixupKind = Mips::fixup_Mips_HIGHEST;
- break;
- case MCSymbolRefExpr::VK_Mips_GOT_HI16:
- FixupKind = Mips::fixup_Mips_GOT_HI16;
- break;
- case MCSymbolRefExpr::VK_Mips_GOT_LO16:
- FixupKind = Mips::fixup_Mips_GOT_LO16;
- break;
- case MCSymbolRefExpr::VK_Mips_CALL_HI16:
- FixupKind = Mips::fixup_Mips_CALL_HI16;
- break;
- case MCSymbolRefExpr::VK_Mips_CALL_LO16:
- FixupKind = Mips::fixup_Mips_CALL_LO16;
- break;
- case MCSymbolRefExpr::VK_Mips_PCREL_HI16:
- FixupKind = Mips::fixup_MIPS_PCHI16;
+ }
+ Fixups.push_back(MCFixup::create(0, MipsExpr, MCFixupKind(FixupKind)));
+ return 0;
+ }
+
+ if (Kind == MCExpr::SymbolRef) {
+ Mips::Fixups FixupKind = Mips::Fixups(0);
+
+ switch(cast<MCSymbolRefExpr>(Expr)->getKind()) {
+ default: llvm_unreachable("Unknown fixup kind!");
break;
- case MCSymbolRefExpr::VK_Mips_PCREL_LO16:
- FixupKind = Mips::fixup_MIPS_PCLO16;
+ case MCSymbolRefExpr::VK_None:
+ FixupKind = Mips::fixup_Mips_32; // FIXME: This is ok for O32/N32 but not N64.
break;
} // switch
@@ -660,61 +775,20 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
return getExprOpValue(MO.getExpr(),Fixups, STI);
}
-/// getMSAMemEncoding - Return binary encoding of memory operand for LD/ST
-/// instructions.
-unsigned
-MipsMCCodeEmitter::getMSAMemEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
- // Base register is encoded in bits 20-16, offset is encoded in bits 15-0.
- assert(MI.getOperand(OpNo).isReg());
- unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo),Fixups, STI) << 16;
- unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI);
-
- // The immediate field of an LD/ST instruction is scaled which means it must
- // be divided (when encoding) by the size (in bytes) of the instructions'
- // data format.
- // .b - 1 byte
- // .h - 2 bytes
- // .w - 4 bytes
- // .d - 8 bytes
- switch(MI.getOpcode())
- {
- default:
- assert (0 && "Unexpected instruction");
- break;
- case Mips::LD_B:
- case Mips::ST_B:
- // We don't need to scale the offset in this case
- break;
- case Mips::LD_H:
- case Mips::ST_H:
- OffBits >>= 1;
- break;
- case Mips::LD_W:
- case Mips::ST_W:
- OffBits >>= 2;
- break;
- case Mips::LD_D:
- case Mips::ST_D:
- OffBits >>= 3;
- break;
- }
-
- return (OffBits & 0xFFFF) | RegBits;
-}
-
-/// getMemEncoding - Return binary encoding of memory related operand.
+/// Return the binary encoding of a memory-related operand.
/// If the offset operand requires relocation, record the relocation.
-unsigned
-MipsMCCodeEmitter::getMemEncoding(const MCInst &MI, unsigned OpNo,
- SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const {
+template <unsigned ShiftAmount>
+unsigned MipsMCCodeEmitter::getMemEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
// Base register is encoded in bits 20-16, offset is encoded in bits 15-0.
assert(MI.getOperand(OpNo).isReg());
unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo),Fixups, STI) << 16;
unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI);
+ // Apply the scale factor if there is one.
+ OffBits >>= ShiftAmount;
+
return (OffBits & 0xFFFF) | RegBits;
}
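
// A minimal, self-contained sketch of the templated scaling above: for
// instructions whose memory operand is scaled (e.g. a 4-byte access with
// ShiftAmount == 2), the byte offset is divided by the access size before
// being packed beside the base register encoding.
#include <cassert>

template <unsigned ShiftAmount = 0>
unsigned encodeMem(unsigned BaseRegEnc, unsigned ByteOffset) {
  unsigned OffBits = ByteOffset >> ShiftAmount; // Apply the scale factor.
  return (OffBits & 0xFFFF) | (BaseRegEnc << 16);
}

int main() {
  // Base register encoding 4, byte offset 32 -> scaled offset 8.
  assert(encodeMem<2>(4, 32) == ((4u << 16) | 8u));
  return 0;
}
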
@@ -804,6 +878,19 @@ getMemEncodingMMImm9(const MCInst &MI, unsigned OpNo,
}
unsigned MipsMCCodeEmitter::
+getMemEncodingMMImm11(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ // Base register is encoded in bits 20-16, offset is encoded in bits 10-0.
+ assert(MI.getOperand(OpNo).isReg());
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups,
+ STI) << 16;
+ unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI);
+
+ return (OffBits & 0x07FF) | RegBits;
+}
+
+unsigned MipsMCCodeEmitter::
getMemEncodingMMImm12(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
@@ -906,8 +993,9 @@ MipsMCCodeEmitter::getSimm19Lsl2Encoding(const MCInst &MI, unsigned OpNo,
"getSimm19Lsl2Encoding expects only expressions or an immediate");
const MCExpr *Expr = MO.getExpr();
- Fixups.push_back(MCFixup::create(0, Expr,
- MCFixupKind(Mips::fixup_MIPS_PC19_S2)));
+ Mips::Fixups FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_PC19_S2
+ : Mips::fixup_MIPS_PC19_S2;
+ Fixups.push_back(MCFixup::create(0, Expr, MCFixupKind(FixupKind)));
return 0;
}
@@ -927,8 +1015,9 @@ MipsMCCodeEmitter::getSimm18Lsl3Encoding(const MCInst &MI, unsigned OpNo,
"getSimm18Lsl2Encoding expects only expressions or an immediate");
const MCExpr *Expr = MO.getExpr();
- Fixups.push_back(MCFixup::create(0, Expr,
- MCFixupKind(Mips::fixup_MIPS_PC18_S3)));
+ Mips::Fixups FixupKind = isMicroMips(STI) ? Mips::fixup_MICROMIPS_PC18_S3
+ : Mips::fixup_MIPS_PC18_S3;
+ Fixups.push_back(MCFixup::create(0, Expr, MCFixupKind(FixupKind)));
return 0;
}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h
index fdacd172e3a2..0f4dfe1200c0 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.h
@@ -102,6 +102,20 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+ // getBranchTargetOpValue1SImm16 - Return binary encoding of the branch
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getBranchTargetOpValue1SImm16(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ // getBranchTargetOpValueMMR6 - Return binary encoding of the branch
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getBranchTargetOpValueMMR6(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
// getBranchTarget7OpValue - Return binary encoding of the microMIPS branch
// target operand. If the machine operand requires relocation,
// record the relocation and return zero.
@@ -130,6 +144,13 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+ // getBranchTarget21OpValueMM - Return binary encoding of the branch
+ // offset operand for microMIPS. If the machine operand requires
+ // relocation, record the relocation and return zero.
+ unsigned getBranchTarget21OpValueMM(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
// getBranchTarget26OpValue - Return binary encoding of the branch
// offset operand. If the machine operand requires relocation,
// record the relocation and return zero.
@@ -161,6 +182,7 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+ template <unsigned ShiftAmount = 0>
unsigned getMemEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
@@ -182,6 +204,9 @@ public:
unsigned getMemEncodingMMImm9(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+ unsigned getMemEncodingMMImm11(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getMemEncodingMMImm12(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
@@ -238,6 +263,8 @@ public:
unsigned getRegisterListOpValue16(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+ private:
+ void LowerCompactBranch(MCInst& Inst) const;
}; // class MipsMCCodeEmitter
} // namespace llvm.
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp
index c85fc4816b08..082bb87fcb8a 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp
@@ -12,69 +12,110 @@
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCObjectStreamer.h"
+#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/Support/ELF.h"
using namespace llvm;
#define DEBUG_TYPE "mipsmcexpr"
-bool MipsMCExpr::isSupportedBinaryExpr(MCSymbolRefExpr::VariantKind VK,
- const MCBinaryExpr *BE) {
- switch (VK) {
- case MCSymbolRefExpr::VK_Mips_ABS_LO:
- case MCSymbolRefExpr::VK_Mips_ABS_HI:
- case MCSymbolRefExpr::VK_Mips_HIGHER:
- case MCSymbolRefExpr::VK_Mips_HIGHEST:
- break;
- default:
- return false;
- }
+const MipsMCExpr *MipsMCExpr::create(MipsMCExpr::MipsExprKind Kind,
+ const MCExpr *Expr, MCContext &Ctx) {
+ return new (Ctx) MipsMCExpr(Kind, Expr);
+}
- // We support expressions of the form "(sym1 binop1 sym2) binop2 const",
- // where "binop2 const" is optional.
- if (isa<MCBinaryExpr>(BE->getLHS())) {
- if (!isa<MCConstantExpr>(BE->getRHS()))
- return false;
- BE = cast<MCBinaryExpr>(BE->getLHS());
- }
- return (isa<MCSymbolRefExpr>(BE->getLHS())
- && isa<MCSymbolRefExpr>(BE->getRHS()));
+const MipsMCExpr *MipsMCExpr::createGpOff(MipsMCExpr::MipsExprKind Kind,
+ const MCExpr *Expr, MCContext &Ctx) {
+ return create(Kind, create(MEK_NEG, create(MEK_GPREL, Expr, Ctx), Ctx), Ctx);
}
-const MipsMCExpr*
-MipsMCExpr::create(MCSymbolRefExpr::VariantKind VK, const MCExpr *Expr,
- MCContext &Ctx) {
- VariantKind Kind;
- switch (VK) {
- case MCSymbolRefExpr::VK_Mips_ABS_LO:
- Kind = VK_Mips_LO;
+void MipsMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
+ int64_t AbsVal;
+
+ switch (Kind) {
+ case MEK_None:
+ case MEK_Special:
+ llvm_unreachable("MEK_None and MEK_Special are invalid");
break;
- case MCSymbolRefExpr::VK_Mips_ABS_HI:
- Kind = VK_Mips_HI;
+ case MEK_CALL_HI16:
+ OS << "%call_hi";
break;
- case MCSymbolRefExpr::VK_Mips_HIGHER:
- Kind = VK_Mips_HIGHER;
+ case MEK_CALL_LO16:
+ OS << "%call_lo";
break;
- case MCSymbolRefExpr::VK_Mips_HIGHEST:
- Kind = VK_Mips_HIGHEST;
+ case MEK_DTPREL_HI:
+ OS << "%dtprel_hi";
+ break;
+ case MEK_DTPREL_LO:
+ OS << "%dtprel_lo";
+ break;
+ case MEK_GOT:
+ OS << "%got";
+ break;
+ case MEK_GOTTPREL:
+ OS << "%gottprel";
+ break;
+ case MEK_GOT_CALL:
+ OS << "%call16";
+ break;
+ case MEK_GOT_DISP:
+ OS << "%got_disp";
+ break;
+ case MEK_GOT_HI16:
+ OS << "%got_hi";
+ break;
+ case MEK_GOT_LO16:
+ OS << "%got_lo";
+ break;
+ case MEK_GOT_PAGE:
+ OS << "%got_page";
+ break;
+ case MEK_GOT_OFST:
+ OS << "%got_ofst";
+ break;
+ case MEK_GPREL:
+ OS << "%gp_rel";
+ break;
+ case MEK_HI:
+ OS << "%hi";
+ break;
+ case MEK_HIGHER:
+ OS << "%higher";
+ break;
+ case MEK_HIGHEST:
+ OS << "%highest";
+ break;
+ case MEK_LO:
+ OS << "%lo";
+ break;
+ case MEK_NEG:
+ OS << "%neg";
+ break;
+ case MEK_PCREL_HI16:
+ OS << "%pcrel_hi";
+ break;
+ case MEK_PCREL_LO16:
+ OS << "%pcrel_lo";
+ break;
+ case MEK_TLSGD:
+ OS << "%tlsgd";
+ break;
+ case MEK_TLSLDM:
+ OS << "%tlsldm";
+ break;
+ case MEK_TPREL_HI:
+ OS << "%tprel_hi";
+ break;
+ case MEK_TPREL_LO:
+ OS << "%tprel_lo";
break;
- default:
- llvm_unreachable("Invalid kind!");
- }
-
- return new (Ctx) MipsMCExpr(Kind, Expr);
-}
-
-void MipsMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
- switch (Kind) {
- default: llvm_unreachable("Invalid kind!");
- case VK_Mips_LO: OS << "%lo"; break;
- case VK_Mips_HI: OS << "%hi"; break;
- case VK_Mips_HIGHER: OS << "%higher"; break;
- case VK_Mips_HIGHEST: OS << "%highest"; break;
}
OS << '(';
- Expr->print(OS, MAI);
+ if (Expr->evaluateAsAbsolute(AbsVal))
+ OS << AbsVal;
+ else
+ Expr->print(OS, MAI, true);
OS << ')';
}
@@ -82,9 +123,165 @@ bool
MipsMCExpr::evaluateAsRelocatableImpl(MCValue &Res,
const MCAsmLayout *Layout,
const MCFixup *Fixup) const {
- return getSubExpr()->evaluateAsRelocatable(Res, Layout, Fixup);
+ // Look for the %hi(%neg(%gp_rel(X))) and %lo(%neg(%gp_rel(X))) special cases.
+ if (isGpOff()) {
+ const MCExpr *SubExpr =
+ cast<MipsMCExpr>(cast<MipsMCExpr>(getSubExpr())->getSubExpr())
+ ->getSubExpr();
+ if (!SubExpr->evaluateAsRelocatable(Res, Layout, Fixup))
+ return false;
+
+ Res = MCValue::get(Res.getSymA(), Res.getSymB(), Res.getConstant(),
+ MEK_Special);
+ return true;
+ }
+
+ if (!getSubExpr()->evaluateAsRelocatable(Res, Layout, Fixup))
+ return false;
+
+ if (Res.getRefKind() != MCSymbolRefExpr::VK_None)
+ return false;
+
+ // evaluateAsAbsolute() and evaluateAsValue() require that we evaluate the
+ // %hi/%lo/etc. here. Fixup is a null pointer when either of these is the
+ // caller.
+ if (Res.isAbsolute() && Fixup == nullptr) {
+ int64_t AbsVal = Res.getConstant();
+ switch (Kind) {
+ case MEK_None:
+ case MEK_Special:
+ llvm_unreachable("MEK_None and MEK_Special are invalid");
+ case MEK_DTPREL_HI:
+ case MEK_DTPREL_LO:
+ case MEK_GOT:
+ case MEK_GOTTPREL:
+ case MEK_GOT_CALL:
+ case MEK_GOT_DISP:
+ case MEK_GOT_HI16:
+ case MEK_GOT_LO16:
+ case MEK_GOT_OFST:
+ case MEK_GOT_PAGE:
+ case MEK_GPREL:
+ case MEK_PCREL_HI16:
+ case MEK_PCREL_LO16:
+ case MEK_TLSGD:
+ case MEK_TLSLDM:
+ case MEK_TPREL_HI:
+ case MEK_TPREL_LO:
+ return false;
+ case MEK_LO:
+ case MEK_CALL_LO16:
+ AbsVal = SignExtend64<16>(AbsVal);
+ break;
+ case MEK_CALL_HI16:
+ case MEK_HI:
+ AbsVal = SignExtend64<16>((AbsVal + 0x8000) >> 16);
+ break;
+ case MEK_HIGHER:
+ AbsVal = SignExtend64<16>((AbsVal + 0x80008000LL) >> 32);
+ break;
+ case MEK_HIGHEST:
+ AbsVal = SignExtend64<16>((AbsVal + 0x800080008000LL) >> 48);
+ break;
+ case MEK_NEG:
+ AbsVal = -AbsVal;
+ break;
+ }
+ Res = MCValue::get(AbsVal);
+ return true;
+ }
+
+ // We want to defer it for relocatable expressions since the constant is
+ // applied to the whole symbol value.
+ //
+ // The value of getKind() that is given to MCValue is only intended to aid
+ // debugging when inspecting MCValue objects. It shouldn't be relied upon
+ // for decision making.
+ Res = MCValue::get(Res.getSymA(), Res.getSymB(), Res.getConstant(), getKind());
+
+ return true;
}
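
// A minimal, self-contained sketch of the constant folding above: each
// operator takes a sign-extended 16-bit slice, and the rounding constants
// (0x8000, 0x80008000, ...) ensure the slices recombine to the original
// value despite sign-extension.
#include <cassert>
#include <cstdint>

static int64_t signExtend16(int64_t V) { return (int16_t)V; }

int main() {
  int64_t V = 0x12348000;
  int64_t Lo = signExtend16(V);                                 // %lo -> -0x8000
  int64_t Hi = signExtend16((V + 0x8000) >> 16);                // %hi -> 0x1235
  int64_t Higher = signExtend16((V + 0x80008000LL) >> 32);      // %higher
  int64_t Highest = signExtend16((V + 0x800080008000LL) >> 48); // %highest
  assert((Hi << 16) + Lo == V);
  assert((Highest << 48) + (Higher << 32) + (Hi << 16) + Lo == V);
  return 0;
}
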
void MipsMCExpr::visitUsedExpr(MCStreamer &Streamer) const {
Streamer.visitUsedExpr(*getSubExpr());
}
+
+static void fixELFSymbolsInTLSFixupsImpl(const MCExpr *Expr, MCAssembler &Asm) {
+ switch (Expr->getKind()) {
+ case MCExpr::Target:
+ fixELFSymbolsInTLSFixupsImpl(cast<MipsMCExpr>(Expr)->getSubExpr(), Asm);
+ break;
+ case MCExpr::Constant:
+ break;
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = cast<MCBinaryExpr>(Expr);
+ fixELFSymbolsInTLSFixupsImpl(BE->getLHS(), Asm);
+ fixELFSymbolsInTLSFixupsImpl(BE->getRHS(), Asm);
+ break;
+ }
+ case MCExpr::SymbolRef: {
+ // We're known to be under a TLS fixup, so any symbol should be
+ // modified. There should be only one.
+ const MCSymbolRefExpr &SymRef = *cast<MCSymbolRefExpr>(Expr);
+ cast<MCSymbolELF>(SymRef.getSymbol()).setType(ELF::STT_TLS);
+ break;
+ }
+ case MCExpr::Unary:
+ fixELFSymbolsInTLSFixupsImpl(cast<MCUnaryExpr>(Expr)->getSubExpr(), Asm);
+ break;
+ }
+}
+
+void MipsMCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {
+ switch (getKind()) {
+ case MEK_None:
+ case MEK_Special:
+ llvm_unreachable("MEK_None and MEK_Special are invalid");
+ break;
+ case MEK_CALL_HI16:
+ case MEK_CALL_LO16:
+ case MEK_DTPREL_HI:
+ case MEK_DTPREL_LO:
+ case MEK_GOT:
+ case MEK_GOT_CALL:
+ case MEK_GOT_DISP:
+ case MEK_GOT_HI16:
+ case MEK_GOT_LO16:
+ case MEK_GOT_OFST:
+ case MEK_GOT_PAGE:
+ case MEK_GPREL:
+ case MEK_HI:
+ case MEK_HIGHER:
+ case MEK_HIGHEST:
+ case MEK_LO:
+ case MEK_NEG:
+ case MEK_PCREL_HI16:
+ case MEK_PCREL_LO16:
+ case MEK_TLSLDM:
+ // If we do have nested target-specific expressions, they will be in
+ // a consecutive chain.
+ if (const MipsMCExpr *E = dyn_cast<const MipsMCExpr>(getSubExpr()))
+ E->fixELFSymbolsInTLSFixups(Asm);
+ break;
+ case MEK_GOTTPREL:
+ case MEK_TLSGD:
+ case MEK_TPREL_HI:
+ case MEK_TPREL_LO:
+ fixELFSymbolsInTLSFixupsImpl(getSubExpr(), Asm);
+ break;
+ }
+}
+
+bool MipsMCExpr::isGpOff(MipsExprKind &Kind) const {
+ if (getKind() == MEK_HI || getKind() == MEK_LO) {
+ if (const MipsMCExpr *S1 = dyn_cast<const MipsMCExpr>(getSubExpr())) {
+ if (const MipsMCExpr *S2 = dyn_cast<const MipsMCExpr>(S1->getSubExpr())) {
+ if (S1->getKind() == MEK_NEG && S2->getKind() == MEK_GPREL) {
+ Kind = getKind();
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
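
// A minimal, self-contained sketch of the shape isGpOff() recognises, with
// the MCExpr machinery stubbed out: the predicate holds only for MEK_HI or
// MEK_LO wrapping MEK_NEG wrapping MEK_GPREL, i.e. %hi/%lo(%neg(%gp_rel(X))).
#include <cassert>

enum Kind { GPREL, NEG, HI, LO, SYM };
struct Expr {
  Kind K;
  const Expr *Sub;
};

static bool isGpOff(const Expr &E) {
  return (E.K == HI || E.K == LO) && E.Sub && E.Sub->K == NEG &&
         E.Sub->Sub && E.Sub->Sub->K == GPREL;
}

int main() {
  Expr Sym{SYM, nullptr}, Gp{GPREL, &Sym}, Neg{NEG, &Gp}, Hi{HI, &Neg};
  assert(isGpOff(Hi) && !isGpOff(Neg));
  return 0;
}
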
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h b/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h
index e889972c5c0e..d1a4334ec640 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h
@@ -18,49 +18,73 @@ namespace llvm {
class MipsMCExpr : public MCTargetExpr {
public:
- enum VariantKind {
- VK_Mips_None,
- VK_Mips_LO,
- VK_Mips_HI,
- VK_Mips_HIGHER,
- VK_Mips_HIGHEST
+ enum MipsExprKind {
+ MEK_None,
+ MEK_CALL_HI16,
+ MEK_CALL_LO16,
+ MEK_DTPREL_HI,
+ MEK_DTPREL_LO,
+ MEK_GOT,
+ MEK_GOTTPREL,
+ MEK_GOT_CALL,
+ MEK_GOT_DISP,
+ MEK_GOT_HI16,
+ MEK_GOT_LO16,
+ MEK_GOT_OFST,
+ MEK_GOT_PAGE,
+ MEK_GPREL,
+ MEK_HI,
+ MEK_HIGHER,
+ MEK_HIGHEST,
+ MEK_LO,
+ MEK_NEG,
+ MEK_PCREL_HI16,
+ MEK_PCREL_LO16,
+ MEK_TLSGD,
+ MEK_TLSLDM,
+ MEK_TPREL_HI,
+ MEK_TPREL_LO,
+ MEK_Special,
};
private:
- const VariantKind Kind;
+ const MipsExprKind Kind;
const MCExpr *Expr;
- explicit MipsMCExpr(VariantKind Kind, const MCExpr *Expr)
- : Kind(Kind), Expr(Expr) {}
+ explicit MipsMCExpr(MipsExprKind Kind, const MCExpr *Expr)
+ : Kind(Kind), Expr(Expr) {}
public:
- static bool isSupportedBinaryExpr(MCSymbolRefExpr::VariantKind VK,
- const MCBinaryExpr *BE);
+ static const MipsMCExpr *create(MipsExprKind Kind, const MCExpr *Expr,
+ MCContext &Ctx);
+ static const MipsMCExpr *createGpOff(MipsExprKind Kind, const MCExpr *Expr,
+ MCContext &Ctx);
- static const MipsMCExpr *create(MCSymbolRefExpr::VariantKind VK,
- const MCExpr *Expr, MCContext &Ctx);
+ /// Get the kind of this expression.
+ MipsExprKind getKind() const { return Kind; }
- /// getOpcode - Get the kind of this expression.
- VariantKind getKind() const { return Kind; }
-
- /// getSubExpr - Get the child of this expression.
+ /// Get the child of this expression.
const MCExpr *getSubExpr() const { return Expr; }
void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const override;
- bool evaluateAsRelocatableImpl(MCValue &Res,
- const MCAsmLayout *Layout,
+ bool evaluateAsRelocatableImpl(MCValue &Res, const MCAsmLayout *Layout,
const MCFixup *Fixup) const override;
void visitUsedExpr(MCStreamer &Streamer) const override;
MCFragment *findAssociatedFragment() const override {
return getSubExpr()->findAssociatedFragment();
}
- // There are no TLS MipsMCExprs at the moment.
- void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override {}
+ void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override;
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Target;
}
+
+ bool isGpOff(MipsExprKind &Kind) const;
+ bool isGpOff() const {
+ MipsExprKind Kind;
+ return isGpOff(Kind);
+ }
};
} // end namespace llvm
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
index 949ee1474f96..a05573950974 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
@@ -11,21 +11,20 @@
//
//===----------------------------------------------------------------------===//
+#include "MipsMCTargetDesc.h"
#include "InstPrinter/MipsInstPrinter.h"
#include "MipsELFStreamer.h"
#include "MipsMCAsmInfo.h"
#include "MipsMCNaCl.h"
-#include "MipsMCTargetDesc.h"
#include "MipsTargetStreamer.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCELFStreamer.h"
+#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MachineLocation.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
@@ -82,18 +81,6 @@ static MCAsmInfo *createMipsMCAsmInfo(const MCRegisterInfo &MRI,
return MAI;
}
-static MCCodeGenInfo *createMipsMCCodeGenInfo(const Triple &TT, Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
- if (CM == CodeModel::JITDefault)
- RM = Reloc::Static;
- else if (RM == Reloc::Default)
- RM = Reloc::PIC_;
- X->initMCCodeGenInfo(RM, CM, OL);
- return X;
-}
-
static MCInstPrinter *createMipsMCInstPrinter(const Triple &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
@@ -129,15 +116,44 @@ createMipsObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) {
return new MipsTargetELFStreamer(S, STI);
}
+namespace {
+
+class MipsMCInstrAnalysis : public MCInstrAnalysis {
+public:
+ MipsMCInstrAnalysis(const MCInstrInfo *Info) : MCInstrAnalysis(Info) {}
+
+ bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
+ uint64_t &Target) const override {
+ unsigned NumOps = Inst.getNumOperands();
+ if (NumOps == 0)
+ return false;
+ switch (Info->get(Inst.getOpcode()).OpInfo[NumOps - 1].OperandType) {
+ case MCOI::OPERAND_UNKNOWN:
+ case MCOI::OPERAND_IMMEDIATE:
+ // jal, bal ...
+ Target = Inst.getOperand(NumOps - 1).getImm();
+ return true;
+ case MCOI::OPERAND_PCREL:
+ // b, j, beq ...
+ Target = Addr + Inst.getOperand(NumOps - 1).getImm();
+ return true;
+ default:
+ return false;
+ }
+ }
+};
+}
+
+static MCInstrAnalysis *createMipsMCInstrAnalysis(const MCInstrInfo *Info) {
+ return new MipsMCInstrAnalysis(Info);
+}
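
// A minimal, self-contained sketch of the two target computations in
// evaluateBranch() above: absolute operands (jal, bal) carry the destination
// directly, while PC-relative operands (b, beq, ...) are added to the
// instruction's address.
#include <cassert>
#include <cstdint>

static uint64_t branchTarget(bool IsPCRel, uint64_t Addr, int64_t Imm) {
  return IsPCRel ? Addr + Imm : (uint64_t)Imm;
}

int main() {
  assert(branchTarget(/*IsPCRel=*/true, 0x1000, 0x20) == 0x1020);
  assert(branchTarget(/*IsPCRel=*/false, 0x1000, 0x400000) == 0x400000);
  return 0;
}
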
+
extern "C" void LLVMInitializeMipsTargetMC() {
for (Target *T : {&TheMipsTarget, &TheMipselTarget, &TheMips64Target,
&TheMips64elTarget}) {
// Register the MC asm info.
RegisterMCAsmInfoFn X(*T, createMipsMCAsmInfo);
- // Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(*T, createMipsMCCodeGenInfo);
-
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(*T, createMipsMCInstrInfo);
@@ -156,6 +172,9 @@ extern "C" void LLVMInitializeMipsTargetMC() {
// Register the MC subtarget info.
TargetRegistry::RegisterMCSubtargetInfo(*T, createMipsMCSubtargetInfo);
+ // Register the MC instruction analyzer.
+ TargetRegistry::RegisterMCInstrAnalysis(*T, createMipsMCInstrAnalysis);
+
// Register the MCInstPrinter.
TargetRegistry::RegisterMCInstPrinter(*T, createMipsMCInstPrinter);
diff --git a/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
index e5fa7556053f..7f79eb400f59 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
@@ -11,11 +11,12 @@
//
//===----------------------------------------------------------------------===//
+#include "MipsTargetStreamer.h"
#include "InstPrinter/MipsInstPrinter.h"
#include "MipsELFStreamer.h"
+#include "MipsMCExpr.h"
#include "MipsMCTargetDesc.h"
#include "MipsTargetObjectFile.h"
-#include "MipsTargetStreamer.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSubtargetInfo.h"
@@ -27,12 +28,19 @@
using namespace llvm;
+namespace {
+static cl::opt<bool> RoundSectionSizes(
+ "mips-round-section-sizes", cl::init(false),
+ cl::desc("Round section sizes up to the section alignment"), cl::Hidden);
+} // end anonymous namespace
+
MipsTargetStreamer::MipsTargetStreamer(MCStreamer &S)
: MCTargetStreamer(S), ModuleDirectiveAllowed(true) {
GPRInfoSet = FPRInfoSet = FrameInfoSet = false;
}
void MipsTargetStreamer::emitDirectiveSetMicroMips() {}
void MipsTargetStreamer::emitDirectiveSetNoMicroMips() {}
+void MipsTargetStreamer::setUsesMicroMips() {}
void MipsTargetStreamer::emitDirectiveSetMips16() {}
void MipsTargetStreamer::emitDirectiveSetNoMips16() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetReorder() { forbidModuleDirective(); }
@@ -89,9 +97,11 @@ void MipsTargetStreamer::emitDirectiveSetHardFloat() {
void MipsTargetStreamer::emitDirectiveSetDsp() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveSetNoDsp() { forbidModuleDirective(); }
void MipsTargetStreamer::emitDirectiveCpLoad(unsigned RegNo) {}
-void MipsTargetStreamer::emitDirectiveCpRestore(
- SmallVector<MCInst, 3> &StoreInsts, int Offset) {
+bool MipsTargetStreamer::emitDirectiveCpRestore(
+ int Offset, function_ref<unsigned()> GetATReg, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
forbidModuleDirective();
+ return true;
}
void MipsTargetStreamer::emitDirectiveCpsetup(unsigned RegNo, int RegOrOffset,
const MCSymbol &Sym, bool IsReg) {
@@ -116,6 +126,217 @@ void MipsTargetStreamer::emitDirectiveSetNoOddSPReg() {
forbidModuleDirective();
}
+void MipsTargetStreamer::emitR(unsigned Opcode, unsigned Reg0, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ MCInst TmpInst;
+ TmpInst.setOpcode(Opcode);
+ TmpInst.addOperand(MCOperand::createReg(Reg0));
+ TmpInst.setLoc(IDLoc);
+ getStreamer().EmitInstruction(TmpInst, *STI);
+}
+
+void MipsTargetStreamer::emitRX(unsigned Opcode, unsigned Reg0, MCOperand Op1,
+ SMLoc IDLoc, const MCSubtargetInfo *STI) {
+ MCInst TmpInst;
+ TmpInst.setOpcode(Opcode);
+ TmpInst.addOperand(MCOperand::createReg(Reg0));
+ TmpInst.addOperand(Op1);
+ TmpInst.setLoc(IDLoc);
+ getStreamer().EmitInstruction(TmpInst, *STI);
+}
+
+void MipsTargetStreamer::emitRI(unsigned Opcode, unsigned Reg0, int32_t Imm,
+ SMLoc IDLoc, const MCSubtargetInfo *STI) {
+ emitRX(Opcode, Reg0, MCOperand::createImm(Imm), IDLoc, STI);
+}
+
+void MipsTargetStreamer::emitRR(unsigned Opcode, unsigned Reg0, unsigned Reg1,
+ SMLoc IDLoc, const MCSubtargetInfo *STI) {
+ emitRX(Opcode, Reg0, MCOperand::createReg(Reg1), IDLoc, STI);
+}
+
+void MipsTargetStreamer::emitII(unsigned Opcode, int16_t Imm1, int16_t Imm2,
+ SMLoc IDLoc, const MCSubtargetInfo *STI) {
+ MCInst TmpInst;
+ TmpInst.setOpcode(Opcode);
+ TmpInst.addOperand(MCOperand::createImm(Imm1));
+ TmpInst.addOperand(MCOperand::createImm(Imm2));
+ TmpInst.setLoc(IDLoc);
+ getStreamer().EmitInstruction(TmpInst, *STI);
+}
+
+void MipsTargetStreamer::emitRRX(unsigned Opcode, unsigned Reg0, unsigned Reg1,
+ MCOperand Op2, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ MCInst TmpInst;
+ TmpInst.setOpcode(Opcode);
+ TmpInst.addOperand(MCOperand::createReg(Reg0));
+ TmpInst.addOperand(MCOperand::createReg(Reg1));
+ TmpInst.addOperand(Op2);
+ TmpInst.setLoc(IDLoc);
+ getStreamer().EmitInstruction(TmpInst, *STI);
+}
+
+void MipsTargetStreamer::emitRRR(unsigned Opcode, unsigned Reg0, unsigned Reg1,
+ unsigned Reg2, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ emitRRX(Opcode, Reg0, Reg1, MCOperand::createReg(Reg2), IDLoc, STI);
+}
+
+void MipsTargetStreamer::emitRRI(unsigned Opcode, unsigned Reg0, unsigned Reg1,
+ int16_t Imm, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ emitRRX(Opcode, Reg0, Reg1, MCOperand::createImm(Imm), IDLoc, STI);
+}
+
+void MipsTargetStreamer::emitAddu(unsigned DstReg, unsigned SrcReg,
+ unsigned TrgReg, bool Is64Bit,
+ const MCSubtargetInfo *STI) {
+ emitRRR(Is64Bit ? Mips::DADDu : Mips::ADDu, DstReg, SrcReg, TrgReg, SMLoc(),
+ STI);
+}
+
+void MipsTargetStreamer::emitDSLL(unsigned DstReg, unsigned SrcReg,
+ int16_t ShiftAmount, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ if (ShiftAmount >= 32) {
+ emitRRI(Mips::DSLL32, DstReg, SrcReg, ShiftAmount - 32, IDLoc, STI);
+ return;
+ }
+
+ emitRRI(Mips::DSLL, DstReg, SrcReg, ShiftAmount, IDLoc, STI);
+}
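
// A minimal, self-contained sketch of the selection in emitDSLL() above: the
// sa field of a shift instruction only holds five bits, so MIPS64 encodes
// amounts of 32..63 with the DSLL32 opcode and the amount reduced by 32.
#include <cassert>
#include <cstdint>

struct ShiftEncoding {
  bool Use32Variant; // DSLL32 rather than DSLL.
  int16_t Amount;
};

static ShiftEncoding selectDsll(int16_t ShiftAmount) {
  if (ShiftAmount >= 32)
    return {true, (int16_t)(ShiftAmount - 32)};
  return {false, ShiftAmount};
}

int main() {
  ShiftEncoding S = selectDsll(40); // dsll $d, $s, 40 -> dsll32 $d, $s, 8
  assert(S.Use32Variant && S.Amount == 8);
  return 0;
}
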
+
+void MipsTargetStreamer::emitEmptyDelaySlot(bool hasShortDelaySlot, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ if (hasShortDelaySlot)
+ emitRR(Mips::MOVE16_MM, Mips::ZERO, Mips::ZERO, IDLoc, STI);
+ else
+ emitRRI(Mips::SLL, Mips::ZERO, Mips::ZERO, 0, IDLoc, STI);
+}
+
+void MipsTargetStreamer::emitNop(SMLoc IDLoc, const MCSubtargetInfo *STI) {
+ emitRRI(Mips::SLL, Mips::ZERO, Mips::ZERO, 0, IDLoc, STI);
+}
+
+/// Emit the $gp restore operation for .cprestore.
+void MipsTargetStreamer::emitGPRestore(int Offset, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ emitLoadWithImmOffset(Mips::LW, Mips::GP, Mips::SP, Offset, Mips::GP, IDLoc,
+ STI);
+}
+
+/// Emit a store instruction with an immediate offset.
+void MipsTargetStreamer::emitStoreWithImmOffset(
+ unsigned Opcode, unsigned SrcReg, unsigned BaseReg, int64_t Offset,
+ function_ref<unsigned()> GetATReg, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ if (isInt<16>(Offset)) {
+ emitRRI(Opcode, SrcReg, BaseReg, Offset, IDLoc, STI);
+ return;
+ }
+
+ // sw $8, offset($8) => lui $at, %hi(offset)
+ // add $at, $at, $8
+ // sw $8, %lo(offset)($at)
+
+ unsigned ATReg = GetATReg();
+ if (!ATReg)
+ return;
+
+ unsigned LoOffset = Offset & 0x0000ffff;
+ unsigned HiOffset = (Offset & 0xffff0000) >> 16;
+
+ // If the MSB of LoOffset is 1 (i.e. the low half is negative when
+ // sign-extended), we must increment HiOffset to account for the
+ // sign-extension of the low part.
+ if (LoOffset & 0x8000)
+ HiOffset++;
+
+ // Generate the base address in ATReg.
+ emitRI(Mips::LUi, ATReg, HiOffset, IDLoc, STI);
+ if (BaseReg != Mips::ZERO)
+ emitRRR(Mips::ADDu, ATReg, ATReg, BaseReg, IDLoc, STI);
+ // Emit the store with the adjusted base and offset.
+ emitRRI(Opcode, SrcReg, ATReg, LoOffset, IDLoc, STI);
+}
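
// A minimal, self-contained sketch of the offset split above: the final
// store sign-extends its 16-bit offset, so a low half with the MSB set is
// compensated by incrementing the high half loaded by lui.
#include <cassert>
#include <cstdint>

static void splitOffset(int64_t Offset, unsigned &Hi, unsigned &Lo) {
  Lo = Offset & 0x0000ffff;
  Hi = ((uint64_t)Offset & 0xffff0000) >> 16;
  if (Lo & 0x8000) // The low half is negative once sign-extended.
    Hi++;
}

int main() {
  unsigned Hi, Lo;
  splitOffset(0x12348000, Hi, Lo);
  assert(Hi == 0x1235 && Lo == 0x8000);
  // lui materialises Hi << 16; the store then adds (int16_t)Lo back.
  assert((int64_t)(Hi << 16) + (int16_t)Lo == 0x12348000);
  return 0;
}
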
+
+/// Emit a store instruction with a symbol offset. The symbol is assumed to be
+/// out of range for a simm16 and will be expanded to appropriate instructions.
+void MipsTargetStreamer::emitStoreWithSymOffset(
+ unsigned Opcode, unsigned SrcReg, unsigned BaseReg, MCOperand &HiOperand,
+ MCOperand &LoOperand, unsigned ATReg, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ // sw $8, sym => lui $at, %hi(sym)
+ // sw $8, %lo(sym)($at)
+
+ // Generate the base address in ATReg.
+ emitRX(Mips::LUi, ATReg, HiOperand, IDLoc, STI);
+ if (BaseReg != Mips::ZERO)
+ emitRRR(Mips::ADDu, ATReg, ATReg, BaseReg, IDLoc, STI);
+ // Emit the store with the adjusted base and offset.
+ emitRRX(Opcode, SrcReg, ATReg, LoOperand, IDLoc, STI);
+}
+
+/// Emit a load instruction with an immediate offset. DstReg and TmpReg are
+/// permitted to be the same register iff DstReg is distinct from BaseReg and
+/// DstReg is a GPR. It is the caller's responsibility to identify such cases
+/// and pass the appropriate register in TmpReg.
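+/// (e.g. for "lw $8, offset($9)" TmpReg may be $8, but for
+/// "lw $8, offset($8)" the base must be materialized in $at instead).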
+void MipsTargetStreamer::emitLoadWithImmOffset(unsigned Opcode, unsigned DstReg,
+ unsigned BaseReg, int64_t Offset,
+ unsigned TmpReg, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ if (isInt<16>(Offset)) {
+ emitRRI(Opcode, DstReg, BaseReg, Offset, IDLoc, STI);
+ return;
+ }
+
+ // 1) lw $8, offset($9) => lui $8, %hi(offset)
+ // add $8, $8, $9
+  //                         lw $8, %lo(offset)($8)
+ // 2) lw $8, offset($8) => lui $at, %hi(offset)
+ // add $at, $at, $8
+ // lw $8, %lo(offset)($at)
+
+ unsigned LoOffset = Offset & 0x0000ffff;
+ unsigned HiOffset = (Offset & 0xffff0000) >> 16;
+
+  // If the msb of LoOffset is 1 (i.e. it will be treated as a negative
+  // number), we must increment HiOffset to account for the sign-extension
+  // of the low part.
+ if (LoOffset & 0x8000)
+ HiOffset++;
+
+ // Generate the base address in TmpReg.
+ emitRI(Mips::LUi, TmpReg, HiOffset, IDLoc, STI);
+ if (BaseReg != Mips::ZERO)
+ emitRRR(Mips::ADDu, TmpReg, TmpReg, BaseReg, IDLoc, STI);
+ // Emit the load with the adjusted base and offset.
+ emitRRI(Opcode, DstReg, TmpReg, LoOffset, IDLoc, STI);
+}
+
+/// Emit a load instruction with a symbol offset. Symbols are assumed to be
+/// out of range for a simm16 and will be expanded to appropriate instructions.
+/// DstReg and TmpReg are permitted to be the same register iff DstReg is a
+/// GPR. It is the caller's responsibility to identify such cases and pass the
+/// appropriate register in TmpReg.
+void MipsTargetStreamer::emitLoadWithSymOffset(unsigned Opcode, unsigned DstReg,
+ unsigned BaseReg,
+ MCOperand &HiOperand,
+ MCOperand &LoOperand,
+ unsigned TmpReg, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ // 1) lw $8, sym => lui $8, %hi(sym)
+ // lw $8, %lo(sym)($8)
+ // 2) ldc1 $f0, sym => lui $at, %hi(sym)
+ // ldc1 $f0, %lo(sym)($at)
+
+ // Generate the base address in TmpReg.
+ emitRX(Mips::LUi, TmpReg, HiOperand, IDLoc, STI);
+ if (BaseReg != Mips::ZERO)
+ emitRRR(Mips::ADDu, TmpReg, TmpReg, BaseReg, IDLoc, STI);
+ // Emit the load with the adjusted base and offset.
+ emitRRX(Opcode, DstReg, TmpReg, LoOperand, IDLoc, STI);
+}
+
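A minimal standalone sketch (not part of this patch) of the hi/lo offset split
that emitStoreWithImmOffset and emitLoadWithImmOffset both rely on, assuming
32-bit offsets and hypothetical names:

    #include <cassert>
    #include <cstdint>

    // Split an offset into lui/simm16 halves, pre-compensating the high half
    // for the sign-extension the memory instruction applies to the low half.
    static void splitOffset(int64_t Offset, unsigned &Hi, unsigned &Lo) {
      Lo = Offset & 0xffff;
      Hi = (Offset >> 16) & 0xffff;
      if (Lo & 0x8000) // the low half will sign-extend to a negative value
        ++Hi;
    }

    int main() {
      unsigned Hi, Lo;
      splitOffset(0x00128000, Hi, Lo);
      // lui materializes Hi << 16 = 0x00130000; the memory instruction then
      // adds the sign-extended Lo = -0x8000, recovering 0x00128000.
      assert(int64_t(Hi) * 0x10000 + static_cast<int16_t>(Lo) == 0x00128000);
      return 0;
    }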
MipsTargetAsmStreamer::MipsTargetAsmStreamer(MCStreamer &S,
formatted_raw_ostream &OS)
: MipsTargetStreamer(S), OS(OS) {}
@@ -364,10 +585,12 @@ void MipsTargetAsmStreamer::emitDirectiveCpLoad(unsigned RegNo) {
forbidModuleDirective();
}
-void MipsTargetAsmStreamer::emitDirectiveCpRestore(
- SmallVector<MCInst, 3> &StoreInsts, int Offset) {
- MipsTargetStreamer::emitDirectiveCpRestore(StoreInsts, Offset);
+bool MipsTargetAsmStreamer::emitDirectiveCpRestore(
+ int Offset, function_ref<unsigned()> GetATReg, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer::emitDirectiveCpRestore(Offset, GetATReg, IDLoc, STI);
OS << "\t.cprestore\t" << Offset << "\n";
+ return true;
}
void MipsTargetAsmStreamer::emitDirectiveCpsetup(unsigned RegNo,
@@ -437,7 +660,15 @@ MipsTargetELFStreamer::MipsTargetELFStreamer(MCStreamer &S,
const MCSubtargetInfo &STI)
: MipsTargetStreamer(S), MicroMipsEnabled(false), STI(STI) {
MCAssembler &MCA = getStreamer().getAssembler();
- Pic = MCA.getContext().getObjectFileInfo()->getRelocM() == Reloc::PIC_;
+
+ // It's possible that MCObjectFileInfo isn't fully initialized at this point
+ // due to an initialization order problem where LLVMTargetMachine creates the
+ // target streamer before TargetLoweringObjectFile calls
+  // InitializeMCObjectFileInfo. There doesn't seem to be a single place that
+  // covers all cases, so this statement covers most of them; direct object
+  // emission must call setPic() once MCObjectFileInfo has been initialized.
+  // The cases we don't handle here are covered by MipsAsmPrinter.
+ Pic = MCA.getContext().getObjectFileInfo()->isPositionIndependent();
const FeatureBitset &Features = STI.getFeatureBits();
@@ -482,6 +713,10 @@ MipsTargetELFStreamer::MipsTargetELFStreamer(MCStreamer &S,
else
EFlags |= ELF::EF_MIPS_ARCH_1;
+ // Machine
+ if (Features[Mips::FeatureCnMips])
+ EFlags |= ELF::EF_MIPS_MACH_OCTEON;
+
// Other options.
if (Features[Mips::FeatureNaN2008])
EFlags |= ELF::EF_MIPS_NAN2008;
@@ -521,6 +756,26 @@ void MipsTargetELFStreamer::finish() {
DataSection.setAlignment(std::max(16u, DataSection.getAlignment()));
BSSSection.setAlignment(std::max(16u, BSSSection.getAlignment()));
+ if (RoundSectionSizes) {
+    // Make section sizes a multiple of the alignment. This is useful for
+    // verifying the output of IAS against the output of other assemblers, but
+    // it is not necessary to produce a correct object and it increases section
+    // sizes.
+ MCStreamer &OS = getStreamer();
+ for (MCSection &S : MCA) {
+ MCSectionELF &Section = static_cast<MCSectionELF &>(S);
+
+ unsigned Alignment = Section.getAlignment();
+ if (Alignment) {
+ OS.SwitchSection(&Section);
+ if (Section.UseCodeAlign())
+ OS.EmitCodeAlignment(Alignment, Alignment);
+ else
+ OS.EmitValueToAlignment(Alignment, 0, 1, Alignment);
+ }
+ }
+ }
+
const FeatureBitset &Features = STI.getFeatureBits();
// Update e_header flags. See the FIXME and comment above in
@@ -576,11 +831,6 @@ MCELFStreamer &MipsTargetELFStreamer::getStreamer() {
void MipsTargetELFStreamer::emitDirectiveSetMicroMips() {
MicroMipsEnabled = true;
-
- MCAssembler &MCA = getStreamer().getAssembler();
- unsigned Flags = MCA.getELFHeaderEFlags();
- Flags |= ELF::EF_MIPS_MICROMIPS;
- MCA.setELFHeaderEFlags(Flags);
forbidModuleDirective();
}
@@ -589,6 +839,13 @@ void MipsTargetELFStreamer::emitDirectiveSetNoMicroMips() {
forbidModuleDirective();
}
+void MipsTargetELFStreamer::setUsesMicroMips() {
+ MCAssembler &MCA = getStreamer().getAssembler();
+ unsigned Flags = MCA.getELFHeaderEFlags();
+ Flags |= ELF::EF_MIPS_MICROMIPS;
+ MCA.setELFHeaderEFlags(Flags);
+}
+
void MipsTargetELFStreamer::emitDirectiveSetMips16() {
MCAssembler &MCA = getStreamer().getAssembler();
unsigned Flags = MCA.getELFHeaderEFlags();
@@ -610,8 +867,7 @@ void MipsTargetELFStreamer::emitDirectiveEnd(StringRef Name) {
MCContext &Context = MCA.getContext();
MCStreamer &OS = getStreamer();
- MCSectionELF *Sec = Context.getELFSection(".pdr", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC | ELF::SHT_REL);
+ MCSectionELF *Sec = Context.getELFSection(".pdr", ELF::SHT_PROGBITS, 0);
MCSymbol *Sym = Context.getOrCreateSymbol(Name);
const MCSymbolRefExpr *ExprRef =
@@ -760,8 +1016,11 @@ void MipsTargetELFStreamer::emitDirectiveCpLoad(unsigned RegNo) {
MCInst TmpInst;
TmpInst.setOpcode(Mips::LUi);
TmpInst.addOperand(MCOperand::createReg(Mips::GP));
- const MCSymbolRefExpr *HiSym = MCSymbolRefExpr::create(
- "_gp_disp", MCSymbolRefExpr::VK_Mips_ABS_HI, MCA.getContext());
+ const MCExpr *HiSym = MipsMCExpr::create(
+ MipsMCExpr::MEK_HI,
+ MCSymbolRefExpr::create("_gp_disp", MCSymbolRefExpr::VK_None,
+ MCA.getContext()),
+ MCA.getContext());
TmpInst.addOperand(MCOperand::createExpr(HiSym));
getStreamer().EmitInstruction(TmpInst, STI);
@@ -770,8 +1029,11 @@ void MipsTargetELFStreamer::emitDirectiveCpLoad(unsigned RegNo) {
TmpInst.setOpcode(Mips::ADDiu);
TmpInst.addOperand(MCOperand::createReg(Mips::GP));
TmpInst.addOperand(MCOperand::createReg(Mips::GP));
- const MCSymbolRefExpr *LoSym = MCSymbolRefExpr::create(
- "_gp_disp", MCSymbolRefExpr::VK_Mips_ABS_LO, MCA.getContext());
+ const MCExpr *LoSym = MipsMCExpr::create(
+ MipsMCExpr::MEK_LO,
+ MCSymbolRefExpr::create("_gp_disp", MCSymbolRefExpr::VK_None,
+ MCA.getContext()),
+ MCA.getContext());
TmpInst.addOperand(MCOperand::createExpr(LoSym));
getStreamer().EmitInstruction(TmpInst, STI);
@@ -786,9 +1048,10 @@ void MipsTargetELFStreamer::emitDirectiveCpLoad(unsigned RegNo) {
forbidModuleDirective();
}
-void MipsTargetELFStreamer::emitDirectiveCpRestore(
- SmallVector<MCInst, 3> &StoreInsts, int Offset) {
- MipsTargetStreamer::emitDirectiveCpRestore(StoreInsts, Offset);
+bool MipsTargetELFStreamer::emitDirectiveCpRestore(
+ int Offset, function_ref<unsigned()> GetATReg, SMLoc IDLoc,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer::emitDirectiveCpRestore(Offset, GetATReg, IDLoc, STI);
// .cprestore offset
// When PIC mode is enabled and the O32 ABI is used, this directive expands
// to:
@@ -798,10 +1061,12 @@ void MipsTargetELFStreamer::emitDirectiveCpRestore(
// Note that .cprestore is ignored if used with the N32 and N64 ABIs or if it
// is used in non-PIC mode.
if (!Pic || (getABI().IsN32() || getABI().IsN64()))
- return;
+ return true;
- for (const MCInst &Inst : StoreInsts)
- getStreamer().EmitInstruction(Inst, STI);
+ // Store the $gp on the stack.
+ emitStoreWithImmOffset(Mips::SW, Mips::GP, Mips::SP, Offset, GetATReg, IDLoc,
+ STI);
+ return true;
}
void MipsTargetELFStreamer::emitDirectiveCpsetup(unsigned RegNo,
@@ -812,54 +1077,55 @@ void MipsTargetELFStreamer::emitDirectiveCpsetup(unsigned RegNo,
if (!Pic || !(getABI().IsN32() || getABI().IsN64()))
return;
+ forbidModuleDirective();
+
MCAssembler &MCA = getStreamer().getAssembler();
MCInst Inst;
// Either store the old $gp in a register or on the stack
if (IsReg) {
// move $save, $gpreg
- Inst.setOpcode(Mips::OR64);
- Inst.addOperand(MCOperand::createReg(RegOrOffset));
- Inst.addOperand(MCOperand::createReg(Mips::GP));
- Inst.addOperand(MCOperand::createReg(Mips::ZERO));
+ emitRRR(Mips::OR64, RegOrOffset, Mips::GP, Mips::ZERO, SMLoc(), &STI);
} else {
// sd $gpreg, offset($sp)
- Inst.setOpcode(Mips::SD);
- Inst.addOperand(MCOperand::createReg(Mips::GP));
- Inst.addOperand(MCOperand::createReg(Mips::SP));
- Inst.addOperand(MCOperand::createImm(RegOrOffset));
+ emitRRI(Mips::SD, Mips::GP, Mips::SP, RegOrOffset, SMLoc(), &STI);
}
- getStreamer().EmitInstruction(Inst, STI);
- Inst.clear();
- const MCSymbolRefExpr *HiExpr = MCSymbolRefExpr::create(
- &Sym, MCSymbolRefExpr::VK_Mips_GPOFF_HI, MCA.getContext());
- const MCSymbolRefExpr *LoExpr = MCSymbolRefExpr::create(
- &Sym, MCSymbolRefExpr::VK_Mips_GPOFF_LO, MCA.getContext());
+ if (getABI().IsN32()) {
+ MCSymbol *GPSym = MCA.getContext().getOrCreateSymbol("__gnu_local_gp");
+ const MipsMCExpr *HiExpr = MipsMCExpr::create(
+ MipsMCExpr::MEK_HI, MCSymbolRefExpr::create(GPSym, MCA.getContext()),
+ MCA.getContext());
+ const MipsMCExpr *LoExpr = MipsMCExpr::create(
+ MipsMCExpr::MEK_LO, MCSymbolRefExpr::create(GPSym, MCA.getContext()),
+ MCA.getContext());
+
+ // lui $gp, %hi(__gnu_local_gp)
+ emitRX(Mips::LUi, Mips::GP, MCOperand::createExpr(HiExpr), SMLoc(), &STI);
+
+ // addiu $gp, $gp, %lo(__gnu_local_gp)
+ emitRRX(Mips::ADDiu, Mips::GP, Mips::GP, MCOperand::createExpr(LoExpr),
+ SMLoc(), &STI);
+
+ return;
+ }
+
+ const MipsMCExpr *HiExpr = MipsMCExpr::createGpOff(
+ MipsMCExpr::MEK_HI, MCSymbolRefExpr::create(&Sym, MCA.getContext()),
+ MCA.getContext());
+ const MipsMCExpr *LoExpr = MipsMCExpr::createGpOff(
+ MipsMCExpr::MEK_LO, MCSymbolRefExpr::create(&Sym, MCA.getContext()),
+ MCA.getContext());
// lui $gp, %hi(%neg(%gp_rel(funcSym)))
- Inst.setOpcode(Mips::LUi);
- Inst.addOperand(MCOperand::createReg(Mips::GP));
- Inst.addOperand(MCOperand::createExpr(HiExpr));
- getStreamer().EmitInstruction(Inst, STI);
- Inst.clear();
+ emitRX(Mips::LUi, Mips::GP, MCOperand::createExpr(HiExpr), SMLoc(), &STI);
// addiu $gp, $gp, %lo(%neg(%gp_rel(funcSym)))
- Inst.setOpcode(Mips::ADDiu);
- Inst.addOperand(MCOperand::createReg(Mips::GP));
- Inst.addOperand(MCOperand::createReg(Mips::GP));
- Inst.addOperand(MCOperand::createExpr(LoExpr));
- getStreamer().EmitInstruction(Inst, STI);
- Inst.clear();
+ emitRRX(Mips::ADDiu, Mips::GP, Mips::GP, MCOperand::createExpr(LoExpr),
+ SMLoc(), &STI);
// daddu $gp, $gp, $funcreg
- Inst.setOpcode(Mips::DADDu);
- Inst.addOperand(MCOperand::createReg(Mips::GP));
- Inst.addOperand(MCOperand::createReg(Mips::GP));
- Inst.addOperand(MCOperand::createReg(RegNo));
- getStreamer().EmitInstruction(Inst, STI);
-
- forbidModuleDirective();
+ emitRRR(Mips::DADDu, Mips::GP, Mips::GP, RegNo, SMLoc(), &STI);
}
void MipsTargetELFStreamer::emitDirectiveCpreturn(unsigned SaveLocation,
diff --git a/lib/Target/Mips/Makefile b/lib/Target/Mips/Makefile
deleted file mode 100644
index 56db450f6961..000000000000
--- a/lib/Target/Mips/Makefile
+++ /dev/null
@@ -1,25 +0,0 @@
-##===- lib/Target/Mips/Makefile ----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMMipsCodeGen
-TARGET = Mips
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = MipsGenRegisterInfo.inc MipsGenInstrInfo.inc \
- MipsGenAsmWriter.inc MipsGenFastISel.inc \
- MipsGenDAGISel.inc MipsGenCallingConv.inc \
- MipsGenSubtargetInfo.inc MipsGenMCCodeEmitter.inc \
- MipsGenDisassemblerTables.inc \
- MipsGenMCPseudoLowering.inc MipsGenAsmMatcher.inc
-
-DIRS = InstPrinter Disassembler AsmParser TargetInfo MCTargetDesc
-
-include $(LEVEL)/Makefile.common
-
diff --git a/lib/Target/Mips/MicroMips32r6InstrFormats.td b/lib/Target/Mips/MicroMips32r6InstrFormats.td
index 400f6eef3fb0..2f0933277e81 100644
--- a/lib/Target/Mips/MicroMips32r6InstrFormats.td
+++ b/lib/Target/Mips/MicroMips32r6InstrFormats.td
@@ -14,6 +14,7 @@
class MMR6Arch<string opstr> {
string Arch = "micromipsr6";
string BaseOpcode = opstr;
+ string DecoderNamespace = "MicroMipsR6";
}
// Class used for microMIPS32r6 and microMIPS64r6 instructions.
@@ -22,6 +23,24 @@ class MicroMipsR6Inst16 : PredicateControl {
let InsnPredicates = [HasMicroMips32r6];
}
+//===----------------------------------------------------------------------===//
+//
+// Disambiguators
+//
+//===----------------------------------------------------------------------===//
+//
+// Some encodings are ambiguous and can only be distinguished by comparing
+// field values.
+
+class MMDecodeDisambiguatedBy<string Name> : DecodeDisambiguates<Name> {
+ string DecoderNamespace = "MicroMipsR6_Ambiguous";
+}
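+// The Bgtz/Blez group branches below use these hooks: their custom
+// Decode*GroupBranchMMR6 decoder methods inspect the rs/rt register fields
+// to select the correct instruction within a shared major opcode.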
+
+//===----------------------------------------------------------------------===//
+//
+// Encoding Formats
+//
+//===----------------------------------------------------------------------===//
+
class BC16_FM_MM16R6 {
bits<10> offset;
@@ -52,6 +71,32 @@ class POOL16C_JALRC_FM_MM16R6<bits<5> op> {
let Inst{4-0} = op;
}
+class POP35_BOVC_FM_MMR6<string instr_asm> : MipsR6Inst, MMR6Arch<instr_asm> {
+ bits<5> rt;
+ bits<5> rs;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b011101;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-0} = offset;
+}
+
+class POP37_BNVC_FM_MMR6<string instr_asm> : MipsR6Inst, MMR6Arch<instr_asm> {
+ bits<5> rt;
+ bits<5> rs;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b011111;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-0} = offset;
+}
+
class POOL16C_JRCADDIUSP_FM_MM16R6<bits<5> op> {
bits<5> imm;
@@ -449,7 +494,8 @@ class LOAD_UPPER_IMM_FM_MMR6 {
let Inst{15-0} = imm16;
}
-class CMP_BRANCH_1R_RT_OFF16_FM_MMR6<bits<6> funct> : MipsR6Inst {
+class CMP_BRANCH_1R_RT_OFF16_FM_MMR6<string instr_asm, bits<6> funct>
+ : MMR6Arch<instr_asm>, MipsR6Inst {
bits<5> rt;
bits<16> offset;
@@ -461,7 +507,8 @@ class CMP_BRANCH_1R_RT_OFF16_FM_MMR6<bits<6> funct> : MipsR6Inst {
let Inst{15-0} = offset;
}
-class CMP_BRANCH_1R_BOTH_OFF16_FM_MMR6<bits<6> funct> : MipsR6Inst {
+class CMP_BRANCH_1R_BOTH_OFF16_FM_MMR6<string instr_asm, bits<6> funct>
+ : MMR6Arch<instr_asm>, MipsR6Inst {
bits<5> rt;
bits<16> offset;
@@ -473,6 +520,37 @@ class CMP_BRANCH_1R_BOTH_OFF16_FM_MMR6<bits<6> funct> : MipsR6Inst {
let Inst{15-0} = offset;
}
+class POOL32A_JALRC_FM_MMR6<string instr_asm, bits<10> funct>
+ : MipsR6Inst, MMR6Arch<instr_asm> {
+ bits<5> rt;
+ bits<5> rs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0b111100;
+}
+
+class POOL32A_EXT_INS_FM_MMR6<string instr_asm, bits<6> funct>
+ : MMR6Arch<instr_asm>, MipsR6Inst {
+ bits<5> rt;
+ bits<5> rs;
+ bits<5> size;
+ bits<5> pos;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-11} = size;
+ let Inst{10-6} = pos;
+ let Inst{5-0} = funct;
+}
+
class POOL32A_ERET_FM_MMR6<string instr_asm, bits<10> funct>
: MMR6Arch<instr_asm> {
bits<32> Inst;
@@ -751,7 +829,7 @@ class POOL16C_NOT16_FM_MMR6 : MicroMipsR6Inst16 {
let Inst{3-0} = 0b0000;
}
-class POOL16C_OR16_XOR16_FM_MMR6<bits<4> op> {
+class POOL16C_OR16_XOR16_FM_MMR6<bits<4> op> : MicroMipsR6Inst16 {
bits<3> rt;
bits<3> rs;
@@ -860,3 +938,157 @@ class POOL32F_CLASS_FM_MMR6<string instr_asm, bits<2> fmt, bits<9> funct>
let Inst{10-9} = fmt;
let Inst{8-0} = funct;
}
+
+class POOL32A_TLBINV_FM_MMR6<string instr_asm, bits<10> funct>
+ : MMR6Arch<instr_asm>, MipsR6Inst {
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x0;
+ let Inst{25-16} = 0x0;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0b111100;
+}
+
+class POOL32A_MFTC0_FM_MMR6<string instr_asm, bits<5> funct, bits<6> opcode>
+ : MMR6Arch<instr_asm>, MipsR6Inst {
+ bits<5> rt;
+ bits<5> rs;
+ bits<3> sel;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b000000;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-14} = 0;
+ let Inst{13-11} = sel;
+ let Inst{10-6} = funct;
+ let Inst{5-0} = opcode;
+}
+
+class POOL32F_MFTC1_FM_MMR6<string instr_asm, bits<8> funct>
+ : MMR6Arch<instr_asm> {
+ bits<5> rt;
+ bits<5> fs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b010101;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = fs;
+ let Inst{15-14} = 0;
+ let Inst{13-6} = funct;
+ let Inst{5-0} = 0b111011;
+}
+
+class POOL32A_MFTC2_FM_MMR6<string instr_asm, bits<10> funct>
+ : MMR6Arch<instr_asm>, MipsR6Inst {
+ bits<5> rt;
+ bits<5> impl;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b000000;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = impl;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0b111100;
+}
+
+class CMP_BRANCH_2R_OFF16_FM_MMR6<string opstr, bits<6> funct>
+ : MipsR6Inst, MMR6Arch<opstr> {
+ bits<5> rt;
+ bits<5> rs;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = funct;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-0} = offset;
+}
+
+class POOL32A_DVPEVP_FM_MMR6<string instr_asm, bits<10> funct>
+ : MMR6Arch<instr_asm>, MipsR6Inst {
+ bits<5> rs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b000000;
+ let Inst{25-21} = 0b00000;
+ let Inst{20-16} = rs;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0b111100;
+}
+
+class POOL32B_LWP_SWP_FM_MMR6<bits<4> funct> : MipsR6Inst {
+ bits<5> rd;
+ bits<21> addr;
+ bits<5> base = addr{20-16};
+ bits<12> offset = addr{11-0};
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x8;
+ let Inst{25-21} = rd;
+ let Inst{20-16} = base;
+ let Inst{15-12} = funct;
+ let Inst{11-0} = offset;
+}
+
+class CMP_BRANCH_OFF21_FM_MMR6<string opstr, bits<6> funct> : MipsR6Inst {
+ bits<5> rs;
+ bits<21> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = funct;
+ let Inst{25-21} = rs;
+ let Inst{20-0} = offset;
+}
+
+class POOL32I_BRANCH_COP_1_2_FM_MMR6<string instr_asm, bits<5> funct>
+ : MMR6Arch<instr_asm> {
+ bits<5> rt;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b010000;
+ let Inst{25-21} = funct;
+ let Inst{20-16} = rt;
+ let Inst{15-0} = offset;
+}
+
+class LDWC1_SDWC1_FM_MMR6<string instr_asm, bits<6> funct>
+ : MMR6Arch<instr_asm> {
+ bits<5> ft;
+ bits<21> addr;
+ bits<5> base = addr{20-16};
+ bits<16> offset = addr{15-0};
+
+ bits<32> Inst;
+
+ let Inst{31-26} = funct;
+ let Inst{25-21} = ft;
+ let Inst{20-16} = base;
+ let Inst{15-0} = offset;
+}
+
+class POOL32B_LDWC2_SDWC2_FM_MMR6<string instr_asm, bits<4> funct>
+ : MMR6Arch<instr_asm>, MipsR6Inst {
+ bits<5> rt;
+ bits<21> addr;
+ bits<5> base = addr{20-16};
+ bits<11> offset = addr{10-0};
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b001000;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = base;
+ let Inst{15-12} = funct;
+ let Inst{11} = 0;
+ let Inst{10-0} = offset;
+}
diff --git a/lib/Target/Mips/MicroMips32r6InstrInfo.td b/lib/Target/Mips/MicroMips32r6InstrInfo.td
index 31b5db036daa..2b636cfe3bfe 100644
--- a/lib/Target/Mips/MicroMips32r6InstrInfo.td
+++ b/lib/Target/Mips/MicroMips32r6InstrInfo.td
@@ -11,6 +11,13 @@
//
//===----------------------------------------------------------------------===//
+def brtarget21_mm : Operand<OtherVT> {
+ let EncoderMethod = "getBranchTarget21OpValueMM";
+ let OperandType = "OPERAND_PCREL";
+ let DecoderMethod = "DecodeBranchTarget21MM";
+ let ParserMatchClass = MipsJumpTargetAsmOperand;
+}
+
def brtarget26_mm : Operand<OtherVT> {
let EncoderMethod = "getBranchTarget26OpValueMM";
let OperandType = "OPERAND_PCREL";
@@ -18,6 +25,13 @@ def brtarget26_mm : Operand<OtherVT> {
let ParserMatchClass = MipsJumpTargetAsmOperand;
}
+def brtargetr6 : Operand<OtherVT> {
+ let EncoderMethod = "getBranchTargetOpValueMMR6";
+ let OperandType = "OPERAND_PCREL";
+ let DecoderMethod = "DecodeBranchTargetMM";
+ let ParserMatchClass = MipsJumpTargetAsmOperand;
+}
+
//===----------------------------------------------------------------------===//
//
// Instruction Encodings
@@ -40,12 +54,26 @@ class BEQZC16_MMR6_ENC : BEQZC_BNEZC_FM_MM16R6<0x23>;
class BNEZC16_MMR6_ENC : BEQZC_BNEZC_FM_MM16R6<0x2b>;
class BITSWAP_MMR6_ENC : POOL32A_BITSWAP_FM_MMR6<0b101100>;
class BRK_MMR6_ENC : BREAK_MMR6_ENC<"break">;
-class BEQZALC_MMR6_ENC : CMP_BRANCH_1R_RT_OFF16_FM_MMR6<0b011101>;
-class BNEZALC_MMR6_ENC : CMP_BRANCH_1R_RT_OFF16_FM_MMR6<0b011111>;
-class BGTZALC_MMR6_ENC : CMP_BRANCH_1R_RT_OFF16_FM_MMR6<0b111000>;
-class BLTZALC_MMR6_ENC : CMP_BRANCH_1R_BOTH_OFF16_FM_MMR6<0b111000>;
-class BGEZALC_MMR6_ENC : CMP_BRANCH_1R_BOTH_OFF16_FM_MMR6<0b110000>;
-class BLEZALC_MMR6_ENC : CMP_BRANCH_1R_RT_OFF16_FM_MMR6<0b110000>;
+class BEQZC_MMR6_ENC : CMP_BRANCH_OFF21_FM_MMR6<"beqzc", 0b100000>;
+class BNEZC_MMR6_ENC : CMP_BRANCH_OFF21_FM_MMR6<"bnezc", 0b101000>;
+class BGEC_MMR6_ENC : CMP_BRANCH_2R_OFF16_FM_MMR6<"bgec", 0b111001>;
+class BGEUC_MMR6_ENC : CMP_BRANCH_2R_OFF16_FM_MMR6<"bgeuc", 0b110000>,
+ DecodeDisambiguates<"BlezGroupBranchMMR6">;
+class BLTC_MMR6_ENC : CMP_BRANCH_2R_OFF16_FM_MMR6<"bltc", 0b110001>;
+class BLTUC_MMR6_ENC : CMP_BRANCH_2R_OFF16_FM_MMR6<"bltuc", 0b111000>,
+ DecodeDisambiguates<"BgtzGroupBranchMMR6">;
+class BEQC_MMR6_ENC : CMP_BRANCH_2R_OFF16_FM_MMR6<"beqc", 0b011101>;
+class BNEC_MMR6_ENC : CMP_BRANCH_2R_OFF16_FM_MMR6<"bnec", 0b011111>;
+class BEQZALC_MMR6_ENC : CMP_BRANCH_1R_RT_OFF16_FM_MMR6<"beqzalc", 0b011101>;
+class BNEZALC_MMR6_ENC : CMP_BRANCH_1R_RT_OFF16_FM_MMR6<"bnezalc", 0b011111>;
+class BGTZALC_MMR6_ENC : CMP_BRANCH_1R_RT_OFF16_FM_MMR6<"bgtzalc", 0b111000>,
+ MMDecodeDisambiguatedBy<"BgtzGroupBranchMMR6">;
+class BLTZALC_MMR6_ENC : CMP_BRANCH_1R_BOTH_OFF16_FM_MMR6<"bltzalc", 0b111000>,
+ MMDecodeDisambiguatedBy<"BgtzGroupBranchMMR6">;
+class BGEZALC_MMR6_ENC : CMP_BRANCH_1R_BOTH_OFF16_FM_MMR6<"bgezalc", 0b110000>,
+ MMDecodeDisambiguatedBy<"BlezGroupBranchMMR6">;
+class BLEZALC_MMR6_ENC : CMP_BRANCH_1R_RT_OFF16_FM_MMR6<"blezalc", 0b110000>,
+ MMDecodeDisambiguatedBy<"BlezGroupBranchMMR6">;
class CACHE_MMR6_ENC : CACHE_PREF_FM_MMR6<0b001000, 0b0110>;
class CLO_MMR6_ENC : POOL32A_2R_FM_MMR6<0b0100101100>;
class CLZ_MMR6_ENC : SPECIAL_2R_FM_MMR6<0b010000>;
@@ -63,14 +91,27 @@ class JIC_MMR6_ENC : JMP_IDX_COMPACT_FM<0b101000>;
class JRC16_MMR6_ENC: POOL16C_JALRC_FM_MM16R6<0x3>;
class JRCADDIUSP_MMR6_ENC : POOL16C_JRCADDIUSP_FM_MM16R6<0x13>;
class LSA_MMR6_ENC : POOL32A_LSA_FM<0b001111>;
+class LWP_MMR6_ENC : POOL32B_LWP_SWP_FM_MMR6<0x1>;
class LWPC_MMR6_ENC : PCREL19_FM_MMR6<0b01>;
class LWM16_MMR6_ENC : POOL16C_LWM_SWM_FM_MM16R6<0x2>;
+class MFC0_MMR6_ENC : POOL32A_MFTC0_FM_MMR6<"mfc0", 0b00011, 0b111100>;
+class MFC1_MMR6_ENC : POOL32F_MFTC1_FM_MMR6<"mfc1", 0b10000000>;
+class MFC2_MMR6_ENC : POOL32A_MFTC2_FM_MMR6<"mfc2", 0b0100110100>;
+class MFHC0_MMR6_ENC : POOL32A_MFTC0_FM_MMR6<"mfhc0", 0b00011, 0b110100>;
+class MFHC1_MMR6_ENC : POOL32F_MFTC1_FM_MMR6<"mfhc1", 0b11000000>;
+class MFHC2_MMR6_ENC : POOL32A_MFTC2_FM_MMR6<"mfhc2", 0b1000110100>;
class MOD_MMR6_ENC : ARITH_FM_MMR6<"mod", 0x158>;
class MODU_MMR6_ENC : ARITH_FM_MMR6<"modu", 0x1d8>;
class MUL_MMR6_ENC : ARITH_FM_MMR6<"mul", 0x18>;
class MUH_MMR6_ENC : ARITH_FM_MMR6<"muh", 0x58>;
class MULU_MMR6_ENC : ARITH_FM_MMR6<"mulu", 0x98>;
class MUHU_MMR6_ENC : ARITH_FM_MMR6<"muhu", 0xd8>;
+class MTC0_MMR6_ENC : POOL32A_MFTC0_FM_MMR6<"mtc0", 0b01011, 0b111100>;
+class MTC1_MMR6_ENC : POOL32F_MFTC1_FM_MMR6<"mtc1", 0b10100000>;
+class MTC2_MMR6_ENC : POOL32A_MFTC2_FM_MMR6<"mtc2", 0b0101110100>;
+class MTHC0_MMR6_ENC : POOL32A_MFTC0_FM_MMR6<"mthc0", 0b01011, 0b110100>;
+class MTHC1_MMR6_ENC : POOL32F_MFTC1_FM_MMR6<"mthc1", 0b11100000>;
+class MTHC2_MMR6_ENC : POOL32A_MFTC2_FM_MMR6<"mthc2", 0b1001110100>;
class NOR_MMR6_ENC : ARITH_FM_MMR6<"nor", 0x2d0>;
class OR_MMR6_ENC : ARITH_FM_MMR6<"or", 0x290>;
class ORI_MMR6_ENC : ADDI_FM_MMR6<"ori", 0x14>;
@@ -89,6 +130,7 @@ class SWE_MMR6_ENC : POOL32C_SWE_FM_MMR6<"swe", 0x18, 0xa, 0x7>;
class SW16_MMR6_ENC : LOAD_STORE_FM_MM16<0x3a>;
class SWM16_MMR6_ENC : POOL16C_LWM_SWM_FM_MM16R6<0xa>;
class SWSP_MMR6_ENC : LOAD_STORE_SP_FM_MM16<0x32>;
+class SWP_MMR6_ENC : POOL32B_LWP_SWP_FM_MMR6<0x9>;
class PREFE_MMR6_ENC : POOL32C_ST_EVA_FM_MMR6<0b011000, 0b010>;
class CACHEE_MMR6_ENC : POOL32C_ST_EVA_FM_MMR6<0b011000, 0b011>;
class WRPGPR_MMR6_ENC : POOL32A_WRPGPR_WSBH_FM_MMR6<0x3c5>;
@@ -134,6 +176,7 @@ class LLE_MMR6_ENC : LOAD_WORD_EVA_FM_MMR6<0b110>;
class LWE_MMR6_ENC : LOAD_WORD_EVA_FM_MMR6<0b111>;
class LW_MMR6_ENC : LOAD_WORD_FM_MMR6;
class LUI_MMR6_ENC : LOAD_UPPER_IMM_FM_MMR6;
+class JALRC_HB_MMR6_ENC : POOL32A_JALRC_FM_MMR6<"jalrc.hb", 0b0001111100>;
class RECIP_S_MMR6_ENC : POOL32F_RECIP_ROUND_FM_MMR6<"recip.s", 0, 0b01001000>;
class RECIP_D_MMR6_ENC : POOL32F_RECIP_ROUND_FM_MMR6<"recip.d", 1, 0b01001000>;
class RINT_S_MMR6_ENC : POOL32F_RINT_FM_MMR6<"rint.s", 0>;
@@ -150,11 +193,15 @@ class SEL_S_MMR6_ENC : POOL32F_SEL_FM_MMR6<"sel.s", 0, 0b010111000>;
class SEL_D_MMR6_ENC : POOL32F_SEL_FM_MMR6<"sel.d", 1, 0b010111000>;
class SELEQZ_S_MMR6_ENC : POOL32F_SEL_FM_MMR6<"seleqz.s", 0, 0b000111000>;
class SELEQZ_D_MMR6_ENC : POOL32F_SEL_FM_MMR6<"seleqz.d", 1, 0b000111000>;
-class SELENZ_S_MMR6_ENC : POOL32F_SEL_FM_MMR6<"selenz.s", 0, 0b001111000>;
-class SELENZ_D_MMR6_ENC : POOL32F_SEL_FM_MMR6<"selenz.d", 1, 0b001111000>;
+class SELNEZ_S_MMR6_ENC : POOL32F_SEL_FM_MMR6<"selnez.s", 0, 0b001111000>;
+class SELNEZ_D_MMR6_ENC : POOL32F_SEL_FM_MMR6<"selnez.d", 1, 0b001111000>;
class CLASS_S_MMR6_ENC : POOL32F_CLASS_FM_MMR6<"class.s", 0, 0b001100000>;
class CLASS_D_MMR6_ENC : POOL32F_CLASS_FM_MMR6<"class.d", 1, 0b001100000>;
-
+class EXT_MMR6_ENC : POOL32A_EXT_INS_FM_MMR6<"ext", 0b101100>;
+class INS_MMR6_ENC : POOL32A_EXT_INS_FM_MMR6<"ins", 0b001100>;
+class JALRC_MMR6_ENC : POOL32A_JALRC_FM_MMR6<"jalrc", 0b0000111100>;
+class BOVC_MMR6_ENC : POP35_BOVC_FM_MMR6<"bovc">;
+class BNVC_MMR6_ENC : POP37_BNVC_FM_MMR6<"bnvc">;
class ADDU16_MMR6_ENC : POOL16A_ADDU16_FM_MMR6;
class AND16_MMR6_ENC : POOL16C_AND16_FM_MMR6;
class ANDI16_MMR6_ENC : ANDI_FM_MM16<0b001011>, MicroMipsR6Inst16;
@@ -168,10 +215,24 @@ class MOVE16_MMR6_ENC : MOVE_FM_MM16<0b000011>;
class SDBBP16_MMR6_ENC : POOL16C_BREAKPOINT_FM_MMR6<0b111011>;
class SUBU16_MMR6_ENC : POOL16A_SUBU16_FM_MMR6;
class XOR16_MMR6_ENC : POOL16C_OR16_XOR16_FM_MMR6<0b1000>;
+class TLBINV_MMR6_ENC : POOL32A_TLBINV_FM_MMR6<"tlbinv", 0x10d>;
+class TLBINVF_MMR6_ENC : POOL32A_TLBINV_FM_MMR6<"tlbinvf", 0x14d>;
+class DVP_MMR6_ENC : POOL32A_DVPEVP_FM_MMR6<"dvp", 0b0001100101>;
+class EVP_MMR6_ENC : POOL32A_DVPEVP_FM_MMR6<"evp", 0b0011100101>;
+class BC1EQZC_MMR6_ENC : POOL32I_BRANCH_COP_1_2_FM_MMR6<"bc1eqzc", 0b01000>;
+class BC1NEZC_MMR6_ENC : POOL32I_BRANCH_COP_1_2_FM_MMR6<"bc1nezc", 0b01001>;
+class BC2EQZC_MMR6_ENC : POOL32I_BRANCH_COP_1_2_FM_MMR6<"bc2eqzc", 0b01010>;
+class BC2NEZC_MMR6_ENC : POOL32I_BRANCH_COP_1_2_FM_MMR6<"bc2nezc", 0b01011>;
+class LDC1_MMR6_ENC : LDWC1_SDWC1_FM_MMR6<"ldc1", 0b101111>;
+class SDC1_MMR6_ENC : LDWC1_SDWC1_FM_MMR6<"sdc1", 0b101110>;
+class LDC2_MMR6_ENC : POOL32B_LDWC2_SDWC2_FM_MMR6<"ldc2", 0b0010>;
+class SDC2_MMR6_ENC : POOL32B_LDWC2_SDWC2_FM_MMR6<"sdc2", 0b1010>;
+class LWC2_MMR6_ENC : POOL32B_LDWC2_SDWC2_FM_MMR6<"lwc2", 0b0000>;
+class SWC2_MMR6_ENC : POOL32B_LDWC2_SDWC2_FM_MMR6<"swc2", 0b1000>;
class CMP_CBR_RT_Z_MMR6_DESC_BASE<string instr_asm, DAGOperand opnd,
RegisterOperand GPROpnd>
- : BRANCH_DESC_BASE, MMR6Arch<instr_asm> {
+ : BRANCH_DESC_BASE {
dag InOperandList = (ins GPROpnd:$rt, opnd:$offset);
dag OutOperandList = (outs);
string AsmString = !strconcat(instr_asm, "\t$rt, $offset");
@@ -208,6 +269,27 @@ class BNEZALC_MMR6_DESC : CMP_CBR_RT_Z_MMR6_DESC_BASE<"bnezalc", brtarget_mm,
list<Register> Defs = [RA];
}
+class CMP_CBR_2R_MMR6_DESC_BASE<string instr_asm, DAGOperand opnd,
+ RegisterOperand GPROpnd> : BRANCH_DESC_BASE {
+ dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt, opnd:$offset);
+ dag OutOperandList = (outs);
+ string AsmString = !strconcat(instr_asm, "\t$rs, $rt, $offset");
+ list<Register> Defs = [AT];
+}
+
+class BGEC_MMR6_DESC : CMP_CBR_2R_MMR6_DESC_BASE<"bgec", brtarget_mm,
+ GPR32Opnd>;
+class BGEUC_MMR6_DESC : CMP_CBR_2R_MMR6_DESC_BASE<"bgeuc", brtarget_mm,
+ GPR32Opnd>;
+class BLTC_MMR6_DESC : CMP_CBR_2R_MMR6_DESC_BASE<"bltc", brtarget_mm,
+ GPR32Opnd>;
+class BLTUC_MMR6_DESC : CMP_CBR_2R_MMR6_DESC_BASE<"bltuc", brtarget_mm,
+ GPR32Opnd>;
+class BEQC_MMR6_DESC : CMP_CBR_2R_MMR6_DESC_BASE<"beqc", brtarget_mm,
+ GPR32Opnd>;
+class BNEC_MMR6_DESC : CMP_CBR_2R_MMR6_DESC_BASE<"bnec", brtarget_mm,
+ GPR32Opnd>;
+
/// Floating Point Instructions
class FADD_S_MMR6_ENC : POOL32F_ARITH_FM_MMR6<"add.s", 0, 0b00110000>;
class FADD_D_MMR6_ENC : POOL32F_ARITH_FM_MMR6<"add.d", 1, 0b00110000>;
@@ -252,12 +334,12 @@ class CVT_S_L_MMR6_ENC : POOL32F_CVT_DS_FM<"cvt.s.l", 2, 0b1101101>;
//===----------------------------------------------------------------------===//
class ADD_MMR6_DESC : ArithLogicR<"add", GPR32Opnd>;
-class ADDIU_MMR6_DESC : ArithLogicI<"addiu", simm16, GPR32Opnd>;
+class ADDIU_MMR6_DESC : ArithLogicI<"addiu", simm16, GPR32Opnd, II_ADDIU, immSExt16, add>;
class ADDU_MMR6_DESC : ArithLogicR<"addu", GPR32Opnd>;
-class MUL_MMR6_DESC : ArithLogicR<"mul", GPR32Opnd>;
-class MUH_MMR6_DESC : ArithLogicR<"muh", GPR32Opnd>;
-class MULU_MMR6_DESC : ArithLogicR<"mulu", GPR32Opnd>;
-class MUHU_MMR6_DESC : ArithLogicR<"muhu", GPR32Opnd>;
+class MUL_MMR6_DESC : ArithLogicR<"mul", GPR32Opnd, 1, II_MUL, mul>;
+class MUH_MMR6_DESC : ArithLogicR<"muh", GPR32Opnd, 1, II_MUH, mulhs>;
+class MULU_MMR6_DESC : ArithLogicR<"mulu", GPR32Opnd, 1, II_MULU>;
+class MUHU_MMR6_DESC : ArithLogicR<"muhu", GPR32Opnd, 1, II_MUHU, mulhu>;
class BC_MMR6_DESC_BASE<string instr_asm, DAGOperand opnd>
: BRANCH_DESC_BASE, MMR6Arch<instr_asm> {
@@ -476,6 +558,32 @@ class PCREL_MMR6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
class ADDIUPC_MMR6_DESC : PCREL_MMR6_DESC_BASE<"addiupc", GPR32Opnd, simm19_lsl2>;
class LWPC_MMR6_DESC: PCREL_MMR6_DESC_BASE<"lwpc", GPR32Opnd, simm19_lsl2>;
+class LWP_MMR6_DESC : MMR6Arch<"lwp"> {
+ dag OutOperandList = (outs regpair:$rd);
+ dag InOperandList = (ins mem_simm12:$addr);
+ string AsmString = !strconcat("lwp", "\t$rd, $addr");
+ list<dag> Pattern = [];
+ InstrItinClass Itin = NoItinerary;
+ ComplexPattern Addr = addr;
+ Format f = FrmI;
+ string BaseOpcode = "lwp";
+ string DecoderMethod = "DecodeMemMMImm12";
+ bit mayLoad = 1;
+}
+
+class SWP_MMR6_DESC : MMR6Arch<"swp"> {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins regpair:$rd, mem_simm12:$addr);
+ string AsmString = !strconcat("swp", "\t$rd, $addr");
+ list<dag> Pattern = [];
+ InstrItinClass Itin = NoItinerary;
+ ComplexPattern Addr = addr;
+ Format f = FrmI;
+ string BaseOpcode = "swp";
+ string DecoderMethod = "DecodeMemMMImm12";
+ bit mayStore = 1;
+}
+
class SELEQNE_Z_MMR6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd>
: MMR6Arch<instr_asm> {
dag OutOperandList = (outs GPROpnd:$rd);
@@ -499,17 +607,38 @@ class RDHWR_MMR6_DESC : MMR6Arch<"rdhwr">, MipsR6Inst {
class WAIT_MMR6_DESC : WaitMM<"wait">;
class SSNOP_MMR6_DESC : Barrier<"ssnop">;
class SLL_MMR6_DESC : shift_rotate_imm<"sll", uimm5, GPR32Opnd, II_SLL>;
-class DIV_MMR6_DESC : ArithLogicR<"div", GPR32Opnd>;
-class DIVU_MMR6_DESC : ArithLogicR<"divu", GPR32Opnd>;
-class MOD_MMR6_DESC : ArithLogicR<"mod", GPR32Opnd>;
-class MODU_MMR6_DESC : ArithLogicR<"modu", GPR32Opnd>;
-class AND_MMR6_DESC : ArithLogicR<"and", GPR32Opnd>;
-class ANDI_MMR6_DESC : ArithLogicI<"andi", simm16, GPR32Opnd>;
-class NOR_MMR6_DESC : ArithLogicR<"nor", GPR32Opnd>;
-class OR_MMR6_DESC : ArithLogicR<"or", GPR32Opnd>;
-class ORI_MMR6_DESC : ArithLogicI<"ori", simm16, GPR32Opnd>;
-class XOR_MMR6_DESC : ArithLogicR<"xor", GPR32Opnd>;
-class XORI_MMR6_DESC : ArithLogicI<"xori", simm16, GPR32Opnd>;
+
+class DIVMOD_MMR6_DESC_BASE<string opstr, RegisterOperand GPROpnd,
+ SDPatternOperator OpNode=null_frag>
+ : MipsR6Inst {
+ dag OutOperandList = (outs GPROpnd:$rd);
+ dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt);
+ string AsmString = !strconcat(opstr, "\t$rd, $rs, $rt");
+ list<dag> Pattern = [(set GPROpnd:$rd, (OpNode GPROpnd:$rs, GPROpnd:$rt))];
+ string BaseOpcode = opstr;
+ Format f = FrmR;
+ let isCommutable = 0;
+ let isReMaterializable = 1;
+
+ // This instruction doesn't trap division by zero itself. We must insert
+ // teq instructions as well.
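+  // (conceptually: "div $rd, $rs, $rt" followed by "teq $rt, $zero, 7",
+  // 7 being the conventional divide-by-zero trap code).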
+ bit usesCustomInserter = 1;
+}
+class DIV_MMR6_DESC : DIVMOD_MMR6_DESC_BASE<"div", GPR32Opnd, sdiv>;
+class DIVU_MMR6_DESC : DIVMOD_MMR6_DESC_BASE<"divu", GPR32Opnd, udiv>;
+class MOD_MMR6_DESC : DIVMOD_MMR6_DESC_BASE<"mod", GPR32Opnd, srem>;
+class MODU_MMR6_DESC : DIVMOD_MMR6_DESC_BASE<"modu", GPR32Opnd, urem>;
+class AND_MMR6_DESC : ArithLogicR<"and", GPR32Opnd, 1, II_AND, and>;
+class ANDI_MMR6_DESC : ArithLogicI<"andi", uimm16, GPR32Opnd, II_ANDI>;
+class NOR_MMR6_DESC : LogicNOR<"nor", GPR32Opnd>;
+class OR_MMR6_DESC : ArithLogicR<"or", GPR32Opnd, 1, II_OR, or>;
+class ORI_MMR6_DESC : ArithLogicI<"ori", uimm16, GPR32Opnd, II_ORI, immZExt16,
+ or> {
+ int AddedComplexity = 1;
+}
+class XOR_MMR6_DESC : ArithLogicR<"xor", GPR32Opnd, 1, II_XOR, xor>;
+class XORI_MMR6_DESC : ArithLogicI<"xori", uimm16, GPR32Opnd, II_XORI,
+ immZExt16, xor>;
class SWE_MMR6_DESC_BASE<string opstr, DAGOperand RO, DAGOperand MO,
SDPatternOperator OpNode = null_frag,
@@ -536,6 +665,155 @@ class WRPGPR_WSBH_MMR6_DESC_BASE<string instr_asm, RegisterOperand RO>
class WRPGPR_MMR6_DESC : WRPGPR_WSBH_MMR6_DESC_BASE<"wrpgpr", GPR32Opnd>;
class WSBH_MMR6_DESC : WRPGPR_WSBH_MMR6_DESC_BASE<"wsbh", GPR32Opnd>;
+class MTC0_MMR6_DESC_BASE<string opstr, RegisterOperand DstRC,
+ RegisterOperand SrcRC> {
+ dag InOperandList = (ins SrcRC:$rt, uimm3:$sel);
+ dag OutOperandList = (outs DstRC:$rs);
+ string AsmString = !strconcat(opstr, "\t$rt, $rs, $sel");
+ list<dag> Pattern = [];
+ Format f = FrmFR;
+ string BaseOpcode = opstr;
+}
+class MTC1_MMR6_DESC_BASE<
+ string opstr, RegisterOperand DstRC, RegisterOperand SrcRC,
+ InstrItinClass Itin = NoItinerary, SDPatternOperator OpNode = null_frag>
+ : MipsR6Inst {
+ dag InOperandList = (ins SrcRC:$rt);
+ dag OutOperandList = (outs DstRC:$fs);
+ string AsmString = !strconcat(opstr, "\t$rt, $fs");
+ list<dag> Pattern = [(set DstRC:$fs, (OpNode SrcRC:$rt))];
+ Format f = FrmFR;
+ InstrItinClass Itinerary = Itin;
+ string BaseOpcode = opstr;
+}
+class MTC1_64_MMR6_DESC_BASE<
+ string opstr, RegisterOperand DstRC, RegisterOperand SrcRC,
+ InstrItinClass Itin = NoItinerary> : MipsR6Inst {
+ dag InOperandList = (ins DstRC:$fs_in, SrcRC:$rt);
+ dag OutOperandList = (outs DstRC:$fs);
+ string AsmString = !strconcat(opstr, "\t$rt, $fs");
+ list<dag> Pattern = [];
+ Format f = FrmFR;
+ InstrItinClass Itinerary = Itin;
+ string BaseOpcode = opstr;
+ // $fs_in is part of a white lie to work around a widespread bug in the FPU
+ // implementation. See expandBuildPairF64 for details.
+ let Constraints = "$fs = $fs_in";
+}
+class MTC2_MMR6_DESC_BASE<string opstr, RegisterOperand DstRC,
+ RegisterOperand SrcRC> {
+ dag InOperandList = (ins SrcRC:$rt);
+ dag OutOperandList = (outs DstRC:$impl);
+ string AsmString = !strconcat(opstr, "\t$rt, $impl");
+ list<dag> Pattern = [];
+ Format f = FrmFR;
+ string BaseOpcode = opstr;
+}
+
+class MTC0_MMR6_DESC : MTC0_MMR6_DESC_BASE<"mtc0", COP0Opnd, GPR32Opnd>;
+class MTC1_MMR6_DESC : MTC1_MMR6_DESC_BASE<"mtc1", FGR32Opnd, GPR32Opnd,
+ II_MTC1, bitconvert>, HARDFLOAT;
+class MTC2_MMR6_DESC : MTC2_MMR6_DESC_BASE<"mtc2", COP2Opnd, GPR32Opnd>;
+class MTHC0_MMR6_DESC : MTC0_MMR6_DESC_BASE<"mthc0", COP0Opnd, GPR32Opnd>;
+class MTHC1_D32_MMR6_DESC : MTC1_64_MMR6_DESC_BASE<"mthc1", AFGR64Opnd, GPR32Opnd>,
+ HARDFLOAT, FGR_32;
+class MTHC1_D64_MMR6_DESC : MTC1_64_MMR6_DESC_BASE<"mthc1", FGR64Opnd, GPR32Opnd>,
+ HARDFLOAT, FGR_64;
+class MTHC2_MMR6_DESC : MTC2_MMR6_DESC_BASE<"mthc2", COP2Opnd, GPR32Opnd>;
+
+class MFC0_MMR6_DESC_BASE<string opstr, RegisterOperand DstRC,
+ RegisterOperand SrcRC> {
+ dag InOperandList = (ins SrcRC:$rs, uimm3:$sel);
+ dag OutOperandList = (outs DstRC:$rt);
+ string AsmString = !strconcat(opstr, "\t$rt, $rs, $sel");
+ list<dag> Pattern = [];
+ Format f = FrmFR;
+ string BaseOpcode = opstr;
+}
+class MFC1_MMR6_DESC_BASE<string opstr, RegisterOperand DstRC,
+ RegisterOperand SrcRC,
+ InstrItinClass Itin = NoItinerary,
+ SDPatternOperator OpNode = null_frag> : MipsR6Inst {
+ dag InOperandList = (ins SrcRC:$fs);
+ dag OutOperandList = (outs DstRC:$rt);
+ string AsmString = !strconcat(opstr, "\t$rt, $fs");
+ list<dag> Pattern = [(set DstRC:$rt, (OpNode SrcRC:$fs))];
+ Format f = FrmFR;
+ InstrItinClass Itinerary = Itin;
+ string BaseOpcode = opstr;
+}
+class MFC2_MMR6_DESC_BASE<string opstr, RegisterOperand DstRC,
+ RegisterOperand SrcRC> {
+ dag InOperandList = (ins SrcRC:$impl);
+ dag OutOperandList = (outs DstRC:$rt);
+ string AsmString = !strconcat(opstr, "\t$rt, $impl");
+ list<dag> Pattern = [];
+ Format f = FrmFR;
+ string BaseOpcode = opstr;
+}
+class MFC0_MMR6_DESC : MFC0_MMR6_DESC_BASE<"mfc0", GPR32Opnd, COP0Opnd>;
+class MFC1_MMR6_DESC : MFC1_MMR6_DESC_BASE<"mfc1", GPR32Opnd, FGR32Opnd,
+ II_MFC1, bitconvert>, HARDFLOAT;
+class MFC2_MMR6_DESC : MFC2_MMR6_DESC_BASE<"mfc2", GPR32Opnd, COP2Opnd>;
+class MFHC0_MMR6_DESC : MFC0_MMR6_DESC_BASE<"mfhc0", GPR32Opnd, COP0Opnd>;
+class MFHC1_D32_MMR6_DESC : MFC1_MMR6_DESC_BASE<"mfhc1", GPR32Opnd, AFGR64Opnd,
+ II_MFHC1>, HARDFLOAT, FGR_32;
+class MFHC1_D64_MMR6_DESC : MFC1_MMR6_DESC_BASE<"mfhc1", GPR32Opnd, FGR64Opnd,
+ II_MFHC1>, HARDFLOAT, FGR_64;
+class MFHC2_MMR6_DESC : MFC2_MMR6_DESC_BASE<"mfhc2", GPR32Opnd, COP2Opnd>;
+
+class LDC1_D64_MMR6_DESC : MipsR6Inst, HARDFLOAT, FGR_64 {
+ dag InOperandList = (ins mem_mm_16:$addr);
+ dag OutOperandList = (outs FGR64Opnd:$ft);
+ string AsmString = !strconcat("ldc1", "\t$ft, $addr");
+ list<dag> Pattern = [(set FGR64Opnd:$ft, (load addrimm16:$addr))];
+ Format f = FrmFI;
+ InstrItinClass Itinerary = II_LDC1;
+ string BaseOpcode = "ldc1";
+ bit mayLoad = 1;
+ let DecoderMethod = "DecodeFMemMMR2";
+}
+
+class SDC1_D64_MMR6_DESC : MipsR6Inst, HARDFLOAT, FGR_64 {
+ dag InOperandList = (ins FGR64Opnd:$ft, mem_mm_16:$addr);
+ dag OutOperandList = (outs);
+ string AsmString = !strconcat("sdc1", "\t$ft, $addr");
+ list<dag> Pattern = [(store FGR64Opnd:$ft, addrimm16:$addr)];
+ Format f = FrmFI;
+ InstrItinClass Itinerary = II_SDC1;
+ string BaseOpcode = "sdc1";
+ bit mayStore = 1;
+ let DecoderMethod = "DecodeFMemMMR2";
+}
+
+class LDC2_LWC2_MMR6_DESC_BASE<string opstr> {
+ dag OutOperandList = (outs COP2Opnd:$rt);
+ dag InOperandList = (ins mem_mm_11:$addr);
+ string AsmString = !strconcat(opstr, "\t$rt, $addr");
+ list<dag> Pattern = [(set COP2Opnd:$rt, (load addrimm11:$addr))];
+ Format f = FrmFI;
+ InstrItinClass Itinerary = NoItinerary;
+ string BaseOpcode = opstr;
+ bit mayLoad = 1;
+ string DecoderMethod = "DecodeFMemCop2MMR6";
+}
+class LDC2_MMR6_DESC : LDC2_LWC2_MMR6_DESC_BASE<"ldc2">;
+class LWC2_MMR6_DESC : LDC2_LWC2_MMR6_DESC_BASE<"lwc2">;
+
+class SDC2_SWC2_MMR6_DESC_BASE<string opstr> {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins COP2Opnd:$rt, mem_mm_11:$addr);
+ string AsmString = !strconcat(opstr, "\t$rt, $addr");
+ list<dag> Pattern = [(store COP2Opnd:$rt, addrimm11:$addr)];
+ Format f = FrmFI;
+ InstrItinClass Itinerary = NoItinerary;
+ string BaseOpcode = opstr;
+ bit mayStore = 1;
+ string DecoderMethod = "DecodeFMemCop2MMR6";
+}
+class SDC2_MMR6_DESC : SDC2_SWC2_MMR6_DESC_BASE<"sdc2">;
+class SWC2_MMR6_DESC : SDC2_SWC2_MMR6_DESC_BASE<"swc2">;
+
/// Floating Point Instructions
class FARITH_MMR6_DESC_BASE<string instr_asm, RegisterOperand RC,
InstrItinClass Itin, bit isComm,
@@ -633,69 +911,69 @@ class CVT_S_L_MMR6_DESC : CVT_MMR6_DESC_BASE<"cvt.s.l", FGR64Opnd, FGR32Opnd,
multiclass CMP_CC_MMR6<bits<6> format, string Typestr,
RegisterOperand FGROpnd> {
- def CMP_AF_#NAME : POOL32F_CMP_FM<
+ def CMP_AF_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.af.", Typestr), format, FIELD_CMP_COND_AF>,
- CMP_CONDN_DESC_BASE<"af", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"af", Typestr, FGROpnd>, HARDFLOAT,
ISA_MICROMIPS32R6;
- def CMP_UN_#NAME : POOL32F_CMP_FM<
+ def CMP_UN_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.un.", Typestr), format, FIELD_CMP_COND_UN>,
- CMP_CONDN_DESC_BASE<"un", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"un", Typestr, FGROpnd, setuo>, HARDFLOAT,
ISA_MICROMIPS32R6;
- def CMP_EQ_#NAME : POOL32F_CMP_FM<
+ def CMP_EQ_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.eq.", Typestr), format, FIELD_CMP_COND_EQ>,
- CMP_CONDN_DESC_BASE<"eq", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"eq", Typestr, FGROpnd, setoeq>, HARDFLOAT,
ISA_MICROMIPS32R6;
- def CMP_UEQ_#NAME : POOL32F_CMP_FM<
+ def CMP_UEQ_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.ueq.", Typestr), format, FIELD_CMP_COND_UEQ>,
- CMP_CONDN_DESC_BASE<"ueq", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"ueq", Typestr, FGROpnd, setueq>, HARDFLOAT,
ISA_MICROMIPS32R6;
- def CMP_LT_#NAME : POOL32F_CMP_FM<
+ def CMP_LT_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.lt.", Typestr), format, FIELD_CMP_COND_LT>,
- CMP_CONDN_DESC_BASE<"lt", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"lt", Typestr, FGROpnd, setolt>, HARDFLOAT,
ISA_MICROMIPS32R6;
- def CMP_ULT_#NAME : POOL32F_CMP_FM<
+ def CMP_ULT_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.ult.", Typestr), format, FIELD_CMP_COND_ULT>,
- CMP_CONDN_DESC_BASE<"ult", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"ult", Typestr, FGROpnd, setult>, HARDFLOAT,
ISA_MICROMIPS32R6;
- def CMP_LE_#NAME : POOL32F_CMP_FM<
+ def CMP_LE_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.le.", Typestr), format, FIELD_CMP_COND_LE>,
- CMP_CONDN_DESC_BASE<"le", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"le", Typestr, FGROpnd, setole>, HARDFLOAT,
ISA_MICROMIPS32R6;
- def CMP_ULE_#NAME : POOL32F_CMP_FM<
+ def CMP_ULE_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.ule.", Typestr), format, FIELD_CMP_COND_ULE>,
- CMP_CONDN_DESC_BASE<"ule", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"ule", Typestr, FGROpnd, setule>, HARDFLOAT,
ISA_MICROMIPS32R6;
- def CMP_SAF_#NAME : POOL32F_CMP_FM<
+ def CMP_SAF_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.saf.", Typestr), format, FIELD_CMP_COND_SAF>,
- CMP_CONDN_DESC_BASE<"saf", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"saf", Typestr, FGROpnd>, HARDFLOAT,
ISA_MICROMIPS32R6;
- def CMP_SUN_#NAME : POOL32F_CMP_FM<
+ def CMP_SUN_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.sun.", Typestr), format, FIELD_CMP_COND_SUN>,
- CMP_CONDN_DESC_BASE<"sun", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"sun", Typestr, FGROpnd>, HARDFLOAT,
ISA_MICROMIPS32R6;
- def CMP_SEQ_#NAME : POOL32F_CMP_FM<
+ def CMP_SEQ_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.seq.", Typestr), format, FIELD_CMP_COND_SEQ>,
- CMP_CONDN_DESC_BASE<"seq", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"seq", Typestr, FGROpnd>, HARDFLOAT,
ISA_MICROMIPS32R6;
- def CMP_SUEQ_#NAME : POOL32F_CMP_FM<
+ def CMP_SUEQ_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.sueq.", Typestr), format, FIELD_CMP_COND_SUEQ>,
- CMP_CONDN_DESC_BASE<"sueq", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"sueq", Typestr, FGROpnd>, HARDFLOAT,
ISA_MICROMIPS32R6;
- def CMP_SLT_#NAME : POOL32F_CMP_FM<
+ def CMP_SLT_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.slt.", Typestr), format, FIELD_CMP_COND_SLT>,
- CMP_CONDN_DESC_BASE<"slt", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"slt", Typestr, FGROpnd>, HARDFLOAT,
ISA_MICROMIPS32R6;
- def CMP_SULT_#NAME : POOL32F_CMP_FM<
+ def CMP_SULT_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.sult.", Typestr), format, FIELD_CMP_COND_SULT>,
- CMP_CONDN_DESC_BASE<"sult", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"sult", Typestr, FGROpnd>, HARDFLOAT,
ISA_MICROMIPS32R6;
- def CMP_SLE_#NAME : POOL32F_CMP_FM<
+ def CMP_SLE_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.sle.", Typestr), format, FIELD_CMP_COND_SLE>,
- CMP_CONDN_DESC_BASE<"sle", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"sle", Typestr, FGROpnd>, HARDFLOAT,
ISA_MICROMIPS32R6;
- def CMP_SULE_#NAME : POOL32F_CMP_FM<
+ def CMP_SULE_#NAME : R6MMR6Rel, POOL32F_CMP_FM<
!strconcat("cmp.sule.", Typestr), format, FIELD_CMP_COND_SULE>,
- CMP_CONDN_DESC_BASE<"sule", Typestr, FGROpnd>, HARDFLOAT, R6MMR6Rel,
+ CMP_CONDN_DESC_BASE<"sule", Typestr, FGROpnd>, HARDFLOAT,
ISA_MICROMIPS32R6;
}
@@ -769,8 +1047,8 @@ class SEL_D_MMR6_DESC : COP1_SEL_DESC_BASE<"sel.d", FGR64Opnd> {
class SELEQZ_S_MMR6_DESC : SELEQNEZ_DESC_BASE<"seleqz.s", FGR32Opnd>;
class SELEQZ_D_MMR6_DESC : SELEQNEZ_DESC_BASE<"seleqz.d", FGR64Opnd>;
-class SELENZ_S_MMR6_DESC : SELEQNEZ_DESC_BASE<"selnez.s", FGR32Opnd>;
-class SELENZ_D_MMR6_DESC : SELEQNEZ_DESC_BASE<"selnez.d", FGR64Opnd>;
+class SELNEZ_S_MMR6_DESC : SELEQNEZ_DESC_BASE<"selnez.s", FGR32Opnd>;
+class SELNEZ_D_MMR6_DESC : SELEQNEZ_DESC_BASE<"selnez.d", FGR64Opnd>;
class RINT_S_MMR6_DESC : CLASS_RINT_DESC_BASE<"rint.s", FGR32Opnd>;
class RINT_D_MMR6_DESC : CLASS_RINT_DESC_BASE<"rint.d", FGR64Opnd>;
class CLASS_S_MMR6_DESC : CLASS_RINT_DESC_BASE<"class.s", FGR32Opnd>;
@@ -785,7 +1063,7 @@ class SB_MMR6_DESC : STORE_MMR6_DESC_BASE<"sb", GPR32Opnd>;
class STORE_EVA_MMR6_DESC_BASE<string instr_asm, RegisterOperand RO>
: MMR6Arch<instr_asm>, MipsR6Inst {
dag OutOperandList = (outs);
- dag InOperandList = (ins RO:$rt, mem_mm_9:$addr);
+ dag InOperandList = (ins RO:$rt, mem_simm9:$addr);
string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
string DecoderMethod = "DecodeStoreEvaOpMM";
bit mayStore = 1;
@@ -797,7 +1075,7 @@ class SHE_MMR6_DESC : STORE_EVA_MMR6_DESC_BASE<"she", GPR32Opnd>;
class LOAD_WORD_EVA_MMR6_DESC_BASE<string instr_asm, RegisterOperand RO> :
MMR6Arch<instr_asm>, MipsR6Inst {
dag OutOperandList = (outs RO:$rt);
- dag InOperandList = (ins mem_mm_12:$addr);
+ dag InOperandList = (ins mem_simm9:$addr);
string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
string DecoderMethod = "DecodeMemMMImm9";
bit mayLoad = 1;
@@ -805,30 +1083,42 @@ class LOAD_WORD_EVA_MMR6_DESC_BASE<string instr_asm, RegisterOperand RO> :
class LLE_MMR6_DESC : LOAD_WORD_EVA_MMR6_DESC_BASE<"lle", GPR32Opnd>;
class LWE_MMR6_DESC : LOAD_WORD_EVA_MMR6_DESC_BASE<"lwe", GPR32Opnd>;
class ADDU16_MMR6_DESC : ArithRMM16<"addu16", GPRMM16Opnd, 1, II_ADDU, add>,
- MMR6Arch<"addu16">;
+ MMR6Arch<"addu16"> {
+ int AddedComplexity = 1;
+}
class AND16_MMR6_DESC : LogicRMM16<"and16", GPRMM16Opnd, II_AND, and>,
- MMR6Arch<"and16">;
+ MMR6Arch<"and16"> {
+ int AddedComplexity = 1;
+}
class ANDI16_MMR6_DESC : AndImmMM16<"andi16", GPRMM16Opnd, II_AND>,
MMR6Arch<"andi16">;
-class NOT16_MMR6_DESC : NotMM16<"not16", GPRMM16Opnd>, MMR6Arch<"not16">;
+class NOT16_MMR6_DESC : NotMM16<"not16", GPRMM16Opnd>, MMR6Arch<"not16"> {
+ int AddedComplexity = 1;
+}
class OR16_MMR6_DESC : LogicRMM16<"or16", GPRMM16Opnd, II_OR, or>,
- MMR6Arch<"or16">;
+ MMR6Arch<"or16"> {
+ int AddedComplexity = 1;
+}
class SLL16_MMR6_DESC : ShiftIMM16<"sll16", uimm3_shift, GPRMM16Opnd, II_SLL>,
MMR6Arch<"sll16">;
class SRL16_MMR6_DESC : ShiftIMM16<"srl16", uimm3_shift, GPRMM16Opnd, II_SRL>,
MMR6Arch<"srl16">;
-class BREAK16_MMR6_DESC : BrkSdbbp16MM<"break16">, MMR6Arch<"srl16">,
+class BREAK16_MMR6_DESC : BrkSdbbp16MM<"break16">, MMR6Arch<"break16">,
MicroMipsR6Inst16;
-class LI16_MMR6_DESC : LoadImmMM16<"li16", li_simm7, GPRMM16Opnd>,
- MMR6Arch<"srl16">, MicroMipsR6Inst16, IsAsCheapAsAMove;
-class MOVE16_MMR6_DESC : MoveMM16<"move16", GPR32Opnd>, MMR6Arch<"srl16">,
+class LI16_MMR6_DESC : LoadImmMM16<"li16", li16_imm, GPRMM16Opnd>,
+ MMR6Arch<"li16">, MicroMipsR6Inst16, IsAsCheapAsAMove;
+class MOVE16_MMR6_DESC : MoveMM16<"move16", GPR32Opnd>, MMR6Arch<"move16">,
MicroMipsR6Inst16;
class SDBBP16_MMR6_DESC : BrkSdbbp16MM<"sdbbp16">, MMR6Arch<"sdbbp16">,
MicroMipsR6Inst16;
class SUBU16_MMR6_DESC : ArithRMM16<"subu16", GPRMM16Opnd, 0, II_SUBU, sub>,
- MMR6Arch<"sdbbp16">, MicroMipsR6Inst16;
+ MMR6Arch<"subu16">, MicroMipsR6Inst16 {
+ int AddedComplexity = 1;
+}
class XOR16_MMR6_DESC : LogicRMM16<"xor16", GPRMM16Opnd, II_XOR, xor>,
- MMR6Arch<"sdbbp16">, MicroMipsR6Inst16;
+ MMR6Arch<"xor16"> {
+ int AddedComplexity = 1;
+}
class LW_MMR6_DESC : MMR6Arch<"lw">, MipsR6Inst {
dag OutOperandList = (outs GPR32Opnd:$rt);
@@ -854,9 +1144,9 @@ class LUI_MMR6_DESC : IsAsCheapAsAMove, MMR6Arch<"lui">, MipsR6Inst{
class SYNC_MMR6_DESC : MMR6Arch<"sync">, MipsR6Inst {
dag OutOperandList = (outs);
- dag InOperandList = (ins i32imm:$stype);
+ dag InOperandList = (ins uimm5:$stype);
string AsmString = !strconcat("sync", "\t$stype");
- list<dag> Pattern = [(MipsSync imm:$stype)];
+ list<dag> Pattern = [(MipsSync immZExt5:$stype)];
InstrItinClass Itinerary = NoItinerary;
bit HasSideEffects = 1;
}
@@ -924,6 +1214,111 @@ class SWSP_MMR6_DESC
let mayStore = 1;
}
+class JALRC_HB_MMR6_DESC {
+ dag OutOperandList = (outs GPR32Opnd:$rt);
+ dag InOperandList = (ins GPR32Opnd:$rs);
+ string AsmString = !strconcat("jalrc.hb", "\t$rt, $rs");
+ list<dag> Pattern = [];
+ InstrItinClass Itinerary = NoItinerary;
+ Format Form = FrmJ;
+ bit isIndirectBranch = 1;
+ bit hasDelaySlot = 0;
+}
+
+class TLBINV_MMR6_DESC_BASE<string opstr> {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins);
+ string AsmString = opstr;
+ list<dag> Pattern = [];
+}
+
+class TLBINV_MMR6_DESC : TLBINV_MMR6_DESC_BASE<"tlbinv">;
+class TLBINVF_MMR6_DESC : TLBINV_MMR6_DESC_BASE<"tlbinvf">;
+
+class DVPEVP_MMR6_DESC_BASE<string opstr> {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins GPR32Opnd:$rs);
+ string AsmString = !strconcat(opstr, "\t$rs");
+ list<dag> Pattern = [];
+}
+
+class DVP_MMR6_DESC : DVPEVP_MMR6_DESC_BASE<"dvp">;
+class EVP_MMR6_DESC : DVPEVP_MMR6_DESC_BASE<"evp">;
+
+class BEQZC_MMR6_DESC
+ : CMP_CBR_EQNE_Z_DESC_BASE<"beqzc", brtarget21_mm, GPR32Opnd>,
+ MMR6Arch<"beqzc">;
+class BNEZC_MMR6_DESC
+ : CMP_CBR_EQNE_Z_DESC_BASE<"bnezc", brtarget21_mm, GPR32Opnd>,
+ MMR6Arch<"bnezc">;
+
+class BRANCH_COP1_MMR6_DESC_BASE<string opstr> :
+ InstSE<(outs), (ins FGR64Opnd:$rt, brtarget_mm:$offset),
+ !strconcat(opstr, "\t$rt, $offset"), [], II_BC1CCZ, FrmI>,
+ HARDFLOAT, BRANCH_DESC_BASE {
+ list<Register> Defs = [AT];
+}
+
+class BC1EQZC_MMR6_DESC : BRANCH_COP1_MMR6_DESC_BASE<"bc1eqzc">;
+class BC1NEZC_MMR6_DESC : BRANCH_COP1_MMR6_DESC_BASE<"bc1nezc">;
+
+class BRANCH_COP2_MMR6_DESC_BASE<string opstr> : BRANCH_DESC_BASE {
+ dag InOperandList = (ins COP2Opnd:$rt, brtarget_mm:$offset);
+ dag OutOperandList = (outs);
+ string AsmString = !strconcat(opstr, "\t$rt, $offset");
+ list<Register> Defs = [AT];
+}
+
+class BC2EQZC_MMR6_DESC : BRANCH_COP2_MMR6_DESC_BASE<"bc2eqzc">;
+class BC2NEZC_MMR6_DESC : BRANCH_COP2_MMR6_DESC_BASE<"bc2nezc">;
+
+class EXT_MMR6_DESC {
+ dag OutOperandList = (outs GPR32Opnd:$rt);
+ dag InOperandList = (ins GPR32Opnd:$rs, uimm5:$pos, uimm5_plus1:$size);
+ string AsmString = !strconcat("ext", "\t$rt, $rs, $pos, $size");
+ list<dag> Pattern = [(set GPR32Opnd:$rt, (MipsExt GPR32Opnd:$rs, imm:$pos,
+ imm:$size))];
+ InstrItinClass Itinerary = II_EXT;
+ Format Form = FrmR;
+ string BaseOpcode = "ext";
+}
+
+class INS_MMR6_DESC {
+ dag OutOperandList = (outs GPR32Opnd:$rt);
+ dag InOperandList = (ins GPR32Opnd:$rs, uimm5:$pos, uimm5_inssize_plus1:$size,
+ GPR32Opnd:$src);
+ string AsmString = !strconcat("ins", "\t$rt, $rs, $pos, $size");
+ list<dag> Pattern = [(set GPR32Opnd:$rt, (MipsIns GPR32Opnd:$rs, imm:$pos,
+ imm:$size, GPR32Opnd:$src))];
+ InstrItinClass Itinerary = II_INS;
+ Format Form = FrmR;
+ string BaseOpcode = "ins";
+ string Constraints = "$src = $rt";
+}
+
+class JALRC_MMR6_DESC {
+ dag OutOperandList = (outs GPR32Opnd:$rt);
+ dag InOperandList = (ins GPR32Opnd:$rs);
+ string AsmString = !strconcat("jalrc", "\t$rt, $rs");
+ list<dag> Pattern = [];
+ InstrItinClass Itinerary = II_JALRC;
+ bit isCall = 1;
+ bit hasDelaySlot = 0;
+ list<Register> Defs = [RA];
+}
+
+class BOVC_BNVC_MMR6_DESC_BASE<string instr_asm, Operand opnd,
+ RegisterOperand GPROpnd>
+ : BRANCH_DESC_BASE {
+ dag InOperandList = (ins GPROpnd:$rt, GPROpnd:$rs, opnd:$offset);
+ dag OutOperandList = (outs);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $rs, $offset");
+ list<Register> Defs = [AT];
+}
+
+class BOVC_MMR6_DESC : BOVC_BNVC_MMR6_DESC_BASE<"bovc", brtargetr6, GPR32Opnd>;
+class BNVC_MMR6_DESC : BOVC_BNVC_MMR6_DESC_BASE<"bnvc", brtargetr6, GPR32Opnd>;
+
//===----------------------------------------------------------------------===//
//
// Instruction Definitions
@@ -946,22 +1341,18 @@ def AUI_MMR6 : R6MMR6Rel, AUI_MMR6_ENC, AUI_MMR6_DESC, ISA_MICROMIPS32R6;
def BALC_MMR6 : R6MMR6Rel, BALC_MMR6_ENC, BALC_MMR6_DESC, ISA_MICROMIPS32R6;
def BC_MMR6 : R6MMR6Rel, BC_MMR6_ENC, BC_MMR6_DESC, ISA_MICROMIPS32R6;
def BC16_MMR6 : StdMMR6Rel, BC16_MMR6_DESC, BC16_MMR6_ENC, ISA_MICROMIPS32R6;
+def BEQZC_MMR6 : R6MMR6Rel, BEQZC_MMR6_ENC, BEQZC_MMR6_DESC,
+ ISA_MICROMIPS32R6;
def BEQZC16_MMR6 : StdMMR6Rel, BEQZC16_MMR6_DESC, BEQZC16_MMR6_ENC,
ISA_MICROMIPS32R6;
+def BNEZC_MMR6 : R6MMR6Rel, BNEZC_MMR6_ENC, BNEZC_MMR6_DESC,
+ ISA_MICROMIPS32R6;
def BNEZC16_MMR6 : StdMMR6Rel, BNEZC16_MMR6_DESC, BNEZC16_MMR6_ENC,
ISA_MICROMIPS32R6;
def BITSWAP_MMR6 : R6MMR6Rel, BITSWAP_MMR6_ENC, BITSWAP_MMR6_DESC,
ISA_MICROMIPS32R6;
def BEQZALC_MMR6 : R6MMR6Rel, BEQZALC_MMR6_ENC, BEQZALC_MMR6_DESC,
ISA_MICROMIPS32R6;
-def BGEZALC_MMR6 : R6MMR6Rel, BGEZALC_MMR6_ENC, BGEZALC_MMR6_DESC,
- ISA_MICROMIPS32R6;
-def BGTZALC_MMR6 : R6MMR6Rel, BGTZALC_MMR6_ENC, BGTZALC_MMR6_DESC,
- ISA_MICROMIPS32R6;
-def BLEZALC_MMR6 : R6MMR6Rel, BLEZALC_MMR6_ENC, BLEZALC_MMR6_DESC,
- ISA_MICROMIPS32R6;
-def BLTZALC_MMR6 : R6MMR6Rel, BLTZALC_MMR6_ENC, BLTZALC_MMR6_DESC,
- ISA_MICROMIPS32R6;
def BNEZALC_MMR6 : R6MMR6Rel, BNEZALC_MMR6_ENC, BNEZALC_MMR6_DESC,
ISA_MICROMIPS32R6;
def BREAK_MMR6 : StdMMR6Rel, BRK_MMR6_DESC, BRK_MMR6_ENC, ISA_MICROMIPS32R6;
@@ -985,8 +1376,30 @@ def JRC16_MMR6 : R6MMR6Rel, JRC16_MMR6_DESC, JRC16_MMR6_ENC, ISA_MICROMIPS32R6;
def JRCADDIUSP_MMR6 : R6MMR6Rel, JRCADDIUSP_MMR6_DESC, JRCADDIUSP_MMR6_ENC,
ISA_MICROMIPS32R6;
def LSA_MMR6 : R6MMR6Rel, LSA_MMR6_ENC, LSA_MMR6_DESC, ISA_MICROMIPS32R6;
+def LWP_MMR6 : StdMMR6Rel, LWP_MMR6_ENC, LWP_MMR6_DESC, ISA_MICROMIPS32R6;
def LWPC_MMR6 : R6MMR6Rel, LWPC_MMR6_ENC, LWPC_MMR6_DESC, ISA_MICROMIPS32R6;
def LWM16_MMR6 : StdMMR6Rel, LWM16_MMR6_DESC, LWM16_MMR6_ENC, ISA_MICROMIPS32R6;
+def MTC0_MMR6 : StdMMR6Rel, MTC0_MMR6_ENC, MTC0_MMR6_DESC, ISA_MICROMIPS32R6;
+def MTC1_MMR6 : StdMMR6Rel, MTC1_MMR6_DESC, MTC1_MMR6_ENC, ISA_MICROMIPS32R6;
+def MTC2_MMR6 : StdMMR6Rel, MTC2_MMR6_ENC, MTC2_MMR6_DESC, ISA_MICROMIPS32R6;
+def MTHC0_MMR6 : R6MMR6Rel, MTHC0_MMR6_ENC, MTHC0_MMR6_DESC, ISA_MICROMIPS32R6;
+def MTHC1_D32_MMR6 : StdMMR6Rel, MTHC1_D32_MMR6_DESC, MTHC1_MMR6_ENC, ISA_MICROMIPS32R6;
+let DecoderNamespace = "MicroMips32r6FP64" in {
+ def MTHC1_D64_MMR6 : R6MMR6Rel, MTHC1_D64_MMR6_DESC, MTHC1_MMR6_ENC,
+ ISA_MICROMIPS32R6;
+}
+def MTHC2_MMR6 : StdMMR6Rel, MTHC2_MMR6_ENC, MTHC2_MMR6_DESC, ISA_MICROMIPS32R6;
+def MFC0_MMR6 : StdMMR6Rel, MFC0_MMR6_ENC, MFC0_MMR6_DESC, ISA_MICROMIPS32R6;
+def MFC1_MMR6 : StdMMR6Rel, MFC1_MMR6_DESC, MFC1_MMR6_ENC, ISA_MICROMIPS32R6;
+def MFC2_MMR6 : StdMMR6Rel, MFC2_MMR6_ENC, MFC2_MMR6_DESC, ISA_MICROMIPS32R6;
+def MFHC0_MMR6 : R6MMR6Rel, MFHC0_MMR6_ENC, MFHC0_MMR6_DESC, ISA_MICROMIPS32R6;
+def MFHC1_D32_MMR6 : StdMMR6Rel, MFHC1_D32_MMR6_DESC, MFHC1_MMR6_ENC,
+ ISA_MICROMIPS32R6;
+let DecoderNamespace = "MicroMips32r6FP64" in {
+ def MFHC1_D64_MMR6 : StdMMR6Rel, MFHC1_D64_MMR6_DESC, MFHC1_MMR6_ENC,
+ ISA_MICROMIPS32R6;
+}
+def MFHC2_MMR6 : StdMMR6Rel, MFHC2_MMR6_ENC, MFHC2_MMR6_DESC, ISA_MICROMIPS32R6;
def MOD_MMR6 : R6MMR6Rel, MOD_MMR6_DESC, MOD_MMR6_ENC, ISA_MICROMIPS32R6;
def MODU_MMR6 : R6MMR6Rel, MODU_MMR6_DESC, MODU_MMR6_ENC, ISA_MICROMIPS32R6;
def MUL_MMR6 : R6MMR6Rel, MUL_MMR6_DESC, MUL_MMR6_ENC, ISA_MICROMIPS32R6;
@@ -1011,6 +1424,7 @@ def SUBU_MMR6 : StdMMR6Rel, SUBU_MMR6_DESC, SUBU_MMR6_ENC, ISA_MICROMIPS32R6;
def SW16_MMR6 : StdMMR6Rel, SW16_MMR6_DESC, SW16_MMR6_ENC, ISA_MICROMIPS32R6;
def SWM16_MMR6 : StdMMR6Rel, SWM16_MMR6_DESC, SWM16_MMR6_ENC, ISA_MICROMIPS32R6;
def SWSP_MMR6 : StdMMR6Rel, SWSP_MMR6_DESC, SWSP_MMR6_ENC, ISA_MICROMIPS32R6;
+def SWP_MMR6 : StdMMR6Rel, SWP_MMR6_ENC, SWP_MMR6_DESC, ISA_MICROMIPS32R6;
def PREFE_MMR6 : StdMMR6Rel, PREFE_MMR6_ENC, PREFE_MMR6_DESC, ISA_MICROMIPS32R6;
def CACHEE_MMR6 : StdMMR6Rel, CACHEE_MMR6_ENC, CACHEE_MMR6_DESC,
ISA_MICROMIPS32R6;
@@ -1174,6 +1588,11 @@ def SUBU16_MMR6 : StdMMR6Rel, SUBU16_MMR6_DESC, SUBU16_MMR6_ENC,
ISA_MICROMIPS32R6;
def XOR16_MMR6 : StdMMR6Rel, XOR16_MMR6_DESC, XOR16_MMR6_ENC,
ISA_MICROMIPS32R6;
+def JALRC_HB_MMR6 : R6MMR6Rel, JALRC_HB_MMR6_ENC, JALRC_HB_MMR6_DESC,
+ ISA_MICROMIPS32R6;
+def EXT_MMR6 : StdMMR6Rel, EXT_MMR6_ENC, EXT_MMR6_DESC, ISA_MICROMIPS32R6;
+def INS_MMR6 : StdMMR6Rel, INS_MMR6_ENC, INS_MMR6_DESC, ISA_MICROMIPS32R6;
+def JALRC_MMR6 : R6MMR6Rel, JALRC_MMR6_ENC, JALRC_MMR6_DESC, ISA_MICROMIPS32R6;
def RECIP_S_MMR6 : StdMMR6Rel, RECIP_S_MMR6_ENC, RECIP_S_MMR6_DESC,
ISA_MICROMIPS32R6;
def RECIP_D_MMR6 : StdMMR6Rel, RECIP_D_MMR6_ENC, RECIP_D_MMR6_DESC, ISA_MICROMIPS32R6;
@@ -1188,22 +1607,69 @@ def ROUND_W_S_MMR6 : StdMMR6Rel, ROUND_W_S_MMR6_ENC, ROUND_W_S_MMR6_DESC,
ISA_MICROMIPS32R6;
def ROUND_W_D_MMR6 : StdMMR6Rel, ROUND_W_D_MMR6_ENC, ROUND_W_D_MMR6_DESC,
ISA_MICROMIPS32R6;
-def SEL_S_MMR6 : StdMMR6Rel, SEL_S_MMR6_ENC, SEL_S_MMR6_DESC, ISA_MICROMIPS32R6;
-def SEL_D_MMR6 : StdMMR6Rel, SEL_D_MMR6_ENC, SEL_D_MMR6_DESC, ISA_MICROMIPS32R6;
-def SELEQZ_S_MMR6 : StdMMR6Rel, SELEQZ_S_MMR6_ENC, SELEQZ_S_MMR6_DESC,
+def SEL_S_MMR6 : R6MMR6Rel, SEL_S_MMR6_ENC, SEL_S_MMR6_DESC, ISA_MICROMIPS32R6;
+def SEL_D_MMR6 : R6MMR6Rel, SEL_D_MMR6_ENC, SEL_D_MMR6_DESC, ISA_MICROMIPS32R6;
+def SELEQZ_S_MMR6 : R6MMR6Rel, SELEQZ_S_MMR6_ENC, SELEQZ_S_MMR6_DESC,
ISA_MICROMIPS32R6;
-def SELEQZ_D_MMR6 : StdMMR6Rel, SELEQZ_D_MMR6_ENC, SELEQZ_D_MMR6_DESC,
+def SELEQZ_D_MMR6 : R6MMR6Rel, SELEQZ_D_MMR6_ENC, SELEQZ_D_MMR6_DESC,
ISA_MICROMIPS32R6;
-def SELENZ_S_MMR6 : StdMMR6Rel, SELENZ_S_MMR6_ENC, SELENZ_S_MMR6_DESC,
+def SELNEZ_S_MMR6 : R6MMR6Rel, SELNEZ_S_MMR6_ENC, SELNEZ_S_MMR6_DESC,
ISA_MICROMIPS32R6;
-def SELENZ_D_MMR6 : StdMMR6Rel, SELENZ_D_MMR6_ENC, SELENZ_D_MMR6_DESC,
+def SELNEZ_D_MMR6 : R6MMR6Rel, SELNEZ_D_MMR6_ENC, SELNEZ_D_MMR6_DESC,
ISA_MICROMIPS32R6;
def CLASS_S_MMR6 : StdMMR6Rel, CLASS_S_MMR6_ENC, CLASS_S_MMR6_DESC,
ISA_MICROMIPS32R6;
def CLASS_D_MMR6 : StdMMR6Rel, CLASS_D_MMR6_ENC, CLASS_D_MMR6_DESC,
ISA_MICROMIPS32R6;
+def TLBINV_MMR6 : StdMMR6Rel, TLBINV_MMR6_ENC, TLBINV_MMR6_DESC,
+ ISA_MICROMIPS32R6;
+def TLBINVF_MMR6 : StdMMR6Rel, TLBINVF_MMR6_ENC, TLBINVF_MMR6_DESC,
+ ISA_MICROMIPS32R6;
+def DVP_MMR6 : R6MMR6Rel, DVP_MMR6_ENC, DVP_MMR6_DESC, ISA_MICROMIPS32R6;
+def EVP_MMR6 : R6MMR6Rel, EVP_MMR6_ENC, EVP_MMR6_DESC, ISA_MICROMIPS32R6;
+def BC1EQZC_MMR6 : R6MMR6Rel, BC1EQZC_MMR6_DESC, BC1EQZC_MMR6_ENC,
+ ISA_MICROMIPS32R6;
+def BC1NEZC_MMR6 : R6MMR6Rel, BC1NEZC_MMR6_DESC, BC1NEZC_MMR6_ENC,
+ ISA_MICROMIPS32R6;
+def BC2EQZC_MMR6 : R6MMR6Rel, MipsR6Inst, BC2EQZC_MMR6_ENC, BC2EQZC_MMR6_DESC,
+ ISA_MICROMIPS32R6;
+def BC2NEZC_MMR6 : R6MMR6Rel, MipsR6Inst, BC2NEZC_MMR6_ENC, BC2NEZC_MMR6_DESC,
+ ISA_MICROMIPS32R6;
+let DecoderNamespace = "MicroMips32r6FP64" in {
+ def LDC1_D64_MMR6 : StdMMR6Rel, LDC1_D64_MMR6_DESC, LDC1_MMR6_ENC,
+ ISA_MICROMIPS32R6 {
+ let BaseOpcode = "LDC164";
+ }
+ def SDC1_D64_MMR6 : StdMMR6Rel, SDC1_D64_MMR6_DESC, SDC1_MMR6_ENC,
+ ISA_MICROMIPS32R6;
+}
+def LDC2_MMR6 : StdMMR6Rel, LDC2_MMR6_ENC, LDC2_MMR6_DESC, ISA_MICROMIPS32R6;
+def SDC2_MMR6 : StdMMR6Rel, SDC2_MMR6_ENC, SDC2_MMR6_DESC, ISA_MICROMIPS32R6;
+def LWC2_MMR6 : StdMMR6Rel, LWC2_MMR6_ENC, LWC2_MMR6_DESC, ISA_MICROMIPS32R6;
+def SWC2_MMR6 : StdMMR6Rel, SWC2_MMR6_ENC, SWC2_MMR6_DESC, ISA_MICROMIPS32R6;
}
+def BOVC_MMR6 : R6MMR6Rel, BOVC_MMR6_ENC, BOVC_MMR6_DESC, ISA_MICROMIPS32R6,
+ MMDecodeDisambiguatedBy<"POP35GroupBranchMMR6">;
+def BNVC_MMR6 : R6MMR6Rel, BNVC_MMR6_ENC, BNVC_MMR6_DESC, ISA_MICROMIPS32R6,
+ MMDecodeDisambiguatedBy<"POP37GroupBranchMMR6">;
+def BGEC_MMR6 : R6MMR6Rel, BGEC_MMR6_ENC, BGEC_MMR6_DESC, ISA_MICROMIPS32R6;
+def BGEUC_MMR6 : R6MMR6Rel, BGEUC_MMR6_ENC, BGEUC_MMR6_DESC, ISA_MICROMIPS32R6;
+def BLTC_MMR6 : R6MMR6Rel, BLTC_MMR6_ENC, BLTC_MMR6_DESC, ISA_MICROMIPS32R6;
+def BLTUC_MMR6 : R6MMR6Rel, BLTUC_MMR6_ENC, BLTUC_MMR6_DESC, ISA_MICROMIPS32R6;
+def BEQC_MMR6 : R6MMR6Rel, BEQC_MMR6_ENC, BEQC_MMR6_DESC, ISA_MICROMIPS32R6,
+ DecodeDisambiguates<"POP35GroupBranchMMR6">;
+def BNEC_MMR6 : R6MMR6Rel, BNEC_MMR6_ENC, BNEC_MMR6_DESC, ISA_MICROMIPS32R6,
+ DecodeDisambiguates<"POP37GroupBranchMMR6">;
+def BGEZALC_MMR6 : R6MMR6Rel, BGEZALC_MMR6_ENC, BGEZALC_MMR6_DESC,
+ ISA_MICROMIPS32R6;
+def BGTZALC_MMR6 : R6MMR6Rel, BGTZALC_MMR6_ENC, BGTZALC_MMR6_DESC,
+ ISA_MICROMIPS32R6;
+def BLEZALC_MMR6 : R6MMR6Rel, BLEZALC_MMR6_ENC, BLEZALC_MMR6_DESC,
+ ISA_MICROMIPS32R6;
+def BLTZALC_MMR6 : R6MMR6Rel, BLTZALC_MMR6_ENC, BLTZALC_MMR6_DESC,
+ ISA_MICROMIPS32R6;
+
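BOVC/BNVC and BEQC/BNEC above share the POP35/POP37 encoding groups, so decoding goes through the DecodeDisambiguates / MMDecodeDisambiguatedBy hooks instead of plain field matching. A toy model of the idea, with an explicitly hypothetical register-field rule (the real rule lives in the target's C++ decoder method):

#include <cassert>
#include <string>

// Hypothetical disambiguation rule for illustration only: one encoding
// group, two mnemonics, chosen by comparing the register fields.
static std::string decodePOP35Group(unsigned rs, unsigned rt) {
  return rs >= rt ? "bovc" : "beqc";
}

int main() {
  assert(decodePOP35Group(4, 2) == "bovc");
  assert(decodePOP35Group(2, 4) == "beqc");
  return 0;
}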
//===----------------------------------------------------------------------===//
//
// MicroMips instruction aliases
@@ -1222,6 +1688,45 @@ def : MipsInstAlias<"sdbbp", (SDBBP_MMR6 0), 1>, ISA_MICROMIPS32R6;
def : MipsInstAlias<"rdhwr $rt, $rs",
(RDHWR_MMR6 GPR32Opnd:$rt, HWRegsOpnd:$rs, 0), 1>,
ISA_MICROMIPS32R6;
+def : MipsInstAlias<"mtc0 $rt, $rs",
+ (MTC0_MMR6 COP0Opnd:$rs, GPR32Opnd:$rt, 0), 0>,
+ ISA_MICROMIPS32R6;
+def : MipsInstAlias<"mthc0 $rt, $rs",
+ (MTHC0_MMR6 COP0Opnd:$rs, GPR32Opnd:$rt, 0), 0>,
+ ISA_MICROMIPS32R6;
+def : MipsInstAlias<"mfc0 $rt, $rs",
+ (MFC0_MMR6 GPR32Opnd:$rt, COP0Opnd:$rs, 0), 0>,
+ ISA_MICROMIPS32R6;
+def : MipsInstAlias<"mfhc0 $rt, $rs",
+ (MFHC0_MMR6 GPR32Opnd:$rt, COP0Opnd:$rs, 0), 0>,
+ ISA_MICROMIPS32R6;
+def : MipsInstAlias<"jalrc.hb $rs", (JALRC_HB_MMR6 RA, GPR32Opnd:$rs), 1>,
+ ISA_MICROMIPS32R6;
+def : MipsInstAlias<"dvp", (DVP_MMR6 ZERO), 0>, ISA_MICROMIPS32R6;
+def : MipsInstAlias<"evp", (EVP_MMR6 ZERO), 0>, ISA_MICROMIPS32R6;
+def : MipsInstAlias<"jalrc $rs", (JALRC_MMR6 RA, GPR32Opnd:$rs), 1>,
+ ISA_MICROMIPS32R6;
+def : MipsInstAlias<"and $rs, $rt, $imm",
+ (ANDI_MMR6 GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>,
+ ISA_MICROMIPS32R6;
+def : MipsInstAlias<"and $rs, $imm",
+ (ANDI_MMR6 GPR32Opnd:$rs, GPR32Opnd:$rs, uimm16:$imm), 0>,
+ ISA_MICROMIPS32R6;
+def : MipsInstAlias<"or $rs, $rt, $imm",
+ (ORI_MMR6 GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>,
+ ISA_MICROMIPS32R6;
+def : MipsInstAlias<"or $rs, $imm",
+ (ORI_MMR6 GPR32Opnd:$rs, GPR32Opnd:$rs, uimm16:$imm), 0>,
+ ISA_MICROMIPS32R6;
+def : MipsInstAlias<"xor $rs, $rt, $imm",
+ (XORI_MMR6 GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>,
+ ISA_MICROMIPS32R6;
+def : MipsInstAlias<"xor $rs, $imm",
+ (XORI_MMR6 GPR32Opnd:$rs, GPR32Opnd:$rs, uimm16:$imm), 0>,
+ ISA_MICROMIPS32R6;
+def : MipsInstAlias<"not $rt, $rs",
+ (NOR_MMR6 GPR32Opnd:$rt, GPR32Opnd:$rs, ZERO), 0>,
+ ISA_MICROMIPS32R6;
//===----------------------------------------------------------------------===//
//
@@ -1231,3 +1736,39 @@ def : MipsInstAlias<"rdhwr $rt, $rs",
def : MipsPat<(store GPRMM16:$src, addrimm4lsl2:$addr),
(SW16_MMR6 GPRMM16:$src, addrimm4lsl2:$addr)>, ISA_MICROMIPS32R6;
+def : MipsPat<(subc GPR32:$lhs, GPR32:$rhs),
+ (SUBU_MMR6 GPR32:$lhs, GPR32:$rhs)>, ISA_MICROMIPS32R6;
+
+def : MipsPat<(select i32:$cond, i32:$t, i32:$f),
+ (OR_MM (SELNEZ_MMR6 i32:$t, i32:$cond),
+ (SELEQZ_MMR6 i32:$f, i32:$cond))>,
+ ISA_MICROMIPS32R6;
+def : MipsPat<(select i32:$cond, i32:$t, immz),
+ (SELNEZ_MMR6 i32:$t, i32:$cond)>,
+ ISA_MICROMIPS32R6;
+def : MipsPat<(select i32:$cond, immz, i32:$f),
+ (SELEQZ_MMR6 i32:$f, i32:$cond)>,
+ ISA_MICROMIPS32R6;
+
+defm : SelectInt_Pats<i32, OR_MM, XORI_MMR6, SLTi_MM, SLTiu_MM, SELEQZ_MMR6,
+ SELNEZ_MMR6, immZExt16, i32>, ISA_MICROMIPS32R6;
+
+defm S_MMR6 : Cmp_Pats<f32, NOR_MMR6, ZERO>, ISA_MICROMIPS32R6;
+defm D_MMR6 : Cmp_Pats<f64, NOR_MMR6, ZERO>, ISA_MICROMIPS32R6;
+
+def : MipsPat<(and GPRMM16:$src, immZExtAndi16:$imm),
+ (ANDI16_MMR6 GPRMM16:$src, immZExtAndi16:$imm)>,
+ ISA_MICROMIPS32R6;
+def : MipsPat<(and GPR32:$src, immZExt16:$imm),
+ (ANDI_MMR6 GPR32:$src, immZExt16:$imm)>, ISA_MICROMIPS32R6;
+def : MipsPat<(i32 immZExt16:$imm),
+ (XORI_MMR6 ZERO, immZExt16:$imm)>, ISA_MICROMIPS32R6;
+def : MipsPat<(not GPRMM16:$in),
+ (NOT16_MMR6 GPRMM16:$in)>, ISA_MICROMIPS32R6;
+def : MipsPat<(not GPR32:$in),
+ (NOR_MMR6 GPR32Opnd:$in, ZERO)>, ISA_MICROMIPS32R6;
+// Patterns for load with a reg+imm operand.
+let AddedComplexity = 41 in {
+ def : LoadRegImmPat<LDC1_D64_MMR6, f64, load>, FGR_64, ISA_MICROMIPS32R6;
+ def : StoreRegImmPat<SDC1_D64_MMR6, f64>, FGR_64, ISA_MICROMIPS32R6;
+}
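The three select patterns above encode the usual R6 expansion of an integer select into SELNEZ/SELEQZ plus OR: exactly one of the two selects yields a non-zero value, so OR-ing them recovers select(cond, t, f). A self-contained C++ sketch of the arithmetic (not LLVM code):

#include <cassert>
#include <cstdint>

// seleqz rd, rs, rt: rd = (rt == 0) ? rs : 0
static uint32_t seleqz(uint32_t rs, uint32_t rt) { return rt == 0 ? rs : 0; }
// selnez rd, rs, rt: rd = (rt != 0) ? rs : 0
static uint32_t selnez(uint32_t rs, uint32_t rt) { return rt != 0 ? rs : 0; }

// select(cond, t, f) == selnez(t, cond) | seleqz(f, cond)
static uint32_t expandedSelect(uint32_t cond, uint32_t t, uint32_t f) {
  return selnez(t, cond) | seleqz(f, cond);
}

int main() {
  assert(expandedSelect(1, 7, 9) == 7);
  assert(expandedSelect(0, 7, 9) == 9);
  return 0;
}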
diff --git a/lib/Target/Mips/MicroMips64r6InstrFormats.td b/lib/Target/Mips/MicroMips64r6InstrFormats.td
index da305a2d508a..4add305522f7 100644
--- a/lib/Target/Mips/MicroMips64r6InstrFormats.td
+++ b/lib/Target/Mips/MicroMips64r6InstrFormats.td
@@ -71,16 +71,151 @@ class POOL32S_DALIGN_FM_MMR6 {
class POOL32A_DIVMOD_FM_MMR6<string instr_asm, bits<9> funct>
: MMR6Arch<instr_asm> {
+ bits<5> rt;
+ bits<5> rs;
bits<5> rd;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b010110;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-11} = rd;
+ let Inst{10-9} = 0b00;
+ let Inst{8-0} = funct;
+}
+
+class POOL32S_DMFTC0_FM_MMR6<string instr_asm, bits<5> funct>
+ : MMR6Arch<instr_asm>, MipsR6Inst {
+ bits<5> rt;
bits<5> rs;
+ bits<3> sel;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b010110;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-14} = 0;
+ let Inst{13-11} = sel;
+ let Inst{10-6} = funct;
+ let Inst{5-0} = 0b111100;
+}
+
+class POOL32S_ARITH_FM_MMR6<string opstr, bits<9> funct>
+ : MMR6Arch<opstr> {
bits<5> rt;
+ bits<5> rs;
+ bits<5> rd;
bits<32> Inst;
let Inst{31-26} = 0b010110;
- let Inst{25-21} = rd;
+ let Inst{25-21} = rt;
let Inst{20-16} = rs;
- let Inst{15-11} = rt;
+ let Inst{15-11} = rd;
let Inst{10-9} = 0b00;
- let Inst{8-0} = funct;
+ let Inst{8-0} = funct;
+}
+
+class DADDIU_FM_MMR6<string opstr> : MMR6Arch<opstr> {
+ bits<5> rt;
+ bits<5> rs;
+ bits<16> imm16;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b010111;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-0} = imm16;
+}
+
+class PCREL18_FM_MMR6<bits<3> funct> : MipsR6Inst {
+ bits<5> rt;
+ bits<18> imm;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b011110;
+ let Inst{25-21} = rt;
+ let Inst{20-18} = funct;
+ let Inst{17-0} = imm;
+}
+
+class POOL32S_2R_FM_MMR6<string instr_asm, bits<10> funct>
+ : MMR6Arch<instr_asm>, MipsR6Inst {
+ bits<5> rt;
+ bits<5> rs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b010110;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0b111100;
+}
+
+class POOL32S_2RSA5B0_FM_MMR6<string instr_asm, bits<9> funct>
+ : MMR6Arch<instr_asm>, MipsR6Inst {
+ bits<5> rt;
+ bits<5> rs;
+ bits<5> sa;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b010110;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-11} = sa;
+ let Inst{10-9} = 0b00;
+ let Inst{8-0} = funct;
+}
+
+class LD_SD_32_2R_OFFSET16_FM_MMR6<string instr_asm, bits<6> op>
+ : MMR6Arch<instr_asm>, MipsR6Inst {
+ bits<5> rt;
+ bits<21> addr;
+ bits<5> base = addr{20-16};
+ bits<16> offset = addr{15-0};
+
+ bits<32> Inst;
+
+ let Inst{31-26} = op;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = base;
+ let Inst{15-0} = offset;
+}
+
+class POOL32C_2R_OFFSET12_FM_MMR6<string instr_asm, bits<4> funct>
+ : MMR6Arch<instr_asm>, MipsR6Inst {
+ bits<5> rt;
+ bits<21> addr;
+ bits<5> base = addr{20-16};
+ bits<12> offset = addr{11-0};
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b011000;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = base;
+ let Inst{15-12} = funct;
+ let Inst{11-0} = offset;
+}
+
+class POOL32S_3R_FM_MMR6<string instr_asm, bits<9> funct>
+ : MMR6Arch<instr_asm>, MipsR6Inst {
+ bits<5> rt;
+ bits<5> rs;
+ bits<5> rd;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0b010110;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-11} = rd;
+ let Inst{10-9} = 0b00;
+ let Inst{8-0} = funct;
}
diff --git a/lib/Target/Mips/MicroMips64r6InstrInfo.td b/lib/Target/Mips/MicroMips64r6InstrInfo.td
index ec1aef86a942..87c41deb85af 100644
--- a/lib/Target/Mips/MicroMips64r6InstrInfo.td
+++ b/lib/Target/Mips/MicroMips64r6InstrInfo.td
@@ -28,6 +28,45 @@ class DDIV_MM64R6_ENC : POOL32A_DIVMOD_FM_MMR6<"ddiv", 0b100011000>;
class DMOD_MM64R6_ENC : POOL32A_DIVMOD_FM_MMR6<"dmod", 0b101011000>;
class DDIVU_MM64R6_ENC : POOL32A_DIVMOD_FM_MMR6<"ddivu", 0b110011000>;
class DMODU_MM64R6_ENC : POOL32A_DIVMOD_FM_MMR6<"dmodu", 0b111011000>;
+class DINSU_MM64R6_ENC : POOL32S_EXTBITS_FM_MMR6<0b110100>;
+class DINSM_MM64R6_ENC : POOL32S_EXTBITS_FM_MMR6<0b000100>;
+class DINS_MM64R6_ENC : POOL32S_EXTBITS_FM_MMR6<0b001100>;
+class DMTC0_MM64R6_ENC : POOL32S_DMFTC0_FM_MMR6<"dmtc0", 0b01011>;
+class DMTC1_MM64R6_ENC : POOL32F_MFTC1_FM_MMR6<"dmtc1", 0b10110000>;
+class DMTC2_MM64R6_ENC : POOL32A_MFTC2_FM_MMR6<"dmtc2", 0b0111110100>;
+class DMFC0_MM64R6_ENC : POOL32S_DMFTC0_FM_MMR6<"dmfc0", 0b00011>;
+class DMFC1_MM64R6_ENC : POOL32F_MFTC1_FM_MMR6<"dmfc1", 0b10010000>;
+class DMFC2_MM64R6_ENC : POOL32A_MFTC2_FM_MMR6<"dmfc2", 0b0110110100>;
+class DADD_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"dadd", 0b100010000>;
+class DADDIU_MM64R6_ENC : DADDIU_FM_MMR6<"daddiu">;
+class DADDU_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"daddu", 0b101010000>;
+class LDPC_MM64R6_ENC : PCREL18_FM_MMR6<0b110>;
+class DSUB_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"dsub", 0b110010000>;
+class DSUBU_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"dsubu", 0b111010000>;
+class DMUL_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"dmul", 0b000011000>;
+class DMUH_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"dmuh", 0b001011000>;
+class DMULU_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"dmulu", 0b010011000>;
+class DMUHU_MM64R6_ENC : POOL32S_ARITH_FM_MMR6<"dmuhu", 0b011011000>;
+class DSBH_MM64R6_ENC : POOL32S_2R_FM_MMR6<"dsbh", 0b0111101100>;
+class DSHD_MM64R6_ENC : POOL32S_2R_FM_MMR6<"dshd", 0b1111101100>;
+class DSLL_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"dsll", 0b000000000>;
+class DSLL32_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"dsll32", 0b000001000>;
+class DSLLV_MM64R6_ENC : POOL32S_3R_FM_MMR6<"dsllv", 0b000010000>;
+class DSRAV_MM64R6_ENC : POOL32S_3R_FM_MMR6<"dsrav", 0b010010000>;
+class DSRA_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"dsra", 0b010000000>;
+class DSRA32_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"dsra32", 0b010000100>;
+class DCLO_MM64R6_ENC : POOL32S_2R_FM_MMR6<"dclo", 0b0100101100>;
+class DCLZ_MM64R6_ENC : POOL32S_2R_FM_MMR6<"dclz", 0b0101101100>;
+class DROTR_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"drotr", 0b011000000>;
+class DROTR32_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"drotr32", 0b011001000>;
+class DROTRV_MM64R6_ENC : POOL32S_3R_FM_MMR6<"drotrv", 0b011010000>;
+class LD_MM64R6_ENC : LD_SD_32_2R_OFFSET16_FM_MMR6<"ld", 0b110111>;
+class LLD_MM64R6_ENC : POOL32C_2R_OFFSET12_FM_MMR6<"lld", 0b0111>;
+class LWU_MM64R6_ENC : POOL32C_2R_OFFSET12_FM_MMR6<"lwu", 0b1110>;
+class SD_MM64R6_ENC : LD_SD_32_2R_OFFSET16_FM_MMR6<"sd", 0b110110>;
+class DSRL_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"dsrl", 0b001000000>;
+class DSRL32_MM64R6_ENC : POOL32S_2RSA5B0_FM_MMR6<"dsrl32", 0b001001000>;
+class DSRLV_MM64R6_ENC : POOL32S_3R_FM_MMR6<"dsrlv", 0b001010000>;
//===----------------------------------------------------------------------===//
//
@@ -68,7 +107,7 @@ class EXTBITS_DESC_BASE<string instr_asm, RegisterOperand RO, Operand PosOpnd,
// TODO: Add 'pos + size' constraint check to dext* instructions
// DEXT: 0 < pos + size <= 63
// DEXTM, DEXTU: 32 < pos + size <= 64
-class DEXT_MMR6_DESC : EXTBITS_DESC_BASE<"dext", GPR64Opnd, uimm5,
+class DEXT_MMR6_DESC : EXTBITS_DESC_BASE<"dext", GPR64Opnd, uimm5_report_uimm6,
uimm5_plus1, MipsExt>;
class DEXTM_MMR6_DESC : EXTBITS_DESC_BASE<"dextm", GPR64Opnd, uimm5,
uimm5_plus33, MipsExt>;
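The TODO above spells out the still-unenforced operand constraints for the dext family. A small sketch of the checks as stated in that comment (my reading of the comment, not LLVM's verifier):

#include <cassert>

// DEXT:         0 < pos + size <= 63
static bool isValidDext(unsigned pos, unsigned size) {
  return pos + size > 0 && pos + size <= 63;
}

// DEXTM, DEXTU: 32 < pos + size <= 64
static bool isValidDextmDextu(unsigned pos, unsigned size) {
  return pos + size > 32 && pos + size <= 64;
}

int main() {
  assert(isValidDext(0, 32) && !isValidDext(32, 32));
  assert(isValidDextmDextu(32, 32) && !isValidDextmDextu(0, 16));
  return 0;
}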
@@ -85,10 +124,189 @@ class DALIGN_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
class DALIGN_MMR6_DESC : DALIGN_DESC_BASE<"dalign", GPR64Opnd, uimm3>;
-class DDIV_MM64R6_DESC : ArithLogicR<"ddiv", GPR32Opnd>;
-class DMOD_MM64R6_DESC : ArithLogicR<"dmod", GPR32Opnd>;
-class DDIVU_MM64R6_DESC : ArithLogicR<"ddivu", GPR32Opnd>;
-class DMODU_MM64R6_DESC : ArithLogicR<"dmodu", GPR32Opnd>;
+class DDIV_MM64R6_DESC : DIVMOD_MMR6_DESC_BASE<"ddiv", GPR64Opnd, sdiv>;
+class DMOD_MM64R6_DESC : DIVMOD_MMR6_DESC_BASE<"dmod", GPR64Opnd, srem>;
+class DDIVU_MM64R6_DESC : DIVMOD_MMR6_DESC_BASE<"ddivu", GPR64Opnd, udiv>;
+class DMODU_MM64R6_DESC : DIVMOD_MMR6_DESC_BASE<"dmodu", GPR64Opnd, urem>;
+
+class DCLO_MM64R6_DESC {
+ dag OutOperandList = (outs GPR64Opnd:$rt);
+ dag InOperandList = (ins GPR64Opnd:$rs);
+ string AsmString = !strconcat("dclo", "\t$rt, $rs");
+ list<dag> Pattern = [(set GPR64Opnd:$rt, (ctlz (not GPR64Opnd:$rs)))];
+ InstrItinClass Itinerary = II_CLO;
+ Format Form = FrmR;
+ string BaseOpcode = "dclo";
+}
+
+class DCLZ_MM64R6_DESC {
+ dag OutOperandList = (outs GPR64Opnd:$rt);
+ dag InOperandList = (ins GPR64Opnd:$rs);
+ string AsmString = !strconcat("dclz", "\t$rt, $rs");
+ list<dag> Pattern = [(set GPR64Opnd:$rt, (ctlz GPR64Opnd:$rs))];
+ InstrItinClass Itinerary = II_CLZ;
+ Format Form = FrmR;
+ string BaseOpcode = "dclz";
+}
+
+class DINSU_MM64R6_DESC : InsBase<"dinsu", GPR64Opnd, uimm5_plus32,
+ uimm5_inssize_plus1, MipsIns>;
+class DINSM_MM64R6_DESC : InsBase<"dinsm", GPR64Opnd, uimm5, uimm_range_2_64>;
+class DINS_MM64R6_DESC : InsBase<"dins", GPR64Opnd, uimm5, uimm5_inssize_plus1,
+ MipsIns>;
+class DMTC0_MM64R6_DESC : MTC0_MMR6_DESC_BASE<"dmtc0", COP0Opnd, GPR64Opnd>;
+class DMTC1_MM64R6_DESC : MTC1_MMR6_DESC_BASE<"dmtc1", FGR64Opnd, GPR64Opnd,
+ II_DMTC1, bitconvert>;
+class DMTC2_MM64R6_DESC : MTC2_MMR6_DESC_BASE<"dmtc2", COP2Opnd, GPR64Opnd>;
+
+class DMFC0_MM64R6_DESC : MFC0_MMR6_DESC_BASE<"dmfc0", GPR64Opnd, COP0Opnd>;
+class DMFC1_MM64R6_DESC : MFC1_MMR6_DESC_BASE<"dmfc1", GPR64Opnd, FGR64Opnd,
+ II_DMFC1, bitconvert>;
+class DMFC2_MM64R6_DESC : MFC2_MMR6_DESC_BASE<"dmfc2", GPR64Opnd, COP2Opnd>;
+
+class DADD_MM64R6_DESC : ArithLogicR<"dadd", GPR64Opnd, 1, II_DADD>;
+class DADDIU_MM64R6_DESC : ArithLogicI<"daddiu", simm16_64, GPR64Opnd,
+ II_DADDIU, immSExt16, add>,
+ IsAsCheapAsAMove;
+class DADDU_MM64R6_DESC : ArithLogicR<"daddu", GPR64Opnd, 1, II_DADDU, add>;
+
+class DSUB_DESC_BASE<string instr_asm, RegisterOperand RO,
+ InstrItinClass Itin = NoItinerary,
+ SDPatternOperator OpNode = null_frag>
+ : MipsR6Inst {
+ dag OutOperandList = (outs RO:$rd);
+ dag InOperandList = (ins RO:$rs, RO:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
+ list<dag> Pattern = [(set RO:$rd, (OpNode RO:$rs, RO:$rt))];
+ InstrItinClass Itinerary = Itin;
+ Format Form = FrmR;
+ string BaseOpcode = instr_asm;
+ let isCommutable = 0;
+ let isReMaterializable = 1;
+ let TwoOperandAliasConstraint = "$rd = $rs";
+}
+class DSUB_MM64R6_DESC : DSUB_DESC_BASE<"dsub", GPR64Opnd, II_DSUB>;
+class DSUBU_MM64R6_DESC : DSUB_DESC_BASE<"dsubu", GPR64Opnd, II_DSUBU, sub>;
+
+class LDPC_MM64R6_DESC : PCREL_MMR6_DESC_BASE<"ldpc", GPR64Opnd, simm18_lsl3>;
+
+class MUL_MM64R6_DESC_BASE<string opstr, RegisterOperand GPROpnd,
+ InstrItinClass Itin = NoItinerary,
+ SDPatternOperator Op = null_frag> : MipsR6Inst {
+ dag OutOperandList = (outs GPROpnd:$rd);
+ dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt);
+ string AsmString = !strconcat(opstr, "\t$rd, $rs, $rt");
+ InstrItinClass Itinerary = Itin;
+ list<dag> Pattern = [(set GPROpnd:$rd, (Op GPROpnd:$rs, GPROpnd:$rt))];
+}
+
+class DMUL_MM64R6_DESC : MUL_MM64R6_DESC_BASE<"dmul", GPR64Opnd, II_DMUL, mul>;
+class DMUH_MM64R6_DESC : MUL_MM64R6_DESC_BASE<"dmuh", GPR64Opnd, II_DMUH,
+ mulhs>;
+class DMULU_MM64R6_DESC : MUL_MM64R6_DESC_BASE<"dmulu", GPR64Opnd, II_DMULU>;
+class DMUHU_MM64R6_DESC : MUL_MM64R6_DESC_BASE<"dmuhu", GPR64Opnd, II_DMUHU,
+ mulhu>;
+
+class DSBH_DSHD_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+ dag OutOperandList = (outs GPROpnd:$rt);
+ dag InOperandList = (ins GPROpnd:$rs);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $rs");
+ bit hasSideEffects = 0;
+ list<dag> Pattern = [];
+ InstrItinClass Itinerary = NoItinerary;
+ Format Form = FrmR;
+ string BaseOpcode = instr_asm;
+}
+
+class DSBH_MM64R6_DESC : DSBH_DSHD_DESC_BASE<"dsbh", GPR64Opnd>;
+class DSHD_MM64R6_DESC : DSBH_DSHD_DESC_BASE<"dshd", GPR64Opnd>;
+
+class SHIFT_ROTATE_IMM_MM64R6<string instr_asm, Operand ImmOpnd,
+ InstrItinClass itin,
+ SDPatternOperator OpNode = null_frag,
+ SDPatternOperator PO = null_frag> {
+ dag OutOperandList = (outs GPR64Opnd:$rt);
+ dag InOperandList = (ins GPR64Opnd:$rs, ImmOpnd:$sa);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $rs, $sa");
+ list<dag> Pattern = [(set GPR64Opnd:$rt, (OpNode GPR64Opnd:$rs, PO:$sa))];
+ InstrItinClass Itinerary = itin;
+ Format Form = FrmR;
+ string TwoOperandAliasConstraint = "$rs = $rt";
+ string BaseOpcode = instr_asm;
+}
+
+class SHIFT_ROTATE_REG_MM64R6<string instr_asm, InstrItinClass itin,
+ SDPatternOperator OpNode = null_frag> {
+ dag OutOperandList = (outs GPR64Opnd:$rd);
+ dag InOperandList = (ins GPR64Opnd:$rt, GPR32Opnd:$rs);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $rt, $rs");
+ list<dag> Pattern = [(set GPR64Opnd:$rd,
+ (OpNode GPR64Opnd:$rt, GPR32Opnd:$rs))];
+ InstrItinClass Itinerary = itin;
+ Format Form = FrmR;
+ string BaseOpcode = instr_asm;
+}
+
+class DSLL_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"dsll", uimm6, II_DSLL, shl,
+ immZExt6>;
+class DSLL32_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"dsll32", uimm5, II_DSLL32>;
+class DSLLV_MM64R6_DESC : SHIFT_ROTATE_REG_MM64R6<"dsllv", II_DSLLV, shl>;
+class DSRAV_MM64R6_DESC : SHIFT_ROTATE_REG_MM64R6<"dsrav", II_DSRAV, sra>;
+class DSRA_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"dsra", uimm6, II_DSRA, sra,
+ immZExt6>;
+class DSRA32_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"dsra32", uimm5, II_DSRA32>;
+class DROTR_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"drotr", uimm6, II_DROTR,
+ rotr, immZExt6>;
+class DROTR32_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"drotr32", uimm5,
+ II_DROTR32>;
+class DROTRV_MM64R6_DESC : SHIFT_ROTATE_REG_MM64R6<"drotrv", II_DROTRV, rotr>;
+class DSRL_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"dsrl", uimm6, II_DSRL, srl,
+ immZExt6>;
+class DSRL32_MM64R6_DESC : SHIFT_ROTATE_IMM_MM64R6<"dsrl32", uimm5, II_DSRL32>;
+class DSRLV_MM64R6_DESC : SHIFT_ROTATE_REG_MM64R6<"dsrlv", II_DSRLV, srl>;
+
+class Load_MM64R6<string instr_asm, Operand MemOpnd, InstrItinClass itin,
+ SDPatternOperator OpNode = null_frag> {
+ dag OutOperandList = (outs GPR64Opnd:$rt);
+ dag InOperandList = (ins MemOpnd:$addr);
+ string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
+ list<dag> Pattern = [(set GPR64Opnd:$rt, (OpNode addr:$addr))];
+ InstrItinClass Itinerary = itin;
+ Format Form = FrmI;
+ bit mayLoad = 1;
+ bit canFoldAsLoad = 1;
+ string BaseOpcode = instr_asm;
+}
+
+class LD_MM64R6_DESC : Load_MM64R6<"ld", mem_simm16, II_LD, load> {
+ string DecoderMethod = "DecodeMemMMImm16";
+}
+class LWU_MM64R6_DESC : Load_MM64R6<"lwu", mem_simm12, II_LWU, zextloadi32> {
+ string DecoderMethod = "DecodeMemMMImm12";
+}
+
+class LLD_MM64R6_DESC {
+ dag OutOperandList = (outs GPR64Opnd:$rt);
+ dag InOperandList = (ins mem_simm12:$addr);
+ string AsmString = "lld\t$rt, $addr";
+ list<dag> Pattern = [];
+ bit mayLoad = 1;
+ InstrItinClass Itinerary = II_LLD;
+ string BaseOpcode = "lld";
+ string DecoderMethod = "DecodeMemMMImm12";
+}
+
+class SD_MM64R6_DESC {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins GPR64Opnd:$rt, mem_simm16:$addr);
+ string AsmString = "sd\t$rt, $addr";
+ list<dag> Pattern = [(store GPR64Opnd:$rt, addr:$addr)];
+ InstrItinClass Itinerary = II_SD;
+ Format Form = FrmI;
+ bit mayStore = 1;
+ string BaseOpcode = "sd";
+ string DecoderMethod = "DecodeMemMMImm16";
+}
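The memory descriptors above pair a signed offset operand (mem_simm16 for ld/sd, mem_simm12 for lld/lwu) with a matching DecoderMethod. The decode step amounts to sign-extending the offset field; a generic helper illustrating that arithmetic (an assumption about what the decoder computes, not its actual code):

#include <cassert>
#include <cstdint>

// Sign-extend the low 'bits' bits of 'field' to a 32-bit signed value.
static int32_t signExtend(uint32_t field, unsigned bits) {
  uint32_t mask = 1u << (bits - 1);
  return static_cast<int32_t>((field ^ mask) - mask);
}

int main() {
  assert(signExtend(0xFFF, 12) == -1);      // lld/lwu style 12-bit offset
  assert(signExtend(0x800, 12) == -2048);
  assert(signExtend(0x7FFF, 16) == 32767);  // ld/sd style 16-bit offset
  return 0;
}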
//===----------------------------------------------------------------------===//
//
@@ -116,4 +334,180 @@ let DecoderNamespace = "MicroMipsR6" in {
ISA_MICROMIPS64R6;
def DMODU_MM64R6 : R6MMR6Rel, DMODU_MM64R6_DESC, DMODU_MM64R6_ENC,
ISA_MICROMIPS64R6;
+ def DINSU_MM64R6: R6MMR6Rel, DINSU_MM64R6_DESC, DINSU_MM64R6_ENC,
+ ISA_MICROMIPS64R6;
+ def DINSM_MM64R6: R6MMR6Rel, DINSM_MM64R6_DESC, DINSM_MM64R6_ENC,
+ ISA_MICROMIPS64R6;
+ def DINS_MM64R6: R6MMR6Rel, DINS_MM64R6_DESC, DINS_MM64R6_ENC,
+ ISA_MICROMIPS64R6;
+ def DMTC0_MM64R6 : StdMMR6Rel, DMTC0_MM64R6_ENC, DMTC0_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DMTC1_MM64R6 : StdMMR6Rel, DMTC1_MM64R6_DESC, DMTC1_MM64R6_ENC,
+ ISA_MICROMIPS64R6;
+ def DMTC2_MM64R6 : StdMMR6Rel, DMTC2_MM64R6_ENC, DMTC2_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DMFC0_MM64R6 : StdMMR6Rel, DMFC0_MM64R6_ENC, DMFC0_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DMFC1_MM64R6 : StdMMR6Rel, DMFC1_MM64R6_DESC, DMFC1_MM64R6_ENC,
+ ISA_MICROMIPS64R6;
+ def DMFC2_MM64R6 : StdMMR6Rel, DMFC2_MM64R6_ENC, DMFC2_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DADD_MM64R6: StdMMR6Rel, DADD_MM64R6_DESC, DADD_MM64R6_ENC,
+ ISA_MICROMIPS64R6;
+ def DADDIU_MM64R6: StdMMR6Rel, DADDIU_MM64R6_DESC, DADDIU_MM64R6_ENC,
+ ISA_MICROMIPS64R6;
+ def DADDU_MM64R6: StdMMR6Rel, DADDU_MM64R6_DESC, DADDU_MM64R6_ENC,
+ ISA_MICROMIPS64R6;
+ def LDPC_MM64R6 : R6MMR6Rel, LDPC_MM64R6_ENC, LDPC_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DSUB_MM64R6 : StdMMR6Rel, DSUB_MM64R6_DESC, DSUB_MM64R6_ENC,
+ ISA_MICROMIPS64R6;
+ def DSUBU_MM64R6 : StdMMR6Rel, DSUBU_MM64R6_DESC, DSUBU_MM64R6_ENC,
+ ISA_MICROMIPS64R6;
+ def DMUL_MM64R6 : R6MMR6Rel, DMUL_MM64R6_DESC, DMUL_MM64R6_ENC,
+ ISA_MICROMIPS64R6;
+ def DMUH_MM64R6 : R6MMR6Rel, DMUH_MM64R6_DESC, DMUH_MM64R6_ENC,
+ ISA_MICROMIPS64R6;
+ def DMULU_MM64R6 : R6MMR6Rel, DMULU_MM64R6_DESC, DMULU_MM64R6_ENC,
+ ISA_MICROMIPS64R6;
+ def DMUHU_MM64R6 : R6MMR6Rel, DMUHU_MM64R6_DESC, DMUHU_MM64R6_ENC,
+ ISA_MICROMIPS64R6;
+ def DSBH_MM64R6 : R6MMR6Rel, DSBH_MM64R6_ENC, DSBH_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DSHD_MM64R6 : R6MMR6Rel, DSHD_MM64R6_ENC, DSHD_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DSLL_MM64R6 : StdMMR6Rel, DSLL_MM64R6_ENC, DSLL_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DSLL32_MM64R6 : StdMMR6Rel, DSLL32_MM64R6_ENC, DSLL32_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DSLLV_MM64R6 : StdMMR6Rel, DSLLV_MM64R6_ENC, DSLLV_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DSRAV_MM64R6 : StdMMR6Rel, DSRAV_MM64R6_ENC, DSRAV_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DSRA_MM64R6 : StdMMR6Rel, DSRA_MM64R6_ENC, DSRA_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DSRA32_MM64R6 : StdMMR6Rel, DSRA32_MM64R6_ENC, DSRA32_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DCLO_MM64R6 : StdMMR6Rel, R6MMR6Rel, DCLO_MM64R6_ENC, DCLO_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DCLZ_MM64R6 : StdMMR6Rel, R6MMR6Rel, DCLZ_MM64R6_ENC, DCLZ_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DROTR_MM64R6 : StdMMR6Rel, DROTR_MM64R6_ENC, DROTR_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DROTR32_MM64R6 : StdMMR6Rel, DROTR32_MM64R6_ENC, DROTR32_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DROTRV_MM64R6 : StdMMR6Rel, DROTRV_MM64R6_ENC, DROTRV_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def LD_MM64R6 : StdMMR6Rel, LD_MM64R6_ENC, LD_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def LLD_MM64R6 : StdMMR6Rel, R6MMR6Rel, LLD_MM64R6_ENC, LLD_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def LWU_MM64R6 : StdMMR6Rel, LWU_MM64R6_ENC, LWU_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def SD_MM64R6 : StdMMR6Rel, SD_MM64R6_ENC, SD_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DSRL_MM64R6 : StdMMR6Rel, DSRL_MM64R6_ENC, DSRL_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DSRL32_MM64R6 : StdMMR6Rel, DSRL32_MM64R6_ENC, DSRL32_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
+ def DSRLV_MM64R6 : StdMMR6Rel, DSRLV_MM64R6_ENC, DSRLV_MM64R6_DESC,
+ ISA_MICROMIPS64R6;
}
+
+//===----------------------------------------------------------------------===//
+//
+// Arbitrary patterns that map to one or more instructions
+//
+//===----------------------------------------------------------------------===//
+
+def : MipsPat<(MipsLo tglobaladdr:$in),
+ (DADDIU_MM64R6 ZERO_64, tglobaladdr:$in)>, ISA_MICROMIPS64R6;
+def : MipsPat<(MipsLo tblockaddress:$in),
+ (DADDIU_MM64R6 ZERO_64, tblockaddress:$in)>, ISA_MICROMIPS64R6;
+def : MipsPat<(MipsLo tjumptable:$in),
+ (DADDIU_MM64R6 ZERO_64, tjumptable:$in)>, ISA_MICROMIPS64R6;
+def : MipsPat<(MipsLo tconstpool:$in),
+ (DADDIU_MM64R6 ZERO_64, tconstpool:$in)>, ISA_MICROMIPS64R6;
+def : MipsPat<(MipsLo tglobaltlsaddr:$in),
+ (DADDIU_MM64R6 ZERO_64, tglobaltlsaddr:$in)>, ISA_MICROMIPS64R6;
+def : MipsPat<(MipsLo texternalsym:$in),
+ (DADDIU_MM64R6 ZERO_64, texternalsym:$in)>, ISA_MICROMIPS64R6;
+
+def : MipsPat<(add GPR64:$hi, (MipsLo tglobaladdr:$lo)),
+ (DADDIU_MM64R6 GPR64:$hi, tglobaladdr:$lo)>, ISA_MICROMIPS64R6;
+def : MipsPat<(add GPR64:$hi, (MipsLo tblockaddress:$lo)),
+ (DADDIU_MM64R6 GPR64:$hi, tblockaddress:$lo)>, ISA_MICROMIPS64R6;
+def : MipsPat<(add GPR64:$hi, (MipsLo tjumptable:$lo)),
+ (DADDIU_MM64R6 GPR64:$hi, tjumptable:$lo)>, ISA_MICROMIPS64R6;
+def : MipsPat<(add GPR64:$hi, (MipsLo tconstpool:$lo)),
+ (DADDIU_MM64R6 GPR64:$hi, tconstpool:$lo)>, ISA_MICROMIPS64R6;
+def : MipsPat<(add GPR64:$hi, (MipsLo tglobaltlsaddr:$lo)),
+ (DADDIU_MM64R6 GPR64:$hi, tglobaltlsaddr:$lo)>, ISA_MICROMIPS64R6;
+
+def : MipsPat<(addc GPR64:$lhs, GPR64:$rhs),
+ (DADDU_MM64R6 GPR64:$lhs, GPR64:$rhs)>, ISA_MICROMIPS64R6;
+def : MipsPat<(addc GPR64:$lhs, immSExt16:$imm),
+ (DADDIU_MM64R6 GPR64:$lhs, imm:$imm)>, ISA_MICROMIPS64R6;
+
+
+def : MipsPat<(rotr GPR64:$rt, (i32 (trunc GPR64:$rs))),
+ (DROTRV_MM64R6 GPR64:$rt, (EXTRACT_SUBREG GPR64:$rs, sub_32))>,
+ ISA_MICROMIPS64R6;
+
+
+def : WrapperPat<tglobaladdr, DADDIU_MM64R6, GPR64>, ISA_MICROMIPS64R6;
+def : WrapperPat<tconstpool, DADDIU_MM64R6, GPR64>, ISA_MICROMIPS64R6;
+def : WrapperPat<texternalsym, DADDIU_MM64R6, GPR64>, ISA_MICROMIPS64R6;
+def : WrapperPat<tblockaddress, DADDIU_MM64R6, GPR64>, ISA_MICROMIPS64R6;
+def : WrapperPat<tjumptable, DADDIU_MM64R6, GPR64>, ISA_MICROMIPS64R6;
+def : WrapperPat<tglobaltlsaddr, DADDIU_MM64R6, GPR64>, ISA_MICROMIPS64R6;
+
+// Carry pattern
+def : MipsPat<(subc GPR64:$lhs, GPR64:$rhs),
+ (DSUBU_MM64R6 GPR64:$lhs, GPR64:$rhs)>, ISA_MICROMIPS64R6;
+
+def : MipsPat<(atomic_load_64 addr:$a), (LD_MM64R6 addr:$a)>, ISA_MICROMIPS64R6;
+
+//===----------------------------------------------------------------------===//
+//
+// Instruction aliases
+//
+//===----------------------------------------------------------------------===//
+
+def : MipsInstAlias<"dmtc0 $rt, $rd",
+ (DMTC0_MM64R6 COP0Opnd:$rd, GPR64Opnd:$rt, 0), 0>;
+def : MipsInstAlias<"dmfc0 $rt, $rd",
+ (DMFC0_MM64R6 GPR64Opnd:$rt, COP0Opnd:$rd, 0), 0>,
+ ISA_MICROMIPS64R6;
+def : MipsInstAlias<"daddu $rs, $rt, $imm",
+ (DADDIU_MM64R6 GPR64Opnd:$rs,
+ GPR64Opnd:$rt,
+ simm16_64:$imm),
+ 0>, ISA_MICROMIPS64R6;
+def : MipsInstAlias<"daddu $rs, $imm",
+ (DADDIU_MM64R6 GPR64Opnd:$rs,
+ GPR64Opnd:$rs,
+ simm16_64:$imm),
+ 0>, ISA_MICROMIPS64R6;
+def : MipsInstAlias<"dsubu $rt, $rs, $imm",
+ (DADDIU_MM64R6 GPR64Opnd:$rt,
+ GPR64Opnd:$rs,
+ InvertedImOperand64:$imm),
+ 0>, ISA_MICROMIPS64R6;
+def : MipsInstAlias<"dsubu $rs, $imm",
+ (DADDIU_MM64R6 GPR64Opnd:$rs,
+ GPR64Opnd:$rs,
+ InvertedImOperand64:$imm),
+ 0>, ISA_MICROMIPS64R6;
+def : MipsInstAlias<"dneg $rt, $rs",
+ (DSUB_MM64R6 GPR64Opnd:$rt, ZERO_64, GPR64Opnd:$rs), 1>,
+ ISA_MICROMIPS64R6;
+def : MipsInstAlias<"dneg $rt",
+ (DSUB_MM64R6 GPR64Opnd:$rt, ZERO_64, GPR64Opnd:$rt), 0>,
+ ISA_MICROMIPS64R6;
+def : MipsInstAlias<"dnegu $rt, $rs",
+ (DSUBU_MM64R6 GPR64Opnd:$rt, ZERO_64, GPR64Opnd:$rs), 1>,
+ ISA_MICROMIPS64R6;
+def : MipsInstAlias<"dnegu $rt",
+ (DSUBU_MM64R6 GPR64Opnd:$rt, ZERO_64, GPR64Opnd:$rt), 0>,
+ ISA_MICROMIPS64R6;
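The dsubu-with-immediate aliases above lean on InvertedImOperand64: subtracting an immediate is encoded as daddiu with the negated immediate, and dneg/dnegu are subtractions from $zero. A sketch of that arithmetic (assuming the operand modifier simply negates, which is what the alias requires):

#include <cassert>
#include <cstdint>

// daddiu rt, rs, imm: 64-bit add of a sign-extended 16-bit immediate.
static uint64_t daddiu(uint64_t rs, int16_t imm) {
  return rs + static_cast<uint64_t>(static_cast<int64_t>(imm));
}

int main() {
  uint64_t rs = 1000;
  int16_t imm = 42;
  // "dsubu $rt, $rs, imm" == "daddiu $rt, $rs, -imm"
  assert(rs - imm == daddiu(rs, static_cast<int16_t>(-imm)));
  // "dnegu $rt, $rs" == "dsubu $rt, $zero, $rs" (wraps modulo 2^64)
  assert(uint64_t(0) - rs == static_cast<uint64_t>(-static_cast<int64_t>(rs)));
  return 0;
}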
diff --git a/lib/Target/Mips/MicroMipsDSPInstrFormats.td b/lib/Target/Mips/MicroMipsDSPInstrFormats.td
index f11c09abfc36..af6473c468d9 100644
--- a/lib/Target/Mips/MicroMipsDSPInstrFormats.td
+++ b/lib/Target/Mips/MicroMipsDSPInstrFormats.td
@@ -242,3 +242,61 @@ class POOL32A_5B01RAC_FMT<string opstr, bits<8> op> : MMDSPInst<opstr> {
let Inst{13-6} = op;
let Inst{5-0} = 0b111100;
}
+
+class POOL32I_IMMB0_FMT<string opstr, bits<5> op> : MMDSPInst<opstr> {
+ bits<16> offset;
+
+ let Inst{31-26} = 0b010000;
+ let Inst{25-21} = op;
+ let Inst{20-16} = 0;
+ let Inst{15-0} = offset;
+}
+
+class POOL32A_2RBP_FMT<string opstr> : MMDSPInst<opstr> {
+ bits<5> rt;
+ bits<5> rs;
+ bits<2> bp;
+
+ let Inst{31-26} = 0;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-14} = bp;
+ let Inst{13-6} = 0b00100010;
+ let Inst{5-0} = 0b111100;
+}
+
+class POOL32A_2RB0_FMT<string opstr, bits<10> op> : MMDSPInst<opstr> {
+ bits<5> rt;
+ bits<5> rs;
+
+ let Inst{31-26} = 0;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-10} = 0;
+ let Inst{9-0} = op;
+}
+
+class POOL32S_3RB0_FMT<string opstr, bits<10> op> : MMDSPInst<opstr> {
+ bits<5> rt;
+ bits<5> rs;
+ bits<5> rd;
+
+ let Inst{31-26} = 0b010110;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-11} = rd;
+ let Inst{10} = 0b0;
+ let Inst{9-0} = op;
+}
+
+class POOL32A_2R2B0_FMT<string opstr, bits<10> op> : MMDSPInst<opstr> {
+ bits<5> rt;
+ bits<5> rs;
+
+ let Inst{31-26} = 0;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-11} = 0;
+ let Inst{10} = 0;
+ let Inst{9-0} = op;
+}
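Each format class above is just a fixed bit layout over the 32-bit Inst field. Taking POOL32S_3RB0_FMT as the example, the same packing in plain C++ (field positions copied from the let Inst{...} lines; illustrative only, TableGen's emitter does this for real):

#include <cassert>
#include <cstdint>

// Inst{31-26}=0b010110, {25-21}=rt, {20-16}=rs, {15-11}=rd, {10}=0, {9-0}=op
static uint32_t encodePool32S_3RB0(uint32_t rt, uint32_t rs, uint32_t rd,
                                   uint32_t op /* 10 bits */) {
  return (0b010110u << 26) | ((rt & 31) << 21) | ((rs & 31) << 16) |
         ((rd & 31) << 11) | (op & 0x3FF);
}

int main() {
  uint32_t inst = encodePool32S_3RB0(1, 2, 3, 0b0011000101);
  assert(inst >> 26 == 0b010110);    // major opcode
  assert(((inst >> 21) & 31) == 1);  // rt field
  return 0;
}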
diff --git a/lib/Target/Mips/MicroMipsDSPInstrInfo.td b/lib/Target/Mips/MicroMipsDSPInstrInfo.td
index b342e2371df4..f82f82fc7e45 100644
--- a/lib/Target/Mips/MicroMipsDSPInstrInfo.td
+++ b/lib/Target/Mips/MicroMipsDSPInstrInfo.td
@@ -155,6 +155,26 @@ class PICK_QB_MM_ENC : POOL32A_3RB0_FMT<"pick.qb", 0b0111101101>;
class SHILO_MM_ENC : POOL32A_4B0SHIFT6AC4B0_FMT<"shilo", 0b0000011101>;
class SHILOV_MM_ENC : POOL32A_5B01RAC_FMT<"shilov", 0b01001001>;
class WRDSP_MM_ENC : POOL32A_1RMASK7_FMT<"wrdsp", 0b01011001>;
+class APPEND_MMR2_ENC : POOL32A_2RSA5B0_FMT<"append", 0b1000010101>;
+class MODSUB_MM_ENC : POOL32A_3RB0_FMT<"modsub", 0b1010010101>;
+class MULSA_W_PH_MMR2_ENC : POOL32A_2RAC_FMT<"mulsa.w.ph", 0b10110010>;
+class MULSAQ_S_W_PH_MM_ENC : POOL32A_2RAC_FMT<"mulsaq_s.w.ph", 0b11110010>;
+class BPOSGE32C_MMR3_ENC : POOL32I_IMMB0_FMT<"bposge32c", 0b11001>;
+class BITREV_MM_ENC : POOL32A_2R_FMT<"bitrev", 0b0011000100>;
+class BALIGN_MMR2_ENC : POOL32A_2RBP_FMT<"balign">;
+class BPOSGE32_MM_ENC : POOL32I_IMMB0_FMT<"bposge32", 0b11011>;
+class CMP_EQ_PH_MM_ENC : POOL32A_2RB0_FMT<"cmp.eq.ph", 0b0000000101>;
+class CMP_LE_PH_MM_ENC : POOL32A_2RB0_FMT<"cmp.le.ph", 0b0010000101>;
+class CMP_LT_PH_MM_ENC : POOL32A_2RB0_FMT<"cmp.lt.ph", 0b0001000101>;
+class CMPGDU_EQ_QB_MMR2_ENC : POOL32A_3RB0_FMT<"cmpgdu.eq.qb", 0b0110000101>;
+class CMPGDU_LT_QB_MMR2_ENC : POOL32A_3RB0_FMT<"cmpgdu.lt.qb", 0b0111000101>;
+class CMPGDU_LE_QB_MMR2_ENC : POOL32A_3RB0_FMT<"cmpgdu.le.qb", 0b1000000101>;
+class CMPGU_EQ_QB_MM_ENC : POOL32S_3RB0_FMT<"cmpgu.eq.qb", 0b0011000101>;
+class CMPGU_LT_QB_MM_ENC : POOL32S_3RB0_FMT<"cmpgu.lt.qb", 0b0100000101>;
+class CMPGU_LE_QB_MM_ENC : POOL32S_3RB0_FMT<"cmpgu.le.qb", 0b0101000101>;
+class CMPU_EQ_QB_MM_ENC : POOL32A_2R2B0_FMT<"cmpu.eq.qb", 0b1001000101>;
+class CMPU_LT_QB_MM_ENC : POOL32A_2R2B0_FMT<"cmpu.lt.qb", 0b1010000101>;
+class CMPU_LE_QB_MM_ENC : POOL32A_2R2B0_FMT<"cmpu.le.qb", 0b1011000101>;
// Instruction desc.
class ABSQ_S_PH_MM_R2_DESC_BASE<string opstr, SDPatternOperator OpNode,
@@ -339,15 +359,15 @@ class RADDU_W_QB_MM_DESC {
class RDDSP_MM_DESC {
dag OutOperandList = (outs GPR32Opnd:$rt);
- dag InOperandList = (ins uimm16:$mask);
+ dag InOperandList = (ins uimm7:$mask);
string AsmString = !strconcat("rddsp", "\t$rt, $mask");
- list<dag> Pattern = [(set GPR32Opnd:$rt, (int_mips_rddsp immZExt10:$mask))];
+ list<dag> Pattern = [(set GPR32Opnd:$rt, (int_mips_rddsp immZExt7:$mask))];
InstrItinClass Itinerary = NoItinerary;
}
class REPL_QB_MM_DESC {
dag OutOperandList = (outs DSPROpnd:$rt);
- dag InOperandList = (ins uimm16:$imm);
+ dag InOperandList = (ins uimm8:$imm);
string AsmString = !strconcat("repl.qb", "\t$rt, $imm");
list<dag> Pattern = [(set DSPROpnd:$rt, (int_mips_repl_qb immZExt8:$imm))];
InstrItinClass Itinerary = NoItinerary;
@@ -368,6 +388,33 @@ class WRDSP_MM_DESC {
InstrItinClass Itinerary = NoItinerary;
}
+class BPOSGE32C_MMR3_DESC {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins brtarget1SImm16:$offset);
+ string AsmString = !strconcat("bposge32c", "\t$offset");
+ InstrItinClass Itinerary = NoItinerary;
+ bit isBranch = 1;
+ bit isTerminator = 1;
+ bit hasDelaySlot = 0;
+}
+
+class BALIGN_MMR2_DESC {
+ dag OutOperandList = (outs GPR32Opnd:$rt);
+ dag InOperandList = (ins GPR32Opnd:$rs, uimm2:$bp, GPR32Opnd:$src);
+ string AsmString = !strconcat("balign", "\t$rt, $rs, $bp");
+ list<dag> Pattern = [(set GPR32Opnd:$rt, (int_mips_balign GPR32Opnd:$src,
+ GPR32Opnd:$rs,
+ immZExt2:$bp))];
+ InstrItinClass Itinerary = NoItinerary;
+ string Constraints = "$src = $rt";
+}
+
+class BITREV_MM_DESC : ABSQ_S_PH_MM_R2_DESC_BASE<"bitrev", int_mips_bitrev,
+ NoItinerary, GPR32Opnd>;
+
+class BPOSGE32_MM_DESC : BPOSGE32_DESC_BASE<"bposge32", brtarget_mm,
+ NoItinerary>;
+
// Instruction defs.
// microMIPS DSP Rev 1
def ADDQ_PH_MM : DspMMRel, ADDQ_PH_MM_ENC, ADDQ_PH_DESC;
@@ -472,6 +519,20 @@ def PICK_QB_MM : DspMMRel, PICK_QB_MM_ENC, PICK_QB_DESC;
def SHILO_MM : DspMMRel, SHILO_MM_ENC, SHILO_DESC;
def SHILOV_MM : DspMMRel, SHILOV_MM_ENC, SHILOV_DESC;
def WRDSP_MM : DspMMRel, WRDSP_MM_ENC, WRDSP_MM_DESC;
+def MODSUB_MM : DspMMRel, MODSUB_MM_ENC, MODSUB_DESC;
+def MULSAQ_S_W_PH_MM : DspMMRel, MULSAQ_S_W_PH_MM_ENC, MULSAQ_S_W_PH_DESC;
+def BITREV_MM : DspMMRel, BITREV_MM_ENC, BITREV_MM_DESC;
+def BPOSGE32_MM : DspMMRel, BPOSGE32_MM_ENC, BPOSGE32_MM_DESC,
+ ISA_MIPS1_NOT_32R6_64R6;
+def CMP_EQ_PH_MM : DspMMRel, CMP_EQ_PH_MM_ENC, CMP_EQ_PH_DESC;
+def CMP_LT_PH_MM : DspMMRel, CMP_LT_PH_MM_ENC, CMP_LT_PH_DESC;
+def CMP_LE_PH_MM : DspMMRel, CMP_LE_PH_MM_ENC, CMP_LE_PH_DESC;
+def CMPGU_EQ_QB_MM : DspMMRel, CMPGU_EQ_QB_MM_ENC, CMPGU_EQ_QB_DESC;
+def CMPGU_LT_QB_MM : DspMMRel, CMPGU_LT_QB_MM_ENC, CMPGU_LT_QB_DESC;
+def CMPGU_LE_QB_MM : DspMMRel, CMPGU_LE_QB_MM_ENC, CMPGU_LE_QB_DESC;
+def CMPU_EQ_QB_MM : DspMMRel, CMPU_EQ_QB_MM_ENC, CMPU_EQ_QB_DESC;
+def CMPU_LT_QB_MM : DspMMRel, CMPU_LT_QB_MM_ENC, CMPU_LT_QB_DESC;
+def CMPU_LE_QB_MM : DspMMRel, CMPU_LE_QB_MM_ENC, CMPU_LE_QB_DESC;
// microMIPS DSP Rev 2
def ABSQ_S_QB_MMR2 : DspMMRel, ABSQ_S_QB_MMR2_ENC, ABSQ_S_QB_MMR2_DESC,
ISA_DSPR2;
@@ -495,6 +556,13 @@ def SHRA_R_QB_MMR2 : DspMMRel, SHRA_R_QB_MMR2_ENC, SHRA_R_QB_MMR2_DESC,
def SHRAV_QB_MMR2 : DspMMRel, SHRAV_QB_MMR2_ENC, SHRAV_QB_MMR2_DESC, ISA_DSPR2;
def SHRAV_R_QB_MMR2 : DspMMRel, SHRAV_R_QB_MMR2_ENC, SHRAV_R_QB_MMR2_DESC,
ISA_DSPR2;
+def BALIGN_MMR2 : DspMMRel, BALIGN_MMR2_ENC, BALIGN_MMR2_DESC, ISA_DSPR2;
+def CMPGDU_EQ_QB_MMR2 : DspMMRel, CMPGDU_EQ_QB_MMR2_ENC, CMPGDU_EQ_QB_DESC,
+ ISA_DSPR2;
+def CMPGDU_LT_QB_MMR2 : DspMMRel, CMPGDU_LT_QB_MMR2_ENC, CMPGDU_LT_QB_DESC,
+ ISA_DSPR2;
+def CMPGDU_LE_QB_MMR2 : DspMMRel, CMPGDU_LE_QB_MMR2_ENC, CMPGDU_LE_QB_DESC,
+ ISA_DSPR2;
def SHRL_PH_MMR2 : DspMMRel, SHRL_PH_MMR2_ENC, SHRL_PH_MMR2_DESC, ISA_DSPR2;
def SHRLV_PH_MMR2 : DspMMRel, SHRLV_PH_MMR2_ENC, SHRLV_PH_MMR2_DESC, ISA_DSPR2;
def SUBQH_PH_MMR2 : DspMMRel, SUBQH_PH_MMR2_ENC, SUBQH_PH_DESC, ISA_DSPR2;
@@ -526,3 +594,8 @@ def PREPEND_MMR2 : DspMMRel, PREPEND_MMR2_ENC, PREPEND_DESC, ISA_DSPR2;
// Instruction alias.
def : MMDSPInstAlias<"wrdsp $rt", (WRDSP_MM GPR32Opnd:$rt, 0x1F), 1>;
+def APPEND_MMR2 : DspMMRel, APPEND_MMR2_ENC, APPEND_DESC, ISA_DSPR2;
+def MULSA_W_PH_MMR2 : DspMMRel, MULSA_W_PH_MMR2_ENC, MULSA_W_PH_DESC, ISA_DSPR2;
+// microMIPS DSP Rev 3
+def BPOSGE32C_MMR3 : DspMMRel, BPOSGE32C_MMR3_ENC, BPOSGE32C_MMR3_DESC,
+ ISA_DSPR3;
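The RDDSP change earlier in this file narrows the mask operand from a 10-bit to a 7-bit zero-extended immediate (uimm7 / immZExt7). The acceptance test that implies is just a range check; a generic sketch (not the actual predicate):

#include <cassert>
#include <cstdint>

// True when 'value' is representable as a zero-extended 'bits'-bit immediate.
static bool isZExtImm(uint64_t value, unsigned bits) {
  return bits >= 64 || value < (uint64_t(1) << bits);
}

int main() {
  assert(isZExtImm(0x3F, 7));    // fits in 7 bits
  assert(!isZExtImm(0x3FF, 7));  // a 10-bit mask is no longer accepted
  return 0;
}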
diff --git a/lib/Target/Mips/MicroMipsInstrFPU.td b/lib/Target/Mips/MicroMipsInstrFPU.td
index 756e6c92c1d1..7b0e00bd1c3c 100644
--- a/lib/Target/Mips/MicroMipsInstrFPU.td
+++ b/lib/Target/Mips/MicroMipsInstrFPU.td
@@ -17,12 +17,6 @@ def FMUL_MM : MMRel, ADDS_FT<"mul.d", AFGR64Opnd, II_MUL_D, 1, fmul>,
def FSUB_MM : MMRel, ADDS_FT<"sub.d", AFGR64Opnd, II_SUB_D, 0, fsub>,
ADDS_FM_MM<1, 0x70>;
-def LWC1_MM : MMRel, LW_FT<"lwc1", FGR32Opnd, II_LWC1, load>, LW_FM_MM<0x27>;
-def SWC1_MM : MMRel, SW_FT<"swc1", FGR32Opnd, II_SWC1, store>,
- LW_FM_MM<0x26>;
-def LDC1_MM : MMRel, LW_FT<"ldc1", AFGR64Opnd, II_LDC1, load>, LW_FM_MM<0x2f>;
-def SDC1_MM : MMRel, SW_FT<"sdc1", AFGR64Opnd, II_SDC1, store>,
- LW_FM_MM<0x2e>;
def LWXC1_MM : MMRel, LWXC1_FT<"lwxc1", FGR32Opnd, II_LWXC1, load>,
LWXC1_FM_MM<0x48>, INSN_MIPS4_32R2_NOT_32R6_64R6;
def SWXC1_MM : MMRel, SWXC1_FT<"swxc1", FGR32Opnd, II_SWXC1, store>,
@@ -114,10 +108,6 @@ def MFC1_MM : MMRel, MFC1_FT<"mfc1", GPR32Opnd, FGR32Opnd,
II_MFC1, bitconvert>, MFC1_FM_MM<0x80>;
def MTC1_MM : MMRel, MTC1_FT<"mtc1", FGR32Opnd, GPR32Opnd,
II_MTC1, bitconvert>, MFC1_FM_MM<0xa0>;
-def MFHC1_MM : MMRel, MFC1_FT<"mfhc1", GPR32Opnd, AFGR64Opnd, II_MFHC1>,
- MFC1_FM_MM<0xc0>, ISA_MIPS32R2, FGR_32;
-def MTHC1_MM : MMRel, MTC1_64_FT<"mthc1", AFGR64Opnd, GPR32Opnd, II_MTHC1>,
- MFC1_FM_MM<0xe0>, ISA_MIPS32R2, FGR_32;
def MADD_S_MM : MMRel, MADDS_FT<"madd.s", FGR32Opnd, II_MADD_S, fadd>,
MADDS_FM_MM<0x1>;
@@ -147,4 +137,33 @@ let AdditionalPredicates = [InMicroMips] in {
ROUND_W_FM_MM<0, 0x6c>;
def FSQRT_S_MM : MMRel, ABSS_FT<"sqrt.s", FGR32Opnd, FGR32Opnd, II_SQRT_S,
fsqrt>, ROUND_W_FM_MM<0, 0x28>;
+ def MTHC1_MM : MMRel, MTC1_64_FT<"mthc1", AFGR64Opnd, GPR32Opnd, II_MTHC1>,
+ MFC1_FM_MM<0xe0>, ISA_MIPS32R2, FGR_32;
+ def MFHC1_MM : MMRel, MFC1_FT<"mfhc1", GPR32Opnd, AFGR64Opnd, II_MFHC1>,
+ MFC1_FM_MM<0xc0>, ISA_MIPS32R2, FGR_32;
+ let DecoderNamespace = "MicroMips", DecoderMethod = "DecodeFMemMMR2" in {
+ def LDC1_MM : MMRel, LW_FT<"ldc1", AFGR64Opnd, mem_mm_16, II_LDC1, load>,
+ LW_FM_MM<0x2f>, FGR_32 {
+ let BaseOpcode = "LDC132";
+ }
+ def SDC1_MM : MMRel, SW_FT<"sdc1", AFGR64Opnd, mem_mm_16, II_SDC1, store>,
+ LW_FM_MM<0x2e>, FGR_32;
+ def LWC1_MM : MMRel, LW_FT<"lwc1", FGR32Opnd, mem_mm_16, II_LWC1, load>,
+ LW_FM_MM<0x27>;
+ def SWC1_MM : MMRel, SW_FT<"swc1", FGR32Opnd, mem_mm_16, II_SWC1, store>,
+ LW_FM_MM<0x26>;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Floating Point Patterns
+//===----------------------------------------------------------------------===//
+let AdditionalPredicates = [InMicroMips] in {
+ // Patterns for loads/stores with a reg+imm operand.
+ let AddedComplexity = 40 in {
+ def : LoadRegImmPat<LDC1_MM, f64, load>, FGR_32;
+ def : StoreRegImmPat<SDC1_MM, f64>, FGR_32;
+ def : LoadRegImmPat<LWC1_MM, f32, load>;
+ def : StoreRegImmPat<SWC1_MM, f32>;
+ }
}
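The reg+imm load/store patterns above are wrapped in let AddedComplexity = 40 so that, when several patterns match the same node, instruction selection prefers them over the complexity-0 generic ones. A toy model of that tie-break (an illustration of the concept, not SelectionDAG's real algorithm):

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

struct Pattern { std::string name; int complexity; };

// Among the patterns that match a node, take the highest-complexity one.
static const Pattern &pick(const std::vector<Pattern> &matching) {
  return *std::max_element(matching.begin(), matching.end(),
                           [](const Pattern &a, const Pattern &b) {
                             return a.complexity < b.complexity;
                           });
}

int main() {
  std::vector<Pattern> candidates = {{"generic f32 load", 0},
                                     {"LoadRegImmPat<LWC1_MM>", 40}};
  assert(pick(candidates).name == "LoadRegImmPat<LWC1_MM>");
  return 0;
}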
diff --git a/lib/Target/Mips/MicroMipsInstrFormats.td b/lib/Target/Mips/MicroMipsInstrFormats.td
index b736367ee5fa..79ef6482180d 100644
--- a/lib/Target/Mips/MicroMipsInstrFormats.td
+++ b/lib/Target/Mips/MicroMipsInstrFormats.td
@@ -278,7 +278,6 @@ class MOVEP_FM_MM16 {
class MMArch {
string Arch = "micromips";
- list<dag> Pattern = [];
}
class ADD_FM_MM<bits<6> op, bits<10> funct> : MMArch {
@@ -380,13 +379,15 @@ class SRLV_FM_MM<bits<10> funct, bit rotate> : MMArch {
class LW_FM_MM<bits<6> op> : MMArch {
bits<5> rt;
bits<21> addr;
+ bits<5> base = addr{20-16};
+ bits<16> offset = addr{15-0};
bits<32> Inst;
let Inst{31-26} = op;
let Inst{25-21} = rt;
- let Inst{20-16} = addr{20-16};
- let Inst{15-0} = addr{15-0};
+ let Inst{20-16} = base;
+ let Inst{15-0} = offset;
}
class POOL32C_LHUE_FM_MM<bits<6> op, bits<4> fmt, bits<3> funct> : MMArch {
@@ -674,7 +675,7 @@ class TEQI_FM_MM<bits<5> funct> : MMArch {
let Inst{15-0} = imm16;
}
-class LL_FM_MM<bits<4> funct> {
+class LL_FM_MM<bits<4> funct> : MMArch {
bits<5> rt;
bits<21> addr;
diff --git a/lib/Target/Mips/MicroMipsInstrInfo.td b/lib/Target/Mips/MicroMipsInstrInfo.td
index 99f0f446deab..f27370f57fa0 100644
--- a/lib/Target/Mips/MicroMipsInstrInfo.td
+++ b/lib/Target/Mips/MicroMipsInstrInfo.td
@@ -1,23 +1,8 @@
-def addrimm12 : ComplexPattern<iPTR, 2, "selectIntAddrMM", [frameindex]>;
+def addrimm11 : ComplexPattern<iPTR, 2, "selectIntAddr11MM", [frameindex]>;
+def addrimm12 : ComplexPattern<iPTR, 2, "selectIntAddr12MM", [frameindex]>;
+def addrimm16 : ComplexPattern<iPTR, 2, "selectIntAddr16MM", [frameindex]>;
def addrimm4lsl2 : ComplexPattern<iPTR, 2, "selectIntAddrLSL2MM", [frameindex]>;
-def simm4 : Operand<i32> {
- let DecoderMethod = "DecodeSimm4";
-}
-def simm7 : Operand<i32>;
-def li_simm7 : Operand<i32> {
- let DecoderMethod = "DecodeLiSimm7";
-}
-
-def simm12 : Operand<i32> {
- let DecoderMethod = "DecodeSimm12";
-}
-
-def uimm6_lsl2 : Operand<i32> {
- let EncoderMethod = "getUImm6Lsl2Encoding";
- let DecoderMethod = "DecodeUImm6Lsl2";
-}
-
def simm9_addiusp : Operand<i32> {
let EncoderMethod = "getSImm9AddiuspValue";
let DecoderMethod = "DecodeSimm9SP";
@@ -60,9 +45,15 @@ def MicroMipsMemGPRMM16AsmOperand : AsmOperandClass {
let PredicateMethod = "isMemWithGRPMM16Base";
}
+// Define the classes of pointers used by microMIPS.
+// The numbers must match those in MipsRegisterInfo::MipsPtrClass.
+def ptr_gpr16mm_rc : PointerLikeRegClass<1>;
+def ptr_sp_rc : PointerLikeRegClass<2>;
+def ptr_gp_rc : PointerLikeRegClass<3>;
+
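The comment above ties these PointerLikeRegClass numbers to MipsRegisterInfo::MipsPtrClass. A sketch of that contract; the enumerator names are assumptions modeled on the operand names here, not copied from the C++ header:

#include <cassert>

// Hypothetical mirror of MipsRegisterInfo::MipsPtrClass; only the numeric
// values are load-bearing, and they must match the PointerLikeRegClass
// indices above.
enum class MipsPtrClass {
  Default = 0,
  GPR16MM = 1,        // ptr_gpr16mm_rc
  StackPointer = 2,   // ptr_sp_rc
  GlobalPointer = 3,  // ptr_gp_rc
};

int main() {
  assert(static_cast<int>(MipsPtrClass::GPR16MM) == 1);
  assert(static_cast<int>(MipsPtrClass::StackPointer) == 2);
  assert(static_cast<int>(MipsPtrClass::GlobalPointer) == 3);
  return 0;
}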
class mem_mm_4_generic : Operand<i32> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops GPRMM16, simm4);
+ let MIOperandInfo = (ops ptr_gpr16mm_rc, simm4);
let OperandType = "OPERAND_MEMORY";
let ParserMatchClass = MicroMipsMemGPRMM16AsmOperand;
}
@@ -86,32 +77,48 @@ def MicroMipsMemSPAsmOperand : AsmOperandClass {
let PredicateMethod = "isMemWithUimmWordAlignedOffsetSP<7>";
}
+def MicroMipsMemGPAsmOperand : AsmOperandClass {
+ let Name = "MicroMipsMemGP";
+ let RenderMethod = "addMemOperands";
+ let ParserMethod = "parseMemOperand";
+ let PredicateMethod = "isMemWithSimmWordAlignedOffsetGP<9>";
+}
+
def mem_mm_sp_imm5_lsl2 : Operand<i32> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops GPR32:$base, simm5:$offset);
+ let MIOperandInfo = (ops ptr_sp_rc:$base, simm5:$offset);
let OperandType = "OPERAND_MEMORY";
let ParserMatchClass = MicroMipsMemSPAsmOperand;
let EncoderMethod = "getMemEncodingMMSPImm5Lsl2";
}
-def mem_mm_gp_imm7_lsl2 : Operand<i32> {
+def mem_mm_gp_simm7_lsl2 : Operand<i32> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops GPRMM16:$base, simm7:$offset);
+ let MIOperandInfo = (ops ptr_gp_rc:$base, simm7_lsl2:$offset);
let OperandType = "OPERAND_MEMORY";
+ let ParserMatchClass = MicroMipsMemGPAsmOperand;
let EncoderMethod = "getMemEncodingMMGPImm7Lsl2";
}
def mem_mm_9 : Operand<i32> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops GPR32, simm9);
+ let MIOperandInfo = (ops ptr_rc, simm9);
let EncoderMethod = "getMemEncodingMMImm9";
- let ParserMatchClass = MipsMemAsmOperand;
+ let ParserMatchClass = MipsMemSimm9AsmOperand;
+ let OperandType = "OPERAND_MEMORY";
+}
+
+def mem_mm_11 : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops GPR32, simm11);
+ let EncoderMethod = "getMemEncodingMMImm11";
+ let ParserMatchClass = MipsMemSimm11AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
def mem_mm_12 : Operand<i32> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops GPR32, simm12);
+ let MIOperandInfo = (ops ptr_rc, simm12);
let EncoderMethod = "getMemEncodingMMImm12";
let ParserMatchClass = MipsMemAsmOperand;
let OperandType = "OPERAND_MEMORY";
@@ -119,9 +126,9 @@ def mem_mm_12 : Operand<i32> {
def mem_mm_16 : Operand<i32> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops GPR32, simm16);
+ let MIOperandInfo = (ops ptr_rc, simm16);
let EncoderMethod = "getMemEncodingMMImm16";
- let ParserMatchClass = MipsMemAsmOperand;
+ let ParserMatchClass = MipsMemSimm16AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
@@ -135,7 +142,7 @@ def MipsMemUimm4AsmOperand : AsmOperandClass {
def mem_mm_4sp : Operand<i32> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops GPR32, uimm8);
+ let MIOperandInfo = (ops ptr_sp_rc, uimm8);
let EncoderMethod = "getMemEncodingMMImm4sp";
let ParserMatchClass = MipsMemUimm4AsmOperand;
let OperandType = "OPERAND_MEMORY";
@@ -216,7 +223,7 @@ def movep_regpair : Operand<i32> {
let ParserMatchClass = MovePRegPairAsmOperand;
let PrintMethod = "printRegisterList";
let DecoderMethod = "DecodeMovePRegPair";
- let MIOperandInfo = (ops GPR32Opnd, GPR32Opnd);
+ let MIOperandInfo = (ops ptr_rc, ptr_rc);
}
class MovePMM16<string opstr, RegisterOperand RO> :
@@ -230,6 +237,7 @@ MicroMipsInst16<(outs movep_regpair:$dst_regs), (ins RO:$rs, RO:$rt),
def RegPairAsmOperand : AsmOperandClass {
let Name = "RegPair";
let ParserMethod = "parseRegisterPair";
+ let PredicateMethod = "isRegPair";
}
def regpair : Operand<i32> {
@@ -237,12 +245,12 @@ def regpair : Operand<i32> {
let ParserMatchClass = RegPairAsmOperand;
let PrintMethod = "printRegisterPair";
let DecoderMethod = "DecodeRegPairOperand";
- let MIOperandInfo = (ops GPR32Opnd, GPR32Opnd);
+ let MIOperandInfo = (ops ptr_rc, ptr_rc);
}
class StorePairMM<string opstr, InstrItinClass Itin = NoItinerary,
ComplexPattern Addr = addr> :
- InstSE<(outs), (ins regpair:$rt, mem_mm_12:$addr),
+ InstSE<(outs), (ins regpair:$rt, mem_simm12:$addr),
!strconcat(opstr, "\t$rt, $addr"), [], Itin, FrmI, opstr> {
let DecoderMethod = "DecodeMemMMImm12";
let mayStore = 1;
@@ -250,7 +258,7 @@ class StorePairMM<string opstr, InstrItinClass Itin = NoItinerary,
class LoadPairMM<string opstr, InstrItinClass Itin = NoItinerary,
ComplexPattern Addr = addr> :
- InstSE<(outs regpair:$rt), (ins mem_mm_12:$addr),
+ InstSE<(outs regpair:$rt), (ins mem_simm12:$addr),
!strconcat(opstr, "\t$rt, $addr"), [], Itin, FrmI, opstr> {
let DecoderMethod = "DecodeMemMMImm12";
let mayLoad = 1;
@@ -264,7 +272,7 @@ class LLBaseMM<string opstr, RegisterOperand RO> :
}
class LLEBaseMM<string opstr, RegisterOperand RO> :
- InstSE<(outs RO:$rt), (ins mem_mm_12:$addr),
+ InstSE<(outs RO:$rt), (ins mem_simm9:$addr),
!strconcat(opstr, "\t$rt, $addr"), [], NoItinerary, FrmI> {
let DecoderMethod = "DecodeMemMMImm9";
let mayLoad = 1;
@@ -279,7 +287,7 @@ class SCBaseMM<string opstr, RegisterOperand RO> :
}
class SCEBaseMM<string opstr, RegisterOperand RO> :
- InstSE<(outs RO:$dst), (ins RO:$rt, mem_mm_12:$addr),
+ InstSE<(outs RO:$dst), (ins RO:$rt, mem_simm9:$addr),
!strconcat(opstr, "\t$rt, $addr"), [], NoItinerary, FrmI> {
let DecoderMethod = "DecodeMemMMImm9";
let mayStore = 1;
@@ -287,10 +295,10 @@ class SCEBaseMM<string opstr, RegisterOperand RO> :
}
class LoadMM<string opstr, DAGOperand RO, SDPatternOperator OpNode = null_frag,
- InstrItinClass Itin = NoItinerary> :
- InstSE<(outs RO:$rt), (ins mem_mm_12:$addr),
+ InstrItinClass Itin = NoItinerary, DAGOperand MO = mem_mm_12> :
+ InstSE<(outs RO:$rt), (ins MO:$addr),
!strconcat(opstr, "\t$rt, $addr"),
- [(set RO:$rt, (OpNode addrimm12:$addr))], Itin, FrmI> {
+ [(set RO:$rt, (OpNode addrimm12:$addr))], Itin, FrmI, opstr> {
let DecoderMethod = "DecodeMemMMImm12";
let canFoldAsLoad = 1;
let mayLoad = 1;
@@ -615,7 +623,7 @@ def SH16_MM : StoreMM16<"sh16", GPRMM16OpndZero, GPRMM16Opnd, truncstorei16,
LOAD_STORE_FM_MM16<0x2a>;
def SW16_MM : StoreMM16<"sw16", GPRMM16OpndZero, GPRMM16Opnd, store, II_SW,
mem_mm_4_lsl2>, LOAD_STORE_FM_MM16<0x3a>;
-def LWGP_MM : LoadGPMM16<"lw", GPRMM16Opnd, II_LW, mem_mm_gp_imm7_lsl2>,
+def LWGP_MM : LoadGPMM16<"lw", GPRMM16Opnd, II_LW, mem_mm_gp_simm7_lsl2>,
LOAD_GP_FM_MM16<0x19>;
def LWSP_MM : LoadSPMM16<"lw", GPR32Opnd, II_LW, mem_mm_sp_imm5_lsl2>,
LOAD_STORE_SP_FM_MM16<0x12>;
@@ -629,7 +637,7 @@ def MFHI16_MM : MoveFromHILOMM<"mfhi", GPR32Opnd, AC0>, MFHILO_FM_MM16<0x10>;
def MFLO16_MM : MoveFromHILOMM<"mflo", GPR32Opnd, AC0>, MFHILO_FM_MM16<0x12>;
def MOVE16_MM : MoveMM16<"move", GPR32Opnd>, MOVE_FM_MM16<0x03>;
def MOVEP_MM : MovePMM16<"movep", GPRMM16OpndMoveP>, MOVEP_FM_MM16;
-def LI16_MM : LoadImmMM16<"li16", li_simm7, GPRMM16Opnd>, LI_FM_MM16,
+def LI16_MM : LoadImmMM16<"li16", li16_imm, GPRMM16Opnd>, LI_FM_MM16,
IsAsCheapAsAMove;
def JALR16_MM : JumpLinkRegMM16<"jalr", GPR32Opnd>, JALR_FM_MM16<0x0e>,
ISA_MICROMIPS32_NOT_MIPS32R6;
@@ -677,11 +685,11 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
SLTI_FM_MM<0x2c>;
def ANDi_MM : MMRel, ArithLogicI<"andi", uimm16, GPR32Opnd>,
ADDI_FM_MM<0x34>;
- def ORi_MM : MMRel, ArithLogicI<"ori", uimm16, GPR32Opnd>,
- ADDI_FM_MM<0x14>;
- def XORi_MM : MMRel, ArithLogicI<"xori", uimm16, GPR32Opnd>,
- ADDI_FM_MM<0x1c>;
- def LUi_MM : MMRel, LoadUpper<"lui", GPR32Opnd, uimm16>, LUI_FM_MM;
+ def ORi_MM : MMRel, ArithLogicI<"ori", uimm16, GPR32Opnd, II_ORI, immZExt16,
+ or>, ADDI_FM_MM<0x14>;
+ def XORi_MM : MMRel, ArithLogicI<"xori", uimm16, GPR32Opnd, II_XORI,
+ immZExt16, xor>, ADDI_FM_MM<0x1c>;
+ def LUi_MM : MMRel, LoadUpper<"lui", GPR32Opnd, uimm16_relaxed>, LUI_FM_MM;
def LEA_ADDiu_MM : MMRel, EffectiveAddress<"addiu", GPR32Opnd>,
LW_FM_MM<0xc>;
@@ -709,9 +717,9 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
def MULTu_MM : MMRel, Mult<"multu", II_MULTU, GPR32Opnd, [HI0, LO0]>,
MULT_FM_MM<0x26c>;
def SDIV_MM : MMRel, Div<"div", II_DIV, GPR32Opnd, [HI0, LO0]>,
- MULT_FM_MM<0x2ac>;
+ MULT_FM_MM<0x2ac>, ISA_MIPS1_NOT_32R6_64R6;
def UDIV_MM : MMRel, Div<"divu", II_DIVU, GPR32Opnd, [HI0, LO0]>,
- MULT_FM_MM<0x2ec>;
+ MULT_FM_MM<0x2ec>, ISA_MIPS1_NOT_32R6_64R6;
/// Arithmetic Instructions with PC and Immediate
def ADDIUPC_MM : AddImmUPC<"addiupc", GPRMM16Opnd>, ADDIUPC_FM_MM;
@@ -730,16 +738,24 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
def SRAV_MM : MMRel, shift_rotate_reg<"srav", GPR32Opnd, II_SRAV>,
SRLV_FM_MM<0x90, 0>;
def ROTR_MM : MMRel, shift_rotate_imm<"rotr", uimm5, GPR32Opnd, II_ROTR>,
- SRA_FM_MM<0xc0, 0>;
+ SRA_FM_MM<0xc0, 0> {
+ list<dag> Pattern = [(set GPR32Opnd:$rd,
+ (rotr GPR32Opnd:$rt, immZExt5:$shamt))];
+ }
def ROTRV_MM : MMRel, shift_rotate_reg<"rotrv", GPR32Opnd, II_ROTRV>,
- SRLV_FM_MM<0xd0, 0>;
+ SRLV_FM_MM<0xd0, 0> {
+ list<dag> Pattern = [(set GPR32Opnd:$rd,
+ (rotr GPR32Opnd:$rt, GPR32Opnd:$rs))];
+ }
/// Load and Store Instructions - aligned
let DecoderMethod = "DecodeMemMMImm16" in {
- def LB_MM : Load<"lb", GPR32Opnd>, MMRel, LW_FM_MM<0x7>;
- def LBu_MM : Load<"lbu", GPR32Opnd>, MMRel, LW_FM_MM<0x5>;
- def LH_MM : Load<"lh", GPR32Opnd>, MMRel, LW_FM_MM<0xf>;
- def LHu_MM : Load<"lhu", GPR32Opnd>, MMRel, LW_FM_MM<0xd>;
+ def LB_MM : LoadMemory<"lb", GPR32Opnd, mem_mm_16>, MMRel, LW_FM_MM<0x7>;
+ def LBu_MM : LoadMemory<"lbu", GPR32Opnd, mem_mm_16>, MMRel, LW_FM_MM<0x5>;
+ def LH_MM : LoadMemory<"lh", GPR32Opnd, mem_simm16, sextloadi16, II_LH,
+ addrDefault>, MMRel, LW_FM_MM<0xf>;
+ def LHu_MM : LoadMemory<"lhu", GPR32Opnd, mem_simm16, zextloadi16, II_LHU>,
+ MMRel, LW_FM_MM<0xd>;
def LW_MM : Load<"lw", GPR32Opnd>, MMRel, LW_FM_MM<0x3f>;
def SB_MM : Store<"sb", GPR32Opnd>, MMRel, LW_FM_MM<0x6>;
def SH_MM : Store<"sh", GPR32Opnd>, MMRel, LW_FM_MM<0xe>;
@@ -749,19 +765,22 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
let DecoderMethod = "DecodeMemMMImm9" in {
def LBE_MM : Load<"lbe", GPR32Opnd>, POOL32C_LHUE_FM_MM<0x18, 0x6, 0x4>;
def LBuE_MM : Load<"lbue", GPR32Opnd>, POOL32C_LHUE_FM_MM<0x18, 0x6, 0x0>;
- def LHE_MM : Load<"lhe", GPR32Opnd>, POOL32C_LHUE_FM_MM<0x18, 0x6, 0x5>;
- def LHuE_MM : Load<"lhue", GPR32Opnd>, POOL32C_LHUE_FM_MM<0x18, 0x6, 0x1>;
- def LWE_MM : Load<"lwe", GPR32Opnd>, POOL32C_LHUE_FM_MM<0x18, 0x6, 0x7>;
- def SBE_MM : Store<"sbe", GPR32Opnd>, POOL32C_LHUE_FM_MM<0x18, 0xa, 0x4>;
- def SHE_MM : Store<"she", GPR32Opnd>, POOL32C_LHUE_FM_MM<0x18, 0xa, 0x5>;
- def SWE_MM : StoreMemory<"swe", GPR32Opnd, mem_simm9gpr>,
+ def LHE_MM : LoadMemory<"lhe", GPR32Opnd, mem_simm9>,
+ POOL32C_LHUE_FM_MM<0x18, 0x6, 0x5>;
+ def LHuE_MM : LoadMemory<"lhue", GPR32Opnd, mem_simm9>,
+ POOL32C_LHUE_FM_MM<0x18, 0x6, 0x1>;
+ def LWE_MM : LoadMemory<"lwe", GPR32Opnd, mem_simm9>,
+ POOL32C_LHUE_FM_MM<0x18, 0x6, 0x7>;
+ def SBE_MM : StoreMemory<"sbe", GPR32Opnd, mem_simm9>,
+ POOL32C_LHUE_FM_MM<0x18, 0xa, 0x4>;
+ def SHE_MM : StoreMemory<"she", GPR32Opnd, mem_simm9>,
+ POOL32C_LHUE_FM_MM<0x18, 0xa, 0x5>;
+ def SWE_MM : StoreMemory<"swe", GPR32Opnd, mem_simm9>,
POOL32C_LHUE_FM_MM<0x18, 0xa, 0x7>;
}
def LWXS_MM : LoadWordIndexedScaledMM<"lwxs", GPR32Opnd>, LWXS_FM_MM<0x118>;
- def LWU_MM : LoadMM<"lwu", GPR32Opnd, zextloadi32, II_LWU>, LL_FM_MM<0xe>;
-
/// Load and Store Instructions - unaligned
def LWL_MM : LoadLeftRightMM<"lwl", MipsLWL, GPR32Opnd, mem_mm_12>,
LWL_FM_MM<0x0>;
@@ -772,13 +791,13 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
def SWR_MM : StoreLeftRightMM<"swr", MipsSWR, GPR32Opnd, mem_mm_12>,
LWL_FM_MM<0x9>;
let DecoderMethod = "DecodeMemMMImm9" in {
- def LWLE_MM : LoadLeftRightMM<"lwle", MipsLWL, GPR32Opnd, mem_mm_12>,
+ def LWLE_MM : LoadLeftRightMM<"lwle", MipsLWL, GPR32Opnd, mem_mm_9>,
POOL32C_STEVA_LDEVA_FM_MM<0x6, 0x2>;
- def LWRE_MM : LoadLeftRightMM<"lwre", MipsLWR, GPR32Opnd, mem_mm_12>,
+ def LWRE_MM : LoadLeftRightMM<"lwre", MipsLWR, GPR32Opnd, mem_mm_9>,
POOL32C_STEVA_LDEVA_FM_MM<0x6, 0x3>;
- def SWLE_MM : StoreLeftRightMM<"swle", MipsSWL, GPR32Opnd, mem_mm_12>,
+ def SWLE_MM : StoreLeftRightMM<"swle", MipsSWL, GPR32Opnd, mem_mm_9>,
POOL32C_STEVA_LDEVA_FM_MM<0xa, 0x0>;
- def SWRE_MM : StoreLeftRightMM<"swre", MipsSWR, GPR32Opnd, mem_mm_12>,
+ def SWRE_MM : StoreLeftRightMM<"swre", MipsSWR, GPR32Opnd, mem_mm_9>,
POOL32C_STEVA_LDEVA_FM_MM<0xa, 0x1>, ISA_MIPS1_NOT_32R6_64R6;
}
@@ -845,10 +864,10 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
def WSBH_MM : MMRel, SubwordSwap<"wsbh", GPR32Opnd, II_WSBH>,
SEB_FM_MM<0x1ec>, ISA_MIPS32R2;
// TODO: Add '0 < pos+size <= 32' constraint check to ext instruction
- def EXT_MM : MMRel, ExtBase<"ext", GPR32Opnd, uimm5, uimm5_plus1,
- MipsExt>, EXT_FM_MM<0x2c>;
- def INS_MM : MMRel, InsBase<"ins", GPR32Opnd, uimm5, MipsIns>,
- EXT_FM_MM<0x0c>;
+ def EXT_MM : MMRel, ExtBase<"ext", GPR32Opnd, uimm5, uimm5_plus1, immZExt5,
+ immZExt5Plus1, MipsExt>, EXT_FM_MM<0x2c>;
+ def INS_MM : MMRel, InsBase<"ins", GPR32Opnd, uimm5, uimm5_inssize_plus1,
+ MipsIns>, EXT_FM_MM<0x0c>;
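For reference, the bit-field semantics behind MipsExt/MipsIns, including the "0 < pos+size <= 32" constraint the TODO above wants checked, can be sketched in plain C++. Names here are illustrative, not LLVM's:

#include <cassert>
#include <cstdint>

// Model of `ext rt, rs, pos, size`: extract `size` bits of rs
// starting at bit `pos`, zero-extended into the result.
uint32_t ext32(uint32_t rs, unsigned pos, unsigned size) {
  assert(size >= 1 && size <= 32 && pos <= 32 - size &&
         "requires 0 < pos+size <= 32");
  uint32_t mask = (size == 32) ? ~0u : ((1u << size) - 1);
  return (rs >> pos) & mask;
}

// Model of `ins rt, rs, pos, size`: insert the low `size` bits of rs
// into rt at bit position `pos`, leaving the other bits of rt intact.
uint32_t ins32(uint32_t rt, uint32_t rs, unsigned pos, unsigned size) {
  assert(size >= 1 && size <= 32 && pos <= 32 - size &&
         "requires 0 < pos+size <= 32");
  uint32_t mask = ((size == 32) ? ~0u : ((1u << size) - 1)) << pos;
  return (rt & ~mask) | ((rs << pos) & mask);
}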
/// Jump Instructions
let DecoderMethod = "DecodeJumpTargetMM" in {
@@ -857,7 +876,8 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
def JAL_MM : MMRel, JumpLink<"jal", calltarget_mm>, J_FM_MM<0x3d>;
def JALX_MM : MMRel, JumpLink<"jalx", calltarget>, J_FM_MM<0x3c>;
}
- def JR_MM : MMRel, IndirectBranch<"jr", GPR32Opnd>, JR_FM_MM<0x3c>;
+ def JR_MM : MMRel, IndirectBranch<"jr", GPR32Opnd>, JR_FM_MM<0x3c>,
+ ISA_MICROMIPS32_NOT_MIPS32R6;
def JALR_MM : JumpLinkReg<"jalr", GPR32Opnd>, JALR_FM_MM<0x03c>;
/// Jump Instructions - Short Delay Slot
@@ -891,7 +911,7 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
/// Control Instructions
def SYNC_MM : MMRel, SYNC_FT<"sync">, SYNC_FM_MM;
def BREAK_MM : MMRel, BRK_FT<"break">, BRK_FM_MM;
- def SYSCALL_MM : MMRel, SYS_FT<"syscall">, SYS_FM_MM;
+ def SYSCALL_MM : MMRel, SYS_FT<"syscall", uimm10>, SYS_FM_MM;
def WAIT_MM : WaitMM<"wait">, WAIT_FM_MM;
def ERET_MM : MMRel, ER_FT<"eret">, ER_FM_MM<0x3cd>;
def DERET_MM : MMRel, ER_FT<"deret">, ER_FM_MM<0x38d>;
@@ -901,12 +921,12 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
ISA_MIPS32R2;
/// Trap Instructions
- def TEQ_MM : MMRel, TEQ_FT<"teq", GPR32Opnd>, TEQ_FM_MM<0x0>;
- def TGE_MM : MMRel, TEQ_FT<"tge", GPR32Opnd>, TEQ_FM_MM<0x08>;
- def TGEU_MM : MMRel, TEQ_FT<"tgeu", GPR32Opnd>, TEQ_FM_MM<0x10>;
- def TLT_MM : MMRel, TEQ_FT<"tlt", GPR32Opnd>, TEQ_FM_MM<0x20>;
- def TLTU_MM : MMRel, TEQ_FT<"tltu", GPR32Opnd>, TEQ_FM_MM<0x28>;
- def TNE_MM : MMRel, TEQ_FT<"tne", GPR32Opnd>, TEQ_FM_MM<0x30>;
+ def TEQ_MM : MMRel, TEQ_FT<"teq", GPR32Opnd, uimm4>, TEQ_FM_MM<0x0>;
+ def TGE_MM : MMRel, TEQ_FT<"tge", GPR32Opnd, uimm4>, TEQ_FM_MM<0x08>;
+ def TGEU_MM : MMRel, TEQ_FT<"tgeu", GPR32Opnd, uimm4>, TEQ_FM_MM<0x10>;
+ def TLT_MM : MMRel, TEQ_FT<"tlt", GPR32Opnd, uimm4>, TEQ_FM_MM<0x20>;
+ def TLTU_MM : MMRel, TEQ_FT<"tltu", GPR32Opnd, uimm4>, TEQ_FM_MM<0x28>;
+ def TNE_MM : MMRel, TEQ_FT<"tne", GPR32Opnd, uimm4>, TEQ_FM_MM<0x30>;
def TEQI_MM : MMRel, TEQI_FT<"teqi", GPR32Opnd>, TEQI_FM_MM<0x0e>;
def TGEI_MM : MMRel, TEQI_FT<"tgei", GPR32Opnd>, TEQI_FM_MM<0x09>;
@@ -931,9 +951,9 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
let DecoderMethod = "DecodePrefeOpMM" in {
def PREFE_MM : MMRel, CacheOp<"prefe", mem_mm_9>,
- CACHE_PREFE_FM_MM<0x18, 0x2>;
+ CACHE_PREFE_FM_MM<0x18, 0x2>;
def CACHEE_MM : MMRel, CacheOp<"cachee", mem_mm_9>,
- CACHE_PREFE_FM_MM<0x18, 0x3>;
+ CACHE_PREFE_FM_MM<0x18, 0x3>;
}
def SSNOP_MM : MMRel, Barrier<"ssnop">, BARRIER_FM_MM<0x1>;
def EHB_MM : MMRel, Barrier<"ehb">, BARRIER_FM_MM<0x3>;
@@ -944,7 +964,7 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
def TLBWI_MM : MMRel, TLB<"tlbwi">, COP0_TLB_FM_MM<0x8d>;
def TLBWR_MM : MMRel, TLB<"tlbwr">, COP0_TLB_FM_MM<0xcd>;
- def SDBBP_MM : MMRel, SYS_FT<"sdbbp">, SDBBP_FM_MM;
+ def SDBBP_MM : MMRel, SYS_FT<"sdbbp", uimm10>, SDBBP_FM_MM;
def PREFX_MM : PrefetchIndexed<"prefx">, POOL32F_PREFX_FM_MM<0x15, 0x1A0>;
}
@@ -952,54 +972,80 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
let DecoderNamespace = "MicroMips" in {
def RDHWR_MM : MMRel, R6MMR6Rel, ReadHardware<GPR32Opnd, HWRegsOpnd>,
RDHWR_FM_MM, ISA_MICROMIPS32_NOT_MIPS32R6;
+ def LWU_MM : MMRel, LoadMM<"lwu", GPR32Opnd, zextloadi32, II_LWU,
+ mem_simm12>, LL_FM_MM<0xe>,
+ ISA_MICROMIPS32_NOT_MIPS32R6;
}
-let Predicates = [InMicroMips] in {
-
//===----------------------------------------------------------------------===//
// MicroMips arbitrary patterns that map to one or more instructions
//===----------------------------------------------------------------------===//
-def : MipsPat<(i32 immLi16:$imm),
- (LI16_MM immLi16:$imm)>;
-def : MipsPat<(i32 immSExt16:$imm),
- (ADDiu_MM ZERO, immSExt16:$imm)>;
-def : MipsPat<(i32 immZExt16:$imm),
- (ORi_MM ZERO, immZExt16:$imm)>;
-def : MipsPat<(not GPR32:$in),
- (NOR_MM GPR32Opnd:$in, ZERO)>;
-
-def : MipsPat<(add GPRMM16:$src, immSExtAddiur2:$imm),
- (ADDIUR2_MM GPRMM16:$src, immSExtAddiur2:$imm)>;
-def : MipsPat<(add GPR32:$src, immSExtAddius5:$imm),
- (ADDIUS5_MM GPR32:$src, immSExtAddius5:$imm)>;
-def : MipsPat<(add GPR32:$src, immSExt16:$imm),
- (ADDiu_MM GPR32:$src, immSExt16:$imm)>;
-
-def : MipsPat<(and GPRMM16:$src, immZExtAndi16:$imm),
- (ANDI16_MM GPRMM16:$src, immZExtAndi16:$imm)>;
-def : MipsPat<(and GPR32:$src, immZExt16:$imm),
- (ANDi_MM GPR32:$src, immZExt16:$imm)>;
-
-def : MipsPat<(shl GPRMM16:$src, immZExt2Shift:$imm),
- (SLL16_MM GPRMM16:$src, immZExt2Shift:$imm)>;
-def : MipsPat<(shl GPR32:$src, immZExt5:$imm),
- (SLL_MM GPR32:$src, immZExt5:$imm)>;
-
-def : MipsPat<(srl GPRMM16:$src, immZExt2Shift:$imm),
- (SRL16_MM GPRMM16:$src, immZExt2Shift:$imm)>;
-def : MipsPat<(srl GPR32:$src, immZExt5:$imm),
- (SRL_MM GPR32:$src, immZExt5:$imm)>;
-
-def : MipsPat<(store GPRMM16:$src, addrimm4lsl2:$addr),
- (SW16_MM GPRMM16:$src, addrimm4lsl2:$addr)>;
-def : MipsPat<(store GPR32:$src, addr:$addr),
- (SW_MM GPR32:$src, addr:$addr)>;
-
-def : MipsPat<(load addrimm4lsl2:$addr),
- (LW16_MM addrimm4lsl2:$addr)>;
-def : MipsPat<(load addr:$addr),
- (LW_MM addr:$addr)>;
+let Predicates = [InMicroMips] in {
+ def : MipsPat<(i32 immLi16:$imm),
+ (LI16_MM immLi16:$imm)>;
+ def : MipsPat<(i32 immSExt16:$imm),
+ (ADDiu_MM ZERO, immSExt16:$imm)>;
+ def : MipsPat<(i32 immZExt16:$imm),
+ (ORi_MM ZERO, immZExt16:$imm)>;
+
+ def : MipsPat<(not GPRMM16:$in),
+ (NOT16_MM GPRMM16:$in)>;
+ def : MipsPat<(not GPR32:$in),
+ (NOR_MM GPR32Opnd:$in, ZERO)>;
+
+ def : MipsPat<(add GPRMM16:$src, immSExtAddiur2:$imm),
+ (ADDIUR2_MM GPRMM16:$src, immSExtAddiur2:$imm)>;
+ def : MipsPat<(add GPR32:$src, immSExtAddius5:$imm),
+ (ADDIUS5_MM GPR32:$src, immSExtAddius5:$imm)>;
+ def : MipsPat<(add GPR32:$src, immSExt16:$imm),
+ (ADDiu_MM GPR32:$src, immSExt16:$imm)>;
+
+ def : MipsPat<(and GPRMM16:$src, immZExtAndi16:$imm),
+ (ANDI16_MM GPRMM16:$src, immZExtAndi16:$imm)>;
+ def : MipsPat<(and GPR32:$src, immZExt16:$imm),
+ (ANDi_MM GPR32:$src, immZExt16:$imm)>;
+
+ def : MipsPat<(shl GPRMM16:$src, immZExt2Shift:$imm),
+ (SLL16_MM GPRMM16:$src, immZExt2Shift:$imm)>;
+ def : MipsPat<(shl GPR32:$src, immZExt5:$imm),
+ (SLL_MM GPR32:$src, immZExt5:$imm)>;
+ def : MipsPat<(shl GPR32:$lhs, GPR32:$rhs),
+ (SLLV_MM GPR32:$lhs, GPR32:$rhs)>;
+
+ def : MipsPat<(srl GPRMM16:$src, immZExt2Shift:$imm),
+ (SRL16_MM GPRMM16:$src, immZExt2Shift:$imm)>;
+ def : MipsPat<(srl GPR32:$src, immZExt5:$imm),
+ (SRL_MM GPR32:$src, immZExt5:$imm)>;
+ def : MipsPat<(srl GPR32:$lhs, GPR32:$rhs),
+ (SRLV_MM GPR32:$lhs, GPR32:$rhs)>;
+
+ def : MipsPat<(sra GPR32:$src, immZExt5:$imm),
+ (SRA_MM GPR32:$src, immZExt5:$imm)>;
+ def : MipsPat<(sra GPR32:$lhs, GPR32:$rhs),
+ (SRAV_MM GPR32:$lhs, GPR32:$rhs)>;
+
+ def : MipsPat<(store GPRMM16:$src, addrimm4lsl2:$addr),
+ (SW16_MM GPRMM16:$src, addrimm4lsl2:$addr)>;
+ def : MipsPat<(store GPR32:$src, addr:$addr),
+ (SW_MM GPR32:$src, addr:$addr)>;
+
+ def : MipsPat<(load addrimm4lsl2:$addr),
+ (LW16_MM addrimm4lsl2:$addr)>;
+ def : MipsPat<(load addr:$addr),
+ (LW_MM addr:$addr)>;
+ def : MipsPat<(subc GPR32:$lhs, GPR32:$rhs),
+ (SUBu_MM GPR32:$lhs, GPR32:$rhs)>;
+}
+
+let AddedComplexity = 40 in {
+ def : MipsPat<(i32 (sextloadi16 addrRegImm:$a)),
+ (LH_MM addrRegImm:$a)>;
+}
+def : MipsPat<(atomic_load_16 addr:$a),
+ (LH_MM addr:$a)>;
+def : MipsPat<(i32 (extloadi16 addr:$src)),
+ (LHu_MM addr:$src)>;
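The only difference between the sextloadi16 pattern (LH_MM) and the zextloadi16 pattern (LHu_MM) is the extension applied to the loaded halfword; extloadi16, where the high bits are don't-care, is also mapped to LHu_MM here. A minimal C++ model of the two load flavors:

#include <cstdint>

// lh (sextloadi16): the halfword is sign-extended to 32 bits.
int32_t lh(const int16_t *p) { return static_cast<int32_t>(*p); }

// lhu (zextloadi16, and extloadi16 in the pattern above): zero-extended.
uint32_t lhu(const uint16_t *p) { return static_cast<uint32_t>(*p); }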
//===----------------------------------------------------------------------===//
// MicroMips instruction aliases
@@ -1011,24 +1057,62 @@ class UncondBranchMMPseudo<string opstr> :
def B_MM_Pseudo : UncondBranchMMPseudo<"b">, ISA_MICROMIPS;
+let Predicates = [InMicroMips] in {
+ def SDIV_MM_Pseudo : MultDivPseudo<SDIV_MM, ACC64, GPR32Opnd, MipsDivRem,
+ II_DIV, 0, 1, 1>, ISA_MIPS1_NOT_32R6_64R6;
+ def UDIV_MM_Pseudo : MultDivPseudo<UDIV_MM, ACC64, GPR32Opnd, MipsDivRemU,
+ II_DIVU, 0, 1, 1>, ISA_MIPS1_NOT_32R6_64R6;
+
def : MipsInstAlias<"wait", (WAIT_MM 0x0), 1>;
def : MipsInstAlias<"nop", (SLL_MM ZERO, ZERO, 0), 1>;
def : MipsInstAlias<"nop", (MOVE16_MM ZERO, ZERO), 1>;
-}
-
-let Predicates = [InMicroMips] in {
-def : MipsInstAlias<"ei", (EI_MM ZERO), 1>, ISA_MIPS32R2;
-def : MipsInstAlias<"di", (DI_MM ZERO), 1>, ISA_MIPS32R2;
-def : MipsInstAlias<"teq $rs, $rt",
- (TEQ_MM GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
-def : MipsInstAlias<"tge $rs, $rt",
- (TGE_MM GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
-def : MipsInstAlias<"tgeu $rs, $rt",
- (TGEU_MM GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
-def : MipsInstAlias<"tlt $rs, $rt",
- (TLT_MM GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
-def : MipsInstAlias<"tltu $rs, $rt",
- (TLTU_MM GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
-def : MipsInstAlias<"tne $rs, $rt",
- (TNE_MM GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+ def : MipsInstAlias<"ei", (EI_MM ZERO), 1>, ISA_MIPS32R2;
+ def : MipsInstAlias<"di", (DI_MM ZERO), 1>, ISA_MIPS32R2;
+ def : MipsInstAlias<"teq $rs, $rt",
+ (TEQ_MM GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+ def : MipsInstAlias<"tge $rs, $rt",
+ (TGE_MM GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+ def : MipsInstAlias<"tgeu $rs, $rt",
+ (TGEU_MM GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+ def : MipsInstAlias<"tlt $rs, $rt",
+ (TLT_MM GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+ def : MipsInstAlias<"tltu $rs, $rt",
+ (TLTU_MM GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+ def : MipsInstAlias<"tne $rs, $rt",
+ (TNE_MM GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+ def : MipsInstAlias<"sll $rd, $rt, $rs",
+ (SLLV_MM GPR32Opnd:$rd, GPR32Opnd:$rt, GPR32Opnd:$rs), 0>;
+ def : MipsInstAlias<"sra $rd, $rt, $rs",
+ (SRAV_MM GPR32Opnd:$rd, GPR32Opnd:$rt, GPR32Opnd:$rs), 0>;
+ def : MipsInstAlias<"srl $rd, $rt, $rs",
+ (SRLV_MM GPR32Opnd:$rd, GPR32Opnd:$rt, GPR32Opnd:$rs), 0>;
+ def : MipsInstAlias<"sll $rd, $rt",
+ (SLLV_MM GPR32Opnd:$rd, GPR32Opnd:$rd, GPR32Opnd:$rt), 0>;
+ def : MipsInstAlias<"sra $rd, $rt",
+ (SRAV_MM GPR32Opnd:$rd, GPR32Opnd:$rd, GPR32Opnd:$rt), 0>;
+ def : MipsInstAlias<"srl $rd, $rt",
+ (SRLV_MM GPR32Opnd:$rd, GPR32Opnd:$rd, GPR32Opnd:$rt), 0>;
+ def : MipsInstAlias<"sll $rd, $shamt",
+ (SLL_MM GPR32Opnd:$rd, GPR32Opnd:$rd, uimm5:$shamt), 0>;
+ def : MipsInstAlias<"sra $rd, $shamt",
+ (SRA_MM GPR32Opnd:$rd, GPR32Opnd:$rd, uimm5:$shamt), 0>;
+ def : MipsInstAlias<"srl $rd, $shamt",
+ (SRL_MM GPR32Opnd:$rd, GPR32Opnd:$rd, uimm5:$shamt), 0>;
+ def : MipsInstAlias<"rotr $rt, $imm",
+ (ROTR_MM GPR32Opnd:$rt, GPR32Opnd:$rt, uimm5:$imm), 0>;
+ def : MipsInstAlias<"syscall", (SYSCALL_MM 0), 1>;
+ def : MipsInstAlias<"and $rs, $rt, $imm",
+ (ANDi_MM GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
+ def : MipsInstAlias<"and $rs, $imm",
+ (ANDi_MM GPR32Opnd:$rs, GPR32Opnd:$rs, simm16:$imm), 0>;
+ def : MipsInstAlias<"or $rs, $rt, $imm",
+ (ORi_MM GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>;
+ def : MipsInstAlias<"or $rs, $imm",
+ (ORi_MM GPR32Opnd:$rs, GPR32Opnd:$rs, uimm16:$imm), 0>;
+ def : MipsInstAlias<"xor $rs, $rt, $imm",
+ (XORi_MM GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>;
+ def : MipsInstAlias<"xor $rs, $imm",
+ (XORi_MM GPR32Opnd:$rs, GPR32Opnd:$rs, uimm16:$imm), 0>;
+ def : MipsInstAlias<"not $rt, $rs",
+ (NOR_MM GPR32Opnd:$rt, GPR32Opnd:$rs, ZERO), 0>;
}
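A note on the two-operand aliases added above ("and $rs, $imm", "or $rs, $imm", and so on): the destination register doubles as the first source operand. A hypothetical helper modeling that expansion, purely for illustration:

#include <string>

// Models the two-operand immediate aliases: "and $4, 7" is accepted
// as shorthand for "andi $4, $4, 7". The mnemonic is passed already
// in its immediate form (e.g. "andi" for the `and` alias).
std::string expandTwoOperandImmAlias(const std::string &mnemonic,
                                     const std::string &reg,
                                     const std::string &imm) {
  return mnemonic + "\t" + reg + ", " + reg + ", " + imm;
}

// expandTwoOperandImmAlias("andi", "$4", "7") yields "andi\t$4, $4, 7".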
diff --git a/lib/Target/Mips/Mips.h b/lib/Target/Mips/Mips.h
index 671d7a87cc3d..d9faf3325cac 100644
--- a/lib/Target/Mips/Mips.h
+++ b/lib/Target/Mips/Mips.h
@@ -29,8 +29,9 @@ namespace llvm {
FunctionPass *createMipsModuleISelDagPass(MipsTargetMachine &TM);
FunctionPass *createMipsOptimizePICCallPass(MipsTargetMachine &TM);
FunctionPass *createMipsDelaySlotFillerPass(MipsTargetMachine &TM);
+ FunctionPass *createMipsHazardSchedule();
FunctionPass *createMipsLongBranchPass(MipsTargetMachine &TM);
- FunctionPass *createMipsConstantIslandPass(MipsTargetMachine &tm);
+ FunctionPass *createMipsConstantIslandPass();
} // end namespace llvm;
#endif
diff --git a/lib/Target/Mips/Mips.td b/lib/Target/Mips/Mips.td
index 35352b6115c5..ea3fa0a9578e 100644
--- a/lib/Target/Mips/Mips.td
+++ b/lib/Target/Mips/Mips.td
@@ -24,6 +24,8 @@ class PredicateControl {
list<Predicate> EncodingPredicates = [];
// Predicates for the GPR size such as IsGP64bit
list<Predicate> GPRPredicates = [];
+ // Predicates for the PTR size such as IsPTR64bit
+ list<Predicate> PTRPredicates = [];
// Predicates for the FGR size and layout such as IsFP64bit
list<Predicate> FGRPredicates = [];
// Predicates for the instruction group membership such as ISA's and ASE's
@@ -34,6 +36,7 @@ class PredicateControl {
list<Predicate> AdditionalPredicates = [];
list<Predicate> Predicates = !listconcat(EncodingPredicates,
GPRPredicates,
+ PTRPredicates,
FGRPredicates,
InsnPredicates,
HardFloatPredicate,
@@ -62,6 +65,8 @@ def MipsInstrInfo : InstrInfo;
def FeatureNoABICalls : SubtargetFeature<"noabicalls", "NoABICalls", "true",
"Disable SVR4-style position-independent code">;
+def FeaturePTR64Bit : SubtargetFeature<"ptr64", "IsPTR64bit", "true",
+ "Pointers are 64-bit wide">;
def FeatureGP64Bit : SubtargetFeature<"gp64", "IsGP64bit", "true",
"General Purpose Registers are 64-bit wide">;
def FeatureFP64Bit : SubtargetFeature<"fp64", "IsFP64bit", "true",
diff --git a/lib/Target/Mips/Mips16FrameLowering.cpp b/lib/Target/Mips/Mips16FrameLowering.cpp
index 26426c087164..e937ffa7a7ab 100644
--- a/lib/Target/Mips/Mips16FrameLowering.cpp
+++ b/lib/Target/Mips/Mips16FrameLowering.cpp
@@ -24,7 +24,6 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
diff --git a/lib/Target/Mips/Mips16HardFloat.cpp b/lib/Target/Mips/Mips16HardFloat.cpp
index b2bc7e74c706..d2d1c65e40d9 100644
--- a/lib/Target/Mips/Mips16HardFloat.cpp
+++ b/lib/Target/Mips/Mips16HardFloat.cpp
@@ -261,7 +261,7 @@ static std::string swapFPIntParams(FPParamVariant PV, Module *M, bool LE,
static void assureFPCallStub(Function &F, Module *M,
const MipsTargetMachine &TM) {
// for now we only need them for static relocation
- if (TM.getRelocationModel() == Reloc::PIC_)
+ if (TM.isPositionIndependent())
return;
LLVMContext &Context = M->getContext();
bool LE = TM.isLittleEndian();
@@ -387,11 +387,9 @@ static bool fixupFPReturnAndCall(Function &F, Module *M,
bool Modified = false;
LLVMContext &C = M->getContext();
Type *MyVoid = Type::getVoidTy(C);
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
- for (BasicBlock::iterator I = BB->begin(), E = BB->end();
- I != E; ++I) {
- Instruction &Inst = *I;
- if (const ReturnInst *RI = dyn_cast<ReturnInst>(I)) {
+ for (auto &BB: F)
+ for (auto &I: BB) {
+ if (const ReturnInst *RI = dyn_cast<ReturnInst>(&I)) {
Value *RVal = RI->getReturnValue();
if (!RVal) continue;
//
@@ -425,17 +423,11 @@ static bool fixupFPReturnAndCall(Function &F, Module *M,
A = A.addAttribute(C, AttributeSet::FunctionIndex,
Attribute::NoInline);
Value *F = (M->getOrInsertFunction(Name, A, MyVoid, T, nullptr));
- CallInst::Create(F, Params, "", &Inst );
- } else if (const CallInst *CI = dyn_cast<CallInst>(I)) {
- const Value* V = CI->getCalledValue();
- Type* T = nullptr;
- if (V) T = V->getType();
- PointerType *PFT = nullptr;
- if (T) PFT = dyn_cast<PointerType>(T);
- FunctionType *FT = nullptr;
- if (PFT) FT = dyn_cast<FunctionType>(PFT->getElementType());
+ CallInst::Create(F, Params, "", &I);
+ } else if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
+ FunctionType *FT = CI->getFunctionType();
Function *F_ = CI->getCalledFunction();
- if (FT && needsFPReturnHelper(*FT) &&
+ if (needsFPReturnHelper(*FT) &&
!(F_ && isIntrinsicInline(F_))) {
Modified=true;
F.addFnAttr("saveS2");
@@ -447,7 +439,7 @@ static bool fixupFPReturnAndCall(Function &F, Module *M,
Modified=true;
F.addFnAttr("saveS2");
}
- if (TM.getRelocationModel() != Reloc::PIC_ ) {
+ if (!TM.isPositionIndependent()) {
if (needsFPHelperFromSig(*F_)) {
assureFPCallStub(*F_, M, TM);
Modified=true;
@@ -461,7 +453,7 @@ static bool fixupFPReturnAndCall(Function &F, Module *M,
static void createFPFnStub(Function *F, Module *M, FPParamVariant PV,
const MipsTargetMachine &TM) {
- bool PicMode = TM.getRelocationModel() == Reloc::PIC_;
+ bool PicMode = TM.isPositionIndependent();
bool LE = TM.isLittleEndian();
LLVMContext &Context = M->getContext();
std::string Name = F->getName();
diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
index 5a1c2c67cc70..0405291431cd 100644
--- a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
+++ b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
@@ -14,7 +14,6 @@
#include "Mips16ISelDAGToDAG.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "Mips.h"
-#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -43,8 +42,8 @@ bool Mips16DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
return MipsDAGToDAGISel::runOnMachineFunction(MF);
}
/// Select multiply instructions.
-std::pair<SDNode*, SDNode*>
-Mips16DAGToDAGISel::selectMULT(SDNode *N, unsigned Opc, SDLoc DL, EVT Ty,
+std::pair<SDNode *, SDNode *>
+Mips16DAGToDAGISel::selectMULT(SDNode *N, unsigned Opc, const SDLoc &DL, EVT Ty,
bool HasLo, bool HasHi) {
SDNode *Lo = nullptr, *Hi = nullptr;
SDNode *Mul = CurDAG->getMachineNode(Opc, DL, MVT::Glue, N->getOperand(0),
@@ -81,125 +80,60 @@ void Mips16DAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) {
V1 = RegInfo.createVirtualRegister(RC);
V2 = RegInfo.createVirtualRegister(RC);
- BuildMI(MBB, I, DL, TII.get(Mips::GotPrologue16), V0).
- addReg(V1, RegState::Define).
- addExternalSymbol("_gp_disp", MipsII::MO_ABS_HI).
- addExternalSymbol("_gp_disp", MipsII::MO_ABS_LO);
+ BuildMI(MBB, I, DL, TII.get(Mips::GotPrologue16), V0)
+ .addReg(V1, RegState::Define)
+ .addExternalSymbol("_gp_disp", MipsII::MO_ABS_HI)
+ .addExternalSymbol("_gp_disp", MipsII::MO_ABS_LO);
BuildMI(MBB, I, DL, TII.get(Mips::SllX16), V2).addReg(V0).addImm(16);
BuildMI(MBB, I, DL, TII.get(Mips::AdduRxRyRz16), GlobalBaseReg)
- .addReg(V1).addReg(V2);
-}
-
-// Insert instructions to initialize the Mips16 SP Alias register in the
-// first MBB of the function.
-//
-void Mips16DAGToDAGISel::initMips16SPAliasReg(MachineFunction &MF) {
- MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
-
- if (!MipsFI->mips16SPAliasRegSet())
- return;
-
- MachineBasicBlock &MBB = MF.front();
- MachineBasicBlock::iterator I = MBB.begin();
- const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
- DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
- unsigned Mips16SPAliasReg = MipsFI->getMips16SPAliasReg();
-
- BuildMI(MBB, I, DL, TII.get(Mips::MoveR3216), Mips16SPAliasReg)
- .addReg(Mips::SP);
+ .addReg(V1)
+ .addReg(V2);
}
void Mips16DAGToDAGISel::processFunctionAfterISel(MachineFunction &MF) {
initGlobalBaseReg(MF);
- initMips16SPAliasReg(MF);
-}
-
-/// getMips16SPAliasReg - Output the instructions required to put the
-/// SP into a Mips16 accessible aliased register.
-SDValue Mips16DAGToDAGISel::getMips16SPAliasReg() {
- unsigned Mips16SPAliasReg =
- MF->getInfo<MipsFunctionInfo>()->getMips16SPAliasReg();
- auto PtrVT = getTargetLowering()->getPointerTy(CurDAG->getDataLayout());
- return CurDAG->getRegister(Mips16SPAliasReg, PtrVT);
}
-void Mips16DAGToDAGISel::getMips16SPRefReg(SDNode *Parent, SDValue &AliasReg) {
- auto PtrVT = getTargetLowering()->getPointerTy(CurDAG->getDataLayout());
- SDValue AliasFPReg = CurDAG->getRegister(Mips::S0, PtrVT);
- if (Parent) {
- switch (Parent->getOpcode()) {
- case ISD::LOAD: {
- LoadSDNode *SD = dyn_cast<LoadSDNode>(Parent);
- switch (SD->getMemoryVT().getSizeInBits()) {
- case 8:
- case 16:
- AliasReg = Subtarget->getFrameLowering()->hasFP(*MF)
- ? AliasFPReg
- : getMips16SPAliasReg();
- return;
- }
- break;
- }
- case ISD::STORE: {
- StoreSDNode *SD = dyn_cast<StoreSDNode>(Parent);
- switch (SD->getMemoryVT().getSizeInBits()) {
- case 8:
- case 16:
- AliasReg = Subtarget->getFrameLowering()->hasFP(*MF)
- ? AliasFPReg
- : getMips16SPAliasReg();
- return;
- }
- break;
- }
- }
- }
- AliasReg = CurDAG->getRegister(Mips::SP, PtrVT);
- return;
-
-}
-
-bool Mips16DAGToDAGISel::selectAddr16(
- SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset,
- SDValue &Alias) {
+bool Mips16DAGToDAGISel::selectAddr(bool SPAllowed, SDValue Addr, SDValue &Base,
+ SDValue &Offset) {
SDLoc DL(Addr);
EVT ValTy = Addr.getValueType();
- Alias = CurDAG->getTargetConstant(0, DL, ValTy);
-
// if Address is FI, get the TargetFrameIndex.
- if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
- Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
- Offset = CurDAG->getTargetConstant(0, DL, ValTy);
- getMips16SPRefReg(Parent, Alias);
- return true;
+ if (SPAllowed) {
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
+ Offset = CurDAG->getTargetConstant(0, DL, ValTy);
+ return true;
+ }
}
// on PIC code Load GA
if (Addr.getOpcode() == MipsISD::Wrapper) {
- Base = Addr.getOperand(0);
+ Base = Addr.getOperand(0);
Offset = Addr.getOperand(1);
return true;
}
- if (TM.getRelocationModel() != Reloc::PIC_) {
+ if (!TM.isPositionIndependent()) {
if ((Addr.getOpcode() == ISD::TargetExternalSymbol ||
- Addr.getOpcode() == ISD::TargetGlobalAddress))
+ Addr.getOpcode() == ISD::TargetGlobalAddress))
return false;
}
// Addresses of the form FI+const or FI|const
if (CurDAG->isBaseWithConstantOffset(Addr)) {
ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1));
if (isInt<16>(CN->getSExtValue())) {
-
// If the first operand is a FI, get the TargetFI Node
- if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>
- (Addr.getOperand(0))) {
- Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
- getMips16SPRefReg(Parent, Alias);
+ if (SPAllowed) {
+ if (FrameIndexSDNode *FIN =
+ dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
+ Offset = CurDAG->getTargetConstant(CN->getZExtValue(), DL, ValTy);
+ return true;
+ }
}
- else
- Base = Addr.getOperand(0);
+ Base = Addr.getOperand(0);
Offset = CurDAG->getTargetConstant(CN->getZExtValue(), DL, ValTy);
return true;
}
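The isInt<16>(CN->getSExtValue()) guard above is just a signed-range check: the folded offset must fit the signed 16-bit immediate field of the extended MIPS16 encodings. A self-contained equivalent of the check (llvm::isInt is the real template):

#include <cstdint>

// True iff `value` is representable as a 16-bit two's-complement
// signed integer, i.e. fits the extended immediate field.
bool fitsInSignedImm16(int64_t value) {
  return value >= -32768 && value <= 32767;
}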
@@ -224,25 +158,25 @@ bool Mips16DAGToDAGISel::selectAddr16(
return true;
}
}
-
- // If an indexed floating point load/store can be emitted, return false.
- const LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(Parent);
-
- if (LS) {
- if (LS->getMemoryVT() == MVT::f32 && Subtarget->hasMips4_32r2())
- return false;
- if (LS->getMemoryVT() == MVT::f64 && Subtarget->hasMips4_32r2())
- return false;
- }
}
- Base = Addr;
+ Base = Addr;
Offset = CurDAG->getTargetConstant(0, DL, ValTy);
return true;
}
+bool Mips16DAGToDAGISel::selectAddr16(SDValue Addr, SDValue &Base,
+ SDValue &Offset) {
+ return selectAddr(false, Addr, Base, Offset);
+}
+
+bool Mips16DAGToDAGISel::selectAddr16SP(SDValue Addr, SDValue &Base,
+ SDValue &Offset) {
+ return selectAddr(true, Addr, Base, Offset);
+}
+
/// Select instructions not customized! Used for
/// expanded, promoted and normal instructions
-std::pair<bool, SDNode*> Mips16DAGToDAGISel::selectNode(SDNode *Node) {
+bool Mips16DAGToDAGISel::trySelect(SDNode *Node) {
unsigned Opcode = Node->getOpcode();
SDLoc DL(Node);
@@ -253,13 +187,15 @@ std::pair<bool, SDNode*> Mips16DAGToDAGISel::selectNode(SDNode *Node) {
EVT NodeTy = Node->getValueType(0);
unsigned MultOpc;
- switch(Opcode) {
- default: break;
+ switch (Opcode) {
+ default:
+ break;
case ISD::SUBE:
case ISD::ADDE: {
SDValue InFlag = Node->getOperand(2), CmpLHS;
- unsigned Opc = InFlag.getOpcode(); (void)Opc;
+ unsigned Opc = InFlag.getOpcode();
+ (void)Opc;
assert(((Opc == ISD::ADDC || Opc == ISD::ADDE) ||
(Opc == ISD::SUBC || Opc == ISD::SUBE)) &&
"(ADD|SUB)E flag operand must come from (ADD|SUB)C/E insn");
@@ -273,7 +209,7 @@ std::pair<bool, SDNode*> Mips16DAGToDAGISel::selectNode(SDNode *Node) {
MOp = Mips::SubuRxRyRz16;
}
- SDValue Ops[] = { CmpLHS, InFlag.getOperand(1) };
+ SDValue Ops[] = {CmpLHS, InFlag.getOperand(1)};
SDValue LHS = Node->getOperand(0);
SDValue RHS = Node->getOperand(1);
@@ -283,40 +219,42 @@ std::pair<bool, SDNode*> Mips16DAGToDAGISel::selectNode(SDNode *Node) {
unsigned Sltu_op = Mips::SltuRxRyRz16;
SDNode *Carry = CurDAG->getMachineNode(Sltu_op, DL, VT, Ops);
unsigned Addu_op = Mips::AdduRxRyRz16;
- SDNode *AddCarry = CurDAG->getMachineNode(Addu_op, DL, VT,
- SDValue(Carry,0), RHS);
+ SDNode *AddCarry =
+ CurDAG->getMachineNode(Addu_op, DL, VT, SDValue(Carry, 0), RHS);
- SDNode *Result = CurDAG->SelectNodeTo(Node, MOp, VT, MVT::Glue, LHS,
- SDValue(AddCarry,0));
- return std::make_pair(true, Result);
+ CurDAG->SelectNodeTo(Node, MOp, VT, MVT::Glue, LHS, SDValue(AddCarry, 0));
+ return true;
}
/// Mul with two results
case ISD::SMUL_LOHI:
case ISD::UMUL_LOHI: {
MultOpc = (Opcode == ISD::UMUL_LOHI ? Mips::MultuRxRy16 : Mips::MultRxRy16);
- std::pair<SDNode*, SDNode*> LoHi = selectMULT(Node, MultOpc, DL, NodeTy,
- true, true);
+ std::pair<SDNode *, SDNode *> LoHi =
+ selectMULT(Node, MultOpc, DL, NodeTy, true, true);
if (!SDValue(Node, 0).use_empty())
ReplaceUses(SDValue(Node, 0), SDValue(LoHi.first, 0));
if (!SDValue(Node, 1).use_empty())
ReplaceUses(SDValue(Node, 1), SDValue(LoHi.second, 0));
- return std::make_pair(true, nullptr);
+ CurDAG->RemoveDeadNode(Node);
+ return true;
}
case ISD::MULHS:
case ISD::MULHU: {
MultOpc = (Opcode == ISD::MULHU ? Mips::MultuRxRy16 : Mips::MultRxRy16);
- SDNode *Result = selectMULT(Node, MultOpc, DL, NodeTy, false, true).second;
- return std::make_pair(true, Result);
+ auto LoHi = selectMULT(Node, MultOpc, DL, NodeTy, false, true);
+ ReplaceNode(Node, LoHi.second);
+ return true;
}
}
- return std::make_pair(false, nullptr);
+ return false;
}
-FunctionPass *llvm::createMips16ISelDag(MipsTargetMachine &TM) {
- return new Mips16DAGToDAGISel(TM);
+FunctionPass *llvm::createMips16ISelDag(MipsTargetMachine &TM,
+ CodeGenOpt::Level OptLevel) {
+ return new Mips16DAGToDAGISel(TM, OptLevel);
}
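For the SMUL_LOHI/UMUL_LOHI cases handled above, selectMULT picks MultRxRy16 or MultuRxRy16 and then reads the LO and HI halves of the product. A plain C++ model of what those selections compute (illustrative, not LLVM code):

#include <cstdint>

// Signed 32x32 multiply: the 64-bit product is split into LO and HI.
void mult(int32_t a, int32_t b, uint32_t &lo, uint32_t &hi) {
  int64_t p = static_cast<int64_t>(a) * static_cast<int64_t>(b);
  lo = static_cast<uint32_t>(p);
  hi = static_cast<uint32_t>(static_cast<uint64_t>(p) >> 32);
}

// Unsigned variant (multu).
void multu(uint32_t a, uint32_t b, uint32_t &lo, uint32_t &hi) {
  uint64_t p = static_cast<uint64_t>(a) * b;
  lo = static_cast<uint32_t>(p);
  hi = static_cast<uint32_t>(p >> 32);
}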
diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.h b/lib/Target/Mips/Mips16ISelDAGToDAG.h
index ae0e61e19d9d..bbf8cc36f241 100644
--- a/lib/Target/Mips/Mips16ISelDAGToDAG.h
+++ b/lib/Target/Mips/Mips16ISelDAGToDAG.h
@@ -20,22 +20,24 @@ namespace llvm {
class Mips16DAGToDAGISel : public MipsDAGToDAGISel {
public:
- explicit Mips16DAGToDAGISel(MipsTargetMachine &TM) : MipsDAGToDAGISel(TM) {}
+ explicit Mips16DAGToDAGISel(MipsTargetMachine &TM, CodeGenOpt::Level OL)
+ : MipsDAGToDAGISel(TM, OL) {}
private:
- std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, SDLoc DL,
- EVT Ty, bool HasLo, bool HasHi);
-
- SDValue getMips16SPAliasReg();
+ std::pair<SDNode *, SDNode *> selectMULT(SDNode *N, unsigned Opc,
+ const SDLoc &DL, EVT Ty, bool HasLo,
+ bool HasHi);
bool runOnMachineFunction(MachineFunction &MF) override;
- void getMips16SPRefReg(SDNode *Parent, SDValue &AliasReg);
-
- bool selectAddr16(SDNode *Parent, SDValue N, SDValue &Base,
- SDValue &Offset, SDValue &Alias) override;
+ bool selectAddr(bool SPAllowed, SDValue Addr, SDValue &Base,
+ SDValue &Offset);
+ bool selectAddr16(SDValue Addr, SDValue &Base,
+ SDValue &Offset) override;
+ bool selectAddr16SP(SDValue Addr, SDValue &Base,
+ SDValue &Offset) override;
- std::pair<bool, SDNode*> selectNode(SDNode *Node) override;
+ bool trySelect(SDNode *Node) override;
void processFunctionAfterISel(MachineFunction &MF) override;
@@ -46,8 +48,8 @@ private:
void initMips16SPAliasReg(MachineFunction &MF);
};
-FunctionPass *createMips16ISelDag(MipsTargetMachine &TM);
-
+FunctionPass *createMips16ISelDag(MipsTargetMachine &TM,
+ CodeGenOpt::Level OptLevel);
}
#endif
diff --git a/lib/Target/Mips/Mips16ISelLowering.cpp b/lib/Target/Mips/Mips16ISelLowering.cpp
index e7483253e61d..bdb9eec4cc5a 100644
--- a/lib/Target/Mips/Mips16ISelLowering.cpp
+++ b/lib/Target/Mips/Mips16ISelLowering.cpp
@@ -16,11 +16,9 @@
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsTargetMachine.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include <string>
using namespace llvm;
@@ -167,9 +165,9 @@ Mips16TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
}
MachineBasicBlock *
-Mips16TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+Mips16TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default:
return MipsTargetLowering::EmitInstrWithCustomInserter(MI, BB);
case Mips::SelBeqZ:
@@ -519,12 +517,13 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
Chain);
}
-MachineBasicBlock *Mips16TargetLowering::
-emitSel16(unsigned Opc, MachineInstr *MI, MachineBasicBlock *BB) const {
+MachineBasicBlock *
+Mips16TargetLowering::emitSel16(unsigned Opc, MachineInstr &MI,
+ MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
// To "insert" a SELECT_CC instruction, we actually have to insert the
// diamond control-flow pattern. The incoming instruction knows the
// destination vreg to set, the condition code register to branch on, the
@@ -554,8 +553,9 @@ emitSel16(unsigned Opc, MachineInstr *MI, MachineBasicBlock *BB) const {
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
- BuildMI(BB, DL, TII->get(Opc)).addReg(MI->getOperand(3).getReg())
- .addMBB(sinkMBB);
+ BuildMI(BB, DL, TII->get(Opc))
+ .addReg(MI.getOperand(3).getReg())
+ .addMBB(sinkMBB);
// copy0MBB:
// %FalseValue = ...
@@ -570,22 +570,23 @@ emitSel16(unsigned Opc, MachineInstr *MI, MachineBasicBlock *BB) const {
// ...
BB = sinkMBB;
- BuildMI(*BB, BB->begin(), DL,
- TII->get(Mips::PHI), MI->getOperand(0).getReg())
- .addReg(MI->getOperand(1).getReg()).addMBB(thisMBB)
- .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB);
+ BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
+ .addReg(MI.getOperand(1).getReg())
+ .addMBB(thisMBB)
+ .addReg(MI.getOperand(2).getReg())
+ .addMBB(copy0MBB);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
MachineBasicBlock *
-Mips16TargetLowering::emitSelT16(unsigned Opc1, unsigned Opc2, MachineInstr *MI,
+Mips16TargetLowering::emitSelT16(unsigned Opc1, unsigned Opc2, MachineInstr &MI,
MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
// To "insert" a SELECT_CC instruction, we actually have to insert the
// diamond control-flow pattern. The incoming instruction knows the
// destination vreg to set, the condition code register to branch on, the
@@ -615,8 +616,9 @@ Mips16TargetLowering::emitSelT16(unsigned Opc1, unsigned Opc2, MachineInstr *MI,
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
- BuildMI(BB, DL, TII->get(Opc2)).addReg(MI->getOperand(3).getReg())
- .addReg(MI->getOperand(4).getReg());
+ BuildMI(BB, DL, TII->get(Opc2))
+ .addReg(MI.getOperand(3).getReg())
+ .addReg(MI.getOperand(4).getReg());
BuildMI(BB, DL, TII->get(Opc1)).addMBB(sinkMBB);
// copy0MBB:
@@ -632,24 +634,25 @@ Mips16TargetLowering::emitSelT16(unsigned Opc1, unsigned Opc2, MachineInstr *MI,
// ...
BB = sinkMBB;
- BuildMI(*BB, BB->begin(), DL,
- TII->get(Mips::PHI), MI->getOperand(0).getReg())
- .addReg(MI->getOperand(1).getReg()).addMBB(thisMBB)
- .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB);
+ BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
+ .addReg(MI.getOperand(1).getReg())
+ .addMBB(thisMBB)
+ .addReg(MI.getOperand(2).getReg())
+ .addMBB(copy0MBB);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
MachineBasicBlock *
Mips16TargetLowering::emitSeliT16(unsigned Opc1, unsigned Opc2,
- MachineInstr *MI,
+ MachineInstr &MI,
MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
// To "insert" a SELECT_CC instruction, we actually have to insert the
// diamond control-flow pattern. The incoming instruction knows the
// destination vreg to set, the condition code register to branch on, the
@@ -679,8 +682,9 @@ Mips16TargetLowering::emitSeliT16(unsigned Opc1, unsigned Opc2,
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
- BuildMI(BB, DL, TII->get(Opc2)).addReg(MI->getOperand(3).getReg())
- .addImm(MI->getOperand(4).getImm());
+ BuildMI(BB, DL, TII->get(Opc2))
+ .addReg(MI.getOperand(3).getReg())
+ .addImm(MI.getOperand(4).getImm());
BuildMI(BB, DL, TII->get(Opc1)).addMBB(sinkMBB);
// copy0MBB:
@@ -696,42 +700,44 @@ Mips16TargetLowering::emitSeliT16(unsigned Opc1, unsigned Opc2,
// ...
BB = sinkMBB;
- BuildMI(*BB, BB->begin(), DL,
- TII->get(Mips::PHI), MI->getOperand(0).getReg())
- .addReg(MI->getOperand(1).getReg()).addMBB(thisMBB)
- .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB);
+ BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
+ .addReg(MI.getOperand(1).getReg())
+ .addMBB(thisMBB)
+ .addReg(MI.getOperand(2).getReg())
+ .addMBB(copy0MBB);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
MachineBasicBlock *
Mips16TargetLowering::emitFEXT_T8I816_ins(unsigned BtOpc, unsigned CmpOpc,
- MachineInstr *MI,
+ MachineInstr &MI,
MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- unsigned regX = MI->getOperand(0).getReg();
- unsigned regY = MI->getOperand(1).getReg();
- MachineBasicBlock *target = MI->getOperand(2).getMBB();
- BuildMI(*BB, MI, MI->getDebugLoc(), TII->get(CmpOpc)).addReg(regX)
- .addReg(regY);
- BuildMI(*BB, MI, MI->getDebugLoc(), TII->get(BtOpc)).addMBB(target);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ unsigned regX = MI.getOperand(0).getReg();
+ unsigned regY = MI.getOperand(1).getReg();
+ MachineBasicBlock *target = MI.getOperand(2).getMBB();
+ BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(CmpOpc))
+ .addReg(regX)
+ .addReg(regY);
+ BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(BtOpc)).addMBB(target);
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
MachineBasicBlock *Mips16TargetLowering::emitFEXT_T8I8I16_ins(
unsigned BtOpc, unsigned CmpiOpc, unsigned CmpiXOpc, bool ImmSigned,
- MachineInstr *MI, MachineBasicBlock *BB) const {
+ MachineInstr &MI, MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- unsigned regX = MI->getOperand(0).getReg();
- int64_t imm = MI->getOperand(1).getImm();
- MachineBasicBlock *target = MI->getOperand(2).getMBB();
+ unsigned regX = MI.getOperand(0).getReg();
+ int64_t imm = MI.getOperand(1).getImm();
+ MachineBasicBlock *target = MI.getOperand(2).getMBB();
unsigned CmpOpc;
if (isUInt<8>(imm))
CmpOpc = CmpiOpc;
@@ -740,10 +746,9 @@ MachineBasicBlock *Mips16TargetLowering::emitFEXT_T8I8I16_ins(
CmpOpc = CmpiXOpc;
else
llvm_unreachable("immediate field not usable");
- BuildMI(*BB, MI, MI->getDebugLoc(), TII->get(CmpOpc)).addReg(regX)
- .addImm(imm);
- BuildMI(*BB, MI, MI->getDebugLoc(), TII->get(BtOpc)).addMBB(target);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(CmpOpc)).addReg(regX).addImm(imm);
+ BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(BtOpc)).addMBB(target);
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -758,38 +763,38 @@ static unsigned Mips16WhichOp8uOr16simm
}
MachineBasicBlock *
-Mips16TargetLowering::emitFEXT_CCRX16_ins(unsigned SltOpc, MachineInstr *MI,
+Mips16TargetLowering::emitFEXT_CCRX16_ins(unsigned SltOpc, MachineInstr &MI,
MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- unsigned CC = MI->getOperand(0).getReg();
- unsigned regX = MI->getOperand(1).getReg();
- unsigned regY = MI->getOperand(2).getReg();
- BuildMI(*BB, MI, MI->getDebugLoc(), TII->get(SltOpc)).addReg(regX).addReg(
- regY);
- BuildMI(*BB, MI, MI->getDebugLoc(),
- TII->get(Mips::MoveR3216), CC).addReg(Mips::T8);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ unsigned CC = MI.getOperand(0).getReg();
+ unsigned regX = MI.getOperand(1).getReg();
+ unsigned regY = MI.getOperand(2).getReg();
+ BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SltOpc))
+ .addReg(regX)
+ .addReg(regY);
+ BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Mips::MoveR3216), CC)
+ .addReg(Mips::T8);
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
MachineBasicBlock *
Mips16TargetLowering::emitFEXT_CCRXI16_ins(unsigned SltiOpc, unsigned SltiXOpc,
- MachineInstr *MI,
+ MachineInstr &MI,
MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- unsigned CC = MI->getOperand(0).getReg();
- unsigned regX = MI->getOperand(1).getReg();
- int64_t Imm = MI->getOperand(2).getImm();
+ unsigned CC = MI.getOperand(0).getReg();
+ unsigned regX = MI.getOperand(1).getReg();
+ int64_t Imm = MI.getOperand(2).getImm();
unsigned SltOpc = Mips16WhichOp8uOr16simm(SltiOpc, SltiXOpc, Imm);
- BuildMI(*BB, MI, MI->getDebugLoc(),
- TII->get(SltOpc)).addReg(regX).addImm(Imm);
- BuildMI(*BB, MI, MI->getDebugLoc(),
- TII->get(Mips::MoveR3216), CC).addReg(Mips::T8);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SltOpc)).addReg(regX).addImm(Imm);
+ BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Mips::MoveR3216), CC)
+ .addReg(Mips::T8);
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
diff --git a/lib/Target/Mips/Mips16ISelLowering.h b/lib/Target/Mips/Mips16ISelLowering.h
index d3b9f750f347..0ee0b816ef70 100644
--- a/lib/Target/Mips/Mips16ISelLowering.h
+++ b/lib/Target/Mips/Mips16ISelLowering.h
@@ -27,7 +27,7 @@ namespace llvm {
bool *Fast) const override;
MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
+ EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const override;
private:
@@ -50,32 +50,32 @@ namespace llvm {
bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee,
SDValue Chain) const override;
- MachineBasicBlock *emitSel16(unsigned Opc, MachineInstr *MI,
+ MachineBasicBlock *emitSel16(unsigned Opc, MachineInstr &MI,
MachineBasicBlock *BB) const;
MachineBasicBlock *emitSeliT16(unsigned Opc1, unsigned Opc2,
- MachineInstr *MI,
+ MachineInstr &MI,
MachineBasicBlock *BB) const;
MachineBasicBlock *emitSelT16(unsigned Opc1, unsigned Opc2,
- MachineInstr *MI,
+ MachineInstr &MI,
MachineBasicBlock *BB) const;
MachineBasicBlock *emitFEXT_T8I816_ins(unsigned BtOpc, unsigned CmpOpc,
- MachineInstr *MI,
+ MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *emitFEXT_T8I8I16_ins(
- unsigned BtOpc, unsigned CmpiOpc, unsigned CmpiXOpc, bool ImmSigned,
- MachineInstr *MI, MachineBasicBlock *BB) const;
+ MachineBasicBlock *emitFEXT_T8I8I16_ins(unsigned BtOpc, unsigned CmpiOpc,
+ unsigned CmpiXOpc, bool ImmSigned,
+ MachineInstr &MI,
+ MachineBasicBlock *BB) const;
- MachineBasicBlock *emitFEXT_CCRX16_ins(
- unsigned SltOpc,
- MachineInstr *MI, MachineBasicBlock *BB) const;
+ MachineBasicBlock *emitFEXT_CCRX16_ins(unsigned SltOpc, MachineInstr &MI,
+ MachineBasicBlock *BB) const;
- MachineBasicBlock *emitFEXT_CCRXI16_ins(
- unsigned SltiOpc, unsigned SltiXOpc,
- MachineInstr *MI, MachineBasicBlock *BB )const;
+ MachineBasicBlock *emitFEXT_CCRXI16_ins(unsigned SltiOpc, unsigned SltiXOpc,
+ MachineInstr &MI,
+ MachineBasicBlock *BB) const;
};
}
diff --git a/lib/Target/Mips/Mips16InstrInfo.cpp b/lib/Target/Mips/Mips16InstrInfo.cpp
index da8ada4e5391..daa1355ffefd 100644
--- a/lib/Target/Mips/Mips16InstrInfo.cpp
+++ b/lib/Target/Mips/Mips16InstrInfo.cpp
@@ -15,12 +15,10 @@
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
@@ -43,7 +41,7 @@ const MipsRegisterInfo &Mips16InstrInfo::getRegisterInfo() const {
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
-unsigned Mips16InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+unsigned Mips16InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
return 0;
}
@@ -53,15 +51,15 @@ unsigned Mips16InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
-unsigned Mips16InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+unsigned Mips16InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
return 0;
}
void Mips16InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
unsigned Opc = 0;
if (Mips::CPU16RegsRegClass.contains(DestReg) &&
@@ -126,9 +124,9 @@ void Mips16InstrInfo::loadRegFromStack(MachineBasicBlock &MBB,
.addMemOperand(MMO);
}
-bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
- MachineBasicBlock &MBB = *MI->getParent();
- switch(MI->getDesc().getOpcode()) {
+bool Mips16InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
+ MachineBasicBlock &MBB = *MI.getParent();
+ switch (MI.getDesc().getOpcode()) {
default:
return false;
case Mips::RetRA16:
@@ -136,7 +134,7 @@ bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
break;
}
- MBB.erase(MI);
+ MBB.erase(MI.getIterator());
return true;
}
@@ -307,7 +305,8 @@ void Mips16InstrInfo::adjustStackPtr(unsigned SP, int64_t Amount,
unsigned Mips16InstrInfo::loadImmediate(unsigned FrameReg, int64_t Imm,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator II,
- DebugLoc DL, unsigned &NewImm) const {
+ const DebugLoc &DL,
+ unsigned &NewImm) const {
//
// given original instruction is:
// Instr rx, T[offset] where offset is too big.
@@ -326,7 +325,7 @@ unsigned Mips16InstrInfo::loadImmediate(unsigned FrameReg, int64_t Imm,
int Reg =0;
int SpReg = 0;
- rs.enterBasicBlock(&MBB);
+ rs.enterBasicBlock(MBB);
rs.forward(II);
//
// We need to know which registers can be used, in the case where there
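When loadImmediate has to materialize an offset that does not fit the instruction's immediate field, the classic MIPS approach splits the constant into a `lui` high half plus a sign-extended 16-bit low half. A hedged sketch of that split (the actual Mips16 sequence also involves register scavenging, which is not modeled here):

#include <cstdint>

// Split a 32-bit immediate so that (hi << 16) + sign_extend16(lo)
// reconstructs it (mod 2^32). Because the low half is sign-extended
// when added back, the high half absorbs a carry when bit 15 is set.
void splitImm32(uint32_t imm, uint32_t &hi, uint32_t &lo) {
  lo = imm & 0xffff;
  hi = ((imm >> 16) + ((lo >> 15) & 1)) & 0xffff;
}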
diff --git a/lib/Target/Mips/Mips16InstrInfo.h b/lib/Target/Mips/Mips16InstrInfo.h
index 6540b40bc9ab..ab559799f00b 100644
--- a/lib/Target/Mips/Mips16InstrInfo.h
+++ b/lib/Target/Mips/Mips16InstrInfo.h
@@ -32,7 +32,7 @@ public:
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
- unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// isStoreToStackSlot - If the specified machine instruction is a direct
@@ -40,12 +40,11 @@ public:
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
- unsigned isStoreToStackSlot(const MachineInstr *MI,
+ unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
- void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStack(MachineBasicBlock &MBB,
@@ -62,7 +61,7 @@ public:
const TargetRegisterInfo *TRI,
int64_t Offset) const override;
- bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
+ bool expandPostRAPseudo(MachineInstr &MI) const override;
unsigned getOppositeBranchOpc(unsigned Opc) const override;
@@ -83,9 +82,8 @@ public:
// This is to adjust some FrameReg. We return the new register to be used
// in place of FrameReg and the adjusted immediate field (&NewImm)
//
- unsigned loadImmediate(unsigned FrameReg,
- int64_t Imm, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator II, DebugLoc DL,
+ unsigned loadImmediate(unsigned FrameReg, int64_t Imm, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator II, const DebugLoc &DL,
unsigned &NewImm) const;
static bool validImmediate(unsigned Opcode, unsigned Reg, int64_t Amount);
diff --git a/lib/Target/Mips/Mips16InstrInfo.td b/lib/Target/Mips/Mips16InstrInfo.td
index dad6ea4c9e98..021fb8678686 100644
--- a/lib/Target/Mips/Mips16InstrInfo.td
+++ b/lib/Target/Mips/Mips16InstrInfo.td
@@ -14,14 +14,26 @@
//
// Mips Address
//
-def addr16 :
- ComplexPattern<iPTR, 3, "selectAddr16", [frameindex], [SDNPWantParent]>;
+def addr16 : ComplexPattern<iPTR, 2, "selectAddr16", [frameindex]>;
+def addr16sp : ComplexPattern<iPTR, 2, "selectAddr16SP", [frameindex]>;
//
// Address operand
def mem16 : Operand<i32> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops CPU16Regs, simm16, CPU16RegsPlusSP);
+ let MIOperandInfo = (ops CPU16Regs, simm16);
+ let EncoderMethod = "getMemEncoding";
+}
+
+def mem16sp : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+  // This should be CPUSPReg, but the MIPS16 subtarget cannot keep the
+  // sp-relative loads separate from the other varieties yet. This lie works
+  // around the problem well enough to silence the errors emitted by
+  // -verify-machineinstrs, and the output is still correct as long as an
+  // external assembler is used (already a requirement for MIPS16 for
+  // several other reasons).
+  // sp-relative loads separate from the other varieties yet. This lie works
+  // around the problem well enough to silence the errors emitted by
+  // -verify-machineinstrs, and the output is still correct as long as an
+  // external assembler is used (already a requirement for MIPS16 for
+  // several other reasons).
+ let MIOperandInfo = (ops CPU16RegsPlusSP, simm16);
let EncoderMethod = "getMemEncoding";
}
@@ -31,6 +43,8 @@ def mem16_ea : Operand<i32> {
let EncoderMethod = "getMemEncoding";
}
+def pcrel16 : Operand<i32>;
+
//
// I-type instruction format
//
@@ -115,7 +129,7 @@ class FEXT_CCRXI16_ins<string asmstr>:
//
class FJAL16_ins<bits<1> _X, string asmstr,
InstrItinClass itin>:
- FJAL16<_X, (outs), (ins simm20:$imm),
+ FJAL16<_X, (outs), (ins uimm26:$imm),
!strconcat(asmstr, "\t$imm\n\tnop"),[],
itin> {
let isCodeGenOnly=1;
@@ -124,7 +138,7 @@ class FJAL16_ins<bits<1> _X, string asmstr,
class FJALB16_ins<bits<1> _X, string asmstr,
InstrItinClass itin>:
- FJAL16<_X, (outs), (ins simm20:$imm),
+ FJAL16<_X, (outs), (ins uimm26:$imm),
!strconcat(asmstr, "\t$imm\t# branch\n\tnop"),[],
itin> {
let isCodeGenOnly=1;
@@ -213,19 +227,6 @@ class FEXT_2RI16_ins<bits<5> _op, string asmstr,
let Constraints = "$rx_ = $rx";
}
-
-// this has an explicit sp argument that we ignore to work around a problem
-// in the compiler
-class FEXT_RI16_SP_explicit_ins<bits<5> _op, string asmstr,
- InstrItinClass itin>:
- FEXT_RI16<_op, (outs CPU16Regs:$rx), (ins CPUSPReg:$ry, simm16:$imm),
- !strconcat(asmstr, "\t$rx, $imm ( $ry ); "), [], itin>;
-
-class FEXT_RI16_SP_Store_explicit_ins<bits<5> _op, string asmstr,
- InstrItinClass itin>:
- FEXT_RI16<_op, (outs), (ins CPU16Regs:$rx, CPUSPReg:$ry, simm16:$imm),
- !strconcat(asmstr, "\t$rx, $imm ( $ry ); "), [], itin>;
-
//
// EXT-RRI instruction format
//
@@ -483,13 +484,11 @@ class SelT<string op1, string op2>:
//
// 32 bit constant
//
-def imm32: Operand<i32>;
-
def Constant32:
- MipsPseudo16<(outs), (ins imm32:$imm), "\t.word $imm", []>;
+ MipsPseudo16<(outs), (ins simm32:$imm), "\t.word $imm", []>;
def LwConstant32:
- MipsPseudo16<(outs CPU16Regs:$rx), (ins imm32:$imm, imm32:$constid),
+ MipsPseudo16<(outs CPU16Regs:$rx), (ins simm32:$imm, simm32:$constid),
"lw\t$rx, 1f\n\tb\t2f\n\t.align\t2\n1: \t.word\t$imm\n2:", []>;
@@ -635,7 +634,7 @@ def BnezRxImmX16: FEXT_RI16_B_ins<0b00101, "bnez", IIM16Alu>, cbranch16;
// Purpose: Breakpoint
// To cause a Breakpoint exception.
-def Break16: FRRBreakNull16_ins<"break 0", NoItinerary>;
+def Break16: FRRBreakNull16_ins<"break 0", IIM16Alu>;
//
// Format: BTEQZ offset MIPS16e
// Purpose: Branch on T Equal to Zero (Extended)
@@ -851,9 +850,7 @@ def LwRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lw", mem16, II_LW>, MayLoad{
// Purpose: Load Word (SP-Relative, Extended)
// To load an SP-relative word from memory as a signed value.
//
-def LwRxSpImmX16: FEXT_RI16_SP_explicit_ins<0b10010, "lw", II_LW>, MayLoad{
- let Uses = [SP];
-}
+def LwRxSpImmX16: FEXT_RRI16_mem_ins<0b10010, "lw", mem16sp, II_LW>, MayLoad;
def LwRxPcTcp16: FRI16_TCP_ins<0b10110, "lw", II_LW>, MayLoad;
@@ -1277,16 +1274,14 @@ def SubuRxRyRz16: FRRR16_ins<0b11, "subu", IIM16Alu>, ArithLogic16Defs<0>;
// Purpose: Store Word (Extended)
// To store a word to memory.
//
-def SwRxRyOffMemX16:
- FEXT_RRI16_mem2_ins<0b11011, "sw", mem16, II_SW>, MayStore;
+def SwRxRyOffMemX16: FEXT_RRI16_mem2_ins<0b11011, "sw", mem16, II_SW>, MayStore;
//
// Format: SW rx, offset(sp) MIPS16e
// Purpose: Store Word rx (SP-Relative)
// To store an SP-relative word to memory.
//
-def SwRxSpImmX16: FEXT_RI16_SP_Store_explicit_ins
- <0b11010, "sw", II_SW>, MayStore;
+def SwRxSpImmX16: FEXT_RRI16_mem2_ins<0b11010, "sw", mem16sp, II_SW>, MayStore;
//
//
@@ -1340,22 +1335,21 @@ def: shift_rotate_reg16_pat<shl, SllvRxRy16>;
def: shift_rotate_reg16_pat<sra, SravRxRy16>;
def: shift_rotate_reg16_pat<srl, SrlvRxRy16>;
-class LoadM16_pat<PatFrag OpNode, Instruction I> :
- Mips16Pat<(OpNode addr16:$addr), (I addr16:$addr)>;
+class LoadM16_pat<PatFrag OpNode, Instruction I, ComplexPattern Addr> :
+ Mips16Pat<(OpNode Addr:$addr), (I Addr:$addr)>;
-def: LoadM16_pat<sextloadi8, LbRxRyOffMemX16>;
-def: LoadM16_pat<zextloadi8, LbuRxRyOffMemX16>;
-def: LoadM16_pat<sextloadi16, LhRxRyOffMemX16>;
-def: LoadM16_pat<zextloadi16, LhuRxRyOffMemX16>;
-def: LoadM16_pat<load, LwRxRyOffMemX16>;
+def: LoadM16_pat<sextloadi8, LbRxRyOffMemX16, addr16>;
+def: LoadM16_pat<zextloadi8, LbuRxRyOffMemX16, addr16>;
+def: LoadM16_pat<sextloadi16, LhRxRyOffMemX16, addr16>;
+def: LoadM16_pat<zextloadi16, LhuRxRyOffMemX16, addr16>;
+def: LoadM16_pat<load, LwRxSpImmX16, addr16sp>;
-class StoreM16_pat<PatFrag OpNode, Instruction I> :
- Mips16Pat<(OpNode CPU16Regs:$r, addr16:$addr),
- (I CPU16Regs:$r, addr16:$addr)>;
+class StoreM16_pat<PatFrag OpNode, Instruction I, ComplexPattern Addr> :
+ Mips16Pat<(OpNode CPU16Regs:$r, Addr:$addr), (I CPU16Regs:$r, Addr:$addr)>;
-def: StoreM16_pat<truncstorei8, SbRxRyOffMemX16>;
-def: StoreM16_pat<truncstorei16, ShRxRyOffMemX16>;
-def: StoreM16_pat<store, SwRxRyOffMemX16>;
+def: StoreM16_pat<truncstorei8, SbRxRyOffMemX16, addr16>;
+def: StoreM16_pat<truncstorei16, ShRxRyOffMemX16, addr16>;
+def: StoreM16_pat<store, SwRxSpImmX16, addr16sp>;
// Unconditional branch
class UncondBranch16_pat<SDNode OpNode, Instruction I>:
@@ -1401,8 +1395,7 @@ class SetCC_I16<PatFrag cond_op, PatLeaf imm_type, Instruction I>:
(I CPU16Regs:$rx, imm_type:$imm16)>;
-def: Mips16Pat<(i32 addr16:$addr),
- (AddiuRxRyOffMemX16 addr16:$addr)>;
+def: Mips16Pat<(i32 addr16sp:$addr), (AddiuRxRyOffMemX16 addr16sp:$addr)>;
// Large (>16 bit) immediate loads
@@ -1551,7 +1544,7 @@ def: UncondBranch16_pat<br, Bimm16>;
// Small immediates
def: Mips16Pat<(i32 immSExt16:$in),
- (AddiuRxRxImmX16 (Move32R16 ZERO), immSExt16:$in)>;
+ (AddiuRxRxImmX16 (MoveR3216 ZERO), immSExt16:$in)>;
def: Mips16Pat<(i32 immZExt16:$in), (LiRxImmX16 immZExt16:$in)>;
diff --git a/lib/Target/Mips/Mips16RegisterInfo.cpp b/lib/Target/Mips/Mips16RegisterInfo.cpp
index ebd51d7b5072..b034c26640e3 100644
--- a/lib/Target/Mips/Mips16RegisterInfo.cpp
+++ b/lib/Target/Mips/Mips16RegisterInfo.cpp
@@ -14,11 +14,9 @@
#include "Mips16RegisterInfo.h"
#include "Mips.h"
#include "Mips16InstrInfo.h"
-#include "MipsAnalyzeImmediate.h"
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
#include "MipsSubtarget.h"
-#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -28,7 +26,6 @@
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/lib/Target/Mips/Mips32r6InstrFormats.td b/lib/Target/Mips/Mips32r6InstrFormats.td
index 13216bebe5b6..a20c683c1e5e 100644
--- a/lib/Target/Mips/Mips32r6InstrFormats.td
+++ b/lib/Target/Mips/Mips32r6InstrFormats.td
@@ -360,7 +360,7 @@ class SPECIAL_SDBBP_FM : MipsR6Inst {
}
// This class is ambiguous with other branches:
-// BEQC/BNEC require that rs > rt
+// BEQC/BNEC require that rs < rt && rs != 0
class CMP_BRANCH_2R_OFF16_FM<OPGROUP funct> : MipsR6Inst {
bits<5> rs;
bits<5> rt;
diff --git a/lib/Target/Mips/Mips32r6InstrInfo.td b/lib/Target/Mips/Mips32r6InstrInfo.td
index 82d2c8ee9905..f552f8d37afb 100644
--- a/lib/Target/Mips/Mips32r6InstrInfo.td
+++ b/lib/Target/Mips/Mips32r6InstrInfo.td
@@ -182,58 +182,86 @@ class CMP_CONDN_DESC_BASE<string CondStr, string Typestr,
dag InOperandList = (ins FGROpnd:$fs, FGROpnd:$ft);
string AsmString = !strconcat("cmp.", CondStr, ".", Typestr, "\t$fd, $fs, $ft");
list<dag> Pattern = [(set FGRCCOpnd:$fd, (Op FGROpnd:$fs, FGROpnd:$ft))];
+ bit isCTI = 1;
}
multiclass CMP_CC_M <FIELD_CMP_FORMAT Format, string Typestr,
RegisterOperand FGROpnd>{
let AdditionalPredicates = [NotInMicroMips] in {
- def CMP_F_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_AF>,
+ def CMP_F_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_AF>,
CMP_CONDN_DESC_BASE<"af", Typestr, FGROpnd>,
+ MipsR6Arch<!strconcat("cmp.af.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
- def CMP_UN_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_UN>,
+ def CMP_UN_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_UN>,
CMP_CONDN_DESC_BASE<"un", Typestr, FGROpnd, setuo>,
+ MipsR6Arch<!strconcat("cmp.un.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
- def CMP_EQ_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_EQ>,
+ def CMP_EQ_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_EQ>,
CMP_CONDN_DESC_BASE<"eq", Typestr, FGROpnd, setoeq>,
+ MipsR6Arch<!strconcat("cmp.eq.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
- def CMP_UEQ_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_UEQ>,
+ def CMP_UEQ_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format,
+ FIELD_CMP_COND_UEQ>,
CMP_CONDN_DESC_BASE<"ueq", Typestr, FGROpnd, setueq>,
+ MipsR6Arch<!strconcat("cmp.ueq.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
- def CMP_LT_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_LT>,
+ def CMP_LT_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_LT>,
CMP_CONDN_DESC_BASE<"lt", Typestr, FGROpnd, setolt>,
+ MipsR6Arch<!strconcat("cmp.lt.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
- def CMP_ULT_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_ULT>,
+ def CMP_ULT_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format,
+ FIELD_CMP_COND_ULT>,
CMP_CONDN_DESC_BASE<"ult", Typestr, FGROpnd, setult>,
+ MipsR6Arch<!strconcat("cmp.ult.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
- def CMP_LE_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_LE>,
+ def CMP_LE_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_LE>,
CMP_CONDN_DESC_BASE<"le", Typestr, FGROpnd, setole>,
+ MipsR6Arch<!strconcat("cmp.le.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
- def CMP_ULE_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_ULE>,
+ def CMP_ULE_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format,
+ FIELD_CMP_COND_ULE>,
CMP_CONDN_DESC_BASE<"ule", Typestr, FGROpnd, setule>,
+ MipsR6Arch<!strconcat("cmp.ule.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
- def CMP_SAF_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SAF>,
+ def CMP_SAF_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format,
+ FIELD_CMP_COND_SAF>,
CMP_CONDN_DESC_BASE<"saf", Typestr, FGROpnd>,
+ MipsR6Arch<!strconcat("cmp.saf.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
- def CMP_SUN_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SUN>,
+ def CMP_SUN_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format,
+ FIELD_CMP_COND_SUN>,
CMP_CONDN_DESC_BASE<"sun", Typestr, FGROpnd>,
+ MipsR6Arch<!strconcat("cmp.sun.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
- def CMP_SEQ_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SEQ>,
+ def CMP_SEQ_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format,
+ FIELD_CMP_COND_SEQ>,
CMP_CONDN_DESC_BASE<"seq", Typestr, FGROpnd>,
+ MipsR6Arch<!strconcat("cmp.seq.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
- def CMP_SUEQ_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SUEQ>,
+ def CMP_SUEQ_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format,
+ FIELD_CMP_COND_SUEQ>,
CMP_CONDN_DESC_BASE<"sueq", Typestr, FGROpnd>,
+ MipsR6Arch<!strconcat("cmp.sueq.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
- def CMP_SLT_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SLT>,
+ def CMP_SLT_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format,
+ FIELD_CMP_COND_SLT>,
CMP_CONDN_DESC_BASE<"slt", Typestr, FGROpnd>,
+ MipsR6Arch<!strconcat("cmp.slt.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
- def CMP_SULT_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SULT>,
+ def CMP_SULT_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format,
+ FIELD_CMP_COND_SULT>,
CMP_CONDN_DESC_BASE<"sult", Typestr, FGROpnd>,
+ MipsR6Arch<!strconcat("cmp.sult.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
- def CMP_SLE_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SLE>,
+ def CMP_SLE_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format,
+ FIELD_CMP_COND_SLE>,
CMP_CONDN_DESC_BASE<"sle", Typestr, FGROpnd>,
+ MipsR6Arch<!strconcat("cmp.sle.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
- def CMP_SULE_#NAME : COP1_CMP_CONDN_FM<Format, FIELD_CMP_COND_SULE>,
+ def CMP_SULE_#NAME : R6MMR6Rel, COP1_CMP_CONDN_FM<Format,
+ FIELD_CMP_COND_SULE>,
CMP_CONDN_DESC_BASE<"sule", Typestr, FGROpnd>,
+ MipsR6Arch<!strconcat("cmp.sule.", Typestr)>,
ISA_MIPS32R6, HARDFLOAT;
}
}
@@ -245,52 +273,62 @@ multiclass CMP_CC_M <FIELD_CMP_FORMAT Format, string Typestr,
//===----------------------------------------------------------------------===//
class PCREL_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
- Operand ImmOpnd> : MipsR6Arch<instr_asm> {
+ Operand ImmOpnd, InstrItinClass itin>
+ : MipsR6Arch<instr_asm> {
dag OutOperandList = (outs GPROpnd:$rs);
dag InOperandList = (ins ImmOpnd:$imm);
string AsmString = !strconcat(instr_asm, "\t$rs, $imm");
list<dag> Pattern = [];
+ InstrItinClass Itinerary = itin;
}
-class ADDIUPC_DESC : PCREL_DESC_BASE<"addiupc", GPR32Opnd, simm19_lsl2>;
-class LWPC_DESC: PCREL_DESC_BASE<"lwpc", GPR32Opnd, simm19_lsl2>;
-class LWUPC_DESC: PCREL_DESC_BASE<"lwupc", GPR32Opnd, simm19_lsl2>;
+class ADDIUPC_DESC : PCREL_DESC_BASE<"addiupc", GPR32Opnd, simm19_lsl2,
+ II_ADDIUPC>;
+class LWPC_DESC: PCREL_DESC_BASE<"lwpc", GPR32Opnd, simm19_lsl2, II_LWPC>;
+class LWUPC_DESC: PCREL_DESC_BASE<"lwupc", GPR32Opnd, simm19_lsl2, II_LWUPC>;
class ALIGN_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
- Operand ImmOpnd> : MipsR6Arch<instr_asm> {
+ Operand ImmOpnd, InstrItinClass itin>
+ : MipsR6Arch<instr_asm> {
dag OutOperandList = (outs GPROpnd:$rd);
dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt, ImmOpnd:$bp);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt, $bp");
list<dag> Pattern = [];
+ InstrItinClass Itinerary = itin;
}
-class ALIGN_DESC : ALIGN_DESC_BASE<"align", GPR32Opnd, uimm2>;
+class ALIGN_DESC : ALIGN_DESC_BASE<"align", GPR32Opnd, uimm2, II_ALIGN>;
-class ALUIPC_DESC_BASE<string instr_asm, RegisterOperand GPROpnd>
- : MipsR6Arch<instr_asm> {
+class ALUIPC_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ InstrItinClass itin = NoItinerary>
+ : MipsR6Arch<instr_asm> {
dag OutOperandList = (outs GPROpnd:$rs);
dag InOperandList = (ins simm16:$imm);
string AsmString = !strconcat(instr_asm, "\t$rs, $imm");
list<dag> Pattern = [];
+ InstrItinClass Itinerary = itin;
}
-class ALUIPC_DESC : ALUIPC_DESC_BASE<"aluipc", GPR32Opnd>;
-class AUIPC_DESC : ALUIPC_DESC_BASE<"auipc", GPR32Opnd>;
+class ALUIPC_DESC : ALUIPC_DESC_BASE<"aluipc", GPR32Opnd, II_ALUIPC>;
+class AUIPC_DESC : ALUIPC_DESC_BASE<"auipc", GPR32Opnd, II_AUIPC>;
-class AUI_DESC_BASE<string instr_asm, RegisterOperand GPROpnd>
- : MipsR6Arch<instr_asm> {
+class AUI_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ InstrItinClass itin = NoItinerary>
+ : MipsR6Arch<instr_asm> {
dag OutOperandList = (outs GPROpnd:$rs);
dag InOperandList = (ins GPROpnd:$rt, simm16:$imm);
string AsmString = !strconcat(instr_asm, "\t$rs, $rt, $imm");
list<dag> Pattern = [];
+ InstrItinClass Itinerary = itin;
}
-class AUI_DESC : AUI_DESC_BASE<"aui", GPR32Opnd>;
+class AUI_DESC : AUI_DESC_BASE<"aui", GPR32Opnd, II_AUI>;
class BRANCH_DESC_BASE {
bit isBranch = 1;
bit isTerminator = 1;
bit hasDelaySlot = 0;
+ bit isCTI = 1;
}
class BC_DESC_BASE<string instr_asm, DAGOperand opnd> : BRANCH_DESC_BASE,
@@ -299,22 +337,32 @@ class BC_DESC_BASE<string instr_asm, DAGOperand opnd> : BRANCH_DESC_BASE,
dag OutOperandList = (outs);
string AsmString = !strconcat(instr_asm, "\t$offset");
bit isBarrier = 1;
+ InstrItinClass Itinerary = II_BC;
+ bit isCTI = 1;
}
class CMP_BC_DESC_BASE<string instr_asm, DAGOperand opnd,
- RegisterOperand GPROpnd> : BRANCH_DESC_BASE {
+ RegisterOperand GPROpnd> : BRANCH_DESC_BASE,
+ MipsR6Arch<instr_asm> {
dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt, opnd:$offset);
dag OutOperandList = (outs);
string AsmString = !strconcat(instr_asm, "\t$rs, $rt, $offset");
list<Register> Defs = [AT];
+ InstrItinClass Itinerary = II_BCCC;
+ bit hasForbiddenSlot = 1;
+ bit isCTI = 1;
}
class CMP_CBR_EQNE_Z_DESC_BASE<string instr_asm, DAGOperand opnd,
- RegisterOperand GPROpnd> : BRANCH_DESC_BASE {
+ RegisterOperand GPROpnd>
+ : BRANCH_DESC_BASE, MipsR6Arch<instr_asm> {
dag InOperandList = (ins GPROpnd:$rs, opnd:$offset);
dag OutOperandList = (outs);
string AsmString = !strconcat(instr_asm, "\t$rs, $offset");
list<Register> Defs = [AT];
+ InstrItinClass Itinerary = II_BCCZC;
+ bit hasForbiddenSlot = 1;
+ bit isCTI = 1;
}
class CMP_CBR_RT_Z_DESC_BASE<string instr_asm, DAGOperand opnd,
@@ -324,17 +372,23 @@ class CMP_CBR_RT_Z_DESC_BASE<string instr_asm, DAGOperand opnd,
dag OutOperandList = (outs);
string AsmString = !strconcat(instr_asm, "\t$rt, $offset");
list<Register> Defs = [AT];
+ InstrItinClass Itinerary = II_BCCZC;
+ bit hasForbiddenSlot = 1;
+ bit isCTI = 1;
}
class BAL_DESC : BC_DESC_BASE<"bal", brtarget> {
bit isCall = 1;
bit hasDelaySlot = 1;
list<Register> Defs = [RA];
+ bit isCTI = 1;
}
class BALC_DESC : BC_DESC_BASE<"balc", brtarget26> {
bit isCall = 1;
list<Register> Defs = [RA];
+ InstrItinClass Itinerary = II_BALC;
+ bit isCTI = 1;
}
class BC_DESC : BC_DESC_BASE<"bc", brtarget26>;
@@ -360,6 +414,7 @@ class COP1_BCCZ_DESC_BASE<string instr_asm> : BRANCH_DESC_BASE {
dag OutOperandList = (outs);
string AsmString = instr_asm;
bit hasDelaySlot = 1;
+ InstrItinClass Itinerary = II_BC1CCZ;
}
class BC1EQZ_DESC : COP1_BCCZ_DESC_BASE<"bc1eqz $ft, $offset">;
@@ -370,6 +425,7 @@ class COP2_BCCZ_DESC_BASE<string instr_asm> : BRANCH_DESC_BASE {
dag OutOperandList = (outs);
string AsmString = instr_asm;
bit hasDelaySlot = 1;
+ bit isCTI = 1;
}
class BC2EQZ_DESC : COP2_BCCZ_DESC_BASE<"bc2eqz $ct, $offset">;
@@ -379,23 +435,29 @@ class BOVC_DESC : CMP_BC_DESC_BASE<"bovc", brtarget, GPR32Opnd>;
class BNVC_DESC : CMP_BC_DESC_BASE<"bnvc", brtarget, GPR32Opnd>;
class JMP_IDX_COMPACT_DESC_BASE<string opstr, DAGOperand opnd,
- RegisterOperand GPROpnd>
+ RegisterOperand GPROpnd,
+ InstrItinClass itin = NoItinerary>
: MipsR6Arch<opstr> {
dag InOperandList = (ins GPROpnd:$rt, opnd:$offset);
string AsmString = !strconcat(opstr, "\t$rt, $offset");
list<dag> Pattern = [];
- bit isTerminator = 1;
bit hasDelaySlot = 0;
+ InstrItinClass Itinerary = itin;
+ bit isCTI = 1;
+ bit isBranch = 1;
+ bit isIndirectBranch = 1;
}
class JIALC_DESC : JMP_IDX_COMPACT_DESC_BASE<"jialc", calloffset16,
- GPR32Opnd> {
+ GPR32Opnd, II_JIALC> {
bit isCall = 1;
list<Register> Defs = [RA];
}
-class JIC_DESC : JMP_IDX_COMPACT_DESC_BASE<"jic", jmpoffset16, GPR32Opnd> {
+class JIC_DESC : JMP_IDX_COMPACT_DESC_BASE<"jic", jmpoffset16,
+ GPR32Opnd, II_JIALC> {
bit isBarrier = 1;
+ bit isTerminator = 1;
list<Register> Defs = [AT];
}
@@ -405,35 +467,39 @@ class JR_HB_R6_DESC : JR_HB_DESC_BASE<"jr.hb", GPR32Opnd> {
bit hasDelaySlot = 1;
bit isTerminator=1;
bit isBarrier=1;
+ bit isCTI = 1;
}
-class BITSWAP_DESC_BASE<string instr_asm, RegisterOperand GPROpnd>
+class BITSWAP_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ InstrItinClass itin>
: MipsR6Arch<instr_asm> {
dag OutOperandList = (outs GPROpnd:$rd);
dag InOperandList = (ins GPROpnd:$rt);
string AsmString = !strconcat(instr_asm, "\t$rd, $rt");
list<dag> Pattern = [];
+ InstrItinClass Itinerary = itin;
}
-class BITSWAP_DESC : BITSWAP_DESC_BASE<"bitswap", GPR32Opnd>;
+class BITSWAP_DESC : BITSWAP_DESC_BASE<"bitswap", GPR32Opnd, II_BITSWAP>;
class DIVMOD_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ InstrItinClass itin,
SDPatternOperator Op=null_frag>
: MipsR6Arch<instr_asm> {
dag OutOperandList = (outs GPROpnd:$rd);
dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
list<dag> Pattern = [(set GPROpnd:$rd, (Op GPROpnd:$rs, GPROpnd:$rt))];
-
+ InstrItinClass Itinerary = itin;
// This instruction doesn't trap division by zero itself. We must insert
// teq instructions as well.
bit usesCustomInserter = 1;
}
-class DIV_DESC : DIVMOD_DESC_BASE<"div", GPR32Opnd, sdiv>;
-class DIVU_DESC : DIVMOD_DESC_BASE<"divu", GPR32Opnd, udiv>;
-class MOD_DESC : DIVMOD_DESC_BASE<"mod", GPR32Opnd, srem>;
-class MODU_DESC : DIVMOD_DESC_BASE<"modu", GPR32Opnd, urem>;
+class DIV_DESC : DIVMOD_DESC_BASE<"div", GPR32Opnd, II_DIV, sdiv>;
+class DIVU_DESC : DIVMOD_DESC_BASE<"divu", GPR32Opnd, II_DIVU, udiv>;
+class MOD_DESC : DIVMOD_DESC_BASE<"mod", GPR32Opnd, II_MOD, srem>;
+class MODU_DESC : DIVMOD_DESC_BASE<"modu", GPR32Opnd, II_MODU, urem>;
class BEQZALC_DESC : CMP_CBR_RT_Z_DESC_BASE<"beqzalc", brtarget, GPR32Opnd> {
list<Register> Defs = [RA];
@@ -460,17 +526,19 @@ class BNEZALC_DESC : CMP_CBR_RT_Z_DESC_BASE<"bnezalc", brtarget, GPR32Opnd> {
}
class MUL_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ InstrItinClass itin,
SDPatternOperator Op=null_frag> : MipsR6Arch<instr_asm> {
dag OutOperandList = (outs GPROpnd:$rd);
dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
list<dag> Pattern = [(set GPROpnd:$rd, (Op GPROpnd:$rs, GPROpnd:$rt))];
+ InstrItinClass Itinerary = itin;
}
-class MUH_DESC : MUL_R6_DESC_BASE<"muh", GPR32Opnd, mulhs>;
-class MUHU_DESC : MUL_R6_DESC_BASE<"muhu", GPR32Opnd, mulhu>;
-class MUL_R6_DESC : MUL_R6_DESC_BASE<"mul", GPR32Opnd, mul>;
-class MULU_DESC : MUL_R6_DESC_BASE<"mulu", GPR32Opnd>;
+class MUH_DESC : MUL_R6_DESC_BASE<"muh", GPR32Opnd, II_MUH, mulhs>;
+class MUHU_DESC : MUL_R6_DESC_BASE<"muhu", GPR32Opnd, II_MUHU, mulhu>;
+class MUL_R6_DESC : MUL_R6_DESC_BASE<"mul", GPR32Opnd, II_MUL, mul>;
+class MULU_DESC : MUL_R6_DESC_BASE<"mulu", GPR32Opnd, II_MULU>;
class COP1_SEL_DESC_BASE<string instr_asm, RegisterOperand FGROpnd> {
dag OutOperandList = (outs FGROpnd:$fd);
@@ -482,11 +550,11 @@ class COP1_SEL_DESC_BASE<string instr_asm, RegisterOperand FGROpnd> {
string Constraints = "$fd_in = $fd";
}
-class SEL_D_DESC : COP1_SEL_DESC_BASE<"sel.d", FGR64Opnd> {
+class SEL_D_DESC : COP1_SEL_DESC_BASE<"sel.d", FGR64Opnd>, MipsR6Arch<"sel.d"> {
// We must insert a SUBREG_TO_REG around $fd_in
bit usesCustomInserter = 1;
}
-class SEL_S_DESC : COP1_SEL_DESC_BASE<"sel.s", FGR32Opnd>;
+class SEL_S_DESC : COP1_SEL_DESC_BASE<"sel.s", FGR32Opnd>, MipsR6Arch<"sel.s">;
class SELEQNE_Z_DESC_BASE<string instr_asm, RegisterOperand GPROpnd>
: MipsR6Arch<instr_asm> {
@@ -494,23 +562,26 @@ class SELEQNE_Z_DESC_BASE<string instr_asm, RegisterOperand GPROpnd>
dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
list<dag> Pattern = [];
+ InstrItinClass Itinerary = II_SELCCZ;
}
class SELEQZ_DESC : SELEQNE_Z_DESC_BASE<"seleqz", GPR32Opnd>;
class SELNEZ_DESC : SELEQNE_Z_DESC_BASE<"selnez", GPR32Opnd>;
-class COP1_4R_DESC_BASE<string instr_asm, RegisterOperand FGROpnd> {
+class COP1_4R_DESC_BASE<string instr_asm, RegisterOperand FGROpnd,
+ InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs FGROpnd:$fd);
dag InOperandList = (ins FGROpnd:$fd_in, FGROpnd:$fs, FGROpnd:$ft);
string AsmString = !strconcat(instr_asm, "\t$fd, $fs, $ft");
list<dag> Pattern = [];
string Constraints = "$fd_in = $fd";
+ InstrItinClass Itinerary = itin;
}
-class MADDF_S_DESC : COP1_4R_DESC_BASE<"maddf.s", FGR32Opnd>;
-class MADDF_D_DESC : COP1_4R_DESC_BASE<"maddf.d", FGR64Opnd>;
-class MSUBF_S_DESC : COP1_4R_DESC_BASE<"msubf.s", FGR32Opnd>;
-class MSUBF_D_DESC : COP1_4R_DESC_BASE<"msubf.d", FGR64Opnd>;
+class MADDF_S_DESC : COP1_4R_DESC_BASE<"maddf.s", FGR32Opnd, II_MADDF_S>;
+class MADDF_D_DESC : COP1_4R_DESC_BASE<"maddf.d", FGR64Opnd, II_MADDF_D>;
+class MSUBF_S_DESC : COP1_4R_DESC_BASE<"msubf.s", FGR32Opnd, II_MSUBF_S>;
+class MSUBF_D_DESC : COP1_4R_DESC_BASE<"msubf.d", FGR64Opnd, II_MSUBF_D>;
class MAX_MIN_DESC_BASE<string instr_asm, RegisterOperand FGROpnd> {
dag OutOperandList = (outs FGROpnd:$fd);
@@ -536,10 +607,14 @@ class SELEQNEZ_DESC_BASE<string instr_asm, RegisterOperand FGROpnd> {
list<dag> Pattern = [];
}
-class SELEQZ_S_DESC : SELEQNEZ_DESC_BASE<"seleqz.s", FGR32Opnd>;
-class SELEQZ_D_DESC : SELEQNEZ_DESC_BASE<"seleqz.d", FGR64Opnd>;
-class SELNEZ_S_DESC : SELEQNEZ_DESC_BASE<"selnez.s", FGR32Opnd>;
-class SELNEZ_D_DESC : SELEQNEZ_DESC_BASE<"selnez.d", FGR64Opnd>;
+class SELEQZ_S_DESC : SELEQNEZ_DESC_BASE<"seleqz.s", FGR32Opnd>,
+ MipsR6Arch<"seleqz.s">;
+class SELEQZ_D_DESC : SELEQNEZ_DESC_BASE<"seleqz.d", FGR64Opnd>,
+ MipsR6Arch<"seleqz.d">;
+class SELNEZ_S_DESC : SELEQNEZ_DESC_BASE<"selnez.s", FGR32Opnd>,
+ MipsR6Arch<"selnez.s">;
+class SELNEZ_D_DESC : SELEQNEZ_DESC_BASE<"selnez.d", FGR64Opnd>,
+ MipsR6Arch<"selnez.d">;
class CLASS_RINT_DESC_BASE<string instr_asm, RegisterOperand FGROpnd> {
dag OutOperandList = (outs FGROpnd:$fd);
@@ -590,61 +665,73 @@ class SDC2_R6_DESC : COP2ST_DESC_BASE<"sdc2", COP2Opnd>;
class SWC2_R6_DESC : COP2ST_DESC_BASE<"swc2", COP2Opnd>;
class LSA_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
- Operand ImmOpnd> : MipsR6Arch<instr_asm> {
+ Operand ImmOpnd, InstrItinClass itin>
+ : MipsR6Arch<instr_asm> {
dag OutOperandList = (outs GPROpnd:$rd);
dag InOperandList = (ins GPROpnd:$rs, GPROpnd:$rt, ImmOpnd:$imm2);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt, $imm2");
list<dag> Pattern = [];
+ InstrItinClass Itinerary = itin;
}
-class LSA_R6_DESC : LSA_R6_DESC_BASE<"lsa", GPR32Opnd, uimm2_plus1>;
+class LSA_R6_DESC : LSA_R6_DESC_BASE<"lsa", GPR32Opnd, uimm2_plus1, II_LSA>;
-class LL_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+class LL_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ Operand MemOpnd, InstrItinClass itin>
+ : MipsR6Arch<instr_asm> {
dag OutOperandList = (outs GPROpnd:$rt);
- dag InOperandList = (ins mem_simm9:$addr);
+ dag InOperandList = (ins MemOpnd:$addr);
string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
list<dag> Pattern = [];
bit mayLoad = 1;
+ InstrItinClass Itinerary = itin;
}
-class LL_R6_DESC : LL_R6_DESC_BASE<"ll", GPR32Opnd>;
+class LL_R6_DESC : LL_R6_DESC_BASE<"ll", GPR32Opnd, mem_simm9, II_LL>;
-class SC_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+class SC_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ InstrItinClass itin> {
dag OutOperandList = (outs GPROpnd:$dst);
dag InOperandList = (ins GPROpnd:$rt, mem_simm9:$addr);
string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
list<dag> Pattern = [];
bit mayStore = 1;
string Constraints = "$rt = $dst";
+ InstrItinClass Itinerary = itin;
}
-class SC_R6_DESC : SC_R6_DESC_BASE<"sc", GPR32Opnd>;
+class SC_R6_DESC : SC_R6_DESC_BASE<"sc", GPR32Opnd, II_SC>;
-class CLO_CLZ_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd>
+class CLO_CLZ_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ InstrItinClass itin>
: MipsR6Arch<instr_asm> {
dag OutOperandList = (outs GPROpnd:$rd);
dag InOperandList = (ins GPROpnd:$rs);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs");
+ InstrItinClass Itinerary = itin;
}
-class CLO_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> :
- CLO_CLZ_R6_DESC_BASE<instr_asm, GPROpnd> {
+class CLO_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ InstrItinClass itin> :
+ CLO_CLZ_R6_DESC_BASE<instr_asm, GPROpnd, itin> {
list<dag> Pattern = [(set GPROpnd:$rd, (ctlz (not GPROpnd:$rs)))];
}
-class CLZ_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> :
- CLO_CLZ_R6_DESC_BASE<instr_asm, GPROpnd> {
+class CLZ_R6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ InstrItinClass itin> :
+ CLO_CLZ_R6_DESC_BASE<instr_asm, GPROpnd, itin> {
list<dag> Pattern = [(set GPROpnd:$rd, (ctlz GPROpnd:$rs))];
}
-class CLO_R6_DESC : CLO_R6_DESC_BASE<"clo", GPR32Opnd>;
-class CLZ_R6_DESC : CLZ_R6_DESC_BASE<"clz", GPR32Opnd>;
+class CLO_R6_DESC : CLO_R6_DESC_BASE<"clo", GPR32Opnd, II_CLO>;
+class CLZ_R6_DESC : CLZ_R6_DESC_BASE<"clz", GPR32Opnd, II_CLZ>;
class SDBBP_R6_DESC {
dag OutOperandList = (outs);
dag InOperandList = (ins uimm20:$code_);
string AsmString = "sdbbp\t$code_";
list<dag> Pattern = [];
+ bit isCTI = 1;
}
//===----------------------------------------------------------------------===//
@@ -660,16 +747,18 @@ def AUI : R6MMR6Rel, AUI_ENC, AUI_DESC, ISA_MIPS32R6;
def AUIPC : R6MMR6Rel, AUIPC_ENC, AUIPC_DESC, ISA_MIPS32R6;
def BAL : BAL_ENC, BAL_DESC, ISA_MIPS32R6;
def BALC : R6MMR6Rel, BALC_ENC, BALC_DESC, ISA_MIPS32R6;
-def BC1EQZ : BC1EQZ_ENC, BC1EQZ_DESC, ISA_MIPS32R6, HARDFLOAT;
-def BC1NEZ : BC1NEZ_ENC, BC1NEZ_DESC, ISA_MIPS32R6, HARDFLOAT;
-def BC2EQZ : BC2EQZ_ENC, BC2EQZ_DESC, ISA_MIPS32R6;
-def BC2NEZ : BC2NEZ_ENC, BC2NEZ_DESC, ISA_MIPS32R6;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def BC1EQZ : BC1EQZ_ENC, BC1EQZ_DESC, ISA_MIPS32R6, HARDFLOAT;
+ def BC1NEZ : BC1NEZ_ENC, BC1NEZ_DESC, ISA_MIPS32R6, HARDFLOAT;
+ def BC2EQZ : BC2EQZ_ENC, BC2EQZ_DESC, ISA_MIPS32R6;
+ def BC2NEZ : BC2NEZ_ENC, BC2NEZ_DESC, ISA_MIPS32R6;
+}
def BC : R6MMR6Rel, BC_ENC, BC_DESC, ISA_MIPS32R6;
-def BEQC : BEQC_ENC, BEQC_DESC, ISA_MIPS32R6;
+def BEQC : R6MMR6Rel, BEQC_ENC, BEQC_DESC, ISA_MIPS32R6;
def BEQZALC : R6MMR6Rel, BEQZALC_ENC, BEQZALC_DESC, ISA_MIPS32R6;
-def BEQZC : BEQZC_ENC, BEQZC_DESC, ISA_MIPS32R6;
-def BGEC : BGEC_ENC, BGEC_DESC, ISA_MIPS32R6;
-def BGEUC : BGEUC_ENC, BGEUC_DESC, ISA_MIPS32R6;
+def BEQZC : R6MMR6Rel, BEQZC_ENC, BEQZC_DESC, ISA_MIPS32R6;
+def BGEC : R6MMR6Rel, BGEC_ENC, BGEC_DESC, ISA_MIPS32R6;
+def BGEUC : R6MMR6Rel, BGEUC_ENC, BGEUC_DESC, ISA_MIPS32R6;
def BGEZALC : R6MMR6Rel, BGEZALC_ENC, BGEZALC_DESC, ISA_MIPS32R6;
def BGEZC : BGEZC_ENC, BGEZC_DESC, ISA_MIPS32R6;
def BGTZALC : R6MMR6Rel, BGTZALC_ENC, BGTZALC_DESC, ISA_MIPS32R6;
@@ -677,15 +766,17 @@ def BGTZC : BGTZC_ENC, BGTZC_DESC, ISA_MIPS32R6;
def BITSWAP : R6MMR6Rel, BITSWAP_ENC, BITSWAP_DESC, ISA_MIPS32R6;
def BLEZALC : R6MMR6Rel, BLEZALC_ENC, BLEZALC_DESC, ISA_MIPS32R6;
def BLEZC : BLEZC_ENC, BLEZC_DESC, ISA_MIPS32R6;
-def BLTC : BLTC_ENC, BLTC_DESC, ISA_MIPS32R6;
-def BLTUC : BLTUC_ENC, BLTUC_DESC, ISA_MIPS32R6;
+def BLTC : R6MMR6Rel, BLTC_ENC, BLTC_DESC, ISA_MIPS32R6;
+def BLTUC : R6MMR6Rel, BLTUC_ENC, BLTUC_DESC, ISA_MIPS32R6;
def BLTZALC : R6MMR6Rel, BLTZALC_ENC, BLTZALC_DESC, ISA_MIPS32R6;
def BLTZC : BLTZC_ENC, BLTZC_DESC, ISA_MIPS32R6;
-def BNEC : BNEC_ENC, BNEC_DESC, ISA_MIPS32R6;
+def BNEC : R6MMR6Rel, BNEC_ENC, BNEC_DESC, ISA_MIPS32R6;
def BNEZALC : R6MMR6Rel, BNEZALC_ENC, BNEZALC_DESC, ISA_MIPS32R6;
-def BNEZC : BNEZC_ENC, BNEZC_DESC, ISA_MIPS32R6;
-def BNVC : BNVC_ENC, BNVC_DESC, ISA_MIPS32R6;
-def BOVC : BOVC_ENC, BOVC_DESC, ISA_MIPS32R6;
+def BNEZC : R6MMR6Rel, BNEZC_ENC, BNEZC_DESC, ISA_MIPS32R6;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def BNVC : R6MMR6Rel, BNVC_ENC, BNVC_DESC, ISA_MIPS32R6;
+ def BOVC : R6MMR6Rel, BOVC_ENC, BOVC_DESC, ISA_MIPS32R6;
+}
def CACHE_R6 : R6MMR6Rel, CACHE_ENC, CACHE_DESC, ISA_MIPS32R6;
let AdditionalPredicates = [NotInMicroMips] in {
def CLASS_D : CLASS_D_ENC, CLASS_D_DESC, ISA_MIPS32R6, HARDFLOAT;
@@ -695,15 +786,21 @@ def CLO_R6 : R6MMR6Rel, CLO_R6_ENC, CLO_R6_DESC, ISA_MIPS32R6;
def CLZ_R6 : R6MMR6Rel, CLZ_R6_ENC, CLZ_R6_DESC, ISA_MIPS32R6;
defm S : CMP_CC_M<FIELD_CMP_FORMAT_S, "s", FGR32Opnd>;
defm D : CMP_CC_M<FIELD_CMP_FORMAT_D, "d", FGR64Opnd>;
-def DIV : R6MMR6Rel, DIV_ENC, DIV_DESC, ISA_MIPS32R6;
-def DIVU : R6MMR6Rel, DIVU_ENC, DIVU_DESC, ISA_MIPS32R6;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def DIV : R6MMR6Rel, DIV_ENC, DIV_DESC, ISA_MIPS32R6;
+ def DIVU : R6MMR6Rel, DIVU_ENC, DIVU_DESC, ISA_MIPS32R6;
+}
def JIALC : R6MMR6Rel, JIALC_ENC, JIALC_DESC, ISA_MIPS32R6;
def JIC : R6MMR6Rel, JIC_ENC, JIC_DESC, ISA_MIPS32R6;
def JR_HB_R6 : JR_HB_R6_ENC, JR_HB_R6_DESC, ISA_MIPS32R6;
-def LDC2_R6 : LDC2_R6_ENC, LDC2_R6_DESC, ISA_MIPS32R6;
-def LL_R6 : LL_R6_ENC, LL_R6_DESC, ISA_MIPS32R6;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def LDC2_R6 : LDC2_R6_ENC, LDC2_R6_DESC, ISA_MIPS32R6;
+ def LL_R6 : LL_R6_ENC, LL_R6_DESC, PTR_32, ISA_MIPS32R6;
+}
def LSA_R6 : R6MMR6Rel, LSA_R6_ENC, LSA_R6_DESC, ISA_MIPS32R6;
-def LWC2_R6 : LWC2_R6_ENC, LWC2_R6_DESC, ISA_MIPS32R6;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def LWC2_R6 : LWC2_R6_ENC, LWC2_R6_DESC, ISA_MIPS32R6;
+}
def LWPC : R6MMR6Rel, LWPC_ENC, LWPC_DESC, ISA_MIPS32R6;
def LWUPC : LWUPC_ENC, LWUPC_DESC, ISA_MIPS32R6;
let AdditionalPredicates = [NotInMicroMips] in {
@@ -717,41 +814,40 @@ let AdditionalPredicates = [NotInMicroMips] in {
def MINA_S : MINA_S_ENC, MINA_S_DESC, ISA_MIPS32R6, HARDFLOAT;
def MIN_D : MIN_D_ENC, MIN_D_DESC, ISA_MIPS32R6, HARDFLOAT;
def MIN_S : MIN_S_ENC, MIN_S_DESC, ISA_MIPS32R6, HARDFLOAT;
-}
-def MOD : R6MMR6Rel, MOD_ENC, MOD_DESC, ISA_MIPS32R6;
-def MODU : R6MMR6Rel, MODU_ENC, MODU_DESC, ISA_MIPS32R6;
-let AdditionalPredicates = [NotInMicroMips] in {
+
+ def MOD : R6MMR6Rel, MOD_ENC, MOD_DESC, ISA_MIPS32R6;
+ def MODU : R6MMR6Rel, MODU_ENC, MODU_DESC, ISA_MIPS32R6;
+
def MSUBF_S : MSUBF_S_ENC, MSUBF_S_DESC, ISA_MIPS32R6, HARDFLOAT;
def MSUBF_D : MSUBF_D_ENC, MSUBF_D_DESC, ISA_MIPS32R6, HARDFLOAT;
+
+ def MUH : R6MMR6Rel, MUH_ENC, MUH_DESC, ISA_MIPS32R6;
+ def MUHU : R6MMR6Rel, MUHU_ENC, MUHU_DESC, ISA_MIPS32R6;
+ def MUL_R6 : R6MMR6Rel, MUL_R6_ENC, MUL_R6_DESC, ISA_MIPS32R6;
+ def MULU : R6MMR6Rel, MULU_ENC, MULU_DESC, ISA_MIPS32R6;
}
-def MUH : R6MMR6Rel, MUH_ENC, MUH_DESC, ISA_MIPS32R6;
-def MUHU : R6MMR6Rel, MUHU_ENC, MUHU_DESC, ISA_MIPS32R6;
-def MUL_R6 : R6MMR6Rel, MUL_R6_ENC, MUL_R6_DESC, ISA_MIPS32R6;
-def MULU : R6MMR6Rel, MULU_ENC, MULU_DESC, ISA_MIPS32R6;
def NAL; // BAL with rd=0
def PREF_R6 : R6MMR6Rel, PREF_ENC, PREF_DESC, ISA_MIPS32R6;
let AdditionalPredicates = [NotInMicroMips] in {
def RINT_D : RINT_D_ENC, RINT_D_DESC, ISA_MIPS32R6, HARDFLOAT;
def RINT_S : RINT_S_ENC, RINT_S_DESC, ISA_MIPS32R6, HARDFLOAT;
+ def SC_R6 : SC_R6_ENC, SC_R6_DESC, PTR_32, ISA_MIPS32R6;
+ def SDBBP_R6 : SDBBP_R6_ENC, SDBBP_R6_DESC, ISA_MIPS32R6;
+ def SELEQZ : R6MMR6Rel, SELEQZ_ENC, SELEQZ_DESC, ISA_MIPS32R6, GPR_32;
+ def SELNEZ : R6MMR6Rel, SELNEZ_ENC, SELNEZ_DESC, ISA_MIPS32R6, GPR_32;
+ def SELEQZ_D : R6MMR6Rel, SELEQZ_D_ENC, SELEQZ_D_DESC, ISA_MIPS32R6,
+ HARDFLOAT;
+ def SELEQZ_S : R6MMR6Rel, SELEQZ_S_ENC, SELEQZ_S_DESC, ISA_MIPS32R6,
+ HARDFLOAT;
+ def SELNEZ_D : R6MMR6Rel, SELNEZ_D_ENC, SELNEZ_D_DESC, ISA_MIPS32R6,
+ HARDFLOAT;
+ def SELNEZ_S : R6MMR6Rel, SELNEZ_S_ENC, SELNEZ_S_DESC, ISA_MIPS32R6,
+ HARDFLOAT;
+ def SEL_D : R6MMR6Rel, SEL_D_ENC, SEL_D_DESC, ISA_MIPS32R6, HARDFLOAT;
+ def SEL_S : R6MMR6Rel, SEL_S_ENC, SEL_S_DESC, ISA_MIPS32R6, HARDFLOAT;
+ def SDC2_R6 : SDC2_R6_ENC, SDC2_R6_DESC, ISA_MIPS32R6;
+ def SWC2_R6 : SWC2_R6_ENC, SWC2_R6_DESC, ISA_MIPS32R6;
}
-def SC_R6 : SC_R6_ENC, SC_R6_DESC, ISA_MIPS32R6;
-let AdditionalPredicates = [NotInMicroMips] in {
-def SDBBP_R6 : SDBBP_R6_ENC, SDBBP_R6_DESC, ISA_MIPS32R6;
-}
-def SDC2_R6 : SDC2_R6_ENC, SDC2_R6_DESC, ISA_MIPS32R6;
-def SELEQZ : R6MMR6Rel, SELEQZ_ENC, SELEQZ_DESC, ISA_MIPS32R6, GPR_32;
-let AdditionalPredicates = [NotInMicroMips] in {
- def SELEQZ_D : SELEQZ_D_ENC, SELEQZ_D_DESC, ISA_MIPS32R6, HARDFLOAT;
- def SELEQZ_S : SELEQZ_S_ENC, SELEQZ_S_DESC, ISA_MIPS32R6, HARDFLOAT;
-}
-def SELNEZ : R6MMR6Rel, SELNEZ_ENC, SELNEZ_DESC, ISA_MIPS32R6, GPR_32;
-let AdditionalPredicates = [NotInMicroMips] in {
- def SELNEZ_D : SELNEZ_D_ENC, SELNEZ_D_DESC, ISA_MIPS32R6, HARDFLOAT;
- def SELNEZ_S : SELNEZ_S_ENC, SELNEZ_S_DESC, ISA_MIPS32R6, HARDFLOAT;
- def SEL_D : SEL_D_ENC, SEL_D_DESC, ISA_MIPS32R6, HARDFLOAT;
- def SEL_S : SEL_S_ENC, SEL_S_DESC, ISA_MIPS32R6, HARDFLOAT;
-}
-def SWC2_R6 : SWC2_R6_ENC, SWC2_R6_DESC, ISA_MIPS32R6;
//===----------------------------------------------------------------------===//
//
@@ -761,9 +857,14 @@ def SWC2_R6 : SWC2_R6_ENC, SWC2_R6_DESC, ISA_MIPS32R6;
let AdditionalPredicates = [NotInMicroMips] in {
def : MipsInstAlias<"sdbbp", (SDBBP_R6 0)>, ISA_MIPS32R6;
+def : MipsInstAlias<"jr $rs", (JALR ZERO, GPR32Opnd:$rs), 1>, ISA_MIPS32R6, GPR_32;
}
-def : MipsInstAlias<"jr $rs", (JALR ZERO, GPR32Opnd:$rs), 1>, ISA_MIPS32R6;
+def : MipsInstAlias<"jrc $rs", (JIC GPR32Opnd:$rs, 0), 1>, ISA_MIPS32R6, GPR_32;
+
+let AdditionalPredicates = [NotInMicroMips] in {
+def : MipsInstAlias<"jalrc $rs", (JIALC GPR32Opnd:$rs, 0), 1>, ISA_MIPS32R6, GPR_32;
+}
//===----------------------------------------------------------------------===//
//
// Patterns and Pseudo Instructions
@@ -792,8 +893,10 @@ def : MipsPat<(setne VT:$lhs, VT:$rhs),
(NOROp (!cast<Instruction>("CMP_EQ_"#NAME) VT:$lhs, VT:$rhs), ZEROReg)>;
}
-defm S : Cmp_Pats<f32, NOR, ZERO>, ISA_MIPS32R6;
-defm D : Cmp_Pats<f64, NOR, ZERO>, ISA_MIPS32R6;
+let AdditionalPredicates = [NotInMicroMips] in {
+ defm S : Cmp_Pats<f32, NOR, ZERO>, ISA_MIPS32R6;
+ defm D : Cmp_Pats<f64, NOR, ZERO>, ISA_MIPS32R6;
+}
// i32 selects
multiclass SelectInt_Pats<ValueType RC, Instruction OROp, Instruction XORiOp,
@@ -832,6 +935,7 @@ def : MipsPat<(select (Opg (setne RC:$cond, immz)), immz, RC:$f),
(SELEQZOp RC:$f, RC:$cond)>;
}
+let AdditionalPredicates = [NotInMicroMips] in {
defm : SelectInt_Pats<i32, OR, XORi, SLTi, SLTiu, SELEQZ, SELNEZ,
immZExt16, i32>, ISA_MIPS32R6;
@@ -845,3 +949,4 @@ def : MipsPat<(select i32:$cond, i32:$t, immz),
def : MipsPat<(select i32:$cond, immz, i32:$f),
(SELEQZ i32:$f, i32:$cond)>,
ISA_MIPS32R6;
+}
diff --git a/lib/Target/Mips/Mips64InstrInfo.td b/lib/Target/Mips/Mips64InstrInfo.td
index cbdcdd788bec..88cfec5bc13c 100644
--- a/lib/Target/Mips/Mips64InstrInfo.td
+++ b/lib/Target/Mips/Mips64InstrInfo.td
@@ -15,14 +15,6 @@
// Mips Operand, Complex Patterns and Transformations Definitions.
//===----------------------------------------------------------------------===//
-// Unsigned Operand
-def uimm16_64 : Operand<i64> {
- let PrintMethod = "printUnsignedImm";
-}
-
-// Signed Operand
-def simm10_64 : Operand<i64>;
-
// Transformation Function - get Imm - 32.
def Subtract32 : SDNodeXForm<imm, [{
return getImm(N, (unsigned)N->getZExtValue() - 32);
@@ -37,7 +29,7 @@ def immSExt10_64 : PatLeaf<(i64 imm),
[{ return isInt<10>(N->getSExtValue()); }]>;
def immZExt16_64 : PatLeaf<(i64 imm),
- [{ return isInt<16>(N->getZExtValue()); }]>;
+ [{ return isUInt<16>(N->getZExtValue()); }]>;
def immZExt5_64 : ImmLeaf<i64, [{ return Imm == (Imm & 0x1f); }]>;
@@ -71,6 +63,10 @@ def PowerOf2HI : PatLeaf<(imm), [{
return false;
}]>;
+def assertzext_lt_i32 : PatFrag<(ops node:$src), (assertzext node:$src), [{
+ return cast<VTSDNode>(N->getOperand(1))->getVT().bitsLT(MVT::i32);
+}]>;
+
//===----------------------------------------------------------------------===//
// Instructions specific format
//===----------------------------------------------------------------------===//
@@ -86,7 +82,7 @@ let usesCustomInserter = 1 in {
}
/// Pseudo instructions for loading and storing accumulator registers.
-let isPseudo = 1, isCodeGenOnly = 1 in {
+let isPseudo = 1, isCodeGenOnly = 1, hasNoSchedulingInfo = 1 in {
def LOAD_ACC128 : Load<"", ACC128>;
def STORE_ACC128 : Store<"", ACC128>;
}
@@ -96,11 +92,13 @@ let isPseudo = 1, isCodeGenOnly = 1 in {
//===----------------------------------------------------------------------===//
let DecoderNamespace = "Mips64" in {
/// Arithmetic Instructions (ALU Immediate)
-def DADDi : ArithLogicI<"daddi", simm16_64, GPR64Opnd>, ADDI_FM<0x18>,
- ISA_MIPS3_NOT_32R6_64R6;
-def DADDiu : ArithLogicI<"daddiu", simm16_64, GPR64Opnd, II_DADDIU,
- immSExt16, add>,
- ADDI_FM<0x19>, IsAsCheapAsAMove, ISA_MIPS3;
+def DADDi : ArithLogicI<"daddi", simm16_64, GPR64Opnd, II_DADDI>,
+ ADDI_FM<0x18>, ISA_MIPS3_NOT_32R6_64R6;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def DADDiu : StdMMR6Rel, ArithLogicI<"daddiu", simm16_64, GPR64Opnd,
+ II_DADDIU, immSExt16, add>,
+ ADDI_FM<0x19>, IsAsCheapAsAMove, ISA_MIPS3;
+}
let isCodeGenOnly = 1 in {
def SLTi64 : SetCC_I<"slti", setlt, simm16_64, immSExt16, GPR64Opnd>,
@@ -113,18 +111,20 @@ def ORi64 : ArithLogicI<"ori", uimm16_64, GPR64Opnd, II_OR, immZExt16, or>,
ADDI_FM<0xd>;
def XORi64 : ArithLogicI<"xori", uimm16_64, GPR64Opnd, II_XOR, immZExt16, xor>,
ADDI_FM<0xe>;
-def LUi64 : LoadUpper<"lui", GPR64Opnd, uimm16_64>, LUI_FM;
+def LUi64 : LoadUpper<"lui", GPR64Opnd, uimm16_64_relaxed>, LUI_FM;
}
/// Arithmetic Instructions (3-Operand, R-Type)
-def DADD : ArithLogicR<"dadd", GPR64Opnd, 1, II_DADD>, ADD_FM<0, 0x2c>,
- ISA_MIPS3;
-def DADDu : ArithLogicR<"daddu", GPR64Opnd, 1, II_DADDU, add>, ADD_FM<0, 0x2d>,
- ISA_MIPS3;
-def DSUBu : ArithLogicR<"dsubu", GPR64Opnd, 0, II_DSUBU, sub>, ADD_FM<0, 0x2f>,
- ISA_MIPS3;
-def DSUB : ArithLogicR<"dsub", GPR64Opnd, 0, II_DSUB>, ADD_FM<0, 0x2e>,
- ISA_MIPS3;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def DADD : StdMMR6Rel, ArithLogicR<"dadd", GPR64Opnd, 1, II_DADD>,
+ ADD_FM<0, 0x2c>, ISA_MIPS3;
+ def DADDu : StdMMR6Rel, ArithLogicR<"daddu", GPR64Opnd, 1, II_DADDU, add>,
+ ADD_FM<0, 0x2d>, ISA_MIPS3;
+ def DSUBu : StdMMR6Rel, ArithLogicR<"dsubu", GPR64Opnd, 0, II_DSUBU, sub>, ADD_FM<0, 0x2f>,
+ ISA_MIPS3;
+ def DSUB : StdMMR6Rel, ArithLogicR<"dsub", GPR64Opnd, 0, II_DSUB>, ADD_FM<0, 0x2e>,
+ ISA_MIPS3;
+}
let isCodeGenOnly = 1 in {
def SLT64 : SetCC_R<"slt", setlt, GPR64Opnd>, ADD_FM<0, 0x2a>;
@@ -136,33 +136,43 @@ def NOR64 : LogicNOR<"nor", GPR64Opnd>, ADD_FM<0, 0x27>;
}
/// Shift Instructions
-def DSLL : shift_rotate_imm<"dsll", uimm6, GPR64Opnd, II_DSLL, shl, immZExt6>,
+let AdditionalPredicates = [NotInMicroMips] in {
+ def DSLL : StdMMR6Rel, shift_rotate_imm<"dsll", uimm6, GPR64Opnd, II_DSLL,
+ shl, immZExt6>,
SRA_FM<0x38, 0>, ISA_MIPS3;
-def DSRL : shift_rotate_imm<"dsrl", uimm6, GPR64Opnd, II_DSRL, srl, immZExt6>,
+ def DSRL : StdMMR6Rel, shift_rotate_imm<"dsrl", uimm6, GPR64Opnd, II_DSRL,
+ srl, immZExt6>,
SRA_FM<0x3a, 0>, ISA_MIPS3;
-def DSRA : shift_rotate_imm<"dsra", uimm6, GPR64Opnd, II_DSRA, sra, immZExt6>,
+ def DSRA : StdMMR6Rel, shift_rotate_imm<"dsra", uimm6, GPR64Opnd, II_DSRA,
+ sra, immZExt6>,
SRA_FM<0x3b, 0>, ISA_MIPS3;
-def DSLLV : shift_rotate_reg<"dsllv", GPR64Opnd, II_DSLLV, shl>,
- SRLV_FM<0x14, 0>, ISA_MIPS3;
-def DSRLV : shift_rotate_reg<"dsrlv", GPR64Opnd, II_DSRLV, srl>,
- SRLV_FM<0x16, 0>, ISA_MIPS3;
-def DSRAV : shift_rotate_reg<"dsrav", GPR64Opnd, II_DSRAV, sra>,
- SRLV_FM<0x17, 0>, ISA_MIPS3;
-def DSLL32 : shift_rotate_imm<"dsll32", uimm5, GPR64Opnd, II_DSLL32>,
- SRA_FM<0x3c, 0>, ISA_MIPS3;
-def DSRL32 : shift_rotate_imm<"dsrl32", uimm5, GPR64Opnd, II_DSRL32>,
- SRA_FM<0x3e, 0>, ISA_MIPS3;
-def DSRA32 : shift_rotate_imm<"dsra32", uimm5, GPR64Opnd, II_DSRA32>,
- SRA_FM<0x3f, 0>, ISA_MIPS3;
+ def DSLLV : StdMMR6Rel, shift_rotate_reg<"dsllv", GPR64Opnd, II_DSLLV, shl>,
+ SRLV_FM<0x14, 0>, ISA_MIPS3;
+ def DSRAV : StdMMR6Rel, shift_rotate_reg<"dsrav", GPR64Opnd, II_DSRAV, sra>,
+ SRLV_FM<0x17, 0>, ISA_MIPS3;
+ def DSRLV : StdMMR6Rel, shift_rotate_reg<"dsrlv", GPR64Opnd, II_DSRLV, srl>,
+ SRLV_FM<0x16, 0>, ISA_MIPS3;
+ def DSLL32 : StdMMR6Rel, shift_rotate_imm<"dsll32", uimm5, GPR64Opnd,
+ II_DSLL32>,
+ SRA_FM<0x3c, 0>, ISA_MIPS3;
+ def DSRL32 : StdMMR6Rel, shift_rotate_imm<"dsrl32", uimm5, GPR64Opnd,
+ II_DSRL32>,
+ SRA_FM<0x3e, 0>, ISA_MIPS3;
+ def DSRA32 : StdMMR6Rel, shift_rotate_imm<"dsra32", uimm5, GPR64Opnd,
+ II_DSRA32>,
+ SRA_FM<0x3f, 0>, ISA_MIPS3;
// Rotate Instructions
-def DROTR : shift_rotate_imm<"drotr", uimm6, GPR64Opnd, II_DROTR, rotr,
- immZExt6>,
- SRA_FM<0x3a, 1>, ISA_MIPS64R2;
-def DROTRV : shift_rotate_reg<"drotrv", GPR64Opnd, II_DROTRV, rotr>,
- SRLV_FM<0x16, 1>, ISA_MIPS64R2;
-def DROTR32 : shift_rotate_imm<"drotr32", uimm5, GPR64Opnd, II_DROTR32>,
- SRA_FM<0x3e, 1>, ISA_MIPS64R2;
+ def DROTR : StdMMR6Rel, shift_rotate_imm<"drotr", uimm6, GPR64Opnd, II_DROTR,
+ rotr, immZExt6>,
+ SRA_FM<0x3a, 1>, ISA_MIPS64R2;
+ def DROTRV : StdMMR6Rel, shift_rotate_reg<"drotrv", GPR64Opnd, II_DROTRV,
+ rotr>,
+ SRLV_FM<0x16, 1>, ISA_MIPS64R2;
+ def DROTR32 : StdMMR6Rel, shift_rotate_imm<"drotr32", uimm5, GPR64Opnd,
+ II_DROTR32>,
+ SRA_FM<0x3e, 1>, ISA_MIPS64R2;
+}
/// Load and Store Instructions
/// aligned
@@ -177,9 +187,16 @@ def SH64 : Store<"sh", GPR64Opnd, truncstorei16, II_SH>, LW_FM<0x29>;
def SW64 : Store<"sw", GPR64Opnd, truncstorei32, II_SW>, LW_FM<0x2b>;
}
-def LWu : Load<"lwu", GPR64Opnd, zextloadi32, II_LWU>, LW_FM<0x27>, ISA_MIPS3;
-def LD : Load<"ld", GPR64Opnd, load, II_LD>, LW_FM<0x37>, ISA_MIPS3;
-def SD : Store<"sd", GPR64Opnd, store, II_SD>, LW_FM<0x3f>, ISA_MIPS3;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def LWu : StdMMR6Rel, MMRel, Load<"lwu", GPR64Opnd, zextloadi32, II_LWU>,
+ LW_FM<0x27>, ISA_MIPS3;
+ def LD : StdMMR6Rel, LoadMemory<"ld", GPR64Opnd, mem_simm16, load, II_LD>,
+ LW_FM<0x37>, ISA_MIPS3;
+ def SD : StdMMR6Rel, StoreMemory<"sd", GPR64Opnd, mem_simm16, store, II_SD>,
+ LW_FM<0x3f>, ISA_MIPS3;
+}
+
+
/// load/store left/right
let isCodeGenOnly = 1 in {
@@ -199,9 +216,20 @@ def SDR : StoreLeftRight<"sdr", MipsSDR, GPR64Opnd, II_SDR>, LW_FM<0x2d>,
ISA_MIPS3_NOT_32R6_64R6;
/// Load-linked, Store-conditional
-def LLD : LLBase<"lld", GPR64Opnd>, LW_FM<0x34>, ISA_MIPS3_NOT_32R6_64R6;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def LLD : StdMMR6Rel, LLBase<"lld", GPR64Opnd, mem_simm16>, LW_FM<0x34>,
+ ISA_MIPS3_NOT_32R6_64R6;
+}
def SCD : SCBase<"scd", GPR64Opnd>, LW_FM<0x3c>, ISA_MIPS3_NOT_32R6_64R6;
+let AdditionalPredicates = [NotInMicroMips],
+ DecoderNamespace = "Mips32_64_PTR64" in {
+def LL64 : LLBase<"ll", GPR32Opnd>, LW_FM<0x30>, PTR_64,
+ ISA_MIPS2_NOT_32R6_64R6;
+def SC64 : SCBase<"sc", GPR32Opnd>, LW_FM<0x38>, PTR_64,
+ ISA_MIPS2_NOT_32R6_64R6;
+}
+
/// Jump and Branch Instructions
let isCodeGenOnly = 1 in {
def JR64 : IndirectBranch<"jr", GPR64Opnd>, MTLO_FM<8>;
@@ -220,18 +248,22 @@ def PseudoReturn64 : PseudoReturnBase<GPR64Opnd>;
def PseudoIndirectBranch64 : PseudoIndirectBranchBase<GPR64Opnd>;
/// Multiply and Divide Instructions.
-def DMULT : Mult<"dmult", II_DMULT, GPR64Opnd, [HI0_64, LO0_64]>,
- MULT_FM<0, 0x1c>, ISA_MIPS3_NOT_32R6_64R6;
-def DMULTu : Mult<"dmultu", II_DMULTU, GPR64Opnd, [HI0_64, LO0_64]>,
- MULT_FM<0, 0x1d>, ISA_MIPS3_NOT_32R6_64R6;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def DMULT : Mult<"dmult", II_DMULT, GPR64Opnd, [HI0_64, LO0_64]>,
+ MULT_FM<0, 0x1c>, ISA_MIPS3_NOT_32R6_64R6;
+ def DMULTu : Mult<"dmultu", II_DMULTU, GPR64Opnd, [HI0_64, LO0_64]>,
+ MULT_FM<0, 0x1d>, ISA_MIPS3_NOT_32R6_64R6;
+}
def PseudoDMULT : MultDivPseudo<DMULT, ACC128, GPR64Opnd, MipsMult,
II_DMULT>, ISA_MIPS3_NOT_32R6_64R6;
def PseudoDMULTu : MultDivPseudo<DMULTu, ACC128, GPR64Opnd, MipsMultu,
II_DMULTU>, ISA_MIPS3_NOT_32R6_64R6;
-def DSDIV : Div<"ddiv", II_DDIV, GPR64Opnd, [HI0_64, LO0_64]>,
- MULT_FM<0, 0x1e>, ISA_MIPS3_NOT_32R6_64R6;
-def DUDIV : Div<"ddivu", II_DDIVU, GPR64Opnd, [HI0_64, LO0_64]>,
- MULT_FM<0, 0x1f>, ISA_MIPS3_NOT_32R6_64R6;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def DSDIV : Div<"ddiv", II_DDIV, GPR64Opnd, [HI0_64, LO0_64]>,
+ MULT_FM<0, 0x1e>, ISA_MIPS3_NOT_32R6_64R6;
+ def DUDIV : Div<"ddivu", II_DDIVU, GPR64Opnd, [HI0_64, LO0_64]>,
+ MULT_FM<0, 0x1f>, ISA_MIPS3_NOT_32R6_64R6;
+}
def PseudoDSDIV : MultDivPseudo<DSDIV, ACC128, GPR64Opnd, MipsDivRem,
II_DDIV, 0, 1, 1>, ISA_MIPS3_NOT_32R6_64R6;
def PseudoDUDIV : MultDivPseudo<DUDIV, ACC128, GPR64Opnd, MipsDivRemU,
@@ -260,12 +292,16 @@ def SEH64 : SignExtInReg<"seh", i16, GPR64Opnd, II_SEH>, SEB_FM<0x18, 0x20>,
}
/// Count Leading
-def DCLZ : CountLeading0<"dclz", GPR64Opnd>, CLO_FM<0x24>, ISA_MIPS64_NOT_64R6;
-def DCLO : CountLeading1<"dclo", GPR64Opnd>, CLO_FM<0x25>, ISA_MIPS64_NOT_64R6;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def DCLZ : StdMMR6Rel, CountLeading0<"dclz", GPR64Opnd>, CLO_FM<0x24>,
+ ISA_MIPS64_NOT_64R6;
+ def DCLO : StdMMR6Rel, CountLeading1<"dclo", GPR64Opnd>, CLO_FM<0x25>,
+ ISA_MIPS64_NOT_64R6;
/// Double Word Swap Bytes/HalfWords
-def DSBH : SubwordSwap<"dsbh", GPR64Opnd>, SEB_FM<2, 0x24>, ISA_MIPS64R2;
-def DSHD : SubwordSwap<"dshd", GPR64Opnd>, SEB_FM<5, 0x24>, ISA_MIPS64R2;
+ def DSBH : SubwordSwap<"dsbh", GPR64Opnd>, SEB_FM<2, 0x24>, ISA_MIPS64R2;
+ def DSHD : SubwordSwap<"dshd", GPR64Opnd>, SEB_FM<5, 0x24>, ISA_MIPS64R2;
+}
def LEA_ADDiu64 : EffectiveAddress<"daddiu", GPR64Opnd>, LW_FM<0x19>;
@@ -273,21 +309,24 @@ let isCodeGenOnly = 1 in
def RDHWR64 : ReadHardware<GPR64Opnd, HWRegsOpnd>, RDHWR_FM;
let AdditionalPredicates = [NotInMicroMips] in {
- // TODO: Add 'pos + size' constraint check to dext* instructions
- // DEXT: 0 < pos + size <= 63
- // DEXTM, DEXTU: 32 < pos + size <= 64
- def DEXT : ExtBase<"dext", GPR64Opnd, uimm5, uimm5_plus1, MipsExt>,
- EXT_FM<3>;
- def DEXTM : ExtBase<"dextm", GPR64Opnd, uimm5, uimm5_plus33, MipsExt>,
- EXT_FM<1>;
+ // The 'pos + size' constraints are enforced by the code that lowers into
+ // MipsISD::Ext.
+ def DEXT : ExtBase<"dext", GPR64Opnd, uimm5_report_uimm6, uimm5_plus1,
+ immZExt5, immZExt5Plus1, MipsExt>, EXT_FM<3>,
+ ISA_MIPS64R2;
+ def DEXTM : ExtBase<"dextm", GPR64Opnd, uimm5, uimm5_plus33, immZExt5,
+ immZExt5Plus33, MipsExt>, EXT_FM<1>, ISA_MIPS64R2;
def DEXTU : ExtBase<"dextu", GPR64Opnd, uimm5_plus32, uimm5_plus1,
- MipsExt>, EXT_FM<2>;
+ immZExt5Plus32, immZExt5Plus1, MipsExt>, EXT_FM<2>,
+ ISA_MIPS64R2;
+ def DINS : InsBase<"dins", GPR64Opnd, uimm6, uimm5_inssize_plus1, MipsIns>,
+ EXT_FM<7>, ISA_MIPS64R2;
+ def DINSU : InsBase<"dinsu", GPR64Opnd, uimm5_plus32, uimm5_inssize_plus1>,
+ EXT_FM<6>, ISA_MIPS64R2;
+ def DINSM : InsBase<"dinsm", GPR64Opnd, uimm5, uimm5_inssize_plus1>,
+ EXT_FM<5>, ISA_MIPS64R2;
}
-def DINS : InsBase<"dins", GPR64Opnd, uimm6, MipsIns>, EXT_FM<7>;
-def DINSU : InsBase<"dinsu", GPR64Opnd, uimm5_plus32>, EXT_FM<6>;
-def DINSM : InsBase<"dinsm", GPR64Opnd, uimm5>, EXT_FM<5>;
-
let isCodeGenOnly = 1, rs = 0, shamt = 0 in {
def DSLL64_32 : FR<0x00, 0x3c, (outs GPR64:$rd), (ins GPR32:$rt),
"dsll\t$rd, $rt, 32", [], II_DSLL>;
@@ -309,8 +348,8 @@ def LONG_BRANCH_DADDiu : PseudoSE<(outs GPR64Opnd:$dst),
// Cavium Octeon cnMIPS instructions
let DecoderNamespace = "CnMips",
- EncodingPredicates = []<Predicate>, // FIXME: The lack of HasStdEnc is probably a bug
- AdditionalPredicates = [HasCnMips] in {
+ // FIXME: The lack of HasStdEnc is probably a bug
+ EncodingPredicates = []<Predicate> in {
class Count1s<string opstr, RegisterOperand RO>:
InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"),
@@ -361,83 +400,94 @@ class MFC2OP<string asmstr, RegisterOperand RO> :
!strconcat(asmstr, "\t$rt, $imm16"), [], NoItinerary, FrmFR>;
// Unsigned Byte Add
-let Pattern = [(set GPR64Opnd:$rd,
- (and (add GPR64Opnd:$rs, GPR64Opnd:$rt), 255))] in
def BADDu : ArithLogicR<"baddu", GPR64Opnd, 1, II_BADDU>,
- ADD_FM<0x1c, 0x28>;
+ ADD_FM<0x1c, 0x28>, ASE_CNMIPS {
+ let Pattern = [(set GPR64Opnd:$rd,
+ (and (add GPR64Opnd:$rs, GPR64Opnd:$rt), 255))];
+}
// Branch on Bit Clear /+32
def BBIT0 : CBranchBitNum<"bbit0", brtarget, seteq, GPR64Opnd,
- uimm5_64_report_uimm6>, BBIT_FM<0x32>;
+ uimm5_64_report_uimm6>, BBIT_FM<0x32>, ASE_CNMIPS;
def BBIT032: CBranchBitNum<"bbit032", brtarget, seteq, GPR64Opnd, uimm5_64,
- 0x100000000>,
- BBIT_FM<0x36>;
+ 0x100000000>, BBIT_FM<0x36>, ASE_CNMIPS;
// Branch on Bit Set /+32
def BBIT1 : CBranchBitNum<"bbit1", brtarget, setne, GPR64Opnd,
- uimm5_64_report_uimm6>, BBIT_FM<0x3a>;
+ uimm5_64_report_uimm6>, BBIT_FM<0x3a>, ASE_CNMIPS;
def BBIT132: CBranchBitNum<"bbit132", brtarget, setne, GPR64Opnd, uimm5_64,
- 0x100000000>, BBIT_FM<0x3e>;
+ 0x100000000>, BBIT_FM<0x3e>, ASE_CNMIPS;
// Multiply Doubleword to GPR
-let Defs = [HI0, LO0, P0, P1, P2] in
def DMUL : ArithLogicR<"dmul", GPR64Opnd, 1, II_DMUL, mul>,
- ADD_FM<0x1c, 0x03>;
+ ADD_FM<0x1c, 0x03>, ASE_CNMIPS {
+ let Defs = [HI0, LO0, P0, P1, P2];
+}
// Extract a signed bit field /+32
-def EXTS : ExtsCins<"exts">, EXTS_FM<0x3a>;
-def EXTS32: ExtsCins<"exts32">, EXTS_FM<0x3b>;
+def EXTS : ExtsCins<"exts">, EXTS_FM<0x3a>, ASE_CNMIPS;
+def EXTS32: ExtsCins<"exts32">, EXTS_FM<0x3b>, ASE_CNMIPS;
// Clear and insert a bit field /+32
-def CINS : ExtsCins<"cins">, EXTS_FM<0x32>;
-def CINS32: ExtsCins<"cins32">, EXTS_FM<0x33>;
+def CINS : ExtsCins<"cins">, EXTS_FM<0x32>, ASE_CNMIPS;
+def CINS32: ExtsCins<"cins32">, EXTS_FM<0x33>, ASE_CNMIPS;
// Move to multiplier/product register
-def MTM0 : MoveToLOHI<"mtm0", GPR64Opnd, [MPL0, P0, P1, P2]>, MTMR_FM<0x08>;
-def MTM1 : MoveToLOHI<"mtm1", GPR64Opnd, [MPL1, P0, P1, P2]>, MTMR_FM<0x0c>;
-def MTM2 : MoveToLOHI<"mtm2", GPR64Opnd, [MPL2, P0, P1, P2]>, MTMR_FM<0x0d>;
-def MTP0 : MoveToLOHI<"mtp0", GPR64Opnd, [P0]>, MTMR_FM<0x09>;
-def MTP1 : MoveToLOHI<"mtp1", GPR64Opnd, [P1]>, MTMR_FM<0x0a>;
-def MTP2 : MoveToLOHI<"mtp2", GPR64Opnd, [P2]>, MTMR_FM<0x0b>;
+def MTM0 : MoveToLOHI<"mtm0", GPR64Opnd, [MPL0, P0, P1, P2]>, MTMR_FM<0x08>,
+ ASE_CNMIPS;
+def MTM1 : MoveToLOHI<"mtm1", GPR64Opnd, [MPL1, P0, P1, P2]>, MTMR_FM<0x0c>,
+ ASE_CNMIPS;
+def MTM2 : MoveToLOHI<"mtm2", GPR64Opnd, [MPL2, P0, P1, P2]>, MTMR_FM<0x0d>,
+ ASE_CNMIPS;
+def MTP0 : MoveToLOHI<"mtp0", GPR64Opnd, [P0]>, MTMR_FM<0x09>, ASE_CNMIPS;
+def MTP1 : MoveToLOHI<"mtp1", GPR64Opnd, [P1]>, MTMR_FM<0x0a>, ASE_CNMIPS;
+def MTP2 : MoveToLOHI<"mtp2", GPR64Opnd, [P2]>, MTMR_FM<0x0b>, ASE_CNMIPS;
// Count Ones in a Word/Doubleword
-def POP : Count1s<"pop", GPR32Opnd>, POP_FM<0x2c>;
-def DPOP : Count1s<"dpop", GPR64Opnd>, POP_FM<0x2d>;
+def POP : Count1s<"pop", GPR32Opnd>, POP_FM<0x2c>, ASE_CNMIPS;
+def DPOP : Count1s<"dpop", GPR64Opnd>, POP_FM<0x2d>, ASE_CNMIPS;
// Set on equal/not equal
-def SEQ : SetCC64_R<"seq", seteq>, SEQ_FM<0x2a>;
-def SEQi : SetCC64_I<"seqi", seteq>, SEQI_FM<0x2e>;
-def SNE : SetCC64_R<"sne", setne>, SEQ_FM<0x2b>;
-def SNEi : SetCC64_I<"snei", setne>, SEQI_FM<0x2f>;
+def SEQ : SetCC64_R<"seq", seteq>, SEQ_FM<0x2a>, ASE_CNMIPS;
+def SEQi : SetCC64_I<"seqi", seteq>, SEQI_FM<0x2e>, ASE_CNMIPS;
+def SNE : SetCC64_R<"sne", setne>, SEQ_FM<0x2b>, ASE_CNMIPS;
+def SNEi : SetCC64_I<"snei", setne>, SEQI_FM<0x2f>, ASE_CNMIPS;
// 192-bit x 64-bit Unsigned Multiply and Add
-let Defs = [P0, P1, P2] in
-def V3MULU: ArithLogicR<"v3mulu", GPR64Opnd, 0, II_DMUL>,
- ADD_FM<0x1c, 0x11>;
+def V3MULU: ArithLogicR<"v3mulu", GPR64Opnd, 0, II_DMUL>, ADD_FM<0x1c, 0x11>,
+ ASE_CNMIPS {
+ let Defs = [P0, P1, P2];
+}
// 64-bit Unsigned Multiply and Add Move
-let Defs = [MPL0, P0, P1, P2] in
-def VMM0 : ArithLogicR<"vmm0", GPR64Opnd, 0, II_DMUL>,
- ADD_FM<0x1c, 0x10>;
+def VMM0 : ArithLogicR<"vmm0", GPR64Opnd, 0, II_DMUL>, ADD_FM<0x1c, 0x10>,
+ ASE_CNMIPS {
+ let Defs = [MPL0, P0, P1, P2];
+}
// 64-bit Unsigned Multiply and Add
-let Defs = [MPL1, MPL2, P0, P1, P2] in
-def VMULU : ArithLogicR<"vmulu", GPR64Opnd, 0, II_DMUL>,
- ADD_FM<0x1c, 0x0f>;
+def VMULU : ArithLogicR<"vmulu", GPR64Opnd, 0, II_DMUL>, ADD_FM<0x1c, 0x0f>,
+ ASE_CNMIPS {
+ let Defs = [MPL1, MPL2, P0, P1, P2];
+}
// Move between CPU and coprocessor registers
-def DMFC2_OCTEON : MFC2OP<"dmfc2", GPR64Opnd>, MFC2OP_FM<0x12, 1>;
-def DMTC2_OCTEON : MFC2OP<"dmtc2", GPR64Opnd>, MFC2OP_FM<0x12, 5>;
+def DMFC2_OCTEON : MFC2OP<"dmfc2", GPR64Opnd>, MFC2OP_FM<0x12, 1>, ASE_CNMIPS;
+def DMTC2_OCTEON : MFC2OP<"dmtc2", GPR64Opnd>, MFC2OP_FM<0x12, 5>, ASE_CNMIPS;
}
}
/// Move between CPU and coprocessor registers
let DecoderNamespace = "Mips64", Predicates = [HasMips64] in {
-def DMFC0 : MFC3OP<"dmfc0", GPR64Opnd, COP0Opnd>, MFC3OP_FM<0x10, 1>, ISA_MIPS3;
-def DMTC0 : MTC3OP<"dmtc0", COP0Opnd, GPR64Opnd>, MFC3OP_FM<0x10, 5>, ISA_MIPS3;
-def DMFC2 : MFC3OP<"dmfc2", GPR64Opnd, COP2Opnd>, MFC3OP_FM<0x12, 1>, ISA_MIPS3;
-def DMTC2 : MTC3OP<"dmtc2", COP2Opnd, GPR64Opnd>, MFC3OP_FM<0x12, 5>, ISA_MIPS3;
+def DMFC0 : MFC3OP<"dmfc0", GPR64Opnd, COP0Opnd, II_DMFC0>, MFC3OP_FM<0x10, 1>,
+ ISA_MIPS3;
+def DMTC0 : MTC3OP<"dmtc0", COP0Opnd, GPR64Opnd, II_DMTC0>, MFC3OP_FM<0x10, 5>,
+ ISA_MIPS3;
+def DMFC2 : MFC3OP<"dmfc2", GPR64Opnd, COP2Opnd, II_DMFC2>, MFC3OP_FM<0x12, 1>,
+ ISA_MIPS3;
+def DMTC2 : MTC3OP<"dmtc2", COP2Opnd, GPR64Opnd, II_DMTC2>, MFC3OP_FM<0x12, 5>,
+ ISA_MIPS3;
}
//===----------------------------------------------------------------------===//
@@ -458,31 +508,34 @@ def : MipsPat<(MipsHi tconstpool:$in), (LUi64 tconstpool:$in)>;
def : MipsPat<(MipsHi tglobaltlsaddr:$in), (LUi64 tglobaltlsaddr:$in)>;
def : MipsPat<(MipsHi texternalsym:$in), (LUi64 texternalsym:$in)>;
-def : MipsPat<(MipsLo tglobaladdr:$in), (DADDiu ZERO_64, tglobaladdr:$in)>;
-def : MipsPat<(MipsLo tblockaddress:$in), (DADDiu ZERO_64, tblockaddress:$in)>;
-def : MipsPat<(MipsLo tjumptable:$in), (DADDiu ZERO_64, tjumptable:$in)>;
-def : MipsPat<(MipsLo tconstpool:$in), (DADDiu ZERO_64, tconstpool:$in)>;
-def : MipsPat<(MipsLo tglobaltlsaddr:$in),
- (DADDiu ZERO_64, tglobaltlsaddr:$in)>;
-def : MipsPat<(MipsLo texternalsym:$in), (DADDiu ZERO_64, texternalsym:$in)>;
-
-def : MipsPat<(add GPR64:$hi, (MipsLo tglobaladdr:$lo)),
- (DADDiu GPR64:$hi, tglobaladdr:$lo)>;
-def : MipsPat<(add GPR64:$hi, (MipsLo tblockaddress:$lo)),
- (DADDiu GPR64:$hi, tblockaddress:$lo)>;
-def : MipsPat<(add GPR64:$hi, (MipsLo tjumptable:$lo)),
- (DADDiu GPR64:$hi, tjumptable:$lo)>;
-def : MipsPat<(add GPR64:$hi, (MipsLo tconstpool:$lo)),
- (DADDiu GPR64:$hi, tconstpool:$lo)>;
-def : MipsPat<(add GPR64:$hi, (MipsLo tglobaltlsaddr:$lo)),
- (DADDiu GPR64:$hi, tglobaltlsaddr:$lo)>;
-
-def : WrapperPat<tglobaladdr, DADDiu, GPR64>;
-def : WrapperPat<tconstpool, DADDiu, GPR64>;
-def : WrapperPat<texternalsym, DADDiu, GPR64>;
-def : WrapperPat<tblockaddress, DADDiu, GPR64>;
-def : WrapperPat<tjumptable, DADDiu, GPR64>;
-def : WrapperPat<tglobaltlsaddr, DADDiu, GPR64>;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def : MipsPat<(MipsLo tglobaladdr:$in), (DADDiu ZERO_64, tglobaladdr:$in)>;
+ def : MipsPat<(MipsLo tblockaddress:$in),
+ (DADDiu ZERO_64, tblockaddress:$in)>;
+ def : MipsPat<(MipsLo tjumptable:$in), (DADDiu ZERO_64, tjumptable:$in)>;
+ def : MipsPat<(MipsLo tconstpool:$in), (DADDiu ZERO_64, tconstpool:$in)>;
+ def : MipsPat<(MipsLo tglobaltlsaddr:$in),
+ (DADDiu ZERO_64, tglobaltlsaddr:$in)>;
+ def : MipsPat<(MipsLo texternalsym:$in), (DADDiu ZERO_64, texternalsym:$in)>;
+
+ def : MipsPat<(add GPR64:$hi, (MipsLo tglobaladdr:$lo)),
+ (DADDiu GPR64:$hi, tglobaladdr:$lo)>;
+ def : MipsPat<(add GPR64:$hi, (MipsLo tblockaddress:$lo)),
+ (DADDiu GPR64:$hi, tblockaddress:$lo)>;
+ def : MipsPat<(add GPR64:$hi, (MipsLo tjumptable:$lo)),
+ (DADDiu GPR64:$hi, tjumptable:$lo)>;
+ def : MipsPat<(add GPR64:$hi, (MipsLo tconstpool:$lo)),
+ (DADDiu GPR64:$hi, tconstpool:$lo)>;
+ def : MipsPat<(add GPR64:$hi, (MipsLo tglobaltlsaddr:$lo)),
+ (DADDiu GPR64:$hi, tglobaltlsaddr:$lo)>;
+
+ def : WrapperPat<tglobaladdr, DADDiu, GPR64>;
+ def : WrapperPat<tconstpool, DADDiu, GPR64>;
+ def : WrapperPat<texternalsym, DADDiu, GPR64>;
+ def : WrapperPat<tblockaddress, DADDiu, GPR64>;
+ def : WrapperPat<tjumptable, DADDiu, GPR64>;
+ def : WrapperPat<tglobaltlsaddr, DADDiu, GPR64>;
+}
defm : BrcondPats<GPR64, BEQ64, BNE64, SLT64, SLTu64, SLTi64, SLTiu64,
ZERO_64>;
@@ -502,7 +555,17 @@ defm : SetgeImmPats<GPR64, SLTi64, SLTiu64>;
// truncate
def : MipsPat<(trunc (assertsext GPR64:$src)),
(EXTRACT_SUBREG GPR64:$src, sub_32)>;
-def : MipsPat<(trunc (assertzext GPR64:$src)),
+// The forward compatibility strategy employed by MIPS requires us to treat
+// values as being sign extended to an infinite number of bits. This allows
+// existing software to run without modification on any future MIPS
+// implementation (e.g. 128-bit, or 1024-bit). Being compatible with this
+// strategy requires that truncation acts as a sign-extension for values being
+// fed into instructions operating on 32-bit values. Such instructions have
+// undefined results if this is not true.
+// For our case, this means that we can't issue an extract_subreg for nodes
+// such as (trunc:i32 (assertzext:i64 X, i32)), because the sign-bit of the
+// lower subreg would not be replicated into the upper half.
+def : MipsPat<(trunc (assertzext_lt_i32 GPR64:$src)),
(EXTRACT_SUBREG GPR64:$src, sub_32)>;
def : MipsPat<(i32 (trunc GPR64:$src)),
(SLL (EXTRACT_SUBREG GPR64:$src, sub_32), 0)>;
@@ -514,11 +577,14 @@ def : MipsPat<(srl GPR64:$rt, (i32 (trunc GPR64:$rs))),
(DSRLV GPR64:$rt, (EXTRACT_SUBREG GPR64:$rs, sub_32))>;
def : MipsPat<(sra GPR64:$rt, (i32 (trunc GPR64:$rs))),
(DSRAV GPR64:$rt, (EXTRACT_SUBREG GPR64:$rs, sub_32))>;
-def : MipsPat<(rotr GPR64:$rt, (i32 (trunc GPR64:$rs))),
- (DROTRV GPR64:$rt, (EXTRACT_SUBREG GPR64:$rs, sub_32))>;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def : MipsPat<(rotr GPR64:$rt, (i32 (trunc GPR64:$rs))),
+ (DROTRV GPR64:$rt, (EXTRACT_SUBREG GPR64:$rs, sub_32))>;
+}
// 32-to-64-bit extension
-def : MipsPat<(i64 (anyext GPR32:$src)), (SLL64_32 GPR32:$src)>;
+def : MipsPat<(i64 (anyext GPR32:$src)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;
def : MipsPat<(i64 (zext GPR32:$src)), (DSRL (DSLL64_32 GPR32:$src), 32)>;
def : MipsPat<(i64 (sext GPR32:$src)), (SLL64_32 GPR32:$src)>;
@@ -530,26 +596,24 @@ def : MipsPat<(i64 (sext_inreg GPR64:$src, i32)),
def : MipsPat<(bswap GPR64:$rt), (DSHD (DSBH GPR64:$rt))>;
// Carry pattern
-def : MipsPat<(subc GPR64:$lhs, GPR64:$rhs),
- (DSUBu GPR64:$lhs, GPR64:$rhs)>;
-let AdditionalPredicates = [NotDSP] in {
+let AdditionalPredicates = [NotInMicroMips] in {
+ def : MipsPat<(subc GPR64:$lhs, GPR64:$rhs),
+ (DSUBu GPR64:$lhs, GPR64:$rhs)>;
def : MipsPat<(addc GPR64:$lhs, GPR64:$rhs),
- (DADDu GPR64:$lhs, GPR64:$rhs)>;
+ (DADDu GPR64:$lhs, GPR64:$rhs)>, ASE_NOT_DSP;
def : MipsPat<(addc GPR64:$lhs, immSExt16:$imm),
- (DADDiu GPR64:$lhs, imm:$imm)>;
+ (DADDiu GPR64:$lhs, imm:$imm)>, ASE_NOT_DSP;
}
// Octeon bbit0/bbit1 MipsPattern
-let Predicates = [HasMips64, HasCnMips] in {
def : MipsPat<(brcond (i32 (seteq (and i64:$lhs, PowerOf2LO:$mask), 0)), bb:$dst),
- (BBIT0 i64:$lhs, (Log2LO PowerOf2LO:$mask), bb:$dst)>;
+ (BBIT0 i64:$lhs, (Log2LO PowerOf2LO:$mask), bb:$dst)>, ASE_MIPS64_CNMIPS;
def : MipsPat<(brcond (i32 (seteq (and i64:$lhs, PowerOf2HI:$mask), 0)), bb:$dst),
- (BBIT032 i64:$lhs, (Log2HI PowerOf2HI:$mask), bb:$dst)>;
+ (BBIT032 i64:$lhs, (Log2HI PowerOf2HI:$mask), bb:$dst)>, ASE_MIPS64_CNMIPS;
def : MipsPat<(brcond (i32 (setne (and i64:$lhs, PowerOf2LO:$mask), 0)), bb:$dst),
- (BBIT1 i64:$lhs, (Log2LO PowerOf2LO:$mask), bb:$dst)>;
+ (BBIT1 i64:$lhs, (Log2LO PowerOf2LO:$mask), bb:$dst)>, ASE_MIPS64_CNMIPS;
def : MipsPat<(brcond (i32 (setne (and i64:$lhs, PowerOf2HI:$mask), 0)), bb:$dst),
- (BBIT132 i64:$lhs, (Log2HI PowerOf2HI:$mask), bb:$dst)>;
-}
+ (BBIT132 i64:$lhs, (Log2HI PowerOf2HI:$mask), bb:$dst)>, ASE_MIPS64_CNMIPS;
// Atomic load patterns.
def : MipsPat<(atomic_load_8 addr:$a), (LB64 addr:$a)>;
@@ -566,39 +630,40 @@ def : MipsPat<(atomic_store_64 addr:$a, GPR64:$v), (SD GPR64:$v, addr:$a)>;
//===----------------------------------------------------------------------===//
// Instruction aliases
//===----------------------------------------------------------------------===//
-def : MipsInstAlias<"move $dst, $src",
- (OR64 GPR64Opnd:$dst, GPR64Opnd:$src, ZERO_64), 1>,
- GPR_64;
-def : MipsInstAlias<"move $dst, $src",
- (DADDu GPR64Opnd:$dst, GPR64Opnd:$src, ZERO_64), 1>,
- GPR_64;
-def : MipsInstAlias<"daddu $rs, $rt, $imm",
- (DADDiu GPR64Opnd:$rs, GPR64Opnd:$rt, simm16_64:$imm),
- 0>, ISA_MIPS3;
-def : MipsInstAlias<"dadd $rs, $rt, $imm",
- (DADDi GPR64Opnd:$rs, GPR64Opnd:$rt, simm16_64:$imm),
- 0>, ISA_MIPS3_NOT_32R6_64R6;
-def : MipsInstAlias<"daddu $rs, $imm",
- (DADDiu GPR64Opnd:$rs, GPR64Opnd:$rs, simm16_64:$imm),
- 0>, ISA_MIPS3;
-def : MipsInstAlias<"dadd $rs, $imm",
- (DADDi GPR64Opnd:$rs, GPR64Opnd:$rs, simm16_64:$imm),
- 0>, ISA_MIPS3_NOT_32R6_64R6;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def : MipsInstAlias<"move $dst, $src",
+ (OR64 GPR64Opnd:$dst, GPR64Opnd:$src, ZERO_64), 1>,
+ GPR_64;
+ def : MipsInstAlias<"move $dst, $src",
+ (DADDu GPR64Opnd:$dst, GPR64Opnd:$src, ZERO_64), 1>,
+ GPR_64;
+ def : MipsInstAlias<"dadd $rs, $rt, $imm",
+ (DADDi GPR64Opnd:$rs, GPR64Opnd:$rt, simm16_64:$imm),
+ 0>, ISA_MIPS3_NOT_32R6_64R6;
+ def : MipsInstAlias<"dadd $rs, $imm",
+ (DADDi GPR64Opnd:$rs, GPR64Opnd:$rs, simm16_64:$imm),
+ 0>, ISA_MIPS3_NOT_32R6_64R6;
+ def : MipsInstAlias<"daddu $rs, $rt, $imm",
+ (DADDiu GPR64Opnd:$rs, GPR64Opnd:$rt, simm16_64:$imm),
+ 0>, ISA_MIPS3;
+ def : MipsInstAlias<"daddu $rs, $imm",
+ (DADDiu GPR64Opnd:$rs, GPR64Opnd:$rs, simm16_64:$imm),
+ 0>, ISA_MIPS3;
+}
def : MipsInstAlias<"dsll $rd, $rt, $rs",
(DSLLV GPR64Opnd:$rd, GPR64Opnd:$rt, GPR32Opnd:$rs), 0>,
ISA_MIPS3;
-def : MipsInstAlias<"dneg $rt, $rs",
- (DSUB GPR64Opnd:$rt, ZERO_64, GPR64Opnd:$rs), 1>,
- ISA_MIPS3;
-def : MipsInstAlias<"dneg $rt",
- (DSUB GPR64Opnd:$rt, ZERO_64, GPR64Opnd:$rt), 0>,
- ISA_MIPS3;
-def : MipsInstAlias<"dnegu $rt, $rs",
- (DSUBu GPR64Opnd:$rt, ZERO_64, GPR64Opnd:$rs), 1>,
- ISA_MIPS3;
-def : MipsInstAlias<"dsubu $rt, $rs, $imm",
- (DADDiu GPR64Opnd:$rt, GPR64Opnd:$rs,
- InvertedImOperand64:$imm), 0>, ISA_MIPS3;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def : MipsInstAlias<"dneg $rt, $rs",
+ (DSUB GPR64Opnd:$rt, ZERO_64, GPR64Opnd:$rs), 1>,
+ ISA_MIPS3;
+ def : MipsInstAlias<"dneg $rt",
+ (DSUB GPR64Opnd:$rt, ZERO_64, GPR64Opnd:$rt), 0>,
+ ISA_MIPS3;
+ def : MipsInstAlias<"dnegu $rt, $rs",
+ (DSUBu GPR64Opnd:$rt, ZERO_64, GPR64Opnd:$rs), 1>,
+ ISA_MIPS3;
+}
def : MipsInstAlias<"dsubi $rs, $rt, $imm",
(DADDi GPR64Opnd:$rs, GPR64Opnd:$rt,
InvertedImOperand64:$imm),
@@ -615,29 +680,35 @@ def : MipsInstAlias<"dsub $rs, $imm",
(DADDi GPR64Opnd:$rs, GPR64Opnd:$rs,
InvertedImOperand64:$imm),
0>, ISA_MIPS3_NOT_32R6_64R6;
-def : MipsInstAlias<"dsubu $rs, $imm",
- (DADDiu GPR64Opnd:$rs, GPR64Opnd:$rs,
- InvertedImOperand64:$imm),
- 0>, ISA_MIPS3;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def : MipsInstAlias<"dsubu $rt, $rs, $imm",
+ (DADDiu GPR64Opnd:$rt, GPR64Opnd:$rs,
+ InvertedImOperand64:$imm), 0>, ISA_MIPS3;
+ def : MipsInstAlias<"dsubu $rs, $imm",
+ (DADDiu GPR64Opnd:$rs, GPR64Opnd:$rs,
+ InvertedImOperand64:$imm), 0>, ISA_MIPS3;
+}
def : MipsInstAlias<"dsra $rd, $rt, $rs",
(DSRAV GPR64Opnd:$rd, GPR64Opnd:$rt, GPR32Opnd:$rs), 0>,
ISA_MIPS3;
-def : MipsInstAlias<"dsrl $rd, $rt, $rs",
- (DSRLV GPR64Opnd:$rd, GPR64Opnd:$rt, GPR32Opnd:$rs), 0>,
- ISA_MIPS3;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def : MipsInstAlias<"dsrl $rd, $rt, $rs",
+ (DSRLV GPR64Opnd:$rd, GPR64Opnd:$rt, GPR32Opnd:$rs), 0>,
+ ISA_MIPS3;
// Two operand (implicit 0 selector) versions:
-def : MipsInstAlias<"dmfc0 $rt, $rd", (DMFC0 GPR64Opnd:$rt, COP0Opnd:$rd, 0), 0>;
-def : MipsInstAlias<"dmtc0 $rt, $rd", (DMTC0 COP0Opnd:$rd, GPR64Opnd:$rt, 0), 0>;
+ def : MipsInstAlias<"dmtc0 $rt, $rd",
+ (DMTC0 COP0Opnd:$rd, GPR64Opnd:$rt, 0), 0>;
+ def : MipsInstAlias<"dmfc0 $rt, $rd",
+ (DMFC0 GPR64Opnd:$rt, COP0Opnd:$rd, 0), 0>;
+}
def : MipsInstAlias<"dmfc2 $rt, $rd", (DMFC2 GPR64Opnd:$rt, COP2Opnd:$rd, 0), 0>;
def : MipsInstAlias<"dmtc2 $rt, $rd", (DMTC2 COP2Opnd:$rd, GPR64Opnd:$rt, 0), 0>;
-let Predicates = [HasMips64, HasCnMips] in {
-def : MipsInstAlias<"synciobdma", (SYNC 0x2), 0>;
-def : MipsInstAlias<"syncs", (SYNC 0x6), 0>;
-def : MipsInstAlias<"syncw", (SYNC 0x4), 0>;
-def : MipsInstAlias<"syncws", (SYNC 0x5), 0>;
-}
+def : MipsInstAlias<"synciobdma", (SYNC 0x2), 0>, ASE_MIPS64_CNMIPS;
+def : MipsInstAlias<"syncs", (SYNC 0x6), 0>, ASE_MIPS64_CNMIPS;
+def : MipsInstAlias<"syncw", (SYNC 0x4), 0>, ASE_MIPS64_CNMIPS;
+def : MipsInstAlias<"syncws", (SYNC 0x5), 0>, ASE_MIPS64_CNMIPS;
// cnMIPS Aliases.
diff --git a/lib/Target/Mips/Mips64r6InstrInfo.td b/lib/Target/Mips/Mips64r6InstrInfo.td
index 6f34dbe28d30..64effbef8a6a 100644
--- a/lib/Target/Mips/Mips64r6InstrInfo.td
+++ b/lib/Target/Mips/Mips64r6InstrInfo.td
@@ -46,35 +46,50 @@ class SCD_R6_ENC : SPECIAL3_LL_SC_FM<OPCODE6_SCD>;
//
//===----------------------------------------------------------------------===//
-class AHI_ATI_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+class AHI_ATI_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ InstrItinClass itin> {
dag OutOperandList = (outs GPROpnd:$rs);
- dag InOperandList = (ins GPROpnd:$rt, simm16:$imm);
+ dag InOperandList = (ins GPROpnd:$rt, simm16_relaxed:$imm);
string AsmString = !strconcat(instr_asm, "\t$rt, $imm");
string Constraints = "$rs = $rt";
+ InstrItinClass Itinerary = itin;
}
-class DALIGN_DESC : ALIGN_DESC_BASE<"dalign", GPR64Opnd, uimm3>;
-class DAHI_DESC : AHI_ATI_DESC_BASE<"dahi", GPR64Opnd>;
-class DATI_DESC : AHI_ATI_DESC_BASE<"dati", GPR64Opnd>;
-class DAUI_DESC : AUI_DESC_BASE<"daui", GPR64Opnd>;
-class DBITSWAP_DESC : BITSWAP_DESC_BASE<"dbitswap", GPR64Opnd>;
-class DCLO_R6_DESC : CLO_R6_DESC_BASE<"dclo", GPR64Opnd>;
-class DCLZ_R6_DESC : CLZ_R6_DESC_BASE<"dclz", GPR64Opnd>;
-class DDIV_DESC : DIVMOD_DESC_BASE<"ddiv", GPR64Opnd, sdiv>;
-class DDIVU_DESC : DIVMOD_DESC_BASE<"ddivu", GPR64Opnd, udiv>;
-class DLSA_R6_DESC : LSA_R6_DESC_BASE<"dlsa", GPR64Opnd, uimm2_plus1>;
-class DMOD_DESC : DIVMOD_DESC_BASE<"dmod", GPR64Opnd, srem>;
-class DMODU_DESC : DIVMOD_DESC_BASE<"dmodu", GPR64Opnd, urem>;
-class DMUH_DESC : MUL_R6_DESC_BASE<"dmuh", GPR64Opnd, mulhs>;
-class DMUHU_DESC : MUL_R6_DESC_BASE<"dmuhu", GPR64Opnd, mulhu>;
-class DMUL_R6_DESC : MUL_R6_DESC_BASE<"dmul", GPR64Opnd, mul>;
-class DMULU_DESC : MUL_R6_DESC_BASE<"dmulu", GPR64Opnd>;
-class LDPC_DESC : PCREL_DESC_BASE<"ldpc", GPR64Opnd, simm18_lsl3>;
-class LLD_R6_DESC : LL_R6_DESC_BASE<"lld", GPR64Opnd>;
-class SCD_R6_DESC : SC_R6_DESC_BASE<"scd", GPR64Opnd>;
+class DALIGN_DESC : ALIGN_DESC_BASE<"dalign", GPR64Opnd, uimm3, II_DALIGN>;
+class DAHI_DESC : AHI_ATI_DESC_BASE<"dahi", GPR64Opnd, II_DAHI>;
+class DATI_DESC : AHI_ATI_DESC_BASE<"dati", GPR64Opnd, II_DATI>;
+class DAUI_DESC : AUI_DESC_BASE<"daui", GPR64Opnd, II_DAUI>;
+class DBITSWAP_DESC : BITSWAP_DESC_BASE<"dbitswap", GPR64Opnd, II_DBITSWAP>;
+class DCLO_R6_DESC : CLO_R6_DESC_BASE<"dclo", GPR64Opnd, II_DCLO>;
+class DCLZ_R6_DESC : CLZ_R6_DESC_BASE<"dclz", GPR64Opnd, II_DCLZ>;
+class DDIV_DESC : DIVMOD_DESC_BASE<"ddiv", GPR64Opnd, II_DDIV, sdiv>;
+class DDIVU_DESC : DIVMOD_DESC_BASE<"ddivu", GPR64Opnd, II_DDIVU, udiv>;
+class DLSA_R6_DESC : LSA_R6_DESC_BASE<"dlsa", GPR64Opnd, uimm2_plus1, II_DLSA>;
+class DMOD_DESC : DIVMOD_DESC_BASE<"dmod", GPR64Opnd, II_DMOD, srem>;
+class DMODU_DESC : DIVMOD_DESC_BASE<"dmodu", GPR64Opnd, II_DMODU, urem>;
+class DMUH_DESC : MUL_R6_DESC_BASE<"dmuh", GPR64Opnd, II_DMUH, mulhs>;
+class DMUHU_DESC : MUL_R6_DESC_BASE<"dmuhu", GPR64Opnd, II_DMUHU, mulhu>;
+class DMUL_R6_DESC : MUL_R6_DESC_BASE<"dmul", GPR64Opnd, II_DMUL, mul>;
+class DMULU_DESC : MUL_R6_DESC_BASE<"dmulu", GPR64Opnd, II_DMUL>;
+class LDPC_DESC : PCREL_DESC_BASE<"ldpc", GPR64Opnd, simm18_lsl3, II_LDPC>;
+class LLD_R6_DESC : LL_R6_DESC_BASE<"lld", GPR64Opnd, mem_simm16, II_LLD>;
+class SCD_R6_DESC : SC_R6_DESC_BASE<"scd", GPR64Opnd, II_SCD>;
class SELEQZ64_DESC : SELEQNE_Z_DESC_BASE<"seleqz", GPR64Opnd>;
class SELNEZ64_DESC : SELEQNE_Z_DESC_BASE<"selnez", GPR64Opnd>;
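+// Compact jump descriptors for 64-bit registers: JIALC64 is a call and
+// implicitly defines RA; JIC64 is a terminating barrier that clobbers AT.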
+class JIALC64_DESC : JMP_IDX_COMPACT_DESC_BASE<"jialc", calloffset16,
+ GPR64Opnd> {
+ bit isCall = 1;
+ list<Register> Defs = [RA];
+}
+
+class JIC64_DESC : JMP_IDX_COMPACT_DESC_BASE<"jic", jmpoffset16, GPR64Opnd> {
+ bit isBarrier = 1;
+ bit isTerminator = 1;
+ list<Register> Defs = [AT];
+}
+
+class LL64_R6_DESC : LL_R6_DESC_BASE<"ll", GPR32Opnd, mem_simm9, II_LL>;
+class SC64_R6_DESC : SC_R6_DESC_BASE<"sc", GPR32Opnd, II_SC>;
//===----------------------------------------------------------------------===//
//
// Instruction Definitions
@@ -88,25 +103,37 @@ let AdditionalPredicates = [NotInMicroMips] in {
def DALIGN : DALIGN_ENC, DALIGN_DESC, ISA_MIPS64R6;
}
def DBITSWAP : DBITSWAP_ENC, DBITSWAP_DESC, ISA_MIPS64R6;
-def DCLO_R6 : DCLO_R6_ENC, DCLO_R6_DESC, ISA_MIPS64R6;
-def DCLZ_R6 : DCLZ_R6_ENC, DCLZ_R6_DESC, ISA_MIPS64R6;
-def DDIV : DDIV_ENC, DDIV_DESC, ISA_MIPS64R6;
-def DDIVU : DDIVU_ENC, DDIVU_DESC, ISA_MIPS64R6;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def DCLO_R6 : R6MMR6Rel, DCLO_R6_ENC, DCLO_R6_DESC, ISA_MIPS64R6;
+ def DCLZ_R6 : R6MMR6Rel, DCLZ_R6_ENC, DCLZ_R6_DESC, ISA_MIPS64R6;
+ def DDIV : DDIV_ENC, DDIV_DESC, ISA_MIPS64R6;
+ def DDIVU : DDIVU_ENC, DDIVU_DESC, ISA_MIPS64R6;
+ def DMOD : DMOD_ENC, DMOD_DESC, ISA_MIPS64R6;
+ def DMODU : DMODU_ENC, DMODU_DESC, ISA_MIPS64R6;
+}
def DLSA_R6 : DLSA_R6_ENC, DLSA_R6_DESC, ISA_MIPS64R6;
-def DMOD : DMOD_ENC, DMOD_DESC, ISA_MIPS64R6;
-def DMODU : DMODU_ENC, DMODU_DESC, ISA_MIPS64R6;
-def DMUH: DMUH_ENC, DMUH_DESC, ISA_MIPS64R6;
-def DMUHU: DMUHU_ENC, DMUHU_DESC, ISA_MIPS64R6;
-def DMUL_R6: DMUL_R6_ENC, DMUL_R6_DESC, ISA_MIPS64R6;
-def DMULU: DMULU_ENC, DMULU_DESC, ISA_MIPS64R6;
-def LDPC: LDPC_ENC, LDPC_DESC, ISA_MIPS64R6;
-def LLD_R6 : LLD_R6_ENC, LLD_R6_DESC, ISA_MIPS32R6;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def DMUH: DMUH_ENC, DMUH_DESC, ISA_MIPS64R6;
+ def DMUHU: DMUHU_ENC, DMUHU_DESC, ISA_MIPS64R6;
+ def DMUL_R6: DMUL_R6_ENC, DMUL_R6_DESC, ISA_MIPS64R6;
+ def DMULU: DMULU_ENC, DMULU_DESC, ISA_MIPS64R6;
+ def LLD_R6 : R6MMR6Rel, LLD_R6_ENC, LLD_R6_DESC, ISA_MIPS64R6;
+}
+def LDPC: R6MMR6Rel, LDPC_ENC, LDPC_DESC, ISA_MIPS64R6;
def SCD_R6 : SCD_R6_ENC, SCD_R6_DESC, ISA_MIPS32R6;
let DecoderNamespace = "Mips32r6_64r6_GP64" in {
def SELEQZ64 : SELEQZ_ENC, SELEQZ64_DESC, ISA_MIPS32R6, GPR_64;
def SELNEZ64 : SELNEZ_ENC, SELNEZ64_DESC, ISA_MIPS32R6, GPR_64;
}
-
+let AdditionalPredicates = [NotInMicroMips],
+ DecoderNamespace = "Mips32r6_64r6_PTR64" in {
+ def LL64_R6 : LL_R6_ENC, LL64_R6_DESC, PTR_64, ISA_MIPS64R6;
+ def SC64_R6 : SC_R6_ENC, SC64_R6_DESC, PTR_64, ISA_MIPS64R6;
+}
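+// The 64-bit register variants below are codegen-only; they are not exposed
+// to the assembler or disassembler.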
+let isCodeGenOnly = 1 in {
+def JIALC64 : JIALC_ENC, JIALC64_DESC, ISA_MIPS64R6;
+def JIC64 : JIC_ENC, JIC64_DESC, ISA_MIPS64R6;
+}
//===----------------------------------------------------------------------===//
//
// Instruction Aliases
@@ -115,6 +142,9 @@ let DecoderNamespace = "Mips32r6_64r6_GP64" in {
def : MipsInstAlias<"jr $rs", (JALR64 ZERO_64, GPR64Opnd:$rs), 1>, ISA_MIPS64R6;
+def : MipsInstAlias<"jrc $rs", (JIC64 GPR64Opnd:$rs, 0), 1>, ISA_MIPS64R6;
+
+def : MipsInstAlias<"jalrc $rs", (JIALC64 GPR64Opnd:$rs, 0), 1>, ISA_MIPS64R6;
//===----------------------------------------------------------------------===//
//
// Patterns and Pseudo Instructions
diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp
index 957529376b37..3686c2fe3ec4 100644
--- a/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -313,7 +313,6 @@ const char *MipsAsmPrinter::getCurrentABIString() const {
case MipsABIInfo::ABI::O32: return "abi32";
case MipsABIInfo::ABI::N32: return "abiN32";
case MipsABIInfo::ABI::N64: return "abi64";
- case MipsABIInfo::ABI::EABI: return "eabi32"; // TODO: handle eabi64
default: llvm_unreachable("Unknown Mips ABI");
}
}
@@ -326,9 +325,10 @@ void MipsAsmPrinter::EmitFunctionEntryLabel() {
if (Subtarget->isTargetNaCl())
EmitAlignment(std::max(MF->getAlignment(), MIPS_NACL_BUNDLE_ALIGN));
- if (Subtarget->inMicroMipsMode())
+ if (Subtarget->inMicroMipsMode()) {
TS.emitDirectiveSetMicroMips();
- else
+ TS.setUsesMicroMips();
+ } else
TS.emitDirectiveSetNoMicroMips();
if (Subtarget->inMips16Mode())
@@ -620,24 +620,6 @@ void MipsAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
if (closeP) O << ")";
}
-void MipsAsmPrinter::printUnsignedImm(const MachineInstr *MI, int opNum,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(opNum);
- if (MO.isImm())
- O << (unsigned short int)MO.getImm();
- else
- printOperand(MI, opNum, O);
-}
-
-void MipsAsmPrinter::printUnsignedImm8(const MachineInstr *MI, int opNum,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(opNum);
- if (MO.isImm())
- O << (unsigned short int)(unsigned char)MO.getImm();
- else
- printOperand(MI, opNum, O);
-}
-
void MipsAsmPrinter::
printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O) {
// Load/Store memory operands -- imm($reg)
@@ -687,6 +669,12 @@ printRegisterList(const MachineInstr *MI, int opNum, raw_ostream &O) {
}
void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
+ MipsTargetStreamer &TS = getTargetStreamer();
+
+ // MipsTargetStreamer has an initialization order problem when emitting an
+ // object file directly (see MipsTargetELFStreamer for full details). Work
+ // around it by re-initializing the PIC state here.
+ TS.setPic(OutContext.getObjectFileInfo()->isPositionIndependent());
// Compute MIPS architecture attributes based on the default subtarget
// that we'd have constructed. Module level directives aren't LTO
@@ -702,14 +690,13 @@ void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
bool IsABICalls = STI.isABICalls();
const MipsABIInfo &ABI = MTM.getABI();
if (IsABICalls) {
- getTargetStreamer().emitDirectiveAbiCalls();
- Reloc::Model RM = TM.getRelocationModel();
+ TS.emitDirectiveAbiCalls();
// FIXME: This condition should be a lot more complicated than it is here.
// Ideally it should test for properties of the ABI and not the ABI
// itself.
// For the moment, I'm only correcting enough to make MIPS-IV work.
- if (RM == Reloc::Static && !ABI.IsN64())
- getTargetStreamer().emitDirectiveOptionPic0();
+ if (!isPositionIndependent() && !ABI.IsN64())
+ TS.emitDirectiveOptionPic0();
}
// Tell the assembler which ABI we are using
@@ -720,33 +707,24 @@ void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
// NaN: At the moment we only support:
// 1. .nan legacy (default)
// 2. .nan 2008
- STI.isNaN2008() ? getTargetStreamer().emitDirectiveNaN2008()
- : getTargetStreamer().emitDirectiveNaNLegacy();
+ STI.isNaN2008() ? TS.emitDirectiveNaN2008()
+ : TS.emitDirectiveNaNLegacy();
// TODO: handle O64 ABI
- if (ABI.IsEABI()) {
- if (STI.isGP32bit())
- OutStreamer->SwitchSection(OutContext.getELFSection(".gcc_compiled_long32",
- ELF::SHT_PROGBITS, 0));
- else
- OutStreamer->SwitchSection(OutContext.getELFSection(".gcc_compiled_long64",
- ELF::SHT_PROGBITS, 0));
- }
-
- getTargetStreamer().updateABIInfo(STI);
+ TS.updateABIInfo(STI);
// We should always emit a '.module fp=...' but binutils 2.24 does not accept
// it. We therefore emit it when it contradicts the ABI defaults (-mfpxx or
// -mfp64) and omit it otherwise.
if (ABI.IsO32() && (STI.isABI_FPXX() || STI.isFP64bit()))
- getTargetStreamer().emitDirectiveModuleFP();
+ TS.emitDirectiveModuleFP();
// We should always emit a '.module [no]oddspreg' but binutils 2.24 does not
// accept it. We therefore emit it when it contradicts the default or an
// option has changed the default (i.e. FPXX) and omit it otherwise.
if (ABI.IsO32() && (!STI.useOddSPReg() || STI.isABI_FPXX()))
- getTargetStreamer().emitDirectiveModuleOddSPReg();
+ TS.emitDirectiveModuleOddSPReg();
}
void MipsAsmPrinter::emitInlineAsmStart() const {
@@ -990,7 +968,7 @@ void MipsAsmPrinter::EmitFPCallStub(
OutStreamer->EmitLabel(Stub);
// Only handle non-pic for now.
- assert(TM.getRelocationModel() != Reloc::PIC_ &&
+ assert(!isPositionIndependent() &&
"should not be here if we are compiling pic");
TS.emitDirectiveSetReorder();
//
@@ -1071,10 +1049,9 @@ void MipsAsmPrinter::NaClAlignIndirectJumpTargets(MachineFunction &MF) {
}
// If basic block address is taken, block can be target of indirect branch.
- for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
- MBB != E; ++MBB) {
- if (MBB->hasAddressTaken())
- MBB->setAlignment(MIPS_NACL_BUNDLE_ALIGN);
+ for (auto &MBB : MF) {
+ if (MBB.hasAddressTaken())
+ MBB.setAlignment(MIPS_NACL_BUNDLE_ALIGN);
}
}
diff --git a/lib/Target/Mips/MipsAsmPrinter.h b/lib/Target/Mips/MipsAsmPrinter.h
index a7f3304a3da8..f30141fc918f 100644
--- a/lib/Target/Mips/MipsAsmPrinter.h
+++ b/lib/Target/Mips/MipsAsmPrinter.h
@@ -134,8 +134,6 @@ public:
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &O) override;
void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
- void printUnsignedImm(const MachineInstr *MI, int opNum, raw_ostream &O);
- void printUnsignedImm8(const MachineInstr *MI, int opNum, raw_ostream &O);
void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
void printMemOperandEA(const MachineInstr *MI, int opNum, raw_ostream &O);
void printFCCOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
diff --git a/lib/Target/Mips/MipsCCState.cpp b/lib/Target/Mips/MipsCCState.cpp
index d82063e3d2a9..7af988c1f64d 100644
--- a/lib/Target/Mips/MipsCCState.cpp
+++ b/lib/Target/Mips/MipsCCState.cpp
@@ -26,8 +26,8 @@ static bool isF128SoftLibCall(const char *CallSym) {
"ceill", "copysignl", "cosl", "exp2l",
"expl", "floorl", "fmal", "fmodl",
"log10l", "log2l", "logl", "nearbyintl",
- "powl", "rintl", "sinl", "sqrtl",
- "truncl"};
+ "powl", "rintl", "roundl", "sinl",
+ "sqrtl", "truncl"};
// Check that LibCalls is sorted alphabetically.
auto Comp = [](const char *S1, const char *S2) { return strcmp(S1, S2) < 0; };
diff --git a/lib/Target/Mips/MipsCallingConv.td b/lib/Target/Mips/MipsCallingConv.td
index 0b4b7785af67..a57cb7badc17 100644
--- a/lib/Target/Mips/MipsCallingConv.td
+++ b/lib/Target/Mips/MipsCallingConv.td
@@ -212,48 +212,6 @@ def RetCC_MipsN : CallingConv<[
]>;
//===----------------------------------------------------------------------===//
-// Mips EABI Calling Convention
-//===----------------------------------------------------------------------===//
-
-def CC_MipsEABI : CallingConv<[
- // Promote i8/i16 arguments to i32.
- CCIfType<[i8, i16], CCPromoteToType<i32>>,
-
- // Integer arguments are passed in integer registers.
- CCIfType<[i32], CCAssignToReg<[A0, A1, A2, A3, T0, T1, T2, T3]>>,
-
- // Single fp arguments are passed in pairs within 32-bit mode
- CCIfType<[f32], CCIfSubtarget<"isSingleFloat()",
- CCAssignToReg<[F12, F13, F14, F15, F16, F17, F18, F19]>>>,
-
- CCIfType<[f32], CCIfSubtargetNot<"isSingleFloat()",
- CCAssignToReg<[F12, F14, F16, F18]>>>,
-
- // The first 4 double fp arguments are passed in single fp registers.
- CCIfType<[f64], CCIfSubtargetNot<"isSingleFloat()",
- CCAssignToReg<[D6, D7, D8, D9]>>>,
-
- // Integer values get stored in stack slots that are 4 bytes in
- // size and 4-byte aligned.
- CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
-
- // Integer values get stored in stack slots that are 8 bytes in
- // size and 8-byte aligned.
- CCIfType<[f64], CCIfSubtargetNot<"isSingleFloat()", CCAssignToStack<8, 8>>>
-]>;
-
-def RetCC_MipsEABI : CallingConv<[
- // i32 are returned in registers V0, V1
- CCIfType<[i32], CCAssignToReg<[V0, V1]>>,
-
- // f32 are returned in registers F0, F1
- CCIfType<[f32], CCAssignToReg<[F0, F1]>>,
-
- // f64 are returned in register D0
- CCIfType<[f64], CCIfSubtargetNot<"isSingleFloat()", CCAssignToReg<[D0]>>>
-]>;
-
-//===----------------------------------------------------------------------===//
// Mips FastCC Calling Convention
//===----------------------------------------------------------------------===//
def CC_MipsO32_FastCC : CallingConv<[
@@ -325,7 +283,6 @@ def CC_Mips_FastCC : CallingConv<[
// Stack parameter slots for i32 and f32 are 32-bit words and 4-byte aligned.
CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
- CCIfSubtarget<"isABI_EABI()", CCDelegateTo<CC_MipsEABI>>,
CCIfSubtarget<"isABI_O32()", CCDelegateTo<CC_MipsO32_FastCC>>,
CCDelegateTo<CC_MipsN_FastCC>
]>;
@@ -335,7 +292,6 @@ def CC_Mips_FastCC : CallingConv<[
//===----------------------------------------------------------------------===//
def RetCC_Mips : CallingConv<[
- CCIfSubtarget<"isABI_EABI()", CCDelegateTo<RetCC_MipsEABI>>,
CCIfSubtarget<"isABI_N32()", CCDelegateTo<RetCC_MipsN>>,
CCIfSubtarget<"isABI_N64()", CCDelegateTo<RetCC_MipsN>>,
CCDelegateTo<RetCC_MipsO32>
@@ -377,8 +333,6 @@ def CC_Mips_FixedArg : CallingConv<[
CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_Mips_FastCC>>,
- // FIXME: There wasn't an EABI case in the original code and it seems unlikely
- // that it's the same as CC_MipsN
CCIfSubtarget<"isABI_O32()", CCDelegateTo<CC_MipsO32_FP>>,
CCDelegateTo<CC_MipsN>
]>;
@@ -386,8 +340,6 @@ def CC_Mips_FixedArg : CallingConv<[
def CC_Mips_VarArg : CallingConv<[
CCIfByVal<CCDelegateTo<CC_Mips_ByVal>>,
- // FIXME: There wasn't an EABI case in the original code and it seems unlikely
- // that it's the same as CC_MipsN_VarArg
CCIfSubtarget<"isABI_O32()", CCDelegateTo<CC_MipsO32_FP>>,
CCDelegateTo<CC_MipsN_VarArg>
]>;
diff --git a/lib/Target/Mips/MipsCondMov.td b/lib/Target/Mips/MipsCondMov.td
index 2d96d9b48c0b..fd4517f25335 100644
--- a/lib/Target/Mips/MipsCondMov.td
+++ b/lib/Target/Mips/MipsCondMov.td
@@ -133,16 +133,14 @@ def MOVZ_I_S : MMRel, CMov_I_F_FT<"movz.s", GPR32Opnd, FGR32Opnd, II_MOVZ_S>,
let isCodeGenOnly = 1 in
def MOVZ_I64_S : CMov_I_F_FT<"movz.s", GPR64Opnd, FGR32Opnd, II_MOVZ_S>,
- CMov_I_F_FM<18, 16>, INSN_MIPS4_32_NOT_32R6_64R6,
- AdditionalRequires<[HasMips64]>;
+ CMov_I_F_FM<18, 16>, INSN_MIPS4_32_NOT_32R6_64R6, GPR_64;
def MOVN_I_S : MMRel, CMov_I_F_FT<"movn.s", GPR32Opnd, FGR32Opnd, II_MOVN_S>,
CMov_I_F_FM<19, 16>, INSN_MIPS4_32_NOT_32R6_64R6;
let isCodeGenOnly = 1 in
def MOVN_I64_S : CMov_I_F_FT<"movn.s", GPR64Opnd, FGR32Opnd, II_MOVN_S>,
- CMov_I_F_FM<19, 16>, INSN_MIPS4_32_NOT_32R6_64R6,
- AdditionalRequires<[IsGP64bit]>;
+ CMov_I_F_FM<19, 16>, INSN_MIPS4_32_NOT_32R6_64R6, GPR_64;
def MOVZ_I_D32 : MMRel, CMov_I_F_FT<"movz.d", GPR32Opnd, AFGR64Opnd,
II_MOVZ_D>, CMov_I_F_FM<18, 17>,
@@ -169,16 +167,14 @@ def MOVT_I : MMRel, CMov_F_I_FT<"movt", GPR32Opnd, II_MOVT, MipsCMovFP_T>,
let isCodeGenOnly = 1 in
def MOVT_I64 : CMov_F_I_FT<"movt", GPR64Opnd, II_MOVT, MipsCMovFP_T>,
- CMov_F_I_FM<1>, INSN_MIPS4_32_NOT_32R6_64R6,
- AdditionalRequires<[IsGP64bit]>;
+ CMov_F_I_FM<1>, INSN_MIPS4_32_NOT_32R6_64R6, GPR_64;
def MOVF_I : MMRel, CMov_F_I_FT<"movf", GPR32Opnd, II_MOVF, MipsCMovFP_F>,
CMov_F_I_FM<0>, INSN_MIPS4_32_NOT_32R6_64R6;
let isCodeGenOnly = 1 in
def MOVF_I64 : CMov_F_I_FT<"movf", GPR64Opnd, II_MOVF, MipsCMovFP_F>,
- CMov_F_I_FM<0>, INSN_MIPS4_32_NOT_32R6_64R6,
- AdditionalRequires<[IsGP64bit]>;
+ CMov_F_I_FM<0>, INSN_MIPS4_32_NOT_32R6_64R6, GPR_64;
def MOVT_S : MMRel, CMov_F_F_FT<"movt.s", FGR32Opnd, II_MOVT_S, MipsCMovFP_T>,
CMov_F_F_FM<16, 1>, INSN_MIPS4_32_NOT_32R6_64R6;
diff --git a/lib/Target/Mips/MipsConstantIslandPass.cpp b/lib/Target/Mips/MipsConstantIslandPass.cpp
index ea8c5871fa0e..1ea48e0439d4 100644
--- a/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -97,16 +97,6 @@ static unsigned int branchTargetOperand(MachineInstr *MI) {
llvm_unreachable("Unknown branch type");
}
-static bool isUnconditionalBranch(unsigned int Opcode) {
- switch (Opcode) {
- default: return false;
- case Mips::Bimm16:
- case Mips::BimmX16:
- case Mips::JalB16:
- return true;
- }
-}
-
static unsigned int longformBranchOpcode(unsigned int Opcode) {
switch (Opcode) {
case Mips::Bimm16:
@@ -342,8 +332,6 @@ namespace {
/// the branch fix up pass.
bool HasFarJump;
- const TargetMachine &TM;
- bool IsPIC;
const MipsSubtarget *STI;
const Mips16InstrInfo *TII;
MipsFunctionInfo *MFI;
@@ -364,10 +352,9 @@ namespace {
public:
static char ID;
- MipsConstantIslands(TargetMachine &tm)
- : MachineFunctionPass(ID), TM(tm),
- IsPIC(TM.getRelocationModel() == Reloc::PIC_), STI(nullptr),
- MF(nullptr), MCP(nullptr), PrescannedForConstants(false) {}
+ MipsConstantIslands()
+ : MachineFunctionPass(ID), STI(nullptr), MF(nullptr), MCP(nullptr),
+ PrescannedForConstants(false) {}
const char *getPassName() const override {
return "Mips Constant Islands";
@@ -375,9 +362,14 @@ namespace {
bool runOnMachineFunction(MachineFunction &F) override;
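+ // The pass runs after register allocation; require that all virtual
+ // registers have been allocated.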
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
void doInitialPlacement(std::vector<MachineInstr*> &CPEMIs);
CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
- unsigned getCPELogAlign(const MachineInstr *CPEMI);
+ unsigned getCPELogAlign(const MachineInstr &CPEMI);
void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
unsigned getOffsetOf(MachineInstr *MI) const;
unsigned getUserOffset(CPUser&) const;
@@ -389,7 +381,7 @@ namespace {
const CPUser &U);
void computeBlockSize(MachineBasicBlock *MBB);
- MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
+ MachineBasicBlock *splitBlockBeforeInstr(MachineInstr &MI);
void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
void adjustBBOffsetsAfter(MachineBasicBlock *BB);
bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI);
@@ -437,10 +429,9 @@ void MipsConstantIslands::dumpBBs() {
}
});
}
-/// createMipsLongBranchPass - Returns a pass that converts branches to long
-/// branches.
-FunctionPass *llvm::createMipsConstantIslandPass(MipsTargetMachine &tm) {
- return new MipsConstantIslands(tm);
+/// Returns a pass that places constants into "constant island" blocks.
+FunctionPass *llvm::createMipsConstantIslandPass() {
+ return new MipsConstantIslands();
}
bool MipsConstantIslands::runOnMachineFunction(MachineFunction &mf) {
@@ -629,14 +620,14 @@ MipsConstantIslands::CPEntry
/// getCPELogAlign - Returns the required alignment of the constant pool entry
/// represented by CPEMI. Alignment is measured in log2(bytes) units.
-unsigned MipsConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
- assert(CPEMI && CPEMI->getOpcode() == Mips::CONSTPOOL_ENTRY);
+unsigned MipsConstantIslands::getCPELogAlign(const MachineInstr &CPEMI) {
+ assert(CPEMI.getOpcode() == Mips::CONSTPOOL_ENTRY);
// Everything is 4-byte aligned unless AlignConstantIslands is set.
if (!AlignConstantIslands)
return 2;
- unsigned CPI = CPEMI->getOperand(1).getIndex();
+ unsigned CPI = CPEMI.getOperand(1).getIndex();
assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
unsigned Align = MCP->getConstants()[CPI].getAlignment();
assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
@@ -663,21 +654,17 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
adjustBBOffsetsAfter(&MF->front());
// Now go back through the instructions and build up our data structures.
- for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
- MBBI != E; ++MBBI) {
- MachineBasicBlock &MBB = *MBBI;
-
+ for (MachineBasicBlock &MBB : *MF) {
// If this block doesn't fall through into the next MBB, then this is
// 'water' where a constant pool island could be placed.
if (!BBHasFallthrough(&MBB))
WaterList.push_back(&MBB);
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I) {
- if (I->isDebugValue())
+ for (MachineInstr &MI : MBB) {
+ if (MI.isDebugValue())
continue;
- int Opc = I->getOpcode();
- if (I->isBranch()) {
+ int Opc = MI.getOpcode();
+ if (MI.isBranch()) {
bool isCond = false;
unsigned Bits = 0;
unsigned Scale = 1;
@@ -746,7 +733,7 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
}
// Record this immediate branch.
unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
- ImmBranches.push_back(ImmBranch(I, MaxOffs, isCond, UOpc));
+ ImmBranches.push_back(ImmBranch(&MI, MaxOffs, isCond, UOpc));
}
if (Opc == Mips::CONSTPOOL_ENTRY)
@@ -754,8 +741,8 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
// Scan the instructions for constant pool operands.
- for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
- if (I->getOperand(op).isCPI()) {
+ for (unsigned op = 0, e = MI.getNumOperands(); op != e; ++op)
+ if (MI.getOperand(op).isCPI()) {
// We found one. The addressing mode tells us the max displacement
// from the PC that this instruction permits.
@@ -784,12 +771,12 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
break;
}
// Remember that this is a user of a CP entry.
- unsigned CPI = I->getOperand(op).getIndex();
+ unsigned CPI = MI.getOperand(op).getIndex();
MachineInstr *CPEMI = CPEMIs[CPI];
unsigned MaxOffs = ((1 << Bits)-1) * Scale;
unsigned LongFormMaxOffs = ((1 << LongFormBits)-1) * LongFormScale;
- CPUsers.push_back(CPUser(I, CPEMI, MaxOffs, NegOk,
- LongFormMaxOffs, LongFormOpcode));
+ CPUsers.push_back(CPUser(&MI, CPEMI, MaxOffs, NegOk, LongFormMaxOffs,
+ LongFormOpcode));
// Increment corresponding CPEntry reference count.
CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
@@ -813,10 +800,8 @@ void MipsConstantIslands::computeBlockSize(MachineBasicBlock *MBB) {
BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
BBI.Size = 0;
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
- ++I)
- BBI.Size += TII->GetInstSizeInBytes(I);
-
+ for (const MachineInstr &MI : *MBB)
+ BBI.Size += TII->GetInstSizeInBytes(MI);
}
/// getOffsetOf - Return the current offset of the specified machine instruction
@@ -833,7 +818,7 @@ unsigned MipsConstantIslands::getOffsetOf(MachineInstr *MI) const {
// Sum instructions before MI in MBB.
for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
assert(I != MBB->end() && "Didn't find MI in its own basic block?");
- Offset += TII->GetInstSizeInBytes(I);
+ Offset += TII->GetInstSizeInBytes(*I);
}
return Offset;
}
@@ -872,9 +857,9 @@ unsigned MipsConstantIslands::getUserOffset(CPUser &U) const {
/// Split the basic block containing MI into two blocks, which are joined by
/// an unconditional branch. Update data structures and renumber blocks to
/// account for this change, and return the newly created block.
-MachineBasicBlock *MipsConstantIslands::splitBlockBeforeInstr
- (MachineInstr *MI) {
- MachineBasicBlock *OrigBB = MI->getParent();
+MachineBasicBlock *
+MipsConstantIslands::splitBlockBeforeInstr(MachineInstr &MI) {
+ MachineBasicBlock *OrigBB = MI.getParent();
// Create a new MBB for the code after the OrigBB.
MachineBasicBlock *NewBB =
@@ -964,7 +949,7 @@ bool MipsConstantIslands::isOffsetInRange(unsigned UserOffset,
bool MipsConstantIslands::isWaterInRange(unsigned UserOffset,
MachineBasicBlock* Water, CPUser &U,
unsigned &Growth) {
- unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
+ unsigned CPELogAlign = getCPELogAlign(*U.CPEMI);
unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
unsigned NextBlockOffset, NextBlockAlignment;
MachineFunction::const_iterator NextBlock = ++Water->getIterator();
@@ -985,7 +970,7 @@ bool MipsConstantIslands::isWaterInRange(unsigned UserOffset,
Growth = CPEEnd - NextBlockOffset;
// Compute the padding that would go at the end of the CPE to align the next
// block.
- Growth += OffsetToAlignment(CPEEnd, 1u << NextBlockAlignment);
+ Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment);
// If the CPE is to be inserted before the instruction, that will raise
// the offset of the instruction. Also account for unknown alignment padding
@@ -1246,7 +1231,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
CPUser &U = CPUsers[CPUserIndex];
MachineInstr *UserMI = U.MI;
MachineInstr *CPEMI = U.CPEMI;
- unsigned CPELogAlign = getCPELogAlign(CPEMI);
+ unsigned CPELogAlign = getCPELogAlign(*CPEMI);
MachineBasicBlock *UserMBB = UserMI->getParent();
const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
@@ -1312,11 +1297,12 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
unsigned CPUIndex = CPUserIndex+1;
unsigned NumCPUsers = CPUsers.size();
//MachineInstr *LastIT = 0;
- for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
+ for (unsigned Offset = UserOffset + TII->GetInstSizeInBytes(*UserMI);
Offset < BaseInsertOffset;
- Offset += TII->GetInstSizeInBytes(MI), MI = std::next(MI)) {
+ Offset += TII->GetInstSizeInBytes(*MI), MI = std::next(MI)) {
assert(MI != UserMBB->end() && "Fell off end of block");
- if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
+ if (CPUIndex < NumCPUsers &&
+ CPUsers[CPUIndex].MI == static_cast<MachineInstr *>(MI)) {
CPUser &U = CPUsers[CPUIndex];
if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
// Shift insertion point by one unit of alignment so it is within reach.
@@ -1332,8 +1318,7 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
}
}
- --MI;
- NewMBB = splitBlockBeforeInstr(MI);
+ NewMBB = splitBlockBeforeInstr(*--MI);
}
/// handleConstantPoolUser - Analyze the specified user, checking to see if it
@@ -1426,7 +1411,7 @@ bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
++NumCPEs;
// Mark the basic block as aligned as required by the const-pool entry.
- NewIsland->setAlignment(getCPELogAlign(U.CPEMI));
+ NewIsland->setAlignment(getCPELogAlign(*U.CPEMI));
// Increase the size of the island block to account for the new entry.
BBInfo[NewIsland->getNumber()].Size += Size;
@@ -1460,7 +1445,7 @@ void MipsConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
CPEBB->setAlignment(0);
} else
// Entries are sorted by descending alignment, so realign from the front.
- CPEBB->setAlignment(getCPELogAlign(CPEBB->begin()));
+ CPEBB->setAlignment(getCPELogAlign(*CPEBB->begin()));
adjustBBOffsetsAfter(CPEBB);
// An island has only one predecessor BB and one successor BB. Check if
@@ -1610,7 +1595,7 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
++NumCBrFixed;
if (BMI != MI) {
if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
- isUnconditionalBranch(BMI->getOpcode())) {
+ BMI->isUnconditionalBranch()) {
// Last MI in the BB is an unconditional branch. Can we simply invert the
// condition and swap destinations:
// beqz L1
@@ -1634,10 +1619,10 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
if (NeedSplit) {
- splitBlockBeforeInstr(MI);
+ splitBlockBeforeInstr(*MI);
// No need for the branch to the next block. We're adding an unconditional
// branch to the destination.
- int delta = TII->GetInstSizeInBytes(&MBB->back());
+ int delta = TII->GetInstSizeInBytes(MBB->back());
BBInfo[MBB->getNumber()].Size -= delta;
MBB->back().eraseFromParent();
// BBInfo[SplitBB].Offset is wrong temporarily, fixed below
@@ -1659,14 +1644,14 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
.addMBB(NextBB);
}
Br.MI = &MBB->back();
- BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
+ BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(MBB->back());
BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
- BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
+ BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(MBB->back());
unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
// Remove the old conditional branch. It may or may not still be in MBB.
- BBInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(MI);
+ BBInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(*MI);
MI->eraseFromParent();
adjustBBOffsetsAfter(MBB);
return true;
@@ -1710,4 +1695,3 @@ void MipsConstantIslands::prescanForConstants() {
}
}
}
-
diff --git a/lib/Target/Mips/MipsDSPInstrFormats.td b/lib/Target/Mips/MipsDSPInstrFormats.td
index f959bd4d8db3..0ceb1858fb09 100644
--- a/lib/Target/Mips/MipsDSPInstrFormats.td
+++ b/lib/Target/Mips/MipsDSPInstrFormats.td
@@ -32,6 +32,10 @@ class ISA_DSPR2 {
list<Predicate> InsnPredicates = [HasDSPR2];
}
+class ISA_DSPR3 {
+ list<Predicate> InsnPredicates = [HasDSPR3];
+}
+
// Fields.
class Field6<bits<6> val> {
bits<6> V = val;
diff --git a/lib/Target/Mips/MipsDSPInstrInfo.td b/lib/Target/Mips/MipsDSPInstrInfo.td
index da6f174e2a19..ac9a81b1bb2f 100644
--- a/lib/Target/Mips/MipsDSPInstrInfo.td
+++ b/lib/Target/Mips/MipsDSPInstrInfo.td
@@ -16,7 +16,6 @@ def immZExt1 : ImmLeaf<i32, [{return isUInt<1>(Imm);}]>;
def immZExt2 : ImmLeaf<i32, [{return isUInt<2>(Imm);}]>;
def immZExt3 : ImmLeaf<i32, [{return isUInt<3>(Imm);}]>;
def immZExt4 : ImmLeaf<i32, [{return isUInt<4>(Imm);}]>;
-def immZExt7 : ImmLeaf<i32, [{return isUInt<7>(Imm);}]>;
def immZExt8 : ImmLeaf<i32, [{return isUInt<8>(Imm);}]>;
def immZExt10 : ImmLeaf<i32, [{return isUInt<10>(Imm);}]>;
def immSExt6 : ImmLeaf<i32, [{return isInt<6>(Imm);}]>;
@@ -287,6 +286,7 @@ class CMP_EQ_QB_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
string AsmString = !strconcat(instr_asm, "\t$rs, $rt");
list<dag> Pattern = [(OpNode ROS:$rs, ROT:$rt)];
InstrItinClass Itinerary = itin;
+ string BaseOpcode = instr_asm;
}
class CMP_EQ_QB_R3_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
@@ -324,9 +324,10 @@ class ABSQ_S_PH_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
}
class REPL_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- ImmLeaf immPat, InstrItinClass itin, RegisterOperand RO> {
+ Operand ImmOp, ImmLeaf immPat, InstrItinClass itin,
+ RegisterOperand RO> {
dag OutOperandList = (outs RO:$rd);
- dag InOperandList = (ins uimm16:$imm);
+ dag InOperandList = (ins ImmOp:$imm);
string AsmString = !strconcat(instr_asm, "\t$rd, $imm");
list<dag> Pattern = [(set RO:$rd, (OpNode immPat:$imm))];
InstrItinClass Itinerary = itin;
@@ -401,7 +402,7 @@ class EXTR_W_TY1_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
class EXTR_W_TY1_R1_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
dag OutOperandList = (outs GPR32Opnd:$rt);
- dag InOperandList = (ins ACC64DSPOpnd:$ac, uimm16:$shift_rs);
+ dag InOperandList = (ins ACC64DSPOpnd:$ac, uimm5:$shift_rs);
string AsmString = !strconcat(instr_asm, "\t$rt, $ac, $shift_rs");
InstrItinClass Itinerary = itin;
string BaseOpcode = instr_asm;
@@ -440,7 +441,7 @@ class MTHLIP_DESC_BASE<string instr_asm, SDPatternOperator OpNode> {
class RDDSP_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
dag OutOperandList = (outs GPR32Opnd:$rd);
- dag InOperandList = (ins uimm16:$mask);
+ dag InOperandList = (ins uimm10:$mask);
string AsmString = !strconcat(instr_asm, "\t$rd, $mask");
list<dag> Pattern = [(set GPR32Opnd:$rd, (OpNode immZExt10:$mask))];
InstrItinClass Itinerary = itin;
@@ -513,14 +514,16 @@ class BPOSGE32_PSEUDO_DESC_BASE<SDPatternOperator OpNode, InstrItinClass itin> :
bit usesCustomInserter = 1;
}
-class BPOSGE32_DESC_BASE<string instr_asm, InstrItinClass itin> {
+class BPOSGE32_DESC_BASE<string instr_asm, DAGOperand opnd,
+ InstrItinClass itin> {
dag OutOperandList = (outs);
- dag InOperandList = (ins brtarget:$offset);
+ dag InOperandList = (ins opnd:$offset);
string AsmString = !strconcat(instr_asm, "\t$offset");
InstrItinClass Itinerary = itin;
bit isBranch = 1;
bit isTerminator = 1;
bit hasDelaySlot = 1;
+ string BaseOpcode = instr_asm;
}
class INSV_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
@@ -845,11 +848,11 @@ class BITREV_DESC : ABSQ_S_PH_R2_DESC_BASE<"bitrev", int_mips_bitrev,
class PACKRL_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"packrl.ph", int_mips_packrl_ph,
NoItinerary, DSPROpnd, DSPROpnd>;
-class REPL_QB_DESC : REPL_DESC_BASE<"repl.qb", int_mips_repl_qb, immZExt8,
- NoItinerary, DSPROpnd>;
+class REPL_QB_DESC : REPL_DESC_BASE<"repl.qb", int_mips_repl_qb, uimm8,
+ immZExt8, NoItinerary, DSPROpnd>;
-class REPL_PH_DESC : REPL_DESC_BASE<"repl.ph", int_mips_repl_ph, immZExt10,
- NoItinerary, DSPROpnd>;
+class REPL_PH_DESC : REPL_DESC_BASE<"repl.ph", int_mips_repl_ph, uimm10,
+ immZExt10, NoItinerary, DSPROpnd>;
class REPLV_QB_DESC : ABSQ_S_PH_R2_DESC_BASE<"replv.qb", int_mips_repl_qb,
NoItinerary, DSPROpnd, GPR32Opnd>;
@@ -871,7 +874,7 @@ class LHX_DESC : LX_DESC_BASE<"lhx", int_mips_lhx, NoItinerary>;
class LBUX_DESC : LX_DESC_BASE<"lbux", int_mips_lbux, NoItinerary>;
-class BPOSGE32_DESC : BPOSGE32_DESC_BASE<"bposge32", NoItinerary>;
+class BPOSGE32_DESC : BPOSGE32_DESC_BASE<"bposge32", brtarget, NoItinerary>;
// Extr
class EXTP_DESC : EXTR_W_TY1_R1_DESC_BASE<"extp", MipsEXTP, NoItinerary>,
@@ -1115,7 +1118,7 @@ def ADDQ_S_W : DspMMRel, ADDQ_S_W_ENC, ADDQ_S_W_DESC;
def SUBQ_S_W : DspMMRel, SUBQ_S_W_ENC, SUBQ_S_W_DESC;
def ADDSC : DspMMRel, ADDSC_ENC, ADDSC_DESC;
def ADDWC : DspMMRel, ADDWC_ENC, ADDWC_DESC;
-def MODSUB : MODSUB_ENC, MODSUB_DESC;
+def MODSUB : DspMMRel, MODSUB_ENC, MODSUB_DESC;
def RADDU_W_QB : DspMMRel, RADDU_W_QB_ENC, RADDU_W_QB_DESC;
def ABSQ_S_PH : DspMMRel, ABSQ_S_PH_ENC, ABSQ_S_PH_DESC;
def ABSQ_S_W : DspMMRel, ABSQ_S_W_ENC, ABSQ_S_W_DESC;
@@ -1154,7 +1157,7 @@ def MULEU_S_PH_QBR : DspMMRel, MULEU_S_PH_QBR_ENC, MULEU_S_PH_QBR_DESC;
def MULEQ_S_W_PHL : DspMMRel, MULEQ_S_W_PHL_ENC, MULEQ_S_W_PHL_DESC;
def MULEQ_S_W_PHR : DspMMRel, MULEQ_S_W_PHR_ENC, MULEQ_S_W_PHR_DESC;
def MULQ_RS_PH : DspMMRel, MULQ_RS_PH_ENC, MULQ_RS_PH_DESC;
-def MULSAQ_S_W_PH : MULSAQ_S_W_PH_ENC, MULSAQ_S_W_PH_DESC;
+def MULSAQ_S_W_PH : DspMMRel, MULSAQ_S_W_PH_ENC, MULSAQ_S_W_PH_DESC;
def MAQ_S_W_PHL : DspMMRel, MAQ_S_W_PHL_ENC, MAQ_S_W_PHL_DESC;
def MAQ_S_W_PHR : DspMMRel, MAQ_S_W_PHR_ENC, MAQ_S_W_PHR_DESC;
def MAQ_SA_W_PHL : DspMMRel, MAQ_SA_W_PHL_ENC, MAQ_SA_W_PHL_DESC;
@@ -1177,16 +1180,16 @@ def MADD_DSP : DspMMRel, MADD_DSP_ENC, MADD_DSP_DESC;
def MADDU_DSP : DspMMRel, MADDU_DSP_ENC, MADDU_DSP_DESC;
def MSUB_DSP : DspMMRel, MSUB_DSP_ENC, MSUB_DSP_DESC;
def MSUBU_DSP : DspMMRel, MSUBU_DSP_ENC, MSUBU_DSP_DESC;
-def CMPU_EQ_QB : CMPU_EQ_QB_ENC, CMPU_EQ_QB_DESC;
-def CMPU_LT_QB : CMPU_LT_QB_ENC, CMPU_LT_QB_DESC;
-def CMPU_LE_QB : CMPU_LE_QB_ENC, CMPU_LE_QB_DESC;
-def CMPGU_EQ_QB : CMPGU_EQ_QB_ENC, CMPGU_EQ_QB_DESC;
-def CMPGU_LT_QB : CMPGU_LT_QB_ENC, CMPGU_LT_QB_DESC;
-def CMPGU_LE_QB : CMPGU_LE_QB_ENC, CMPGU_LE_QB_DESC;
-def CMP_EQ_PH : CMP_EQ_PH_ENC, CMP_EQ_PH_DESC;
-def CMP_LT_PH : CMP_LT_PH_ENC, CMP_LT_PH_DESC;
-def CMP_LE_PH : CMP_LE_PH_ENC, CMP_LE_PH_DESC;
-def BITREV : BITREV_ENC, BITREV_DESC;
+def CMPU_EQ_QB : DspMMRel, CMPU_EQ_QB_ENC, CMPU_EQ_QB_DESC;
+def CMPU_LT_QB : DspMMRel, CMPU_LT_QB_ENC, CMPU_LT_QB_DESC;
+def CMPU_LE_QB : DspMMRel, CMPU_LE_QB_ENC, CMPU_LE_QB_DESC;
+def CMPGU_EQ_QB : DspMMRel, CMPGU_EQ_QB_ENC, CMPGU_EQ_QB_DESC;
+def CMPGU_LT_QB : DspMMRel, CMPGU_LT_QB_ENC, CMPGU_LT_QB_DESC;
+def CMPGU_LE_QB : DspMMRel, CMPGU_LE_QB_ENC, CMPGU_LE_QB_DESC;
+def CMP_EQ_PH : DspMMRel, CMP_EQ_PH_ENC, CMP_EQ_PH_DESC;
+def CMP_LT_PH : DspMMRel, CMP_LT_PH_ENC, CMP_LT_PH_DESC;
+def CMP_LE_PH : DspMMRel, CMP_LE_PH_ENC, CMP_LE_PH_DESC;
+def BITREV : DspMMRel, BITREV_ENC, BITREV_DESC;
def PACKRL_PH : DspMMRel, PACKRL_PH_ENC, PACKRL_PH_DESC;
def REPL_QB : DspMMRel, REPL_QB_ENC, REPL_QB_DESC;
def REPL_PH : DspMMRel, REPL_PH_ENC, REPL_PH_DESC;
@@ -1197,7 +1200,9 @@ def PICK_PH : DspMMRel, PICK_PH_ENC, PICK_PH_DESC;
def LWX : DspMMRel, LWX_ENC, LWX_DESC;
def LHX : DspMMRel, LHX_ENC, LHX_DESC;
def LBUX : DspMMRel, LBUX_ENC, LBUX_DESC;
-def BPOSGE32 : BPOSGE32_ENC, BPOSGE32_DESC;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def BPOSGE32 : DspMMRel, BPOSGE32_ENC, BPOSGE32_DESC;
+}
def INSV : DspMMRel, INSV_ENC, INSV_DESC;
def EXTP : DspMMRel, EXTP_ENC, EXTP_DESC;
def EXTPV : DspMMRel, EXTPV_ENC, EXTPV_DESC;
@@ -1224,9 +1229,9 @@ def ADDU_PH : DspMMRel, ADDU_PH_ENC, ADDU_PH_DESC, ISA_DSPR2;
def ADDU_S_PH : DspMMRel, ADDU_S_PH_ENC, ADDU_S_PH_DESC, ISA_DSPR2;
def SUBU_PH : DspMMRel, SUBU_PH_ENC, SUBU_PH_DESC, ISA_DSPR2;
def SUBU_S_PH : DspMMRel, SUBU_S_PH_ENC, SUBU_S_PH_DESC, ISA_DSPR2;
-def CMPGDU_EQ_QB : CMPGDU_EQ_QB_ENC, CMPGDU_EQ_QB_DESC, ISA_DSPR2;
-def CMPGDU_LT_QB : CMPGDU_LT_QB_ENC, CMPGDU_LT_QB_DESC, ISA_DSPR2;
-def CMPGDU_LE_QB : CMPGDU_LE_QB_ENC, CMPGDU_LE_QB_DESC, ISA_DSPR2;
+def CMPGDU_EQ_QB : DspMMRel, CMPGDU_EQ_QB_ENC, CMPGDU_EQ_QB_DESC, ISA_DSPR2;
+def CMPGDU_LT_QB : DspMMRel, CMPGDU_LT_QB_ENC, CMPGDU_LT_QB_DESC, ISA_DSPR2;
+def CMPGDU_LE_QB : DspMMRel, CMPGDU_LE_QB_ENC, CMPGDU_LE_QB_DESC, ISA_DSPR2;
def ABSQ_S_QB : DspMMRel, ABSQ_S_QB_ENC, ABSQ_S_QB_DESC, ISA_DSPR2;
def ADDUH_QB : DspMMRel, ADDUH_QB_ENC, ADDUH_QB_DESC, ISA_DSPR2;
def ADDUH_R_QB : DspMMRel, ADDUH_R_QB_ENC, ADDUH_R_QB_DESC, ISA_DSPR2;
@@ -1253,7 +1258,7 @@ def DPAX_W_PH : DspMMRel, DPAX_W_PH_ENC, DPAX_W_PH_DESC, ISA_DSPR2;
def DPSX_W_PH : DspMMRel, DPSX_W_PH_ENC, DPSX_W_PH_DESC, ISA_DSPR2;
def DPSQX_S_W_PH : DspMMRel, DPSQX_S_W_PH_ENC, DPSQX_S_W_PH_DESC, ISA_DSPR2;
def DPSQX_SA_W_PH : DspMMRel, DPSQX_SA_W_PH_ENC, DPSQX_SA_W_PH_DESC, ISA_DSPR2;
-def MULSA_W_PH : MULSA_W_PH_ENC, MULSA_W_PH_DESC, ISA_DSPR2;
+def MULSA_W_PH : DspMMRel, MULSA_W_PH_ENC, MULSA_W_PH_DESC, ISA_DSPR2;
def PRECR_QB_PH : DspMMRel, PRECR_QB_PH_ENC, PRECR_QB_PH_DESC, ISA_DSPR2;
def PRECR_SRA_PH_W : DspMMRel, PRECR_SRA_PH_W_ENC, PRECR_SRA_PH_W_DESC, ISA_DSPR2;
def PRECR_SRA_R_PH_W : DspMMRel, PRECR_SRA_R_PH_W_ENC, PRECR_SRA_R_PH_W_DESC, ISA_DSPR2;
@@ -1263,12 +1268,12 @@ def SHRA_R_QB : DspMMRel, SHRA_R_QB_ENC, SHRA_R_QB_DESC, ISA_DSPR2;
def SHRAV_R_QB : DspMMRel, SHRAV_R_QB_ENC, SHRAV_R_QB_DESC, ISA_DSPR2;
def SHRL_PH : DspMMRel, SHRL_PH_ENC, SHRL_PH_DESC, ISA_DSPR2;
def SHRLV_PH : DspMMRel, SHRLV_PH_ENC, SHRLV_PH_DESC, ISA_DSPR2;
-def APPEND : APPEND_ENC, APPEND_DESC, ISA_DSPR2;
-def BALIGN : BALIGN_ENC, BALIGN_DESC, ISA_DSPR2;
+def APPEND : DspMMRel, APPEND_ENC, APPEND_DESC, ISA_DSPR2;
+def BALIGN : DspMMRel, BALIGN_ENC, BALIGN_DESC, ISA_DSPR2;
def PREPEND : DspMMRel, PREPEND_ENC, PREPEND_DESC, ISA_DSPR2;
// Pseudos.
-let isPseudo = 1, isCodeGenOnly = 1 in {
+let isPseudo = 1, isCodeGenOnly = 1, hasNoSchedulingInfo = 1 in {
// Pseudo instructions for loading and storing accumulator registers.
def LOAD_ACC64DSP : Load<"", ACC64DSPOpnd>;
def STORE_ACC64DSP : Store<"", ACC64DSPOpnd>;
diff --git a/lib/Target/Mips/MipsDelaySlotFiller.cpp b/lib/Target/Mips/MipsDelaySlotFiller.cpp
index 8313d909df2a..b5ba770df7bd 100644
--- a/lib/Target/Mips/MipsDelaySlotFiller.cpp
+++ b/lib/Target/Mips/MipsDelaySlotFiller.cpp
@@ -62,6 +62,28 @@ static cl::opt<bool> DisableBackwardSearch(
cl::desc("Disallow MIPS delay filler to search backward."),
cl::Hidden);
+enum CompactBranchPolicy {
+ CB_Never, ///< The policy 'never' may in some circumstances or for some
+ ///< ISAs not be absolutely adhered to.
+ CB_Optimal, ///< Optimal is the default and will produce compact branches
+ ///< when delay slots cannot be filled.
+ CB_Always ///< The policy 'always' may in some circumstances not be
+ ///< absolutely adhered to, as there may not be a corresponding
+ ///< compact form of a branch.
+};
+
+static cl::opt<CompactBranchPolicy> MipsCompactBranchPolicy(
+ "mips-compact-branches",cl::Optional,
+ cl::init(CB_Optimal),
+ cl::desc("MIPS Specific: Compact branch policy."),
+ cl::values(
+ clEnumValN(CB_Never, "never", "Do not use compact branches if possible."),
+ clEnumValN(CB_Optimal, "optimal", "Use compact branches where appropriate (default)."),
+ clEnumValN(CB_Always, "always", "Always use compact branches if possible."),
+ clEnumValEnd
+ )
+);
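+// Example (assumed cl-based tool such as llc):
+//   llc -mips-compact-branches=always ...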
+
namespace {
typedef MachineBasicBlock::iterator Iter;
typedef MachineBasicBlock::reverse_iterator ReverseIter;
@@ -189,6 +211,11 @@ namespace {
return Changed;
}
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<MachineBranchProbabilityInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
@@ -197,11 +224,8 @@ namespace {
private:
bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
- Iter replaceWithCompactBranch(MachineBasicBlock &MBB,
- Iter Branch, DebugLoc DL);
-
- Iter replaceWithCompactJump(MachineBasicBlock &MBB,
- Iter Jump, DebugLoc DL);
+ Iter replaceWithCompactBranch(MachineBasicBlock &MBB, Iter Branch,
+ const DebugLoc &DL);
/// This function checks if it is valid to move Candidate to the delay slot
/// and returns true if it isn't. It also updates memory and register
@@ -505,46 +529,18 @@ getUnderlyingObjects(const MachineInstr &MI,
}
// Replace Branch with the compact branch instruction.
-Iter Filler::replaceWithCompactBranch(MachineBasicBlock &MBB,
- Iter Branch, DebugLoc DL) {
- const MipsInstrInfo *TII =
- MBB.getParent()->getSubtarget<MipsSubtarget>().getInstrInfo();
-
- unsigned NewOpcode =
- (((unsigned) Branch->getOpcode()) == Mips::BEQ) ? Mips::BEQZC_MM
- : Mips::BNEZC_MM;
-
- const MCInstrDesc &NewDesc = TII->get(NewOpcode);
- MachineInstrBuilder MIB = BuildMI(MBB, Branch, DL, NewDesc);
-
- MIB.addReg(Branch->getOperand(0).getReg());
- MIB.addMBB(Branch->getOperand(2).getMBB());
+Iter Filler::replaceWithCompactBranch(MachineBasicBlock &MBB, Iter Branch,
+ const DebugLoc &DL) {
+ const MipsSubtarget &STI = MBB.getParent()->getSubtarget<MipsSubtarget>();
+ const MipsInstrInfo *TII = STI.getInstrInfo();
- Iter tmpIter = Branch;
- Branch = std::prev(Branch);
- MBB.erase(tmpIter);
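+ // Create the compact form in front of the original branch, then erase the
+ // original, which now immediately follows the new instruction.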
+ unsigned NewOpcode = TII->getEquivalentCompactForm(Branch);
+ Branch = TII->genInstrWithNewOpc(NewOpcode, Branch);
+ std::next(Branch)->eraseFromParent();
return Branch;
}
-// Replace Jumps with the compact jump instruction.
-Iter Filler::replaceWithCompactJump(MachineBasicBlock &MBB,
- Iter Jump, DebugLoc DL) {
- const MipsInstrInfo *TII =
- MBB.getParent()->getSubtarget<MipsSubtarget>().getInstrInfo();
-
- const MCInstrDesc &NewDesc = TII->get(Mips::JRC16_MM);
- MachineInstrBuilder MIB = BuildMI(MBB, Jump, DL, NewDesc);
-
- MIB.addReg(Jump->getOperand(0).getReg());
-
- Iter tmpIter = Jump;
- Jump = std::prev(Jump);
- MBB.erase(tmpIter);
-
- return Jump;
-}
-
// For given opcode returns opcode of corresponding instruction with short
// delay slot.
static int getEquivalentCallShort(int Opcode) {
@@ -572,6 +568,12 @@ bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
bool InMicroMipsMode = STI.inMicroMipsMode();
const MipsInstrInfo *TII = STI.getInstrInfo();
+ if (InMicroMipsMode && STI.hasMips32r6()) {
+ // This is a microMIPS32r6 or microMIPS64r6 processor. Delay slots for
+ // branching instructions are not needed.
+ return Changed;
+ }
+
for (Iter I = MBB.begin(); I != MBB.end(); ++I) {
if (!hasUnoccupiedSlot(&*I))
continue;
@@ -583,55 +585,49 @@ bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
if (!DisableDelaySlotFiller && (TM.getOptLevel() != CodeGenOpt::None)) {
bool Filled = false;
- if (searchBackward(MBB, I)) {
- Filled = true;
- } else if (I->isTerminator()) {
- if (searchSuccBBs(MBB, I)) {
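+ // Under the 'always' policy, skip searching for a delay slot filler when
+ // the instruction has a compact form; it is replaced with that form below.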
+ if (MipsCompactBranchPolicy.getValue() != CB_Always ||
+ !TII->getEquivalentCompactForm(I)) {
+ if (searchBackward(MBB, I)) {
+ Filled = true;
+ } else if (I->isTerminator()) {
+ if (searchSuccBBs(MBB, I)) {
+ Filled = true;
+ }
+ } else if (searchForward(MBB, I)) {
Filled = true;
}
- } else if (searchForward(MBB, I)) {
- Filled = true;
}
if (Filled) {
// Get instruction with delay slot.
- MachineBasicBlock::instr_iterator DSI(I);
+ MachineBasicBlock::instr_iterator DSI = I.getInstrIterator();
- if (InMicroMipsMode && TII->GetInstSizeInBytes(&*std::next(DSI)) == 2 &&
+ if (InMicroMipsMode && TII->GetInstSizeInBytes(*std::next(DSI)) == 2 &&
DSI->isCall()) {
// If instruction in delay slot is 16b change opcode to
// corresponding instruction with short delay slot.
DSI->setDesc(TII->get(getEquivalentCallShort(DSI->getOpcode())));
}
-
continue;
}
}
- // If instruction is BEQ or BNE with one ZERO register, then instead of
- // adding NOP replace this instruction with the corresponding compact
- // branch instruction, i.e. BEQZC or BNEZC.
- unsigned Opcode = I->getOpcode();
- if (InMicroMipsMode) {
- switch (Opcode) {
- case Mips::BEQ:
- case Mips::BNE:
- if (((unsigned) I->getOperand(1).getReg()) == Mips::ZERO) {
- I = replaceWithCompactBranch(MBB, I, I->getDebugLoc());
- continue;
- }
- break;
- case Mips::JR:
- case Mips::PseudoReturn:
- case Mips::PseudoIndirectBranch:
- // For microMIPS the PseudoReturn and PseudoIndirectBranch are allways
- // expanded to JR_MM, so they can be replaced with JRC16_MM.
- I = replaceWithCompactJump(MBB, I, I->getDebugLoc());
- continue;
- default:
- break;
- }
+ // For microMIPS, if the instruction is a BEQ or BNE with one ZERO
+ // register, then instead of adding a NOP replace it with the corresponding
+ // compact branch instruction, i.e. BEQZC or BNEZC. Additionally,
+ // PseudoReturn and PseudoIndirectBranch are expanded to JR_MM, so they can
+ // be replaced with JRC16_MM.
+
+ // For MIPSR6, attempt to produce the corresponding compact (no delay slot)
+ // form of the CTI. For indirect jumps this avoids inserting a NOP, and for
+ // branches it will hopefully avoid the need for one.
+ if ((InMicroMipsMode ||
+ (STI.hasMips32r6() && MipsCompactBranchPolicy != CB_Never)) &&
+ TII->getEquivalentCompactForm(I)) {
+ I = replaceWithCompactBranch(MBB, I, I->getDebugLoc());
+ continue;
}
+
// Bundle the NOP to the instruction with the delay slot.
BuildMI(MBB, std::next(I), I->getDebugLoc(), TII->get(Mips::NOP));
MIBundleBuilder(MBB, I, std::next(I, 2));
@@ -696,7 +692,7 @@ bool Filler::searchRange(MachineBasicBlock &MBB, IterTy Begin, IterTy End,
bool InMicroMipsMode = STI.inMicroMipsMode();
const MipsInstrInfo *TII = STI.getInstrInfo();
unsigned Opcode = (*Slot).getOpcode();
- if (InMicroMipsMode && TII->GetInstSizeInBytes(&(*CurrI)) == 2 &&
+ if (InMicroMipsMode && TII->GetInstSizeInBytes(*CurrI) == 2 &&
(Opcode == Mips::JR || Opcode == Mips::PseudoIndirectBranch ||
Opcode == Mips::PseudoReturn))
continue;
@@ -819,7 +815,7 @@ Filler::getBranch(MachineBasicBlock &MBB, const MachineBasicBlock &Dst) const {
SmallVector<MachineOperand, 2> Cond;
MipsInstrInfo::BranchType R =
- TII->AnalyzeBranch(MBB, TrueBB, FalseBB, Cond, false, BranchInstrs);
+ TII->analyzeBranch(MBB, TrueBB, FalseBB, Cond, false, BranchInstrs);
if ((R == MipsInstrInfo::BT_None) || (R == MipsInstrInfo::BT_NoBranch))
return std::make_pair(R, nullptr);
diff --git a/lib/Target/Mips/MipsEVAInstrInfo.td b/lib/Target/Mips/MipsEVAInstrInfo.td
index 36c9694cbadd..26df263d228b 100644
--- a/lib/Target/Mips/MipsEVAInstrInfo.td
+++ b/lib/Target/Mips/MipsEVAInstrInfo.td
@@ -51,7 +51,8 @@ class PREFE_ENC : SPECIAL3_EVA_LOAD_STORE_FM<OPCODE6_PREFE>;
//===----------------------------------------------------------------------===//
// Memory Load/Store EVA descriptions
-class LOAD_EVA_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+class LOAD_EVA_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs GPROpnd:$rt);
dag InOperandList = (ins mem_simm9:$addr);
string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
@@ -59,30 +60,34 @@ class LOAD_EVA_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
string DecoderMethod = "DecodeMemEVA";
bit canFoldAsLoad = 1;
bit mayLoad = 1;
+ InstrItinClass Itinerary = itin;
}
-class LBE_DESC : LOAD_EVA_DESC_BASE<"lbe", GPR32Opnd>;
-class LBuE_DESC : LOAD_EVA_DESC_BASE<"lbue", GPR32Opnd>;
-class LHE_DESC : LOAD_EVA_DESC_BASE<"lhe", GPR32Opnd>;
-class LHuE_DESC : LOAD_EVA_DESC_BASE<"lhue", GPR32Opnd>;
-class LWE_DESC : LOAD_EVA_DESC_BASE<"lwe", GPR32Opnd>;
+class LBE_DESC : LOAD_EVA_DESC_BASE<"lbe", GPR32Opnd, II_LBE>;
+class LBuE_DESC : LOAD_EVA_DESC_BASE<"lbue", GPR32Opnd, II_LBUE>;
+class LHE_DESC : LOAD_EVA_DESC_BASE<"lhe", GPR32Opnd, II_LHE>;
+class LHuE_DESC : LOAD_EVA_DESC_BASE<"lhue", GPR32Opnd, II_LHUE>;
+class LWE_DESC : LOAD_EVA_DESC_BASE<"lwe", GPR32Opnd, II_LWE>;
class STORE_EVA_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
- SDPatternOperator OpNode = null_frag> {
+ SDPatternOperator OpNode = null_frag,
+ InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs);
dag InOperandList = (ins GPROpnd:$rt, mem_simm9:$addr);
string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
list<dag> Pattern = [];
string DecoderMethod = "DecodeMemEVA";
bit mayStore = 1;
+ InstrItinClass Itinerary = itin;
}
-class SBE_DESC : STORE_EVA_DESC_BASE<"sbe", GPR32Opnd>;
-class SHE_DESC : STORE_EVA_DESC_BASE<"she", GPR32Opnd>;
-class SWE_DESC : STORE_EVA_DESC_BASE<"swe", GPR32Opnd>;
+class SBE_DESC : STORE_EVA_DESC_BASE<"sbe", GPR32Opnd, null_frag, II_SBE>;
+class SHE_DESC : STORE_EVA_DESC_BASE<"she", GPR32Opnd, null_frag, II_SHE>;
+class SWE_DESC : STORE_EVA_DESC_BASE<"swe", GPR32Opnd, null_frag, II_SWE>;
// Load/Store Left/Right EVA descriptions
-class LOAD_LEFT_RIGHT_EVA_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+class LOAD_LEFT_RIGHT_EVA_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs GPROpnd:$rt);
dag InOperandList = (ins mem_simm9:$addr, GPROpnd:$src);
string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
@@ -90,35 +95,41 @@ class LOAD_LEFT_RIGHT_EVA_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
string DecoderMethod = "DecodeMemEVA";
string Constraints = "$src = $rt";
bit canFoldAsLoad = 1;
+ InstrItinClass Itinerary = itin;
}
-class LWLE_DESC : LOAD_LEFT_RIGHT_EVA_DESC_BASE<"lwle", GPR32Opnd>;
-class LWRE_DESC : LOAD_LEFT_RIGHT_EVA_DESC_BASE<"lwre", GPR32Opnd>;
+class LWLE_DESC : LOAD_LEFT_RIGHT_EVA_DESC_BASE<"lwle", GPR32Opnd, II_LWLE>;
+class LWRE_DESC : LOAD_LEFT_RIGHT_EVA_DESC_BASE<"lwre", GPR32Opnd, II_LWRE>;
-class STORE_LEFT_RIGHT_EVA_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+class STORE_LEFT_RIGHT_EVA_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs);
dag InOperandList = (ins GPROpnd:$rt, mem_simm9:$addr);
string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
list<dag> Pattern = [];
string DecoderMethod = "DecodeMemEVA";
+ InstrItinClass Itinerary = itin;
}
-class SWLE_DESC : LOAD_LEFT_RIGHT_EVA_DESC_BASE<"swle", GPR32Opnd>;
-class SWRE_DESC : LOAD_LEFT_RIGHT_EVA_DESC_BASE<"swre", GPR32Opnd>;
+class SWLE_DESC : LOAD_LEFT_RIGHT_EVA_DESC_BASE<"swle", GPR32Opnd, II_SWLE>;
+class SWRE_DESC : LOAD_LEFT_RIGHT_EVA_DESC_BASE<"swre", GPR32Opnd, II_SWRE>;
// Load-linked EVA, Store-conditional EVA descriptions
-class LLE_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+class LLE_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs GPROpnd:$rt);
dag InOperandList = (ins mem_simm9:$addr);
string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
list<dag> Pattern = [];
bit mayLoad = 1;
string DecoderMethod = "DecodeMemEVA";
+ InstrItinClass Itinerary = itin;
}
-class LLE_DESC : LLE_DESC_BASE<"lle", GPR32Opnd>;
+class LLE_DESC : LLE_DESC_BASE<"lle", GPR32Opnd, II_LLE>;
-class SCE_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
+class SCE_DESC_BASE<string instr_asm, RegisterOperand GPROpnd,
+ InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs GPROpnd:$dst);
dag InOperandList = (ins GPROpnd:$rt, mem_simm9:$addr);
string AsmString = !strconcat(instr_asm, "\t$rt, $addr");
@@ -126,30 +137,34 @@ class SCE_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
bit mayStore = 1;
string Constraints = "$rt = $dst";
string DecoderMethod = "DecodeMemEVA";
+ InstrItinClass Itinerary = itin;
}
-class SCE_DESC : SCE_DESC_BASE<"sce", GPR32Opnd>;
+class SCE_DESC : SCE_DESC_BASE<"sce", GPR32Opnd, II_SCE>;
-class TLB_DESC_BASE<string instr_asm> {
+class TLB_DESC_BASE<string instr_asm, InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs);
dag InOperandList = (ins);
string AsmString = instr_asm;
list<dag> Pattern = [];
+ InstrItinClass Itinerary = itin;
}
-class TLBINV_DESC : TLB_DESC_BASE<"tlbinv">;
-class TLBINVF_DESC : TLB_DESC_BASE<"tlbinvf">;
+class TLBINV_DESC : TLB_DESC_BASE<"tlbinv", II_TLBINV>;
+class TLBINVF_DESC : TLB_DESC_BASE<"tlbinvf", II_TLBINVF>;
-class CACHEE_DESC_BASE<string instr_asm, Operand MemOpnd> {
+class CACHEE_DESC_BASE<string instr_asm, Operand MemOpnd,
+ InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs);
dag InOperandList = (ins MemOpnd:$addr, uimm5:$hint);
string AsmString = !strconcat(instr_asm, "\t$hint, $addr");
list<dag> Pattern = [];
string DecoderMethod = "DecodeCacheeOp_CacheOpR6";
+ InstrItinClass Itinerary = itin;
}
-class CACHEE_DESC : CACHEE_DESC_BASE<"cachee", mem>;
-class PREFE_DESC : CACHEE_DESC_BASE<"prefe", mem>;
+class CACHEE_DESC : CACHEE_DESC_BASE<"cachee", mem_simm9, II_CACHEE>;
+class PREFE_DESC : CACHEE_DESC_BASE<"prefe", mem_simm9, II_PREFE>;
//===----------------------------------------------------------------------===//
//
@@ -185,8 +200,10 @@ def LLE : LLE_ENC, LLE_DESC, INSN_EVA;
def SCE : SCE_ENC, SCE_DESC, INSN_EVA;
}
-def TLBINV : TLBINV_ENC, TLBINV_DESC, INSN_EVA;
-def TLBINVF : TLBINVF_ENC, TLBINVF_DESC, INSN_EVA;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def TLBINV : TLBINV_ENC, TLBINV_DESC, INSN_EVA;
+ def TLBINVF : TLBINVF_ENC, TLBINVF_DESC, INSN_EVA;
+}
def CACHEE : CACHEE_ENC, CACHEE_DESC, INSN_EVA;
def PREFE : PREFE_ENC, PREFE_DESC, INSN_EVA;
diff --git a/lib/Target/Mips/MipsFastISel.cpp b/lib/Target/Mips/MipsFastISel.cpp
index e9eaf810637a..19c201d26b24 100644
--- a/lib/Target/Mips/MipsFastISel.cpp
+++ b/lib/Target/Mips/MipsFastISel.cpp
@@ -1,5 +1,18 @@
-//===-- MipsastISel.cpp - Mips FastISel implementation
-//---------------------===//
+//===-- MipsFastISel.cpp - Mips FastISel implementation --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file defines the MIPS-specific support for the FastISel class.
+/// Some of the target-specific code is generated by tablegen in the file
+/// MipsGenFastISel.inc, which is #included here.
+///
+//===----------------------------------------------------------------------===//
#include "MipsCCState.h"
#include "MipsInstrInfo.h"
@@ -192,9 +205,10 @@ public:
TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()) {
MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
Context = &funcInfo.Fn->getContext();
- bool ISASupported = !Subtarget->hasMips32r6() && Subtarget->hasMips32();
+ bool ISASupported = !Subtarget->hasMips32r6() &&
+ !Subtarget->inMicroMipsMode() && Subtarget->hasMips32();
TargetSupported =
- ISASupported && (TM.getRelocationModel() == Reloc::PIC_) &&
+ ISASupported && TM.isPositionIndependent() &&
(static_cast<const MipsTargetMachine &>(TM).getABI().IsO32());
UnsupportedFPMode = Subtarget->isFP64bit();
}
@@ -691,11 +705,10 @@ bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);
emitInst(Opc).addReg(LeftReg).addReg(RightReg).addReg(
Mips::FCC0, RegState::ImplicitDefine);
- MachineInstrBuilder MI = emitInst(CondMovOpc, ResultReg)
- .addReg(RegWithOne)
- .addReg(Mips::FCC0)
- .addReg(RegWithZero, RegState::Implicit);
- MI->tieOperands(0, 3);
+ emitInst(CondMovOpc, ResultReg)
+ .addReg(RegWithOne)
+ .addReg(Mips::FCC0)
+ .addReg(RegWithZero);
break;
}
}
@@ -802,7 +815,7 @@ bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr,
unsigned Offset = Addr.getOffset();
MachineFrameInfo &MFI = *MF->getFrameInfo();
MachineMemOperand *MMO = MF->getMachineMemOperand(
- MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
+ MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
MFI.getObjectSize(FI), Align);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
.addReg(SrcReg)
@@ -943,7 +956,7 @@ bool MipsFastISel::selectFPExt(const Instruction *I) {
return false;
unsigned SrcReg =
- getRegForValue(Src); // his must be a 32 bit floating point register class
+      getRegForValue(Src); // this must be a 32-bit floating point register class
// maybe we should handle this differently
if (!SrcReg)
return false;
@@ -1180,7 +1193,7 @@ bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
// for now (will return false). We need to determine the right alignment
// based on the normal alignment for the underlying machine type.
//
- unsigned ArgSize = RoundUpToAlignment(ArgVT.getSizeInBits(), 4);
+ unsigned ArgSize = alignTo(ArgVT.getSizeInBits(), 4);
unsigned BEAlign = 0;
if (ArgSize < 8 && !Subtarget->isLittle())
@@ -1207,7 +1220,7 @@ bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
unsigned NumBytes) {
CallingConv::ID CC = CLI.CallConv;
- emitInst(Mips::ADJCALLSTACKUP).addImm(16);
+ emitInst(Mips::ADJCALLSTACKUP).addImm(16).addImm(0);
if (RetVT != MVT::isVoid) {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
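[Editorial note] The added .addImm(0) gives ADJCALLSTACKUP the two-immediate
shape of ISD::CALLSEQ_END: the first immediate is the caller's stack
adjustment, the second the number of bytes popped by the callee. A minimal
sketch of the general form, assuming (as in the O32 handling here) that the
callee never pops anything, so the second immediate is always zero:

    // Illustrative sketch, not part of the patch: tear down a call frame of
    // NumBytes; the trailing zero is the callee-popped byte count.
    emitInst(Mips::ADJCALLSTACKUP).addImm(NumBytes).addImm(0);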
diff --git a/lib/Target/Mips/MipsFrameLowering.cpp b/lib/Target/Mips/MipsFrameLowering.cpp
index a74c8abd2e2d..fe6f332f2bdf 100644
--- a/lib/Target/Mips/MipsFrameLowering.cpp
+++ b/lib/Target/Mips/MipsFrameLowering.cpp
@@ -13,7 +13,6 @@
#include "MipsFrameLowering.h"
#include "MCTargetDesc/MipsBaseInfo.h"
-#include "MipsAnalyzeImmediate.h"
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
@@ -24,7 +23,6 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
@@ -122,7 +120,7 @@ uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const {
// Conservatively assume all callee-saved registers will be saved.
for (const MCPhysReg *R = TRI.getCalleeSavedRegs(&MF); *R; ++R) {
unsigned Size = TRI.getMinimalPhysRegClass(*R)->getSize();
- Offset = RoundUpToAlignment(Offset + Size, Size);
+ Offset = alignTo(Offset + Size, Size);
}
unsigned MaxAlign = MFI->getMaxAlignment();
@@ -133,18 +131,18 @@ uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const {
// Iterate over other objects.
for (unsigned I = 0, E = MFI->getObjectIndexEnd(); I != E; ++I)
- Offset = RoundUpToAlignment(Offset + MFI->getObjectSize(I), MaxAlign);
+ Offset = alignTo(Offset + MFI->getObjectSize(I), MaxAlign);
// Call frame.
if (MFI->adjustsStack() && hasReservedCallFrame(MF))
- Offset = RoundUpToAlignment(Offset + MFI->getMaxCallFrameSize(),
- std::max(MaxAlign, getStackAlignment()));
+ Offset = alignTo(Offset + MFI->getMaxCallFrameSize(),
+ std::max(MaxAlign, getStackAlignment()));
- return RoundUpToAlignment(Offset, getStackAlignment());
+ return alignTo(Offset, getStackAlignment());
}
// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions
-void MipsFrameLowering::
+MachineBasicBlock::iterator MipsFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
unsigned SP = STI.getABI().IsN64() ? Mips::SP_64 : Mips::SP;
@@ -157,5 +155,5 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
STI.getInstrInfo()->adjustStackPtr(SP, Amount, MBB, I);
}
- MBB.erase(I);
+ return MBB.erase(I);
}
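[Editorial note] Returning the result of MBB.erase(I) lets callers resume
iteration at the instruction following the erased pseudo instead of holding a
now-invalid iterator. A sketch of the calling pattern this enables (the loop
and the name TFL are illustrative; TFL is assumed to be the target's
MipsFrameLowering instance):

    // Illustrative sketch: legalize every call-frame pseudo in a block,
    // continuing from the instruction that follows each erased pseudo.
    MachineBasicBlock::iterator I = MBB.begin();
    while (I != MBB.end()) {
      if (I->getOpcode() == Mips::ADJCALLSTACKDOWN ||
          I->getOpcode() == Mips::ADJCALLSTACKUP)
        I = TFL.eliminateCallFramePseudoInstr(MF, MBB, I);
      else
        ++I;
    }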
diff --git a/lib/Target/Mips/MipsFrameLowering.h b/lib/Target/Mips/MipsFrameLowering.h
index 5eabd58e8686..8c4214c4c21d 100644
--- a/lib/Target/Mips/MipsFrameLowering.h
+++ b/lib/Target/Mips/MipsFrameLowering.h
@@ -36,7 +36,7 @@ public:
bool isFPCloseToIncomingSP() const override { return false; }
- void
+ MachineBasicBlock::iterator
eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const override;
diff --git a/lib/Target/Mips/MipsHazardSchedule.cpp b/lib/Target/Mips/MipsHazardSchedule.cpp
new file mode 100644
index 000000000000..10022ba60680
--- /dev/null
+++ b/lib/Target/Mips/MipsHazardSchedule.cpp
@@ -0,0 +1,147 @@
+//===-- MipsHazardSchedule.cpp - Workaround pipeline hazards --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This pass is used to work around certain pipeline hazards. For now, it
+/// covers compact branch hazards. In the future this pass can be extended to
+/// other pipeline hazards, such as various MIPS1 hazards, processor errata
+/// that require instruction reorganization, etc.
+///
+/// This pass has to run after the delay slot filler as that pass can introduce
+/// pipeline hazards, hence the existing hazard recognizer is not suitable.
+///
+/// Hazards handled: forbidden slots for MIPSR6.
+///
+/// A forbidden slot hazard occurs when a compact branch instruction is
+/// executed and the adjacent instruction in memory is a control transfer
+/// instruction such as a branch, jump, ERET, ERETNC, DERET, WAIT, or PAUSE.
+///
+/// For example:
+///
+/// 0x8004 bnec a1,v0,<P+0x18>
+/// 0x8008 beqc a1,a2,<P+0x54>
+///
+/// In such cases, the processor is required to signal a Reserved Instruction
+/// exception.
+///
+/// Here, if the instruction at 0x8004 is executed, the processor will raise an
+/// exception as there is a control transfer instruction at 0x8008.
+///
+/// There are two sources of forbidden slot hazards:
+///
+/// A) A previous pass has created a compact branch directly.
+/// B) Transforming a delay slot branch into a compact branch. This case can
+///    be difficult to process, as lookahead for hazards is insufficient:
+///    backwards delay slot filling can also produce hazards in previously
+///    processed instructions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "Mips.h"
+#include "MipsInstrInfo.h"
+#include "MipsSEInstrInfo.h"
+#include "MipsTargetMachine.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "mips-hazard-schedule"
+
+STATISTIC(NumInsertedNops, "Number of nops inserted");
+
+namespace {
+
+typedef MachineBasicBlock::iterator Iter;
+typedef MachineBasicBlock::reverse_iterator ReverseIter;
+
+class MipsHazardSchedule : public MachineFunctionPass {
+
+public:
+ MipsHazardSchedule() : MachineFunctionPass(ID) {}
+
+ const char *getPassName() const override { return "Mips Hazard Schedule"; }
+
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
+private:
+ static char ID;
+};
+
+char MipsHazardSchedule::ID = 0;
+} // end of anonymous namespace
+
+/// Returns a pass that clears pipeline hazards.
+FunctionPass *llvm::createMipsHazardSchedule() {
+ return new MipsHazardSchedule();
+}
+
+// Find the next real instruction from the current position.
+static Iter getNextMachineInstr(Iter Position) {
+ Iter I = Position, E = Position->getParent()->end();
+  I = std::find_if_not(
+      I, E, [](const Iter &Insn) { return Insn->isTransient(); });
+ assert(I != E);
+ return I;
+}
+
+bool MipsHazardSchedule::runOnMachineFunction(MachineFunction &MF) {
+
+ const MipsSubtarget *STI =
+ &static_cast<const MipsSubtarget &>(MF.getSubtarget());
+
+ // Forbidden slot hazards are only defined for MIPSR6.
+ if (!STI->hasMips32r6() || STI->inMicroMipsMode())
+ return false;
+
+ bool Changed = false;
+ const MipsInstrInfo *TII = STI->getInstrInfo();
+
+ for (MachineFunction::iterator FI = MF.begin(); FI != MF.end(); ++FI) {
+ for (Iter I = FI->begin(); I != FI->end(); ++I) {
+
+ // Forbidden slot hazard handling. Use lookahead over state.
+ if (!TII->HasForbiddenSlot(*I))
+ continue;
+
+ bool InsertNop = false;
+ // Next instruction in the basic block.
+ if (std::next(I) != FI->end() &&
+ !TII->SafeInForbiddenSlot(*getNextMachineInstr(std::next(I)))) {
+ InsertNop = true;
+ } else {
+ // Next instruction in the physical successor basic block.
+ for (auto *Succ : FI->successors()) {
+ if (FI->isLayoutSuccessor(Succ) &&
+ getNextMachineInstr(Succ->begin()) != Succ->end() &&
+ !TII->SafeInForbiddenSlot(*getNextMachineInstr(Succ->begin()))) {
+ InsertNop = true;
+ break;
+ }
+ }
+ }
+
+ if (InsertNop) {
+ Changed = true;
+ MIBundleBuilder(&*I).append(
+ BuildMI(MF, I->getDebugLoc(), TII->get(Mips::NOP)));
+ NumInsertedNops++;
+ }
+ }
+ }
+ return Changed;
+}
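[Editorial note] The pass is exposed through createMipsHazardSchedule() so the
target can place it after every pass that may create or move compact branches.
A sketch of the expected wiring (the hook name comes from the common
TargetPassConfig interface; the actual Mips pipeline is configured elsewhere
in this import, so treat the body as illustrative):

    // Illustrative sketch: run the hazard workaround late, after delay slot
    // filling, since that pass can itself introduce forbidden slot hazards.
    void MipsPassConfig::addPreEmitPass() {
      addPass(createMipsDelaySlotFillerPass(getMipsTargetMachine()));
      addPass(createMipsHazardSchedule());
    }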
diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp
index 06502397b6b8..83763a64ab6a 100644
--- a/lib/Target/Mips/MipsISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp
@@ -72,12 +72,6 @@ bool MipsDAGToDAGISel::selectAddrRegImm(SDValue Addr, SDValue &Base,
return false;
}
-bool MipsDAGToDAGISel::selectAddrRegReg(SDValue Addr, SDValue &Base,
- SDValue &Offset) const {
- llvm_unreachable("Unimplemented function.");
- return false;
-}
-
bool MipsDAGToDAGISel::selectAddrDefault(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
llvm_unreachable("Unimplemented function.");
@@ -90,7 +84,19 @@ bool MipsDAGToDAGISel::selectIntAddr(SDValue Addr, SDValue &Base,
return false;
}
-bool MipsDAGToDAGISel::selectIntAddrMM(SDValue Addr, SDValue &Base,
+bool MipsDAGToDAGISel::selectIntAddr11MM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectIntAddr12MM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectIntAddr16MM(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
llvm_unreachable("Unimplemented function.");
return false;
@@ -108,8 +114,14 @@ bool MipsDAGToDAGISel::selectIntAddrMSA(SDValue Addr, SDValue &Base,
return false;
}
-bool MipsDAGToDAGISel::selectAddr16(SDNode *Parent, SDValue N, SDValue &Base,
- SDValue &Offset, SDValue &Alias) {
+bool MipsDAGToDAGISel::selectAddr16(SDValue Addr, SDValue &Base,
+ SDValue &Offset) {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectAddr16SP(SDValue Addr, SDValue &Base,
+ SDValue &Offset) {
llvm_unreachable("Unimplemented function.");
return false;
}
@@ -182,7 +194,7 @@ bool MipsDAGToDAGISel::selectVSplatMaskR(SDValue N, SDValue &Imm) const {
/// Select instructions not customized! Used for
/// expanded, promoted and normal instructions
-SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
+void MipsDAGToDAGISel::Select(SDNode *Node) {
unsigned Opcode = Node->getOpcode();
// Dump information about the Node being selected
@@ -192,21 +204,20 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
if (Node->isMachineOpcode()) {
DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
- return nullptr;
+ return;
}
// See if subclasses can handle this node.
- std::pair<bool, SDNode*> Ret = selectNode(Node);
-
- if (Ret.first)
- return Ret.second;
+ if (trySelect(Node))
+ return;
switch(Opcode) {
default: break;
// Get target GOT address.
case ISD::GLOBAL_OFFSET_TABLE:
- return getGlobalBaseReg();
+ ReplaceNode(Node, getGlobalBaseReg());
+ return;
#ifndef NDEBUG
case ISD::LOAD:
@@ -220,15 +231,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
}
// Select the default instruction
- SDNode *ResNode = SelectCode(Node);
-
- DEBUG(errs() << "=> ");
- if (ResNode == nullptr || ResNode == Node)
- DEBUG(Node->dump(CurDAG));
- else
- DEBUG(ResNode->dump(CurDAG));
- DEBUG(errs() << "\n");
- return ResNode;
+ SelectCode(Node);
}
bool MipsDAGToDAGISel::
diff --git a/lib/Target/Mips/MipsISelDAGToDAG.h b/lib/Target/Mips/MipsISelDAGToDAG.h
index 1426d0fbf516..289832a8064e 100644
--- a/lib/Target/Mips/MipsISelDAGToDAG.h
+++ b/lib/Target/Mips/MipsISelDAGToDAG.h
@@ -31,8 +31,8 @@ namespace llvm {
class MipsDAGToDAGISel : public SelectionDAGISel {
public:
- explicit MipsDAGToDAGISel(MipsTargetMachine &TM)
- : SelectionDAGISel(TM), Subtarget(nullptr) {}
+ explicit MipsDAGToDAGISel(MipsTargetMachine &TM, CodeGenOpt::Level OL)
+ : SelectionDAGISel(TM, OL), Subtarget(nullptr) {}
// Pass Name
const char *getPassName() const override {
@@ -57,11 +57,6 @@ private:
virtual bool selectAddrRegImm(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
- // Complex Pattern.
- /// (reg + reg).
- virtual bool selectAddrRegReg(SDValue Addr, SDValue &Base,
- SDValue &Offset) const;
-
/// Fall back on this function if all else fails.
virtual bool selectAddrDefault(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
@@ -70,9 +65,15 @@ private:
virtual bool selectIntAddr(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
- virtual bool selectIntAddrMM(SDValue Addr, SDValue &Base,
+ virtual bool selectIntAddr11MM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
+ virtual bool selectIntAddr12MM(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
+ virtual bool selectIntAddr16MM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
virtual bool selectIntAddrLSL2MM(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
@@ -80,8 +81,8 @@ private:
virtual bool selectIntAddrMSA(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
- virtual bool selectAddr16(SDNode *Parent, SDValue N, SDValue &Base,
- SDValue &Offset, SDValue &Alias);
+ virtual bool selectAddr16(SDValue Addr, SDValue &Base, SDValue &Offset);
+ virtual bool selectAddr16SP(SDValue Addr, SDValue &Base, SDValue &Offset);
/// \brief Select constant vector splats.
virtual bool selectVSplat(SDNode *N, APInt &Imm,
@@ -114,9 +115,9 @@ private:
/// starting at bit zero.
virtual bool selectVSplatMaskR(SDValue N, SDValue &Imm) const;
- SDNode *Select(SDNode *N) override;
+ void Select(SDNode *N) override;
- virtual std::pair<bool, SDNode*> selectNode(SDNode *Node) = 0;
+ virtual bool trySelect(SDNode *Node) = 0;
// getImm - Return a target constant with the specified value.
inline SDValue getImm(const SDNode *Node, uint64_t Imm) {
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index 5680130b91b2..1d62a251cc66 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -341,10 +341,6 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
}
setOperationAction(ISD::CTTZ, MVT::i32, Expand);
setOperationAction(ISD::CTTZ, MVT::i64, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
setOperationAction(ISD::ROTL, MVT::i32, Expand);
setOperationAction(ISD::ROTL, MVT::i64, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
@@ -396,7 +392,6 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
}
- setInsertFencesForAtomic(true);
if (!Subtarget.hasMips32r2()) {
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
@@ -429,6 +424,7 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setTargetDAGCombine(ISD::AND);
setTargetDAGCombine(ISD::OR);
setTargetDAGCombine(ISD::ADD);
+ setTargetDAGCombine(ISD::AssertZext);
setMinFunctionAlignment(Subtarget.isGP64bit() ? 3 : 2);
@@ -568,7 +564,7 @@ static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
// Creates and returns a CMovFPT/F node.
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
- SDValue False, SDLoc DL) {
+ SDValue False, const SDLoc &DL) {
ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2));
bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue());
SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
@@ -808,6 +804,37 @@ static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(ISD::ADD, DL, ValTy, Add1, Lo);
}
+static SDValue performAssertZextCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const MipsSubtarget &Subtarget) {
+ SDValue N0 = N->getOperand(0);
+ EVT NarrowerVT = cast<VTSDNode>(N->getOperand(1))->getVT();
+
+ if (N0.getOpcode() != ISD::TRUNCATE)
+ return SDValue();
+
+ if (N0.getOperand(0).getOpcode() != ISD::AssertZext)
+ return SDValue();
+
+ // fold (AssertZext (trunc (AssertZext x))) -> (trunc (AssertZext x))
+  // if the type asserted by the outermost AssertZext node is smaller than
+  // the type asserted by the innermost AssertZext node, e.g.:
+ // (AssertZext:i32 (trunc:i32 (AssertZext:i64 X, i32)), i8)
+ // -> (trunc:i32 (AssertZext X, i8))
+ SDValue WiderAssertZext = N0.getOperand(0);
+ EVT WiderVT = cast<VTSDNode>(WiderAssertZext->getOperand(1))->getVT();
+
+ if (NarrowerVT.bitsLT(WiderVT)) {
+ SDValue NewAssertZext = DAG.getNode(
+ ISD::AssertZext, SDLoc(N), WiderAssertZext.getValueType(),
+ WiderAssertZext.getOperand(0), DAG.getValueType(NarrowerVT));
+ return DAG.getNode(ISD::TRUNCATE, SDLoc(N), N->getValueType(0),
+ NewAssertZext);
+ }
+
+ return SDValue();
+}
+
SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
const {
SelectionDAG &DAG = DCI.DAG;
@@ -829,6 +856,8 @@ SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
return performORCombine(N, DAG, DCI, Subtarget);
case ISD::ADD:
return performADDCombine(N, DAG, DCI, Subtarget);
+ case ISD::AssertZext:
+ return performAssertZextCombine(N, DAG, DCI, Subtarget);
}
return SDValue();
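[Editorial note] Spelling out the AssertZext fold above in SelectionDAG dump
notation may help make the inner/outer relationship concrete; node numbers and
types are illustrative only:

    t1: i64 = AssertZext t0, ValueType:i32   ; inner, wider assertion
    t2: i32 = truncate t1
    t3: i32 = AssertZext t2, ValueType:i8    ; outer, narrower assertion
  becomes
    t4: i64 = AssertZext t0, ValueType:i8    ; assertion tightened to i8
    t5: i32 = truncate t4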
@@ -906,20 +935,22 @@ addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
return VReg;
}
-static MachineBasicBlock *insertDivByZeroTrap(MachineInstr *MI,
+static MachineBasicBlock *insertDivByZeroTrap(MachineInstr &MI,
MachineBasicBlock &MBB,
const TargetInstrInfo &TII,
- bool Is64Bit) {
+ bool Is64Bit, bool IsMicroMips) {
if (NoZeroDivCheck)
return &MBB;
// Insert instruction "teq $divisor_reg, $zero, 7".
MachineBasicBlock::iterator I(MI);
MachineInstrBuilder MIB;
- MachineOperand &Divisor = MI->getOperand(2);
- MIB = BuildMI(MBB, std::next(I), MI->getDebugLoc(), TII.get(Mips::TEQ))
- .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
- .addReg(Mips::ZERO).addImm(7);
+ MachineOperand &Divisor = MI.getOperand(2);
+ MIB = BuildMI(MBB, std::next(I), MI.getDebugLoc(),
+ TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
+ .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
+ .addReg(Mips::ZERO)
+ .addImm(7);
// Use the 32-bit sub-register if this is a 64-bit division.
if (Is64Bit)
@@ -935,9 +966,9 @@ static MachineBasicBlock *insertDivByZeroTrap(MachineInstr *MI,
}
MachineBasicBlock *
-MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default:
llvm_unreachable("Unexpected instr type to insert");
case Mips::ATOMIC_LOAD_ADD_I8:
@@ -1017,15 +1048,31 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case Mips::DIVU:
case Mips::MOD:
case Mips::MODU:
- return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false);
+ return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false,
+ false);
+ case Mips::SDIV_MM_Pseudo:
+ case Mips::UDIV_MM_Pseudo:
+ case Mips::SDIV_MM:
+ case Mips::UDIV_MM:
+ case Mips::DIV_MMR6:
+ case Mips::DIVU_MMR6:
+ case Mips::MOD_MMR6:
+ case Mips::MODU_MMR6:
+ return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false, true);
case Mips::PseudoDSDIV:
case Mips::PseudoDUDIV:
case Mips::DDIV:
case Mips::DDIVU:
case Mips::DMOD:
case Mips::DMODU:
- return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), true);
+ return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), true, false);
+ case Mips::DDIV_MM64R6:
+ case Mips::DDIVU_MM64R6:
+ case Mips::DMOD_MM64R6:
+ case Mips::DMODU_MM64R6:
+ return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), true, true);
case Mips::SEL_D:
+ case Mips::SEL_D_MMR6:
return emitSEL_D(MI, BB);
case Mips::PseudoSELECT_I:
@@ -1051,17 +1098,19 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// This function also handles Mips::ATOMIC_SWAP_I32 (when BinOpcode == 0), and
// Mips::ATOMIC_LOAD_NAND_I32 (when Nand == true)
-MachineBasicBlock *
-MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
- unsigned Size, unsigned BinOpcode,
- bool Nand) const {
+MachineBasicBlock *MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
+ MachineBasicBlock *BB,
+ unsigned Size,
+ unsigned BinOpcode,
+ bool Nand) const {
assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicBinary.");
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const bool ArePtrs64bit = ABI.ArePtrs64bit();
+ DebugLoc DL = MI.getDebugLoc();
unsigned LL, SC, AND, NOR, ZERO, BEQ;
if (Size == 4) {
@@ -1069,9 +1118,14 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
LL = Mips::LL_MM;
SC = Mips::SC_MM;
} else {
- LL = Subtarget.hasMips32r6() ? Mips::LL_R6 : Mips::LL;
- SC = Subtarget.hasMips32r6() ? Mips::SC_R6 : Mips::SC;
+ LL = Subtarget.hasMips32r6()
+ ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
+ : (ArePtrs64bit ? Mips::LL64 : Mips::LL);
+ SC = Subtarget.hasMips32r6()
+ ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
+ : (ArePtrs64bit ? Mips::SC64 : Mips::SC);
}
+
AND = Mips::AND;
NOR = Mips::NOR;
ZERO = Mips::ZERO;
@@ -1085,9 +1139,9 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
BEQ = Mips::BEQ64;
}
- unsigned OldVal = MI->getOperand(0).getReg();
- unsigned Ptr = MI->getOperand(1).getReg();
- unsigned Incr = MI->getOperand(2).getReg();
+ unsigned OldVal = MI.getOperand(0).getReg();
+ unsigned Ptr = MI.getOperand(1).getReg();
+ unsigned Incr = MI.getOperand(2).getReg();
unsigned StoreVal = RegInfo.createVirtualRegister(RC);
unsigned AndRes = RegInfo.createVirtualRegister(RC);
@@ -1134,16 +1188,16 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
BuildMI(BB, DL, TII->get(SC), Success).addReg(StoreVal).addReg(Ptr).addImm(0);
BuildMI(BB, DL, TII->get(BEQ)).addReg(Success).addReg(ZERO).addMBB(loopMBB);
- MI->eraseFromParent(); // The instruction is gone now.
+ MI.eraseFromParent(); // The instruction is gone now.
return exitMBB;
}
MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
- MachineInstr *MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
+ MachineInstr &MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
unsigned SrcReg) const {
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const DebugLoc &DL = MI.getDebugLoc();
if (Subtarget.hasMips32r2() && Size == 1) {
BuildMI(BB, DL, TII->get(Mips::SEB), DstReg).addReg(SrcReg);
@@ -1170,7 +1224,7 @@ MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
}
MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
- MachineInstr *MI, MachineBasicBlock *BB, unsigned Size, unsigned BinOpcode,
+ MachineInstr &MI, MachineBasicBlock *BB, unsigned Size, unsigned BinOpcode,
bool Nand) const {
assert((Size == 1 || Size == 2) &&
"Unsupported size for EmitAtomicBinaryPartial.");
@@ -1178,21 +1232,24 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
+ const bool ArePtrs64bit = ABI.ArePtrs64bit();
+ const TargetRegisterClass *RCp =
+ getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
- unsigned Dest = MI->getOperand(0).getReg();
- unsigned Ptr = MI->getOperand(1).getReg();
- unsigned Incr = MI->getOperand(2).getReg();
+ unsigned Dest = MI.getOperand(0).getReg();
+ unsigned Ptr = MI.getOperand(1).getReg();
+ unsigned Incr = MI.getOperand(2).getReg();
- unsigned AlignedAddr = RegInfo.createVirtualRegister(RC);
+ unsigned AlignedAddr = RegInfo.createVirtualRegister(RCp);
unsigned ShiftAmt = RegInfo.createVirtualRegister(RC);
unsigned Mask = RegInfo.createVirtualRegister(RC);
unsigned Mask2 = RegInfo.createVirtualRegister(RC);
unsigned NewVal = RegInfo.createVirtualRegister(RC);
unsigned OldVal = RegInfo.createVirtualRegister(RC);
unsigned Incr2 = RegInfo.createVirtualRegister(RC);
- unsigned MaskLSB2 = RegInfo.createVirtualRegister(RC);
+ unsigned MaskLSB2 = RegInfo.createVirtualRegister(RCp);
unsigned PtrLSB2 = RegInfo.createVirtualRegister(RC);
unsigned MaskUpper = RegInfo.createVirtualRegister(RC);
unsigned AndRes = RegInfo.createVirtualRegister(RC);
@@ -1203,6 +1260,17 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
unsigned SrlRes = RegInfo.createVirtualRegister(RC);
unsigned Success = RegInfo.createVirtualRegister(RC);
+ unsigned LL, SC;
+ if (isMicroMips) {
+ LL = Mips::LL_MM;
+ SC = Mips::SC_MM;
+ } else {
+ LL = Subtarget.hasMips32r6() ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
+ : (ArePtrs64bit ? Mips::LL64 : Mips::LL);
+ SC = Subtarget.hasMips32r6() ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
+ : (ArePtrs64bit ? Mips::SC64 : Mips::SC);
+ }
+
// insert new blocks after the current block
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
@@ -1234,11 +1302,12 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
// sll incr2,incr,shiftamt
int64_t MaskImm = (Size == 1) ? 255 : 65535;
- BuildMI(BB, DL, TII->get(Mips::ADDiu), MaskLSB2)
- .addReg(Mips::ZERO).addImm(-4);
- BuildMI(BB, DL, TII->get(Mips::AND), AlignedAddr)
+ BuildMI(BB, DL, TII->get(ABI.GetPtrAddiuOp()), MaskLSB2)
+ .addReg(ABI.GetNullPtr()).addImm(-4);
+ BuildMI(BB, DL, TII->get(ABI.GetPtrAndOp()), AlignedAddr)
.addReg(Ptr).addReg(MaskLSB2);
- BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
+ BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
+ .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
if (Subtarget.isLittle()) {
BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
} else {
@@ -1274,7 +1343,6 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
// beq success,$0,loopMBB
BB = loopMBB;
- unsigned LL = isMicroMips ? Mips::LL_MM : Mips::LL;
BuildMI(BB, DL, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
if (Nand) {
// and andres, oldval, incr2
@@ -1298,7 +1366,6 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, DL, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal0).addReg(NewVal);
- unsigned SC = isMicroMips ? Mips::SC_MM : Mips::SC;
BuildMI(BB, DL, TII->get(SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::BEQ))
@@ -1316,31 +1383,37 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
.addReg(MaskedOldVal1).addReg(ShiftAmt);
BB = emitSignExtendToI32InReg(MI, BB, Size, Dest, SrlRes);
- MI->eraseFromParent(); // The instruction is gone now.
+ MI.eraseFromParent(); // The instruction is gone now.
return exitMBB;
}
-MachineBasicBlock * MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Size) const {
+MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI,
+ MachineBasicBlock *BB,
+ unsigned Size) const {
assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicCmpSwap.");
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const bool ArePtrs64bit = ABI.ArePtrs64bit();
+ DebugLoc DL = MI.getDebugLoc();
unsigned LL, SC, ZERO, BNE, BEQ;
- if (Size == 4) {
- if (isMicroMips) {
- LL = Mips::LL_MM;
- SC = Mips::SC_MM;
- } else {
- LL = Subtarget.hasMips32r6() ? Mips::LL_R6 : Mips::LL;
- SC = Subtarget.hasMips32r6() ? Mips::SC_R6 : Mips::SC;
- }
+ if (Size == 4) {
+ if (isMicroMips) {
+ LL = Mips::LL_MM;
+ SC = Mips::SC_MM;
+ } else {
+ LL = Subtarget.hasMips32r6()
+ ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
+ : (ArePtrs64bit ? Mips::LL64 : Mips::LL);
+ SC = Subtarget.hasMips32r6()
+ ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
+ : (ArePtrs64bit ? Mips::SC64 : Mips::SC);
+ }
+
ZERO = Mips::ZERO;
BNE = Mips::BNE;
BEQ = Mips::BEQ;
@@ -1352,10 +1425,10 @@ MachineBasicBlock * MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
BEQ = Mips::BEQ64;
}
- unsigned Dest = MI->getOperand(0).getReg();
- unsigned Ptr = MI->getOperand(1).getReg();
- unsigned OldVal = MI->getOperand(2).getReg();
- unsigned NewVal = MI->getOperand(3).getReg();
+ unsigned Dest = MI.getOperand(0).getReg();
+ unsigned Ptr = MI.getOperand(1).getReg();
+ unsigned OldVal = MI.getOperand(2).getReg();
+ unsigned NewVal = MI.getOperand(3).getReg();
unsigned Success = RegInfo.createVirtualRegister(RC);
@@ -1400,30 +1473,31 @@ MachineBasicBlock * MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
BuildMI(BB, DL, TII->get(BEQ))
.addReg(Success).addReg(ZERO).addMBB(loop1MBB);
- MI->eraseFromParent(); // The instruction is gone now.
+ MI.eraseFromParent(); // The instruction is gone now.
return exitMBB;
}
-MachineBasicBlock *
-MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Size) const {
+MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(
+ MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
assert((Size == 1 || Size == 2) &&
"Unsupported size for EmitAtomicCmpSwapPartial.");
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
+ const bool ArePtrs64bit = ABI.ArePtrs64bit();
+ const TargetRegisterClass *RCp =
+ getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
- unsigned Dest = MI->getOperand(0).getReg();
- unsigned Ptr = MI->getOperand(1).getReg();
- unsigned CmpVal = MI->getOperand(2).getReg();
- unsigned NewVal = MI->getOperand(3).getReg();
+ unsigned Dest = MI.getOperand(0).getReg();
+ unsigned Ptr = MI.getOperand(1).getReg();
+ unsigned CmpVal = MI.getOperand(2).getReg();
+ unsigned NewVal = MI.getOperand(3).getReg();
- unsigned AlignedAddr = RegInfo.createVirtualRegister(RC);
+ unsigned AlignedAddr = RegInfo.createVirtualRegister(RCp);
unsigned ShiftAmt = RegInfo.createVirtualRegister(RC);
unsigned Mask = RegInfo.createVirtualRegister(RC);
unsigned Mask2 = RegInfo.createVirtualRegister(RC);
@@ -1431,7 +1505,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
unsigned OldVal = RegInfo.createVirtualRegister(RC);
unsigned MaskedOldVal0 = RegInfo.createVirtualRegister(RC);
unsigned ShiftedNewVal = RegInfo.createVirtualRegister(RC);
- unsigned MaskLSB2 = RegInfo.createVirtualRegister(RC);
+ unsigned MaskLSB2 = RegInfo.createVirtualRegister(RCp);
unsigned PtrLSB2 = RegInfo.createVirtualRegister(RC);
unsigned MaskUpper = RegInfo.createVirtualRegister(RC);
unsigned MaskedCmpVal = RegInfo.createVirtualRegister(RC);
@@ -1440,6 +1514,17 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
unsigned StoreVal = RegInfo.createVirtualRegister(RC);
unsigned SrlRes = RegInfo.createVirtualRegister(RC);
unsigned Success = RegInfo.createVirtualRegister(RC);
+ unsigned LL, SC;
+
+ if (isMicroMips) {
+ LL = Mips::LL_MM;
+ SC = Mips::SC_MM;
+ } else {
+ LL = Subtarget.hasMips32r6() ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
+ : (ArePtrs64bit ? Mips::LL64 : Mips::LL);
+ SC = Subtarget.hasMips32r6() ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
+ : (ArePtrs64bit ? Mips::SC64 : Mips::SC);
+ }
// insert new blocks after the current block
const BasicBlock *LLVM_BB = BB->getBasicBlock();
@@ -1470,6 +1555,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
// addiu masklsb2,$0,-4 # 0xfffffffc
// and alignedaddr,ptr,masklsb2
// andi ptrlsb2,ptr,3
+ // xori ptrlsb2,ptrlsb2,3 # Only for BE
// sll shiftamt,ptrlsb2,3
// ori maskupper,$0,255 # 0xff
// sll mask,maskupper,shiftamt
@@ -1479,11 +1565,12 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
// andi maskednewval,newval,255
// sll shiftednewval,maskednewval,shiftamt
int64_t MaskImm = (Size == 1) ? 255 : 65535;
- BuildMI(BB, DL, TII->get(Mips::ADDiu), MaskLSB2)
- .addReg(Mips::ZERO).addImm(-4);
- BuildMI(BB, DL, TII->get(Mips::AND), AlignedAddr)
+ BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
+ .addReg(ABI.GetNullPtr()).addImm(-4);
+ BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
.addReg(Ptr).addReg(MaskLSB2);
- BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
+ BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
+ .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
if (Subtarget.isLittle()) {
BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
} else {
@@ -1511,7 +1598,6 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
// and maskedoldval0,oldval,mask
// bne maskedoldval0,shiftedcmpval,sinkMBB
BB = loop1MBB;
- unsigned LL = isMicroMips ? Mips::LL_MM : Mips::LL;
BuildMI(BB, DL, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal0)
.addReg(OldVal).addReg(Mask);
@@ -1528,7 +1614,6 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, DL, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal1).addReg(ShiftedNewVal);
- unsigned SC = isMicroMips ? Mips::SC_MM : Mips::SC;
BuildMI(BB, DL, TII->get(SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::BEQ))
@@ -1543,21 +1628,21 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
.addReg(MaskedOldVal0).addReg(ShiftAmt);
BB = emitSignExtendToI32InReg(MI, BB, Size, Dest, SrlRes);
- MI->eraseFromParent(); // The instruction is gone now.
+ MI.eraseFromParent(); // The instruction is gone now.
return exitMBB;
}
-MachineBasicBlock *MipsTargetLowering::emitSEL_D(MachineInstr *MI,
+MachineBasicBlock *MipsTargetLowering::emitSEL_D(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
MachineBasicBlock::iterator II(MI);
- unsigned Fc = MI->getOperand(1).getReg();
+ unsigned Fc = MI.getOperand(1).getReg();
const auto &FGR64RegClass = TRI->getRegClass(Mips::FGR64RegClassID);
unsigned Fc2 = RegInfo.createVirtualRegister(FGR64RegClass);
@@ -1569,7 +1654,7 @@ MachineBasicBlock *MipsTargetLowering::emitSEL_D(MachineInstr *MI,
// We don't erase the original instruction, we just replace the condition
// register with the 64-bit super-register.
- MI->getOperand(1).setReg(Fc2);
+ MI.getOperand(1).setReg(Fc2);
return BB;
}
@@ -1592,13 +1677,12 @@ SDValue MipsTargetLowering::lowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
SDValue Addr = DAG.getNode(ISD::ADD, DL, PTy, Index, Table);
EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
- Addr =
- DAG.getExtLoad(ISD::SEXTLOAD, DL, PTy, Chain, Addr,
- MachinePointerInfo::getJumpTable(DAG.getMachineFunction()),
- MemVT, false, false, false, 0);
+ Addr = DAG.getExtLoad(
+ ISD::SEXTLOAD, DL, PTy, Chain, Addr,
+ MachinePointerInfo::getJumpTable(DAG.getMachineFunction()), MemVT);
Chain = Addr.getValue(1);
- if ((getTargetMachine().getRelocationModel() == Reloc::PIC_) || ABI.IsN64()) {
+ if (isPositionIndependent() || ABI.IsN64()) {
// For PIC, the sequence is:
// BRIND(load(Jumptable + index) + RelocBase)
// RelocBase can be JumpTable, GOT or some sort of global base.
@@ -1667,7 +1751,7 @@ SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = N->getGlobal();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !ABI.IsN64()) {
+ if (!isPositionIndependent() && !ABI.IsN64()) {
const MipsTargetObjectFile *TLOF =
static_cast<const MipsTargetObjectFile *>(
getTargetMachine().getObjFileLowering());
@@ -1679,7 +1763,18 @@ SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
return getAddrNonPIC(N, SDLoc(N), Ty, DAG);
}
- if (GV->hasInternalLinkage() || (GV->hasLocalLinkage() && !isa<Function>(GV)))
+ // Every other architecture would use shouldAssumeDSOLocal in here, but
+ // mips is special.
+ // * In PIC code mips requires got loads even for local statics!
+ // * To save on got entries, for local statics the got entry contains the
+ // page and an additional add instruction takes care of the low bits.
+  // * It is legal to resolve a non-hidden undefined reference to a hidden
+  //   symbol, so one cannot guarantee that every access to a hidden symbol
+  //   knows that it is hidden.
+ // * Mips linkers don't support creating a page and a full got entry for
+ // the same symbol.
+ // * Given all that, we have to use a full got entry for hidden symbols :-(
+ if (GV->hasLocalLinkage())
return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
if (LargeGOT)
@@ -1690,7 +1785,7 @@ SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
return getAddrGlobal(
N, SDLoc(N), Ty, DAG,
- (ABI.IsN32() || ABI.IsN64()) ? MipsII::MO_GOT_DISP : MipsII::MO_GOT16,
+ (ABI.IsN32() || ABI.IsN64()) ? MipsII::MO_GOT_DISP : MipsII::MO_GOT,
DAG.getEntryNode(), MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}
@@ -1699,7 +1794,7 @@ SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
EVT Ty = Op.getValueType();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !ABI.IsN64())
+ if (!isPositionIndependent() && !ABI.IsN64())
return getAddrNonPIC(N, SDLoc(N), Ty, DAG);
return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
@@ -1743,7 +1838,7 @@ lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(DL).setChain(DAG.getEntryNode())
- .setCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args), 0);
+ .setCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
SDValue Ret = CallResult.first;
@@ -1768,9 +1863,8 @@ lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
MipsII::MO_GOTTPREL);
TGA = DAG.getNode(MipsISD::Wrapper, DL, PtrVT, getGlobalReg(DAG, PtrVT),
TGA);
- Offset = DAG.getLoad(PtrVT, DL,
- DAG.getEntryNode(), TGA, MachinePointerInfo(),
- false, false, false, 0);
+ Offset =
+ DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), TGA, MachinePointerInfo());
} else {
// Local Exec TLS Model
assert(model == TLSModel::LocalExec);
@@ -1793,7 +1887,7 @@ lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
EVT Ty = Op.getValueType();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !ABI.IsN64())
+ if (!isPositionIndependent() && !ABI.IsN64())
return getAddrNonPIC(N, SDLoc(N), Ty, DAG);
return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
@@ -1805,7 +1899,7 @@ lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
EVT Ty = Op.getValueType();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !ABI.IsN64()) {
+ if (!isPositionIndependent() && !ABI.IsN64()) {
const MipsTargetObjectFile *TLOF =
static_cast<const MipsTargetObjectFile *>(
getTargetMachine().getObjFileLowering());
@@ -1833,7 +1927,7 @@ SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
// memory location argument.
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
- MachinePointerInfo(SV), false, false, 0);
+ MachinePointerInfo(SV));
}
SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
@@ -1846,9 +1940,8 @@ SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Node);
unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;
- SDValue VAListLoad =
- DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL, Chain, VAListPtr,
- MachinePointerInfo(SV), false, false, false, 0);
+ SDValue VAListLoad = DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL, Chain,
+ VAListPtr, MachinePointerInfo(SV));
SDValue VAList = VAListLoad;
// Re-align the pointer if necessary.
@@ -1873,13 +1966,13 @@ SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
auto &TD = DAG.getDataLayout();
unsigned ArgSizeInBytes =
TD.getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
- SDValue Tmp3 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
- DAG.getConstant(RoundUpToAlignment(ArgSizeInBytes,
- ArgSlotSizeInBytes),
- DL, VAList.getValueType()));
+ SDValue Tmp3 =
+ DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
+ DAG.getConstant(alignTo(ArgSizeInBytes, ArgSlotSizeInBytes),
+ DL, VAList.getValueType()));
// Store the incremented VAList to the legalized pointer
Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
- MachinePointerInfo(SV), false, false, 0);
+ MachinePointerInfo(SV));
// In big-endian mode we must adjust the pointer when the load size is smaller
// than the argument slot size. We must also reduce the known alignment to
@@ -1892,8 +1985,7 @@ SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
DAG.getIntPtrConstant(Adjustment, DL));
}
// Load the actual argument out of the pointer VAList
- return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo(), false, false,
- false, 0);
+ return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo());
}
static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG,
@@ -2283,10 +2375,9 @@ static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG) {
EVT FPTy = EVT::getFloatingPointVT(Val.getValueSizeInBits());
SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Val), FPTy,
Val.getOperand(0));
-
return DAG.getStore(SD->getChain(), SDLoc(SD), Tr, SD->getBasePtr(),
- SD->getPointerInfo(), SD->isVolatile(),
- SD->isNonTemporal(), SD->getAlignment());
+ SD->getPointerInfo(), SD->getAlignment(),
+ SD->getMemOperand()->getFlags());
}
SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
@@ -2472,23 +2563,22 @@ static unsigned getNextIntArgReg(unsigned Reg) {
return (Reg == Mips::A0) ? Mips::A1 : Mips::A3;
}
-SDValue
-MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
- SDValue Chain, SDValue Arg, SDLoc DL,
- bool IsTailCall, SelectionDAG &DAG) const {
+SDValue MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
+ SDValue Chain, SDValue Arg,
+ const SDLoc &DL, bool IsTailCall,
+ SelectionDAG &DAG) const {
if (!IsTailCall) {
SDValue PtrOff =
DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()), StackPtr,
DAG.getIntPtrConstant(Offset, DL));
- return DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo(), false,
- false, 0);
+ return DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo());
}
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
int FI = MFI->CreateFixedObject(Arg.getValueSizeInBits() / 8, Offset, false);
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
return DAG.getStore(Chain, DL, Arg, FIN, MachinePointerInfo(),
- /*isVolatile=*/ true, false, 0);
+ /* Alignment = */ 0, MachineMemOperand::MOVolatile);
}
void MipsTargetLowering::
@@ -2571,7 +2661,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
MachineFrameInfo *MFI = MF.getFrameInfo();
const TargetFrameLowering *TFL = Subtarget.getFrameLowering();
MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
- bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
+ bool IsPIC = isPositionIndependent();
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
@@ -2604,7 +2694,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// ByValChain is the output chain of the last Memcpy node created for copying
// byval arguments to the stack.
unsigned StackAlignment = TFL->getStackAlignment();
- NextStackOffset = RoundUpToAlignment(NextStackOffset, StackAlignment);
+ NextStackOffset = alignTo(NextStackOffset, StackAlignment);
SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, DL, true);
if (!IsTailCall)
@@ -2614,7 +2704,6 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
DAG.getCopyFromReg(Chain, DL, ABI.IsN64() ? Mips::SP_64 : Mips::SP,
getPointerTy(DAG.getDataLayout()));
- // With EABI is it possible to have 16 args on registers.
std::deque< std::pair<unsigned, SDValue> > RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
@@ -2802,8 +2891,8 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
/// appropriate copies out of appropriate physical registers.
SDValue MipsTargetLowering::LowerCallResult(
SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
TargetLowering::CallLoweringInfo &CLI) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
@@ -2864,7 +2953,8 @@ SDValue MipsTargetLowering::LowerCallResult(
}
static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA,
- EVT ArgVT, SDLoc DL, SelectionDAG &DAG) {
+ EVT ArgVT, const SDLoc &DL,
+ SelectionDAG &DAG) {
MVT LocVT = VA.getLocVT();
EVT ValVT = VA.getValVT();
@@ -2922,14 +3012,10 @@ static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA,
//===----------------------------------------------------------------------===//
/// LowerFormalArguments - transform physical registers into virtual registers
/// and generate load operations for arguments placed on the stack.
-SDValue
-MipsTargetLowering::LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool IsVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals)
- const {
+SDValue MipsTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
@@ -3037,8 +3123,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
SDValue ArgValue = DAG.getLoad(
LocVT, DL, Chain, FIN,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
- false, false, false, 0);
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
OutChains.push_back(ArgValue.getValue(1));
ArgValue = UnpackFromArgumentSlot(ArgValue, VA, Ins[i].ArgVT, DL, DAG);
@@ -3102,7 +3187,8 @@ MipsTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
SDValue
MipsTargetLowering::LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
- SDLoc DL, SelectionDAG &DAG) const {
+ const SDLoc &DL,
+ SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
@@ -3117,7 +3203,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const {
+ const SDLoc &DL, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of
// the return value to a location
SmallVector<CCValAssign, 16> RVLocs;
@@ -3625,10 +3711,11 @@ bool MipsTargetLowering::useSoftFloat() const {
}
void MipsTargetLowering::copyByValRegs(
- SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains, SelectionDAG &DAG,
- const ISD::ArgFlagsTy &Flags, SmallVectorImpl<SDValue> &InVals,
- const Argument *FuncArg, unsigned FirstReg, unsigned LastReg,
- const CCValAssign &VA, MipsCCState &State) const {
+ SDValue Chain, const SDLoc &DL, std::vector<SDValue> &OutChains,
+ SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
+ SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg,
+ unsigned FirstReg, unsigned LastReg, const CCValAssign &VA,
+ MipsCCState &State) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
unsigned GPRSizeInBytes = Subtarget.getGPRSizeInBytes();
@@ -3665,15 +3752,14 @@ void MipsTargetLowering::copyByValRegs(
SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrTy, FIN,
DAG.getConstant(Offset, DL, PtrTy));
SDValue Store = DAG.getStore(Chain, DL, DAG.getRegister(VReg, RegTy),
- StorePtr, MachinePointerInfo(FuncArg, Offset),
- false, false, 0);
+ StorePtr, MachinePointerInfo(FuncArg, Offset));
OutChains.push_back(Store);
}
}
// Copy byVal arg to registers and stack.
void MipsTargetLowering::passByValArg(
- SDValue Chain, SDLoc DL,
+ SDValue Chain, const SDLoc &DL,
std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg, unsigned FirstReg,
@@ -3697,8 +3783,7 @@ void MipsTargetLowering::passByValArg(
SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
DAG.getConstant(OffsetInBytes, DL, PtrTy));
SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr,
- MachinePointerInfo(), false, false, false,
- Alignment);
+ MachinePointerInfo(), Alignment);
MemOpChains.push_back(LoadVal.getValue(1));
unsigned ArgReg = ArgRegs[FirstReg + I];
RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));
@@ -3725,8 +3810,7 @@ void MipsTargetLowering::passByValArg(
PtrTy));
SDValue LoadVal = DAG.getExtLoad(
ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr, MachinePointerInfo(),
- MVT::getIntegerVT(LoadSizeInBytes * 8), false, false, false,
- Alignment);
+ MVT::getIntegerVT(LoadSizeInBytes * 8), Alignment);
MemOpChains.push_back(LoadVal.getValue(1));
// Shift the loaded value.
@@ -3771,7 +3855,7 @@ void MipsTargetLowering::passByValArg(
}
void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
- SDValue Chain, SDLoc DL,
+ SDValue Chain, const SDLoc &DL,
SelectionDAG &DAG,
CCState &State) const {
ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();
@@ -3787,8 +3871,7 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
int VaArgOffset;
if (ArgRegs.size() == Idx)
- VaArgOffset =
- RoundUpToAlignment(State.getNextStackOffset(), RegSizeInBytes);
+ VaArgOffset = alignTo(State.getNextStackOffset(), RegSizeInBytes);
else {
VaArgOffset =
(int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
@@ -3810,8 +3893,8 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegTy);
FI = MFI->CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
- SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
- MachinePointerInfo(), false, false, 0);
+ SDValue Store =
+ DAG.getStore(Chain, DL, ArgValue, PtrOff, MachinePointerInfo());
cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(
(Value *)nullptr);
OutChains.push_back(Store);
@@ -3854,7 +3937,7 @@ void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
}
// Mark the registers allocated.
- Size = RoundUpToAlignment(Size, RegSizeInBytes);
+ Size = alignTo(Size, RegSizeInBytes);
for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
Size -= RegSizeInBytes, ++I, ++NumRegs)
State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
@@ -3863,16 +3946,17 @@ void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
State->addInRegsParamInfo(FirstReg, FirstReg + NumRegs);
}
-MachineBasicBlock *
-MipsTargetLowering::emitPseudoSELECT(MachineInstr *MI, MachineBasicBlock *BB,
- bool isFPCmp, unsigned Opc) const {
+MachineBasicBlock *MipsTargetLowering::emitPseudoSELECT(MachineInstr &MI,
+ MachineBasicBlock *BB,
+ bool isFPCmp,
+ unsigned Opc) const {
assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
"Subtarget already supports SELECT nodes with the use of"
"conditional-move instructions.");
const TargetInstrInfo *TII =
Subtarget.getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
// To "insert" a SELECT instruction, we actually have to insert the
// diamond control-flow pattern. The incoming instruction knows the
@@ -3906,14 +3990,14 @@ MipsTargetLowering::emitPseudoSELECT(MachineInstr *MI, MachineBasicBlock *BB,
if (isFPCmp) {
// bc1[tf] cc, sinkMBB
BuildMI(BB, DL, TII->get(Opc))
- .addReg(MI->getOperand(1).getReg())
- .addMBB(sinkMBB);
+ .addReg(MI.getOperand(1).getReg())
+ .addMBB(sinkMBB);
} else {
// bne rs, $0, sinkMBB
BuildMI(BB, DL, TII->get(Opc))
- .addReg(MI->getOperand(1).getReg())
- .addReg(Mips::ZERO)
- .addMBB(sinkMBB);
+ .addReg(MI.getOperand(1).getReg())
+ .addReg(Mips::ZERO)
+ .addMBB(sinkMBB);
}
// copy0MBB:
@@ -3929,12 +4013,13 @@ MipsTargetLowering::emitPseudoSELECT(MachineInstr *MI, MachineBasicBlock *BB,
// ...
BB = sinkMBB;
- BuildMI(*BB, BB->begin(), DL,
- TII->get(Mips::PHI), MI->getOperand(0).getReg())
- .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB)
- .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB);
+ BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
+ .addReg(MI.getOperand(2).getReg())
+ .addMBB(thisMBB)
+ .addReg(MI.getOperand(3).getReg())
+ .addMBB(copy0MBB);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
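The expansion above builds the classic diamond: thisMBB falls through holding the "true" value, copy0MBB supplies the "false" value, and sinkMBB's PHI merges the two. A minimal source-level sketch of the same shape, assuming a plain C++ model rather than the MIR builder calls above (selectDiamond is illustrative only, not part of this diff):

int selectDiamond(bool cond, int t, int f) {
  int result = t;  // thisMBB: start with the "true" value
  if (!cond)       // bne rs, $0, sinkMBB skips the copy when cond holds
    result = f;    // copy0MBB: fall through and take the "false" value
  return result;   // sinkMBB: the PHI merges the two definitions
}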
diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h
index 0dc683e3df27..182dc9070fc8 100644
--- a/lib/Target/Mips/MipsISelLowering.h
+++ b/lib/Target/Mips/MipsISelLowering.h
@@ -238,6 +238,10 @@ namespace llvm {
bool isCheapToSpeculateCttz() const override;
bool isCheapToSpeculateCtlz() const override;
+ ISD::NodeType getExtendForAtomicOps() const override {
+ return ISD::SIGN_EXTEND;
+ }
+
void LowerOperationWrapper(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const override;
@@ -262,7 +266,7 @@ namespace llvm {
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
+ EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const override;
void HandleByVal(CCState *, unsigned &, unsigned) const override;
@@ -300,15 +304,14 @@ namespace llvm {
//
// (add (load (wrapper $gp, %got(sym)), %lo(sym))
template <class NodeTy>
- SDValue getAddrLocal(NodeTy *N, SDLoc DL, EVT Ty, SelectionDAG &DAG,
+ SDValue getAddrLocal(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG,
bool IsN32OrN64) const {
unsigned GOTFlag = IsN32OrN64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
SDValue GOT = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty),
getTargetNode(N, Ty, DAG, GOTFlag));
SDValue Load =
DAG.getLoad(Ty, DL, DAG.getEntryNode(), GOT,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, false, false, 0);
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
unsigned LoFlag = IsN32OrN64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO;
SDValue Lo = DAG.getNode(MipsISD::Lo, DL, Ty,
getTargetNode(N, Ty, DAG, LoFlag));
@@ -320,12 +323,12 @@ namespace llvm {
//
// (load (wrapper $gp, %got(sym)))
template <class NodeTy>
- SDValue getAddrGlobal(NodeTy *N, SDLoc DL, EVT Ty, SelectionDAG &DAG,
+ SDValue getAddrGlobal(NodeTy *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG,
unsigned Flag, SDValue Chain,
const MachinePointerInfo &PtrInfo) const {
SDValue Tgt = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty),
getTargetNode(N, Ty, DAG, Flag));
- return DAG.getLoad(Ty, DL, Chain, Tgt, PtrInfo, false, false, false, 0);
+ return DAG.getLoad(Ty, DL, Chain, Tgt, PtrInfo);
}
// This method creates the following nodes, which are necessary for
@@ -333,7 +336,7 @@ namespace llvm {
//
// (load (wrapper (add %hi(sym), $gp), %lo(sym)))
template <class NodeTy>
- SDValue getAddrGlobalLargeGOT(NodeTy *N, SDLoc DL, EVT Ty,
+ SDValue getAddrGlobalLargeGOT(NodeTy *N, const SDLoc &DL, EVT Ty,
SelectionDAG &DAG, unsigned HiFlag,
unsigned LoFlag, SDValue Chain,
const MachinePointerInfo &PtrInfo) const {
@@ -342,8 +345,7 @@ namespace llvm {
Hi = DAG.getNode(ISD::ADD, DL, Ty, Hi, getGlobalReg(DAG, Ty));
SDValue Wrapper = DAG.getNode(MipsISD::Wrapper, DL, Ty, Hi,
getTargetNode(N, Ty, DAG, LoFlag));
- return DAG.getLoad(Ty, DL, Chain, Wrapper, PtrInfo, false, false, false,
- 0);
+ return DAG.getLoad(Ty, DL, Chain, Wrapper, PtrInfo);
}
// This method creates the following nodes, which are necessary for
@@ -351,7 +353,7 @@ namespace llvm {
//
// (add %hi(sym), %lo(sym))
template <class NodeTy>
- SDValue getAddrNonPIC(NodeTy *N, SDLoc DL, EVT Ty,
+ SDValue getAddrNonPIC(NodeTy *N, const SDLoc &DL, EVT Ty,
SelectionDAG &DAG) const {
SDValue Hi = getTargetNode(N, Ty, DAG, MipsII::MO_ABS_HI);
SDValue Lo = getTargetNode(N, Ty, DAG, MipsII::MO_ABS_LO);
@@ -365,7 +367,8 @@ namespace llvm {
//
// (add $gp, %gp_rel(sym))
template <class NodeTy>
- SDValue getAddrGPRel(NodeTy *N, SDLoc DL, EVT Ty, SelectionDAG &DAG) const {
+ SDValue getAddrGPRel(NodeTy *N, const SDLoc &DL, EVT Ty,
+ SelectionDAG &DAG) const {
assert(Ty == MVT::i32);
SDValue GPRel = getTargetNode(N, Ty, DAG, MipsII::MO_GPREL);
return DAG.getNode(ISD::ADD, DL, Ty,
@@ -417,8 +420,9 @@ namespace llvm {
// Lower Operand helpers
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl,
- SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals,
TargetLowering::CallLoweringInfo &CLI) const;
// Lower Operand specifics
@@ -455,15 +459,16 @@ namespace llvm {
/// copyByValArg - Copy argument registers which were used to pass a byval
/// argument to the stack. Create a stack frame object for the byval
/// argument.
- void copyByValRegs(SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains,
- SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
+ void copyByValRegs(SDValue Chain, const SDLoc &DL,
+ std::vector<SDValue> &OutChains, SelectionDAG &DAG,
+ const ISD::ArgFlagsTy &Flags,
SmallVectorImpl<SDValue> &InVals,
const Argument *FuncArg, unsigned FirstReg,
unsigned LastReg, const CCValAssign &VA,
MipsCCState &State) const;
/// passByValArg - Pass a byval argument in registers or on stack.
- void passByValArg(SDValue Chain, SDLoc DL,
+ void passByValArg(SDValue Chain, const SDLoc &DL,
std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
@@ -475,17 +480,17 @@ namespace llvm {
/// to the stack. Also create a stack frame object for the first variable
/// argument.
void writeVarArgRegs(std::vector<SDValue> &OutChains, SDValue Chain,
- SDLoc DL, SelectionDAG &DAG, CCState &State) const;
+ const SDLoc &DL, SelectionDAG &DAG,
+ CCState &State) const;
SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const override;
+ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
SDValue passArgOnStack(SDValue StackPtr, unsigned Offset, SDValue Chain,
- SDValue Arg, SDLoc DL, bool IsTailCall,
+ SDValue Arg, const SDLoc &DL, bool IsTailCall,
SelectionDAG &DAG) const;
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
@@ -496,14 +501,13 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const override;
- SDValue LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const override;
+ const SDLoc &dl, SelectionDAG &DAG) const override;
- SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps, SDLoc DL,
- SelectionDAG &DAG) const;
+ SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
+ const SDLoc &DL, SelectionDAG &DAG) const;
bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override;
@@ -561,25 +565,33 @@ namespace llvm {
unsigned getJumpTableEncoding() const override;
bool useSoftFloat() const override;
+ bool shouldInsertFencesForAtomic(const Instruction *I) const override {
+ return true;
+ }
+
/// Emit a sign-extension using sll/sra, seb, or seh appropriately.
- MachineBasicBlock *emitSignExtendToI32InReg(MachineInstr *MI,
+ MachineBasicBlock *emitSignExtendToI32InReg(MachineInstr &MI,
MachineBasicBlock *BB,
unsigned Size, unsigned DstReg,
unsigned SrcRec) const;
- MachineBasicBlock *emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
- unsigned Size, unsigned BinOpcode, bool Nand = false) const;
- MachineBasicBlock *emitAtomicBinaryPartword(MachineInstr *MI,
- MachineBasicBlock *BB, unsigned Size, unsigned BinOpcode,
- bool Nand = false) const;
- MachineBasicBlock *emitAtomicCmpSwap(MachineInstr *MI,
- MachineBasicBlock *BB, unsigned Size) const;
- MachineBasicBlock *emitAtomicCmpSwapPartword(MachineInstr *MI,
- MachineBasicBlock *BB, unsigned Size) const;
- MachineBasicBlock *emitSEL_D(MachineInstr *MI, MachineBasicBlock *BB) const;
- MachineBasicBlock *emitPseudoSELECT(MachineInstr *MI,
- MachineBasicBlock *BB, bool isFPCmp,
- unsigned Opc) const;
+ MachineBasicBlock *emitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
+ unsigned Size, unsigned BinOpcode,
+ bool Nand = false) const;
+ MachineBasicBlock *emitAtomicBinaryPartword(MachineInstr &MI,
+ MachineBasicBlock *BB,
+ unsigned Size,
+ unsigned BinOpcode,
+ bool Nand = false) const;
+ MachineBasicBlock *emitAtomicCmpSwap(MachineInstr &MI,
+ MachineBasicBlock *BB,
+ unsigned Size) const;
+ MachineBasicBlock *emitAtomicCmpSwapPartword(MachineInstr &MI,
+ MachineBasicBlock *BB,
+ unsigned Size) const;
+ MachineBasicBlock *emitSEL_D(MachineInstr &MI, MachineBasicBlock *BB) const;
+ MachineBasicBlock *emitPseudoSELECT(MachineInstr &MI, MachineBasicBlock *BB,
+ bool isFPCmp, unsigned Opc) const;
};
/// Create MipsTargetLowering objects.
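Two of the hooks added to this header interact with atomic lowering: shouldInsertFencesForAtomic() makes the generic expansion bracket atomics with explicit fences, and getExtendForAtomicOps() returning ISD::SIGN_EXTEND reflects the MIPS64 convention that 32-bit values live sign-extended in 64-bit registers. A minimal sketch of that extension rule, assuming a plain C++ model (widenAtomicResult is a made-up name, not backend code):

#include <cstdint>

uint64_t widenAtomicResult(int32_t loaded) {
  // ISD::SIGN_EXTEND: replicate bit 31 into bits 32..63.
  return static_cast<uint64_t>(static_cast<int64_t>(loaded));
}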
diff --git a/lib/Target/Mips/MipsInstrFPU.td b/lib/Target/Mips/MipsInstrFPU.td
index 377260f89d10..87b02bdfdfdb 100644
--- a/lib/Target/Mips/MipsInstrFPU.td
+++ b/lib/Target/Mips/MipsInstrFPU.td
@@ -160,18 +160,18 @@ class MTC1_64_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC,
let Constraints = "$fs = $fs_in";
}
-class LW_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
- SDPatternOperator OpNode= null_frag> :
- InstSE<(outs RC:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
+class LW_FT<string opstr, RegisterOperand RC, DAGOperand MO,
+ InstrItinClass Itin, SDPatternOperator OpNode = null_frag> :
+ InstSE<(outs RC:$rt), (ins MO:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(set RC:$rt, (OpNode addrDefault:$addr))], Itin, FrmFI, opstr>,
HARDFLOAT {
let DecoderMethod = "DecodeFMem";
let mayLoad = 1;
}
-class SW_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
- SDPatternOperator OpNode= null_frag> :
- InstSE<(outs), (ins RC:$rt, mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
+class SW_FT<string opstr, RegisterOperand RC, DAGOperand MO,
+ InstrItinClass Itin, SDPatternOperator OpNode = null_frag> :
+ InstSE<(outs), (ins RC:$rt, MO:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(OpNode RC:$rt, addrDefault:$addr)], Itin, FrmFI, opstr>, HARDFLOAT {
let DecoderMethod = "DecodeFMem";
let mayStore = 1;
@@ -367,22 +367,28 @@ def MFC1 : MMRel, MFC1_FT<"mfc1", GPR32Opnd, FGR32Opnd, II_MFC1,
bitconvert>, MFC1_FM<0>;
def MTC1 : MMRel, MTC1_FT<"mtc1", FGR32Opnd, GPR32Opnd, II_MTC1,
bitconvert>, MFC1_FM<4>;
-def MFHC1_D32 : MMRel, MFC1_FT<"mfhc1", GPR32Opnd, AFGR64Opnd, II_MFHC1>,
- MFC1_FM<3>, ISA_MIPS32R2, FGR_32;
-def MFHC1_D64 : MFC1_FT<"mfhc1", GPR32Opnd, FGR64Opnd, II_MFHC1>,
- MFC1_FM<3>, ISA_MIPS32R2, FGR_64 {
- let DecoderNamespace = "Mips64";
-}
-def MTHC1_D32 : MMRel, MTC1_64_FT<"mthc1", AFGR64Opnd, GPR32Opnd, II_MTHC1>,
- MFC1_FM<7>, ISA_MIPS32R2, FGR_32;
-def MTHC1_D64 : MTC1_64_FT<"mthc1", FGR64Opnd, GPR32Opnd, II_MTHC1>,
- MFC1_FM<7>, ISA_MIPS32R2, FGR_64 {
- let DecoderNamespace = "Mips64";
-}
-def DMFC1 : MFC1_FT<"dmfc1", GPR64Opnd, FGR64Opnd, II_DMFC1,
- bitconvert>, MFC1_FM<1>, ISA_MIPS3;
-def DMTC1 : MTC1_FT<"dmtc1", FGR64Opnd, GPR64Opnd, II_DMTC1,
- bitconvert>, MFC1_FM<5>, ISA_MIPS3;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def MFHC1_D32 : MMRel, MFC1_FT<"mfhc1", GPR32Opnd, AFGR64Opnd, II_MFHC1>,
+ MFC1_FM<3>, ISA_MIPS32R2, FGR_32;
+ def MFHC1_D64 : MFC1_FT<"mfhc1", GPR32Opnd, FGR64Opnd, II_MFHC1>,
+ MFC1_FM<3>, ISA_MIPS32R2, FGR_64 {
+ let DecoderNamespace = "Mips64";
+ }
+}
+let AdditionalPredicates = [NotInMicroMips] in {
+ def MTHC1_D32 : MMRel, StdMMR6Rel, MTC1_64_FT<"mthc1", AFGR64Opnd, GPR32Opnd, II_MTHC1>,
+ MFC1_FM<7>, ISA_MIPS32R2, FGR_32;
+ def MTHC1_D64 : MTC1_64_FT<"mthc1", FGR64Opnd, GPR32Opnd, II_MTHC1>,
+ MFC1_FM<7>, ISA_MIPS32R2, FGR_64 {
+ let DecoderNamespace = "Mips64";
+ }
+}
+let AdditionalPredicates = [NotInMicroMips] in {
+ def DMTC1 : MTC1_FT<"dmtc1", FGR64Opnd, GPR64Opnd, II_DMTC1,
+ bitconvert>, MFC1_FM<5>, ISA_MIPS3;
+ def DMFC1 : MFC1_FT<"dmfc1", GPR64Opnd, FGR64Opnd, II_DMFC1,
+ bitconvert>, MFC1_FM<1>, ISA_MIPS3;
+}
def FMOV_S : MMRel, ABSS_FT<"mov.s", FGR32Opnd, FGR32Opnd, II_MOV_S>,
ABSS_FM<0x6, 16>;
@@ -394,20 +400,30 @@ def FMOV_D64 : ABSS_FT<"mov.d", FGR64Opnd, FGR64Opnd, II_MOV_D>,
}
/// Floating Point Memory Instructions
-def LWC1 : MMRel, LW_FT<"lwc1", FGR32Opnd, II_LWC1, load>, LW_FM<0x31>;
-def SWC1 : MMRel, SW_FT<"swc1", FGR32Opnd, II_SWC1, store>, LW_FM<0x39>;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def LWC1 : MMRel, LW_FT<"lwc1", FGR32Opnd, mem_simm16, II_LWC1, load>,
+ LW_FM<0x31>;
+ def SWC1 : MMRel, SW_FT<"swc1", FGR32Opnd, mem_simm16, II_SWC1, store>,
+ LW_FM<0x39>;
+}
-let DecoderNamespace = "Mips64" in {
- def LDC164 : LW_FT<"ldc1", FGR64Opnd, II_LDC1, load>, LW_FM<0x35>, ISA_MIPS2,
- FGR_64;
- def SDC164 : SW_FT<"sdc1", FGR64Opnd, II_SDC1, store>, LW_FM<0x3d>, ISA_MIPS2,
- FGR_64;
+let DecoderNamespace = "Mips64", AdditionalPredicates = [NotInMicroMips] in {
+ def LDC164 : StdMMR6Rel, LW_FT<"ldc1", FGR64Opnd, mem_simm16, II_LDC1, load>,
+ LW_FM<0x35>, ISA_MIPS2, FGR_64 {
+ let BaseOpcode = "LDC164";
+ }
+ def SDC164 : StdMMR6Rel, SW_FT<"sdc1", FGR64Opnd, mem_simm16, II_SDC1, store>,
+ LW_FM<0x3d>, ISA_MIPS2, FGR_64;
}
-def LDC1 : MMRel, LW_FT<"ldc1", AFGR64Opnd, II_LDC1, load>, LW_FM<0x35>,
- ISA_MIPS2, FGR_32;
-def SDC1 : MMRel, SW_FT<"sdc1", AFGR64Opnd, II_SDC1, store>, LW_FM<0x3d>,
- ISA_MIPS2, FGR_32;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def LDC1 : MMRel, StdMMR6Rel, LW_FT<"ldc1", AFGR64Opnd, mem_simm16, II_LDC1,
+ load>, LW_FM<0x35>, ISA_MIPS2, FGR_32 {
+ let BaseOpcode = "LDC132";
+ }
+ def SDC1 : MMRel, SW_FT<"sdc1", AFGR64Opnd, mem_simm16, II_SDC1, store>,
+ LW_FM<0x3d>, ISA_MIPS2, FGR_32;
+}
// Indexed loads and stores.
// Base register + offset register addressing mode (indicated by "x" in the
@@ -519,10 +535,12 @@ def BC1TL : MMRel, BC1F_FT<"bc1tl", brtarget, II_BC1TL, MIPS_BRANCH_T, 0>,
BC1F_FM<1, 1>, ISA_MIPS2_NOT_32R6_64R6;
/// Floating Point Compare
-def FCMP_S32 : MMRel, CEQS_FT<"s", FGR32, II_C_CC_S, MipsFPCmp>, CEQS_FM<16>,
- ISA_MIPS1_NOT_32R6_64R6;
-def FCMP_D32 : MMRel, CEQS_FT<"d", AFGR64, II_C_CC_D, MipsFPCmp>, CEQS_FM<17>,
- ISA_MIPS1_NOT_32R6_64R6, FGR_32;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def FCMP_S32 : MMRel, CEQS_FT<"s", FGR32, II_C_CC_S, MipsFPCmp>, CEQS_FM<16>,
+ ISA_MIPS1_NOT_32R6_64R6;
+ def FCMP_D32 : MMRel, CEQS_FT<"d", AFGR64, II_C_CC_D, MipsFPCmp>, CEQS_FM<17>,
+ ISA_MIPS1_NOT_32R6_64R6, FGR_32;
+}
let DecoderNamespace = "Mips64" in
def FCMP_D64 : CEQS_FT<"d", FGR64, II_C_CC_D, MipsFPCmp>, CEQS_FM<17>,
ISA_MIPS1_NOT_32R6_64R6, FGR_64;
@@ -535,7 +553,8 @@ def FCMP_D64 : CEQS_FT<"d", FGR64, II_C_CC_D, MipsFPCmp>, CEQS_FM<17>,
// allocation.
class BuildPairF64Base<RegisterOperand RO> :
PseudoSE<(outs RO:$dst), (ins GPR32Opnd:$lo, GPR32Opnd:$hi),
- [(set RO:$dst, (MipsBuildPairF64 GPR32Opnd:$lo, GPR32Opnd:$hi))]>;
+ [(set RO:$dst, (MipsBuildPairF64 GPR32Opnd:$lo, GPR32Opnd:$hi))],
+ II_MTC1>;
def BuildPairF64 : BuildPairF64Base<AFGR64Opnd>, FGR_32, HARDFLOAT;
def BuildPairF64_64 : BuildPairF64Base<FGR64Opnd>, FGR_64, HARDFLOAT;
@@ -544,13 +563,30 @@ def BuildPairF64_64 : BuildPairF64Base<FGR64Opnd>, FGR_64, HARDFLOAT;
// allocation.
// if n is 0, lower part of src is extracted.
// if n is 1, higher part of src is extracted.
+// This node has associated scheduling information because the pre-RA
+// scheduler asserts otherwise.
class ExtractElementF64Base<RegisterOperand RO> :
PseudoSE<(outs GPR32Opnd:$dst), (ins RO:$src, i32imm:$n),
- [(set GPR32Opnd:$dst, (MipsExtractElementF64 RO:$src, imm:$n))]>;
+ [(set GPR32Opnd:$dst, (MipsExtractElementF64 RO:$src, imm:$n))],
+ II_MFC1>;
def ExtractElementF64 : ExtractElementF64Base<AFGR64Opnd>, FGR_32, HARDFLOAT;
def ExtractElementF64_64 : ExtractElementF64Base<FGR64Opnd>, FGR_64, HARDFLOAT;
+def PseudoTRUNC_W_S : MipsAsmPseudoInst<(outs FGR32Opnd:$fd),
+ (ins FGR32Opnd:$fs, GPR32Opnd:$rs),
+ "trunc.w.s\t$fd, $fs, $rs">;
+
+def PseudoTRUNC_W_D32 : MipsAsmPseudoInst<(outs FGR32Opnd:$fd),
+ (ins AFGR64Opnd:$fs, GPR32Opnd:$rs),
+ "trunc.w.d\t$fd, $fs, $rs">,
+ FGR_32, HARDFLOAT;
+
+def PseudoTRUNC_W_D : MipsAsmPseudoInst<(outs FGR32Opnd:$fd),
+ (ins FGR64Opnd:$fs, GPR32Opnd:$rs),
+ "trunc.w.d\t$fd, $fs, $rs">,
+ FGR_64, HARDFLOAT;
+
//===----------------------------------------------------------------------===//
// InstAliases.
//===----------------------------------------------------------------------===//
@@ -606,13 +642,15 @@ def : MipsPat<(f64 (fextend FGR32Opnd:$src)),
(CVT_D64_S FGR32Opnd:$src)>, FGR_64;
// Patterns for loads/stores with a reg+imm operand.
-let AddedComplexity = 40 in {
- def : LoadRegImmPat<LWC1, f32, load>;
- def : StoreRegImmPat<SWC1, f32>;
+let AdditionalPredicates = [NotInMicroMips] in {
+ let AddedComplexity = 40 in {
+ def : LoadRegImmPat<LWC1, f32, load>;
+ def : StoreRegImmPat<SWC1, f32>;
- def : LoadRegImmPat<LDC164, f64, load>, FGR_64;
- def : StoreRegImmPat<SDC164, f64>, FGR_64;
+ def : LoadRegImmPat<LDC164, f64, load>, FGR_64;
+ def : StoreRegImmPat<SDC164, f64>, FGR_64;
- def : LoadRegImmPat<LDC1, f64, load>, FGR_32;
- def : StoreRegImmPat<SDC1, f64>, FGR_32;
+ def : LoadRegImmPat<LDC1, f64, load>, FGR_32;
+ def : StoreRegImmPat<SDC1, f64>, FGR_32;
+ }
}
diff --git a/lib/Target/Mips/MipsInstrFormats.td b/lib/Target/Mips/MipsInstrFormats.td
index 45baf27be518..0bbb49b6b08e 100644
--- a/lib/Target/Mips/MipsInstrFormats.td
+++ b/lib/Target/Mips/MipsInstrFormats.td
@@ -94,10 +94,15 @@ class MipsInst<dag outs, dag ins, string asmstr, list<dag> pattern,
//
// Attributes specific to Mips instructions...
//
- bits<4> FormBits = Form.Value;
+ bits<4> FormBits = Form.Value;
+ bit isCTI = 0; // Any form of Control Transfer Instruction.
+ // Required for MIPSR6
+ bit hasForbiddenSlot = 0; // Instruction has a forbidden slot.
// TSFlags layout should be kept in sync with MipsInstrInfo.h.
let TSFlags{3-0} = FormBits;
+ let TSFlags{4} = isCTI;
+ let TSFlags{5} = hasForbiddenSlot;
let DecoderNamespace = "Mips";
diff --git a/lib/Target/Mips/MipsInstrInfo.cpp b/lib/Target/Mips/MipsInstrInfo.cpp
index b1d69506c16f..800d834e0ab8 100644
--- a/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/lib/Target/Mips/MipsInstrInfo.cpp
@@ -13,7 +13,6 @@
#include "MipsInstrInfo.h"
#include "InstPrinter/MipsInstPrinter.h"
-#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
#include "MipsSubtarget.h"
#include "llvm/ADT/STLExtras.h"
@@ -47,6 +46,7 @@ bool MipsInstrInfo::isZeroImm(const MachineOperand &op) const {
/// insertNoop - If data hazard condition is found insert the target nop
/// instruction.
+// FIXME: This appears to be dead code.
void MipsInstrInfo::
insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
{
@@ -54,14 +54,15 @@ insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
BuildMI(MBB, MI, DL, get(Mips::NOP));
}
-MachineMemOperand *MipsInstrInfo::GetMemOperand(MachineBasicBlock &MBB, int FI,
- unsigned Flag) const {
+MachineMemOperand *
+MipsInstrInfo::GetMemOperand(MachineBasicBlock &MBB, int FI,
+ MachineMemOperand::Flags Flags) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
unsigned Align = MFI.getObjectAlignment(FI);
return MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
- Flag, MFI.getObjectSize(FI), Align);
+ Flags, MFI.getObjectSize(FI), Align);
}
//===----------------------------------------------------------------------===//
@@ -83,20 +84,20 @@ void MipsInstrInfo::AnalyzeCondBr(const MachineInstr *Inst, unsigned Opc,
Cond.push_back(Inst->getOperand(i));
}
-bool MipsInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+bool MipsInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
SmallVector<MachineInstr*, 2> BranchInstrs;
- BranchType BT = AnalyzeBranch(MBB, TBB, FBB, Cond, AllowModify, BranchInstrs);
+ BranchType BT = analyzeBranch(MBB, TBB, FBB, Cond, AllowModify, BranchInstrs);
return (BT == BT_None) || (BT == BT_Indirect);
}
-void
-MipsInstrInfo::BuildCondBr(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- DebugLoc DL, ArrayRef<MachineOperand> Cond) const {
+void MipsInstrInfo::BuildCondBr(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ const DebugLoc &DL,
+ ArrayRef<MachineOperand> Cond) const {
unsigned Opc = Cond[0].getImm();
const MCInstrDesc &MCID = get(Opc);
MachineInstrBuilder MIB = BuildMI(&MBB, DL, MCID);
@@ -107,14 +108,16 @@ MipsInstrInfo::BuildCondBr(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
else if (Cond[i].isImm())
MIB.addImm(Cond[i].getImm());
else
- assert(true && "Cannot copy operand");
+ assert(false && "Cannot copy operand");
}
MIB.addMBB(TBB);
}
-unsigned MipsInstrInfo::InsertBranch(
- MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
- ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
+unsigned MipsInstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
@@ -174,7 +177,7 @@ bool MipsInstrInfo::ReverseBranchCondition(
return false;
}
-MipsInstrInfo::BranchType MipsInstrInfo::AnalyzeBranch(
+MipsInstrInfo::BranchType MipsInstrInfo::analyzeBranch(
MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond, bool AllowModify,
SmallVectorImpl<MachineInstr *> &BranchInstrs) const {
@@ -185,7 +188,7 @@ MipsInstrInfo::BranchType MipsInstrInfo::AnalyzeBranch(
while (I != REnd && I->isDebugValue())
++I;
- if (I == REnd || !isUnpredicatedTerminator(&*I)) {
+ if (I == REnd || !isUnpredicatedTerminator(*I)) {
// This block ends with no branches (it just falls through to its succ).
// Leave TBB/FBB null.
TBB = FBB = nullptr;
@@ -209,14 +212,14 @@ MipsInstrInfo::BranchType MipsInstrInfo::AnalyzeBranch(
SecondLastOpc = getAnalyzableBrOpc(SecondLastInst->getOpcode());
// Not an analyzable branch (must be an indirect jump).
- if (isUnpredicatedTerminator(SecondLastInst) && !SecondLastOpc)
+ if (isUnpredicatedTerminator(*SecondLastInst) && !SecondLastOpc)
return BT_None;
}
// If there is only one terminator instruction, process it.
if (!SecondLastOpc) {
// Unconditional branch.
- if (LastOpc == UncondBrOpc) {
+ if (LastInst->isUnconditionalBranch()) {
TBB = LastInst->getOperand(0).getMBB();
return BT_Uncond;
}
@@ -228,14 +231,14 @@ MipsInstrInfo::BranchType MipsInstrInfo::AnalyzeBranch(
// If we reached here, there are two branches.
// If there are three terminators, we don't know what sort of block this is.
- if (++I != REnd && isUnpredicatedTerminator(&*I))
+ if (++I != REnd && isUnpredicatedTerminator(*I))
return BT_None;
BranchInstrs.insert(BranchInstrs.begin(), SecondLastInst);
// If second to last instruction is an unconditional branch,
// analyze it and remove the last instruction.
- if (SecondLastOpc == UncondBrOpc) {
+ if (SecondLastInst->isUnconditionalBranch()) {
// Return if the last instruction cannot be removed.
if (!AllowModify)
return BT_None;
@@ -248,7 +251,7 @@ MipsInstrInfo::BranchType MipsInstrInfo::AnalyzeBranch(
// Conditional branch followed by an unconditional branch.
// The last one must be unconditional.
- if (LastOpc != UncondBrOpc)
+ if (!LastInst->isUnconditionalBranch())
return BT_None;
AnalyzeCondBr(SecondLastInst, SecondLastOpc, TBB, Cond);
@@ -257,20 +260,137 @@ MipsInstrInfo::BranchType MipsInstrInfo::AnalyzeBranch(
return BT_CondUncond;
}
+/// Return the corresponding compact (no delay slot) form of a branch.
+unsigned MipsInstrInfo::getEquivalentCompactForm(
+ const MachineBasicBlock::iterator I) const {
+ unsigned Opcode = I->getOpcode();
+ bool canUseShortMicroMipsCTI = false;
+
+ if (Subtarget.inMicroMipsMode()) {
+ switch (Opcode) {
+ case Mips::BNE:
+ case Mips::BEQ:
+      // microMIPS has NE/EQ branches that do not have delay slots, provided
+      // one of the operands is zero.
+ if (I->getOperand(1).getReg() == Subtarget.getABI().GetZeroReg())
+ canUseShortMicroMipsCTI = true;
+ break;
+ // For microMIPS the PseudoReturn and PseudoIndirectBranch are always
+ // expanded to JR_MM, so they can be replaced with JRC16_MM.
+ case Mips::JR:
+ case Mips::PseudoReturn:
+ case Mips::PseudoIndirectBranch:
+ canUseShortMicroMipsCTI = true;
+ break;
+ }
+ }
+
+ // MIPSR6 forbids both operands being the zero register.
+ if (Subtarget.hasMips32r6() && (I->getNumOperands() > 1) &&
+ (I->getOperand(0).isReg() &&
+ (I->getOperand(0).getReg() == Mips::ZERO ||
+ I->getOperand(0).getReg() == Mips::ZERO_64)) &&
+ (I->getOperand(1).isReg() &&
+ (I->getOperand(1).getReg() == Mips::ZERO ||
+ I->getOperand(1).getReg() == Mips::ZERO_64)))
+ return 0;
+
+ if (Subtarget.hasMips32r6() || canUseShortMicroMipsCTI) {
+ switch (Opcode) {
+ case Mips::B:
+ return Mips::BC;
+ case Mips::BAL:
+ return Mips::BALC;
+ case Mips::BEQ:
+ if (canUseShortMicroMipsCTI)
+ return Mips::BEQZC_MM;
+ else if (I->getOperand(0).getReg() == I->getOperand(1).getReg())
+ return 0;
+ return Mips::BEQC;
+ case Mips::BNE:
+ if (canUseShortMicroMipsCTI)
+ return Mips::BNEZC_MM;
+ else if (I->getOperand(0).getReg() == I->getOperand(1).getReg())
+ return 0;
+ return Mips::BNEC;
+ case Mips::BGE:
+ if (I->getOperand(0).getReg() == I->getOperand(1).getReg())
+ return 0;
+ return Mips::BGEC;
+ case Mips::BGEU:
+ if (I->getOperand(0).getReg() == I->getOperand(1).getReg())
+ return 0;
+ return Mips::BGEUC;
+ case Mips::BGEZ:
+ return Mips::BGEZC;
+ case Mips::BGTZ:
+ return Mips::BGTZC;
+ case Mips::BLEZ:
+ return Mips::BLEZC;
+ case Mips::BLT:
+ if (I->getOperand(0).getReg() == I->getOperand(1).getReg())
+ return 0;
+ return Mips::BLTC;
+ case Mips::BLTU:
+ if (I->getOperand(0).getReg() == I->getOperand(1).getReg())
+ return 0;
+ return Mips::BLTUC;
+ case Mips::BLTZ:
+ return Mips::BLTZC;
+ // For MIPSR6, the instruction 'jic' can be used for these cases. Some
+ // tools will accept 'jrc reg' as an alias for 'jic 0, $reg'.
+ case Mips::JR:
+ case Mips::PseudoReturn:
+ case Mips::PseudoIndirectBranch:
+ if (canUseShortMicroMipsCTI)
+ return Mips::JRC16_MM;
+ return Mips::JIC;
+ case Mips::JALRPseudo:
+ return Mips::JIALC;
+ case Mips::JR64:
+ case Mips::PseudoReturn64:
+ case Mips::PseudoIndirectBranch64:
+ return Mips::JIC64;
+ case Mips::JALR64Pseudo:
+ return Mips::JIALC64;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/// Predicate for distinguishing between control transfer instructions and all
+/// other instructions for handling forbidden slots. Consider inline assembly
+/// as unsafe as well.
+bool MipsInstrInfo::SafeInForbiddenSlot(const MachineInstr &MI) const {
+ if (MI.isInlineAsm())
+ return false;
+
+  return (MI.getDesc().TSFlags & MipsII::IsCTI) == 0;
+}
+
+/// Predicate for distinguishing instructions that have forbidden slots.
+bool MipsInstrInfo::HasForbiddenSlot(const MachineInstr &MI) const {
+ return (MI.getDesc().TSFlags & MipsII::HasForbiddenSlot) != 0;
+}
+
/// Return the number of bytes of code the specified instruction may be.
-unsigned MipsInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
- switch (MI->getOpcode()) {
+unsigned MipsInstrInfo::GetInstSizeInBytes(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
default:
- return MI->getDesc().getSize();
+ return MI.getDesc().getSize();
case TargetOpcode::INLINEASM: { // Inline Asm: Variable size.
- const MachineFunction *MF = MI->getParent()->getParent();
- const char *AsmStr = MI->getOperand(0).getSymbolName();
+ const MachineFunction *MF = MI.getParent()->getParent();
+ const char *AsmStr = MI.getOperand(0).getSymbolName();
return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
}
case Mips::CONSTPOOL_ENTRY:
// If this machine instr is a constant pool entry, its size is recorded as
// operand #2.
- return MI->getOperand(2).getImm();
+ return MI.getOperand(2).getImm();
}
}
@@ -278,10 +398,70 @@ MachineInstrBuilder
MipsInstrInfo::genInstrWithNewOpc(unsigned NewOpc,
MachineBasicBlock::iterator I) const {
MachineInstrBuilder MIB;
+
+  // Certain branches have two forms: e.g. beq $1, $zero, dest vs. beqz $1, dest
+ // Pick the zero form of the branch for readable assembly and for greater
+ // branch distance in non-microMIPS mode.
+  // FIXME: Certain atomic sequences on mips64 generate 32-bit references to
+ // Mips::ZERO, which is incorrect. This test should be updated to use
+ // Subtarget.getABI().GetZeroReg() when those atomic sequences and others
+ // are fixed.
+ bool BranchWithZeroOperand =
+ (I->isBranch() && !I->isPseudo() && I->getOperand(1).isReg() &&
+ (I->getOperand(1).getReg() == Mips::ZERO ||
+ I->getOperand(1).getReg() == Mips::ZERO_64));
+
+ if (BranchWithZeroOperand) {
+ switch (NewOpc) {
+ case Mips::BEQC:
+ NewOpc = Mips::BEQZC;
+ break;
+ case Mips::BNEC:
+ NewOpc = Mips::BNEZC;
+ break;
+ case Mips::BGEC:
+ NewOpc = Mips::BGEZC;
+ break;
+ case Mips::BLTC:
+ NewOpc = Mips::BLTZC;
+ break;
+ }
+ }
+
MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), get(NewOpc));
- for (unsigned J = 0, E = I->getDesc().getNumOperands(); J < E; ++J)
- MIB.addOperand(I->getOperand(J));
+  // For MIPSR6, JI*C requires an immediate 0 as an operand. JIALC(64) also
+  // requires the removal of its %RA<imp-def> implicit operand, as copying the
+  // implicit operands of the instruction we're looking at will give us the
+  // correct flags.
+ if (NewOpc == Mips::JIC || NewOpc == Mips::JIALC || NewOpc == Mips::JIC64 ||
+ NewOpc == Mips::JIALC64) {
+
+ if (NewOpc == Mips::JIALC || NewOpc == Mips::JIALC64)
+ MIB->RemoveOperand(0);
+
+ for (unsigned J = 0, E = I->getDesc().getNumOperands(); J < E; ++J) {
+ MIB.addOperand(I->getOperand(J));
+ }
+
+ MIB.addImm(0);
+
+ } else if (BranchWithZeroOperand) {
+    // For MIPSR6 and microMIPS branches with an explicit zero operand, copy
+    // the register operand and then everything after the zero.
+ MIB.addOperand(I->getOperand(0));
+
+ for (unsigned J = 2, E = I->getDesc().getNumOperands(); J < E; ++J) {
+ MIB.addOperand(I->getOperand(J));
+ }
+ } else {
+ // All other cases copy all other operands.
+ for (unsigned J = 0, E = I->getDesc().getNumOperands(); J < E; ++J) {
+ MIB.addOperand(I->getOperand(J));
+ }
+ }
+
+ MIB.copyImplicitOps(*I);
MIB.setMemRefs(I->memoperands_begin(), I->memoperands_end());
return MIB;
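The HasForbiddenSlot()/SafeInForbiddenSlot() predicates introduced in this file are meant to compose: an instruction with a forbidden slot must be followed by a non-CTI, non-inline-asm instruction, otherwise a NOP has to be inserted. A minimal sketch of such a filler loop, assuming the usual CodeGen headers are available (fillForbiddenSlots is a hypothetical function, not part of this diff):

void fillForbiddenSlots(MachineBasicBlock &MBB, const MipsInstrInfo &TII) {
  for (auto I = MBB.begin(), E = MBB.end(); I != E; ++I) {
    if (!TII.HasForbiddenSlot(*I))
      continue;
    auto Next = std::next(I);
    // Pad the slot with a NOP when the block ends here or the next
    // instruction is unsafe (a CTI or inline assembly).
    if (Next == E || !TII.SafeInForbiddenSlot(*Next))
      BuildMI(MBB, Next, I->getDebugLoc(), TII.get(Mips::NOP));
  }
}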
diff --git a/lib/Target/Mips/MipsInstrInfo.h b/lib/Target/Mips/MipsInstrInfo.h
index 08efc3509046..2e55012eec40 100644
--- a/lib/Target/Mips/MipsInstrInfo.h
+++ b/lib/Target/Mips/MipsInstrInfo.h
@@ -19,7 +19,6 @@
#define LLVM_LIB_TARGET_MIPS_MIPSINSTRINFO_H
#include "Mips.h"
-#include "MipsAnalyzeImmediate.h"
#include "MipsRegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/ErrorHandling.h"
@@ -51,7 +50,7 @@ public:
static const MipsInstrInfo *create(MipsSubtarget &STI);
/// Branch Analysis
- bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
@@ -60,16 +59,25 @@ public:
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const override;
+ const DebugLoc &DL) const override;
bool
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
- BranchType AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ BranchType analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify,
- SmallVectorImpl<MachineInstr*> &BranchInstrs) const;
+ SmallVectorImpl<MachineInstr *> &BranchInstrs) const;
+
+ /// Determine the opcode of a non-delay slot form for a branch if one exists.
+ unsigned getEquivalentCompactForm(const MachineBasicBlock::iterator I) const;
+
+ /// Predicate to determine if an instruction can go in a forbidden slot.
+ bool SafeInForbiddenSlot(const MachineInstr &MI) const;
+
+ /// Predicate to determine if an instruction has a forbidden slot.
+ bool HasForbiddenSlot(const MachineInstr &MI) const;
/// Insert nop instruction when hazard condition is found
void insertNoop(MachineBasicBlock &MBB,
@@ -84,7 +92,7 @@ public:
virtual unsigned getOppositeBranchOpc(unsigned Opc) const = 0;
/// Return the number of bytes of code the specified instruction may be.
- unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
+ unsigned GetInstSizeInBytes(const MachineInstr &MI) const;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -129,7 +137,7 @@ protected:
bool isZeroImm(const MachineOperand &op) const;
MachineMemOperand *GetMemOperand(MachineBasicBlock &MBB, int FI,
- unsigned Flag) const;
+ MachineMemOperand::Flags Flags) const;
private:
virtual unsigned getAnalyzableBrOpc(unsigned Opc) const = 0;
@@ -138,8 +146,8 @@ private:
MachineBasicBlock *&BB,
SmallVectorImpl<MachineOperand> &Cond) const;
- void BuildCondBr(MachineBasicBlock &MBB, MachineBasicBlock *TBB, DebugLoc DL,
- ArrayRef<MachineOperand> Cond) const;
+ void BuildCondBr(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ const DebugLoc &DL, ArrayRef<MachineOperand> Cond) const;
};
/// Create MipsInstrInfo objects.
diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td
index ffda491f0c86..296f6e9b08bd 100644
--- a/lib/Target/Mips/MipsInstrInfo.td
+++ b/lib/Target/Mips/MipsInstrInfo.td
@@ -179,6 +179,10 @@ def IsGP64bit : Predicate<"Subtarget->isGP64bit()">,
AssemblerPredicate<"FeatureGP64Bit">;
def IsGP32bit : Predicate<"!Subtarget->isGP64bit()">,
AssemblerPredicate<"!FeatureGP64Bit">;
+def IsPTR64bit : Predicate<"Subtarget->isABI_N64()">,
+ AssemblerPredicate<"FeaturePTR64Bit">;
+def IsPTR32bit : Predicate<"!Subtarget->isABI_N64()">,
+ AssemblerPredicate<"!FeaturePTR64Bit">;
def HasMips64 : Predicate<"Subtarget->hasMips64()">,
AssemblerPredicate<"FeatureMips64">;
def NotMips64 : Predicate<"!Subtarget->hasMips64()">,
@@ -197,8 +201,8 @@ def InMips16Mode : Predicate<"Subtarget->inMips16Mode()">,
AssemblerPredicate<"FeatureMips16">;
def HasCnMips : Predicate<"Subtarget->hasCnMips()">,
AssemblerPredicate<"FeatureCnMips">;
-def RelocStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;
-def RelocPIC : Predicate<"TM.getRelocationModel() == Reloc::PIC_">;
+def RelocNotPIC : Predicate<"!TM.isPositionIndependent()">;
+def RelocPIC : Predicate<"TM.isPositionIndependent()">;
def NoNaNsFPMath : Predicate<"TM.Options.NoNaNsFPMath">;
def HasStdEnc : Predicate<"Subtarget->hasStandardEncoding()">,
AssemblerPredicate<"!FeatureMips16">;
@@ -225,6 +229,9 @@ def HasMSA : Predicate<"Subtarget->hasMSA()">,
class GPR_32 { list<Predicate> GPRPredicates = [IsGP32bit]; }
class GPR_64 { list<Predicate> GPRPredicates = [IsGP64bit]; }
+class PTR_32 { list<Predicate> PTRPredicates = [IsPTR32bit]; }
+class PTR_64 { list<Predicate> PTRPredicates = [IsPTR64bit]; }
+
//===----------------------------------------------------------------------===//
// Mips ISA/ASE membership and instruction group membership adjectives.
// They are mutually exclusive.
@@ -315,6 +322,10 @@ class ASE_CNMIPS {
list<Predicate> InsnPredicates = [HasCnMips];
}
+class ASE_MIPS64_CNMIPS {
+ list<Predicate> InsnPredicates = [HasMips64, HasCnMips];
+}
+
class ASE_MSA {
list<Predicate> InsnPredicates = [HasMSA];
}
@@ -333,6 +344,10 @@ class ISA_MICROMIPS_NOT_32R6_64R6 : PredicateControl {
let InsnPredicates = [InMicroMips, NotMips32r6, NotMips64r6];
}
+class ASE_NOT_DSP {
+ list<Predicate> InsnPredicates = [NotDSP];
+}
+
//===----------------------------------------------------------------------===//
class MipsPat<dag pattern, dag result> : Pat<pattern, result>, PredicateControl {
@@ -348,14 +363,17 @@ class IsCommutable {
class IsBranch {
bit isBranch = 1;
+ bit isCTI = 1;
}
class IsReturn {
bit isReturn = 1;
+ bit isCTI = 1;
}
class IsCall {
bit isCall = 1;
+ bit isCTI = 1;
}
class IsTailCall {
@@ -365,6 +383,7 @@ class IsTailCall {
bit isBarrier = 1;
bit hasExtraSrcRegAllocReq = 1;
bit isCodeGenOnly = 1;
+ bit isCTI = 1;
}
class IsAsCheapAsAMove {
@@ -385,13 +404,13 @@ include "MipsInstrFormats.td"
// Mips Operand, Complex Patterns and Transformations Definitions.
//===----------------------------------------------------------------------===//
-class ConstantSImmAsmOperandClass<int Bits, list<AsmOperandClass> Supers = []>
- : AsmOperandClass {
- let Name = "ConstantSImm" # Bits;
- let RenderMethod = "addImmOperands";
- let PredicateMethod = "isConstantSImm<" # Bits # ">";
+class ConstantSImmAsmOperandClass<int Bits, list<AsmOperandClass> Supers = [],
+ int Offset = 0> : AsmOperandClass {
+ let Name = "ConstantSImm" # Bits # "_" # Offset;
+ let RenderMethod = "addConstantSImmOperands<" # Bits # ", " # Offset # ">";
+ let PredicateMethod = "isConstantSImm<" # Bits # ", " # Offset # ">";
let SuperClasses = Supers;
- let DiagnosticType = "SImm" # Bits;
+ let DiagnosticType = "SImm" # Bits # "_" # Offset;
}
class ConstantUImmAsmOperandClass<int Bits, list<AsmOperandClass> Supers = [],
@@ -403,49 +422,181 @@ class ConstantUImmAsmOperandClass<int Bits, list<AsmOperandClass> Supers = [],
let DiagnosticType = "UImm" # Bits # "_" # Offset;
}
+class ConstantUImmRangeAsmOperandClass<int Bottom, int Top,
+ list<AsmOperandClass> Supers = []>
+ : AsmOperandClass {
+ let Name = "ConstantUImmRange" # Bottom # "_" # Top;
+ let RenderMethod = "addImmOperands";
+ let PredicateMethod = "isConstantUImmRange<" # Bottom # ", " # Top # ">";
+ let SuperClasses = Supers;
+ let DiagnosticType = "UImmRange" # Bottom # "_" # Top;
+}
+
+class SImmAsmOperandClass<int Bits, list<AsmOperandClass> Supers = []>
+ : AsmOperandClass {
+ let Name = "SImm" # Bits;
+ let RenderMethod = "addSImmOperands<" # Bits # ">";
+ let PredicateMethod = "isSImm<" # Bits # ">";
+ let SuperClasses = Supers;
+ let DiagnosticType = "SImm" # Bits;
+}
+
+class UImmAsmOperandClass<int Bits, list<AsmOperandClass> Supers = []>
+ : AsmOperandClass {
+ let Name = "UImm" # Bits;
+ let RenderMethod = "addUImmOperands<" # Bits # ">";
+ let PredicateMethod = "isUImm<" # Bits # ">";
+ let SuperClasses = Supers;
+ let DiagnosticType = "UImm" # Bits;
+}
+
+// AsmOperandClasses require a strict ordering which is difficult to manage
+// as a hierarchy. Instead, we use a linear ordering and impose an order that
+// is in some places arbitrary.
+//
+// Here are the rules that are in use:
+// * Wider immediates are a superset of narrower immediates:
+// uimm4 < uimm5 < uimm6
+// * For the same bit-width, unsigned immediates are a superset of signed
+//   immediates:
+// simm4 < uimm4 < simm5 < uimm5
+// * For the same upper-bound, signed immediates are a superset of unsigned
+// immediates:
+//     uimm3 < simm4 < uimm4 < simm5
+// * Modified immediates are a superset of ordinary immediates:
+// uimm5 < uimm5_plus1 (1..32) < uimm5_plus32 (32..63) < uimm6
+// The term 'superset' starts to break down here since the uimm5_plus* classes
+// are not true supersets of uimm5 (but they are still subsets of uimm6).
+// * 'Relaxed' immediates are supersets of the corresponding unsigned immediate.
+// uimm16 < uimm16_relaxed
+// * The codeGen pattern type is arbitrarily ordered.
+// uimm5 < uimm5_64, and uimm5 < vsplat_uimm5
+// This is entirely arbitrary. We need an ordering and what we pick is
+// unimportant since only one is possible for a given mnemonic.
+def SImm32RelaxedAsmOperandClass
+ : SImmAsmOperandClass<32, []> {
+ let Name = "SImm32_Relaxed";
+ let PredicateMethod = "isAnyImm<32>";
+ let DiagnosticType = "SImm32_Relaxed";
+}
+def SImm32AsmOperandClass
+ : SImmAsmOperandClass<32, [SImm32RelaxedAsmOperandClass]>;
+def ConstantUImm26AsmOperandClass
+ : ConstantUImmAsmOperandClass<26, [SImm32AsmOperandClass]>;
+def ConstantUImm20AsmOperandClass
+ : ConstantUImmAsmOperandClass<20, [ConstantUImm26AsmOperandClass]>;
+def UImm16RelaxedAsmOperandClass
+ : UImmAsmOperandClass<16, [ConstantUImm20AsmOperandClass]> {
+ let Name = "UImm16_Relaxed";
+ let PredicateMethod = "isAnyImm<16>";
+ let DiagnosticType = "UImm16_Relaxed";
+}
+def UImm16AsmOperandClass
+ : UImmAsmOperandClass<16, [UImm16RelaxedAsmOperandClass]>;
+def SImm16RelaxedAsmOperandClass
+ : SImmAsmOperandClass<16, [UImm16RelaxedAsmOperandClass]> {
+ let Name = "SImm16_Relaxed";
+ let PredicateMethod = "isAnyImm<16>";
+ let DiagnosticType = "SImm16_Relaxed";
+}
+def SImm16AsmOperandClass
+ : SImmAsmOperandClass<16, [SImm16RelaxedAsmOperandClass]>;
+def ConstantSImm10Lsl3AsmOperandClass : AsmOperandClass {
+ let Name = "SImm10Lsl3";
+ let RenderMethod = "addImmOperands";
+ let PredicateMethod = "isScaledSImm<10, 3>";
+ let SuperClasses = [SImm16AsmOperandClass];
+ let DiagnosticType = "SImm10_Lsl3";
+}
+def ConstantSImm10Lsl2AsmOperandClass : AsmOperandClass {
+ let Name = "SImm10Lsl2";
+ let RenderMethod = "addImmOperands";
+ let PredicateMethod = "isScaledSImm<10, 2>";
+ let SuperClasses = [ConstantSImm10Lsl3AsmOperandClass];
+ let DiagnosticType = "SImm10_Lsl2";
+}
+def ConstantSImm11AsmOperandClass
+ : ConstantSImmAsmOperandClass<11, [ConstantSImm10Lsl2AsmOperandClass]>;
+def ConstantSImm10Lsl1AsmOperandClass : AsmOperandClass {
+ let Name = "SImm10Lsl1";
+ let RenderMethod = "addImmOperands";
+ let PredicateMethod = "isScaledSImm<10, 1>";
+ let SuperClasses = [ConstantSImm11AsmOperandClass];
+ let DiagnosticType = "SImm10_Lsl1";
+}
def ConstantUImm10AsmOperandClass
- : ConstantUImmAsmOperandClass<10, []>;
+ : ConstantUImmAsmOperandClass<10, [ConstantSImm10Lsl1AsmOperandClass]>;
+def ConstantSImm10AsmOperandClass
+ : ConstantSImmAsmOperandClass<10, [ConstantUImm10AsmOperandClass]>;
+def ConstantSImm9AsmOperandClass
+ : ConstantSImmAsmOperandClass<9, [ConstantSImm10AsmOperandClass]>;
+def ConstantSImm7Lsl2AsmOperandClass : AsmOperandClass {
+ let Name = "SImm7Lsl2";
+ let RenderMethod = "addImmOperands";
+ let PredicateMethod = "isScaledSImm<7, 2>";
+ let SuperClasses = [ConstantSImm9AsmOperandClass];
+ let DiagnosticType = "SImm7_Lsl2";
+}
def ConstantUImm8AsmOperandClass
- : ConstantUImmAsmOperandClass<8, [ConstantUImm10AsmOperandClass]>;
+ : ConstantUImmAsmOperandClass<8, [ConstantSImm7Lsl2AsmOperandClass]>;
+def ConstantUImm7Sub1AsmOperandClass
+ : ConstantUImmAsmOperandClass<7, [ConstantUImm8AsmOperandClass], -1> {
+ // Specify the names since the -1 offset causes invalid identifiers otherwise.
+ let Name = "UImm7_N1";
+ let DiagnosticType = "UImm7_N1";
+}
def ConstantUImm7AsmOperandClass
- : ConstantUImmAsmOperandClass<7, [ConstantUImm8AsmOperandClass]>;
+ : ConstantUImmAsmOperandClass<7, [ConstantUImm7Sub1AsmOperandClass]>;
+def ConstantUImm6Lsl2AsmOperandClass : AsmOperandClass {
+ let Name = "UImm6Lsl2";
+ let RenderMethod = "addImmOperands";
+ let PredicateMethod = "isScaledUImm<6, 2>";
+ let SuperClasses = [ConstantUImm7AsmOperandClass];
+ let DiagnosticType = "UImm6_Lsl2";
+}
def ConstantUImm6AsmOperandClass
- : ConstantUImmAsmOperandClass<6, [ConstantUImm7AsmOperandClass]>;
+ : ConstantUImmAsmOperandClass<6, [ConstantUImm6Lsl2AsmOperandClass]>;
def ConstantSImm6AsmOperandClass
- : ConstantSImmAsmOperandClass<6, [ConstantUImm7AsmOperandClass]>;
-def ConstantUImm5Plus1AsmOperandClass
- : ConstantUImmAsmOperandClass<5, [ConstantUImm6AsmOperandClass], 1>;
-def ConstantUImm5Plus32AsmOperandClass
- : ConstantUImmAsmOperandClass<5, [ConstantUImm6AsmOperandClass], 32>;
-def ConstantUImm5Plus33AsmOperandClass
- : ConstantUImmAsmOperandClass<5, [ConstantUImm6AsmOperandClass], 33>;
-def ConstantUImm5Plus32NormalizeAsmOperandClass
- : ConstantUImmAsmOperandClass<5, [ConstantUImm6AsmOperandClass], 32> {
- let Name = "ConstantUImm5_32_Norm";
- // We must also subtract 32 when we render the operand.
- let RenderMethod = "addConstantUImmOperands<5, 32, -32>";
-}
+ : ConstantSImmAsmOperandClass<6, [ConstantUImm6AsmOperandClass]>;
def ConstantUImm5Lsl2AsmOperandClass : AsmOperandClass {
let Name = "UImm5Lsl2";
let RenderMethod = "addImmOperands";
let PredicateMethod = "isScaledUImm<5, 2>";
- let SuperClasses = [ConstantUImm6AsmOperandClass];
+ let SuperClasses = [ConstantSImm6AsmOperandClass];
let DiagnosticType = "UImm5_Lsl2";
}
+def ConstantUImm5_Range2_64AsmOperandClass
+ : ConstantUImmRangeAsmOperandClass<2, 64, [ConstantUImm5Lsl2AsmOperandClass]>;
+def ConstantUImm5Plus33AsmOperandClass
+ : ConstantUImmAsmOperandClass<5, [ConstantUImm5_Range2_64AsmOperandClass],
+ 33>;
def ConstantUImm5ReportUImm6AsmOperandClass
- : ConstantUImmAsmOperandClass<5, [ConstantUImm6AsmOperandClass]> {
+ : ConstantUImmAsmOperandClass<5, [ConstantUImm5Plus33AsmOperandClass]> {
let Name = "ConstantUImm5_0_Report_UImm6";
let DiagnosticType = "UImm5_0_Report_UImm6";
}
+def ConstantUImm5Plus32AsmOperandClass
+ : ConstantUImmAsmOperandClass<
+ 5, [ConstantUImm5ReportUImm6AsmOperandClass], 32>;
+def ConstantUImm5Plus32NormalizeAsmOperandClass
+ : ConstantUImmAsmOperandClass<5, [ConstantUImm5Plus32AsmOperandClass], 32> {
+ let Name = "ConstantUImm5_32_Norm";
+ // We must also subtract 32 when we render the operand.
+ let RenderMethod = "addConstantUImmOperands<5, 32, -32>";
+}
+def ConstantUImm5Plus1AsmOperandClass
+ : ConstantUImmAsmOperandClass<
+ 5, [ConstantUImm5Plus32NormalizeAsmOperandClass], 1>;
def ConstantUImm5AsmOperandClass
- : ConstantUImmAsmOperandClass<5, [ConstantUImm6AsmOperandClass]>;
+ : ConstantUImmAsmOperandClass<5, [ConstantUImm5Plus1AsmOperandClass]>;
+def ConstantSImm5AsmOperandClass
+ : ConstantSImmAsmOperandClass<5, [ConstantUImm5AsmOperandClass]>;
def ConstantUImm4AsmOperandClass
- : ConstantUImmAsmOperandClass<
- 4, [ConstantUImm5AsmOperandClass,
- ConstantUImm5Plus32AsmOperandClass,
- ConstantUImm5Plus32NormalizeAsmOperandClass]>;
+ : ConstantUImmAsmOperandClass<4, [ConstantSImm5AsmOperandClass]>;
+def ConstantSImm4AsmOperandClass
+ : ConstantSImmAsmOperandClass<4, [ConstantUImm4AsmOperandClass]>;
def ConstantUImm3AsmOperandClass
- : ConstantUImmAsmOperandClass<3, [ConstantUImm4AsmOperandClass]>;
+ : ConstantUImmAsmOperandClass<3, [ConstantSImm4AsmOperandClass]>;
def ConstantUImm2Plus1AsmOperandClass
: ConstantUImmAsmOperandClass<2, [ConstantUImm3AsmOperandClass], 1>;
def ConstantUImm2AsmOperandClass
@@ -478,6 +629,12 @@ def brtarget : Operand<OtherVT> {
let DecoderMethod = "DecodeBranchTarget";
let ParserMatchClass = MipsJumpTargetAsmOperand;
}
+def brtarget1SImm16 : Operand<OtherVT> {
+ let EncoderMethod = "getBranchTargetOpValue1SImm16";
+ let OperandType = "OPERAND_PCREL";
+ let DecoderMethod = "DecodeBranchTarget1SImm16";
+ let ParserMatchClass = MipsJumpTargetAsmOperand;
+}
def calltarget : Operand<iPTR> {
let EncoderMethod = "getJumpTargetOpValue";
let ParserMatchClass = MipsJumpTargetAsmOperand;
@@ -485,18 +642,6 @@ def calltarget : Operand<iPTR> {
def imm64: Operand<i64>;
-def simm6 : Operand<i32> {
- let ParserMatchClass = ConstantSImm6AsmOperandClass;
- let OperandType = "OPERAND_IMMEDIATE";
-}
-def simm9 : Operand<i32>;
-def simm10 : Operand<i32>;
-def simm11 : Operand<i32>;
-
-def simm16 : Operand<i32> {
- let DecoderMethod= "DecodeSimm16";
-}
-
def simm19_lsl2 : Operand<i32> {
let EncoderMethod = "getSimm19Lsl2Encoding";
let DecoderMethod = "DecodeSimm19Lsl2";
@@ -509,91 +654,207 @@ def simm18_lsl3 : Operand<i32> {
let ParserMatchClass = MipsJumpTargetAsmOperand;
}
-def simm20 : Operand<i32>;
-def simm32 : Operand<i32>;
-
-def uimm20 : Operand<i32> {
-}
-
-def simm16_64 : Operand<i64> {
- let DecoderMethod = "DecodeSimm16";
-}
-
// Zero
def uimmz : Operand<i32> {
- let PrintMethod = "printUnsignedImm";
+ let PrintMethod = "printUImm<0>";
let ParserMatchClass = ConstantImmzAsmOperandClass;
}
+// Size operand of the ins instruction.
+def uimm_range_2_64 : Operand<i32> {
+ let PrintMethod = "printUImm<6, 2>";
+ let EncoderMethod = "getSizeInsEncoding";
+ let DecoderMethod = "DecodeInsSize";
+ let ParserMatchClass = ConstantUImm5_Range2_64AsmOperandClass;
+}
+
// Unsigned Operands
-foreach I = {1, 2, 3, 4, 5, 6, 7, 8, 10} in
+foreach I = {1, 2, 3, 4, 5, 6, 7, 8, 10, 20, 26} in
def uimm # I : Operand<i32> {
- let PrintMethod = "printUnsignedImm";
+ let PrintMethod = "printUImm<" # I # ">";
let ParserMatchClass =
!cast<AsmOperandClass>("ConstantUImm" # I # "AsmOperandClass");
}
def uimm2_plus1 : Operand<i32> {
- let PrintMethod = "printUnsignedImm";
+ let PrintMethod = "printUImm<2, 1>";
let EncoderMethod = "getUImmWithOffsetEncoding<2, 1>";
let DecoderMethod = "DecodeUImmWithOffset<2, 1>";
let ParserMatchClass = ConstantUImm2Plus1AsmOperandClass;
}
def uimm5_plus1 : Operand<i32> {
- let PrintMethod = "printUnsignedImm";
+ let PrintMethod = "printUImm<5, 1>";
let EncoderMethod = "getUImmWithOffsetEncoding<5, 1>";
let DecoderMethod = "DecodeUImmWithOffset<5, 1>";
let ParserMatchClass = ConstantUImm5Plus1AsmOperandClass;
}
def uimm5_plus32 : Operand<i32> {
- let PrintMethod = "printUnsignedImm";
+ let PrintMethod = "printUImm<5, 32>";
let ParserMatchClass = ConstantUImm5Plus32AsmOperandClass;
}
def uimm5_plus33 : Operand<i32> {
- let PrintMethod = "printUnsignedImm";
+ let PrintMethod = "printUImm<5, 33>";
let EncoderMethod = "getUImmWithOffsetEncoding<5, 1>";
let DecoderMethod = "DecodeUImmWithOffset<5, 1>";
let ParserMatchClass = ConstantUImm5Plus33AsmOperandClass;
}
+def uimm5_inssize_plus1 : Operand<i32> {
+ let PrintMethod = "printUImm<6>";
+ let ParserMatchClass = ConstantUImm5Plus1AsmOperandClass;
+ let EncoderMethod = "getSizeInsEncoding";
+ let DecoderMethod = "DecodeInsSize";
+}
+
def uimm5_plus32_normalize : Operand<i32> {
- let PrintMethod = "printUnsignedImm";
+ let PrintMethod = "printUImm<5>";
let ParserMatchClass = ConstantUImm5Plus32NormalizeAsmOperandClass;
}
def uimm5_lsl2 : Operand<OtherVT> {
let EncoderMethod = "getUImm5Lsl2Encoding";
- let DecoderMethod = "DecodeUImm5lsl2";
+ let DecoderMethod = "DecodeUImmWithOffsetAndScale<5, 0, 4>";
let ParserMatchClass = ConstantUImm5Lsl2AsmOperandClass;
}
def uimm5_plus32_normalize_64 : Operand<i64> {
- let PrintMethod = "printUnsignedImm";
+ let PrintMethod = "printUImm<5>";
let ParserMatchClass = ConstantUImm5Plus32NormalizeAsmOperandClass;
}
+def uimm6_lsl2 : Operand<OtherVT> {
+ let EncoderMethod = "getUImm6Lsl2Encoding";
+ let DecoderMethod = "DecodeUImmWithOffsetAndScale<6, 0, 4>";
+ let ParserMatchClass = ConstantUImm6Lsl2AsmOperandClass;
+}
+
+foreach I = {16} in
+ def uimm # I : Operand<i32> {
+ let PrintMethod = "printUImm<" # I # ">";
+ let ParserMatchClass =
+ !cast<AsmOperandClass>("UImm" # I # "AsmOperandClass");
+ }
+
+// Like uimm16 but coerces simm16 to uimm16.
+def uimm16_relaxed : Operand<i32> {
+ let PrintMethod = "printUImm<16>";
+ let ParserMatchClass =
+ !cast<AsmOperandClass>("UImm16RelaxedAsmOperandClass");
+}
+
foreach I = {5} in
def uimm # I # _64 : Operand<i64> {
- let PrintMethod = "printUnsignedImm";
+ let PrintMethod = "printUImm<" # I # ">";
let ParserMatchClass =
!cast<AsmOperandClass>("ConstantUImm" # I # "AsmOperandClass");
}
+foreach I = {16} in
+ def uimm # I # _64 : Operand<i64> {
+ let PrintMethod = "printUImm<" # I # ">";
+ let ParserMatchClass =
+ !cast<AsmOperandClass>("UImm" # I # "AsmOperandClass");
+ }
+
+// Like uimm16_64 but coerces simm16 to uimm16.
+def uimm16_64_relaxed : Operand<i64> {
+ let PrintMethod = "printUImm<16>";
+ let ParserMatchClass =
+ !cast<AsmOperandClass>("UImm16RelaxedAsmOperandClass");
+}
+
+// Like uimm5 but reports a less confusing error for 32-63 when
+// an instruction alias permits that.
+def uimm5_report_uimm6 : Operand<i32> {
+ let PrintMethod = "printUImm<5>";
+ let ParserMatchClass = ConstantUImm5ReportUImm6AsmOperandClass;
+}
+
// Like uimm5_64 but reports a less confusing error for 32-63 when
// an instruction alias permits that.
def uimm5_64_report_uimm6 : Operand<i64> {
- let PrintMethod = "printUnsignedImm";
+ let PrintMethod = "printUImm<5>";
let ParserMatchClass = ConstantUImm5ReportUImm6AsmOperandClass;
}
-def uimm16 : Operand<i32> {
- let PrintMethod = "printUnsignedImm";
+foreach I = {1, 2, 3, 4} in
+ def uimm # I # _ptr : Operand<iPTR> {
+ let PrintMethod = "printUImm<" # I # ">";
+ let ParserMatchClass =
+ !cast<AsmOperandClass>("ConstantUImm" # I # "AsmOperandClass");
+ }
+
+foreach I = {1, 2, 3, 4, 5, 6, 8} in
+ def vsplat_uimm # I : Operand<vAny> {
+ let PrintMethod = "printUImm<" # I # ">";
+ let ParserMatchClass =
+ !cast<AsmOperandClass>("ConstantUImm" # I # "AsmOperandClass");
+ }
+
+// Signed operands
+foreach I = {4, 5, 6, 9, 10, 11} in
+ def simm # I : Operand<i32> {
+ let DecoderMethod = "DecodeSImmWithOffsetAndScale<" # I # ">";
+ let ParserMatchClass =
+ !cast<AsmOperandClass>("ConstantSImm" # I # "AsmOperandClass");
+ }
+
+foreach I = {1, 2, 3} in
+ def simm10_lsl # I : Operand<i32> {
+ let DecoderMethod = "DecodeSImmWithOffsetAndScale<10, " # I # ">";
+ let ParserMatchClass =
+ !cast<AsmOperandClass>("ConstantSImm10Lsl" # I # "AsmOperandClass");
+ }
+
+foreach I = {10} in
+ def simm # I # _64 : Operand<i64> {
+ let DecoderMethod = "DecodeSImmWithOffsetAndScale<" # I # ">";
+ let ParserMatchClass =
+ !cast<AsmOperandClass>("ConstantSImm" # I # "AsmOperandClass");
+ }
+
+foreach I = {5, 10} in
+ def vsplat_simm # I : Operand<vAny> {
+ let ParserMatchClass =
+ !cast<AsmOperandClass>("ConstantSImm" # I # "AsmOperandClass");
+ }
+
+def simm7_lsl2 : Operand<OtherVT> {
+ let EncoderMethod = "getSImm7Lsl2Encoding";
+ let DecoderMethod = "DecodeSImmWithOffsetAndScale<" # I # ", 0, 4>";
+ let ParserMatchClass = ConstantSImm7Lsl2AsmOperandClass;
+}
+
+foreach I = {16, 32} in
+ def simm # I : Operand<i32> {
+ let DecoderMethod = "DecodeSImmWithOffsetAndScale<" # I # ">";
+ let ParserMatchClass = !cast<AsmOperandClass>("SImm" # I # "AsmOperandClass");
+ }
+
+// Like simm16 but coerces uimm16 to simm16.
+def simm16_relaxed : Operand<i32> {
+ let DecoderMethod = "DecodeSImmWithOffsetAndScale<16>";
+ let ParserMatchClass = !cast<AsmOperandClass>("SImm16RelaxedAsmOperandClass");
+}
+
+def simm16_64 : Operand<i64> {
+ let DecoderMethod = "DecodeSImmWithOffsetAndScale<16>";
+ let ParserMatchClass = !cast<AsmOperandClass>("SImm16AsmOperandClass");
+}
+
+// Like simm32 but coerces uimm32 to simm32.
+def simm32_relaxed : Operand<i32> {
+ let DecoderMethod = "DecodeSImmWithOffsetAndScale<32>";
+ let ParserMatchClass = !cast<AsmOperandClass>("SImm32RelaxedAsmOperandClass");
}
-def pcrel16 : Operand<i32> {
+// This is almost the same as a uimm7 but 0x7f is interpreted as -1.
+def li16_imm : Operand<i32> {
+ let DecoderMethod = "DecodeLi16Imm";
+ let ParserMatchClass = ConstantUImm7Sub1AsmOperandClass;
}
def MipsMemAsmOperand : AsmOperandClass {
@@ -607,22 +868,44 @@ def MipsMemSimm9AsmOperand : AsmOperandClass {
let RenderMethod = "addMemOperands";
let ParserMethod = "parseMemOperand";
let PredicateMethod = "isMemWithSimmOffset<9>";
+ let DiagnosticType = "MemSImm9";
}
-def MipsMemSimm9GPRAsmOperand : AsmOperandClass {
- let Name = "MemOffsetSimm9GPR";
+def MipsMemSimm10AsmOperand : AsmOperandClass {
+ let Name = "MemOffsetSimm10";
let SuperClasses = [MipsMemAsmOperand];
let RenderMethod = "addMemOperands";
let ParserMethod = "parseMemOperand";
- let PredicateMethod = "isMemWithSimmOffsetGPR<9>";
+ let PredicateMethod = "isMemWithSimmOffset<10>";
+ let DiagnosticType = "MemSImm10";
}
+def MipsMemSimm12AsmOperand : AsmOperandClass {
+ let Name = "MemOffsetSimm12";
+ let SuperClasses = [MipsMemAsmOperand];
+ let RenderMethod = "addMemOperands";
+ let ParserMethod = "parseMemOperand";
+ let PredicateMethod = "isMemWithSimmOffset<12>";
+ let DiagnosticType = "MemSImm12";
+}
+
+foreach I = {1, 2, 3} in
+ def MipsMemSimm10Lsl # I # AsmOperand : AsmOperandClass {
+ let Name = "MemOffsetSimm10_" # I;
+ let SuperClasses = [MipsMemAsmOperand];
+ let RenderMethod = "addMemOperands";
+ let ParserMethod = "parseMemOperand";
+ let PredicateMethod = "isMemWithSimmOffset<10, " # I # ">";
+ let DiagnosticType = "MemSImm10Lsl" # I;
+ }
+
def MipsMemSimm11AsmOperand : AsmOperandClass {
let Name = "MemOffsetSimm11";
let SuperClasses = [MipsMemAsmOperand];
let RenderMethod = "addMemOperands";
let ParserMethod = "parseMemOperand";
let PredicateMethod = "isMemWithSimmOffset<11>";
+ let DiagnosticType = "MemSImm11";
}
def MipsMemSimm16AsmOperand : AsmOperandClass {
@@ -631,6 +914,7 @@ def MipsMemSimm16AsmOperand : AsmOperandClass {
let RenderMethod = "addMemOperands";
let ParserMethod = "parseMemOperand";
let PredicateMethod = "isMemWithSimmOffset<16>";
+ let DiagnosticType = "MemSImm16";
}
def MipsInvertedImmoperand : AsmOperandClass {
@@ -664,24 +948,42 @@ def mem_msa : mem_generic {
let EncoderMethod = "getMSAMemEncoding";
}
+def simm12 : Operand<i32> {
+ let DecoderMethod = "DecodeSimm12";
+}
+
def mem_simm9 : mem_generic {
let MIOperandInfo = (ops ptr_rc, simm9);
let EncoderMethod = "getMemEncoding";
let ParserMatchClass = MipsMemSimm9AsmOperand;
}
-def mem_simm9gpr : mem_generic {
- let MIOperandInfo = (ops ptr_rc, simm9);
+def mem_simm10 : mem_generic {
+ let MIOperandInfo = (ops ptr_rc, simm10);
let EncoderMethod = "getMemEncoding";
- let ParserMatchClass = MipsMemSimm9GPRAsmOperand;
+ let ParserMatchClass = MipsMemSimm10AsmOperand;
}
+foreach I = {1, 2, 3} in
+ def mem_simm10_lsl # I : mem_generic {
+ let MIOperandInfo = (ops ptr_rc, !cast<Operand>("simm10_lsl" # I));
+ let EncoderMethod = "getMemEncoding<" # I # ">";
+ let ParserMatchClass =
+ !cast<AsmOperandClass>("MipsMemSimm10Lsl" # I # "AsmOperand");
+ }
+
def mem_simm11 : mem_generic {
let MIOperandInfo = (ops ptr_rc, simm11);
let EncoderMethod = "getMemEncoding";
let ParserMatchClass = MipsMemSimm11AsmOperand;
}
+def mem_simm12 : mem_generic {
+ let MIOperandInfo = (ops ptr_rc, simm12);
+ let EncoderMethod = "getMemEncoding";
+ let ParserMatchClass = MipsMemSimm12AsmOperand;
+}
+
def mem_simm16 : mem_generic {
let MIOperandInfo = (ops ptr_rc, simm16);
let EncoderMethod = "getMemEncoding";
@@ -735,6 +1037,9 @@ def immSExt16 : PatLeaf<(imm), [{ return isInt<16>(N->getSExtValue()); }]>;
// e.g. addi, andi
def immSExt15 : PatLeaf<(imm), [{ return isInt<15>(N->getSExtValue()); }]>;
+// Node immediate fits as 7-bit zero extended on target immediate.
+def immZExt7 : PatLeaf<(imm), [{ return isUInt<7>(N->getZExtValue()); }]>;
+
// Node immediate fits as 16-bit zero extended on target immediate.
// The LO16 param means that only the lower 16 bits of the node
// immediate are caught.
@@ -755,6 +1060,16 @@ def immLow16Zero : PatLeaf<(imm), [{
// shamt field must fit in 5 bits.
def immZExt5 : ImmLeaf<i32, [{return Imm == (Imm & 0x1f);}]>;
+def immZExt5Plus1 : PatLeaf<(imm), [{
+ return isUInt<5>(N->getZExtValue() - 1);
+}]>;
+def immZExt5Plus32 : PatLeaf<(imm), [{
+ return isUInt<5>(N->getZExtValue() - 32);
+}]>;
+def immZExt5Plus33 : PatLeaf<(imm), [{
+ return isUInt<5>(N->getZExtValue() - 33);
+}]>;
+
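Each of the PlusN leaves above accepts a 32-value window starting at N; the `isUInt<5>(V - N)` trick relies on unsigned wrap-around to reject values below N, as this standalone equivalent shows:

```cpp
#include <cstdint>

// Equivalent of isUInt<5>(V - N): when V < N the subtraction wraps to a
// huge value, so the comparison fails, giving exactly the window [N, N+31].
bool inUInt5Window(uint64_t V, uint64_t N) { return V - N < 32; }
// inUInt5Window(V, 1)  -> V in 1..32   (immZExt5Plus1)
// inUInt5Window(V, 32) -> V in 32..63  (immZExt5Plus32)
// inUInt5Window(V, 33) -> V in 33..64  (immZExt5Plus33)
```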
// True if (N + 1) fits in 16-bit field.
def immSExt16Plus1 : PatLeaf<(imm), [{
return isInt<17>(N->getSExtValue()) && isInt<16>(N->getSExtValue() + 1);
@@ -768,9 +1083,6 @@ def addr :
def addrRegImm :
ComplexPattern<iPTR, 2, "selectAddrRegImm", [frameindex]>;
-def addrRegReg :
- ComplexPattern<iPTR, 2, "selectAddrRegReg", [frameindex]>;
-
def addrDefault :
ComplexPattern<iPTR, 2, "selectAddrDefault", [frameindex]>;
@@ -849,15 +1161,21 @@ class LoadUpper<string opstr, RegisterOperand RO, Operand Imm>:
}
// Memory Load/Store
-class Load<string opstr, DAGOperand RO, SDPatternOperator OpNode = null_frag,
- InstrItinClass Itin = NoItinerary, ComplexPattern Addr = addr> :
- InstSE<(outs RO:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
+class LoadMemory<string opstr, DAGOperand RO, DAGOperand MO,
+ SDPatternOperator OpNode = null_frag,
+ InstrItinClass Itin = NoItinerary,
+ ComplexPattern Addr = addr> :
+ InstSE<(outs RO:$rt), (ins MO:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(set RO:$rt, (OpNode Addr:$addr))], Itin, FrmI, opstr> {
let DecoderMethod = "DecodeMem";
let canFoldAsLoad = 1;
let mayLoad = 1;
}
+class Load<string opstr, DAGOperand RO, SDPatternOperator OpNode = null_frag,
+ InstrItinClass Itin = NoItinerary, ComplexPattern Addr = addr> :
+ LoadMemory<opstr, RO, mem, OpNode, Itin, Addr>;
+
class StoreMemory<string opstr, DAGOperand RO, DAGOperand MO,
SDPatternOperator OpNode = null_frag,
InstrItinClass Itin = NoItinerary, ComplexPattern Addr = addr> :
@@ -868,8 +1186,9 @@ class StoreMemory<string opstr, DAGOperand RO, DAGOperand MO,
}
class Store<string opstr, DAGOperand RO, SDPatternOperator OpNode = null_frag,
- InstrItinClass Itin = NoItinerary, ComplexPattern Addr = addr> :
- StoreMemory<opstr, RO, mem, OpNode, Itin, Addr>;
+ InstrItinClass Itin = NoItinerary, ComplexPattern Addr = addr,
+ DAGOperand MO = mem> :
+ StoreMemory<opstr, RO, MO, OpNode, Itin, Addr>;
// Load/Store Left/Right
let canFoldAsLoad = 1 in
@@ -892,7 +1211,8 @@ class StoreLeftRight<string opstr, SDNode OpNode, RegisterOperand RO,
// COP2 Load/Store
class LW_FT2<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode= null_frag> :
- InstSE<(outs RC:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
+ InstSE<(outs RC:$rt), (ins mem_simm16:$addr),
+ !strconcat(opstr, "\t$rt, $addr"),
[(set RC:$rt, (OpNode addrDefault:$addr))], Itin, FrmFI, opstr> {
let DecoderMethod = "DecodeFMem2";
let mayLoad = 1;
@@ -900,7 +1220,8 @@ class LW_FT2<string opstr, RegisterOperand RC, InstrItinClass Itin,
class SW_FT2<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode= null_frag> :
- InstSE<(outs), (ins RC:$rt, mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
+ InstSE<(outs), (ins RC:$rt, mem_simm16:$addr),
+ !strconcat(opstr, "\t$rt, $addr"),
[(OpNode RC:$rt, addrDefault:$addr)], Itin, FrmFI, opstr> {
let DecoderMethod = "DecodeFMem2";
let mayStore = 1;
@@ -934,6 +1255,7 @@ class CBranch<string opstr, DAGOperand opnd, PatFrag cond_op,
let isTerminator = 1;
let hasDelaySlot = DelaySlot;
let Defs = [AT];
+ bit isCTI = 1;
}
class CBranchZero<string opstr, DAGOperand opnd, PatFrag cond_op,
@@ -946,6 +1268,7 @@ class CBranchZero<string opstr, DAGOperand opnd, PatFrag cond_op,
let isTerminator = 1;
let hasDelaySlot = DelaySlot;
let Defs = [AT];
+ bit isCTI = 1;
}
// SetCC
@@ -972,6 +1295,7 @@ class JumpFJ<DAGOperand opnd, string opstr, SDPatternOperator operator,
let hasDelaySlot = 1;
let DecoderMethod = "DecodeJumpTarget";
let Defs = [AT];
+ bit isCTI = 1;
}
// Unconditional branch
@@ -984,10 +1308,11 @@ class UncondBranch<Instruction BEQInst> :
let hasDelaySlot = 1;
let AdditionalPredicates = [RelocPIC];
let Defs = [AT];
+ bit isCTI = 1;
}
// Base class for indirect branch and return instruction classes.
-let isTerminator=1, isBarrier=1, hasDelaySlot = 1 in
+let isTerminator=1, isBarrier=1, hasDelaySlot = 1, isCTI = 1 in
class JumpFR<string opstr, RegisterOperand RO,
SDPatternOperator operator = null_frag>:
InstSE<(outs), (ins RO:$rs), "jr\t$rs", [(operator RO:$rs)], II_JR,
@@ -1000,7 +1325,7 @@ class IndirectBranch<string opstr, RegisterOperand RO> : JumpFR<opstr, RO> {
}
// Jump and Link (Call)
-let isCall=1, hasDelaySlot=1, Defs = [RA] in {
+let isCall=1, hasDelaySlot=1, isCTI=1, Defs = [RA] in {
class JumpLink<string opstr, DAGOperand opnd> :
InstSE<(outs), (ins opnd:$target), !strconcat(opstr, "\t$target"),
[(MipsJmpLink tglobaladdr:$target)], II_JAL, FrmJ, opstr> {
@@ -1026,7 +1351,7 @@ let isCall=1, hasDelaySlot=1, Defs = [RA] in {
}
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, hasDelaySlot = 1,
- hasExtraSrcRegAllocReq = 1, Defs = [AT] in {
+ hasExtraSrcRegAllocReq = 1, isCTI = 1, Defs = [AT] in {
class TailCall<Instruction JumpInst> :
PseudoSE<(outs), (ins calltarget:$target), [], II_J>,
PseudoInstExpansion<(JumpInst jmptarget:$target)>;
@@ -1045,54 +1370,61 @@ class BAL_BR_Pseudo<Instruction RealInst> :
let isBarrier = 1;
let hasDelaySlot = 1;
let Defs = [RA];
+ bit isCTI = 1;
}
+let isCTI = 1 in {
// Syscall
-class SYS_FT<string opstr> :
- InstSE<(outs), (ins uimm20:$code_),
- !strconcat(opstr, "\t$code_"), [], NoItinerary, FrmI, opstr>;
+class SYS_FT<string opstr, Operand ImmOp, InstrItinClass itin = NoItinerary> :
+ InstSE<(outs), (ins ImmOp:$code_),
+ !strconcat(opstr, "\t$code_"), [], itin, FrmI, opstr>;
// Break
class BRK_FT<string opstr> :
InstSE<(outs), (ins uimm10:$code_1, uimm10:$code_2),
- !strconcat(opstr, "\t$code_1, $code_2"), [], NoItinerary,
+ !strconcat(opstr, "\t$code_1, $code_2"), [], II_BREAK,
FrmOther, opstr>;
// (D)Eret
-class ER_FT<string opstr> :
+class ER_FT<string opstr, InstrItinClass itin = NoItinerary> :
InstSE<(outs), (ins),
- opstr, [], NoItinerary, FrmOther, opstr>;
-
-// Interrupts
-class DEI_FT<string opstr, RegisterOperand RO> :
- InstSE<(outs RO:$rt), (ins),
- !strconcat(opstr, "\t$rt"), [], NoItinerary, FrmOther, opstr>;
+ opstr, [], itin, FrmOther, opstr>;
// Wait
class WAIT_FT<string opstr> :
- InstSE<(outs), (ins), opstr, [], NoItinerary, FrmOther, opstr>;
+ InstSE<(outs), (ins), opstr, [], II_WAIT, FrmOther, opstr>;
+}
+
+// Interrupts
+class DEI_FT<string opstr, RegisterOperand RO,
+ InstrItinClass itin = NoItinerary> :
+ InstSE<(outs RO:$rt), (ins),
+ !strconcat(opstr, "\t$rt"), [], itin, FrmOther, opstr>;
// Sync
let hasSideEffects = 1 in
class SYNC_FT<string opstr> :
- InstSE<(outs), (ins i32imm:$stype), "sync $stype", [(MipsSync imm:$stype)],
- NoItinerary, FrmOther, opstr>;
+ InstSE<(outs), (ins uimm5:$stype), "sync $stype",
+ [(MipsSync immZExt5:$stype)], II_SYNC, FrmOther, opstr>;
class SYNCI_FT<string opstr> :
InstSE<(outs), (ins mem_simm16:$addr), !strconcat(opstr, "\t$addr"), [],
- NoItinerary, FrmOther, opstr> {
+ II_SYNCI, FrmOther, opstr> {
let hasSideEffects = 1;
let DecoderMethod = "DecodeSyncI";
}
-let hasSideEffects = 1 in
-class TEQ_FT<string opstr, RegisterOperand RO> :
- InstSE<(outs), (ins RO:$rs, RO:$rt, uimm16:$code_),
- !strconcat(opstr, "\t$rs, $rt, $code_"), [], NoItinerary,
- FrmI, opstr>;
-
-class TEQI_FT<string opstr, RegisterOperand RO> :
- InstSE<(outs), (ins RO:$rs, uimm16:$imm16),
- !strconcat(opstr, "\t$rs, $imm16"), [], NoItinerary, FrmOther, opstr>;
+let hasSideEffects = 1, isCTI = 1 in {
+class TEQ_FT<string opstr, RegisterOperand RO, Operand ImmOp,
+ InstrItinClass itin = NoItinerary> :
+ InstSE<(outs), (ins RO:$rs, RO:$rt, ImmOp:$code_),
+ !strconcat(opstr, "\t$rs, $rt, $code_"), [], itin, FrmI, opstr>;
+
+class TEQI_FT<string opstr, RegisterOperand RO,
+ InstrItinClass itin = NoItinerary> :
+ InstSE<(outs), (ins RO:$rs, simm16:$imm16),
+ !strconcat(opstr, "\t$rs, $imm16"), [], itin, FrmOther, opstr>;
+}
+
// Mul, Div
class Mult<string opstr, InstrItinClass itin, RegisterOperand RO,
list<Register> DefRegs> :
@@ -1163,20 +1495,23 @@ class MoveToLOHI<string opstr, RegisterOperand RO, list<Register> DefRegs>:
class EffectiveAddress<string opstr, RegisterOperand RO> :
InstSE<(outs RO:$rt), (ins mem_ea:$addr), !strconcat(opstr, "\t$rt, $addr"),
- [(set RO:$rt, addr:$addr)], NoItinerary, FrmI,
+ [(set RO:$rt, addr:$addr)], II_ADDIU, FrmI,
!strconcat(opstr, "_lea")> {
let isCodeGenOnly = 1;
+ let hasNoSchedulingInfo = 1;
let DecoderMethod = "DecodeMem";
}
// Count Leading Ones/Zeros in Word
-class CountLeading0<string opstr, RegisterOperand RO>:
+class CountLeading0<string opstr, RegisterOperand RO,
+ InstrItinClass itin = NoItinerary>:
InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"),
- [(set RO:$rd, (ctlz RO:$rs))], II_CLZ, FrmR, opstr>;
+ [(set RO:$rd, (ctlz RO:$rs))], itin, FrmR, opstr>;
-class CountLeading1<string opstr, RegisterOperand RO>:
+class CountLeading1<string opstr, RegisterOperand RO,
+ InstrItinClass itin = NoItinerary>:
InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"),
- [(set RO:$rd, (ctlz (not RO:$rs)))], II_CLO, FrmR, opstr>;
+ [(set RO:$rd, (ctlz (not RO:$rs)))], itin, FrmR, opstr>;
// Sign Extend in Register.
class SignExtInReg<string opstr, ValueType vt, RegisterOperand RO,
@@ -1199,15 +1534,16 @@ class ReadHardware<RegisterOperand CPURegOperand, RegisterOperand RO> :
// Ext and Ins
class ExtBase<string opstr, RegisterOperand RO, Operand PosOpnd,
- Operand SizeOpnd, SDPatternOperator Op = null_frag> :
+ Operand SizeOpnd, PatFrag PosImm, PatFrag SizeImm,
+ SDPatternOperator Op = null_frag> :
InstSE<(outs RO:$rt), (ins RO:$rs, PosOpnd:$pos, SizeOpnd:$size),
!strconcat(opstr, " $rt, $rs, $pos, $size"),
- [(set RO:$rt, (Op RO:$rs, imm:$pos, imm:$size))], II_EXT,
+ [(set RO:$rt, (Op RO:$rs, PosImm:$pos, SizeImm:$size))], II_EXT,
FrmR, opstr>, ISA_MIPS32R2;
class InsBase<string opstr, RegisterOperand RO, Operand PosOpnd,
- SDPatternOperator Op = null_frag>:
- InstSE<(outs RO:$rt), (ins RO:$rs, PosOpnd:$pos, size_ins:$size, RO:$src),
+ Operand SizeOpnd, SDPatternOperator Op = null_frag>:
+ InstSE<(outs RO:$rt), (ins RO:$rs, PosOpnd:$pos, SizeOpnd:$size, RO:$src),
!strconcat(opstr, " $rt, $rs, $pos, $size"),
[(set RO:$rt, (Op RO:$rs, imm:$pos, imm:$size, RO:$src))],
II_INS, FrmR, opstr>, ISA_MIPS32R2 {
@@ -1224,35 +1560,38 @@ class AtomicCmpSwap<PatFrag Op, RegisterClass DRC> :
PseudoSE<(outs DRC:$dst), (ins PtrRC:$ptr, DRC:$cmp, DRC:$swap),
[(set DRC:$dst, (Op iPTR:$ptr, DRC:$cmp, DRC:$swap))]>;
-class LLBase<string opstr, RegisterOperand RO> :
- InstSE<(outs RO:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
- [], NoItinerary, FrmI> {
+class LLBase<string opstr, RegisterOperand RO, DAGOperand MO = mem> :
+ InstSE<(outs RO:$rt), (ins MO:$addr), !strconcat(opstr, "\t$rt, $addr"),
+ [], II_LL, FrmI, opstr> {
let DecoderMethod = "DecodeMem";
let mayLoad = 1;
}
class SCBase<string opstr, RegisterOperand RO> :
InstSE<(outs RO:$dst), (ins RO:$rt, mem:$addr),
- !strconcat(opstr, "\t$rt, $addr"), [], NoItinerary, FrmI> {
+ !strconcat(opstr, "\t$rt, $addr"), [], II_SC, FrmI> {
let DecoderMethod = "DecodeMem";
let mayStore = 1;
let Constraints = "$rt = $dst";
}
-class MFC3OP<string asmstr, RegisterOperand RO, RegisterOperand RD> :
- InstSE<(outs RO:$rt), (ins RD:$rd, uimm16:$sel),
- !strconcat(asmstr, "\t$rt, $rd, $sel"), [], NoItinerary, FrmFR>;
+class MFC3OP<string asmstr, RegisterOperand RO, RegisterOperand RD,
+ InstrItinClass itin> :
+ InstSE<(outs RO:$rt), (ins RD:$rd, uimm3:$sel),
+ !strconcat(asmstr, "\t$rt, $rd, $sel"), [], itin, FrmFR>;
-class MTC3OP<string asmstr, RegisterOperand RO, RegisterOperand RD> :
- InstSE<(outs RO:$rd), (ins RD:$rt, uimm16:$sel),
- !strconcat(asmstr, "\t$rt, $rd, $sel"), [], NoItinerary, FrmFR>;
+class MTC3OP<string asmstr, RegisterOperand RO, RegisterOperand RD,
+ InstrItinClass itin> :
+ InstSE<(outs RO:$rd), (ins RD:$rt, uimm3:$sel),
+ !strconcat(asmstr, "\t$rt, $rd, $sel"), [], itin, FrmFR>;
class TrapBase<Instruction RealInst>
- : PseudoSE<(outs), (ins), [(trap)], NoItinerary>,
+ : PseudoSE<(outs), (ins), [(trap)], II_TRAP>,
PseudoInstExpansion<(RealInst 0, 0)> {
let isBarrier = 1;
let isTerminator = 1;
let isCodeGenOnly = 1;
+ let isCTI = 1;
}
//===----------------------------------------------------------------------===//
@@ -1260,11 +1599,13 @@ class TrapBase<Instruction RealInst>
//===----------------------------------------------------------------------===//
// Return RA.
-let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1, hasCtrlDep=1 in
-def RetRA : PseudoSE<(outs), (ins), [(MipsRet)]>;
+let isReturn=1, isTerminator=1, isBarrier=1, hasCtrlDep=1, isCTI=1 in {
+ let hasDelaySlot=1 in
+ def RetRA : PseudoSE<(outs), (ins), [(MipsRet)]>;
-let isReturn=1, isTerminator=1, isBarrier=1, hasCtrlDep=1, hasSideEffects=1 in
-def ERet : PseudoSE<(outs), (ins), [(MipsERet)]>;
+ let hasSideEffects=1 in
+ def ERet : PseudoSE<(outs), (ins), [(MipsERet)]>;
+}
let Defs = [SP], Uses = [SP], hasSideEffects = 1 in {
def ADJCALLSTACKDOWN : MipsPseudo<(outs), (ins i32imm:$amt),
@@ -1303,7 +1644,7 @@ let usesCustomInserter = 1 in {
}
/// Pseudo instructions for loading and storing accumulator registers.
-let isPseudo = 1, isCodeGenOnly = 1 in {
+let isPseudo = 1, isCodeGenOnly = 1, hasNoSchedulingInfo = 1 in {
def LOAD_ACC64 : Load<"", ACC64>;
def STORE_ACC64 : Store<"", ACC64>;
}
@@ -1329,40 +1670,39 @@ def LONG_BRANCH_ADDiu : PseudoSE<(outs GPR32Opnd:$dst),
/// Arithmetic Instructions (ALU Immediate)
let AdditionalPredicates = [NotInMicroMips] in {
-def ADDiu : MMRel, StdMMR6Rel, ArithLogicI<"addiu", simm16, GPR32Opnd,
- II_ADDIU, immSExt16, add>,
- ADDI_FM<0x9>, IsAsCheapAsAMove;
-}
-def ADDi : MMRel, ArithLogicI<"addi", simm16, GPR32Opnd>, ADDI_FM<0x8>,
+ def ADDiu : MMRel, StdMMR6Rel, ArithLogicI<"addiu", simm16_relaxed, GPR32Opnd,
+ II_ADDIU, immSExt16, add>,
+ ADDI_FM<0x9>, IsAsCheapAsAMove;
+
+ def ANDi : MMRel, StdMMR6Rel,
+ ArithLogicI<"andi", uimm16, GPR32Opnd, II_ANDI, immZExt16, and>,
+ ADDI_FM<0xc>;
+ def ORi : MMRel, StdMMR6Rel,
+ ArithLogicI<"ori", uimm16, GPR32Opnd, II_ORI, immZExt16, or>,
+ ADDI_FM<0xd>;
+ def XORi : MMRel, StdMMR6Rel,
+ ArithLogicI<"xori", uimm16, GPR32Opnd, II_XORI, immZExt16, xor>,
+ ADDI_FM<0xe>;
+}
+def ADDi : MMRel, ArithLogicI<"addi", simm16_relaxed, GPR32Opnd, II_ADDI>, ADDI_FM<0x8>,
ISA_MIPS1_NOT_32R6_64R6;
def SLTi : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, GPR32Opnd>,
SLTI_FM<0xa>;
def SLTiu : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, GPR32Opnd>,
SLTI_FM<0xb>;
-let AdditionalPredicates = [NotInMicroMips] in {
-def ANDi : MMRel, StdMMR6Rel,
- ArithLogicI<"andi", uimm16, GPR32Opnd, II_ANDI, immZExt16, and>,
- ADDI_FM<0xc>;
-}
-def ORi : MMRel, StdMMR6Rel,
- ArithLogicI<"ori", uimm16, GPR32Opnd, II_ORI, immZExt16, or>,
- ADDI_FM<0xd>;
-def XORi : MMRel, StdMMR6Rel,
- ArithLogicI<"xori", uimm16, GPR32Opnd, II_XORI, immZExt16, xor>,
- ADDI_FM<0xe>;
-def LUi : MMRel, LoadUpper<"lui", GPR32Opnd, uimm16>, LUI_FM;
+def LUi : MMRel, LoadUpper<"lui", GPR32Opnd, uimm16_relaxed>, LUI_FM;
let AdditionalPredicates = [NotInMicroMips] in {
/// Arithmetic Instructions (3-Operand, R-Type)
def ADDu : MMRel, StdMMR6Rel, ArithLogicR<"addu", GPR32Opnd, 1, II_ADDU, add>,
ADD_FM<0, 0x21>;
-def SUBu : MMRel, ArithLogicR<"subu", GPR32Opnd, 0, II_SUBU, sub>,
+def SUBu : MMRel, StdMMR6Rel, ArithLogicR<"subu", GPR32Opnd, 0, II_SUBU, sub>,
ADD_FM<0, 0x23>;
}
let Defs = [HI0, LO0] in
def MUL : MMRel, ArithLogicR<"mul", GPR32Opnd, 1, II_MUL, mul>,
ADD_FM<0x1c, 2>, ISA_MIPS32_NOT_32R6_64R6;
-def ADD : MMRel, StdMMR6Rel, ArithLogicR<"add", GPR32Opnd>, ADD_FM<0, 0x20>;
-def SUB : MMRel, ArithLogicR<"sub", GPR32Opnd>, ADD_FM<0, 0x22>;
+def ADD : MMRel, StdMMR6Rel, ArithLogicR<"add", GPR32Opnd, 1, II_ADD>, ADD_FM<0, 0x20>;
+def SUB : MMRel, StdMMR6Rel, ArithLogicR<"sub", GPR32Opnd, 0, II_SUB>, ADD_FM<0, 0x22>;
def SLT : MMRel, SetCC_R<"slt", setlt, GPR32Opnd>, ADD_FM<0, 0x2a>;
def SLTu : MMRel, SetCC_R<"sltu", setult, GPR32Opnd>, ADD_FM<0, 0x2b>;
let AdditionalPredicates = [NotInMicroMips] in {
@@ -1372,8 +1712,8 @@ def OR : MMRel, StdMMR6Rel, ArithLogicR<"or", GPR32Opnd, 1, II_OR, or>,
ADD_FM<0, 0x25>;
def XOR : MMRel, StdMMR6Rel, ArithLogicR<"xor", GPR32Opnd, 1, II_XOR, xor>,
ADD_FM<0, 0x26>;
-}
def NOR : MMRel, StdMMR6Rel, LogicNOR<"nor", GPR32Opnd>, ADD_FM<0, 0x27>;
+}
/// Shift Instructions
let AdditionalPredicates = [NotInMicroMips] in {
@@ -1381,7 +1721,6 @@ def SLL : MMRel, shift_rotate_imm<"sll", uimm5, GPR32Opnd, II_SLL, shl,
immZExt5>, SRA_FM<0, 0>;
def SRL : MMRel, shift_rotate_imm<"srl", uimm5, GPR32Opnd, II_SRL, srl,
immZExt5>, SRA_FM<2, 0>;
-}
def SRA : MMRel, shift_rotate_imm<"sra", uimm5, GPR32Opnd, II_SRA, sra,
immZExt5>, SRA_FM<3, 0>;
def SLLV : MMRel, shift_rotate_reg<"sllv", GPR32Opnd, II_SLLV, shl>,
@@ -1390,25 +1729,30 @@ def SRLV : MMRel, shift_rotate_reg<"srlv", GPR32Opnd, II_SRLV, srl>,
SRLV_FM<6, 0>;
def SRAV : MMRel, shift_rotate_reg<"srav", GPR32Opnd, II_SRAV, sra>,
SRLV_FM<7, 0>;
+}
// Rotate Instructions
-def ROTR : MMRel, shift_rotate_imm<"rotr", uimm5, GPR32Opnd, II_ROTR, rotr,
- immZExt5>,
- SRA_FM<2, 1>, ISA_MIPS32R2;
-def ROTRV : MMRel, shift_rotate_reg<"rotrv", GPR32Opnd, II_ROTRV, rotr>,
- SRLV_FM<6, 1>, ISA_MIPS32R2;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def ROTR : MMRel, shift_rotate_imm<"rotr", uimm5, GPR32Opnd, II_ROTR, rotr,
+ immZExt5>,
+ SRA_FM<2, 1>, ISA_MIPS32R2;
+ def ROTRV : MMRel, shift_rotate_reg<"rotrv", GPR32Opnd, II_ROTRV, rotr>,
+ SRLV_FM<6, 1>, ISA_MIPS32R2;
+}
/// Load and Store Instructions
/// aligned
-def LB : Load<"lb", GPR32Opnd, sextloadi8, II_LB>, MMRel, LW_FM<0x20>;
-def LBu : Load<"lbu", GPR32Opnd, zextloadi8, II_LBU, addrDefault>, MMRel,
- LW_FM<0x24>;
-def LH : Load<"lh", GPR32Opnd, sextloadi16, II_LH, addrDefault>, MMRel,
- LW_FM<0x21>;
-def LHu : Load<"lhu", GPR32Opnd, zextloadi16, II_LHU>, MMRel, LW_FM<0x25>;
+def LB : LoadMemory<"lb", GPR32Opnd, mem_simm16, sextloadi8, II_LB>, MMRel,
+ LW_FM<0x20>;
+def LBu : LoadMemory<"lbu", GPR32Opnd, mem_simm16, zextloadi8, II_LBU,
+ addrDefault>, MMRel, LW_FM<0x24>;
let AdditionalPredicates = [NotInMicroMips] in {
-def LW : StdMMR6Rel, Load<"lw", GPR32Opnd, load, II_LW, addrDefault>, MMRel,
- LW_FM<0x23>;
+ def LH : LoadMemory<"lh", GPR32Opnd, mem_simm16, sextloadi16, II_LH,
+ addrDefault>, MMRel, LW_FM<0x21>;
+ def LHu : LoadMemory<"lhu", GPR32Opnd, mem_simm16, zextloadi16, II_LHU>,
+ MMRel, LW_FM<0x25>;
+ def LW : StdMMR6Rel, Load<"lw", GPR32Opnd, load, II_LW, addrDefault>, MMRel,
+ LW_FM<0x23>;
}
def SB : StdMMR6Rel, Store<"sb", GPR32Opnd, truncstorei8, II_SB>, MMRel,
LW_FM<0x28>;
@@ -1432,82 +1776,87 @@ def SWR : StoreLeftRight<"swr", MipsSWR, GPR32Opnd, II_SWR>, LW_FM<0x2e>,
let AdditionalPredicates = [NotInMicroMips] in {
// COP2 Memory Instructions
-def LWC2 : LW_FT2<"lwc2", COP2Opnd, NoItinerary, load>, LW_FM<0x32>,
+def LWC2 : StdMMR6Rel, LW_FT2<"lwc2", COP2Opnd, II_LWC2, load>, LW_FM<0x32>,
ISA_MIPS1_NOT_32R6_64R6;
-def SWC2 : SW_FT2<"swc2", COP2Opnd, NoItinerary, store>, LW_FM<0x3a>,
- ISA_MIPS1_NOT_32R6_64R6;
-def LDC2 : LW_FT2<"ldc2", COP2Opnd, NoItinerary, load>, LW_FM<0x36>,
- ISA_MIPS2_NOT_32R6_64R6;
-def SDC2 : SW_FT2<"sdc2", COP2Opnd, NoItinerary, store>, LW_FM<0x3e>,
+def SWC2 : StdMMR6Rel, SW_FT2<"swc2", COP2Opnd, II_SWC2, store>,
+ LW_FM<0x3a>, ISA_MIPS1_NOT_32R6_64R6;
+def LDC2 : StdMMR6Rel, LW_FT2<"ldc2", COP2Opnd, II_LDC2, load>, LW_FM<0x36>,
ISA_MIPS2_NOT_32R6_64R6;
+def SDC2 : StdMMR6Rel, SW_FT2<"sdc2", COP2Opnd, II_SDC2, store>,
+ LW_FM<0x3e>, ISA_MIPS2_NOT_32R6_64R6;
// COP3 Memory Instructions
let DecoderNamespace = "COP3_" in {
- def LWC3 : LW_FT3<"lwc3", COP3Opnd, NoItinerary, load>, LW_FM<0x33>;
- def SWC3 : SW_FT3<"swc3", COP3Opnd, NoItinerary, store>, LW_FM<0x3b>;
- def LDC3 : LW_FT3<"ldc3", COP3Opnd, NoItinerary, load>, LW_FM<0x37>,
+ def LWC3 : LW_FT3<"lwc3", COP3Opnd, II_LWC3, load>, LW_FM<0x33>;
+ def SWC3 : SW_FT3<"swc3", COP3Opnd, II_SWC3, store>, LW_FM<0x3b>;
+ def LDC3 : LW_FT3<"ldc3", COP3Opnd, II_LDC3, load>, LW_FM<0x37>,
ISA_MIPS2;
- def SDC3 : SW_FT3<"sdc3", COP3Opnd, NoItinerary, store>, LW_FM<0x3f>,
+ def SDC3 : SW_FT3<"sdc3", COP3Opnd, II_SDC3, store>, LW_FM<0x3f>,
ISA_MIPS2;
}
}
-def SYNC : MMRel, StdMMR6Rel, SYNC_FT<"sync">, SYNC_FM, ISA_MIPS32;
+def SYNC : MMRel, StdMMR6Rel, SYNC_FT<"sync">, SYNC_FM,
+ ISA_MIPS32;
def SYNCI : MMRel, StdMMR6Rel, SYNCI_FT<"synci">, SYNCI_FM, ISA_MIPS32R2;
let AdditionalPredicates = [NotInMicroMips] in {
- def TEQ : MMRel, TEQ_FT<"teq", GPR32Opnd>, TEQ_FM<0x34>, ISA_MIPS2;
- def TGE : MMRel, TEQ_FT<"tge", GPR32Opnd>, TEQ_FM<0x30>, ISA_MIPS2;
- def TGEU : MMRel, TEQ_FT<"tgeu", GPR32Opnd>, TEQ_FM<0x31>, ISA_MIPS2;
- def TLT : MMRel, TEQ_FT<"tlt", GPR32Opnd>, TEQ_FM<0x32>, ISA_MIPS2;
- def TLTU : MMRel, TEQ_FT<"tltu", GPR32Opnd>, TEQ_FM<0x33>, ISA_MIPS2;
- def TNE : MMRel, TEQ_FT<"tne", GPR32Opnd>, TEQ_FM<0x36>, ISA_MIPS2;
+  def TEQ  : MMRel, TEQ_FT<"teq", GPR32Opnd, uimm10, II_TEQ>, TEQ_FM<0x34>,
+             ISA_MIPS2;
+  def TGE  : MMRel, TEQ_FT<"tge", GPR32Opnd, uimm10, II_TGE>, TEQ_FM<0x30>,
+             ISA_MIPS2;
+  def TGEU : MMRel, TEQ_FT<"tgeu", GPR32Opnd, uimm10, II_TGEU>, TEQ_FM<0x31>,
+             ISA_MIPS2;
+  def TLT  : MMRel, TEQ_FT<"tlt", GPR32Opnd, uimm10, II_TLT>, TEQ_FM<0x32>,
+             ISA_MIPS2;
+  def TLTU : MMRel, TEQ_FT<"tltu", GPR32Opnd, uimm10, II_TLTU>, TEQ_FM<0x33>,
+             ISA_MIPS2;
+  def TNE  : MMRel, TEQ_FT<"tne", GPR32Opnd, uimm10, II_TNE>, TEQ_FM<0x36>,
+             ISA_MIPS2;
}
-def TEQI : MMRel, TEQI_FT<"teqi", GPR32Opnd>, TEQI_FM<0xc>,
+def TEQI : MMRel, TEQI_FT<"teqi", GPR32Opnd, II_TEQI>, TEQI_FM<0xc>,
ISA_MIPS2_NOT_32R6_64R6;
-def TGEI : MMRel, TEQI_FT<"tgei", GPR32Opnd>, TEQI_FM<0x8>,
+def TGEI : MMRel, TEQI_FT<"tgei", GPR32Opnd, II_TGEI>, TEQI_FM<0x8>,
ISA_MIPS2_NOT_32R6_64R6;
-def TGEIU : MMRel, TEQI_FT<"tgeiu", GPR32Opnd>, TEQI_FM<0x9>,
+def TGEIU : MMRel, TEQI_FT<"tgeiu", GPR32Opnd, II_TGEIU>, TEQI_FM<0x9>,
ISA_MIPS2_NOT_32R6_64R6;
-def TLTI : MMRel, TEQI_FT<"tlti", GPR32Opnd>, TEQI_FM<0xa>,
+def TLTI : MMRel, TEQI_FT<"tlti", GPR32Opnd, II_TLTI>, TEQI_FM<0xa>,
ISA_MIPS2_NOT_32R6_64R6;
-def TTLTIU : MMRel, TEQI_FT<"tltiu", GPR32Opnd>, TEQI_FM<0xb>,
+def TTLTIU : MMRel, TEQI_FT<"tltiu", GPR32Opnd, II_TTLTIU>, TEQI_FM<0xb>,
ISA_MIPS2_NOT_32R6_64R6;
-def TNEI : MMRel, TEQI_FT<"tnei", GPR32Opnd>, TEQI_FM<0xe>,
+def TNEI : MMRel, TEQI_FT<"tnei", GPR32Opnd, II_TNEI>, TEQI_FM<0xe>,
ISA_MIPS2_NOT_32R6_64R6;
let AdditionalPredicates = [NotInMicroMips] in {
def BREAK : MMRel, StdMMR6Rel, BRK_FT<"break">, BRK_FM<0xd>;
+def SYSCALL : MMRel, SYS_FT<"syscall", uimm20, II_SYSCALL>, SYS_FM<0xc>;
}
-def SYSCALL : MMRel, SYS_FT<"syscall">, SYS_FM<0xc>;
def TRAP : TrapBase<BREAK>;
-def SDBBP : MMRel, SYS_FT<"sdbbp">, SDBBP_FM, ISA_MIPS32_NOT_32R6_64R6;
+let AdditionalPredicates = [NotInMicroMips] in {
+def SDBBP : MMRel, SYS_FT<"sdbbp", uimm20, II_SDBBP>, SDBBP_FM,
+            ISA_MIPS32_NOT_32R6_64R6;
+}
let AdditionalPredicates = [NotInMicroMips] in {
- def ERET : MMRel, ER_FT<"eret">, ER_FM<0x18, 0x0>, INSN_MIPS3_32;
- def ERETNC : MMRel, ER_FT<"eretnc">, ER_FM<0x18, 0x1>, ISA_MIPS32R5;
- def DERET : MMRel, ER_FT<"deret">, ER_FM<0x1f, 0x0>, ISA_MIPS32;
+ def ERET : MMRel, ER_FT<"eret", II_ERET>, ER_FM<0x18, 0x0>, INSN_MIPS3_32;
+ def ERETNC : MMRel, ER_FT<"eretnc", II_ERETNC>, ER_FM<0x18, 0x1>, ISA_MIPS32R5;
+ def DERET : MMRel, ER_FT<"deret", II_DERET>, ER_FM<0x1f, 0x0>, ISA_MIPS32;
}
let AdditionalPredicates = [NotInMicroMips] in {
- def EI : MMRel, StdMMR6Rel, DEI_FT<"ei", GPR32Opnd>, EI_FM<1>, ISA_MIPS32R2;
- def DI : MMRel, StdMMR6Rel, DEI_FT<"di", GPR32Opnd>, EI_FM<0>, ISA_MIPS32R2;
+  def EI : MMRel, StdMMR6Rel, DEI_FT<"ei", GPR32Opnd, II_EI>, EI_FM<1>,
+           ISA_MIPS32R2;
+  def DI : MMRel, StdMMR6Rel, DEI_FT<"di", GPR32Opnd, II_DI>, EI_FM<0>,
+           ISA_MIPS32R2;
}
let EncodingPredicates = []<Predicate>, // FIXME: Lack of HasStdEnc is probably a bug
AdditionalPredicates = [NotInMicroMips] in {
def WAIT : WAIT_FT<"wait">, WAIT_FM;
+}
+let AdditionalPredicates = [NotInMicroMips] in {
/// Load-linked, Store-conditional
-def LL : LLBase<"ll", GPR32Opnd>, LW_FM<0x30>, ISA_MIPS2_NOT_32R6_64R6;
-def SC : SCBase<"sc", GPR32Opnd>, LW_FM<0x38>, ISA_MIPS2_NOT_32R6_64R6;
+def LL : LLBase<"ll", GPR32Opnd>, LW_FM<0x30>, PTR_32, ISA_MIPS2_NOT_32R6_64R6;
+def SC : SCBase<"sc", GPR32Opnd>, LW_FM<0x38>, PTR_32, ISA_MIPS2_NOT_32R6_64R6;
}
/// Jump and Branch Instructions
def J : MMRel, JumpFJ<jmptarget, "j", br, bb, "j">, FJ<2>,
- AdditionalRequires<[RelocStatic]>, IsBranch;
-def JR : MMRel, IndirectBranch<"jr", GPR32Opnd>, MTLO_FM<8>;
+ AdditionalRequires<[RelocNotPIC]>, IsBranch;
+def JR : MMRel, IndirectBranch<"jr", GPR32Opnd>, MTLO_FM<8>,
+         ISA_MIPS1_NOT_32R6_64R6;
def BEQ : MMRel, CBranch<"beq", brtarget, seteq, GPR32Opnd>, BEQ_FM<4>;
def BEQL : MMRel, CBranch<"beql", brtarget, seteq, GPR32Opnd, 0>,
BEQ_FM<20>, ISA_MIPS2_NOT_32R6_64R6;
@@ -1562,6 +1911,7 @@ class PseudoIndirectBranchBase<RegisterOperand RO> :
let hasDelaySlot = 1;
let isBranch = 1;
let isIndirectBranch = 1;
+ bit isCTI = 1;
}
def PseudoIndirectBranch : PseudoIndirectBranchBase<GPR32Opnd>;
@@ -1579,6 +1929,7 @@ class PseudoReturnBase<RegisterOperand RO> : MipsPseudo<(outs), (ins RO:$rs),
let isCodeGenOnly = 1;
let hasCtrlDep = 1;
let hasExtraSrcRegAllocReq = 1;
+ bit isCTI = 1;
}
def PseudoReturn : PseudoReturnBase<GPR32Opnd>;
@@ -1596,7 +1947,7 @@ def SDT_MipsEHRET : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisPtrTy<1>]>;
def MIPSehret : SDNode<"MipsISD::EH_RETURN", SDT_MipsEHRET,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
-let Uses = [V0, V1], isTerminator = 1, isReturn = 1, isBarrier = 1 in {
+let Uses = [V0, V1], isTerminator = 1, isReturn = 1, isBarrier = 1, isCTI = 1 in {
def MIPSeh_return32 : MipsPseudo<(outs), (ins GPR32:$spoff, GPR32:$dst),
[(MIPSehret GPR32:$spoff, GPR32:$dst)]>;
def MIPSeh_return64 : MipsPseudo<(outs), (ins GPR64:$spoff,
@@ -1609,11 +1960,12 @@ def MULT : MMRel, Mult<"mult", II_MULT, GPR32Opnd, [HI0, LO0]>,
MULT_FM<0, 0x18>, ISA_MIPS1_NOT_32R6_64R6;
def MULTu : MMRel, Mult<"multu", II_MULTU, GPR32Opnd, [HI0, LO0]>,
MULT_FM<0, 0x19>, ISA_MIPS1_NOT_32R6_64R6;
-def SDIV : MMRel, Div<"div", II_DIV, GPR32Opnd, [HI0, LO0]>,
- MULT_FM<0, 0x1a>, ISA_MIPS1_NOT_32R6_64R6;
-def UDIV : MMRel, Div<"divu", II_DIVU, GPR32Opnd, [HI0, LO0]>,
- MULT_FM<0, 0x1b>, ISA_MIPS1_NOT_32R6_64R6;
-
+let AdditionalPredicates = [NotInMicroMips] in {
+ def SDIV : MMRel, Div<"div", II_DIV, GPR32Opnd, [HI0, LO0]>,
+ MULT_FM<0, 0x1a>, ISA_MIPS1_NOT_32R6_64R6;
+ def UDIV : MMRel, Div<"divu", II_DIVU, GPR32Opnd, [HI0, LO0]>,
+ MULT_FM<0, 0x1b>, ISA_MIPS1_NOT_32R6_64R6;
+}
def MTHI : MMRel, MoveToLOHI<"mthi", GPR32Opnd, [HI0]>, MTLO_FM<0x11>,
ISA_MIPS1_NOT_32R6_64R6;
def MTLO : MMRel, MoveToLOHI<"mtlo", GPR32Opnd, [LO0]>, MTLO_FM<0x13>,
@@ -1633,9 +1985,9 @@ def SEH : MMRel, StdMMR6Rel, SignExtInReg<"seh", i16, GPR32Opnd, II_SEH>,
SEB_FM<0x18, 0x20>, ISA_MIPS32R2;
/// Count Leading
-def CLZ : MMRel, CountLeading0<"clz", GPR32Opnd>, CLO_FM<0x20>,
+def CLZ : MMRel, CountLeading0<"clz", GPR32Opnd, II_CLZ>, CLO_FM<0x20>,
ISA_MIPS32_NOT_32R6_64R6;
-def CLO : MMRel, CountLeading1<"clo", GPR32Opnd>, CLO_FM<0x21>,
+def CLO : MMRel, CountLeading1<"clo", GPR32Opnd, II_CLO>, CLO_FM<0x21>,
ISA_MIPS32_NOT_32R6_64R6;
let AdditionalPredicates = [NotInMicroMips] in {
@@ -1681,29 +2033,39 @@ def PseudoMSUBU : MAddSubPseudo<MSUBU, MipsMSubu, II_MSUBU>,
ISA_MIPS32_NOT_32R6_64R6;
}
-def PseudoSDIV : MultDivPseudo<SDIV, ACC64, GPR32Opnd, MipsDivRem, II_DIV,
- 0, 1, 1>, ISA_MIPS1_NOT_32R6_64R6;
-def PseudoUDIV : MultDivPseudo<UDIV, ACC64, GPR32Opnd, MipsDivRemU, II_DIVU,
- 0, 1, 1>, ISA_MIPS1_NOT_32R6_64R6;
let AdditionalPredicates = [NotInMicroMips] in {
-def RDHWR : MMRel, ReadHardware<GPR32Opnd, HWRegsOpnd>, RDHWR_FM;
+ def PseudoSDIV : MultDivPseudo<SDIV, ACC64, GPR32Opnd, MipsDivRem, II_DIV,
+ 0, 1, 1>, ISA_MIPS1_NOT_32R6_64R6;
+ def PseudoUDIV : MultDivPseudo<UDIV, ACC64, GPR32Opnd, MipsDivRemU, II_DIVU,
+ 0, 1, 1>, ISA_MIPS1_NOT_32R6_64R6;
+ def RDHWR : MMRel, ReadHardware<GPR32Opnd, HWRegsOpnd>, RDHWR_FM;
+ // TODO: Add '0 < pos+size <= 32' constraint check to ext instruction
+ def EXT : MMRel, StdMMR6Rel, ExtBase<"ext", GPR32Opnd, uimm5, uimm5_plus1,
+ immZExt5, immZExt5Plus1, MipsExt>,
+ EXT_FM<0>;
+ def INS : MMRel, StdMMR6Rel, InsBase<"ins", GPR32Opnd, uimm5,
+ uimm5_inssize_plus1, MipsIns>,
+ EXT_FM<4>;
}
-// TODO: Add '0 < pos+size <= 32' constraint check to ext instruction
-def EXT : MMRel, ExtBase<"ext", GPR32Opnd, uimm5, uimm5_plus1, MipsExt>,
- EXT_FM<0>;
-def INS : MMRel, InsBase<"ins", GPR32Opnd, uimm5, MipsIns>, EXT_FM<4>;
-
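The TODO above asks for a pos/size operand check on ext; architecturally the extracted field has to stay inside the 32-bit register, i.e. 0 < pos+size <= 32. A sketch of that rule as a hypothetical helper (not the actual verifier hook):

```cpp
// Hypothetical check for ext rt, rs, pos, size on a 32-bit GPR:
// pos in [0,31], size in [1,32], and pos+size must not exceed 32.
bool isValidExtField(unsigned Pos, unsigned Size) {
  return Pos < 32 && Size >= 1 && Size <= 32 && Pos + Size <= 32;
}
```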
/// Move Control Registers From/To CPU Registers
-def MFC0 : MFC3OP<"mfc0", GPR32Opnd, COP0Opnd>, MFC3OP_FM<0x10, 0>, ISA_MIPS32;
-def MTC0 : MTC3OP<"mtc0", COP0Opnd, GPR32Opnd>, MFC3OP_FM<0x10, 4>, ISA_MIPS32;
-def MFC2 : MFC3OP<"mfc2", GPR32Opnd, COP2Opnd>, MFC3OP_FM<0x12, 0>;
-def MTC2 : MTC3OP<"mtc2", COP2Opnd, GPR32Opnd>, MFC3OP_FM<0x12, 4>;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def MTC0 : MTC3OP<"mtc0", COP0Opnd, GPR32Opnd, II_MTC0>, MFC3OP_FM<0x10, 4>,
+ ISA_MIPS32;
+ def MFC0 : MFC3OP<"mfc0", GPR32Opnd, COP0Opnd, II_MFC0>, MFC3OP_FM<0x10, 0>,
+ ISA_MIPS32;
+}
+def MFC2 : MFC3OP<"mfc2", GPR32Opnd, COP2Opnd, II_MFC2>, MFC3OP_FM<0x12, 0>;
+def MTC2 : MTC3OP<"mtc2", COP2Opnd, GPR32Opnd, II_MTC2>, MFC3OP_FM<0x12, 4>;
+
+class Barrier<string asmstr, InstrItinClass itin = NoItinerary> :
+ InstSE<(outs), (ins), asmstr, [], itin, FrmOther, asmstr>;
-class Barrier<string asmstr> : InstSE<(outs), (ins), asmstr, [], NoItinerary,
- FrmOther, asmstr>;
-def SSNOP : MMRel, StdMMR6Rel, Barrier<"ssnop">, BARRIER_FM<1>;
-def EHB : MMRel, Barrier<"ehb">, BARRIER_FM<3>;
-def PAUSE : MMRel, StdMMR6Rel, Barrier<"pause">, BARRIER_FM<5>, ISA_MIPS32R2;
+def SSNOP : MMRel, StdMMR6Rel, Barrier<"ssnop", II_SSNOP>, BARRIER_FM<1>;
+def EHB : MMRel, Barrier<"ehb", II_EHB>, BARRIER_FM<3>;
+
+let isCTI = 1 in
+def PAUSE : MMRel, StdMMR6Rel, Barrier<"pause", II_PAUSE>, BARRIER_FM<5>,
+ ISA_MIPS32R2;
// JR_HB and JALR_HB are defined here using the new style naming
// scheme because some of this code is shared with Mips32r6InstrInfo.td
@@ -1724,19 +2086,21 @@ class JALR_HB_DESC_BASE<string instr_asm, RegisterOperand GPROpnd> {
list<dag> Pattern = [];
}
-class JR_HB_DESC : InstSE<(outs), (ins), "", [], NoItinerary, FrmJ>,
+class JR_HB_DESC : InstSE<(outs), (ins), "", [], II_JR_HB, FrmJ>,
JR_HB_DESC_BASE<"jr.hb", GPR32Opnd> {
let isBranch=1;
let isIndirectBranch=1;
let hasDelaySlot=1;
let isTerminator=1;
let isBarrier=1;
+ bit isCTI = 1;
}
-class JALR_HB_DESC : InstSE<(outs), (ins), "", [], NoItinerary, FrmJ>,
+class JALR_HB_DESC : InstSE<(outs), (ins), "", [], II_JALR_HB, FrmJ>,
JALR_HB_DESC_BASE<"jalr.hb", GPR32Opnd> {
let isIndirectBranch=1;
let hasDelaySlot=1;
+ bit isCTI = 1;
}
class JR_HB_ENC : JR_HB_FM<8>;
@@ -1745,23 +2109,25 @@ class JALR_HB_ENC : JALR_HB_FM<9>;
def JR_HB : JR_HB_DESC, JR_HB_ENC, ISA_MIPS32_NOT_32R6_64R6;
def JALR_HB : JALR_HB_DESC, JALR_HB_ENC, ISA_MIPS32;
-class TLB<string asmstr> : InstSE<(outs), (ins), asmstr, [], NoItinerary,
- FrmOther, asmstr>;
-def TLBP : MMRel, TLB<"tlbp">, COP0_TLB_FM<0x08>;
-def TLBR : MMRel, TLB<"tlbr">, COP0_TLB_FM<0x01>;
-def TLBWI : MMRel, TLB<"tlbwi">, COP0_TLB_FM<0x02>;
-def TLBWR : MMRel, TLB<"tlbwr">, COP0_TLB_FM<0x06>;
-
-class CacheOp<string instr_asm, Operand MemOpnd> :
+class TLB<string asmstr, InstrItinClass itin = NoItinerary> :
+ InstSE<(outs), (ins), asmstr, [], itin, FrmOther, asmstr>;
+let AdditionalPredicates = [NotInMicroMips] in {
+def TLBP : MMRel, TLB<"tlbp", II_TLBP>, COP0_TLB_FM<0x08>;
+def TLBR : MMRel, TLB<"tlbr", II_TLBR>, COP0_TLB_FM<0x01>;
+def TLBWI : MMRel, TLB<"tlbwi", II_TLBWI>, COP0_TLB_FM<0x02>;
+def TLBWR : MMRel, TLB<"tlbwr", II_TLBWR>, COP0_TLB_FM<0x06>;
+}
+class CacheOp<string instr_asm, Operand MemOpnd,
+ InstrItinClass itin = NoItinerary> :
InstSE<(outs), (ins MemOpnd:$addr, uimm5:$hint),
- !strconcat(instr_asm, "\t$hint, $addr"), [], NoItinerary, FrmOther,
+ !strconcat(instr_asm, "\t$hint, $addr"), [], itin, FrmOther,
instr_asm> {
let DecoderMethod = "DecodeCacheOp";
}
-def CACHE : MMRel, CacheOp<"cache", mem>, CACHEOP_FM<0b101111>,
+def CACHE : MMRel, CacheOp<"cache", mem, II_CACHE>, CACHEOP_FM<0b101111>,
INSN_MIPS3_32_NOT_32R6_64R6;
-def PREF : MMRel, CacheOp<"pref", mem>, CACHEOP_FM<0b110011>,
+def PREF : MMRel, CacheOp<"pref", mem, II_PREF>, CACHEOP_FM<0b110011>,
INSN_MIPS3_32_NOT_32R6_64R6;
def ROL : MipsAsmPseudoInst<(outs),
@@ -1808,6 +2174,9 @@ def : MipsInstAlias<"dror $rd, $rs",
def : MipsInstAlias<"dror $rd, $imm",
(DRORImm GPR32Opnd:$rd, GPR32Opnd:$rd, simm16:$imm), 0>, ISA_MIPS64;
+def ABSMacro : MipsAsmPseudoInst<(outs GPR32Opnd:$rd), (ins GPR32Opnd:$rs),
+ "abs\t$rd, $rs">;
+
//===----------------------------------------------------------------------===//
// Instruction aliases
//===----------------------------------------------------------------------===//
@@ -1823,47 +2192,66 @@ def : MipsInstAlias<"move $dst, $src",
}
def : MipsInstAlias<"bal $offset", (BGEZAL ZERO, brtarget:$offset), 0>,
ISA_MIPS1_NOT_32R6_64R6;
-def : MipsInstAlias<"addu $rs, $rt, $imm",
- (ADDiu GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
-def : MipsInstAlias<"addu $rs, $imm",
- (ADDiu GPR32Opnd:$rs, GPR32Opnd:$rs, simm16:$imm), 0>;
-def : MipsInstAlias<"add $rs, $rt, $imm",
- (ADDi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>,
- ISA_MIPS1_NOT_32R6_64R6;
-def : MipsInstAlias<"add $rs, $imm",
- (ADDi GPR32Opnd:$rs, GPR32Opnd:$rs, simm16:$imm), 0>,
- ISA_MIPS1_NOT_32R6_64R6;
-def : MipsInstAlias<"and $rs, $rt, $imm",
- (ANDi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
-def : MipsInstAlias<"and $rs, $imm",
- (ANDi GPR32Opnd:$rs, GPR32Opnd:$rs, simm16:$imm), 0>;
+def : MipsInstAlias<
+ "addu $rs, $rt, $imm",
+ (ADDiu GPR32Opnd:$rs, GPR32Opnd:$rt, simm32_relaxed:$imm), 0>;
+def : MipsInstAlias<
+ "addu $rs, $imm",
+ (ADDiu GPR32Opnd:$rs, GPR32Opnd:$rs, simm32_relaxed:$imm), 0>;
+def : MipsInstAlias<
+ "add $rs, $rt, $imm",
+ (ADDi GPR32Opnd:$rs, GPR32Opnd:$rt, simm32_relaxed:$imm), 0>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def : MipsInstAlias<
+ "add $rs, $imm",
+ (ADDi GPR32Opnd:$rs, GPR32Opnd:$rs, simm32_relaxed:$imm), 0>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def : MipsInstAlias<
+ "and $rs, $rt, $imm",
+ (ANDi GPR32Opnd:$rs, GPR32Opnd:$rt, simm32_relaxed:$imm), 0>;
+def : MipsInstAlias<
+ "and $rs, $imm",
+ (ANDi GPR32Opnd:$rs, GPR32Opnd:$rs, simm32_relaxed:$imm), 0>;
def : MipsInstAlias<"j $rs", (JR GPR32Opnd:$rs), 0>;
let Predicates = [NotInMicroMips] in {
def : MipsInstAlias<"jalr $rs", (JALR RA, GPR32Opnd:$rs), 0>;
}
def : MipsInstAlias<"jalr.hb $rs", (JALR_HB RA, GPR32Opnd:$rs), 1>, ISA_MIPS32;
-def : MipsInstAlias<"not $rt, $rs",
- (NOR GPR32Opnd:$rt, GPR32Opnd:$rs, ZERO), 0>;
def : MipsInstAlias<"neg $rt, $rs",
(SUB GPR32Opnd:$rt, ZERO, GPR32Opnd:$rs), 1>;
def : MipsInstAlias<"negu $rt",
(SUBu GPR32Opnd:$rt, ZERO, GPR32Opnd:$rt), 0>;
def : MipsInstAlias<"negu $rt, $rs",
(SUBu GPR32Opnd:$rt, ZERO, GPR32Opnd:$rs), 1>;
-def : MipsInstAlias<"slt $rs, $rt, $imm",
- (SLTi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
-def : MipsInstAlias<"sltu $rt, $rs, $imm",
- (SLTiu GPR32Opnd:$rt, GPR32Opnd:$rs, simm16:$imm), 0>;
-def : MipsInstAlias<"xor $rs, $rt, $imm",
- (XORi GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>;
-def : MipsInstAlias<"xor $rs, $imm",
- (XORi GPR32Opnd:$rs, GPR32Opnd:$rs, uimm16:$imm), 0>;
-def : MipsInstAlias<"or $rs, $rt, $imm",
- (ORi GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>;
-def : MipsInstAlias<"or $rs, $imm",
- (ORi GPR32Opnd:$rs, GPR32Opnd:$rs, uimm16:$imm), 0>;
+def : MipsInstAlias<
+ "slt $rs, $rt, $imm",
+ (SLTi GPR32Opnd:$rs, GPR32Opnd:$rt, simm32_relaxed:$imm), 0>;
+def : MipsInstAlias<
+ "sltu $rt, $rs, $imm",
+ (SLTiu GPR32Opnd:$rt, GPR32Opnd:$rs, simm32_relaxed:$imm), 0>;
let AdditionalPredicates = [NotInMicroMips] in {
-def : MipsInstAlias<"nop", (SLL ZERO, ZERO, 0), 1>;
+ def : MipsInstAlias<
+ "and $rs, $rt, $imm",
+ (ANDi GPR32Opnd:$rs, GPR32Opnd:$rt, simm32_relaxed:$imm), 0>;
+ def : MipsInstAlias<
+ "and $rs, $imm",
+ (ANDi GPR32Opnd:$rs, GPR32Opnd:$rs, simm32_relaxed:$imm), 0>;
+ def : MipsInstAlias<
+ "xor $rs, $rt, $imm",
+ (XORi GPR32Opnd:$rs, GPR32Opnd:$rt, simm32_relaxed:$imm), 0>;
+ def : MipsInstAlias<
+ "xor $rs, $imm",
+ (XORi GPR32Opnd:$rs, GPR32Opnd:$rs, simm32_relaxed:$imm), 0>;
+ def : MipsInstAlias<
+ "or $rs, $rt, $imm",
+ (ORi GPR32Opnd:$rs, GPR32Opnd:$rt, simm32_relaxed:$imm), 0>;
+ def : MipsInstAlias<
+ "or $rs, $imm",
+ (ORi GPR32Opnd:$rs, GPR32Opnd:$rs, simm32_relaxed:$imm), 0>;
+ def : MipsInstAlias<
+ "not $rt, $rs",
+ (NOR GPR32Opnd:$rt, GPR32Opnd:$rs, ZERO), 0>;
+ def : MipsInstAlias<"nop", (SLL ZERO, ZERO, 0), 1>;
}
def : MipsInstAlias<"mfc0 $rt, $rd", (MFC0 GPR32Opnd:$rt, COP0Opnd:$rd, 0), 0>;
def : MipsInstAlias<"mtc0 $rt, $rd", (MTC0 COP0Opnd:$rd, GPR32Opnd:$rt, 0), 0>;
@@ -1880,7 +2268,9 @@ def : MipsInstAlias<"beqz $rs,$offset",
(BEQ GPR32Opnd:$rs, ZERO, brtarget:$offset), 0>;
def : MipsInstAlias<"beqzl $rs,$offset",
(BEQL GPR32Opnd:$rs, ZERO, brtarget:$offset), 0>;
-def : MipsInstAlias<"syscall", (SYSCALL 0), 1>;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def : MipsInstAlias<"syscall", (SYSCALL 0), 1>;
+}
def : MipsInstAlias<"break", (BREAK 0, 0), 1>;
def : MipsInstAlias<"break $imm", (BREAK uimm10:$imm, 0), 1>;
@@ -1902,8 +2292,6 @@ let AdditionalPredicates = [NotInMicroMips] in {
def : MipsInstAlias<"tne $rs, $rt",
(TNE GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>, ISA_MIPS2;
}
-def : MipsInstAlias<"sll $rd, $rt, $rs",
- (SLLV GPR32Opnd:$rd, GPR32Opnd:$rt, GPR32Opnd:$rs), 0>;
def : MipsInstAlias<"sub, $rd, $rs, $imm",
(ADDi GPR32Opnd:$rd, GPR32Opnd:$rs,
InvertedImOperand:$imm), 0>, ISA_MIPS1_NOT_32R6_64R6;
@@ -1915,10 +2303,14 @@ def : MipsInstAlias<"subu, $rd, $rs, $imm",
InvertedImOperand:$imm), 0>;
def : MipsInstAlias<"subu $rs, $imm", (ADDiu GPR32Opnd:$rs, GPR32Opnd:$rs,
InvertedImOperand:$imm), 0>;
-def : MipsInstAlias<"sra $rd, $rt, $rs",
- (SRAV GPR32Opnd:$rd, GPR32Opnd:$rt, GPR32Opnd:$rs), 0>;
-def : MipsInstAlias<"srl $rd, $rt, $rs",
- (SRLV GPR32Opnd:$rd, GPR32Opnd:$rt, GPR32Opnd:$rs), 0>;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def : MipsInstAlias<"sll $rd, $rt, $rs",
+ (SLLV GPR32Opnd:$rd, GPR32Opnd:$rt, GPR32Opnd:$rs), 0>;
+ def : MipsInstAlias<"sra $rd, $rt, $rs",
+ (SRAV GPR32Opnd:$rd, GPR32Opnd:$rt, GPR32Opnd:$rs), 0>;
+ def : MipsInstAlias<"srl $rd, $rt, $rs",
+ (SRLV GPR32Opnd:$rd, GPR32Opnd:$rt, GPR32Opnd:$rs), 0>;
+}
def : MipsInstAlias<"sdbbp", (SDBBP 0)>, ISA_MIPS32_NOT_32R6_64R6;
def : MipsInstAlias<"sync",
(SYNC 0), 1>, ISA_MIPS2;
@@ -1926,10 +2318,11 @@ def : MipsInstAlias<"sync",
// Assembler Pseudo Instructions
//===----------------------------------------------------------------------===//
+// We use i32imm on li/la to defer range checking to the assembler.
class LoadImmediate32<string instr_asm, Operand Od, RegisterOperand RO> :
MipsAsmPseudoInst<(outs RO:$rt), (ins Od:$imm32),
!strconcat(instr_asm, "\t$rt, $imm32")> ;
-def LoadImm32 : LoadImmediate32<"li", simm32, GPR32Opnd>;
+def LoadImm32 : LoadImmediate32<"li", i32imm, GPR32Opnd>;
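With i32imm the parser accepts any 32-bit expression, and the range check plus the actual expansion happen later in the assembler. For reference, the classic li expansion picks one instruction when the value fits a 16-bit field and lui+ori otherwise; a sketch (illustrative helper, not the MipsAsmParser macro code):

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative li expansion: addiu for signed-16 values, ori for
// unsigned-16 values, lui (+ ori for a nonzero low half) for the rest.
void expandLi(const char *Reg, int32_t Imm) {
  uint32_t U = (uint32_t)Imm;
  if (Imm >= -32768 && Imm < 32768)
    std::printf("addiu %s, $zero, %d\n", Reg, Imm);
  else if (U <= 0xffff)
    std::printf("ori %s, $zero, %u\n", Reg, U);
  else {
    std::printf("lui %s, %u\n", Reg, U >> 16);
    if (U & 0xffff)
      std::printf("ori %s, %s, %u\n", Reg, Reg, U & 0xffff);
  }
}
```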
class LoadAddressFromReg32<string instr_asm, Operand MemOpnd,
RegisterOperand RO> :
@@ -1940,17 +2333,18 @@ def LoadAddrReg32 : LoadAddressFromReg32<"la", mem, GPR32Opnd>;
class LoadAddressFromImm32<string instr_asm, Operand Od, RegisterOperand RO> :
MipsAsmPseudoInst<(outs RO:$rt), (ins Od:$imm32),
!strconcat(instr_asm, "\t$rt, $imm32")> ;
-def LoadAddrImm32 : LoadAddressFromImm32<"la", simm32, GPR32Opnd>;
+def LoadAddrImm32 : LoadAddressFromImm32<"la", i32imm, GPR32Opnd>;
def JalTwoReg : MipsAsmPseudoInst<(outs GPR32Opnd:$rd), (ins GPR32Opnd:$rs),
"jal\t$rd, $rs"> ;
def JalOneReg : MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rs),
"jal\t$rs"> ;
-def NORImm : MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm),
- "nor\t$rs, $rt, $imm"> ;
+def NORImm : MipsAsmPseudoInst<
+ (outs), (ins GPR32Opnd:$rs, GPR32Opnd:$rt, simm32:$imm),
+ "nor\t$rs, $rt, $imm"> ;
-let hasDelaySlot = 1 in {
+let hasDelaySlot = 1, isCTI = 1 in {
def BneImm : MipsAsmPseudoInst<(outs GPR32Opnd:$rt),
(ins imm64:$imm64, brtarget:$offset),
"bne\t$rt, $imm64, $offset">;
@@ -1981,6 +2375,7 @@ def BLEUL: CondBranchPseudo<"bleul">, ISA_MIPS2_NOT_32R6_64R6;
def BGEUL: CondBranchPseudo<"bgeul">, ISA_MIPS2_NOT_32R6_64R6;
def BGTUL: CondBranchPseudo<"bgtul">, ISA_MIPS2_NOT_32R6_64R6;
+let isCTI = 1 in
class CondBranchImmPseudo<string instr_asm> :
MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rs, imm64:$imm, brtarget:$offset),
!strconcat(instr_asm, "\t$rs, $imm, $offset")>;
@@ -2008,17 +2403,34 @@ def BGTULImmMacro : CondBranchImmPseudo<"bgtul">, ISA_MIPS2_NOT_32R6_64R6;
// Once the tablegen-erated errors are made better, this needs to be fixed and
// predicates need to be restored.
-def SDivMacro : MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rs, GPR32Opnd:$rt),
- "div\t$rs, $rt">; //, ISA_MIPS1_NOT_32R6_64R6;
-
-def UDivMacro : MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rs, GPR32Opnd:$rt),
- "divu\t$rs, $rt">; //, ISA_MIPS1_NOT_32R6_64R6;
-
-def DSDivMacro : MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rs, GPR32Opnd:$rt),
- "ddiv\t$rs, $rt">; //, ISA_MIPS64_NOT_64R6;
-
-def DUDivMacro : MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rs, GPR32Opnd:$rt),
- "ddivu\t$rs, $rt">; //, ISA_MIPS64_NOT_64R6;
+def SDivMacro : MipsAsmPseudoInst<(outs GPR32Opnd:$rd),
+ (ins GPR32Opnd:$rs, GPR32Opnd:$rt),
+ "div\t$rd, $rs, $rt">,
+ ISA_MIPS1_NOT_32R6_64R6;
+def UDivMacro : MipsAsmPseudoInst<(outs GPR32Opnd:$rd),
+ (ins GPR32Opnd:$rs, GPR32Opnd:$rt),
+ "divu\t$rd, $rs, $rt">,
+ ISA_MIPS1_NOT_32R6_64R6;
+def : MipsInstAlias<"div $rt, $rs", (SDivMacro GPR32Opnd:$rt, GPR32Opnd:$rt,
+ GPR32Opnd:$rs), 0>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def : MipsInstAlias<"divu $rt, $rs", (UDivMacro GPR32Opnd:$rt, GPR32Opnd:$rt,
+ GPR32Opnd:$rs), 0>,
+ ISA_MIPS1_NOT_32R6_64R6;
+def DSDivMacro : MipsAsmPseudoInst<(outs GPR32Opnd:$rd),
+ (ins GPR32Opnd:$rs, GPR32Opnd:$rt),
+ "ddiv\t$rd, $rs, $rt">,
+ ISA_MIPS64_NOT_64R6;
+def DUDivMacro : MipsAsmPseudoInst<(outs GPR32Opnd:$rd),
+ (ins GPR32Opnd:$rs, GPR32Opnd:$rt),
+ "ddivu\t$rd, $rs, $rt">,
+ ISA_MIPS64_NOT_64R6;
+def : MipsInstAlias<"ddiv $rt, $rs", (DSDivMacro GPR32Opnd:$rt, GPR32Opnd:$rt,
+ GPR32Opnd:$rs), 0>,
+ ISA_MIPS64_NOT_64R6;
+def : MipsInstAlias<"ddivu $rt, $rs", (DUDivMacro GPR32Opnd:$rt, GPR32Opnd:$rt,
+ GPR32Opnd:$rs), 0>,
+ ISA_MIPS64_NOT_64R6;
def Ulh : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), (ins mem:$addr),
"ulh\t$rt, $addr">; //, ISA_MIPS1_NOT_32R6_64R6;
@@ -2055,14 +2467,14 @@ def : MipsPat<(i32 imm:$imm),
(ORi (LUi (HI16 imm:$imm)), (LO16 imm:$imm))>;
// Carry MipsPatterns
-def : MipsPat<(subc GPR32:$lhs, GPR32:$rhs),
- (SUBu GPR32:$lhs, GPR32:$rhs)>;
-let AdditionalPredicates = [NotDSP] in {
- def : MipsPat<(addc GPR32:$lhs, GPR32:$rhs),
- (ADDu GPR32:$lhs, GPR32:$rhs)>;
- def : MipsPat<(addc GPR32:$src, immSExt16:$imm),
- (ADDiu GPR32:$src, imm:$imm)>;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def : MipsPat<(subc GPR32:$lhs, GPR32:$rhs),
+ (SUBu GPR32:$lhs, GPR32:$rhs)>;
}
+def : MipsPat<(addc GPR32:$lhs, GPR32:$rhs),
+ (ADDu GPR32:$lhs, GPR32:$rhs)>, ASE_NOT_DSP;
+def : MipsPat<(addc GPR32:$src, immSExt16:$imm),
+ (ADDiu GPR32:$src, imm:$imm)>, ASE_NOT_DSP;
// Support multiplication for pre-Mips32 targets that don't have
// the MUL instruction.
@@ -2138,7 +2550,9 @@ def : MipsPat<(not GPR32:$in),
// extended loads
def : MipsPat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>;
def : MipsPat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>;
-def : MipsPat<(i32 (extloadi16 addr:$src)), (LHu addr:$src)>;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def : MipsPat<(i32 (extloadi16 addr:$src)), (LHu addr:$src)>;
+}
// peepholes
def : MipsPat<(store (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
@@ -2235,15 +2649,17 @@ def : MipsPat<(bswap GPR32:$rt), (ROTR (WSBH GPR32:$rt), 16)>;
// Load halfword/word patterns.
let AddedComplexity = 40 in {
def : LoadRegImmPat<LBu, i32, zextloadi8>;
- def : LoadRegImmPat<LH, i32, sextloadi16>;
let AdditionalPredicates = [NotInMicroMips] in {
- def : LoadRegImmPat<LW, i32, load>;
+ def : LoadRegImmPat<LH, i32, sextloadi16>;
+ def : LoadRegImmPat<LW, i32, load>;
}
}
// Atomic load patterns.
def : MipsPat<(atomic_load_8 addr:$a), (LB addr:$a)>;
-def : MipsPat<(atomic_load_16 addr:$a), (LH addr:$a)>;
+let AdditionalPredicates = [NotInMicroMips] in {
+ def : MipsPat<(atomic_load_16 addr:$a), (LH addr:$a)>;
+}
def : MipsPat<(atomic_load_32 addr:$a), (LW addr:$a)>;
// Atomic store patterns.
diff --git a/lib/Target/Mips/MipsLongBranch.cpp b/lib/Target/Mips/MipsLongBranch.cpp
index 49fb99a8ec43..e721312390d2 100644
--- a/lib/Target/Mips/MipsLongBranch.cpp
+++ b/lib/Target/Mips/MipsLongBranch.cpp
@@ -63,8 +63,7 @@ namespace {
public:
static char ID;
MipsLongBranch(TargetMachine &tm)
- : MachineFunctionPass(ID), TM(tm),
- IsPIC(TM.getRelocationModel() == Reloc::PIC_),
+ : MachineFunctionPass(ID), TM(tm), IsPIC(TM.isPositionIndependent()),
ABI(static_cast<const MipsTargetMachine &>(TM).getABI()) {}
const char *getPassName() const override {
@@ -73,11 +72,16 @@ namespace {
bool runOnMachineFunction(MachineFunction &F) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
private:
void splitMBB(MachineBasicBlock *MBB);
void initMBBInfo();
int64_t computeOffset(const MachineInstr *Br);
- void replaceBranch(MachineBasicBlock &MBB, Iter Br, DebugLoc DL,
+ void replaceBranch(MachineBasicBlock &MBB, Iter Br, const DebugLoc &DL,
MachineBasicBlock *MBBOpnd);
void expandToLongBranch(MBBInfo &Info);
@@ -113,7 +117,7 @@ static MachineBasicBlock *getTargetMBB(const MachineInstr &Br) {
// Traverse the list of instructions backwards until a non-debug instruction is
// found or it reaches E.
-static ReverseIter getNonDebugInstr(ReverseIter B, ReverseIter E) {
+static ReverseIter getNonDebugInstr(ReverseIter B, const ReverseIter &E) {
for (; B != E; ++B)
if (!B->isDebugValue())
return B;
@@ -160,8 +164,8 @@ void MipsLongBranch::splitMBB(MachineBasicBlock *MBB) {
void MipsLongBranch::initMBBInfo() {
// Split the MBBs if they have two branches. Each basic block should have at
// most one branch after this loop is executed.
- for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E;)
- splitMBB(&*I++);
+ for (auto &MBB : *MF)
+ splitMBB(&MBB);
MF->RenumberBlocks();
MBBInfos.clear();
@@ -175,17 +179,15 @@ void MipsLongBranch::initMBBInfo() {
// Compute size of MBB.
for (MachineBasicBlock::instr_iterator MI = MBB->instr_begin();
MI != MBB->instr_end(); ++MI)
- MBBInfos[I].Size += TII->GetInstSizeInBytes(&*MI);
+ MBBInfos[I].Size += TII->GetInstSizeInBytes(*MI);
// Search for MBB's branch instruction.
ReverseIter End = MBB->rend();
ReverseIter Br = getNonDebugInstr(MBB->rbegin(), End);
if ((Br != End) && !Br->isIndirectBranch() &&
- (Br->isConditionalBranch() ||
- (Br->isUnconditionalBranch() &&
- TM.getRelocationModel() == Reloc::PIC_)))
- MBBInfos[I].Br = (++Br).base();
+ (Br->isConditionalBranch() || (Br->isUnconditionalBranch() && IsPIC)))
+ MBBInfos[I].Br = &*(++Br).base();
}
}
@@ -213,7 +215,8 @@ int64_t MipsLongBranch::computeOffset(const MachineInstr *Br) {
// Replace Br with a branch which has the opposite condition code and a
// MachineBasicBlock operand MBBOpnd.
void MipsLongBranch::replaceBranch(MachineBasicBlock &MBB, Iter Br,
- DebugLoc DL, MachineBasicBlock *MBBOpnd) {
+ const DebugLoc &DL,
+ MachineBasicBlock *MBBOpnd) {
const MipsInstrInfo *TII = static_cast<const MipsInstrInfo *>(
MBB.getParent()->getSubtarget().getInstrInfo());
unsigned NewOpc = TII->getOppositeBranchOpc(Br->getOpcode());
@@ -238,7 +241,7 @@ void MipsLongBranch::replaceBranch(MachineBasicBlock &MBB, Iter Br,
// Bundle the instruction in the delay slot to the newly created branch
// and erase the original branch.
assert(Br->isBundledWithSucc());
- MachineBasicBlock::instr_iterator II(Br);
+ MachineBasicBlock::instr_iterator II = Br.getInstrIterator();
MIBundleBuilder(&*MIB).append((++II)->removeFromBundle());
}
Br->eraseFromParent();
@@ -329,23 +332,26 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::LW), Mips::RA)
.addReg(Mips::SP).addImm(0);
- if (!Subtarget.isTargetNaCl()) {
- MIBundleBuilder(*BalTgtMBB, Pos)
- .append(BuildMI(*MF, DL, TII->get(Mips::JR)).addReg(Mips::AT))
- .append(BuildMI(*MF, DL, TII->get(Mips::ADDiu), Mips::SP)
- .addReg(Mips::SP).addImm(8));
- } else {
- // In NaCl, modifying the sp is not allowed in branch delay slot.
+  // In NaCl, modifying the sp is not allowed in a branch delay slot.
+ if (Subtarget.isTargetNaCl())
BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::ADDiu), Mips::SP)
.addReg(Mips::SP).addImm(8);
- MIBundleBuilder(*BalTgtMBB, Pos)
- .append(BuildMI(*MF, DL, TII->get(Mips::JR)).addReg(Mips::AT))
- .append(BuildMI(*MF, DL, TII->get(Mips::NOP)));
+ if (Subtarget.hasMips32r6())
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::JALR))
+ .addReg(Mips::ZERO).addReg(Mips::AT);
+ else
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::JR)).addReg(Mips::AT);
+ if (Subtarget.isTargetNaCl()) {
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::NOP));
// Bundle-align the target of indirect branch JR.
TgtMBB->setAlignment(MIPS_NACL_BUNDLE_ALIGN);
- }
+ } else
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::ADDiu), Mips::SP)
+ .addReg(Mips::SP).addImm(8);
+
+ BalTgtMBB->rbegin()->bundleWithPred();
} else {
// $longbr:
// daddiu $sp, $sp, -16
@@ -404,10 +410,15 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::LD), Mips::RA_64)
.addReg(Mips::SP_64).addImm(0);
- MIBundleBuilder(*BalTgtMBB, Pos)
- .append(BuildMI(*MF, DL, TII->get(Mips::JR64)).addReg(Mips::AT_64))
- .append(BuildMI(*MF, DL, TII->get(Mips::DADDiu), Mips::SP_64)
- .addReg(Mips::SP_64).addImm(16));
+ if (Subtarget.hasMips64r6())
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::JALR64))
+ .addReg(Mips::ZERO_64).addReg(Mips::AT_64);
+ else
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::JR64)).addReg(Mips::AT_64);
+
+ BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::SP_64)
+ .addReg(Mips::SP_64).addImm(16);
+ BalTgtMBB->rbegin()->bundleWithPred();
}
assert(LongBrMBB->size() + BalTgtMBB->size() == LongBranchSeqSize);
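Both the 32- and 64-bit paths above now end the same way: emit the indirect jump (on r6, where jr was removed, jalr with $zero as the link register serves as the plain indirect jump), emit the stack restore, and glue the restore into the delay slot with bundleWithPred(). Condensed for the 32-bit case (hypothetical helper reusing the same BuildMI calls; assumes the includes already present in MipsLongBranch.cpp):

```cpp
// Hypothetical condensation of the delay-slot pairing above (32-bit case);
// assumes Pos sits at the end of MBB, as in expandToLongBranch.
static void emitIndirectJumpWithRestore(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator Pos,
                                        const DebugLoc &DL,
                                        const MipsInstrInfo *TII, bool IsR6) {
  if (IsR6) // r6 dropped jr; jalr $zero, $rs is the indirect jump
    BuildMI(MBB, Pos, DL, TII->get(Mips::JALR))
        .addReg(Mips::ZERO).addReg(Mips::AT);
  else
    BuildMI(MBB, Pos, DL, TII->get(Mips::JR)).addReg(Mips::AT);
  BuildMI(MBB, Pos, DL, TII->get(Mips::ADDiu), Mips::SP)
      .addReg(Mips::SP).addImm(8);
  MBB.rbegin()->bundleWithPred(); // fold the restore into the delay slot
}
```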
@@ -457,8 +468,7 @@ bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) {
if (STI.inMips16Mode() || !STI.enableLongBranchPass())
return false;
- if ((TM.getRelocationModel() == Reloc::PIC_) &&
- static_cast<const MipsTargetMachine &>(TM).getABI().IsO32() &&
+ if (IsPIC && static_cast<const MipsTargetMachine &>(TM).getABI().IsO32() &&
F.getInfo<MipsFunctionInfo>()->globalBaseRegSet())
emitGPDisp(F, TII);
@@ -506,7 +516,7 @@ bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) {
return true;
// Compute basic block addresses.
- if (TM.getRelocationModel() == Reloc::PIC_) {
+ if (IsPIC) {
uint64_t Address = 0;
for (I = MBBInfos.begin(); I != E; Address += I->Size, ++I)
diff --git a/lib/Target/Mips/MipsMCInstLower.cpp b/lib/Target/Mips/MipsMCInstLower.cpp
index 80d9b75b85b7..d5bc4e537c37 100644
--- a/lib/Target/Mips/MipsMCInstLower.cpp
+++ b/lib/Target/Mips/MipsMCInstLower.cpp
@@ -36,36 +36,87 @@ void MipsMCInstLower::Initialize(MCContext *C) {
MCOperand MipsMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
MachineOperandType MOTy,
unsigned Offset) const {
- MCSymbolRefExpr::VariantKind Kind;
+ MCSymbolRefExpr::VariantKind Kind = MCSymbolRefExpr::VK_None;
+ MipsMCExpr::MipsExprKind TargetKind = MipsMCExpr::MEK_None;
+ bool IsGpOff = false;
const MCSymbol *Symbol;
switch(MO.getTargetFlags()) {
- default: llvm_unreachable("Invalid target flag!");
- case MipsII::MO_NO_FLAG: Kind = MCSymbolRefExpr::VK_None; break;
- case MipsII::MO_GPREL: Kind = MCSymbolRefExpr::VK_Mips_GPREL; break;
- case MipsII::MO_GOT_CALL: Kind = MCSymbolRefExpr::VK_Mips_GOT_CALL; break;
- case MipsII::MO_GOT16: Kind = MCSymbolRefExpr::VK_Mips_GOT16; break;
- case MipsII::MO_GOT: Kind = MCSymbolRefExpr::VK_Mips_GOT; break;
- case MipsII::MO_ABS_HI: Kind = MCSymbolRefExpr::VK_Mips_ABS_HI; break;
- case MipsII::MO_ABS_LO: Kind = MCSymbolRefExpr::VK_Mips_ABS_LO; break;
- case MipsII::MO_TLSGD: Kind = MCSymbolRefExpr::VK_Mips_TLSGD; break;
- case MipsII::MO_TLSLDM: Kind = MCSymbolRefExpr::VK_Mips_TLSLDM; break;
- case MipsII::MO_DTPREL_HI: Kind = MCSymbolRefExpr::VK_Mips_DTPREL_HI; break;
- case MipsII::MO_DTPREL_LO: Kind = MCSymbolRefExpr::VK_Mips_DTPREL_LO; break;
- case MipsII::MO_GOTTPREL: Kind = MCSymbolRefExpr::VK_Mips_GOTTPREL; break;
- case MipsII::MO_TPREL_HI: Kind = MCSymbolRefExpr::VK_Mips_TPREL_HI; break;
- case MipsII::MO_TPREL_LO: Kind = MCSymbolRefExpr::VK_Mips_TPREL_LO; break;
- case MipsII::MO_GPOFF_HI: Kind = MCSymbolRefExpr::VK_Mips_GPOFF_HI; break;
- case MipsII::MO_GPOFF_LO: Kind = MCSymbolRefExpr::VK_Mips_GPOFF_LO; break;
- case MipsII::MO_GOT_DISP: Kind = MCSymbolRefExpr::VK_Mips_GOT_DISP; break;
- case MipsII::MO_GOT_PAGE: Kind = MCSymbolRefExpr::VK_Mips_GOT_PAGE; break;
- case MipsII::MO_GOT_OFST: Kind = MCSymbolRefExpr::VK_Mips_GOT_OFST; break;
- case MipsII::MO_HIGHER: Kind = MCSymbolRefExpr::VK_Mips_HIGHER; break;
- case MipsII::MO_HIGHEST: Kind = MCSymbolRefExpr::VK_Mips_HIGHEST; break;
- case MipsII::MO_GOT_HI16: Kind = MCSymbolRefExpr::VK_Mips_GOT_HI16; break;
- case MipsII::MO_GOT_LO16: Kind = MCSymbolRefExpr::VK_Mips_GOT_LO16; break;
- case MipsII::MO_CALL_HI16: Kind = MCSymbolRefExpr::VK_Mips_CALL_HI16; break;
- case MipsII::MO_CALL_LO16: Kind = MCSymbolRefExpr::VK_Mips_CALL_LO16; break;
+ default:
+ llvm_unreachable("Invalid target flag!");
+ case MipsII::MO_NO_FLAG:
+ break;
+ case MipsII::MO_GPREL:
+ TargetKind = MipsMCExpr::MEK_GPREL;
+ break;
+ case MipsII::MO_GOT_CALL:
+ TargetKind = MipsMCExpr::MEK_GOT_CALL;
+ break;
+ case MipsII::MO_GOT:
+ TargetKind = MipsMCExpr::MEK_GOT;
+ break;
+ case MipsII::MO_ABS_HI:
+ TargetKind = MipsMCExpr::MEK_HI;
+ break;
+ case MipsII::MO_ABS_LO:
+ TargetKind = MipsMCExpr::MEK_LO;
+ break;
+ case MipsII::MO_TLSGD:
+ TargetKind = MipsMCExpr::MEK_TLSGD;
+ break;
+ case MipsII::MO_TLSLDM:
+ TargetKind = MipsMCExpr::MEK_TLSLDM;
+ break;
+ case MipsII::MO_DTPREL_HI:
+ TargetKind = MipsMCExpr::MEK_DTPREL_HI;
+ break;
+ case MipsII::MO_DTPREL_LO:
+ TargetKind = MipsMCExpr::MEK_DTPREL_LO;
+ break;
+ case MipsII::MO_GOTTPREL:
+ TargetKind = MipsMCExpr::MEK_GOTTPREL;
+ break;
+ case MipsII::MO_TPREL_HI:
+ TargetKind = MipsMCExpr::MEK_TPREL_HI;
+ break;
+ case MipsII::MO_TPREL_LO:
+ TargetKind = MipsMCExpr::MEK_TPREL_LO;
+ break;
+ case MipsII::MO_GPOFF_HI:
+ TargetKind = MipsMCExpr::MEK_HI;
+ IsGpOff = true;
+ break;
+ case MipsII::MO_GPOFF_LO:
+ TargetKind = MipsMCExpr::MEK_LO;
+ IsGpOff = true;
+ break;
+ case MipsII::MO_GOT_DISP:
+ TargetKind = MipsMCExpr::MEK_GOT_DISP;
+ break;
+ case MipsII::MO_GOT_HI16:
+ TargetKind = MipsMCExpr::MEK_GOT_HI16;
+ break;
+ case MipsII::MO_GOT_LO16:
+ TargetKind = MipsMCExpr::MEK_GOT_LO16;
+ break;
+ case MipsII::MO_GOT_PAGE:
+ TargetKind = MipsMCExpr::MEK_GOT_PAGE;
+ break;
+ case MipsII::MO_GOT_OFST:
+ TargetKind = MipsMCExpr::MEK_GOT_OFST;
+ break;
+ case MipsII::MO_HIGHER:
+ TargetKind = MipsMCExpr::MEK_HIGHER;
+ break;
+ case MipsII::MO_HIGHEST:
+ TargetKind = MipsMCExpr::MEK_HIGHEST;
+ break;
+ case MipsII::MO_CALL_HI16:
+ TargetKind = MipsMCExpr::MEK_CALL_HI16;
+ break;
+ case MipsII::MO_CALL_LO16:
+ TargetKind = MipsMCExpr::MEK_CALL_LO16;
+ break;
}
switch (MOTy) {
@@ -106,30 +157,23 @@ MCOperand MipsMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
llvm_unreachable("<unknown operand type>");
}
- const MCSymbolRefExpr *MCSym = MCSymbolRefExpr::create(Symbol, Kind, *Ctx);
+ const MCExpr *Expr = MCSymbolRefExpr::create(Symbol, Kind, *Ctx);
- if (!Offset)
- return MCOperand::createExpr(MCSym);
+ if (Offset) {
+ // Assume offset is never negative.
+ assert(Offset > 0);
- // Assume offset is never negative.
- assert(Offset > 0);
+ Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(Offset, *Ctx),
+ *Ctx);
+ }
- const MCConstantExpr *OffsetExpr = MCConstantExpr::create(Offset, *Ctx);
- const MCBinaryExpr *Add = MCBinaryExpr::createAdd(MCSym, OffsetExpr, *Ctx);
- return MCOperand::createExpr(Add);
-}
+ if (IsGpOff)
+ Expr = MipsMCExpr::createGpOff(TargetKind, Expr, *Ctx);
+ else if (TargetKind != MipsMCExpr::MEK_None)
+ Expr = MipsMCExpr::create(TargetKind, Expr, *Ctx);
-/*
-static void CreateMCInst(MCInst& Inst, unsigned Opc, const MCOperand &Opnd0,
- const MCOperand &Opnd1,
- const MCOperand &Opnd2 = MCOperand()) {
- Inst.setOpcode(Opc);
- Inst.addOperand(Opnd0);
- Inst.addOperand(Opnd1);
- if (Opnd2.isValid())
- Inst.addOperand(Opnd2);
+ return MCOperand::createExpr(Expr);
}
-*/
MCOperand MipsMCInstLower::LowerOperand(const MachineOperand &MO,
unsigned offset) const {
@@ -160,7 +204,7 @@ MCOperand MipsMCInstLower::LowerOperand(const MachineOperand &MO,
MCOperand MipsMCInstLower::createSub(MachineBasicBlock *BB1,
MachineBasicBlock *BB2,
- MCSymbolRefExpr::VariantKind Kind) const {
+ MipsMCExpr::MipsExprKind Kind) const {
const MCSymbolRefExpr *Sym1 = MCSymbolRefExpr::create(BB1->getSymbol(), *Ctx);
const MCSymbolRefExpr *Sym2 = MCSymbolRefExpr::create(BB2->getSymbol(), *Ctx);
const MCBinaryExpr *Sub = MCBinaryExpr::createSub(Sym1, Sym2, *Ctx);
@@ -178,12 +222,12 @@ lowerLongBranchLUi(const MachineInstr *MI, MCInst &OutMI) const {
// Create %hi($tgt-$baltgt).
OutMI.addOperand(createSub(MI->getOperand(1).getMBB(),
MI->getOperand(2).getMBB(),
- MCSymbolRefExpr::VK_Mips_ABS_HI));
+ MipsMCExpr::MEK_HI));
}
-void MipsMCInstLower::
-lowerLongBranchADDiu(const MachineInstr *MI, MCInst &OutMI, int Opcode,
- MCSymbolRefExpr::VariantKind Kind) const {
+void MipsMCInstLower::lowerLongBranchADDiu(
+ const MachineInstr *MI, MCInst &OutMI, int Opcode,
+ MipsMCExpr::MipsExprKind Kind) const {
OutMI.setOpcode(Opcode);
// Lower two register operands.
@@ -206,17 +250,14 @@ bool MipsMCInstLower::lowerLongBranch(const MachineInstr *MI,
lowerLongBranchLUi(MI, OutMI);
return true;
case Mips::LONG_BRANCH_ADDiu:
- lowerLongBranchADDiu(MI, OutMI, Mips::ADDiu,
- MCSymbolRefExpr::VK_Mips_ABS_LO);
+ lowerLongBranchADDiu(MI, OutMI, Mips::ADDiu, MipsMCExpr::MEK_LO);
return true;
case Mips::LONG_BRANCH_DADDiu:
unsigned TargetFlags = MI->getOperand(2).getTargetFlags();
if (TargetFlags == MipsII::MO_ABS_HI)
- lowerLongBranchADDiu(MI, OutMI, Mips::DADDiu,
- MCSymbolRefExpr::VK_Mips_ABS_HI);
+ lowerLongBranchADDiu(MI, OutMI, Mips::DADDiu, MipsMCExpr::MEK_HI);
else if (TargetFlags == MipsII::MO_ABS_LO)
- lowerLongBranchADDiu(MI, OutMI, Mips::DADDiu,
- MCSymbolRefExpr::VK_Mips_ABS_LO);
+ lowerLongBranchADDiu(MI, OutMI, Mips::DADDiu, MipsMCExpr::MEK_LO);
else
report_fatal_error("Unexpected flags for LONG_BRANCH_DADDiu");
return true;
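
Note: the rewritten LowerSymbolOperand composes the operand inside-out — a plain symbol reference first, then the constant offset, then the Mips relocation operator — so the operator applies to the whole sum. A hedged sketch of the resulting expression trees, using only calls visible in the hunks above (symbol and offset values are illustrative; the %neg(%gp_rel(...)) nesting is per the MipsMCExpr changes elsewhere in this patch):

    // For a symbol "foo" with Offset = 4 (illustrative values):
    //   MO_NO_FLAG  -> foo + 4
    //   MO_ABS_HI   -> %hi(foo + 4)                via MipsMCExpr::create(MEK_HI, ...)
    //   MO_GPOFF_LO -> %lo(%neg(%gp_rel(foo + 4))) via MipsMCExpr::createGpOff(MEK_LO, ...)
    const MCExpr *Expr = MCSymbolRefExpr::create(Symbol, Kind, *Ctx);
    Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(4, *Ctx), *Ctx);
    Expr = MipsMCExpr::create(MipsMCExpr::MEK_HI, Expr, *Ctx); // %hi(foo + 4)
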
diff --git a/lib/Target/Mips/MipsMCInstLower.h b/lib/Target/Mips/MipsMCInstLower.h
index 1ce27e401850..c25f90005480 100644
--- a/lib/Target/Mips/MipsMCInstLower.h
+++ b/lib/Target/Mips/MipsMCInstLower.h
@@ -10,7 +10,6 @@
#ifndef LLVM_LIB_TARGET_MIPS_MIPSMCINSTLOWER_H
#define LLVM_LIB_TARGET_MIPS_MIPSMCINSTLOWER_H
#include "MCTargetDesc/MipsMCExpr.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/Support/Compiler.h"
@@ -38,11 +37,10 @@ private:
MCOperand LowerSymbolOperand(const MachineOperand &MO,
MachineOperandType MOTy, unsigned Offset) const;
MCOperand createSub(MachineBasicBlock *BB1, MachineBasicBlock *BB2,
- MCSymbolRefExpr::VariantKind Kind) const;
+ MipsMCExpr::MipsExprKind Kind) const;
void lowerLongBranchLUi(const MachineInstr *MI, MCInst &OutMI) const;
- void lowerLongBranchADDiu(const MachineInstr *MI, MCInst &OutMI,
- int Opcode,
- MCSymbolRefExpr::VariantKind Kind) const;
+ void lowerLongBranchADDiu(const MachineInstr *MI, MCInst &OutMI, int Opcode,
+ MipsMCExpr::MipsExprKind Kind) const;
bool lowerLongBranch(const MachineInstr *MI, MCInst &OutMI) const;
};
}
diff --git a/lib/Target/Mips/MipsMSAInstrInfo.td b/lib/Target/Mips/MipsMSAInstrInfo.td
index eacfcec78bc7..deb4345e2662 100644
--- a/lib/Target/Mips/MipsMSAInstrInfo.td
+++ b/lib/Target/Mips/MipsMSAInstrInfo.td
@@ -65,53 +65,11 @@ def MipsVExtractZExt : SDNode<"MipsISD::VEXTRACT_ZEXT_ELT",
def immZExt1Ptr : ImmLeaf<iPTR, [{return isUInt<1>(Imm);}]>;
def immZExt2Ptr : ImmLeaf<iPTR, [{return isUInt<2>(Imm);}]>;
+def immZExt3Ptr : ImmLeaf<iPTR, [{return isUInt<3>(Imm);}]>;
def immZExt4Ptr : ImmLeaf<iPTR, [{return isUInt<4>(Imm);}]>;
-def immZExt6Ptr : ImmLeaf<iPTR, [{return isUInt<6>(Imm);}]>;
// Operands
-def uimm4_ptr : Operand<iPTR> {
- let PrintMethod = "printUnsignedImm8";
-}
-
-def uimm6_ptr : Operand<iPTR> {
- let PrintMethod = "printUnsignedImm8";
-}
-
-def simm5 : Operand<i32>;
-
-def vsplat_uimm1 : Operand<vAny> {
- let PrintMethod = "printUnsignedImm8";
-}
-
-def vsplat_uimm2 : Operand<vAny> {
- let PrintMethod = "printUnsignedImm8";
-}
-
-def vsplat_uimm3 : Operand<vAny> {
- let PrintMethod = "printUnsignedImm8";
-}
-
-def vsplat_uimm4 : Operand<vAny> {
- let PrintMethod = "printUnsignedImm8";
-}
-
-def vsplat_uimm5 : Operand<vAny> {
- let PrintMethod = "printUnsignedImm8";
-}
-
-def vsplat_uimm6 : Operand<vAny> {
- let PrintMethod = "printUnsignedImm8";
-}
-
-def vsplat_uimm8 : Operand<vAny> {
- let PrintMethod = "printUnsignedImm8";
-}
-
-def vsplat_simm5 : Operand<vAny>;
-
-def vsplat_simm10 : Operand<vAny>;
-
def immZExt2Lsa : ImmLeaf<i32, [{return isUInt<2>(Imm - 1);}]>;
// Pattern fragments
@@ -336,15 +294,33 @@ def vsplat_uimm_inv_pow2 : ComplexPattern<vAny, 1, "selectVSplatUimmInvPow2",
// Any build_vector that is a constant splat with only a consecutive sequence
// of left-most bits set.
-def vsplat_maskl_bits : SplatComplexPattern<vsplat_uimm8, vAny, 1,
- "selectVSplatMaskL",
- [build_vector, bitconvert]>;
+def vsplat_maskl_bits_uimm3
+ : SplatComplexPattern<vsplat_uimm3, vAny, 1, "selectVSplatMaskL",
+ [build_vector, bitconvert]>;
+def vsplat_maskl_bits_uimm4
+ : SplatComplexPattern<vsplat_uimm4, vAny, 1, "selectVSplatMaskL",
+ [build_vector, bitconvert]>;
+def vsplat_maskl_bits_uimm5
+ : SplatComplexPattern<vsplat_uimm5, vAny, 1, "selectVSplatMaskL",
+ [build_vector, bitconvert]>;
+def vsplat_maskl_bits_uimm6
+ : SplatComplexPattern<vsplat_uimm6, vAny, 1, "selectVSplatMaskL",
+ [build_vector, bitconvert]>;
// Any build_vector that is a constant splat with only a consecutive sequence
// of right-most bits set.
-def vsplat_maskr_bits : SplatComplexPattern<vsplat_uimm8, vAny, 1,
- "selectVSplatMaskR",
- [build_vector, bitconvert]>;
+def vsplat_maskr_bits_uimm3
+ : SplatComplexPattern<vsplat_uimm3, vAny, 1, "selectVSplatMaskR",
+ [build_vector, bitconvert]>;
+def vsplat_maskr_bits_uimm4
+ : SplatComplexPattern<vsplat_uimm4, vAny, 1, "selectVSplatMaskR",
+ [build_vector, bitconvert]>;
+def vsplat_maskr_bits_uimm5
+ : SplatComplexPattern<vsplat_uimm5, vAny, 1, "selectVSplatMaskR",
+ [build_vector, bitconvert]>;
+def vsplat_maskr_bits_uimm6
+ : SplatComplexPattern<vsplat_uimm6, vAny, 1, "selectVSplatMaskR",
+ [build_vector, bitconvert]>;
// Any build_vector that is a constant splat with a value that equals 1
// FIXME: These should be a ComplexPattern but we can't use them because the
@@ -1185,11 +1161,11 @@ class MSA_BIT_X_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
}
class MSA_BIT_BINSXI_DESC_BASE<string instr_asm, ValueType Ty,
- ComplexPattern Mask, RegisterOperand ROWD,
+ SplatComplexPattern Mask, RegisterOperand ROWD,
RegisterOperand ROWS = ROWD,
InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs ROWD:$wd);
- dag InOperandList = (ins ROWD:$wd_in, ROWS:$ws, vsplat_uimm8:$m);
+ dag InOperandList = (ins ROWD:$wd_in, ROWS:$ws, Mask.OpClass:$m);
string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
// Note that binsxi and vselect treat the condition operand the opposite
// way to each other.
@@ -1202,16 +1178,16 @@ class MSA_BIT_BINSXI_DESC_BASE<string instr_asm, ValueType Ty,
}
class MSA_BIT_BINSLI_DESC_BASE<string instr_asm, ValueType Ty,
- RegisterOperand ROWD,
+ SplatComplexPattern ImmOp, RegisterOperand ROWD,
RegisterOperand ROWS = ROWD,
InstrItinClass itin = NoItinerary> :
- MSA_BIT_BINSXI_DESC_BASE<instr_asm, Ty, vsplat_maskl_bits, ROWD, ROWS, itin>;
+ MSA_BIT_BINSXI_DESC_BASE<instr_asm, Ty, ImmOp, ROWD, ROWS, itin>;
class MSA_BIT_BINSRI_DESC_BASE<string instr_asm, ValueType Ty,
- RegisterOperand ROWD,
+ SplatComplexPattern ImmOp, RegisterOperand ROWD,
RegisterOperand ROWS = ROWD,
InstrItinClass itin = NoItinerary> :
- MSA_BIT_BINSXI_DESC_BASE<instr_asm, Ty, vsplat_maskr_bits, ROWD, ROWS, itin>;
+ MSA_BIT_BINSXI_DESC_BASE<instr_asm, Ty, ImmOp, ROWD, ROWS, itin>;
class MSA_BIT_SPLAT_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
SplatComplexPattern SplatImm,
@@ -1225,13 +1201,13 @@ class MSA_BIT_SPLAT_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
}
class MSA_COPY_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- ValueType VecTy, RegisterOperand ROD,
- RegisterOperand ROWS,
+ ValueType VecTy, Operand ImmOp, ImmLeaf Imm,
+ RegisterOperand ROD, RegisterOperand ROWS,
InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs ROD:$rd);
- dag InOperandList = (ins ROWS:$ws, uimm4_ptr:$n);
+ dag InOperandList = (ins ROWS:$ws, ImmOp:$n);
string AsmString = !strconcat(instr_asm, "\t$rd, $ws[$n]");
- list<dag> Pattern = [(set ROD:$rd, (OpNode (VecTy ROWS:$ws), immZExt4Ptr:$n))];
+ list<dag> Pattern = [(set ROD:$rd, (OpNode (VecTy ROWS:$ws), Imm:$n))];
InstrItinClass Itinerary = itin;
}
@@ -1249,9 +1225,10 @@ class MSA_ELM_SLD_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
}
class MSA_COPY_PSEUDO_BASE<SDPatternOperator OpNode, ValueType VecTy,
- RegisterClass RCD, RegisterClass RCWS> :
- MSAPseudo<(outs RCD:$wd), (ins RCWS:$ws, uimm4_ptr:$n),
- [(set RCD:$wd, (OpNode (VecTy RCWS:$ws), immZExt4Ptr:$n))]> {
+ Operand ImmOp, ImmLeaf Imm, RegisterClass RCD,
+ RegisterClass RCWS> :
+ MSAPseudo<(outs RCD:$wd), (ins RCWS:$ws, ImmOp:$n),
+ [(set RCD:$wd, (OpNode (VecTy RCWS:$ws), Imm:$n))]> {
bit usesCustomInserter = 1;
}
@@ -1433,23 +1410,22 @@ class MSA_CBRANCH_DESC_BASE<string instr_asm, RegisterOperand ROWD> {
}
class MSA_INSERT_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- RegisterOperand ROWD, RegisterOperand ROS,
+ Operand ImmOp, ImmLeaf Imm, RegisterOperand ROWD,
+ RegisterOperand ROS,
InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs ROWD:$wd);
- dag InOperandList = (ins ROWD:$wd_in, ROS:$rs, uimm6_ptr:$n);
+ dag InOperandList = (ins ROWD:$wd_in, ROS:$rs, ImmOp:$n);
string AsmString = !strconcat(instr_asm, "\t$wd[$n], $rs");
- list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWD:$wd_in,
- ROS:$rs,
- immZExt6Ptr:$n))];
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWD:$wd_in, ROS:$rs, Imm:$n))];
InstrItinClass Itinerary = itin;
string Constraints = "$wd = $wd_in";
}
class MSA_INSERT_PSEUDO_BASE<SDPatternOperator OpNode, ValueType Ty,
- RegisterOperand ROWD, RegisterOperand ROFS> :
- MSAPseudo<(outs ROWD:$wd), (ins ROWD:$wd_in, uimm6_ptr:$n, ROFS:$fs),
- [(set ROWD:$wd, (OpNode (Ty ROWD:$wd_in), ROFS:$fs,
- immZExt6Ptr:$n))]> {
+ Operand ImmOp, ImmLeaf Imm, RegisterOperand ROWD,
+ RegisterOperand ROFS> :
+ MSAPseudo<(outs ROWD:$wd), (ins ROWD:$wd_in, ImmOp:$n, ROFS:$fs),
+ [(set ROWD:$wd, (OpNode (Ty ROWD:$wd_in), ROFS:$fs, Imm:$n))]> {
bit usesCustomInserter = 1;
string Constraints = "$wd = $wd_in";
}
@@ -1643,10 +1619,10 @@ class BINSL_W_DESC : MSA_3R_BINSX_DESC_BASE<"binsl.w", int_mips_binsl_w,
class BINSL_D_DESC : MSA_3R_BINSX_DESC_BASE<"binsl.d", int_mips_binsl_d,
MSA128DOpnd>;
-class BINSLI_B_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.b", v16i8, MSA128BOpnd>;
-class BINSLI_H_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.h", v8i16, MSA128HOpnd>;
-class BINSLI_W_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.w", v4i32, MSA128WOpnd>;
-class BINSLI_D_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.d", v2i64, MSA128DOpnd>;
+class BINSLI_B_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.b", v16i8, vsplat_maskl_bits_uimm3, MSA128BOpnd>;
+class BINSLI_H_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.h", v8i16, vsplat_maskl_bits_uimm4, MSA128HOpnd>;
+class BINSLI_W_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.w", v4i32, vsplat_maskl_bits_uimm5, MSA128WOpnd>;
+class BINSLI_D_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.d", v2i64, vsplat_maskl_bits_uimm6, MSA128DOpnd>;
class BINSR_B_DESC : MSA_3R_BINSX_DESC_BASE<"binsr.b", int_mips_binsr_b,
MSA128BOpnd>;
@@ -1657,10 +1633,18 @@ class BINSR_W_DESC : MSA_3R_BINSX_DESC_BASE<"binsr.w", int_mips_binsr_w,
class BINSR_D_DESC : MSA_3R_BINSX_DESC_BASE<"binsr.d", int_mips_binsr_d,
MSA128DOpnd>;
-class BINSRI_B_DESC : MSA_BIT_BINSRI_DESC_BASE<"binsri.b", v16i8, MSA128BOpnd>;
-class BINSRI_H_DESC : MSA_BIT_BINSRI_DESC_BASE<"binsri.h", v8i16, MSA128HOpnd>;
-class BINSRI_W_DESC : MSA_BIT_BINSRI_DESC_BASE<"binsri.w", v4i32, MSA128WOpnd>;
-class BINSRI_D_DESC : MSA_BIT_BINSRI_DESC_BASE<"binsri.d", v2i64, MSA128DOpnd>;
+class BINSRI_B_DESC
+ : MSA_BIT_BINSRI_DESC_BASE<"binsri.b", v16i8, vsplat_maskr_bits_uimm3,
+ MSA128BOpnd>;
+class BINSRI_H_DESC
+ : MSA_BIT_BINSRI_DESC_BASE<"binsri.h", v8i16, vsplat_maskr_bits_uimm4,
+ MSA128HOpnd>;
+class BINSRI_W_DESC
+ : MSA_BIT_BINSRI_DESC_BASE<"binsri.w", v4i32, vsplat_maskr_bits_uimm5,
+ MSA128WOpnd>;
+class BINSRI_D_DESC
+ : MSA_BIT_BINSRI_DESC_BASE<"binsri.d", v2i64, vsplat_maskr_bits_uimm6,
+ MSA128DOpnd>;
class BMNZ_V_DESC {
dag OutOperandList = (outs MSA128BOpnd:$wd);
@@ -1867,24 +1851,33 @@ class CLTI_U_D_DESC : MSA_I5_DESC_BASE<"clti_u.d", vsetult_v2i64,
vsplati64_uimm5, MSA128DOpnd>;
class COPY_S_B_DESC : MSA_COPY_DESC_BASE<"copy_s.b", vextract_sext_i8, v16i8,
- GPR32Opnd, MSA128BOpnd>;
+ uimm4_ptr, immZExt4Ptr, GPR32Opnd,
+ MSA128BOpnd>;
class COPY_S_H_DESC : MSA_COPY_DESC_BASE<"copy_s.h", vextract_sext_i16, v8i16,
- GPR32Opnd, MSA128HOpnd>;
+ uimm3_ptr, immZExt3Ptr, GPR32Opnd,
+ MSA128HOpnd>;
class COPY_S_W_DESC : MSA_COPY_DESC_BASE<"copy_s.w", vextract_sext_i32, v4i32,
- GPR32Opnd, MSA128WOpnd>;
+ uimm2_ptr, immZExt2Ptr, GPR32Opnd,
+ MSA128WOpnd>;
class COPY_S_D_DESC : MSA_COPY_DESC_BASE<"copy_s.d", vextract_sext_i64, v2i64,
- GPR64Opnd, MSA128DOpnd>;
+ uimm1_ptr, immZExt1Ptr, GPR64Opnd,
+ MSA128DOpnd>;
class COPY_U_B_DESC : MSA_COPY_DESC_BASE<"copy_u.b", vextract_zext_i8, v16i8,
- GPR32Opnd, MSA128BOpnd>;
+ uimm4_ptr, immZExt4Ptr, GPR32Opnd,
+ MSA128BOpnd>;
class COPY_U_H_DESC : MSA_COPY_DESC_BASE<"copy_u.h", vextract_zext_i16, v8i16,
- GPR32Opnd, MSA128HOpnd>;
+ uimm3_ptr, immZExt3Ptr, GPR32Opnd,
+ MSA128HOpnd>;
class COPY_U_W_DESC : MSA_COPY_DESC_BASE<"copy_u.w", vextract_zext_i32, v4i32,
- GPR32Opnd, MSA128WOpnd>;
+ uimm2_ptr, immZExt2Ptr, GPR32Opnd,
+ MSA128WOpnd>;
-class COPY_FW_PSEUDO_DESC : MSA_COPY_PSEUDO_BASE<vector_extract, v4f32, FGR32,
+class COPY_FW_PSEUDO_DESC : MSA_COPY_PSEUDO_BASE<vector_extract, v4f32,
+ uimm2_ptr, immZExt2Ptr, FGR32,
MSA128W>;
-class COPY_FD_PSEUDO_DESC : MSA_COPY_PSEUDO_BASE<vector_extract, v2f64, FGR64,
+class COPY_FD_PSEUDO_DESC : MSA_COPY_PSEUDO_BASE<vector_extract, v2f64,
+ uimm1_ptr, immZExt1Ptr, FGR64,
MSA128D>;
class CTCMSA_DESC {
@@ -2249,14 +2242,14 @@ class ILVR_H_DESC : MSA_3R_DESC_BASE<"ilvr.h", MipsILVR, MSA128HOpnd>;
class ILVR_W_DESC : MSA_3R_DESC_BASE<"ilvr.w", MipsILVR, MSA128WOpnd>;
class ILVR_D_DESC : MSA_3R_DESC_BASE<"ilvr.d", MipsILVR, MSA128DOpnd>;
-class INSERT_B_DESC : MSA_INSERT_DESC_BASE<"insert.b", vinsert_v16i8,
- MSA128BOpnd, GPR32Opnd>;
-class INSERT_H_DESC : MSA_INSERT_DESC_BASE<"insert.h", vinsert_v8i16,
- MSA128HOpnd, GPR32Opnd>;
-class INSERT_W_DESC : MSA_INSERT_DESC_BASE<"insert.w", vinsert_v4i32,
- MSA128WOpnd, GPR32Opnd>;
-class INSERT_D_DESC : MSA_INSERT_DESC_BASE<"insert.d", vinsert_v2i64,
- MSA128DOpnd, GPR64Opnd>;
+class INSERT_B_DESC : MSA_INSERT_DESC_BASE<"insert.b", vinsert_v16i8, uimm4,
+ immZExt4Ptr, MSA128BOpnd, GPR32Opnd>;
+class INSERT_H_DESC : MSA_INSERT_DESC_BASE<"insert.h", vinsert_v8i16, uimm3,
+ immZExt3Ptr, MSA128HOpnd, GPR32Opnd>;
+class INSERT_W_DESC : MSA_INSERT_DESC_BASE<"insert.w", vinsert_v4i32, uimm2,
+ immZExt2Ptr, MSA128WOpnd, GPR32Opnd>;
+class INSERT_D_DESC : MSA_INSERT_DESC_BASE<"insert.d", vinsert_v2i64, uimm1,
+ immZExt1Ptr, MSA128DOpnd, GPR64Opnd>;
class INSERT_B_VIDX_PSEUDO_DESC :
MSA_INSERT_VIDX_PSEUDO_BASE<vector_insert, v16i8, MSA128BOpnd, GPR32Opnd, GPR32Opnd>;
@@ -2268,8 +2261,10 @@ class INSERT_D_VIDX_PSEUDO_DESC :
MSA_INSERT_VIDX_PSEUDO_BASE<vector_insert, v2i64, MSA128DOpnd, GPR64Opnd, GPR32Opnd>;
class INSERT_FW_PSEUDO_DESC : MSA_INSERT_PSEUDO_BASE<vector_insert, v4f32,
+ uimm2, immZExt2Ptr,
MSA128WOpnd, FGR32Opnd>;
class INSERT_FD_PSEUDO_DESC : MSA_INSERT_PSEUDO_BASE<vector_insert, v2f64,
+ uimm1, immZExt1Ptr,
MSA128DOpnd, FGR64Opnd>;
class INSERT_FW_VIDX_PSEUDO_DESC :
@@ -2302,7 +2297,7 @@ class INSVE_D_DESC : MSA_INSVE_DESC_BASE<"insve.d", insve_v2i64, uimm1, immZExt1
class LD_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
ValueType TyNode, RegisterOperand ROWD,
- Operand MemOpnd = mem_msa, ComplexPattern Addr = addrimm10,
+ Operand MemOpnd, ComplexPattern Addr = addrimm10,
InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs ROWD:$wd);
dag InOperandList = (ins MemOpnd:$addr);
@@ -2312,10 +2307,10 @@ class LD_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
string DecoderMethod = "DecodeMSA128Mem";
}
-class LD_B_DESC : LD_DESC_BASE<"ld.b", load, v16i8, MSA128BOpnd>;
-class LD_H_DESC : LD_DESC_BASE<"ld.h", load, v8i16, MSA128HOpnd>;
-class LD_W_DESC : LD_DESC_BASE<"ld.w", load, v4i32, MSA128WOpnd>;
-class LD_D_DESC : LD_DESC_BASE<"ld.d", load, v2i64, MSA128DOpnd>;
+class LD_B_DESC : LD_DESC_BASE<"ld.b", load, v16i8, MSA128BOpnd, mem_simm10>;
+class LD_H_DESC : LD_DESC_BASE<"ld.h", load, v8i16, MSA128HOpnd, mem_simm10_lsl1>;
+class LD_W_DESC : LD_DESC_BASE<"ld.w", load, v4i32, MSA128WOpnd, mem_simm10_lsl2>;
+class LD_D_DESC : LD_DESC_BASE<"ld.d", load, v2i64, MSA128DOpnd, mem_simm10_lsl3>;
class LDI_B_DESC : MSA_I10_LDI_DESC_BASE<"ldi.b", MSA128BOpnd>;
class LDI_H_DESC : MSA_I10_LDI_DESC_BASE<"ldi.h", MSA128HOpnd>;
@@ -2323,19 +2318,18 @@ class LDI_W_DESC : MSA_I10_LDI_DESC_BASE<"ldi.w", MSA128WOpnd>;
class LDI_D_DESC : MSA_I10_LDI_DESC_BASE<"ldi.d", MSA128DOpnd>;
class LSA_DESC_BASE<string instr_asm, RegisterOperand RORD,
- RegisterOperand RORS = RORD, RegisterOperand RORT = RORD,
- InstrItinClass itin = NoItinerary > {
+ InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs RORD:$rd);
- dag InOperandList = (ins RORS:$rs, RORT:$rt, uimm2_plus1:$sa);
+ dag InOperandList = (ins RORD:$rs, RORD:$rt, uimm2_plus1:$sa);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt, $sa");
- list<dag> Pattern = [(set RORD:$rd, (add RORT:$rt,
- (shl RORS:$rs,
+ list<dag> Pattern = [(set RORD:$rd, (add RORD:$rt,
+ (shl RORD:$rs,
immZExt2Lsa:$sa)))];
InstrItinClass Itinerary = itin;
}
-class LSA_DESC : LSA_DESC_BASE<"lsa", GPR32Opnd>;
-class DLSA_DESC : LSA_DESC_BASE<"dlsa", GPR64Opnd>;
+class LSA_DESC : LSA_DESC_BASE<"lsa", GPR32Opnd, II_LSA>;
+class DLSA_DESC : LSA_DESC_BASE<"dlsa", GPR64Opnd, II_DLSA>;
class MADD_Q_H_DESC : MSA_3RF_4RF_DESC_BASE<"madd_q.h", int_mips_madd_q_h,
MSA128HOpnd>;
@@ -2636,7 +2630,7 @@ class SRLRI_D_DESC : MSA_BIT_X_DESC_BASE<"srlri.d", int_mips_srlri_d, uimm6,
class ST_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
ValueType TyNode, RegisterOperand ROWD,
- Operand MemOpnd = mem_msa, ComplexPattern Addr = addrimm10,
+ Operand MemOpnd, ComplexPattern Addr = addrimm10,
InstrItinClass itin = NoItinerary> {
dag OutOperandList = (outs);
dag InOperandList = (ins ROWD:$wd, MemOpnd:$addr);
@@ -2646,10 +2640,10 @@ class ST_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
string DecoderMethod = "DecodeMSA128Mem";
}
-class ST_B_DESC : ST_DESC_BASE<"st.b", store, v16i8, MSA128BOpnd>;
-class ST_H_DESC : ST_DESC_BASE<"st.h", store, v8i16, MSA128HOpnd>;
-class ST_W_DESC : ST_DESC_BASE<"st.w", store, v4i32, MSA128WOpnd>;
-class ST_D_DESC : ST_DESC_BASE<"st.d", store, v2i64, MSA128DOpnd>;
+class ST_B_DESC : ST_DESC_BASE<"st.b", store, v16i8, MSA128BOpnd, mem_simm10>;
+class ST_H_DESC : ST_DESC_BASE<"st.h", store, v8i16, MSA128HOpnd, mem_simm10_lsl1>;
+class ST_W_DESC : ST_DESC_BASE<"st.w", store, v4i32, MSA128WOpnd, mem_simm10_lsl2>;
+class ST_D_DESC : ST_DESC_BASE<"st.d", store, v2i64, MSA128DOpnd, mem_simm10_lsl3>;
class SUBS_S_B_DESC : MSA_3R_DESC_BASE<"subs_s.b", int_mips_subs_s_b,
MSA128BOpnd>;
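
Note: these .td hunks replace the catch-all uimm4_ptr/uimm6_ptr operands with immediates sized to the element type. A 128-bit MSA register has 128/EltBits lanes, so a lane index needs exactly log2(128/EltBits) bits — which is why insert.b/copy_*.b now take uimm4, down to uimm1 for the .d forms. A standalone check of that arithmetic (a sketch, not LLVM code; requires C++14 for the constexpr loop):

    constexpr unsigned laneIndexBits(unsigned EltBits) {
      unsigned Lanes = 128 / EltBits; // MSA vectors are 128 bits wide
      unsigned Bits = 0;
      while (Lanes > 1) {
        Lanes >>= 1;
        ++Bits;
      }
      return Bits;
    }
    static_assert(laneIndexBits(8) == 4, ".b lanes take uimm4/immZExt4Ptr");
    static_assert(laneIndexBits(16) == 3, ".h lanes take uimm3/immZExt3Ptr");
    static_assert(laneIndexBits(32) == 2, ".w lanes take uimm2/immZExt2Ptr");
    static_assert(laneIndexBits(64) == 1, ".d lanes take uimm1/immZExt1Ptr");
    int main() {}
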
diff --git a/lib/Target/Mips/MipsMachineFunction.cpp b/lib/Target/Mips/MipsMachineFunction.cpp
index c7d2738af1d4..f81e64e06f43 100644
--- a/lib/Target/Mips/MipsMachineFunction.cpp
+++ b/lib/Target/Mips/MipsMachineFunction.cpp
@@ -42,7 +42,9 @@ unsigned MipsFunctionInfo::getGlobalBaseReg() {
STI.inMips16Mode()
? &Mips::CPU16RegsRegClass
: STI.inMicroMipsMode()
- ? &Mips::GPRMM16RegClass
+ ? STI.hasMips64()
+ ? &Mips::GPRMM16_64RegClass
+ : &Mips::GPRMM16RegClass
: static_cast<const MipsTargetMachine &>(MF.getTarget())
.getABI()
.IsN64()
@@ -51,18 +53,6 @@ unsigned MipsFunctionInfo::getGlobalBaseReg() {
return GlobalBaseReg = MF.getRegInfo().createVirtualRegister(RC);
}
-bool MipsFunctionInfo::mips16SPAliasRegSet() const {
- return Mips16SPAliasReg;
-}
-unsigned MipsFunctionInfo::getMips16SPAliasReg() {
- // Return if it has already been initialized.
- if (Mips16SPAliasReg)
- return Mips16SPAliasReg;
-
- const TargetRegisterClass *RC = &Mips::CPU16RegsRegClass;
- return Mips16SPAliasReg = MF.getRegInfo().createVirtualRegister(RC);
-}
-
void MipsFunctionInfo::createEhDataRegsFI() {
for (int I = 0; I < 4; ++I) {
const TargetRegisterClass *RC =
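
Note: the nested conditional operator in getGlobalBaseReg above is hard to scan. It is equivalent to the following if/else chain (a readability sketch only; it assumes the tail elided by the hunk picks GPR64 for N64 and GPR32 otherwise, as in the surrounding code):

    const TargetRegisterClass *RC;
    if (STI.inMips16Mode())
      RC = &Mips::CPU16RegsRegClass;
    else if (STI.inMicroMipsMode())
      RC = STI.hasMips64() ? &Mips::GPRMM16_64RegClass // new in this patch
                           : &Mips::GPRMM16RegClass;
    else if (static_cast<const MipsTargetMachine &>(MF.getTarget())
                 .getABI()
                 .IsN64())
      RC = &Mips::GPR64RegClass;
    else
      RC = &Mips::GPR32RegClass;
    return GlobalBaseReg = MF.getRegInfo().createVirtualRegister(RC);
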
diff --git a/lib/Target/Mips/MipsMachineFunction.h b/lib/Target/Mips/MipsMachineFunction.h
index a2f6ee03604f..c9e5fddc1932 100644
--- a/lib/Target/Mips/MipsMachineFunction.h
+++ b/lib/Target/Mips/MipsMachineFunction.h
@@ -19,12 +19,9 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/IR/ValueMap.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include <map>
-#include <string>
-#include <utility>
namespace llvm {
@@ -33,8 +30,8 @@ namespace llvm {
class MipsFunctionInfo : public MachineFunctionInfo {
public:
MipsFunctionInfo(MachineFunction &MF)
- : MF(MF), SRetReturnReg(0), GlobalBaseReg(0), Mips16SPAliasReg(0),
- VarArgsFrameIndex(0), CallsEhReturn(false), IsISR(false), SaveS2(false),
+ : MF(MF), SRetReturnReg(0), GlobalBaseReg(0), VarArgsFrameIndex(0),
+ CallsEhReturn(false), IsISR(false), SaveS2(false),
MoveF64ViaSpillFI(-1) {}
~MipsFunctionInfo();
@@ -45,9 +42,6 @@ public:
bool globalBaseRegSet() const;
unsigned getGlobalBaseReg();
- bool mips16SPAliasRegSet() const;
- unsigned getMips16SPAliasReg();
-
int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; }
@@ -104,11 +98,6 @@ private:
/// relocation models.
unsigned GlobalBaseReg;
- /// Mips16SPAliasReg - keeps track of the virtual register initialized for
- /// use as an alias for SP for use in load/store of halfword/byte from/to
- /// the stack
- unsigned Mips16SPAliasReg;
-
/// VarArgsFrameIndex - FrameIndex for start of varargs area.
int VarArgsFrameIndex;
diff --git a/lib/Target/Mips/MipsOs16.cpp b/lib/Target/Mips/MipsOs16.cpp
index b6cd79193cfc..8136907de4d7 100644
--- a/lib/Target/Mips/MipsOs16.cpp
+++ b/lib/Target/Mips/MipsOs16.cpp
@@ -111,22 +111,27 @@ static bool needsFP(Function &F) {
bool MipsOs16::runOnModule(Module &M) {
bool usingMask = Mips32FunctionMask.length() > 0;
bool doneUsingMask = false; // this will make it stop repeating
+
DEBUG(dbgs() << "Run on Module MipsOs16 \n" << Mips32FunctionMask << "\n");
if (usingMask)
DEBUG(dbgs() << "using mask \n" << Mips32FunctionMask << "\n");
+
unsigned int functionIndex = 0;
bool modified = false;
- for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
- if (F->isDeclaration()) continue;
- DEBUG(dbgs() << "Working on " << F->getName() << "\n");
+
+ for (auto &F : M) {
+ if (F.isDeclaration())
+ continue;
+
+ DEBUG(dbgs() << "Working on " << F.getName() << "\n");
if (usingMask) {
if (!doneUsingMask) {
if (functionIndex == Mips32FunctionMask.length())
functionIndex = 0;
switch (Mips32FunctionMask[functionIndex]) {
case '1':
- DEBUG(dbgs() << "mask forced mips32: " << F->getName() << "\n");
- F->addFnAttr("nomips16");
+ DEBUG(dbgs() << "mask forced mips32: " << F.getName() << "\n");
+ F.addFnAttr("nomips16");
break;
case '.':
doneUsingMask = true;
@@ -138,16 +143,17 @@ bool MipsOs16::runOnModule(Module &M) {
}
}
else {
- if (needsFP(*F)) {
- DEBUG(dbgs() << "os16 forced mips32: " << F->getName() << "\n");
- F->addFnAttr("nomips16");
+ if (needsFP(F)) {
+ DEBUG(dbgs() << "os16 forced mips32: " << F.getName() << "\n");
+ F.addFnAttr("nomips16");
}
else {
- DEBUG(dbgs() << "os16 forced mips16: " << F->getName() << "\n");
- F->addFnAttr("mips16");
+ DEBUG(dbgs() << "os16 forced mips16: " << F.getName() << "\n");
+ F.addFnAttr("mips16");
}
}
}
+
return modified;
}
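
Note: besides the range-for conversion, the mask walk in runOnModule is easy to misread. A standalone reduction of the behaviour visible in the hunk — '1' pins a function to MIPS32, '.' stops consulting the mask, and the index wraps at the mask's length. How other characters advance the index is not shown above, so the increment below is an assumption:

    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      const std::string Mask = "1.1"; // illustrative mask
      const std::vector<std::string> Funcs = {"f", "g", "h"};
      bool Done = false;
      unsigned Idx = 0;
      for (const auto &Name : Funcs) {
        if (Done)
          continue;
        if (Idx == Mask.length())
          Idx = 0;                // wrap, as in runOnModule above
        switch (Mask[Idx++]) {    // increment position: an assumption
        case '1':
          std::cout << "mask forced mips32: " << Name << "\n";
          break;
        case '.':
          Done = true;
          break;
        }
      }
    }
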
diff --git a/lib/Target/Mips/MipsRegisterInfo.cpp b/lib/Target/Mips/MipsRegisterInfo.cpp
index 28e5a425849f..860cf9cfd138 100644
--- a/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -13,7 +13,6 @@
#include "MipsRegisterInfo.h"
#include "Mips.h"
-#include "MipsAnalyzeImmediate.h"
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
#include "MipsSubtarget.h"
@@ -21,14 +20,13 @@
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -52,7 +50,21 @@ const TargetRegisterClass *
MipsRegisterInfo::getPointerRegClass(const MachineFunction &MF,
unsigned Kind) const {
MipsABIInfo ABI = MF.getSubtarget<MipsSubtarget>().getABI();
- return ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
+ MipsPtrClass PtrClassKind = static_cast<MipsPtrClass>(Kind);
+
+ switch (PtrClassKind) {
+ case MipsPtrClass::Default:
+ return ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
+ case MipsPtrClass::GPR16MM:
+ return ABI.ArePtrs64bit() ? &Mips::GPRMM16_64RegClass
+ : &Mips::GPRMM16RegClass;
+ case MipsPtrClass::StackPointer:
+ return ABI.ArePtrs64bit() ? &Mips::SP64RegClass : &Mips::SP32RegClass;
+ case MipsPtrClass::GlobalPointer:
+ return ABI.ArePtrs64bit() ? &Mips::GP64RegClass : &Mips::GP32RegClass;
+ }
+
+ llvm_unreachable("Unknown pointer kind");
}
unsigned
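
Note: the Kind value that reaches getPointerRegClass is just the integer an operand definition asked for; the new MipsPtrClass enum (declared in the MipsRegisterInfo.h hunk below) names the four choices. A standalone model of the dispatch, mapping each kind to its (32-bit, 64-bit) class pair — strings stand in for the real register classes:

    #include <cstdio>

    enum class MipsPtrClass { Default = 0, GPR16MM = 1, StackPointer = 2, GlobalPointer = 3 };

    const char *regClassFor(MipsPtrClass Kind, bool Ptrs64Bit) {
      switch (Kind) {
      case MipsPtrClass::Default:       return Ptrs64Bit ? "GPR64" : "GPR32";
      case MipsPtrClass::GPR16MM:       return Ptrs64Bit ? "GPRMM16_64" : "GPRMM16";
      case MipsPtrClass::StackPointer:  return Ptrs64Bit ? "SP64" : "SP32";
      case MipsPtrClass::GlobalPointer: return Ptrs64Bit ? "GP64" : "GP32";
      }
      return "unreachable"; // fully covered switch, as llvm_unreachable above
    }

    int main() {
      std::printf("lw16-style operand on O32: %s\n",
                  regClassFor(MipsPtrClass::GPR16MM, /*Ptrs64Bit=*/false));
    }
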
diff --git a/lib/Target/Mips/MipsRegisterInfo.h b/lib/Target/Mips/MipsRegisterInfo.h
index 5de68a21b73e..32f835e83108 100644
--- a/lib/Target/Mips/MipsRegisterInfo.h
+++ b/lib/Target/Mips/MipsRegisterInfo.h
@@ -23,18 +23,23 @@
namespace llvm {
class MipsRegisterInfo : public MipsGenRegisterInfo {
public:
- MipsRegisterInfo();
+ enum class MipsPtrClass {
+ /// The default register class for integer values.
+ Default = 0,
+ /// The subset of registers permitted in certain microMIPS instructions
+ /// such as lw16.
+ GPR16MM = 1,
+ /// The stack pointer only.
+ StackPointer = 2,
+ /// The global pointer only.
+ GlobalPointer = 3,
+ };
- /// getRegisterNumbering - Given the enum value for some register, e.g.
- /// Mips::RA, return the number that it corresponds to (e.g. 31).
- static unsigned getRegisterNumbering(unsigned RegEnum);
+ MipsRegisterInfo();
/// Get PIC indirect call register
static unsigned getPICCallReg();
- /// Adjust the Mips stack frame.
- void adjustMipsStackFrame(MachineFunction &MF) const;
-
/// Code Generation virtual methods...
const TargetRegisterClass *getPointerRegClass(const MachineFunction &MF,
unsigned Kind) const override;
@@ -57,9 +62,6 @@ public:
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS = nullptr) const override;
- void processFunctionBeforeFrameFinalized(MachineFunction &MF,
- RegScavenger *RS = nullptr) const;
-
// Stack realignment queries.
bool canRealignStack(const MachineFunction &MF) const override;
diff --git a/lib/Target/Mips/MipsRegisterInfo.td b/lib/Target/Mips/MipsRegisterInfo.td
index 02bcac5a3ddb..cfce7c8e6533 100644
--- a/lib/Target/Mips/MipsRegisterInfo.td
+++ b/lib/Target/Mips/MipsRegisterInfo.td
@@ -330,6 +330,12 @@ def GPR64 : RegisterClass<"Mips", [i64], 64, (add
// Reserved
K0_64, K1_64, GP_64, SP_64, FP_64, RA_64)>;
+def GPRMM16_64 : RegisterClass<"Mips", [i64], 64, (add
+ // Callee save
+ S0_64, S1_64,
+ // Return Values and Arguments
+ V0_64, V1_64, A0_64, A1_64, A2_64, A3_64)>;
+
def CPU16Regs : RegisterClass<"Mips", [i32], 32, (add
// Return Values and Arguments
V0, V1, A0, A1, A2, A3,
@@ -447,6 +453,13 @@ def COP2 : RegisterClass<"Mips", [i32], 32, (sequence "COP2%u", 0, 31)>,
def COP3 : RegisterClass<"Mips", [i32], 32, (sequence "COP3%u", 0, 31)>,
Unallocatable;
+// Stack pointer and global pointer classes for instructions that are limited
+// to a single register such as lwgp/lwsp in microMIPS.
+def SP32 : RegisterClass<"Mips", [i32], 32, (add SP)>, Unallocatable;
+def SP64 : RegisterClass<"Mips", [i64], 64, (add SP_64)>, Unallocatable;
+def GP32 : RegisterClass<"Mips", [i32], 32, (add GP)>, Unallocatable;
+def GP64 : RegisterClass<"Mips", [i64], 64, (add GP_64)>, Unallocatable;
+
// Octeon multiplier and product registers
def OCTEON_MPL : RegisterClass<"Mips", [i64], 64, (add MPL0, MPL1, MPL2)>,
Unallocatable;
diff --git a/lib/Target/Mips/MipsSEFrameLowering.cpp b/lib/Target/Mips/MipsSEFrameLowering.cpp
index a4abd62ee607..a7ddd7752736 100644
--- a/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -13,7 +13,6 @@
#include "MipsSEFrameLowering.h"
#include "MCTargetDesc/MipsBaseInfo.h"
-#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
@@ -26,7 +25,6 @@
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
@@ -87,10 +85,10 @@ ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
bool ExpandPseudo::expand() {
bool Expanded = false;
- for (MachineFunction::iterator BB = MF.begin(), BBEnd = MF.end();
- BB != BBEnd; ++BB)
- for (Iter I = BB->begin(), End = BB->end(); I != End;)
- Expanded |= expandInstr(*BB, I++);
+ for (auto &MBB : MF) {
+ for (Iter I = MBB.begin(), End = MBB.end(); I != End;)
+ Expanded |= expandInstr(MBB, I++);
+ }
return Expanded;
}
@@ -518,7 +516,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
unsigned VR = MF.getRegInfo().createVirtualRegister(RC);
assert(isInt<16>(MFI->getMaxAlignment()) &&
"Function's alignment size requirement is not supported.");
- int MaxAlign = - (signed) MFI->getMaxAlignment();
+ int MaxAlign = -(int)MFI->getMaxAlignment();
BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR).addReg(ZERO) .addImm(MaxAlign);
BuildMI(MBB, MBBI, dl, TII.get(AND), SP).addReg(SP).addReg(VR);
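
Note: the -(int) cast above is more than style. For a power-of-two alignment A, -(int)A is a mask whose low log2(A) bits are clear, so the ADDiu/AND pair materializes the mask and rounds $sp down to an A-byte boundary. A standalone check (illustrative values):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t SP = 0x7fff1234;  // illustrative stack pointer
      const int MaxAlign = -(int)16;   // as materialized by ADDiu above
      const uint32_t Aligned = SP & MaxAlign;
      assert(Aligned == 0x7fff1230);   // rounded down to a 16-byte boundary
      assert(Aligned % 16 == 0);
      return 0;
    }
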
diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index 6f001ea74b30..d9528da5a96d 100644
--- a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -47,7 +47,8 @@ void MipsSEDAGToDAGISel::addDSPCtrlRegOperands(bool IsDef, MachineInstr &MI,
MachineFunction &MF) {
MachineInstrBuilder MIB(MF, &MI);
unsigned Mask = MI.getOperand(1).getImm();
- unsigned Flag = IsDef ? RegState::ImplicitDefine : RegState::Implicit;
+ unsigned Flag =
+ IsDef ? RegState::ImplicitDefine : RegState::Implicit | RegState::Undef;
if (Mask & 1)
MIB.addReg(Mips::DSPPos, Flag);
@@ -162,7 +163,7 @@ void MipsSEDAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) {
return;
}
- if (MF.getTarget().getRelocationModel() == Reloc::Static) {
+ if (!MF.getTarget().isPositionIndependent()) {
// Set global register to __gnu_local_gp.
//
// lui $v0, %hi(__gnu_local_gp)
@@ -220,21 +221,25 @@ void MipsSEDAGToDAGISel::processFunctionAfterISel(MachineFunction &MF) {
MachineRegisterInfo *MRI = &MF.getRegInfo();
- for (MachineFunction::iterator MFI = MF.begin(), MFE = MF.end(); MFI != MFE;
- ++MFI)
- for (MachineBasicBlock::iterator I = MFI->begin(); I != MFI->end(); ++I) {
- if (I->getOpcode() == Mips::RDDSP)
- addDSPCtrlRegOperands(false, *I, MF);
- else if (I->getOpcode() == Mips::WRDSP)
- addDSPCtrlRegOperands(true, *I, MF);
- else
- replaceUsesWithZeroReg(MRI, *I);
+ for (auto &MBB: MF) {
+ for (auto &MI: MBB) {
+ switch (MI.getOpcode()) {
+ case Mips::RDDSP:
+ addDSPCtrlRegOperands(false, MI, MF);
+ break;
+ case Mips::WRDSP:
+ addDSPCtrlRegOperands(true, MI, MF);
+ break;
+ default:
+ replaceUsesWithZeroReg(MRI, MI);
+ }
}
+ }
}
-SDNode *MipsSEDAGToDAGISel::selectAddESubE(unsigned MOp, SDValue InFlag,
- SDValue CmpLHS, SDLoc DL,
- SDNode *Node) const {
+void MipsSEDAGToDAGISel::selectAddESubE(unsigned MOp, SDValue InFlag,
+ SDValue CmpLHS, const SDLoc &DL,
+ SDNode *Node) const {
unsigned Opc = InFlag.getOpcode(); (void)Opc;
assert(((Opc == ISD::ADDC || Opc == ISD::ADDE) ||
@@ -271,8 +276,7 @@ SDNode *MipsSEDAGToDAGISel::selectAddESubE(unsigned MOp, SDValue InFlag,
if (!C || C->getZExtValue())
AddCarry = CurDAG->getMachineNode(ADDuOp, DL, VT, SDValue(Carry, 0), RHS);
- return CurDAG->SelectNodeTo(Node, MOp, VT, MVT::Glue, LHS,
- SDValue(AddCarry, 0));
+ CurDAG->SelectNodeTo(Node, MOp, VT, MVT::Glue, LHS, SDValue(AddCarry, 0));
}
/// Match frameindex
@@ -327,7 +331,7 @@ bool MipsSEDAGToDAGISel::selectAddrRegImm(SDValue Addr, SDValue &Base,
return true;
}
- if (TM.getRelocationModel() != Reloc::PIC_) {
+ if (!TM.isPositionIndependent()) {
if ((Addr.getOpcode() == ISD::TargetExternalSymbol ||
Addr.getOpcode() == ISD::TargetGlobalAddress))
return false;
@@ -364,18 +368,6 @@ bool MipsSEDAGToDAGISel::selectAddrRegImm(SDValue Addr, SDValue &Base,
/// ComplexPattern used on MipsInstrInfo
/// Used on Mips Load/Store instructions
-bool MipsSEDAGToDAGISel::selectAddrRegReg(SDValue Addr, SDValue &Base,
- SDValue &Offset) const {
- // Operand is a result from an ADD.
- if (Addr.getOpcode() == ISD::ADD) {
- Base = Addr.getOperand(0);
- Offset = Addr.getOperand(1);
- return true;
- }
-
- return false;
-}
-
bool MipsSEDAGToDAGISel::selectAddrDefault(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
Base = Addr;
@@ -411,6 +403,18 @@ bool MipsSEDAGToDAGISel::selectAddrRegImm10(SDValue Addr, SDValue &Base,
return false;
}
+/// Used on microMIPS LWC2, LDC2, SWC2 and SDC2 instructions (11-bit offset)
+bool MipsSEDAGToDAGISel::selectAddrRegImm11(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ if (selectAddrFrameIndex(Addr, Base, Offset))
+ return true;
+
+ if (selectAddrFrameIndexOffset(Addr, Base, Offset, 11))
+ return true;
+
+ return false;
+}
+
/// Used on microMIPS Load/Store unaligned instructions (12-bit offset)
bool MipsSEDAGToDAGISel::selectAddrRegImm12(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
@@ -434,12 +438,24 @@ bool MipsSEDAGToDAGISel::selectAddrRegImm16(SDValue Addr, SDValue &Base,
return false;
}
-bool MipsSEDAGToDAGISel::selectIntAddrMM(SDValue Addr, SDValue &Base,
+bool MipsSEDAGToDAGISel::selectIntAddr11MM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ return selectAddrRegImm11(Addr, Base, Offset) ||
+ selectAddrDefault(Addr, Base, Offset);
+}
+
+bool MipsSEDAGToDAGISel::selectIntAddr12MM(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
return selectAddrRegImm12(Addr, Base, Offset) ||
selectAddrDefault(Addr, Base, Offset);
}
+bool MipsSEDAGToDAGISel::selectIntAddr16MM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ return selectAddrRegImm16(Addr, Base, Offset) ||
+ selectAddrDefault(Addr, Base, Offset);
+}
+
bool MipsSEDAGToDAGISel::selectIntAddrLSL2MM(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
if (selectAddrFrameIndexOffset(Addr, Base, Offset, 7)) {
@@ -702,7 +718,7 @@ bool MipsSEDAGToDAGISel::selectVSplatUimmInvPow2(SDValue N,
return false;
}
-std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
+bool MipsSEDAGToDAGISel::trySelect(SDNode *Node) {
unsigned Opcode = Node->getOpcode();
SDLoc DL(Node);
@@ -710,16 +726,14 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
// Instruction Selection not handled by the auto-generated
// tablegen selection should be handled here.
///
- SDNode *Result;
-
switch(Opcode) {
default: break;
case ISD::SUBE: {
SDValue InFlag = Node->getOperand(2);
unsigned Opc = Subtarget->isGP64bit() ? Mips::DSUBu : Mips::SUBu;
- Result = selectAddESubE(Opc, InFlag, InFlag.getOperand(0), DL, Node);
- return std::make_pair(true, Result);
+ selectAddESubE(Opc, InFlag, InFlag.getOperand(0), DL, Node);
+ return true;
}
case ISD::ADDE: {
@@ -727,8 +741,8 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
break;
SDValue InFlag = Node->getOperand(2);
unsigned Opc = Subtarget->isGP64bit() ? Mips::DADDu : Mips::ADDu;
- Result = selectAddESubE(Opc, InFlag, InFlag.getValue(0), DL, Node);
- return std::make_pair(true, Result);
+ selectAddESubE(Opc, InFlag, InFlag.getValue(0), DL, Node);
+ return true;
}
case ISD::ConstantFP: {
@@ -737,20 +751,20 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
if (Subtarget->isGP64bit()) {
SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
Mips::ZERO_64, MVT::i64);
- Result = CurDAG->getMachineNode(Mips::DMTC1, DL, MVT::f64, Zero);
+ ReplaceNode(Node,
+ CurDAG->getMachineNode(Mips::DMTC1, DL, MVT::f64, Zero));
} else if (Subtarget->isFP64bit()) {
SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
Mips::ZERO, MVT::i32);
- Result = CurDAG->getMachineNode(Mips::BuildPairF64_64, DL, MVT::f64,
- Zero, Zero);
+ ReplaceNode(Node, CurDAG->getMachineNode(Mips::BuildPairF64_64, DL,
+ MVT::f64, Zero, Zero));
} else {
SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
Mips::ZERO, MVT::i32);
- Result = CurDAG->getMachineNode(Mips::BuildPairF64, DL, MVT::f64, Zero,
- Zero);
+ ReplaceNode(Node, CurDAG->getMachineNode(Mips::BuildPairF64, DL,
+ MVT::f64, Zero, Zero));
}
-
- return std::make_pair(true, Result);
+ return true;
}
break;
}
@@ -793,7 +807,8 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
SDValue(RegOpnd, 0), ImmOpnd);
}
- return std::make_pair(true, RegOpnd);
+ ReplaceNode(Node, RegOpnd);
+ return true;
}
case ISD::INTRINSIC_W_CHAIN: {
@@ -806,7 +821,8 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
SDValue RegIdx = Node->getOperand(2);
SDValue Reg = CurDAG->getCopyFromReg(ChainIn, DL,
getMSACtrlReg(RegIdx), MVT::i32);
- return std::make_pair(true, Reg.getNode());
+ ReplaceNode(Node, Reg.getNode());
+ return true;
}
}
break;
@@ -820,10 +836,10 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
case Intrinsic::mips_move_v:
// Like an assignment but will always produce a move.v even if
// unnecessary.
- return std::make_pair(true,
- CurDAG->getMachineNode(Mips::MOVE_V, DL,
- Node->getValueType(0),
- Node->getOperand(1)));
+ ReplaceNode(Node, CurDAG->getMachineNode(Mips::MOVE_V, DL,
+ Node->getValueType(0),
+ Node->getOperand(1)));
+ return true;
}
break;
}
@@ -839,7 +855,8 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
SDValue Value = Node->getOperand(3);
SDValue ChainOut = CurDAG->getCopyToReg(ChainIn, DL,
getMSACtrlReg(RegIdx), Value);
- return std::make_pair(true, ChainOut.getNode());
+ ReplaceNode(Node, ChainOut.getNode());
+ return true;
}
}
break;
@@ -864,8 +881,8 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, DestReg,
SDValue(Rdhwr, 0));
SDValue ResNode = CurDAG->getCopyFromReg(Chain, DL, DestReg, PtrVT);
- ReplaceUses(SDValue(Node, 0), ResNode);
- return std::make_pair(true, ResNode.getNode());
+ ReplaceNode(Node, ResNode.getNode());
+ return true;
}
case ISD::BUILD_VECTOR: {
@@ -890,16 +907,16 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
EVT ViaVecTy;
if (!Subtarget->hasMSA() || !BVN->getValueType(0).is128BitVector())
- return std::make_pair(false, nullptr);
+ return false;
if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
HasAnyUndefs, 8,
!Subtarget->isLittle()))
- return std::make_pair(false, nullptr);
+ return false;
switch (SplatBitSize) {
default:
- return std::make_pair(false, nullptr);
+ return false;
case 8:
LdiOp = Mips::LDI_B;
ViaVecTy = MVT::v16i8;
@@ -919,7 +936,7 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
}
if (!SplatValue.isSignedIntN(10))
- return std::make_pair(false, nullptr);
+ return false;
SDValue Imm = CurDAG->getTargetConstant(SplatValue, DL,
ViaVecTy.getVectorElementType());
@@ -940,12 +957,13 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
MVT::i32));
}
- return std::make_pair(true, Res);
+ ReplaceNode(Node, Res);
+ return true;
}
}
- return std::make_pair(false, nullptr);
+ return false;
}
bool MipsSEDAGToDAGISel::
@@ -1015,6 +1033,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
return true;
}
-FunctionPass *llvm::createMipsSEISelDag(MipsTargetMachine &TM) {
- return new MipsSEDAGToDAGISel(TM);
+FunctionPass *llvm::createMipsSEISelDag(MipsTargetMachine &TM,
+ CodeGenOpt::Level OptLevel) {
+ return new MipsSEDAGToDAGISel(TM, OptLevel);
}
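
Note: condensing the hunks above — the old selectNode returned a (handled, replacement) pair, while trySelect reports only whether it consumed the node; replacement now happens in place through CurDAG->SelectNodeTo or ReplaceNode before returning true. The SUBE case, reassembled from the hunks as one piece (a sketch of the new contract, not new code):

    bool MipsSEDAGToDAGISel::trySelect(SDNode *Node) {
      switch (Node->getOpcode()) {
      case ISD::SUBE: {
        SDValue InFlag = Node->getOperand(2);
        unsigned Opc = Subtarget->isGP64bit() ? Mips::DSUBu : Mips::SUBu;
        // selectAddESubE now returns void; it ends in CurDAG->SelectNodeTo,
        // which rewrites Node in place.
        selectAddESubE(Opc, InFlag, InFlag.getOperand(0), SDLoc(Node), Node);
        return true;
      }
      default:
        return false; // defer to the tablegen-generated matcher
      }
    }
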
diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.h b/lib/Target/Mips/MipsSEISelDAGToDAG.h
index a894034020e9..0f08b72a334e 100644
--- a/lib/Target/Mips/MipsSEISelDAGToDAG.h
+++ b/lib/Target/Mips/MipsSEISelDAGToDAG.h
@@ -21,7 +21,8 @@ namespace llvm {
class MipsSEDAGToDAGISel : public MipsDAGToDAGISel {
public:
- explicit MipsSEDAGToDAGISel(MipsTargetMachine &TM) : MipsDAGToDAGISel(TM) {}
+ explicit MipsSEDAGToDAGISel(MipsTargetMachine &TM, CodeGenOpt::Level OL)
+ : MipsDAGToDAGISel(TM, OL) {}
private:
@@ -34,11 +35,12 @@ private:
bool replaceUsesWithZeroReg(MachineRegisterInfo *MRI, const MachineInstr&);
- std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, SDLoc dl,
- EVT Ty, bool HasLo, bool HasHi);
+ std::pair<SDNode *, SDNode *> selectMULT(SDNode *N, unsigned Opc,
+ const SDLoc &dl, EVT Ty, bool HasLo,
+ bool HasHi);
- SDNode *selectAddESubE(unsigned MOp, SDValue InFlag, SDValue CmpLHS,
- SDLoc DL, SDNode *Node) const;
+ void selectAddESubE(unsigned MOp, SDValue InFlag, SDValue CmpLHS,
+ const SDLoc &DL, SDNode *Node) const;
bool selectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset) const;
bool selectAddrFrameIndexOffset(SDValue Addr, SDValue &Base, SDValue &Offset,
@@ -47,9 +49,6 @@ private:
bool selectAddrRegImm(SDValue Addr, SDValue &Base,
SDValue &Offset) const override;
- bool selectAddrRegReg(SDValue Addr, SDValue &Base,
- SDValue &Offset) const override;
-
bool selectAddrDefault(SDValue Addr, SDValue &Base,
SDValue &Offset) const override;
@@ -62,14 +61,23 @@ private:
bool selectAddrRegImm10(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
+ bool selectAddrRegImm11(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
bool selectAddrRegImm12(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
bool selectAddrRegImm16(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
- bool selectIntAddrMM(SDValue Addr, SDValue &Base,
- SDValue &Offset) const override;
+ bool selectIntAddr11MM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const override;
+
+ bool selectIntAddr12MM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const override;
+
+ bool selectIntAddr16MM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const override;
bool selectIntAddrLSL2MM(SDValue Addr, SDValue &Base,
SDValue &Offset) const override;
@@ -111,7 +119,7 @@ private:
/// starting at bit zero.
bool selectVSplatMaskR(SDValue N, SDValue &Imm) const override;
- std::pair<bool, SDNode*> selectNode(SDNode *Node) override;
+ bool trySelect(SDNode *Node) override;
void processFunctionAfterISel(MachineFunction &MF) override;
@@ -124,8 +132,8 @@ private:
std::vector<SDValue> &OutOps) override;
};
-FunctionPass *createMipsSEISelDag(MipsTargetMachine &TM);
-
+FunctionPass *createMipsSEISelDag(MipsTargetMachine &TM,
+ CodeGenOpt::Level OptLevel);
}
#endif
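
Note: in the new selectAddrRegImm11 and its siblings, the trailing number is the signed bit-width the instruction can encode, so an N-bit form reaches offsets in [-2^(N-1), 2^(N-1)-1]. A quick table of the widths this header now distinguishes (a sketch; it assumes the offsets are signed, as the simm naming in the .td hunks suggests):

    #include <cstdio>

    int main() {
      for (int N : {10, 11, 12, 16})
        std::printf("simm%-2d offsets: [%d, %d]\n", N,
                    -(1 << (N - 1)), (1 << (N - 1)) - 1);
      return 0;
    }
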
diff --git a/lib/Target/Mips/MipsSEISelLowering.cpp b/lib/Target/Mips/MipsSEISelLowering.cpp
index efe22fba98ce..80c000d5746d 100644
--- a/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -793,7 +793,7 @@ static SDValue performSUBECombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-static SDValue genConstMult(SDValue X, uint64_t C, SDLoc DL, EVT VT,
+static SDValue genConstMult(SDValue X, uint64_t C, const SDLoc &DL, EVT VT,
EVT ShiftTy, SelectionDAG &DAG) {
// Clear the upper (64 - VT.sizeInBits) bits.
C &= ((uint64_t)-1) >> (64 - VT.getSizeInBits());
@@ -1111,9 +1111,9 @@ MipsSETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
}
MachineBasicBlock *
-MipsSETargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+MipsSETargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default:
return MipsTargetLowering::EmitInstrWithCustomInserter(MI, BB);
case Mips::BPOSGE32_PSEUDO:
@@ -1218,17 +1218,14 @@ SDValue MipsSETargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
EVT PtrVT = Ptr.getValueType();
// i32 load from lower address.
- SDValue Lo = DAG.getLoad(MVT::i32, DL, Chain, Ptr,
- MachinePointerInfo(), Nd.isVolatile(),
- Nd.isNonTemporal(), Nd.isInvariant(),
- Nd.getAlignment());
+ SDValue Lo = DAG.getLoad(MVT::i32, DL, Chain, Ptr, MachinePointerInfo(),
+ Nd.getAlignment(), Nd.getMemOperand()->getFlags());
// i32 load from higher address.
Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, DL, PtrVT));
- SDValue Hi = DAG.getLoad(MVT::i32, DL, Lo.getValue(1), Ptr,
- MachinePointerInfo(), Nd.isVolatile(),
- Nd.isNonTemporal(), Nd.isInvariant(),
- std::min(Nd.getAlignment(), 4U));
+ SDValue Hi = DAG.getLoad(
+ MVT::i32, DL, Lo.getValue(1), Ptr, MachinePointerInfo(),
+ std::min(Nd.getAlignment(), 4U), Nd.getMemOperand()->getFlags());
if (!Subtarget.isLittle())
std::swap(Lo, Hi);
@@ -1257,15 +1254,15 @@ SDValue MipsSETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
std::swap(Lo, Hi);
// i32 store to lower address.
- Chain = DAG.getStore(Chain, DL, Lo, Ptr, MachinePointerInfo(),
- Nd.isVolatile(), Nd.isNonTemporal(), Nd.getAlignment(),
- Nd.getAAInfo());
+ Chain =
+ DAG.getStore(Chain, DL, Lo, Ptr, MachinePointerInfo(), Nd.getAlignment(),
+ Nd.getMemOperand()->getFlags(), Nd.getAAInfo());
// i32 store to higher address.
Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, DL, PtrVT));
return DAG.getStore(Chain, DL, Hi, Ptr, MachinePointerInfo(),
- Nd.isVolatile(), Nd.isNonTemporal(),
- std::min(Nd.getAlignment(), 4U), Nd.getAAInfo());
+ std::min(Nd.getAlignment(), 4U),
+ Nd.getMemOperand()->getFlags(), Nd.getAAInfo());
}
SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc,
@@ -1292,8 +1289,7 @@ SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc,
return DAG.getMergeValues(Vals, DL);
}
-
-static SDValue initAccumulator(SDValue In, SDLoc DL, SelectionDAG &DAG) {
+static SDValue initAccumulator(SDValue In, const SDLoc &DL, SelectionDAG &DAG) {
SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
DAG.getConstant(0, DL, MVT::i32));
SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
@@ -1301,7 +1297,7 @@ static SDValue initAccumulator(SDValue In, SDLoc DL, SelectionDAG &DAG) {
return DAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, InLo, InHi);
}
-static SDValue extractLOHI(SDValue Op, SDLoc DL, SelectionDAG &DAG) {
+static SDValue extractLOHI(SDValue Op, const SDLoc &DL, SelectionDAG &DAG) {
SDValue Lo = DAG.getNode(MipsISD::MFLO, DL, MVT::i32, Op);
SDValue Hi = DAG.getNode(MipsISD::MFHI, DL, MVT::i32, Op);
return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);
@@ -1401,8 +1397,8 @@ static SDValue lowerMSASplatZExt(SDValue Op, unsigned OpNr, SelectionDAG &DAG) {
SDValue Ops[16] = { LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB,
LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB };
- SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy,
- makeArrayRef(Ops, ViaVecTy.getVectorNumElements()));
+ SDValue Result = DAG.getBuildVector(
+ ViaVecTy, DL, makeArrayRef(Ops, ViaVecTy.getVectorNumElements()));
if (ViaVecTy != ResVecTy)
Result = DAG.getNode(ISD::BITCAST, DL, ResVecTy, Result);
@@ -1442,8 +1438,8 @@ static SDValue getBuildVectorSplat(EVT VecTy, SDValue SplatValue,
SplatValueA, SplatValueB, SplatValueA, SplatValueB,
SplatValueA, SplatValueB, SplatValueA, SplatValueB };
- SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy,
- makeArrayRef(Ops, ViaVecTy.getVectorNumElements()));
+ SDValue Result = DAG.getBuildVector(
+ ViaVecTy, DL, makeArrayRef(Ops, ViaVecTy.getVectorNumElements()));
if (VecTy != ViaVecTy)
Result = DAG.getNode(ISD::BITCAST, DL, VecTy, Result);
@@ -1471,10 +1467,10 @@ static SDValue lowerMSABinaryBitImmIntr(SDValue Op, SelectionDAG &DAG,
if (BigEndian)
std::swap(BitImmLoOp, BitImmHiOp);
- Exp2Imm =
- DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, BitImmLoOp,
- BitImmHiOp, BitImmLoOp, BitImmHiOp));
+ Exp2Imm = DAG.getNode(
+ ISD::BITCAST, DL, MVT::v2i64,
+ DAG.getBuildVector(MVT::v4i32, DL,
+ {BitImmLoOp, BitImmHiOp, BitImmLoOp, BitImmHiOp}));
}
}
@@ -1860,7 +1856,7 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
// If ResTy is v2i64 then the type legalizer will break this node down into
// an equivalent v4i32.
- return DAG.getNode(ISD::BUILD_VECTOR, DL, ResTy, Ops);
+ return DAG.getBuildVector(ResTy, DL, Ops);
}
case Intrinsic::mips_fexp2_w:
case Intrinsic::mips_fexp2_d: {
@@ -2166,6 +2162,10 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::mips_xori_b:
return DAG.getNode(ISD::XOR, DL, Op->getValueType(0),
Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::thread_pointer: {
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
+ return DAG.getNode(MipsISD::ThreadPointer, DL, PtrVT);
+ }
}
}
@@ -2178,9 +2178,8 @@ static SDValue lowerMSALoadIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
EVT PtrTy = Address->getValueType(0);
Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);
-
- return DAG.getLoad(ResTy, DL, ChainIn, Address, MachinePointerInfo(), false,
- false, false, 16);
+ return DAG.getLoad(ResTy, DL, ChainIn, Address, MachinePointerInfo(),
+ /* Alignment = */ 16);
}
SDValue MipsSETargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
@@ -2247,8 +2246,8 @@ static SDValue lowerMSAStoreIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);
- return DAG.getStore(ChainIn, DL, Value, Address, MachinePointerInfo(), false,
- false, 16);
+ return DAG.getStore(ChainIn, DL, Value, Address, MachinePointerInfo(),
+ /* Alignment = */ 16);
}
SDValue MipsSETargetLowering::lowerINTRINSIC_VOID(SDValue Op,
@@ -2311,7 +2310,7 @@ lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
}
static bool isConstantOrUndef(const SDValue Op) {
- if (Op->getOpcode() == ISD::UNDEF)
+ if (Op->isUndef())
return true;
if (isa<ConstantSDNode>(Op))
return true;
@@ -2841,7 +2840,7 @@ static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
++I)
Ops.push_back(DAG.getTargetConstant(*I, DL, MaskEltTy));
- SDValue MaskVec = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVecTy, Ops);
+ SDValue MaskVec = DAG.getBuildVector(MaskVecTy, DL, Ops);
if (Using1stVec && Using2ndVec) {
Op0 = Op->getOperand(0);
@@ -2883,32 +2882,27 @@ SDValue MipsSETargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
// MipsISD::VSHF.
if (isVECTOR_SHUFFLE_SPLATI(Op, ResTy, Indices, DAG))
return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG);
- SDValue Result = lowerVECTOR_SHUFFLE_ILVEV(Op, ResTy, Indices, DAG);
- if (Result.getNode())
+ SDValue Result;
+ if ((Result = lowerVECTOR_SHUFFLE_ILVEV(Op, ResTy, Indices, DAG)))
return Result;
- Result = lowerVECTOR_SHUFFLE_ILVOD(Op, ResTy, Indices, DAG);
- if (Result.getNode())
+ if ((Result = lowerVECTOR_SHUFFLE_ILVOD(Op, ResTy, Indices, DAG)))
return Result;
- Result = lowerVECTOR_SHUFFLE_ILVL(Op, ResTy, Indices, DAG);
- if (Result.getNode())
+ if ((Result = lowerVECTOR_SHUFFLE_ILVL(Op, ResTy, Indices, DAG)))
return Result;
- Result = lowerVECTOR_SHUFFLE_ILVR(Op, ResTy, Indices, DAG);
- if (Result.getNode())
+ if ((Result = lowerVECTOR_SHUFFLE_ILVR(Op, ResTy, Indices, DAG)))
return Result;
- Result = lowerVECTOR_SHUFFLE_PCKEV(Op, ResTy, Indices, DAG);
- if (Result.getNode())
+ if ((Result = lowerVECTOR_SHUFFLE_PCKEV(Op, ResTy, Indices, DAG)))
return Result;
- Result = lowerVECTOR_SHUFFLE_PCKOD(Op, ResTy, Indices, DAG);
- if (Result.getNode())
+ if ((Result = lowerVECTOR_SHUFFLE_PCKOD(Op, ResTy, Indices, DAG)))
return Result;
- Result = lowerVECTOR_SHUFFLE_SHF(Op, ResTy, Indices, DAG);
- if (Result.getNode())
+ if ((Result = lowerVECTOR_SHUFFLE_SHF(Op, ResTy, Indices, DAG)))
return Result;
return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG);
}
-MachineBasicBlock * MipsSETargetLowering::
-emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
+MachineBasicBlock *
+MipsSETargetLowering::emitBPOSGE32(MachineInstr &MI,
+ MachineBasicBlock *BB) const {
// $bb:
// bposge32_pseudo $vr0
// =>
@@ -2925,7 +2919,7 @@ emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
const TargetRegisterClass *RC = &Mips::GPR32RegClass;
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineFunction::iterator It = std::next(MachineFunction::iterator(BB));
MachineFunction *F = BB->getParent();
@@ -2949,6 +2943,8 @@ emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
// Insert the real bposge32 instruction to $BB.
BuildMI(BB, DL, TII->get(Mips::BPOSGE32)).addMBB(TBB);
+ // Insert the real bposge32c instruction to $BB.
+ BuildMI(BB, DL, TII->get(Mips::BPOSGE32C_MMR3)).addMBB(TBB);
// Fill $FBB.
unsigned VR2 = RegInfo.createVirtualRegister(RC);
@@ -2963,16 +2959,18 @@ emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
// Insert phi function to $Sink.
BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
- MI->getOperand(0).getReg())
- .addReg(VR2).addMBB(FBB).addReg(VR1).addMBB(TBB);
+ MI.getOperand(0).getReg())
+ .addReg(VR2)
+ .addMBB(FBB)
+ .addReg(VR1)
+ .addMBB(TBB);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return Sink;
}
-MachineBasicBlock * MipsSETargetLowering::
-emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
- unsigned BranchOp) const{
+MachineBasicBlock *MipsSETargetLowering::emitMSACBranchPseudo(
+ MachineInstr &MI, MachineBasicBlock *BB, unsigned BranchOp) const {
// $bb:
// vany_nonzero $rd, $ws
// =>
@@ -2990,7 +2988,7 @@ emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
const TargetRegisterClass *RC = &Mips::GPR32RegClass;
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineFunction::iterator It = std::next(MachineFunction::iterator(BB));
MachineFunction *F = BB->getParent();
@@ -3014,8 +3012,8 @@ emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
// Insert the real bnz.b instruction to $BB.
BuildMI(BB, DL, TII->get(BranchOp))
- .addReg(MI->getOperand(1).getReg())
- .addMBB(TBB);
+ .addReg(MI.getOperand(1).getReg())
+ .addMBB(TBB);
// Fill $FBB.
unsigned RD1 = RegInfo.createVirtualRegister(RC);
@@ -3030,10 +3028,13 @@ emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
// Insert phi function to $Sink.
BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
- MI->getOperand(0).getReg())
- .addReg(RD1).addMBB(FBB).addReg(RD2).addMBB(TBB);
+ MI.getOperand(0).getReg())
+ .addReg(RD1)
+ .addMBB(FBB)
+ .addReg(RD2)
+ .addMBB(TBB);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return Sink;
}
@@ -3047,14 +3048,15 @@ emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
// When n is zero, the equivalent operation can be performed with (potentially)
// zero instructions due to register overlaps. This optimization is never valid
// for lane 1 because it would require FR=0 mode, which isn't supported by MSA.
-MachineBasicBlock * MipsSETargetLowering::
-emitCOPY_FW(MachineInstr *MI, MachineBasicBlock *BB) const{
+MachineBasicBlock *
+MipsSETargetLowering::emitCOPY_FW(MachineInstr &MI,
+ MachineBasicBlock *BB) const {
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
- DebugLoc DL = MI->getDebugLoc();
- unsigned Fd = MI->getOperand(0).getReg();
- unsigned Ws = MI->getOperand(1).getReg();
- unsigned Lane = MI->getOperand(2).getImm();
+ DebugLoc DL = MI.getDebugLoc();
+ unsigned Fd = MI.getOperand(0).getReg();
+ unsigned Ws = MI.getOperand(1).getReg();
+ unsigned Lane = MI.getOperand(2).getImm();
if (Lane == 0) {
unsigned Wt = Ws;
@@ -3076,7 +3078,7 @@ emitCOPY_FW(MachineInstr *MI, MachineBasicBlock *BB) const{
BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo);
}
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -3090,16 +3092,17 @@ emitCOPY_FW(MachineInstr *MI, MachineBasicBlock *BB) const{
// When n is zero, the equivalent operation can be performed with (potentially)
// zero instructions due to register overlaps. This optimization is always
// valid because FR=1 mode is the only mode supported by MSA.
-MachineBasicBlock * MipsSETargetLowering::
-emitCOPY_FD(MachineInstr *MI, MachineBasicBlock *BB) const{
+MachineBasicBlock *
+MipsSETargetLowering::emitCOPY_FD(MachineInstr &MI,
+ MachineBasicBlock *BB) const {
assert(Subtarget.isFP64bit());
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
- unsigned Fd = MI->getOperand(0).getReg();
- unsigned Ws = MI->getOperand(1).getReg();
- unsigned Lane = MI->getOperand(2).getImm() * 2;
- DebugLoc DL = MI->getDebugLoc();
+ unsigned Fd = MI.getOperand(0).getReg();
+ unsigned Ws = MI.getOperand(1).getReg();
+ unsigned Lane = MI.getOperand(2).getImm() * 2;
+ DebugLoc DL = MI.getDebugLoc();
if (Lane == 0)
BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_64);
@@ -3110,7 +3113,7 @@ emitCOPY_FD(MachineInstr *MI, MachineBasicBlock *BB) const{
BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_64);
}
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -3121,15 +3124,15 @@ emitCOPY_FD(MachineInstr *MI, MachineBasicBlock *BB) const{
// subreg_to_reg $wt:sub_lo, $fs
// insve_w $wd[$n], $wd_in, $wt[0]
MachineBasicBlock *
-MipsSETargetLowering::emitINSERT_FW(MachineInstr *MI,
+MipsSETargetLowering::emitINSERT_FW(MachineInstr &MI,
MachineBasicBlock *BB) const {
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
- DebugLoc DL = MI->getDebugLoc();
- unsigned Wd = MI->getOperand(0).getReg();
- unsigned Wd_in = MI->getOperand(1).getReg();
- unsigned Lane = MI->getOperand(2).getImm();
- unsigned Fs = MI->getOperand(3).getReg();
+ DebugLoc DL = MI.getDebugLoc();
+ unsigned Wd = MI.getOperand(0).getReg();
+ unsigned Wd_in = MI.getOperand(1).getReg();
+ unsigned Lane = MI.getOperand(2).getImm();
+ unsigned Fs = MI.getOperand(3).getReg();
unsigned Wt = RegInfo.createVirtualRegister(
Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass :
&Mips::MSA128WEvensRegClass);
@@ -3144,7 +3147,7 @@ MipsSETargetLowering::emitINSERT_FW(MachineInstr *MI,
.addReg(Wt)
.addImm(0);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -3155,17 +3158,17 @@ MipsSETargetLowering::emitINSERT_FW(MachineInstr *MI,
// subreg_to_reg $wt:sub_64, $fs
// insve_d $wd[$n], $wd_in, $wt[0]
MachineBasicBlock *
-MipsSETargetLowering::emitINSERT_FD(MachineInstr *MI,
+MipsSETargetLowering::emitINSERT_FD(MachineInstr &MI,
MachineBasicBlock *BB) const {
assert(Subtarget.isFP64bit());
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
- DebugLoc DL = MI->getDebugLoc();
- unsigned Wd = MI->getOperand(0).getReg();
- unsigned Wd_in = MI->getOperand(1).getReg();
- unsigned Lane = MI->getOperand(2).getImm();
- unsigned Fs = MI->getOperand(3).getReg();
+ DebugLoc DL = MI.getDebugLoc();
+ unsigned Wd = MI.getOperand(0).getReg();
+ unsigned Wd_in = MI.getOperand(1).getReg();
+ unsigned Lane = MI.getOperand(2).getImm();
+ unsigned Fs = MI.getOperand(3).getReg();
unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
@@ -3178,7 +3181,7 @@ MipsSETargetLowering::emitINSERT_FD(MachineInstr *MI,
.addReg(Wt)
.addImm(0);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -3202,22 +3205,23 @@ MipsSETargetLowering::emitINSERT_FD(MachineInstr *MI,
// (INSVE_[WD], $wdtmp2, 0, $wdtmp1, 0)
// (NEG $lanetmp2, $lanetmp1)
// (SLD_B $wd, $wdtmp2, $wdtmp2, $lanetmp2)
-MachineBasicBlock *
-MipsSETargetLowering::emitINSERT_DF_VIDX(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned EltSizeInBytes,
- bool IsFP) const {
+MachineBasicBlock *MipsSETargetLowering::emitINSERT_DF_VIDX(
+ MachineInstr &MI, MachineBasicBlock *BB, unsigned EltSizeInBytes,
+ bool IsFP) const {
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
- DebugLoc DL = MI->getDebugLoc();
- unsigned Wd = MI->getOperand(0).getReg();
- unsigned SrcVecReg = MI->getOperand(1).getReg();
- unsigned LaneReg = MI->getOperand(2).getReg();
- unsigned SrcValReg = MI->getOperand(3).getReg();
+ DebugLoc DL = MI.getDebugLoc();
+ unsigned Wd = MI.getOperand(0).getReg();
+ unsigned SrcVecReg = MI.getOperand(1).getReg();
+ unsigned LaneReg = MI.getOperand(2).getReg();
+ unsigned SrcValReg = MI.getOperand(3).getReg();
const TargetRegisterClass *VecRC = nullptr;
+ // FIXME: This should be true for N32 too.
const TargetRegisterClass *GPRRC =
Subtarget.isABI_N64() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
+ unsigned SubRegIdx = Subtarget.isABI_N64() ? Mips::sub_32 : 0;
+ unsigned ShiftOp = Subtarget.isABI_N64() ? Mips::DSLL : Mips::SLL;
unsigned EltLog2Size;
unsigned InsertOp = 0;
unsigned InsveOp = 0;
@@ -3262,7 +3266,7 @@ MipsSETargetLowering::emitINSERT_DF_VIDX(MachineInstr *MI,
// Convert the lane index into a byte index
if (EltSizeInBytes != 1) {
unsigned LaneTmp1 = RegInfo.createVirtualRegister(GPRRC);
- BuildMI(*BB, MI, DL, TII->get(Mips::SLL), LaneTmp1)
+ BuildMI(*BB, MI, DL, TII->get(ShiftOp), LaneTmp1)
.addReg(LaneReg)
.addImm(EltLog2Size);
LaneReg = LaneTmp1;
@@ -3273,7 +3277,7 @@ MipsSETargetLowering::emitINSERT_DF_VIDX(MachineInstr *MI,
BuildMI(*BB, MI, DL, TII->get(Mips::SLD_B), WdTmp1)
.addReg(SrcVecReg)
.addReg(SrcVecReg)
- .addReg(LaneReg);
+ .addReg(LaneReg, 0, SubRegIdx);
unsigned WdTmp2 = RegInfo.createVirtualRegister(VecRC);
if (IsFP) {
@@ -3302,9 +3306,9 @@ MipsSETargetLowering::emitINSERT_DF_VIDX(MachineInstr *MI,
BuildMI(*BB, MI, DL, TII->get(Mips::SLD_B), Wd)
.addReg(WdTmp2)
.addReg(WdTmp2)
- .addReg(LaneTmp2);
+ .addReg(LaneTmp2, 0, SubRegIdx);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -3316,13 +3320,13 @@ MipsSETargetLowering::emitINSERT_DF_VIDX(MachineInstr *MI,
// insert_subreg $wt2:subreg_lo, $wt1, $fs
// splati.w $wd, $wt2[0]
MachineBasicBlock *
-MipsSETargetLowering::emitFILL_FW(MachineInstr *MI,
+MipsSETargetLowering::emitFILL_FW(MachineInstr &MI,
MachineBasicBlock *BB) const {
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
- DebugLoc DL = MI->getDebugLoc();
- unsigned Wd = MI->getOperand(0).getReg();
- unsigned Fs = MI->getOperand(1).getReg();
+ DebugLoc DL = MI.getDebugLoc();
+ unsigned Wd = MI.getOperand(0).getReg();
+ unsigned Fs = MI.getOperand(1).getReg();
unsigned Wt1 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
unsigned Wt2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
@@ -3333,7 +3337,7 @@ MipsSETargetLowering::emitFILL_FW(MachineInstr *MI,
.addImm(Mips::sub_lo);
BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wd).addReg(Wt2).addImm(0);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -3345,15 +3349,15 @@ MipsSETargetLowering::emitFILL_FW(MachineInstr *MI,
// insert_subreg $wt2:subreg_64, $wt1, $fs
// splati.d $wd, $wt2[0]
MachineBasicBlock *
-MipsSETargetLowering::emitFILL_FD(MachineInstr *MI,
+MipsSETargetLowering::emitFILL_FD(MachineInstr &MI,
MachineBasicBlock *BB) const {
assert(Subtarget.isFP64bit());
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
- DebugLoc DL = MI->getDebugLoc();
- unsigned Wd = MI->getOperand(0).getReg();
- unsigned Fs = MI->getOperand(1).getReg();
+ DebugLoc DL = MI.getDebugLoc();
+ unsigned Wd = MI.getOperand(0).getReg();
+ unsigned Fs = MI.getOperand(1).getReg();
unsigned Wt1 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
unsigned Wt2 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
@@ -3364,7 +3368,7 @@ MipsSETargetLowering::emitFILL_FD(MachineInstr *MI,
.addImm(Mips::sub_64);
BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_D), Wd).addReg(Wt2).addImm(0);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -3375,25 +3379,25 @@ MipsSETargetLowering::emitFILL_FD(MachineInstr *MI,
// ldi.w $ws, 1
// fexp2.w $wd, $ws, $wt
MachineBasicBlock *
-MipsSETargetLowering::emitFEXP2_W_1(MachineInstr *MI,
+MipsSETargetLowering::emitFEXP2_W_1(MachineInstr &MI,
MachineBasicBlock *BB) const {
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
const TargetRegisterClass *RC = &Mips::MSA128WRegClass;
unsigned Ws1 = RegInfo.createVirtualRegister(RC);
unsigned Ws2 = RegInfo.createVirtualRegister(RC);
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
// Splat 1.0 into a vector
BuildMI(*BB, MI, DL, TII->get(Mips::LDI_W), Ws1).addImm(1);
BuildMI(*BB, MI, DL, TII->get(Mips::FFINT_U_W), Ws2).addReg(Ws1);
// Emit 1.0 * fexp2(Wt)
- BuildMI(*BB, MI, DL, TII->get(Mips::FEXP2_W), MI->getOperand(0).getReg())
+ BuildMI(*BB, MI, DL, TII->get(Mips::FEXP2_W), MI.getOperand(0).getReg())
.addReg(Ws2)
- .addReg(MI->getOperand(1).getReg());
+ .addReg(MI.getOperand(1).getReg());
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -3404,24 +3408,24 @@ MipsSETargetLowering::emitFEXP2_W_1(MachineInstr *MI,
// ldi.d $ws, 1
// fexp2.d $wd, $ws, $wt
MachineBasicBlock *
-MipsSETargetLowering::emitFEXP2_D_1(MachineInstr *MI,
+MipsSETargetLowering::emitFEXP2_D_1(MachineInstr &MI,
MachineBasicBlock *BB) const {
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
const TargetRegisterClass *RC = &Mips::MSA128DRegClass;
unsigned Ws1 = RegInfo.createVirtualRegister(RC);
unsigned Ws2 = RegInfo.createVirtualRegister(RC);
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
// Splat 1.0 into a vector
BuildMI(*BB, MI, DL, TII->get(Mips::LDI_D), Ws1).addImm(1);
BuildMI(*BB, MI, DL, TII->get(Mips::FFINT_U_D), Ws2).addReg(Ws1);
// Emit 1.0 * fexp2(Wt)
- BuildMI(*BB, MI, DL, TII->get(Mips::FEXP2_D), MI->getOperand(0).getReg())
+ BuildMI(*BB, MI, DL, TII->get(Mips::FEXP2_D), MI.getOperand(0).getReg())
.addReg(Ws2)
- .addReg(MI->getOperand(1).getReg());
+ .addReg(MI.getOperand(1).getReg());
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
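
All of the emit* hooks in this file now take the MachineInstr by reference rather than by pointer, matching the LLVM-wide API change in this import. The sketch below shows how a dispatcher feeding these helpers looks under the new signatures; the case list is abbreviated to three representative pseudos for illustration and is not the complete switch from the source.

  // Minimal sketch, assuming the emit* helpers declared in
  // MipsSEISelLowering.h below; only a few representative cases are shown.
  MachineBasicBlock *
  MipsSETargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                    MachineBasicBlock *BB) const {
    switch (MI.getOpcode()) {
    default:
      return MipsTargetLowering::EmitInstrWithCustomInserter(MI, BB);
    case Mips::BPOSGE32_PSEUDO:
      return emitBPOSGE32(MI, BB);   // MI is now passed by reference
    case Mips::COPY_FW_PSEUDO:
      return emitCOPY_FW(MI, BB);
    case Mips::FILL_FW_PSEUDO:
      return emitFILL_FW(MI, BB);
    }
  }
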
diff --git a/lib/Target/Mips/MipsSEISelLowering.h b/lib/Target/Mips/MipsSEISelLowering.h
index d44f8d82ec3e..54154662f261 100644
--- a/lib/Target/Mips/MipsSEISelLowering.h
+++ b/lib/Target/Mips/MipsSEISelLowering.h
@@ -40,7 +40,7 @@ namespace llvm {
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
+ EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const override;
bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
@@ -77,39 +77,39 @@ namespace llvm {
/// depending on the indices in the shuffle.
SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
- MachineBasicBlock *emitBPOSGE32(MachineInstr *MI,
+ MachineBasicBlock *emitBPOSGE32(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *emitMSACBranchPseudo(MachineInstr *MI,
+ MachineBasicBlock *emitMSACBranchPseudo(MachineInstr &MI,
MachineBasicBlock *BB,
unsigned BranchOp) const;
/// \brief Emit the COPY_FW pseudo instruction
- MachineBasicBlock *emitCOPY_FW(MachineInstr *MI,
+ MachineBasicBlock *emitCOPY_FW(MachineInstr &MI,
MachineBasicBlock *BB) const;
/// \brief Emit the COPY_FD pseudo instruction
- MachineBasicBlock *emitCOPY_FD(MachineInstr *MI,
+ MachineBasicBlock *emitCOPY_FD(MachineInstr &MI,
MachineBasicBlock *BB) const;
/// \brief Emit the INSERT_FW pseudo instruction
- MachineBasicBlock *emitINSERT_FW(MachineInstr *MI,
+ MachineBasicBlock *emitINSERT_FW(MachineInstr &MI,
MachineBasicBlock *BB) const;
/// \brief Emit the INSERT_FD pseudo instruction
- MachineBasicBlock *emitINSERT_FD(MachineInstr *MI,
+ MachineBasicBlock *emitINSERT_FD(MachineInstr &MI,
MachineBasicBlock *BB) const;
/// \brief Emit the INSERT_([BHWD]|F[WD])_VIDX pseudo instruction
- MachineBasicBlock *emitINSERT_DF_VIDX(MachineInstr *MI,
+ MachineBasicBlock *emitINSERT_DF_VIDX(MachineInstr &MI,
MachineBasicBlock *BB,
unsigned EltSizeInBytes,
bool IsFP) const;
/// \brief Emit the FILL_FW pseudo instruction
- MachineBasicBlock *emitFILL_FW(MachineInstr *MI,
+ MachineBasicBlock *emitFILL_FW(MachineInstr &MI,
MachineBasicBlock *BB) const;
/// \brief Emit the FILL_FD pseudo instruction
- MachineBasicBlock *emitFILL_FD(MachineInstr *MI,
+ MachineBasicBlock *emitFILL_FD(MachineInstr &MI,
MachineBasicBlock *BB) const;
/// \brief Emit the FEXP2_W_1 pseudo instructions.
- MachineBasicBlock *emitFEXP2_W_1(MachineInstr *MI,
+ MachineBasicBlock *emitFEXP2_W_1(MachineInstr &MI,
MachineBasicBlock *BB) const;
/// \brief Emit the FEXP2_D_1 pseudo instructions.
- MachineBasicBlock *emitFEXP2_D_1(MachineInstr *MI,
+ MachineBasicBlock *emitFEXP2_D_1(MachineInstr &MI,
MachineBasicBlock *BB) const;
};
}
diff --git a/lib/Target/Mips/MipsSEInstrInfo.cpp b/lib/Target/Mips/MipsSEInstrInfo.cpp
index d4aeaf928655..29107b2c1aa5 100644
--- a/lib/Target/Mips/MipsSEInstrInfo.cpp
+++ b/lib/Target/Mips/MipsSEInstrInfo.cpp
@@ -13,20 +13,20 @@
#include "MipsSEInstrInfo.h"
#include "InstPrinter/MipsInstPrinter.h"
+#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
MipsSEInstrInfo::MipsSEInstrInfo(const MipsSubtarget &STI)
- : MipsInstrInfo(STI, STI.getRelocationModel() == Reloc::PIC_ ? Mips::B
- : Mips::J),
+ : MipsInstrInfo(STI, STI.isPositionIndependent() ? Mips::B : Mips::J),
RI() {}
const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const {
@@ -38,17 +38,17 @@ const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const {
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
-unsigned MipsSEInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+unsigned MipsSEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- unsigned Opc = MI->getOpcode();
+ unsigned Opc = MI.getOpcode();
if ((Opc == Mips::LW) || (Opc == Mips::LD) ||
(Opc == Mips::LWC1) || (Opc == Mips::LDC1) || (Opc == Mips::LDC164)) {
- if ((MI->getOperand(1).isFI()) && // is a stack slot
- (MI->getOperand(2).isImm()) && // the imm is zero
- (isZeroImm(MI->getOperand(2)))) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
+ if ((MI.getOperand(1).isFI()) && // is a stack slot
+ (MI.getOperand(2).isImm()) && // the imm is zero
+ (isZeroImm(MI.getOperand(2)))) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
}
}
@@ -60,26 +60,26 @@ unsigned MipsSEInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
-unsigned MipsSEInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+unsigned MipsSEInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- unsigned Opc = MI->getOpcode();
+ unsigned Opc = MI.getOpcode();
if ((Opc == Mips::SW) || (Opc == Mips::SD) ||
(Opc == Mips::SWC1) || (Opc == Mips::SDC1) || (Opc == Mips::SDC164)) {
- if ((MI->getOperand(1).isFI()) && // is a stack slot
- (MI->getOperand(2).isImm()) && // the imm is zero
- (isZeroImm(MI->getOperand(2)))) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
+ if ((MI.getOperand(1).isFI()) && // is a stack slot
+ (MI.getOperand(2).isImm()) && // the imm is zero
+ (isZeroImm(MI.getOperand(2)))) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
}
}
return 0;
}
void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
unsigned Opc = 0, ZeroReg = 0;
bool isMicroMips = Subtarget.inMicroMipsMode();
@@ -129,9 +129,12 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
.addReg(SrcReg, getKillRegState(KillSrc)).addImm(1 << 4)
.addReg(DestReg, RegState::ImplicitDefine);
return;
+ } else if (Mips::MSACtrlRegClass.contains(DestReg)) {
+ BuildMI(MBB, I, DL, get(Mips::CTCMSA))
+ .addReg(DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
}
- else if (Mips::MSACtrlRegClass.contains(DestReg))
- Opc = Mips::CTCMSA;
}
else if (Mips::FGR32RegClass.contains(DestReg, SrcReg))
Opc = Mips::FMOV_S;
@@ -325,12 +328,12 @@ loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
}
}
-bool MipsSEInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
- MachineBasicBlock &MBB = *MI->getParent();
+bool MipsSEInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
+ MachineBasicBlock &MBB = *MI.getParent();
bool isMicroMips = Subtarget.inMicroMipsMode();
unsigned Opc;
- switch(MI->getDesc().getOpcode()) {
+ switch (MI.getDesc().getOpcode()) {
default:
return false;
case Mips::RetRA:
@@ -420,6 +423,14 @@ unsigned MipsSEInstrInfo::getOppositeBranchOpc(unsigned Opc) const {
case Mips::BC1F: return Mips::BC1T;
case Mips::BEQZC_MM: return Mips::BNEZC_MM;
case Mips::BNEZC_MM: return Mips::BEQZC_MM;
+ case Mips::BEQZC: return Mips::BNEZC;
+ case Mips::BNEZC: return Mips::BEQZC;
+ case Mips::BEQC: return Mips::BNEC;
+ case Mips::BNEC: return Mips::BEQC;
+ case Mips::BGTZC: return Mips::BLEZC;
+ case Mips::BGEZC: return Mips::BLTZC;
+ case Mips::BLTZC: return Mips::BGEZC;
+ case Mips::BLEZC: return Mips::BGTZC;
}
}
@@ -429,26 +440,33 @@ void MipsSEInstrInfo::adjustStackPtr(unsigned SP, int64_t Amount,
MachineBasicBlock::iterator I) const {
MipsABIInfo ABI = Subtarget.getABI();
DebugLoc DL;
- unsigned ADDu = ABI.GetPtrAdduOp();
unsigned ADDiu = ABI.GetPtrAddiuOp();
if (Amount == 0)
return;
- if (isInt<16>(Amount))// addi sp, sp, amount
+ if (isInt<16>(Amount)) {
+ // addi sp, sp, amount
BuildMI(MBB, I, DL, get(ADDiu), SP).addReg(SP).addImm(Amount);
- else { // Expand immediate that doesn't fit in 16-bit.
+ } else {
+ // For amounts that do not fit in a 16-bit immediate, synthesize Amount
+ // into a register inline and then add it to or subtract it from $sp.
+ unsigned Opc = ABI.GetPtrAdduOp();
+ if (Amount < 0) {
+ Opc = ABI.GetPtrSubuOp();
+ Amount = -Amount;
+ }
unsigned Reg = loadImmediate(Amount, MBB, I, DL, nullptr);
- BuildMI(MBB, I, DL, get(ADDu), SP).addReg(SP).addReg(Reg, RegState::Kill);
+ BuildMI(MBB, I, DL, get(Opc), SP).addReg(SP).addReg(Reg, RegState::Kill);
}
}
/// This function generates the sequence of instructions needed to get the
/// result of adding register REG and immediate IMM.
-unsigned
-MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator II, DebugLoc DL,
- unsigned *NewImm) const {
+unsigned MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator II,
+ const DebugLoc &DL,
+ unsigned *NewImm) const {
MipsAnalyzeImmediate AnalyzeImm;
const MipsSubtarget &STI = Subtarget;
MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
@@ -493,8 +511,12 @@ unsigned MipsSEInstrInfo::getAnalyzableBrOpc(unsigned Opc) const {
Opc == Mips::BEQ64 || Opc == Mips::BNE64 || Opc == Mips::BGTZ64 ||
Opc == Mips::BGEZ64 || Opc == Mips::BLTZ64 || Opc == Mips::BLEZ64 ||
Opc == Mips::BC1T || Opc == Mips::BC1F || Opc == Mips::B ||
- Opc == Mips::J || Opc == Mips::BEQZC_MM || Opc == Mips::BNEZC_MM) ?
- Opc : 0;
+ Opc == Mips::J || Opc == Mips::BEQZC_MM || Opc == Mips::BNEZC_MM ||
+ Opc == Mips::BEQC || Opc == Mips::BNEC || Opc == Mips::BLTC ||
+ Opc == Mips::BGEC || Opc == Mips::BLTUC || Opc == Mips::BGEUC ||
+ Opc == Mips::BGTZC || Opc == Mips::BLEZC || Opc == Mips::BGEZC ||
+ Opc == Mips::BLTZC || Opc == Mips::BEQZC || Opc == Mips::BNEZC ||
+ Opc == Mips::BC) ? Opc : 0;
}
void MipsSEInstrInfo::expandRetRA(MachineBasicBlock &MBB,
@@ -697,7 +719,7 @@ void MipsSEInstrInfo::expandEhReturn(MachineBasicBlock &MBB,
// addu $sp, $sp, $v1
// jr $ra (via RetRA)
const TargetMachine &TM = MBB.getParent()->getTarget();
- if (TM.getRelocationModel() == Reloc::PIC_)
+ if (TM.isPositionIndependent())
BuildMI(MBB, I, I->getDebugLoc(), get(ADDU), T9)
.addReg(TargetReg)
.addReg(ZERO);
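
The new adjustStackPtr logic above is easiest to check with concrete amounts. The following standalone sketch mirrors its decision structure; the mnemonics, the $at scratch register, and the printf-based output are illustrative stand-ins for the MachineInstr building in the real code.

  #include <cstdint>
  #include <cstdio>

  static bool isInt16(int64_t X) { return X >= -32768 && X <= 32767; }

  // Mirrors the patched adjustStackPtr: small amounts go straight into an
  // addiu; large amounts are materialized into a scratch register first,
  // with negative values negated so subu can be used instead of addu.
  void adjustStackPtrSketch(int64_t Amount) {
    if (Amount == 0)
      return;
    if (isInt16(Amount)) {
      std::printf("addiu $sp, $sp, %lld\n", (long long)Amount);
      return;
    }
    const char *Op = "addu";
    if (Amount < 0) {
      Op = "subu";
      Amount = -Amount;
    }
    std::printf("<materialize %lld into $at>\n", (long long)Amount);
    std::printf("%s $sp, $sp, $at\n", Op);
  }

  int main() {
    adjustStackPtrSketch(-16);     // addiu $sp, $sp, -16
    adjustStackPtrSketch(100000);  // materialize, then addu
    adjustStackPtrSketch(-100000); // materialize 100000, then subu
  }
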
diff --git a/lib/Target/Mips/MipsSEInstrInfo.h b/lib/Target/Mips/MipsSEInstrInfo.h
index 5d73545ef6b9..b356909bf1cf 100644
--- a/lib/Target/Mips/MipsSEInstrInfo.h
+++ b/lib/Target/Mips/MipsSEInstrInfo.h
@@ -32,7 +32,7 @@ public:
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
- unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// isStoreToStackSlot - If the specified machine instruction is a direct
@@ -40,12 +40,11 @@ public:
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
- unsigned isStoreToStackSlot(const MachineInstr *MI,
+ unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
- void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStack(MachineBasicBlock &MBB,
@@ -62,7 +61,7 @@ public:
const TargetRegisterInfo *TRI,
int64_t Offset) const override;
- bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
+ bool expandPostRAPseudo(MachineInstr &MI) const override;
unsigned getOppositeBranchOpc(unsigned Opc) const override;
@@ -74,7 +73,7 @@ public:
/// non-NULL parameter, the last instruction is not emitted, but instead
/// its immediate operand is returned in NewImm.
unsigned loadImmediate(int64_t Imm, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator II, DebugLoc DL,
+ MachineBasicBlock::iterator II, const DebugLoc &DL,
unsigned *NewImm) const;
private:
diff --git a/lib/Target/Mips/MipsSERegisterInfo.cpp b/lib/Target/Mips/MipsSERegisterInfo.cpp
index b1e2885f5ba3..e3431cd118ab 100644
--- a/lib/Target/Mips/MipsSERegisterInfo.cpp
+++ b/lib/Target/Mips/MipsSERegisterInfo.cpp
@@ -14,12 +14,10 @@
#include "MipsSERegisterInfo.h"
#include "Mips.h"
-#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
-#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -29,7 +27,6 @@
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -63,10 +60,11 @@ MipsSERegisterInfo::intRegClass(unsigned Size) const {
return &Mips::GPR64RegClass;
}
-/// Get the size of the offset supported by the given load/store.
+/// Get the size of the offset supported by the given load/store/inline asm.
/// The result includes the effects of any scale factors applied to the
/// instruction immediate.
-static inline unsigned getLoadStoreOffsetSizeInBits(const unsigned Opcode) {
+static inline unsigned getLoadStoreOffsetSizeInBits(const unsigned Opcode,
+ MachineOperand MO) {
switch (Opcode) {
case Mips::LD_B:
case Mips::ST_B:
@@ -80,6 +78,49 @@ static inline unsigned getLoadStoreOffsetSizeInBits(const unsigned Opcode) {
case Mips::LD_D:
case Mips::ST_D:
return 10 + 3 /* scale factor */;
+ case Mips::LL:
+ case Mips::LL64:
+ case Mips::LLD:
+ case Mips::LLE:
+ case Mips::SC:
+ case Mips::SC64:
+ case Mips::SCD:
+ case Mips::SCE:
+ return 16;
+ case Mips::LLE_MM:
+ case Mips::LLE_MMR6:
+ case Mips::LL_MM:
+ case Mips::SCE_MM:
+ case Mips::SCE_MMR6:
+ case Mips::SC_MM:
+ return 12;
+ case Mips::LL64_R6:
+ case Mips::LL_R6:
+ case Mips::LLD_R6:
+ case Mips::SC64_R6:
+ case Mips::SCD_R6:
+ case Mips::SC_R6:
+ return 9;
+ case Mips::INLINEASM: {
+ unsigned ConstraintID = InlineAsm::getMemoryConstraintID(MO.getImm());
+ switch (ConstraintID) {
+ case InlineAsm::Constraint_ZC: {
+ const MipsSubtarget &Subtarget = MO.getParent()
+ ->getParent()
+ ->getParent()
+ ->getSubtarget<MipsSubtarget>();
+ if (Subtarget.inMicroMipsMode())
+ return 12;
+
+ if (Subtarget.hasMips32r6())
+ return 9;
+
+ return 16;
+ }
+ default:
+ return 16;
+ }
+ }
default:
return 16;
}
@@ -169,7 +210,8 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
// Make sure Offset fits within the field available.
// For MSA instructions this is a 10-bit signed immediate scaled by the
// element size; LL/SC variants take 16, 12, or 9 bits depending on the
// ISA (see getLoadStoreOffsetSizeInBits), and everything else uses a
// 16-bit signed immediate.
- unsigned OffsetBitSize = getLoadStoreOffsetSizeInBits(MI.getOpcode());
+ unsigned OffsetBitSize =
+ getLoadStoreOffsetSizeInBits(MI.getOpcode(), MI.getOperand(OpNo - 1));
unsigned OffsetAlign = getLoadStoreOffsetAlign(MI.getOpcode());
if (OffsetBitSize < 16 && isInt<16>(Offset) &&
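
The INLINEASM case added above sizes the offset for the "ZC" memory constraint by ISA level. The standalone sketch below reproduces just that decision; the struct is a mock for the subtarget queries, not the real MipsSubtarget interface.

  // Mock of the two subtarget properties the ZC constraint depends on.
  struct SubtargetFlags {
    bool InMicroMipsMode;
    bool HasMips32r6;
  };

  // Same widths as the INLINEASM/Constraint_ZC case in the patch: microMIPS
  // LL/SC take a 12-bit offset, R6 shrinks the field to 9 bits, and the
  // pre-R6 encodings keep the full 16-bit signed offset.
  unsigned zcOffsetSizeInBits(const SubtargetFlags &ST) {
    if (ST.InMicroMipsMode)
      return 12;
    if (ST.HasMips32r6)
      return 9;
    return 16;
  }
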
diff --git a/lib/Target/Mips/MipsSchedule.td b/lib/Target/Mips/MipsSchedule.td
index 37f9e491d546..738b6c46407a 100644
--- a/lib/Target/Mips/MipsSchedule.td
+++ b/lib/Target/Mips/MipsSchedule.td
@@ -23,24 +23,34 @@ def IIPseudo : InstrItinClass;
def II_ABS : InstrItinClass;
def II_ADDI : InstrItinClass;
def II_ADDIU : InstrItinClass;
+def II_ADDIUPC : InstrItinClass;
+def II_ADD : InstrItinClass;
def II_ADDU : InstrItinClass;
def II_ADD_D : InstrItinClass;
def II_ADD_S : InstrItinClass;
+def II_ALIGN : InstrItinClass;
def II_AND : InstrItinClass;
def II_ANDI : InstrItinClass;
+def II_ALUIPC : InstrItinClass;
+def II_AUI : InstrItinClass;
+def II_AUIPC : InstrItinClass;
def II_B : InstrItinClass;
def II_BADDU : InstrItinClass;
def II_BBIT : InstrItinClass; // bbit[01], bbit[01]32
+def II_BALC : InstrItinClass;
def II_BC : InstrItinClass;
def II_BC1F : InstrItinClass;
def II_BC1FL : InstrItinClass;
def II_BC1T : InstrItinClass;
def II_BC1TL : InstrItinClass;
+def II_BC1CCZ : InstrItinClass;
def II_BCC : InstrItinClass; // beq and bne
def II_BCCZ : InstrItinClass; // b[gl][et]z
+def II_BCCC : InstrItinClass; // b<cc>c
def II_BCCZAL : InstrItinClass; // bgezal and bltzal
def II_BCCZALS : InstrItinClass; // bgezals and bltzals
def II_BCCZC : InstrItinClass; // beqzc, bnezc
+def II_BITSWAP : InstrItinClass;
def II_CEIL : InstrItinClass;
def II_CFC1 : InstrItinClass;
def II_CLO : InstrItinClass;
@@ -51,16 +61,33 @@ def II_C_CC_D : InstrItinClass; // Any c.<cc>.d instruction
def II_C_CC_S : InstrItinClass; // Any c.<cc>.s instruction
def II_DADDIU : InstrItinClass;
def II_DADDU : InstrItinClass;
+def II_DADDI : InstrItinClass;
def II_DADD : InstrItinClass;
+def II_DAHI : InstrItinClass;
+def II_DATI : InstrItinClass;
+def II_DAUI : InstrItinClass;
+def II_DALIGN : InstrItinClass;
+def II_DBITSWAP : InstrItinClass;
+def II_DCLO : InstrItinClass;
+def II_DCLZ : InstrItinClass;
def II_DDIV : InstrItinClass;
def II_DDIVU : InstrItinClass;
def II_DIV : InstrItinClass;
def II_DIVU : InstrItinClass;
def II_DIV_D : InstrItinClass;
def II_DIV_S : InstrItinClass;
+def II_DMFC0 : InstrItinClass;
+def II_DMTC0 : InstrItinClass;
def II_DMFC1 : InstrItinClass;
def II_DMTC1 : InstrItinClass;
+def II_DMOD : InstrItinClass;
+def II_DMODU : InstrItinClass;
+def II_DMUH : InstrItinClass;
+def II_DMUHU : InstrItinClass;
+def II_DMFC2 : InstrItinClass;
+def II_DMTC2 : InstrItinClass;
def II_DMUL : InstrItinClass;
+def II_DMULU : InstrItinClass;
def II_DMULT : InstrItinClass;
def II_DMULTU : InstrItinClass;
def II_DROTR : InstrItinClass;
@@ -75,6 +102,8 @@ def II_DSRAV : InstrItinClass;
def II_DSRL : InstrItinClass;
def II_DSRL32 : InstrItinClass;
def II_DSRLV : InstrItinClass;
+def II_DSBH : InstrItinClass;
+def II_DSHD : InstrItinClass;
def II_DSUBU : InstrItinClass;
def II_DSUB : InstrItinClass;
def II_EXT : InstrItinClass; // Any EXT instruction
@@ -84,44 +113,96 @@ def II_IndirectBranchPseudo : InstrItinClass; // Indirect branch pseudo.
def II_J : InstrItinClass;
def II_JAL : InstrItinClass;
def II_JALR : InstrItinClass;
+def II_JALR_HB : InstrItinClass;
def II_JALRC : InstrItinClass;
def II_JALRS : InstrItinClass;
def II_JALS : InstrItinClass;
+def II_JIC : InstrItinClass;
+def II_JIALC : InstrItinClass;
def II_JR : InstrItinClass;
+def II_JR_HB : InstrItinClass;
def II_JRADDIUSP : InstrItinClass;
def II_JRC : InstrItinClass;
def II_ReturnPseudo : InstrItinClass; // Return pseudo.
+def II_ERET : InstrItinClass;
+def II_DERET : InstrItinClass;
+def II_ERETNC : InstrItinClass;
+def II_EHB : InstrItinClass;
+def II_SDBBP : InstrItinClass;
+def II_SSNOP : InstrItinClass;
+def II_SYSCALL : InstrItinClass;
+def II_PAUSE : InstrItinClass;
+def II_WAIT : InstrItinClass;
+def II_EI : InstrItinClass;
+def II_DI : InstrItinClass;
+def II_TEQ : InstrItinClass;
+def II_TEQI : InstrItinClass;
+def II_TGE : InstrItinClass;
+def II_TGEI : InstrItinClass;
+def II_TGEIU : InstrItinClass;
+def II_TGEU : InstrItinClass;
+def II_TNE : InstrItinClass;
+def II_TNEI : InstrItinClass;
+def II_TLT : InstrItinClass;
+def II_TLTI : InstrItinClass;
+def II_TLTU : InstrItinClass;
+def II_TTLTIU : InstrItinClass;
+def II_TLBP : InstrItinClass;
+def II_TLBR : InstrItinClass;
+def II_TLBWI : InstrItinClass;
+def II_TLBWR : InstrItinClass;
+def II_TRAP : InstrItinClass;
+def II_BREAK : InstrItinClass;
+def II_SYNC : InstrItinClass;
+def II_SYNCI : InstrItinClass;
def II_LB : InstrItinClass;
def II_LBE : InstrItinClass;
def II_LBU : InstrItinClass;
def II_LBUE : InstrItinClass;
def II_LD : InstrItinClass;
def II_LDC1 : InstrItinClass;
+def II_LDC2 : InstrItinClass;
+def II_LDC3 : InstrItinClass;
def II_LDL : InstrItinClass;
def II_LDR : InstrItinClass;
+def II_LDPC : InstrItinClass;
def II_LDXC1 : InstrItinClass;
def II_LH : InstrItinClass;
def II_LHE : InstrItinClass;
def II_LHU : InstrItinClass;
def II_LHUE : InstrItinClass;
+def II_LL : InstrItinClass;
+def II_LLD : InstrItinClass;
def II_LUI : InstrItinClass;
def II_LUXC1 : InstrItinClass;
def II_LW : InstrItinClass;
def II_LWE : InstrItinClass;
def II_LWC1 : InstrItinClass;
+def II_LWC2 : InstrItinClass;
+def II_LWC3 : InstrItinClass;
def II_LWL : InstrItinClass;
def II_LWLE : InstrItinClass;
+def II_LWPC : InstrItinClass;
def II_LWR : InstrItinClass;
def II_LWRE : InstrItinClass;
def II_LWU : InstrItinClass;
+def II_LWUPC : InstrItinClass;
def II_LWXC1 : InstrItinClass;
+def II_LSA : InstrItinClass;
+def II_DLSA : InstrItinClass;
def II_MADD : InstrItinClass;
def II_MADDU : InstrItinClass;
def II_MADD_D : InstrItinClass;
def II_MADD_S : InstrItinClass;
+def II_MADDF_D : InstrItinClass;
+def II_MADDF_S : InstrItinClass;
+def II_MFC0 : InstrItinClass;
def II_MFC1 : InstrItinClass;
def II_MFHC1 : InstrItinClass;
+def II_MFC2 : InstrItinClass;
def II_MFHI_MFLO : InstrItinClass; // mfhi and mflo
+def II_MOD : InstrItinClass;
+def II_MODU : InstrItinClass;
def II_MOVF : InstrItinClass;
def II_MOVF_D : InstrItinClass;
def II_MOVF_S : InstrItinClass;
@@ -140,10 +221,17 @@ def II_MSUB : InstrItinClass;
def II_MSUBU : InstrItinClass;
def II_MSUB_D : InstrItinClass;
def II_MSUB_S : InstrItinClass;
+def II_MSUBF_D : InstrItinClass;
+def II_MSUBF_S : InstrItinClass;
+def II_MTC0 : InstrItinClass;
def II_MTC1 : InstrItinClass;
def II_MTHC1 : InstrItinClass;
+def II_MTC2 : InstrItinClass;
def II_MTHI_MTLO : InstrItinClass; // mthi and mtlo
def II_MUL : InstrItinClass;
+def II_MUH : InstrItinClass;
+def II_MUHU : InstrItinClass;
+def II_MULU : InstrItinClass;
def II_MULT : InstrItinClass;
def II_MULTU : InstrItinClass;
def II_MUL_D : InstrItinClass;
@@ -163,15 +251,20 @@ def II_ROTR : InstrItinClass;
def II_ROTRV : InstrItinClass;
def II_ROUND : InstrItinClass;
def II_SAVE : InstrItinClass;
+def II_SC : InstrItinClass;
+def II_SCD : InstrItinClass;
def II_SB : InstrItinClass;
def II_SBE : InstrItinClass;
def II_SD : InstrItinClass;
def II_SDC1 : InstrItinClass;
+def II_SDC2 : InstrItinClass;
+def II_SDC3 : InstrItinClass;
def II_SDL : InstrItinClass;
def II_SDR : InstrItinClass;
def II_SDXC1 : InstrItinClass;
def II_SEB : InstrItinClass;
def II_SEH : InstrItinClass;
+def II_SELCCZ : InstrItinClass;
def II_SEQ_SNE : InstrItinClass; // seq and sne
def II_SEQI_SNEI : InstrItinClass; // seqi and snei
def II_SH : InstrItinClass;
@@ -186,6 +279,7 @@ def II_SRA : InstrItinClass;
def II_SRAV : InstrItinClass;
def II_SRL : InstrItinClass;
def II_SRLV : InstrItinClass;
+def II_SUB : InstrItinClass;
def II_SUBU : InstrItinClass;
def II_SUB_D : InstrItinClass;
def II_SUB_S : InstrItinClass;
@@ -193,6 +287,8 @@ def II_SUXC1 : InstrItinClass;
def II_SW : InstrItinClass;
def II_SWE : InstrItinClass;
def II_SWC1 : InstrItinClass;
+def II_SWC2 : InstrItinClass;
+def II_SWC3 : InstrItinClass;
def II_SWL : InstrItinClass;
def II_SWLE : InstrItinClass;
def II_SWR : InstrItinClass;
@@ -202,6 +298,14 @@ def II_TRUNC : InstrItinClass;
def II_WSBH : InstrItinClass;
def II_XOR : InstrItinClass;
def II_XORI : InstrItinClass;
+def II_CACHE : InstrItinClass;
+def II_PREF : InstrItinClass;
+def II_CACHEE : InstrItinClass;
+def II_PREFE : InstrItinClass;
+def II_LLE : InstrItinClass;
+def II_SCE : InstrItinClass;
+def II_TLBINV : InstrItinClass;
+def II_TLBINVF : InstrItinClass;
//===----------------------------------------------------------------------===//
// Mips Generic instruction itineraries.
@@ -210,9 +314,16 @@ def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
InstrItinData<IIM16Alu , [InstrStage<1, [ALU]>]>,
InstrItinData<II_ADDI , [InstrStage<1, [ALU]>]>,
InstrItinData<II_ADDIU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_ADDIUPC , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_ADD , [InstrStage<1, [ALU]>]>,
InstrItinData<II_ADDU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_AUI , [InstrStage<1, [ALU]>]>,
InstrItinData<II_AND , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_ALUIPC , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_AUIPC , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_ALIGN , [InstrStage<1, [ALU]>]>,
InstrItinData<II_BADDU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_BITSWAP , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SLL , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SRA , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SRL , [InstrStage<1, [ALU]>]>,
@@ -225,17 +336,35 @@ def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
InstrItinData<II_CLZ , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DADDIU , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DADDU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DADDI , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DADD , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DALIGN , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DAHI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DATI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DAUI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DBITSWAP , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DCLO , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DCLZ , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DMOD , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_DMODU , [InstrStage<17, [IMULDIV]>]>,
InstrItinData<II_DSLL , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DSLL32 , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DSRL , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DSRL32 , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DSRA , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DSRA32 , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DSLLV , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DSRLV , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DSRAV , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DSUBU , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DSUB , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DROTR , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DROTR32 , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DROTRV , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DSBH , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DSHD , [InstrStage<1, [ALU]>]>,
InstrItinData<II_EXT , [InstrStage<1, [ALU]>]>,
InstrItinData<II_INS , [InstrStage<1, [ALU]>]>,
InstrItinData<II_LUI , [InstrStage<1, [ALU]>]>,
@@ -249,41 +378,61 @@ def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
InstrItinData<II_OR , [InstrStage<1, [ALU]>]>,
InstrItinData<II_POP , [InstrStage<1, [ALU]>]>,
InstrItinData<II_RDHWR , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SUB , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SUBU , [InstrStage<1, [ALU]>]>,
InstrItinData<II_XOR , [InstrStage<1, [ALU]>]>,
InstrItinData<II_ANDI , [InstrStage<1, [ALU]>]>,
InstrItinData<II_ORI , [InstrStage<1, [ALU]>]>,
InstrItinData<II_XORI , [InstrStage<1, [ALU]>]>,
InstrItinData<II_LB , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LBE , [InstrStage<3, [ALU]>]>,
InstrItinData<II_LBU , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LBUE , [InstrStage<3, [ALU]>]>,
InstrItinData<II_LH , [InstrStage<3, [ALU]>]>,
InstrItinData<II_LHU , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LHUE , [InstrStage<3, [ALU]>]>,
InstrItinData<II_LW , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LWPC , [InstrStage<3, [ALU]>]>,
InstrItinData<II_LWL , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LWLE , [InstrStage<3, [ALU]>]>,
InstrItinData<II_LWR , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LWRE , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LWUPC , [InstrStage<3, [ALU]>]>,
InstrItinData<II_LD , [InstrStage<3, [ALU]>]>,
InstrItinData<II_LDL , [InstrStage<3, [ALU]>]>,
InstrItinData<II_LDR , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LDPC , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LL , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LLD , [InstrStage<3, [ALU]>]>,
InstrItinData<II_RESTORE , [InstrStage<3, [ALU]>]>,
InstrItinData<II_SB , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SH , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SHE , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SW , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SWL , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SWR , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SDL , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SDR , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SD , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SC , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SCD , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SAVE , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SELCCZ , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SEQ_SNE , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SEQI_SNEI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SLTI_SLTIU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SLT_SLTU , [InstrStage<1, [ALU]>]>,
InstrItinData<II_B , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_BALC , [InstrStage<1, [ALU]>]>,
InstrItinData<II_BBIT , [InstrStage<1, [ALU]>]>,
InstrItinData<II_BC , [InstrStage<1, [ALU]>]>,
InstrItinData<II_BC1F , [InstrStage<1, [ALU]>]>,
InstrItinData<II_BC1FL , [InstrStage<1, [ALU]>]>,
InstrItinData<II_BC1T , [InstrStage<1, [ALU]>]>,
InstrItinData<II_BC1TL , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_BC1CCZ , [InstrStage<1, [ALU]>]>,
InstrItinData<II_BCC , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_BCCC , [InstrStage<1, [ALU]>]>,
InstrItinData<II_BCCZ , [InstrStage<1, [ALU]>]>,
InstrItinData<II_BCCZAL , [InstrStage<1, [ALU]>]>,
InstrItinData<II_BCCZALS , [InstrStage<1, [ALU]>]>,
@@ -292,25 +441,69 @@ def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
InstrItinData<II_J , [InstrStage<1, [ALU]>]>,
InstrItinData<II_JAL , [InstrStage<1, [ALU]>]>,
InstrItinData<II_JALR , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_JALR_HB , [InstrStage<1, [ALU]>]>,
InstrItinData<II_JALRC , [InstrStage<1, [ALU]>]>,
InstrItinData<II_JALRS , [InstrStage<1, [ALU]>]>,
InstrItinData<II_JALS , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_JIC , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_JIALC , [InstrStage<1, [ALU]>]>,
InstrItinData<II_JR , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_JR_HB , [InstrStage<1, [ALU]>]>,
InstrItinData<II_JRADDIUSP , [InstrStage<1, [ALU]>]>,
InstrItinData<II_JRC , [InstrStage<1, [ALU]>]>,
InstrItinData<II_ReturnPseudo , [InstrStage<1, [ALU]>]>,
+ InstrItinData<IIPseudo , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DMUH , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_DMUHU , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_ERET , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DERET , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_ERETNC , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_EHB , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SDBBP , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SSNOP , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SYSCALL , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_PAUSE , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_WAIT , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_EI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TEQ , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TEQI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TGE , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TGEI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TGEIU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TGEU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TNE , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TNEI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TLT , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TLTI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TLTU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TTLTIU , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TLBP , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TLBR , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TLBWI , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TLBWR , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TRAP , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_BREAK , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SYNC , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SYNCI , [InstrStage<1, [ALU]>]>,
InstrItinData<II_DMUL , [InstrStage<17, [IMULDIV]>]>,
InstrItinData<II_DMULT , [InstrStage<17, [IMULDIV]>]>,
InstrItinData<II_DMULTU , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_DMULU , [InstrStage<17, [IMULDIV]>]>,
InstrItinData<II_MADD , [InstrStage<17, [IMULDIV]>]>,
InstrItinData<II_MADDU , [InstrStage<17, [IMULDIV]>]>,
InstrItinData<II_MFHI_MFLO , [InstrStage<1, [IMULDIV]>]>,
+ InstrItinData<II_MOD , [InstrStage<38, [IMULDIV]>]>,
+ InstrItinData<II_MODU , [InstrStage<38, [IMULDIV]>]>,
InstrItinData<II_MSUB , [InstrStage<17, [IMULDIV]>]>,
InstrItinData<II_MSUBU , [InstrStage<17, [IMULDIV]>]>,
InstrItinData<II_MTHI_MTLO , [InstrStage<1, [IMULDIV]>]>,
+ InstrItinData<II_MUH , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_MUHU , [InstrStage<17, [IMULDIV]>]>,
InstrItinData<II_MUL , [InstrStage<17, [IMULDIV]>]>,
InstrItinData<II_MULT , [InstrStage<17, [IMULDIV]>]>,
InstrItinData<II_MULTU , [InstrStage<17, [IMULDIV]>]>,
+ InstrItinData<II_MULU , [InstrStage<17, [IMULDIV]>]>,
InstrItinData<II_MSUB , [InstrStage<17, [IMULDIV]>]>,
InstrItinData<II_MSUBU , [InstrStage<17, [IMULDIV]>]>,
InstrItinData<II_DIV , [InstrStage<38, [IMULDIV]>]>,
@@ -342,34 +535,65 @@ def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
InstrItinData<II_SUB_S , [InstrStage<4, [ALU]>]>,
InstrItinData<II_MUL_S , [InstrStage<7, [ALU]>]>,
InstrItinData<II_MADD_S , [InstrStage<7, [ALU]>]>,
+ InstrItinData<II_MADDF_S , [InstrStage<7, [ALU]>]>,
InstrItinData<II_MSUB_S , [InstrStage<7, [ALU]>]>,
+ InstrItinData<II_MSUBF_S , [InstrStage<7, [ALU]>]>,
InstrItinData<II_NMADD_S , [InstrStage<7, [ALU]>]>,
InstrItinData<II_NMSUB_S , [InstrStage<7, [ALU]>]>,
InstrItinData<II_MUL_D , [InstrStage<8, [ALU]>]>,
InstrItinData<II_MADD_D , [InstrStage<8, [ALU]>]>,
+ InstrItinData<II_MADDF_D , [InstrStage<8, [ALU]>]>,
InstrItinData<II_MSUB_D , [InstrStage<8, [ALU]>]>,
+ InstrItinData<II_MSUBF_D , [InstrStage<8, [ALU]>]>,
InstrItinData<II_NMADD_D , [InstrStage<8, [ALU]>]>,
InstrItinData<II_NMSUB_D , [InstrStage<8, [ALU]>]>,
InstrItinData<II_DIV_S , [InstrStage<23, [ALU]>]>,
InstrItinData<II_DIV_D , [InstrStage<36, [ALU]>]>,
InstrItinData<II_SQRT_S , [InstrStage<54, [ALU]>]>,
InstrItinData<II_SQRT_D , [InstrStage<12, [ALU]>]>,
+ InstrItinData<II_WSBH , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_LSA , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DLSA , [InstrStage<1, [ALU]>]>,
InstrItinData<II_LDC1 , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LDC2 , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LDC3 , [InstrStage<3, [ALU]>]>,
InstrItinData<II_LWC1 , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LWC2 , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_LWC3 , [InstrStage<3, [ALU]>]>,
InstrItinData<II_LDXC1 , [InstrStage<3, [ALU]>]>,
InstrItinData<II_LWXC1 , [InstrStage<3, [ALU]>]>,
InstrItinData<II_LUXC1 , [InstrStage<3, [ALU]>]>,
InstrItinData<II_SDC1 , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SDC2 , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SDC3 , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SWC1 , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SWC2 , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_SWC3 , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SDXC1 , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SWXC1 , [InstrStage<1, [ALU]>]>,
InstrItinData<II_SUXC1 , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_DMFC0 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_DMFC1 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_DMFC2 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_DMTC0 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_DMTC1 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_DMTC2 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MFC0 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MFC1 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MFC2 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MTC0 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MTC1 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_MTC2 , [InstrStage<2, [ALU]>]>,
InstrItinData<II_MFHC1 , [InstrStage<2, [ALU]>]>,
- InstrItinData<II_MTHC1 , [InstrStage<2, [ALU]>]>
+ InstrItinData<II_MTHC1 , [InstrStage<2, [ALU]>]>,
+ InstrItinData<II_CACHE , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_PREF , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_CACHEE , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_PREFE , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TLBINV , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_TLBINVF , [InstrStage<1, [ALU]>]>,
+ InstrItinData<II_LLE , [InstrStage<3, [ALU]>]>,
+ InstrItinData<II_SCE , [InstrStage<1, [ALU]>]>
]>;
include "MipsScheduleP5600.td"
diff --git a/lib/Target/Mips/MipsScheduleP5600.td b/lib/Target/Mips/MipsScheduleP5600.td
index d32ae4f55eaf..cee42873c6e8 100644
--- a/lib/Target/Mips/MipsScheduleP5600.td
+++ b/lib/Target/Mips/MipsScheduleP5600.td
@@ -13,7 +13,7 @@ def MipsP5600Model : SchedMachineModel {
int LoadLatency = 4;
int MispredictPenalty = 8; // TODO: Estimated
- let CompleteModel = 1;
+ let CompleteModel = 0;
}
let SchedModel = MipsP5600Model in {
diff --git a/lib/Target/Mips/MipsSubtarget.cpp b/lib/Target/Mips/MipsSubtarget.cpp
index 8a18b517d16b..3e7570ff46ed 100644
--- a/lib/Target/Mips/MipsSubtarget.cpp
+++ b/lib/Target/Mips/MipsSubtarget.cpp
@@ -90,7 +90,7 @@ MipsSubtarget::MipsSubtarget(const Triple &TT, const std::string &CPU,
report_fatal_error("Code generation for MIPS-V is not implemented", false);
// Check if Architecture and ABI are compatible.
- assert(((!isGP64bit() && (isABI_O32() || isABI_EABI())) ||
+ assert(((!isGP64bit() && isABI_O32()) ||
(isGP64bit() && (isABI_N32() || isABI_N64()))) &&
"Invalid Arch & ABI pair.");
@@ -114,7 +114,7 @@ MipsSubtarget::MipsSubtarget(const Triple &TT, const std::string &CPU,
report_fatal_error(ISA + " is not compatible with the DSP ASE", false);
}
- if (NoABICalls && TM.getRelocationModel() == Reloc::PIC_)
+ if (NoABICalls && TM.isPositionIndependent())
report_fatal_error("position-independent code requires '-mabicalls'");
// Set UseSmallSection.
@@ -126,6 +126,10 @@ MipsSubtarget::MipsSubtarget(const Triple &TT, const std::string &CPU,
}
}
+bool MipsSubtarget::isPositionIndependent() const {
+ return TM.isPositionIndependent();
+}
+
/// This overrides the PostRAScheduler bit in the SchedModel for any CPU.
bool MipsSubtarget::enablePostRAScheduler() const { return true; }
@@ -164,7 +168,6 @@ Reloc::Model MipsSubtarget::getRelocationModel() const {
return TM.getRelocationModel();
}
-bool MipsSubtarget::isABI_EABI() const { return getABI().IsEABI(); }
bool MipsSubtarget::isABI_N64() const { return getABI().IsN64(); }
bool MipsSubtarget::isABI_N32() const { return getABI().IsN32(); }
bool MipsSubtarget::isABI_O32() const { return getABI().IsO32(); }
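
Several hunks in this import, including the MipsSEInstrInfo constructor and the expandEhReturn change, replace direct Reloc::PIC_ comparisons with the new isPositionIndependent() predicate defined above. A standalone before/after sketch follows; the enum and struct are mocks, not the real LLVM types.

  // Mock subtarget; the real one forwards isPositionIndependent() to the
  // TargetMachine exactly as MipsSubtarget.cpp does above.
  enum Opcode { B /* PC-relative branch */, J /* absolute jump */ };

  struct MockSubtarget {
    bool PIC;
    bool isPositionIndependent() const { return PIC; }
  };

  // Before: callers wrote getRelocationModel() == Reloc::PIC_ at each site.
  // After: one predicate, as in the MipsSEInstrInfo ctor picking B vs. J
  // (PIC code cannot use the absolute-addressed J).
  Opcode pickUncondBranch(const MockSubtarget &STI) {
    return STI.isPositionIndependent() ? B : J;
  }
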
diff --git a/lib/Target/Mips/MipsSubtarget.h b/lib/Target/Mips/MipsSubtarget.h
index fbb01fe77029..38d3cee70477 100644
--- a/lib/Target/Mips/MipsSubtarget.h
+++ b/lib/Target/Mips/MipsSubtarget.h
@@ -18,10 +18,10 @@
#include "MipsFrameLowering.h"
#include "MipsISelLowering.h"
#include "MipsInstrInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <string>
@@ -81,6 +81,9 @@ class MipsSubtarget : public MipsGenSubtargetInfo {
// IsFP64bit - General-purpose registers are 64 bits wide
bool IsGP64bit;
+ // IsPTR64bit - Pointers are 64 bit wide
+ bool IsPTR64bit;
+
// HasVFPU - Processor has a vector floating point unit.
bool HasVFPU;
@@ -152,19 +155,18 @@ class MipsSubtarget : public MipsGenSubtargetInfo {
Triple TargetTriple;
- const TargetSelectionDAGInfo TSInfo;
+ const SelectionDAGTargetInfo TSInfo;
std::unique_ptr<const MipsInstrInfo> InstrInfo;
std::unique_ptr<const MipsFrameLowering> FrameLowering;
std::unique_ptr<const MipsTargetLowering> TLInfo;
public:
+ bool isPositionIndependent() const;
/// This overrides the PostRAScheduler bit in the SchedModel for each CPU.
bool enablePostRAScheduler() const override;
void getCriticalPathRCs(RegClassVector &CriticalPathRCs) const override;
CodeGenOpt::Level getOptLevelToEnablePostRAScheduler() const override;
- /// Only O32 and EABI supported right now.
- bool isABI_EABI() const;
bool isABI_N64() const;
bool isABI_N32() const;
bool isABI_O32() const;
@@ -225,6 +227,8 @@ public:
bool isGP64bit() const { return IsGP64bit; }
bool isGP32bit() const { return !IsGP64bit; }
unsigned getGPRSizeInBytes() const { return isGP64bit() ? 8 : 4; }
+ bool isPTR64bit() const { return IsPTR64bit; }
+ bool isPTR32bit() const { return !IsPTR64bit; }
bool isSingleFloat() const { return IsSingleFloat; }
bool hasVFPU() const { return HasVFPU; }
bool inMips16Mode() const { return InMips16Mode; }
@@ -290,7 +294,7 @@ public:
void setHelperClassesMips16();
void setHelperClassesMipsSE();
- const TargetSelectionDAGInfo *getSelectionDAGInfo() const override {
+ const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
return &TSInfo;
}
const MipsInstrInfo *getInstrInfo() const override { return InstrInfo.get(); }
diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp
index 3e638720e839..c248c3a50ac8 100644
--- a/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/lib/Target/Mips/MipsTargetMachine.cpp
@@ -26,6 +26,7 @@
#include "MipsTargetObjectFile.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/TargetRegistry.h"
@@ -77,6 +78,13 @@ static std::string computeDataLayout(const Triple &TT, StringRef CPU,
return Ret;
}
+static Reloc::Model getEffectiveRelocModel(CodeModel::Model CM,
+ Optional<Reloc::Model> RM) {
+ if (!RM.hasValue() || CM == CodeModel::JITDefault)
+ return Reloc::Static;
+ return *RM;
+}
+
// On function prologue, the stack is created by decrementing
// its pointer. Once decremented, all references are done with positive
// offset from the stack/frame pointer; using StackGrowsUp enables
@@ -85,10 +93,12 @@ static std::string computeDataLayout(const Triple &TT, StringRef CPU,
MipsTargetMachine::MipsTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL, bool isLittle)
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL,
+ bool isLittle)
: LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
- CPU, FS, Options, RM, CM, OL),
+ CPU, FS, Options, getEffectiveRelocModel(CM, RM), CM,
+ OL),
isLittle(isLittle), TLOF(make_unique<MipsTargetObjectFile>()),
ABI(MipsABIInfo::computeTargetABI(TT, CPU, Options.MCOptions)),
Subtarget(nullptr), DefaultSubtarget(TT, CPU, FS, isLittle, *this),
@@ -107,7 +117,8 @@ void MipsebTargetMachine::anchor() { }
MipsebTargetMachine::MipsebTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
@@ -116,7 +127,8 @@ void MipselTargetMachine::anchor() { }
MipselTargetMachine::MipselTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
@@ -217,8 +229,8 @@ void MipsPassConfig::addIRPasses() {
// the ISelDag to gen Mips code.
bool MipsPassConfig::addInstSelector() {
addPass(createMipsModuleISelDagPass(getMipsTargetMachine()));
- addPass(createMips16ISelDag(getMipsTargetMachine()));
- addPass(createMipsSEISelDag(getMipsTargetMachine()));
+ addPass(createMips16ISelDag(getMipsTargetMachine(), getOptLevel()));
+ addPass(createMipsSEISelDag(getMipsTargetMachine(), getOptLevel()));
return false;
}
@@ -250,7 +262,13 @@ TargetIRAnalysis MipsTargetMachine::getTargetIRAnalysis() {
// print out the code after the passes.
void MipsPassConfig::addPreEmitPass() {
MipsTargetMachine &TM = getMipsTargetMachine();
+
+ // The delay slot filler pass can potentially create forbidden slot (FS)
+ // hazards for MIPSR6, which the hazard schedule pass (HSP) will fix. Any
+ // (new) pass that creates compact branches after the HSP must handle FS
+ // hazards itself or be pipelined before the HSP.
addPass(createMipsDelaySlotFillerPass(TM));
+ addPass(createMipsHazardSchedule());
addPass(createMipsLongBranchPass(TM));
- addPass(createMipsConstantIslandPass(TM));
+ addPass(createMipsConstantIslandPass());
}
diff --git a/lib/Target/Mips/MipsTargetMachine.h b/lib/Target/Mips/MipsTargetMachine.h
index 38b2ecff7d7f..e4cf17e2abd8 100644
--- a/lib/Target/Mips/MipsTargetMachine.h
+++ b/lib/Target/Mips/MipsTargetMachine.h
@@ -40,8 +40,9 @@ class MipsTargetMachine : public LLVMTargetMachine {
public:
MipsTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
- StringRef FS, const TargetOptions &Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OL, bool isLittle);
+ StringRef FS, const TargetOptions &Options,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL, bool isLittle);
~MipsTargetMachine() override;
TargetIRAnalysis getTargetIRAnalysis() override;
@@ -68,25 +69,25 @@ public:
const MipsABIInfo &getABI() const { return ABI; }
};
-/// MipsebTargetMachine - Mips32/64 big endian target machine.
+/// Mips32/64 big endian target machine.
///
class MipsebTargetMachine : public MipsTargetMachine {
virtual void anchor();
public:
MipsebTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
-/// MipselTargetMachine - Mips32/64 little endian target machine.
+/// Mips32/64 little endian target machine.
///
class MipselTargetMachine : public MipsTargetMachine {
virtual void anchor();
public:
MipselTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
diff --git a/lib/Target/Mips/MipsTargetObjectFile.cpp b/lib/Target/Mips/MipsTargetObjectFile.cpp
index 146f33bda249..3bd4567e3792 100644
--- a/lib/Target/Mips/MipsTargetObjectFile.cpp
+++ b/lib/Target/Mips/MipsTargetObjectFile.cpp
@@ -41,10 +41,12 @@ void MipsTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM){
InitializeELF(TM.Options.UseInitArray);
SmallDataSection = getContext().getELFSection(
- ".sdata", ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC);
+ ".sdata", ELF::SHT_PROGBITS,
+ ELF::SHF_WRITE | ELF::SHF_ALLOC | ELF::SHF_MIPS_GPREL);
SmallBSSSection = getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
- ELF::SHF_WRITE | ELF::SHF_ALLOC);
+ ELF::SHF_WRITE | ELF::SHF_ALLOC |
+ ELF::SHF_MIPS_GPREL);
this->TM = &static_cast<const MipsTargetMachine &>(TM);
}
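
Adding SHF_MIPS_GPREL marks .sdata/.sbss as addressable relative to the global pointer, which is what makes the small-data placement pay off. A hedged illustration — the -G threshold and placement rules below are the usual MIPS small-data convention, not something this hunk spells out:

    // Hypothetical translation unit; with a small-data threshold of 8 bytes
    // (e.g. -G 8), the 4-byte global below is a candidate for .sdata and can
    // then be reached with a single gp-relative access.
    int counter = 42;      // small enough: eligible for .sdata
    long long table[64];   // 512 bytes: exceeds the threshold, stays in .data
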
@@ -106,7 +108,7 @@ IsGlobalInSmallSectionImpl(const GlobalValue *GV,
GV->hasCommonLinkage()))
return false;
- Type *Ty = GV->getType()->getElementType();
+ Type *Ty = GV->getValueType();
return IsInSmallSection(
GV->getParent()->getDataLayout().getTypeAllocSize(Ty));
}
@@ -138,11 +140,13 @@ bool MipsTargetObjectFile::IsConstantInSmallSection(
}
/// Return true if this constant should be placed into the small data section.
-MCSection *MipsTargetObjectFile::getSectionForConstant(
- const DataLayout &DL, SectionKind Kind, const Constant *C) const {
+MCSection *MipsTargetObjectFile::getSectionForConstant(const DataLayout &DL,
+ SectionKind Kind,
+ const Constant *C,
+ unsigned &Align) const {
if (IsConstantInSmallSection(DL, C, *TM))
return SmallDataSection;
// Otherwise, we work the same as ELF.
- return TargetLoweringObjectFileELF::getSectionForConstant(DL, Kind, C);
+ return TargetLoweringObjectFileELF::getSectionForConstant(DL, Kind, C, Align);
}
diff --git a/lib/Target/Mips/MipsTargetObjectFile.h b/lib/Target/Mips/MipsTargetObjectFile.h
index ba04343bad87..9840769aff69 100644
--- a/lib/Target/Mips/MipsTargetObjectFile.h
+++ b/lib/Target/Mips/MipsTargetObjectFile.h
@@ -40,7 +40,8 @@ class MipsTargetMachine;
const TargetMachine &TM) const;
MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
- const Constant *C) const override;
+ const Constant *C,
+ unsigned &Align) const override;
};
} // end namespace llvm
diff --git a/lib/Target/Mips/MipsTargetStreamer.h b/lib/Target/Mips/MipsTargetStreamer.h
index b3222f5d89ef..41ebe411b98d 100644
--- a/lib/Target/Mips/MipsTargetStreamer.h
+++ b/lib/Target/Mips/MipsTargetStreamer.h
@@ -13,6 +13,7 @@
#include "MCTargetDesc/MipsABIFlagsSection.h"
#include "MCTargetDesc/MipsABIInfo.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
@@ -24,8 +25,12 @@ struct MipsABIFlagsSection;
class MipsTargetStreamer : public MCTargetStreamer {
public:
MipsTargetStreamer(MCStreamer &S);
+
+ virtual void setPic(bool Value) {}
+
virtual void emitDirectiveSetMicroMips();
virtual void emitDirectiveSetNoMicroMips();
+ virtual void setUsesMicroMips();
virtual void emitDirectiveSetMips16();
virtual void emitDirectiveSetNoMips16();
@@ -78,8 +83,9 @@ public:
// PIC support
virtual void emitDirectiveCpLoad(unsigned RegNo);
- virtual void emitDirectiveCpRestore(SmallVector<MCInst, 3> &StoreInsts,
- int Offset);
+ virtual bool emitDirectiveCpRestore(int Offset,
+ function_ref<unsigned()> GetATReg,
+ SMLoc IDLoc, const MCSubtargetInfo *STI);
virtual void emitDirectiveCpsetup(unsigned RegNo, int RegOrOffset,
const MCSymbol &Sym, bool IsReg);
virtual void emitDirectiveCpreturn(unsigned SaveLocation,
@@ -94,6 +100,54 @@ public:
virtual void emitDirectiveSetOddSPReg();
virtual void emitDirectiveSetNoOddSPReg();
+ void emitR(unsigned Opcode, unsigned Reg0, SMLoc IDLoc,
+ const MCSubtargetInfo *STI);
+ void emitII(unsigned Opcode, int16_t Imm1, int16_t Imm2, SMLoc IDLoc,
+ const MCSubtargetInfo *STI);
+ void emitRX(unsigned Opcode, unsigned Reg0, MCOperand Op1, SMLoc IDLoc,
+ const MCSubtargetInfo *STI);
+ void emitRI(unsigned Opcode, unsigned Reg0, int32_t Imm, SMLoc IDLoc,
+ const MCSubtargetInfo *STI);
+ void emitRR(unsigned Opcode, unsigned Reg0, unsigned Reg1, SMLoc IDLoc,
+ const MCSubtargetInfo *STI);
+ void emitRRX(unsigned Opcode, unsigned Reg0, unsigned Reg1, MCOperand Op2,
+ SMLoc IDLoc, const MCSubtargetInfo *STI);
+ void emitRRR(unsigned Opcode, unsigned Reg0, unsigned Reg1, unsigned Reg2,
+ SMLoc IDLoc, const MCSubtargetInfo *STI);
+ void emitRRI(unsigned Opcode, unsigned Reg0, unsigned Reg1, int16_t Imm,
+ SMLoc IDLoc, const MCSubtargetInfo *STI);
+ void emitAddu(unsigned DstReg, unsigned SrcReg, unsigned TrgReg, bool Is64Bit,
+ const MCSubtargetInfo *STI);
+ void emitDSLL(unsigned DstReg, unsigned SrcReg, int16_t ShiftAmount,
+ SMLoc IDLoc, const MCSubtargetInfo *STI);
+ void emitEmptyDelaySlot(bool hasShortDelaySlot, SMLoc IDLoc,
+ const MCSubtargetInfo *STI);
+ void emitNop(SMLoc IDLoc, const MCSubtargetInfo *STI);
+
+ /// Emit a store instruction with an offset. If the offset is out of range
+ /// then it will be synthesized using the assembler temporary.
+ ///
+ /// GetATReg() is a callback that can be used to obtain the current assembler
+ /// temporary and is only called when the assembler temporary is required. It
+ /// must handle the case where no assembler temporary is available (typically
+ /// by reporting an error).
+ void emitStoreWithImmOffset(unsigned Opcode, unsigned SrcReg,
+ unsigned BaseReg, int64_t Offset,
+ function_ref<unsigned()> GetATReg, SMLoc IDLoc,
+ const MCSubtargetInfo *STI);
+ void emitStoreWithSymOffset(unsigned Opcode, unsigned SrcReg,
+ unsigned BaseReg, MCOperand &HiOperand,
+ MCOperand &LoOperand, unsigned ATReg, SMLoc IDLoc,
+ const MCSubtargetInfo *STI);
+ void emitLoadWithImmOffset(unsigned Opcode, unsigned DstReg, unsigned BaseReg,
+ int64_t Offset, unsigned TmpReg, SMLoc IDLoc,
+ const MCSubtargetInfo *STI);
+ void emitLoadWithSymOffset(unsigned Opcode, unsigned DstReg, unsigned BaseReg,
+ MCOperand &HiOperand, MCOperand &LoOperand,
+ unsigned ATReg, SMLoc IDLoc,
+ const MCSubtargetInfo *STI);
+ void emitGPRestore(int Offset, SMLoc IDLoc, const MCSubtargetInfo *STI);
+
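
The GetATReg contract documented above deliberately pushes the "no assembler temporary" error onto the callback, so the emit helpers never need to know how diagnostics are reported. A runnable toy model of that contract — the register number and message are illustrative only:

    #include <cstdio>

    // The callback reports the error itself and returns 0 when $at is
    // unavailable (e.g. ".set noat" is in effect); the emitter just consumes
    // whatever it returns.
    static unsigned getATReg(bool ATAvailable) {
      if (!ATAvailable) {
        std::fprintf(stderr, "error: pseudo-instruction requires $at\n");
        return 0;
      }
      return 1; // $at is GPR $1 in the MIPS numbering
    }

    int main() {
      unsigned AT = getATReg(/*ATAvailable=*/false); // prints the error
      return AT == 0 ? 0 : 1;
    }
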
void forbidModuleDirective() { ModuleDirectiveAllowed = false; }
void reallowModuleDirective() { ModuleDirectiveAllowed = true; }
bool isModuleDirectiveAllowed() { return ModuleDirectiveAllowed; }
@@ -193,8 +247,16 @@ public:
// PIC support
void emitDirectiveCpLoad(unsigned RegNo) override;
- void emitDirectiveCpRestore(SmallVector<MCInst, 3> &StoreInsts,
- int Offset) override;
+
+ /// Emit a .cprestore directive. If the offset is out of range then it will
+ /// be synthesized using the assembler temporary.
+ ///
+ /// GetATReg() is a callback that can be used to obtain the current assembler
+ /// temporary and is only called when the assembler temporary is required. It
+ /// must handle the case where no assembler temporary is available (typically
+ /// by reporting an error).
+ bool emitDirectiveCpRestore(int Offset, function_ref<unsigned()> GetATReg,
+ SMLoc IDLoc, const MCSubtargetInfo *STI) override;
void emitDirectiveCpsetup(unsigned RegNo, int RegOrOffset,
const MCSymbol &Sym, bool IsReg) override;
void emitDirectiveCpreturn(unsigned SaveLocation,
@@ -221,12 +283,15 @@ public:
MCELFStreamer &getStreamer();
MipsTargetELFStreamer(MCStreamer &S, const MCSubtargetInfo &STI);
+ void setPic(bool Value) override { Pic = Value; }
+
void emitLabel(MCSymbol *Symbol) override;
void emitAssignment(MCSymbol *Symbol, const MCExpr *Value) override;
void finish() override;
void emitDirectiveSetMicroMips() override;
void emitDirectiveSetNoMicroMips() override;
+ void setUsesMicroMips() override;
void emitDirectiveSetMips16() override;
void emitDirectiveSetNoReorder() override;
@@ -246,8 +311,8 @@ public:
// PIC support
void emitDirectiveCpLoad(unsigned RegNo) override;
- void emitDirectiveCpRestore(SmallVector<MCInst, 3> &StoreInsts,
- int Offset) override;
+ bool emitDirectiveCpRestore(int Offset, function_ref<unsigned()> GetATReg,
+ SMLoc IDLoc, const MCSubtargetInfo *STI) override;
void emitDirectiveCpsetup(unsigned RegNo, int RegOrOffset,
const MCSymbol &Sym, bool IsReg) override;
void emitDirectiveCpreturn(unsigned SaveLocation,
diff --git a/lib/Target/Mips/TargetInfo/Makefile b/lib/Target/Mips/TargetInfo/Makefile
deleted file mode 100644
index 32f4e1695b1d..000000000000
--- a/lib/Target/Mips/TargetInfo/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/Mips/TargetInfo/Makefile -----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMMipsInfo
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/NVPTX/CMakeLists.txt b/lib/Target/NVPTX/CMakeLists.txt
index 05fe06dbc07c..b67c40500861 100644
--- a/lib/Target/NVPTX/CMakeLists.txt
+++ b/lib/Target/NVPTX/CMakeLists.txt
@@ -18,6 +18,7 @@ set(NVPTXCodeGen_sources
NVPTXISelDAGToDAG.cpp
NVPTXISelLowering.cpp
NVPTXImageOptimizer.cpp
+ NVPTXInferAddressSpaces.cpp
NVPTXInstrInfo.cpp
NVPTXLowerAggrCopies.cpp
NVPTXLowerKernelArgs.cpp
@@ -31,6 +32,7 @@ set(NVPTXCodeGen_sources
NVPTXTargetMachine.cpp
NVPTXTargetTransformInfo.cpp
NVPTXUtilities.cpp
+ NVVMIntrRange.cpp
NVVMReflect.cpp
)
diff --git a/lib/Target/NVPTX/InstPrinter/Makefile b/lib/Target/NVPTX/InstPrinter/Makefile
deleted file mode 100644
index 7b7865436bf3..000000000000
--- a/lib/Target/NVPTX/InstPrinter/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/NVPTX/AsmPrinter/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMNVPTXAsmPrinter
-
-# Hack: we need to include 'main' ptx target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/NVPTX/MCTargetDesc/Makefile b/lib/Target/NVPTX/MCTargetDesc/Makefile
deleted file mode 100644
index 31d06cb5948d..000000000000
--- a/lib/Target/NVPTX/MCTargetDesc/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/NVPTX/TargetDesc/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMNVPTXDesc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
index ef36c13b49f1..78bdf4e698d8 100644
--- a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
+++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
@@ -34,13 +34,16 @@ NVPTXMCAsmInfo::NVPTXMCAsmInfo(const Triple &TheTriple) {
HasSingleParameterDotFile = false;
- InlineAsmStart = " inline asm";
- InlineAsmEnd = " inline asm";
+ InlineAsmStart = " begin inline asm";
+ InlineAsmEnd = " end inline asm";
SupportsDebugInformation = CompileForDebugging;
// PTX does not allow .align on functions.
HasFunctionAlignment = false;
HasDotTypeDotSizeDirective = false;
+ // PTX does not allow .hidden or .protected
+ HiddenDeclarationVisibilityAttr = HiddenVisibilityAttr = MCSA_Invalid;
+ ProtectedVisibilityAttr = MCSA_Invalid;
Data8bitsDirective = " .b8 ";
Data16bitsDirective = " .b16 ";
diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp
index ad7302037cad..e356a965a04b 100644
--- a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp
+++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp
@@ -14,7 +14,6 @@
#include "NVPTXMCTargetDesc.h"
#include "InstPrinter/NVPTXInstPrinter.h"
#include "NVPTXMCAsmInfo.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
@@ -49,18 +48,6 @@ createNVPTXMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
return createNVPTXMCSubtargetInfoImpl(TT, CPU, FS);
}
-static MCCodeGenInfo *createNVPTXMCCodeGenInfo(const Triple &TT,
- Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
-
- // The default relocation model is used regardless of what the client has
- // specified, as it is the only relocation model currently supported.
- X->initMCCodeGenInfo(Reloc::Default, CM, OL);
- return X;
-}
-
static MCInstPrinter *createNVPTXMCInstPrinter(const Triple &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
@@ -77,9 +64,6 @@ extern "C" void LLVMInitializeNVPTXTargetMC() {
// Register the MC asm info.
RegisterMCAsmInfo<NVPTXMCAsmInfo> X(*T);
- // Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(*T, createNVPTXMCCodeGenInfo);
-
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(*T, createNVPTXMCInstrInfo);
diff --git a/lib/Target/NVPTX/Makefile b/lib/Target/NVPTX/Makefile
deleted file mode 100644
index 8db20ebed2c2..000000000000
--- a/lib/Target/NVPTX/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-##===- lib/Target/NVPTX/Makefile ---------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMNVPTXCodeGen
-TARGET = NVPTX
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = NVPTXGenAsmWriter.inc \
- NVPTXGenDAGISel.inc \
- NVPTXGenInstrInfo.inc \
- NVPTXGenRegisterInfo.inc \
- NVPTXGenSubtargetInfo.inc
-
-DIRS = InstPrinter TargetInfo MCTargetDesc
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/NVPTX/NVPTX.h b/lib/Target/NVPTX/NVPTX.h
index e5fae85bacf2..e91385ac13f2 100644
--- a/lib/Target/NVPTX/NVPTX.h
+++ b/lib/Target/NVPTX/NVPTX.h
@@ -46,8 +46,10 @@ FunctionPass *createNVPTXISelDag(NVPTXTargetMachine &TM,
ModulePass *createNVPTXAssignValidGlobalNamesPass();
ModulePass *createGenericToNVVMPass();
FunctionPass *createNVPTXFavorNonGenericAddrSpacesPass();
-ModulePass *createNVVMReflectPass();
-ModulePass *createNVVMReflectPass(const StringMap<int>& Mapping);
+FunctionPass *createNVPTXInferAddressSpacesPass();
+FunctionPass *createNVVMIntrRangePass(unsigned int SmVersion);
+FunctionPass *createNVVMReflectPass();
+FunctionPass *createNVVMReflectPass(const StringMap<int> &Mapping);
MachineFunctionPass *createNVPTXPrologEpilogPass();
MachineFunctionPass *createNVPTXReplaceImageHandlesPass();
FunctionPass *createNVPTXImageOptimizerPass();
@@ -55,8 +57,6 @@ FunctionPass *createNVPTXLowerKernelArgsPass(const NVPTXTargetMachine *TM);
BasicBlockPass *createNVPTXLowerAllocaPass();
MachineFunctionPass *createNVPTXPeephole();
-bool isImageOrSamplerVal(const Value *, const Module *);
-
extern Target TheNVPTXTarget32;
extern Target TheNVPTXTarget64;
diff --git a/lib/Target/NVPTX/NVPTX.td b/lib/Target/NVPTX/NVPTX.td
index 96abfa859119..032991a20cc9 100644
--- a/lib/Target/NVPTX/NVPTX.td
+++ b/lib/Target/NVPTX/NVPTX.td
@@ -44,6 +44,12 @@ def SM52 : SubtargetFeature<"sm_52", "SmVersion", "52",
"Target SM 5.2">;
def SM53 : SubtargetFeature<"sm_53", "SmVersion", "53",
"Target SM 5.3">;
+def SM60 : SubtargetFeature<"sm_60", "SmVersion", "60",
+ "Target SM 6.0">;
+def SM61 : SubtargetFeature<"sm_61", "SmVersion", "61",
+ "Target SM 6.1">;
+def SM62 : SubtargetFeature<"sm_62", "SmVersion", "62",
+ "Target SM 6.2">;
// PTX Versions
def PTX32 : SubtargetFeature<"ptx32", "PTXVersion", "32",
@@ -54,6 +60,10 @@ def PTX41 : SubtargetFeature<"ptx41", "PTXVersion", "41",
"Use PTX version 4.1">;
def PTX42 : SubtargetFeature<"ptx42", "PTXVersion", "42",
"Use PTX version 4.2">;
+def PTX43 : SubtargetFeature<"ptx43", "PTXVersion", "43",
+ "Use PTX version 4.3">;
+def PTX50 : SubtargetFeature<"ptx50", "PTXVersion", "50",
+ "Use PTX version 5.0">;
//===----------------------------------------------------------------------===//
// NVPTX supported processors.
@@ -71,7 +81,9 @@ def : Proc<"sm_37", [SM37, PTX41]>;
def : Proc<"sm_50", [SM50, PTX40]>;
def : Proc<"sm_52", [SM52, PTX41]>;
def : Proc<"sm_53", [SM53, PTX42]>;
-
+def : Proc<"sm_60", [SM60, PTX50]>;
+def : Proc<"sm_61", [SM61, PTX50]>;
+def : Proc<"sm_62", [SM62, PTX50]>;
def NVPTXInstrInfo : InstrInfo {
}
diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index e8c36089a779..660016bfcd05 100644
--- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -117,7 +117,7 @@ void NVPTXAsmPrinter::emitLineNumberAsDotLoc(const MachineInstr &MI) {
if (ignoreLoc(MI))
return;
- DebugLoc curLoc = MI.getDebugLoc();
+ const DebugLoc &curLoc = MI.getDebugLoc();
if (!prevDebugLoc && !curLoc)
return;
@@ -277,7 +277,7 @@ bool NVPTXAsmPrinter::lowerOperand(const MachineOperand &MO,
break;
case MachineOperand::MO_FPImmediate: {
const ConstantFP *Cnt = MO.getFPImm();
- APFloat Val = Cnt->getValueAPF();
+ const APFloat &Val = Cnt->getValueAPF();
switch (Cnt->getType()->getTypeID()) {
default: report_fatal_error("Unsupported FP type"); break;
@@ -432,7 +432,8 @@ bool NVPTXAsmPrinter::isLoopHeaderOfNoUnroll(
continue;
}
if (const BasicBlock *PBB = PMBB->getBasicBlock()) {
- if (MDNode *LoopID = PBB->getTerminator()->getMetadata("llvm.loop")) {
+ if (MDNode *LoopID =
+ PBB->getTerminator()->getMetadata(LLVMContext::MD_loop)) {
if (GetUnrollMetadata(LoopID, "llvm.loop.unroll.disable"))
return true;
}
@@ -798,10 +799,18 @@ void NVPTXAsmPrinter::recordAndEmitFilenames(Module &M) {
if (filenameMap.find(Filename) != filenameMap.end())
continue;
filenameMap[Filename] = i;
+ OutStreamer->EmitDwarfFileDirective(i, "", Filename);
++i;
}
}
+static bool isEmptyXXStructor(GlobalVariable *GV) {
+ if (!GV) return true;
+ const ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
+ if (!InitList) return true; // Not an array; we don't know how to parse it.
+ return InitList->getNumOperands() == 0;
+}
+
bool NVPTXAsmPrinter::doInitialization(Module &M) {
// Construct a default subtarget off of the TargetMachine defaults. The
// rest of NVPTX isn't friendly to change subtargets per function and
@@ -812,6 +821,21 @@ bool NVPTXAsmPrinter::doInitialization(Module &M) {
const NVPTXTargetMachine &NTM = static_cast<const NVPTXTargetMachine &>(TM);
const NVPTXSubtarget STI(TT, CPU, FS, NTM);
+ if (M.alias_size()) {
+ report_fatal_error("Module has aliases, which NVPTX does not support.");
+ return true; // error
+ }
+ if (!isEmptyXXStructor(M.getNamedGlobal("llvm.global_ctors"))) {
+ report_fatal_error(
+ "Module has a nontrivial global ctor, which NVPTX does not support.");
+ return true; // error
+ }
+ if (!isEmptyXXStructor(M.getNamedGlobal("llvm.global_dtors"))) {
+ report_fatal_error(
+ "Module has a nontrivial global dtor, which NVPTX does not support.");
+ return true; // error
+ }
+
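
For reference, a hedged sketch of the kind of C++ input the new guards reject: a namespace-scope object with a non-trivial constructor produces an llvm.global_ctors entry, while plain static initialization leaves it empty (names are illustrative):

    // Counter's constructor must run at startup, so the frontend records it
    // in llvm.global_ctors; compiling this for NVPTX now hits the fatal
    // error above. The constant-initialized global is still fine.
    struct Counter {
      Counter() { Value = 1; } // non-trivial: needs a runtime initializer
      int Value;
    };
    Counter Counter1;          // -> llvm.global_ctors entry (rejected)
    int Plain = 42;            // static initialization only (accepted)
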
SmallString<128> Str1;
raw_svector_ostream OS1(Str1);
@@ -1017,7 +1041,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
// Skip meta data
if (GVar->hasSection()) {
- if (GVar->getSection() == StringRef("llvm.metadata"))
+ if (GVar->getSection() == "llvm.metadata")
return;
}
@@ -1030,7 +1054,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
// GlobalVariables are always constant pointers themselves.
PointerType *PTy = GVar->getType();
- Type *ETy = PTy->getElementType();
+ Type *ETy = GVar->getValueType();
if (GVar->hasExternalLinkage()) {
if (GVar->hasInitializer())
@@ -1341,11 +1365,10 @@ void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable *GVar,
const DataLayout &DL = getDataLayout();
// GlobalVariables are always constant pointers themselves.
- PointerType *PTy = GVar->getType();
- Type *ETy = PTy->getElementType();
+ Type *ETy = GVar->getValueType();
O << ".";
- emitPTXAddressSpace(PTy->getAddressSpace(), O);
+ emitPTXAddressSpace(GVar->getType()->getAddressSpace(), O);
if (GVar->getAlignment() == 0)
O << " .align " << (int)DL.getPrefTypeAlignment(ETy);
else
@@ -1429,6 +1452,11 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
bool isABI = (nvptxSubtarget->getSmVersion() >= 20);
MVT thePointerTy = TLI->getPointerTy(DL);
+ if (F->arg_empty()) {
+ O << "()\n";
+ return;
+ }
+
O << "(\n";
for (I = F->arg_begin(), E = F->arg_end(); I != E; ++I, paramIndex++) {
@@ -1715,9 +1743,8 @@ void NVPTXAsmPrinter::printScalarConstant(const Constant *CPV, raw_ostream &O) {
return;
}
if (const GlobalValue *GVar = dyn_cast<GlobalValue>(CPV)) {
- PointerType *PTy = dyn_cast<PointerType>(GVar->getType());
bool IsNonGenericPointer = false;
- if (PTy && PTy->getAddressSpace() != 0) {
+ if (GVar->getType()->getAddressSpace() != 0) {
IsNonGenericPointer = true;
}
if (EmitGeneric && !isa<Function>(CPV) && !IsNonGenericPointer) {
@@ -1883,8 +1910,7 @@ void NVPTXAsmPrinter::bufferLEByte(const Constant *CPV, int Bytes,
case Type::ArrayTyID:
case Type::VectorTyID:
case Type::StructTyID: {
- if (isa<ConstantArray>(CPV) || isa<ConstantVector>(CPV) ||
- isa<ConstantStruct>(CPV) || isa<ConstantDataSequential>(CPV)) {
+ if (isa<ConstantAggregate>(CPV) || isa<ConstantDataSequential>(CPV)) {
int ElementSize = DL.getTypeAllocSize(CPV->getType());
bufferAggregateConstant(CPV, aggBuffer);
if (Bytes > ElementSize)
@@ -2315,7 +2341,7 @@ void NVPTXAsmPrinter::emitSrcInText(StringRef filename, unsigned line) {
this->OutStreamer->EmitRawText(temp.str());
}
-LineReader *NVPTXAsmPrinter::getReader(std::string filename) {
+LineReader *NVPTXAsmPrinter::getReader(const std::string &filename) {
if (!reader) {
reader = new LineReader(filename);
}
diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.h b/lib/Target/NVPTX/NVPTXAsmPrinter.h
index 76bf179896a8..85660fbdb26e 100644
--- a/lib/Target/NVPTX/NVPTXAsmPrinter.h
+++ b/lib/Target/NVPTX/NVPTXAsmPrinter.h
@@ -18,14 +18,14 @@
#include "NVPTX.h"
#include "NVPTXSubtarget.h"
#include "NVPTXTargetMachine.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Target/TargetMachine.h"
#include <fstream>
@@ -293,7 +293,7 @@ private:
bool isLoopHeaderOfNoUnroll(const MachineBasicBlock &MBB) const;
LineReader *reader;
- LineReader *getReader(std::string);
+ LineReader *getReader(const std::string &);
// Used to control the need to emit .generic() in the initializer of
// module scope variables.
diff --git a/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp b/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp
index 95813c8430d1..7c5a54162d77 100644
--- a/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp
+++ b/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp
@@ -7,6 +7,9 @@
//
//===----------------------------------------------------------------------===//
//
+// FIXME: This pass is deprecated in favor of NVPTXInferAddressSpaces, which
+// uses a new algorithm that handles pointer induction variables.
+//
// When a load/store accesses the generic address space, checks whether the
// address is casted from a non-generic address space. If so, remove this
// addrspacecast because accessing non-generic address spaces is typically
@@ -164,8 +167,8 @@ Value *NVPTXFavorNonGenericAddrSpaces::hoistAddrSpaceCastFromGEP(
GEP->getSourceElementType(), Cast->getOperand(0), Indices,
"", GEPI);
NewGEP->setIsInBounds(GEP->isInBounds());
+ NewGEP->takeName(GEP);
NewASC = new AddrSpaceCastInst(NewGEP, GEP->getType(), "", GEPI);
- NewASC->takeName(GEP);
// Without RAUWing GEP, the compiler would visit GEP again and emit
// redundant instructions. This is exercised in test @rauw in
// access-non-generic.ll.
@@ -263,7 +266,7 @@ bool NVPTXFavorNonGenericAddrSpaces::optimizeMemoryInstruction(Instruction *MI,
}
bool NVPTXFavorNonGenericAddrSpaces::runOnFunction(Function &F) {
- if (DisableFavorNonGeneric)
+ if (DisableFavorNonGeneric || skipFunction(F))
return false;
bool Changed = false;
diff --git a/lib/Target/NVPTX/NVPTXFrameLowering.cpp b/lib/Target/NVPTX/NVPTXFrameLowering.cpp
index 9b34aef3fdec..bbcb497ead9d 100644
--- a/lib/Target/NVPTX/NVPTXFrameLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXFrameLowering.cpp
@@ -16,7 +16,6 @@
#include "NVPTXRegisterInfo.h"
#include "NVPTXSubtarget.h"
#include "NVPTXTargetMachine.h"
-#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -35,7 +34,7 @@ void NVPTXFrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
if (MF.getFrameInfo()->hasStackObjects()) {
assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
- MachineInstr *MI = MBB.begin();
+ MachineInstr *MI = &MBB.front();
MachineRegisterInfo &MR = MF.getRegInfo();
// This instruction really occurs before first instruction
@@ -70,10 +69,10 @@ void NVPTXFrameLowering::emitEpilogue(MachineFunction &MF,
// This function eliminates ADJCALLSTACKDOWN,
// ADJCALLSTACKUP pseudo instructions
-void NVPTXFrameLowering::eliminateCallFramePseudoInstr(
+MachineBasicBlock::iterator NVPTXFrameLowering::eliminateCallFramePseudoInstr(
MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
// Simply discard ADJCALLSTACKDOWN,
// ADJCALLSTACKUP instructions.
- MBB.erase(I);
+ return MBB.erase(I);
}
diff --git a/lib/Target/NVPTX/NVPTXFrameLowering.h b/lib/Target/NVPTX/NVPTXFrameLowering.h
index 14f8bb7b98fe..320ca9a2f095 100644
--- a/lib/Target/NVPTX/NVPTXFrameLowering.h
+++ b/lib/Target/NVPTX/NVPTXFrameLowering.h
@@ -26,7 +26,7 @@ public:
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
- void
+ MachineBasicBlock::iterator
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const override;
};
diff --git a/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp b/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
index 62ca5e9f9f62..66a964082c5f 100644
--- a/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
+++ b/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
@@ -86,7 +86,7 @@ bool GenericToNVVM::runOnModule(Module &M) {
!llvm::isTexture(*GV) && !llvm::isSurface(*GV) &&
!llvm::isSampler(*GV) && !GV->getName().startswith("llvm.")) {
GlobalVariable *NewGV = new GlobalVariable(
- M, GV->getType()->getElementType(), GV->isConstant(),
+ M, GV->getValueType(), GV->isConstant(),
GV->getLinkage(),
GV->hasInitializer() ? GV->getInitializer() : nullptr,
"", GV, GV->getThreadLocalMode(), llvm::ADDRESS_SPACE_GLOBAL);
@@ -172,7 +172,7 @@ Value *GenericToNVVM::getOrInsertCVTA(Module *M, Function *F,
// See if the address space conversion requires the operand to be bitcast
// to i8 addrspace(n)* first.
- EVT ExtendedGVType = EVT::getEVT(GVType->getElementType(), true);
+ EVT ExtendedGVType = EVT::getEVT(GV->getValueType(), true);
if (!ExtendedGVType.isInteger() && !ExtendedGVType.isFloatingPoint()) {
// A bitcast to i8 addrspace(n)* on the operand is needed.
LLVMContext &Context = M->getContext();
@@ -182,21 +182,18 @@ Value *GenericToNVVM::getOrInsertCVTA(Module *M, Function *F,
// Insert the address space conversion.
Type *ResultType =
PointerType::get(Type::getInt8Ty(Context), llvm::ADDRESS_SPACE_GENERIC);
- SmallVector<Type *, 2> ParamTypes;
- ParamTypes.push_back(ResultType);
- ParamTypes.push_back(DestTy);
Function *CVTAFunction = Intrinsic::getDeclaration(
- M, Intrinsic::nvvm_ptr_global_to_gen, ParamTypes);
+ M, Intrinsic::nvvm_ptr_global_to_gen, {ResultType, DestTy});
CVTA = Builder.CreateCall(CVTAFunction, CVTA, "cvta");
// Another bitcast from i8 * to <the element type of GVType> * is
// required.
DestTy =
- PointerType::get(GVType->getElementType(), llvm::ADDRESS_SPACE_GENERIC);
+ PointerType::get(GV->getValueType(), llvm::ADDRESS_SPACE_GENERIC);
CVTA = Builder.CreateBitCast(CVTA, DestTy, "cvta");
} else {
// A simple CVTA is enough.
SmallVector<Type *, 2> ParamTypes;
- ParamTypes.push_back(PointerType::get(GVType->getElementType(),
+ ParamTypes.push_back(PointerType::get(GV->getValueType(),
llvm::ADDRESS_SPACE_GENERIC));
ParamTypes.push_back(GVType);
Function *CVTAFunction = Intrinsic::getDeclaration(
@@ -230,8 +227,7 @@ Value *GenericToNVVM::remapConstant(Module *M, Function *F, Constant *C,
if (I != GVMap.end()) {
NewValue = getOrInsertCVTA(M, F, I->second, Builder);
}
- } else if (isa<ConstantVector>(C) || isa<ConstantArray>(C) ||
- isa<ConstantStruct>(C)) {
+ } else if (isa<ConstantAggregate>(C)) {
// If any element in the constant vector or aggregate C is or uses a global
// variable in GVMap, the constant C needs to be reconstructed, using a set
// of instructions.
diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 2d0098b392f4..61c6758ef118 100644
--- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -105,57 +105,66 @@ bool NVPTXDAGToDAGISel::allowFMA() const {
/// Select - Select instructions not customized! Used for
/// expanded, promoted and normal instructions.
-SDNode *NVPTXDAGToDAGISel::Select(SDNode *N) {
+void NVPTXDAGToDAGISel::Select(SDNode *N) {
if (N->isMachineOpcode()) {
N->setNodeId(-1);
- return nullptr; // Already selected.
+ return; // Already selected.
}
- SDNode *ResNode = nullptr;
switch (N->getOpcode()) {
case ISD::LOAD:
- ResNode = SelectLoad(N);
+ if (tryLoad(N))
+ return;
break;
case ISD::STORE:
- ResNode = SelectStore(N);
+ if (tryStore(N))
+ return;
break;
case NVPTXISD::LoadV2:
case NVPTXISD::LoadV4:
- ResNode = SelectLoadVector(N);
+ if (tryLoadVector(N))
+ return;
break;
case NVPTXISD::LDGV2:
case NVPTXISD::LDGV4:
case NVPTXISD::LDUV2:
case NVPTXISD::LDUV4:
- ResNode = SelectLDGLDU(N);
+ if (tryLDGLDU(N))
+ return;
break;
case NVPTXISD::StoreV2:
case NVPTXISD::StoreV4:
- ResNode = SelectStoreVector(N);
+ if (tryStoreVector(N))
+ return;
break;
case NVPTXISD::LoadParam:
case NVPTXISD::LoadParamV2:
case NVPTXISD::LoadParamV4:
- ResNode = SelectLoadParam(N);
+ if (tryLoadParam(N))
+ return;
break;
case NVPTXISD::StoreRetval:
case NVPTXISD::StoreRetvalV2:
case NVPTXISD::StoreRetvalV4:
- ResNode = SelectStoreRetval(N);
+ if (tryStoreRetval(N))
+ return;
break;
case NVPTXISD::StoreParam:
case NVPTXISD::StoreParamV2:
case NVPTXISD::StoreParamV4:
case NVPTXISD::StoreParamS32:
case NVPTXISD::StoreParamU32:
- ResNode = SelectStoreParam(N);
+ if (tryStoreParam(N))
+ return;
break;
case ISD::INTRINSIC_WO_CHAIN:
- ResNode = SelectIntrinsicNoChain(N);
+ if (tryIntrinsicNoChain(N))
+ return;
break;
case ISD::INTRINSIC_W_CHAIN:
- ResNode = SelectIntrinsicChain(N);
+ if (tryIntrinsicChain(N))
+ return;
break;
case NVPTXISD::Tex1DFloatS32:
case NVPTXISD::Tex1DFloatFloat:
@@ -325,7 +334,8 @@ SDNode *NVPTXDAGToDAGISel::Select(SDNode *N) {
case NVPTXISD::Tld4UnifiedG2DU64Float:
case NVPTXISD::Tld4UnifiedB2DU64Float:
case NVPTXISD::Tld4UnifiedA2DU64Float:
- ResNode = SelectTextureIntrinsic(N);
+ if (tryTextureIntrinsic(N))
+ return;
break;
case NVPTXISD::Suld1DI8Clamp:
case NVPTXISD::Suld1DI16Clamp:
@@ -492,37 +502,37 @@ SDNode *NVPTXDAGToDAGISel::Select(SDNode *N) {
case NVPTXISD::Suld3DV4I8Zero:
case NVPTXISD::Suld3DV4I16Zero:
case NVPTXISD::Suld3DV4I32Zero:
- ResNode = SelectSurfaceIntrinsic(N);
+ if (trySurfaceIntrinsic(N))
+ return;
break;
case ISD::AND:
case ISD::SRA:
case ISD::SRL:
// Try to select BFE
- ResNode = SelectBFE(N);
+ if (tryBFE(N))
+ return;
break;
case ISD::ADDRSPACECAST:
- ResNode = SelectAddrSpaceCast(N);
- break;
+ SelectAddrSpaceCast(N);
+ return;
default:
break;
}
- if (ResNode)
- return ResNode;
- return SelectCode(N);
+ SelectCode(N);
}
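
The bulk of this hunk is mechanical: each Select* helper becomes a bool-returning try* helper that calls ReplaceNode itself, so Select() no longer threads an SDNode* result back through its caller. A self-contained toy of that idiom, with stand-in types in place of SDNode and the DAG:

    #include <cstdio>

    struct Node { int Opcode; };

    static void replaceNode(Node &Old, const Node &New) {
      Old = New; // stand-in for CurDAG mutation + ReplaceNode bookkeeping
    }

    // On success the helper performs the replacement itself and reports true;
    // on failure it leaves the node untouched for the generic matcher.
    static bool tryLoad(Node &N) {
      if (N.Opcode != 1)
        return false;
      replaceNode(N, Node{100}); // "machine" opcode chosen for the load
      return true;
    }

    static void select(Node &N) {
      if (tryLoad(N))
        return;
      std::printf("generic matcher handles opcode %d\n", N.Opcode);
    }

    int main() {
      Node A{1}, B{2};
      select(A); // handled by tryLoad, A.Opcode is now 100
      select(B); // falls through to the generic path
      return A.Opcode == 100 ? 0 : 1;
    }
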
-SDNode *NVPTXDAGToDAGISel::SelectIntrinsicChain(SDNode *N) {
+bool NVPTXDAGToDAGISel::tryIntrinsicChain(SDNode *N) {
unsigned IID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
switch (IID) {
default:
- return NULL;
+ return false;
case Intrinsic::nvvm_ldg_global_f:
case Intrinsic::nvvm_ldg_global_i:
case Intrinsic::nvvm_ldg_global_p:
case Intrinsic::nvvm_ldu_global_f:
case Intrinsic::nvvm_ldu_global_i:
case Intrinsic::nvvm_ldu_global_p:
- return SelectLDGLDU(N);
+ return tryLDGLDU(N);
}
}
@@ -579,25 +589,26 @@ static bool canLowerToLDG(MemSDNode *N, const NVPTXSubtarget &Subtarget,
return true;
}
-SDNode *NVPTXDAGToDAGISel::SelectIntrinsicNoChain(SDNode *N) {
+bool NVPTXDAGToDAGISel::tryIntrinsicNoChain(SDNode *N) {
unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
switch (IID) {
default:
- return nullptr;
+ return false;
case Intrinsic::nvvm_texsurf_handle_internal:
- return SelectTexSurfHandle(N);
+ SelectTexSurfHandle(N);
+ return true;
}
}
-SDNode *NVPTXDAGToDAGISel::SelectTexSurfHandle(SDNode *N) {
+void NVPTXDAGToDAGISel::SelectTexSurfHandle(SDNode *N) {
// Op 0 is the intrinsic ID
SDValue Wrapper = N->getOperand(1);
SDValue GlobalVal = Wrapper.getOperand(0);
- return CurDAG->getMachineNode(NVPTX::texsurf_handles, SDLoc(N), MVT::i64,
- GlobalVal);
+ ReplaceNode(N, CurDAG->getMachineNode(NVPTX::texsurf_handles, SDLoc(N),
+ MVT::i64, GlobalVal));
}
-SDNode *NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
+void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
SDValue Src = N->getOperand(0);
AddrSpaceCastSDNode *CastN = cast<AddrSpaceCastSDNode>(N);
unsigned SrcAddrSpace = CastN->getSrcAddressSpace();
@@ -624,7 +635,9 @@ SDNode *NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
Opc = TM.is64Bit() ? NVPTX::cvta_local_yes_64 : NVPTX::cvta_local_yes;
break;
}
- return CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0), Src);
+ ReplaceNode(N, CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
+ Src));
+ return;
} else {
// Generic to specific
if (SrcAddrSpace != 0)
@@ -653,11 +666,13 @@ SDNode *NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
: NVPTX::nvvm_ptr_gen_to_param;
break;
}
- return CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0), Src);
+ ReplaceNode(N, CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
+ Src));
+ return;
}
}
-SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
+bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
SDLoc dl(N);
LoadSDNode *LD = cast<LoadSDNode>(N);
EVT LoadedVT = LD->getMemoryVT();
@@ -665,16 +680,16 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
// do not support pre/post inc/dec
if (LD->isIndexed())
- return nullptr;
+ return false;
if (!LoadedVT.isSimple())
- return nullptr;
+ return false;
// Address Space Setting
unsigned int codeAddrSpace = getCodeAddrSpace(LD);
if (canLowerToLDG(LD, *Subtarget, codeAddrSpace, MF)) {
- return SelectLDGLDU(N);
+ return tryLDGLDU(N);
}
// Volatile Setting
@@ -695,7 +710,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
else if (num == 4)
vecType = NVPTX::PTXLdStInstCode::V4;
else
- return nullptr;
+ return false;
}
// Type Setting: fromType + fromTypeWidth
@@ -744,7 +759,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
Opcode = NVPTX::LD_f64_avar;
break;
default:
- return nullptr;
+ return false;
}
SDValue Ops[] = { getI32Imm(isVolatile, dl), getI32Imm(codeAddrSpace, dl),
getI32Imm(vecType, dl), getI32Imm(fromType, dl),
@@ -772,7 +787,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
Opcode = NVPTX::LD_f64_asi;
break;
default:
- return nullptr;
+ return false;
}
SDValue Ops[] = { getI32Imm(isVolatile, dl), getI32Imm(codeAddrSpace, dl),
getI32Imm(vecType, dl), getI32Imm(fromType, dl),
@@ -801,7 +816,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
Opcode = NVPTX::LD_f64_ari_64;
break;
default:
- return nullptr;
+ return false;
}
} else {
switch (TargetVT) {
@@ -824,7 +839,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
Opcode = NVPTX::LD_f64_ari;
break;
default:
- return nullptr;
+ return false;
}
}
SDValue Ops[] = { getI32Imm(isVolatile, dl), getI32Imm(codeAddrSpace, dl),
@@ -853,7 +868,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
Opcode = NVPTX::LD_f64_areg_64;
break;
default:
- return nullptr;
+ return false;
}
} else {
switch (TargetVT) {
@@ -876,7 +891,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
Opcode = NVPTX::LD_f64_areg;
break;
default:
- return nullptr;
+ return false;
}
}
SDValue Ops[] = { getI32Imm(isVolatile, dl), getI32Imm(codeAddrSpace, dl),
@@ -885,16 +900,18 @@ SDNode *NVPTXDAGToDAGISel::SelectLoad(SDNode *N) {
NVPTXLD = CurDAG->getMachineNode(Opcode, dl, TargetVT, MVT::Other, Ops);
}
- if (NVPTXLD) {
- MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1);
- MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
- cast<MachineSDNode>(NVPTXLD)->setMemRefs(MemRefs0, MemRefs0 + 1);
- }
+ if (!NVPTXLD)
+ return false;
+
+ MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1);
+ MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
+ cast<MachineSDNode>(NVPTXLD)->setMemRefs(MemRefs0, MemRefs0 + 1);
- return NVPTXLD;
+ ReplaceNode(N, NVPTXLD);
+ return true;
}
-SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
+bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
SDValue Chain = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
@@ -906,13 +923,13 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
EVT LoadedVT = MemSD->getMemoryVT();
if (!LoadedVT.isSimple())
- return nullptr;
+ return false;
// Address Space Setting
unsigned int CodeAddrSpace = getCodeAddrSpace(MemSD);
if (canLowerToLDG(MemSD, *Subtarget, CodeAddrSpace, MF)) {
- return SelectLDGLDU(N);
+ return tryLDGLDU(N);
}
// Volatile Setting
@@ -956,7 +973,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
VecType = NVPTX::PTXLdStInstCode::V4;
break;
default:
- return nullptr;
+ return false;
}
EVT EltVT = N->getValueType(0);
@@ -964,11 +981,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
if (SelectDirectAddr(Op1, Addr)) {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case NVPTXISD::LoadV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v2_avar;
break;
@@ -992,7 +1009,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
case NVPTXISD::LoadV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v4_avar;
break;
@@ -1017,11 +1034,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
: SelectADDRsi(Op1.getNode(), Op1, Base, Offset)) {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case NVPTXISD::LoadV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v2_asi;
break;
@@ -1045,7 +1062,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
case NVPTXISD::LoadV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v4_asi;
break;
@@ -1071,11 +1088,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
if (TM.is64Bit()) {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case NVPTXISD::LoadV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v2_ari_64;
break;
@@ -1099,7 +1116,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
case NVPTXISD::LoadV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v4_ari_64;
break;
@@ -1118,11 +1135,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
} else {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case NVPTXISD::LoadV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v2_ari;
break;
@@ -1146,7 +1163,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
case NVPTXISD::LoadV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v4_ari;
break;
@@ -1173,11 +1190,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
if (TM.is64Bit()) {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case NVPTXISD::LoadV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v2_areg_64;
break;
@@ -1201,7 +1218,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
case NVPTXISD::LoadV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v4_areg_64;
break;
@@ -1220,11 +1237,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
} else {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case NVPTXISD::LoadV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v2_areg;
break;
@@ -1248,7 +1265,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
case NVPTXISD::LoadV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::LDV_i8_v4_areg;
break;
@@ -1276,17 +1293,18 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadVector(SDNode *N) {
MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
cast<MachineSDNode>(LD)->setMemRefs(MemRefs0, MemRefs0 + 1);
- return LD;
+ ReplaceNode(N, LD);
+ return true;
}
-SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
+bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
SDValue Chain = N->getOperand(0);
SDValue Op1;
MemSDNode *Mem;
bool IsLDG = true;
- // If this is an LDG intrinsic, the address is the third operand. Its its an
+ // If this is an LDG intrinsic, the address is the third operand. If it's an
// LDG/LDU SD node (from custom vector handling), then it's the second operand.
if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
Op1 = N->getOperand(2);
@@ -1294,7 +1312,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
unsigned IID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
switch (IID) {
default:
- return NULL;
+ return false;
case Intrinsic::nvvm_ldg_global_f:
case Intrinsic::nvvm_ldg_global_i:
case Intrinsic::nvvm_ldg_global_p:
@@ -1317,19 +1335,32 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
SDValue Base, Offset, Addr;
EVT EltVT = Mem->getMemoryVT();
+ unsigned NumElts = 1;
if (EltVT.isVector()) {
+ NumElts = EltVT.getVectorNumElements();
EltVT = EltVT.getVectorElementType();
}
+ // Build the "promoted" result VTList for the load. If we are really loading
+ // i8s, then the return type will be promoted to i16 since we do not expose
+ // 8-bit registers in NVPTX.
+ EVT NodeVT = (EltVT == MVT::i8) ? MVT::i16 : EltVT;
+ SmallVector<EVT, 5> InstVTs;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ InstVTs.push_back(NodeVT);
+ }
+ InstVTs.push_back(MVT::Other);
+ SDVTList InstVTList = CurDAG->getVTList(InstVTs);
+
if (SelectDirectAddr(Op1, Addr)) {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case ISD::INTRINSIC_W_CHAIN:
if (IsLDG) {
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i8avar;
break;
@@ -1352,7 +1383,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
} else {
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i8avar;
break;
@@ -1377,7 +1408,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDGV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_avar;
break;
@@ -1401,7 +1432,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDUV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_avar;
break;
@@ -1425,7 +1456,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDGV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_avar;
break;
@@ -1443,7 +1474,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDUV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_avar;
break;
@@ -1461,19 +1492,19 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
}
SDValue Ops[] = { Addr, Chain };
- LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
+ LD = CurDAG->getMachineNode(Opcode, DL, InstVTList, Ops);
} else if (TM.is64Bit() ? SelectADDRri64(Op1.getNode(), Op1, Base, Offset)
: SelectADDRri(Op1.getNode(), Op1, Base, Offset)) {
if (TM.is64Bit()) {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case ISD::LOAD:
case ISD::INTRINSIC_W_CHAIN:
if (IsLDG) {
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i8ari64;
break;
@@ -1496,7 +1527,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
} else {
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i8ari64;
break;
@@ -1522,7 +1553,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDGV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_ari64;
break;
@@ -1546,7 +1577,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDUV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_ari64;
break;
@@ -1571,7 +1602,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDGV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_ari64;
break;
@@ -1589,7 +1620,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDUV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_ari64;
break;
@@ -1608,13 +1639,13 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
} else {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case ISD::LOAD:
case ISD::INTRINSIC_W_CHAIN:
if (IsLDG) {
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i8ari;
break;
@@ -1637,7 +1668,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
} else {
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i8ari;
break;
@@ -1663,7 +1694,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDGV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_ari32;
break;
@@ -1687,7 +1718,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDUV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_ari32;
break;
@@ -1712,7 +1743,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDGV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_ari32;
break;
@@ -1730,7 +1761,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDUV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_ari32;
break;
@@ -1750,18 +1781,18 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
SDValue Ops[] = { Base, Offset, Chain };
- LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
+ LD = CurDAG->getMachineNode(Opcode, DL, InstVTList, Ops);
} else {
if (TM.is64Bit()) {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case ISD::LOAD:
case ISD::INTRINSIC_W_CHAIN:
if (IsLDG) {
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i8areg64;
break;
@@ -1784,7 +1815,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
} else {
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i8areg64;
break;
@@ -1810,7 +1841,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDGV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_areg64;
break;
@@ -1834,7 +1865,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDUV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_areg64;
break;
@@ -1859,7 +1890,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDGV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_areg64;
break;
@@ -1877,7 +1908,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDUV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_areg64;
break;
@@ -1896,13 +1927,13 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
} else {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case ISD::LOAD:
case ISD::INTRINSIC_W_CHAIN:
if (IsLDG) {
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_GLOBAL_i8areg;
break;
@@ -1925,7 +1956,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
} else {
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_GLOBAL_i8areg;
break;
@@ -1951,7 +1982,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDGV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v2i8_ELE_areg32;
break;
@@ -1975,7 +2006,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDUV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v2i8_ELE_areg32;
break;
@@ -2000,7 +2031,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDGV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDG_G_v4i8_ELE_areg32;
break;
@@ -2018,7 +2049,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
case NVPTXISD::LDUV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::INT_PTX_LDU_G_v4i8_ELE_areg32;
break;
@@ -2037,17 +2068,54 @@ SDNode *NVPTXDAGToDAGISel::SelectLDGLDU(SDNode *N) {
}
SDValue Ops[] = { Op1, Chain };
- LD = CurDAG->getMachineNode(Opcode, DL, N->getVTList(), Ops);
+ LD = CurDAG->getMachineNode(Opcode, DL, InstVTList, Ops);
}
MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1);
MemRefs0[0] = Mem->getMemOperand();
cast<MachineSDNode>(LD)->setMemRefs(MemRefs0, MemRefs0 + 1);
- return LD;
+ // For automatic generation of LDG (through tryLoad/tryLoadVector, not the
+ // intrinsics), we may have an extending load like:
+ //
+ //   i32,ch = load<LD1[%data1(addrspace=1)], zext from i8> t0, t7, undef:i64
+ //
+ // In this case, the matching logic above will select a load for the
+ // original memory type (i8 here) and our types will not match (the node
+ // needs to return an i32). Our LDG/LDU nodes do not support the concept of
+ // sign-/zero-extension, so emulate it here by adding an explicit CVT
+ // instruction. Ptxas should clean up any redundancies here.
+
+ EVT OrigType = N->getValueType(0);
+ LoadSDNode *LdNode = dyn_cast<LoadSDNode>(N);
+
+ if (OrigType != EltVT && LdNode) {
+ // We have an extending-load. The instruction we selected operates on the
+ // smaller type, but the SDNode we are replacing has the larger type. We
+ // need to emit a CVT to make the types match.
+ bool IsSigned = LdNode->getExtensionType() == ISD::SEXTLOAD;
+ unsigned CvtOpc = GetConvertOpcode(OrigType.getSimpleVT(),
+ EltVT.getSimpleVT(), IsSigned);
+
+ // For each output value, apply the manual sign/zero-extension and make sure
+ // all users of the load go through that CVT.
+ for (unsigned i = 0; i != NumElts; ++i) {
+ SDValue Res(LD, i);
+ SDValue OrigVal(N, i);
+
+ SDNode *CvtNode =
+ CurDAG->getMachineNode(CvtOpc, DL, OrigType, Res,
+ CurDAG->getTargetConstant(NVPTX::PTXCvtMode::NONE,
+ DL, MVT::i32));
+ ReplaceUses(OrigVal, SDValue(CvtNode, 0));
+ }
+ }
+
+ ReplaceNode(N, LD);
+ return true;
}
-SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
+bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
SDLoc dl(N);
StoreSDNode *ST = cast<StoreSDNode>(N);
EVT StoreVT = ST->getMemoryVT();
@@ -2055,10 +2123,10 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
// do not support pre/post inc/dec
if (ST->isIndexed())
- return nullptr;
+ return false;
if (!StoreVT.isSimple())
- return nullptr;
+ return false;
// Address Space Setting
unsigned int codeAddrSpace = getCodeAddrSpace(ST);
@@ -2081,7 +2149,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
else if (num == 4)
vecType = NVPTX::PTXLdStInstCode::V4;
else
- return nullptr;
+ return false;
}
// Type Setting: toType + toTypeWidth
@@ -2125,7 +2193,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
Opcode = NVPTX::ST_f64_avar;
break;
default:
- return nullptr;
+ return false;
}
SDValue Ops[] = { N1, getI32Imm(isVolatile, dl),
getI32Imm(codeAddrSpace, dl), getI32Imm(vecType, dl),
@@ -2154,7 +2222,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
Opcode = NVPTX::ST_f64_asi;
break;
default:
- return nullptr;
+ return false;
}
SDValue Ops[] = { N1, getI32Imm(isVolatile, dl),
getI32Imm(codeAddrSpace, dl), getI32Imm(vecType, dl),
@@ -2184,7 +2252,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
Opcode = NVPTX::ST_f64_ari_64;
break;
default:
- return nullptr;
+ return false;
}
} else {
switch (SourceVT) {
@@ -2207,7 +2275,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
Opcode = NVPTX::ST_f64_ari;
break;
default:
- return nullptr;
+ return false;
}
}
SDValue Ops[] = { N1, getI32Imm(isVolatile, dl),
@@ -2237,7 +2305,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
Opcode = NVPTX::ST_f64_areg_64;
break;
default:
- return nullptr;
+ return false;
}
} else {
switch (SourceVT) {
@@ -2260,7 +2328,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
Opcode = NVPTX::ST_f64_areg;
break;
default:
- return nullptr;
+ return false;
}
}
SDValue Ops[] = { N1, getI32Imm(isVolatile, dl),
@@ -2270,16 +2338,17 @@ SDNode *NVPTXDAGToDAGISel::SelectStore(SDNode *N) {
NVPTXST = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
}
- if (NVPTXST) {
- MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1);
- MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
- cast<MachineSDNode>(NVPTXST)->setMemRefs(MemRefs0, MemRefs0 + 1);
- }
+ if (!NVPTXST)
+ return false;
- return NVPTXST;
+ MachineSDNode::mmo_iterator MemRefs0 = MF->allocateMemRefsArray(1);
+ MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
+ cast<MachineSDNode>(NVPTXST)->setMemRefs(MemRefs0, MemRefs0 + 1);
+ ReplaceNode(N, NVPTXST);
+ return true;
}
-SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
+bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
SDValue Chain = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
SDValue Addr, Offset, Base;
@@ -2337,7 +2406,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
N2 = N->getOperand(5);
break;
default:
- return nullptr;
+ return false;
}
StOps.push_back(getI32Imm(IsVolatile, DL));
@@ -2349,11 +2418,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
if (SelectDirectAddr(N2, Addr)) {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case NVPTXISD::StoreV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::STV_i8_v2_avar;
break;
@@ -2377,7 +2446,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
case NVPTXISD::StoreV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::STV_i8_v4_avar;
break;
@@ -2398,11 +2467,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
: SelectADDRsi(N2.getNode(), N2, Base, Offset)) {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case NVPTXISD::StoreV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::STV_i8_v2_asi;
break;
@@ -2426,7 +2495,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
case NVPTXISD::StoreV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::STV_i8_v4_asi;
break;
@@ -2449,11 +2518,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
if (TM.is64Bit()) {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case NVPTXISD::StoreV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::STV_i8_v2_ari_64;
break;
@@ -2477,7 +2546,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
case NVPTXISD::StoreV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::STV_i8_v4_ari_64;
break;
@@ -2496,11 +2565,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
} else {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case NVPTXISD::StoreV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::STV_i8_v2_ari;
break;
@@ -2524,7 +2593,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
case NVPTXISD::StoreV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::STV_i8_v4_ari;
break;
@@ -2547,11 +2616,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
if (TM.is64Bit()) {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case NVPTXISD::StoreV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::STV_i8_v2_areg_64;
break;
@@ -2575,7 +2644,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
case NVPTXISD::StoreV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::STV_i8_v4_areg_64;
break;
@@ -2594,11 +2663,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
} else {
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case NVPTXISD::StoreV2:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::STV_i8_v2_areg;
break;
@@ -2622,7 +2691,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
case NVPTXISD::StoreV4:
switch (EltVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i8:
Opcode = NVPTX::STV_i8_v4_areg;
break;
@@ -2650,10 +2719,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreVector(SDNode *N) {
MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
cast<MachineSDNode>(ST)->setMemRefs(MemRefs0, MemRefs0 + 1);
- return ST;
+ ReplaceNode(N, ST);
+ return true;
}
-SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) {
+bool NVPTXDAGToDAGISel::tryLoadParam(SDNode *Node) {
SDValue Chain = Node->getOperand(0);
SDValue Offset = Node->getOperand(2);
SDValue Flag = Node->getOperand(3);
@@ -2663,7 +2733,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) {
unsigned VecSize;
switch (Node->getOpcode()) {
default:
- return nullptr;
+ return false;
case NVPTXISD::LoadParam:
VecSize = 1;
break;
@@ -2682,11 +2752,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) {
switch (VecSize) {
default:
- return nullptr;
+ return false;
case 1:
switch (MemVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i1:
Opc = NVPTX::LoadParamMemI8;
break;
@@ -2713,7 +2783,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) {
case 2:
switch (MemVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i1:
Opc = NVPTX::LoadParamMemV2I8;
break;
@@ -2740,7 +2810,7 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) {
case 4:
switch (MemVT.getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i1:
Opc = NVPTX::LoadParamMemV4I8;
break;
@@ -2777,10 +2847,11 @@ SDNode *NVPTXDAGToDAGISel::SelectLoadParam(SDNode *Node) {
Ops.push_back(Chain);
Ops.push_back(Flag);
- return CurDAG->getMachineNode(Opc, DL, VTs, Ops);
+ ReplaceNode(Node, CurDAG->getMachineNode(Opc, DL, VTs, Ops));
+ return true;
}
-SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) {
+bool NVPTXDAGToDAGISel::tryStoreRetval(SDNode *N) {
SDLoc DL(N);
SDValue Chain = N->getOperand(0);
SDValue Offset = N->getOperand(1);
@@ -2791,7 +2862,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) {
unsigned NumElts = 1;
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case NVPTXISD::StoreRetval:
NumElts = 1;
break;
@@ -2816,11 +2887,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) {
unsigned Opcode = 0;
switch (NumElts) {
default:
- return nullptr;
+ return false;
case 1:
switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i1:
Opcode = NVPTX::StoreRetvalI8;
break;
@@ -2847,7 +2918,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) {
case 2:
switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i1:
Opcode = NVPTX::StoreRetvalV2I8;
break;
@@ -2874,7 +2945,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) {
case 4:
switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i1:
Opcode = NVPTX::StoreRetvalV4I8;
break;
@@ -2900,10 +2971,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreRetval(SDNode *N) {
MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
cast<MachineSDNode>(Ret)->setMemRefs(MemRefs0, MemRefs0 + 1);
- return Ret;
+ ReplaceNode(N, Ret);
+ return true;
}
-SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) {
+bool NVPTXDAGToDAGISel::tryStoreParam(SDNode *N) {
SDLoc DL(N);
SDValue Chain = N->getOperand(0);
SDValue Param = N->getOperand(1);
@@ -2917,7 +2989,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) {
unsigned NumElts = 1;
switch (N->getOpcode()) {
default:
- return nullptr;
+ return false;
case NVPTXISD::StoreParamU32:
case NVPTXISD::StoreParamS32:
case NVPTXISD::StoreParam:
@@ -2948,11 +3020,11 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) {
default:
switch (NumElts) {
default:
- return nullptr;
+ return false;
case 1:
switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i1:
Opcode = NVPTX::StoreParamI8;
break;
@@ -2979,7 +3051,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) {
case 2:
switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i1:
Opcode = NVPTX::StoreParamV2I8;
break;
@@ -3006,7 +3078,7 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) {
case 4:
switch (Mem->getMemoryVT().getSimpleVT().SimpleTy) {
default:
- return nullptr;
+ return false;
case MVT::i1:
Opcode = NVPTX::StoreParamV4I8;
break;
@@ -3056,17 +3128,17 @@ SDNode *NVPTXDAGToDAGISel::SelectStoreParam(SDNode *N) {
MemRefs0[0] = cast<MemSDNode>(N)->getMemOperand();
cast<MachineSDNode>(Ret)->setMemRefs(MemRefs0, MemRefs0 + 1);
- return Ret;
+ ReplaceNode(N, Ret);
+ return true;
}
-SDNode *NVPTXDAGToDAGISel::SelectTextureIntrinsic(SDNode *N) {
+bool NVPTXDAGToDAGISel::tryTextureIntrinsic(SDNode *N) {
SDValue Chain = N->getOperand(0);
- SDNode *Ret = nullptr;
unsigned Opc = 0;
SmallVector<SDValue, 8> Ops;
switch (N->getOpcode()) {
- default: return nullptr;
+ default: return false;
case NVPTXISD::Tex1DFloatS32:
Opc = NVPTX::TEX_1D_F32_S32;
break;
@@ -3579,18 +3651,17 @@ SDNode *NVPTXDAGToDAGISel::SelectTextureIntrinsic(SDNode *N) {
}
Ops.push_back(Chain);
- Ret = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
- return Ret;
+ ReplaceNode(N, CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops));
+ return true;
}
-SDNode *NVPTXDAGToDAGISel::SelectSurfaceIntrinsic(SDNode *N) {
+bool NVPTXDAGToDAGISel::trySurfaceIntrinsic(SDNode *N) {
SDValue Chain = N->getOperand(0);
SDValue TexHandle = N->getOperand(1);
- SDNode *Ret = nullptr;
unsigned Opc = 0;
SmallVector<SDValue, 8> Ops;
switch (N->getOpcode()) {
- default: return nullptr;
+ default: return false;
case NVPTXISD::Suld1DI8Clamp:
Opc = NVPTX::SULD_1D_I8_CLAMP;
Ops.push_back(TexHandle);
@@ -4780,14 +4851,14 @@ SDNode *NVPTXDAGToDAGISel::SelectSurfaceIntrinsic(SDNode *N) {
Ops.push_back(Chain);
break;
}
- Ret = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
- return Ret;
+ ReplaceNode(N, CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops));
+ return true;
}
/// tryBFE - Look for instruction sequences that can be made more efficient
/// by using the 'bfe' (bit-field extract) PTX instruction
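/// For example (an illustrative sketch, not an exhaustive list of the
/// patterns handled below):
///   (and (srl x, start), 2^len - 1)   ==>   bfe x, start, len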
-SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
+bool NVPTXDAGToDAGISel::tryBFE(SDNode *N) {
SDLoc DL(N);
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
@@ -4806,7 +4877,7 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS);
if (!Mask) {
// We need a constant mask on the RHS of the AND
- return NULL;
+ return false;
}
// Extract the mask bits
@@ -4815,7 +4886,7 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
// We *could* handle shifted masks here, but doing so would require an
// 'and' operation to fix up the low-order bits so we would trade
// shr+and for bfe+and, which has the same throughput
- return NULL;
+ return false;
}
// How many bits are in our mask?
@@ -4836,7 +4907,7 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
// Do not handle the case where bits have been shifted in. In theory
// we could handle this, but the cost is likely higher than just
// emitting the srl/and pair.
- return NULL;
+ return false;
}
Start = CurDAG->getTargetConstant(StartVal, DL, MVT::i32);
} else {
@@ -4844,20 +4915,20 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
// was found) is not constant. We could handle this case, but it would
// require run-time logic that would be more expensive than just
// emitting the srl/and pair.
- return NULL;
+ return false;
}
} else {
// Do not handle the case where the LHS of the and is not a shift. While
// it would be trivial to handle this case, it would just transform
// 'and' -> 'bfe', but 'and' has higher throughput.
- return NULL;
+ return false;
}
} else if (N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) {
if (LHS->getOpcode() == ISD::AND) {
ConstantSDNode *ShiftCnst = dyn_cast<ConstantSDNode>(RHS);
if (!ShiftCnst) {
// Shift amount must be constant
- return NULL;
+ return false;
}
uint64_t ShiftAmt = ShiftCnst->getZExtValue();
@@ -4873,7 +4944,7 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(AndRHS);
if (!MaskCnst) {
// Mask must be constant
- return NULL;
+ return false;
}
uint64_t MaskVal = MaskCnst->getZExtValue();
@@ -4893,13 +4964,13 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
NumBits = NumZeros + NumOnes - ShiftAmt;
} else {
// This is not a mask we can handle
- return NULL;
+ return false;
}
if (ShiftAmt < NumZeros) {
// Handling this case would require extra logic that would make this
// transformation non-profitable
- return NULL;
+ return false;
}
Val = AndLHS;
@@ -4919,7 +4990,7 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
ConstantSDNode *ShlCnst = dyn_cast<ConstantSDNode>(ShlRHS);
if (!ShlCnst) {
// Shift amount must be constant
- return NULL;
+ return false;
}
uint64_t InnerShiftAmt = ShlCnst->getZExtValue();
@@ -4927,20 +4998,20 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
ConstantSDNode *ShrCnst = dyn_cast<ConstantSDNode>(ShrRHS);
if (!ShrCnst) {
// Shift amount must be constant
- return NULL;
+ return false;
}
uint64_t OuterShiftAmt = ShrCnst->getZExtValue();
// To avoid extra codegen and be profitable, we need Outer >= Inner
if (OuterShiftAmt < InnerShiftAmt) {
- return NULL;
+ return false;
}
// If the outer shift is more than the type size, we have no bitfield to
// extract (since we also check that the inner shift is <= the outer shift,
// this also implies that the inner shift is < the type size).
if (OuterShiftAmt >= Val.getValueType().getSizeInBits()) {
- return NULL;
+ return false;
}
Start =
@@ -4956,11 +5027,11 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
}
} else {
// No can do...
- return NULL;
+ return false;
}
} else {
// No can do...
- return NULL;
+ return false;
}
@@ -4981,14 +5052,15 @@ SDNode *NVPTXDAGToDAGISel::SelectBFE(SDNode *N) {
}
} else {
// We cannot handle this type
- return NULL;
+ return false;
}
SDValue Ops[] = {
Val, Start, Len
};
- return CurDAG->getMachineNode(Opc, DL, N->getVTList(), Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(Opc, DL, N->getVTList(), Ops));
+ return true;
}
// SelectDirectAddr - Match a direct address for DAG.
@@ -5122,3 +5194,57 @@ bool NVPTXDAGToDAGISel::SelectInlineAsmMemoryOperand(
}
return true;
}
+
+/// GetConvertOpcode - Returns the CVT_ instruction opcode that implements a
+/// conversion from \p SrcTy to \p DestTy.
+unsigned NVPTXDAGToDAGISel::GetConvertOpcode(MVT DestTy, MVT SrcTy,
+ bool IsSigned) {
+ switch (SrcTy.SimpleTy) {
+ default:
+ llvm_unreachable("Unhandled source type");
+ case MVT::i8:
+ switch (DestTy.SimpleTy) {
+ default:
+ llvm_unreachable("Unhandled dest type");
+ case MVT::i16:
+ return IsSigned ? NVPTX::CVT_s16_s8 : NVPTX::CVT_u16_u8;
+ case MVT::i32:
+ return IsSigned ? NVPTX::CVT_s32_s8 : NVPTX::CVT_u32_u8;
+ case MVT::i64:
+ return IsSigned ? NVPTX::CVT_s64_s8 : NVPTX::CVT_u64_u8;
+ }
+ case MVT::i16:
+ switch (DestTy.SimpleTy) {
+ default:
+ llvm_unreachable("Unhandled dest type");
+ case MVT::i8:
+ return IsSigned ? NVPTX::CVT_s8_s16 : NVPTX::CVT_u8_u16;
+ case MVT::i32:
+ return IsSigned ? NVPTX::CVT_s32_s16 : NVPTX::CVT_u32_u16;
+ case MVT::i64:
+ return IsSigned ? NVPTX::CVT_s64_s16 : NVPTX::CVT_u64_u16;
+ }
+ case MVT::i32:
+ switch (DestTy.SimpleTy) {
+ default:
+ llvm_unreachable("Unhandled dest type");
+ case MVT::i8:
+ return IsSigned ? NVPTX::CVT_s8_s32 : NVPTX::CVT_u8_u32;
+ case MVT::i16:
+ return IsSigned ? NVPTX::CVT_s16_s32 : NVPTX::CVT_u16_u32;
+ case MVT::i64:
+ return IsSigned ? NVPTX::CVT_s64_s32 : NVPTX::CVT_u64_u32;
+ }
+ case MVT::i64:
+ switch (DestTy.SimpleTy) {
+ default:
+ llvm_unreachable("Unhandled dest type");
+ case MVT::i8:
+ return IsSigned ? NVPTX::CVT_s8_s64 : NVPTX::CVT_u8_u64;
+ case MVT::i16:
+ return IsSigned ? NVPTX::CVT_s16_s64 : NVPTX::CVT_u16_u64;
+ case MVT::i32:
+ return IsSigned ? NVPTX::CVT_s32_s64 : NVPTX::CVT_u32_u64;
+ }
+ }
+}
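+
+// Usage sketch (illustrative only): for the zext-i8-load-to-i32 case handled
+// in tryLDGLDU above, GetConvertOpcode(MVT::i32, MVT::i8, /*IsSigned=*/false)
+// yields NVPTX::CVT_u32_u8.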
diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
index fe20580c83a2..d53c92f1eff3 100644
--- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
+++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
@@ -21,9 +21,8 @@
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Compiler.h"
-using namespace llvm;
-namespace {
+namespace llvm {
class LLVM_LIBRARY_VISIBILITY NVPTXDAGToDAGISel : public SelectionDAGISel {
const NVPTXTargetMachine &TM;
@@ -54,24 +53,24 @@ private:
// Include the pieces autogenerated from the target description.
#include "NVPTXGenDAGISel.inc"
- SDNode *Select(SDNode *N) override;
- SDNode *SelectIntrinsicNoChain(SDNode *N);
- SDNode *SelectIntrinsicChain(SDNode *N);
- SDNode *SelectTexSurfHandle(SDNode *N);
- SDNode *SelectLoad(SDNode *N);
- SDNode *SelectLoadVector(SDNode *N);
- SDNode *SelectLDGLDU(SDNode *N);
- SDNode *SelectStore(SDNode *N);
- SDNode *SelectStoreVector(SDNode *N);
- SDNode *SelectLoadParam(SDNode *N);
- SDNode *SelectStoreRetval(SDNode *N);
- SDNode *SelectStoreParam(SDNode *N);
- SDNode *SelectAddrSpaceCast(SDNode *N);
- SDNode *SelectTextureIntrinsic(SDNode *N);
- SDNode *SelectSurfaceIntrinsic(SDNode *N);
- SDNode *SelectBFE(SDNode *N);
-
- inline SDValue getI32Imm(unsigned Imm, SDLoc DL) {
+ void Select(SDNode *N) override;
+ bool tryIntrinsicNoChain(SDNode *N);
+ bool tryIntrinsicChain(SDNode *N);
+ void SelectTexSurfHandle(SDNode *N);
+ bool tryLoad(SDNode *N);
+ bool tryLoadVector(SDNode *N);
+ bool tryLDGLDU(SDNode *N);
+ bool tryStore(SDNode *N);
+ bool tryStoreVector(SDNode *N);
+ bool tryLoadParam(SDNode *N);
+ bool tryStoreRetval(SDNode *N);
+ bool tryStoreParam(SDNode *N);
+ void SelectAddrSpaceCast(SDNode *N);
+ bool tryTextureIntrinsic(SDNode *N);
+ bool trySurfaceIntrinsic(SDNode *N);
+ bool tryBFE(SDNode *N);
+
+ inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
}
@@ -94,7 +93,8 @@ private:
bool ChkMemSDNodeAddressSpace(SDNode *N, unsigned int spN) const;
+ static unsigned GetConvertOpcode(MVT DestTy, MVT SrcTy, bool IsSigned);
};
-}
+} // end namespace llvm
#endif
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp
index be735f6c1bce..f28c89cd976a 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -257,15 +257,9 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setOperationAction(ISD::CTLZ, MVT::i16, Legal);
setOperationAction(ISD::CTLZ, MVT::i32, Legal);
setOperationAction(ISD::CTLZ, MVT::i64, Legal);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Legal);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Legal);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Legal);
setOperationAction(ISD::CTTZ, MVT::i16, Expand);
setOperationAction(ISD::CTTZ, MVT::i32, Expand);
setOperationAction(ISD::CTTZ, MVT::i64, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
setOperationAction(ISD::CTPOP, MVT::i16, Legal);
setOperationAction(ISD::CTPOP, MVT::i32, Legal);
setOperationAction(ISD::CTPOP, MVT::i64, Legal);
@@ -273,6 +267,10 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
// PTX does not directly support SELP of i1, so promote to i32 first
setOperationAction(ISD::SELECT, MVT::i1, Custom);
+ // PTX has no single instruction that computes both halves of an i64 multiply.
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
+
// We have some custom DAG combine patterns for these nodes
setTargetDAGCombine(ISD::ADD);
setTargetDAGCombine(ISD::AND);
@@ -310,8 +308,12 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
return "NVPTXISD::DeclareRetParam";
case NVPTXISD::PrintCall:
return "NVPTXISD::PrintCall";
+ case NVPTXISD::PrintConvergentCall:
+ return "NVPTXISD::PrintConvergentCall";
case NVPTXISD::PrintCallUni:
return "NVPTXISD::PrintCallUni";
+ case NVPTXISD::PrintConvergentCallUni:
+ return "NVPTXISD::PrintConvergentCallUni";
case NVPTXISD::LoadParam:
return "NVPTXISD::LoadParam";
case NVPTXISD::LoadParamV2:
@@ -1309,9 +1311,9 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
InFlag };
unsigned opcode = NVPTXISD::StoreParam;
- if (Outs[OIdx].Flags.isZExt())
+ if (Outs[OIdx].Flags.isZExt() && VT.getSizeInBits() < 32)
opcode = NVPTXISD::StoreParamU32;
- else if (Outs[OIdx].Flags.isSExt())
+ else if (Outs[OIdx].Flags.isSExt() && VT.getSizeInBits() < 32)
opcode = NVPTXISD::StoreParamS32;
Chain = DAG.getMemIntrinsicNode(opcode, dl, CopyParamVTs, CopyParamOps,
VT, MachinePointerInfo());
@@ -1351,8 +1353,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDValue srcAddr = DAG.getNode(ISD::ADD, dl, PtrVT, OutVals[OIdx],
DAG.getConstant(curOffset, dl, PtrVT));
SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
- MachinePointerInfo(), false, false, false,
- PartAlign);
+ MachinePointerInfo(), PartAlign);
if (elemtype.getSizeInBits() < 16) {
theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
}
@@ -1435,8 +1436,12 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDValue PrintCallOps[] = {
Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, dl, MVT::i32), InFlag
};
- Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall),
- dl, PrintCallVTs, PrintCallOps);
+ // We model convergent calls as separate opcodes.
+ unsigned Opcode = Func ? NVPTXISD::PrintCallUni : NVPTXISD::PrintCall;
+ if (CLI.IsConvergent)
+ Opcode = Opcode == NVPTXISD::PrintCallUni ? NVPTXISD::PrintConvergentCallUni
+ : NVPTXISD::PrintConvergentCall;
+ Chain = DAG.getNode(Opcode, dl, PrintCallVTs, PrintCallOps);
InFlag = Chain.getValue(1);
// Ops to print out the function name
@@ -1608,9 +1613,11 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
unsigned sz = VTs[i].getSizeInBits();
unsigned AlignI = GreatestCommonDivisor64(RetAlign, Offsets[i]);
- bool needTruncate = sz < 8;
- if (VTs[i].isInteger() && (sz < 8))
+ bool needTruncate = false;
+ if (VTs[i].isInteger() && sz < 8) {
sz = 8;
+ needTruncate = true;
+ }
SmallVector<EVT, 4> LoadRetVTs;
EVT TheLoadType = VTs[i];
@@ -1619,10 +1626,16 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// aggregates.
LoadRetVTs.push_back(MVT::i32);
TheLoadType = MVT::i32;
+ needTruncate = true;
} else if (sz < 16) {
// If loading i1/i8 result, generate
// load i8 (-> i16)
// trunc i16 to i1/i8
+
+ // FIXME: Do we need to set needTruncate to true here, too? We could
+ // not figure out what this branch is for in D17872, so we left it
+ // alone. The comment above about loading i1/i8 may be wrong, as the
+ // branch above seems to cover integers of size < 32.
LoadRetVTs.push_back(MVT::i16);
} else
LoadRetVTs.push_back(Ins[i].VT);
@@ -1678,7 +1691,7 @@ NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
DAG.getIntPtrConstant(j, dl)));
}
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), Ops);
+ return DAG.getBuildVector(Node->getValueType(0), dl, Ops);
}
/// LowerShiftRightParts - Lower SRL_PARTS, SRA_PARTS, which
@@ -1872,10 +1885,9 @@ SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
assert(Node->getValueType(0) == MVT::i1 &&
"Custom lowering for i1 load only");
- SDValue newLD =
- DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
- LD->getPointerInfo(), LD->isVolatile(), LD->isNonTemporal(),
- LD->isInvariant(), LD->getAlignment());
+ SDValue newLD = DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
+ LD->getPointerInfo(), LD->getAlignment(),
+ LD->getMemOperand()->getFlags());
SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
// The legalizer (the caller) is expecting two values from the legalized
// load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
@@ -2002,13 +2014,10 @@ SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
SDValue Tmp2 = ST->getBasePtr();
SDValue Tmp3 = ST->getValue();
assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
- unsigned Alignment = ST->getAlignment();
- bool isVolatile = ST->isVolatile();
- bool isNonTemporal = ST->isNonTemporal();
Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
- SDValue Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2,
- ST->getPointerInfo(), MVT::i8, isNonTemporal,
- isVolatile, Alignment);
+ SDValue Result =
+ DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
+ ST->getAlignment(), ST->getMemOperand()->getFlags());
return Result;
}
@@ -2027,7 +2036,7 @@ NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
// Check to see if the kernel argument is image*_t or sampler_t
-bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
+static bool isImageOrSamplerVal(const Value *arg, const Module *context) {
static const char *const specialTypes[] = { "struct._image2d_t",
"struct._image3d_t",
"struct._sampler_t" };
@@ -2042,16 +2051,17 @@ bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
return false;
auto *STy = dyn_cast<StructType>(PTy->getElementType());
- const std::string TypeName = STy && !STy->isLiteral() ? STy->getName() : "";
+ if (!STy || STy->isLiteral())
+ return false;
return std::find(std::begin(specialTypes), std::end(specialTypes),
- TypeName) != std::end(specialTypes);
+ STy->getName()) != std::end(specialTypes);
}
SDValue NVPTXTargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
const DataLayout &DL = DAG.getDataLayout();
auto PtrVT = getPointerTy(DAG.getDataLayout());
@@ -2171,12 +2181,10 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
ISD::SEXTLOAD : ISD::ZEXTLOAD;
p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, srcAddr,
- MachinePointerInfo(srcValue), partVT, false,
- false, false, partAlign);
+ MachinePointerInfo(srcValue), partVT, partAlign);
} else {
p = DAG.getLoad(partVT, dl, Root, srcAddr,
- MachinePointerInfo(srcValue), false, false, false,
- partAlign);
+ MachinePointerInfo(srcValue), partAlign);
}
if (p.getNode())
p.getNode()->setIROrder(idx + 1);
@@ -2202,9 +2210,9 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
Value *SrcValue = Constant::getNullValue(PointerType::get(
EltVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
SDValue P = DAG.getLoad(
- EltVT, dl, Root, Arg, MachinePointerInfo(SrcValue), false, false,
- true,
- DL.getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())));
+ EltVT, dl, Root, Arg, MachinePointerInfo(SrcValue),
+ DL.getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())),
+ MachineMemOperand::MOInvariant);
if (P.getNode())
P.getNode()->setIROrder(idx + 1);
@@ -2219,9 +2227,9 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
Value *SrcValue = Constant::getNullValue(PointerType::get(
VecVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
SDValue P = DAG.getLoad(
- VecVT, dl, Root, Arg, MachinePointerInfo(SrcValue), false, false,
- true,
- DL.getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
+ VecVT, dl, Root, Arg, MachinePointerInfo(SrcValue),
+ DL.getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())),
+ MachineMemOperand::MOInvariant);
if (P.getNode())
P.getNode()->setIROrder(idx + 1);
@@ -2241,10 +2249,9 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
} else {
// V4 loads
// We have at least 4 elements (<3 x Ty> expands to 4 elements) and
- // the
- // vector will be expanded to a power of 2 elements, so we know we can
- // always round up to the next multiple of 4 when creating the vector
- // loads.
+ // the vector will be expanded to a power of 2 elements, so we know we
+ // can always round up to the next multiple of 4 when creating the
+ // vector loads.
// e.g. 4 elem => 1 ld.v4
// 6 elem => 2 ld.v4
// 8 elem => 2 ld.v4
@@ -2262,9 +2269,9 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Arg,
DAG.getConstant(Ofst, dl, PtrVT));
SDValue P = DAG.getLoad(
- VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
- false, true,
- DL.getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
+ VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue),
+ DL.getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())),
+ MachineMemOperand::MOInvariant);
if (P.getNode())
P.getNode()->setIROrder(idx + 1);
@@ -2298,12 +2305,11 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
ISD::SEXTLOAD : ISD::ZEXTLOAD;
p = DAG.getExtLoad(
ExtOp, dl, Ins[InsIdx].VT, Root, Arg, MachinePointerInfo(srcValue),
- ObjectVT, false, false, false,
+ ObjectVT,
DL.getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
} else {
p = DAG.getLoad(
- Ins[InsIdx].VT, dl, Root, Arg, MachinePointerInfo(srcValue), false,
- false, false,
+ Ins[InsIdx].VT, dl, Root, Arg, MachinePointerInfo(srcValue),
DL.getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
}
if (p.getNode())
@@ -2350,13 +2356,12 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
return Chain;
}
-
SDValue
NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const {
+ const SDLoc &dl, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
const Function *F = MF.getFunction();
Type *RetTy = F->getReturnType();
@@ -3940,9 +3945,8 @@ static SDValue PerformADDCombine(SDNode *N,
SDValue N1 = N->getOperand(1);
// First try with the default operand order.
- SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget,
- OptLevel);
- if (Result.getNode())
+ if (SDValue Result =
+ PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget, OptLevel))
return Result;
// If that didn't work, try again with the operands commuted.
@@ -4139,7 +4143,7 @@ static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
// The RHS can be a demotable op or a constant
if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
- APInt Val = CI->getAPIntValue();
+ const APInt &Val = CI->getAPIntValue();
if (LHSSign == Unsigned) {
return Val.isIntN(OptSize);
} else {
@@ -4230,8 +4234,7 @@ static SDValue PerformMULCombine(SDNode *N,
CodeGenOpt::Level OptLevel) {
if (OptLevel > 0) {
// Try mul.wide combining at OptLevel > 0
- SDValue Ret = TryMULWIDECombine(N, DCI);
- if (Ret.getNode())
+ if (SDValue Ret = TryMULWIDECombine(N, DCI))
return Ret;
}
@@ -4244,8 +4247,7 @@ static SDValue PerformSHLCombine(SDNode *N,
CodeGenOpt::Level OptLevel) {
if (OptLevel > 0) {
// Try mul.wide combining at OptLevel > 0
- SDValue Ret = TryMULWIDECombine(N, DCI);
- if (Ret.getNode())
+ if (SDValue Ret = TryMULWIDECombine(N, DCI))
return Ret;
}
@@ -4368,7 +4370,7 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
SDValue LoadChain = NewLD.getValue(NumElts);
- SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
+ SDValue BuildVec = DAG.getBuildVector(ResVT, DL, ScalarRes);
Results.push_back(BuildVec);
Results.push_back(LoadChain);
@@ -4481,7 +4483,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
SDValue LoadChain = NewLD.getValue(NumElts);
SDValue BuildVec =
- DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
+ DAG.getBuildVector(ResVT, DL, ScalarRes);
Results.push_back(BuildVec);
Results.push_back(LoadChain);
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.h b/lib/Target/NVPTX/NVPTXISelLowering.h
index 60914c1d09b4..1c32232024d1 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -34,7 +34,9 @@ enum NodeType : unsigned {
DeclareRet,
DeclareScalarRet,
PrintCall,
+ PrintConvergentCall,
PrintCallUni,
+ PrintConvergentCallUni,
CallArgBegin,
CallArg,
LastCallArg,
@@ -475,10 +477,11 @@ public:
getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint, MVT VT) const override;
- SDValue LowerFormalArguments(
- SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const override;
+ SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
@@ -488,11 +491,10 @@ public:
unsigned retAlignment,
const ImmutableCallSite *CS) const;
- SDValue
- LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals, SDLoc dl,
- SelectionDAG &DAG) const override;
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl,
+ SelectionDAG &DAG) const override;
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
std::vector<SDValue> &Ops,
diff --git a/lib/Target/NVPTX/NVPTXImageOptimizer.cpp b/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
index aa36b6be7250..8d00bbb5e9c2 100644
--- a/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
+++ b/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
@@ -50,6 +50,9 @@ NVPTXImageOptimizer::NVPTXImageOptimizer()
: FunctionPass(ID) {}
bool NVPTXImageOptimizer::runOnFunction(Function &F) {
+ if (skipFunction(F))
+ return false;
+
bool Changed = false;
InstrToDelete.clear();
diff --git a/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp b/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp
new file mode 100644
index 000000000000..e451d273cf44
--- /dev/null
+++ b/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp
@@ -0,0 +1,586 @@
+//===-- NVPTXInferAddressSpaces.cpp - Infer address spaces ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// CUDA C/C++ includes memory space designation as variable type qualifiers (such
+// as __global__ and __shared__). Knowing the space of a memory access allows
+// CUDA compilers to emit faster PTX loads and stores. For example, a load from
+// shared memory can be translated to `ld.shared` which is roughly 10% faster
+// than a generic `ld` on an NVIDIA Tesla K40c.
+//
+// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
+// compilers must infer the memory space of an address expression from
+// type-qualified variables.
+//
+// LLVM IR uses non-zero, so-called specific address spaces to represent memory
+// spaces (e.g. addrspace(3) means shared memory). The Clang frontend
+// places only type-qualified variables in specific address spaces, and then
+// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
+// (the so-called generic address space) for other instructions to use.
+//
+// For example, Clang translates the following CUDA code
+// __shared__ float a[10];
+// float v = a[i];
+// to
+// %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
+// %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
+// %v = load float, float* %1 ; emits ld.f32
+// @a is in addrspace(3) since it's type-qualified, but its use from %1 is
+// redirected to %0 (the generic version of @a).
+//
+// The optimization implemented in this file propagates specific address spaces
+// from type-qualified variable declarations to their users. For example, it
+// optimizes the above IR to
+// %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
+// %v = load float addrspace(3)* %1 ; emits ld.shared.f32
+// propagating the addrspace(3) from @a to %1. As a result, the NVPTX
+// codegen is able to emit ld.shared.f32 for %v.
+//
+// Address space inference works in two steps. First, it runs a data-flow
+// analysis to prove, for as many generic pointers as possible, that each
+// points to a single specific address space. In the above example, it can
+// prove that %1 only
+// points to addrspace(3). This algorithm was published in
+// CUDA: Compiling and optimizing for a GPU platform
+// Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
+// ICCS 2012
+//
+// Then, address space inference replaces all refinable generic pointers with
+// equivalent specific pointers.
+//
+// The major challenge of implementing this optimization is handling PHINodes,
+// which may create loops in the data flow graph. This brings two complications.
+//
+// First, the data flow analysis in Step 1 needs to be circular. For example,
+// %generic.input = addrspacecast float addrspace(3)* %input to float*
+// loop:
+// %y = phi [ %generic.input, %y2 ]
+// %y2 = getelementptr %y, 1
+// %v = load %y2
+// br ..., label %loop, ...
+// proving %y specific requires proving both %generic.input and %y2 specific,
+// but proving %y2 specific circles back to %y. To address this complication,
+// the data flow analysis operates on a lattice:
+// uninitialized > specific address spaces > generic.
+// All address expressions (our implementation only considers phi, bitcast,
+// addrspacecast, and getelementptr) start with the uninitialized address space.
+// The monotone transfer function moves the address space of a pointer down a
+// lattice path from uninitialized to specific and then to generic. A join
+// operation of two different specific address spaces pushes the expression down
+// to the generic address space. The analysis completes once it reaches a fixed
+// point.
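+//
+// For example, with "shared" and "global" standing for any two distinct
+// specific address spaces:
+//   join(uninitialized, shared) = shared
+//   join(shared, shared)        = shared
+//   join(shared, global)        = generic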
+//
+// Second, IR rewriting in Step 2 also needs to be circular. For example,
+// converting %y to addrspace(3) requires the compiler to know the converted
+// %y2, but converting %y2 needs the converted %y. To address this complication,
+// we break these cycles using "undef" placeholders. When converting an
+// instruction `I` to a new address space, if its operand `Op` is not converted
+// yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
+// For instance, our algorithm first converts %y to
+// %y' = phi float addrspace(3)* [ %input, undef ]
+// Then, it converts %y2 to
+// %y2' = getelementptr %y', 1
+// Finally, it fixes the undef in %y' so that
+// %y' = phi float addrspace(3)* [ %input, %y2' ]
+//
+// TODO: This pass is experimental and not enabled by default. Users can turn it
+// on by setting the -nvptx-use-infer-addrspace flag of llc. We plan to replace
+// NVPTXFavorNonGenericAddrSpaces with this pass shortly.
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "nvptx-infer-addrspace"
+
+#include "NVPTX.h"
+#include "MCTargetDesc/NVPTXBaseInfo.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+
+using namespace llvm;
+
+namespace {
+const unsigned ADDRESS_SPACE_UNINITIALIZED = (unsigned)-1;
+
+using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;
+
+/// \brief A pass that infers specific address spaces for generic pointer
+/// expressions.
+class NVPTXInferAddressSpaces: public FunctionPass {
+public:
+ static char ID;
+
+ NVPTXInferAddressSpaces() : FunctionPass(ID) {}
+
+ bool runOnFunction(Function &F) override;
+
+private:
+ // Returns the new address space of V if updated; otherwise, returns None.
+ Optional<unsigned>
+ updateAddressSpace(const Value &V,
+ const ValueToAddrSpaceMapTy &InferredAddrSpace);
+
+ // Tries to infer the specific address space of each address expression in
+ // Postorder.
+ void inferAddressSpaces(const std::vector<Value *> &Postorder,
+ ValueToAddrSpaceMapTy *InferredAddrSpace);
+
+ // Changes the generic address expressions in function F to point to specific
+ // address spaces if InferredAddrSpace says so. Postorder is the postorder of
+ // all generic address expressions in the use-def graph of function F.
+ bool
+ rewriteWithNewAddressSpaces(const std::vector<Value *> &Postorder,
+ const ValueToAddrSpaceMapTy &InferredAddrSpace,
+ Function *F);
+};
+} // end anonymous namespace
+
+char NVPTXInferAddressSpaces::ID = 0;
+
+namespace llvm {
+void initializeNVPTXInferAddressSpacesPass(PassRegistry &);
+}
+INITIALIZE_PASS(NVPTXInferAddressSpaces, "nvptx-infer-addrspace",
+ "Infer address spaces",
+ false, false)
+
+// Returns true if V is an address expression.
+// TODO: Currently, we consider only phi, bitcast, addrspacecast, and
+// getelementptr operators.
+static bool isAddressExpression(const Value &V) {
+ if (!isa<Operator>(V))
+ return false;
+
+ switch (cast<Operator>(V).getOpcode()) {
+ case Instruction::PHI:
+ case Instruction::BitCast:
+ case Instruction::AddrSpaceCast:
+ case Instruction::GetElementPtr:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Returns the pointer operands of V.
+//
+// Precondition: V is an address expression.
+static SmallVector<Value *, 2> getPointerOperands(const Value &V) {
+ assert(isAddressExpression(V));
+ const Operator& Op = cast<Operator>(V);
+ switch (Op.getOpcode()) {
+ case Instruction::PHI: {
+ auto IncomingValues = cast<PHINode>(Op).incoming_values();
+ return SmallVector<Value *, 2>(IncomingValues.begin(),
+ IncomingValues.end());
+ }
+ case Instruction::BitCast:
+ case Instruction::AddrSpaceCast:
+ case Instruction::GetElementPtr:
+ return {Op.getOperand(0)};
+ default:
+ llvm_unreachable("Unexpected instruction type.");
+ }
+}
+
+// If V is an unvisited generic address expression, appends V to PostorderStack
+// and marks it as visited.
+static void appendsGenericAddressExpressionToPostorderStack(
+ Value *V, std::vector<std::pair<Value *, bool>> *PostorderStack,
+ DenseSet<Value *> *Visited) {
+ assert(V->getType()->isPointerTy());
+ if (isAddressExpression(*V) &&
+ V->getType()->getPointerAddressSpace() ==
+ AddressSpace::ADDRESS_SPACE_GENERIC) {
+ if (Visited->insert(V).second)
+ PostorderStack->push_back(std::make_pair(V, false));
+ }
+}
+
+// Returns all generic address expressions in function F. The elements are
+// ordered in postorder.
+static std::vector<Value *> collectGenericAddressExpressions(Function &F) {
+ // This function implements a non-recursive postorder traversal of a partial
+ // use-def graph of function F.
+ std::vector<std::pair<Value*, bool>> PostorderStack;
+ // The set of visited expressions.
+ DenseSet<Value*> Visited;
+ // We only explore address expressions that are reachable from loads and
+ // stores for now because we aim at generating faster loads and stores.
+ for (Instruction &I : instructions(F)) {
+ if (isa<LoadInst>(I)) {
+ appendsGenericAddressExpressionToPostorderStack(
+ I.getOperand(0), &PostorderStack, &Visited);
+ } else if (isa<StoreInst>(I)) {
+ appendsGenericAddressExpressionToPostorderStack(
+ I.getOperand(1), &PostorderStack, &Visited);
+ }
+ }
+
+ std::vector<Value *> Postorder; // The resultant postorder.
+ while (!PostorderStack.empty()) {
+ // If the operands of the expression on top of the stack are already
+ // explored, adds that expression to the resultant postorder.
+ if (PostorderStack.back().second) {
+ Postorder.push_back(PostorderStack.back().first);
+ PostorderStack.pop_back();
+ continue;
+ }
+ // Otherwise, adds its operands to the stack and explores them.
+ PostorderStack.back().second = true;
+ for (Value *PtrOperand : getPointerOperands(*PostorderStack.back().first)) {
+ appendsGenericAddressExpressionToPostorderStack(
+ PtrOperand, &PostorderStack, &Visited);
+ }
+ }
+ return Postorder;
+}
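+
+// For example (a sketch based on the file-header IR): for "%1 = gep %0, ..."
+// with %0 a generic addrspacecast, the stack evolves as
+//   [(%1,false)] -> [(%1,true), (%0,false)] -> [(%1,true), (%0,true)]
+// so %0 is emitted before %1, i.e. in postorder.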
+
+// A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
+// of OperandUse.get() in the new address space. If the clone is not ready yet,
+// returns an undef in the new address space as a placeholder.
+static Value *operandWithNewAddressSpaceOrCreateUndef(
+ const Use &OperandUse, unsigned NewAddrSpace,
+ const ValueToValueMapTy &ValueWithNewAddrSpace,
+ SmallVectorImpl<const Use *> *UndefUsesToFix) {
+ Value *Operand = OperandUse.get();
+ if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
+ return NewOperand;
+
+ UndefUsesToFix->push_back(&OperandUse);
+ return UndefValue::get(
+ Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace));
+}
+
+// Returns a clone of `I` with its operands converted to those specified in
+// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
+// operand whose address space needs to be modified might not exist in
+// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
+// adds that operand use to UndefUsesToFix so that the caller can fix them later.
+//
+// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
+// from a pointer whose type already matches. Therefore, this function returns a
+// Value* instead of an Instruction*.
+static Value *cloneInstructionWithNewAddressSpace(
+ Instruction *I, unsigned NewAddrSpace,
+ const ValueToValueMapTy &ValueWithNewAddrSpace,
+ SmallVectorImpl<const Use *> *UndefUsesToFix) {
+ Type *NewPtrType =
+ I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);
+
+ if (I->getOpcode() == Instruction::AddrSpaceCast) {
+ Value *Src = I->getOperand(0);
+ // Because `I` is generic, the source address space must be specific.
+ // Therefore, the inferred address space must be the source space, according
+ // to our algorithm.
+ assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
+ if (Src->getType() != NewPtrType)
+ return new BitCastInst(Src, NewPtrType);
+ return Src;
+ }
+
+ // Computes the converted pointer operands.
+ SmallVector<Value *, 4> NewPointerOperands;
+ for (const Use &OperandUse : I->operands()) {
+ if (!OperandUse.get()->getType()->isPointerTy())
+ NewPointerOperands.push_back(nullptr);
+ else
+ NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
+ OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
+ }
+
+ switch (I->getOpcode()) {
+ case Instruction::BitCast:
+ return new BitCastInst(NewPointerOperands[0], NewPtrType);
+ case Instruction::PHI: {
+ assert(I->getType()->isPointerTy());
+ PHINode *PHI = cast<PHINode>(I);
+ PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
+ for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
+ unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
+ NewPHI->addIncoming(NewPointerOperands[OperandNo],
+ PHI->getIncomingBlock(Index));
+ }
+ return NewPHI;
+ }
+ case Instruction::GetElementPtr: {
+ GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
+ GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
+ GEP->getSourceElementType(), NewPointerOperands[0],
+ SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
+ NewGEP->setIsInBounds(GEP->isInBounds());
+ return NewGEP;
+ }
+ default:
+ llvm_unreachable("Unexpected opcode");
+ }
+}
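+
+// For instance (a sketch reusing the file-header example): once %0, the
+// generic view of @a, maps to @a itself, cloning
+//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
+// yields
+//   %1' = gep [10 x float], [10 x float] addrspace(3)* @a, i64 0, i64 %i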
+
+// Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
+// constant expression `CE` with its operands replaced as specified in
+// ValueWithNewAddrSpace.
+static Value *cloneConstantExprWithNewAddressSpace(
+ ConstantExpr *CE, unsigned NewAddrSpace,
+ const ValueToValueMapTy &ValueWithNewAddrSpace) {
+ Type *TargetType =
+ CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);
+
+ if (CE->getOpcode() == Instruction::AddrSpaceCast) {
+ // Because CE is generic, the source address space must be specific.
+ // Therefore, the inferred address space must be the source space according
+ // to our algorithm.
+ assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
+ NewAddrSpace);
+ return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
+ }
+
+ // Computes the operands of the new constant expression.
+ SmallVector<Constant *, 4> NewOperands;
+ for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
+ Constant *Operand = CE->getOperand(Index);
+ // If the address space of `Operand` needs to be modified, the new operand
+ // with the new address space should already be in ValueWithNewAddrSpace
+ // because (1) the constant expressions we consider (i.e. addrspacecast,
+ // bitcast, and getelementptr) do not incur cycles in the data flow graph
+ // and (2) this function is called on constant expressions in postorder.
+ if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
+ NewOperands.push_back(cast<Constant>(NewOperand));
+ } else {
+ // Otherwise, reuses the old operand.
+ NewOperands.push_back(Operand);
+ }
+ }
+
+ if (CE->getOpcode() == Instruction::GetElementPtr) {
+ // Needs to specify the source type while constructing a getelementptr
+ // constant expression.
+ return CE->getWithOperands(
+ NewOperands, TargetType, /*OnlyIfReduced=*/false,
+ NewOperands[0]->getType()->getPointerElementType());
+ }
+
+ return CE->getWithOperands(NewOperands, TargetType);
+}
+
+// Returns a clone of the value `V`, with its operands replaced as specified in
+// ValueWithNewAddrSpace. This function is called on every generic address
+// expression whose address space needs to be modified, in postorder.
+//
+// See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
+static Value *
+cloneValueWithNewAddressSpace(Value *V, unsigned NewAddrSpace,
+ const ValueToValueMapTy &ValueWithNewAddrSpace,
+ SmallVectorImpl<const Use *> *UndefUsesToFix) {
+ // All values in Postorder are generic address expressions.
+ assert(isAddressExpression(*V) &&
+ V->getType()->getPointerAddressSpace() ==
+ AddressSpace::ADDRESS_SPACE_GENERIC);
+
+ if (Instruction *I = dyn_cast<Instruction>(V)) {
+ Value *NewV = cloneInstructionWithNewAddressSpace(
+ I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
+ if (Instruction *NewI = dyn_cast<Instruction>(NewV)) {
+ if (NewI->getParent() == nullptr) {
+ NewI->insertBefore(I);
+ NewI->takeName(I);
+ }
+ }
+ return NewV;
+ }
+
+ return cloneConstantExprWithNewAddressSpace(
+ cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace);
+}
+
+// Defines the join operation on the address space lattice (see the file header
+// comments).
+static unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) {
+ if (AS1 == AddressSpace::ADDRESS_SPACE_GENERIC ||
+ AS2 == AddressSpace::ADDRESS_SPACE_GENERIC)
+ return AddressSpace::ADDRESS_SPACE_GENERIC;
+
+ if (AS1 == ADDRESS_SPACE_UNINITIALIZED)
+ return AS2;
+ if (AS2 == ADDRESS_SPACE_UNINITIALIZED)
+ return AS1;
+
+ // The join of two different specific address spaces is generic.
+ return AS1 == AS2 ? AS1 : (unsigned)AddressSpace::ADDRESS_SPACE_GENERIC;
+}
+
+bool NVPTXInferAddressSpaces::runOnFunction(Function &F) {
+ if (skipFunction(F))
+ return false;
+
+ // Collects all generic address expressions in postorder.
+ std::vector<Value *> Postorder = collectGenericAddressExpressions(F);
+
+ // Runs a data-flow analysis to refine the address spaces of every expression
+ // in Postorder.
+ ValueToAddrSpaceMapTy InferredAddrSpace;
+ inferAddressSpaces(Postorder, &InferredAddrSpace);
+
+ // Changes the address spaces of the generic address expressions that are
+ // inferred to point to a specific address space.
+ return rewriteWithNewAddressSpaces(Postorder, InferredAddrSpace, &F);
+}
+
+void NVPTXInferAddressSpaces::inferAddressSpaces(
+ const std::vector<Value *> &Postorder,
+ ValueToAddrSpaceMapTy *InferredAddrSpace) {
+ SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
+ // Initially, all expressions are in the uninitialized address space.
+ for (Value *V : Postorder)
+ (*InferredAddrSpace)[V] = ADDRESS_SPACE_UNINITIALIZED;
+
+ while (!Worklist.empty()) {
+ Value* V = Worklist.pop_back_val();
+
+ // Tries to update the address space of V according to the address
+ // spaces of its operands.
+ DEBUG(dbgs() << "Updating the address space of\n"
+ << " " << *V << "\n");
+ Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace);
+ if (!NewAS.hasValue())
+ continue;
+ // If an update is made, adds the users of V to the worklist because
+ // their address spaces may change as well.
+ DEBUG(dbgs() << " to " << NewAS.getValue() << "\n");
+ (*InferredAddrSpace)[V] = NewAS.getValue();
+
+ for (Value *User : V->users()) {
+ // Skip if User is already in the worklist.
+ if (Worklist.count(User))
+ continue;
+
+ auto Pos = InferredAddrSpace->find(User);
+ // Our algorithm only updates the address spaces of generic address
+ // expressions, which are those in InferredAddrSpace.
+ if (Pos == InferredAddrSpace->end())
+ continue;
+
+ // Function updateAddressSpace moves the address space down a lattice
+ // path. Therefore, there is nothing to do if User is already inferred as
+ // generic (the bottom element in the lattice).
+ if (Pos->second == AddressSpace::ADDRESS_SPACE_GENERIC)
+ continue;
+
+ Worklist.insert(User);
+ }
+ }
+}
+
+Optional<unsigned> NVPTXInferAddressSpaces::updateAddressSpace(
+ const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) {
+ assert(InferredAddrSpace.count(&V));
+
+ // The new inferred address space equals the join of the address spaces
+ // of all its pointer operands.
+ unsigned NewAS = ADDRESS_SPACE_UNINITIALIZED;
+ for (Value *PtrOperand : getPointerOperands(V)) {
+ unsigned OperandAS;
+ if (InferredAddrSpace.count(PtrOperand))
+ OperandAS = InferredAddrSpace.lookup(PtrOperand);
+ else
+ OperandAS = PtrOperand->getType()->getPointerAddressSpace();
+ NewAS = joinAddressSpaces(NewAS, OperandAS);
+ // join(generic, *) = generic. So we can break if NewAS is already generic.
+ if (NewAS == AddressSpace::ADDRESS_SPACE_GENERIC)
+ break;
+ }
+
+ unsigned OldAS = InferredAddrSpace.lookup(&V);
+ assert(OldAS != AddressSpace::ADDRESS_SPACE_GENERIC);
+ if (OldAS == NewAS)
+ return None;
+ return NewAS;
+}
+
+bool NVPTXInferAddressSpaces::rewriteWithNewAddressSpaces(
+ const std::vector<Value *> &Postorder,
+ const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) {
+ // For each address expression to be modified, creates a clone of it with its
+ // pointer operands converted to the new address space. Since the pointer
+ // operands are converted, the clone is naturally in the new address space by
+ // construction.
+ ValueToValueMapTy ValueWithNewAddrSpace;
+ SmallVector<const Use *, 32> UndefUsesToFix;
+ for (Value* V : Postorder) {
+ unsigned NewAddrSpace = InferredAddrSpace.lookup(V);
+ if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
+ ValueWithNewAddrSpace[V] = cloneValueWithNewAddressSpace(
+ V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
+ }
+ }
+
+ if (ValueWithNewAddrSpace.empty())
+ return false;
+
+ // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
+ for (const Use* UndefUse : UndefUsesToFix) {
+ User *V = UndefUse->getUser();
+ User *NewV = cast<User>(ValueWithNewAddrSpace.lookup(V));
+ unsigned OperandNo = UndefUse->getOperandNo();
+ assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
+ NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
+ }
+
+ // Replaces the uses of the old address expressions with the new ones.
+ for (Value *V : Postorder) {
+ Value *NewV = ValueWithNewAddrSpace.lookup(V);
+ if (NewV == nullptr)
+ continue;
+
+ SmallVector<Use *, 4> Uses;
+ for (Use &U : V->uses())
+ Uses.push_back(&U);
+ DEBUG(dbgs() << "Replacing the uses of " << *V << "\n to\n " << *NewV
+ << "\n");
+ for (Use *U : Uses) {
+ if (isa<LoadInst>(U->getUser()) ||
+ (isa<StoreInst>(U->getUser()) && U->getOperandNo() == 1)) {
+ // If V is used as the pointer operand of a load/store, sets the pointer
+ // operand to NewV. This replacement does not change the element type,
+ // so the resultant load/store is still valid.
+ U->set(NewV);
+ } else if (isa<Instruction>(U->getUser())) {
+ // Otherwise, replaces the use with generic(NewV).
+ // TODO: Some optimization opportunities are missed. For example, in
+ // %0 = icmp eq float* %p, %q
+ // if both p and q are inferred to be shared, we can rewrite %0 as
+ // %0 = icmp eq float addrspace(3)* %new_p, %new_q
+        //   instead of what we currently emit:
+ // %generic_p = addrspacecast float addrspace(3)* %new_p to float*
+ // %generic_q = addrspacecast float addrspace(3)* %new_q to float*
+ // %0 = icmp eq float* %generic_p, %generic_q
+ if (Instruction *I = dyn_cast<Instruction>(V)) {
+ BasicBlock::iterator InsertPos = std::next(I->getIterator());
+ while (isa<PHINode>(InsertPos))
+ ++InsertPos;
+ U->set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
+ } else {
+ U->set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
+ V->getType()));
+ }
+ }
+ }
+ if (V->use_empty())
+ RecursivelyDeleteTriviallyDeadInstructions(V);
+ }
+
+ return true;
+}
+
+FunctionPass *llvm::createNVPTXInferAddressSpacesPass() {
+ return new NVPTXInferAddressSpaces();
+}
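For reference, the fixed point computed by inferAddressSpaces above can be reproduced in isolation: start every expression at the uninitialized (top) element, recompute each from its operands, and re-enqueue users on change. A self-contained sketch over a toy def-use graph (the graph shape, node ids, and numeric constants are illustrative assumptions, not part of the pass):

    #include <cstdio>
    #include <map>
    #include <set>
    #include <vector>

    // 0 = generic (bottom), ~0u = uninitialized (top); others are specific.
    static unsigned joinAS(unsigned A, unsigned B) {
      if (A == 0u || B == 0u) return 0u;
      if (A == ~0u) return B;
      if (B == ~0u) return A;
      return A == B ? A : 0u;
    }

    int main() {
      // Toy expression graph: node -> pointer operands. Nodes 0 and 1 are
      // leaves whose spaces are known up front (both shared = 3 here).
      std::map<int, std::vector<int>> Ops = {{0, {}}, {1, {}}, {2, {0}}, {3, {0, 1}}};
      std::map<int, std::vector<int>> Users = {{0, {2, 3}}, {1, {3}}, {2, {}}, {3, {}}};
      std::map<int, unsigned> AS = {{0, 3u}, {1, 3u}, {2, ~0u}, {3, ~0u}};

      std::set<int> Worklist = {2, 3};
      while (!Worklist.empty()) {
        int V = *Worklist.begin();
        Worklist.erase(Worklist.begin());
        unsigned New = ~0u;
        for (int Op : Ops[V])
          New = joinAS(New, AS[Op]);
        if (New == AS[V])
          continue;                  // no change: nothing to propagate
        AS[V] = New;
        for (int U : Users[V])
          Worklist.insert(U);        // users may change too
      }
      std::printf("AS(2)=%u AS(3)=%u\n", AS[2], AS[3]); // both 3 (shared)
    }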
diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.cpp b/lib/Target/NVPTX/NVPTXInstrInfo.cpp
index 9f3cf4551955..0c7c6cbc4512 100644
--- a/lib/Target/NVPTX/NVPTXInstrInfo.cpp
+++ b/lib/Target/NVPTX/NVPTXInstrInfo.cpp
@@ -30,9 +30,10 @@ void NVPTXInstrInfo::anchor() {}
NVPTXInstrInfo::NVPTXInstrInfo() : NVPTXGenInstrInfo(), RegInfo() {}
-void NVPTXInstrInfo::copyPhysReg(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg, bool KillSrc) const {
+void NVPTXInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetRegisterClass *DestRC = MRI.getRegClass(DestReg);
const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
@@ -111,7 +112,7 @@ bool NVPTXInstrInfo::isStoreInstr(const MachineInstr &MI,
bool NVPTXInstrInfo::CanTailMerge(const MachineInstr *MI) const {
unsigned addrspace = 0;
- if (MI->getOpcode() == NVPTX::INT_CUDA_SYNCTHREADS)
+ if (MI->getOpcode() == NVPTX::INT_BARRIER0)
return false;
if (isLoadInstr(*MI, addrspace))
if (addrspace == NVPTX::PTXLdStInstCode::SHARED)
@@ -145,26 +146,28 @@ bool NVPTXInstrInfo::CanTailMerge(const MachineInstr *MI) const {
/// Note that RemoveBranch and InsertBranch must be implemented to support
/// cases where this method returns success.
///
-bool NVPTXInstrInfo::AnalyzeBranch(
- MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond, bool AllowModify) const {
+bool NVPTXInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
// If the block has no terminators, it just falls into the block after it.
MachineBasicBlock::iterator I = MBB.end();
- if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
+ if (I == MBB.begin() || !isUnpredicatedTerminator(*--I))
return false;
// Get the last instruction in the block.
- MachineInstr *LastInst = I;
+ MachineInstr &LastInst = *I;
// If there is only one terminator instruction, process it.
- if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
- if (LastInst->getOpcode() == NVPTX::GOTO) {
- TBB = LastInst->getOperand(0).getMBB();
+ if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
+ if (LastInst.getOpcode() == NVPTX::GOTO) {
+ TBB = LastInst.getOperand(0).getMBB();
return false;
- } else if (LastInst->getOpcode() == NVPTX::CBranch) {
+ } else if (LastInst.getOpcode() == NVPTX::CBranch) {
// Block ends with fall-through condbranch.
- TBB = LastInst->getOperand(1).getMBB();
- Cond.push_back(LastInst->getOperand(0));
+ TBB = LastInst.getOperand(1).getMBB();
+ Cond.push_back(LastInst.getOperand(0));
return false;
}
// Otherwise, don't know what this is.
@@ -172,26 +175,26 @@ bool NVPTXInstrInfo::AnalyzeBranch(
}
// Get the instruction before it if it's a terminator.
- MachineInstr *SecondLastInst = I;
+ MachineInstr &SecondLastInst = *I;
// If there are three terminators, we don't know what sort of block this is.
- if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
+ if (I != MBB.begin() && isUnpredicatedTerminator(*--I))
return true;
// If the block ends with NVPTX::GOTO and NVPTX:CBranch, handle it.
- if (SecondLastInst->getOpcode() == NVPTX::CBranch &&
- LastInst->getOpcode() == NVPTX::GOTO) {
- TBB = SecondLastInst->getOperand(1).getMBB();
- Cond.push_back(SecondLastInst->getOperand(0));
- FBB = LastInst->getOperand(0).getMBB();
+ if (SecondLastInst.getOpcode() == NVPTX::CBranch &&
+ LastInst.getOpcode() == NVPTX::GOTO) {
+ TBB = SecondLastInst.getOperand(1).getMBB();
+ Cond.push_back(SecondLastInst.getOperand(0));
+ FBB = LastInst.getOperand(0).getMBB();
return false;
}
// If the block ends with two NVPTX:GOTOs, handle it. The second one is not
// executed, so remove it.
- if (SecondLastInst->getOpcode() == NVPTX::GOTO &&
- LastInst->getOpcode() == NVPTX::GOTO) {
- TBB = SecondLastInst->getOperand(0).getMBB();
+ if (SecondLastInst.getOpcode() == NVPTX::GOTO &&
+ LastInst.getOpcode() == NVPTX::GOTO) {
+ TBB = SecondLastInst.getOperand(0).getMBB();
I = LastInst;
if (AllowModify)
I->eraseFromParent();
@@ -226,9 +229,11 @@ unsigned NVPTXInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return 2;
}
-unsigned NVPTXInstrInfo::InsertBranch(
- MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
- ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
+unsigned NVPTXInstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.h b/lib/Target/NVPTX/NVPTXInstrInfo.h
index 3e407223f010..050bf12fe859 100644
--- a/lib/Target/NVPTX/NVPTXInstrInfo.h
+++ b/lib/Target/NVPTX/NVPTXInstrInfo.h
@@ -49,9 +49,9 @@ public:
* const TargetRegisterClass *RC) const;
*/
- void copyPhysReg(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg, bool KillSrc) const override;
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
virtual bool isMoveInstr(const MachineInstr &MI, unsigned &SrcReg,
unsigned &DestReg) const;
bool isLoadInstr(const MachineInstr &MI, unsigned &AddrSpace) const;
@@ -59,13 +59,14 @@ public:
virtual bool CanTailMerge(const MachineInstr *MI) const;
// Branch analysis.
- bool AnalyzeBranch(
- MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond, bool AllowModify) const override;
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
- unsigned InsertBranch(
- MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
- ArrayRef<MachineOperand> Cond, DebugLoc DL) const override;
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const override;
unsigned getLdStCodeAddrSpace(const MachineInstr &MI) const {
return MI.getOperand(2).getImm();
}
diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.td b/lib/Target/NVPTX/NVPTXInstrInfo.td
index 6fdd60f3ed2d..c158cc6cdab2 100644
--- a/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -14,7 +14,9 @@
include "NVPTXInstrFormats.td"
// A NOP instruction
-def NOP : NVPTXInst<(outs), (ins), "", []>;
+let hasSideEffects = 0 in {
+ def NOP : NVPTXInst<(outs), (ins), "", []>;
+}
// List of vector specific properties
def isVecLD : VecInstTypeEnum<1>;
@@ -162,130 +164,146 @@ def hasPTX31 : Predicate<"Subtarget->getPTXVersion() >= 31">;
// Some Common Instruction Class Templates
//===----------------------------------------------------------------------===//
+// Template for instructions which take three int64, int32, or int16 args.
+// The instructions are named "<OpcStr><Width>" (e.g. "add.s64").
multiclass I3<string OpcStr, SDNode OpNode> {
- def i64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
- Int64Regs:$b))]>;
- def i64ri : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
- def i32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
- Int32Regs:$b))]>;
- def i32ri : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
- def i16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
- Int16Regs:$b))]>;
- def i16ri : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
+ def i64rr :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
+ def i64ri :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+ def i32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def i32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def i16rr :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
+ def i16ri :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
}
+// Template for instructions which take three int32 args. The instructions are
+// named "<OpcStr>.s32" (e.g. "addc.cc.s32").
multiclass ADD_SUB_INT_32<string OpcStr, SDNode OpNode> {
- def i32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a,
- Int32Regs:$b),
- !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
- Int32Regs:$b))]>;
- def i32ri : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def i32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def i32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
}
+// Template for instructions which take three fp64 or fp32 args. The
+// instructions are named "<OpcStr>.f<Width>" (e.g. "add.f64").
+//
+// Also defines ftz (flush subnormal inputs and results to sign-preserving
+// zero) variants for fp32 functions.
multiclass F3<string OpcStr, SDNode OpNode> {
- def f64rr : NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst,
- (OpNode Float64Regs:$a, Float64Regs:$b))]>,
- Requires<[allowFMA]>;
- def f64ri : NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst,
- (OpNode Float64Regs:$a, fpimm:$b))]>,
- Requires<[allowFMA]>;
- def f32rr_ftz : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst,
- (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[allowFMA, doF32FTZ]>;
- def f32ri_ftz : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst,
- (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[allowFMA, doF32FTZ]>;
- def f32rr : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst,
- (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[allowFMA]>;
- def f32ri : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst,
- (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[allowFMA]>;
+ def f64rr :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
+ Requires<[allowFMA]>;
+ def f64ri :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
+ Requires<[allowFMA]>;
+ def f32rr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[allowFMA, doF32FTZ]>;
+ def f32ri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[allowFMA, doF32FTZ]>;
+ def f32rr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[allowFMA]>;
+ def f32ri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[allowFMA]>;
}
+// Same as F3, but defines ".rn" variants (round to nearest even).
multiclass F3_rn<string OpcStr, SDNode OpNode> {
- def f64rr : NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst,
- (OpNode Float64Regs:$a, Float64Regs:$b))]>,
- Requires<[noFMA]>;
- def f64ri : NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst,
- (OpNode Float64Regs:$a, fpimm:$b))]>,
- Requires<[noFMA]>;
- def f32rr_ftz : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst,
- (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[noFMA, doF32FTZ]>;
- def f32ri_ftz : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst,
- (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[noFMA, doF32FTZ]>;
- def f32rr : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst,
- (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[noFMA]>;
- def f32ri : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst,
- (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[noFMA]>;
+ def f64rr :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
+ Requires<[noFMA]>;
+ def f64ri :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
+ Requires<[noFMA]>;
+ def f32rr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[noFMA, doF32FTZ]>;
+ def f32ri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[noFMA, doF32FTZ]>;
+ def f32rr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[noFMA]>;
+ def f32ri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[noFMA]>;
}
+// Template for operations which take two f32 or f64 operands. Provides three
+// instructions: <OpcStr>.f64, <OpcStr>.f32, and <OpcStr>.ftz.f32 (flush
+// subnormal inputs and results to zero).
multiclass F2<string OpcStr, SDNode OpNode> {
- def f64 : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$a),
- !strconcat(OpcStr, ".f64 \t$dst, $a;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a))]>;
+ def f64 : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$a),
+ !strconcat(OpcStr, ".f64 \t$dst, $a;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a))]>;
def f32_ftz : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>,
- Requires<[doF32FTZ]>;
- def f32 : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
- !strconcat(OpcStr, ".f32 \t$dst, $a;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>;
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>,
+ Requires<[doF32FTZ]>;
+ def f32 : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
+ !strconcat(OpcStr, ".f32 \t$dst, $a;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>;
}
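The ftz variants above flush subnormal f32 inputs and results to sign-preserving zero. A rough C++ model of that flushing, using only the standard <cmath> classification helpers (the function name is ours):

    #include <cmath>

    // Flush a subnormal float to zero, preserving the sign bit; this is what
    // the ".ftz" modifier does to each f32 input and result.
    float flushToZero(float X) {
      return std::fpclassify(X) == FP_SUBNORMAL ? std::copysign(0.0f, X) : X;
    }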
//===----------------------------------------------------------------------===//
@@ -293,160 +311,251 @@ multiclass F2<string OpcStr, SDNode OpNode> {
//===----------------------------------------------------------------------===//
//-----------------------------------
-// General Type Conversion
+// Type Conversion
//-----------------------------------
let hasSideEffects = 0 in {
-// Generate a cvt to the given type from all possible types.
-// Each instance takes a CvtMode immediate that defines the conversion mode to
-// use. It can be CvtNONE to omit a conversion mode.
-multiclass CVT_FROM_ALL<string FromName, RegisterClass RC> {
- def _s16 : NVPTXInst<(outs RC:$dst),
- (ins Int16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".s16\t$dst, $src;"),
- []>;
- def _u16 : NVPTXInst<(outs RC:$dst),
- (ins Int16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".u16\t$dst, $src;"),
- []>;
- def _f16 : NVPTXInst<(outs RC:$dst),
- (ins Int16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".f16\t$dst, $src;"),
- []>;
- def _s32 : NVPTXInst<(outs RC:$dst),
- (ins Int32Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".s32\t$dst, $src;"),
- []>;
- def _u32 : NVPTXInst<(outs RC:$dst),
- (ins Int32Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".u32\t$dst, $src;"),
- []>;
- def _s64 : NVPTXInst<(outs RC:$dst),
- (ins Int64Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".s64\t$dst, $src;"),
- []>;
- def _u64 : NVPTXInst<(outs RC:$dst),
- (ins Int64Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".u64\t$dst, $src;"),
- []>;
- def _f32 : NVPTXInst<(outs RC:$dst),
- (ins Float32Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".f32\t$dst, $src;"),
- []>;
- def _f64 : NVPTXInst<(outs RC:$dst),
- (ins Float64Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".f64\t$dst, $src;"),
- []>;
-}
-
-// Generate a cvt to all possible types.
-defm CVT_s16 : CVT_FROM_ALL<"s16", Int16Regs>;
-defm CVT_u16 : CVT_FROM_ALL<"u16", Int16Regs>;
-defm CVT_f16 : CVT_FROM_ALL<"f16", Int16Regs>;
-defm CVT_s32 : CVT_FROM_ALL<"s32", Int32Regs>;
-defm CVT_u32 : CVT_FROM_ALL<"u32", Int32Regs>;
-defm CVT_s64 : CVT_FROM_ALL<"s64", Int64Regs>;
-defm CVT_u64 : CVT_FROM_ALL<"u64", Int64Regs>;
-defm CVT_f32 : CVT_FROM_ALL<"f32", Float32Regs>;
-defm CVT_f64 : CVT_FROM_ALL<"f64", Float64Regs>;
-
-// This set of cvt is different from the above. The type of the source
-// and target are the same.
-//
-def CVT_INREG_s16_s8 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
- "cvt.s16.s8 \t$dst, $src;", []>;
-def CVT_INREG_s32_s8 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
- "cvt.s32.s8 \t$dst, $src;", []>;
-def CVT_INREG_s32_s16 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
- "cvt.s32.s16 \t$dst, $src;", []>;
-def CVT_INREG_s64_s8 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "cvt.s64.s8 \t$dst, $src;", []>;
-def CVT_INREG_s64_s16 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "cvt.s64.s16 \t$dst, $src;", []>;
-def CVT_INREG_s64_s32 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "cvt.s64.s32 \t$dst, $src;", []>;
+ // Generate a cvt to the given type from all possible types. Each instance
+ // takes a CvtMode immediate that defines the conversion mode to use. It can
+ // be CvtNONE to omit a conversion mode.
+ multiclass CVT_FROM_ALL<string FromName, RegisterClass RC> {
+ def _s8 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s8\t$dst, $src;"), []>;
+ def _u8 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u8\t$dst, $src;"), []>;
+ def _s16 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s16\t$dst, $src;"), []>;
+ def _u16 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u16\t$dst, $src;"), []>;
+ def _f16 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".f16\t$dst, $src;"), []>;
+ def _s32 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s32\t$dst, $src;"), []>;
+ def _u32 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u32\t$dst, $src;"), []>;
+ def _s64 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int64Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s64\t$dst, $src;"), []>;
+ def _u64 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int64Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u64\t$dst, $src;"), []>;
+ def _f32 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Float32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".f32\t$dst, $src;"), []>;
+ def _f64 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Float64Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".f64\t$dst, $src;"), []>;
+ }
+
+ // Generate cvts from all types to all types.
+ defm CVT_s8 : CVT_FROM_ALL<"s8", Int16Regs>;
+ defm CVT_u8 : CVT_FROM_ALL<"u8", Int16Regs>;
+ defm CVT_s16 : CVT_FROM_ALL<"s16", Int16Regs>;
+ defm CVT_u16 : CVT_FROM_ALL<"u16", Int16Regs>;
+ defm CVT_f16 : CVT_FROM_ALL<"f16", Int16Regs>;
+ defm CVT_s32 : CVT_FROM_ALL<"s32", Int32Regs>;
+ defm CVT_u32 : CVT_FROM_ALL<"u32", Int32Regs>;
+ defm CVT_s64 : CVT_FROM_ALL<"s64", Int64Regs>;
+ defm CVT_u64 : CVT_FROM_ALL<"u64", Int64Regs>;
+ defm CVT_f32 : CVT_FROM_ALL<"f32", Float32Regs>;
+ defm CVT_f64 : CVT_FROM_ALL<"f64", Float64Regs>;
+
+ // These cvts are different from those above: The source and dest registers
+ // are of the same type.
+ def CVT_INREG_s16_s8 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "cvt.s16.s8 \t$dst, $src;", []>;
+ def CVT_INREG_s32_s8 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "cvt.s32.s8 \t$dst, $src;", []>;
+ def CVT_INREG_s32_s16 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "cvt.s32.s16 \t$dst, $src;", []>;
+ def CVT_INREG_s64_s8 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "cvt.s64.s8 \t$dst, $src;", []>;
+ def CVT_INREG_s64_s16 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "cvt.s64.s16 \t$dst, $src;", []>;
+ def CVT_INREG_s64_s32 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "cvt.s64.s32 \t$dst, $src;", []>;
}
//-----------------------------------
// Integer Arithmetic
//-----------------------------------
+// Template for xor masquerading as int1 arithmetic.
multiclass ADD_SUB_i1<SDNode OpNode> {
def _rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
- "xor.pred \t$dst, $a, $b;",
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
+ "xor.pred \t$dst, $a, $b;",
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
def _ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
- "xor.pred \t$dst, $a, $b;",
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, (imm):$b))]>;
+ "xor.pred \t$dst, $a, $b;",
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, (imm):$b))]>;
}
+// int1 addition and subtraction are both just xor.
defm ADD_i1 : ADD_SUB_i1<add>;
defm SUB_i1 : ADD_SUB_i1<sub>;
-
+// int16, int32, and int64 signed addition. Since NVPTX uses two's complement,
+// we also use these for unsigned arithmetic.
defm ADD : I3<"add.s", add>;
defm SUB : I3<"sub.s", sub>;
+// int32 addition and subtraction with carry-out.
+// FIXME: PTX 4.3 adds a 64-bit add.cc (and maybe also 64-bit addc.cc?).
defm ADDCC : ADD_SUB_INT_32<"add.cc", addc>;
defm SUBCC : ADD_SUB_INT_32<"sub.cc", subc>;
+// int32 addition and subtraction with carry-in and carry-out.
defm ADDCCC : ADD_SUB_INT_32<"addc.cc", adde>;
defm SUBCCC : ADD_SUB_INT_32<"subc.cc", sube>;
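addc/adde map to PTX add.cc/addc.cc: one add produces a carry-out, the next consumes it, which is how a wide add is built from 32-bit registers. A small C++ model of the two-step sequence (names and layout are illustrative):

    #include <cstdint>

    // Model of "add.cc.u32" followed by "addc.u32": add the low halves and
    // record the carry-out, then fold the carry into the high-half add.
    uint64_t add64_via_32(uint32_t ALo, uint32_t AHi, uint32_t BLo, uint32_t BHi) {
      uint32_t Lo = ALo + BLo;            // add.cc.u32: carries on wraparound
      uint32_t Carry = Lo < ALo ? 1 : 0;  // carry-out of the low add
      uint32_t Hi = AHi + BHi + Carry;    // addc.u32: consumes the carry
      return ((uint64_t)Hi << 32) | Lo;
    }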
-//mul.wide PTX instruction
+defm MULT : I3<"mul.lo.s", mul>;
+
+defm MULTHS : I3<"mul.hi.s", mulhs>;
+defm MULTHU : I3<"mul.hi.u", mulhu>;
+
+defm SDIV : I3<"div.s", sdiv>;
+defm UDIV : I3<"div.u", udiv>;
+
+// The ri versions of rem.s and rem.u won't be selected; DAGCombiner::visitSREM
+// and visitUREM will lower them.
+defm SREM : I3<"rem.s", srem>;
+defm UREM : I3<"rem.u", urem>;
+
+
+//
+// Wide multiplication
+//
+def MULWIDES64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ "mul.wide.s32 \t$dst, $a, $b;", []>;
+def MULWIDES64Imm :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ "mul.wide.s32 \t$dst, $a, $b;", []>;
+def MULWIDES64Imm64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
+ "mul.wide.s32 \t$dst, $a, $b;", []>;
+
+def MULWIDEU64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ "mul.wide.u32 \t$dst, $a, $b;", []>;
+def MULWIDEU64Imm :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ "mul.wide.u32 \t$dst, $a, $b;", []>;
+def MULWIDEU64Imm64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
+ "mul.wide.u32 \t$dst, $a, $b;", []>;
+
+def MULWIDES32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ "mul.wide.s16 \t$dst, $a, $b;", []>;
+def MULWIDES32Imm :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ "mul.wide.s16 \t$dst, $a, $b;", []>;
+def MULWIDES32Imm32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ "mul.wide.s16 \t$dst, $a, $b;", []>;
+
+def MULWIDEU32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
+def MULWIDEU32Imm :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
+def MULWIDEU32Imm32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
+
+def SDTMulWide : SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>]>;
+def mul_wide_signed : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide>;
+def mul_wide_unsigned : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide>;
+
+// Matchers for signed, unsigned mul.wide ISD nodes.
+def : Pat<(i32 (mul_wide_signed Int16Regs:$a, Int16Regs:$b)),
+ (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_signed Int16Regs:$a, imm:$b)),
+ (MULWIDES32Imm Int16Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, Int16Regs:$b)),
+ (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, imm:$b)),
+ (MULWIDEU32Imm Int16Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+
+def : Pat<(i64 (mul_wide_signed Int32Regs:$a, Int32Regs:$b)),
+ (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_signed Int32Regs:$a, imm:$b)),
+ (MULWIDES64Imm Int32Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, Int32Regs:$b)),
+ (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, imm:$b)),
+ (MULWIDEU64Imm Int32Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+
+// Predicates used for converting some patterns to mul.wide.
def SInt32Const : PatLeaf<(imm), [{
const APInt &v = N->getAPIntValue();
- if (v.isSignedIntN(32))
- return true;
- return false;
+ return v.isSignedIntN(32);
}]>;
def UInt32Const : PatLeaf<(imm), [{
const APInt &v = N->getAPIntValue();
- if (v.isIntN(32))
- return true;
- return false;
+ return v.isIntN(32);
}]>;
def SInt16Const : PatLeaf<(imm), [{
const APInt &v = N->getAPIntValue();
- if (v.isSignedIntN(16))
- return true;
- return false;
+ return v.isSignedIntN(16);
}]>;
def UInt16Const : PatLeaf<(imm), [{
const APInt &v = N->getAPIntValue();
- if (v.isIntN(16))
- return true;
- return false;
+ return v.isIntN(16);
}]>;
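All four predicates above reduce to APInt::isSignedIntN / isIntN, i.e. whether the immediate fits in N bits, signed or unsigned. For reference, equivalent checks on plain integers, valid for N < 64 (a sketch, not the APInt implementation):

    #include <cstdint>

    // True iff V fits in an N-bit two's-complement integer.
    bool fitsSignedIntN(int64_t V, unsigned N) {
      return V >= -(INT64_C(1) << (N - 1)) && V < (INT64_C(1) << (N - 1));
    }

    // True iff V fits in an N-bit unsigned integer.
    bool fitsIntN(uint64_t V, unsigned N) {
      return V < (UINT64_C(1) << N);
    }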
def Int5Const : PatLeaf<(imm), [{
+ // Check if 0 <= v < 32; only then will the result of (x << v) be an int32.
const APInt &v = N->getAPIntValue();
- // Check if 0 <= v < 32
- // Only then the result from (x << v) will be i32
- if (v.sge(0) && v.slt(32))
- return true;
- return false;
+ return v.sge(0) && v.slt(32);
}]>;
def Int4Const : PatLeaf<(imm), [{
+ // Check if 0 <= v < 16; only then will the result of (x << v) be an int16.
const APInt &v = N->getAPIntValue();
- // Check if 0 <= v < 16
- // Only then the result from (x << v) will be i16
- if (v.sge(0) && v.slt(16))
- return true;
- return false;
+ return v.sge(0) && v.slt(16);
}]>;
def SHL2MUL32 : SDNodeXForm<imm, [{
@@ -461,215 +570,133 @@ def SHL2MUL16 : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i16);
}]>;
-def MULWIDES64
- : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- "mul.wide.s32 \t$dst, $a, $b;", []>;
-def MULWIDES64Imm
- : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- "mul.wide.s32 \t$dst, $a, $b;", []>;
-def MULWIDES64Imm64
- : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
- "mul.wide.s32 \t$dst, $a, $b;", []>;
-
-def MULWIDEU64
- : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- "mul.wide.u32 \t$dst, $a, $b;", []>;
-def MULWIDEU64Imm
- : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- "mul.wide.u32 \t$dst, $a, $b;", []>;
-def MULWIDEU64Imm64
- : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
- "mul.wide.u32 \t$dst, $a, $b;", []>;
-
-def MULWIDES32
- : NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- "mul.wide.s16 \t$dst, $a, $b;", []>;
-def MULWIDES32Imm
- : NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- "mul.wide.s16 \t$dst, $a, $b;", []>;
-def MULWIDES32Imm32
- : NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
- "mul.wide.s16 \t$dst, $a, $b;", []>;
-
-def MULWIDEU32
- : NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- "mul.wide.u16 \t$dst, $a, $b;", []>;
-def MULWIDEU32Imm
- : NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- "mul.wide.u16 \t$dst, $a, $b;", []>;
-def MULWIDEU32Imm32
- : NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
- "mul.wide.u16 \t$dst, $a, $b;", []>;
-
+// Convert "sign/zero-extend, then shift left by an immediate" to mul.wide.
def : Pat<(shl (sext Int32Regs:$a), (i32 Int5Const:$b)),
(MULWIDES64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
- Requires<[doMulWide]>;
+ Requires<[doMulWide]>;
def : Pat<(shl (zext Int32Regs:$a), (i32 Int5Const:$b)),
(MULWIDEU64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
- Requires<[doMulWide]>;
+ Requires<[doMulWide]>;
def : Pat<(shl (sext Int16Regs:$a), (i16 Int4Const:$b)),
(MULWIDES32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
- Requires<[doMulWide]>;
+ Requires<[doMulWide]>;
def : Pat<(shl (zext Int16Regs:$a), (i16 Int4Const:$b)),
(MULWIDEU32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
- Requires<[doMulWide]>;
+ Requires<[doMulWide]>;
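The SHL2MUL transforms turn the shift amount v into the multiplier 1 << v, so an extend-then-shift becomes one widening multiply. The identity being relied on, checked in plain C++ (a worked example, not compiler code):

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t X = 123456;
      // shl (sext x), 5  ==  mul.wide.s32 x, 32   (because 1 << 5 == 32)
      assert(((int64_t)X << 5) == (int64_t)X * 32);

      uint16_t Y = 40000;
      // shl (zext y), 4  ==  mul.wide.u16 y, 16
      assert(((uint32_t)Y << 4) == (uint32_t)Y * 16u);
      return 0;
    }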
+// Convert "sign/zero-extend then multiply" to mul.wide.
def : Pat<(mul (sext Int32Regs:$a), (sext Int32Regs:$b)),
(MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
- Requires<[doMulWide]>;
+ Requires<[doMulWide]>;
def : Pat<(mul (sext Int32Regs:$a), (i64 SInt32Const:$b)),
(MULWIDES64Imm64 Int32Regs:$a, (i64 SInt32Const:$b))>,
- Requires<[doMulWide]>;
+ Requires<[doMulWide]>;
def : Pat<(mul (zext Int32Regs:$a), (zext Int32Regs:$b)),
(MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
Requires<[doMulWide]>;
def : Pat<(mul (zext Int32Regs:$a), (i64 UInt32Const:$b)),
(MULWIDEU64Imm64 Int32Regs:$a, (i64 UInt32Const:$b))>,
- Requires<[doMulWide]>;
+ Requires<[doMulWide]>;
def : Pat<(mul (sext Int16Regs:$a), (sext Int16Regs:$b)),
(MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
Requires<[doMulWide]>;
def : Pat<(mul (sext Int16Regs:$a), (i32 SInt16Const:$b)),
(MULWIDES32Imm32 Int16Regs:$a, (i32 SInt16Const:$b))>,
- Requires<[doMulWide]>;
+ Requires<[doMulWide]>;
def : Pat<(mul (zext Int16Regs:$a), (zext Int16Regs:$b)),
(MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
Requires<[doMulWide]>;
def : Pat<(mul (zext Int16Regs:$a), (i32 UInt16Const:$b)),
(MULWIDEU32Imm32 Int16Regs:$a, (i32 UInt16Const:$b))>,
- Requires<[doMulWide]>;
-
-
-def SDTMulWide
- : SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>]>;
-def mul_wide_signed
- : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide>;
-def mul_wide_unsigned
- : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide>;
-
-def : Pat<(i32 (mul_wide_signed Int16Regs:$a, Int16Regs:$b)),
- (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
Requires<[doMulWide]>;
-def : Pat<(i32 (mul_wide_signed Int16Regs:$a, imm:$b)),
- (MULWIDES32Imm Int16Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, Int16Regs:$b)),
- (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, imm:$b)),
- (MULWIDEU32Imm Int16Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-
-def : Pat<(i64 (mul_wide_signed Int32Regs:$a, Int32Regs:$b)),
- (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_signed Int32Regs:$a, imm:$b)),
- (MULWIDES64Imm Int32Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, Int32Regs:$b)),
- (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, imm:$b)),
- (MULWIDEU64Imm Int32Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-
-defm MULT : I3<"mul.lo.s", mul>;
-
-defm MULTHS : I3<"mul.hi.s", mulhs>;
-defm MULTHU : I3<"mul.hi.u", mulhu>;
-
-defm SDIV : I3<"div.s", sdiv>;
-defm UDIV : I3<"div.u", udiv>;
-
-defm SREM : I3<"rem.s", srem>;
-// The ri version will not be selected as DAGCombiner::visitSREM will lower it.
-defm UREM : I3<"rem.u", urem>;
-// The ri version will not be selected as DAGCombiner::visitUREM will lower it.
-
-def SDTIMAD
- : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>,
- SDTCisInt<2>, SDTCisSameAs<0, 2>,
- SDTCisSameAs<0, 3>]>;
-def imad
- : SDNode<"NVPTXISD::IMAD", SDTIMAD>;
-
-def MAD16rrr : NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst,
- (imad Int16Regs:$a, Int16Regs:$b, Int16Regs:$c))]>;
-def MAD16rri : NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, Int16Regs:$b, i16imm:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst,
- (imad Int16Regs:$a, Int16Regs:$b, imm:$c))]>;
-def MAD16rir : NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, i16imm:$b, Int16Regs:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst,
- (imad Int16Regs:$a, imm:$b, Int16Regs:$c))]>;
-def MAD16rii : NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, i16imm:$b, i16imm:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst,
- (imad Int16Regs:$a, imm:$b, imm:$c))]>;
-
-def MAD32rrr : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst,
- (imad Int32Regs:$a, Int32Regs:$b, Int32Regs:$c))]>;
-def MAD32rri : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, Int32Regs:$b, i32imm:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst,
- (imad Int32Regs:$a, Int32Regs:$b, imm:$c))]>;
-def MAD32rir : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, i32imm:$b, Int32Regs:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst,
- (imad Int32Regs:$a, imm:$b, Int32Regs:$c))]>;
-def MAD32rii : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, i32imm:$b, i32imm:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst,
- (imad Int32Regs:$a, imm:$b, imm:$c))]>;
-
-def MAD64rrr : NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, Int64Regs:$b, Int64Regs:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst,
- (imad Int64Regs:$a, Int64Regs:$b, Int64Regs:$c))]>;
-def MAD64rri : NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, Int64Regs:$b, i64imm:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst,
- (imad Int64Regs:$a, Int64Regs:$b, imm:$c))]>;
-def MAD64rir : NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, i64imm:$b, Int64Regs:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst,
- (imad Int64Regs:$a, imm:$b, Int64Regs:$c))]>;
-def MAD64rii : NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, i64imm:$b, i64imm:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst,
- (imad Int64Regs:$a, imm:$b, imm:$c))]>;
-
-def INEG16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
- "neg.s16 \t$dst, $src;",
- [(set Int16Regs:$dst, (ineg Int16Regs:$src))]>;
-def INEG32 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
- "neg.s32 \t$dst, $src;",
- [(set Int32Regs:$dst, (ineg Int32Regs:$src))]>;
-def INEG64 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "neg.s64 \t$dst, $src;",
- [(set Int64Regs:$dst, (ineg Int64Regs:$src))]>;
+//
+// Integer multiply-add
+//
+def SDTIMAD :
+ SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2>,
+ SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>]>;
+def imad : SDNode<"NVPTXISD::IMAD", SDTIMAD>;
+
+def MAD16rrr :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, Int16Regs:$c))]>;
+def MAD16rri :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, Int16Regs:$b, i16imm:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, imm:$c))]>;
+def MAD16rir :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, i16imm:$b, Int16Regs:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, Int16Regs:$c))]>;
+def MAD16rii :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, i16imm:$b, i16imm:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, imm:$c))]>;
+
+def MAD32rrr :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, Int32Regs:$c))]>;
+def MAD32rri :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, Int32Regs:$b, i32imm:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, imm:$c))]>;
+def MAD32rir :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, i32imm:$b, Int32Regs:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, Int32Regs:$c))]>;
+def MAD32rii :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, i32imm:$b, i32imm:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, imm:$c))]>;
+
+def MAD64rrr :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, Int64Regs:$b, Int64Regs:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, Int64Regs:$c))]>;
+def MAD64rri :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, Int64Regs:$b, i64imm:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, imm:$c))]>;
+def MAD64rir :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, i64imm:$b, Int64Regs:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, Int64Regs:$c))]>;
+def MAD64rii :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, i64imm:$b, i64imm:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, imm:$c))]>;
+
+def INEG16 :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "neg.s16 \t$dst, $src;",
+ [(set Int16Regs:$dst, (ineg Int16Regs:$src))]>;
+def INEG32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "neg.s32 \t$dst, $src;",
+ [(set Int32Regs:$dst, (ineg Int32Regs:$src))]>;
+def INEG64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "neg.s64 \t$dst, $src;",
+ [(set Int64Regs:$dst, (ineg Int64Regs:$src))]>;
//-----------------------------------
// Floating Point Arithmetic
@@ -677,17 +704,13 @@ def INEG64 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
// Constant 1.0f
def FloatConst1 : PatLeaf<(fpimm), [{
- if (&(N->getValueAPF().getSemantics()) != &llvm::APFloat::IEEEsingle)
- return false;
- float f = (float)N->getValueAPF().convertToFloat();
- return (f==1.0f);
+ return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEsingle &&
+ N->getValueAPF().convertToFloat() == 1.0f;
}]>;
-// Constand (double)1.0
+// Constant 1.0 (double)
def DoubleConst1 : PatLeaf<(fpimm), [{
- if (&(N->getValueAPF().getSemantics()) != &llvm::APFloat::IEEEdouble)
- return false;
- double d = (double)N->getValueAPF().convertToDouble();
- return (d==1.0);
+ return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEdouble &&
+ N->getValueAPF().convertToDouble() == 1.0;
}]>;
defm FADD : F3<"add", fadd>;
@@ -698,157 +721,157 @@ defm FADD_rn : F3_rn<"add", fadd>;
defm FSUB_rn : F3_rn<"sub", fsub>;
defm FMUL_rn : F3_rn<"mul", fmul>;
-defm FABS : F2<"abs", fabs>;
-defm FNEG : F2<"neg", fneg>;
+defm FABS : F2<"abs", fabs>;
+defm FNEG : F2<"neg", fneg>;
defm FSQRT : F2<"sqrt.rn", fsqrt>;
//
// F64 division
//
-def FDIV641r : NVPTXInst<(outs Float64Regs:$dst),
- (ins f64imm:$a, Float64Regs:$b),
- "rcp.rn.f64 \t$dst, $b;",
- [(set Float64Regs:$dst,
- (fdiv DoubleConst1:$a, Float64Regs:$b))]>;
-def FDIV64rr : NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- "div.rn.f64 \t$dst, $a, $b;",
- [(set Float64Regs:$dst,
- (fdiv Float64Regs:$a, Float64Regs:$b))]>;
-def FDIV64ri : NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- "div.rn.f64 \t$dst, $a, $b;",
- [(set Float64Regs:$dst,
- (fdiv Float64Regs:$a, fpimm:$b))]>;
+def FDIV641r :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins f64imm:$a, Float64Regs:$b),
+ "rcp.rn.f64 \t$dst, $b;",
+ [(set Float64Regs:$dst, (fdiv DoubleConst1:$a, Float64Regs:$b))]>;
+def FDIV64rr :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ "div.rn.f64 \t$dst, $a, $b;",
+ [(set Float64Regs:$dst, (fdiv Float64Regs:$a, Float64Regs:$b))]>;
+def FDIV64ri :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ "div.rn.f64 \t$dst, $a, $b;",
+ [(set Float64Regs:$dst, (fdiv Float64Regs:$a, fpimm:$b))]>;
//
// F32 Approximate reciprocal
//
-def FDIV321r_ftz : NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.ftz.f32 \t$dst, $b;",
- [(set Float32Regs:$dst,
- (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX, doF32FTZ]>;
-def FDIV321r : NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.f32 \t$dst, $b;",
- [(set Float32Regs:$dst,
- (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX]>;
+def FDIV321r_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.ftz.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV321r :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX]>;
//
// F32 Approximate division
//
-def FDIV32approxrr_ftz : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.approx.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst,
- (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX, doF32FTZ]>;
-def FDIV32approxri_ftz : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.approx.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst,
- (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_APPROX, doF32FTZ]>;
-def FDIV32approxrr : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.approx.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst,
- (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX]>;
-def FDIV32approxri : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.approx.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst,
- (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_APPROX]>;
+def FDIV32approxrr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.approx.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV32approxri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.approx.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV32approxrr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.approx.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX]>;
+def FDIV32approxri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.approx.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_APPROX]>;
//
// F32 Semi-accurate reciprocal
//
// rcp.approx gives the same result as div.full(1.0f, a) and is faster.
//
-def FDIV321r_approx_ftz : NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.ftz.f32 \t$dst, $b;",
- [(set Float32Regs:$dst,
- (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL, doF32FTZ]>;
-def FDIV321r_approx : NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.f32 \t$dst, $b;",
- [(set Float32Regs:$dst,
- (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL]>;
+def FDIV321r_approx_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.ftz.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV321r_approx :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL]>;
//
// F32 Semi-accurate division
//
-def FDIV32rr_ftz : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.full.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst,
- (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL, doF32FTZ]>;
-def FDIV32ri_ftz : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.full.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst,
- (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_FULL, doF32FTZ]>;
-def FDIV32rr : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.full.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst,
- (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL]>;
-def FDIV32ri : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.full.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst,
- (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_FULL]>;
+def FDIV32rr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.full.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV32ri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.full.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV32rr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.full.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL]>;
+def FDIV32ri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.full.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_FULL]>;
//
// F32 Accurate reciprocal
//
-def FDIV321r_prec_ftz : NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.rn.ftz.f32 \t$dst, $b;",
- [(set Float32Regs:$dst,
- (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[reqPTX20, doF32FTZ]>;
-def FDIV321r_prec : NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.rn.f32 \t$dst, $b;",
- [(set Float32Regs:$dst,
- (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[reqPTX20]>;
+def FDIV321r_prec_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.rn.ftz.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[reqPTX20, doF32FTZ]>;
+def FDIV321r_prec :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.rn.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[reqPTX20]>;
//
// F32 Accurate division
//
-def FDIV32rr_prec_ftz : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.rn.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst,
- (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[doF32FTZ, reqPTX20]>;
-def FDIV32ri_prec_ftz : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.rn.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst,
- (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[doF32FTZ, reqPTX20]>;
-def FDIV32rr_prec : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.rn.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst,
- (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[reqPTX20]>;
-def FDIV32ri_prec : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.rn.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst,
- (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[reqPTX20]>;
+def FDIV32rr_prec_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.rn.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[doF32FTZ, reqPTX20]>;
+def FDIV32ri_prec_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.rn.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[doF32FTZ, reqPTX20]>;
+def FDIV32rr_prec :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.rn.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[reqPTX20]>;
+def FDIV32ri_prec :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.rn.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[reqPTX20]>;
//
// F32 rsqrt
@@ -857,68 +880,39 @@ def FDIV32ri_prec : NVPTXInst<(outs Float32Regs:$dst),
def RSQRTF32approx1r : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$b),
"rsqrt.approx.f32 \t$dst, $b;", []>;
+// Convert 1.0f/sqrt(x) to rsqrt.approx.f32. (There is an rsqrt.approx.f64, but
+// it's emulated in software.)
def: Pat<(fdiv FloatConst1, (int_nvvm_sqrt_f Float32Regs:$b)),
(RSQRTF32approx1r Float32Regs:$b)>,
Requires<[do_DIVF32_FULL, do_SQRTF32_APPROX, doNoF32FTZ]>;
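+// For example (an illustrative reading of the pattern above), IR computing
+// 1.0f / sqrt(x) via the llvm.nvvm.sqrt.f intrinsic selects to a single
+// "rsqrt.approx.f32 \t$dst, $b;" instead of a sqrt followed by a divide.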
-multiclass FPCONTRACT32<string OpcStr, Predicate Pred> {
- def rrr : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b, Float32Regs:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float32Regs:$dst,
- (fma Float32Regs:$a, Float32Regs:$b, Float32Regs:$c))]>,
- Requires<[Pred]>;
- def rri : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b, f32imm:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float32Regs:$dst,
- (fma Float32Regs:$a, Float32Regs:$b, fpimm:$c))]>,
- Requires<[Pred]>;
- def rir : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b, Float32Regs:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float32Regs:$dst,
- (fma Float32Regs:$a, fpimm:$b, Float32Regs:$c))]>,
- Requires<[Pred]>;
- def rii : NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b, f32imm:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float32Regs:$dst,
- (fma Float32Regs:$a, fpimm:$b, fpimm:$c))]>,
- Requires<[Pred]>;
+multiclass FMA<string OpcStr, RegisterClass RC, Operand ImmCls, Predicate Pred> {
+ def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
+ Requires<[Pred]>;
+ def rri : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, ImmCls:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, RC:$b, fpimm:$c))]>,
+ Requires<[Pred]>;
+ def rir : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, RC:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, fpimm:$b, RC:$c))]>,
+ Requires<[Pred]>;
+ def rii : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, ImmCls:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, fpimm:$b, fpimm:$c))]>,
+ Requires<[Pred]>;
}
-multiclass FPCONTRACT64<string OpcStr, Predicate Pred> {
- def rrr : NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b, Float64Regs:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float64Regs:$dst,
- (fma Float64Regs:$a, Float64Regs:$b, Float64Regs:$c))]>,
- Requires<[Pred]>;
- def rri : NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b, f64imm:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float64Regs:$dst,
- (fma Float64Regs:$a, Float64Regs:$b, fpimm:$c))]>,
- Requires<[Pred]>;
- def rir : NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b, Float64Regs:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float64Regs:$dst,
- (fma Float64Regs:$a, fpimm:$b, Float64Regs:$c))]>,
- Requires<[Pred]>;
- def rii : NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b, f64imm:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set Float64Regs:$dst,
- (fma Float64Regs:$a, fpimm:$b, fpimm:$c))]>,
- Requires<[Pred]>;
-}
-
-defm FMA32_ftz : FPCONTRACT32<"fma.rn.ftz.f32", doF32FTZ>;
-defm FMA32 : FPCONTRACT32<"fma.rn.f32", true>;
-defm FMA64 : FPCONTRACT64<"fma.rn.f64", true>;
+defm FMA32_ftz : FMA<"fma.rn.ftz.f32", Float32Regs, f32imm, doF32FTZ>;
+defm FMA32 : FMA<"fma.rn.f32", Float32Regs, f32imm, true>;
+defm FMA64 : FMA<"fma.rn.f64", Float64Regs, f64imm, true>;
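+// For example, the record FMA32rrr created above matches
+// (fma Float32Regs:$a, Float32Regs:$b, Float32Regs:$c) and prints as
+// "fma.rn.f32 \t$dst, $a, $b, $c;".
+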
+// sin/cos
def SINF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
"sin.approx.f32 \t$dst, $src;",
[(set Float32Regs:$dst, (fsin Float32Regs:$src))]>;
@@ -926,8 +920,8 @@ def COSF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
"cos.approx.f32 \t$dst, $src;",
[(set Float32Regs:$dst, (fcos Float32Regs:$src))]>;
-// Lower (frem x, y) into (sub x, (mul (floor (div x, y)) y))
-// e.g. "poor man's fmod()"
+// Lower (frem x, y) into (sub x, (mul (floor (div x, y)) y)),
+// i.e. "poor man's fmod()"
// frem - f32 FTZ
def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
@@ -962,183 +956,152 @@ def : Pat<(frem Float64Regs:$x, fpimm:$y),
fpimm:$y))>;
//-----------------------------------
-// Logical Arithmetic
+// Bitwise operations
//-----------------------------------
-multiclass LOG_FORMAT<string OpcStr, SDNode OpNode> {
- def b1rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
- !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
- def b1ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
- !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, imm:$b))]>;
- def b16rr: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
- Int16Regs:$b))]>;
- def b16ri: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
- def b32rr: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
- Int32Regs:$b))]>;
- def b32ri: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
- def b64rr: NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
- !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
- Int64Regs:$b))]>;
- def b64ri: NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
- !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+// Template for three-arg bitwise operations; creates .b16, .b32, .b64, and
+// .pred (predicate registers -- i.e., i1) versions of OpcStr.
+multiclass BITWISE<string OpcStr, SDNode OpNode> {
+ def b1rr :
+ NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
+ !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
+ def b1ri :
+ NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
+ !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, imm:$b))]>;
+ def b16rr :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
+ def b16ri :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
+ def b32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def b32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def b64rr :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
+ !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
+ def b64ri :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+ !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
}
-defm OR : LOG_FORMAT<"or", or>;
-defm AND : LOG_FORMAT<"and", and>;
-defm XOR : LOG_FORMAT<"xor", xor>;
+defm OR : BITWISE<"or", or>;
+defm AND : BITWISE<"and", and>;
+defm XOR : BITWISE<"xor", xor>;
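+// For example, the record ANDb32ri created above matches
+// (and Int32Regs:$a, imm:$b) and prints as "and.b32 \t$dst, $a, $b;".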
-def NOT1: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$src),
+def NOT1 : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$src),
"not.pred \t$dst, $src;",
[(set Int1Regs:$dst, (not Int1Regs:$src))]>;
-def NOT16: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+def NOT16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
"not.b16 \t$dst, $src;",
[(set Int16Regs:$dst, (not Int16Regs:$src))]>;
-def NOT32: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+def NOT32 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
"not.b32 \t$dst, $src;",
[(set Int32Regs:$dst, (not Int32Regs:$src))]>;
-def NOT64: NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "not.b64 \t$dst, $src;",
- [(set Int64Regs:$dst, (not Int64Regs:$src))]>;
-
-// For shifts, the second src operand must be 32-bit value
-multiclass LSHIFT_FORMAT<string OpcStr, SDNode OpNode> {
- def i64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a,
- Int32Regs:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
- Int32Regs:$b))]>;
- def i64ri : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
- (i32 imm:$b)))]>;
- def i32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a,
- Int32Regs:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
- Int32Regs:$b))]>;
- def i32ri : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
- (i32 imm:$b)))]>;
- def i32ii : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode (i32 imm:$a),
- (i32 imm:$b)))]>;
- def i16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a,
- Int32Regs:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
- Int32Regs:$b))]>;
- def i16ri : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
- (i32 imm:$b)))]>;
-}
+def NOT64 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "not.b64 \t$dst, $src;",
+ [(set Int64Regs:$dst, (not Int64Regs:$src))]>;
-defm SHL : LSHIFT_FORMAT<"shl.b", shl>;
-
-// For shifts, the second src operand must be 32-bit value
-// Need to add cvt for the 8-bits.
-multiclass RSHIFT_FORMAT<string OpcStr, SDNode OpNode> {
- def i64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a,
- Int32Regs:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
- Int32Regs:$b))]>;
- def i64ri : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a,
- (i32 imm:$b)))]>;
- def i32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a,
- Int32Regs:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
- Int32Regs:$b))]>;
- def i32ri : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a,
- (i32 imm:$b)))]>;
- def i32ii : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode (i32 imm:$a),
- (i32 imm:$b)))]>;
- def i16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a,
- Int32Regs:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
- Int32Regs:$b))]>;
- def i16ri : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a,
- (i32 imm:$b)))]>;
+// Template for left/right shifts. Takes three operands,
+// [dest (reg), src (reg), shift (reg or imm)].
+// dest and src may be int64, int32, or int16, but shift is always int32.
+//
+// This template also defines a 32-bit shift (imm, imm) instruction.
+multiclass SHIFT<string OpcStr, SDNode OpNode> {
+ def i64rr :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int32Regs:$b))]>;
+ def i64ri :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, (i32 imm:$b)))]>;
+ def i32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def i32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, (i32 imm:$b)))]>;
+ def i32ii :
+ NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode (i32 imm:$a), (i32 imm:$b)))]>;
+ def i16rr :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int32Regs:$b))]>;
+ def i16ri :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (i32 imm:$b)))]>;
}
-defm SRA : RSHIFT_FORMAT<"shr.s", sra>;
-defm SRL : RSHIFT_FORMAT<"shr.u", srl>;
+defm SHL : SHIFT<"shl.b", shl>;
+defm SRA : SHIFT<"shr.s", sra>;
+defm SRL : SHIFT<"shr.u", srl>;
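+// For example, SHLi64ri shifts an i64 register left by an i32 immediate and
+// prints as "shl.b64 \t$dst, $a, $b;"; the amount is i32 even in the 64-bit
+// forms.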
//
-// Rotate: use ptx shf instruction if available.
+// Rotate: Use ptx shf instruction if available.
//
// 32 bit r2 = rotl r1, n
// =>
// r2 = shf.l r1, r1, n
-def ROTL32imm_hw : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$src, i32imm:$amt),
- "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, (i32 imm:$amt)))]>,
- Requires<[hasHWROT32]> ;
-
-def ROTL32reg_hw : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$src, Int32Regs:$amt),
- "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[hasHWROT32]>;
+def ROTL32imm_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
+ "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, (i32 imm:$amt)))]>,
+ Requires<[hasHWROT32]>;
+
+def ROTL32reg_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[hasHWROT32]>;
// 32 bit r2 = rotr r1, n
// =>
// r2 = shf.r r1, r1, n
-def ROTR32imm_hw : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$src, i32imm:$amt),
- "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, (i32 imm:$amt)))]>,
- Requires<[hasHWROT32]>;
-
-def ROTR32reg_hw : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$src, Int32Regs:$amt),
- "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[hasHWROT32]>;
-
-//
-// Rotate: if ptx shf instruction is not available, then use shift+add
-//
-// 32bit
-def ROT32imm_sw : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$src, i32imm:$amt1, i32imm:$amt2),
- !strconcat("{{\n\t",
- !strconcat(".reg .b32 %lhs;\n\t",
- !strconcat(".reg .b32 %rhs;\n\t",
- !strconcat("shl.b32 \t%lhs, $src, $amt1;\n\t",
- !strconcat("shr.b32 \t%rhs, $src, $amt2;\n\t",
- !strconcat("add.u32 \t$dst, %lhs, %rhs;\n\t",
- !strconcat("}}", ""))))))),
- []>;
+def ROTR32imm_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
+ "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, (i32 imm:$amt)))]>,
+ Requires<[hasHWROT32]>;
+
+def ROTR32reg_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[hasHWROT32]>;
+
+// 32-bit software rotate by immediate. $amt2 should equal 32 - $amt1.
+def ROT32imm_sw :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$src, i32imm:$amt1, i32imm:$amt2),
+ "{{\n\t"
+ ".reg .b32 %lhs;\n\t"
+ ".reg .b32 %rhs;\n\t"
+ "shl.b32 \t%lhs, $src, $amt1;\n\t"
+ "shr.b32 \t%rhs, $src, $amt2;\n\t"
+ "add.u32 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ []>;
def SUB_FRM_32 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(32-N->getZExtValue(), SDLoc(N), MVT::i32);
+ return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
def : Pat<(rotl Int32Regs:$src, (i32 imm:$amt)),
@@ -1148,45 +1111,48 @@ def : Pat<(rotr Int32Regs:$src, (i32 imm:$amt)),
(ROT32imm_sw Int32Regs:$src, (SUB_FRM_32 node:$amt), imm:$amt)>,
Requires<[noHWROT32]>;
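+// For example, a rotate left by 5 amounts to shl.b32 by 5 plus shr.b32 by 27
+// (= 32 - 5), with add.u32 merging the two disjoint bit ranges.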
-def ROTL32reg_sw : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src,
- Int32Regs:$amt),
- !strconcat("{{\n\t",
- !strconcat(".reg .b32 %lhs;\n\t",
- !strconcat(".reg .b32 %rhs;\n\t",
- !strconcat(".reg .b32 %amt2;\n\t",
- !strconcat("shl.b32 \t%lhs, $src, $amt;\n\t",
- !strconcat("sub.s32 \t%amt2, 32, $amt;\n\t",
- !strconcat("shr.b32 \t%rhs, $src, %amt2;\n\t",
- !strconcat("add.u32 \t$dst, %lhs, %rhs;\n\t",
- !strconcat("}}", ""))))))))),
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[noHWROT32]>;
-
-def ROTR32reg_sw : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src,
- Int32Regs:$amt),
- !strconcat("{{\n\t",
- !strconcat(".reg .b32 %lhs;\n\t",
- !strconcat(".reg .b32 %rhs;\n\t",
- !strconcat(".reg .b32 %amt2;\n\t",
- !strconcat("shr.b32 \t%lhs, $src, $amt;\n\t",
- !strconcat("sub.s32 \t%amt2, 32, $amt;\n\t",
- !strconcat("shl.b32 \t%rhs, $src, %amt2;\n\t",
- !strconcat("add.u32 \t$dst, %lhs, %rhs;\n\t",
- !strconcat("}}", ""))))))))),
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[noHWROT32]>;
-
-// 64bit
-def ROT64imm_sw : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src,
- i32imm:$amt1, i32imm:$amt2),
- !strconcat("{{\n\t",
- !strconcat(".reg .b64 %lhs;\n\t",
- !strconcat(".reg .b64 %rhs;\n\t",
- !strconcat("shl.b64 \t%lhs, $src, $amt1;\n\t",
- !strconcat("shr.b64 \t%rhs, $src, $amt2;\n\t",
- !strconcat("add.u64 \t$dst, %lhs, %rhs;\n\t",
- !strconcat("}}", ""))))))),
- []>;
+// 32-bit software rotate left by register.
+def ROTL32reg_sw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b32 %lhs;\n\t"
+ ".reg .b32 %rhs;\n\t"
+ ".reg .b32 %amt2;\n\t"
+ "shl.b32 \t%lhs, $src, $amt;\n\t"
+ "sub.s32 \t%amt2, 32, $amt;\n\t"
+ "shr.b32 \t%rhs, $src, %amt2;\n\t"
+ "add.u32 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[noHWROT32]>;
+
+// 32-bit software rotate right by register.
+def ROTR32reg_sw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b32 %lhs;\n\t"
+ ".reg .b32 %rhs;\n\t"
+ ".reg .b32 %amt2;\n\t"
+ "shr.b32 \t%lhs, $src, $amt;\n\t"
+ "sub.s32 \t%amt2, 32, $amt;\n\t"
+ "shl.b32 \t%rhs, $src, %amt2;\n\t"
+ "add.u32 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[noHWROT32]>;
+
+// 64-bit software rotate by immediate. $amt2 should equal 64 - $amt1.
+def ROT64imm_sw :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$src, i32imm:$amt1, i32imm:$amt2),
+ "{{\n\t"
+ ".reg .b64 %lhs;\n\t"
+ ".reg .b64 %rhs;\n\t"
+ "shl.b64 \t%lhs, $src, $amt1;\n\t"
+ "shr.b64 \t%rhs, $src, $amt2;\n\t"
+ "add.u64 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ []>;
def SUB_FRM_64 : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(64-N->getZExtValue(), SDLoc(N), MVT::i32);
@@ -1197,37 +1163,70 @@ def : Pat<(rotl Int64Regs:$src, (i32 imm:$amt)),
def : Pat<(rotr Int64Regs:$src, (i32 imm:$amt)),
(ROT64imm_sw Int64Regs:$src, (SUB_FRM_64 node:$amt), imm:$amt)>;
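+// For example, a 64-bit rotate right by 8 selects to
+// (ROT64imm_sw $src, 56, 8): shl.b64 by 56 (= 64 - 8), shr.b64 by 8, then
+// add.u64.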
-def ROTL64reg_sw : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src,
- Int32Regs:$amt),
- !strconcat("{{\n\t",
- !strconcat(".reg .b64 %lhs;\n\t",
- !strconcat(".reg .b64 %rhs;\n\t",
- !strconcat(".reg .u32 %amt2;\n\t",
- !strconcat("shl.b64 \t%lhs, $src, $amt;\n\t",
- !strconcat("sub.u32 \t%amt2, 64, $amt;\n\t",
- !strconcat("shr.b64 \t%rhs, $src, %amt2;\n\t",
- !strconcat("add.u64 \t$dst, %lhs, %rhs;\n\t",
- !strconcat("}}", ""))))))))),
- [(set Int64Regs:$dst, (rotl Int64Regs:$src, Int32Regs:$amt))]>;
-
-def ROTR64reg_sw : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src,
- Int32Regs:$amt),
- !strconcat("{{\n\t",
- !strconcat(".reg .b64 %lhs;\n\t",
- !strconcat(".reg .b64 %rhs;\n\t",
- !strconcat(".reg .u32 %amt2;\n\t",
- !strconcat("shr.b64 \t%lhs, $src, $amt;\n\t",
- !strconcat("sub.u32 \t%amt2, 64, $amt;\n\t",
- !strconcat("shl.b64 \t%rhs, $src, %amt2;\n\t",
- !strconcat("add.u64 \t$dst, %lhs, %rhs;\n\t",
- !strconcat("}}", ""))))))))),
- [(set Int64Regs:$dst, (rotr Int64Regs:$src, Int32Regs:$amt))]>;
+// 64-bit software rotate left by register.
+def ROTL64reg_sw :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b64 %lhs;\n\t"
+ ".reg .b64 %rhs;\n\t"
+ ".reg .u32 %amt2;\n\t"
+ "shl.b64 \t%lhs, $src, $amt;\n\t"
+ "sub.u32 \t%amt2, 64, $amt;\n\t"
+ "shr.b64 \t%rhs, $src, %amt2;\n\t"
+ "add.u64 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int64Regs:$dst, (rotl Int64Regs:$src, Int32Regs:$amt))]>;
+
+def ROTR64reg_sw :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b64 %lhs;\n\t"
+ ".reg .b64 %rhs;\n\t"
+ ".reg .u32 %amt2;\n\t"
+ "shr.b64 \t%lhs, $src, $amt;\n\t"
+ "sub.u32 \t%amt2, 64, $amt;\n\t"
+ "shl.b64 \t%rhs, $src, %amt2;\n\t"
+ "add.u64 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int64Regs:$dst, (rotr Int64Regs:$src, Int32Regs:$amt))]>;
+
+//
+// Funnel shift in clamp mode
+//
+
+// Create SDNodes so they can be used in the DAG code, e.g. in
+// NVPTXISelLowering (LowerShiftLeftParts and LowerShiftRightParts).
+def SDTIntShiftDOp :
+ SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
+ SDTCisInt<0>, SDTCisInt<3>]>;
+def FUN_SHFL_CLAMP : SDNode<"NVPTXISD::FUN_SHFL_CLAMP", SDTIntShiftDOp, []>;
+def FUN_SHFR_CLAMP : SDNode<"NVPTXISD::FUN_SHFR_CLAMP", SDTIntShiftDOp, []>;
+
+def FUNSHFLCLAMP :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.l.clamp.b32 \t$dst, $lo, $hi, $amt;",
+ [(set Int32Regs:$dst,
+ (FUN_SHFL_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
+def FUNSHFRCLAMP :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.r.clamp.b32 \t$dst, $lo, $hi, $amt;",
+ [(set Int32Regs:$dst,
+ (FUN_SHFR_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
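+// As a sketch of the intended use: when an i64 shift is lowered into two i32
+// halves, the high word can be produced by (FUN_SHFL_CLAMP lo, hi, amt),
+// which selects to "shf.l.clamp.b32 \t$dst, $lo, $hi, $amt;".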
+
+//
// BFE - bit-field extract
+//
+// Template for BFE instructions. Takes four args,
+// [dest (reg), src (reg), start (reg or imm), length (reg or imm)].
+// Start may be an imm only if length is also an imm. FIXME: Is this a
+// restriction in PTX?
+//
+// dest and src may be int32 or int64, but start and length are always int32.
multiclass BFE<string TyStr, RegisterClass RC> {
- // BFE supports both 32-bit and 64-bit values, but the start and length
- // operands are always 32-bit
def rrr
: NVPTXInst<(outs RC:$d),
(ins RC:$a, Int32Regs:$b, Int32Regs:$c),
@@ -1242,29 +1241,35 @@ multiclass BFE<string TyStr, RegisterClass RC> {
!strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
}
-defm BFE_S32 : BFE<"s32", Int32Regs>;
-defm BFE_U32 : BFE<"u32", Int32Regs>;
-defm BFE_S64 : BFE<"s64", Int64Regs>;
-defm BFE_U64 : BFE<"u64", Int64Regs>;
+let hasSideEffects = 0 in {
+ defm BFE_S32 : BFE<"s32", Int32Regs>;
+ defm BFE_U32 : BFE<"u32", Int32Regs>;
+ defm BFE_S64 : BFE<"s64", Int64Regs>;
+ defm BFE_U64 : BFE<"u64", Int64Regs>;
+}
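+// For example, "bfe.u32 \t$d, $a, 8, 16;" would extract the 16-bit field
+// starting at bit 8 of $a (assuming PTX's start/length semantics for bfe).
+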
//-----------------------------------
-// General Comparison
+// Comparison instructions (setp, set)
//-----------------------------------
-// General setp instructions
-multiclass SETP<string TypeStr, RegisterClass RC, Operand ImmCls> {
- def rr : NVPTXInst<(outs Int1Regs:$dst),
- (ins RC:$a, RC:$b, CmpMode:$cmp),
- !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr, "\t$dst, $a, $b;"),
- []>;
- def ri : NVPTXInst<(outs Int1Regs:$dst),
- (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
- !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr, "\t$dst, $a, $b;"),
- []>;
- def ir : NVPTXInst<(outs Int1Regs:$dst),
- (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
- !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr, "\t$dst, $a, $b;"),
- []>;
+// FIXME: This doesn't cover versions of set and setp that combine with a
+// boolean predicate, e.g. setp.eq.and.b16.
+
+let hasSideEffects = 0 in {
+ multiclass SETP<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr :
+ NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
+ "\t$dst, $a, $b;"), []>;
+ def ri :
+ NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
+ "\t$dst, $a, $b;"), []>;
+ def ir :
+ NVPTXInst<(outs Int1Regs:$dst), (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
+ "\t$dst, $a, $b;"), []>;
+ }
}
defm SETP_b16 : SETP<"b16", Int16Regs, i16imm>;
@@ -1279,17 +1284,22 @@ defm SETP_u64 : SETP<"u64", Int64Regs, i64imm>;
defm SETP_f32 : SETP<"f32", Float32Regs, f32imm>;
defm SETP_f64 : SETP<"f64", Float64Regs, f64imm>;
-// General set instructions
-multiclass SET<string TypeStr, RegisterClass RC, Operand ImmCls> {
- def rr : NVPTXInst<(outs Int32Regs:$dst),
- (ins RC:$a, RC:$b, CmpMode:$cmp),
- !strconcat("set$cmp.", TypeStr, "\t$dst, $a, $b;"), []>;
- def ri : NVPTXInst<(outs Int32Regs:$dst),
- (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
- !strconcat("set$cmp.", TypeStr, "\t$dst, $a, $b;"), []>;
- def ir : NVPTXInst<(outs Int32Regs:$dst),
- (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
- !strconcat("set$cmp.", TypeStr, "\t$dst, $a, $b;"), []>;
+// FIXME: This doesn't appear to be correct. The "set" mnemonic has the form
+// "set.CmpOp{.ftz}.dtype.stype", where dtype is the type of the destination
+// reg, either u32, s32, or f32. Anyway these aren't used at the moment.
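+// For example, a correct instance would look like
+// "set.lt.u32.f32 \t$dst, $a, $b;" (u32 destination, f32 sources), rather
+// than the single-type form emitted here.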
+
+let hasSideEffects = 0 in {
+ multiclass SET<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr : NVPTXInst<(outs Int32Regs:$dst),
+ (ins RC:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, "\t$dst, $a, $b;"), []>;
+ def ri : NVPTXInst<(outs Int32Regs:$dst),
+ (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, "\t$dst, $a, $b;"), []>;
+ def ir : NVPTXInst<(outs Int32Regs:$dst),
+ (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, "\t$dst, $a, $b;"), []>;
+ }
}
defm SET_b16 : SET<"b16", Int16Regs, i16imm>;
@@ -1305,45 +1315,56 @@ defm SET_f32 : SET<"f32", Float32Regs, f32imm>;
defm SET_f64 : SET<"f64", Float64Regs, f64imm>;
//-----------------------------------
-// General Selection
+// Selection instructions (selp)
//-----------------------------------
-// General selp instructions
-multiclass SELP<string TypeStr, RegisterClass RC, Operand ImmCls> {
- def rr : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>;
- def ri : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>;
- def ir : NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>;
- def ii : NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>;
-}
+// FIXME: Missing slct
-multiclass SELP_PATTERN<string TypeStr, RegisterClass RC, Operand ImmCls,
- SDNode ImmNode> {
- def rr : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, RC:$a, RC:$b))]>;
- def ri : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, RC:$a, ImmNode:$b))]>;
- def ir : NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, RC:$b))]>;
- def ii : NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, ImmNode:$b))]>;
+// selp instructions that don't have any pattern matches; we explicitly use
+// them within this file.
+let hasSideEffects = 0 in {
+ multiclass SELP<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>;
+ def ri : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>;
+ def ir : NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>;
+ def ii : NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"), []>;
+ }
+
+ multiclass SELP_PATTERN<string TypeStr, RegisterClass RC, Operand ImmCls,
+ SDNode ImmNode> {
+ def rr :
+ NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, RC:$a, RC:$b))]>;
+ def ri :
+ NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, RC:$a, ImmNode:$b))]>;
+ def ir :
+ NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, RC:$b))]>;
+ def ii :
+ NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, "\t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, ImmNode:$b))]>;
+ }
}
+// Don't pattern match on selp.{s,u}{16,32,64} -- selp.b{16,32,64} is just as
+// good.
defm SELP_b16 : SELP_PATTERN<"b16", Int16Regs, i16imm, imm>;
defm SELP_s16 : SELP<"s16", Int16Regs, i16imm>;
defm SELP_u16 : SELP<"u16", Int16Regs, i16imm>;
@@ -1356,40 +1377,14 @@ defm SELP_u64 : SELP<"u64", Int64Regs, i64imm>;
defm SELP_f32 : SELP_PATTERN<"f32", Float32Regs, f32imm, fpimm>;
defm SELP_f64 : SELP_PATTERN<"f64", Float64Regs, f64imm, fpimm>;
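+// For example, SELP_b16rr above matches
+// (select Int1Regs:$p, Int16Regs:$a, Int16Regs:$b) and prints as
+// "selp.b16\t$dst, $a, $b, $p;".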
-//
-// Funnnel shift in clamp mode
-//
-// - SDNodes are created so they can be used in the DAG code,
-// e.g. NVPTXISelLowering (LowerShiftLeftParts and LowerShiftRightParts)
-//
-def SDTIntShiftDOp: SDTypeProfile<1, 3,
- [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
- SDTCisInt<0>, SDTCisInt<3>]>;
-def FUN_SHFL_CLAMP : SDNode<"NVPTXISD::FUN_SHFL_CLAMP", SDTIntShiftDOp, []>;
-def FUN_SHFR_CLAMP : SDNode<"NVPTXISD::FUN_SHFR_CLAMP", SDTIntShiftDOp, []>;
-
-def FUNSHFLCLAMP : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
- "shf.l.clamp.b32 \t$dst, $lo, $hi, $amt;",
- [(set Int32Regs:$dst,
- (FUN_SHFL_CLAMP Int32Regs:$lo,
- Int32Regs:$hi, Int32Regs:$amt))]>;
-
-def FUNSHFRCLAMP : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
- "shf.r.clamp.b32 \t$dst, $lo, $hi, $amt;",
- [(set Int32Regs:$dst,
- (FUN_SHFR_CLAMP Int32Regs:$lo,
- Int32Regs:$hi, Int32Regs:$amt))]>;
-
//-----------------------------------
// Data Movement (Load / Store, Move)
//-----------------------------------
def ADDRri : ComplexPattern<i32, 2, "SelectADDRri", [frameindex],
- [SDNPWantRoot]>;
+ [SDNPWantRoot]>;
def ADDRri64 : ComplexPattern<i64, 2, "SelectADDRri64", [frameindex],
- [SDNPWantRoot]>;
+ [SDNPWantRoot]>;
def MEMri : Operand<i32> {
let PrintMethod = "printMemOperand";
@@ -1401,82 +1396,83 @@ def MEMri64 : Operand<i64> {
}
def imem : Operand<iPTR> {
- let PrintMethod = "printOperand";
+ let PrintMethod = "printOperand";
}
def imemAny : Operand<iPTRAny> {
- let PrintMethod = "printOperand";
+ let PrintMethod = "printOperand";
}
def LdStCode : Operand<i32> {
- let PrintMethod = "printLdStCode";
+ let PrintMethod = "printLdStCode";
}
def SDTWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
def Wrapper : SDNode<"NVPTXISD::Wrapper", SDTWrapper>;
+// Load a memory address into a u32 or u64 register.
def MOV_ADDR : NVPTXInst<(outs Int32Regs:$dst), (ins imem:$a),
- "mov.u32 \t$dst, $a;",
- [(set Int32Regs:$dst, (Wrapper tglobaladdr:$a))]>;
-
+ "mov.u32 \t$dst, $a;",
+ [(set Int32Regs:$dst, (Wrapper tglobaladdr:$a))]>;
def MOV_ADDR64 : NVPTXInst<(outs Int64Regs:$dst), (ins imem:$a),
- "mov.u64 \t$dst, $a;",
- [(set Int64Regs:$dst, (Wrapper tglobaladdr:$a))]>;
+ "mov.u64 \t$dst, $a;",
+ [(set Int64Regs:$dst, (Wrapper tglobaladdr:$a))]>;
-// Get pointer to local stack
-def MOV_DEPOT_ADDR
- : NVPTXInst<(outs Int32Regs:$d), (ins i32imm:$num),
- "mov.u32 \t$d, __local_depot$num;", []>;
-def MOV_DEPOT_ADDR_64
- : NVPTXInst<(outs Int64Regs:$d), (ins i32imm:$num),
- "mov.u64 \t$d, __local_depot$num;", []>;
+// Get pointer to local stack.
+let hasSideEffects = 0 in {
+ def MOV_DEPOT_ADDR : NVPTXInst<(outs Int32Regs:$d), (ins i32imm:$num),
+ "mov.u32 \t$d, __local_depot$num;", []>;
+ def MOV_DEPOT_ADDR_64 : NVPTXInst<(outs Int64Regs:$d), (ins i32imm:$num),
+ "mov.u64 \t$d, __local_depot$num;", []>;
+}
// copyPhysreg is hard-coded in NVPTXInstrInfo.cpp
-let IsSimpleMove=1 in {
-def IMOV1rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$sss),
- "mov.pred \t$dst, $sss;", []>;
-def IMOV16rr: NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$sss),
- "mov.u16 \t$dst, $sss;", []>;
-def IMOV32rr: NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$sss),
- "mov.u32 \t$dst, $sss;", []>;
-def IMOV64rr: NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$sss),
- "mov.u64 \t$dst, $sss;", []>;
-
-def FMOV32rr: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
- "mov.f32 \t$dst, $src;", []>;
-def FMOV64rr: NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src),
- "mov.f64 \t$dst, $src;", []>;
+let IsSimpleMove=1, hasSideEffects=0 in {
+ def IMOV1rr : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$sss),
+ "mov.pred \t$dst, $sss;", []>;
+ def IMOV16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$sss),
+ "mov.u16 \t$dst, $sss;", []>;
+ def IMOV32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$sss),
+ "mov.u32 \t$dst, $sss;", []>;
+ def IMOV64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$sss),
+ "mov.u64 \t$dst, $sss;", []>;
+
+ def FMOV32rr : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "mov.f32 \t$dst, $src;", []>;
+ def FMOV64rr : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src),
+ "mov.f64 \t$dst, $src;", []>;
}
-def IMOV1ri: NVPTXInst<(outs Int1Regs:$dst), (ins i1imm:$src),
- "mov.pred \t$dst, $src;",
- [(set Int1Regs:$dst, imm:$src)]>;
-def IMOV16ri: NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),
- "mov.u16 \t$dst, $src;",
- [(set Int16Regs:$dst, imm:$src)]>;
-def IMOV32ri: NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$src),
- "mov.u32 \t$dst, $src;",
- [(set Int32Regs:$dst, imm:$src)]>;
-def IMOV64i: NVPTXInst<(outs Int64Regs:$dst), (ins i64imm:$src),
- "mov.u64 \t$dst, $src;",
- [(set Int64Regs:$dst, imm:$src)]>;
-
-def FMOV32ri: NVPTXInst<(outs Float32Regs:$dst), (ins f32imm:$src),
- "mov.f32 \t$dst, $src;",
- [(set Float32Regs:$dst, fpimm:$src)]>;
-def FMOV64ri: NVPTXInst<(outs Float64Regs:$dst), (ins f64imm:$src),
- "mov.f64 \t$dst, $src;",
- [(set Float64Regs:$dst, fpimm:$src)]>;
+
+def IMOV1ri : NVPTXInst<(outs Int1Regs:$dst), (ins i1imm:$src),
+ "mov.pred \t$dst, $src;",
+ [(set Int1Regs:$dst, imm:$src)]>;
+def IMOV16ri : NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),
+ "mov.u16 \t$dst, $src;",
+ [(set Int16Regs:$dst, imm:$src)]>;
+def IMOV32ri : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$src),
+ "mov.u32 \t$dst, $src;",
+ [(set Int32Regs:$dst, imm:$src)]>;
+def IMOV64i : NVPTXInst<(outs Int64Regs:$dst), (ins i64imm:$src),
+ "mov.u64 \t$dst, $src;",
+ [(set Int64Regs:$dst, imm:$src)]>;
+
+def FMOV32ri : NVPTXInst<(outs Float32Regs:$dst), (ins f32imm:$src),
+ "mov.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, fpimm:$src)]>;
+def FMOV64ri : NVPTXInst<(outs Float64Regs:$dst), (ins f64imm:$src),
+ "mov.f64 \t$dst, $src;",
+ [(set Float64Regs:$dst, fpimm:$src)]>;
def : Pat<(i32 (Wrapper texternalsym:$dst)), (IMOV32ri texternalsym:$dst)>;
//---- Copy Frame Index ----
-def LEA_ADDRi : NVPTXInst<(outs Int32Regs:$dst), (ins MEMri:$addr),
- "add.u32 \t$dst, ${addr:add};",
- [(set Int32Regs:$dst, ADDRri:$addr)]>;
+def LEA_ADDRi : NVPTXInst<(outs Int32Regs:$dst), (ins MEMri:$addr),
+ "add.u32 \t$dst, ${addr:add};",
+ [(set Int32Regs:$dst, ADDRri:$addr)]>;
def LEA_ADDRi64 : NVPTXInst<(outs Int64Regs:$dst), (ins MEMri64:$addr),
- "add.u64 \t$dst, ${addr:add};",
- [(set Int64Regs:$dst, ADDRri64:$addr)]>;
+ "add.u64 \t$dst, ${addr:add};",
+ [(set Int64Regs:$dst, ADDRri64:$addr)]>;
//-----------------------------------
// Comparison and Selection
@@ -1554,7 +1550,7 @@ multiclass ISET_FORMAT_SIGNED<PatFrag OpNode, PatLeaf Mode>
SET_s16rr, SET_s16ri, SET_s16ir,
SET_s32rr, SET_s32ri, SET_s32ir,
SET_s64rr, SET_s64ri, SET_s64ir> {
- // TableGen doesn't like empty multiclasses
+ // TableGen doesn't like empty multiclasses.
def : PatLeaf<(i32 0)>;
}
@@ -1566,21 +1562,21 @@ multiclass ISET_FORMAT_UNSIGNED<PatFrag OpNode, PatLeaf Mode>
SET_u16rr, SET_u16ri, SET_u16ir,
SET_u32rr, SET_u32ri, SET_u32ir,
SET_u64rr, SET_u64ri, SET_u64ir> {
- // TableGen doesn't like empty multiclasses
+ // TableGen doesn't like empty multiclasses.
def : PatLeaf<(i32 0)>;
}
defm : ISET_FORMAT_SIGNED<setgt, CmpGT>;
-defm : ISET_FORMAT_UNSIGNED<setugt, CmpGT>;
defm : ISET_FORMAT_SIGNED<setlt, CmpLT>;
-defm : ISET_FORMAT_UNSIGNED<setult, CmpLT>;
defm : ISET_FORMAT_SIGNED<setge, CmpGE>;
-defm : ISET_FORMAT_UNSIGNED<setuge, CmpGE>;
defm : ISET_FORMAT_SIGNED<setle, CmpLE>;
-defm : ISET_FORMAT_UNSIGNED<setule, CmpLE>;
defm : ISET_FORMAT_SIGNED<seteq, CmpEQ>;
-defm : ISET_FORMAT_UNSIGNED<setueq, CmpEQ>;
defm : ISET_FORMAT_SIGNED<setne, CmpNE>;
+defm : ISET_FORMAT_UNSIGNED<setugt, CmpGT>;
+defm : ISET_FORMAT_UNSIGNED<setult, CmpLT>;
+defm : ISET_FORMAT_UNSIGNED<setuge, CmpGE>;
+defm : ISET_FORMAT_UNSIGNED<setule, CmpLE>;
+defm : ISET_FORMAT_UNSIGNED<setueq, CmpEQ>;
defm : ISET_FORMAT_UNSIGNED<setune, CmpNE>;
// i1 compares
@@ -1678,13 +1674,14 @@ defm FSetNE : FSET_FORMAT<setne, CmpNE, CmpNE_FTZ>;
defm FSetNUM : FSET_FORMAT<seto, CmpNUM, CmpNUM_FTZ>;
defm FSetNAN : FSET_FORMAT<setuo, CmpNAN, CmpNAN_FTZ>;
-//def ld_param : SDNode<"NVPTXISD::LOAD_PARAM", SDTLoad,
-// [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+// FIXME: What is this doing here? Can it be deleted?
+// def ld_param : SDNode<"NVPTXISD::LOAD_PARAM", SDTLoad,
+// [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
-def SDTDeclareParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
- SDTCisInt<2>]>;
-def SDTDeclareScalarParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>,
- SDTCisInt<1>, SDTCisInt<2>]>;
+def SDTDeclareParamProfile :
+ SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
+def SDTDeclareScalarParamProfile :
+ SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
def SDTLoadParamProfile : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
def SDTLoadParamV2Profile : SDTypeProfile<2, 2, [SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisInt<3>]>;
def SDTLoadParamV4Profile : SDTypeProfile<4, 2, [SDTCisInt<4>, SDTCisInt<5>]>;
@@ -1704,185 +1701,200 @@ def SDTStoreRetvalV2Profile : SDTypeProfile<0, 3, [SDTCisInt<0>]>;
def SDTStoreRetvalV4Profile : SDTypeProfile<0, 5, [SDTCisInt<0>]>;
def SDTPseudoUseParamProfile : SDTypeProfile<0, 1, []>;
-def DeclareParam : SDNode<"NVPTXISD::DeclareParam", SDTDeclareParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def DeclareScalarParam : SDNode<"NVPTXISD::DeclareScalarParam",
- SDTDeclareScalarParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def DeclareRetParam : SDNode<"NVPTXISD::DeclareRetParam",
- SDTDeclareParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def DeclareRet : SDNode<"NVPTXISD::DeclareRet", SDTDeclareScalarParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def LoadParam : SDNode<"NVPTXISD::LoadParam", SDTLoadParamProfile,
- [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
-def LoadParamV2 : SDNode<"NVPTXISD::LoadParamV2", SDTLoadParamV2Profile,
- [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
-def LoadParamV4 : SDNode<"NVPTXISD::LoadParamV4", SDTLoadParamV4Profile,
- [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
-def PrintCall : SDNode<"NVPTXISD::PrintCall", SDTPrintCallProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def PrintCallUni : SDNode<"NVPTXISD::PrintCallUni", SDTPrintCallUniProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParam : SDNode<"NVPTXISD::StoreParam", SDTStoreParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamV2 : SDNode<"NVPTXISD::StoreParamV2", SDTStoreParamV2Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamV4 : SDNode<"NVPTXISD::StoreParamV4", SDTStoreParamV4Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamU32 : SDNode<"NVPTXISD::StoreParamU32", SDTStoreParam32Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamS32 : SDNode<"NVPTXISD::StoreParamS32", SDTStoreParam32Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallArgBegin : SDNode<"NVPTXISD::CallArgBegin", SDTCallArgMarkProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallArg : SDNode<"NVPTXISD::CallArg", SDTCallArgProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def LastCallArg : SDNode<"NVPTXISD::LastCallArg", SDTCallArgProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallArgEnd : SDNode<"NVPTXISD::CallArgEnd", SDTCallVoidProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallVoid : SDNode<"NVPTXISD::CallVoid", SDTCallVoidProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def Prototype : SDNode<"NVPTXISD::Prototype", SDTCallVoidProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallVal : SDNode<"NVPTXISD::CallVal", SDTCallValProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def MoveParam : SDNode<"NVPTXISD::MoveParam", SDTMoveParamProfile,
- []>;
-def StoreRetval : SDNode<"NVPTXISD::StoreRetval", SDTStoreRetvalProfile,
- [SDNPHasChain, SDNPSideEffect]>;
-def StoreRetvalV2 : SDNode<"NVPTXISD::StoreRetvalV2", SDTStoreRetvalV2Profile,
- [SDNPHasChain, SDNPSideEffect]>;
-def StoreRetvalV4 : SDNode<"NVPTXISD::StoreRetvalV4", SDTStoreRetvalV4Profile,
- [SDNPHasChain, SDNPSideEffect]>;
-def PseudoUseParam : SDNode<"NVPTXISD::PseudoUseParam",
- SDTPseudoUseParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def RETURNNode : SDNode<"NVPTXISD::RETURN", SDTCallArgMarkProfile,
- [SDNPHasChain, SDNPSideEffect]>;
-
-class LoadParamMemInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
- !strconcat(!strconcat("ld.param", opstr),
- "\t$dst, [retval0+$b];"),
- []>;
+def DeclareParam :
+ SDNode<"NVPTXISD::DeclareParam", SDTDeclareParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareScalarParam :
+ SDNode<"NVPTXISD::DeclareScalarParam", SDTDeclareScalarParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareRetParam :
+ SDNode<"NVPTXISD::DeclareRetParam", SDTDeclareParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareRet :
+ SDNode<"NVPTXISD::DeclareRet", SDTDeclareScalarParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def LoadParam :
+ SDNode<"NVPTXISD::LoadParam", SDTLoadParamProfile,
+ [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def LoadParamV2 :
+ SDNode<"NVPTXISD::LoadParamV2", SDTLoadParamV2Profile,
+ [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def LoadParamV4 :
+ SDNode<"NVPTXISD::LoadParamV4", SDTLoadParamV4Profile,
+ [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def PrintCall :
+ SDNode<"NVPTXISD::PrintCall", SDTPrintCallProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintConvergentCall :
+ SDNode<"NVPTXISD::PrintConvergentCall", SDTPrintCallProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintCallUni :
+ SDNode<"NVPTXISD::PrintCallUni", SDTPrintCallUniProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintConvergentCallUni :
+ SDNode<"NVPTXISD::PrintConvergentCallUni", SDTPrintCallUniProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParam :
+ SDNode<"NVPTXISD::StoreParam", SDTStoreParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamV2 :
+ SDNode<"NVPTXISD::StoreParamV2", SDTStoreParamV2Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamV4 :
+ SDNode<"NVPTXISD::StoreParamV4", SDTStoreParamV4Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamU32 :
+ SDNode<"NVPTXISD::StoreParamU32", SDTStoreParam32Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamS32 :
+ SDNode<"NVPTXISD::StoreParamS32", SDTStoreParam32Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArgBegin :
+ SDNode<"NVPTXISD::CallArgBegin", SDTCallArgMarkProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArg :
+ SDNode<"NVPTXISD::CallArg", SDTCallArgProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def LastCallArg :
+ SDNode<"NVPTXISD::LastCallArg", SDTCallArgProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArgEnd :
+ SDNode<"NVPTXISD::CallArgEnd", SDTCallVoidProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallVoid :
+ SDNode<"NVPTXISD::CallVoid", SDTCallVoidProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def Prototype :
+ SDNode<"NVPTXISD::Prototype", SDTCallVoidProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallVal :
+ SDNode<"NVPTXISD::CallVal", SDTCallValProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def MoveParam :
+ SDNode<"NVPTXISD::MoveParam", SDTMoveParamProfile, []>;
+def StoreRetval :
+ SDNode<"NVPTXISD::StoreRetval", SDTStoreRetvalProfile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def StoreRetvalV2 :
+ SDNode<"NVPTXISD::StoreRetvalV2", SDTStoreRetvalV2Profile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def StoreRetvalV4 :
+ SDNode<"NVPTXISD::StoreRetvalV4", SDTStoreRetvalV4Profile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def PseudoUseParam :
+ SDNode<"NVPTXISD::PseudoUseParam", SDTPseudoUseParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def RETURNNode :
+ SDNode<"NVPTXISD::RETURN", SDTCallArgMarkProfile,
+ [SDNPHasChain, SDNPSideEffect]>;
+
+let mayLoad = 1 in {
+ class LoadParamMemInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
+ !strconcat(!strconcat("ld.param", opstr),
+ "\t$dst, [retval0+$b];"),
+ []>;
+
+ class LoadParamV2MemInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst, regclass:$dst2), (ins i32imm:$b),
+ !strconcat("ld.param.v2", opstr,
+ "\t{{$dst, $dst2}}, [retval0+$b];"), []>;
+
+ class LoadParamV4MemInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4),
+ (ins i32imm:$b),
+ !strconcat("ld.param.v4", opstr,
+ "\t{{$dst, $dst2, $dst3, $dst4}}, [retval0+$b];"),
+ []>;
+}
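+// For example, LoadParamMemI32 (defined below) prints as
+// "ld.param.b32\t$dst, [retval0+$b];" and carries mayLoad = 1.
+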
class LoadParamRegInst<NVPTXRegClass regclass, string opstr> :
NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
- !strconcat(!strconcat("mov", opstr),
- "\t$dst, retval$b;"),
+ !strconcat("mov", opstr, "\t$dst, retval$b;"),
[(set regclass:$dst, (LoadParam (i32 0), (i32 imm:$b)))]>;
-class LoadParamV2MemInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs regclass:$dst, regclass:$dst2), (ins i32imm:$b),
- !strconcat(!strconcat("ld.param.v2", opstr),
- "\t{{$dst, $dst2}}, [retval0+$b];"), []>;
-
-class LoadParamV4MemInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs regclass:$dst, regclass:$dst2, regclass:$dst3,
- regclass:$dst4),
- (ins i32imm:$b),
- !strconcat(!strconcat("ld.param.v4", opstr),
- "\t{{$dst, $dst2, $dst3, $dst4}}, [retval0+$b];"), []>;
-
-class StoreParamInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
- !strconcat(!strconcat("st.param", opstr),
- "\t[param$a+$b], $val;"),
- []>;
-
-class StoreParamV2Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2,
- i32imm:$a, i32imm:$b),
- !strconcat(!strconcat("st.param.v2", opstr),
- "\t[param$a+$b], {{$val, $val2}};"),
- []>;
-
-class StoreParamV4Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val1, regclass:$val2,
- regclass:$val3, i32imm:$a, i32imm:$b),
- !strconcat(!strconcat("st.param.v4", opstr),
- "\t[param$a+$b], {{$val, $val2, $val3, $val4}};"),
- []>;
-
-class StoreRetvalInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, i32imm:$a),
- !strconcat(!strconcat("st.param", opstr),
- "\t[func_retval0+$a], $val;"),
- []>;
-
-class StoreRetvalV2Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, i32imm:$a),
- !strconcat(!strconcat("st.param.v2", opstr),
- "\t[func_retval0+$a], {{$val, $val2}};"),
- []>;
-
-class StoreRetvalV4Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs),
- (ins regclass:$val, regclass:$val2, regclass:$val3,
- regclass:$val4, i32imm:$a),
- !strconcat(!strconcat("st.param.v4", opstr),
- "\t[func_retval0+$a], {{$val, $val2, $val3, $val4}};"),
- []>;
-
-def PrintCallRetInst1 : NVPTXInst<(outs), (ins),
-"call (retval0), ",
- [(PrintCall (i32 1))]>;
-def PrintCallRetInst2 : NVPTXInst<(outs), (ins),
-"call (retval0, retval1), ",
- [(PrintCall (i32 2))]>;
-def PrintCallRetInst3 : NVPTXInst<(outs), (ins),
-"call (retval0, retval1, retval2), ",
- [(PrintCall (i32 3))]>;
-def PrintCallRetInst4 : NVPTXInst<(outs), (ins),
-"call (retval0, retval1, retval2, retval3), ",
- [(PrintCall (i32 4))]>;
-def PrintCallRetInst5 : NVPTXInst<(outs), (ins),
-"call (retval0, retval1, retval2, retval3, retval4), ",
- [(PrintCall (i32 5))]>;
-def PrintCallRetInst6 : NVPTXInst<(outs), (ins),
-"call (retval0, retval1, retval2, retval3, retval4, retval5), ",
- [(PrintCall (i32 6))]>;
-def PrintCallRetInst7 : NVPTXInst<(outs), (ins),
-"call (retval0, retval1, retval2, retval3, retval4, retval5, retval6), ",
- [(PrintCall (i32 7))]>;
-def PrintCallRetInst8 : NVPTXInst<(outs), (ins),
-!strconcat("call (retval0, retval1, retval2, retval3, retval4",
- ", retval5, retval6, retval7), "),
- [(PrintCall (i32 8))]>;
-
-def PrintCallNoRetInst : NVPTXInst<(outs), (ins), "call ",
- [(PrintCall (i32 0))]>;
-
-def PrintCallUniRetInst1 : NVPTXInst<(outs), (ins),
-"call.uni (retval0), ",
- [(PrintCallUni (i32 1))]>;
-def PrintCallUniRetInst2 : NVPTXInst<(outs), (ins),
-"call.uni (retval0, retval1), ",
- [(PrintCallUni (i32 2))]>;
-def PrintCallUniRetInst3 : NVPTXInst<(outs), (ins),
-"call.uni (retval0, retval1, retval2), ",
- [(PrintCallUni (i32 3))]>;
-def PrintCallUniRetInst4 : NVPTXInst<(outs), (ins),
-"call.uni (retval0, retval1, retval2, retval3), ",
- [(PrintCallUni (i32 4))]>;
-def PrintCallUniRetInst5 : NVPTXInst<(outs), (ins),
-"call.uni (retval0, retval1, retval2, retval3, retval4), ",
- [(PrintCallUni (i32 5))]>;
-def PrintCallUniRetInst6 : NVPTXInst<(outs), (ins),
-"call.uni (retval0, retval1, retval2, retval3, retval4, retval5), ",
- [(PrintCallUni (i32 6))]>;
-def PrintCallUniRetInst7 : NVPTXInst<(outs), (ins),
-"call.uni (retval0, retval1, retval2, retval3, retval4, retval5, retval6), ",
- [(PrintCallUni (i32 7))]>;
-def PrintCallUniRetInst8 : NVPTXInst<(outs), (ins),
-!strconcat("call.uni (retval0, retval1, retval2, retval3, retval4",
- ", retval5, retval6, retval7), "),
- [(PrintCallUni (i32 8))]>;
-
-def PrintCallUniNoRetInst : NVPTXInst<(outs), (ins), "call.uni ",
- [(PrintCallUni (i32 0))]>;
+let mayStore = 1 in {
+ class StoreParamInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
+ !strconcat("st.param", opstr, "\t[param$a+$b], $val;"),
+ []>;
+
+ class StoreParamV2Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, regclass:$val2,
+ i32imm:$a, i32imm:$b),
+ !strconcat("st.param.v2", opstr,
+ "\t[param$a+$b], {{$val, $val2}};"),
+ []>;
+
+ class StoreParamV4Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, regclass:$val3,
+ regclass:$val4, i32imm:$a,
+ i32imm:$b),
+ !strconcat("st.param.v4", opstr,
+ "\t[param$a+$b], {{$val, $val2, $val3, $val4}};"),
+ []>;
+
+ class StoreRetvalInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, i32imm:$a),
+ !strconcat("st.param", opstr, "\t[func_retval0+$a], $val;"),
+ []>;
+
+ class StoreRetvalV2Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, i32imm:$a),
+ !strconcat("st.param.v2", opstr,
+ "\t[func_retval0+$a], {{$val, $val2}};"),
+ []>;
+
+ class StoreRetvalV4Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs),
+ (ins regclass:$val, regclass:$val2, regclass:$val3,
+ regclass:$val4, i32imm:$a),
+ !strconcat("st.param.v4", opstr,
+ "\t[func_retval0+$a], {{$val, $val2, $val3, $val4}};"),
+ []>;
+}
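+// For example, StoreParamV2I32 (defined below) prints as
+// "st.param.v2.b32\t[param$a+$b], {{$val, $val2}};" and carries mayStore = 1.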
+
+let isCall=1 in {
+ multiclass CALL<string OpcStr, SDNode OpNode> {
+ def PrintCallNoRetInst : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " "), [(OpNode (i32 0))]>;
+ def PrintCallRetInst1 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0), "), [(OpNode (i32 1))]>;
+ def PrintCallRetInst2 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1), "), [(OpNode (i32 2))]>;
+ def PrintCallRetInst3 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2), "), [(OpNode (i32 3))]>;
+ def PrintCallRetInst4 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3), "),
+ [(OpNode (i32 4))]>;
+ def PrintCallRetInst5 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4), "),
+ [(OpNode (i32 5))]>;
+ def PrintCallRetInst6 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
+ "retval5), "),
+ [(OpNode (i32 6))]>;
+ def PrintCallRetInst7 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
+ "retval5, retval6), "),
+ [(OpNode (i32 7))]>;
+ def PrintCallRetInst8 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
+ "retval5, retval6, retval7), "),
+ [(OpNode (i32 8))]>;
+ }
+}
+
+defm Call : CALL<"call", PrintCall>;
+defm CallUni : CALL<"call.uni", PrintCallUni>;
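+// For example, the multiclass expands CallUni into records such as
+// CallUniPrintCallRetInst1, which prints "call.uni (retval0), " and matches
+// (PrintCallUni (i32 1)).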
+
+// Convergent call instructions. These are identical to regular calls, except
+// they have the isConvergent bit set.
+let isConvergent=1 in {
+ defm ConvergentCall : CALL<"call", PrintConvergentCall>;
+ defm ConvergentCallUni : CALL<"call.uni", PrintConvergentCallUni>;
+}
def LoadParamMemI64 : LoadParamMemInst<Int64Regs, ".b64">;
def LoadParamMemI32 : LoadParamMemInst<Int32Regs, ".b32">;
@@ -1911,39 +1923,15 @@ def StoreParamV2I32 : StoreParamV2Inst<Int32Regs, ".b32">;
def StoreParamV2I16 : StoreParamV2Inst<Int16Regs, ".b16">;
def StoreParamV2I8 : StoreParamV2Inst<Int16Regs, ".b8">;
-// FIXME: StoreParamV4Inst crashes llvm-tblgen :(
-//def StoreParamV4I32 : StoreParamV4Inst<Int32Regs, ".b32">;
-def StoreParamV4I32 : NVPTXInst<(outs), (ins Int32Regs:$val, Int32Regs:$val2,
- Int32Regs:$val3, Int32Regs:$val4,
- i32imm:$a, i32imm:$b),
- "st.param.v4.b32\t[param$a+$b], {{$val, $val2, $val3, $val4}};",
- []>;
-
-def StoreParamV4I16 : NVPTXInst<(outs), (ins Int16Regs:$val, Int16Regs:$val2,
- Int16Regs:$val3, Int16Regs:$val4,
- i32imm:$a, i32imm:$b),
- "st.param.v4.b16\t[param$a+$b], {{$val, $val2, $val3, $val4}};",
- []>;
-
-def StoreParamV4I8 : NVPTXInst<(outs), (ins Int16Regs:$val, Int16Regs:$val2,
- Int16Regs:$val3, Int16Regs:$val4,
- i32imm:$a, i32imm:$b),
- "st.param.v4.b8\t[param$a+$b], {{$val, $val2, $val3, $val4}};",
- []>;
-
-def StoreParamF32 : StoreParamInst<Float32Regs, ".f32">;
-def StoreParamF64 : StoreParamInst<Float64Regs, ".f64">;
+def StoreParamV4I32 : StoreParamV4Inst<Int32Regs, ".b32">;
+def StoreParamV4I16 : StoreParamV4Inst<Int16Regs, ".b16">;
+def StoreParamV4I8 : StoreParamV4Inst<Int16Regs, ".b8">;
+
+def StoreParamF32 : StoreParamInst<Float32Regs, ".f32">;
+def StoreParamF64 : StoreParamInst<Float64Regs, ".f64">;
def StoreParamV2F32 : StoreParamV2Inst<Float32Regs, ".f32">;
def StoreParamV2F64 : StoreParamV2Inst<Float64Regs, ".f64">;
-// FIXME: StoreParamV4Inst crashes llvm-tblgen :(
-//def StoreParamV4F32 : StoreParamV4Inst<Float32Regs, ".f32">;
-def StoreParamV4F32 : NVPTXInst<(outs),
- (ins Float32Regs:$val, Float32Regs:$val2,
- Float32Regs:$val3, Float32Regs:$val4,
- i32imm:$a, i32imm:$b),
- "st.param.v4.f32\t[param$a+$b], {{$val, $val2, $val3, $val4}};",
- []>;
-
+def StoreParamV4F32 : StoreParamV4Inst<Float32Regs, ".f32">;
def StoreRetvalI64 : StoreRetvalInst<Int64Regs, ".b64">;
def StoreRetvalI32 : StoreRetvalInst<Int32Regs, ".b32">;
@@ -1969,89 +1957,88 @@ def CallArgEndInst0 : NVPTXInst<(outs), (ins), ")", [(CallArgEnd (i32 0))]>;
def RETURNInst : NVPTXInst<(outs), (ins), "ret;", [(RETURNNode)]>;
class CallArgInst<NVPTXRegClass regclass> :
- NVPTXInst<(outs), (ins regclass:$a), "$a, ",
- [(CallArg (i32 0), regclass:$a)]>;
+ NVPTXInst<(outs), (ins regclass:$a), "$a, ",
+ [(CallArg (i32 0), regclass:$a)]>;
class LastCallArgInst<NVPTXRegClass regclass> :
- NVPTXInst<(outs), (ins regclass:$a), "$a",
- [(LastCallArg (i32 0), regclass:$a)]>;
+ NVPTXInst<(outs), (ins regclass:$a), "$a",
+ [(LastCallArg (i32 0), regclass:$a)]>;
def CallArgI64 : CallArgInst<Int64Regs>;
def CallArgI32 : CallArgInst<Int32Regs>;
def CallArgI16 : CallArgInst<Int16Regs>;
-
def CallArgF64 : CallArgInst<Float64Regs>;
def CallArgF32 : CallArgInst<Float32Regs>;
def LastCallArgI64 : LastCallArgInst<Int64Regs>;
def LastCallArgI32 : LastCallArgInst<Int32Regs>;
def LastCallArgI16 : LastCallArgInst<Int16Regs>;
-
def LastCallArgF64 : LastCallArgInst<Float64Regs>;
def LastCallArgF32 : LastCallArgInst<Float32Regs>;
def CallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a, ",
[(CallArg (i32 0), (i32 imm:$a))]>;
def LastCallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a",
- [(LastCallArg (i32 0), (i32 imm:$a))]>;
+ [(LastCallArg (i32 0), (i32 imm:$a))]>;
def CallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a, ",
[(CallArg (i32 1), (i32 imm:$a))]>;
def LastCallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a",
- [(LastCallArg (i32 1), (i32 imm:$a))]>;
-
-def CallVoidInst : NVPTXInst<(outs), (ins imem:$addr),
- "$addr, ",
- [(CallVoid (Wrapper tglobaladdr:$addr))]>;
-def CallVoidInstReg : NVPTXInst<(outs), (ins Int32Regs:$addr),
- "$addr, ",
- [(CallVoid Int32Regs:$addr)]>;
-def CallVoidInstReg64 : NVPTXInst<(outs), (ins Int64Regs:$addr),
- "$addr, ",
- [(CallVoid Int64Regs:$addr)]>;
-def PrototypeInst : NVPTXInst<(outs), (ins i32imm:$val),
- ", prototype_$val;",
- [(Prototype (i32 imm:$val))]>;
-
-def DeclareRetMemInst : NVPTXInst<(outs),
- (ins i32imm:$align, i32imm:$size, i32imm:$num),
- ".param .align $align .b8 retval$num[$size];",
- [(DeclareRetParam (i32 imm:$align), (i32 imm:$size), (i32 imm:$num))]>;
-def DeclareRetScalarInst : NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
- ".param .b$size retval$num;",
- [(DeclareRet (i32 1), (i32 imm:$size), (i32 imm:$num))]>;
-def DeclareRetRegInst : NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
- ".reg .b$size retval$num;",
- [(DeclareRet (i32 2), (i32 imm:$size), (i32 imm:$num))]>;
-
-def DeclareParamInst : NVPTXInst<(outs),
- (ins i32imm:$align, i32imm:$a, i32imm:$size),
- ".param .align $align .b8 param$a[$size];",
- [(DeclareParam (i32 imm:$align), (i32 imm:$a), (i32 imm:$size))]>;
-def DeclareScalarParamInst : NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
- ".param .b$size param$a;",
- [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 0))]>;
-def DeclareScalarRegInst : NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
- ".reg .b$size param$a;",
- [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 1))]>;
+ [(LastCallArg (i32 1), (i32 imm:$a))]>;
+
+def CallVoidInst : NVPTXInst<(outs), (ins imem:$addr), "$addr, ",
+ [(CallVoid (Wrapper tglobaladdr:$addr))]>;
+def CallVoidInstReg : NVPTXInst<(outs), (ins Int32Regs:$addr), "$addr, ",
+ [(CallVoid Int32Regs:$addr)]>;
+def CallVoidInstReg64 : NVPTXInst<(outs), (ins Int64Regs:$addr), "$addr, ",
+ [(CallVoid Int64Regs:$addr)]>;
+def PrototypeInst : NVPTXInst<(outs), (ins i32imm:$val), ", prototype_$val;",
+ [(Prototype (i32 imm:$val))]>;
+
+def DeclareRetMemInst :
+ NVPTXInst<(outs), (ins i32imm:$align, i32imm:$size, i32imm:$num),
+ ".param .align $align .b8 retval$num[$size];",
+ [(DeclareRetParam (i32 imm:$align), (i32 imm:$size), (i32 imm:$num))]>;
+def DeclareRetScalarInst :
+ NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
+ ".param .b$size retval$num;",
+ [(DeclareRet (i32 1), (i32 imm:$size), (i32 imm:$num))]>;
+def DeclareRetRegInst :
+ NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
+ ".reg .b$size retval$num;",
+ [(DeclareRet (i32 2), (i32 imm:$size), (i32 imm:$num))]>;
+
+def DeclareParamInst :
+ NVPTXInst<(outs), (ins i32imm:$align, i32imm:$a, i32imm:$size),
+ ".param .align $align .b8 param$a[$size];",
+ [(DeclareParam (i32 imm:$align), (i32 imm:$a), (i32 imm:$size))]>;
+def DeclareScalarParamInst :
+ NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
+ ".param .b$size param$a;",
+ [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 0))]>;
+def DeclareScalarRegInst :
+ NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
+ ".reg .b$size param$a;",
+ [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 1))]>;
class MoveParamInst<NVPTXRegClass regclass, string asmstr> :
- NVPTXInst<(outs regclass:$dst), (ins regclass:$src),
- !strconcat(!strconcat("mov", asmstr), "\t$dst, $src;"),
- [(set regclass:$dst, (MoveParam regclass:$src))]>;
+ NVPTXInst<(outs regclass:$dst), (ins regclass:$src),
+ !strconcat("mov", asmstr, "\t$dst, $src;"),
+ [(set regclass:$dst, (MoveParam regclass:$src))]>;
def MoveParamI64 : MoveParamInst<Int64Regs, ".b64">;
def MoveParamI32 : MoveParamInst<Int32Regs, ".b32">;
-def MoveParamI16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
- "cvt.u16.u32\t$dst, $src;",
- [(set Int16Regs:$dst, (MoveParam Int16Regs:$src))]>;
+def MoveParamI16 :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "cvt.u16.u32\t$dst, $src;",
+ [(set Int16Regs:$dst, (MoveParam Int16Regs:$src))]>;
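// (MoveParamI16 narrows with cvt.u16.u32 rather than a plain mov, presumably
// because sub-32-bit parameters travel through 32-bit registers in PTX.)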
def MoveParamF64 : MoveParamInst<Float64Regs, ".f64">;
def MoveParamF32 : MoveParamInst<Float32Regs, ".f32">;
class PseudoUseParamInst<NVPTXRegClass regclass> :
- NVPTXInst<(outs), (ins regclass:$src),
- "// Pseudo use of $src",
- [(PseudoUseParam regclass:$src)]>;
+ NVPTXInst<(outs), (ins regclass:$src),
+ "// Pseudo use of $src",
+ [(PseudoUseParam regclass:$src)]>;
def PseudoUseParamI64 : PseudoUseParamInst<Int64Regs>;
def PseudoUseParamI32 : PseudoUseParamInst<Int32Regs>;
@@ -2064,254 +2051,278 @@ def PseudoUseParamF32 : PseudoUseParamInst<Float32Regs>;
// Load / Store Handling
//
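// (Suffix convention for the LD/ST variants defined below: _avar takes a
// direct address operand, _areg/_areg_64 a 32- or 64-bit address register,
// _ari/_ari_64 an address register plus immediate offset, and _asi a direct
// address plus immediate offset.)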
multiclass LD<NVPTXRegClass regclass> {
- def _avar : NVPTXInst<(outs regclass:$dst),
+ def _avar : NVPTXInst<
+ (outs regclass:$dst),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
-!strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t$dst, [$addr];"), []>;
- def _areg : NVPTXInst<(outs regclass:$dst),
+ i32imm:$fromWidth, imem:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr];", []>;
+ def _areg : NVPTXInst<
+ (outs regclass:$dst),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
-!strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t$dst, [$addr];"), []>;
- def _areg_64 : NVPTXInst<(outs regclass:$dst),
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr];", []>;
+ def _areg_64 : NVPTXInst<
+ (outs regclass:$dst),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth",
- " \t$dst, [$addr];"), []>;
- def _ari : NVPTXInst<(outs regclass:$dst),
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr];", []>;
+ def _ari : NVPTXInst<
+ (outs regclass:$dst),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
-!strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t$dst, [$addr+$offset];"), []>;
- def _ari_64 : NVPTXInst<(outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth",
- " \t$dst, [$addr+$offset];"), []>;
- def _asi : NVPTXInst<(outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
-!strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t$dst, [$addr+$offset];"), []>;
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr+$offset];", []>;
+ def _ari_64 : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr+$offset];", []>;
+ def _asi : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr+$offset];", []>;
}
let mayLoad=1, hasSideEffects=0 in {
-defm LD_i8 : LD<Int16Regs>;
-defm LD_i16 : LD<Int16Regs>;
-defm LD_i32 : LD<Int32Regs>;
-defm LD_i64 : LD<Int64Regs>;
-defm LD_f32 : LD<Float32Regs>;
-defm LD_f64 : LD<Float64Regs>;
+ defm LD_i8 : LD<Int16Regs>;
+ defm LD_i16 : LD<Int16Regs>;
+ defm LD_i32 : LD<Int32Regs>;
+ defm LD_i64 : LD<Int64Regs>;
+ defm LD_f32 : LD<Float32Regs>;
+ defm LD_f64 : LD<Float64Regs>;
}
multiclass ST<NVPTXRegClass regclass> {
- def _avar : NVPTXInst<(outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, imem:$addr),
-!strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth",
- " \t[$addr], $src;"), []>;
- def _areg : NVPTXInst<(outs),
+ def _avar : NVPTXInst<
+ (outs),
(ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr),
-!strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth",
- " \t[$addr], $src;"), []>;
- def _areg_64 : NVPTXInst<(outs),
+ LdStCode:$Sign, i32imm:$toWidth, imem:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr], $src;", []>;
+ def _areg : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr], $src;", []>;
+ def _areg_64 : NVPTXInst<
+ (outs),
(ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr),
- !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth ",
- "\t[$addr], $src;"), []>;
- def _ari : NVPTXInst<(outs),
+ LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr], $src;", []>;
+ def _ari : NVPTXInst<
+ (outs),
(ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr, i32imm:$offset),
-!strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth",
- " \t[$addr+$offset], $src;"), []>;
- def _ari_64 : NVPTXInst<(outs),
+ LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr+$offset], $src;", []>;
+ def _ari_64 : NVPTXInst<
+ (outs),
(ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr, i32imm:$offset),
- !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth ",
- "\t[$addr+$offset], $src;"), []>;
- def _asi : NVPTXInst<(outs),
+ LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr+$offset], $src;", []>;
+ def _asi : NVPTXInst<
+ (outs),
(ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, imem:$addr, i32imm:$offset),
-!strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth",
- " \t[$addr+$offset], $src;"), []>;
+ LdStCode:$Sign, i32imm:$toWidth, imem:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr+$offset], $src;", []>;
}
let mayStore=1, hasSideEffects=0 in {
-defm ST_i8 : ST<Int16Regs>;
-defm ST_i16 : ST<Int16Regs>;
-defm ST_i32 : ST<Int32Regs>;
-defm ST_i64 : ST<Int64Regs>;
-defm ST_f32 : ST<Float32Regs>;
-defm ST_f64 : ST<Float64Regs>;
+ defm ST_i8 : ST<Int16Regs>;
+ defm ST_i16 : ST<Int16Regs>;
+ defm ST_i32 : ST<Int32Regs>;
+ defm ST_i64 : ST<Int64Regs>;
+ defm ST_f32 : ST<Float32Regs>;
+ defm ST_f64 : ST<Float64Regs>;
}
-// The following is used only in and after vector elementizations.
-// Vector elementization happens at the machine instruction level, so the
-// following instruction
-// never appears in the DAG.
+// The following instructions are used only in and after vector
+// elementization. Vector elementization happens at the machine instruction
+// level, so these instructions never appear in the DAG.
multiclass LD_VEC<NVPTXRegClass regclass> {
- def _v2_avar : NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ def _v2_avar : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t{{$dst1, $dst2}}, [$addr];"), []>;
- def _v2_areg : NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ i32imm:$fromWidth, imem:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr];", []>;
+ def _v2_areg : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t{{$dst1, $dst2}}, [$addr];"), []>;
- def _v2_areg_64 : NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr];", []>;
+ def _v2_areg_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t{{$dst1, $dst2}}, [$addr];"), []>;
- def _v2_ari : NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr];", []>;
+ def _v2_ari : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t{{$dst1, $dst2}}, [$addr+$offset];"), []>;
- def _v2_ari_64 : NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
+ def _v2_ari_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t{{$dst1, $dst2}}, [$addr+$offset];"), []>;
- def _v2_asi : NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
+ i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
+ def _v2_asi : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t{{$dst1, $dst2}}, [$addr+$offset];"), []>;
- def _v4_avar : NVPTXInst<(outs regclass:$dst1, regclass:$dst2,
- regclass:$dst3, regclass:$dst4),
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
+ def _v4_avar : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];"), []>;
- def _v4_areg : NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
- regclass:$dst4),
+ i32imm:$fromWidth, imem:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
+ def _v4_areg : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];"), []>;
- def _v4_areg_64 : NVPTXInst<(outs regclass:$dst1, regclass:$dst2,
- regclass:$dst3, regclass:$dst4),
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
+ def _v4_areg_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];"), []>;
- def _v4_ari : NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
- regclass:$dst4),
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
+ def _v4_ari : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];"),
- []>;
- def _v4_ari_64 : NVPTXInst<(outs regclass:$dst1, regclass:$dst2,
- regclass:$dst3, regclass:$dst4),
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
+ def _v4_ari_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];"),
- []>;
- def _v4_asi : NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
- regclass:$dst4),
+ i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
+ def _v4_asi : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
(ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- !strconcat("ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];"),
- []>;
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
}
let mayLoad=1, hasSideEffects=0 in {
-defm LDV_i8 : LD_VEC<Int16Regs>;
-defm LDV_i16 : LD_VEC<Int16Regs>;
-defm LDV_i32 : LD_VEC<Int32Regs>;
-defm LDV_i64 : LD_VEC<Int64Regs>;
-defm LDV_f32 : LD_VEC<Float32Regs>;
-defm LDV_f64 : LD_VEC<Float64Regs>;
+ defm LDV_i8 : LD_VEC<Int16Regs>;
+ defm LDV_i16 : LD_VEC<Int16Regs>;
+ defm LDV_i32 : LD_VEC<Int32Regs>;
+ defm LDV_i64 : LD_VEC<Int64Regs>;
+ defm LDV_f32 : LD_VEC<Float32Regs>;
+ defm LDV_f64 : LD_VEC<Float64Regs>;
}
multiclass ST_VEC<NVPTXRegClass regclass> {
- def _v2_avar : NVPTXInst<(outs),
+ def _v2_avar : NVPTXInst<
+ (outs),
(ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),
- !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t[$addr], {{$src1, $src2}};"), []>;
- def _v2_areg : NVPTXInst<(outs),
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2}};", []>;
+ def _v2_areg : NVPTXInst<
+ (outs),
(ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),
- !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t[$addr], {{$src1, $src2}};"), []>;
- def _v2_areg_64 : NVPTXInst<(outs),
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2}};", []>;
+ def _v2_areg_64 : NVPTXInst<
+ (outs),
(ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr),
- !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t[$addr], {{$src1, $src2}};"), []>;
- def _v2_ari : NVPTXInst<(outs),
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2}};", []>;
+ def _v2_ari : NVPTXInst<
+ (outs),
(ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr,
- i32imm:$offset),
- !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t[$addr+$offset], {{$src1, $src2}};"), []>;
- def _v2_ari_64 : NVPTXInst<(outs),
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr,
+ i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2}};", []>;
+ def _v2_ari_64 : NVPTXInst<
+ (outs),
(ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr,
- i32imm:$offset),
- !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t[$addr+$offset], {{$src1, $src2}};"), []>;
- def _v2_asi : NVPTXInst<(outs),
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr,
+ i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2}};", []>;
+ def _v2_asi : NVPTXInst<
+ (outs),
(ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr,
- i32imm:$offset),
- !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t[$addr+$offset], {{$src1, $src2}};"), []>;
- def _v4_avar : NVPTXInst<(outs),
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr,
+ i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2}};", []>;
+ def _v4_avar : NVPTXInst<
+ (outs),
(ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t[$addr], {{$src1, $src2, $src3, $src4}};"), []>;
- def _v4_areg : NVPTXInst<(outs),
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_areg : NVPTXInst<
+ (outs),
(ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t[$addr], {{$src1, $src2, $src3, $src4}};"), []>;
- def _v4_areg_64 : NVPTXInst<(outs),
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_areg_64 : NVPTXInst<
+ (outs),
(ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t[$addr], {{$src1, $src2, $src3, $src4}};"), []>;
- def _v4_ari : NVPTXInst<(outs),
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_ari : NVPTXInst<
+ (outs),
(ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};"),
- []>;
- def _v4_ari_64 : NVPTXInst<(outs),
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_ari_64 : NVPTXInst<
+ (outs),
(ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};"),
- []>;
- def _v4_asi : NVPTXInst<(outs),
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_asi : NVPTXInst<
+ (outs),
(ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- !strconcat("st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}",
- "$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};"),
- []>;
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
}
+
let mayStore=1, hasSideEffects=0 in {
-defm STV_i8 : ST_VEC<Int16Regs>;
-defm STV_i16 : ST_VEC<Int16Regs>;
-defm STV_i32 : ST_VEC<Int32Regs>;
-defm STV_i64 : ST_VEC<Int64Regs>;
-defm STV_f32 : ST_VEC<Float32Regs>;
-defm STV_f64 : ST_VEC<Float64Regs>;
+ defm STV_i8 : ST_VEC<Int16Regs>;
+ defm STV_i16 : ST_VEC<Int16Regs>;
+ defm STV_i32 : ST_VEC<Int32Regs>;
+ defm STV_i64 : ST_VEC<Int64Regs>;
+ defm STV_f32 : ST_VEC<Float32Regs>;
+ defm STV_f64 : ST_VEC<Float64Regs>;
}
@@ -2525,64 +2536,52 @@ def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b),
(SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-// pack a set of smaller int registers to a larger int register
-def V4I16toI64 : NVPTXInst<(outs Int64Regs:$d),
- (ins Int16Regs:$s1, Int16Regs:$s2,
- Int16Regs:$s3, Int16Regs:$s4),
- "mov.b64\t$d, {{$s1, $s2, $s3, $s4}};",
- []>;
-def V2I16toI32 : NVPTXInst<(outs Int32Regs:$d),
- (ins Int16Regs:$s1, Int16Regs:$s2),
- "mov.b32\t$d, {{$s1, $s2}};",
- []>;
-def V2I32toI64 : NVPTXInst<(outs Int64Regs:$d),
- (ins Int32Regs:$s1, Int32Regs:$s2),
- "mov.b64\t$d, {{$s1, $s2}};",
- []>;
-def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d),
- (ins Float32Regs:$s1, Float32Regs:$s2),
- "mov.b64\t$d, {{$s1, $s2}};",
- []>;
-
-// unpack a larger int register to a set of smaller int registers
-def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2,
- Int16Regs:$d3, Int16Regs:$d4),
- (ins Int64Regs:$s),
- "mov.b64\t{{$d1, $d2, $d3, $d4}}, $s;",
- []>;
-def I32toV2I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2),
- (ins Int32Regs:$s),
- "mov.b32\t{{$d1, $d2}}, $s;",
- []>;
-def I64toV2I32 : NVPTXInst<(outs Int32Regs:$d1, Int32Regs:$d2),
- (ins Int64Regs:$s),
- "mov.b64\t{{$d1, $d2}}, $s;",
- []>;
-def F64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2),
- (ins Float64Regs:$s),
- "mov.b64\t{{$d1, $d2}}, $s;",
- []>;
+let hasSideEffects = 0 in {
+ // pack a set of smaller int registers to a larger int register
+ def V4I16toI64 : NVPTXInst<(outs Int64Regs:$d),
+ (ins Int16Regs:$s1, Int16Regs:$s2,
+ Int16Regs:$s3, Int16Regs:$s4),
+ "mov.b64\t$d, {{$s1, $s2, $s3, $s4}};", []>;
+ def V2I16toI32 : NVPTXInst<(outs Int32Regs:$d),
+ (ins Int16Regs:$s1, Int16Regs:$s2),
+ "mov.b32\t$d, {{$s1, $s2}};", []>;
+ def V2I32toI64 : NVPTXInst<(outs Int64Regs:$d),
+ (ins Int32Regs:$s1, Int32Regs:$s2),
+ "mov.b64\t$d, {{$s1, $s2}};", []>;
+ def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d),
+ (ins Float32Regs:$s1, Float32Regs:$s2),
+ "mov.b64\t$d, {{$s1, $s2}};", []>;
+
+ // unpack a larger int register to a set of smaller int registers
+ def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2,
+ Int16Regs:$d3, Int16Regs:$d4),
+ (ins Int64Regs:$s),
+ "mov.b64\t{{$d1, $d2, $d3, $d4}}, $s;", []>;
+ def I32toV2I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2),
+ (ins Int32Regs:$s),
+ "mov.b32\t{{$d1, $d2}}, $s;", []>;
+ def I64toV2I32 : NVPTXInst<(outs Int32Regs:$d1, Int32Regs:$d2),
+ (ins Int64Regs:$s),
+ "mov.b64\t{{$d1, $d2}}, $s;", []>;
+ def F64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2),
+ (ins Float64Regs:$s),
+ "mov.b64\t{{$d1, $d2}}, $s;", []>;
+}
// Count leading zeros
-def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
- "clz.b32\t$d, $a;",
- []>;
-def CLZr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
- "clz.b64\t$d, $a;",
- []>;
+let hasSideEffects = 0 in {
+ def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
+ "clz.b32\t$d, $a;", []>;
+ def CLZr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "clz.b64\t$d, $a;", []>;
+}
// 32-bit has a direct PTX instruction
-def : Pat<(ctlz Int32Regs:$a),
- (CLZr32 Int32Regs:$a)>;
-def : Pat<(ctlz_zero_undef Int32Regs:$a),
- (CLZr32 Int32Regs:$a)>;
+def : Pat<(ctlz Int32Regs:$a), (CLZr32 Int32Regs:$a)>;
// For 64-bit, the result in PTX is actually 32-bit, so we zero-extend
// to 64-bit to match the LLVM semantics.
-def : Pat<(ctlz Int64Regs:$a),
- (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;
-def : Pat<(ctlz_zero_undef Int64Regs:$a),
- (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;
+def : Pat<(ctlz Int64Regs:$a), (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;
// For 16-bit, we zero-extend to 32-bit, then trunc the result back
// to 16 bits (ctlz of a 16-bit value is guaranteed to require less
@@ -2592,34 +2591,27 @@ def : Pat<(ctlz Int16Regs:$a),
(SUBi16ri (CVT_u16_u32 (CLZr32
(CVT_u32_u16 Int16Regs:$a, CvtNONE)),
CvtNONE), 16)>;
-def : Pat<(ctlz_zero_undef Int16Regs:$a),
- (SUBi16ri (CVT_u16_u32 (CLZr32
- (CVT_u32_u16 Int16Regs:$a, CvtNONE)),
- CvtNONE), 16)>;
// Population count
-def POPCr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
- "popc.b32\t$d, $a;",
- []>;
-def POPCr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
- "popc.b64\t$d, $a;",
- []>;
+let hasSideEffects = 0 in {
+ def POPCr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
+ "popc.b32\t$d, $a;", []>;
+ def POPCr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "popc.b64\t$d, $a;", []>;
+}
// 32-bit has a direct PTX instruction
-def : Pat<(ctpop Int32Regs:$a),
- (POPCr32 Int32Regs:$a)>;
+def : Pat<(ctpop Int32Regs:$a), (POPCr32 Int32Regs:$a)>;
// For 64-bit, the result in PTX is actually 32-bit, so we zero-extend
// to 64-bit to match the LLVM semantics.
-def : Pat<(ctpop Int64Regs:$a),
- (CVT_u64_u32 (POPCr64 Int64Regs:$a), CvtNONE)>;
+def : Pat<(ctpop Int64Regs:$a), (CVT_u64_u32 (POPCr64 Int64Regs:$a), CvtNONE)>;
// For 16-bit, we zero-extend to 32-bit, then trunc the result back
// to 16 bits (ctpop of a 16-bit value is guaranteed to require less
// than 16 bits to store)
def : Pat<(ctpop Int16Regs:$a),
- (CVT_u16_u32 (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)),
- CvtNONE)>;
+ (CVT_u16_u32 (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE)>;
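// A hedged CUDA sketch of the width mismatch handled above (bitCounts is a
// hypothetical kernel): PTX popc.b64/clz.b64 produce a 32-bit result while
// LLVM's ctpop/ctlz on i64 yield i64, hence the CVT_u64_u32 in the patterns.
__global__ void bitCounts(unsigned long long x, int *out) {
  out[0] = __popcll(x);  // popc.b64: 64-bit source, 32-bit result
  out[1] = __clzll(x);   // clz.b64: likewise a 32-bit result
}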
// fround f64 -> f32
def : Pat<(f32 (fround Float64Regs:$a)),
@@ -2633,8 +2625,8 @@ def : Pat<(f64 (fextend Float32Regs:$a)),
def : Pat<(f64 (fextend Float32Regs:$a)),
(CVT_f64_f32 Float32Regs:$a, CvtNONE)>;
-def retflag : SDNode<"NVPTXISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInGlue]>;
+def retflag : SDNode<"NVPTXISD::RET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue]>;
//-----------------------------------
// Control-flow
@@ -2646,88 +2638,77 @@ let isTerminator=1 in {
let isBranch=1 in
def CBranch : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
- "@$a bra \t$target;",
- [(brcond Int1Regs:$a, bb:$target)]>;
+ "@$a bra \t$target;",
+ [(brcond Int1Regs:$a, bb:$target)]>;
let isBranch=1 in
def CBranchOther : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
- "@!$a bra \t$target;",
- []>;
+ "@!$a bra \t$target;", []>;
let isBranch=1, isBarrier=1 in
def GOTO : NVPTXInst<(outs), (ins brtarget:$target),
- "bra.uni \t$target;",
- [(br bb:$target)]>;
+ "bra.uni \t$target;", [(br bb:$target)]>;
}
def : Pat<(brcond Int32Regs:$a, bb:$target),
(CBranch (SETP_u32ri Int32Regs:$a, 0, CmpNE), bb:$target)>;
// SelectionDAGBuilder::visitSwitchCase() will invert the condition of a
-// conditional branch if
-// the target block is the next block so that the code can fall through to the
-// target block.
-// The invertion is done by 'xor condition, 1', which will be translated to
-// (setne condition, -1).
-// Since ptx supports '@!pred bra target', we should use it.
+// conditional branch if the target block is the next block so that the code
+// can fall through to the target block. The inversion is done by 'xor
+// condition, 1', which will be translated to (setne condition, -1). Since PTX
+// supports '@!pred bra target', we should use it.
def : Pat<(brcond (i1 (setne Int1Regs:$a, -1)), bb:$target),
- (CBranchOther Int1Regs:$a, bb:$target)>;
+ (CBranchOther Int1Regs:$a, bb:$target)>;
// Call
-def SDT_NVPTXCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
-def SDT_NVPTXCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
- SDTCisVT<1, i32> ]>;
+def SDT_NVPTXCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
+def SDT_NVPTXCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_NVPTXCallSeqStart,
[SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
-def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_NVPTXCallSeqEnd,
+def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_NVPTXCallSeqEnd,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
- SDNPSideEffect]>;
+ SDNPSideEffect]>;
def SDT_NVPTXCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
def call : SDNode<"NVPTXISD::CALL", SDT_NVPTXCall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def calltarget : Operand<i32>;
let isCall=1 in {
- def CALL : NVPTXInst<(outs), (ins calltarget:$dst),
- "call \t$dst, (1);", []>;
+ def CALL : NVPTXInst<(outs), (ins calltarget:$dst), "call \t$dst, (1);", []>;
}
-def : Pat<(call tglobaladdr:$dst),
- (CALL tglobaladdr:$dst)>;
-def : Pat<(call texternalsym:$dst),
- (CALL texternalsym:$dst)>;
+def : Pat<(call tglobaladdr:$dst), (CALL tglobaladdr:$dst)>;
+def : Pat<(call texternalsym:$dst), (CALL texternalsym:$dst)>;
// Pseudo instructions.
class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
: NVPTXInst<outs, ins, asmstr, pattern>;
-// @TODO: We use some tricks here to emit curly braces. Can we clean this up
-// a bit without TableGen modifications?
-def Callseq_Start : NVPTXInst<(outs), (ins i32imm:$amt),
- "// Callseq Start $amt\n\t{{\n\t.reg .b32 temp_param_reg;\n\t// <end>}}",
- [(callseq_start timm:$amt)]>;
-def Callseq_End : NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
- "\n\t//{{\n\t}}// Callseq End $amt1",
- [(callseq_end timm:$amt1, timm:$amt2)]>;
+def Callseq_Start :
+ NVPTXInst<(outs), (ins i32imm:$amt),
+ "\\{ // callseq $amt\n"
+ "\t.reg .b32 temp_param_reg;",
+ [(callseq_start timm:$amt)]>;
+def Callseq_End :
+ NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "\\} // callseq $amt1",
+ [(callseq_end timm:$amt1, timm:$amt2)]>;
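// Rough shape of the PTX these two instructions bracket (a sketch; foo,
// param0 and the virtual registers are placeholders):
//   { // callseq 0
//   .reg .b32 temp_param_reg;
//   .param .b32 param0;
//   st.param.b32  [param0+0], %r1;
//   call.uni (retval0), foo, (param0);
//   ld.param.b32  %r2, [retval0+0];
//   } // callseq 0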
// trap instruction
-
-def trapinst : NVPTXInst<(outs), (ins),
- "trap;",
- [(trap)]>;
+def trapinst : NVPTXInst<(outs), (ins), "trap;", [(trap)]>;
// Call prototype wrapper
def SDTCallPrototype : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def CallPrototype
- : SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallPrototype :
+ SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
def ProtoIdent : Operand<i32> {
let PrintMethod = "printProtoIdent";
}
-def CALL_PROTOTYPE
- : NVPTXInst<(outs), (ins ProtoIdent:$ident),
- "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;
-
+def CALL_PROTOTYPE :
+ NVPTXInst<(outs), (ins ProtoIdent:$ident),
+ "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;
include "NVPTXIntrinsics.td"
diff --git a/lib/Target/NVPTX/NVPTXIntrinsics.td b/lib/Target/NVPTX/NVPTXIntrinsics.td
index 14e51aa309ea..ed16afa24752 100644
--- a/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -30,11 +30,9 @@ def immDouble1 : PatLeaf<(fpimm), [{
//-----------------------------------
-// Synchronization Functions
+// Synchronization and Shuffle Functions
//-----------------------------------
-def INT_CUDA_SYNCTHREADS : NVPTXInst<(outs), (ins),
- "bar.sync \t0;",
- [(int_cuda_syncthreads)]>;
+let isConvergent = 1 in {
def INT_BARRIER0 : NVPTXInst<(outs), (ins),
"bar.sync \t0;",
[(int_nvvm_barrier0)]>;
@@ -64,6 +62,51 @@ def INT_BARRIER0_OR : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$pred),
!strconcat("}}", ""))))))),
[(set Int32Regs:$dst, (int_nvvm_barrier0_or Int32Regs:$pred))]>;
+def INT_BAR_SYNC : NVPTXInst<(outs), (ins i32imm:$i), "bar.sync\t$i;",
+ [(int_nvvm_bar_sync imm:$i)]>;
+
+// shfl.{up,down,bfly,idx}.b32
+multiclass SHFL<NVPTXRegClass regclass, string mode, Intrinsic IntOp> {
+ // The last two parameters to shfl can be regs or imms. ptxas is smart
+ // enough to inline constant registers, so strictly speaking we don't need to
+  // handle immediates here. But it's easy enough, and it makes our PTX more
+ // readable.
+ def reg : NVPTXInst<
+ (outs regclass:$dst),
+ (ins regclass:$src, Int32Regs:$offset, Int32Regs:$mask),
+ !strconcat("shfl.", mode, ".b32 $dst, $src, $offset, $mask;"),
+ [(set regclass:$dst, (IntOp regclass:$src, Int32Regs:$offset, Int32Regs:$mask))]>;
+
+ def imm1 : NVPTXInst<
+ (outs regclass:$dst),
+ (ins regclass:$src, i32imm:$offset, Int32Regs:$mask),
+ !strconcat("shfl.", mode, ".b32 $dst, $src, $offset, $mask;"),
+ [(set regclass:$dst, (IntOp regclass:$src, imm:$offset, Int32Regs:$mask))]>;
+
+ def imm2 : NVPTXInst<
+ (outs regclass:$dst),
+ (ins regclass:$src, Int32Regs:$offset, i32imm:$mask),
+ !strconcat("shfl.", mode, ".b32 $dst, $src, $offset, $mask;"),
+ [(set regclass:$dst, (IntOp regclass:$src, Int32Regs:$offset, imm:$mask))]>;
+
+ def imm3 : NVPTXInst<
+ (outs regclass:$dst),
+ (ins regclass:$src, i32imm:$offset, i32imm:$mask),
+ !strconcat("shfl.", mode, ".b32 $dst, $src, $offset, $mask;"),
+ [(set regclass:$dst, (IntOp regclass:$src, imm:$offset, imm:$mask))]>;
+}
+
+defm INT_SHFL_DOWN_I32 : SHFL<Int32Regs, "down", int_nvvm_shfl_down_i32>;
+defm INT_SHFL_DOWN_F32 : SHFL<Float32Regs, "down", int_nvvm_shfl_down_f32>;
+defm INT_SHFL_UP_I32 : SHFL<Int32Regs, "up", int_nvvm_shfl_up_i32>;
+defm INT_SHFL_UP_F32 : SHFL<Float32Regs, "up", int_nvvm_shfl_up_f32>;
+defm INT_SHFL_BFLY_I32 : SHFL<Int32Regs, "bfly", int_nvvm_shfl_bfly_i32>;
+defm INT_SHFL_BFLY_F32 : SHFL<Float32Regs, "bfly", int_nvvm_shfl_bfly_f32>;
+defm INT_SHFL_IDX_I32 : SHFL<Int32Regs, "idx", int_nvvm_shfl_idx_i32>;
+defm INT_SHFL_IDX_F32 : SHFL<Float32Regs, "idx", int_nvvm_shfl_idx_f32>;
+
+} // isConvergent = 1
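// A hedged usage sketch (warpSum is hypothetical): the pre-Volta CUDA builtin
// __shfl_down() reaches these patterns via llvm.nvvm.shfl.down.i32. With the
// loop unrolled the offsets are immediates, selecting the imm* variants;
// otherwise the reg form is used.
__global__ void warpSum(const int *in, int *out) {
  int v = in[threadIdx.x];
#pragma unroll
  for (int delta = 16; delta > 0; delta >>= 1)
    v += __shfl_down(v, delta);  // shfl.down.b32
  if ((threadIdx.x & 31) == 0)
    out[threadIdx.x / 32] = v;
}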
+
//-----------------------------------
// Explicit Memory Fence Functions
@@ -1335,51 +1378,17 @@ defm INT_PTX_ATOM_CAS_GEN_64_USE_G : F_ATOMIC_3<Int64Regs, ".global", ".b64",
".cas", atomic_cmp_swap_64_gen, i64imm, useAtomRedG64forGen64>;
-//-----------------------------------
-// Read Special Registers
-//-----------------------------------
-class F_SREG<string OpStr, NVPTXRegClass regclassOut, Intrinsic IntOp> :
- NVPTXInst<(outs regclassOut:$dst), (ins),
- OpStr,
- [(set regclassOut:$dst, (IntOp))]>;
-
-def INT_PTX_SREG_TID_X : F_SREG<"mov.u32 \t$dst, %tid.x;", Int32Regs,
- int_nvvm_read_ptx_sreg_tid_x>;
-def INT_PTX_SREG_TID_Y : F_SREG<"mov.u32 \t$dst, %tid.y;", Int32Regs,
- int_nvvm_read_ptx_sreg_tid_y>;
-def INT_PTX_SREG_TID_Z : F_SREG<"mov.u32 \t$dst, %tid.z;", Int32Regs,
- int_nvvm_read_ptx_sreg_tid_z>;
-
-def INT_PTX_SREG_NTID_X : F_SREG<"mov.u32 \t$dst, %ntid.x;", Int32Regs,
- int_nvvm_read_ptx_sreg_ntid_x>;
-def INT_PTX_SREG_NTID_Y : F_SREG<"mov.u32 \t$dst, %ntid.y;", Int32Regs,
- int_nvvm_read_ptx_sreg_ntid_y>;
-def INT_PTX_SREG_NTID_Z : F_SREG<"mov.u32 \t$dst, %ntid.z;", Int32Regs,
- int_nvvm_read_ptx_sreg_ntid_z>;
-
-def INT_PTX_SREG_CTAID_X : F_SREG<"mov.u32 \t$dst, %ctaid.x;", Int32Regs,
- int_nvvm_read_ptx_sreg_ctaid_x>;
-def INT_PTX_SREG_CTAID_Y : F_SREG<"mov.u32 \t$dst, %ctaid.y;", Int32Regs,
- int_nvvm_read_ptx_sreg_ctaid_y>;
-def INT_PTX_SREG_CTAID_Z : F_SREG<"mov.u32 \t$dst, %ctaid.z;", Int32Regs,
- int_nvvm_read_ptx_sreg_ctaid_z>;
-
-def INT_PTX_SREG_NCTAID_X : F_SREG<"mov.u32 \t$dst, %nctaid.x;", Int32Regs,
- int_nvvm_read_ptx_sreg_nctaid_x>;
-def INT_PTX_SREG_NCTAID_Y : F_SREG<"mov.u32 \t$dst, %nctaid.y;", Int32Regs,
- int_nvvm_read_ptx_sreg_nctaid_y>;
-def INT_PTX_SREG_NCTAID_Z : F_SREG<"mov.u32 \t$dst, %nctaid.z;", Int32Regs,
- int_nvvm_read_ptx_sreg_nctaid_z>;
-
-def INT_PTX_SREG_WARPSIZE : F_SREG<"mov.u32 \t$dst, WARP_SZ;", Int32Regs,
- int_nvvm_read_ptx_sreg_warpsize>;
//-----------------------------------
// Support for ldu on sm_20 or later
//-----------------------------------
+// Don't annotate ldu instructions as mayLoad, as they load from memory that is
+// read-only in a kernel.
+
// Scalar
+
multiclass LDU_G<string TyStr, NVPTXRegClass regclass> {
def areg: NVPTXInst<(outs regclass:$result), (ins Int32Regs:$src),
!strconcat("ldu.global.", TyStr),
@@ -1475,6 +1484,10 @@ defm INT_PTX_LDU_G_v4f32_ELE
// Support for ldg on sm_35 or later
//-----------------------------------
+// Don't annotate ld.global.nc as mayLoad, because these loads go through the
+// non-coherent texture cache, and therefore the memory read must be read-only
+// during the lifetime of the kernel.
+
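// A hedged usage sketch (axpy is hypothetical): on sm_35+ the __ldg() builtin
// emits ld.global.nc and is matched by the LDG_G variants below; the data
// really must stay unmodified while the kernel runs.
__global__ void axpy(float a, const float *__restrict__ x, float *y) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  y[i] += a * __ldg(&x[i]);  // ld.global.nc.f32
}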
multiclass LDG_G<string TyStr, NVPTXRegClass regclass> {
def areg: NVPTXInst<(outs regclass:$result), (ins Int32Regs:$src),
!strconcat("ld.global.nc.", TyStr),
@@ -1836,54 +1849,61 @@ def : Pat<(int_nvvm_rotate_b32 Int32Regs:$src, Int32Regs:$amt),
(ROTL32reg_sw Int32Regs:$src, Int32Regs:$amt)>,
Requires<[noHWROT32]> ;
-def GET_LO_INT64
- : NVPTXInst<(outs Int32Regs:$dst), (ins Int64Regs:$src),
- !strconcat("{{\n\t",
- !strconcat(".reg .b32 %dummy;\n\t",
- !strconcat("mov.b64 \t{$dst,%dummy}, $src;\n\t",
- !strconcat("}}", "")))),
- []> ;
-
-def GET_HI_INT64
- : NVPTXInst<(outs Int32Regs:$dst), (ins Int64Regs:$src),
- !strconcat("{{\n\t",
- !strconcat(".reg .b32 %dummy;\n\t",
- !strconcat("mov.b64 \t{%dummy,$dst}, $src;\n\t",
- !strconcat("}}", "")))),
- []> ;
-
-def PACK_TWO_INT32
- : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$lo, Int32Regs:$hi),
- "mov.b64 \t$dst, {{$lo, $hi}};", []> ;
+let hasSideEffects = 0 in {
+ def GET_LO_INT64
+ : NVPTXInst<(outs Int32Regs:$dst), (ins Int64Regs:$src),
+ !strconcat("{{\n\t",
+ !strconcat(".reg .b32 %dummy;\n\t",
+ !strconcat("mov.b64 \t{$dst,%dummy}, $src;\n\t",
+ !strconcat("}}", "")))),
+ []> ;
+
+ def GET_HI_INT64
+ : NVPTXInst<(outs Int32Regs:$dst), (ins Int64Regs:$src),
+ !strconcat("{{\n\t",
+ !strconcat(".reg .b32 %dummy;\n\t",
+ !strconcat("mov.b64 \t{%dummy,$dst}, $src;\n\t",
+ !strconcat("}}", "")))),
+ []> ;
+}
+
+let hasSideEffects = 0 in {
+ def PACK_TWO_INT32
+ : NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$lo, Int32Regs:$hi),
+ "mov.b64 \t$dst, {{$lo, $hi}};", []> ;
+}
def : Pat<(int_nvvm_swap_lo_hi_b64 Int64Regs:$src),
(PACK_TWO_INT32 (GET_HI_INT64 Int64Regs:$src),
(GET_LO_INT64 Int64Regs:$src))> ;
-// funnel shift, requires >= sm_32
-def SHF_L_WRAP_B32_IMM
- : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, i32imm:$amt),
- "shf.l.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
- Requires<[hasHWROT32]>;
+// Funnel shift, requires >= sm_32. Does not trap if amt is out of range, so
+// no side effects.
+let hasSideEffects = 0 in {
+ def SHF_L_WRAP_B32_IMM
+ : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, i32imm:$amt),
+ "shf.l.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
+ Requires<[hasHWROT32]>;
-def SHF_L_WRAP_B32_REG
- : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
- "shf.l.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
- Requires<[hasHWROT32]>;
+ def SHF_L_WRAP_B32_REG
+ : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.l.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
+ Requires<[hasHWROT32]>;
-def SHF_R_WRAP_B32_IMM
- : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, i32imm:$amt),
- "shf.r.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
- Requires<[hasHWROT32]>;
+ def SHF_R_WRAP_B32_IMM
+ : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, i32imm:$amt),
+ "shf.r.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
+ Requires<[hasHWROT32]>;
-def SHF_R_WRAP_B32_REG
- : NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
- "shf.r.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
- Requires<[hasHWROT32]>;
+ def SHF_R_WRAP_B32_REG
+ : NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.r.wrap.b32 \t$dst, $lo, $hi, $amt;",[]>,
+ Requires<[hasHWROT32]>;
+}
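// A hedged usage sketch (rot64parts is hypothetical): CUDA exposes these
// instructions as __funnelshift_l/__funnelshift_r on sm_32+; the rotate-64
// patterns below build on the same shf.{l,r}.wrap.b32 forms.
__global__ void rot64parts(unsigned *out, unsigned lo, unsigned hi, unsigned s) {
  out[0] = __funnelshift_l(lo, hi, s);  // shf.l.wrap.b32
  out[1] = __funnelshift_r(lo, hi, s);  // shf.r.wrap.b32
}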
// HW version of rotate 64
def : Pat<(int_nvvm_rotate_b64 Int64Regs:$src, (i32 imm:$amt)),
@@ -6950,98 +6970,95 @@ def : Pat<(int_nvvm_sust_p_3d_v4i32_trap
Int32Regs:$x, Int32Regs:$y, Int32Regs:$z,
Int32Regs:$r, Int32Regs:$g, Int32Regs:$b, Int32Regs:$a)>;
+//-----------------------------------
+// Read Special Registers
+//-----------------------------------
-
-//===-- Old PTX Back-end Intrinsics ---------------------------------------===//
-
-// These intrinsics are handled to retain compatibility with the old backend.
-
-// PTX Special Purpose Register Accessor Intrinsics
-
-class PTX_READ_SPECIAL_REGISTER_R64<string regname, Intrinsic intop>
+class PTX_READ_SREG_R64<string regname, Intrinsic intop>
: NVPTXInst<(outs Int64Regs:$d), (ins),
!strconcat(!strconcat("mov.u64\t$d, %", regname), ";"),
[(set Int64Regs:$d, (intop))]>;
-class PTX_READ_SPECIAL_REGISTER_R32<string regname, Intrinsic intop>
+class PTX_READ_SREG_R32<string regname, Intrinsic intop>
: NVPTXInst<(outs Int32Regs:$d), (ins),
!strconcat(!strconcat("mov.u32\t$d, %", regname), ";"),
[(set Int32Regs:$d, (intop))]>;
// TODO Add read vector-version of special registers
-def PTX_READ_TID_X : PTX_READ_SPECIAL_REGISTER_R32<"tid.x",
- int_ptx_read_tid_x>;
-def PTX_READ_TID_Y : PTX_READ_SPECIAL_REGISTER_R32<"tid.y",
- int_ptx_read_tid_y>;
-def PTX_READ_TID_Z : PTX_READ_SPECIAL_REGISTER_R32<"tid.z",
- int_ptx_read_tid_z>;
-def PTX_READ_TID_W : PTX_READ_SPECIAL_REGISTER_R32<"tid.w",
- int_ptx_read_tid_w>;
-
-def PTX_READ_NTID_X : PTX_READ_SPECIAL_REGISTER_R32<"ntid.x",
- int_ptx_read_ntid_x>;
-def PTX_READ_NTID_Y : PTX_READ_SPECIAL_REGISTER_R32<"ntid.y",
- int_ptx_read_ntid_y>;
-def PTX_READ_NTID_Z : PTX_READ_SPECIAL_REGISTER_R32<"ntid.z",
- int_ptx_read_ntid_z>;
-def PTX_READ_NTID_W : PTX_READ_SPECIAL_REGISTER_R32<"ntid.w",
- int_ptx_read_ntid_w>;
-
-def PTX_READ_LANEID : PTX_READ_SPECIAL_REGISTER_R32<"laneid",
- int_ptx_read_laneid>;
-def PTX_READ_WARPID : PTX_READ_SPECIAL_REGISTER_R32<"warpid",
- int_ptx_read_warpid>;
-def PTX_READ_NWARPID : PTX_READ_SPECIAL_REGISTER_R32<"nwarpid",
- int_ptx_read_nwarpid>;
-
-def PTX_READ_CTAID_X : PTX_READ_SPECIAL_REGISTER_R32<"ctaid.x",
- int_ptx_read_ctaid_x>;
-def PTX_READ_CTAID_Y : PTX_READ_SPECIAL_REGISTER_R32<"ctaid.y",
- int_ptx_read_ctaid_y>;
-def PTX_READ_CTAID_Z : PTX_READ_SPECIAL_REGISTER_R32<"ctaid.z",
- int_ptx_read_ctaid_z>;
-def PTX_READ_CTAID_W : PTX_READ_SPECIAL_REGISTER_R32<"ctaid.w",
- int_ptx_read_ctaid_w>;
-
-def PTX_READ_NCTAID_X : PTX_READ_SPECIAL_REGISTER_R32<"nctaid.x",
- int_ptx_read_nctaid_x>;
-def PTX_READ_NCTAID_Y : PTX_READ_SPECIAL_REGISTER_R32<"nctaid.y",
- int_ptx_read_nctaid_y>;
-def PTX_READ_NCTAID_Z : PTX_READ_SPECIAL_REGISTER_R32<"nctaid.z",
- int_ptx_read_nctaid_z>;
-def PTX_READ_NCTAID_W : PTX_READ_SPECIAL_REGISTER_R32<"nctaid.w",
- int_ptx_read_nctaid_w>;
-
-def PTX_READ_SMID : PTX_READ_SPECIAL_REGISTER_R32<"smid",
- int_ptx_read_smid>;
-def PTX_READ_NSMID : PTX_READ_SPECIAL_REGISTER_R32<"nsmid",
- int_ptx_read_nsmid>;
-def PTX_READ_GRIDID : PTX_READ_SPECIAL_REGISTER_R32<"gridid",
- int_ptx_read_gridid>;
-
-def PTX_READ_LANEMASK_EQ
- : PTX_READ_SPECIAL_REGISTER_R32<"lanemask_eq", int_ptx_read_lanemask_eq>;
-def PTX_READ_LANEMASK_LE
- : PTX_READ_SPECIAL_REGISTER_R32<"lanemask_le", int_ptx_read_lanemask_le>;
-def PTX_READ_LANEMASK_LT
- : PTX_READ_SPECIAL_REGISTER_R32<"lanemask_lt", int_ptx_read_lanemask_lt>;
-def PTX_READ_LANEMASK_GE
- : PTX_READ_SPECIAL_REGISTER_R32<"lanemask_ge", int_ptx_read_lanemask_ge>;
-def PTX_READ_LANEMASK_GT
- : PTX_READ_SPECIAL_REGISTER_R32<"lanemask_gt", int_ptx_read_lanemask_gt>;
-
-def PTX_READ_CLOCK
- : PTX_READ_SPECIAL_REGISTER_R32<"clock", int_ptx_read_clock>;
-def PTX_READ_CLOCK64
- : PTX_READ_SPECIAL_REGISTER_R64<"clock64", int_ptx_read_clock64>;
-
-def PTX_READ_PM0 : PTX_READ_SPECIAL_REGISTER_R32<"pm0", int_ptx_read_pm0>;
-def PTX_READ_PM1 : PTX_READ_SPECIAL_REGISTER_R32<"pm1", int_ptx_read_pm1>;
-def PTX_READ_PM2 : PTX_READ_SPECIAL_REGISTER_R32<"pm2", int_ptx_read_pm2>;
-def PTX_READ_PM3 : PTX_READ_SPECIAL_REGISTER_R32<"pm3", int_ptx_read_pm3>;
-
-// PTX Parallel Synchronization and Communication Intrinsics
-
-def PTX_BAR_SYNC : NVPTXInst<(outs), (ins i32imm:$i), "bar.sync\t$i;",
- [(int_ptx_bar_sync imm:$i)]>;
+def INT_PTX_SREG_TID_X :
+ PTX_READ_SREG_R32<"tid.x", int_nvvm_read_ptx_sreg_tid_x>;
+def INT_PTX_SREG_TID_Y :
+ PTX_READ_SREG_R32<"tid.y", int_nvvm_read_ptx_sreg_tid_y>;
+def INT_PTX_SREG_TID_Z :
+ PTX_READ_SREG_R32<"tid.z", int_nvvm_read_ptx_sreg_tid_z>;
+def INT_PTX_SREG_TID_W :
+ PTX_READ_SREG_R32<"tid.w", int_nvvm_read_ptx_sreg_tid_w>;
+
+def INT_PTX_SREG_NTID_X :
+ PTX_READ_SREG_R32<"ntid.x", int_nvvm_read_ptx_sreg_ntid_x>;
+def INT_PTX_SREG_NTID_Y :
+ PTX_READ_SREG_R32<"ntid.y", int_nvvm_read_ptx_sreg_ntid_y>;
+def INT_PTX_SREG_NTID_Z :
+ PTX_READ_SREG_R32<"ntid.z", int_nvvm_read_ptx_sreg_ntid_z>;
+def INT_PTX_SREG_NTID_W :
+ PTX_READ_SREG_R32<"ntid.w", int_nvvm_read_ptx_sreg_ntid_w>;
+
+def INT_PTX_SREG_LANEID :
+ PTX_READ_SREG_R32<"laneid", int_nvvm_read_ptx_sreg_laneid>;
+def INT_PTX_SREG_WARPID :
+ PTX_READ_SREG_R32<"warpid", int_nvvm_read_ptx_sreg_warpid>;
+def INT_PTX_SREG_NWARPID :
+ PTX_READ_SREG_R32<"nwarpid", int_nvvm_read_ptx_sreg_nwarpid>;
+
+def INT_PTX_SREG_CTAID_X :
+ PTX_READ_SREG_R32<"ctaid.x", int_nvvm_read_ptx_sreg_ctaid_x>;
+def INT_PTX_SREG_CTAID_Y :
+ PTX_READ_SREG_R32<"ctaid.y", int_nvvm_read_ptx_sreg_ctaid_y>;
+def INT_PTX_SREG_CTAID_Z :
+ PTX_READ_SREG_R32<"ctaid.z", int_nvvm_read_ptx_sreg_ctaid_z>;
+def INT_PTX_SREG_CTAID_W :
+ PTX_READ_SREG_R32<"ctaid.w", int_nvvm_read_ptx_sreg_ctaid_w>;
+
+def INT_PTX_SREG_NCTAID_X :
+ PTX_READ_SREG_R32<"nctaid.x", int_nvvm_read_ptx_sreg_nctaid_x>;
+def INT_PTX_SREG_NCTAID_Y :
+ PTX_READ_SREG_R32<"nctaid.y", int_nvvm_read_ptx_sreg_nctaid_y>;
+def INT_PTX_SREG_NCTAID_Z :
+ PTX_READ_SREG_R32<"nctaid.z", int_nvvm_read_ptx_sreg_nctaid_z>;
+def INT_PTX_SREG_NCTAID_W :
+ PTX_READ_SREG_R32<"nctaid.w", int_nvvm_read_ptx_sreg_nctaid_w>;
+
+def INT_PTX_SREG_SMID :
+ PTX_READ_SREG_R32<"smid", int_nvvm_read_ptx_sreg_smid>;
+def INT_PTX_SREG_NSMID :
+ PTX_READ_SREG_R32<"nsmid", int_nvvm_read_ptx_sreg_nsmid>;
+def INT_PTX_SREG_GRIDID :
+ PTX_READ_SREG_R32<"gridid", int_nvvm_read_ptx_sreg_gridid>;
+
+def INT_PTX_SREG_LANEMASK_EQ :
+ PTX_READ_SREG_R32<"lanemask_eq", int_nvvm_read_ptx_sreg_lanemask_eq>;
+def INT_PTX_SREG_LANEMASK_LE :
+ PTX_READ_SREG_R32<"lanemask_le", int_nvvm_read_ptx_sreg_lanemask_le>;
+def INT_PTX_SREG_LANEMASK_LT :
+ PTX_READ_SREG_R32<"lanemask_lt", int_nvvm_read_ptx_sreg_lanemask_lt>;
+def INT_PTX_SREG_LANEMASK_GE :
+ PTX_READ_SREG_R32<"lanemask_ge", int_nvvm_read_ptx_sreg_lanemask_ge>;
+def INT_PTX_SREG_LANEMASK_GT :
+ PTX_READ_SREG_R32<"lanemask_gt", int_nvvm_read_ptx_sreg_lanemask_gt>;
+
+def INT_PTX_SREG_CLOCK :
+ PTX_READ_SREG_R32<"clock", int_nvvm_read_ptx_sreg_clock>;
+def INT_PTX_SREG_CLOCK64 :
+ PTX_READ_SREG_R64<"clock64", int_nvvm_read_ptx_sreg_clock64>;
+
+def INT_PTX_SREG_PM0 : PTX_READ_SREG_R32<"pm0", int_nvvm_read_ptx_sreg_pm0>;
+def INT_PTX_SREG_PM1 : PTX_READ_SREG_R32<"pm1", int_nvvm_read_ptx_sreg_pm1>;
+def INT_PTX_SREG_PM2 : PTX_READ_SREG_R32<"pm2", int_nvvm_read_ptx_sreg_pm2>;
+def INT_PTX_SREG_PM3 : PTX_READ_SREG_R32<"pm3", int_nvvm_read_ptx_sreg_pm3>;
+
+// TODO: It would be nice to use PTX_READ_SREG here, but it doesn't
+// handle the constant.
+def INT_PTX_SREG_WARPSIZE :
+ NVPTXInst<(outs Int32Regs:$dst), (ins), "mov.u32 \t$dst, WARP_SZ;",
+ [(set Int32Regs:$dst, (int_nvvm_read_ptx_sreg_warpsize))]>;
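// Usage sketch (linearId is hypothetical): the CUDA builtins threadIdx,
// blockIdx, blockDim and gridDim read these special registers through the
// llvm.nvvm.read.ptx.sreg.* intrinsics, so threadIdx.x becomes
// "mov.u32 %r, %tid.x;".
__global__ void linearId(int *out) {
  int gid = blockIdx.x * blockDim.x + threadIdx.x;  // %ctaid.x, %ntid.x, %tid.x
  out[gid] = gid;
}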
diff --git a/lib/Target/NVPTX/NVPTXLowerAlloca.cpp b/lib/Target/NVPTX/NVPTXLowerAlloca.cpp
index 624052e9b981..fa1a3ef3fe24 100644
--- a/lib/Target/NVPTX/NVPTXLowerAlloca.cpp
+++ b/lib/Target/NVPTX/NVPTXLowerAlloca.cpp
@@ -62,6 +62,9 @@ INITIALIZE_PASS(NVPTXLowerAlloca, "nvptx-lower-alloca",
// Main function for this pass.
// =============================================================================
bool NVPTXLowerAlloca::runOnBasicBlock(BasicBlock &BB) {
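+  // Allow -opt-bisect-limit to disable this pass for debugging.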
+ if (skipBasicBlock(BB))
+ return false;
+
bool Changed = false;
for (auto &I : BB) {
if (auto allocaInst = dyn_cast<AllocaInst>(&I)) {
diff --git a/lib/Target/NVPTX/NVPTXLowerKernelArgs.cpp b/lib/Target/NVPTX/NVPTXLowerKernelArgs.cpp
index 6656077348a1..d162a283f745 100644
--- a/lib/Target/NVPTX/NVPTXLowerKernelArgs.cpp
+++ b/lib/Target/NVPTX/NVPTXLowerKernelArgs.cpp
@@ -128,7 +128,7 @@ INITIALIZE_PASS(NVPTXLowerKernelArgs, "nvptx-lower-kernel-args",
"Lower kernel arguments (NVPTX)", false, false)
// =============================================================================
-// If the function had a byval struct ptr arg, say foo(%struct.x *byval %d),
+// If the function had a byval struct ptr arg, say foo(%struct.x* byval %d),
// then add the following instructions to the first basic block:
//
// %temp = alloca %struct.x, align 8
diff --git a/lib/Target/NVPTX/NVPTXMCExpr.cpp b/lib/Target/NVPTX/NVPTXMCExpr.cpp
index 3c98b9febf85..84d5239ec096 100644
--- a/lib/Target/NVPTX/NVPTXMCExpr.cpp
+++ b/lib/Target/NVPTX/NVPTXMCExpr.cpp
@@ -15,8 +15,8 @@ using namespace llvm;
#define DEBUG_TYPE "nvptx-mcexpr"
-const NVPTXFloatMCExpr*
-NVPTXFloatMCExpr::create(VariantKind Kind, APFloat Flt, MCContext &Ctx) {
+const NVPTXFloatMCExpr *
+NVPTXFloatMCExpr::create(VariantKind Kind, const APFloat &Flt, MCContext &Ctx) {
return new (Ctx) NVPTXFloatMCExpr(Kind, Flt);
}
diff --git a/lib/Target/NVPTX/NVPTXMCExpr.h b/lib/Target/NVPTX/NVPTXMCExpr.h
index 81a606d7535c..7f833c42fa8f 100644
--- a/lib/Target/NVPTX/NVPTXMCExpr.h
+++ b/lib/Target/NVPTX/NVPTXMCExpr.h
@@ -14,6 +14,7 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/MC/MCExpr.h"
+#include <utility>
namespace llvm {
@@ -30,21 +31,21 @@ private:
const APFloat Flt;
explicit NVPTXFloatMCExpr(VariantKind Kind, APFloat Flt)
- : Kind(Kind), Flt(Flt) {}
+ : Kind(Kind), Flt(std::move(Flt)) {}
public:
/// @name Construction
/// @{
- static const NVPTXFloatMCExpr *create(VariantKind Kind, APFloat Flt,
+ static const NVPTXFloatMCExpr *create(VariantKind Kind, const APFloat &Flt,
MCContext &Ctx);
- static const NVPTXFloatMCExpr *createConstantFPSingle(APFloat Flt,
+ static const NVPTXFloatMCExpr *createConstantFPSingle(const APFloat &Flt,
MCContext &Ctx) {
return create(VK_NVPTX_SINGLE_PREC_FLOAT, Flt, Ctx);
}
- static const NVPTXFloatMCExpr *createConstantFPDouble(APFloat Flt,
+ static const NVPTXFloatMCExpr *createConstantFPDouble(const APFloat &Flt,
MCContext &Ctx) {
return create(VK_NVPTX_DOUBLE_PREC_FLOAT, Flt, Ctx);
}
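
The two signature changes above are the usual sink idiom: the factory functions only forward, so they take const APFloat&, while the constructor takes its APFloat by value and std::moves it into the const member. A stripped-down sketch of the idiom with std::string; the class and member names are illustrative:

    #include <string>
    #include <utility>

    class Sample {
      const std::string Payload;

    public:
      // By-value parameter plus std::move: callers passing an rvalue pay
      // only a move; callers passing an lvalue pay exactly one copy.
      explicit Sample(std::string Payload) : Payload(std::move(Payload)) {}
    };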
diff --git a/lib/Target/NVPTX/NVPTXPeephole.cpp b/lib/Target/NVPTX/NVPTXPeephole.cpp
index a61c291d233f..7d0cd553e03f 100644
--- a/lib/Target/NVPTX/NVPTXPeephole.cpp
+++ b/lib/Target/NVPTX/NVPTXPeephole.cpp
@@ -125,6 +125,9 @@ static void CombineCVTAToLocal(MachineInstr &Root) {
}
bool NVPTXPeephole::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
bool Changed = false;
// Loop over all of the basic blocks.
for (auto &MBB : MF) {
diff --git a/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp b/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
index 17019d7b364d..029e0097c5dc 100644
--- a/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
+++ b/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
@@ -55,11 +55,10 @@ bool NVPTXPrologEpilogPass::runOnMachineFunction(MachineFunction &MF) {
calculateFrameObjectOffsets(MF);
- for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB) {
- for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
- MachineInstr *MI = I;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- if (!MI->getOperand(i).isFI())
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineInstr &MI : MBB) {
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ if (!MI.getOperand(i).isFI())
continue;
TRI.eliminateFrameIndex(MI, 0, i, nullptr);
Modified = true;
diff --git a/lib/Target/NVPTX/NVPTXSection.h b/lib/Target/NVPTX/NVPTXSection.h
index 45a7309479ee..cad4f5668fdf 100644
--- a/lib/Target/NVPTX/NVPTXSection.h
+++ b/lib/Target/NVPTX/NVPTXSection.h
@@ -16,7 +16,6 @@
#include "llvm/IR/GlobalVariable.h"
#include "llvm/MC/MCSection.h"
-#include <vector>
namespace llvm {
/// Represents a section in PTX. PTX does not have sections. We create this class
diff --git a/lib/Target/NVPTX/NVPTXSubtarget.h b/lib/Target/NVPTX/NVPTXSubtarget.h
index c7287719be5f..41670390c41b 100644
--- a/lib/Target/NVPTX/NVPTXSubtarget.h
+++ b/lib/Target/NVPTX/NVPTXSubtarget.h
@@ -19,8 +19,8 @@
#include "NVPTXISelLowering.h"
#include "NVPTXInstrInfo.h"
#include "NVPTXRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/IR/DataLayout.h"
-#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <string>
@@ -42,7 +42,7 @@ class NVPTXSubtarget : public NVPTXGenSubtargetInfo {
const NVPTXTargetMachine &TM;
NVPTXInstrInfo InstrInfo;
NVPTXTargetLowering TLInfo;
- TargetSelectionDAGInfo TSInfo;
+ SelectionDAGTargetInfo TSInfo;
// NVPTX does not have any call stack frame, but needs an NVPTX-specific
// FrameLowering class because TargetFrameLowering is abstract.
@@ -65,7 +65,7 @@ public:
const NVPTXTargetLowering *getTargetLowering() const override {
return &TLInfo;
}
- const TargetSelectionDAGInfo *getSelectionDAGInfo() const override {
+ const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
return &TSInfo;
}
diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index aa931b134da9..b9f5919964c7 100644
--- a/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -23,6 +23,7 @@
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LegacyPassManager.h"
@@ -44,15 +45,23 @@
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Scalar/GVN.h"
using namespace llvm;
+static cl::opt<bool> UseInferAddressSpaces(
+ "nvptx-use-infer-addrspace", cl::init(false), cl::Hidden,
+ cl::desc("Optimize address spaces using NVPTXInferAddressSpaces instead of "
+ "NVPTXFavorNonGenericAddrSpaces"));
+
namespace llvm {
+void initializeNVVMIntrRangePass(PassRegistry&);
void initializeNVVMReflectPass(PassRegistry&);
void initializeGenericToNVVMPass(PassRegistry&);
void initializeNVPTXAllocaHoistingPass(PassRegistry &);
void initializeNVPTXAssignValidGlobalNamesPass(PassRegistry&);
void initializeNVPTXFavorNonGenericAddrSpacesPass(PassRegistry &);
+void initializeNVPTXInferAddressSpacesPass(PassRegistry &);
void initializeNVPTXLowerAggrCopiesPass(PassRegistry &);
void initializeNVPTXLowerKernelArgsPass(PassRegistry &);
void initializeNVPTXLowerAllocaPass(PassRegistry &);
@@ -67,10 +76,12 @@ extern "C" void LLVMInitializeNVPTXTarget() {
// but it's very NVPTX-specific.
PassRegistry &PR = *PassRegistry::getPassRegistry();
initializeNVVMReflectPass(PR);
+ initializeNVVMIntrRangePass(PR);
initializeGenericToNVVMPass(PR);
initializeNVPTXAllocaHoistingPass(PR);
initializeNVPTXAssignValidGlobalNamesPass(PR);
initializeNVPTXFavorNonGenericAddrSpacesPass(PR);
+ initializeNVPTXInferAddressSpacesPass(PR);
initializeNVPTXLowerKernelArgsPass(PR);
initializeNVPTXLowerAllocaPass(PR);
initializeNVPTXLowerAggrCopiesPass(PR);
@@ -90,11 +101,15 @@ static std::string computeDataLayout(bool is64Bit) {
NVPTXTargetMachine::NVPTXTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL, bool is64bit)
- : LLVMTargetMachine(T, computeDataLayout(is64bit), TT, CPU, FS, Options, RM,
- CM, OL),
- is64bit(is64bit), TLOF(make_unique<NVPTXTargetObjectFile>()),
+ // The pic relocation model is used regardless of what the client has
+ // specified, as it is the only relocation model currently supported.
+ : LLVMTargetMachine(T, computeDataLayout(is64bit), TT, CPU, FS, Options,
+ Reloc::PIC_, CM, OL),
+ is64bit(is64bit),
+ TLOF(make_unique<NVPTXTargetObjectFile>()),
Subtarget(TT, CPU, FS, *this) {
if (TT.getOS() == Triple::NVCL)
drvInterface = NVPTX::NVCL;
@@ -110,7 +125,8 @@ void NVPTXTargetMachine32::anchor() {}
NVPTXTargetMachine32::NVPTXTargetMachine32(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
@@ -119,7 +135,8 @@ void NVPTXTargetMachine64::anchor() {}
NVPTXTargetMachine64::NVPTXTargetMachine64(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
@@ -143,14 +160,25 @@ public:
void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
private:
- // if the opt level is aggressive, add GVN; otherwise, add EarlyCSE.
+ // If the opt level is aggressive, add GVN; otherwise, add EarlyCSE. This
+ // function is only called in opt mode.
void addEarlyCSEOrGVNPass();
+
+ // Add passes that propagate special memory spaces.
+ void addAddressSpaceInferencePasses();
+
+ // Add passes that perform straight-line scalar optimizations.
+ void addStraightLineScalarOptimizationPasses();
};
} // end anonymous namespace
TargetPassConfig *NVPTXTargetMachine::createPassConfig(PassManagerBase &PM) {
- NVPTXPassConfig *PassConfig = new NVPTXPassConfig(this, PM);
- return PassConfig;
+ return new NVPTXPassConfig(this, PM);
+}
+
+void NVPTXTargetMachine::addEarlyAsPossiblePasses(PassManagerBase &PM) {
+ PM.add(createNVVMReflectPass());
+ PM.add(createNVVMIntrRangePass(Subtarget.getSmVersion()));
}
TargetIRAnalysis NVPTXTargetMachine::getTargetIRAnalysis() {
@@ -166,34 +194,23 @@ void NVPTXPassConfig::addEarlyCSEOrGVNPass() {
addPass(createEarlyCSEPass());
}
-void NVPTXPassConfig::addIRPasses() {
- // The following passes are known to not play well with virtual regs hanging
- // around after register allocation (which in our case, is *all* registers).
- // We explicitly disable them here. We do, however, need some functionality
- // of the PrologEpilogCodeInserter pass, so we emulate that behavior in the
- // NVPTXPrologEpilog pass (see NVPTXPrologEpilogPass.cpp).
- disablePass(&PrologEpilogCodeInserterID);
- disablePass(&MachineCopyPropagationID);
- disablePass(&TailDuplicateID);
-
- addPass(createNVVMReflectPass());
- addPass(createNVPTXImageOptimizerPass());
- addPass(createNVPTXAssignValidGlobalNamesPass());
- addPass(createGenericToNVVMPass());
-
- // === Propagate special address spaces ===
- addPass(createNVPTXLowerKernelArgsPass(&getNVPTXTargetMachine()));
+void NVPTXPassConfig::addAddressSpaceInferencePasses() {
// NVPTXLowerKernelArgs emits allocas for byval parameters which can often
// be eliminated by SROA.
addPass(createSROAPass());
addPass(createNVPTXLowerAllocaPass());
- addPass(createNVPTXFavorNonGenericAddrSpacesPass());
- // FavorNonGenericAddrSpaces shortcuts unnecessary addrspacecasts, and leave
- // them unused. We could remove dead code in an ad-hoc manner, but that
- // requires manual work and might be error-prone.
- addPass(createDeadCodeEliminationPass());
+ if (UseInferAddressSpaces) {
+ addPass(createNVPTXInferAddressSpacesPass());
+ } else {
+ addPass(createNVPTXFavorNonGenericAddrSpacesPass());
+ // FavorNonGenericAddrSpaces shortcuts unnecessary addrspacecasts, and leaves
+ // them unused. We could remove dead code in an ad-hoc manner, but that
+ // requires manual work and might be error-prone.
+ addPass(createDeadCodeEliminationPass());
+ }
+}
- // === Straight-line scalar optimizations ===
+void NVPTXPassConfig::addStraightLineScalarOptimizationPasses() {
addPass(createSeparateConstOffsetFromGEPPass());
addPass(createSpeculativeExecutionPass());
// ReassociateGEPs exposes more opportunities for SLSR. See
@@ -208,6 +225,41 @@ void NVPTXPassConfig::addIRPasses() {
// NaryReassociate on GEPs creates redundant common expressions, so run
// EarlyCSE after it.
addPass(createEarlyCSEPass());
+}
+
+void NVPTXPassConfig::addIRPasses() {
+ // The following passes are known to not play well with virtual regs hanging
+ // around after register allocation (which in our case, is *all* registers).
+ // We explicitly disable them here. We do, however, need some functionality
+ // of the PrologEpilogCodeInserter pass, so we emulate that behavior in the
+ // NVPTXPrologEpilog pass (see NVPTXPrologEpilogPass.cpp).
+ disablePass(&PrologEpilogCodeInserterID);
+ disablePass(&MachineCopyPropagationID);
+ disablePass(&TailDuplicateID);
+ disablePass(&StackMapLivenessID);
+ disablePass(&LiveDebugValuesID);
+ disablePass(&PostRASchedulerID);
+ disablePass(&FuncletLayoutID);
+ disablePass(&PatchableFunctionID);
+
+ // NVVMReflectPass is added in addEarlyAsPossiblePasses, so hopefully running
+ // it here does nothing. But since we need it for correctness when lowering
+ // to NVPTX, run it here too, in case whoever built our pass pipeline didn't
+ // call addEarlyAsPossiblePasses.
+ addPass(createNVVMReflectPass());
+
+ if (getOptLevel() != CodeGenOpt::None)
+ addPass(createNVPTXImageOptimizerPass());
+ addPass(createNVPTXAssignValidGlobalNamesPass());
+ addPass(createGenericToNVVMPass());
+
+ // NVPTXLowerKernelArgs is required for correctness and should be run right
+ // before the address space inference passes.
+ addPass(createNVPTXLowerKernelArgsPass(&getNVPTXTargetMachine()));
+ if (getOptLevel() != CodeGenOpt::None) {
+ addAddressSpaceInferencePasses();
+ addStraightLineScalarOptimizationPasses();
+ }
// === LSR and other generic IR passes ===
TargetPassConfig::addIRPasses();
@@ -223,7 +275,8 @@ void NVPTXPassConfig::addIRPasses() {
// %1 = shl %a, 2
//
// but EarlyCSE can do neither of them.
- addEarlyCSEOrGVNPass();
+ if (getOptLevel() != CodeGenOpt::None)
+ addEarlyCSEOrGVNPass();
}
bool NVPTXPassConfig::addInstSelector() {
@@ -241,10 +294,12 @@ bool NVPTXPassConfig::addInstSelector() {
void NVPTXPassConfig::addPostRegAlloc() {
addPass(createNVPTXPrologEpilogPass(), false);
- // NVPTXPrologEpilogPass calculates frame object offset and replace frame
- // index with VRFrame register. NVPTXPeephole need to be run after that and
- // will replace VRFrame with VRFrameLocal when possible.
- addPass(createNVPTXPeephole());
+ if (getOptLevel() != CodeGenOpt::None) {
+ // NVPTXPrologEpilogPass calculates frame object offsets and replaces frame
+ // indices with the VRFrame register. NVPTXPeephole needs to be run after
+ // that and will replace VRFrame with VRFrameLocal when possible.
+ addPass(createNVPTXPeephole());
+ }
}
FunctionPass *NVPTXPassConfig::createTargetRegisterAllocator(bool) {
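
The new addEarlyAsPossiblePasses hook gives front ends one call to schedule NVVMReflect and NVVMIntrRange ahead of all other IR optimization, which is what the comment in addIRPasses above relies on. A sketch of a front end using it; the surrounding pipeline setup is illustrative:

    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/Target/TargetMachine.h"
    using namespace llvm;

    static void buildPipeline(TargetMachine &TM, legacy::PassManagerBase &PM) {
      // Let the target add NVVMReflect and NVVMIntrRange (tuned to the
      // subtarget's SM version) before any other IR passes.
      TM.addEarlyAsPossiblePasses(PM);
      // ... the front end's own optimization passes follow ...
    }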
diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.h b/lib/Target/NVPTX/NVPTXTargetMachine.h
index da7f62bf9d9b..78a053831772 100644
--- a/lib/Target/NVPTX/NVPTXTargetMachine.h
+++ b/lib/Target/NVPTX/NVPTXTargetMachine.h
@@ -16,9 +16,9 @@
#include "ManagedStringPool.h"
#include "NVPTXSubtarget.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetSelectionDAGInfo.h"
namespace llvm {
@@ -36,8 +36,8 @@ class NVPTXTargetMachine : public LLVMTargetMachine {
public:
NVPTXTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OP,
- bool is64bit);
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
+ CodeGenOpt::Level OP, bool is64bit);
~NVPTXTargetMachine() override;
const NVPTXSubtarget *getSubtargetImpl(const Function &) const override {
@@ -61,6 +61,7 @@ public:
return TLOF.get();
}
+ void addEarlyAsPossiblePasses(PassManagerBase &PM) override;
TargetIRAnalysis getTargetIRAnalysis() override;
}; // NVPTXTargetMachine.
@@ -70,7 +71,7 @@ class NVPTXTargetMachine32 : public NVPTXTargetMachine {
public:
NVPTXTargetMachine32(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
@@ -79,7 +80,7 @@ class NVPTXTargetMachine64 : public NVPTXTargetMachine {
public:
NVPTXTargetMachine64(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
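
With the relocation model threaded through as Optional<Reloc::Model>, clients may leave it unset and let the backend choose; as the constructor above shows, NVPTX then hard-codes Reloc::PIC_. A sketch of 3.9-era client code; the triple, CPU string, and error handling are illustrative, and the NVPTX target is assumed to be registered already:

    #include "llvm/ADT/Optional.h"
    #include "llvm/Support/TargetRegistry.h"
    #include "llvm/Target/TargetMachine.h"
    #include "llvm/Target/TargetOptions.h"
    using namespace llvm;

    static TargetMachine *makeNVPTXTargetMachine() {
      std::string Err;
      const Target *T = TargetRegistry::lookupTarget("nvptx64-nvidia-cuda", Err);
      if (!T)
        return nullptr;
      // None defers the relocation model to the target, which for NVPTX
      // always resolves to Reloc::PIC_.
      return T->createTargetMachine("nvptx64-nvidia-cuda", "sm_20", "",
                                    TargetOptions(), None);
    }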
diff --git a/lib/Target/NVPTX/NVPTXTargetObjectFile.h b/lib/Target/NVPTX/NVPTXTargetObjectFile.h
index 683b9a3f49f7..045fbb75a2a0 100644
--- a/lib/Target/NVPTX/NVPTXTargetObjectFile.h
+++ b/lib/Target/NVPTX/NVPTXTargetObjectFile.h
@@ -12,7 +12,6 @@
#include "NVPTXSection.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
-#include <string>
namespace llvm {
class GlobalVariable;
@@ -87,7 +86,8 @@ public:
}
MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
- const Constant *C) const override {
+ const Constant *C,
+ unsigned &Align) const override {
return ReadOnlySection;
}
diff --git a/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
index 6e679dd0257c..580d345cc663 100644
--- a/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
+++ b/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
@@ -32,7 +32,7 @@ static bool readsThreadIndex(const IntrinsicInst *II) {
}
static bool readsLaneId(const IntrinsicInst *II) {
- return II->getIntrinsicID() == Intrinsic::ptx_read_laneid;
+ return II->getIntrinsicID() == Intrinsic::nvvm_read_ptx_sreg_laneid;
}
// Whether the given intrinsic is an atomic instruction in PTX.
diff --git a/lib/Target/NVPTX/NVPTXTargetTransformInfo.h b/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
index 0946a3293eec..08ffdf191151 100644
--- a/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
+++ b/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
@@ -52,6 +52,10 @@ public:
bool isSourceOfDivergence(const Value *V);
+ // Increase the inlining cost threshold by a factor of 5, reflecting that
+ // calls are particularly expensive in NVPTX.
+ unsigned getInliningThresholdMultiplier() { return 5; }
+
int getArithmeticInstrCost(
unsigned Opcode, Type *Ty,
TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
diff --git a/lib/Target/NVPTX/NVPTXUtilities.cpp b/lib/Target/NVPTX/NVPTXUtilities.cpp
index 578b466568ae..835e4b442039 100644
--- a/lib/Target/NVPTX/NVPTXUtilities.cpp
+++ b/lib/Target/NVPTX/NVPTXUtilities.cpp
@@ -99,7 +99,7 @@ static void cacheAnnotationFromMD(const Module *m, const GlobalValue *gv) {
}
}
-bool llvm::findOneNVVMAnnotation(const GlobalValue *gv, std::string prop,
+bool llvm::findOneNVVMAnnotation(const GlobalValue *gv, const std::string &prop,
unsigned &retval) {
MutexGuard Guard(Lock);
const Module *m = gv->getParent();
@@ -113,7 +113,7 @@ bool llvm::findOneNVVMAnnotation(const GlobalValue *gv, std::string prop,
return true;
}
-bool llvm::findAllNVVMAnnotation(const GlobalValue *gv, std::string prop,
+bool llvm::findAllNVVMAnnotation(const GlobalValue *gv, const std::string &prop,
std::vector<unsigned> &retval) {
MutexGuard Guard(Lock);
const Module *m = gv->getParent();
diff --git a/lib/Target/NVPTX/NVPTXUtilities.h b/lib/Target/NVPTX/NVPTXUtilities.h
index a5262cb7412f..ec5bfc17afc7 100644
--- a/lib/Target/NVPTX/NVPTXUtilities.h
+++ b/lib/Target/NVPTX/NVPTXUtilities.h
@@ -30,8 +30,9 @@ namespace llvm {
void clearAnnotationCache(const llvm::Module *);
-bool findOneNVVMAnnotation(const llvm::GlobalValue *, std::string, unsigned &);
-bool findAllNVVMAnnotation(const llvm::GlobalValue *, std::string,
+bool findOneNVVMAnnotation(const llvm::GlobalValue *, const std::string &,
+ unsigned &);
+bool findAllNVVMAnnotation(const llvm::GlobalValue *, const std::string &,
std::vector<unsigned> &);
bool isTexture(const llvm::Value &);
diff --git a/lib/Target/NVPTX/NVVMIntrRange.cpp b/lib/Target/NVPTX/NVVMIntrRange.cpp
new file mode 100644
index 000000000000..b9c02c431141
--- /dev/null
+++ b/lib/Target/NVPTX/NVVMIntrRange.cpp
@@ -0,0 +1,148 @@
+//===- NVVMIntrRange.cpp - Set !range metadata for NVVM intrinsics --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass adds appropriate !range metadata for calls to NVVM
+// intrinsics that return a limited range of values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTX.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Instructions.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "nvvm-intr-range"
+
+namespace llvm { void initializeNVVMIntrRangePass(PassRegistry &); }
+
+// Add !range metadata based on limits of given SM variant.
+static cl::opt<unsigned> NVVMIntrRangeSM("nvvm-intr-range-sm", cl::init(20),
+ cl::Hidden, cl::desc("SM variant"));
+
+namespace {
+class NVVMIntrRange : public FunctionPass {
+ private:
+ struct {
+ unsigned x, y, z;
+ } MaxBlockSize, MaxGridSize;
+
+ public:
+ static char ID;
+ NVVMIntrRange() : NVVMIntrRange(NVVMIntrRangeSM) {}
+ NVVMIntrRange(unsigned int SmVersion) : FunctionPass(ID) {
+ MaxBlockSize.x = 1024;
+ MaxBlockSize.y = 1024;
+ MaxBlockSize.z = 64;
+
+ MaxGridSize.x = SmVersion >= 30 ? 0x7fffffff : 0xffff;
+ MaxGridSize.y = 0xffff;
+ MaxGridSize.z = 0xffff;
+
+ initializeNVVMIntrRangePass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &) override;
+};
+}
+
+FunctionPass *llvm::createNVVMIntrRangePass(unsigned int SmVersion) {
+ return new NVVMIntrRange(SmVersion);
+}
+
+char NVVMIntrRange::ID = 0;
+INITIALIZE_PASS(NVVMIntrRange, "nvvm-intr-range",
+ "Add !range metadata to NVVM intrinsics.", false, false)
+
+// Adds the passed-in [Low,High) range information as metadata to the
+// passed-in call instruction.
+static bool addRangeMetadata(uint64_t Low, uint64_t High, CallInst *C) {
+ LLVMContext &Context = C->getParent()->getContext();
+ IntegerType *Int32Ty = Type::getInt32Ty(Context);
+ Metadata *LowAndHigh[] = {
+ ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Low)),
+ ConstantAsMetadata::get(ConstantInt::get(Int32Ty, High))};
+ C->setMetadata(LLVMContext::MD_range, MDNode::get(Context, LowAndHigh));
+ return true;
+}
+
+bool NVVMIntrRange::runOnFunction(Function &F) {
+ // Go through the calls in this function.
+ bool Changed = false;
+ for (Instruction &I : instructions(F)) {
+ CallInst *Call = dyn_cast<CallInst>(&I);
+ if (!Call)
+ continue;
+
+ if (Function *Callee = Call->getCalledFunction()) {
+ switch (Callee->getIntrinsicID()) {
+ // Index within block
+ case Intrinsic::nvvm_read_ptx_sreg_tid_x:
+ Changed |= addRangeMetadata(0, MaxBlockSize.x, Call);
+ break;
+ case Intrinsic::nvvm_read_ptx_sreg_tid_y:
+ Changed |= addRangeMetadata(0, MaxBlockSize.y, Call);
+ break;
+ case Intrinsic::nvvm_read_ptx_sreg_tid_z:
+ Changed |= addRangeMetadata(0, MaxBlockSize.z, Call);
+ break;
+
+ // Block size
+ case Intrinsic::nvvm_read_ptx_sreg_ntid_x:
+ Changed |= addRangeMetadata(1, MaxBlockSize.x+1, Call);
+ break;
+ case Intrinsic::nvvm_read_ptx_sreg_ntid_y:
+ Changed |= addRangeMetadata(1, MaxBlockSize.y+1, Call);
+ break;
+ case Intrinsic::nvvm_read_ptx_sreg_ntid_z:
+ Changed |= addRangeMetadata(1, MaxBlockSize.z+1, Call);
+ break;
+
+ // Index within grid
+ case Intrinsic::nvvm_read_ptx_sreg_ctaid_x:
+ Changed |= addRangeMetadata(0, MaxGridSize.x, Call);
+ break;
+ case Intrinsic::nvvm_read_ptx_sreg_ctaid_y:
+ Changed |= addRangeMetadata(0, MaxGridSize.y, Call);
+ break;
+ case Intrinsic::nvvm_read_ptx_sreg_ctaid_z:
+ Changed |= addRangeMetadata(0, MaxGridSize.z, Call);
+ break;
+
+ // Grid size
+ case Intrinsic::nvvm_read_ptx_sreg_nctaid_x:
+ Changed |= addRangeMetadata(1, MaxGridSize.x+1, Call);
+ break;
+ case Intrinsic::nvvm_read_ptx_sreg_nctaid_y:
+ Changed |= addRangeMetadata(1, MaxGridSize.y+1, Call);
+ break;
+ case Intrinsic::nvvm_read_ptx_sreg_nctaid_z:
+ Changed |= addRangeMetadata(1, MaxGridSize.z+1, Call);
+ break;
+
+ // Warp size is the constant 32.
+ case Intrinsic::nvvm_read_ptx_sreg_warpsize:
+ Changed |= addRangeMetadata(32, 32+1, Call);
+ break;
+
+ // Lane ID is [0..warpsize)
+ case Intrinsic::nvvm_read_ptx_sreg_laneid:
+ Changed |= addRangeMetadata(0, 32, Call);
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ return Changed;
+}
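
Concretely, after this pass a call to llvm.nvvm.read.ptx.sreg.tid.x carries !range metadata of [0, 1024), ntid.x carries [1, 1025), and so on, which later value-tracking can exploit. A sketch of scheduling the pass for a specific SM version; the forward declaration mirrors the one in NVPTX.h and the pipeline setup is illustrative:

    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace llvm {
    FunctionPass *createNVVMIntrRangePass(unsigned SmVersion);
    }

    static void addIntrRange(legacy::PassManagerBase &PM) {
      // sm_30 and later allow grid dimension X up to 0x7fffffff; older
      // variants cap it at 0xffff (see MaxGridSize above).
      PM.add(createNVVMIntrRangePass(30));
    }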
diff --git a/lib/Target/NVPTX/NVVMReflect.cpp b/lib/Target/NVPTX/NVVMReflect.cpp
index 20ab5db584d2..e0c35e7039e5 100644
--- a/lib/Target/NVPTX/NVVMReflect.cpp
+++ b/lib/Target/NVPTX/NVVMReflect.cpp
@@ -7,20 +7,26 @@
//
//===----------------------------------------------------------------------===//
//
-// This pass replaces occurrences of __nvvm_reflect("string") with an
-// integer based on -nvvm-reflect-list string=<int> option given to this pass.
-// If an undefined string value is seen in a call to __nvvm_reflect("string"),
-// a default value of 0 will be used.
+// This pass replaces occurrences of __nvvm_reflect("foo") and llvm.nvvm.reflect
+// with an integer.
+//
+// We choose the value we use by looking, in this order, at:
+//
+// * the -nvvm-reflect-list flag, which has the format "foo=1,bar=42",
+// * the StringMap passed to the pass's constructor, and
+// * metadata in the module itself.
+//
+// If we see an unknown string, we replace its call with 0.
//
//===----------------------------------------------------------------------===//
#include "NVPTX.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
@@ -31,11 +37,8 @@
#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
-#include <map>
#include <sstream>
#include <string>
-#include <vector>
-
#define NVVM_REFLECT_FUNCTION "__nvvm_reflect"
using namespace llvm;
@@ -45,31 +48,21 @@ using namespace llvm;
namespace llvm { void initializeNVVMReflectPass(PassRegistry &); }
namespace {
-class NVVMReflect : public ModulePass {
+class NVVMReflect : public FunctionPass {
private:
StringMap<int> VarMap;
- typedef DenseMap<std::string, int>::iterator VarMapIter;
public:
static char ID;
- NVVMReflect() : ModulePass(ID) {
- initializeNVVMReflectPass(*PassRegistry::getPassRegistry());
- VarMap.clear();
- }
+ NVVMReflect() : NVVMReflect(StringMap<int>()) {}
NVVMReflect(const StringMap<int> &Mapping)
- : ModulePass(ID) {
+ : FunctionPass(ID), VarMap(Mapping) {
initializeNVVMReflectPass(*PassRegistry::getPassRegistry());
- for (StringMap<int>::const_iterator I = Mapping.begin(), E = Mapping.end();
- I != E; ++I) {
- VarMap[(*I).getKey()] = (*I).getValue();
- }
+ setVarMap();
}
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.setPreservesAll();
- }
- bool runOnModule(Module &) override;
+ bool runOnFunction(Function &) override;
private:
bool handleFunction(Function *ReflectFunction);
@@ -77,11 +70,8 @@ private:
};
}
-ModulePass *llvm::createNVVMReflectPass() {
- return new NVVMReflect();
-}
-
-ModulePass *llvm::createNVVMReflectPass(const StringMap<int>& Mapping) {
+FunctionPass *llvm::createNVVMReflectPass() { return new NVVMReflect(); }
+FunctionPass *llvm::createNVVMReflectPass(const StringMap<int> &Mapping) {
return new NVVMReflect(Mapping);
}
@@ -123,30 +113,35 @@ void NVVMReflect::setVarMap() {
}
}
-bool NVVMReflect::handleFunction(Function *ReflectFunction) {
- // Validate _reflect function
- assert(ReflectFunction->isDeclaration() &&
- "_reflect function should not have a body");
- assert(ReflectFunction->getReturnType()->isIntegerTy() &&
- "_reflect's return type should be integer");
+bool NVVMReflect::runOnFunction(Function &F) {
+ if (!NVVMReflectEnabled)
+ return false;
+
+ if (F.getName() == NVVM_REFLECT_FUNCTION) {
+ assert(F.isDeclaration() && "_reflect function should not have a body");
+ assert(F.getReturnType()->isIntegerTy() &&
+ "_reflect's return type should be integer");
+ return false;
+ }
- std::vector<Instruction *> ToRemove;
+ SmallVector<Instruction *, 4> ToRemove;
- // Go through the uses of ReflectFunction in this Function.
- // Each of them should a CallInst with a ConstantArray argument.
- // First validate that. If the c-string corresponding to the
- // ConstantArray can be found successfully, see if it can be
- // found in VarMap. If so, replace the uses of CallInst with the
- // value found in VarMap. If not, replace the use with value 0.
+ // Go through the calls in this function. Each call to __nvvm_reflect or
+ // llvm.nvvm.reflect should be a CallInst with a ConstantArray argument.
+ // First validate that. If the c-string corresponding to the ConstantArray can
+ // be found successfully, see if it can be found in VarMap. If so, replace the
+ // uses of CallInst with the value found in VarMap. If not, replace the use
+ // with value 0.
- // IR for __nvvm_reflect calls differs between CUDA versions:
+ // The IR for __nvvm_reflect calls differs between CUDA versions.
+ //
// CUDA 6.5 and earlier uses this sequence:
// %ptr = tail call i8* @llvm.nvvm.ptr.constant.to.gen.p0i8.p4i8
// (i8 addrspace(4)* getelementptr inbounds
// ([8 x i8], [8 x i8] addrspace(4)* @str, i32 0, i32 0))
// %reflect = tail call i32 @__nvvm_reflect(i8* %ptr)
//
- // Value returned by Sym->getOperand(0) is a Constant with a
+ // The value returned by Sym->getOperand(0) is a Constant with a
// ConstantDataSequential operand which can be converted to string and used
// for lookup.
//
@@ -157,31 +152,37 @@ bool NVVMReflect::handleFunction(Function *ReflectFunction) {
//
// In this case, we get a Constant with a GlobalVariable operand and we need
// to dig deeper to find its initializer with the string we'll use for lookup.
-
- for (User *U : ReflectFunction->users()) {
- assert(isa<CallInst>(U) && "Only a call instruction can use _reflect");
- CallInst *Reflect = cast<CallInst>(U);
-
- assert((Reflect->getNumOperands() == 2) &&
- "Only one operand expect for _reflect function");
- // In cuda, we will have an extra constant-to-generic conversion of
- // the string.
- const Value *Str = Reflect->getArgOperand(0);
- if (isa<CallInst>(Str)) {
- // CUDA path
- const CallInst *ConvCall = cast<CallInst>(Str);
+ for (Instruction &I : instructions(F)) {
+ CallInst *Call = dyn_cast<CallInst>(&I);
+ if (!Call)
+ continue;
+ Function *Callee = Call->getCalledFunction();
+ if (!Callee || (Callee->getName() != NVVM_REFLECT_FUNCTION &&
+ Callee->getIntrinsicID() != Intrinsic::nvvm_reflect))
+ continue;
+
+ // FIXME: Improve error handling here and elsewhere in this pass.
+ assert(Call->getNumOperands() == 2 &&
+ "Wrong number of operands to __nvvm_reflect function");
+
+ // In cuda 6.5 and earlier, we will have an extra constant-to-generic
+ // conversion of the string.
+ const Value *Str = Call->getArgOperand(0);
+ if (const CallInst *ConvCall = dyn_cast<CallInst>(Str)) {
+ // FIXME: Add assertions about ConvCall.
Str = ConvCall->getArgOperand(0);
}
assert(isa<ConstantExpr>(Str) &&
- "Format of _reflect function not recognized");
+ "Format of __nvvm__reflect function not recognized");
const ConstantExpr *GEP = cast<ConstantExpr>(Str);
const Value *Sym = GEP->getOperand(0);
- assert(isa<Constant>(Sym) && "Format of _reflect function not recognized");
+ assert(isa<Constant>(Sym) &&
+ "Format of __nvvm_reflect function not recognized");
const Value *Operand = cast<Constant>(Sym)->getOperand(0);
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand)) {
- // For CUDA-7.0 style __nvvm_reflect calls we need to find operand's
+ // For CUDA-7.0 style __nvvm_reflect calls, we need to find the operand's
// initializer.
assert(GV->hasInitializer() &&
"Format of _reflect function not recognized");
@@ -194,57 +195,26 @@ bool NVVMReflect::handleFunction(Function *ReflectFunction) {
assert(cast<ConstantDataSequential>(Operand)->isCString() &&
"Format of _reflect function not recognized");
- std::string ReflectArg =
- cast<ConstantDataSequential>(Operand)->getAsString();
-
+ StringRef ReflectArg = cast<ConstantDataSequential>(Operand)->getAsString();
ReflectArg = ReflectArg.substr(0, ReflectArg.size() - 1);
DEBUG(dbgs() << "Arg of _reflect : " << ReflectArg << "\n");
int ReflectVal = 0; // The default value is 0
- if (VarMap.find(ReflectArg) != VarMap.end()) {
- ReflectVal = VarMap[ReflectArg];
- }
- Reflect->replaceAllUsesWith(
- ConstantInt::get(Reflect->getType(), ReflectVal));
- ToRemove.push_back(Reflect);
- }
- if (ToRemove.size() == 0)
- return false;
-
- for (unsigned i = 0, e = ToRemove.size(); i != e; ++i)
- ToRemove[i]->eraseFromParent();
- return true;
-}
-
-bool NVVMReflect::runOnModule(Module &M) {
- if (!NVVMReflectEnabled)
- return false;
-
- setVarMap();
-
-
- bool Res = false;
- std::string Name;
- Type *Tys[1];
- Type *I8Ty = Type::getInt8Ty(M.getContext());
- Function *ReflectFunction;
-
- // Check for standard overloaded versions of llvm.nvvm.reflect
-
- for (unsigned i = 0; i != 5; ++i) {
- Tys[0] = PointerType::get(I8Ty, i);
- Name = Intrinsic::getName(Intrinsic::nvvm_reflect, Tys);
- ReflectFunction = M.getFunction(Name);
- if(ReflectFunction != 0) {
- Res |= handleFunction(ReflectFunction);
+ auto Iter = VarMap.find(ReflectArg);
+ if (Iter != VarMap.end())
+ ReflectVal = Iter->second;
+ else if (ReflectArg == "__CUDA_FTZ") {
+ // Try to pull __CUDA_FTZ from the nvvm-reflect-ftz module flag.
+ if (auto *Flag = mdconst::extract_or_null<ConstantInt>(
+ F.getParent()->getModuleFlag("nvvm-reflect-ftz")))
+ ReflectVal = Flag->getSExtValue();
}
+ Call->replaceAllUsesWith(ConstantInt::get(Call->getType(), ReflectVal));
+ ToRemove.push_back(Call);
}
- ReflectFunction = M.getFunction(NVVM_REFLECT_FUNCTION);
- // If reflect function is not used, then there will be
- // no entry in the module.
- if (ReflectFunction != 0)
- Res |= handleFunction(ReflectFunction);
+ for (Instruction *I : ToRemove)
+ I->eraseFromParent();
- return Res;
+ return ToRemove.size() > 0;
}
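
There are now three ways to pin a reflect value: the -nvvm-reflect-list flag, the StringMap handed to the pass's constructor, and, for __CUDA_FTZ only, the nvvm-reflect-ftz module flag. A sketch of the latter two; the key/value choices and setup are illustrative:

    #include "llvm/ADT/StringMap.h"
    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace llvm {
    FunctionPass *createNVVMReflectPass(const StringMap<int> &Mapping);
    }

    static void configureReflect(Module &M, legacy::PassManagerBase &PM) {
      // Pin values through the constructor mapping.
      StringMap<int> Mapping;
      Mapping["__CUDA_ARCH"] = 350;
      PM.add(createNVVMReflectPass(Mapping));

      // __CUDA_FTZ can instead come from a module flag, which the pass
      // reads back with getModuleFlag("nvvm-reflect-ftz").
      M.addModuleFlag(Module::Override, "nvvm-reflect-ftz", 1);
    }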
diff --git a/lib/Target/NVPTX/TargetInfo/Makefile b/lib/Target/NVPTX/TargetInfo/Makefile
deleted file mode 100644
index 8622315b47b9..000000000000
--- a/lib/Target/NVPTX/TargetInfo/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/NVPTX/TargetInfo/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMNVPTXInfo
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/PowerPC/AsmParser/Makefile b/lib/Target/PowerPC/AsmParser/Makefile
deleted file mode 100644
index c8a8915685ea..000000000000
--- a/lib/Target/PowerPC/AsmParser/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/PowerPC/AsmParser/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMPowerPCAsmParser
-
-# Hack: we need to include 'main' PowerPC target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp b/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
index 220c70a48542..4181775fc6da 100644
--- a/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
+++ b/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
@@ -1,4 +1,4 @@
-//===-- PPCAsmParser.cpp - Parse PowerPC asm to MCInst instructions ---------===//
+//===-- PPCAsmParser.cpp - Parse PowerPC asm to MCInst instructions -------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,12 +7,10 @@
//
//===----------------------------------------------------------------------===//
-#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "MCTargetDesc/PPCMCExpr.h"
+#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "PPCTargetStreamer.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
@@ -22,11 +20,11 @@
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbolELF.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/MC/MCTargetAsmParser.h"
+#include "llvm/MC/MCSymbolELF.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
@@ -294,7 +292,7 @@ public:
const MCInstrInfo &MII, const MCTargetOptions &Options)
: MCTargetAsmParser(Options, STI), MII(MII) {
// Check for 64-bit vs. 32-bit pointer mode.
- Triple TheTriple(STI.getTargetTriple());
+ const Triple &TheTriple = STI.getTargetTriple();
IsPPC64 = (TheTriple.getArch() == Triple::ppc64 ||
TheTriple.getArch() == Triple::ppc64le);
IsDarwin = TheTriple.isMacOSX();
@@ -378,6 +376,10 @@ public:
}
}
+ // Disable use of sized deallocation due to overallocation of PPCOperand
+ // objects in CreateTokenWithStringCopy.
+ void operator delete(void *p) { ::operator delete(p); }
+
/// getStartLoc - Get the location of the first token of this operand.
SMLoc getStartLoc() const override { return StartLoc; }
@@ -392,13 +394,15 @@ public:
return Imm.Val;
}
int64_t getImmS16Context() const {
- assert((Kind == Immediate || Kind == ContextImmediate) && "Invalid access!");
+ assert((Kind == Immediate || Kind == ContextImmediate) &&
+ "Invalid access!");
if (Kind == Immediate)
return Imm.Val;
return static_cast<int16_t>(Imm.Val);
}
int64_t getImmU16Context() const {
- assert((Kind == Immediate || Kind == ContextImmediate) && "Invalid access!");
+ assert((Kind == Immediate || Kind == ContextImmediate) &&
+ "Invalid access!");
return Imm.Val;
}
@@ -443,7 +447,9 @@ public:
}
bool isToken() const override { return Kind == Token; }
- bool isImm() const override { return Kind == Immediate || Kind == Expression; }
+ bool isImm() const override {
+ return Kind == Immediate || Kind == Expression;
+ }
bool isU1Imm() const { return Kind == Immediate && isUInt<1>(getImm()); }
bool isU2Imm() const { return Kind == Immediate && isUInt<2>(getImm()); }
bool isU3Imm() const { return Kind == Immediate && isUInt<3>(getImm()); }
@@ -454,13 +460,15 @@ public:
bool isU6ImmX2() const { return Kind == Immediate &&
isUInt<6>(getImm()) &&
(getImm() & 1) == 0; }
+ bool isU7Imm() const { return Kind == Immediate && isUInt<7>(getImm()); }
bool isU7ImmX4() const { return Kind == Immediate &&
isUInt<7>(getImm()) &&
(getImm() & 3) == 0; }
+ bool isU8Imm() const { return Kind == Immediate && isUInt<8>(getImm()); }
bool isU8ImmX8() const { return Kind == Immediate &&
isUInt<8>(getImm()) &&
(getImm() & 7) == 0; }
-
+
bool isU10Imm() const { return Kind == Immediate && isUInt<10>(getImm()); }
bool isU12Imm() const { return Kind == Immediate && isUInt<12>(getImm()); }
bool isU16Imm() const {
@@ -488,6 +496,9 @@ public:
bool isS16ImmX4() const { return Kind == Expression ||
(Kind == Immediate && isInt<16>(getImm()) &&
(getImm() & 3) == 0); }
+ bool isS16ImmX16() const { return Kind == Expression ||
+ (Kind == Immediate && isInt<16>(getImm()) &&
+ (getImm() & 15) == 0); }
bool isS17Imm() const {
switch (Kind) {
case Expression:
@@ -521,7 +532,9 @@ public:
(Kind == Immediate && isInt<16>(getImm()) &&
(getImm() & 3) == 0); }
bool isRegNumber() const { return Kind == Immediate && isUInt<5>(getImm()); }
- bool isVSRegNumber() const { return Kind == Immediate && isUInt<6>(getImm()); }
+ bool isVSRegNumber() const {
+ return Kind == Immediate && isUInt<6>(getImm());
+ }
bool isCCRegNumber() const { return (Kind == Expression
&& isUInt<3>(getExprCRVal())) ||
(Kind == Immediate
@@ -1190,6 +1203,29 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst,
}
break;
}
+ case PPC::CP_COPYx:
+ case PPC::CP_COPY_FIRST: {
+ MCInst TmpInst;
+ TmpInst.setOpcode(PPC::CP_COPY);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::createImm(Opcode == PPC::CP_COPYx ? 0 : 1));
+
+ Inst = TmpInst;
+ break;
+ }
+ case PPC::CP_PASTEx :
+ case PPC::CP_PASTE_LAST: {
+ MCInst TmpInst;
+ TmpInst.setOpcode(Opcode == PPC::CP_PASTEx ?
+ PPC::CP_PASTE : PPC::CP_PASTEo);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(MCOperand::createImm(Opcode == PPC::CP_PASTEx ? 0 : 1));
+
+ Inst = TmpInst;
+ break;
+ }
}
}
@@ -1454,8 +1490,8 @@ ParseExpression(const MCExpr *&EVal) {
/// This differs from the default "parseExpression" in that it handles detection
/// of the \code hi16(), ha16() and lo16() \endcode modifiers. At present,
/// parseExpression() doesn't recognise the modifiers when in the Darwin/MachO
-/// syntax form so it is done here. TODO: Determine if there is merit in arranging
-/// for this to be done at a higher level.
+/// syntax form so it is done here. TODO: Determine if there is merit in
+/// arranging for this to be done at a higher level.
bool PPCAsmParser::
ParseDarwinExpression(const MCExpr *&EVal) {
MCAsmParser &Parser = getParser();
@@ -1674,7 +1710,7 @@ bool PPCAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
while (getLexer().isNot(AsmToken::EndOfStatement) &&
getLexer().is(AsmToken::Comma)) {
// Consume the comma token
- getLexer().Lex();
+ Lex();
// Parse the next operand
if (ParseOperand(Operands))
diff --git a/lib/Target/PowerPC/CMakeLists.txt b/lib/Target/PowerPC/CMakeLists.txt
index c31ababafbe7..4842c3b7a656 100644
--- a/lib/Target/PowerPC/CMakeLists.txt
+++ b/lib/Target/PowerPC/CMakeLists.txt
@@ -16,6 +16,7 @@ add_llvm_target(PowerPCCodeGen
PPCBoolRetToInt.cpp
PPCAsmPrinter.cpp
PPCBranchSelector.cpp
+ PPCCCState.cpp
PPCCTRLoops.cpp
PPCHazardRecognizers.cpp
PPCInstrInfo.cpp
@@ -24,12 +25,12 @@ add_llvm_target(PowerPCCodeGen
PPCEarlyReturn.cpp
PPCFastISel.cpp
PPCFrameLowering.cpp
- PPCLoopDataPrefetch.cpp
PPCLoopPreIncPrep.cpp
PPCMCInstLower.cpp
PPCMachineFunctionInfo.cpp
PPCMIPeephole.cpp
PPCRegisterInfo.cpp
+ PPCQPXLoadSplat.cpp
PPCSubtarget.cpp
PPCTargetMachine.cpp
PPCTargetObjectFile.cpp
diff --git a/lib/Target/PowerPC/Disassembler/Makefile b/lib/Target/PowerPC/Disassembler/Makefile
deleted file mode 100644
index 86e3b4752207..000000000000
--- a/lib/Target/PowerPC/Disassembler/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===-- lib/Target/PowerPC/Disassembler/Makefile -----------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMPowerPCDisassembler
-
-# Hack: we need to include 'main' PPC target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp b/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp
index 1fc84fb76551..6ea4fb1bfbc3 100644
--- a/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp
+++ b/lib/Target/PowerPC/Disassembler/PPCDisassembler.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
#include "PPC.h"
-#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
@@ -368,6 +368,21 @@ static DecodeStatus decodeMemRIXOperands(MCInst &Inst, uint64_t Imm,
return MCDisassembler::Success;
}
+static DecodeStatus decodeMemRIX16Operands(MCInst &Inst, uint64_t Imm,
+ int64_t Address, const void *Decoder) {
+ // Decode the memrix16 field (imm, reg), which has the low 12 bits as the
+ // 16-byte-aligned displacement and the next 5 bits as the register number.
+
+ uint64_t Base = Imm >> 12;
+ uint64_t Disp = Imm & 0xFFF;
+
+ assert(Base < 32 && "Invalid base register");
+
+ Inst.addOperand(MCOperand::createImm(SignExtend64<16>(Disp << 4)));
+ Inst.addOperand(MCOperand::createReg(GP0Regs[Base]));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus decodeCRBitMOperand(MCInst &Inst, uint64_t Imm,
int64_t Address, const void *Decoder) {
// The cr bit encoding is 0x80 >> cr_reg_num.
diff --git a/lib/Target/PowerPC/InstPrinter/Makefile b/lib/Target/PowerPC/InstPrinter/Makefile
deleted file mode 100644
index f097e84248ff..000000000000
--- a/lib/Target/PowerPC/InstPrinter/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/PowerPC/AsmPrinter/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMPowerPCAsmPrinter
-
-# Hack: we need to include 'main' powerpc target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
index 5e1d22789056..d9d9b4f180f7 100644
--- a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
+++ b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
@@ -136,17 +136,6 @@ void PPCInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
return;
}
- // For fast-isel, a COPY_TO_REGCLASS may survive this long. This is
- // used when converting a 32-bit float to a 64-bit float as part of
- // conversion to an integer (see PPCFastISel.cpp:SelectFPToI()),
- // as otherwise we have problems with incorrect register classes
- // in machine instruction verification. For now, just avoid trying
- // to print it as such an instruction has no effect (a 32-bit float
- // in a register is already in 64-bit form, just with lower
- // precision). FIXME: Is there a better solution?
- if (MI->getOpcode() == TargetOpcode::COPY_TO_REGCLASS)
- return;
-
if (!printAliasInstr(MI, O))
printInstruction(MI, O);
printAnnotation(O, Annot);
@@ -299,6 +288,20 @@ void PPCInstPrinter::printU6ImmOperand(const MCInst *MI, unsigned OpNo,
O << (unsigned int)Value;
}
+void PPCInstPrinter::printU7ImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned int Value = MI->getOperand(OpNo).getImm();
+ assert(Value <= 127 && "Invalid u7imm argument!");
+ O << (unsigned int)Value;
+}
+
+void PPCInstPrinter::printU8ImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ unsigned int Value = MI->getOperand(OpNo).getImm();
+ assert(Value <= 255 && "Invalid u8imm argument!");
+ O << (unsigned int)Value;
+}
+
void PPCInstPrinter::printU10ImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
unsigned short Value = MI->getOperand(OpNo).getImm();
diff --git a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
index 53eb727d0b07..d0ffeff0247c 100644
--- a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
+++ b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
@@ -53,6 +53,8 @@ public:
void printS5ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU5ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU6ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printU7ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printU8ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU10ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printU12ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printS16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
diff --git a/lib/Target/PowerPC/MCTargetDesc/Makefile b/lib/Target/PowerPC/MCTargetDesc/Makefile
deleted file mode 100644
index 9db66622cced..000000000000
--- a/lib/Target/PowerPC/MCTargetDesc/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/PowerPC/TargetDesc/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMPowerPCDesc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
index b6dd595ffb0e..9100ecb4aa37 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
@@ -168,8 +168,8 @@ public:
llvm_unreachable("relaxInstruction() unimplemented");
}
-
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override {
// FIXME.
llvm_unreachable("relaxInstruction() unimplemented");
}
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
index dd994956870f..fd279c60f3f5 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
@@ -25,8 +25,8 @@ namespace {
PPCELFObjectWriter(bool Is64Bit, uint8_t OSABI);
protected:
- unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel) const override;
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
bool needsRelocateWithSymbol(const MCSymbol &Sym,
unsigned Type) const override;
@@ -66,7 +66,7 @@ static MCSymbolRefExpr::VariantKind getAccessVariant(const MCValue &Target,
llvm_unreachable("unknown PPCMCExpr kind");
}
-unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target,
+unsigned PPCELFObjectWriter::getRelocType(MCContext &Ctx, const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const {
MCSymbolRefExpr::VariantKind Modifier = getAccessVariant(Target, Fixup);
@@ -186,7 +186,7 @@ unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target,
case MCSymbolRefExpr::VK_PPC_TOC_HA:
Type = ELF::R_PPC64_TOC16_HA;
break;
- case MCSymbolRefExpr::VK_PPC_TPREL:
+ case MCSymbolRefExpr::VK_TPREL:
Type = ELF::R_PPC_TPREL16;
break;
case MCSymbolRefExpr::VK_PPC_TPREL_LO:
@@ -210,7 +210,7 @@ unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target,
case MCSymbolRefExpr::VK_PPC_TPREL_HIGHESTA:
Type = ELF::R_PPC64_TPREL16_HIGHESTA;
break;
- case MCSymbolRefExpr::VK_PPC_DTPREL:
+ case MCSymbolRefExpr::VK_DTPREL:
Type = ELF::R_PPC64_DTPREL16;
break;
case MCSymbolRefExpr::VK_PPC_DTPREL_LO:
@@ -319,13 +319,13 @@ unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target,
case MCSymbolRefExpr::VK_PPC_TOC_LO:
Type = ELF::R_PPC64_TOC16_LO_DS;
break;
- case MCSymbolRefExpr::VK_PPC_TPREL:
+ case MCSymbolRefExpr::VK_TPREL:
Type = ELF::R_PPC64_TPREL16_DS;
break;
case MCSymbolRefExpr::VK_PPC_TPREL_LO:
Type = ELF::R_PPC64_TPREL16_LO_DS;
break;
- case MCSymbolRefExpr::VK_PPC_DTPREL:
+ case MCSymbolRefExpr::VK_DTPREL:
Type = ELF::R_PPC64_DTPREL16_DS;
break;
case MCSymbolRefExpr::VK_PPC_DTPREL_LO:
@@ -380,10 +380,10 @@ unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target,
case MCSymbolRefExpr::VK_PPC_DTPMOD:
Type = ELF::R_PPC64_DTPMOD64;
break;
- case MCSymbolRefExpr::VK_PPC_TPREL:
+ case MCSymbolRefExpr::VK_TPREL:
Type = ELF::R_PPC64_TPREL64;
break;
- case MCSymbolRefExpr::VK_PPC_DTPREL:
+ case MCSymbolRefExpr::VK_DTPREL:
Type = ELF::R_PPC64_DTPREL64;
break;
}
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
index b7291561c75d..e7b2d8369f2f 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
@@ -69,6 +69,9 @@ public:
unsigned getMemRIXEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
+ unsigned getMemRIX16Encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
unsigned getSPE8DisEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
@@ -102,19 +105,16 @@ public:
void encodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const override {
- // For fast-isel, a float COPY_TO_REGCLASS can survive this long.
- // It's just a nop to keep the register classes happy, so don't
- // generate anything.
unsigned Opcode = MI.getOpcode();
const MCInstrDesc &Desc = MCII.get(Opcode);
- if (Opcode == TargetOpcode::COPY_TO_REGCLASS)
- return;
uint64_t Bits = getBinaryCodeForInstr(MI, Fixups, STI);
// Output the constant in big/little endian byte order.
unsigned Size = Desc.getSize();
switch (Size) {
+ case 0:
+ break;
case 4:
if (IsLittleEndian) {
support::endian::Writer<support::little>(OS).write<uint32_t>(Bits);
@@ -249,6 +249,19 @@ unsigned PPCMCCodeEmitter::getMemRIXEncoding(const MCInst &MI, unsigned OpNo,
return RegBits;
}
+unsigned PPCMCCodeEmitter::getMemRIX16Encoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ // Encode (imm, reg) as a memrix16, which has the low 12 bits as the
+ // displacement and the next 5 bits as the register number.
+ assert(MI.getOperand(OpNo+1).isReg());
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups, STI) << 12;
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+ assert(MO.isImm());
+
+ return ((getMachineOpValue(MI, MO, Fixups, STI) >> 4) & 0xFFF) | RegBits;
+}
unsigned PPCMCCodeEmitter::getSPE8DisEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups,
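
Taken together, getMemRIX16Encoding here and decodeMemRIX16Operands in the disassembler define a 17-bit (register:5, displacement:12) field whose displacement is a sign-extended, 16-byte-aligned 16-bit offset. A standalone sketch of the round trip in plain C++; the helper names are illustrative:

    #include <cassert>
    #include <cstdint>

    // Pack a 16-byte-aligned signed 16-bit displacement and a 5-bit register
    // number the way getMemRIX16Encoding does: reg in bits [16:12], disp>>4 below.
    static uint32_t encodeMemRIX16(int64_t Disp, unsigned Reg) {
      assert((Disp & 15) == 0 && "displacement must be 16-byte aligned");
      assert(Reg < 32 && "only 5 bits are available for the register number");
      return static_cast<uint32_t>(
          ((static_cast<uint64_t>(Disp) >> 4) & 0xFFF) | (Reg << 12));
    }

    // Recover (disp, reg) the way decodeMemRIX16Operands does.
    static void decodeMemRIX16(uint32_t Imm, int64_t &Disp, unsigned &Reg) {
      Reg = Imm >> 12;
      int64_t Raw = (Imm & 0xFFF) << 4; // 16-bit value; bit 15 is the sign
      Disp = (Raw ^ 0x8000) - 0x8000;   // portable sign extension
    }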
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
index 30f232a9a91e..c9074448fe45 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
@@ -15,7 +15,6 @@
#include "InstPrinter/PPCInstPrinter.h"
#include "PPCMCAsmInfo.h"
#include "PPCTargetStreamer.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCExpr.h"
@@ -87,24 +86,13 @@ static MCAsmInfo *createPPCMCAsmInfo(const MCRegisterInfo &MRI,
return MAI;
}
-static MCCodeGenInfo *createPPCMCCodeGenInfo(const Triple &TT, Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
-
- if (RM == Reloc::Default) {
- if (TT.isOSDarwin())
- RM = Reloc::DynamicNoPIC;
- else
- RM = Reloc::Static;
- }
+static void adjustCodeGenOpts(const Triple &TT, Reloc::Model RM,
+ CodeModel::Model &CM) {
if (CM == CodeModel::Default) {
if (!TT.isOSDarwin() &&
(TT.getArch() == Triple::ppc64 || TT.getArch() == Triple::ppc64le))
CM = CodeModel::Medium;
}
- X->initMCCodeGenInfo(RM, CM, OL);
- return X;
}
namespace {
@@ -245,7 +233,7 @@ extern "C" void LLVMInitializePowerPCTargetMC() {
RegisterMCAsmInfoFn C(*T, createPPCMCAsmInfo);
// Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(*T, createPPCMCCodeGenInfo);
+ TargetRegistry::registerMCAdjustCodeGenOpts(*T, adjustCodeGenOpts);
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(*T, createPPCMCInstrInfo);
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp
index b54a0e1b86b1..1f38a8c947e7 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp
@@ -79,7 +79,7 @@ static unsigned getFixupKindLog2Size(unsigned Kind) {
}
/// Translates generic PPC fixup kind to Mach-O/PPC relocation type enum.
-/// Outline based on PPCELFObjectWriter::GetRelocType().
+/// Outline based on PPCELFObjectWriter::getRelocType().
static unsigned getRelocType(const MCValue &Target,
const MCFixupKind FixupKind, // from
// Fixup.getKind()
diff --git a/lib/Target/PowerPC/Makefile b/lib/Target/PowerPC/Makefile
deleted file mode 100644
index cf516f4e5ec9..000000000000
--- a/lib/Target/PowerPC/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-##===- lib/Target/PowerPC/Makefile -------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMPowerPCCodeGen
-TARGET = PPC
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = PPCGenRegisterInfo.inc PPCGenAsmMatcher.inc \
- PPCGenAsmWriter.inc \
- PPCGenInstrInfo.inc PPCGenDAGISel.inc \
- PPCGenSubtargetInfo.inc PPCGenCallingConv.inc \
- PPCGenMCCodeEmitter.inc PPCGenFastISel.inc \
- PPCGenDisassemblerTables.inc
-
-DIRS = AsmParser Disassembler InstPrinter TargetInfo MCTargetDesc
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/PowerPC/PPC.h b/lib/Target/PowerPC/PPC.h
index a259ed3fd327..e01f49dce81e 100644
--- a/lib/Target/PowerPC/PPC.h
+++ b/lib/Target/PowerPC/PPC.h
@@ -16,7 +16,6 @@
#define LLVM_LIB_TARGET_POWERPC_PPC_H
#include "MCTargetDesc/PPCMCTargetDesc.h"
-#include <string>
// GCC #defines PPC on Linux but we use it as our namespace name
#undef PPC
@@ -34,7 +33,6 @@ namespace llvm {
#ifndef NDEBUG
FunctionPass *createPPCCTRLoopsVerify();
#endif
- FunctionPass *createPPCLoopDataPrefetchPass();
FunctionPass *createPPCLoopPreIncPrepPass(PPCTargetMachine &TM);
FunctionPass *createPPCTOCRegDepsPass();
FunctionPass *createPPCEarlyReturnPass();
@@ -43,6 +41,7 @@ namespace llvm {
FunctionPass *createPPCVSXSwapRemovalPass();
FunctionPass *createPPCMIPeepholePass();
FunctionPass *createPPCBranchSelectionPass();
+ FunctionPass *createPPCQPXLoadSplatPass();
FunctionPass *createPPCISelDag(PPCTargetMachine &TM);
FunctionPass *createPPCTLSDynamicCallPass();
FunctionPass *createPPCBoolRetToIntPass();
@@ -60,13 +59,12 @@ namespace llvm {
//===------------------------------------------------------------------===//
// PPC Specific MachineOperand flags.
MO_NO_FLAG,
-
- /// MO_PLT_OR_STUB - On a symbol operand "FOO", this indicates that the
- /// reference is actually to the "FOO$stub" or "FOO@plt" symbol. This is
- /// used for calls and jumps to external functions on Tiger and earlier, and
+
+ /// On a symbol operand "FOO", this indicates that the reference is actually
+ /// to "FOO@plt". This is used for calls and jumps to external functions on
/// for PIC calls on Linux and ELF systems.
- MO_PLT_OR_STUB = 1,
-
+ MO_PLT = 1,
+
/// MO_PIC_FLAG - If this bit is set, the symbol reference is relative to
/// the function's picbase, e.g. lo16(symbol-picbase).
MO_PIC_FLAG = 2,
@@ -74,7 +72,7 @@ namespace llvm {
/// MO_NLP_FLAG - If this bit is set, the symbol reference is actually to
/// the non_lazy_ptr for the global, e.g. lo16(symbol$non_lazy_ptr-picbase).
MO_NLP_FLAG = 4,
-
+
/// MO_NLP_HIDDEN_FLAG - If this bit is set, the symbol reference is to a
/// symbol with hidden visibility. This causes a different kind of
/// non-lazy-pointer to be generated.
@@ -93,11 +91,11 @@ namespace llvm {
/// These values identify relocations on immediates folded
/// into memory operations.
MO_DTPREL_LO = 5 << 4,
- MO_TLSLD_LO = 6 << 4,
- MO_TOC_LO = 7 << 4,
+ MO_TLSLD_LO = 6 << 4,
+ MO_TOC_LO = 7 << 4,
// Symbol for VK_PPC_TLS fixup attached to an ADD instruction
- MO_TLS = 8 << 4
+ MO_TLS = 8 << 4
};
} // end namespace PPCII
diff --git a/lib/Target/PowerPC/PPC.td b/lib/Target/PowerPC/PPC.td
index b03be12cfd97..b40b530f4c5d 100644
--- a/lib/Target/PowerPC/PPC.td
+++ b/lib/Target/PowerPC/PPC.td
@@ -37,16 +37,19 @@ def Directive64 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_64", "">;
def DirectiveA2 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_A2", "">;
def DirectiveE500mc : SubtargetFeature<"", "DarwinDirective",
"PPC::DIR_E500mc", "">;
-def DirectiveE5500 : SubtargetFeature<"", "DarwinDirective",
+def DirectiveE5500 : SubtargetFeature<"", "DarwinDirective",
"PPC::DIR_E5500", "">;
def DirectivePwr3: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR3", "">;
def DirectivePwr4: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR4", "">;
def DirectivePwr5: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR5", "">;
-def DirectivePwr5x: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR5X", "">;
+def DirectivePwr5x
+ : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR5X", "">;
def DirectivePwr6: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR6", "">;
-def DirectivePwr6x: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR6X", "">;
+def DirectivePwr6x
+ : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR6X", "">;
def DirectivePwr7: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR7", "">;
def DirectivePwr8: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR8", "">;
+def DirectivePwr9: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR9", "">;
def Feature64Bit : SubtargetFeature<"64bit","Has64BitSupport", "true",
"Enable 64-bit instructions">;
@@ -86,8 +89,6 @@ def FeatureFPCVT : SubtargetFeature<"fpcvt", "HasFPCVT", "true",
"Enable fc[ft]* (unsigned and single-precision) and lfiwzx instructions">;
def FeatureISEL : SubtargetFeature<"isel","HasISEL", "true",
"Enable the isel instruction">;
-def FeaturePOPCNTD : SubtargetFeature<"popcntd","HasPOPCNTD", "true",
- "Enable the popcnt[dw] instructions">;
def FeatureBPERMD : SubtargetFeature<"bpermd", "HasBPERMD", "true",
"Enable the bpermd instruction">;
def FeatureExtDiv : SubtargetFeature<"extdiv", "HasExtDiv", "true",
@@ -145,24 +146,43 @@ def FeatureFloat128 :
SubtargetFeature<"float128", "HasFloat128", "true",
"Enable the __float128 data type for IEEE-754R Binary128.",
[FeatureVSX]>;
+def FeaturePOPCNTD : SubtargetFeature<"popcntd","HasPOPCNTD",
+ "POPCNTD_Fast",
+ "Enable the popcnt[dw] instructions">;
+// Note that for the a2/a2q processor models we should not use popcnt[dw] by
+// default. These processors do support the instructions, but they're
+// microcoded, and the software emulation is about twice as fast.
+def FeatureSlowPOPCNTD : SubtargetFeature<"slow-popcntd","HasPOPCNTD",
+ "POPCNTD_Slow",
+ "Has slow popcnt[dw] instructions">;
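These two features write different values into a single subtarget field rather than toggling a boolean; a sketch of the C++ side this implies (enum name and the Unavailable default are assumptions mirroring PPCSubtarget):

    // "Slow" means popcnt[dw] decodes but is microcoded, so a software
    // sequence is preferred; "Fast" means use the instructions directly.
    enum POPCNTDKind {
      POPCNTD_Unavailable,
      POPCNTD_Slow,
      POPCNTD_Fast
    };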
def DeprecatedDST : SubtargetFeature<"", "DeprecatedDST", "true",
"Treat vector data stream cache control instructions as deprecated">;
-/* Since new processors generally contain a superset of features of those that
- came before them, the idea is to make implementations of new processors
- less error prone and easier to read.
- Namely:
- list<SubtargetFeature> Power8FeatureList = ...
- list<SubtargetFeature> FutureProcessorSpecificFeatureList =
- [ features that Power8 does not support ]
- list<SubtargetFeature> FutureProcessorFeatureList =
- !listconcat(Power8FeatureList, FutureProcessorSpecificFeatureList)
+def FeatureISA3_0 : SubtargetFeature<"isa-v30-instructions", "IsISA3_0",
+ "true",
+ "Enable instructions added in ISA 3.0.">;
+def FeatureP9Altivec : SubtargetFeature<"power9-altivec", "HasP9Altivec", "true",
+ "Enable POWER9 Altivec instructions",
+ [FeatureISA3_0, FeatureP8Altivec]>;
+def FeatureP9Vector : SubtargetFeature<"power9-vector", "HasP9Vector", "true",
+ "Enable POWER9 vector instructions",
+ [FeatureISA3_0, FeatureP8Vector,
+ FeatureP9Altivec]>;
- Makes it explicit and obvious what is new in FutureProcesor vs. Power8 as
- well as providing a single point of definition if the feature set will be
- used elsewhere.
-*/
+// Since new processors generally contain a superset of features of those that
+// came before them, the idea is to make implementations of new processors
+// less error prone and easier to read.
+// Namely:
+// list<SubtargetFeature> Power8FeatureList = ...
+// list<SubtargetFeature> FutureProcessorSpecificFeatureList =
+// [ features that Power8 does not support ]
+// list<SubtargetFeature> FutureProcessorFeatureList =
+// !listconcat(Power8FeatureList, FutureProcessorSpecificFeatureList)
+
+// Makes it explicit and obvious what is new in FutureProcessor vs. Power8 as
+// well as providing a single point of definition if the feature set will be
+// used elsewhere.
def ProcessorFeatures {
list<SubtargetFeature> Power7FeatureList =
[DirectivePwr7, FeatureAltivec, FeatureVSX,
@@ -180,6 +200,10 @@ def ProcessorFeatures {
FeatureFusion];
list<SubtargetFeature> Power8FeatureList =
!listconcat(Power7FeatureList, Power8SpecificFeatures);
+ list<SubtargetFeature> Power9SpecificFeatures =
+ [FeatureP9Altivec, FeatureP9Vector, FeatureISA3_0];
+ list<SubtargetFeature> Power9FeatureList =
+ !listconcat(Power8FeatureList, Power9SpecificFeatures);
}
// Note: Future features to add when support is extended to more
@@ -331,16 +355,17 @@ def : ProcessorModel<"a2", PPCA2Model,
FeatureFRSQRTE, FeatureFRSQRTES, FeatureRecipPrec,
FeatureSTFIWX, FeatureLFIWAX,
FeatureFPRND, FeatureFPCVT, FeatureISEL,
- FeaturePOPCNTD, FeatureCMPB, FeatureLDBRX, Feature64Bit
- /*, Feature64BitRegs */, FeatureMFTB]>;
+ FeatureSlowPOPCNTD, FeatureCMPB, FeatureLDBRX,
+ Feature64Bit /*, Feature64BitRegs */, FeatureMFTB]>;
def : ProcessorModel<"a2q", PPCA2Model,
[DirectiveA2, FeatureICBT, FeatureBookE, FeatureMFOCRF,
FeatureFCPSGN, FeatureFSqrt, FeatureFRE, FeatureFRES,
FeatureFRSQRTE, FeatureFRSQRTES, FeatureRecipPrec,
FeatureSTFIWX, FeatureLFIWAX,
FeatureFPRND, FeatureFPCVT, FeatureISEL,
- FeaturePOPCNTD, FeatureCMPB, FeatureLDBRX, Feature64Bit
- /*, Feature64BitRegs */, FeatureQPX, FeatureMFTB]>;
+ FeatureSlowPOPCNTD, FeatureCMPB, FeatureLDBRX,
+ Feature64Bit /*, Feature64BitRegs */, FeatureQPX,
+ FeatureMFTB]>;
def : ProcessorModel<"pwr3", G5Model,
[DirectivePwr3, FeatureAltivec,
FeatureFRES, FeatureFRSQRTE, FeatureMFOCRF,
@@ -377,6 +402,8 @@ def : ProcessorModel<"pwr6x", G5Model,
FeatureMFTB, DeprecatedDST]>;
def : ProcessorModel<"pwr7", P7Model, ProcessorFeatures.Power7FeatureList>;
def : ProcessorModel<"pwr8", P8Model, ProcessorFeatures.Power8FeatureList>;
+// FIXME: Same as P8 until the POWER9 scheduling info is available
+def : ProcessorModel<"pwr9", P8Model, ProcessorFeatures.Power9FeatureList>;
def : Processor<"ppc", G3Itineraries, [Directive32, FeatureMFTB]>;
def : ProcessorModel<"ppc64", G5Model,
[Directive64, FeatureAltivec,
diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp
index ec354c209ca0..76c52ab6cf1e 100644
--- a/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -25,7 +25,6 @@
#include "PPCTargetMachine.h"
#include "PPCTargetStreamer.h"
#include "llvm/ADT/MapVector.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -50,7 +49,6 @@
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbolELF.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
@@ -82,6 +80,12 @@ public:
MCSymbol *lookUpOrCreateTOCEntry(MCSymbol *Sym);
+  bool doInitialization(Module &M) override {
+ if (!TOC.empty())
+ TOC.clear();
+ return AsmPrinter::doInitialization(M);
+ }
+
void EmitInstruction(const MachineInstr *MI) override;
void printOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O);
@@ -138,8 +142,6 @@ public:
bool doFinalization(Module &M) override;
void EmitStartOfAsmFile(Module &M) override;
-
- void EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs);
};
} // end of anonymous namespace
@@ -195,29 +197,14 @@ void PPCAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
MCSymbol *SymToPrint;
// External or weakly linked global variables need non-lazily-resolved stubs
- if (TM.getRelocationModel() != Reloc::Static &&
- !GV->isStrongDefinitionForLinker()) {
- if (!GV->hasHiddenVisibility()) {
- SymToPrint = getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
- MachineModuleInfoImpl::StubValueTy &StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getGVStubEntry(
- SymToPrint);
- if (!StubSym.getPointer())
- StubSym = MachineModuleInfoImpl::
- StubValueTy(getSymbol(GV), !GV->hasInternalLinkage());
- } else if (GV->isDeclaration() || GV->hasCommonLinkage() ||
- GV->hasAvailableExternallyLinkage()) {
- SymToPrint = getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
-
- MachineModuleInfoImpl::StubValueTy &StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getHiddenGVStubEntry(
- SymToPrint);
- if (!StubSym.getPointer())
- StubSym = MachineModuleInfoImpl::
- StubValueTy(getSymbol(GV), !GV->hasInternalLinkage());
- } else {
- SymToPrint = getSymbol(GV);
- }
+ if (Subtarget->hasLazyResolverStub(GV)) {
+ SymToPrint = getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
+ MachineModuleInfoImpl::StubValueTy &StubSym =
+ MMI->getObjFileInfo<MachineModuleInfoMachO>().getGVStubEntry(
+ SymToPrint);
+ if (!StubSym.getPointer())
+ StubSym = MachineModuleInfoImpl::StubValueTy(getSymbol(GV),
+ !GV->hasInternalLinkage());
} else {
SymToPrint = getSymbol(GV);
}
@@ -470,7 +457,7 @@ void PPCAsmPrinter::EmitTlsCall(const MachineInstr *MI,
"GETtls[ld]ADDR[32] must read GPR3");
if (!Subtarget->isPPC64() && !Subtarget->isDarwin() &&
- TM.getRelocationModel() == Reloc::PIC_)
+ isPositionIndependent())
Kind = MCSymbolRefExpr::VK_PLT;
const MCSymbolRefExpr *TlsRef =
MCSymbolRefExpr::create(TlsGetAddr, Kind, OutContext);
@@ -597,7 +584,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
else if (MO.isBlockAddress())
MOSymbol = GetBlockAddressSymbol(MO.getBlockAddress());
- if (PL == PICLevel::Small) {
+ if (PL == PICLevel::SmallPIC) {
const MCExpr *Exp =
MCSymbolRefExpr::create(MOSymbol, MCSymbolRefExpr::VK_GOT,
OutContext);
@@ -1038,10 +1025,10 @@ void PPCLinuxAsmPrinter::EmitStartOfAsmFile(Module &M) {
}
if (static_cast<const PPCTargetMachine &>(TM).isPPC64() ||
- TM.getRelocationModel() != Reloc::PIC_)
+ !isPositionIndependent())
return AsmPrinter::EmitStartOfAsmFile(M);
- if (M.getPICLevel() == PICLevel::Small)
+ if (M.getPICLevel() == PICLevel::SmallPIC)
return AsmPrinter::EmitStartOfAsmFile(M);
OutStreamer->SwitchSection(OutContext.getELFSection(
@@ -1067,8 +1054,8 @@ void PPCLinuxAsmPrinter::EmitStartOfAsmFile(Module &M) {
void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() {
// linux/ppc32 - Normal entry label.
if (!Subtarget->isPPC64() &&
- (TM.getRelocationModel() != Reloc::PIC_ ||
- MF->getFunction()->getParent()->getPICLevel() == PICLevel::Small))
+ (!isPositionIndependent() ||
+ MF->getFunction()->getParent()->getPICLevel() == PICLevel::SmallPIC))
return AsmPrinter::EmitFunctionEntryLabel();
if (!Subtarget->isPPC64()) {
@@ -1302,8 +1289,10 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) {
"power6",
"power6x",
"power7",
+ // FIXME: why is power8 missing here?
"ppc64",
- "ppc64le"
+ "ppc64le",
+ "power9"
};
// Get the numerically largest directive.
@@ -1350,161 +1339,6 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) {
OutStreamer->SwitchSection(getObjFileLowering().getTextSection());
}
-static MCSymbol *GetLazyPtr(MCSymbol *Sym, MCContext &Ctx) {
- // Remove $stub suffix, add $lazy_ptr.
- StringRef NoStub = Sym->getName().substr(0, Sym->getName().size()-5);
- return Ctx.getOrCreateSymbol(NoStub + "$lazy_ptr");
-}
-
-static MCSymbol *GetAnonSym(MCSymbol *Sym, MCContext &Ctx) {
- // Add $tmp suffix to $stub, yielding $stub$tmp.
- return Ctx.getOrCreateSymbol(Sym->getName() + "$tmp");
-}
-
-void PPCDarwinAsmPrinter::
-EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
- bool isPPC64 = getDataLayout().getPointerSizeInBits() == 64;
-
- // Construct a local MCSubtargetInfo and shadow EmitToStreamer here.
- // This is because the MachineFunction won't exist (but have not yet been
- // freed) and since we're at the global level we can use the default
- // constructed subtarget.
- std::unique_ptr<MCSubtargetInfo> STI(TM.getTarget().createMCSubtargetInfo(
- TM.getTargetTriple().str(), TM.getTargetCPU(),
- TM.getTargetFeatureString()));
- auto EmitToStreamer = [&STI] (MCStreamer &S, const MCInst &Inst) {
- S.EmitInstruction(Inst, *STI);
- };
-
- const TargetLoweringObjectFileMachO &TLOFMacho =
- static_cast<const TargetLoweringObjectFileMachO &>(getObjFileLowering());
-
- // .lazy_symbol_pointer
- MCSection *LSPSection = TLOFMacho.getLazySymbolPointerSection();
-
- // Output stubs for dynamically-linked functions
- if (TM.getRelocationModel() == Reloc::PIC_) {
- MCSection *StubSection = OutContext.getMachOSection(
- "__TEXT", "__picsymbolstub1",
- MachO::S_SYMBOL_STUBS | MachO::S_ATTR_PURE_INSTRUCTIONS, 32,
- SectionKind::getText());
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- OutStreamer->SwitchSection(StubSection);
- EmitAlignment(4);
-
- MCSymbol *Stub = Stubs[i].first;
- MCSymbol *RawSym = Stubs[i].second.getPointer();
- MCSymbol *LazyPtr = GetLazyPtr(Stub, OutContext);
- MCSymbol *AnonSymbol = GetAnonSym(Stub, OutContext);
-
- OutStreamer->EmitLabel(Stub);
- OutStreamer->EmitSymbolAttribute(RawSym, MCSA_IndirectSymbol);
-
- const MCExpr *Anon = MCSymbolRefExpr::create(AnonSymbol, OutContext);
- const MCExpr *LazyPtrExpr = MCSymbolRefExpr::create(LazyPtr, OutContext);
- const MCExpr *Sub =
- MCBinaryExpr::createSub(LazyPtrExpr, Anon, OutContext);
-
- // mflr r0
- EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::MFLR).addReg(PPC::R0));
- // bcl 20, 31, AnonSymbol
- EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::BCLalways).addExpr(Anon));
- OutStreamer->EmitLabel(AnonSymbol);
- // mflr r11
- EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::MFLR).addReg(PPC::R11));
- // addis r11, r11, ha16(LazyPtr - AnonSymbol)
- const MCExpr *SubHa16 = PPCMCExpr::createHa(Sub, true, OutContext);
- EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::ADDIS)
- .addReg(PPC::R11)
- .addReg(PPC::R11)
- .addExpr(SubHa16));
- // mtlr r0
- EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::MTLR).addReg(PPC::R0));
-
- // ldu r12, lo16(LazyPtr - AnonSymbol)(r11)
- // lwzu r12, lo16(LazyPtr - AnonSymbol)(r11)
- const MCExpr *SubLo16 = PPCMCExpr::createLo(Sub, true, OutContext);
- EmitToStreamer(*OutStreamer, MCInstBuilder(isPPC64 ? PPC::LDU : PPC::LWZU)
- .addReg(PPC::R12)
- .addExpr(SubLo16).addExpr(SubLo16)
- .addReg(PPC::R11));
- // mtctr r12
- EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::MTCTR).addReg(PPC::R12));
- // bctr
- EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::BCTR));
-
- OutStreamer->SwitchSection(LSPSection);
- OutStreamer->EmitLabel(LazyPtr);
- OutStreamer->EmitSymbolAttribute(RawSym, MCSA_IndirectSymbol);
-
- MCSymbol *DyldStubBindingHelper =
- OutContext.getOrCreateSymbol(StringRef("dyld_stub_binding_helper"));
- if (isPPC64) {
- // .quad dyld_stub_binding_helper
- OutStreamer->EmitSymbolValue(DyldStubBindingHelper, 8);
- } else {
- // .long dyld_stub_binding_helper
- OutStreamer->EmitSymbolValue(DyldStubBindingHelper, 4);
- }
- }
- OutStreamer->AddBlankLine();
- return;
- }
-
- MCSection *StubSection = OutContext.getMachOSection(
- "__TEXT", "__symbol_stub1",
- MachO::S_SYMBOL_STUBS | MachO::S_ATTR_PURE_INSTRUCTIONS, 16,
- SectionKind::getText());
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- MCSymbol *Stub = Stubs[i].first;
- MCSymbol *RawSym = Stubs[i].second.getPointer();
- MCSymbol *LazyPtr = GetLazyPtr(Stub, OutContext);
- const MCExpr *LazyPtrExpr = MCSymbolRefExpr::create(LazyPtr, OutContext);
-
- OutStreamer->SwitchSection(StubSection);
- EmitAlignment(4);
- OutStreamer->EmitLabel(Stub);
- OutStreamer->EmitSymbolAttribute(RawSym, MCSA_IndirectSymbol);
-
- // lis r11, ha16(LazyPtr)
- const MCExpr *LazyPtrHa16 =
- PPCMCExpr::createHa(LazyPtrExpr, true, OutContext);
- EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::LIS)
- .addReg(PPC::R11)
- .addExpr(LazyPtrHa16));
-
- // ldu r12, lo16(LazyPtr)(r11)
- // lwzu r12, lo16(LazyPtr)(r11)
- const MCExpr *LazyPtrLo16 =
- PPCMCExpr::createLo(LazyPtrExpr, true, OutContext);
- EmitToStreamer(*OutStreamer, MCInstBuilder(isPPC64 ? PPC::LDU : PPC::LWZU)
- .addReg(PPC::R12)
- .addExpr(LazyPtrLo16).addExpr(LazyPtrLo16)
- .addReg(PPC::R11));
-
- // mtctr r12
- EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::MTCTR).addReg(PPC::R12));
- // bctr
- EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::BCTR));
-
- OutStreamer->SwitchSection(LSPSection);
- OutStreamer->EmitLabel(LazyPtr);
- OutStreamer->EmitSymbolAttribute(RawSym, MCSA_IndirectSymbol);
-
- MCSymbol *DyldStubBindingHelper =
- OutContext.getOrCreateSymbol(StringRef("dyld_stub_binding_helper"));
- if (isPPC64) {
- // .quad dyld_stub_binding_helper
- OutStreamer->EmitSymbolValue(DyldStubBindingHelper, 8);
- } else {
- // .long dyld_stub_binding_helper
- OutStreamer->EmitSymbolValue(DyldStubBindingHelper, 4);
- }
- }
-
- OutStreamer->AddBlankLine();
-}
-
bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
bool isPPC64 = getDataLayout().getPointerSizeInBits() == 64;
@@ -1514,10 +1348,6 @@ bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
MachineModuleInfoMachO &MMIMacho =
MMI->getObjFileInfo<MachineModuleInfoMachO>();
- MachineModuleInfoMachO::SymbolListTy Stubs = MMIMacho.GetFnStubList();
- if (!Stubs.empty())
- EmitFunctionStubs(Stubs);
-
if (MAI->doesSupportExceptionHandling() && MMI) {
// Add the (possibly multiple) personalities to the set of global values.
// Only referenced functions get into the Personalities list.
@@ -1534,7 +1364,7 @@ bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
}
// Output stubs for dynamically-linked functions.
- Stubs = MMIMacho.GetGVStubList();
+ MachineModuleInfoMachO::SymbolListTy Stubs = MMIMacho.GetGVStubList();
// Output macho stubs for external and common global variables.
if (!Stubs.empty()) {
@@ -1568,25 +1398,6 @@ bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
OutStreamer->AddBlankLine();
}
- Stubs = MMIMacho.GetHiddenGVStubList();
- if (!Stubs.empty()) {
- OutStreamer->SwitchSection(getObjFileLowering().getDataSection());
- EmitAlignment(isPPC64 ? 3 : 2);
-
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- // L_foo$stub:
- OutStreamer->EmitLabel(Stubs[i].first);
- // .long _foo
- OutStreamer->EmitValue(MCSymbolRefExpr::
- create(Stubs[i].second.getPointer(),
- OutContext),
- isPPC64 ? 8 : 4/*size*/);
- }
-
- Stubs.clear();
- OutStreamer->AddBlankLine();
- }
-
// Funny Darwin hack: This flag tells the linker that no global symbols
// contain code that falls through to other global symbols (e.g. the obvious
// implementation of multiple entry points). If this doesn't occur, the
diff --git a/lib/Target/PowerPC/PPCBoolRetToInt.cpp b/lib/Target/PowerPC/PPCBoolRetToInt.cpp
index 7920240bc2b9..bfb4d8756901 100644
--- a/lib/Target/PowerPC/PPCBoolRetToInt.cpp
+++ b/lib/Target/PowerPC/PPCBoolRetToInt.cpp
@@ -119,7 +119,7 @@ class PPCBoolRetToInt : public FunctionPass {
Promotable.insert(P);
SmallVector<const PHINode *, 8> ToRemove;
- for (const auto &P : Promotable) {
+ for (const PHINode *P : Promotable) {
// Condition 2 and 3
auto IsValidUser = [] (const Value *V) -> bool {
return isa<ReturnInst>(V) || isa<CallInst>(V) || isa<PHINode>(V) ||
@@ -146,7 +146,7 @@ class PPCBoolRetToInt : public FunctionPass {
Promotable.erase(User);
ToRemove.clear();
- for (const auto &P : Promotable) {
+ for (const PHINode *P : Promotable) {
// Condition 4 and 5
const auto &Users = P->users();
const auto &Operands = P->operands();
@@ -168,6 +168,9 @@ class PPCBoolRetToInt : public FunctionPass {
}
bool runOnFunction(Function &F) {
+ if (skipFunction(F))
+ return false;
+
PHINodeSet PromotablePHINodes = getPromotablePHINodes(F);
B2IMap Bool2IntMap;
bool Changed = false;
@@ -199,11 +202,11 @@ class PPCBoolRetToInt : public FunctionPass {
// Presently, we only know how to handle PHINode, Constant, and Arguments.
// Potentially, bitwise operations (AND, OR, XOR, NOT) and sign extension
// could also be handled in the future.
- for (const auto &V : Defs)
+ for (Value *V : Defs)
if (!isa<PHINode>(V) && !isa<Constant>(V) && !isa<Argument>(V))
return false;
- for (const auto &V : Defs)
+ for (Value *V : Defs)
if (const PHINode *P = dyn_cast<PHINode>(V))
if (!PromotablePHINodes.count(P))
return false;
@@ -214,7 +217,7 @@ class PPCBoolRetToInt : public FunctionPass {
++NumBoolCallPromotion;
++NumBoolToIntPromotion;
- for (const auto &V : Defs)
+ for (Value *V : Defs)
if (!BoolToIntMap.count(V))
BoolToIntMap[V] = translate(V);
diff --git a/lib/Target/PowerPC/PPCBranchSelector.cpp b/lib/Target/PowerPC/PPCBranchSelector.cpp
index 73a5305197ad..4d63c5b5703c 100644
--- a/lib/Target/PowerPC/PPCBranchSelector.cpp
+++ b/lib/Target/PowerPC/PPCBranchSelector.cpp
@@ -46,6 +46,11 @@ namespace {
bool runOnMachineFunction(MachineFunction &Fn) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return "PowerPC Branch Selector";
}
@@ -102,10 +107,9 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
}
unsigned BlockSize = 0;
- for (MachineBasicBlock::iterator MBBI = MBB->begin(), EE = MBB->end();
- MBBI != EE; ++MBBI)
- BlockSize += TII->GetInstSizeInBytes(MBBI);
-
+ for (MachineInstr &MI : *MBB)
+ BlockSize += TII->GetInstSizeInBytes(MI);
+
BlockSizes[MBB->getNumber()] = BlockSize;
FuncSize += BlockSize;
}
@@ -151,7 +155,7 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
Dest = I->getOperand(0).getMBB();
if (!Dest) {
- MBBStartOffset += TII->GetInstSizeInBytes(I);
+ MBBStartOffset += TII->GetInstSizeInBytes(*I);
continue;
}
@@ -234,4 +238,3 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
BlockSizes.clear();
return true;
}
-
diff --git a/lib/Target/PowerPC/PPCCCState.cpp b/lib/Target/PowerPC/PPCCCState.cpp
new file mode 100644
index 000000000000..5510a95430f5
--- /dev/null
+++ b/lib/Target/PowerPC/PPCCCState.cpp
@@ -0,0 +1,36 @@
+//===---- PPCCCState.cpp - CCState with PowerPC specific extensions -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PPCCCState.h"
+#include "PPCSubtarget.h"
+#include "llvm/IR/Module.h"
+using namespace llvm;
+
+// Identify lowered values that originated from ppcf128 arguments and record
+// this.
+void PPCCCState::PreAnalyzeCallOperands(
+ const SmallVectorImpl<ISD::OutputArg> &Outs) {
+ for (const auto &I : Outs) {
+ if (I.ArgVT == llvm::MVT::ppcf128)
+ OriginalArgWasPPCF128.push_back(true);
+ else
+ OriginalArgWasPPCF128.push_back(false);
+ }
+}
+
+void PPCCCState::PreAnalyzeFormalArguments(
+ const SmallVectorImpl<ISD::InputArg> &Ins) {
+ for (const auto &I : Ins) {
+ if (I.ArgVT == llvm::MVT::ppcf128) {
+ OriginalArgWasPPCF128.push_back(true);
+ } else {
+ OriginalArgWasPPCF128.push_back(false);
+ }
+ }
+}
\ No newline at end of file
diff --git a/lib/Target/PowerPC/PPCCCState.h b/lib/Target/PowerPC/PPCCCState.h
new file mode 100644
index 000000000000..9be9f11dbea3
--- /dev/null
+++ b/lib/Target/PowerPC/PPCCCState.h
@@ -0,0 +1,42 @@
+//===---- PPCCCState.h - CCState with PowerPC specific extensions ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PPCCCSTATE_H
+#define PPCCCSTATE_H
+
+#include "PPCISelLowering.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+
+namespace llvm {
+
+class PPCCCState : public CCState {
+public:
+
+  void PreAnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs);
+  void PreAnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins);
+
+private:
+
+  // Records whether the value has been lowered from a ppcf128.
+ SmallVector<bool, 4> OriginalArgWasPPCF128;
+
+public:
+ PPCCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
+ SmallVectorImpl<CCValAssign> &locs, LLVMContext &C)
+ : CCState(CC, isVarArg, MF, locs, C) {}
+
+  bool WasOriginalArgPPCF128(unsigned ValNo) {
+    return OriginalArgWasPPCF128[ValNo];
+  }
+ void clearWasPPCF128() { OriginalArgWasPPCF128.clear(); }
+};
+}
+
+#endif
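A sketch of the intended call pattern for this class (the call sites are assumed; AnalyzeFormalArguments is inherited from the CCState base):

    // PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, Context);
    // CCInfo.PreAnalyzeFormalArguments(Ins);   // record ppcf128 origins first
    // CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
    // ...custom handlers may query CCInfo.WasOriginalArgPPCF128(ValNo)...
    // CCInfo.clearWasPPCF128();                // reset before reuse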
diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp
index b6ac4d54d4c7..875226635917 100644
--- a/lib/Target/PowerPC/PPCCTRLoops.cpp
+++ b/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -54,9 +54,6 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#endif
-#include <algorithm>
-#include <vector>
-
using namespace llvm;
#define DEBUG_TYPE "ctrloops"
@@ -169,6 +166,9 @@ FunctionPass *llvm::createPPCCTRLoopsVerify() {
#endif // NDEBUG
bool PPCCTRLoops::runOnFunction(Function &F) {
+ if (skipFunction(F))
+ return false;
+
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
@@ -245,7 +245,7 @@ bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
if (Function *F = CI->getCalledFunction()) {
// Most intrinsics don't become function calls, but some might.
// sin, cos, exp and log are always calls.
- unsigned Opcode;
+ unsigned Opcode = 0;
if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
switch (F->getIntrinsicID()) {
default: continue;
@@ -305,6 +305,8 @@ bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
case Intrinsic::rint: Opcode = ISD::FRINT; break;
case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
case Intrinsic::round: Opcode = ISD::FROUND; break;
+ case Intrinsic::minnum: Opcode = ISD::FMINNUM; break;
+ case Intrinsic::maxnum: Opcode = ISD::FMAXNUM; break;
}
}
@@ -364,8 +366,18 @@ bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
case LibFunc::truncf:
case LibFunc::truncl:
Opcode = ISD::FTRUNC; break;
+ case LibFunc::fmin:
+ case LibFunc::fminf:
+ case LibFunc::fminl:
+ Opcode = ISD::FMINNUM; break;
+ case LibFunc::fmax:
+ case LibFunc::fmaxf:
+ case LibFunc::fmaxl:
+ Opcode = ISD::FMAXNUM; break;
}
+ }
+ if (Opcode) {
auto &DL = CI->getModule()->getDataLayout();
MVT VTy = TLI->getSimpleValueType(DL, CI->getArgOperand(0)->getType(),
true);
@@ -422,6 +434,25 @@ bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
return true;
}
+
+ if (TM->getSubtargetImpl(*BB->getParent())->getTargetLowering()->useSoftFloat()) {
+ switch(J->getOpcode()) {
+ case Instruction::FAdd:
+ case Instruction::FSub:
+ case Instruction::FMul:
+ case Instruction::FDiv:
+ case Instruction::FRem:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::FCmp:
+ return true;
+ }
+ }
+
for (Value *Operand : J->operands())
if (memAddrUsesCTR(TM, Operand))
return true;
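The rationale for the new soft-float check above: under soft-float every FP operation lowers to a runtime library call, and any call clobbers CTR. A hedged sketch of the effect (the libgcc helper name is shown for illustration only):

    extern "C" double __adddf3(double, double); // soft-float add helper

    // With -msoft-float the first function effectively becomes the second;
    // a loop containing such a call cannot keep its trip count in CTR.
    double addHard(double a, double b) { return a + b; }
    double addSoft(double a, double b) { return __adddf3(a, b); }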
diff --git a/lib/Target/PowerPC/PPCCallingConv.td b/lib/Target/PowerPC/PPCCallingConv.td
index 5bc9124f8085..53d2f77ff918 100644
--- a/lib/Target/PowerPC/PPCCallingConv.td
+++ b/lib/Target/PowerPC/PPCCallingConv.td
@@ -23,6 +23,9 @@ class CCIfNotSubtarget<string F, CCAction A>
"(State.getMachineFunction().getSubtarget()).",
F),
A>;
+class CCIfOrigArgWasNotPPCF128<CCAction A>
+ : CCIf<"!static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
+ A>;
//===----------------------------------------------------------------------===//
// Return Value Calling Convention
@@ -109,7 +112,7 @@ def RetCC_PPC64_ELF_FIS : CallingConv<[
CCIfType<[i8], CCPromoteToType<i64>>,
CCIfType<[i16], CCPromoteToType<i64>>,
CCIfType<[i32], CCPromoteToType<i64>>,
- CCIfType<[i64], CCAssignToReg<[X3, X4]>>,
+ CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
@@ -131,7 +134,14 @@ def CC_PPC32_SVR4_Common : CallingConv<[
// The ABI requires i64 to be passed in two adjacent registers with the first
// register having an odd register number.
- CCIfType<[i32], CCIfSplit<CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>,
+ CCIfType<[i32],
+ CCIfSplit<CCIfSubtarget<"useSoftFloat()",
+ CCIfOrigArgWasNotPPCF128<
+ CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>>,
+
+ CCIfType<[i32],
+ CCIfSplit<CCIfNotSubtarget<"useSoftFloat()",
+ CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>,
// The 'nest' parameter, if any, is passed in R11.
CCIfNest<CCAssignToReg<[R11]>>,
@@ -243,12 +253,23 @@ def CSR_SVR464 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20,
F27, F28, F29, F30, F31, CR2, CR3, CR4
)>;
+// CSRs that are handled by prologue, epilogue.
+def CSR_SRV464_TLS_PE : CalleeSavedRegs<(add)>;
+
+def CSR_SVR464_ViaCopy : CalleeSavedRegs<(add CSR_SVR464)>;
+
def CSR_SVR464_Altivec : CalleeSavedRegs<(add CSR_SVR464, CSR_Altivec)>;
+def CSR_SVR464_Altivec_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_Altivec)>;
+
def CSR_SVR464_R2 : CalleeSavedRegs<(add CSR_SVR464, X2)>;
+def CSR_SVR464_R2_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_R2)>;
+
def CSR_SVR464_R2_Altivec : CalleeSavedRegs<(add CSR_SVR464_Altivec, X2)>;
+def CSR_SVR464_R2_Altivec_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_R2_Altivec)>;
+
def CSR_NoRegs : CalleeSavedRegs<(add)>;
def CSR_64_AllRegs: CalleeSavedRegs<(add X0, (sequence "X%u", 3, 10),
diff --git a/lib/Target/PowerPC/PPCEarlyReturn.cpp b/lib/Target/PowerPC/PPCEarlyReturn.cpp
index 7cb1bb54c725..fcd2f50e1e3d 100644
--- a/lib/Target/PowerPC/PPCEarlyReturn.cpp
+++ b/lib/Target/PowerPC/PPCEarlyReturn.cpp
@@ -12,10 +12,10 @@
//
//===----------------------------------------------------------------------===//
-#include "PPCInstrInfo.h"
-#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
+#include "MCTargetDesc/PPCPredicates.h"
#include "PPCInstrBuilder.h"
+#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
@@ -26,7 +26,6 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
@@ -84,7 +83,7 @@ protected:
// This is an unconditional branch to the return. Replace the
// branch with a blr.
BuildMI(**PI, J, J->getDebugLoc(), TII->get(I->getOpcode()))
- .copyImplicitOps(I);
+ .copyImplicitOps(*I);
MachineBasicBlock::iterator K = J--;
K->eraseFromParent();
BlockChanged = true;
@@ -98,7 +97,7 @@ protected:
BuildMI(**PI, J, J->getDebugLoc(), TII->get(PPC::BCCLR))
.addImm(J->getOperand(0).getImm())
.addReg(J->getOperand(1).getReg())
- .copyImplicitOps(I);
+ .copyImplicitOps(*I);
MachineBasicBlock::iterator K = J--;
K->eraseFromParent();
BlockChanged = true;
@@ -113,7 +112,7 @@ protected:
**PI, J, J->getDebugLoc(),
TII->get(J->getOpcode() == PPC::BC ? PPC::BCLR : PPC::BCLRn))
.addReg(J->getOperand(0).getReg())
- .copyImplicitOps(I);
+ .copyImplicitOps(*I);
MachineBasicBlock::iterator K = J--;
K->eraseFromParent();
BlockChanged = true;
@@ -174,6 +173,9 @@ protected:
public:
bool runOnMachineFunction(MachineFunction &MF) override {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
TII = MF.getSubtarget().getInstrInfo();
bool Changed = false;
@@ -192,6 +194,11 @@ public:
return Changed;
}
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
void getAnalysisUsage(AnalysisUsage &AU) const override {
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -204,4 +211,3 @@ INITIALIZE_PASS(PPCEarlyReturn, DEBUG_TYPE,
char PPCEarlyReturn::ID = 0;
FunctionPass*
llvm::createPPCEarlyReturnPass() { return new PPCEarlyReturn(); }
-
diff --git a/lib/Target/PowerPC/PPCFastISel.cpp b/lib/Target/PowerPC/PPCFastISel.cpp
index 16dcd468c91d..7e92042d2f96 100644
--- a/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/lib/Target/PowerPC/PPCFastISel.cpp
@@ -16,6 +16,7 @@
#include "PPC.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCCallingConv.h"
+#include "PPCCCState.h"
#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCSubtarget.h"
@@ -158,7 +159,7 @@ class PPCFastISel final : public FastISel {
unsigned FP64LoadOpc = PPC::LFD);
bool PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr);
bool PPCComputeAddress(const Value *Obj, Address &Addr);
- void PPCSimplifyAddress(Address &Addr, MVT VT, bool &UseOffset,
+ void PPCSimplifyAddress(Address &Addr, bool &UseOffset,
unsigned &IndexReg);
bool PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
unsigned DestReg, bool IsZExt);
@@ -185,7 +186,7 @@ class PPCFastISel final : public FastISel {
unsigned &NumBytes,
bool IsVarArg);
bool finishCall(MVT RetVT, CallLoweringInfo &CLI, unsigned &NumBytes);
- CCAssignFn *usePPC32CCs(unsigned Flag);
+ LLVM_ATTRIBUTE_UNUSED CCAssignFn *usePPC32CCs(unsigned Flag);
private:
#include "PPCGenFastISel.inc"
@@ -196,7 +197,7 @@ class PPCFastISel final : public FastISel {
#include "PPCGenCallingConv.inc"
-// Function whose sole purpose is to kill compiler warnings
+// Function whose sole purpose is to kill compiler warnings
// stemming from unused functions included from PPCGenCallingConv.inc.
CCAssignFn *PPCFastISel::usePPC32CCs(unsigned Flag) {
if (Flag == 1)
@@ -213,13 +214,29 @@ static Optional<PPC::Predicate> getComparePred(CmpInst::Predicate Pred) {
switch (Pred) {
// These are not representable with any single compare.
case CmpInst::FCMP_FALSE:
+ case CmpInst::FCMP_TRUE:
+  // The major concern about the following 6 cases is the NaN result. The
+  // comparison result consists of 4 bits, indicating lt, eq, gt and un
+  // (unordered), only one of which will be set. The result is generated by
+  // the fcmpu instruction. However, the bc instruction only inspects one of
+  // the first 3 bits, so when un is set, bc may jump to an undesired place.
+  //
+  // More specifically, if we expect an unordered comparison and un is set, we
+  // expect to always take the true branch; in that case UEQ, UGT and ULT still
+  // give false, which is undesired, but UNE, UGE and ULE happen to give true,
+  // since they are tested by inspecting !eq, !lt and !gt, respectively.
+  //
+  // Similarly, for an ordered comparison, when un is set, we always expect the
+  // result to be false. In that case OGT, OLT and OEQ are fine, since they
+  // actually test GT, LT and EQ respectively, which are false. OGE, OLE and
+  // ONE are tested through !lt, !gt and !eq, and these are true.
case CmpInst::FCMP_UEQ:
case CmpInst::FCMP_UGT:
- case CmpInst::FCMP_UGE:
case CmpInst::FCMP_ULT:
- case CmpInst::FCMP_ULE:
- case CmpInst::FCMP_UNE:
- case CmpInst::FCMP_TRUE:
+ case CmpInst::FCMP_OGE:
+ case CmpInst::FCMP_OLE:
+ case CmpInst::FCMP_ONE:
default:
return Optional<PPC::Predicate>();
@@ -232,7 +249,7 @@ static Optional<PPC::Predicate> getComparePred(CmpInst::Predicate Pred) {
case CmpInst::ICMP_SGT:
return PPC::PRED_GT;
- case CmpInst::FCMP_OGE:
+ case CmpInst::FCMP_UGE:
case CmpInst::ICMP_UGE:
case CmpInst::ICMP_SGE:
return PPC::PRED_GE;
@@ -242,12 +259,12 @@ static Optional<PPC::Predicate> getComparePred(CmpInst::Predicate Pred) {
case CmpInst::ICMP_SLT:
return PPC::PRED_LT;
- case CmpInst::FCMP_OLE:
+ case CmpInst::FCMP_ULE:
case CmpInst::ICMP_ULE:
case CmpInst::ICMP_SLE:
return PPC::PRED_LE;
- case CmpInst::FCMP_ONE:
+ case CmpInst::FCMP_UNE:
case CmpInst::ICMP_NE:
return PPC::PRED_NE;
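A small host-side sketch that makes the NaN argument above concrete (plain C++, not target code; host FP semantics stand in for fcmpu):

    #include <cmath>
    #include <cstdio>

    int main() {
      double x = 1.0, y = std::nan("");
      bool lt = x < y, gt = x > y, eq = x == y;
      bool un = !(lt || gt || eq);  // unordered: the only bit set for NaN
      // PRED_GE branches on !lt, which is true here -- correct for FCMP_UGE
      // (unordered-or-ge) but wrong for FCMP_OGE, hence the remapping above.
      std::printf("lt=%d gt=%d eq=%d un=%d\n", lt, gt, eq, un);
      return 0;
    }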
@@ -412,7 +429,7 @@ bool PPCFastISel::PPCComputeAddress(const Value *Obj, Address &Addr) {
// Fix up some addresses that can't be used directly. For example, if
// an offset won't fit in an instruction field, we may need to move it
// into an index register.
-void PPCFastISel::PPCSimplifyAddress(Address &Addr, MVT VT, bool &UseOffset,
+void PPCFastISel::PPCSimplifyAddress(Address &Addr, bool &UseOffset,
unsigned &IndexReg) {
// Check whether the offset fits in the instruction field.
@@ -431,8 +448,7 @@ void PPCFastISel::PPCSimplifyAddress(Address &Addr, MVT VT, bool &UseOffset,
}
if (!UseOffset) {
- IntegerType *OffsetTy = ((VT == MVT::i32) ? Type::getInt32Ty(*Context)
- : Type::getInt64Ty(*Context));
+ IntegerType *OffsetTy = Type::getInt64Ty(*Context);
const ConstantInt *Offset =
ConstantInt::getSigned(OffsetTy, (int64_t)(Addr.Offset));
IndexReg = PPCMaterializeInt(Offset, MVT::i64);
@@ -501,7 +517,7 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
// If necessary, materialize the offset into a register and use
// the indexed form. Also handle stack pointers with special needs.
unsigned IndexReg = 0;
- PPCSimplifyAddress(Addr, VT, UseOffset, IndexReg);
+ PPCSimplifyAddress(Addr, UseOffset, IndexReg);
// If this is a potential VSX load with an offset of 0, a VSX indexed load can
// be used.
@@ -637,7 +653,7 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) {
// If necessary, materialize the offset into a register and use
// the indexed form. Also handle stack pointers with special needs.
unsigned IndexReg = 0;
- PPCSimplifyAddress(Addr, VT, UseOffset, IndexReg);
+ PPCSimplifyAddress(Addr, UseOffset, IndexReg);
// If this is a potential VSX store with an offset of 0, a VSX indexed store
// can be used.
@@ -1068,10 +1084,10 @@ unsigned PPCFastISel::PPCMoveToIntReg(const Instruction *I, MVT VT,
if (!PPCEmitStore(MVT::f64, SrcReg, Addr))
return 0;
- // Reload it into a GPR. If we want an i32, modify the address
- // to have a 4-byte offset so we load from the right place.
+ // Reload it into a GPR. If we want an i32 on big endian, modify the
+ // address to have a 4-byte offset so we load from the right place.
if (VT == MVT::i32)
- Addr.Offset = 4;
+ Addr.Offset = (PPCSubTarget->isLittleEndian()) ? 0 : 4;
// Look at the currently assigned register for this instruction
// to determine the required register class.
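The endian-dependent offset can be sanity-checked with a quick host sketch (the host's endianness stands in for the target's, so this is illustrative only):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      uint64_t bits = 0x1122334455667788ULL; // raw bits of a stored f64
      unsigned char buf[8];
      std::memcpy(buf, &bits, sizeof buf);
      uint32_t at0, at4;
      std::memcpy(&at0, buf + 0, 4);
      std::memcpy(&at4, buf + 4, 4);
      // Little endian: the low word 0x55667788 sits at offset 0; big endian:
      // at offset 4 -- matching the Addr.Offset choice above.
      std::printf("off0=%08x off4=%08x\n", at0, at4);
    }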
@@ -1115,14 +1131,13 @@ bool PPCFastISel::SelectFPToI(const Instruction *I, bool IsSigned) {
return false;
// Convert f32 to f64 if necessary. This is just a meaningless copy
- // to get the register class right. COPY_TO_REGCLASS is needed since
- // a COPY from F4RC to F8RC is converted to a F4RC-F4RC copy downstream.
+ // to get the register class right.
const TargetRegisterClass *InRC = MRI.getRegClass(SrcReg);
if (InRC == &PPC::F4RCRegClass) {
unsigned TmpReg = createResultReg(&PPC::F8RCRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(TargetOpcode::COPY_TO_REGCLASS), TmpReg)
- .addReg(SrcReg).addImm(PPC::F8RCRegClassID);
+ TII.get(TargetOpcode::COPY), TmpReg)
+ .addReg(SrcReg);
SrcReg = TmpReg;
}
@@ -1583,6 +1598,9 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
if (!FuncInfo.CanLowerReturn)
return false;
+ if (TLI.supportSplitCSR(FuncInfo.MF))
+ return false;
+
const ReturnInst *Ret = cast<ReturnInst>(I);
const Function &F = *I->getParent()->getParent();
@@ -2071,7 +2089,6 @@ unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm,
return TmpReg3;
}
-
// Materialize an integer constant into a register, and return
// the register number (or zero if we failed to handle it).
unsigned PPCFastISel::PPCMaterializeInt(const ConstantInt *CI, MVT VT,
@@ -2085,12 +2102,12 @@ unsigned PPCFastISel::PPCMaterializeInt(const ConstantInt *CI, MVT VT,
return ImmReg;
}
- if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16 &&
- VT != MVT::i8 && VT != MVT::i1)
+ if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 &&
+ VT != MVT::i1)
return 0;
- const TargetRegisterClass *RC = ((VT == MVT::i64) ? &PPC::G8RCRegClass :
- &PPC::GPRCRegClass);
+ const TargetRegisterClass *RC =
+ ((VT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass);
int64_t Imm = UseSExt ? CI->getSExtValue() : CI->getZExtValue();
// If the constant is in range, use a load-immediate.
diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp
index 3fd509ae27f4..c480430dd29a 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -76,9 +76,7 @@ static unsigned computeBasePointerSaveOffset(const PPCSubtarget &STI) {
// SVR4 ABI: First slot in the general register save area.
return STI.isPPC64()
? -16U
- : (STI.getTargetMachine().getRelocationModel() == Reloc::PIC_)
- ? -12U
- : -8U;
+ : STI.getTargetMachine().isPositionIndependent() ? -12U : -8U;
}
PPCFrameLowering::PPCFrameLowering(const PPCSubtarget &STI)
@@ -596,7 +594,7 @@ PPCFrameLowering::findScratchRegister(MachineBasicBlock *MBB,
(!UseAtEnd && (&MBB->getParent()->front() == MBB)))
return true;
- RS.enterBasicBlock(MBB);
+ RS.enterBasicBlock(*MBB);
if (UseAtEnd && !MBB->empty()) {
// The scratch register will be used at the end of the block, so must
@@ -653,7 +651,7 @@ PPCFrameLowering::findScratchRegister(MachineBasicBlock *MBB,
// Now that we've done our best to provide both registers, double check
// whether we were unable to provide enough.
- if (BV.count() < (TwoUniqueRegsRequired ? 2 : 1))
+ if (BV.count() < (TwoUniqueRegsRequired ? 2U : 1U))
return false;
return true;
@@ -838,13 +836,20 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF,
// If we need to spill the CR and the LR but we don't have two separate
// registers available, we must spill them one at a time
if (MustSaveCR && SingleScratchReg && MustSaveLR) {
- // FIXME: In the ELFv2 ABI, we are not required to save all CR fields.
- // If only one or two CR fields are clobbered, it could be more
- // efficient to use mfocrf to selectively save just those fields.
+ // In the ELFv2 ABI, we are not required to save all CR fields.
+ // If only one or two CR fields are clobbered, it is more efficient to use
+    // mfocrf to selectively save just those fields, because mfocrf has
+    // shorter latency than mfcr.
+ unsigned MfcrOpcode = PPC::MFCR8;
+ unsigned CrState = RegState::ImplicitKill;
+ if (isELFv2ABI && MustSaveCRs.size() == 1) {
+ MfcrOpcode = PPC::MFOCRF8;
+ CrState = RegState::Kill;
+ }
MachineInstrBuilder MIB =
- BuildMI(MBB, MBBI, dl, TII.get(PPC::MFCR8), TempReg);
+ BuildMI(MBB, MBBI, dl, TII.get(MfcrOpcode), TempReg);
for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i)
- MIB.addReg(MustSaveCRs[i], RegState::ImplicitKill);
+ MIB.addReg(MustSaveCRs[i], CrState);
BuildMI(MBB, MBBI, dl, TII.get(PPC::STW8))
.addReg(TempReg, getKillRegState(true))
.addImm(8)
@@ -856,13 +861,20 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF,
if (MustSaveCR &&
!(SingleScratchReg && MustSaveLR)) { // will only occur for PPC64
- // FIXME: In the ELFv2 ABI, we are not required to save all CR fields.
- // If only one or two CR fields are clobbered, it could be more
- // efficient to use mfocrf to selectively save just those fields.
+ // In the ELFv2 ABI, we are not required to save all CR fields.
+ // If only one or two CR fields are clobbered, it is more efficient to use
+    // mfocrf to selectively save just those fields, because mfocrf has
+    // shorter latency than mfcr.
+ unsigned MfcrOpcode = PPC::MFCR8;
+ unsigned CrState = RegState::ImplicitKill;
+ if (isELFv2ABI && MustSaveCRs.size() == 1) {
+ MfcrOpcode = PPC::MFOCRF8;
+ CrState = RegState::Kill;
+ }
MachineInstrBuilder MIB =
- BuildMI(MBB, MBBI, dl, TII.get(PPC::MFCR8), TempReg);
+ BuildMI(MBB, MBBI, dl, TII.get(MfcrOpcode), TempReg);
for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i)
- MIB.addReg(MustSaveCRs[i], RegState::ImplicitKill);
+ MIB.addReg(MustSaveCRs[i], CrState);
}
if (HasFP)
@@ -889,7 +901,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF,
if (MustSaveLR)
// FIXME: On PPC32 SVR4, we must not spill before claiming the stackframe.
BuildMI(MBB, MBBI, dl, StoreInst)
- .addReg(ScratchReg)
+ .addReg(ScratchReg, getKillRegState(true))
.addImm(LROffset)
.addReg(SPReg);
@@ -1315,36 +1327,53 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
.addReg(FPReg)
.addReg(ScratchReg);
}
- } else if (RetOpcode == PPC::TCRETURNdi) {
- MBBI = MBB.getLastNonDebugInstr();
- MachineOperand &JumpTarget = MBBI->getOperand(0);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB)).
- addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
- } else if (RetOpcode == PPC::TCRETURNri) {
- MBBI = MBB.getLastNonDebugInstr();
- assert(MBBI->getOperand(0).isReg() && "Expecting register operand.");
- BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBCTR));
- } else if (RetOpcode == PPC::TCRETURNai) {
- MBBI = MBB.getLastNonDebugInstr();
- MachineOperand &JumpTarget = MBBI->getOperand(0);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBA)).addImm(JumpTarget.getImm());
- } else if (RetOpcode == PPC::TCRETURNdi8) {
- MBBI = MBB.getLastNonDebugInstr();
- MachineOperand &JumpTarget = MBBI->getOperand(0);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB8)).
- addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
- } else if (RetOpcode == PPC::TCRETURNri8) {
- MBBI = MBB.getLastNonDebugInstr();
- assert(MBBI->getOperand(0).isReg() && "Expecting register operand.");
- BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBCTR8));
- } else if (RetOpcode == PPC::TCRETURNai8) {
- MBBI = MBB.getLastNonDebugInstr();
- MachineOperand &JumpTarget = MBBI->getOperand(0);
- BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBA8)).addImm(JumpTarget.getImm());
+ } else {
+ createTailCallBranchInstr(MBB);
}
}
}
+void PPCFrameLowering::createTailCallBranchInstr(MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
+ DebugLoc dl;
+
+ if (MBBI != MBB.end())
+ dl = MBBI->getDebugLoc();
+
+ const PPCInstrInfo &TII =
+ *static_cast<const PPCInstrInfo *>(Subtarget.getInstrInfo());
+
+  // Create the branch instruction for the pseudo tail call return instruction.
+  assert(MBBI != MBB.end() && "Expecting a terminator instruction");
+  unsigned RetOpcode = MBBI->getOpcode();
+ if (RetOpcode == PPC::TCRETURNdi) {
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB)).
+ addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
+ } else if (RetOpcode == PPC::TCRETURNri) {
+ MBBI = MBB.getLastNonDebugInstr();
+ assert(MBBI->getOperand(0).isReg() && "Expecting register operand.");
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBCTR));
+ } else if (RetOpcode == PPC::TCRETURNai) {
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBA)).addImm(JumpTarget.getImm());
+ } else if (RetOpcode == PPC::TCRETURNdi8) {
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB8)).
+ addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
+ } else if (RetOpcode == PPC::TCRETURNri8) {
+ MBBI = MBB.getLastNonDebugInstr();
+ assert(MBBI->getOperand(0).isReg() && "Expecting register operand.");
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBCTR8));
+ } else if (RetOpcode == PPC::TCRETURNai8) {
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBA8)).addImm(JumpTarget.getImm());
+ }
+}
+
void PPCFrameLowering::determineCalleeSaves(MachineFunction &MF,
BitVector &SavedRegs,
RegScavenger *RS) const {
@@ -1421,6 +1450,18 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF,
MachineFrameInfo *FFI = MF.getFrameInfo();
const std::vector<CalleeSavedInfo> &CSI = FFI->getCalleeSavedInfo();
+  // If the function is shrink-wrapped and has a tail call, the tail call might
+  // not be in the new RestoreBlock chosen by shrink-wrapping, so the real
+  // branch instruction won't be generated by emitEpilogue(). Handle that case
+  // here.
+ if (FFI->getSavePoint() && FFI->hasTailCall()) {
+ MachineBasicBlock *RestoreBlock = FFI->getRestorePoint();
+ for (MachineBasicBlock &MBB : MF) {
+ if (MBB.isReturnBlock() && (&MBB) != RestoreBlock)
+ createTailCallBranchInstr(MBB);
+ }
+ }
+
// Early exit if no callee saved registers are modified!
if (CSI.empty() && !needsFP(MF)) {
addScavengingSpillSlot(MF, RS);
@@ -1770,7 +1811,7 @@ restoreCRs(bool isPPC64, bool is31,
.addReg(MoveReg, getKillRegState(true)));
}
-void PPCFrameLowering::
+MachineBasicBlock::iterator PPCFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
@@ -1787,7 +1828,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
unsigned LISInstr = is64Bit ? PPC::LIS8 : PPC::LIS;
unsigned ORIInstr = is64Bit ? PPC::ORI8 : PPC::ORI;
MachineInstr *MI = I;
- DebugLoc dl = MI->getDebugLoc();
+ const DebugLoc &dl = MI->getDebugLoc();
if (isInt<16>(CalleeAmt)) {
BuildMI(MBB, I, dl, TII.get(ADDIInstr), StackReg)
@@ -1807,7 +1848,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
}
}
// Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions.
- MBB.erase(I);
+ return MBB.erase(I);
}
bool
diff --git a/lib/Target/PowerPC/PPCFrameLowering.h b/lib/Target/PowerPC/PPCFrameLowering.h
index f1f3f0b831a7..28b0c57f0ffb 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.h
+++ b/lib/Target/PowerPC/PPCFrameLowering.h
@@ -66,6 +66,13 @@ class PPCFrameLowering: public TargetFrameLowering {
unsigned *SR2 = nullptr) const;
bool twoUniqueScratchRegsRequired(MachineBasicBlock *MBB) const;
+ /**
+ * \brief Create branch instruction for PPC::TCRETURN* (tail call return)
+ *
+   * \param[in] MBB The block that is terminated by PPC::TCRETURN*
+ */
+ void createTailCallBranchInstr(MachineBasicBlock &MBB) const;
+
public:
PPCFrameLowering(const PPCSubtarget &STI);
@@ -93,9 +100,9 @@ public:
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const override;
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const override;
+ MachineBasicBlock::iterator
+ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
diff --git a/lib/Target/PowerPC/PPCHazardRecognizers.cpp b/lib/Target/PowerPC/PPCHazardRecognizers.cpp
index 7234e30fa73e..caab67d68b17 100644
--- a/lib/Target/PowerPC/PPCHazardRecognizers.cpp
+++ b/lib/Target/PowerPC/PPCHazardRecognizers.cpp
@@ -162,8 +162,9 @@ unsigned PPCDispatchGroupSBHazardRecognizer::PreEmitNoops(SUnit *SU) {
unsigned Directive =
DAG->MF.getSubtarget<PPCSubtarget>().getDarwinDirective();
// If we're using a special group-terminating nop, then we need only one.
+  // FIXME: Treat P9 like prior generations until POWER9 scheduling is ready.
if (Directive == PPC::DIR_PWR6 || Directive == PPC::DIR_PWR7 ||
- Directive == PPC::DIR_PWR8 )
+ Directive == PPC::DIR_PWR8 || Directive == PPC::DIR_PWR9)
return 1;
return 5 - CurSlots;
@@ -223,8 +224,10 @@ void PPCDispatchGroupSBHazardRecognizer::EmitNoop() {
DAG->MF.getSubtarget<PPCSubtarget>().getDarwinDirective();
// If the group has now filled all of its slots, or if we're using a special
// group-terminating nop, the group is complete.
+  // FIXME: Treat P9 like prior generations until POWER9 scheduling is ready.
if (Directive == PPC::DIR_PWR6 || Directive == PPC::DIR_PWR7 ||
- Directive == PPC::DIR_PWR8 || CurSlots == 6) {
+      Directive == PPC::DIR_PWR8 || Directive == PPC::DIR_PWR9 ||
+ CurSlots == 6) {
CurGroup.clear();
CurSlots = CurBranches = 0;
} else {
diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 1eaa8118ba0a..0e9b2daa0cb5 100644
--- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -59,10 +59,6 @@ static cl::opt<bool> EnableBranchHint(
cl::desc("Enable static hinting of branches on ppc"),
cl::Hidden);
-namespace llvm {
- void initializePPCDAGToDAGISelPass(PassRegistry&);
-}
-
namespace {
//===--------------------------------------------------------------------===//
/// PPCDAGToDAGISel - PPC specific code to select PPC machine
@@ -75,9 +71,7 @@ namespace {
unsigned GlobalBaseReg;
public:
explicit PPCDAGToDAGISel(PPCTargetMachine &tm)
- : SelectionDAGISel(tm), TM(tm) {
- initializePPCDAGToDAGISelPass(*PassRegistry::getPassRegistry());
- }
+ : SelectionDAGISel(tm), TM(tm) {}
bool runOnMachineFunction(MachineFunction &MF) override {
// Make sure we re-emit a set of the global base reg if necessary
@@ -97,18 +91,18 @@ namespace {
/// getI32Imm - Return a target constant with the specified value, of type
/// i32.
- inline SDValue getI32Imm(unsigned Imm, SDLoc dl) {
+ inline SDValue getI32Imm(unsigned Imm, const SDLoc &dl) {
return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
}
/// getI64Imm - Return a target constant with the specified value, of type
/// i64.
- inline SDValue getI64Imm(uint64_t Imm, SDLoc dl) {
+ inline SDValue getI64Imm(uint64_t Imm, const SDLoc &dl) {
return CurDAG->getTargetConstant(Imm, dl, MVT::i64);
}
/// getSmallIPtrImm - Return a target constant of pointer type.
- inline SDValue getSmallIPtrImm(unsigned Imm, SDLoc dl) {
+ inline SDValue getSmallIPtrImm(unsigned Imm, const SDLoc &dl) {
return CurDAG->getTargetConstant(
Imm, dl, PPCLowering->getPointerTy(CurDAG->getDataLayout()));
}
@@ -122,18 +116,19 @@ namespace {
/// base register. Return the virtual register that holds this value.
SDNode *getGlobalBaseReg();
- SDNode *getFrameIndex(SDNode *SN, SDNode *N, unsigned Offset = 0);
+ void selectFrameIndex(SDNode *SN, SDNode *N, unsigned Offset = 0);
// Select - Convert the specified operand from a target-independent to a
// target-specific node if it hasn't already been changed.
- SDNode *Select(SDNode *N) override;
+ void Select(SDNode *N) override;
- SDNode *SelectBitfieldInsert(SDNode *N);
- SDNode *SelectBitPermutation(SDNode *N);
+ bool tryBitfieldInsert(SDNode *N);
+ bool tryBitPermutation(SDNode *N);
/// SelectCC - Select a comparison of the specified values with the
/// specified condition code, returning the CR# of the expression.
- SDValue SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDLoc dl);
+ SDValue SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
+ const SDLoc &dl);
/// SelectAddrImm - Returns true if the address N can be represented by
/// a base register plus a signed 16-bit displacement [r+imm].
@@ -228,7 +223,7 @@ namespace {
#include "PPCGenDAGISel.inc"
private:
- SDNode *SelectSETCC(SDNode *N);
+ bool trySETCC(SDNode *N);
void PeepholePPC64();
void PeepholePPC64ZExt();
@@ -240,7 +235,7 @@ private:
bool AllUsersSelectZero(SDNode *N);
void SwapAllSelectUsers(SDNode *N);
- SDNode *transferMemOperands(SDNode *N, SDNode *Result);
+ void transferMemOperands(SDNode *N, SDNode *Result);
};
}
@@ -324,7 +319,7 @@ SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
if (PPCLowering->getPointerTy(CurDAG->getDataLayout()) == MVT::i32) {
if (PPCSubTarget->isTargetELF()) {
GlobalBaseReg = PPC::R30;
- if (M->getPICLevel() == PICLevel::Small) {
+ if (M->getPICLevel() == PICLevel::SmallPIC) {
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MoveGOTtoLR));
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
MF->getInfo<PPCFunctionInfo>()->setUsesPICBase(true);
@@ -458,16 +453,17 @@ static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
&& isInt32Immediate(N->getOperand(1).getNode(), Imm);
}
-SDNode *PPCDAGToDAGISel::getFrameIndex(SDNode *SN, SDNode *N, unsigned Offset) {
+void PPCDAGToDAGISel::selectFrameIndex(SDNode *SN, SDNode *N, unsigned Offset) {
SDLoc dl(SN);
int FI = cast<FrameIndexSDNode>(N)->getIndex();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, N->getValueType(0));
unsigned Opc = N->getValueType(0) == MVT::i32 ? PPC::ADDI : PPC::ADDI8;
if (SN->hasOneUse())
- return CurDAG->SelectNodeTo(SN, Opc, N->getValueType(0), TFI,
- getSmallIPtrImm(Offset, dl));
- return CurDAG->getMachineNode(Opc, dl, N->getValueType(0), TFI,
- getSmallIPtrImm(Offset, dl));
+ CurDAG->SelectNodeTo(SN, Opc, N->getValueType(0), TFI,
+ getSmallIPtrImm(Offset, dl));
+ else
+ ReplaceNode(SN, CurDAG->getMachineNode(Opc, dl, N->getValueType(0), TFI,
+ getSmallIPtrImm(Offset, dl)));
}
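The selectFrameIndex rewrite above shows the idiom this whole file is being migrated to: selection helpers no longer return the selected node; they either morph the node in place with SelectNodeTo or build a replacement and install it with ReplaceNode. A minimal sketch of the new Select shape (MyDAGToDAGISel and tryCustomSelect are hypothetical names, not from this patch):

void MyDAGToDAGISel::Select(SDNode *N) {
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return; // Already selected.
  }
  // Hypothetical target matcher; returns null when nothing matched.
  if (SDNode *New = tryCustomSelect(N)) {
    ReplaceNode(N, New); // replace all uses of N and mark it dead
    return;
  }
  SelectCode(N); // fall back to the TableGen-generated matcher
}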
bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
@@ -512,9 +508,9 @@ bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
return false;
}
-/// SelectBitfieldInsert - turn an or of two masked values into
-/// the rotate left word immediate then mask insert (rlwimi) instruction.
-SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
+/// Turn an or of two masked values into the rotate left word immediate then
+/// mask insert (rlwimi) instruction.
+bool PPCDAGToDAGISel::tryBitfieldInsert(SDNode *N) {
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
SDLoc dl(N);
@@ -584,15 +580,16 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
SH &= 31;
SDValue Ops[] = { Op0, Op1, getI32Imm(SH, dl), getI32Imm(MB, dl),
getI32Imm(ME, dl) };
- return CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops));
+ return true;
}
}
- return nullptr;
+ return false;
}
// Predict the number of instructions that would be generated by calling
-// SelectInt64(N).
-static unsigned SelectInt64CountDirect(int64_t Imm) {
+// getInt64(N).
+static unsigned getInt64CountDirect(int64_t Imm) {
// Assume no remaining bits.
unsigned Remainder = 0;
// Assume no shift required.
@@ -653,17 +650,17 @@ static uint64_t Rot64(uint64_t Imm, unsigned R) {
return (Imm << R) | (Imm >> (64 - R));
}
-static unsigned SelectInt64Count(int64_t Imm) {
- unsigned Count = SelectInt64CountDirect(Imm);
+static unsigned getInt64Count(int64_t Imm) {
+ unsigned Count = getInt64CountDirect(Imm);
if (Count == 1)
return Count;
for (unsigned r = 1; r < 63; ++r) {
uint64_t RImm = Rot64(Imm, r);
- unsigned RCount = SelectInt64CountDirect(RImm) + 1;
+ unsigned RCount = getInt64CountDirect(RImm) + 1;
Count = std::min(Count, RCount);
- // See comments in SelectInt64 for an explanation of the logic below.
+ // See comments in getInt64 for an explanation of the logic below.
unsigned LS = findLastSet(RImm);
if (LS != r-1)
continue;
@@ -671,16 +668,17 @@ static unsigned SelectInt64Count(int64_t Imm) {
uint64_t OnesMask = -(int64_t) (UINT64_C(1) << (LS+1));
uint64_t RImmWithOnes = RImm | OnesMask;
- RCount = SelectInt64CountDirect(RImmWithOnes) + 1;
+ RCount = getInt64CountDirect(RImmWithOnes) + 1;
Count = std::min(Count, RCount);
}
return Count;
}
-// Select a 64-bit constant. For cost-modeling purposes, SelectInt64Count
+// Select a 64-bit constant. For cost-modeling purposes, getInt64Count
// (above) needs to be kept in sync with this function.
-static SDNode *SelectInt64Direct(SelectionDAG *CurDAG, SDLoc dl, int64_t Imm) {
+static SDNode *getInt64Direct(SelectionDAG *CurDAG, const SDLoc &dl,
+ int64_t Imm) {
// Assume no remaining bits.
unsigned Remainder = 0;
// Assume no shift required.
@@ -754,10 +752,10 @@ static SDNode *SelectInt64Direct(SelectionDAG *CurDAG, SDLoc dl, int64_t Imm) {
return Result;
}
-static SDNode *SelectInt64(SelectionDAG *CurDAG, SDLoc dl, int64_t Imm) {
- unsigned Count = SelectInt64CountDirect(Imm);
+static SDNode *getInt64(SelectionDAG *CurDAG, const SDLoc &dl, int64_t Imm) {
+ unsigned Count = getInt64CountDirect(Imm);
if (Count == 1)
- return SelectInt64Direct(CurDAG, dl, Imm);
+ return getInt64Direct(CurDAG, dl, Imm);
unsigned RMin = 0;
@@ -766,7 +764,7 @@ static SDNode *SelectInt64(SelectionDAG *CurDAG, SDLoc dl, int64_t Imm) {
for (unsigned r = 1; r < 63; ++r) {
uint64_t RImm = Rot64(Imm, r);
- unsigned RCount = SelectInt64CountDirect(RImm) + 1;
+ unsigned RCount = getInt64CountDirect(RImm) + 1;
if (RCount < Count) {
Count = RCount;
RMin = r;
@@ -789,7 +787,7 @@ static SDNode *SelectInt64(SelectionDAG *CurDAG, SDLoc dl, int64_t Imm) {
uint64_t OnesMask = -(int64_t) (UINT64_C(1) << (LS+1));
uint64_t RImmWithOnes = RImm | OnesMask;
- RCount = SelectInt64CountDirect(RImmWithOnes) + 1;
+ RCount = getInt64CountDirect(RImmWithOnes) + 1;
if (RCount < Count) {
Count = RCount;
RMin = r;
@@ -799,24 +797,24 @@ static SDNode *SelectInt64(SelectionDAG *CurDAG, SDLoc dl, int64_t Imm) {
}
if (!RMin)
- return SelectInt64Direct(CurDAG, dl, Imm);
+ return getInt64Direct(CurDAG, dl, Imm);
auto getI32Imm = [CurDAG, dl](unsigned Imm) {
return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
};
- SDValue Val = SDValue(SelectInt64Direct(CurDAG, dl, MatImm), 0);
+ SDValue Val = SDValue(getInt64Direct(CurDAG, dl, MatImm), 0);
return CurDAG->getMachineNode(PPC::RLDICR, dl, MVT::i64, Val,
getI32Imm(64 - RMin), getI32Imm(MaskEnd));
}
// Select a 64-bit constant.
-static SDNode *SelectInt64(SelectionDAG *CurDAG, SDNode *N) {
+static SDNode *getInt64(SelectionDAG *CurDAG, SDNode *N) {
SDLoc dl(N);
// Get 64 bit value.
int64_t Imm = cast<ConstantSDNode>(N)->getZExtValue();
- return SelectInt64(CurDAG, dl, Imm);
+ return getInt64(CurDAG, dl, Imm);
}
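To recap the renamed helpers: getInt64 costs the direct materialization first, then checks whether some left rotation of Imm would be cheaper to build; if so, it materializes the rotated value and undoes the rotation with a single RLDICR (rotate left by 64 - RMin). A compact restatement of the costing loop, mirroring getInt64Count above but ignoring the OnesMask sign-extension wrinkle:

unsigned Best = getInt64CountDirect(Imm);   // cost of the direct sequence
for (unsigned r = 1; r < 63; ++r)           // try every rotation amount
  Best = std::min(Best, getInt64CountDirect(Rot64(Imm, r)) + 1 /*RLDICR*/);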
namespace {
@@ -1209,7 +1207,7 @@ class BitPermutationSelector {
"bit group ends at index 63 but there is another?");
auto IN = BitGroups.begin();
- if (IP->Repl32 && IN->Repl32 && I->V == IP->V && I->V == IN->V &&
+ if (IP->Repl32 && IN->Repl32 && I->V == IP->V && I->V == IN->V &&
(I->RLAmt % 32) == IP->RLAmt && (I->RLAmt % 32) == IN->RLAmt &&
IP->EndIdx == 31 && IN->StartIdx == 0 && I != IP &&
IsAllLow32(*I)) {
@@ -1252,7 +1250,7 @@ class BitPermutationSelector {
}
}
- SDValue getI32Imm(unsigned Imm, SDLoc dl) {
+ SDValue getI32Imm(unsigned Imm, const SDLoc &dl) {
return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
}
@@ -1270,7 +1268,7 @@ class BitPermutationSelector {
// Depending on the number of groups for a particular value, it might be
// better to rotate, mask explicitly (using andi/andis), and then or the
// result. Select this part of the result first.
- void SelectAndParts32(SDLoc dl, SDValue &Res, unsigned *InstCnt) {
+ void SelectAndParts32(const SDLoc &dl, SDValue &Res, unsigned *InstCnt) {
if (BPermRewriterNoMasking)
return;
@@ -1466,8 +1464,8 @@ class BitPermutationSelector {
// For 64-bit values, not all combinations of rotates and masks are
// available. Produce one if it is available.
- SDValue SelectRotMask64(SDValue V, SDLoc dl, unsigned RLAmt, bool Repl32,
- unsigned MaskStart, unsigned MaskEnd,
+ SDValue SelectRotMask64(SDValue V, const SDLoc &dl, unsigned RLAmt,
+ bool Repl32, unsigned MaskStart, unsigned MaskEnd,
unsigned *InstCnt = nullptr) {
// In the notation used by the instructions, 'start' and 'end' are reversed
// because bits are counted from high to low order.
@@ -1527,8 +1525,8 @@ class BitPermutationSelector {
// For 64-bit values, not all combinations of rotates and masks are
// available. Produce a rotate-mask-and-insert if one is available.
- SDValue SelectRotMaskIns64(SDValue Base, SDValue V, SDLoc dl, unsigned RLAmt,
- bool Repl32, unsigned MaskStart,
+ SDValue SelectRotMaskIns64(SDValue Base, SDValue V, const SDLoc &dl,
+ unsigned RLAmt, bool Repl32, unsigned MaskStart,
unsigned MaskEnd, unsigned *InstCnt = nullptr) {
// In the notation used by the instructions, 'start' and 'end' are reversed
// because bits are counted from high to low order.
@@ -1574,7 +1572,7 @@ class BitPermutationSelector {
return SelectRotMaskIns64(Base, V, dl, RLAmt2, false, MaskStart, MaskEnd);
}
- void SelectAndParts64(SDLoc dl, SDValue &Res, unsigned *InstCnt) {
+ void SelectAndParts64(const SDLoc &dl, SDValue &Res, unsigned *InstCnt) {
if (BPermRewriterNoMasking)
return;
@@ -1646,7 +1644,7 @@ class BitPermutationSelector {
NumAndInsts += (unsigned) (ANDIMask != 0) + (unsigned) (ANDISMask != 0) +
(unsigned) (ANDIMask != 0 && ANDISMask != 0);
else
- NumAndInsts += SelectInt64Count(Mask) + /* and */ 1;
+ NumAndInsts += getInt64Count(Mask) + /* and */ 1;
unsigned NumRLInsts = 0;
bool FirstBG = true;
@@ -1709,7 +1707,7 @@ class BitPermutationSelector {
TotalVal = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64,
ANDIVal, ANDISVal), 0);
} else {
- TotalVal = SDValue(SelectInt64(CurDAG, dl, Mask), 0);
+ TotalVal = SDValue(getInt64(CurDAG, dl, Mask), 0);
TotalVal =
SDValue(CurDAG->getMachineNode(PPC::AND8, dl, MVT::i64,
VRot, TotalVal), 0);
@@ -1852,9 +1850,9 @@ class BitPermutationSelector {
Res = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64,
ANDIVal, ANDISVal), 0);
} else {
- if (InstCnt) *InstCnt += SelectInt64Count(Mask) + /* and */ 1;
+ if (InstCnt) *InstCnt += getInt64Count(Mask) + /* and */ 1;
- SDValue MaskVal = SDValue(SelectInt64(CurDAG, dl, Mask), 0);
+ SDValue MaskVal = SDValue(getInt64(CurDAG, dl, Mask), 0);
Res =
SDValue(CurDAG->getMachineNode(PPC::AND8, dl, MVT::i64,
Res, MaskVal), 0);
@@ -1955,13 +1953,13 @@ public:
};
} // anonymous namespace
-SDNode *PPCDAGToDAGISel::SelectBitPermutation(SDNode *N) {
+bool PPCDAGToDAGISel::tryBitPermutation(SDNode *N) {
if (N->getValueType(0) != MVT::i32 &&
N->getValueType(0) != MVT::i64)
- return nullptr;
+ return false;
if (!UseBitPermRewriter)
- return nullptr;
+ return false;
switch (N->getOpcode()) {
default: break;
@@ -1971,17 +1969,21 @@ SDNode *PPCDAGToDAGISel::SelectBitPermutation(SDNode *N) {
case ISD::AND:
case ISD::OR: {
BitPermutationSelector BPS(CurDAG);
- return BPS.Select(N);
+ if (SDNode *New = BPS.Select(N)) {
+ ReplaceNode(N, New);
+ return true;
+ }
+ return false;
}
}
- return nullptr;
+ return false;
}
/// SelectCC - Select a comparison of the specified values with the specified
/// condition code, returning the CR# of the expression.
-SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
- ISD::CondCode CC, SDLoc dl) {
+SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
+ const SDLoc &dl) {
// Always select the LHS.
unsigned Opc;
@@ -2255,7 +2257,7 @@ static unsigned int getVCmpInst(MVT VecVT, ISD::CondCode CC,
}
}
-SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
+bool PPCDAGToDAGISel::trySETCC(SDNode *N) {
SDLoc dl(N);
unsigned Imm;
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
@@ -2276,20 +2278,22 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
Op = SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, Op), 0);
SDValue Ops[] = { Op, getI32Imm(27, dl), getI32Imm(5, dl),
getI32Imm(31, dl) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ return true;
}
case ISD::SETNE: {
if (isPPC64) break;
SDValue AD =
SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
Op, getI32Imm(~0U, dl)), 0);
- return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, AD, Op,
- AD.getValue(1));
+ CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, AD, Op, AD.getValue(1));
+ return true;
}
case ISD::SETLT: {
SDValue Ops[] = { Op, getI32Imm(1, dl), getI32Imm(31, dl),
getI32Imm(31, dl) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ return true;
}
case ISD::SETGT: {
SDValue T =
@@ -2297,7 +2301,8 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
T = SDValue(CurDAG->getMachineNode(PPC::ANDC, dl, MVT::i32, T, Op), 0);
SDValue Ops[] = { T, getI32Imm(1, dl), getI32Imm(31, dl),
getI32Imm(31, dl) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ return true;
}
}
} else if (Imm == ~0U) { // setcc op, -1
@@ -2308,18 +2313,20 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
if (isPPC64) break;
Op = SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
Op, getI32Imm(1, dl)), 0);
- return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
- SDValue(CurDAG->getMachineNode(PPC::LI, dl,
- MVT::i32,
- getI32Imm(0, dl)),
- 0), Op.getValue(1));
+ CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
+ SDValue(CurDAG->getMachineNode(PPC::LI, dl,
+ MVT::i32,
+ getI32Imm(0, dl)),
+ 0), Op.getValue(1));
+ return true;
case ISD::SETNE: {
if (isPPC64) break;
Op = SDValue(CurDAG->getMachineNode(PPC::NOR, dl, MVT::i32, Op, Op), 0);
SDNode *AD = CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
Op, getI32Imm(~0U, dl));
- return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDValue(AD, 0),
- Op, SDValue(AD, 1));
+ CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDValue(AD, 0), Op,
+ SDValue(AD, 1));
+ return true;
}
case ISD::SETLT: {
SDValue AD = SDValue(CurDAG->getMachineNode(PPC::ADDI, dl, MVT::i32, Op,
@@ -2328,14 +2335,15 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
Op), 0);
SDValue Ops[] = { AN, getI32Imm(1, dl), getI32Imm(31, dl),
getI32Imm(31, dl) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ return true;
}
case ISD::SETGT: {
SDValue Ops[] = { Op, getI32Imm(1, dl), getI32Imm(31, dl),
getI32Imm(31, dl) };
Op = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0);
- return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Op,
- getI32Imm(1, dl));
+ CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Op, getI32Imm(1, dl));
+ return true;
}
}
}
@@ -2348,7 +2356,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
// vector compare operations return the same type as the operands.
if (LHS.getValueType().isVector()) {
if (PPCSubTarget->hasQPX())
- return nullptr;
+ return false;
EVT VecVT = LHS.getValueType();
bool Swap, Negate;
@@ -2360,16 +2368,17 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
EVT ResVT = VecVT.changeVectorElementTypeToInteger();
if (Negate) {
SDValue VCmp(CurDAG->getMachineNode(VCmpInst, dl, ResVT, LHS, RHS), 0);
- return CurDAG->SelectNodeTo(N, PPCSubTarget->hasVSX() ? PPC::XXLNOR :
- PPC::VNOR,
- ResVT, VCmp, VCmp);
+ CurDAG->SelectNodeTo(N, PPCSubTarget->hasVSX() ? PPC::XXLNOR : PPC::VNOR,
+ ResVT, VCmp, VCmp);
+ return true;
}
- return CurDAG->SelectNodeTo(N, VCmpInst, ResVT, LHS, RHS);
+ CurDAG->SelectNodeTo(N, VCmpInst, ResVT, LHS, RHS);
+ return true;
}
if (PPCSubTarget->useCRBits())
- return nullptr;
+ return false;
bool Inv;
unsigned Idx = getCRIdxForSetCC(CC, Inv);
@@ -2388,31 +2397,33 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
SDValue Ops[] = { IntCR, getI32Imm((32 - (3 - Idx)) & 31, dl),
getI32Imm(31, dl), getI32Imm(31, dl) };
- if (!Inv)
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ if (!Inv) {
+ CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ return true;
+ }
// Get the specified bit.
SDValue Tmp =
SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0);
- return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1, dl));
+ CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1, dl));
+ return true;
}
-SDNode *PPCDAGToDAGISel::transferMemOperands(SDNode *N, SDNode *Result) {
+void PPCDAGToDAGISel::transferMemOperands(SDNode *N, SDNode *Result) {
// Transfer memoperands.
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1);
- return Result;
}
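With transferMemOperands now returning void, every memory-touching call site in Select follows the same three-step shape (lifted from the load-selection hunks below; Opcode, VTs and Ops stand for whatever the call site supplies):

SDNode *MN = CurDAG->getMachineNode(Opcode, dl, VTs, Ops); // build replacement
transferMemOperands(N, MN); // carry N's MachineMemOperand over to MN
ReplaceNode(N, MN);         // splice MN in for N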
// Select - Convert the specified operand from a target-independent to a
// target-specific node if it hasn't already been changed.
-SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
+void PPCDAGToDAGISel::Select(SDNode *N) {
SDLoc dl(N);
if (N->isMachineOpcode()) {
N->setNodeId(-1);
- return nullptr; // Already selected.
+ return; // Already selected.
}
// In case any misguided DAG-level optimizations form an ADD with a
@@ -2423,39 +2434,44 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
llvm_unreachable("Invalid ADD with TargetConstant operand");
// Try matching complex bit permutations before doing anything else.
- if (SDNode *NN = SelectBitPermutation(N))
- return NN;
+ if (tryBitPermutation(N))
+ return;
switch (N->getOpcode()) {
default: break;
case ISD::Constant: {
- if (N->getValueType(0) == MVT::i64)
- return SelectInt64(CurDAG, N);
+ if (N->getValueType(0) == MVT::i64) {
+ ReplaceNode(N, getInt64(CurDAG, N));
+ return;
+ }
break;
}
case ISD::SETCC: {
- SDNode *SN = SelectSETCC(N);
- if (SN)
- return SN;
+ if (trySETCC(N))
+ return;
break;
}
case PPCISD::GlobalBaseReg:
- return getGlobalBaseReg();
+ ReplaceNode(N, getGlobalBaseReg());
+ return;
case ISD::FrameIndex:
- return getFrameIndex(N, N);
+ selectFrameIndex(N, N);
+ return;
case PPCISD::MFOCRF: {
SDValue InFlag = N->getOperand(1);
- return CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32,
- N->getOperand(0), InFlag);
+ ReplaceNode(N, CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32,
+ N->getOperand(0), InFlag));
+ return;
}
case PPCISD::READ_TIME_BASE: {
- return CurDAG->getMachineNode(PPC::ReadTB, dl, MVT::i32, MVT::i32,
- MVT::Other, N->getOperand(0));
+ ReplaceNode(N, CurDAG->getMachineNode(PPC::ReadTB, dl, MVT::i32, MVT::i32,
+ MVT::Other, N->getOperand(0)));
+ return;
}
case PPCISD::SRA_ADDZE: {
@@ -2468,16 +2484,18 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDNode *Op =
CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, MVT::Glue,
N0, ShiftAmt);
- return CurDAG->SelectNodeTo(N, PPC::ADDZE8, MVT::i64,
- SDValue(Op, 0), SDValue(Op, 1));
+ CurDAG->SelectNodeTo(N, PPC::ADDZE8, MVT::i64, SDValue(Op, 0),
+ SDValue(Op, 1));
+ return;
} else {
assert(N->getValueType(0) == MVT::i32 &&
"Expecting i64 or i32 in PPCISD::SRA_ADDZE");
SDNode *Op =
CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, MVT::Glue,
N0, ShiftAmt);
- return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
- SDValue(Op, 0), SDValue(Op, 1));
+ CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32, SDValue(Op, 0),
+ SDValue(Op, 1));
+ return;
}
}
@@ -2524,11 +2542,12 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
SDValue Ops[] = { Offset, Base, Chain };
- return transferMemOperands(
- N, CurDAG->getMachineNode(
- Opcode, dl, LD->getValueType(0),
- PPCLowering->getPointerTy(CurDAG->getDataLayout()), MVT::Other,
- Ops));
+ SDNode *MN = CurDAG->getMachineNode(
+ Opcode, dl, LD->getValueType(0),
+ PPCLowering->getPointerTy(CurDAG->getDataLayout()), MVT::Other, Ops);
+ transferMemOperands(N, MN);
+ ReplaceNode(N, MN);
+ return;
} else {
unsigned Opcode;
bool isSExt = LD->getExtensionType() == ISD::SEXTLOAD;
@@ -2563,11 +2582,12 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
SDValue Ops[] = { Base, Offset, Chain };
- return transferMemOperands(
- N, CurDAG->getMachineNode(
- Opcode, dl, LD->getValueType(0),
- PPCLowering->getPointerTy(CurDAG->getDataLayout()), MVT::Other,
- Ops));
+ SDNode *MN = CurDAG->getMachineNode(
+ Opcode, dl, LD->getValueType(0),
+ PPCLowering->getPointerTy(CurDAG->getDataLayout()), MVT::Other, Ops);
+ transferMemOperands(N, MN);
+ ReplaceNode(N, MN);
+ return;
}
}
@@ -2582,7 +2602,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Val = N->getOperand(0).getOperand(0);
SDValue Ops[] = { Val, getI32Imm(SH, dl), getI32Imm(MB, dl),
getI32Imm(ME, dl) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ return;
}
// If this is just a masked value where the input is not handled above, and
// is not a rotate-left (handled by a pattern in the .td file), emit rlwinm
@@ -2592,7 +2613,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Val = N->getOperand(0);
SDValue Ops[] = { Val, getI32Imm(0, dl), getI32Imm(MB, dl),
getI32Imm(ME, dl) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ return;
}
// If this is a 64-bit zero-extension mask, emit rldicl.
if (isInt64Immediate(N->getOperand(1).getNode(), Imm64) &&
@@ -2614,12 +2636,13 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
}
SDValue Ops[] = { Val, getI32Imm(SH, dl), getI32Imm(MB, dl) };
- return CurDAG->SelectNodeTo(N, PPC::RLDICL, MVT::i64, Ops);
+ CurDAG->SelectNodeTo(N, PPC::RLDICL, MVT::i64, Ops);
+ return;
}
// AND X, 0 -> 0, not "rlwinm 32".
if (isInt32Immediate(N->getOperand(1), Imm) && (Imm == 0)) {
ReplaceUses(SDValue(N, 0), N->getOperand(1));
- return nullptr;
+ return;
}
// ISD::OR doesn't get all the bitfield insertion fun.
// (and (or x, c1), c2) where isRunOfOnes(~(c1^c2)) might be a
@@ -2645,7 +2668,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
N->getOperand(0).getOperand(1),
getI32Imm(0, dl), getI32Imm(MB, dl),
getI32Imm(ME, dl) };
- return CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops));
+ return;
}
}
@@ -2654,8 +2678,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
}
case ISD::OR: {
if (N->getValueType(0) == MVT::i32)
- if (SDNode *I = SelectBitfieldInsert(N))
- return I;
+ if (tryBitfieldInsert(N))
+ return;
short Imm;
if (N->getOperand(0)->getOpcode() == ISD::FrameIndex &&
@@ -2665,8 +2689,10 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
// If this is equivalent to an add, then we can fold it with the
// FrameIndex calculation.
- if ((LHSKnownZero.getZExtValue()|~(uint64_t)Imm) == ~0ULL)
- return getFrameIndex(N, N->getOperand(0).getNode(), (int)Imm);
+ if ((LHSKnownZero.getZExtValue()|~(uint64_t)Imm) == ~0ULL) {
+ selectFrameIndex(N, N->getOperand(0).getNode(), (int)Imm);
+ return;
+ }
}
// Other cases are autogenerated.
@@ -2675,8 +2701,10 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
case ISD::ADD: {
short Imm;
if (N->getOperand(0)->getOpcode() == ISD::FrameIndex &&
- isIntS16Immediate(N->getOperand(1), Imm))
- return getFrameIndex(N, N->getOperand(0).getNode(), (int)Imm);
+ isIntS16Immediate(N->getOperand(1), Imm)) {
+ selectFrameIndex(N, N->getOperand(0).getNode(), (int)Imm);
+ return;
+ }
break;
}
@@ -2687,7 +2715,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Ops[] = { N->getOperand(0).getOperand(0),
getI32Imm(SH, dl), getI32Imm(MB, dl),
getI32Imm(ME, dl) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ return;
}
// Other cases are autogenerated.
@@ -2700,7 +2729,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Ops[] = { N->getOperand(0).getOperand(0),
getI32Imm(SH, dl), getI32Imm(MB, dl),
getI32Imm(ME, dl) };
- return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops);
+ return;
}
// Other cases are autogenerated.
@@ -2726,9 +2756,9 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
CurDAG->getTargetConstant(N->getOpcode() == PPCISD::ANDIo_1_EQ_BIT ?
PPC::sub_eq : PPC::sub_gt, dl, MVT::i32);
- return CurDAG->SelectNodeTo(N, TargetOpcode::EXTRACT_SUBREG, MVT::i1,
- CR0Reg, SRIdxVal,
- SDValue(AndI.getNode(), 1) /* glue */);
+ CurDAG->SelectNodeTo(N, TargetOpcode::EXTRACT_SUBREG, MVT::i1, CR0Reg,
+ SRIdxVal, SDValue(AndI.getNode(), 1) /* glue */);
+ return;
}
case ISD::SELECT_CC: {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get();
@@ -2753,9 +2783,9 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDNode *Tmp =
CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
N->getOperand(0), getI32Imm(~0U, dl));
- return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32,
- SDValue(Tmp, 0), N->getOperand(0),
- SDValue(Tmp, 1));
+ CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDValue(Tmp, 0),
+ N->getOperand(0), SDValue(Tmp, 1));
+ return;
}
SDValue CCReg = SelectCC(N->getOperand(0), N->getOperand(1), CC, dl);
@@ -2786,7 +2816,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue NotCAndF(CurDAG->getMachineNode(PPC::CRAND, dl, MVT::i1,
NotC, N->getOperand(3)), 0);
- return CurDAG->SelectNodeTo(N, PPC::CROR, MVT::i1, CAndT, NotCAndF);
+ CurDAG->SelectNodeTo(N, PPC::CROR, MVT::i1, CAndT, NotCAndF);
+ return;
}
unsigned BROpc = getPredicateForSetCC(CC);
@@ -2820,12 +2851,14 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Ops[] = { CCReg, N->getOperand(2), N->getOperand(3),
getI32Imm(BROpc, dl) };
- return CurDAG->SelectNodeTo(N, SelectCCOp, N->getValueType(0), Ops);
+ CurDAG->SelectNodeTo(N, SelectCCOp, N->getValueType(0), Ops);
+ return;
}
case ISD::VSELECT:
if (PPCSubTarget->hasVSX()) {
SDValue Ops[] = { N->getOperand(2), N->getOperand(1), N->getOperand(0) };
- return CurDAG->SelectNodeTo(N, PPC::XXSEL, N->getValueType(0), Ops);
+ CurDAG->SelectNodeTo(N, PPC::XXSEL, N->getValueType(0), Ops);
+ return;
}
break;
@@ -2856,8 +2889,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SelectAddrIdxOnly(LD->getBasePtr(), Base, Offset)) {
SDValue Chain = LD->getChain();
SDValue Ops[] = { Base, Offset, Chain };
- return CurDAG->SelectNodeTo(N, PPC::LXVDSX,
- N->getValueType(0), Ops);
+ CurDAG->SelectNodeTo(N, PPC::LXVDSX, N->getValueType(0), Ops);
+ return;
}
}
@@ -2873,7 +2906,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue DMV = CurDAG->getTargetConstant(DM[1] | (DM[0] << 1), dl,
MVT::i32);
SDValue Ops[] = { Op1, Op2, DMV };
- return CurDAG->SelectNodeTo(N, PPC::XXPERMDI, N->getValueType(0), Ops);
+ CurDAG->SelectNodeTo(N, PPC::XXPERMDI, N->getValueType(0), Ops);
+ return;
}
break;
@@ -2881,10 +2915,11 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
case PPCISD::BDZ: {
bool IsPPC64 = PPCSubTarget->isPPC64();
SDValue Ops[] = { N->getOperand(1), N->getOperand(0) };
- return CurDAG->SelectNodeTo(N, N->getOpcode() == PPCISD::BDNZ ?
- (IsPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
- (IsPPC64 ? PPC::BDZ8 : PPC::BDZ),
- MVT::Other, Ops);
+ CurDAG->SelectNodeTo(N, N->getOpcode() == PPCISD::BDNZ
+ ? (IsPPC64 ? PPC::BDNZ8 : PPC::BDNZ)
+ : (IsPPC64 ? PPC::BDZ8 : PPC::BDZ),
+ MVT::Other, Ops);
+ return;
}
case PPCISD::COND_BRANCH: {
// Op #0 is the Chain.
@@ -2900,7 +2935,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Pred = getI32Imm(PCC, dl);
SDValue Ops[] = { Pred, N->getOperand(2), N->getOperand(3),
N->getOperand(0), N->getOperand(4) };
- return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops);
+ CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops);
+ return;
}
case ISD::BR_CC: {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
@@ -2922,8 +2958,9 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue BitComp(CurDAG->getMachineNode(Opc, dl, MVT::i1,
N->getOperand(Swap ? 3 : 2),
N->getOperand(Swap ? 2 : 3)), 0);
- return CurDAG->SelectNodeTo(N, PPC::BC, MVT::Other,
- BitComp, N->getOperand(4), N->getOperand(0));
+ CurDAG->SelectNodeTo(N, PPC::BC, MVT::Other, BitComp, N->getOperand(4),
+ N->getOperand(0));
+ return;
}
if (EnableBranchHint)
@@ -2932,7 +2969,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue CondCode = SelectCC(N->getOperand(2), N->getOperand(3), CC, dl);
SDValue Ops[] = { getI32Imm(PCC, dl), CondCode,
N->getOperand(4), N->getOperand(0) };
- return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops);
+ CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops);
+ return;
}
case ISD::BRIND: {
// FIXME: Should custom lower this.
@@ -2942,15 +2980,19 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
unsigned Reg = Target.getValueType() == MVT::i32 ? PPC::BCTR : PPC::BCTR8;
Chain = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Target,
Chain), 0);
- return CurDAG->SelectNodeTo(N, Reg, MVT::Other, Chain);
+ CurDAG->SelectNodeTo(N, Reg, MVT::Other, Chain);
+ return;
}
case PPCISD::TOC_ENTRY: {
assert ((PPCSubTarget->isPPC64() || PPCSubTarget->isSVR4ABI()) &&
"Only supported for 64-bit ABI and 32-bit SVR4");
if (PPCSubTarget->isSVR4ABI() && !PPCSubTarget->isPPC64()) {
SDValue GA = N->getOperand(0);
- return transferMemOperands(N, CurDAG->getMachineNode(PPC::LWZtoc, dl,
- MVT::i32, GA, N->getOperand(1)));
+ SDNode *MN = CurDAG->getMachineNode(PPC::LWZtoc, dl, MVT::i32, GA,
+ N->getOperand(1));
+ transferMemOperands(N, MN);
+ ReplaceNode(N, MN);
+ return;
}
// For medium and large code model, we generate two instructions as
@@ -2971,29 +3013,38 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
TOCbase, GA);
if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA) ||
- CModel == CodeModel::Large)
- return transferMemOperands(N, CurDAG->getMachineNode(PPC::LDtocL, dl,
- MVT::i64, GA, SDValue(Tmp, 0)));
+ CModel == CodeModel::Large) {
+ SDNode *MN = CurDAG->getMachineNode(PPC::LDtocL, dl, MVT::i64, GA,
+ SDValue(Tmp, 0));
+ transferMemOperands(N, MN);
+ ReplaceNode(N, MN);
+ return;
+ }
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA)) {
const GlobalValue *GV = G->getGlobal();
unsigned char GVFlags = PPCSubTarget->classifyGlobalReference(GV);
if (GVFlags & PPCII::MO_NLP_FLAG) {
- return transferMemOperands(N, CurDAG->getMachineNode(PPC::LDtocL, dl,
- MVT::i64, GA, SDValue(Tmp, 0)));
+ SDNode *MN = CurDAG->getMachineNode(PPC::LDtocL, dl, MVT::i64, GA,
+ SDValue(Tmp, 0));
+ transferMemOperands(N, MN);
+ ReplaceNode(N, MN);
+ return;
}
}
- return CurDAG->getMachineNode(PPC::ADDItocL, dl, MVT::i64,
- SDValue(Tmp, 0), GA);
+ ReplaceNode(N, CurDAG->getMachineNode(PPC::ADDItocL, dl, MVT::i64,
+ SDValue(Tmp, 0), GA));
+ return;
}
case PPCISD::PPC32_PICGOT: {
// Generate a PIC-safe GOT reference.
assert(!PPCSubTarget->isPPC64() && PPCSubTarget->isSVR4ABI() &&
"PPCISD::PPC32_PICGOT is only supported for 32-bit SVR4");
- return CurDAG->SelectNodeTo(
- N, PPC::PPC32PICGOT, PPCLowering->getPointerTy(CurDAG->getDataLayout()),
- MVT::i32);
+ CurDAG->SelectNodeTo(N, PPC::PPC32PICGOT,
+ PPCLowering->getPointerTy(CurDAG->getDataLayout()),
+ MVT::i32);
+ return;
}
case PPCISD::VADD_SPLAT: {
// This expands into one of three sequences, depending on whether
@@ -3035,7 +3086,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue EltVal = getI32Imm(Elt >> 1, dl);
SDNode *Tmp = CurDAG->getMachineNode(Opc1, dl, VT, EltVal);
SDValue TmpVal = SDValue(Tmp, 0);
- return CurDAG->getMachineNode(Opc2, dl, VT, TmpVal, TmpVal);
+ ReplaceNode(N, CurDAG->getMachineNode(Opc2, dl, VT, TmpVal, TmpVal));
+ return;
} else if (Elt > 0) {
// Elt is odd and positive, in the range [17,31].
@@ -3048,8 +3100,9 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDNode *Tmp1 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal);
EltVal = getI32Imm(-16, dl);
SDNode *Tmp2 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal);
- return CurDAG->getMachineNode(Opc3, dl, VT, SDValue(Tmp1, 0),
- SDValue(Tmp2, 0));
+ ReplaceNode(N, CurDAG->getMachineNode(Opc3, dl, VT, SDValue(Tmp1, 0),
+ SDValue(Tmp2, 0)));
+ return;
} else {
// Elt is odd and negative, in the range [-31,-17].
@@ -3062,13 +3115,14 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDNode *Tmp1 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal);
EltVal = getI32Imm(-16, dl);
SDNode *Tmp2 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal);
- return CurDAG->getMachineNode(Opc2, dl, VT, SDValue(Tmp1, 0),
- SDValue(Tmp2, 0));
+ ReplaceNode(N, CurDAG->getMachineNode(Opc2, dl, VT, SDValue(Tmp1, 0),
+ SDValue(Tmp2, 0)));
+ return;
}
}
}
- return SelectCode(N);
+ SelectCode(N);
}
// If the target supports the cmpb instruction, do the idiom recognition here.
@@ -3565,11 +3619,12 @@ void PPCDAGToDAGISel::PeepholeCROps() {
MVT::i1, MachineNode->getOperand(0),
MachineNode->getOperand(1).
getOperand(0));
- else if (AllUsersSelectZero(MachineNode))
+ else if (AllUsersSelectZero(MachineNode)) {
ResNode = CurDAG->getMachineNode(PPC::CRNAND, SDLoc(MachineNode),
MVT::i1, MachineNode->getOperand(0),
- MachineNode->getOperand(1)),
+ MachineNode->getOperand(1));
SelectSwap = true;
+ }
break;
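This fix (repeated below for the other CR logical ops) is purely structural: the oddly placed parenthesis meant the two assignments formed one comma-expression statement, so both already ran under the else-if; the braces now state that intent plainly. In miniature, with hypothetical names:

if (cond)
  x = f(), selectSwap = true; // old: one comma-expression statement

if (cond) {                   // new: identical behavior, stated plainly
  x = f();
  selectSwap = true;
}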
case PPC::CRNAND:
if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
@@ -3603,11 +3658,12 @@ void PPCDAGToDAGISel::PeepholeCROps() {
MVT::i1, MachineNode->getOperand(1).
getOperand(0),
MachineNode->getOperand(0));
- else if (AllUsersSelectZero(MachineNode))
+ else if (AllUsersSelectZero(MachineNode)) {
ResNode = CurDAG->getMachineNode(PPC::CRAND, SDLoc(MachineNode),
MVT::i1, MachineNode->getOperand(0),
- MachineNode->getOperand(1)),
+ MachineNode->getOperand(1));
SelectSwap = true;
+ }
break;
case PPC::CROR:
if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
@@ -3635,11 +3691,12 @@ void PPCDAGToDAGISel::PeepholeCROps() {
MVT::i1, MachineNode->getOperand(0),
MachineNode->getOperand(1).
getOperand(0));
- else if (AllUsersSelectZero(MachineNode))
+ else if (AllUsersSelectZero(MachineNode)) {
ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode),
MVT::i1, MachineNode->getOperand(0),
- MachineNode->getOperand(1)),
+ MachineNode->getOperand(1));
SelectSwap = true;
+ }
break;
case PPC::CRXOR:
if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
@@ -3674,11 +3731,12 @@ void PPCDAGToDAGISel::PeepholeCROps() {
MVT::i1, MachineNode->getOperand(0),
MachineNode->getOperand(1).
getOperand(0));
- else if (AllUsersSelectZero(MachineNode))
+ else if (AllUsersSelectZero(MachineNode)) {
ResNode = CurDAG->getMachineNode(PPC::CREQV, SDLoc(MachineNode),
MVT::i1, MachineNode->getOperand(0),
- MachineNode->getOperand(1)),
+ MachineNode->getOperand(1));
SelectSwap = true;
+ }
break;
case PPC::CRNOR:
if (Op1Set || Op2Set)
@@ -3707,11 +3765,12 @@ void PPCDAGToDAGISel::PeepholeCROps() {
MVT::i1, MachineNode->getOperand(1).
getOperand(0),
MachineNode->getOperand(0));
- else if (AllUsersSelectZero(MachineNode))
+ else if (AllUsersSelectZero(MachineNode)) {
ResNode = CurDAG->getMachineNode(PPC::CROR, SDLoc(MachineNode),
MVT::i1, MachineNode->getOperand(0),
- MachineNode->getOperand(1)),
+ MachineNode->getOperand(1));
SelectSwap = true;
+ }
break;
case PPC::CREQV:
if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
@@ -3746,11 +3805,12 @@ void PPCDAGToDAGISel::PeepholeCROps() {
MVT::i1, MachineNode->getOperand(0),
MachineNode->getOperand(1).
getOperand(0));
- else if (AllUsersSelectZero(MachineNode))
+ else if (AllUsersSelectZero(MachineNode)) {
ResNode = CurDAG->getMachineNode(PPC::CRXOR, SDLoc(MachineNode),
MVT::i1, MachineNode->getOperand(0),
- MachineNode->getOperand(1)),
+ MachineNode->getOperand(1));
SelectSwap = true;
+ }
break;
case PPC::CRANDC:
if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
@@ -3781,11 +3841,12 @@ void PPCDAGToDAGISel::PeepholeCROps() {
MVT::i1, MachineNode->getOperand(0),
MachineNode->getOperand(1).
getOperand(0));
- else if (AllUsersSelectZero(MachineNode))
+ else if (AllUsersSelectZero(MachineNode)) {
ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode),
MVT::i1, MachineNode->getOperand(1),
- MachineNode->getOperand(0)),
+ MachineNode->getOperand(0));
SelectSwap = true;
+ }
break;
case PPC::CRORC:
if (MachineNode->getOperand(0) == MachineNode->getOperand(1))
@@ -3816,11 +3877,12 @@ void PPCDAGToDAGISel::PeepholeCROps() {
MVT::i1, MachineNode->getOperand(0),
MachineNode->getOperand(1).
getOperand(0));
- else if (AllUsersSelectZero(MachineNode))
+ else if (AllUsersSelectZero(MachineNode)) {
ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode),
MVT::i1, MachineNode->getOperand(1),
- MachineNode->getOperand(0)),
+ MachineNode->getOperand(0));
SelectSwap = true;
+ }
break;
case PPC::SELECT_I4:
case PPC::SELECT_I8:
@@ -4365,15 +4427,3 @@ void PPCDAGToDAGISel::PeepholePPC64() {
FunctionPass *llvm::createPPCISelDag(PPCTargetMachine &TM) {
return new PPCDAGToDAGISel(TM);
}
-
-static void initializePassOnce(PassRegistry &Registry) {
- const char *Name = "PowerPC DAG->DAG Pattern Instruction Selection";
- PassInfo *PI = new PassInfo(Name, "ppc-codegen", &SelectionDAGISel::ID,
- nullptr, false, false);
- Registry.registerPass(*PI, true);
-}
-
-void llvm::initializePPCDAGToDAGISelPass(PassRegistry &Registry) {
- CALL_ONCE_INITIALIZATION(initializePassOnce);
-}
-
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index af9ad077a7ce..6e3c830a8243 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -14,11 +14,13 @@
#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCCallingConv.h"
+#include "PPCCCState.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "PPCTargetObjectFile.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/CallingConvLower.h"
@@ -36,12 +38,16 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
+#include <list>
using namespace llvm;
+#define DEBUG_TYPE "ppc-lowering"
+
static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
@@ -51,6 +57,12 @@ cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hi
static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
+static cl::opt<bool> DisableSCO("disable-ppc-sco",
+cl::desc("disable sibling call optimization on ppc"), cl::Hidden);
+
+STATISTIC(NumTailCalls, "Number of tail calls");
+STATISTIC(NumSiblingCalls, "Number of sibling calls");
+
// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;
@@ -68,7 +80,7 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
// Set up the register classes.
addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
- if (!Subtarget.useSoftFloat()) {
+ if (!useSoftFloat()) {
addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
}
@@ -207,14 +219,10 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
// PowerPC does not have BSWAP, CTPOP or CTTZ
setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
- if (Subtarget.hasPOPCNTD()) {
+ if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
setOperationAction(ISD::CTPOP, MVT::i32 , Legal);
setOperationAction(ISD::CTPOP, MVT::i64 , Legal);
} else {
@@ -255,7 +263,7 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
- if (Subtarget.hasDirectMove()) {
+ if (Subtarget.hasDirectMove() && isPPC64) {
setOperationAction(ISD::BITCAST, MVT::f32, Legal);
setOperationAction(ISD::BITCAST, MVT::i32, Legal);
setOperationAction(ISD::BITCAST, MVT::i64, Legal);
@@ -479,9 +487,7 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
setOperationAction(ISD::FPOW, VT, Expand);
setOperationAction(ISD::BSWAP, VT, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::CTTZ, VT, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::VSELECT, VT, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
setOperationAction(ISD::ROTL, VT, Expand);
@@ -557,7 +563,7 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
}
- if (Subtarget.hasDirectMove()) {
+ if (Subtarget.hasDirectMove() && isPPC64) {
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
@@ -647,6 +653,11 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
+ setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
+ setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
+ setOperationAction(ISD::FABS, MVT::v4f32, Legal);
+ setOperationAction(ISD::FABS, MVT::v2f64, Legal);
+
addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
}
@@ -654,6 +665,10 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
}
+ if (Subtarget.hasP9Vector()) {
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Legal);
+ }
}
if (Subtarget.hasQPX()) {
@@ -840,6 +855,7 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::SINT_TO_FP);
+ setTargetDAGCombine(ISD::BUILD_VECTOR);
if (Subtarget.hasFPCVT())
setTargetDAGCombine(ISD::UINT_TO_FP);
setTargetDAGCombine(ISD::LOAD);
@@ -906,13 +922,12 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
case PPC::DIR_PWR6X:
case PPC::DIR_PWR7:
case PPC::DIR_PWR8:
+ case PPC::DIR_PWR9:
setPrefFunctionAlignment(4);
setPrefLoopAlignment(4);
break;
}
- setInsertFencesForAtomic(true);
-
if (Subtarget.enableMachineScheduler())
setSchedulingPreference(Sched::Source);
else
@@ -1006,6 +1021,9 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
case PPCISD::VPERM: return "PPCISD::VPERM";
+ case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
+ case PPCISD::XXINSERT: return "PPCISD::XXINSERT";
+ case PPCISD::VECSHL: return "PPCISD::VECSHL";
case PPCISD::CMPB: return "PPCISD::CMPB";
case PPCISD::Hi: return "PPCISD::Hi";
case PPCISD::Lo: return "PPCISD::Lo";
@@ -1030,6 +1048,8 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
case PPCISD::MFVSR: return "PPCISD::MFVSR";
case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
+ case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP";
+ case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP";
case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT";
case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT";
case PPCISD::VCMP: return "PPCISD::VCMP";
@@ -1069,6 +1089,7 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
case PPCISD::RFEBB: return "PPCISD::RFEBB";
case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
+ case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
case PPCISD::QVFPERM: return "PPCISD::QVFPERM";
case PPCISD::QVGPCI: return "PPCISD::QVGPCI";
case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI";
@@ -1480,6 +1501,91 @@ bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
return true;
}
+bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
+ unsigned &InsertAtByte, bool &Swap, bool IsLE) {
+
+ // Check that the mask is shuffling words
+ for (unsigned i = 0; i < 4; ++i) {
+ unsigned B0 = N->getMaskElt(i*4);
+ unsigned B1 = N->getMaskElt(i*4+1);
+ unsigned B2 = N->getMaskElt(i*4+2);
+ unsigned B3 = N->getMaskElt(i*4+3);
+ if (B0 % 4)
+ return false;
+ if (B1 != B0+1 || B2 != B1+1 || B3 != B2+1)
+ return false;
+ }
+
+ // Now we look at mask elements 0,4,8,12
+ unsigned M0 = N->getMaskElt(0) / 4;
+ unsigned M1 = N->getMaskElt(4) / 4;
+ unsigned M2 = N->getMaskElt(8) / 4;
+ unsigned M3 = N->getMaskElt(12) / 4;
+ unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
+ unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
+
+ // Below, let H and L be arbitrary elements of the shuffle mask
+ // where H is in the range [4,7] and L is in the range [0,3].
+ // H, 1, 2, 3 or L, 5, 6, 7
+ if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
+ (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
+ ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
+ InsertAtByte = IsLE ? 12 : 0;
+ Swap = M0 < 4;
+ return true;
+ }
+ // 0, H, 2, 3 or 4, L, 6, 7
+ if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
+ (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
+ ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
+ InsertAtByte = IsLE ? 8 : 4;
+ Swap = M1 < 4;
+ return true;
+ }
+ // 0, 1, H, 3 or 4, 5, L, 7
+ if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
+ (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
+ ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
+ InsertAtByte = IsLE ? 4 : 8;
+ Swap = M2 < 4;
+ return true;
+ }
+ // 0, 1, 2, H or 4, 5, 6, L
+ if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
+ (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
+ ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
+ InsertAtByte = IsLE ? 0 : 12;
+ Swap = M3 < 4;
+ return true;
+ }
+
+ // If both vector operands for the shuffle are the same vector, the mask will
+ // contain only elements from the first one and the second one will be undef.
+ if (N->getOperand(1).isUndef()) {
+ ShiftElts = 0;
+ Swap = true;
+ unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
+ if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
+ InsertAtByte = IsLE ? 12 : 0;
+ return true;
+ }
+ if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
+ InsertAtByte = IsLE ? 8 : 4;
+ return true;
+ }
+ if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
+ InsertAtByte = IsLE ? 4 : 8;
+ return true;
+ }
+ if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
+ InsertAtByte = IsLE ? 0 : 12;
+ return true;
+ }
+ }
+
+ return false;
+}
+
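A worked example of the matching above (derived directly from the code, little-endian): take a v4i32 shuffle whose word-level mask <M0, M1, M2, M3> is <7, 1, 2, 3>. The first pattern ("H, 1, 2, 3") fires and produces:

ShiftElts    = LittleEndianShifts[7 & 0x3]; // LittleEndianShifts[3] == 3
InsertAtByte = 12;                          // IsLE ? 12 : 0
Swap         = false;                       // M0 < 4 fails since M0 == 7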
/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
@@ -1511,7 +1617,7 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
// See if all of the elements in the buildvector agree across.
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
- if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
+ if (N->getOperand(i).isUndef()) continue;
// If the element isn't a constant, bail fully out.
if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
@@ -1557,7 +1663,7 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
// Check to see if this buildvec has a single non-undef value in its elements.
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
- if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
+ if (N->getOperand(i).isUndef()) continue;
if (!OpVal.getNode())
OpVal = N->getOperand(i);
else if (OpVal != N->getOperand(i))
@@ -1950,19 +2056,16 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
// LowerOperation implementation
//===----------------------------------------------------------------------===//
-/// GetLabelAccessInfo - Return true if we should reference labels using a
-/// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags.
-static bool GetLabelAccessInfo(const TargetMachine &TM,
- const PPCSubtarget &Subtarget,
+/// Set the HiOpFlags and LoOpFlags to the target MO flags used to reference
+/// labels, given whether a PICBase is in use.
+static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
unsigned &HiOpFlags, unsigned &LoOpFlags,
const GlobalValue *GV = nullptr) {
HiOpFlags = PPCII::MO_HA;
LoOpFlags = PPCII::MO_LO;
// Don't use the pic base if not in PIC relocation model.
- bool isPIC = TM.getRelocationModel() == Reloc::PIC_;
-
- if (isPIC) {
+ if (IsPIC) {
HiOpFlags |= PPCII::MO_PIC_FLAG;
LoOpFlags |= PPCII::MO_PIC_FLAG;
}
@@ -1978,8 +2081,6 @@ static bool GetLabelAccessInfo(const TargetMachine &TM,
LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
}
}
-
- return isPIC;
}
static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
@@ -2010,7 +2111,7 @@ static void setUsesTOCBasePtr(SelectionDAG &DAG) {
setUsesTOCBasePtr(DAG.getMachineFunction());
}
-static SDValue getTOCEntry(SelectionDAG &DAG, SDLoc dl, bool Is64Bit,
+static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit,
SDValue GA) {
EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) :
@@ -2038,10 +2139,10 @@ SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
}
unsigned MOHiFlag, MOLoFlag;
- bool isPIC =
- GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag);
+ bool IsPIC = isPositionIndependent();
+ getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
- if (isPIC && Subtarget.isSVR4ABI()) {
+ if (IsPIC && Subtarget.isSVR4ABI()) {
SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
PPCII::MO_PIC_FLAG);
return getTOCEntry(DAG, SDLoc(CP), false, GA);
@@ -2051,7 +2152,7 @@ SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
SDValue CPILo =
DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
- return LowerLabelRef(CPIHi, CPILo, isPIC, DAG);
+ return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
}
SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
@@ -2067,10 +2168,10 @@ SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
}
unsigned MOHiFlag, MOLoFlag;
- bool isPIC =
- GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag);
+ bool IsPIC = isPositionIndependent();
+ getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
- if (isPIC && Subtarget.isSVR4ABI()) {
+ if (IsPIC && Subtarget.isSVR4ABI()) {
SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
PPCII::MO_PIC_FLAG);
return getTOCEntry(DAG, SDLoc(GA), false, GA);
@@ -2078,7 +2179,7 @@ SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
- return LowerLabelRef(JTIHi, JTILo, isPIC, DAG);
+ return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
}
SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
@@ -2096,11 +2197,11 @@ SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
}
unsigned MOHiFlag, MOLoFlag;
- bool isPIC =
- GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag);
+ bool IsPIC = isPositionIndependent();
+ getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
- return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
+ return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
}
SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
@@ -2160,7 +2261,7 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
GOTReg, TGA);
} else {
- if (picLevel == PICLevel::Small)
+ if (picLevel == PICLevel::SmallPIC)
GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
else
GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
@@ -2178,7 +2279,7 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
GOTReg, TGA);
} else {
- if (picLevel == PICLevel::Small)
+ if (picLevel == PICLevel::SmallPIC)
GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
else
GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
@@ -2209,10 +2310,10 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
}
unsigned MOHiFlag, MOLoFlag;
- bool isPIC =
- GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag, GV);
+ bool IsPIC = isPositionIndependent();
+ getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
- if (isPIC && Subtarget.isSVR4ABI()) {
+ if (IsPIC && Subtarget.isSVR4ABI()) {
SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
GSDN->getOffset(),
PPCII::MO_PIC_FLAG);
@@ -2224,13 +2325,12 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
SDValue GALo =
DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
- SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);
+ SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG);
// If the global reference is actually to a non-lazy-pointer, we have to do an
// extra load to get the address of the global.
if (MOHiFlag & PPCII::MO_NLP_FLAG)
- Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
- false, false, false, 0);
+ Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
return Ptr;
}
@@ -2260,7 +2360,7 @@ SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
}
// If we're comparing for equality to zero, expose the fact that this is
- // implented as a ctlz/srl pair on ppc, so that the dag combiner can
+ // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
// fold the new nodes.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
if (C->isNullValue() && CC == ISD::SETEQ) {
@@ -2298,11 +2398,10 @@ SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
return SDValue();
}
-SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
- const PPCSubtarget &Subtarget) const {
+SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
SDNode *Node = Op.getNode();
EVT VT = Node->getValueType(0);
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDValue InChain = Node->getOperand(0);
SDValue VAListPtr = Node->getOperand(1);
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
@@ -2312,8 +2411,7 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
// gpr_index
SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
- VAListPtr, MachinePointerInfo(SV), MVT::i8,
- false, false, false, 0);
+ VAListPtr, MachinePointerInfo(SV), MVT::i8);
InChain = GprIndex.getValue(1);
if (VT == MVT::i64) {
@@ -2335,8 +2433,7 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
// fpr
SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
- FprPtr, MachinePointerInfo(SV), MVT::i8,
- false, false, false, 0);
+ FprPtr, MachinePointerInfo(SV), MVT::i8);
InChain = FprIndex.getValue(1);
SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
@@ -2346,14 +2443,12 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
DAG.getConstant(4, dl, MVT::i32));
// areas
- SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
- MachinePointerInfo(), false, false,
- false, 0);
+ SDValue OverflowArea =
+ DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
InChain = OverflowArea.getValue(1);
- SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr,
- MachinePointerInfo(), false, false,
- false, 0);
+ SDValue RegSaveArea =
+ DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
InChain = RegSaveArea.getValue(1);
// select overflow_area if index > 8
@@ -2383,8 +2478,7 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
VT.isInteger() ? VAListPtr : FprPtr,
- MachinePointerInfo(SV),
- MVT::i8, false, false, 0);
+ MachinePointerInfo(SV), MVT::i8);
// determine if we should load from reg_save_area or overflow_area
SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
@@ -2397,17 +2491,13 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
OverflowAreaPlusN);
- InChain = DAG.getTruncStore(InChain, dl, OverflowArea,
- OverflowAreaPtr,
- MachinePointerInfo(),
- MVT::i32, false, false, 0);
+ InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
+ MachinePointerInfo(), MVT::i32);
- return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(),
- false, false, false, 0);
+ return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
}
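// Editor's sketch (not part of this patch): the 32-bit SVR4 va_list record
// that LowerVAARG above walks and LowerVASTART below initializes. The field
// offsets (+0, +1, +4, +8) match the i8 loads and the FPROffset/StackOffset/
// FrameOffset adders in the surrounding code; the names are illustrative.
struct PPC32SVR4VAList {
  unsigned char GprIndex;   // next of r3..r10 to use; overflow area past 8
  unsigned char FprIndex;   // next of f1..f8 to use
  unsigned short Reserved;  // padding to a word boundary
  void *OverflowArgArea;    // arguments that were passed on the stack
  void *RegSaveArea;        // spilled r3..r10 followed by f1..f8
};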
-SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG,
- const PPCSubtarget &Subtarget) const {
+SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
// We have to copy the entire va_list struct:
@@ -2431,7 +2521,7 @@ SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
SDValue Nest = Op.getOperand(3); // 'nest' parameter value
SDLoc dl(Op);
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
bool isPPC64 = (PtrVT == MVT::i64);
Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
@@ -2454,28 +2544,26 @@ SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
CLI.setDebugLoc(dl).setChain(Chain)
.setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
DAG.getExternalSymbol("__trampoline_setup", PtrVT),
- std::move(Args), 0);
+ std::move(Args));
std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
return CallResult.second;
}
-SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
- const PPCSubtarget &Subtarget) const {
+SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+ EVT PtrVT = getPointerTy(MF.getDataLayout());
SDLoc dl(Op);
if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
- MachinePointerInfo(SV),
- false, false, 0);
+ MachinePointerInfo(SV));
}
// For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
@@ -2504,9 +2592,6 @@ SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
-
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
-
SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
PtrVT);
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
@@ -2524,35 +2609,29 @@ SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
// Store first byte : number of int regs
- SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR,
- Op.getOperand(1),
- MachinePointerInfo(SV),
- MVT::i8, false, false, 0);
+ SDValue firstStore =
+ DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
+ MachinePointerInfo(SV), MVT::i8);
uint64_t nextOffset = FPROffset;
SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
ConstFPROffset);
// Store second byte : number of float regs
SDValue secondStore =
- DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
- MachinePointerInfo(SV, nextOffset), MVT::i8,
- false, false, 0);
+ DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
+ MachinePointerInfo(SV, nextOffset), MVT::i8);
nextOffset += StackOffset;
nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
// Store second word : arguments given on stack
- SDValue thirdStore =
- DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
- MachinePointerInfo(SV, nextOffset),
- false, false, 0);
+ SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
+ MachinePointerInfo(SV, nextOffset));
nextOffset += FrameOffset;
nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
// Store third word : arguments given in registers
return DAG.getStore(thirdStore, dl, FR, nextPtr,
- MachinePointerInfo(SV, nextOffset),
- false, false, 0);
-
+ MachinePointerInfo(SV, nextOffset));
}
#include "PPCGenCallingConv.inc"
@@ -2762,14 +2841,10 @@ static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
return NumBytes;
}
-SDValue
-PPCTargetLowering::LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals)
- const {
+SDValue PPCTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
if (Subtarget.isSVR4ABI()) {
if (Subtarget.isPPC64())
return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
@@ -2783,14 +2858,10 @@ PPCTargetLowering::LowerFormalArguments(SDValue Chain,
}
}
-SDValue
-PPCTargetLowering::LowerFormalArguments_32SVR4(
- SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
// 32-bit SVR4 ABI Stack Frame Layout:
// +-----------------------------------+
@@ -2825,7 +2896,7 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
MachineFrameInfo *MFI = MF.getFrameInfo();
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
+ EVT PtrVT = getPointerTy(MF.getDataLayout());
// Potential tail calls could cause overwriting of argument stack slots.
bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
(CallConv == CallingConv::Fast));
@@ -2833,14 +2904,17 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+ PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
// Reserve space for the linkage area on the stack.
unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
CCInfo.AllocateStack(LinkageSize, PtrByteSize);
+ if (useSoftFloat())
+ CCInfo.PreAnalyzeFormalArguments(Ins);
CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
+ CCInfo.clearWasPPCF128();
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
@@ -2908,9 +2982,8 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
// Create load nodes to retrieve arguments from the stack.
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
- InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
- MachinePointerInfo(),
- false, false, false, 0));
+ InVals.push_back(
+ DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
}
}
@@ -2955,7 +3028,7 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
};
unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
- if (Subtarget.useSoftFloat())
+ if (useSoftFloat())
NumFPArgRegs = 0;
FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
@@ -2973,8 +3046,8 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
// The fixed integer arguments of a variadic function are stored to the
- // VarArgsFrameIndex on the stack so that they may be loaded by deferencing
- // the result of va_next.
+ // VarArgsFrameIndex on the stack so that they may be loaded by
+ // dereferencing the result of va_next.
for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
// Get an existing live-in vreg, or add a new one.
unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
@@ -2982,8 +3055,8 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
- SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo(), false, false, 0);
+ SDValue Store =
+ DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
MemOps.push_back(Store);
// Increment the address by four for the next argument to store
SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
@@ -3001,8 +3074,8 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
- SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo(), false, false, 0);
+ SDValue Store =
+ DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
MemOps.push_back(Store);
// Increment the address by eight for the next argument to store
SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
@@ -3019,10 +3092,10 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
// value to MVT::i64 and then truncate to the correct register size.
-SDValue
-PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
- SelectionDAG &DAG, SDValue ArgVal,
- SDLoc dl) const {
+SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
+ EVT ObjectVT, SelectionDAG &DAG,
+ SDValue ArgVal,
+ const SDLoc &dl) const {
if (Flags.isSExt())
ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
DAG.getValueType(ObjectVT));
@@ -3033,14 +3106,10 @@ PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
}
-SDValue
-PPCTargetLowering::LowerFormalArguments_64SVR4(
- SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
// TODO: add description of PPC stack frame format, or at least some docs.
//
bool isELFv2ABI = Subtarget.isELFv2ABI();
@@ -3052,7 +3121,7 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
assert(!(CallConv == CallingConv::Fast && isVarArg) &&
"fastcc not supported on varargs functions");
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
+ EVT PtrVT = getPointerTy(MF.getDataLayout());
// Potential tail calls could cause overwriting of argument stack slots.
bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
(CallConv == CallingConv::Fast));
@@ -3199,15 +3268,13 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
EVT ObjType = (ObjSize == 1 ? MVT::i8 :
(ObjSize == 2 ? MVT::i16 : MVT::i32));
Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
- MachinePointerInfo(&*FuncArg), ObjType,
- false, false, 0);
+ MachinePointerInfo(&*FuncArg), ObjType);
} else {
// For sizes that don't fit a truncating store (3, 5, 6, 7),
// store the whole register as-is to the parameter save area
// slot.
- Store =
- DAG.getStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo(&*FuncArg), false, false, 0);
+ Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
+ MachinePointerInfo(&*FuncArg));
}
MemOps.push_back(Store);
@@ -3234,9 +3301,8 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
SDValue Off = DAG.getConstant(j, dl, PtrVT);
Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
}
- SDValue Store =
- DAG.getStore(Val.getValue(1), dl, Val, Addr,
- MachinePointerInfo(&*FuncArg, j), false, false, 0);
+ SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
+ MachinePointerInfo(&*FuncArg, j));
MemOps.push_back(Store);
++GPR_idx;
}
@@ -3402,8 +3468,7 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
CurArgOffset += ArgSize - ObjSize;
int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
- ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
- false, false, false, 0);
+ ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
}
InVals.push_back(ArgVal);
@@ -3434,14 +3499,14 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
// If this function is vararg, store any remaining integer argument regs
- // to their spots on the stack so that they may be loaded by deferencing the
- // result of va_next.
+ // to their spots on the stack so that they may be loaded by dereferencing
+ // the result of va_next.
for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
GPR_idx < Num_GPR_Regs; ++GPR_idx) {
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
- SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo(), false, false, 0);
+ SDValue Store =
+ DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
MemOps.push_back(Store);
// Increment the address by four for the next argument to store
SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
@@ -3455,21 +3520,17 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
return Chain;
}
-SDValue
-PPCTargetLowering::LowerFormalArguments_Darwin(
- SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
// TODO: add description of PPC stack frame format, or at least some docs.
//
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
+ EVT PtrVT = getPointerTy(MF.getDataLayout());
bool isPPC64 = PtrVT == MVT::i64;
// Potential tail calls could cause overwriting of argument stack slots.
bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
@@ -3613,9 +3674,9 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
- SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo(&*FuncArg),
- ObjType, false, false, 0);
+ SDValue Store =
+ DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
+ MachinePointerInfo(&*FuncArg), ObjType);
MemOps.push_back(Store);
++GPR_idx;
}
@@ -3637,9 +3698,8 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
- SDValue Store =
- DAG.getStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo(&*FuncArg, j), false, false, 0);
+ SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
+ MachinePointerInfo(&*FuncArg, j));
MemOps.push_back(Store);
++GPR_idx;
ArgOffset += PtrByteSize;
@@ -3760,8 +3820,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
CurArgOffset + (ArgSize - ObjSize),
isImmutable);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
- ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
- false, false, false, 0);
+ ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
}
InVals.push_back(ArgVal);
@@ -3795,8 +3854,8 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
// If this function is vararg, store any remaining integer argument regs
- // to their spots on the stack so that they may be loaded by deferencing the
- // result of va_next.
+ // to their spots on the stack so that they may be loaded by dereferencing
+ // the result of va_next.
for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
unsigned VReg;
@@ -3806,8 +3865,8 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
- SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo(), false, false, 0);
+ SDValue Store =
+ DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
MemOps.push_back(Store);
// Increment the address by four for the next argument to store
SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
@@ -3838,6 +3897,176 @@ static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
return SPDiff;
}
+static bool isFunctionGlobalAddress(SDValue Callee);
+
+static bool
+resideInSameModule(SDValue Callee, Reloc::Model RelMod) {
+ // If !G, Callee can be an external symbol.
+ GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
+ if (!G) return false;
+
+ const GlobalValue *GV = G->getGlobal();
+
+ if (GV->isDeclaration()) return false;
+
+ switch(GV->getLinkage()) {
+ default: llvm_unreachable("unknown linkage type");
+ case GlobalValue::AvailableExternallyLinkage:
+ case GlobalValue::ExternalWeakLinkage:
+ return false;
+
+ // A callee with weak linkage is allowed if it has hidden or protected
+ // visibility.
+ case GlobalValue::LinkOnceAnyLinkage:
+ case GlobalValue::LinkOnceODRLinkage: // e.g. c++ inline functions
+ case GlobalValue::WeakAnyLinkage:
+ case GlobalValue::WeakODRLinkage: // e.g. c++ template instantiation
+ if (GV->hasDefaultVisibility())
+ return false;
+
+ case GlobalValue::ExternalLinkage:
+ case GlobalValue::InternalLinkage:
+ case GlobalValue::PrivateLinkage:
+ break;
+ }
+
+ // With '-fPIC', a call to a default-visibility function needs a 'nop'
+ // inserted after it, whether or not the function resides in the same
+ // module, so we treat such a callee as residing in a different module.
+ if (RelMod == Reloc::PIC_ && GV->hasDefaultVisibility())
+ return false;
+
+ return true;
+}
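// Editor's note (illustrative, not part of this patch): how the switch and
// the PIC check above classify a few common callees; the IR forms are
// hypothetical.
//   declare void @f()                     -> false (declaration)
//   define linkonce_odr void @f()         -> false (weak, default visibility)
//   define linkonce_odr hidden void @f()  -> true
//   define hidden void @f()               -> true
//   define void @f()                      -> true under Reloc::Static,
//                                            false under Reloc::PIC_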
+
+static bool
+needStackSlotPassParameters(const PPCSubtarget &Subtarget,
+ const SmallVectorImpl<ISD::OutputArg> &Outs) {
+ assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());
+
+ const unsigned PtrByteSize = 8;
+ const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
+
+ static const MCPhysReg GPR[] = {
+ PPC::X3, PPC::X4, PPC::X5, PPC::X6,
+ PPC::X7, PPC::X8, PPC::X9, PPC::X10,
+ };
+ static const MCPhysReg VR[] = {
+ PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
+ PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
+ };
+
+ const unsigned NumGPRs = array_lengthof(GPR);
+ const unsigned NumFPRs = 13;
+ const unsigned NumVRs = array_lengthof(VR);
+ const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
+
+ unsigned NumBytes = LinkageSize;
+ unsigned AvailableFPRs = NumFPRs;
+ unsigned AvailableVRs = NumVRs;
+
+ for (const ISD::OutputArg& Param : Outs) {
+ if (Param.Flags.isNest()) continue;
+
+ if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
+ PtrByteSize, LinkageSize, ParamAreaSize,
+ NumBytes, AvailableFPRs, AvailableVRs,
+ Subtarget.hasQPX()))
+ return true;
+ }
+ return false;
+}
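// Editor's note (illustrative): with the eight GPRs above, a callee such as
// `long f(long a1, ..., long a9)` forces its ninth argument into the
// parameter save area, so needStackSlotPassParameters returns true and the
// sibling call survives only if the caller reuses its own argument list.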
+
+static bool
+hasSameArgumentList(const Function *CallerFn, ImmutableCallSite *CS) {
+ if (CS->arg_size() != CallerFn->getArgumentList().size())
+ return false;
+
+ ImmutableCallSite::arg_iterator CalleeArgIter = CS->arg_begin();
+ ImmutableCallSite::arg_iterator CalleeArgEnd = CS->arg_end();
+ Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
+
+ for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
+ const Value* CalleeArg = *CalleeArgIter;
+ const Value* CallerArg = &(*CallerArgIter);
+ if (CalleeArg == CallerArg)
+ continue;
+
+ // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
+ // tail call @callee([4 x i64] undef, [4 x i64] %b)
+ // }
+ // The first argument of the callee is undef and has the same type as the
+ // caller's first argument.
+ if (CalleeArg->getType() == CallerArg->getType() &&
+ isa<UndefValue>(CalleeArg))
+ continue;
+
+ return false;
+ }
+
+ return true;
+}
+
+bool
+PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
+ SDValue Callee,
+ CallingConv::ID CalleeCC,
+ ImmutableCallSite *CS,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SelectionDAG& DAG) const {
+ bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
+
+ if (DisableSCO && !TailCallOpt) return false;
+
+ // Variadic argument functions are not supported.
+ if (isVarArg) return false;
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
+
+ // Tail or sibling call optimization (TCO/SCO) requires the callee and the
+ // caller to have the same calling convention.
+ if (CallerCC != CalleeCC) return false;
+
+ // SCO supports only the C calling convention (fastcc goes through TCO).
+ if (CalleeCC != CallingConv::Fast && CalleeCC != CallingConv::C)
+ return false;
+
+ // Functions containing byval parameters are not supported.
+ if (std::any_of(Ins.begin(), Ins.end(),
+ [](const ISD::InputArg& IA) { return IA.Flags.isByVal(); }))
+ return false;
+
+ // No TCO/SCO on indirect calls because the caller has to restore its TOC.
+ if (!isFunctionGlobalAddress(Callee) &&
+ !isa<ExternalSymbolSDNode>(Callee))
+ return false;
+
+ // Check whether the callee resides in the same module, because for now the
+ // PPC64 SVR4 ABI (ELFv1/ELFv2) doesn't allow tail calls to a symbol that
+ // resides in another module.
+ // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
+ if (!resideInSameModule(Callee, getTargetMachine().getRelocationModel()))
+ return false;
+
+ // TCO allows altering the callee's ABI, so we don't have to check further.
+ if (CalleeCC == CallingConv::Fast && TailCallOpt)
+ return true;
+
+ if (DisableSCO) return false;
+
+ // If the callee uses the same argument list as the caller, we can apply SCO
+ // in this case. If not, we need to check whether the callee needs stack
+ // slots for passing arguments.
+ if (!hasSameArgumentList(MF.getFunction(), CS) &&
+ needStackSlotPassParameters(Subtarget, Outs)) {
+ return false;
+ }
+
+ return true;
+}
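// Editor's sketch (illustrative C, not part of this patch): a call that the
// checks above can accept as a sibling call on PPC64 ELF without
// GuaranteedTailCallOpt: matching C calling conventions, a direct callee
// declared hidden and defined in the same translation unit, and an argument
// list reused verbatim.
//   __attribute__((visibility("hidden"))) long callee(long a, long b);
//   long caller(long a, long b) { return callee(a, b); }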
+
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
@@ -3888,9 +4117,11 @@ static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
SignExtend32<26>(Addr) != Addr)
return nullptr; // Top 6 bits have to be sext of immediate.
- return DAG.getConstant((int)C->getZExtValue() >> 2, SDLoc(Op),
- DAG.getTargetLoweringInfo().getPointerTy(
- DAG.getDataLayout())).getNode();
+ return DAG
+ .getConstant(
+ (int)C->getZExtValue() >> 2, SDLoc(Op),
+ DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
+ .getNode();
}
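// Editor's worked example (not part of this patch): BLA takes a 26-bit,
// sign-extended, 4-byte-aligned absolute address, stored shifted right by 2.
//   0x01FFFFFC: bit 25 is clear, so SignExtend32<26> is the identity; it is
//               encodable and emitted as 0x01FFFFFC >> 2 = 0x007FFFFF.
//   0x02000000: SignExtend32<26> yields 0xFE000000 != 0x02000000; rejected.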
namespace {
@@ -3905,12 +4136,10 @@ struct TailCallArgumentInfo {
}
/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
-static void
-StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
- SDValue Chain,
- const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
- SmallVectorImpl<SDValue> &MemOpChains,
- SDLoc dl) {
+static void StoreTailCallArgumentsToStackSlot(
+ SelectionDAG &DAG, SDValue Chain,
+ const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
+ SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
SDValue Arg = TailCallArgs[i].Arg;
SDValue FIN = TailCallArgs[i].FrameIdxOp;
@@ -3918,48 +4147,40 @@ StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
// Store relative to the frame pointer.
MemOpChains.push_back(DAG.getStore(
Chain, dl, Arg, FIN,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false,
- false, 0));
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
}
}
/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
/// the appropriate stack slot for the tail call optimized function call.
-static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
- MachineFunction &MF,
- SDValue Chain,
- SDValue OldRetAddr,
- SDValue OldFP,
- int SPDiff,
- bool isPPC64,
- bool isDarwinABI,
- SDLoc dl) {
+static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
+ SDValue OldRetAddr, SDValue OldFP,
+ int SPDiff, const SDLoc &dl) {
if (SPDiff) {
// Calculate the new stack slot for the return address.
+ MachineFunction &MF = DAG.getMachineFunction();
+ const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
+ const PPCFrameLowering *FL = Subtarget.getFrameLowering();
+ bool isPPC64 = Subtarget.isPPC64();
int SlotSize = isPPC64 ? 8 : 4;
- const PPCFrameLowering *FL =
- MF.getSubtarget<PPCSubtarget>().getFrameLowering();
int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
NewRetAddrLoc, true);
EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
- Chain = DAG.getStore(
- Chain, dl, OldRetAddr, NewRetAddrFrIdx,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewRetAddr),
- false, false, 0);
+ Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
+ MachinePointerInfo::getFixedStack(MF, NewRetAddr));
// When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
// slot as the FP is never overwritten.
- if (isDarwinABI) {
+ if (Subtarget.isDarwinABI()) {
int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc,
true);
SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
- Chain = DAG.getStore(
- Chain, dl, OldFP, NewFramePtrIdx,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewFPIdx),
- false, false, 0);
+ Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
+ MachinePointerInfo::getFixedStack(
+ DAG.getMachineFunction(), NewFPIdx));
}
}
return Chain;
@@ -3986,27 +4207,21 @@ CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
/// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address
/// stack slot. Returns the chain as result and the loaded frame pointers in
/// LROpOut/FPOpout. Used when tail calling.
-SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
- int SPDiff,
- SDValue Chain,
- SDValue &LROpOut,
- SDValue &FPOpOut,
- bool isDarwinABI,
- SDLoc dl) const {
+SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
+ SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
+ SDValue &FPOpOut, const SDLoc &dl) const {
if (SPDiff) {
// Load the LR and FP stack slot for later adjusting.
EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
LROpOut = getReturnAddrFrameIndex(DAG);
- LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
- false, false, false, 0);
+ LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
Chain = SDValue(LROpOut.getNode(), 1);
// When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
// slot as the FP is never overwritten.
- if (isDarwinABI) {
+ if (Subtarget.isDarwinABI()) {
FPOpOut = getFramePointerFrameIndex(DAG);
- FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(),
- false, false, false, 0);
+ FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo());
Chain = SDValue(FPOpOut.getNode(), 1);
}
}
@@ -4019,10 +4234,9 @@ SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
-static SDValue
-CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
- ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
- SDLoc dl) {
+static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
+ SDValue Chain, ISD::ArgFlagsTy Flags,
+ SelectionDAG &DAG, const SDLoc &dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
false, false, false, MachinePointerInfo(),
@@ -4031,13 +4245,11 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
/// tail calls.
-static void
-LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
- SDValue Arg, SDValue PtrOff, int SPDiff,
- unsigned ArgOffset, bool isPPC64, bool isTailCall,
- bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
- SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments,
- SDLoc dl) {
+static void LowerMemOpCallTo(
+ SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
+ SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
+ bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
+ SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
if (!isTailCall) {
if (isVector) {
@@ -4049,20 +4261,18 @@ LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
DAG.getConstant(ArgOffset, dl, PtrVT));
}
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
- MachinePointerInfo(), false, false, 0));
- // Calculate and remember argument location.
+ MemOpChains.push_back(
+ DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
+ // Calculate and remember argument location.
} else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
TailCallArguments);
}
-static
-void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
- SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes,
- SDValue LROp, SDValue FPOp, bool isDarwinABI,
- SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
- MachineFunction &MF = DAG.getMachineFunction();
-
+static void
+PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
+ const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
+ SDValue FPOp,
+ SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
// Emit a sequence of copyto/copyfrom virtual registers for arguments that
// might overwrite each other in case of tail call optimization.
SmallVector<SDValue, 8> MemOpChains2;
@@ -4074,8 +4284,7 @@ void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
// Store the return address to the appropriate stack slot.
- Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff,
- isPPC64, isDarwinABI, dl);
+ Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
// Emit callseq_end just before tailcall node.
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
@@ -4091,19 +4300,19 @@ static bool isFunctionGlobalAddress(SDValue Callee) {
Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
return false;
- return G->getGlobal()->getType()->getElementType()->isFunctionTy();
+ return G->getGlobal()->getValueType()->isFunctionTy();
}
return false;
}
-static
-unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
- SDValue &Chain, SDValue CallSeqStart, SDLoc dl, int SPDiff,
- bool isTailCall, bool IsPatchPoint, bool hasNest,
- SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass,
- SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
- ImmutableCallSite *CS, const PPCSubtarget &Subtarget) {
+static unsigned
+PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain,
+ SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall,
+ bool isPatchPoint, bool hasNest,
+ SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
+ SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
+ ImmutableCallSite *CS, const PPCSubtarget &Subtarget) {
bool isPPC64 = Subtarget.isPPC64();
bool isSVR4ABI = Subtarget.isSVR4ABI();
@@ -4123,23 +4332,24 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
needIndirectCall = false;
}
+ // PC-relative references to external symbols should go through $stub, unless
+ // we're building with the leopard linker or later, which automatically
+ // synthesizes these stubs.
+ const TargetMachine &TM = DAG.getTarget();
+ const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
+ const GlobalValue *GV = nullptr;
+ if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
+ GV = G->getGlobal();
+ bool Local = TM.shouldAssumeDSOLocal(*Mod, GV);
+ bool UsePlt = !Local && Subtarget.isTargetELF() && !isPPC64;
+
if (isFunctionGlobalAddress(Callee)) {
GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
// A call to a TLS address is actually an indirect call to a
// thread-specific pointer.
unsigned OpFlags = 0;
- if ((DAG.getTarget().getRelocationModel() != Reloc::Static &&
- (Subtarget.getTargetTriple().isMacOSX() &&
- Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5)) &&
- !G->getGlobal()->isStrongDefinitionForLinker()) ||
- (Subtarget.isTargetELF() && !isPPC64 &&
- !G->getGlobal()->hasLocalLinkage() &&
- DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
- // PC-relative references to external symbols should go through $stub,
- // unless we're building with the leopard linker or later, which
- // automatically synthesizes these stubs.
- OpFlags = PPCII::MO_PLT_OR_STUB;
- }
+ if (UsePlt)
+ OpFlags = PPCII::MO_PLT;
// If the callee is a GlobalAddress/ExternalSymbol node (quite common,
// every direct call is) turn it into a TargetGlobalAddress /
@@ -4152,23 +4362,15 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
unsigned char OpFlags = 0;
- if ((DAG.getTarget().getRelocationModel() != Reloc::Static &&
- (Subtarget.getTargetTriple().isMacOSX() &&
- Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) ||
- (Subtarget.isTargetELF() && !isPPC64 &&
- DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
- // PC-relative references to external symbols should go through $stub,
- // unless we're building with the leopard linker or later, which
- // automatically synthesizes these stubs.
- OpFlags = PPCII::MO_PLT_OR_STUB;
- }
+ if (UsePlt)
+ OpFlags = PPCII::MO_PLT;
Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
OpFlags);
needIndirectCall = false;
}
- if (IsPatchPoint) {
+ if (isPatchPoint) {
// We'll form an invalid direct call when lowering a patchpoint; the full
// sequence for an indirect call is complicated, and many of the
// instructions introduced might have side effects (and, thus, can't be
@@ -4217,24 +4419,26 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
if (LDChain.getValueType() == MVT::Glue)
LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2);
- bool LoadsInv = Subtarget.hasInvariantFunctionDescriptors();
+ auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
+ ? MachineMemOperand::MOInvariant
+ : MachineMemOperand::MONone;
MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr);
SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI,
- false, false, LoadsInv, 8);
+ /* Alignment = */ 8, MMOFlags);
// Load environment pointer into r11.
SDValue PtrOff = DAG.getIntPtrConstant(16, dl);
SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
- SDValue LoadEnvPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddPtr,
- MPI.getWithOffset(16), false, false,
- LoadsInv, 8);
+ SDValue LoadEnvPtr =
+ DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, MPI.getWithOffset(16),
+ /* Alignment = */ 8, MMOFlags);
SDValue TOCOff = DAG.getIntPtrConstant(8, dl);
SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff);
- SDValue TOCPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddTOC,
- MPI.getWithOffset(8), false, false,
- LoadsInv, 8);
+ SDValue TOCPtr =
+ DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, MPI.getWithOffset(8),
+ /* Alignment = */ 8, MMOFlags);
setUsesTOCBasePtr(DAG);
SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr,
@@ -4292,7 +4496,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
// All calls, in both the ELF V1 and V2 ABIs, need the TOC register live
// into the call.
- if (isSVR4ABI && isPPC64 && !IsPatchPoint) {
+ if (isSVR4ABI && isPPC64 && !isPatchPoint) {
setUsesTOCBasePtr(DAG);
Ops.push_back(DAG.getRegister(PPC::X2, PtrVT));
}
@@ -4308,12 +4512,10 @@ bool isLocalCall(const SDValue &Callee)
return false;
}
-SDValue
-PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue PPCTargetLowering::LowerCallResult(
+ SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
@@ -4354,23 +4556,18 @@ PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
return Chain;
}
-SDValue
-PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
- bool isTailCall, bool isVarArg, bool IsPatchPoint,
- bool hasNest, SelectionDAG &DAG,
- SmallVector<std::pair<unsigned, SDValue>, 8>
- &RegsToPass,
- SDValue InFlag, SDValue Chain,
- SDValue CallSeqStart, SDValue &Callee,
- int SPDiff, unsigned NumBytes,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite *CS) const {
+SDValue PPCTargetLowering::FinishCall(
+ CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg,
+ bool isPatchPoint, bool hasNest, SelectionDAG &DAG,
+ SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue InFlag,
+ SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
+ unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
+ SmallVectorImpl<SDValue> &InVals, ImmutableCallSite *CS) const {
std::vector<EVT> NodeTys;
SmallVector<SDValue, 8> Ops;
unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
- SPDiff, isTailCall, IsPatchPoint, hasNest,
+ SPDiff, isTailCall, isPatchPoint, hasNest,
RegsToPass, Ops, NodeTys, CS, Subtarget);
// Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
@@ -4417,7 +4614,7 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
// same TOC), the NOP will remain unchanged.
if (!isTailCall && Subtarget.isSVR4ABI()&& Subtarget.isPPC64() &&
- !IsPatchPoint) {
+ !isPatchPoint) {
if (CallOpc == PPCISD::BCTRL) {
// This is a call through a function pointer.
// Restore the caller TOC from the save area into R2.
@@ -4430,7 +4627,7 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl,
// allocated and an unnecessary move instruction being generated.
CallOpc = PPCISD::BCTRL_LOAD_TOC;
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT);
unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
@@ -4472,12 +4669,35 @@ PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool &isTailCall = CLI.IsTailCall;
CallingConv::ID CallConv = CLI.CallConv;
bool isVarArg = CLI.IsVarArg;
- bool IsPatchPoint = CLI.IsPatchPoint;
+ bool isPatchPoint = CLI.IsPatchPoint;
ImmutableCallSite *CS = CLI.CS;
- if (isTailCall)
- isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
- Ins, DAG);
+ if (isTailCall) {
+ if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
+ isTailCall =
+ IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS,
+ isVarArg, Outs, Ins, DAG);
+ else
+ isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
+ Ins, DAG);
+ if (isTailCall) {
+ ++NumTailCalls;
+ if (!getTargetMachine().Options.GuaranteedTailCallOpt)
+ ++NumSiblingCalls;
+
+ assert(isa<GlobalAddressSDNode>(Callee) &&
+ "Callee should be an llvm::Function object.");
+ DEBUG(
+ const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
+ const unsigned Width = 80 - strlen("TCO caller: ")
+ - strlen(", callee linkage: 0, 0");
+ dbgs() << "TCO caller: "
+ << left_justify(DAG.getMachineFunction().getName(), Width)
+ << ", callee linkage: "
+ << GV->getVisibility() << ", " << GV->getLinkage() << "\n"
+ );
+ }
+ }
if (!isTailCall && CS && CS->isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
@@ -4486,29 +4706,27 @@ PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (Subtarget.isSVR4ABI()) {
if (Subtarget.isPPC64())
return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
- isTailCall, IsPatchPoint, Outs, OutVals, Ins,
+ isTailCall, isPatchPoint, Outs, OutVals, Ins,
dl, DAG, InVals, CS);
else
return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
- isTailCall, IsPatchPoint, Outs, OutVals, Ins,
+ isTailCall, isPatchPoint, Outs, OutVals, Ins,
dl, DAG, InVals, CS);
}
return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
- isTailCall, IsPatchPoint, Outs, OutVals, Ins,
+ isTailCall, isPatchPoint, Outs, OutVals, Ins,
dl, DAG, InVals, CS);
}
-SDValue
-PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool isTailCall, bool IsPatchPoint,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite *CS) const {
+SDValue PPCTargetLowering::LowerCall_32SVR4(
+ SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
+ bool isTailCall, bool isPatchPoint,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
+ ImmutableCallSite *CS) const {
// See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
// of the 32-bit SVR4 ABI stack frame layout.
@@ -4534,12 +4752,13 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
// Assign locations to all of the outgoing arguments.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
- *DAG.getContext());
+ PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
// Reserve space for the linkage area on the stack.
CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
PtrByteSize);
+ if (useSoftFloat())
+ CCInfo.PreAnalyzeCallOperands(Outs);
if (isVarArg) {
// Handle fixed and variable vector arguments differently.
@@ -4572,11 +4791,11 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
// All arguments are treated the same.
CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
}
+ CCInfo.clearWasPPCF128();
// Assign locations to all of the outgoing aggregate by value arguments.
SmallVector<CCValAssign, 16> ByValArgLocs;
- CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- ByValArgLocs, *DAG.getContext());
+ CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext());
// Reserve stack space for the allocations in CCInfo.
CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
@@ -4601,8 +4820,7 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
// Load the return address and frame pointer so it can be moved somewhere else
// later.
SDValue LROp, FPOp;
- Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false,
- dl);
+ Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
// Set up a copy of the stack pointer for use loading and storing any
// arguments that may not fit in the registers available for argument
@@ -4676,9 +4894,8 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
- MachinePointerInfo(),
- false, false, 0));
+ MemOpChains.push_back(
+ DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
} else {
// Calculate and remember argument location.
CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
@@ -4712,10 +4929,10 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
}
if (isTailCall)
- PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
- false, TailCallArguments);
+ PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
+ TailCallArguments);
- return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint,
+ return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
/* unused except on PPC64 ELFv1 */ false, DAG,
RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
NumBytes, Ins, InVals, CS);
@@ -4723,12 +4940,9 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
// Copy an argument into memory, being careful to do this outside the
// call sequence for the call to which the argument belongs.
-SDValue
-PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
- SDValue CallSeqStart,
- ISD::ArgFlagsTy Flags,
- SelectionDAG &DAG,
- SDLoc dl) const {
+SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
+ SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
+ SelectionDAG &DAG, const SDLoc &dl) const {
SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
CallSeqStart.getNode()->getOperand(0),
Flags, DAG, dl);
@@ -4741,27 +4955,29 @@ PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
return NewCallSeqStart;
}
-SDValue
-PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool isTailCall, bool IsPatchPoint,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite *CS) const {
+SDValue PPCTargetLowering::LowerCall_64SVR4(
+ SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
+ bool isTailCall, bool isPatchPoint,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
+ ImmutableCallSite *CS) const {
bool isELFv2ABI = Subtarget.isELFv2ABI();
bool isLittleEndian = Subtarget.isLittleEndian();
unsigned NumOps = Outs.size();
bool hasNest = false;
+ bool IsSibCall = false;
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
unsigned PtrByteSize = 8;
MachineFunction &MF = DAG.getMachineFunction();
+ if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
+ IsSibCall = true;
+
// Mark this function as potentially containing a tail call. As a consequence
// the frame pointer will be used for dynamic allocation and for restoring the
// caller's stack pointer in this function's epilogue. This is
@@ -4881,9 +5097,12 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
CallConv == CallingConv::Fast)
NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
+ int SPDiff = 0;
+
// Calculate by how many bytes the stack has to be adjusted in case of tail
// call optimization.
- int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
+ if (!IsSibCall)
+ SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
// To protect arguments on the stack from being clobbered in a tail call,
// force all the loads to happen before doing any other lowering.
@@ -4892,15 +5111,15 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
- dl);
+ if (!IsSibCall)
+ Chain = DAG.getCALLSEQ_START(Chain,
+ DAG.getIntPtrConstant(NumBytes, dl, true), dl);
SDValue CallSeqStart = Chain;
// Load the return address and frame pointer so it can be moved somewhere else
// later.
SDValue LROp, FPOp;
- Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
- dl);
+ Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
// Set up a copy of the stack pointer for use loading and storing any
// arguments that may not fit in the registers available for argument
@@ -4980,8 +5199,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
if (GPR_idx != NumGPRs) {
SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
- MachinePointerInfo(), VT,
- false, false, false, 0);
+ MachinePointerInfo(), VT);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
@@ -5041,9 +5259,8 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
Flags, DAG, dl);
// Load the slot into the register.
- SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff,
- MachinePointerInfo(),
- false, false, false, 0);
+ SDValue Load =
+ DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
@@ -5058,9 +5275,8 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
if (GPR_idx != NumGPRs) {
- SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
- MachinePointerInfo(),
- false, false, false, 0);
+ SDValue Load =
+ DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
ArgOffset += PtrByteSize;
@@ -5214,13 +5430,12 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
if (isVarArg) {
// We could elide this store in the case where the object fits
// entirely in R registers. Maybe later.
- SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
- MachinePointerInfo(), false, false, 0);
+ SDValue Store =
+ DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
MemOpChains.push_back(Store);
if (VR_idx != NumVRs) {
- SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
- MachinePointerInfo(),
- false, false, false, 0);
+ SDValue Load =
+ DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
MemOpChains.push_back(Load.getValue(1));
unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 ||
@@ -5236,8 +5451,8 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
break;
SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
DAG.getConstant(i, dl, PtrVT));
- SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
- false, false, false, 0);
+ SDValue Load =
+ DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
}
@@ -5278,13 +5493,12 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
if (isVarArg) {
// We could elide this store in the case where the object fits
// entirely in R registers. Maybe later.
- SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
- MachinePointerInfo(), false, false, 0);
+ SDValue Store =
+ DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
MemOpChains.push_back(Store);
if (QFPR_idx != NumQFPRs) {
- SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl,
- Store, PtrOff, MachinePointerInfo(),
- false, false, false, 0);
+ SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store,
+ PtrOff, MachinePointerInfo());
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
}
@@ -5294,8 +5508,8 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
break;
SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
DAG.getConstant(i, dl, PtrVT));
- SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
- false, false, false, 0);
+ SDValue Load =
+ DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
}
@@ -5332,7 +5546,7 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
// Check if this is an indirect call (MTCTR/BCTRL).
// See PrepareCall() for more information about calls through function
// pointers in the 64-bit SVR4 ABI.
- if (!isTailCall && !IsPatchPoint &&
+ if (!isTailCall && !isPatchPoint &&
!isFunctionGlobalAddress(Callee) &&
!isa<ExternalSymbolSDNode>(Callee)) {
// Load r2 into a virtual register and store it to the TOC save area.
@@ -5344,12 +5558,11 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
Chain = DAG.getStore(
Val.getValue(1), dl, Val, AddPtr,
- MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset),
- false, false, 0);
+ MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
// In the ELFv2 ABI, R12 must contain the address of an indirect callee.
// This does not mean the MTCTR instruction must use R12; it's easier
// to model this as an extra parameter, so do that.
- if (isELFv2ABI && !IsPatchPoint)
+ if (isELFv2ABI && !isPatchPoint)
RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
}
@@ -5362,29 +5575,27 @@ PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
InFlag = Chain.getValue(1);
}
- if (isTailCall)
- PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp,
- FPOp, true, TailCallArguments);
+ if (isTailCall && !IsSibCall)
+ PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
+ TailCallArguments);
- return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, hasNest,
+ return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
SPDiff, NumBytes, Ins, InVals, CS);
}
-SDValue
-PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool isTailCall, bool IsPatchPoint,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite *CS) const {
+SDValue PPCTargetLowering::LowerCall_Darwin(
+ SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
+ bool isTailCall, bool isPatchPoint,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
+ ImmutableCallSite *CS) const {
unsigned NumOps = Outs.size();
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
bool isPPC64 = PtrVT == MVT::i64;
unsigned PtrByteSize = isPPC64 ? 8 : 4;
@@ -5467,8 +5678,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// Load the return address and frame pointer so they can be moved somewhere
// else later.
SDValue LROp, FPOp;
- Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
- dl);
+ Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
// Set up a copy of the stack pointer for use loading and storing any
// arguments that may not fit in the registers available for argument
@@ -5538,8 +5748,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
if (GPR_idx != NumGPRs) {
SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
- MachinePointerInfo(), VT,
- false, false, false, 0);
+ MachinePointerInfo(), VT);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
@@ -5569,9 +5778,8 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
if (GPR_idx != NumGPRs) {
- SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
- MachinePointerInfo(),
- false, false, false, 0);
+ SDValue Load =
+ DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
ArgOffset += PtrByteSize;
@@ -5606,24 +5814,22 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
if (isVarArg) {
- SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
- MachinePointerInfo(), false, false, 0);
+ SDValue Store =
+ DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
MemOpChains.push_back(Store);
// Float varargs are always shadowed in available integer registers
if (GPR_idx != NumGPRs) {
- SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
- MachinePointerInfo(), false, false,
- false, 0);
+ SDValue Load =
+ DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
}
if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
- SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
- MachinePointerInfo(),
- false, false, false, 0);
+ SDValue Load =
+ DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
}
@@ -5665,13 +5871,12 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// entirely in R registers. Maybe later.
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
DAG.getConstant(ArgOffset, dl, PtrVT));
- SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
- MachinePointerInfo(), false, false, 0);
+ SDValue Store =
+ DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
MemOpChains.push_back(Store);
if (VR_idx != NumVRs) {
- SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
- MachinePointerInfo(),
- false, false, false, 0);
+ SDValue Load =
+ DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
}
@@ -5681,8 +5886,8 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
break;
SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
DAG.getConstant(i, dl, PtrVT));
- SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
- false, false, false, 0);
+ SDValue Load =
+ DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
}
@@ -5754,10 +5959,10 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
}
if (isTailCall)
- PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
- FPOp, true, TailCallArguments);
+ PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
+ TailCallArguments);
- return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint,
+ return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
/* unused except on PPC64 ELFv1 */ false, DAG,
RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
NumBytes, Ins, InVals, CS);
@@ -5774,11 +5979,11 @@ PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
}
SDValue
-PPCTargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
+PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const {
+ const SDLoc &dl, SelectionDAG &DAG) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
@@ -5814,6 +6019,25 @@ PPCTargetLowering::LowerReturn(SDValue Chain,
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
}
+ const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
+ const MCPhysReg *I =
+ TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
+ if (I) {
+ for (; *I; ++I) {
+
+ if (PPC::G8RCRegClass.contains(*I))
+ RetOps.push_back(DAG.getRegister(*I, MVT::i64));
+ else if (PPC::F8RCRegClass.contains(*I))
+ RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
+ else if (PPC::CRRCRegClass.contains(*I))
+ RetOps.push_back(DAG.getRegister(*I, MVT::i1));
+ else if (PPC::VRRCRegClass.contains(*I))
+ RetOps.push_back(DAG.getRegister(*I, MVT::Other));
+ else
+ llvm_unreachable("Unexpected register class in CSRsViaCopy!");
+ }
+ }
+
RetOps[0] = Chain; // Update chain.
// Add the flag if we have it.
@@ -5823,8 +6047,9 @@ PPCTargetLowering::LowerReturn(SDValue Chain,
return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
}
-SDValue PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(
- SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget) const {
+SDValue
+PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
+ SelectionDAG &DAG) const {
SDLoc dl(Op);
// Get the correct type for integers.
@@ -5839,13 +6064,13 @@ SDValue PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(
return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
}
-SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
- const PPCSubtarget &Subtarget) const {
+SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
+ SelectionDAG &DAG) const {
// When we pop the dynamic allocation we need to restore the SP link.
SDLoc dl(Op);
// Get the correct type for pointers.
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
// Construct the stack pointer operand.
bool isPPC64 = Subtarget.isPPC64();
@@ -5857,22 +6082,20 @@ SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
SDValue SaveSP = Op.getOperand(1);
// Load the old link SP.
- SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
- MachinePointerInfo(),
- false, false, false, 0);
+ SDValue LoadLinkSP =
+ DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
// Restore the stack pointer.
Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
// Store the old link SP.
- return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
- false, false, 0);
+ return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
}
SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
bool isPPC64 = Subtarget.isPPC64();
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
+ EVT PtrVT = getPointerTy(MF.getDataLayout());
// Get current frame pointer save index. The users of this index will be
// primarily DYNALLOC instructions.
@@ -5895,7 +6118,7 @@ SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
bool isPPC64 = Subtarget.isPPC64();
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
+ EVT PtrVT = getPointerTy(MF.getDataLayout());
// Get current frame pointer save index. The users of this index will be
// primarily DYNALLOC instructions.
@@ -5915,15 +6138,14 @@ PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
}
SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
- SelectionDAG &DAG,
- const PPCSubtarget &Subtarget) const {
+ SelectionDAG &DAG) const {
// Get the inputs.
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
SDLoc dl(Op);
// Get the correct type for pointers.
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
// Negate the size.
SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
DAG.getConstant(0, dl, PtrVT), Size);
@@ -6113,7 +6335,7 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
SelectionDAG &DAG,
- SDLoc dl) const {
+ const SDLoc &dl) const {
assert(Op.getOperand(0).getValueType().isFloatingPoint());
SDValue Src = Op.getOperand(0);
if (Src.getValueType() == MVT::f32)
@@ -6156,15 +6378,14 @@ void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
} else
- Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr,
- MPI, false, false, 0);
+ Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI);
// Result is a load from the stack slot. If loading 4 bytes, make sure to
- // add in a bias.
+ // add in a bias on big endian.
if (Op.getValueType() == MVT::i32 && !i32Stack) {
FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
DAG.getConstant(4, dl, FIPtr.getValueType()));
- MPI = MPI.getWithOffset(4);
+ MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
}
RLI.Chain = Chain;
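// A minimal standalone check of the sub-word offset logic above, assuming
// only the C++ standard library: after an 8-byte store of a 32-bit result,
// the i32 payload sits at offset 4 on big-endian and offset 0 on
// little-endian, which is exactly the getWithOffset() bias chosen here.
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint64_t Slot = 42;                       // stand-in for the 8-byte stack slot
  unsigned char Bytes[8];
  std::memcpy(Bytes, &Slot, 8);

  uint16_t Probe = 1;
  unsigned char FirstByte;
  std::memcpy(&FirstByte, &Probe, 1);
  unsigned Bias = (FirstByte == 1) ? 0 : 4; // little endian: 0, big endian: 4

  uint32_t Low32;
  std::memcpy(&Low32, Bytes + Bias, 4);
  assert(Low32 == 42);                      // the low 32 bits live at +Bias
  return 0;
}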
@@ -6177,7 +6398,7 @@ void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
/// need for load/store combinations.
SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
SelectionDAG &DAG,
- SDLoc dl) const {
+ const SDLoc &dl) const {
assert(Op.getOperand(0).getValueType().isFloatingPoint());
SDValue Src = Op.getOperand(0);
@@ -6208,16 +6429,18 @@ SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
}
SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
- SDLoc dl) const {
+ const SDLoc &dl) const {
if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
return LowerFP_TO_INTDirectMove(Op, DAG, dl);
ReuseLoadInfo RLI;
LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
- return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, false,
- false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo,
- RLI.Ranges);
+ return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
+ RLI.Alignment,
+ RLI.IsInvariant ? MachineMemOperand::MOInvariant
+ : MachineMemOperand::MONone,
+ RLI.AAInfo, RLI.Ranges);
}
// We're trying to insert a regular store, S, and then a load, L. If the
@@ -6251,7 +6474,7 @@ bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
return false;
RLI.Ptr = LD->getBasePtr();
- if (LD->isIndexed() && LD->getOffset().getOpcode() != ISD::UNDEF) {
+ if (LD->isIndexed() && !LD->getOffset().isUndef()) {
assert(LD->getAddressingMode() == ISD::PRE_INC &&
"Non-pre-inc AM on PPC?");
RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
@@ -6289,12 +6512,36 @@ void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
}
+/// \brief Analyze the profitability of a direct move: prefer a float load
+/// over an int load plus a direct move when the loaded integer has no
+/// integer uses.
+static bool directMoveIsProfitable(const SDValue &Op) {
+ SDNode *Origin = Op.getOperand(0).getNode();
+ if (Origin->getOpcode() != ISD::LOAD)
+ return true;
+
+ for (SDNode::use_iterator UI = Origin->use_begin(),
+ UE = Origin->use_end();
+ UI != UE; ++UI) {
+
+ // Only look at the users of the loaded value.
+ if (UI.getUse().get().getResNo() != 0)
+ continue;
+
+ if (UI->getOpcode() != ISD::SINT_TO_FP &&
+ UI->getOpcode() != ISD::UINT_TO_FP)
+ return true;
+ }
+
+ return false;
+}
+
/// \brief Custom lowers integer to floating point conversions to use
/// the direct move instructions available in ISA 2.07 to avoid the
/// need for load/store combinations.
SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
SelectionDAG &DAG,
- SDLoc dl) const {
+ const SDLoc &dl) const {
assert((Op.getValueType() == MVT::f32 ||
Op.getValueType() == MVT::f64) &&
"Invalid floating point type as target of conversion");
@@ -6335,9 +6582,7 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
// This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
- SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64);
- FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64, FPHalfs, FPHalfs,
- FPHalfs, FPHalfs);
+ SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
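// A minimal standalone check of the identity used above, assuming only the
// C++ standard library: for the QPX boolean encoding V in {-1.0, +1.0}, the
// single fma 0.5*V + 0.5 gives the same result as (V + 1.0) * 0.5 and maps
// the pair to {0.0, 1.0}. Both forms are exact in IEEE arithmetic here.
#include <cassert>
#include <cmath>

int main() {
  for (double V : {-1.0, 1.0}) {
    double ViaFMA = std::fma(0.5, V, 0.5); // one fused multiply-add
    double ViaAdd = (V + 1.0) * 0.5;       // the naive two-op form
    assert(ViaFMA == ViaAdd);
    assert(ViaFMA == (V < 0 ? 0.0 : 1.0));
  }
  return 0;
}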
@@ -6359,7 +6604,8 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
// If we have direct moves, we can do all the conversion, skip the store/load
// however, without FPCVT we can't do most conversions.
- if (Subtarget.hasDirectMove() && Subtarget.isPPC64() && Subtarget.hasFPCVT())
+ if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
+ Subtarget.isPPC64() && Subtarget.hasFPCVT())
return LowerINT_TO_FPDirectMove(Op, DAG, dl);
assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
@@ -6429,9 +6675,11 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
MachineFunction &MF = DAG.getMachineFunction();
if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
- Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, false,
- false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo,
- RLI.Ranges);
+ Bits =
+ DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, RLI.Alignment,
+ RLI.IsInvariant ? MachineMemOperand::MOInvariant
+ : MachineMemOperand::MONone,
+ RLI.AAInfo, RLI.Ranges);
spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
} else if (Subtarget.hasLFIWAX() &&
canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
@@ -6459,15 +6707,15 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
SINT.getOperand(0).getValueType() == MVT::i32) {
MachineFrameInfo *FrameInfo = MF.getFrameInfo();
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
int FrameIdx = FrameInfo->CreateStackObject(4, 4, false);
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
- SDValue Store = DAG.getStore(
- DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx),
- false, false, 0);
+ SDValue Store =
+ DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
+ MachinePointerInfo::getFixedStack(
+ DAG.getMachineFunction(), FrameIdx));
assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
"Expected an i32 store");
@@ -6505,7 +6753,7 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
// then lfd it and fcfid it.
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *FrameInfo = MF.getFrameInfo();
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
+ EVT PtrVT = getPointerTy(MF.getDataLayout());
SDValue Ld;
if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
@@ -6516,10 +6764,10 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
int FrameIdx = FrameInfo->CreateStackObject(4, 4, false);
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
- SDValue Store = DAG.getStore(
- DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx),
- false, false, 0);
+ SDValue Store =
+ DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
+ MachinePointerInfo::getFixedStack(
+ DAG.getMachineFunction(), FrameIdx));
assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
"Expected an i32 store");
@@ -6554,14 +6802,12 @@ SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
// STD the extended value into the stack slot.
SDValue Store = DAG.getStore(
DAG.getEntryNode(), dl, Ext64, FIdx,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx),
- false, false, 0);
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
// Load the value as a double.
Ld = DAG.getLoad(
MVT::f64, dl, Store, FIdx,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx),
- false, false, false, 0);
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
}
// FCFID it and return it.
@@ -6596,7 +6842,7 @@ SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
MachineFunction &MF = DAG.getMachineFunction();
EVT VT = Op.getValueType();
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
+ EVT PtrVT = getPointerTy(MF.getDataLayout());
// Save FP Control Word to register
EVT NodeTys[] = {
@@ -6608,14 +6854,13 @@ SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
// Save FP register to stack slot
int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
- SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
- StackSlot, MachinePointerInfo(), false, false,0);
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
+ MachinePointerInfo());
// Load FP Control Word from low 32 bits of stack slot.
SDValue Four = DAG.getConstant(4, dl, PtrVT);
SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
- SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
- false, false, false, 0);
+ SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());
// Transform as necessary
SDValue CWD1 =
@@ -6730,7 +6975,7 @@ SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
/// BuildSplatI - Build a canonical splati of Val with an element size of
/// SplatSize. Cast the result to VT.
static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
- SelectionDAG &DAG, SDLoc dl) {
+ SelectionDAG &DAG, const SDLoc &dl) {
assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
static const MVT VTys[] = { // canonical VT to use for each size.
@@ -6746,18 +6991,13 @@ static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
EVT CanonicalVT = VTys[SplatSize-1];
// Build a canonical splat for this value.
- SDValue Elt = DAG.getConstant(Val, dl, MVT::i32);
- SmallVector<SDValue, 8> Ops;
- Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
- SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, Ops);
- return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res);
+ return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
}
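// A small standalone illustration of the canonicalization above, assuming
// only the C++ standard library: a byte splat has the same bit pattern no
// matter which lane width the result is later bitcast to, which is why one
// vector constant in the canonical VT plus a bitcast suffices.
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  unsigned char Vec[16];
  for (unsigned char &B : Vec)
    B = 0x05;                    // SplatSize == 1: vspltisb-style fill

  uint32_t Words[4];
  std::memcpy(Words, Vec, 16);   // the BITCAST to a wider-lane view
  for (uint32_t W : Words)
    assert(W == 0x05050505u);    // same bits, v4i32 lanes
  return 0;
}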
/// BuildIntrinsicOp - Return a unary operator intrinsic node with the
/// specified intrinsic ID.
-static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op,
- SelectionDAG &DAG, SDLoc dl,
- EVT DestVT = MVT::Other) {
+static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
+ const SDLoc &dl, EVT DestVT = MVT::Other) {
if (DestVT == MVT::Other) DestVT = Op.getValueType();
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
DAG.getConstant(IID, dl, MVT::i32), Op);
@@ -6766,7 +7006,7 @@ static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op,
/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
- SelectionDAG &DAG, SDLoc dl,
+ SelectionDAG &DAG, const SDLoc &dl,
EVT DestVT = MVT::Other) {
if (DestVT == MVT::Other) DestVT = LHS.getValueType();
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
@@ -6776,8 +7016,8 @@ static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
- SDValue Op2, SelectionDAG &DAG,
- SDLoc dl, EVT DestVT = MVT::Other) {
+ SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
+ EVT DestVT = MVT::Other) {
if (DestVT == MVT::Other) DestVT = Op0.getValueType();
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
@@ -6785,8 +7025,8 @@ static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount. The result has the specified value type.
-static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
- EVT VT, SelectionDAG &DAG, SDLoc dl) {
+static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
+ SelectionDAG &DAG, const SDLoc &dl) {
// Force LHS/RHS to be the right type.
LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
@@ -6825,7 +7065,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
bool IsConst = true;
for (unsigned i = 0; i < 4; ++i) {
- if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) continue;
+ if (BVN->getOperand(i).isUndef()) continue;
if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
IsConst = false;
break;
@@ -6838,12 +7078,12 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
Constant *NegOne =
ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);
- SmallVector<Constant*, 4> CV(4, NegOne);
+ Constant *CV[4];
for (unsigned i = 0; i < 4; ++i) {
- if (BVN->getOperand(i).getOpcode() == ISD::UNDEF)
+ if (BVN->getOperand(i).isUndef())
CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
else if (isNullConstant(BVN->getOperand(i)))
- continue;
+ CV[i] = NegOne;
else
CV[i] = One;
}
@@ -6852,15 +7092,8 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()),
16 /* alignment */);
- SmallVector<SDValue, 2> Ops;
- Ops.push_back(DAG.getEntryNode());
- Ops.push_back(CPIdx);
-
- SmallVector<EVT, 2> ValueVTs;
- ValueVTs.push_back(MVT::v4i1);
- ValueVTs.push_back(MVT::Other); // chain
- SDVTList VTs = DAG.getVTList(ValueVTs);
-
+ SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
+ SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
return DAG.getMemIntrinsicNode(
PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
@@ -6868,7 +7101,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
SmallVector<SDValue, 4> Stores;
for (unsigned i = 0; i < 4; ++i) {
- if (BVN->getOperand(i).getOpcode() == ISD::UNDEF) continue;
+ if (BVN->getOperand(i).isUndef()) continue;
unsigned Offset = 4*i;
SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
@@ -6876,19 +7109,16 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
if (StoreSize > 4) {
- Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl,
- BVN->getOperand(i), Idx,
- PtrInfo.getWithOffset(Offset),
- MVT::i32, false, false, 0));
+ Stores.push_back(
+ DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
+ PtrInfo.getWithOffset(Offset), MVT::i32));
} else {
SDValue StoreValue = BVN->getOperand(i);
if (StoreSize < 4)
StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);
- Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl,
- StoreValue, Idx,
- PtrInfo.getWithOffset(Offset),
- false, false, 0));
+ Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
+ PtrInfo.getWithOffset(Offset)));
}
}
@@ -6903,15 +7133,10 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
// is typed as v4f64 because the QPX register integer states are not
// explicitly represented.
- SmallVector<SDValue, 2> Ops;
- Ops.push_back(StoreChain);
- Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32));
- Ops.push_back(FIdx);
-
- SmallVector<EVT, 2> ValueVTs;
- ValueVTs.push_back(MVT::v4f64);
- ValueVTs.push_back(MVT::Other); // chain
- SDVTList VTs = DAG.getVTList(ValueVTs);
+ SDValue Ops[] = {StoreChain,
+ DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
+ FIdx};
+ SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});
SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
dl, VTs, Ops, MVT::v4i32, PtrInfo);
@@ -6919,9 +7144,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
LoadedVect);
- SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::f64);
- FPZeros = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64,
- FPZeros, FPZeros, FPZeros, FPZeros);
+ SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);
return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
}
@@ -6949,8 +7172,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
if (SplatBits == 0) {
// Canonicalize all zero vectors to be v4i32.
if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
- SDValue Z = DAG.getConstant(0, dl, MVT::i32);
- Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
+ SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
}
return Op;
@@ -7089,7 +7311,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
SDValue RHS, SelectionDAG &DAG,
- SDLoc dl) {
+ const SDLoc &dl) {
unsigned OpNum = (PFEntry >> 26) & 0x0F;
unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
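// The perfect-shuffle table packs three fields into one 32-bit entry, which
// the three lines above decode. A standalone round trip of that packing,
// using the same shifts and masks (the sample field values are illustrative):
#include <cassert>

int main() {
  unsigned OpNum = 9, LHSID = 0x1ABC, RHSID = 0x0123; // 4-, 13- and 13-bit
  unsigned PFEntry = (OpNum << 26) | (LHSID << 13) | RHSID;

  assert(((PFEntry >> 26) & 0x0F) == OpNum);
  assert(((PFEntry >> 13) & ((1 << 13) - 1)) == LHSID);
  assert(((PFEntry >> 0) & ((1 << 13) - 1)) == RHSID);
  return 0;
}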
@@ -7175,11 +7397,50 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
EVT VT = Op.getValueType();
bool isLittleEndian = Subtarget.isLittleEndian();
+ unsigned ShiftElts, InsertAtByte;
+ bool Swap;
+ if (Subtarget.hasP9Vector() &&
+ PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
+ isLittleEndian)) {
+ if (Swap)
+ std::swap(V1, V2);
+ SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
+ SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
+ if (ShiftElts) {
+ SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
+ DAG.getConstant(ShiftElts, dl, MVT::i32));
+ SDValue Ins = DAG.getNode(PPCISD::XXINSERT, dl, MVT::v4i32, Conv1, Shl,
+ DAG.getConstant(InsertAtByte, dl, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
+ }
+ SDValue Ins = DAG.getNode(PPCISD::XXINSERT, dl, MVT::v4i32, Conv1, Conv2,
+ DAG.getConstant(InsertAtByte, dl, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
+ }
+
+ if (Subtarget.hasVSX()) {
+ if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
+ int SplatIdx = PPC::getVSPLTImmediate(SVOp, 4, DAG);
+ SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
+ SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
+ DAG.getConstant(SplatIdx, dl, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
+ }
+
+ // Left shifts of 8 bytes are actually swaps. Convert accordingly.
+ if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
+ SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
+ SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
+ }
+
+ }
+
if (Subtarget.hasQPX()) {
if (VT.getVectorNumElements() != 4)
return SDValue();
- if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
+ if (V2.isUndef()) V2 = V1;
int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
if (AlignIdx != -1) {
@@ -7192,9 +7453,6 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
SplatIdx -= 4;
}
- // FIXME: If SplatIdx == 0 and the input came from a load, then there is
- // nothing to do.
-
return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
DAG.getConstant(SplatIdx, dl, MVT::i32));
}
@@ -7217,7 +7475,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
// Cases that are handled by instructions that take permute immediates
// (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
// selected by the instruction selector.
- if (V2.getOpcode() == ISD::UNDEF) {
+ if (V2.isUndef()) {
if (PPC::isSplatShuffleMask(SVOp, 1) ||
PPC::isSplatShuffleMask(SVOp, 2) ||
PPC::isSplatShuffleMask(SVOp, 4) ||
@@ -7315,7 +7573,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
// Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
// vector that will get spilled to the constant pool.
- if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
+ if (V2.isUndef()) V2 = V1;
// The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
// that it is in input element units, not in bytes. Convert now.
@@ -7340,8 +7598,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
MVT::i32));
}
- SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
- ResultMask);
+ SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
if (isLittleEndian)
return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
V2, V1, VPermMask);
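// A standalone sketch of the element-to-byte conversion described above,
// assuming only the C++ standard library: a v4i32 shuffle index k expands to
// the four byte indices 4*k .. 4*k+3 in the v16i8 vperm mask.
#include <cassert>

int main() {
  const unsigned EltSize = 4;     // bytes per v4i32 element
  int EltMask[4] = {2, 0, 3, 1};  // an example mask, in element units
  unsigned ByteMask[16];
  for (unsigned i = 0; i != 4; ++i)
    for (unsigned j = 0; j != EltSize; ++j)
      ByteMask[i * EltSize + j] = EltMask[i] * EltSize + j;

  assert(ByteMask[0] == 8);       // element 2 starts at byte 8
  assert(ByteMask[15] == 7);      // element 1 ends at byte 7
  return 0;
}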
@@ -7468,6 +7725,16 @@ static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
/// lower, do it, otherwise return null.
SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
+ unsigned IntrinsicID =
+ cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+
+ if (IntrinsicID == Intrinsic::thread_pointer) {
+ // Reads the thread pointer register, used for __builtin_thread_pointer.
+ bool is64bit = Subtarget.isPPC64();
+ return DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
+ is64bit ? MVT::i64 : MVT::i32);
+ }
+
// If this is a lowered altivec predicate compare, CompareOpc is set to the
// opcode number of the comparison.
SDLoc dl(Op);
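// Caller-side view of the intrinsic handled above, as a sketch: with a
// toolchain that provides __builtin_thread_pointer() for the target (GCC and
// Clang do on PowerPC), the builtin lowers to a plain read of r13 (64-bit)
// or r2 (32-bit) rather than a call.
#include <cstdio>

int main() {
  void *TP = __builtin_thread_pointer(); // requires target support
  std::printf("thread pointer: %p\n", TP);
  return 0;
}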
@@ -7566,12 +7833,10 @@ SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
// Store the input value into Value#0 of the stack slot.
- SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
- Op.getOperand(0), FIdx, MachinePointerInfo(),
- false, false, 0);
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
+ MachinePointerInfo());
// Load it out.
- return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(),
- false, false, false, 0);
+ return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
}
SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
@@ -7594,9 +7859,7 @@ SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
// FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
// understand how to form the extending load.
- SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64);
- FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64,
- FPHalfs, FPHalfs, FPHalfs, FPHalfs);
+ SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
@@ -7613,15 +7876,10 @@ SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
SDValue StoreChain = DAG.getEntryNode();
- SmallVector<SDValue, 2> Ops;
- Ops.push_back(StoreChain);
- Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32));
- Ops.push_back(Value);
- Ops.push_back(FIdx);
-
- SmallVector<EVT, 2> ValueVTs;
- ValueVTs.push_back(MVT::Other); // chain
- SDVTList VTs = DAG.getVTList(ValueVTs);
+ SDValue Ops[] = {StoreChain,
+ DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
+ Value, FIdx};
+ SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
dl, VTs, Ops, MVT::v4i32, PtrInfo);
@@ -7631,9 +7889,8 @@ SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
- SDValue IntVal = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
- PtrInfo.getWithOffset(Offset),
- false, false, false, 0);
+ SDValue IntVal =
+ DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));
if (!Subtarget.useCRBits())
return IntVal;
@@ -7662,24 +7919,20 @@ SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
ScalarMemVT = MemVT.getScalarType();
unsigned Stride = ScalarMemVT.getStoreSize();
- SmallVector<SDValue, 8> Vals, LoadChains;
+ SDValue Vals[4], LoadChains[4];
for (unsigned Idx = 0; Idx < 4; ++Idx) {
SDValue Load;
if (ScalarVT != ScalarMemVT)
- Load =
- DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
- BasePtr,
- LN->getPointerInfo().getWithOffset(Idx*Stride),
- ScalarMemVT, LN->isVolatile(), LN->isNonTemporal(),
- LN->isInvariant(), MinAlign(Alignment, Idx*Stride),
- LN->getAAInfo());
+ Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
+ BasePtr,
+ LN->getPointerInfo().getWithOffset(Idx * Stride),
+ ScalarMemVT, MinAlign(Alignment, Idx * Stride),
+ LN->getMemOperand()->getFlags(), LN->getAAInfo());
else
- Load =
- DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
- LN->getPointerInfo().getWithOffset(Idx*Stride),
- LN->isVolatile(), LN->isNonTemporal(),
- LN->isInvariant(), MinAlign(Alignment, Idx*Stride),
- LN->getAAInfo());
+ Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
+ LN->getPointerInfo().getWithOffset(Idx * Stride),
+ MinAlign(Alignment, Idx * Stride),
+ LN->getMemOperand()->getFlags(), LN->getAAInfo());
if (Idx == 0 && LN->isIndexed()) {
assert(LN->getAddressingMode() == ISD::PRE_INC &&
@@ -7688,8 +7941,8 @@ SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
LN->getAddressingMode());
}
- Vals.push_back(Load);
- LoadChains.push_back(Load.getValue(1));
+ Vals[Idx] = Load;
+ LoadChains[Idx] = Load.getValue(1);
BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
DAG.getConstant(Stride, dl,
@@ -7697,8 +7950,7 @@ SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
}
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
- SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl,
- Op.getValueType(), Vals);
+ SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);
if (LN->isIndexed()) {
SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
@@ -7715,23 +7967,20 @@ SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
// To lower v4i1 from a byte array, we load the byte elements of the
// vector and then reuse the BUILD_VECTOR logic.
- SmallVector<SDValue, 4> VectElmts, VectElmtChains;
+ SDValue VectElmts[4], VectElmtChains[4];
for (unsigned i = 0; i < 4; ++i) {
SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
- VectElmts.push_back(DAG.getExtLoad(ISD::EXTLOAD,
- dl, MVT::i32, LoadChain, Idx,
- LN->getPointerInfo().getWithOffset(i),
- MVT::i8 /* memory type */,
- LN->isVolatile(), LN->isNonTemporal(),
- LN->isInvariant(),
- 1 /* alignment */, LN->getAAInfo()));
- VectElmtChains.push_back(VectElmts[i].getValue(1));
+ VectElmts[i] = DAG.getExtLoad(
+ ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
+ LN->getPointerInfo().getWithOffset(i), MVT::i8,
+ /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
+ VectElmtChains[i] = VectElmts[i].getValue(1);
}
LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
- SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i1, VectElmts);
+ SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);
SDValue RVals[] = { Value, LoadChain };
return DAG.getMergeValues(RVals, dl);
@@ -7759,7 +8008,7 @@ SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
ScalarMemVT = MemVT.getScalarType();
unsigned Stride = ScalarMemVT.getStoreSize();
- SmallVector<SDValue, 8> Stores;
+ SDValue Stores[4];
for (unsigned Idx = 0; Idx < 4; ++Idx) {
SDValue Ex = DAG.getNode(
ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
@@ -7767,16 +8016,15 @@ SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
SDValue Store;
if (ScalarVT != ScalarMemVT)
Store =
- DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
- SN->getPointerInfo().getWithOffset(Idx*Stride),
- ScalarMemVT, SN->isVolatile(), SN->isNonTemporal(),
- MinAlign(Alignment, Idx*Stride), SN->getAAInfo());
+ DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
+ SN->getPointerInfo().getWithOffset(Idx * Stride),
+ ScalarMemVT, MinAlign(Alignment, Idx * Stride),
+ SN->getMemOperand()->getFlags(), SN->getAAInfo());
else
- Store =
- DAG.getStore(StoreChain, dl, Ex, BasePtr,
- SN->getPointerInfo().getWithOffset(Idx*Stride),
- SN->isVolatile(), SN->isNonTemporal(),
- MinAlign(Alignment, Idx*Stride), SN->getAAInfo());
+ Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
+ SN->getPointerInfo().getWithOffset(Idx * Stride),
+ MinAlign(Alignment, Idx * Stride),
+ SN->getMemOperand()->getFlags(), SN->getAAInfo());
if (Idx == 0 && SN->isIndexed()) {
assert(SN->getAddressingMode() == ISD::PRE_INC &&
@@ -7788,7 +8036,7 @@ SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
DAG.getConstant(Stride, dl,
BasePtr.getValueType()));
- Stores.push_back(Store);
+ Stores[Idx] = Store;
}
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
@@ -7811,9 +8059,7 @@ SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
// FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
// understand how to form the extending load.
- SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::f64);
- FPHalfs = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f64,
- FPHalfs, FPHalfs, FPHalfs, FPHalfs);
+ SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
@@ -7829,43 +8075,37 @@ SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
- SmallVector<SDValue, 2> Ops;
- Ops.push_back(StoreChain);
- Ops.push_back(DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32));
- Ops.push_back(Value);
- Ops.push_back(FIdx);
-
- SmallVector<EVT, 2> ValueVTs;
- ValueVTs.push_back(MVT::Other); // chain
- SDVTList VTs = DAG.getVTList(ValueVTs);
+ SDValue Ops[] = {StoreChain,
+ DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
+ Value, FIdx};
+ SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
dl, VTs, Ops, MVT::v4i32, PtrInfo);
// Move data into the byte array.
- SmallVector<SDValue, 4> Loads, LoadChains;
+ SDValue Loads[4], LoadChains[4];
for (unsigned i = 0; i < 4; ++i) {
unsigned Offset = 4*i;
SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
- Loads.push_back(DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
- PtrInfo.getWithOffset(Offset),
- false, false, false, 0));
- LoadChains.push_back(Loads[i].getValue(1));
+ Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
+ PtrInfo.getWithOffset(Offset));
+ LoadChains[i] = Loads[i].getValue(1);
}
StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
- SmallVector<SDValue, 4> Stores;
+ SDValue Stores[4];
for (unsigned i = 0; i < 4; ++i) {
SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
- Stores.push_back(DAG.getTruncStore(
+ Stores[i] = DAG.getTruncStore(
StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
- MVT::i8 /* memory type */, SN->isNonTemporal(), SN->isVolatile(),
- 1 /* alignment */, SN->getAAInfo()));
+ MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
+ SN->getAAInfo());
}
StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
@@ -7958,18 +8198,22 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
case ISD::VASTART:
- return LowerVASTART(Op, DAG, Subtarget);
+ return LowerVASTART(Op, DAG);
case ISD::VAARG:
- return LowerVAARG(Op, DAG, Subtarget);
+ return LowerVAARG(Op, DAG);
case ISD::VACOPY:
- return LowerVACOPY(Op, DAG, Subtarget);
+ return LowerVACOPY(Op, DAG);
+
+ case ISD::STACKRESTORE:
+ return LowerSTACKRESTORE(Op, DAG);
- case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, Subtarget);
case ISD::DYNAMIC_STACKALLOC:
- return LowerDYNAMIC_STACKALLOC(Op, DAG, Subtarget);
- case ISD::GET_DYNAMIC_AREA_OFFSET: return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG, Subtarget);
+ return LowerDYNAMIC_STACKALLOC(Op, DAG);
+
+ case ISD::GET_DYNAMIC_AREA_OFFSET:
+ return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
@@ -8048,7 +8292,7 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
EVT VT = N->getValueType(0);
if (VT == MVT::i64) {
- SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, Subtarget);
+ SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
Results.push_back(NewNode);
Results.push_back(NewNode.getValue(1));
@@ -8099,9 +8343,9 @@ static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
- if (Ord == SequentiallyConsistent)
+ if (Ord == AtomicOrdering::SequentiallyConsistent)
return callIntrinsic(Builder, Intrinsic::ppc_sync);
- if (isAtLeastRelease(Ord))
+ if (isReleaseOrStronger(Ord))
return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
return nullptr;
}
@@ -8109,7 +8353,7 @@ Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
- if (IsLoad && isAtLeastAcquire(Ord))
+ if (IsLoad && isAcquireOrStronger(Ord))
return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
// FIXME: this is too conservative, a dependent branch + isync is enough.
// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
@@ -8119,7 +8363,7 @@ Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
}
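// Taken together, the two hooks above encode the usual PPC fence placement:
// hwsync before a seq_cst access, lwsync before release-or-stronger ones,
// and lwsync after acquire-or-stronger loads. A standalone restatement of
// that decision table in terms of std::memory_order (the enum and function
// names here are illustrative, not LLVM API):
#include <atomic>
#include <cassert>

enum class PPCFence { None, LwSync, HwSync };

PPCFence leadingFence(std::memory_order Ord) {
  if (Ord == std::memory_order_seq_cst)
    return PPCFence::HwSync;                     // ppc_sync above
  if (Ord == std::memory_order_release || Ord == std::memory_order_acq_rel)
    return PPCFence::LwSync;                     // ppc_lwsync above
  return PPCFence::None;
}

PPCFence trailingFence(std::memory_order Ord, bool IsLoad) {
  bool AcquireOrStronger = Ord == std::memory_order_acquire ||
                           Ord == std::memory_order_acq_rel ||
                           Ord == std::memory_order_seq_cst;
  return (IsLoad && AcquireOrStronger) ? PPCFence::LwSync : PPCFence::None;
}

int main() {
  assert(leadingFence(std::memory_order_seq_cst) == PPCFence::HwSync);
  assert(trailingFence(std::memory_order_acquire, true) == PPCFence::LwSync);
  return 0;
}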
MachineBasicBlock *
-PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
+PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
unsigned AtomicSize,
unsigned BinOpcode) const {
// This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
@@ -8154,11 +8398,11 @@ PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
MachineFunction *F = BB->getParent();
MachineFunction::iterator It = ++BB->getIterator();
- unsigned dest = MI->getOperand(0).getReg();
- unsigned ptrA = MI->getOperand(1).getReg();
- unsigned ptrB = MI->getOperand(2).getReg();
- unsigned incr = MI->getOperand(3).getReg();
- DebugLoc dl = MI->getDebugLoc();
+ unsigned dest = MI.getOperand(0).getReg();
+ unsigned ptrA = MI.getOperand(1).getReg();
+ unsigned ptrB = MI.getOperand(2).getReg();
+ unsigned incr = MI.getOperand(3).getReg();
+ DebugLoc dl = MI.getDebugLoc();
MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
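// The loopMBB/exitMBB pair being built here is the lwarx/stwcx. retry loop.
// A portable analogue of the same shape, using compare_exchange_weak as a
// stand-in for the reservation (a sketch, not what this function emits):
#include <atomic>
#include <cassert>

int fetchAdd(std::atomic<int> &A, int Incr) {
  int Old = A.load(std::memory_order_relaxed);      // like the l[wd]arx
  while (!A.compare_exchange_weak(Old, Old + Incr)) // like stwcx.; bne loop
    ;                                               // Old reloaded on failure
  return Old;
}

int main() {
  std::atomic<int> A{40};
  assert(fetchAdd(A, 2) == 40 && A.load() == 42);
  return 0;
}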
@@ -8203,9 +8447,9 @@ PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
}
MachineBasicBlock *
-PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
+PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr &MI,
MachineBasicBlock *BB,
- bool is8bit, // operation
+ bool is8bit, // operation
unsigned BinOpcode) const {
// If we support part-word atomic mnemonics, just use them
if (Subtarget.hasPartwordAtomics())
@@ -8224,11 +8468,11 @@ PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
MachineFunction *F = BB->getParent();
MachineFunction::iterator It = ++BB->getIterator();
- unsigned dest = MI->getOperand(0).getReg();
- unsigned ptrA = MI->getOperand(1).getReg();
- unsigned ptrB = MI->getOperand(2).getReg();
- unsigned incr = MI->getOperand(3).getReg();
- DebugLoc dl = MI->getDebugLoc();
+ unsigned dest = MI.getOperand(0).getReg();
+ unsigned ptrA = MI.getOperand(1).getReg();
+ unsigned ptrB = MI.getOperand(2).getReg();
+ unsigned incr = MI.getOperand(3).getReg();
+ DebugLoc dl = MI.getDebugLoc();
MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
@@ -8334,10 +8578,10 @@ PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
return BB;
}
-llvm::MachineBasicBlock*
-PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
+llvm::MachineBasicBlock *
+PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const {
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineFunction *MF = MBB->getParent();
@@ -8347,10 +8591,10 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
MachineFunction::iterator I = ++MBB->getIterator();
// Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
+ MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
+ MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
- unsigned DstReg = MI->getOperand(0).getReg();
+ unsigned DstReg = MI.getOperand(0).getReg();
const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
assert(RC->hasType(MVT::i32) && "Invalid destination!");
unsigned mainDstReg = MRI.createVirtualRegister(RC);
@@ -8407,7 +8651,7 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
// Prepare the IP in a register.
const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
- unsigned BufReg = MI->getOperand(1).getReg();
+ unsigned BufReg = MI.getOperand(1).getReg();
if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
setUsesTOCBasePtr(*MBB->getParent());
@@ -8477,22 +8721,22 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
.addReg(mainDstReg).addMBB(mainMBB)
.addReg(restoreDstReg).addMBB(thisMBB);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return sinkMBB;
}
MachineBasicBlock *
-PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
+PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const {
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
// Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
+ MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
+ MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) &&
@@ -8507,10 +8751,8 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
unsigned BP =
(PVT == MVT::i64)
? PPC::X30
- : (Subtarget.isSVR4ABI() &&
- MF->getTarget().getRelocationModel() == Reloc::PIC_
- ? PPC::R29
- : PPC::R30);
+ : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
+ : PPC::R30);
MachineInstrBuilder MIB;
@@ -8519,7 +8761,7 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
const int64_t TOCOffset = 3 * PVT.getStoreSize();
const int64_t BPOffset = 4 * PVT.getStoreSize();
- unsigned BufReg = MI->getOperand(0).getReg();
+ unsigned BufReg = MI.getOperand(0).getReg();
// Reload FP (the jumped-to function may not have had a
// frame pointer, and if so, then its r31 will be restored
@@ -8586,34 +8828,34 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
- MI->eraseFromParent();
+ MI.eraseFromParent();
return MBB;
}
MachineBasicBlock *
-PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
- if (MI->getOpcode() == TargetOpcode::STACKMAP ||
- MI->getOpcode() == TargetOpcode::PATCHPOINT) {
+ if (MI.getOpcode() == TargetOpcode::STACKMAP ||
+ MI.getOpcode() == TargetOpcode::PATCHPOINT) {
if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() &&
- MI->getOpcode() == TargetOpcode::PATCHPOINT) {
+ MI.getOpcode() == TargetOpcode::PATCHPOINT) {
// Call lowering should have added an r2 operand to indicate a dependence
// on the TOC base pointer value. It can't however, because there is no
// way to mark the dependence as implicit there, and so the stackmap code
// will confuse it with a regular operand. Instead, add the dependence
// here.
setUsesTOCBasePtr(*BB->getParent());
- MI->addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
+ MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
}
return emitPatchPoint(MI, BB);
}
- if (MI->getOpcode() == PPC::EH_SjLj_SetJmp32 ||
- MI->getOpcode() == PPC::EH_SjLj_SetJmp64) {
+ if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
+ MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
return emitEHSjLjSetJmp(MI, BB);
- } else if (MI->getOpcode() == PPC::EH_SjLj_LongJmp32 ||
- MI->getOpcode() == PPC::EH_SjLj_LongJmp64) {
+ } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
+ MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
return emitEHSjLjLongJmp(MI, BB);
}
@@ -8626,44 +8868,43 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineFunction *F = BB->getParent();
- if (Subtarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 ||
- MI->getOpcode() == PPC::SELECT_CC_I8 ||
- MI->getOpcode() == PPC::SELECT_I4 ||
- MI->getOpcode() == PPC::SELECT_I8)) {
+ if (Subtarget.hasISEL() &&
+ (MI.getOpcode() == PPC::SELECT_CC_I4 ||
+ MI.getOpcode() == PPC::SELECT_CC_I8 ||
+ MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8)) {
SmallVector<MachineOperand, 2> Cond;
- if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
- MI->getOpcode() == PPC::SELECT_CC_I8)
- Cond.push_back(MI->getOperand(4));
+ if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
+ MI.getOpcode() == PPC::SELECT_CC_I8)
+ Cond.push_back(MI.getOperand(4));
else
Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
- Cond.push_back(MI->getOperand(1));
-
- DebugLoc dl = MI->getDebugLoc();
- TII->insertSelect(*BB, MI, dl, MI->getOperand(0).getReg(),
- Cond, MI->getOperand(2).getReg(),
- MI->getOperand(3).getReg());
- } else if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
- MI->getOpcode() == PPC::SELECT_CC_I8 ||
- MI->getOpcode() == PPC::SELECT_CC_F4 ||
- MI->getOpcode() == PPC::SELECT_CC_F8 ||
- MI->getOpcode() == PPC::SELECT_CC_QFRC ||
- MI->getOpcode() == PPC::SELECT_CC_QSRC ||
- MI->getOpcode() == PPC::SELECT_CC_QBRC ||
- MI->getOpcode() == PPC::SELECT_CC_VRRC ||
- MI->getOpcode() == PPC::SELECT_CC_VSFRC ||
- MI->getOpcode() == PPC::SELECT_CC_VSSRC ||
- MI->getOpcode() == PPC::SELECT_CC_VSRC ||
- MI->getOpcode() == PPC::SELECT_I4 ||
- MI->getOpcode() == PPC::SELECT_I8 ||
- MI->getOpcode() == PPC::SELECT_F4 ||
- MI->getOpcode() == PPC::SELECT_F8 ||
- MI->getOpcode() == PPC::SELECT_QFRC ||
- MI->getOpcode() == PPC::SELECT_QSRC ||
- MI->getOpcode() == PPC::SELECT_QBRC ||
- MI->getOpcode() == PPC::SELECT_VRRC ||
- MI->getOpcode() == PPC::SELECT_VSFRC ||
- MI->getOpcode() == PPC::SELECT_VSSRC ||
- MI->getOpcode() == PPC::SELECT_VSRC) {
+ Cond.push_back(MI.getOperand(1));
+
+ DebugLoc dl = MI.getDebugLoc();
+ TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
+ MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
+ } else if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
+ MI.getOpcode() == PPC::SELECT_CC_I8 ||
+ MI.getOpcode() == PPC::SELECT_CC_F4 ||
+ MI.getOpcode() == PPC::SELECT_CC_F8 ||
+ MI.getOpcode() == PPC::SELECT_CC_QFRC ||
+ MI.getOpcode() == PPC::SELECT_CC_QSRC ||
+ MI.getOpcode() == PPC::SELECT_CC_QBRC ||
+ MI.getOpcode() == PPC::SELECT_CC_VRRC ||
+ MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
+ MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
+ MI.getOpcode() == PPC::SELECT_CC_VSRC ||
+ MI.getOpcode() == PPC::SELECT_I4 ||
+ MI.getOpcode() == PPC::SELECT_I8 ||
+ MI.getOpcode() == PPC::SELECT_F4 ||
+ MI.getOpcode() == PPC::SELECT_F8 ||
+ MI.getOpcode() == PPC::SELECT_QFRC ||
+ MI.getOpcode() == PPC::SELECT_QSRC ||
+ MI.getOpcode() == PPC::SELECT_QBRC ||
+ MI.getOpcode() == PPC::SELECT_VRRC ||
+ MI.getOpcode() == PPC::SELECT_VSFRC ||
+ MI.getOpcode() == PPC::SELECT_VSSRC ||
+ MI.getOpcode() == PPC::SELECT_VSRC) {
// The incoming instruction knows the destination vreg to set, the
// condition code register to branch on, the true/false values to
// select between, and a branch opcode to use.
@@ -8677,7 +8918,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *thisMBB = BB;
MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
- DebugLoc dl = MI->getDebugLoc();
+ DebugLoc dl = MI.getDebugLoc();
F->insert(It, copy0MBB);
F->insert(It, sinkMBB);
@@ -8690,23 +8931,24 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
- if (MI->getOpcode() == PPC::SELECT_I4 ||
- MI->getOpcode() == PPC::SELECT_I8 ||
- MI->getOpcode() == PPC::SELECT_F4 ||
- MI->getOpcode() == PPC::SELECT_F8 ||
- MI->getOpcode() == PPC::SELECT_QFRC ||
- MI->getOpcode() == PPC::SELECT_QSRC ||
- MI->getOpcode() == PPC::SELECT_QBRC ||
- MI->getOpcode() == PPC::SELECT_VRRC ||
- MI->getOpcode() == PPC::SELECT_VSFRC ||
- MI->getOpcode() == PPC::SELECT_VSSRC ||
- MI->getOpcode() == PPC::SELECT_VSRC) {
+ if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
+ MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
+ MI.getOpcode() == PPC::SELECT_QFRC ||
+ MI.getOpcode() == PPC::SELECT_QSRC ||
+ MI.getOpcode() == PPC::SELECT_QBRC ||
+ MI.getOpcode() == PPC::SELECT_VRRC ||
+ MI.getOpcode() == PPC::SELECT_VSFRC ||
+ MI.getOpcode() == PPC::SELECT_VSSRC ||
+ MI.getOpcode() == PPC::SELECT_VSRC) {
BuildMI(BB, dl, TII->get(PPC::BC))
- .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
+ .addReg(MI.getOperand(1).getReg())
+ .addMBB(sinkMBB);
} else {
- unsigned SelectPred = MI->getOperand(4).getImm();
+ unsigned SelectPred = MI.getOperand(4).getImm();
BuildMI(BB, dl, TII->get(PPC::BCC))
- .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
+ .addImm(SelectPred)
+ .addReg(MI.getOperand(1).getReg())
+ .addMBB(sinkMBB);
}
// copy0MBB:
@@ -8721,11 +8963,12 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = sinkMBB;
- BuildMI(*BB, BB->begin(), dl,
- TII->get(PPC::PHI), MI->getOperand(0).getReg())
- .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
- .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
- } else if (MI->getOpcode() == PPC::ReadTB) {
+ BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
+ .addReg(MI.getOperand(3).getReg())
+ .addMBB(copy0MBB)
+ .addReg(MI.getOperand(2).getReg())
+ .addMBB(thisMBB);
+ } else if (MI.getOpcode() == PPC::ReadTB) {
// To read the 64-bit time-base register on a 32-bit target, we read the
// two halves. Should the counter have wrapped while it was being read, we
// need to try again.
@@ -8740,7 +8983,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
- DebugLoc dl = MI->getDebugLoc();
+ DebugLoc dl = MI.getDebugLoc();
F->insert(It, readMBB);
F->insert(It, sinkMBB);
@@ -8754,8 +8997,8 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineRegisterInfo &RegInfo = F->getRegInfo();
unsigned ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
- unsigned LoReg = MI->getOperand(0).getReg();
- unsigned HiReg = MI->getOperand(1).getReg();
+ unsigned LoReg = MI.getOperand(0).getReg();
+ unsigned HiReg = MI.getOperand(1).getReg();
BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
@@ -8770,81 +9013,80 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
BB->addSuccessor(readMBB);
BB->addSuccessor(sinkMBB);
- }
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
+ } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
- else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
+ else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
- else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8)
+ else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
- else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16)
+ else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
- else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32)
+ else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
BB = EmitAtomicBinary(MI, BB, 4, 0);
- else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64)
+ else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
BB = EmitAtomicBinary(MI, BB, 8, 0);
- else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
- MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
+ else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
+ MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
(Subtarget.hasPartwordAtomics() &&
- MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
+ MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
(Subtarget.hasPartwordAtomics() &&
- MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
- bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
+ MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
+ bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
auto LoadMnemonic = PPC::LDARX;
auto StoreMnemonic = PPC::STDCX;
- switch(MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default:
llvm_unreachable("Compare and swap of unknown size");
case PPC::ATOMIC_CMP_SWAP_I8:
@@ -8866,12 +9108,12 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
StoreMnemonic = PPC::STDCX;
break;
}
- unsigned dest = MI->getOperand(0).getReg();
- unsigned ptrA = MI->getOperand(1).getReg();
- unsigned ptrB = MI->getOperand(2).getReg();
- unsigned oldval = MI->getOperand(3).getReg();
- unsigned newval = MI->getOperand(4).getReg();
- DebugLoc dl = MI->getDebugLoc();
+ unsigned dest = MI.getOperand(0).getReg();
+ unsigned ptrA = MI.getOperand(1).getReg();
+ unsigned ptrB = MI.getOperand(2).getReg();
+ unsigned oldval = MI.getOperand(3).getReg();
+ unsigned newval = MI.getOperand(4).getReg();
+ DebugLoc dl = MI.getDebugLoc();
MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
@@ -8928,20 +9170,20 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// exitMBB:
// ...
BB = exitMBB;
- } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
- MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
+ } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
+ MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
// We must use 64-bit registers for addresses when targeting 64-bit,
// since we're actually doing arithmetic on them. Other registers
// can be 32-bit.
bool is64bit = Subtarget.isPPC64();
- bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
+ bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
- unsigned dest = MI->getOperand(0).getReg();
- unsigned ptrA = MI->getOperand(1).getReg();
- unsigned ptrB = MI->getOperand(2).getReg();
- unsigned oldval = MI->getOperand(3).getReg();
- unsigned newval = MI->getOperand(4).getReg();
- DebugLoc dl = MI->getDebugLoc();
+ unsigned dest = MI.getOperand(0).getReg();
+ unsigned ptrA = MI.getOperand(1).getReg();
+ unsigned ptrB = MI.getOperand(2).getReg();
+ unsigned oldval = MI.getOperand(3).getReg();
+ unsigned newval = MI.getOperand(4).getReg();
+ DebugLoc dl = MI.getDebugLoc();
MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
@@ -9076,14 +9318,14 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
BB = exitMBB;
BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW),dest).addReg(TmpReg)
.addReg(ShiftReg);
- } else if (MI->getOpcode() == PPC::FADDrtz) {
+ } else if (MI.getOpcode() == PPC::FADDrtz) {
// This pseudo performs an FADD with rounding mode temporarily forced
// to round-to-zero. We emit this via custom inserter since the FPSCR
// is not modeled at the SelectionDAG level.
- unsigned Dest = MI->getOperand(0).getReg();
- unsigned Src1 = MI->getOperand(1).getReg();
- unsigned Src2 = MI->getOperand(2).getReg();
- DebugLoc dl = MI->getDebugLoc();
+ unsigned Dest = MI.getOperand(0).getReg();
+ unsigned Src1 = MI.getOperand(1).getReg();
+ unsigned Src2 = MI.getOperand(2).getReg();
+ DebugLoc dl = MI.getDebugLoc();
MachineRegisterInfo &RegInfo = F->getRegInfo();
unsigned MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
@@ -9100,29 +9342,31 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Restore FPSCR value.
BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
- } else if (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT ||
- MI->getOpcode() == PPC::ANDIo_1_GT_BIT ||
- MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
- MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) {
- unsigned Opcode = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
- MI->getOpcode() == PPC::ANDIo_1_GT_BIT8) ?
- PPC::ANDIo8 : PPC::ANDIo;
- bool isEQ = (MI->getOpcode() == PPC::ANDIo_1_EQ_BIT ||
- MI->getOpcode() == PPC::ANDIo_1_EQ_BIT8);
+ } else if (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
+ MI.getOpcode() == PPC::ANDIo_1_GT_BIT ||
+ MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
+ MI.getOpcode() == PPC::ANDIo_1_GT_BIT8) {
+ unsigned Opcode = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
+ MI.getOpcode() == PPC::ANDIo_1_GT_BIT8)
+ ? PPC::ANDIo8
+ : PPC::ANDIo;
+ bool isEQ = (MI.getOpcode() == PPC::ANDIo_1_EQ_BIT ||
+ MI.getOpcode() == PPC::ANDIo_1_EQ_BIT8);
MachineRegisterInfo &RegInfo = F->getRegInfo();
unsigned Dest = RegInfo.createVirtualRegister(Opcode == PPC::ANDIo ?
&PPC::GPRCRegClass :
&PPC::G8RCRegClass);
- DebugLoc dl = MI->getDebugLoc();
+ DebugLoc dl = MI.getDebugLoc();
BuildMI(*BB, MI, dl, TII->get(Opcode), Dest)
- .addReg(MI->getOperand(1).getReg()).addImm(1);
+ .addReg(MI.getOperand(1).getReg())
+ .addImm(1);
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY),
- MI->getOperand(0).getReg())
- .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
- } else if (MI->getOpcode() == PPC::TCHECK_RET) {
- DebugLoc Dl = MI->getDebugLoc();
+ MI.getOperand(0).getReg())
+ .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
+ } else if (MI.getOpcode() == PPC::TCHECK_RET) {
+ DebugLoc Dl = MI.getDebugLoc();
MachineRegisterInfo &RegInfo = F->getRegInfo();
unsigned CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
@@ -9131,7 +9375,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
llvm_unreachable("Unexpected instr type to insert");
}
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -9650,14 +9894,18 @@ SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
}
+ std::list<HandleSDNode> PromOpHandles;
+ for (auto &PromOp : PromOps)
+ PromOpHandles.emplace_back(PromOp);
+
// Replace all operations (these are all the same, but have a different
// (i1) return type). DAG.getNode will validate that the types of
// a binary operator match, so go through the list in reverse so that
// we've likely promoted both operands first. Any intermediate truncations or
// extensions disappear.
- while (!PromOps.empty()) {
- SDValue PromOp = PromOps.back();
- PromOps.pop_back();
+ while (!PromOpHandles.empty()) {
+ SDValue PromOp = PromOpHandles.back().getValue();
+ PromOpHandles.pop_back();
if (PromOp.getOpcode() == ISD::TRUNCATE ||
PromOp.getOpcode() == ISD::SIGN_EXTEND ||
@@ -9666,7 +9914,7 @@ SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
PromOp.getOperand(0).getValueType() != MVT::i1) {
// The operand is not yet ready (see comment below).
- PromOps.insert(PromOps.begin(), PromOp);
+ PromOpHandles.emplace_front(PromOp);
continue;
}
@@ -9693,7 +9941,7 @@ SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
// promoted (this should be rare because we're going through the
// list backward, but if one of the operands has several users in
// this cluster of to-be-promoted nodes, it is possible).
- PromOps.insert(PromOps.begin(), PromOp);
+ PromOpHandles.emplace_front(PromOp);
continue;
}
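
The switch from raw SDValues to a std::list of HandleSDNodes above is a
lifetime fix: a raw SDValue can dangle if its node is replaced while the
DAG is mutated, whereas a HandleSDNode registers itself as a use and is
updated on replaceAllUsesWith. A minimal sketch of the idea (illustrative
helper, not the actual combine):

    #include "llvm/CodeGen/SelectionDAGNodes.h"

    llvm::SDValue trackAcrossRAUW(llvm::SDValue V) {
      llvm::HandleSDNode Handle(V); // kept up to date across RAUW
      // ... DAG mutations that may replace V's node happen here ...
      return Handle.getValue();     // still valid afterwards
    }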
@@ -9900,13 +10148,17 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
}
+ std::list<HandleSDNode> PromOpHandles;
+ for (auto &PromOp : PromOps)
+ PromOpHandles.emplace_back(PromOp);
+
// Replace all operations (these are all the same, but have a different
// (promoted) return type). DAG.getNode will validate that the types of
// a binary operator match, so go through the list in reverse so that
// we've likely promoted both operands first.
- while (!PromOps.empty()) {
- SDValue PromOp = PromOps.back();
- PromOps.pop_back();
+ while (!PromOpHandles.empty()) {
+ SDValue PromOp = PromOpHandles.back().getValue();
+ PromOpHandles.pop_back();
unsigned C;
switch (PromOp.getOpcode()) {
@@ -9923,7 +10175,7 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
// promoted (this should be rare because we're going through the
// list backward, but if one of the operands has several users in
// this cluster of to-be-promoted nodes, it is possible).
- PromOps.insert(PromOps.begin(), PromOp);
+ PromOpHandles.emplace_front(PromOp);
continue;
}
@@ -9935,7 +10187,7 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
(SelectTruncOp[1].count(PromOp.getNode()) &&
PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
- PromOps.insert(PromOps.begin(), PromOp);
+ PromOpHandles.emplace_front(PromOp);
continue;
}
}
@@ -9997,6 +10249,59 @@ SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
ShiftCst);
}
+SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ assert(N->getOpcode() == ISD::BUILD_VECTOR &&
+ "Should be called with a BUILD_VECTOR node");
+
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc dl(N);
+ if (N->getValueType(0) != MVT::v2f64 || !Subtarget.hasVSX())
+ return SDValue();
+
+ // Looking for:
+  // (build_vector ([su]int_to_fp (extractelt 0)),
+  //               ([su]int_to_fp (extractelt 1)))
+ if (N->getOperand(0).getOpcode() != ISD::SINT_TO_FP &&
+ N->getOperand(0).getOpcode() != ISD::UINT_TO_FP)
+ return SDValue();
+ if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
+ N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
+ return SDValue();
+ if (N->getOperand(0).getOpcode() != N->getOperand(1).getOpcode())
+ return SDValue();
+
+ SDValue Ext1 = N->getOperand(0).getOperand(0);
+ SDValue Ext2 = N->getOperand(1).getOperand(0);
+  if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return SDValue();
+
+ ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
+ ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
+ if (!Ext1Op || !Ext2Op)
+ return SDValue();
+  if (Ext1.getValueType() != MVT::i32 ||
+      Ext2.getValueType() != MVT::i32)
+    return SDValue();
+  // Both extracts must read from the same source vector.
+  if (Ext1.getOperand(0) != Ext2.getOperand(0))
+    return SDValue();
+
+ int FirstElem = Ext1Op->getZExtValue();
+ int SecondElem = Ext2Op->getZExtValue();
+ int SubvecIdx;
+ if (FirstElem == 0 && SecondElem == 1)
+ SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
+ else if (FirstElem == 2 && SecondElem == 3)
+ SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
+ else
+ return SDValue();
+
+ SDValue SrcVec = Ext1.getOperand(0);
+ auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
+ PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
+ return DAG.getNode(NodeType, dl, MVT::v2f64,
+ SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
+}
+
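
Viewed as scalar code, the pattern DAGCombineBuildVector matches is the
following (a sketch; the struct types are illustrative, not LLVM types):

    struct V4I32 { int W[4]; };
    struct V2F64 { double D[2]; };

    // build_vector (sint_to_fp (extractelt Src, 0)),
    //              (sint_to_fp (extractelt Src, 1))
    // Both lanes convert adjacent elements of the same source vector,
    // so the whole expression can become a single conversion of the
    // proper doubleword half (SINT_VEC_TO_FP / UINT_VEC_TO_FP).
    V2F64 ConvertLowPair(const V4I32 &Src) {
      return {{static_cast<double>(Src.W[0]),
               static_cast<double>(Src.W[1])}};
    }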
SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
DAGCombinerInfo &DCI) const {
assert((N->getOpcode() == ISD::SINT_TO_FP ||
@@ -10109,13 +10414,24 @@ SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
MVT VecTy = N->getValueType(0).getSimpleVT();
SDValue LoadOps[] = { Chain, Base };
SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
- DAG.getVTList(VecTy, MVT::Other),
- LoadOps, VecTy, MMO);
+ DAG.getVTList(MVT::v2f64, MVT::Other),
+ LoadOps, MVT::v2f64, MMO);
+
DCI.AddToWorklist(Load.getNode());
Chain = Load.getValue(1);
- SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
- DAG.getVTList(VecTy, MVT::Other), Chain, Load);
+ SDValue Swap = DAG.getNode(
+ PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
DCI.AddToWorklist(Swap.getNode());
+
+ // Add a bitcast if the resulting load type doesn't match v2f64.
+ if (VecTy != MVT::v2f64) {
+ SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
+ DCI.AddToWorklist(N.getNode());
+ // Package {bitcast value, swap's chain} to match Load's shape.
+ return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
+ N, Swap.getValue(1));
+ }
+
return Swap;
}
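
The reason both the load and the swap are now pinned to v2f64: lxvd2x
always loads doublewords in big-endian element order, so on little-endian
targets the two halves arrive swapped and xxswapd restores them; doing
this at one canonical type and bitcasting afterwards leaves a single
pattern for later swap elimination. A little-endian illustration (a
sketch, not target code):

    #include <algorithm>
    #include <cstdint>

    void LoadV2F64OnLE(const uint64_t Mem[2], uint64_t Reg[2]) {
      // After lxvd2x, the LE view of the register has the halves swapped.
      Reg[0] = Mem[1];
      Reg[1] = Mem[0];
      // xxswapd swaps the doublewords back into memory order.
      std::swap(Reg[0], Reg[1]);
    }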
@@ -10159,8 +10475,15 @@ SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
SDValue Src = N->getOperand(SrcOpnd);
MVT VecTy = Src.getValueType().getSimpleVT();
+
+  // All stores are done as v2f64, with a bitcast inserted first if needed.
+ if (VecTy != MVT::v2f64) {
+ Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
+ DCI.AddToWorklist(Src.getNode());
+ }
+
SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
- DAG.getVTList(VecTy, MVT::Other), Chain, Src);
+ DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
DCI.AddToWorklist(Swap.getNode());
Chain = Swap.getValue(1);
SDValue StoreOps[] = { Chain, Swap, Base };
@@ -10277,6 +10600,111 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
return expandVSXLoadForLE(N, DCI);
}
+ // We sometimes end up with a 64-bit integer load, from which we extract
+ // two single-precision floating-point numbers. This happens with
+ // std::complex<float>, and other similar structures, because of the way we
+ // canonicalize structure copies. However, if we lack direct moves,
+ // then the final bitcasts from the extracted integer values to the
+ // floating-point numbers turn into store/load pairs. Even with direct moves,
+ // just loading the two floating-point numbers is likely better.
+ auto ReplaceTwoFloatLoad = [&]() {
+ if (VT != MVT::i64)
+ return false;
+
+ if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
+ LD->isVolatile())
+ return false;
+
+ // We're looking for a sequence like this:
+ // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
+ // t16: i64 = srl t13, Constant:i32<32>
+ // t17: i32 = truncate t16
+ // t18: f32 = bitcast t17
+ // t19: i32 = truncate t13
+ // t20: f32 = bitcast t19
+
+ if (!LD->hasNUsesOfValue(2, 0))
+ return false;
+
+ auto UI = LD->use_begin();
+ while (UI.getUse().getResNo() != 0) ++UI;
+ SDNode *Trunc = *UI++;
+ while (UI.getUse().getResNo() != 0) ++UI;
+ SDNode *RightShift = *UI;
+ if (Trunc->getOpcode() != ISD::TRUNCATE)
+ std::swap(Trunc, RightShift);
+
+ if (Trunc->getOpcode() != ISD::TRUNCATE ||
+ Trunc->getValueType(0) != MVT::i32 ||
+ !Trunc->hasOneUse())
+ return false;
+ if (RightShift->getOpcode() != ISD::SRL ||
+ !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
+ RightShift->getConstantOperandVal(1) != 32 ||
+ !RightShift->hasOneUse())
+ return false;
+
+ SDNode *Trunc2 = *RightShift->use_begin();
+ if (Trunc2->getOpcode() != ISD::TRUNCATE ||
+ Trunc2->getValueType(0) != MVT::i32 ||
+ !Trunc2->hasOneUse())
+ return false;
+
+ SDNode *Bitcast = *Trunc->use_begin();
+ SDNode *Bitcast2 = *Trunc2->use_begin();
+
+ if (Bitcast->getOpcode() != ISD::BITCAST ||
+ Bitcast->getValueType(0) != MVT::f32)
+ return false;
+ if (Bitcast2->getOpcode() != ISD::BITCAST ||
+ Bitcast2->getValueType(0) != MVT::f32)
+ return false;
+
+ if (Subtarget.isLittleEndian())
+ std::swap(Bitcast, Bitcast2);
+
+ // Bitcast has the second float (in memory-layout order) and Bitcast2
+ // has the first one.
+
+ SDValue BasePtr = LD->getBasePtr();
+ if (LD->isIndexed()) {
+ assert(LD->getAddressingMode() == ISD::PRE_INC &&
+ "Non-pre-inc AM on PPC?");
+ BasePtr =
+ DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
+ LD->getOffset());
+ }
+
+ auto MMOFlags =
+ LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
+ SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
+ LD->getPointerInfo(), LD->getAlignment(),
+ MMOFlags, LD->getAAInfo());
+ SDValue AddPtr =
+ DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
+ BasePtr, DAG.getIntPtrConstant(4, dl));
+ SDValue FloatLoad2 = DAG.getLoad(
+ MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
+ LD->getPointerInfo().getWithOffset(4),
+ MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
+
+ if (LD->isIndexed()) {
+ // Note that DAGCombine should re-form any pre-increment load(s) from
+ // what is produced here if that makes sense.
+ DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
+ }
+
+ DCI.CombineTo(Bitcast2, FloatLoad);
+ DCI.CombineTo(Bitcast, FloatLoad2);
+
+ DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
+ SDValue(FloatLoad2.getNode(), 1));
+ return true;
+ };
+
+ if (ReplaceTwoFloatLoad())
+ return SDValue(N, 0);
+
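
A source-level pattern that produces exactly the DAG sequence shown in the
comment above (a sketch):

    #include <complex>

    // Copying a std::complex<float> is canonicalized into one 64-bit
    // integer load; the two float halves are then recovered through
    // srl/truncate/bitcast, which this combine turns back into two
    // f32 loads.
    std::complex<float> copyComplex(const std::complex<float> &C) {
      return C;
    }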
EVT MemVT = LD->getMemoryVT();
Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
@@ -10710,6 +11138,8 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
}
break;
}
+ case ISD::BUILD_VECTOR:
+ return DAGCombineBuildVector(N, DCI);
}
return SDValue();
@@ -10801,7 +11231,8 @@ unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
case PPC::DIR_PWR6:
case PPC::DIR_PWR6X:
case PPC::DIR_PWR7:
- case PPC::DIR_PWR8: {
+ case PPC::DIR_PWR8:
+ case PPC::DIR_PWR9: {
if (!ML)
break;
@@ -10812,7 +11243,7 @@ unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
uint64_t LoopSize = 0;
for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
- LoopSize += TII->GetInstSizeInBytes(J);
+ LoopSize += TII->GetInstSizeInBytes(*J);
if (LoopSize > 32)
break;
}
@@ -10837,6 +11268,7 @@ PPCTargetLowering::getConstraintType(StringRef Constraint) const {
case 'b':
case 'r':
case 'f':
+ case 'd':
case 'v':
case 'y':
return C_RegisterClass;
@@ -10928,6 +11360,10 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
if (VT == MVT::i64 && Subtarget.isPPC64())
return std::make_pair(0U, &PPC::G8RCRegClass);
return std::make_pair(0U, &PPC::GPRCRegClass);
+    // The 'd' and 'f' constraints are both defined as "the floating-point
+    // registers", one for 32-bit values and one for 64-bit values. The
+    // distinction does not matter for register-class selection here, so
+    // give both the same register classes.
+ case 'd':
case 'f':
if (VT == MVT::f32 || VT == MVT::i32)
return std::make_pair(0U, &PPC::F4RCRegClass);
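
Inline-asm usage the new 'd' constraint enables, mirroring 'f' (a sketch;
only meaningful when compiling for PowerPC):

    // fmul is a real PPC FP instruction; the wrapper is illustrative.
    double squarePPC(double X) {
      double R;
      asm("fmul %0, %1, %1" : "=d"(R) : "d"(X));
      return R;
    }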
@@ -11126,13 +11562,13 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
isPPC64 ? MVT::i64 : MVT::i32);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
- MachinePointerInfo(), false, false, false, 0);
+ MachinePointerInfo());
}
// Just load the return address off the stack.
SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
- MachinePointerInfo(), false, false, false, 0);
+ MachinePointerInfo());
}
SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
@@ -11144,7 +11580,7 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
MachineFrameInfo *MFI = MF.getFrameInfo();
MFI->setFrameAddressIsTaken(true);
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout());
+ EVT PtrVT = getPointerTy(MF.getDataLayout());
bool isPPC64 = PtrVT == MVT::i64;
// Naked functions never have a frame pointer, and so we use r1. For all
@@ -11159,8 +11595,7 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
PtrVT);
while (Depth--)
FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
- FrameAddr, MachinePointerInfo(), false, false,
- false, 0);
+ FrameAddr, MachinePointerInfo());
return FrameAddr;
}
@@ -11567,10 +12002,8 @@ PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
if (VT == MVT::v2i64)
return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
- if (Subtarget.hasQPX()) {
- if (VT == MVT::v4f32 || VT == MVT::v4f64 || VT == MVT::v4i1)
- return true;
- }
+ if (Subtarget.hasVSX() || Subtarget.hasQPX())
+ return true;
return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}
@@ -11588,3 +12021,70 @@ PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo) const {
return PPC::createFastISel(FuncInfo, LibInfo);
}
+
+void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
+ if (Subtarget.isDarwinABI()) return;
+ if (!Subtarget.isPPC64()) return;
+
+ // Update IsSplitCSR in PPCFunctionInfo
+ PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
+ PFI->setIsSplitCSR(true);
+}
+
+void PPCTargetLowering::insertCopiesSplitCSR(
+ MachineBasicBlock *Entry,
+ const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
+ const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
+ const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
+ if (!IStart)
+ return;
+
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+ MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
+ MachineBasicBlock::iterator MBBI = Entry->begin();
+ for (const MCPhysReg *I = IStart; *I; ++I) {
+ const TargetRegisterClass *RC = nullptr;
+ if (PPC::G8RCRegClass.contains(*I))
+ RC = &PPC::G8RCRegClass;
+ else if (PPC::F8RCRegClass.contains(*I))
+ RC = &PPC::F8RCRegClass;
+ else if (PPC::CRRCRegClass.contains(*I))
+ RC = &PPC::CRRCRegClass;
+ else if (PPC::VRRCRegClass.contains(*I))
+ RC = &PPC::VRRCRegClass;
+ else
+ llvm_unreachable("Unexpected register class in CSRsViaCopy!");
+
+ unsigned NewVR = MRI->createVirtualRegister(RC);
+ // Create copy from CSR to a virtual register.
+ // FIXME: this currently does not emit CFI pseudo-instructions, it works
+ // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
+ // nounwind. If we want to generalize this later, we may need to emit
+ // CFI pseudo-instructions.
+ assert(Entry->getParent()->getFunction()->hasFnAttribute(
+ Attribute::NoUnwind) &&
+ "Function should be nounwind in insertCopiesSplitCSR!");
+ Entry->addLiveIn(*I);
+ BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
+ .addReg(*I);
+
+    // Insert the copy-back instructions right before each exit block's
+    // terminator.
+ for (auto *Exit : Exits)
+ BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
+ TII->get(TargetOpcode::COPY), *I)
+ .addReg(NewVR);
+ }
+}
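
A minimal sketch of a function the split-CSR machinery applies to:
CXX_FAST_TLS plus nounwind, as supportSplitCSR and the assertion above
require (the function name and linkage are illustrative):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Module.h"

    llvm::Function *makeSplitCSRCandidate(llvm::Module &M) {
      auto *FT = llvm::FunctionType::get(
          llvm::Type::getVoidTy(M.getContext()), /*isVarArg=*/false);
      auto *F = llvm::Function::Create(
          FT, llvm::Function::ExternalLinkage, "tls_init_fn", &M);
      F->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      F->addFnAttr(llvm::Attribute::NoUnwind);
      return F;
    }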
+
+// Override to enable LOAD_STACK_GUARD lowering on Linux.
+bool PPCTargetLowering::useLoadStackGuardNode() const {
+ if (!Subtarget.isTargetLinux())
+ return TargetLowering::useLoadStackGuardNode();
+ return true;
+}
+
+// Override to skip declaring the __stack_chk_guard global on Linux, where
+// the guard value is reached through LOAD_STACK_GUARD instead.
+void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
+ if (!Subtarget.isTargetLinux())
+ return TargetLowering::insertSSPDeclarations(M);
+}
diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index 44bcb8942cfc..e3be8074e62e 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -61,6 +61,18 @@ namespace llvm {
///
VPERM,
+ /// XXSPLT - The PPC VSX splat instructions
+ ///
+ XXSPLT,
+
+ /// XXINSERT - The PPC VSX insert instruction
+ ///
+ XXINSERT,
+
+ /// VECSHL - The PPC VSX shift left instruction
+ ///
+ VECSHL,
+
/// The CMPB instruction (takes two operands of i32 or i64).
CMPB,
@@ -133,6 +145,16 @@ namespace llvm {
/// Direct move from a GPR to a VSX register (zero)
MTVSRZ,
+ /// Extract a subvector from signed integer vector and convert to FP.
+ /// It is primarily used to convert a (widened) illegal integer vector
+ /// type to a legal floating point vector type.
+ /// For example v2i32 -> widened to v4i32 -> v2f64
+ SINT_VEC_TO_FP,
+
+ /// Extract a subvector from unsigned integer vector and convert to FP.
+ /// As with SINT_VEC_TO_FP, used for converting illegal types.
+ UINT_VEC_TO_FP,
+
// FIXME: Remove these once the ANDI glue bug is fixed:
/// i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the
/// eq or gt bit of CR0 after executing andi. x, 1. This is used to
@@ -297,6 +319,10 @@ namespace llvm {
/// of outputs.
XXSWAPD,
+ /// An SDNode for swaps that are not associated with any loads/stores
+    /// and therefore have no chain.
+ SWAP_NO_CHAIN,
+
/// QVFPERM = This corresponds to the QPX qvfperm instruction.
QVFPERM,
@@ -402,6 +428,16 @@ namespace llvm {
/// VSPLTB/VSPLTH/VSPLTW.
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
+ /// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by
+ /// the XXINSERTW instruction introduced in ISA 3.0. This is essentially any
+ /// shuffle of v4f32/v4i32 vectors that just inserts one element from one
+ /// vector into the other. This function will also set a couple of
+ /// output parameters for how much the source vector needs to be shifted and
+ /// what byte number needs to be specified for the instruction to put the
+ /// element in the desired location of the target vector.
+ bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
+ unsigned &InsertAtByte, bool &Swap, bool IsLE);
+
/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);
@@ -428,6 +464,20 @@ namespace llvm {
/// DAG node.
const char *getTargetNodeName(unsigned Opcode) const override;
+ /// getPreferredVectorAction - The code we generate when vector types are
+ /// legalized by promoting the integer element type is often much worse
+ /// than code we generate if we widen the type for applicable vector types.
+    /// The issue with promoting is that the vector is scalarized: individual
+    /// elements are promoted and then the vector is rebuilt. Say we load a
+    /// pair of v4i8s and shuffle them. This turns into a mess of 8 extending
+    /// loads, moves back into VSRs (or memory ops if we don't have direct
+    /// moves), and then the VPERM for the shuffle. All in all, a very slow
+    /// sequence.
+ TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT)
+ const override {
+ if (VT.getVectorElementType().getSizeInBits() % 8 == 0)
+ return TypeWidenVector;
+ return TargetLoweringBase::getPreferredVectorAction(VT);
+ }
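
The hook's decision rule, restated as a standalone predicate (a sketch):

    #include "llvm/CodeGen/ValueTypes.h"

    // Widen whenever the element width is a whole number of bytes
    // (v4i8, v2i16, ...); odd widths such as v4i1 keep the default
    // legalization action.
    bool prefersWidening(llvm::EVT VT) {
      return VT.isVector() &&
             VT.getVectorElementType().getSizeInBits() % 8 == 0;
    }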
bool useSoftFloat() const override;
MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
@@ -442,6 +492,18 @@ namespace llvm {
return true;
}
+ bool supportSplitCSR(MachineFunction *MF) const override {
+ return
+ MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
+ }
+
+ void initializeSplitCSR(MachineBasicBlock *Entry) const override;
+
+ void insertCopiesSplitCSR(
+ MachineBasicBlock *Entry,
+ const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
+
/// getSetCCResultType - Return the ISD::SETCC ValueType
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
EVT VT) const override;
@@ -508,26 +570,31 @@ namespace llvm {
unsigned getPrefLoopAlignment(MachineLoop *ML) const override;
+ bool shouldInsertFencesForAtomic(const Instruction *I) const override {
+ return true;
+ }
+
Instruction* emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
bool IsStore, bool IsLoad) const override;
Instruction* emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
bool IsStore, bool IsLoad) const override;
MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const override;
- MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *MBB) const override;
+ MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
MachineBasicBlock *MBB,
unsigned AtomicSize,
unsigned BinOpcode) const;
- MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr *MI,
+ MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
MachineBasicBlock *MBB,
- bool is8bit, unsigned Opcode) const;
+ bool is8bit,
+ unsigned Opcode) const;
- MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr *MI,
+ MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const;
- MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
+ MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const;
ConstraintType getConstraintType(StringRef Constraint) const override;
@@ -672,6 +739,10 @@ namespace llvm {
unsigned
getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
+ /// Override to support customized stack guard loading.
+ bool useLoadStackGuardNode() const override;
+ void insertSSPDeclarations(Module &M) const override;
+
private:
struct ReuseLoadInfo {
SDValue Ptr;
@@ -693,11 +764,11 @@ namespace llvm {
SelectionDAG &DAG) const;
void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
- SelectionDAG &DAG, SDLoc dl) const;
+ SelectionDAG &DAG, const SDLoc &dl) const;
SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
- SDLoc dl) const;
+ const SDLoc &dl) const;
SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
- SDLoc dl) const;
+ const SDLoc &dl) const;
SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const;
SDValue getReturnAddrFrameIndex(SelectionDAG & DAG) const;
@@ -709,13 +780,20 @@ namespace llvm {
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const;
- SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
- int SPDiff,
- SDValue Chain,
- SDValue &LROpOut,
+ bool
+ IsEligibleForTailCallOptimization_64SVR4(
+ SDValue Callee,
+ CallingConv::ID CalleeCC,
+ ImmutableCallSite *CS,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SelectionDAG& DAG) const;
+
+ SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
+ SDValue Chain, SDValue &LROpOut,
SDValue &FPOpOut,
- bool isDarwinABI,
- SDLoc dl) const;
+ const SDLoc &dl) const;
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
@@ -727,23 +805,18 @@ namespace llvm {
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
- const PPCSubtarget &Subtarget) const;
- SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG,
- const PPCSubtarget &Subtarget) const;
- SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG,
- const PPCSubtarget &Subtarget) const;
- SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
- const PPCSubtarget &Subtarget) const;
- SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG,
- const PPCSubtarget &Subtarget) const;
- SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
- const PPCSubtarget &Subtarget) const;
+ SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, SDLoc dl) const;
+ SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
+ const SDLoc &dl) const;
SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
@@ -763,26 +836,23 @@ namespace llvm {
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
- SDValue FinishCall(CallingConv::ID CallConv, SDLoc dl, bool isTailCall,
- bool isVarArg, bool IsPatchPoint, bool hasNest,
- SelectionDAG &DAG,
- SmallVector<std::pair<unsigned, SDValue>, 8>
- &RegsToPass,
+ SDValue FinishCall(CallingConv::ID CallConv, const SDLoc &dl,
+ bool isTailCall, bool isVarArg, bool isPatchPoint,
+ bool hasNest, SelectionDAG &DAG,
+ SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
SDValue InFlag, SDValue Chain, SDValue CallSeqStart,
- SDValue &Callee,
- int SPDiff, unsigned NumBytes,
+ SDValue &Callee, int SPDiff, unsigned NumBytes,
const SmallVectorImpl<ISD::InputArg> &Ins,
SmallVectorImpl<SDValue> &InVals,
ImmutableCallSite *CS) const;
SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const override;
+ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
SDValue
LowerCall(TargetLowering::CallLoweringInfo &CLI,
@@ -794,75 +864,66 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const override;
- SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const override;
-
- SDValue
- extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, SelectionDAG &DAG,
- SDValue ArgVal, SDLoc dl) const;
-
- SDValue
- LowerFormalArguments_Darwin(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
- SDValue
- LowerFormalArguments_64SVR4(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
- SDValue
- LowerFormalArguments_32SVR4(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- SDValue
- createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
- SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
- SelectionDAG &DAG, SDLoc dl) const;
-
- SDValue
- LowerCall_Darwin(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv,
- bool isVarArg, bool isTailCall, bool IsPatchPoint,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite *CS) const;
- SDValue
- LowerCall_64SVR4(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv,
- bool isVarArg, bool isTailCall, bool IsPatchPoint,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite *CS) const;
- SDValue
- LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
- bool isVarArg, bool isTailCall, bool IsPatchPoint,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals,
- ImmutableCallSite *CS) const;
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &dl, SelectionDAG &DAG) const override;
+
+ SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
+ SelectionDAG &DAG, SDValue ArgVal,
+ const SDLoc &dl) const;
+
+ SDValue LowerFormalArguments_Darwin(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
+ SDValue LowerFormalArguments_64SVR4(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
+ SDValue LowerFormalArguments_32SVR4(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
+
+ SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
+ SDValue CallSeqStart,
+ ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
+ const SDLoc &dl) const;
+
+ SDValue LowerCall_Darwin(SDValue Chain, SDValue Callee,
+ CallingConv::ID CallConv, bool isVarArg,
+ bool isTailCall, bool isPatchPoint,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals,
+ ImmutableCallSite *CS) const;
+ SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee,
+ CallingConv::ID CallConv, bool isVarArg,
+ bool isTailCall, bool isPatchPoint,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals,
+ ImmutableCallSite *CS) const;
+ SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee,
+ CallingConv::ID CallConv, bool isVarArg,
+ bool isTailCall, bool isPatchPoint,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals,
+ ImmutableCallSite *CS) const;
SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;
diff --git a/lib/Target/PowerPC/PPCInstr64Bit.td b/lib/Target/PowerPC/PPCInstr64Bit.td
index 79e4fe379c2d..e7eb8a16180a 100644
--- a/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -244,12 +244,22 @@ def LDARX : XForm_1<31, 84, (outs g8rc:$rD), (ins memrr:$ptr),
// (EH=1 - see Power ISA 2.07 Book II 4.4.2)
def LDARXL : XForm_1<31, 84, (outs g8rc:$rD), (ins memrr:$ptr),
"ldarx $rD, $ptr, 1", IIC_LdStLDARX, []>, isDOT;
+
+let hasExtraDefRegAllocReq = 1 in
+def LDAT : X_RD5_RS5_IM5<31, 614, (outs g8rc:$rD), (ins g8rc:$rA, u5imm:$FC),
+ "ldat $rD, $rA, $FC", IIC_LdStLoad>, isPPC64,
+ Requires<[IsISA3_0]>;
}
let Defs = [CR0], mayStore = 1, hasSideEffects = 0 in
def STDCX : XForm_1<31, 214, (outs), (ins g8rc:$rS, memrr:$dst),
"stdcx. $rS, $dst", IIC_LdStSTDCX, []>, isDOT;
+let mayStore = 1, hasSideEffects = 0 in
+def STDAT : X_RD5_RS5_IM5<31, 742, (outs), (ins g8rc:$rS, g8rc:$rA, u5imm:$FC),
+ "stdat $rS, $rA, $FC", IIC_LdStStore>, isPPC64,
+ Requires<[IsISA3_0]>;
+
let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [RM] in
def TCRETURNdi8 :Pseudo< (outs),
@@ -476,8 +486,10 @@ defm ADD8 : XOForm_1r<31, 266, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"add", "$rT, $rA, $rB", IIC_IntSimple,
[(set i64:$rT, (add i64:$rA, i64:$rB))]>;
// ADD8 has a special form: reg = ADD8(reg, sym@tls) for use by the
-// initial-exec thread-local storage model.
-def ADD8TLS : XOForm_1<31, 266, 0, (outs g8rc:$rT), (ins g8rc:$rA, tlsreg:$rB),
+// initial-exec thread-local storage model. We must forbid r0 here: the add
+// itself handles r0 fine, but the linker may relax this add into a
+// local-exec addi, and addi reads r0 as the constant zero rather than as a
+// register.
+def ADD8TLS : XOForm_1<31, 266, 0, (outs g8rc:$rT), (ins g8rc_nox0:$rA, tlsreg:$rB),
"add $rT, $rA, $rB", IIC_IntSimple,
[(set i64:$rT, (add i64:$rA, tglobaltlsaddr:$rB))]>;
@@ -502,11 +514,11 @@ let Defs = [CARRY] in {
def SUBFIC8: DForm_2< 8, (outs g8rc:$rD), (ins g8rc:$rA, s16imm64:$imm),
"subfic $rD, $rA, $imm", IIC_IntGeneral,
[(set i64:$rD, (subc imm64SExt16:$imm, i64:$rA))]>;
-defm SUBFC8 : XOForm_1r<31, 8, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
+}
+defm SUBFC8 : XOForm_1rc<31, 8, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"subfc", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i64:$rT, (subc i64:$rB, i64:$rA))]>,
PPC970_DGroup_Cracked;
-}
defm SUBF8 : XOForm_1r<31, 40, 0, (outs g8rc:$rT), (ins g8rc:$rA, g8rc:$rB),
"subf", "$rT, $rA, $rB", IIC_IntGeneral,
[(set i64:$rT, (sub i64:$rB, i64:$rA))]>;
@@ -564,6 +576,14 @@ let isCompare = 1, hasSideEffects = 0 in {
def CMPLDI : DForm_6_ext<10, (outs crrc:$dst), (ins g8rc:$src1, u16imm64:$src2),
"cmpldi $dst, $src1, $src2",
IIC_IntCompare>, isPPC64;
+ let Interpretation64Bit = 1, isCodeGenOnly = 1 in
+ def CMPRB8 : X_BF3_L1_RS5_RS5<31, 192, (outs crbitrc:$BF),
+ (ins u1imm:$L, g8rc:$rA, g8rc:$rB),
+ "cmprb $BF, $L, $rA, $rB", IIC_IntCompare, []>,
+ Requires<[IsISA3_0]>;
+ def CMPEQB : X_BF3_RS5_RS5<31, 224, (outs crbitrc:$BF),
+ (ins g8rc:$rA, g8rc:$rB), "cmpeqb $BF, $rA, $rB",
+ IIC_IntCompare, []>, Requires<[IsISA3_0]>;
}
let hasSideEffects = 0 in {
@@ -580,6 +600,9 @@ defm SRAD : XForm_6rc<31, 794, (outs g8rc:$rA), (ins g8rc:$rS, gprc:$rB),
let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
defm CNTLZW8 : XForm_11r<31, 26, (outs g8rc:$rA), (ins g8rc:$rS),
"cntlzw", "$rA, $rS", IIC_IntGeneral, []>;
+defm CNTTZW8 : XForm_11r<31, 538, (outs g8rc:$rA), (ins g8rc:$rS),
+ "cnttzw", "$rA, $rS", IIC_IntGeneral, []>,
+ Requires<[IsISA3_0]>;
defm EXTSB8 : XForm_11r<31, 954, (outs g8rc:$rA), (ins g8rc:$rS),
"extsb", "$rA, $rS", IIC_IntSimple,
@@ -613,9 +636,12 @@ defm EXTSW_32_64 : XForm_11r<31, 986, (outs g8rc:$rA), (ins gprc:$rS),
defm SRADI : XSForm_1rc<31, 413, (outs g8rc:$rA), (ins g8rc:$rS, u6imm:$SH),
"sradi", "$rA, $rS, $SH", IIC_IntRotateDI,
[(set i64:$rA, (sra i64:$rS, (i32 imm:$SH)))]>, isPPC64;
-defm CNTLZD : XForm_11r<31, 58, (outs g8rc:$rA), (ins g8rc:$rS),
+defm CNTLZD : XForm_11r<31, 58, (outs g8rc:$rA), (ins g8rc:$rS),
"cntlzd", "$rA, $rS", IIC_IntGeneral,
[(set i64:$rA, (ctlz i64:$rS))]>;
+defm CNTTZD : XForm_11r<31, 570, (outs g8rc:$rA), (ins g8rc:$rS),
+ "cnttzd", "$rA, $rS", IIC_IntGeneral,
+ [(set i64:$rA, (cttz i64:$rS))]>, Requires<[IsISA3_0]>;
def POPCNTD : XForm_11<31, 506, (outs g8rc:$rA), (ins g8rc:$rS),
"popcntd $rA, $rS", IIC_IntGeneral,
[(set i64:$rA, (ctpop i64:$rS))]>;
@@ -905,6 +931,10 @@ def LDUX : XForm_1<31, 53, (outs g8rc:$rD, ptr_rc_nor0:$ea_result),
"ldux $rD, $addr", IIC_LdStLDUX,
[]>, RegConstraint<"$addr.ptrreg = $ea_result">,
NoEncode<"$ea_result">, isPPC64;
+
+def LDMX : XForm_1<31, 309, (outs g8rc:$rD), (ins memrr:$src),
+ "ldmx $rD, $src", IIC_LdStLD, []>, isPPC64,
+ Requires<[IsISA3_0]>;
}
}
@@ -1246,3 +1276,24 @@ def : Pat<(atomic_load_64 xaddr:$src), (LDX memrr:$src)>;
def : Pat<(atomic_store_64 ixaddr:$ptr, i64:$val), (STD g8rc:$val, memrix:$ptr)>;
def : Pat<(atomic_store_64 xaddr:$ptr, i64:$val), (STDX g8rc:$val, memrr:$ptr)>;
+
+let Predicates = [IsISA3_0] in {
+
+class X_L1_RA5_RB5<bits<6> opcode, bits<10> xo, string opc, RegisterOperand ty,
+ InstrItinClass itin, list<dag> pattern>
+ : X_L1_RS5_RS5<opcode, xo, (outs), (ins ty:$rA, ty:$rB, u1imm:$L),
+ !strconcat(opc, " $rA, $rB, $L"), itin, pattern>;
+
+let Interpretation64Bit = 1, isCodeGenOnly = 1 in {
+def CP_COPY8 : X_L1_RA5_RB5<31, 774, "copy" , g8rc, IIC_LdStCOPY, []>;
+def CP_PASTE8 : X_L1_RA5_RB5<31, 902, "paste" , g8rc, IIC_LdStPASTE, []>;
+def CP_PASTE8o : X_L1_RA5_RB5<31, 902, "paste.", g8rc, IIC_LdStPASTE, []>,isDOT;
+}
+
+// SLB Invalidate Entry Global
+def SLBIEG : XForm_26<31, 466, (outs), (ins gprc:$RS, gprc:$RB),
+ "slbieg $RS, $RB", IIC_SprSLBIEG, []>;
+// SLB Synchronize
+def SLBSYNC : XForm_0<31, 338, (outs), (ins), "slbsync", IIC_SprSLBSYNC, []>;
+
+} // IsISA3_0
diff --git a/lib/Target/PowerPC/PPCInstrAltivec.td b/lib/Target/PowerPC/PPCInstrAltivec.td
index 53674681b213..e1c4673c2d7f 100644
--- a/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -1213,3 +1213,187 @@ def VNCIPHERLAST : VX1_Int_Ty<1353, "vncipherlast",
int_ppc_altivec_crypto_vncipherlast, v2i64>;
def VSBOX : VXBX_Int_Ty<1480, "vsbox", int_ppc_altivec_crypto_vsbox, v2i64>;
} // HasP8Crypto
+
+// The following altivec instructions were introduced in Power ISA 3.0
+def HasP9Altivec : Predicate<"PPCSubTarget->hasP9Altivec()">;
+let Predicates = [HasP9Altivec] in {
+
+// Vector Compare Not Equal (Zero)
+class P9VCMP<bits<10> xo, string asmstr, ValueType Ty>
+ : VXRForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), asmstr,
+ IIC_VecFPCompare, []>;
+class P9VCMPo<bits<10> xo, string asmstr, ValueType Ty>
+ : VXRForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), asmstr,
+ IIC_VecFPCompare, []> {
+ let Defs = [CR6];
+ let RC = 1;
+}
+
+// i8 element comparisons.
+def VCMPNEB : P9VCMP < 7, "vcmpneb $vD, $vA, $vB" , v16i8>;
+def VCMPNEBo : P9VCMPo< 7, "vcmpneb. $vD, $vA, $vB" , v16i8>;
+def VCMPNEZB : P9VCMP <263, "vcmpnezb $vD, $vA, $vB" , v16i8>;
+def VCMPNEZBo : P9VCMPo<263, "vcmpnezb. $vD, $vA, $vB", v16i8>;
+
+// i16 element comparisons.
+def VCMPNEH : P9VCMP < 71, "vcmpneh $vD, $vA, $vB" , v8i16>;
+def VCMPNEHo : P9VCMPo< 71, "vcmpneh. $vD, $vA, $vB" , v8i16>;
+def VCMPNEZH : P9VCMP <327, "vcmpnezh $vD, $vA, $vB" , v8i16>;
+def VCMPNEZHo : P9VCMPo<327, "vcmpnezh. $vD, $vA, $vB", v8i16>;
+
+// i32 element comparisons.
+def VCMPNEW : P9VCMP <135, "vcmpnew $vD, $vA, $vB" , v4i32>;
+def VCMPNEWo : P9VCMPo<135, "vcmpnew. $vD, $vA, $vB" , v4i32>;
+def VCMPNEZW : P9VCMP <391, "vcmpnezw $vD, $vA, $vB" , v4i32>;
+def VCMPNEZWo : P9VCMPo<391, "vcmpnezw. $vD, $vA, $vB", v4i32>;
+
+// VX-Form: [PO VRT / UIM VRB XO].
+// We implement it with VXForm_1, letting the 5-bit "VRA" field carry
+// "/ UIM" (one reserved bit plus the 4-bit UIM).
+class VX1_VT5_UIM5_VB5<bits<11> xo, string opc, list<dag> pattern>
+ : VXForm_1<xo, (outs vrrc:$vD), (ins u4imm:$UIMM, vrrc:$vB),
+ !strconcat(opc, " $vD, $vB, $UIMM"), IIC_VecGeneral, pattern>;
+
+class VX1_RT5_RA5_VB5<bits<11> xo, string opc, list<dag> pattern>
+ : VXForm_1<xo, (outs g8rc:$rD), (ins g8rc:$rA, vrrc:$vB),
+ !strconcat(opc, " $rD, $rA, $vB"), IIC_VecGeneral, pattern>;
+
+// Vector Extract Unsigned
+def VEXTRACTUB : VX1_VT5_UIM5_VB5<525, "vextractub", []>;
+def VEXTRACTUH : VX1_VT5_UIM5_VB5<589, "vextractuh", []>;
+def VEXTRACTUW : VX1_VT5_UIM5_VB5<653, "vextractuw", []>;
+def VEXTRACTD : VX1_VT5_UIM5_VB5<717, "vextractd" , []>;
+
+// Vector Extract Unsigned Byte/Halfword/Word Left/Right-Indexed
+def VEXTUBLX : VX1_RT5_RA5_VB5<1549, "vextublx", []>;
+def VEXTUBRX : VX1_RT5_RA5_VB5<1805, "vextubrx", []>;
+def VEXTUHLX : VX1_RT5_RA5_VB5<1613, "vextuhlx", []>;
+def VEXTUHRX : VX1_RT5_RA5_VB5<1869, "vextuhrx", []>;
+def VEXTUWLX : VX1_RT5_RA5_VB5<1677, "vextuwlx", []>;
+def VEXTUWRX : VX1_RT5_RA5_VB5<1933, "vextuwrx", []>;
+
+// Vector Insert Element Instructions
+def VINSERTB : VX1_VT5_UIM5_VB5<781, "vinsertb", []>;
+def VINSERTH : VX1_VT5_UIM5_VB5<845, "vinserth", []>;
+def VINSERTW : VX1_VT5_UIM5_VB5<909, "vinsertw", []>;
+def VINSERTD : VX1_VT5_UIM5_VB5<973, "vinsertd", []>;
+
+class VX_VT5_EO5_VB5<bits<11> xo, bits<5> eo, string opc, list<dag> pattern>
+ : VXForm_RD5_XO5_RS5<xo, eo, (outs vrrc:$vD), (ins vrrc:$vB),
+ !strconcat(opc, " $vD, $vB"), IIC_VecGeneral, pattern>;
+
+// Vector Count Leading/Trailing Zero LSB. Result is placed into GPR[rD]
+def VCLZLSBB : VXForm_RD5_XO5_RS5<1538, 0, (outs g8rc:$rD), (ins vrrc:$vB),
+ "vclzlsbb $rD, $vB", IIC_VecGeneral, []>;
+def VCTZLSBB : VXForm_RD5_XO5_RS5<1538, 1, (outs g8rc:$rD), (ins vrrc:$vB),
+ "vctzlsbb $rD, $vB", IIC_VecGeneral, []>;
+// Vector Count Trailing Zeros
+def VCTZB : VX_VT5_EO5_VB5<1538, 28, "vctzb", []>;
+def VCTZH : VX_VT5_EO5_VB5<1538, 29, "vctzh", []>;
+def VCTZW : VX_VT5_EO5_VB5<1538, 30, "vctzw", []>;
+def VCTZD : VX_VT5_EO5_VB5<1538, 31, "vctzd", []>;
+
+// Vector Extend Sign
+def VEXTSB2W : VX_VT5_EO5_VB5<1538, 16, "vextsb2w", []>;
+def VEXTSH2W : VX_VT5_EO5_VB5<1538, 17, "vextsh2w", []>;
+def VEXTSB2D : VX_VT5_EO5_VB5<1538, 24, "vextsb2d", []>;
+def VEXTSH2D : VX_VT5_EO5_VB5<1538, 25, "vextsh2d", []>;
+def VEXTSW2D : VX_VT5_EO5_VB5<1538, 26, "vextsw2d", []>;
+
+// Vector Integer Negate
+def VNEGW : VX_VT5_EO5_VB5<1538, 6, "vnegw", []>;
+def VNEGD : VX_VT5_EO5_VB5<1538, 7, "vnegd", []>;
+
+// Vector Parity Byte
+def VPRTYBW : VX_VT5_EO5_VB5<1538, 8, "vprtybw", []>;
+def VPRTYBD : VX_VT5_EO5_VB5<1538, 9, "vprtybd", []>;
+def VPRTYBQ : VX_VT5_EO5_VB5<1538, 10, "vprtybq", []>;
+
+// Vector (Bit) Permute (Right-indexed)
+def VBPERMD : VXForm_1<1484, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
+ "vbpermd $vD, $vA, $vB", IIC_VecFP, []>;
+def VPERMR : VAForm_1a<59, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vC),
+ "vpermr $vD, $vA, $vB, $vC", IIC_VecFP, []>;
+
+class VX1_VT5_VA5_VB5<bits<11> xo, string opc, list<dag> pattern>
+ : VXForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
+ !strconcat(opc, " $vD, $vA, $vB"), IIC_VecFP, pattern>;
+
+// Vector Rotate Left Mask/Mask-Insert
+def VRLWNM : VX1_VT5_VA5_VB5<389, "vrlwnm", []>;
+def VRLWMI : VX1_VT5_VA5_VB5<133, "vrlwmi", []>;
+def VRLDNM : VX1_VT5_VA5_VB5<453, "vrldnm", []>;
+def VRLDMI : VX1_VT5_VA5_VB5<197, "vrldmi", []>;
+
+// Vector Shift Left/Right
+def VSLV : VX1_VT5_VA5_VB5<1860, "vslv", []>;
+def VSRV : VX1_VT5_VA5_VB5<1796, "vsrv", []>;
+
+// Vector Multiply-by-10 (& Write Carry) Unsigned Quadword
+def VMUL10UQ : VXForm_BX<513, (outs vrrc:$vD), (ins vrrc:$vA),
+ "vmul10uq $vD, $vA", IIC_VecFP, []>;
+def VMUL10CUQ : VXForm_BX< 1, (outs vrrc:$vD), (ins vrrc:$vA),
+ "vmul10cuq $vD, $vA", IIC_VecFP, []>;
+
+// Vector Multiply-by-10 Extended (& Write Carry) Unsigned Quadword
+def VMUL10EUQ : VX1_VT5_VA5_VB5<577, "vmul10euq" , []>;
+def VMUL10ECUQ : VX1_VT5_VA5_VB5< 65, "vmul10ecuq", []>;
+
+// Decimal Integer Format Conversion Instructions
+
+// [PO VRT EO VRB 1 PS XO], "_o" means CR6 is set.
+class VX_VT5_EO5_VB5_PS1_XO9_o<bits<5> eo, bits<9> xo, string opc,
+ list<dag> pattern>
+ : VX_RD5_EO5_RS5_PS1_XO9<eo, xo, (outs vrrc:$vD), (ins vrrc:$vB, u1imm:$PS),
+ !strconcat(opc, " $vD, $vB, $PS"), IIC_VecFP, pattern> {
+ let Defs = [CR6];
+}
+
+// [PO VRT EO VRB 1 / XO]
+class VX_VT5_EO5_VB5_XO9_o<bits<5> eo, bits<9> xo, string opc,
+ list<dag> pattern>
+ : VX_RD5_EO5_RS5_PS1_XO9<eo, xo, (outs vrrc:$vD), (ins vrrc:$vB),
+ !strconcat(opc, " $vD, $vB"), IIC_VecFP, pattern> {
+ let Defs = [CR6];
+ let PS = 0;
+}
+
+// Decimal Convert From/to National/Zoned/Signed-QWord
+def BCDCFNo : VX_VT5_EO5_VB5_PS1_XO9_o<7, 385, "bcdcfn." , []>;
+def BCDCFZo : VX_VT5_EO5_VB5_PS1_XO9_o<6, 385, "bcdcfz." , []>;
+def BCDCTNo : VX_VT5_EO5_VB5_XO9_o <5, 385, "bcdctn." , []>;
+def BCDCTZo : VX_VT5_EO5_VB5_PS1_XO9_o<4, 385, "bcdctz." , []>;
+def BCDCFSQo : VX_VT5_EO5_VB5_PS1_XO9_o<2, 385, "bcdcfsq.", []>;
+def BCDCTSQo : VX_VT5_EO5_VB5_XO9_o <0, 385, "bcdctsq.", []>;
+
+// Decimal Copy-Sign/Set-Sign
+let Defs = [CR6] in
+def BCDCPSGNo : VX1_VT5_VA5_VB5<833, "bcdcpsgn.", []>;
+
+def BCDSETSGNo : VX_VT5_EO5_VB5_PS1_XO9_o<31, 385, "bcdsetsgn.", []>;
+
+// [PO VRT VRA VRB 1 PS XO], "_o" means CR6 is set.
+class VX_VT5_VA5_VB5_PS1_XO9_o<bits<9> xo, string opc, list<dag> pattern>
+ : VX_RD5_RSp5_PS1_XO9<xo,
+ (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, u1imm:$PS),
+ !strconcat(opc, " $vD, $vA, $vB, $PS"), IIC_VecFP, pattern> {
+ let Defs = [CR6];
+}
+
+// [PO VRT VRA VRB 1 / XO]
+class VX_VT5_VA5_VB5_XO9_o<bits<9> xo, string opc, list<dag> pattern>
+ : VX_RD5_RSp5_PS1_XO9<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
+ !strconcat(opc, " $vD, $vA, $vB"), IIC_VecFP, pattern> {
+ let Defs = [CR6];
+ let PS = 0;
+}
+
+// Decimal Shift/Unsigned-Shift/Shift-and-Round
+def BCDSo : VX_VT5_VA5_VB5_PS1_XO9_o<193, "bcds." , []>;
+def BCDUSo : VX_VT5_VA5_VB5_XO9_o <129, "bcdus.", []>;
+def BCDSRo : VX_VT5_VA5_VB5_PS1_XO9_o<449, "bcdsr.", []>;
+
+// Decimal (Unsigned) Truncate
+def BCDTRUNCo : VX_VT5_VA5_VB5_PS1_XO9_o<257, "bcdtrunc." , []>;
+def BCDUTRUNCo : VX_VT5_VA5_VB5_XO9_o <321, "bcdutrunc.", []>;
+} // end HasP9Altivec
diff --git a/lib/Target/PowerPC/PPCInstrFormats.td b/lib/Target/PowerPC/PPCInstrFormats.td
index 4e03ed27653f..5acff7559544 100644
--- a/lib/Target/PowerPC/PPCInstrFormats.td
+++ b/lib/Target/PowerPC/PPCInstrFormats.td
@@ -360,6 +360,21 @@ class DSForm_1<bits<6> opcode, bits<2> xo, dag OOL, dag IOL, string asmstr,
let Inst{30-31} = xo;
}
+// DQ-Form: [PO T RA DQ TX XO] or [PO S RA DQ SX XO]
+class DQ_RD6_RS5_DQ12<bits<6> opcode, bits<3> xo, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<6> XT;
+ bits<17> DS_RA;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = XT{4-0};
+ let Inst{11-15} = DS_RA{16-12}; // Register #
+ let Inst{16-27} = DS_RA{11-0}; // Displacement.
+ let Inst{28} = XT{5};
+ let Inst{29-31} = xo;
+}
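
As a hedged sketch of the DQ-form packing just defined (illustration only, with a made-up helper name): PPC numbers instruction bits from the MSB, so Inst{6-10} occupy bits 25..21 in conventional LSB-0 terms.

#include <cstdint>

// Assemble a DQ-form word from its fields, mirroring the let Inst{...}
// assignments above. XT is 6 bits; RA and the 12-bit displacement DQ
// together make up the 17-bit DS_RA operand.
static uint32_t encodeDQForm(uint32_t opcode, uint32_t XT, uint32_t RA,
                             uint32_t DQ, uint32_t xo) {
  uint32_t Inst = 0;
  Inst |= (opcode & 0x3F) << 26;  // Inst{0-5}   = PO
  Inst |= (XT & 0x1F) << 21;      // Inst{6-10}  = XT{4-0}
  Inst |= (RA & 0x1F) << 16;      // Inst{11-15} = DS_RA{16-12} (register #)
  Inst |= (DQ & 0xFFF) << 4;      // Inst{16-27} = DS_RA{11-0} (displacement)
  Inst |= ((XT >> 5) & 1) << 3;   // Inst{28}    = XT{5} (TX/SX bit)
  Inst |= xo & 0x7;               // Inst{29-31} = XO
  return Inst;
}
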
// 1.7.6 X-Form
class XForm_base_r3xo<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
@@ -747,6 +762,107 @@ class XForm_htm3<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
let Inst{31} = RC;
}
+// [PO BF / L RA RB XO /]
+class X_BF3_L1_RS5_RS5<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<3> BF;
+ bits<1> L;
+ bits<5> RA;
+ bits<5> RB;
+
+ let Pattern = pattern;
+
+ let Inst{6-8} = BF;
+ let Inst{9} = 0;
+ let Inst{10} = L;
+ let Inst{11-15} = RA;
+ let Inst{16-20} = RB;
+ let Inst{21-30} = xo;
+ let Inst{31} = 0;
+}
+
+// Same as XForm_17 but with GPRs and the new naming convention
+class X_BF3_RS5_RS5<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<3> BF;
+ bits<5> RA;
+ bits<5> RB;
+
+ let Pattern = pattern;
+
+ let Inst{6-8} = BF;
+ let Inst{9-10} = 0;
+ let Inst{11-15} = RA;
+ let Inst{16-20} = RB;
+ let Inst{21-30} = xo;
+ let Inst{31} = 0;
+}
+
+// e.g. [PO VRT XO VRB XO /] or [PO VRT XO VRB XO RO]
+class X_RD5_XO5_RS5<bits<6> opcode, bits<5> xo2, bits<10> xo, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin, list<dag> pattern>
+ : XForm_base_r3xo<opcode, xo, OOL, IOL, asmstr, itin, pattern> {
+ let A = xo2;
+}
+
+class X_BF3_DCMX7_RS5<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<3> BF;
+ bits<7> DCMX;
+ bits<5> VB;
+
+ let Pattern = pattern;
+
+ let Inst{6-8} = BF;
+ let Inst{9-15} = DCMX;
+ let Inst{16-20} = VB;
+ let Inst{21-30} = xo;
+ let Inst{31} = 0;
+}
+
+class X_RD6_IMM8<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<6> XT;
+ bits<8> IMM8;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = XT{4-0};
+ let Inst{11-12} = 0;
+ let Inst{13-20} = IMM8;
+ let Inst{21-30} = xo;
+ let Inst{31} = XT{5};
+}
+
+// XForm_base_r3xo for instructions such as P9 atomics where we don't want
+// to specify an SDAG pattern for matching.
+class X_RD5_RS5_IM5<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin>
+ : XForm_base_r3xo<opcode, xo, OOL, IOL, asmstr, itin, []> {
+}
+
+class X_BF3<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin>
+ : XForm_17<opcode, xo, OOL, IOL, asmstr, itin> {
+ let FRA = 0;
+ let FRB = 0;
+}
+
+// [PO /// L RA RB XO /]
+class X_L1_RS5_RS5<bits<6> opcode, bits<10> xo, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin, list<dag> pattern>
+ : XForm_16<opcode, xo, OOL, IOL, asmstr, itin> {
+ let BF = 0;
+ let Pattern = pattern;
+
+ bit RC = 0;
+ let Inst{31} = RC;
+}
+
// XX*-Form (VSX)
class XX1Form<bits<6> opcode, bits<10> xo, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
@@ -820,6 +936,95 @@ class XX2Form_2<bits<6> opcode, bits<9> xo, dag OOL, dag IOL, string asmstr,
let Inst{31} = XT{5};
}
+class XX2_RD6_UIM5_RS6<bits<6> opcode, bits<9> xo, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<6> XT;
+ bits<6> XB;
+ bits<5> UIM5;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = XT{4-0};
+ let Inst{11-15} = UIM5;
+ let Inst{16-20} = XB{4-0};
+ let Inst{21-29} = xo;
+ let Inst{30} = XB{5};
+ let Inst{31} = XT{5};
+}
+
+// [PO T XO B XO BX /]
+class XX2_RD5_XO5_RS6<bits<6> opcode, bits<5> xo2, bits<9> xo, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<5> RT;
+ bits<6> XB;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = RT;
+ let Inst{11-15} = xo2;
+ let Inst{16-20} = XB{4-0};
+ let Inst{21-29} = xo;
+ let Inst{30} = XB{5};
+ let Inst{31} = 0;
+}
+
+// [PO T XO B XO BX TX]
+class XX2_RD6_XO5_RS6<bits<6> opcode, bits<5> xo2, bits<9> xo, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<6> XT;
+ bits<6> XB;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = XT{4-0};
+ let Inst{11-15} = xo2;
+ let Inst{16-20} = XB{4-0};
+ let Inst{21-29} = xo;
+ let Inst{30} = XB{5};
+ let Inst{31} = XT{5};
+}
+
+class XX2_BF3_DCMX7_RS6<bits<6> opcode, bits<9> xo, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin, list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<3> BF;
+ bits<7> DCMX;
+ bits<6> XB;
+
+ let Pattern = pattern;
+
+ let Inst{6-8} = BF;
+ let Inst{9-15} = DCMX;
+ let Inst{16-20} = XB{4-0};
+ let Inst{21-29} = xo;
+ let Inst{30} = XB{5};
+ let Inst{31} = 0;
+}
+
+class XX2_RD6_DCMX7_RS6<bits<6> opcode, bits<4> xo1, bits<3> xo2,
+ dag OOL, dag IOL, string asmstr, InstrItinClass itin,
+ list<dag> pattern>
+ : I<opcode, OOL, IOL, asmstr, itin> {
+ bits<6> XT;
+ bits<7> DCMX;
+ bits<6> XB;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = XT{4-0};
+ let Inst{11-15} = DCMX{4-0};
+ let Inst{16-20} = XB{4-0};
+ let Inst{21-24} = xo1;
+ let Inst{25} = DCMX{5};
+ let Inst{26-28} = xo2;
+ let Inst{29} = DCMX{6};
+ let Inst{30} = XB{5};
+ let Inst{31} = XT{5};
+}
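
The DCMX field above is scattered across three non-contiguous bit ranges, which is easy to get wrong; here is a hedged round-trip sketch (hypothetical helper names, using LSB-0 shifts for the MSB-0 instruction bits):

#include <cstdint>
#include <cassert>

// Inst{11-15} are LSB-0 bits 20..16, Inst{25} is bit 6, Inst{29} is bit 2.
static uint32_t packDCMX(uint32_t dcmx) {
  uint32_t inst = 0;
  inst |= (dcmx & 0x1F) << 16;    // Inst{11-15} = DCMX{4-0}
  inst |= ((dcmx >> 5) & 1) << 6; // Inst{25}    = DCMX{5}
  inst |= ((dcmx >> 6) & 1) << 2; // Inst{29}    = DCMX{6}
  return inst;
}

static uint32_t unpackDCMX(uint32_t inst) {
  return ((inst >> 16) & 0x1F) | (((inst >> 6) & 1) << 5) |
         (((inst >> 2) & 1) << 6);
}

int main() {
  for (uint32_t d = 0; d < 128; ++d)
    assert(unpackDCMX(packDCMX(d)) == d); // every 7-bit value survives
  return 0;
}
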
+
class XX3Form<bits<6> opcode, bits<8> xo, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
: I<opcode, OOL, IOL, asmstr, itin> {
@@ -1571,6 +1776,21 @@ class VXForm_5<bits<11> xo, dag OOL, dag IOL, string asmstr,
let Inst{21-31} = xo;
}
+// e.g. [PO VRT EO VRB XO]
+class VXForm_RD5_XO5_RS5<bits<11> xo, bits<5> eo, dag OOL, dag IOL,
+ string asmstr, InstrItinClass itin, list<dag> pattern>
+ : I<4, OOL, IOL, asmstr, itin> {
+ bits<5> RD;
+ bits<5> VB;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = RD;
+ let Inst{11-15} = eo;
+ let Inst{16-20} = VB;
+ let Inst{21-31} = xo;
+}
+
/// VXForm_CR - VX crypto instructions with "VRT, VRA, ST, SIX"
class VXForm_CR<bits<11> xo, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
@@ -1622,6 +1842,44 @@ class VXRForm_1<bits<10> xo, dag OOL, dag IOL, string asmstr,
let Inst{22-31} = xo;
}
+// VX-Form: [PO VRT EO VRB 1 PS XO]
+class VX_RD5_EO5_RS5_PS1_XO9<bits<5> eo, bits<9> xo,
+ dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<4, OOL, IOL, asmstr, itin> {
+ bits<5> VD;
+ bits<5> VB;
+ bit PS;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = VD;
+ let Inst{11-15} = eo;
+ let Inst{16-20} = VB;
+ let Inst{21} = 1;
+ let Inst{22} = PS;
+ let Inst{23-31} = xo;
+}
+
+// VX-Form: [PO VRT VRA VRB 1 PS XO] or [PO VRT VRA VRB 1 / XO]
+class VX_RD5_RSp5_PS1_XO9<bits<9> xo, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I<4, OOL, IOL, asmstr, itin> {
+ bits<5> VD;
+ bits<5> VA;
+ bits<5> VB;
+ bit PS;
+
+ let Pattern = pattern;
+
+ let Inst{6-10} = VD;
+ let Inst{11-15} = VA;
+ let Inst{16-20} = VB;
+ let Inst{21} = 1;
+ let Inst{22} = PS;
+ let Inst{23-31} = xo;
+}
+
// Z23-Form (used by QPX)
class Z23Form_1<bits<6> opcode, bits<8> xo, dag OOL, dag IOL, string asmstr,
InstrItinClass itin, list<dag> pattern>
diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index dcff6ad2486f..b6ae70ec1a2d 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -93,6 +93,7 @@ PPCInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
unsigned Directive =
DAG->MF.getSubtarget<PPCSubtarget>().getDarwinDirective();
+ // FIXME: Leaving this as-is until we have POWER9 scheduling info
if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8)
return new PPCDispatchGroupSBHazardRecognizer(II, DAG);
@@ -108,7 +109,7 @@ PPCInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
}
unsigned PPCInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
- const MachineInstr *MI,
+ const MachineInstr &MI,
unsigned *PredCost) const {
if (!ItinData || UseOldLatencyCalc)
return PPCGenInstrInfo::getInstrLatency(ItinData, MI, PredCost);
@@ -121,9 +122,9 @@ unsigned PPCInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
// is an output).
unsigned Latency = 1;
- unsigned DefClass = MI->getDesc().getSchedClass();
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+ unsigned DefClass = MI.getDesc().getSchedClass();
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
continue;
@@ -138,22 +139,22 @@ unsigned PPCInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
}
int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI,
+ const MachineInstr &DefMI, unsigned DefIdx,
+ const MachineInstr &UseMI,
unsigned UseIdx) const {
int Latency = PPCGenInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
UseMI, UseIdx);
- if (!DefMI->getParent())
+ if (!DefMI.getParent())
return Latency;
- const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
+ const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
unsigned Reg = DefMO.getReg();
bool IsRegCR;
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
const MachineRegisterInfo *MRI =
- &DefMI->getParent()->getParent()->getRegInfo();
+ &DefMI.getParent()->getParent()->getRegInfo();
IsRegCR = MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRBITRCRegClass);
} else {
@@ -161,7 +162,7 @@ int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
PPC::CRBITRCRegClass.contains(Reg);
}
- if (UseMI->isBranch() && IsRegCR) {
+ if (UseMI.isBranch() && IsRegCR) {
if (Latency < 0)
Latency = getInstrLatency(ItinData, DefMI);
@@ -181,6 +182,7 @@ int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
case PPC::DIR_PWR6X:
case PPC::DIR_PWR7:
case PPC::DIR_PWR8:
+ // FIXME: Is this needed for POWER9?
Latency += 2;
break;
}
@@ -258,10 +260,10 @@ bool PPCInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
}
}
-unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
// Note: This list must be kept consistent with LoadRegFromStackSlot.
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: break;
case PPC::LD:
case PPC::LWZ:
@@ -277,20 +279,20 @@ unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
case PPC::RESTORE_VRSAVE:
// Check for the operands added by addFrameReference (the immediate is the
// offset which defaults to 0).
- if (MI->getOperand(1).isImm() && !MI->getOperand(1).getImm() &&
- MI->getOperand(2).isFI()) {
- FrameIndex = MI->getOperand(2).getIndex();
- return MI->getOperand(0).getReg();
+ if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm() &&
+ MI.getOperand(2).isFI()) {
+ FrameIndex = MI.getOperand(2).getIndex();
+ return MI.getOperand(0).getReg();
}
break;
}
return 0;
}
-unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
// Note: This list must be kept consistent with StoreRegToStackSlot.
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: break;
case PPC::STD:
case PPC::STW:
@@ -306,25 +308,23 @@ unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
case PPC::SPILL_VRSAVE:
// Check for the operands added by addFrameReference (the immediate is the
// offset which defaults to 0).
- if (MI->getOperand(1).isImm() && !MI->getOperand(1).getImm() &&
- MI->getOperand(2).isFI()) {
- FrameIndex = MI->getOperand(2).getIndex();
- return MI->getOperand(0).getReg();
+ if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm() &&
+ MI.getOperand(2).isFI()) {
+ FrameIndex = MI.getOperand(2).getIndex();
+ return MI.getOperand(0).getReg();
}
break;
}
return 0;
}
-MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr *MI,
- bool NewMI,
+MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const {
- MachineFunction &MF = *MI->getParent()->getParent();
+ MachineFunction &MF = *MI.getParent()->getParent();
// Normal instructions can be commuted the obvious way.
- if (MI->getOpcode() != PPC::RLWIMI &&
- MI->getOpcode() != PPC::RLWIMIo)
+ if (MI.getOpcode() != PPC::RLWIMI && MI.getOpcode() != PPC::RLWIMIo)
return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
// Note that RLWIMI can be commuted as a 32-bit instruction, but not as a
// 64-bit instruction (so we don't handle PPC::RLWIMI8 here), because
@@ -332,7 +332,7 @@ MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr *MI,
// to the high-bits of the mask (and, thus, the result).
// Cannot commute if it has a non-zero rotate count.
- if (MI->getOperand(3).getImm() != 0)
+ if (MI.getOperand(3).getImm() != 0)
return nullptr;
// If we have a zero rotate count, we have:
@@ -345,28 +345,28 @@ MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr *MI,
// Swap op1/op2
assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
"Only the operands 1 and 2 can be swapped in RLSIMI/RLWIMIo.");
- unsigned Reg0 = MI->getOperand(0).getReg();
- unsigned Reg1 = MI->getOperand(1).getReg();
- unsigned Reg2 = MI->getOperand(2).getReg();
- unsigned SubReg1 = MI->getOperand(1).getSubReg();
- unsigned SubReg2 = MI->getOperand(2).getSubReg();
- bool Reg1IsKill = MI->getOperand(1).isKill();
- bool Reg2IsKill = MI->getOperand(2).isKill();
+ unsigned Reg0 = MI.getOperand(0).getReg();
+ unsigned Reg1 = MI.getOperand(1).getReg();
+ unsigned Reg2 = MI.getOperand(2).getReg();
+ unsigned SubReg1 = MI.getOperand(1).getSubReg();
+ unsigned SubReg2 = MI.getOperand(2).getSubReg();
+ bool Reg1IsKill = MI.getOperand(1).isKill();
+ bool Reg2IsKill = MI.getOperand(2).isKill();
bool ChangeReg0 = false;
// If machine instrs are no longer in two-address forms, update
// destination register as well.
if (Reg0 == Reg1) {
// Must be two address instruction!
- assert(MI->getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
+ assert(MI.getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
"Expecting a two-address instruction!");
- assert(MI->getOperand(0).getSubReg() == SubReg1 && "Tied subreg mismatch");
+ assert(MI.getOperand(0).getSubReg() == SubReg1 && "Tied subreg mismatch");
Reg2IsKill = false;
ChangeReg0 = true;
}
// Masks.
- unsigned MB = MI->getOperand(4).getImm();
- unsigned ME = MI->getOperand(5).getImm();
+ unsigned MB = MI.getOperand(4).getImm();
+ unsigned ME = MI.getOperand(5).getImm();
// We can't commute a trivial mask (there is no way to represent an all-zero
// mask).
@@ -375,40 +375,40 @@ MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr *MI,
if (NewMI) {
// Create a new instruction.
- unsigned Reg0 = ChangeReg0 ? Reg2 : MI->getOperand(0).getReg();
- bool Reg0IsDead = MI->getOperand(0).isDead();
- return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
- .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
- .addReg(Reg2, getKillRegState(Reg2IsKill))
- .addReg(Reg1, getKillRegState(Reg1IsKill))
- .addImm((ME+1) & 31)
- .addImm((MB-1) & 31);
+ unsigned Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
+ bool Reg0IsDead = MI.getOperand(0).isDead();
+ return BuildMI(MF, MI.getDebugLoc(), MI.getDesc())
+ .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
+ .addReg(Reg2, getKillRegState(Reg2IsKill))
+ .addReg(Reg1, getKillRegState(Reg1IsKill))
+ .addImm((ME + 1) & 31)
+ .addImm((MB - 1) & 31);
}
if (ChangeReg0) {
- MI->getOperand(0).setReg(Reg2);
- MI->getOperand(0).setSubReg(SubReg2);
+ MI.getOperand(0).setReg(Reg2);
+ MI.getOperand(0).setSubReg(SubReg2);
}
- MI->getOperand(2).setReg(Reg1);
- MI->getOperand(1).setReg(Reg2);
- MI->getOperand(2).setSubReg(SubReg1);
- MI->getOperand(1).setSubReg(SubReg2);
- MI->getOperand(2).setIsKill(Reg1IsKill);
- MI->getOperand(1).setIsKill(Reg2IsKill);
+ MI.getOperand(2).setReg(Reg1);
+ MI.getOperand(1).setReg(Reg2);
+ MI.getOperand(2).setSubReg(SubReg1);
+ MI.getOperand(1).setSubReg(SubReg2);
+ MI.getOperand(2).setIsKill(Reg1IsKill);
+ MI.getOperand(1).setIsKill(Reg2IsKill);
// Swap the mask around.
- MI->getOperand(4).setImm((ME+1) & 31);
- MI->getOperand(5).setImm((MB-1) & 31);
- return MI;
+ MI.getOperand(4).setImm((ME + 1) & 31);
+ MI.getOperand(5).setImm((MB - 1) & 31);
+ return &MI;
}
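
The mask swap above relies on an identity of PPC's wrap-around rotate masks: with a zero rotate count, exchanging the inserted and preserved operands is the same as complementing the mask, and the complement of mask(MB, ME) is mask(ME+1, MB-1) modulo 32 (except for the degenerate all-ones mask, which the code refuses to commute). A small self-check, with a hypothetical helper:

#include <cstdint>
#include <cassert>

// PPC 32-bit mask with bits MB..ME set in MSB-0 numbering; wraps if MB > ME.
static uint32_t ppcMask(unsigned MB, unsigned ME) {
  uint32_t hi = 0xFFFFFFFFu >> MB;        // bits MB..31
  uint32_t lo = 0xFFFFFFFFu << (31 - ME); // bits 0..ME
  return MB <= ME ? (hi & lo) : (hi | lo);
}

int main() {
  unsigned MB = 8, ME = 23; // a contiguous mask
  assert(ppcMask((ME + 1) & 31, (MB - 1) & 31) == ~ppcMask(MB, ME));
  MB = 28; ME = 3;          // a wrap-around mask
  assert(ppcMask((ME + 1) & 31, (MB - 1) & 31) == ~ppcMask(MB, ME));
  return 0;
}
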
-bool PPCInstrInfo::findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
+bool PPCInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const {
// For VSX A-Type FMA instructions, it is the first two operands that can be
// commuted, however, because the non-encoded tied input operand is listed
// first, the operands to swap are actually the second and third.
- int AltOpc = PPC::getAltVSXFMAOpcode(MI->getOpcode());
+ int AltOpc = PPC::getAltVSXFMAOpcode(MI.getOpcode());
if (AltOpc == -1)
return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
@@ -428,6 +428,8 @@ void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB,
case PPC::DIR_PWR6: Opcode = PPC::NOP_GT_PWR6; break;
case PPC::DIR_PWR7: Opcode = PPC::NOP_GT_PWR7; break;
case PPC::DIR_PWR8: Opcode = PPC::NOP_GT_PWR7; break; /* FIXME: Update when P8 InstrScheduling model is ready */
+ // FIXME: Update when POWER9 scheduling model is ready.
+ case PPC::DIR_PWR9: Opcode = PPC::NOP_GT_PWR7; break;
}
DebugLoc DL;
@@ -442,7 +444,8 @@ void PPCInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
// Branch analysis.
// Note: If the condition register is set to CTR or CTR8 then this is a
// BDNZ (imm == 1) or BDZ (imm == 0) branch.
-bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
+bool PPCInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
@@ -453,14 +456,14 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
if (I == MBB.end())
return false;
- if (!isUnpredicatedTerminator(I))
+ if (!isUnpredicatedTerminator(*I))
return false;
// Get the last instruction in the block.
MachineInstr *LastInst = I;
// If there is only one terminator instruction, process it.
- if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
+ if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
if (LastInst->getOpcode() == PPC::B) {
if (!LastInst->getOperand(0).isMBB())
return true;
@@ -522,8 +525,7 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
MachineInstr *SecondLastInst = I;
// If there are three terminators, we don't know what sort of block this is.
- if (SecondLastInst && I != MBB.begin() &&
- isUnpredicatedTerminator(--I))
+ if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
return true;
// If the block ends with PPC::B and PPC:BCC, handle it.
@@ -633,11 +635,11 @@ unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return 2;
}
-unsigned
-PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const {
+unsigned PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 2 || Cond.size() == 0) &&
@@ -721,9 +723,10 @@ bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
}
void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc dl,
- unsigned DestReg, ArrayRef<MachineOperand> Cond,
- unsigned TrueReg, unsigned FalseReg) const {
+ MachineBasicBlock::iterator MI,
+ const DebugLoc &dl, unsigned DestReg,
+ ArrayRef<MachineOperand> Cond, unsigned TrueReg,
+ unsigned FalseReg) const {
assert(Cond.size() == 2 &&
"PPC branch conditions have two components!");
@@ -746,8 +749,8 @@ void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
unsigned OpCode = Is64Bit ? PPC::ISEL8 : PPC::ISEL;
auto SelectPred = static_cast<PPC::Predicate>(Cond[0].getImm());
- unsigned SubIdx;
- bool SwapOps;
+ unsigned SubIdx = 0;
+ bool SwapOps = false;
switch (SelectPred) {
case PPC::PRED_EQ:
case PPC::PRED_EQ_MINUS:
@@ -835,9 +838,9 @@ static unsigned getCRBitValue(unsigned CRBit) {
}
void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
// We can end up with self copies and similar things as a result of VSX copy
// legalization. Promote them here.
const TargetRegisterInfo *TRI = &getRegisterInfo();
@@ -883,8 +886,8 @@ void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
if (PPC::CRBITRCRegClass.contains(SrcReg) &&
PPC::GPRCRegClass.contains(DestReg)) {
unsigned CRReg = getCRFromCRBit(SrcReg);
- BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg)
- .addReg(CRReg), getKillRegState(KillSrc);
+ BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg).addReg(CRReg);
+ getKillRegState(KillSrc);
// Rotate the CR bit in the CR fields to be the least significant bit and
// then mask with 0x1 (MB = ME = 31).
BuildMI(MBB, I, DL, get(PPC::RLWINM), DestReg)
@@ -895,13 +898,13 @@ void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
return;
} else if (PPC::CRRCRegClass.contains(SrcReg) &&
PPC::G8RCRegClass.contains(DestReg)) {
- BuildMI(MBB, I, DL, get(PPC::MFOCRF8), DestReg)
- .addReg(SrcReg), getKillRegState(KillSrc);
+ BuildMI(MBB, I, DL, get(PPC::MFOCRF8), DestReg).addReg(SrcReg);
+ getKillRegState(KillSrc);
return;
} else if (PPC::CRRCRegClass.contains(SrcReg) &&
PPC::GPRCRegClass.contains(DestReg)) {
- BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg)
- .addReg(SrcReg), getKillRegState(KillSrc);
+ BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg).addReg(SrcReg);
+ getKillRegState(KillSrc);
return;
}
@@ -1085,12 +1088,11 @@ PPCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
NewMIs.back()->addMemOperand(MF, MMO);
}
-bool
-PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
- unsigned DestReg, int FrameIdx,
- const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs,
- bool &NonRI, bool &SpillsVRS) const{
+bool PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, const DebugLoc &DL,
+ unsigned DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineInstr *> &NewMIs,
+ bool &NonRI, bool &SpillsVRS) const {
// Note: If additional load instructions are added here,
// update isLoadFromStackSlot.
@@ -1208,35 +1210,35 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
return false;
}
-bool PPCInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
- unsigned Reg, MachineRegisterInfo *MRI) const {
+bool PPCInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
+ unsigned Reg, MachineRegisterInfo *MRI) const {
// For some instructions, it is legal to fold ZERO into the RA register field.
// A zero immediate should always be loaded with a single li.
- unsigned DefOpc = DefMI->getOpcode();
+ unsigned DefOpc = DefMI.getOpcode();
if (DefOpc != PPC::LI && DefOpc != PPC::LI8)
return false;
- if (!DefMI->getOperand(1).isImm())
+ if (!DefMI.getOperand(1).isImm())
return false;
- if (DefMI->getOperand(1).getImm() != 0)
+ if (DefMI.getOperand(1).getImm() != 0)
return false;
// Note that we cannot here invert the arguments of an isel in order to fold
// a ZERO into what is presented as the second argument. All we have here
// is the condition bit, and that might come from a CR-logical bit operation.
- const MCInstrDesc &UseMCID = UseMI->getDesc();
+ const MCInstrDesc &UseMCID = UseMI.getDesc();
// Only fold into real machine instructions.
if (UseMCID.isPseudo())
return false;
unsigned UseIdx;
- for (UseIdx = 0; UseIdx < UseMI->getNumOperands(); ++UseIdx)
- if (UseMI->getOperand(UseIdx).isReg() &&
- UseMI->getOperand(UseIdx).getReg() == Reg)
+ for (UseIdx = 0; UseIdx < UseMI.getNumOperands(); ++UseIdx)
+ if (UseMI.getOperand(UseIdx).isReg() &&
+ UseMI.getOperand(UseIdx).getReg() == Reg)
break;
- assert(UseIdx < UseMI->getNumOperands() && "Cannot find Reg in UseMI");
+ assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI");
assert(UseIdx < UseMCID.getNumOperands() && "No operand description for Reg");
const MCOperandInfo *UseInfo = &UseMCID.OpInfo[UseIdx];
@@ -1268,10 +1270,10 @@ bool PPCInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
}
bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
- UseMI->getOperand(UseIdx).setReg(ZeroReg);
+ UseMI.getOperand(UseIdx).setReg(ZeroReg);
if (DeleteDef)
- DefMI->eraseFromParent();
+ DefMI.eraseFromParent();
return true;
}
@@ -1299,7 +1301,7 @@ bool PPCInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
}
-bool PPCInstrInfo::isPredicated(const MachineInstr *MI) const {
+bool PPCInstrInfo::isPredicated(const MachineInstr &MI) const {
// The predicated branches are identified by their type, not really by the
// explicit presence of a predicate. Furthermore, some of them can be
// predicated more than once. Because if conversion won't try to predicate
@@ -1310,73 +1312,71 @@ bool PPCInstrInfo::isPredicated(const MachineInstr *MI) const {
return false;
}
-bool PPCInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
- if (!MI->isTerminator())
+bool PPCInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
+ if (!MI.isTerminator())
return false;
// Conditional branch is a special case.
- if (MI->isBranch() && !MI->isBarrier())
+ if (MI.isBranch() && !MI.isBarrier())
return true;
return !isPredicated(MI);
}
-bool PPCInstrInfo::PredicateInstruction(MachineInstr *MI,
+bool PPCInstrInfo::PredicateInstruction(MachineInstr &MI,
ArrayRef<MachineOperand> Pred) const {
- unsigned OpC = MI->getOpcode();
+ unsigned OpC = MI.getOpcode();
if (OpC == PPC::BLR || OpC == PPC::BLR8) {
if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
bool isPPC64 = Subtarget.isPPC64();
- MI->setDesc(get(Pred[0].getImm() ?
- (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR) :
- (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR)));
+ MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR)
+ : (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR)));
} else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
- MI->setDesc(get(PPC::BCLR));
- MachineInstrBuilder(*MI->getParent()->getParent(), MI)
- .addReg(Pred[1].getReg());
+ MI.setDesc(get(PPC::BCLR));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addReg(Pred[1].getReg());
} else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
- MI->setDesc(get(PPC::BCLRn));
- MachineInstrBuilder(*MI->getParent()->getParent(), MI)
- .addReg(Pred[1].getReg());
+ MI.setDesc(get(PPC::BCLRn));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addReg(Pred[1].getReg());
} else {
- MI->setDesc(get(PPC::BCCLR));
- MachineInstrBuilder(*MI->getParent()->getParent(), MI)
- .addImm(Pred[0].getImm())
- .addReg(Pred[1].getReg());
+ MI.setDesc(get(PPC::BCCLR));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addImm(Pred[0].getImm())
+ .addReg(Pred[1].getReg());
}
return true;
} else if (OpC == PPC::B) {
if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
bool isPPC64 = Subtarget.isPPC64();
- MI->setDesc(get(Pred[0].getImm() ?
- (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
- (isPPC64 ? PPC::BDZ8 : PPC::BDZ)));
+ MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ)
+ : (isPPC64 ? PPC::BDZ8 : PPC::BDZ)));
} else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
- MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
- MI->RemoveOperand(0);
+ MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
+ MI.RemoveOperand(0);
- MI->setDesc(get(PPC::BC));
- MachineInstrBuilder(*MI->getParent()->getParent(), MI)
- .addReg(Pred[1].getReg())
- .addMBB(MBB);
+ MI.setDesc(get(PPC::BC));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addReg(Pred[1].getReg())
+ .addMBB(MBB);
} else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
- MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
- MI->RemoveOperand(0);
+ MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
+ MI.RemoveOperand(0);
- MI->setDesc(get(PPC::BCn));
- MachineInstrBuilder(*MI->getParent()->getParent(), MI)
- .addReg(Pred[1].getReg())
- .addMBB(MBB);
+ MI.setDesc(get(PPC::BCn));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addReg(Pred[1].getReg())
+ .addMBB(MBB);
} else {
- MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
- MI->RemoveOperand(0);
-
- MI->setDesc(get(PPC::BCC));
- MachineInstrBuilder(*MI->getParent()->getParent(), MI)
- .addImm(Pred[0].getImm())
- .addReg(Pred[1].getReg())
- .addMBB(MBB);
+ MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
+ MI.RemoveOperand(0);
+
+ MI.setDesc(get(PPC::BCC));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addImm(Pred[0].getImm())
+ .addReg(Pred[1].getReg())
+ .addMBB(MBB);
}
return true;
@@ -1389,24 +1389,24 @@ bool PPCInstrInfo::PredicateInstruction(MachineInstr *MI,
bool isPPC64 = Subtarget.isPPC64();
if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
- MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8) :
- (setLR ? PPC::BCCTRL : PPC::BCCTR)));
- MachineInstrBuilder(*MI->getParent()->getParent(), MI)
- .addReg(Pred[1].getReg());
+ MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8)
+ : (setLR ? PPC::BCCTRL : PPC::BCCTR)));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addReg(Pred[1].getReg());
return true;
} else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
- MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8n : PPC::BCCTR8n) :
- (setLR ? PPC::BCCTRLn : PPC::BCCTRn)));
- MachineInstrBuilder(*MI->getParent()->getParent(), MI)
- .addReg(Pred[1].getReg());
+ MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8n : PPC::BCCTR8n)
+ : (setLR ? PPC::BCCTRLn : PPC::BCCTRn)));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addReg(Pred[1].getReg());
return true;
}
- MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCCTRL8 : PPC::BCCCTR8) :
- (setLR ? PPC::BCCCTRL : PPC::BCCCTR)));
- MachineInstrBuilder(*MI->getParent()->getParent(), MI)
- .addImm(Pred[0].getImm())
- .addReg(Pred[1].getReg());
+ MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCCTRL8 : PPC::BCCCTR8)
+ : (setLR ? PPC::BCCCTRL : PPC::BCCCTR)));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addImm(Pred[0].getImm())
+ .addReg(Pred[1].getReg());
return true;
}
@@ -1444,7 +1444,7 @@ bool PPCInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
return false;
}
-bool PPCInstrInfo::DefinesPredicate(MachineInstr *MI,
+bool PPCInstrInfo::DefinesPredicate(MachineInstr &MI,
std::vector<MachineOperand> &Pred) const {
// Note: At the present time, the contents of Pred from this function is
// unused by IfConversion. This implementation follows ARM by pushing the
@@ -1457,8 +1457,8 @@ bool PPCInstrInfo::DefinesPredicate(MachineInstr *MI,
&PPC::CTRRCRegClass, &PPC::CTRRC8RegClass };
bool Found = false;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI.getOperand(i);
for (unsigned c = 0; c < array_lengthof(RCs) && !Found; ++c) {
const TargetRegisterClass *RC = RCs[c];
if (MO.isReg()) {
@@ -1480,8 +1480,8 @@ bool PPCInstrInfo::DefinesPredicate(MachineInstr *MI,
return Found;
}
-bool PPCInstrInfo::isPredicable(MachineInstr *MI) const {
- unsigned OpC = MI->getOpcode();
+bool PPCInstrInfo::isPredicable(MachineInstr &MI) const {
+ unsigned OpC = MI.getOpcode();
switch (OpC) {
default:
return false;
@@ -1496,10 +1496,10 @@ bool PPCInstrInfo::isPredicable(MachineInstr *MI) const {
}
}
-bool PPCInstrInfo::analyzeCompare(const MachineInstr *MI,
- unsigned &SrcReg, unsigned &SrcReg2,
- int &Mask, int &Value) const {
- unsigned Opc = MI->getOpcode();
+bool PPCInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &Mask,
+ int &Value) const {
+ unsigned Opc = MI.getOpcode();
switch (Opc) {
default: return false;
@@ -1507,9 +1507,9 @@ bool PPCInstrInfo::analyzeCompare(const MachineInstr *MI,
case PPC::CMPLWI:
case PPC::CMPDI:
case PPC::CMPLDI:
- SrcReg = MI->getOperand(1).getReg();
+ SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
- Value = MI->getOperand(2).getImm();
+ Value = MI.getOperand(2).getImm();
Mask = 0xFFFF;
return true;
case PPC::CMPW:
@@ -1518,21 +1518,20 @@ bool PPCInstrInfo::analyzeCompare(const MachineInstr *MI,
case PPC::CMPLD:
case PPC::FCMPUS:
case PPC::FCMPUD:
- SrcReg = MI->getOperand(1).getReg();
- SrcReg2 = MI->getOperand(2).getReg();
+ SrcReg = MI.getOperand(1).getReg();
+ SrcReg2 = MI.getOperand(2).getReg();
return true;
}
}
-bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
- unsigned SrcReg, unsigned SrcReg2,
- int Mask, int Value,
+bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
+ unsigned SrcReg2, int Mask, int Value,
const MachineRegisterInfo *MRI) const {
if (DisableCmpOpt)
return false;
- int OpC = CmpInstr->getOpcode();
- unsigned CRReg = CmpInstr->getOperand(0).getReg();
+ int OpC = CmpInstr.getOpcode();
+ unsigned CRReg = CmpInstr.getOperand(0).getReg();
// FP record forms set CR1 based on the exception status bits, not a
// comparison with zero.
@@ -1571,11 +1570,18 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
} else
return false;
} else if (is32BitUnsignedCompare) {
+ // 32-bit rotate and mask instructions are zero extending only if MB <= ME
+ bool isZeroExtendingRotate =
+ (MIOpC == PPC::RLWINM || MIOpC == PPC::RLWINMo ||
+ MIOpC == PPC::RLWNM || MIOpC == PPC::RLWNMo)
+ && MI->getOperand(3).getImm() <= MI->getOperand(4).getImm();
+
// We can perform this optimization, equality only, if MI is
// zero-extending.
if (MIOpC == PPC::CNTLZW || MIOpC == PPC::CNTLZWo ||
MIOpC == PPC::SLW || MIOpC == PPC::SLWo ||
- MIOpC == PPC::SRW || MIOpC == PPC::SRWo) {
+ MIOpC == PPC::SRW || MIOpC == PPC::SRWo ||
+ isZeroExtendingRotate) {
noSub = true;
equalityOnly = true;
} else
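
Why MB <= ME makes these rotates zero-extending (a sketch from the ISA semantics, not patch code): in a 64-bit register, rlwinm replicates the rotated word into both halves and masks with mask(MB+32, ME+32); that mask stays inside the low word unless it wraps, i.e. unless MB > ME.

#include <cstdint>
#include <cassert>

// 64-bit mask, MSB-0 numbering, wrapping when mb > me.
static uint64_t mask64(unsigned mb, unsigned me) {
  uint64_t hi = ~0ULL >> mb;
  uint64_t lo = ~0ULL << (63 - me);
  return mb <= me ? (hi & lo) : (hi | lo);
}

// rlwinm viewed in a 64-bit register (mb/me are the 32-bit operands).
static uint64_t rlwinm64(uint64_t rs, unsigned sh, unsigned mb, unsigned me) {
  uint32_t w = (uint32_t)rs;
  uint32_t rot = sh ? (w << sh) | (w >> (32 - sh)) : w;
  uint64_t dup = ((uint64_t)rot << 32) | rot; // rotated word, replicated
  return dup & mask64(mb + 32, me + 32);
}

int main() {
  assert((rlwinm64(~0ULL, 5, 0, 31) >> 32) == 0); // MB <= ME: high word clear
  assert((rlwinm64(~0ULL, 0, 31, 0) >> 32) != 0); // MB > ME: mask wraps
  return 0;
}
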
@@ -1608,8 +1614,8 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
MachineBasicBlock::iterator I = CmpInstr;
// Scan forward to find the first use of the compare.
- for (MachineBasicBlock::iterator EL = CmpInstr->getParent()->end();
- I != EL; ++I) {
+ for (MachineBasicBlock::iterator EL = CmpInstr.getParent()->end(); I != EL;
+ ++I) {
bool FoundUse = false;
for (MachineRegisterInfo::use_instr_iterator J =MRI->use_instr_begin(CRReg),
JE = MRI->use_instr_end(); J != JE; ++J)
@@ -1633,7 +1639,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
// same BB as the comparison. This is to allow the check below to avoid calls
// (and other explicit clobbers); instead we should really check for these
// more explicitly (in at least a few predecessors).
- else if (MI->getParent() != CmpInstr->getParent() || Value != 0) {
+ else if (MI->getParent() != CmpInstr.getParent() || Value != 0) {
// PPC does not have a record-form SUBri.
return false;
}
@@ -1643,16 +1649,14 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
--I;
// Get ready to iterate backward from CmpInstr.
- MachineBasicBlock::iterator E = MI,
- B = CmpInstr->getParent()->begin();
+ MachineBasicBlock::iterator E = MI, B = CmpInstr.getParent()->begin();
for (; I != E && !noSub; --I) {
const MachineInstr &Instr = *I;
unsigned IOpC = Instr.getOpcode();
- if (&*I != CmpInstr && (
- Instr.modifiesRegister(PPC::CR0, TRI) ||
- Instr.readsRegister(PPC::CR0, TRI)))
+ if (&*I != &CmpInstr && (Instr.modifiesRegister(PPC::CR0, TRI) ||
+ Instr.readsRegister(PPC::CR0, TRI)))
// This instruction modifies or uses the record condition register after
// the one we want to change. While we could do this transformation, it
// would likely not be profitable. This transformation removes one
@@ -1752,13 +1756,17 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
// Create a new virtual register to hold the value of the CR set by the
// record-form instruction. If the instruction was not previously in
// record form, then set the kill flag on the CR.
- CmpInstr->eraseFromParent();
+ CmpInstr.eraseFromParent();
MachineBasicBlock::iterator MII = MI;
BuildMI(*MI->getParent(), std::next(MII), MI->getDebugLoc(),
get(TargetOpcode::COPY), CRReg)
.addReg(PPC::CR0, MIOpC != NewOpC ? RegState::Kill : 0);
+ // Even if the CR0 register was dead before, it is alive now since the
+ // instruction we just built uses it.
+ MI->clearRegisterDeads(PPC::CR0);
+
if (MIOpC != NewOpC) {
// We need to be careful here: we're replacing one instruction with
// another, and we need to make sure that we get all of the right
@@ -1783,6 +1791,8 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
MI->addOperand(*MI->getParent()->getParent(),
MachineOperand::CreateReg(*ImpUses, false, true));
}
+ assert(MI->definesRegister(PPC::CR0) &&
+ "Record-form instruction does not define cr0?");
// Modify the condition code of operands in OperandsToUpdate.
// Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
@@ -1799,17 +1809,17 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
///
-unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
- unsigned Opcode = MI->getOpcode();
+unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr &MI) const {
+ unsigned Opcode = MI.getOpcode();
if (Opcode == PPC::INLINEASM) {
- const MachineFunction *MF = MI->getParent()->getParent();
- const char *AsmStr = MI->getOperand(0).getSymbolName();
+ const MachineFunction *MF = MI.getParent()->getParent();
+ const char *AsmStr = MI.getOperand(0).getSymbolName();
return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
} else if (Opcode == TargetOpcode::STACKMAP) {
- return MI->getOperand(1).getImm();
+ return MI.getOperand(1).getImm();
} else if (Opcode == TargetOpcode::PATCHPOINT) {
- PatchPointOpers Opers(MI);
+ PatchPointOpers Opers(&MI);
return Opers.getMetaOper(PatchPointOpers::NBytesPos).getImm();
} else {
const MCInstrDesc &Desc = get(Opcode);
@@ -1842,10 +1852,26 @@ ArrayRef<std::pair<unsigned, const char *>>
PPCInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
using namespace PPCII;
static const std::pair<unsigned, const char *> TargetFlags[] = {
- {MO_PLT_OR_STUB, "ppc-plt-or-stub"},
+ {MO_PLT, "ppc-plt"},
{MO_PIC_FLAG, "ppc-pic"},
{MO_NLP_FLAG, "ppc-nlp"},
{MO_NLP_HIDDEN_FLAG, "ppc-nlp-hidden"}};
return makeArrayRef(TargetFlags);
}
+bool PPCInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ case TargetOpcode::LOAD_STACK_GUARD: {
+ assert(Subtarget.isTargetLinux() &&
+ "Only Linux target is expected to contain LOAD_STACK_GUARD");
+ const int64_t Offset = Subtarget.isPPC64() ? -0x7010 : -0x7008;
+ const unsigned Reg = Subtarget.isPPC64() ? PPC::X13 : PPC::R2;
+ MI.setDesc(get(Subtarget.isPPC64() ? PPC::LD : PPC::LWZ));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addImm(Offset)
+ .addReg(Reg);
+ return true;
+ }
+ }
+ return false;
+}
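
The expansion above merely selects the operands of a plain TLS-relative load; a minimal sketch of that selection (illustrative struct and helper, not LLVM API):

#include <cstdio>

struct GuardLoad { const char *opc; long offset; const char *base; };

// Mirrors the isPPC64 choice above: ld rT, -0x7010(r13) vs lwz rT, -0x7008(r2).
static GuardLoad stackGuardLoad(bool isPPC64) {
  return isPPC64 ? GuardLoad{"ld", -0x7010, "r13"}
                 : GuardLoad{"lwz", -0x7008, "r2"};
}

int main() {
  GuardLoad g = stackGuardLoad(true);
  std::printf("%s rT, %ld(%s)\n", g.opc, g.offset, g.base);
  return 0;
}
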
diff --git a/lib/Target/PowerPC/PPCInstrInfo.h b/lib/Target/PowerPC/PPCInstrInfo.h
index c3c3a480a6aa..98baf125bdff 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/lib/Target/PowerPC/PPCInstrInfo.h
@@ -73,10 +73,10 @@ class PPCInstrInfo : public PPCGenInstrInfo {
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs,
bool &NonRI, bool &SpillsVRS) const;
- bool LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
+ bool LoadRegFromStackSlot(MachineFunction &MF, const DebugLoc &DL,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs,
+ SmallVectorImpl<MachineInstr *> &NewMIs,
bool &NonRI, bool &SpillsVRS) const;
virtual void anchor();
@@ -91,8 +91,7 @@ protected:
///
/// For example, we can commute rlwimi instructions, but only if the
/// rotate amt is zero. We also have to munge the immediates a bit.
- MachineInstr *commuteInstructionImpl(MachineInstr *MI,
- bool NewMI,
+ MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const override;
@@ -113,12 +112,12 @@ public:
const ScheduleDAG *DAG) const override;
unsigned getInstrLatency(const InstrItineraryData *ItinData,
- const MachineInstr *MI,
+ const MachineInstr &MI,
unsigned *PredCost = nullptr) const override;
int getOperandLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI,
+ const MachineInstr &DefMI, unsigned DefIdx,
+ const MachineInstr &UseMI,
unsigned UseIdx) const override;
int getOperandLatency(const InstrItineraryData *ItinData,
SDNode *DefNode, unsigned DefIdx,
@@ -128,7 +127,7 @@ public:
}
bool hasLowDefLatency(const TargetSchedModel &SchedModel,
- const MachineInstr *DefMI,
+ const MachineInstr &DefMI,
unsigned DefIdx) const override {
// Machine LICM should hoist all instructions in low-register-pressure
// situations; none are sufficiently free to justify leaving in a loop
@@ -152,12 +151,12 @@ public:
bool isCoalescableExtInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned &SubIdx) const override;
- unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
- unsigned isStoreToStackSlot(const MachineInstr *MI,
+ unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
- bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
+ bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const override;
void insertNoop(MachineBasicBlock &MBB,
@@ -165,25 +164,25 @@ public:
// Branch analysis.
- bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const override;
+ const DebugLoc &DL) const override;
// Select analysis.
bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
unsigned, unsigned, int &, int &, int &) const override;
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
- DebugLoc DL, unsigned DstReg, ArrayRef<MachineOperand> Cond,
- unsigned TrueReg, unsigned FalseReg) const override;
+ const DebugLoc &DL, unsigned DstReg,
+ ArrayRef<MachineOperand> Cond, unsigned TrueReg,
+ unsigned FalseReg) const override;
- void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -201,8 +200,8 @@ public:
bool
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
- bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
- unsigned Reg, MachineRegisterInfo *MRI) const override;
+ bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, unsigned Reg,
+ MachineRegisterInfo *MRI) const override;
// If conversion by predication (only supported by some branch instructions).
// All of the profitability checks always return true; it is always
@@ -230,37 +229,34 @@ public:
}
// Predication support.
- bool isPredicated(const MachineInstr *MI) const override;
+ bool isPredicated(const MachineInstr &MI) const override;
- bool isUnpredicatedTerminator(const MachineInstr *MI) const override;
+ bool isUnpredicatedTerminator(const MachineInstr &MI) const override;
- bool PredicateInstruction(MachineInstr *MI,
+ bool PredicateInstruction(MachineInstr &MI,
ArrayRef<MachineOperand> Pred) const override;
bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
ArrayRef<MachineOperand> Pred2) const override;
- bool DefinesPredicate(MachineInstr *MI,
+ bool DefinesPredicate(MachineInstr &MI,
std::vector<MachineOperand> &Pred) const override;
- bool isPredicable(MachineInstr *MI) const override;
+ bool isPredicable(MachineInstr &MI) const override;
// Comparison optimization.
+ bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &Mask, int &Value) const override;
- bool analyzeCompare(const MachineInstr *MI,
- unsigned &SrcReg, unsigned &SrcReg2,
- int &Mask, int &Value) const override;
-
- bool optimizeCompareInstr(MachineInstr *CmpInstr,
- unsigned SrcReg, unsigned SrcReg2,
- int Mask, int Value,
+ bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
+ unsigned SrcReg2, int Mask, int Value,
const MachineRegisterInfo *MRI) const override;
/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
///
- unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
+ unsigned GetInstSizeInBytes(const MachineInstr &MI) const;
void getNoopForMachoTarget(MCInst &NopInst) const override;
@@ -272,6 +268,9 @@ public:
ArrayRef<std::pair<unsigned, const char *>>
getSerializableBitmaskMachineOperandTargetFlags() const override;
+
+ // Lower pseudo instructions after register allocation.
+ bool expandPostRAPseudo(MachineInstr &MI) const override;
};
}
diff --git a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td
index ce0f9e6f52a7..4a42a947c6cb 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/lib/Target/PowerPC/PPCInstrInfo.td
@@ -31,6 +31,18 @@ def SDT_PPCvperm : SDTypeProfile<1, 3, [
SDTCisVT<3, v16i8>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>
]>;
+def SDT_PPCVecSplat : SDTypeProfile<1, 2, [ SDTCisVec<0>,
+ SDTCisVec<1>, SDTCisInt<2>
+]>;
+
+def SDT_PPCVecShift : SDTypeProfile<1, 3, [ SDTCisVec<0>,
+ SDTCisVec<1>, SDTCisVec<2>, SDTCisInt<3>
+]>;
+
+def SDT_PPCVecInsert : SDTypeProfile<1, 3, [ SDTCisVec<0>,
+ SDTCisVec<1>, SDTCisVec<2>, SDTCisInt<3>
+]>;
+
def SDT_PPCvcmp : SDTypeProfile<1, 3, [
SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, SDTCisVT<3, i32>
]>;
@@ -140,7 +152,10 @@ def PPCaddiTlsldLAddr : SDNode<"PPCISD::ADDI_TLSLD_L_ADDR",
def PPCaddisDtprelHA : SDNode<"PPCISD::ADDIS_DTPREL_HA", SDTIntBinOp>;
def PPCaddiDtprelL : SDNode<"PPCISD::ADDI_DTPREL_L", SDTIntBinOp>;
-def PPCvperm : SDNode<"PPCISD::VPERM", SDT_PPCvperm, []>;
+def PPCvperm : SDNode<"PPCISD::VPERM", SDT_PPCvperm, []>;
+def PPCxxsplt : SDNode<"PPCISD::XXSPLT", SDT_PPCVecSplat, []>;
+def PPCxxinsert : SDNode<"PPCISD::XXINSERT", SDT_PPCVecInsert, []>;
+def PPCvecshl : SDNode<"PPCISD::VECSHL", SDT_PPCVecShift, []>;
def PPCqvfperm : SDNode<"PPCISD::QVFPERM", SDT_PPCqvfperm, []>;
def PPCqvgpci : SDNode<"PPCISD::QVGPCI", SDT_PPCqvgpci, []>;
@@ -257,7 +272,7 @@ def HI16 : SDNodeXForm<imm, [{
def HA16 : SDNodeXForm<imm, [{
// Transformation function: shift the immediate value down into the low bits.
- signed int Val = N->getZExtValue();
+ int Val = N->getZExtValue();
return getI32Imm((Val - (signed short)Val) >> 16, SDLoc(N));
}]>;
def MB : SDNodeXForm<imm, [{
@@ -507,6 +522,24 @@ def u6imm : Operand<i32> {
let ParserMatchClass = PPCU6ImmAsmOperand;
let DecoderMethod = "decodeUImmOperand<6>";
}
+def PPCU7ImmAsmOperand : AsmOperandClass {
+ let Name = "U7Imm"; let PredicateMethod = "isU7Imm";
+ let RenderMethod = "addImmOperands";
+}
+def u7imm : Operand<i32> {
+ let PrintMethod = "printU7ImmOperand";
+ let ParserMatchClass = PPCU7ImmAsmOperand;
+ let DecoderMethod = "decodeUImmOperand<7>";
+}
+def PPCU8ImmAsmOperand : AsmOperandClass {
+ let Name = "U8Imm"; let PredicateMethod = "isU8Imm";
+ let RenderMethod = "addImmOperands";
+}
+def u8imm : Operand<i32> {
+ let PrintMethod = "printU8ImmOperand";
+ let ParserMatchClass = PPCU8ImmAsmOperand;
+ let DecoderMethod = "decodeUImmOperand<8>";
+}
def PPCU10ImmAsmOperand : AsmOperandClass {
let Name = "U10Imm"; let PredicateMethod = "isU10Imm";
let RenderMethod = "addImmOperands";
@@ -635,6 +668,13 @@ def PPCDispRIXOperand : AsmOperandClass {
def dispRIX : Operand<iPTR> {
let ParserMatchClass = PPCDispRIXOperand;
}
+def PPCDispRIX16Operand : AsmOperandClass {
+ let Name = "DispRIX16"; let PredicateMethod = "isS16ImmX16";
+ let RenderMethod = "addImmOperands";
+}
+def dispRIX16 : Operand<iPTR> {
+ let ParserMatchClass = PPCDispRIX16Operand;
+}
def PPCDispSPE8Operand : AsmOperandClass {
let Name = "DispSPE8"; let PredicateMethod = "isU8ImmX8";
let RenderMethod = "addImmOperands";
@@ -673,6 +713,12 @@ def memrix : Operand<iPTR> { // memri where the imm is 4-aligned.
let EncoderMethod = "getMemRIXEncoding";
let DecoderMethod = "decodeMemRIXOperands";
}
+def memrix16 : Operand<iPTR> { // memri, imm is 16-aligned, 12-bit, Inst{16-27}
+ let PrintMethod = "printMemRegImm";
+ let MIOperandInfo = (ops dispRIX16:$imm, ptr_rc_nor0:$reg);
+ let EncoderMethod = "getMemRIX16Encoding";
+ let DecoderMethod = "decodeMemRIX16Operands";
+}
def spe8dis : Operand<iPTR> { // SPE displacement where the imm is 8-aligned.
let PrintMethod = "printMemRegImm";
let MIOperandInfo = (ops dispSPE8:$imm, ptr_rc_nor0:$reg);
@@ -746,6 +792,7 @@ def NoNaNsFPMath : Predicate<"TM.Options.NoNaNsFPMath">;
def NaNsFPMath : Predicate<"!TM.Options.NoNaNsFPMath">;
def HasBPERMD : Predicate<"PPCSubTarget->hasBPERMD()">;
def HasExtDiv : Predicate<"PPCSubTarget->hasExtDiv()">;
+def IsISA3_0 : Predicate<"PPCSubTarget->isISA3_0()">;
//===----------------------------------------------------------------------===//
// PowerPC Multiclass Definitions.
@@ -1365,7 +1412,10 @@ let hasSideEffects = 1, isBarrier = 1, usesCustomInserter = 1 in {
Requires<[In32BitMode]>;
}
-let isBranch = 1, isTerminator = 1 in {
+// This pseudo is never removed from the function, as it serves as
+// a terminator. Size is set to 0 to prevent the builtin assembler
+// from emitting it.
+let isBranch = 1, isTerminator = 1, Size = 0 in {
def EH_SjLj_Setup : Pseudo<(outs), (ins directbrtarget:$dst),
"#EH_SjLj_Setup\t$dst", []>;
}
@@ -1543,6 +1593,13 @@ def LHARXL : XForm_1<31, 116, (outs gprc:$rD), (ins memrr:$src),
def LWARXL : XForm_1<31, 20, (outs gprc:$rD), (ins memrr:$src),
"lwarx $rD, $src, 1", IIC_LdStLWARX, []>, isDOT;
+
+// The atomic instructions use the destination register as well as the next one
+// or two registers in order (modulo 31).
+let hasExtraSrcRegAllocReq = 1 in
+def LWAT : X_RD5_RS5_IM5<31, 582, (outs gprc:$rD), (ins gprc:$rA, u5imm:$FC),
+ "lwat $rD, $rA, $FC", IIC_LdStLoad>,
+ Requires<[IsISA3_0]>;
}
let Defs = [CR0], mayStore = 1, hasSideEffects = 0 in {
@@ -1558,6 +1615,11 @@ def STWCX : XForm_1<31, 150, (outs), (ins gprc:$rS, memrr:$dst),
"stwcx. $rS, $dst", IIC_LdStSTWCX, []>, isDOT;
}
+let mayStore = 1, hasSideEffects = 0 in
+def STWAT : X_RD5_RS5_IM5<31, 710, (outs), (ins gprc:$rS, gprc:$rA, u5imm:$FC),
+ "stwat $rS, $rA, $FC", IIC_LdStStore>,
+ Requires<[IsISA3_0]>;
+
let isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in
def TRAP : XForm_24<31, 4, (outs), (ins), "trap", IIC_LdStLoad, [(trap)]>;
@@ -1947,6 +2009,10 @@ let isCompare = 1, hasSideEffects = 0 in {
"cmpwi $crD, $rA, $imm", IIC_IntCompare>;
def CMPLWI : DForm_6_ext<10, (outs crrc:$dst), (ins gprc:$src1, u16imm:$src2),
"cmplwi $dst, $src1, $src2", IIC_IntCompare>;
+ def CMPRB : X_BF3_L1_RS5_RS5<31, 192, (outs crbitrc:$BF),
+ (ins u1imm:$L, g8rc:$rA, g8rc:$rB),
+ "cmprb $BF, $L, $rA, $rB", IIC_IntCompare, []>,
+ Requires<[IsISA3_0]>;
}
}
@@ -2000,6 +2066,9 @@ defm SRAWI : XForm_10rc<31, 824, (outs gprc:$rA), (ins gprc:$rS, u5imm:$SH),
defm CNTLZW : XForm_11r<31, 26, (outs gprc:$rA), (ins gprc:$rS),
"cntlzw", "$rA, $rS", IIC_IntGeneral,
[(set i32:$rA, (ctlz i32:$rS))]>;
+defm CNTTZW : XForm_11r<31, 538, (outs gprc:$rA), (ins gprc:$rS),
+ "cnttzw", "$rA, $rS", IIC_IntGeneral,
+ [(set i32:$rA, (cttz i32:$rS))]>, Requires<[IsISA3_0]>;
defm EXTSB : XForm_11r<31, 954, (outs gprc:$rA), (ins gprc:$rS),
"extsb", "$rA, $rS", IIC_IntSimple,
[(set i32:$rA, (sext_inreg i32:$rS, i8))]>;
@@ -2286,6 +2355,10 @@ let isCodeGenOnly = 1 in {
PPC970_DGroup_First, PPC970_Unit_FXU;
}
+// Aliases for mtvrsave/mfvrsave to mfspr/mtspr.
+def : InstAlias<"mtvrsave $rS", (MTVRSAVE gprc:$rS)>;
+def : InstAlias<"mfvrsave $rS", (MFVRSAVE gprc:$rS)>;
+
// SPILL_VRSAVE - Indicate that we're dumping the VRSAVE register,
// so we'll need to scavenge a register for it.
let mayStore = 1 in
@@ -2328,6 +2401,9 @@ def MFCR : XFXForm_3<31, 19, (outs gprc:$rT), (ins),
"mfcr $rT", IIC_SprMFCR>,
PPC970_MicroCode, PPC970_Unit_CRU;
} // hasExtraSrcRegAllocReq = 1
+
+def MCRXRX : X_BF3<31, 576, (outs crrc:$BF), (ins),
+ "mcrxrx $BF", IIC_BrMCRX>, Requires<[IsISA3_0]>;
} // hasSideEffects = 0
// Pseudo instruction to perform FADD in round-to-zero mode.
@@ -4138,3 +4214,33 @@ def : Pat<(atomic_store_32 iaddr:$ptr, i32:$val), (STW gprc:$val, memri:$ptr)>;
def : Pat<(atomic_store_8 xaddr:$ptr, i32:$val), (STBX gprc:$val, memrr:$ptr)>;
def : Pat<(atomic_store_16 xaddr:$ptr, i32:$val), (STHX gprc:$val, memrr:$ptr)>;
def : Pat<(atomic_store_32 xaddr:$ptr, i32:$val), (STWX gprc:$val, memrr:$ptr)>;
+
+let Predicates = [IsISA3_0] in {
+
+// Copy-Paste Facility
+// We prefix COPY with 'CP' due to a name conflict in Target.td, and prefix
+// PASTE as well for naming consistency.
+let mayLoad = 1 in
+def CP_COPY : X_L1_RA5_RB5<31, 774, "copy" , gprc, IIC_LdStCOPY, []>;
+
+let mayStore = 1 in
+def CP_PASTE : X_L1_RA5_RB5<31, 902, "paste" , gprc, IIC_LdStPASTE, []>;
+
+let mayStore = 1, Defs = [CR0] in
+def CP_PASTEo : X_L1_RA5_RB5<31, 902, "paste.", gprc, IIC_LdStPASTE, []>, isDOT;
+
+def CP_COPYx : PPCAsmPseudo<"copy $rA, $rB" , (ins gprc:$rA, gprc:$rB)>;
+def CP_PASTEx : PPCAsmPseudo<"paste $rA, $rB", (ins gprc:$rA, gprc:$rB)>;
+def CP_COPY_FIRST : PPCAsmPseudo<"copy_first $rA, $rB",
+ (ins gprc:$rA, gprc:$rB)>;
+def CP_PASTE_LAST : PPCAsmPseudo<"paste_last $rA, $rB",
+ (ins gprc:$rA, gprc:$rB)>;
+def CP_ABORT : XForm_0<31, 838, (outs), (ins), "cp_abort", IIC_SprABORT, []>;
+
+// Message Synchronize
+def MSGSYNC : XForm_0<31, 886, (outs), (ins), "msgsync", IIC_SprMSGSYNC, []>;
+
+// Power-Saving Mode Instruction:
+def STOP : XForm_0<19, 370, (outs), (ins), "stop", IIC_SprSTOP, []>;
+
+} // IsISA3_0
diff --git a/lib/Target/PowerPC/PPCInstrVSX.td b/lib/Target/PowerPC/PPCInstrVSX.td
index df1142cb42f3..a02ace00a76f 100644
--- a/lib/Target/PowerPC/PPCInstrVSX.td
+++ b/lib/Target/PowerPC/PPCInstrVSX.td
@@ -57,6 +57,9 @@ def SDT_PPCstxvd2x : SDTypeProfile<0, 2, [
def SDT_PPCxxswapd : SDTypeProfile<1, 1, [
SDTCisSameAs<0, 1>
]>;
+def SDTVecConv : SDTypeProfile<1, 2, [
+ SDTCisVec<0>, SDTCisVec<1>, SDTCisPtrTy<2>
+]>;
def PPClxvd2x : SDNode<"PPCISD::LXVD2X", SDT_PPClxvd2x,
[SDNPHasChain, SDNPMayLoad]>;
@@ -66,6 +69,9 @@ def PPCxxswapd : SDNode<"PPCISD::XXSWAPD", SDT_PPCxxswapd, [SDNPHasChain]>;
def PPCmfvsr : SDNode<"PPCISD::MFVSR", SDTUnaryOp, []>;
def PPCmtvsra : SDNode<"PPCISD::MTVSRA", SDTUnaryOp, []>;
def PPCmtvsrz : SDNode<"PPCISD::MTVSRZ", SDTUnaryOp, []>;
+def PPCsvec2fp : SDNode<"PPCISD::SINT_VEC_TO_FP", SDTVecConv, []>;
+def PPCuvec2fp: SDNode<"PPCISD::UINT_VEC_TO_FP", SDTVecConv, []>;
+def PPCswapNoChain : SDNode<"PPCISD::SWAP_NO_CHAIN", SDT_PPCxxswapd>;
multiclass XX3Form_Rcr<bits<6> opcode, bits<7> xo, string asmbase,
string asmstr, InstrItinClass itin, Intrinsic Int,
@@ -608,7 +614,8 @@ let Uses = [RM] in {
"xvcvsxwdp $XT, $XB", IIC_VecFP, []>;
def XVCVSXWSP : XX2Form<60, 184,
(outs vsrc:$XT), (ins vsrc:$XB),
- "xvcvsxwsp $XT, $XB", IIC_VecFP, []>;
+ "xvcvsxwsp $XT, $XB", IIC_VecFP,
+ [(set v4f32:$XT, (sint_to_fp v4i32:$XB))]>;
def XVCVUXDDP : XX2Form<60, 488,
(outs vsrc:$XT), (ins vsrc:$XB),
"xvcvuxddp $XT, $XB", IIC_VecFP,
@@ -772,10 +779,14 @@ let Uses = [RM] in {
def XXSLDWI : XX3Form_2<60, 2,
(outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB, u2imm:$SHW),
- "xxsldwi $XT, $XA, $XB, $SHW", IIC_VecPerm, []>;
+ "xxsldwi $XT, $XA, $XB, $SHW", IIC_VecPerm,
+ [(set v4i32:$XT, (PPCvecshl v4i32:$XA, v4i32:$XB,
+ imm32SExt16:$SHW))]>;
def XXSPLTW : XX2Form_2<60, 164,
(outs vsrc:$XT), (ins vsrc:$XB, u2imm:$UIM),
- "xxspltw $XT, $XB, $UIM", IIC_VecPerm, []>;
+ "xxspltw $XT, $XB, $UIM", IIC_VecPerm,
+ [(set v4i32:$XT,
+ (PPCxxsplt v4i32:$XB, imm32SExt16:$UIM))]>;
} // hasSideEffects
// SELECT_CC_* - Used to implement the SELECT_CC DAG operation. Expanded after
@@ -926,6 +937,16 @@ def : Pat<(sext_inreg v2i64:$C, v2i32),
def : Pat<(v2f64 (sint_to_fp (sext_inreg v2i64:$C, v2i32))),
(XVCVSXWDP (XXSLDWI $C, $C, 1))>;
+def : Pat<(v2f64 (PPCsvec2fp v4i32:$C, 0)),
+ (v2f64 (XVCVSXWDP (v2i64 (XXMRGHW $C, $C))))>;
+def : Pat<(v2f64 (PPCsvec2fp v4i32:$C, 1)),
+ (v2f64 (XVCVSXWDP (v2i64 (XXMRGLW $C, $C))))>;
+
+def : Pat<(v2f64 (PPCuvec2fp v4i32:$C, 0)),
+ (v2f64 (XVCVUXWDP (v2i64 (XXMRGHW $C, $C))))>;
+def : Pat<(v2f64 (PPCuvec2fp v4i32:$C, 1)),
+ (v2f64 (XVCVUXWDP (v2i64 (XXMRGLW $C, $C))))>;
+
// Loads.
def : Pat<(v2f64 (load xoaddr:$src)), (LXVD2X xoaddr:$src)>;
def : Pat<(v2i64 (load xoaddr:$src)), (LXVD2X xoaddr:$src)>;
@@ -945,6 +966,7 @@ def : Pat<(v2f64 (PPCxxswapd v2f64:$src)), (XXPERMDI $src, $src, 2)>;
def : Pat<(v2i64 (PPCxxswapd v2i64:$src)), (XXPERMDI $src, $src, 2)>;
def : Pat<(v4f32 (PPCxxswapd v4f32:$src)), (XXPERMDI $src, $src, 2)>;
def : Pat<(v4i32 (PPCxxswapd v4i32:$src)), (XXPERMDI $src, $src, 2)>;
+def : Pat<(v2f64 (PPCswapNoChain v2f64:$src)), (XXPERMDI $src, $src, 2)>;
// Selects.
def : Pat<(v2f64 (selectcc i1:$lhs, i1:$rhs, v2f64:$tval, v2f64:$fval, SETLT)),
@@ -1007,6 +1029,28 @@ def : Pat<(int_ppc_vsx_xvrsqrtesp v4f32:$A),
def : Pat<(int_ppc_vsx_xvrsqrtedp v2f64:$A),
(XVRSQRTEDP $A)>;
+let Predicates = [IsLittleEndian] in {
+def : Pat<(f64 (PPCfcfid (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
+ (f64 (XSCVSXDDP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
+def : Pat<(f64 (PPCfcfid (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
+ (f64 (XSCVSXDDP (COPY_TO_REGCLASS (f64 (COPY_TO_REGCLASS $S, VSRC)), VSFRC)))>;
+def : Pat<(f64 (PPCfcfidu (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
+ (f64 (XSCVUXDDP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
+def : Pat<(f64 (PPCfcfidu (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
+ (f64 (XSCVUXDDP (COPY_TO_REGCLASS (f64 (COPY_TO_REGCLASS $S, VSRC)), VSFRC)))>;
+} // IsLittleEndian
+
+let Predicates = [IsBigEndian] in {
+def : Pat<(f64 (PPCfcfid (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
+ (f64 (XSCVSXDDP (COPY_TO_REGCLASS $S, VSFRC)))>;
+def : Pat<(f64 (PPCfcfid (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
+ (f64 (XSCVSXDDP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
+def : Pat<(f64 (PPCfcfidu (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
+ (f64 (XSCVUXDDP (COPY_TO_REGCLASS $S, VSFRC)))>;
+def : Pat<(f64 (PPCfcfidu (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
+ (f64 (XSCVUXDDP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
+} // IsBigEndian
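+// Why the two blocks differ: on little-endian subtargets, element 0 of a
+// v2i64 sits in the right (least-significant) doubleword of the VSR while the
+// scalar conversions read the left doubleword, so extracting element 0 needs
+// the XXPERMDI $S, $S, 2 doubleword swap and element 1 needs none; on
+// big-endian subtargets the layout is mirrored.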
+
} // AddedComplexity
} // HasVSX
@@ -1213,10 +1257,31 @@ let AddedComplexity = 400 in { // Prefer VSX patterns over non-VSX patterns.
def XSCVSPDPN : XX2Form<60, 331, (outs vssrc:$XT), (ins vsrc:$XB),
"xscvspdpn $XT, $XB", IIC_VecFP, []>;
+ let Predicates = [IsLittleEndian] in {
+ def : Pat<(f32 (PPCfcfids (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
+ (f32 (XSCVSXDSP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
+ def : Pat<(f32 (PPCfcfids (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
+ (f32 (XSCVSXDSP (COPY_TO_REGCLASS (f64 (COPY_TO_REGCLASS $S, VSRC)), VSFRC)))>;
+ def : Pat<(f32 (PPCfcfidus (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
+ (f32 (XSCVUXDSP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
+ def : Pat<(f32 (PPCfcfidus (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
+ (f32 (XSCVUXDSP (COPY_TO_REGCLASS (f64 (COPY_TO_REGCLASS $S, VSRC)), VSFRC)))>;
+ }
+
+ let Predicates = [IsBigEndian] in {
+ def : Pat<(f32 (PPCfcfids (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
+ (f32 (XSCVSXDSP (COPY_TO_REGCLASS $S, VSFRC)))>;
+ def : Pat<(f32 (PPCfcfids (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
+ (f32 (XSCVSXDSP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
+ def : Pat<(f32 (PPCfcfidus (PPCmtvsra (i64 (vector_extract v2i64:$S, 0))))),
+ (f32 (XSCVUXDSP (COPY_TO_REGCLASS $S, VSFRC)))>;
+ def : Pat<(f32 (PPCfcfidus (PPCmtvsra (i64 (vector_extract v2i64:$S, 1))))),
+ (f32 (XSCVUXDSP (COPY_TO_REGCLASS (XXPERMDI $S, $S, 2), VSFRC)))>;
+ }
} // AddedComplexity = 400
} // HasP8Vector
-let Predicates = [HasDirectMove, HasVSX] in {
+let Predicates = [HasDirectMove] in {
// VSX direct move instructions
def MFVSRD : XX1_RS6_RD5_XO<31, 51, (outs g8rc:$rA), (ins vsfrc:$XT),
"mfvsrd $rA, $XT", IIC_VecGeneral,
@@ -1235,7 +1300,22 @@ let Predicates = [HasDirectMove, HasVSX] in {
def MTVSRWZ : XX1_RS6_RD5_XO<31, 243, (outs vsfrc:$XT), (ins gprc:$rA),
"mtvsrwz $XT, $rA", IIC_VecGeneral,
[(set f64:$XT, (PPCmtvsrz i32:$rA))]>;
-} // HasDirectMove, HasVSX
+} // HasDirectMove
+
+let Predicates = [IsISA3_0, HasDirectMove] in {
+ def MTVSRWS: XX1_RS6_RD5_XO<31, 403, (outs vsrc:$XT), (ins gprc:$rA),
+ "mtvsrws $XT, $rA", IIC_VecGeneral,
+ []>;
+
+ def MTVSRDD: XX1Form<31, 435, (outs vsrc:$XT), (ins g8rc:$rA, g8rc:$rB),
+ "mtvsrdd $XT, $rA, $rB", IIC_VecGeneral,
+ []>, Requires<[In64BitMode]>;
+
+ def MFVSRLD: XX1_RS6_RD5_XO<31, 307, (outs g8rc:$rA), (ins vsrc:$XT),
+ "mfvsrld $rA, $XT", IIC_VecGeneral,
+ []>, Requires<[In64BitMode]>;
+
+} // IsISA3_0, HasDirectMove
/* Direct moves of various widths from GPR's into VSR's. Each move lines
the value up into element 0 (both BE and LE). Namely, entities smaller than
@@ -1285,7 +1365,7 @@ def VectorExtractions {
(v2i64 (COPY_TO_REGCLASS $S, VSRC)), sub_64));
// Word extraction
- dag LE_WORD_0 = (MFVSRWZ (EXTRACT_SUBREG (XXSLDWI $S, $S, 2), sub_64));
+ dag LE_WORD_0 = (MFVSRWZ (EXTRACT_SUBREG (XXPERMDI $S, $S, 2), sub_64));
dag LE_WORD_1 = (MFVSRWZ (EXTRACT_SUBREG (XXSLDWI $S, $S, 1), sub_64));
dag LE_WORD_2 = (MFVSRWZ (EXTRACT_SUBREG
(v2i64 (COPY_TO_REGCLASS $S, VSRC)), sub_64));
@@ -1555,7 +1635,7 @@ let Predicates = [IsBigEndian, HasP8Vector] in {
def : Pat<(f32 (vector_extract v4f32:$S, 1)),
(f32 (XSCVSPDPN (XXSLDWI $S, $S, 1)))>;
def : Pat<(f32 (vector_extract v4f32:$S, 2)),
- (f32 (XSCVSPDPN (XXSLDWI $S, $S, 2)))>;
+ (f32 (XSCVSPDPN (XXPERMDI $S, $S, 2)))>;
def : Pat<(f32 (vector_extract v4f32:$S, 3)),
(f32 (XSCVSPDPN (XXSLDWI $S, $S, 3)))>;
def : Pat<(f32 (vector_extract v4f32:$S, i64:$Idx)),
@@ -1660,7 +1740,7 @@ let Predicates = [IsLittleEndian, HasP8Vector] in {
def : Pat<(f32 (vector_extract v4f32:$S, 0)),
(f32 (XSCVSPDPN (XXSLDWI $S, $S, 3)))>;
def : Pat<(f32 (vector_extract v4f32:$S, 1)),
- (f32 (XSCVSPDPN (XXSLDWI $S, $S, 2)))>;
+ (f32 (XSCVSPDPN (XXPERMDI $S, $S, 2)))>;
def : Pat<(f32 (vector_extract v4f32:$S, 2)),
(f32 (XSCVSPDPN (XXSLDWI $S, $S, 1)))>;
def : Pat<(f32 (vector_extract v4f32:$S, 3)),
@@ -1783,3 +1863,423 @@ def : Pat<(i64 (bitconvert f64:$S)),
def : Pat<(f64 (bitconvert i64:$S)),
(f64 (MTVSRD $S))>;
}
+
+def AlignValues {
+ dag F32_TO_BE_WORD1 = (v4f32 (XXSLDWI (XSCVDPSPN $B), (XSCVDPSPN $B), 3));
+ dag I32_TO_BE_WORD1 = (COPY_TO_REGCLASS (MTVSRWZ $B), VSRC);
+}
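+
+// Rationale, inferred from the patterns that use AlignValues below: xxinsertw
+// reads its source element from BE word 1 of XB, so the scalar must be parked
+// there first. XSCVDPSPN leaves the converted single in word 0 and the
+// XXSLDWI by 3 rotates it into word 1, while MTVSRWZ zero-extends the GPR
+// into doubleword 0, which already places the 32-bit payload in BE word 1.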
+
+// The following VSX instructions were introduced in Power ISA 3.0
+def HasP9Vector : Predicate<"PPCSubTarget->hasP9Vector()">;
+let AddedComplexity = 400, Predicates = [HasP9Vector] in {
+
+ // [PO VRT XO VRB XO /]
+ class X_VT5_XO5_VB5<bits<6> opcode, bits<5> xo2, bits<10> xo, string opc,
+ list<dag> pattern>
+ : X_RD5_XO5_RS5<opcode, xo2, xo, (outs vrrc:$vT), (ins vrrc:$vB),
+ !strconcat(opc, " $vT, $vB"), IIC_VecFP, pattern>;
+
+ // [PO VRT XO VRB XO RO], Round to Odd version of [PO VRT XO VRB XO /]
+ class X_VT5_XO5_VB5_Ro<bits<6> opcode, bits<5> xo2, bits<10> xo, string opc,
+ list<dag> pattern>
+ : X_VT5_XO5_VB5<opcode, xo2, xo, opc, pattern>, isDOT;
+
+  // [PO VRT XO VRB XO /], but only the left 64 bits (or fewer) of VRB are
+  // used, so we use a different operand class for VRB.
+ class X_VT5_XO5_VB5_TyVB<bits<6> opcode, bits<5> xo2, bits<10> xo, string opc,
+ RegisterOperand vbtype, list<dag> pattern>
+ : X_RD5_XO5_RS5<opcode, xo2, xo, (outs vrrc:$vT), (ins vbtype:$vB),
+ !strconcat(opc, " $vT, $vB"), IIC_VecFP, pattern>;
+
+ // [PO T XO B XO BX /]
+ class XX2_RT5_XO5_XB6<bits<6> opcode, bits<5> xo2, bits<9> xo, string opc,
+ list<dag> pattern>
+ : XX2_RD5_XO5_RS6<opcode, xo2, xo, (outs g8rc:$rT), (ins vsfrc:$XB),
+ !strconcat(opc, " $rT, $XB"), IIC_VecFP, pattern>;
+
+ // [PO T XO B XO BX TX]
+ class XX2_XT6_XO5_XB6<bits<6> opcode, bits<5> xo2, bits<9> xo, string opc,
+ RegisterOperand vtype, list<dag> pattern>
+ : XX2_RD6_XO5_RS6<opcode, xo2, xo, (outs vtype:$XT), (ins vtype:$XB),
+ !strconcat(opc, " $XT, $XB"), IIC_VecFP, pattern>;
+
+  // [PO T A B XO AX BX TX], where the src and dest registers may use
+  // different operand classes.
+ class XX3_XT5_XA5_XB5<bits<6> opcode, bits<8> xo, string opc,
+ RegisterOperand xty, RegisterOperand aty, RegisterOperand bty,
+ InstrItinClass itin, list<dag> pattern>
+ : XX3Form<opcode, xo, (outs xty:$XT), (ins aty:$XA, bty:$XB),
+ !strconcat(opc, " $XT, $XA, $XB"), itin, pattern>;
+
+ // [PO VRT VRA VRB XO /]
+ class X_VT5_VA5_VB5<bits<6> opcode, bits<10> xo, string opc,
+ list<dag> pattern>
+ : XForm_1<opcode, xo, (outs vrrc:$vT), (ins vrrc:$vA, vrrc:$vB),
+ !strconcat(opc, " $vT, $vA, $vB"), IIC_VecFP, pattern>;
+
+ // [PO VRT VRA VRB XO RO], Round to Odd version of [PO VRT VRA VRB XO /]
+ class X_VT5_VA5_VB5_Ro<bits<6> opcode, bits<10> xo, string opc,
+ list<dag> pattern>
+ : X_VT5_VA5_VB5<opcode, xo, opc, pattern>, isDOT;
+
+ //===--------------------------------------------------------------------===//
+ // Quad-Precision Scalar Move Instructions:
+
+ // Copy Sign
+ def XSCPSGNQP : X_VT5_VA5_VB5<63, 100, "xscpsgnqp", []>;
+
+ // Absolute/Negative-Absolute/Negate
+ def XSABSQP : X_VT5_XO5_VB5<63, 0, 804, "xsabsqp" , []>;
+ def XSNABSQP : X_VT5_XO5_VB5<63, 8, 804, "xsnabsqp", []>;
+ def XSNEGQP : X_VT5_XO5_VB5<63, 16, 804, "xsnegqp" , []>;
+
+ //===--------------------------------------------------------------------===//
+ // Quad-Precision Scalar Floating-Point Arithmetic Instructions:
+
+ // Add/Divide/Multiply/Subtract
+ def XSADDQP : X_VT5_VA5_VB5 <63, 4, "xsaddqp" , []>;
+ def XSADDQPO : X_VT5_VA5_VB5_Ro<63, 4, "xsaddqpo", []>;
+ def XSDIVQP : X_VT5_VA5_VB5 <63, 548, "xsdivqp" , []>;
+ def XSDIVQPO : X_VT5_VA5_VB5_Ro<63, 548, "xsdivqpo", []>;
+ def XSMULQP : X_VT5_VA5_VB5 <63, 36, "xsmulqp" , []>;
+ def XSMULQPO : X_VT5_VA5_VB5_Ro<63, 36, "xsmulqpo", []>;
+ def XSSUBQP : X_VT5_VA5_VB5 <63, 516, "xssubqp" , []>;
+ def XSSUBQPO : X_VT5_VA5_VB5_Ro<63, 516, "xssubqpo", []>;
+
+ // Square-Root
+ def XSSQRTQP : X_VT5_XO5_VB5 <63, 27, 804, "xssqrtqp" , []>;
+ def XSSQRTQPO : X_VT5_XO5_VB5_Ro<63, 27, 804, "xssqrtqpo", []>;
+
+ // (Negative) Multiply-{Add/Subtract}
+ def XSMADDQP : X_VT5_VA5_VB5 <63, 388, "xsmaddqp" , []>;
+ def XSMADDQPO : X_VT5_VA5_VB5_Ro<63, 388, "xsmaddqpo" , []>;
+ def XSMSUBQP : X_VT5_VA5_VB5 <63, 420, "xsmsubqp" , []>;
+ def XSMSUBQPO : X_VT5_VA5_VB5_Ro<63, 420, "xsmsubqpo" , []>;
+ def XSNMADDQP : X_VT5_VA5_VB5 <63, 452, "xsnmaddqp" , []>;
+ def XSNMADDQPO: X_VT5_VA5_VB5_Ro<63, 452, "xsnmaddqpo", []>;
+ def XSNMSUBQP : X_VT5_VA5_VB5 <63, 484, "xsnmsubqp" , []>;
+ def XSNMSUBQPO: X_VT5_VA5_VB5_Ro<63, 484, "xsnmsubqpo", []>;
+
+ //===--------------------------------------------------------------------===//
+ // Quad/Double-Precision Compare Instructions:
+
+ // [PO BF // VRA VRB XO /]
+ class X_BF3_VA5_VB5<bits<6> opcode, bits<10> xo, string opc,
+ list<dag> pattern>
+ : XForm_17<opcode, xo, (outs crrc:$crD), (ins vrrc:$VA, vrrc:$VB),
+ !strconcat(opc, " $crD, $VA, $VB"), IIC_FPCompare> {
+ let Pattern = pattern;
+ }
+
+ // QP Compare Ordered/Unordered
+ def XSCMPOQP : X_BF3_VA5_VB5<63, 132, "xscmpoqp", []>;
+ def XSCMPUQP : X_BF3_VA5_VB5<63, 644, "xscmpuqp", []>;
+
+ // DP/QP Compare Exponents
+ def XSCMPEXPDP : XX3Form_1<60, 59,
+ (outs crrc:$crD), (ins vsfrc:$XA, vsfrc:$XB),
+ "xscmpexpdp $crD, $XA, $XB", IIC_FPCompare, []>;
+ def XSCMPEXPQP : X_BF3_VA5_VB5<63, 164, "xscmpexpqp", []>;
+
+ // DP Compare ==, >=, >, !=
+ // Use vsrc for XT, because the entire register of XT is set.
+ // XT.dword[1] = 0x0000_0000_0000_0000
+ def XSCMPEQDP : XX3_XT5_XA5_XB5<60, 3, "xscmpeqdp", vsrc, vsfrc, vsfrc,
+ IIC_FPCompare, []>;
+ def XSCMPGEDP : XX3_XT5_XA5_XB5<60, 19, "xscmpgedp", vsrc, vsfrc, vsfrc,
+ IIC_FPCompare, []>;
+ def XSCMPGTDP : XX3_XT5_XA5_XB5<60, 11, "xscmpgtdp", vsrc, vsfrc, vsfrc,
+ IIC_FPCompare, []>;
+ def XSCMPNEDP : XX3_XT5_XA5_XB5<60, 27, "xscmpnedp", vsrc, vsfrc, vsfrc,
+ IIC_FPCompare, []>;
+ // Vector Compare Not Equal
+ def XVCMPNEDP : XX3Form_Rc<60, 123,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvcmpnedp $XT, $XA, $XB", IIC_VecFPCompare, []>;
+ let Defs = [CR6] in
+ def XVCMPNEDPo : XX3Form_Rc<60, 123,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvcmpnedp. $XT, $XA, $XB", IIC_VecFPCompare, []>,
+ isDOT;
+ def XVCMPNESP : XX3Form_Rc<60, 91,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvcmpnesp $XT, $XA, $XB", IIC_VecFPCompare, []>;
+ let Defs = [CR6] in
+ def XVCMPNESPo : XX3Form_Rc<60, 91,
+ (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+ "xvcmpnesp. $XT, $XA, $XB", IIC_VecFPCompare, []>,
+ isDOT;
+
+ //===--------------------------------------------------------------------===//
+ // Quad-Precision Floating-Point Conversion Instructions:
+
+ // Convert DP -> QP
+ def XSCVDPQP : X_VT5_XO5_VB5_TyVB<63, 22, 836, "xscvdpqp", vsfrc, []>;
+
+ // Round & Convert QP -> DP (dword[1] is set to zero)
+ def XSCVQPDP : X_VT5_XO5_VB5 <63, 20, 836, "xscvqpdp" , []>;
+ def XSCVQPDPO : X_VT5_XO5_VB5_Ro<63, 20, 836, "xscvqpdpo", []>;
+
+ // Truncate & Convert QP -> (Un)Signed (D)Word (dword[1] is set to zero)
+ def XSCVQPSDZ : X_VT5_XO5_VB5<63, 25, 836, "xscvqpsdz", []>;
+ def XSCVQPSWZ : X_VT5_XO5_VB5<63, 9, 836, "xscvqpswz", []>;
+ def XSCVQPUDZ : X_VT5_XO5_VB5<63, 17, 836, "xscvqpudz", []>;
+ def XSCVQPUWZ : X_VT5_XO5_VB5<63, 1, 836, "xscvqpuwz", []>;
+
+ // Convert (Un)Signed DWord -> QP
+ def XSCVSDQP : X_VT5_XO5_VB5_TyVB<63, 10, 836, "xscvsdqp", vsfrc, []>;
+ def XSCVUDQP : X_VT5_XO5_VB5_TyVB<63, 2, 836, "xscvudqp", vsfrc, []>;
+
+ //===--------------------------------------------------------------------===//
+ // Round to Floating-Point Integer Instructions
+
+ // (Round &) Convert DP <-> HP
+  // Note: xscvdphp's src and dest registers both use the left 64 bits, so we
+  // use vsfrc for both. xscvhpdp's src only uses the left 16 bits, but we
+  // still use vsfrc for it.
+ def XSCVDPHP : XX2_XT6_XO5_XB6<60, 17, 347, "xscvdphp", vsfrc, []>;
+ def XSCVHPDP : XX2_XT6_XO5_XB6<60, 16, 347, "xscvhpdp", vsfrc, []>;
+
+ // Vector HP -> SP
+ def XVCVHPSP : XX2_XT6_XO5_XB6<60, 24, 475, "xvcvhpsp", vsrc, []>;
+ def XVCVSPHP : XX2_XT6_XO5_XB6<60, 25, 475, "xvcvsphp", vsrc, []>;
+
+ class Z23_VT5_R1_VB5_RMC2_EX1<bits<6> opcode, bits<8> xo, bit ex, string opc,
+ list<dag> pattern>
+ : Z23Form_1<opcode, xo,
+ (outs vrrc:$vT), (ins u1imm:$r, vrrc:$vB, u2imm:$rmc),
+ !strconcat(opc, " $r, $vT, $vB, $rmc"), IIC_VecFP, pattern> {
+ let RC = ex;
+ }
+
+ // Round to Quad-Precision Integer [with Inexact]
+ def XSRQPI : Z23_VT5_R1_VB5_RMC2_EX1<63, 5, 0, "xsrqpi" , []>;
+ def XSRQPIX : Z23_VT5_R1_VB5_RMC2_EX1<63, 5, 1, "xsrqpix", []>;
+
+ // Round Quad-Precision to Double-Extended Precision (fp80)
+ def XSRQPXP : Z23_VT5_R1_VB5_RMC2_EX1<63, 37, 0, "xsrqpxp", []>;
+
+ //===--------------------------------------------------------------------===//
+ // Insert/Extract Instructions
+
+ // Insert Exponent DP/QP
+ // XT NOTE: XT.dword[1] = 0xUUUU_UUUU_UUUU_UUUU
+ def XSIEXPDP : XX1Form <60, 918, (outs vsrc:$XT), (ins g8rc:$rA, g8rc:$rB),
+ "xsiexpdp $XT, $rA, $rB", IIC_VecFP, []>;
+  // vB NOTE: only vB.dword[0] is used, which is why we don't use the
+  // X_VT5_VA5_VB5 form.
+ def XSIEXPQP : XForm_18<63, 868, (outs vrrc:$vT), (ins vrrc:$vA, vsfrc:$vB),
+ "xsiexpqp $vT, $vA, $vB", IIC_VecFP, []>;
+
+ // Extract Exponent/Significand DP/QP
+ def XSXEXPDP : XX2_RT5_XO5_XB6<60, 0, 347, "xsxexpdp", []>;
+ def XSXSIGDP : XX2_RT5_XO5_XB6<60, 1, 347, "xsxsigdp", []>;
+ def XSXEXPQP : X_VT5_XO5_VB5 <63, 2, 804, "xsxexpqp", []>;
+ def XSXSIGQP : X_VT5_XO5_VB5 <63, 18, 804, "xsxsigqp", []>;
+
+ // Vector Insert Word
+ // XB NOTE: Only XB.dword[1] is used, but we use vsrc on XB.
+ def XXINSERTW :
+ XX2_RD6_UIM5_RS6<60, 181, (outs vsrc:$XT),
+ (ins vsrc:$XTi, vsrc:$XB, u4imm:$UIM),
+ "xxinsertw $XT, $XB, $UIM", IIC_VecFP,
+ [(set v4i32:$XT, (PPCxxinsert v4i32:$XTi, v4i32:$XB,
+ imm32SExt16:$UIM))]>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">;
+
+ // Vector Extract Unsigned Word
+ def XXEXTRACTUW : XX2_RD6_UIM5_RS6<60, 165,
+ (outs vsfrc:$XT), (ins vsrc:$XB, u4imm:$UIMM),
+ "xxextractuw $XT, $XB, $UIMM", IIC_VecFP, []>;
+
+ // Vector Insert Exponent DP/SP
+ def XVIEXPDP : XX3_XT5_XA5_XB5<60, 248, "xviexpdp", vsrc, vsrc, vsrc,
+ IIC_VecFP, []>;
+ def XVIEXPSP : XX3_XT5_XA5_XB5<60, 216, "xviexpsp", vsrc, vsrc, vsrc,
+ IIC_VecFP, []>;
+
+ // Vector Extract Exponent/Significand DP/SP
+ def XVXEXPDP : XX2_XT6_XO5_XB6<60, 0, 475, "xvxexpdp", vsrc, []>;
+ def XVXEXPSP : XX2_XT6_XO5_XB6<60, 8, 475, "xvxexpsp", vsrc, []>;
+ def XVXSIGDP : XX2_XT6_XO5_XB6<60, 1, 475, "xvxsigdp", vsrc, []>;
+ def XVXSIGSP : XX2_XT6_XO5_XB6<60, 9, 475, "xvxsigsp", vsrc, []>;
+
+ //===--------------------------------------------------------------------===//
+
+ // Test Data Class SP/DP/QP
+ def XSTSTDCSP : XX2_BF3_DCMX7_RS6<60, 298,
+ (outs crrc:$BF), (ins u7imm:$DCMX, vsfrc:$XB),
+ "xststdcsp $BF, $XB, $DCMX", IIC_VecFP, []>;
+ def XSTSTDCDP : XX2_BF3_DCMX7_RS6<60, 362,
+ (outs crrc:$BF), (ins u7imm:$DCMX, vsfrc:$XB),
+ "xststdcdp $BF, $XB, $DCMX", IIC_VecFP, []>;
+ def XSTSTDCQP : X_BF3_DCMX7_RS5 <63, 708,
+ (outs crrc:$BF), (ins u7imm:$DCMX, vrrc:$vB),
+ "xststdcqp $BF, $vB, $DCMX", IIC_VecFP, []>;
+
+ // Vector Test Data Class SP/DP
+ def XVTSTDCSP : XX2_RD6_DCMX7_RS6<60, 13, 5,
+ (outs vsrc:$XT), (ins u7imm:$DCMX, vsrc:$XB),
+ "xvtstdcsp $XT, $XB, $DCMX", IIC_VecFP, []>;
+ def XVTSTDCDP : XX2_RD6_DCMX7_RS6<60, 15, 5,
+ (outs vsrc:$XT), (ins u7imm:$DCMX, vsrc:$XB),
+ "xvtstdcdp $XT, $XB, $DCMX", IIC_VecFP, []>;
+
+ //===--------------------------------------------------------------------===//
+
+ // Maximum/Minimum Type-C/Type-J DP
+ // XT.dword[1] = 0xUUUU_UUUU_UUUU_UUUU, so we use vsrc for XT
+ def XSMAXCDP : XX3_XT5_XA5_XB5<60, 128, "xsmaxcdp", vsrc, vsfrc, vsfrc,
+ IIC_VecFP, []>;
+ def XSMAXJDP : XX3_XT5_XA5_XB5<60, 144, "xsmaxjdp", vsrc, vsfrc, vsfrc,
+ IIC_VecFP, []>;
+ def XSMINCDP : XX3_XT5_XA5_XB5<60, 136, "xsmincdp", vsrc, vsfrc, vsfrc,
+ IIC_VecFP, []>;
+ def XSMINJDP : XX3_XT5_XA5_XB5<60, 152, "xsminjdp", vsrc, vsfrc, vsfrc,
+ IIC_VecFP, []>;
+
+ //===--------------------------------------------------------------------===//
+
+ // Vector Byte-Reverse H/W/D/Q Word
+ def XXBRH : XX2_XT6_XO5_XB6<60, 7, 475, "xxbrh", vsrc, []>;
+ def XXBRW : XX2_XT6_XO5_XB6<60, 15, 475, "xxbrw", vsrc, []>;
+ def XXBRD : XX2_XT6_XO5_XB6<60, 23, 475, "xxbrd", vsrc, []>;
+ def XXBRQ : XX2_XT6_XO5_XB6<60, 31, 475, "xxbrq", vsrc, []>;
+
+ // Vector Permute
+ def XXPERM : XX3_XT5_XA5_XB5<60, 26, "xxperm" , vsrc, vsrc, vsrc,
+ IIC_VecPerm, []>;
+ def XXPERMR : XX3_XT5_XA5_XB5<60, 58, "xxpermr", vsrc, vsrc, vsrc,
+ IIC_VecPerm, []>;
+
+ // Vector Splat Immediate Byte
+ def XXSPLTIB : X_RD6_IMM8<60, 360, (outs vsrc:$XT), (ins u8imm:$IMM8),
+ "xxspltib $XT, $IMM8", IIC_VecPerm, []>;
+
+ //===--------------------------------------------------------------------===//
+ // Vector/Scalar Load/Store Instructions
+
+ let mayLoad = 1 in {
+ // Load Vector
+ def LXV : DQ_RD6_RS5_DQ12<61, 1, (outs vsrc:$XT), (ins memrix16:$src),
+ "lxv $XT, $src", IIC_LdStLFD, []>;
+ // Load DWord
+ def LXSD : DSForm_1<57, 2, (outs vrrc:$vD), (ins memrix:$src),
+ "lxsd $vD, $src", IIC_LdStLFD, []>;
+ // Load SP from src, convert it to DP, and place in dword[0]
+ def LXSSP : DSForm_1<57, 3, (outs vrrc:$vD), (ins memrix:$src),
+ "lxssp $vD, $src", IIC_LdStLFD, []>;
+
+  // [PO T RA RB XO TX] is almost identical to [PO S RA RB XO SX], but with
+  // different "out" and "in" dags.
+ class X_XT6_RA5_RB5<bits<6> opcode, bits<10> xo, string opc,
+ RegisterOperand vtype, list<dag> pattern>
+ : XX1Form<opcode, xo, (outs vtype:$XT), (ins memrr:$src),
+ !strconcat(opc, " $XT, $src"), IIC_LdStLFD, pattern>;
+
+ // Load as Integer Byte/Halfword & Zero Indexed
+ def LXSIBZX : X_XT6_RA5_RB5<31, 781, "lxsibzx", vsfrc, []>;
+ def LXSIHZX : X_XT6_RA5_RB5<31, 813, "lxsihzx", vsfrc, []>;
+
+ // Load Vector Halfword*8/Byte*16 Indexed
+ def LXVH8X : X_XT6_RA5_RB5<31, 812, "lxvh8x" , vsrc, []>;
+ def LXVB16X : X_XT6_RA5_RB5<31, 876, "lxvb16x", vsrc, []>;
+
+ // Load Vector Indexed
+ def LXVX : X_XT6_RA5_RB5<31, 268, "lxvx" , vsrc, []>;
+
+ // Load Vector (Left-justified) with Length
+ def LXVL : X_XT6_RA5_RB5<31, 269, "lxvl" , vsrc, []>;
+ def LXVLL : X_XT6_RA5_RB5<31, 301, "lxvll" , vsrc, []>;
+
+ // Load Vector Word & Splat Indexed
+ def LXVWSX : X_XT6_RA5_RB5<31, 364, "lxvwsx" , vsrc, []>;
+ } // end mayLoad
+
+ let mayStore = 1 in {
+ // Store Vector
+ def STXV : DQ_RD6_RS5_DQ12<61, 5, (outs), (ins vsrc:$XT, memrix16:$dst),
+ "stxv $XT, $dst", IIC_LdStSTFD, []>;
+ // Store DWord
+ def STXSD : DSForm_1<61, 2, (outs), (ins vrrc:$vS, memrix:$dst),
+ "stxsd $vS, $dst", IIC_LdStSTFD, []>;
+ // Convert DP of dword[0] to SP, and Store to dst
+ def STXSSP : DSForm_1<61, 3, (outs), (ins vrrc:$vS, memrix:$dst),
+ "stxssp $vS, $dst", IIC_LdStSTFD, []>;
+
+ // [PO S RA RB XO SX]
+ class X_XS6_RA5_RB5<bits<6> opcode, bits<10> xo, string opc,
+ RegisterOperand vtype, list<dag> pattern>
+ : XX1Form<opcode, xo, (outs), (ins vtype:$XT, memrr:$dst),
+ !strconcat(opc, " $XT, $dst"), IIC_LdStSTFD, pattern>;
+
+ // Store as Integer Byte/Halfword Indexed
+ def STXSIBX : X_XS6_RA5_RB5<31, 909, "stxsibx" , vsfrc, []>;
+ def STXSIHX : X_XS6_RA5_RB5<31, 941, "stxsihx" , vsfrc, []>;
+
+ // Store Vector Halfword*8/Byte*16 Indexed
+ def STXVH8X : X_XS6_RA5_RB5<31, 940, "stxvh8x" , vsrc, []>;
+ def STXVB16X : X_XS6_RA5_RB5<31, 1004, "stxvb16x", vsrc, []>;
+
+ // Store Vector Indexed
+ def STXVX : X_XS6_RA5_RB5<31, 396, "stxvx" , vsrc, []>;
+
+ // Store Vector (Left-justified) with Length
+ def STXVL : X_XS6_RA5_RB5<31, 397, "stxvl" , vsrc, []>;
+ def STXVLL : X_XS6_RA5_RB5<31, 429, "stxvll" , vsrc, []>;
+ } // end mayStore
+
+ // Patterns for which instructions from ISA 3.0 are a better match
+ let Predicates = [IsLittleEndian, HasP9Vector] in {
+ def : Pat<(f32 (PPCfcfidus (PPCmtvsrz (i32 (extractelt v4i32:$A, 0))))),
+ (f32 (XSCVUXDSP (XXEXTRACTUW $A, 12)))>;
+ def : Pat<(f32 (PPCfcfidus (PPCmtvsrz (i32 (extractelt v4i32:$A, 1))))),
+ (f32 (XSCVUXDSP (XXEXTRACTUW $A, 8)))>;
+ def : Pat<(f32 (PPCfcfidus (PPCmtvsrz (i32 (extractelt v4i32:$A, 2))))),
+ (f32 (XSCVUXDSP (XXEXTRACTUW $A, 4)))>;
+ def : Pat<(f32 (PPCfcfidus (PPCmtvsrz (i32 (extractelt v4i32:$A, 3))))),
+ (f32 (XSCVUXDSP (XXEXTRACTUW $A, 0)))>;
+ def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 0)),
+ (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 12))>;
+ def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 1)),
+ (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 8))>;
+ def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 2)),
+ (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 4))>;
+ def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 3)),
+ (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 0))>;
+ def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 0)),
+ (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 12))>;
+ def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 1)),
+ (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 8))>;
+ def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 2)),
+ (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 4))>;
+ def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 3)),
+ (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 0))>;
+ } // IsLittleEndian, HasP9Vector
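+
+  // Offset bookkeeping for the block above and the one below: the UIM operand
+  // of xxinsertw/xxextractuw is a byte offset from the most-significant (BE)
+  // end of the register, so a v4i32 element i lives at byte 4*i on big-endian
+  // and at byte 12 - 4*i on little-endian, giving the 12/8/4/0 sequence above
+  // and 0/4/8/12 below.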
+
+ let Predicates = [IsBigEndian, HasP9Vector] in {
+ def : Pat<(f32 (PPCfcfidus (PPCmtvsrz (i32 (extractelt v4i32:$A, 0))))),
+ (f32 (XSCVUXDSP (XXEXTRACTUW $A, 0)))>;
+ def : Pat<(f32 (PPCfcfidus (PPCmtvsrz (i32 (extractelt v4i32:$A, 1))))),
+ (f32 (XSCVUXDSP (XXEXTRACTUW $A, 4)))>;
+ def : Pat<(f32 (PPCfcfidus (PPCmtvsrz (i32 (extractelt v4i32:$A, 2))))),
+ (f32 (XSCVUXDSP (XXEXTRACTUW $A, 8)))>;
+ def : Pat<(f32 (PPCfcfidus (PPCmtvsrz (i32 (extractelt v4i32:$A, 3))))),
+ (f32 (XSCVUXDSP (XXEXTRACTUW $A, 12)))>;
+ def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 0)),
+ (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 0))>;
+ def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 1)),
+ (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 4))>;
+ def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 2)),
+ (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 8))>;
+ def : Pat<(v4i32 (insertelt v4i32:$A, i32:$B, 3)),
+ (v4i32 (XXINSERTW v4i32:$A, AlignValues.I32_TO_BE_WORD1, 12))>;
+ def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 0)),
+ (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 0))>;
+ def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 1)),
+ (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 4))>;
+ def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 2)),
+ (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 8))>;
+ def : Pat<(v4f32 (insertelt v4f32:$A, f32:$B, 3)),
+ (v4f32 (XXINSERTW v4f32:$A, AlignValues.F32_TO_BE_WORD1, 12))>;
+  } // IsBigEndian, HasP9Vector
+} // end HasP9Vector, AddedComplexity
diff --git a/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp b/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
deleted file mode 100644
index e3a35d5df358..000000000000
--- a/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
+++ /dev/null
@@ -1,233 +0,0 @@
-//===-------- PPCLoopDataPrefetch.cpp - Loop Data Prefetching Pass --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements a Loop Data Prefetching Pass.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "ppc-loop-data-prefetch"
-#include "PPC.h"
-#include "llvm/Transforms/Scalar.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/AssumptionCache.h"
-#include "llvm/Analysis/CodeMetrics.h"
-#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
-#include "llvm/Analysis/ScalarEvolutionExpressions.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/IR/CFG.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Transforms/Utils/ValueMapper.h"
-using namespace llvm;
-
-// By default, we limit this to creating 16 PHIs (which is a little over half
-// of the allocatable register set).
-static cl::opt<bool>
-PrefetchWrites("ppc-loop-prefetch-writes", cl::Hidden, cl::init(false),
- cl::desc("Prefetch write addresses"));
-
-// This seems like a reasonable default for the BG/Q (this pass is enabled, by
-// default, only on the BG/Q).
-static cl::opt<unsigned>
-PrefDist("ppc-loop-prefetch-distance", cl::Hidden, cl::init(300),
- cl::desc("The loop prefetch distance"));
-
-static cl::opt<unsigned>
-CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
- cl::desc("The loop prefetch cache line size"));
-
-namespace llvm {
- void initializePPCLoopDataPrefetchPass(PassRegistry&);
-}
-
-namespace {
-
- class PPCLoopDataPrefetch : public FunctionPass {
- public:
- static char ID; // Pass ID, replacement for typeid
- PPCLoopDataPrefetch() : FunctionPass(ID) {
- initializePPCLoopDataPrefetchPass(*PassRegistry::getPassRegistry());
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<AssumptionCacheTracker>();
- AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addRequired<LoopInfoWrapperPass>();
- AU.addPreserved<LoopInfoWrapperPass>();
- AU.addRequired<ScalarEvolutionWrapperPass>();
- // FIXME: For some reason, preserving SE here breaks LSR (even if
- // this pass changes nothing).
- // AU.addPreserved<ScalarEvolutionWrapperPass>();
- AU.addRequired<TargetTransformInfoWrapperPass>();
- }
-
- bool runOnFunction(Function &F) override;
- bool runOnLoop(Loop *L);
-
- private:
- AssumptionCache *AC;
- LoopInfo *LI;
- ScalarEvolution *SE;
- const TargetTransformInfo *TTI;
- const DataLayout *DL;
- };
-}
-
-char PPCLoopDataPrefetch::ID = 0;
-INITIALIZE_PASS_BEGIN(PPCLoopDataPrefetch, "ppc-loop-data-prefetch",
- "PPC Loop Data Prefetch", false, false)
-INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
-INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
-INITIALIZE_PASS_END(PPCLoopDataPrefetch, "ppc-loop-data-prefetch",
- "PPC Loop Data Prefetch", false, false)
-
-FunctionPass *llvm::createPPCLoopDataPrefetchPass() { return new PPCLoopDataPrefetch(); }
-
-bool PPCLoopDataPrefetch::runOnFunction(Function &F) {
- LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
- DL = &F.getParent()->getDataLayout();
- AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
- TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
-
- bool MadeChange = false;
-
- for (auto I = LI->begin(), IE = LI->end(); I != IE; ++I)
- for (auto L = df_begin(*I), LE = df_end(*I); L != LE; ++L)
- MadeChange |= runOnLoop(*L);
-
- return MadeChange;
-}
-
-bool PPCLoopDataPrefetch::runOnLoop(Loop *L) {
- bool MadeChange = false;
-
- // Only prefetch in the inner-most loop
- if (!L->empty())
- return MadeChange;
-
- SmallPtrSet<const Value *, 32> EphValues;
- CodeMetrics::collectEphemeralValues(L, AC, EphValues);
-
- // Calculate the number of iterations ahead to prefetch
- CodeMetrics Metrics;
- for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
- I != IE; ++I) {
-
- // If the loop already has prefetches, then assume that the user knows
- // what he or she is doing and don't add any more.
- for (BasicBlock::iterator J = (*I)->begin(), JE = (*I)->end();
- J != JE; ++J)
- if (CallInst *CI = dyn_cast<CallInst>(J))
- if (Function *F = CI->getCalledFunction())
- if (F->getIntrinsicID() == Intrinsic::prefetch)
- return MadeChange;
-
- Metrics.analyzeBasicBlock(*I, *TTI, EphValues);
- }
- unsigned LoopSize = Metrics.NumInsts;
- if (!LoopSize)
- LoopSize = 1;
-
- unsigned ItersAhead = PrefDist/LoopSize;
- if (!ItersAhead)
- ItersAhead = 1;
-
- SmallVector<std::pair<Instruction *, const SCEVAddRecExpr *>, 16> PrefLoads;
- for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
- I != IE; ++I) {
- for (BasicBlock::iterator J = (*I)->begin(), JE = (*I)->end();
- J != JE; ++J) {
- Value *PtrValue;
- Instruction *MemI;
-
- if (LoadInst *LMemI = dyn_cast<LoadInst>(J)) {
- MemI = LMemI;
- PtrValue = LMemI->getPointerOperand();
- } else if (StoreInst *SMemI = dyn_cast<StoreInst>(J)) {
- if (!PrefetchWrites) continue;
- MemI = SMemI;
- PtrValue = SMemI->getPointerOperand();
- } else continue;
-
- unsigned PtrAddrSpace = PtrValue->getType()->getPointerAddressSpace();
- if (PtrAddrSpace)
- continue;
-
- if (L->isLoopInvariant(PtrValue))
- continue;
-
- const SCEV *LSCEV = SE->getSCEV(PtrValue);
- const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
- if (!LSCEVAddRec)
- continue;
-
- // We don't want to double prefetch individual cache lines. If this load
- // is known to be within one cache line of some other load that has
- // already been prefetched, then don't prefetch this one as well.
- bool DupPref = false;
- for (SmallVector<std::pair<Instruction *, const SCEVAddRecExpr *>,
- 16>::iterator K = PrefLoads.begin(), KE = PrefLoads.end();
- K != KE; ++K) {
- const SCEV *PtrDiff = SE->getMinusSCEV(LSCEVAddRec, K->second);
- if (const SCEVConstant *ConstPtrDiff =
- dyn_cast<SCEVConstant>(PtrDiff)) {
- int64_t PD = std::abs(ConstPtrDiff->getValue()->getSExtValue());
- if (PD < (int64_t) CacheLineSize) {
- DupPref = true;
- break;
- }
- }
- }
- if (DupPref)
- continue;
-
- const SCEV *NextLSCEV = SE->getAddExpr(LSCEVAddRec, SE->getMulExpr(
- SE->getConstant(LSCEVAddRec->getType(), ItersAhead),
- LSCEVAddRec->getStepRecurrence(*SE)));
- if (!isSafeToExpand(NextLSCEV, *SE))
- continue;
-
- PrefLoads.push_back(std::make_pair(MemI, LSCEVAddRec));
-
- Type *I8Ptr = Type::getInt8PtrTy((*I)->getContext(), PtrAddrSpace);
- SCEVExpander SCEVE(*SE, J->getModule()->getDataLayout(), "prefaddr");
- Value *PrefPtrValue = SCEVE.expandCodeFor(NextLSCEV, I8Ptr, MemI);
-
- IRBuilder<> Builder(MemI);
- Module *M = (*I)->getParent()->getParent();
- Type *I32 = Type::getInt32Ty((*I)->getContext());
- Value *PrefetchFunc = Intrinsic::getDeclaration(M, Intrinsic::prefetch);
- Builder.CreateCall(
- PrefetchFunc,
- {PrefPtrValue,
- ConstantInt::get(I32, MemI->mayReadFromMemory() ? 0 : 1),
- ConstantInt::get(I32, 3), ConstantInt::get(I32, 1)});
-
- MadeChange = true;
- }
- }
-
- return MadeChange;
-}
-
diff --git a/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp b/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
index 5e188268fee9..48a71cfc2a6e 100644
--- a/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
+++ b/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
@@ -144,6 +144,9 @@ static Value *GetPointerOperand(Value *MemI) {
}
bool PPCLoopPreIncPrep::runOnFunction(Function &F) {
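+  // skipFunction is true when optimization is disabled for this function
+  // (the optnone attribute or an opt-bisect limit), so do nothing then.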
+ if (skipFunction(F))
+ return false;
+
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
diff --git a/lib/Target/PowerPC/PPCMCInstLower.cpp b/lib/Target/PowerPC/PPCMCInstLower.cpp
index 44a692d4bb42..18377a44a7f8 100644
--- a/lib/Target/PowerPC/PPCMCInstLower.cpp
+++ b/lib/Target/PowerPC/PPCMCInstLower.cpp
@@ -40,21 +40,15 @@ static MCSymbol *GetSymbolFromOperand(const MachineOperand &MO, AsmPrinter &AP){
Mangler *Mang = AP.Mang;
const DataLayout &DL = AP.getDataLayout();
MCContext &Ctx = AP.OutContext;
- bool isDarwin = TM.getTargetTriple().isOSDarwin();
SmallString<128> Name;
StringRef Suffix;
- if (MO.getTargetFlags() == PPCII::MO_PLT_OR_STUB) {
- if (isDarwin)
- Suffix = "$stub";
- } else if (MO.getTargetFlags() & PPCII::MO_NLP_FLAG)
+ if (MO.getTargetFlags() & PPCII::MO_NLP_FLAG)
Suffix = "$non_lazy_ptr";
if (!Suffix.empty())
Name += DL.getPrivateGlobalPrefix();
- unsigned PrefixLen = Name.size();
-
if (!MO.isGlobal()) {
assert(MO.isSymbol() && "Isn't a symbol reference");
Mangler::getNameWithPrefix(Name, MO.getSymbolName(), DL);
@@ -63,42 +57,16 @@ static MCSymbol *GetSymbolFromOperand(const MachineOperand &MO, AsmPrinter &AP){
TM.getNameWithPrefix(Name, GV, *Mang);
}
- unsigned OrigLen = Name.size() - PrefixLen;
-
Name += Suffix;
MCSymbol *Sym = Ctx.getOrCreateSymbol(Name);
- StringRef OrigName = StringRef(Name).substr(PrefixLen, OrigLen);
-
- // If the target flags on the operand changes the name of the symbol, do that
- // before we return the symbol.
- if (MO.getTargetFlags() == PPCII::MO_PLT_OR_STUB && isDarwin) {
- MachineModuleInfoImpl::StubValueTy &StubSym =
- getMachOMMI(AP).getFnStubEntry(Sym);
- if (StubSym.getPointer())
- return Sym;
-
- if (MO.isGlobal()) {
- StubSym =
- MachineModuleInfoImpl::
- StubValueTy(AP.getSymbol(MO.getGlobal()),
- !MO.getGlobal()->hasInternalLinkage());
- } else {
- StubSym =
- MachineModuleInfoImpl::
- StubValueTy(Ctx.getOrCreateSymbol(OrigName), false);
- }
- return Sym;
- }
// If the symbol reference is actually to a non_lazy_ptr, not to the symbol,
// then add the suffix.
if (MO.getTargetFlags() & PPCII::MO_NLP_FLAG) {
MachineModuleInfoMachO &MachO = getMachOMMI(AP);
-
- MachineModuleInfoImpl::StubValueTy &StubSym =
- (MO.getTargetFlags() & PPCII::MO_NLP_HIDDEN_FLAG) ?
- MachO.getHiddenGVStubEntry(Sym) : MachO.getGVStubEntry(Sym);
-
+
+ MachineModuleInfoImpl::StubValueTy &StubSym = MachO.getGVStubEntry(Sym);
+
if (!StubSym.getPointer()) {
assert(MO.isGlobal() && "Extern symbol not handled yet");
StubSym = MachineModuleInfoImpl::
@@ -139,7 +107,7 @@ static MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol,
break;
}
- if (MO.getTargetFlags() == PPCII::MO_PLT_OR_STUB && !isDarwin)
+ if (MO.getTargetFlags() == PPCII::MO_PLT)
RefKind = MCSymbolRefExpr::VK_PLT;
const MCExpr *Expr = MCSymbolRefExpr::create(Symbol, RefKind, Ctx);
diff --git a/lib/Target/PowerPC/PPCMIPeephole.cpp b/lib/Target/PowerPC/PPCMIPeephole.cpp
index fe339d70d7de..a57a83d7aa93 100644
--- a/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -63,6 +63,8 @@ private:
public:
// Main entry point for this pass.
bool runOnMachineFunction(MachineFunction &MF) override {
+ if (skipFunction(*MF.getFunction()))
+ return false;
initialize(MF);
return simplifyCode();
}
diff --git a/lib/Target/PowerPC/PPCMachineFunctionInfo.h b/lib/Target/PowerPC/PPCMachineFunctionInfo.h
index 10a8ce068d40..4c29aa06f048 100644
--- a/lib/Target/PowerPC/PPCMachineFunctionInfo.h
+++ b/lib/Target/PowerPC/PPCMachineFunctionInfo.h
@@ -104,6 +104,10 @@ class PPCFunctionInfo : public MachineFunctionInfo {
/// Whether this uses the PIC Base register or not.
bool UsesPICBase;
+  /// True if this function has a subset of CSRs that is handled explicitly
+  /// via copies.
+ bool IsSplitCSR;
+
public:
explicit PPCFunctionInfo(MachineFunction &MF)
: FramePointerSaveIndex(0),
@@ -125,7 +129,8 @@ public:
VarArgsNumFPR(0),
CRSpillFrameIndex(0),
MF(MF),
- UsesPICBase(0) {}
+ UsesPICBase(0),
+ IsSplitCSR(false) {}
int getFramePointerSaveIndex() const { return FramePointerSaveIndex; }
void setFramePointerSaveIndex(int Idx) { FramePointerSaveIndex = Idx; }
@@ -196,6 +201,9 @@ public:
void setUsesPICBase(bool uses) { UsesPICBase = uses; }
bool usesPICBase() const { return UsesPICBase; }
+ bool isSplitCSR() const { return IsSplitCSR; }
+ void setIsSplitCSR(bool s) { IsSplitCSR = s; }
+
MCSymbol *getPICOffsetSymbol() const;
MCSymbol *getGlobalEPSymbol() const;
diff --git a/lib/Target/PowerPC/PPCQPXLoadSplat.cpp b/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
new file mode 100644
index 000000000000..bfe20c12974b
--- /dev/null
+++ b/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
@@ -0,0 +1,166 @@
+//===----- PPCQPXLoadSplat.cpp - QPX Load Splat Simplification ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The QPX vector registers overlay the scalar floating-point registers, and
+// any scalar floating-point loads splat their value across all vector lanes.
+// Thus, if we have a scalar load followed by a splat, we can remove the splat
+// (i.e. replace the load with a load-and-splat pseudo instruction).
+//
+// This pass must run after anything that might do store-to-load forwarding.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PPC.h"
+#include "PPCInstrBuilder.h"
+#include "PPCInstrInfo.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "ppc-qpx-load-splat"
+
+STATISTIC(NumSimplified, "Number of QPX load splats simplified");
+
+namespace llvm {
+ void initializePPCQPXLoadSplatPass(PassRegistry&);
+}
+
+namespace {
+ struct PPCQPXLoadSplat : public MachineFunctionPass {
+ static char ID;
+ PPCQPXLoadSplat() : MachineFunctionPass(ID) {
+ initializePPCQPXLoadSplatPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &Fn) override;
+
+ const char *getPassName() const override {
+ return "PowerPC QPX Load Splat Simplification";
+ }
+ };
+ char PPCQPXLoadSplat::ID = 0;
+}
+
+INITIALIZE_PASS(PPCQPXLoadSplat, "ppc-qpx-load-splat",
+ "PowerPC QPX Load Splat Simplification",
+ false, false)
+
+FunctionPass *llvm::createPPCQPXLoadSplatPass() {
+ return new PPCQPXLoadSplat();
+}
+
+bool PPCQPXLoadSplat::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
+ bool MadeChange = false;
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+
+ for (auto MFI = MF.begin(), MFIE = MF.end(); MFI != MFIE; ++MFI) {
+ MachineBasicBlock *MBB = &*MFI;
+ SmallVector<MachineInstr *, 4> Splats;
+
+ for (auto MBBI = MBB->rbegin(); MBBI != MBB->rend(); ++MBBI) {
+ MachineInstr *MI = &*MBBI;
+
+ if (MI->hasUnmodeledSideEffects() || MI->isCall()) {
+ Splats.clear();
+ continue;
+ }
+
+ // We're looking for a sequence like this:
+ // %F0<def> = LFD 0, %X3<kill>, %QF0<imp-def>; mem:LD8[%a](tbaa=!2)
+ // %QF1<def> = QVESPLATI %QF0<kill>, 0, %RM<imp-use>
+
+ for (auto SI = Splats.begin(); SI != Splats.end();) {
+ MachineInstr *SMI = *SI;
+ unsigned SplatReg = SMI->getOperand(0).getReg();
+ unsigned SrcReg = SMI->getOperand(1).getReg();
+
+ if (MI->modifiesRegister(SrcReg, TRI)) {
+ switch (MI->getOpcode()) {
+ default:
+ SI = Splats.erase(SI);
+ continue;
+ case PPC::LFS:
+ case PPC::LFD:
+ case PPC::LFSU:
+ case PPC::LFDU:
+ case PPC::LFSUX:
+ case PPC::LFDUX:
+ case PPC::LFSX:
+ case PPC::LFDX:
+ case PPC::LFIWAX:
+ case PPC::LFIWZX:
+ if (SplatReg != SrcReg) {
+ // We need to change the load to define the scalar subregister of
+ // the QPX splat source register.
+ unsigned SubRegIndex =
+ TRI->getSubRegIndex(SrcReg, MI->getOperand(0).getReg());
+ unsigned SplatSubReg = TRI->getSubReg(SplatReg, SubRegIndex);
+
+ // Substitute both the explicit defined register, and also the
+ // implicit def of the containing QPX register.
+ MI->getOperand(0).setReg(SplatSubReg);
+ MI->substituteRegister(SrcReg, SplatReg, 0, *TRI);
+ }
+
+ SI = Splats.erase(SI);
+
+ // If SMI is directly after MI, then MBBI's base iterator is
+ // pointing at SMI. Adjust MBBI around the call to erase SMI to
+ // avoid invalidating MBBI.
+ ++MBBI;
+ SMI->eraseFromParent();
+ --MBBI;
+
+ ++NumSimplified;
+ MadeChange = true;
+ continue;
+ }
+ }
+
+ // If this instruction defines the splat register, then we cannot move
+ // the previous definition above it. If it reads from the splat
+ // register, then it must already be alive from some previous
+ // definition, and if the splat register is different from the source
+ // register, then this definition must not be the load for which we're
+ // searching.
+ if (MI->modifiesRegister(SplatReg, TRI) ||
+ (SrcReg != SplatReg &&
+ MI->readsRegister(SplatReg, TRI))) {
+ SI = Splats.erase(SI);
+ continue;
+ }
+
+ ++SI;
+ }
+
+ if (MI->getOpcode() != PPC::QVESPLATI &&
+ MI->getOpcode() != PPC::QVESPLATIs &&
+ MI->getOpcode() != PPC::QVESPLATIb)
+ continue;
+ if (MI->getOperand(2).getImm() != 0)
+ continue;
+
+ // If there are other uses of the scalar value after this, replacing
+ // those uses might be non-trivial.
+ if (!MI->getOperand(1).isKill())
+ continue;
+
+ Splats.push_back(MI);
+ }
+ }
+
+ return MadeChange;
+}
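+
+// A minimal stand-alone illustration, for exposition only and not part of the
+// pass above, of the reverse-iterator erase idiom used in
+// runOnMachineFunction: when the node being erased is the one designated by
+// the reverse_iterator's base(), step over it before erasing and back
+// afterwards, so the erase never invalidates the iterator we keep walking
+// with.
+#include <list>
+
+static void eraseAtBase(std::list<int> &L,
+                        std::list<int>::reverse_iterator &RI,
+                        std::list<int>::iterator Victim) {
+  if (Victim == RI.base()) {
+    ++RI;            // base() steps off Victim, onto *RI's own node
+    L.erase(Victim); // safe: RI no longer refers to the erased node
+    --RI;            // base() now designates Victim's old successor
+  } else {
+    L.erase(Victim); // erasing an unrelated node leaves RI untouched
+  }
+}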
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 934bdf622418..f0161a03d2d4 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -116,6 +116,9 @@ PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
: (Subtarget.hasAltivec() ? CSR_Darwin32_Altivec_SaveList
: CSR_Darwin32_SaveList);
+ if (TM.isPPC64() && MF->getInfo<PPCFunctionInfo>()->isSplitCSR())
+ return CSR_SRV464_TLS_PE_SaveList;
+
// On PPC64, we might need to save r2 (but only if it is not reserved).
bool SaveR2 = MF->getRegInfo().isAllocatable(PPC::X2);
@@ -128,6 +131,31 @@ PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
: CSR_SVR432_SaveList);
}
+const MCPhysReg *
+PPCRegisterInfo::getCalleeSavedRegsViaCopy(const MachineFunction *MF) const {
+ assert(MF && "Invalid MachineFunction pointer.");
+ const PPCSubtarget &Subtarget = MF->getSubtarget<PPCSubtarget>();
+ if (Subtarget.isDarwinABI())
+ return nullptr;
+ if (!TM.isPPC64())
+ return nullptr;
+ if (MF->getFunction()->getCallingConv() != CallingConv::CXX_FAST_TLS)
+ return nullptr;
+ if (!MF->getInfo<PPCFunctionInfo>()->isSplitCSR())
+ return nullptr;
+
+ // On PPC64, we might need to save r2 (but only if it is not reserved).
+ bool SaveR2 = !getReservedRegs(*MF).test(PPC::X2);
+ if (Subtarget.hasAltivec())
+ return SaveR2
+ ? CSR_SVR464_R2_Altivec_ViaCopy_SaveList
+ : CSR_SVR464_Altivec_ViaCopy_SaveList;
+ else
+ return SaveR2
+ ? CSR_SVR464_R2_ViaCopy_SaveList
+ : CSR_SVR464_ViaCopy_SaveList;
+}
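+
+// Background, summarizing the split-CSR scheme rather than anything specific
+// to this file: for CXX_FAST_TLS functions the callee-saved registers in the
+// ViaCopy lists are preserved with explicit register copies around the body
+// instead of prologue/epilogue spills, keeping the common fast path of a TLS
+// accessor cheap.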
+
const uint32_t *
PPCRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID CC) const {
@@ -232,16 +260,15 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
if (TFI->needsFP(MF))
Reserved.set(PPC::R31);
+ bool IsPositionIndependent = TM.isPositionIndependent();
if (hasBasePointer(MF)) {
- if (Subtarget.isSVR4ABI() && !TM.isPPC64() &&
- TM.getRelocationModel() == Reloc::PIC_)
+ if (Subtarget.isSVR4ABI() && !TM.isPPC64() && IsPositionIndependent)
Reserved.set(PPC::R29);
else
Reserved.set(PPC::R30);
}
- if (Subtarget.isSVR4ABI() && !TM.isPPC64() &&
- TM.getRelocationModel() == Reloc::PIC_)
+ if (Subtarget.isSVR4ABI() && !TM.isPPC64() && IsPositionIndependent)
Reserved.set(PPC::R30);
// Reserve Altivec registers when Altivec is unavailable.
@@ -907,8 +934,7 @@ unsigned PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const {
if (TM.isPPC64())
return PPC::X30;
- if (Subtarget.isSVR4ABI() &&
- TM.getRelocationModel() == Reloc::PIC_)
+ if (Subtarget.isSVR4ABI() && TM.isPositionIndependent())
return PPC::R29;
return PPC::R30;
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.h b/lib/Target/PowerPC/PPCRegisterInfo.h
index b15fde83c9f3..459502eeb2e9 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.h
+++ b/lib/Target/PowerPC/PPCRegisterInfo.h
@@ -75,6 +75,7 @@ public:
/// Code Generation virtual methods...
const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
+ const MCPhysReg *getCalleeSavedRegsViaCopy(const MachineFunction *MF) const override;
const uint32_t *getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID CC) const override;
const uint32_t *getNoPreservedMask() const override;
diff --git a/lib/Target/PowerPC/PPCSchedule.td b/lib/Target/PowerPC/PPCSchedule.td
index d0954a11cd6a..b4d72eff2b85 100644
--- a/lib/Target/PowerPC/PPCSchedule.td
+++ b/lib/Target/PowerPC/PPCSchedule.td
@@ -70,6 +70,8 @@ def IIC_LdStSTFDU : InstrItinClass;
def IIC_LdStSTVEBX : InstrItinClass;
def IIC_LdStSTWCX : InstrItinClass;
def IIC_LdStSync : InstrItinClass;
+def IIC_LdStCOPY : InstrItinClass;
+def IIC_LdStPASTE : InstrItinClass;
def IIC_SprISYNC : InstrItinClass;
def IIC_SprMFSR : InstrItinClass;
def IIC_SprMTMSR : InstrItinClass;
@@ -104,12 +106,17 @@ def IIC_VecVSR : InstrItinClass;
def IIC_SprMTMSRD : InstrItinClass;
def IIC_SprSLIE : InstrItinClass;
def IIC_SprSLBIE : InstrItinClass;
+def IIC_SprSLBIEG : InstrItinClass;
def IIC_SprSLBMTE : InstrItinClass;
def IIC_SprSLBMFEE : InstrItinClass;
def IIC_SprSLBIA : InstrItinClass;
+def IIC_SprSLBSYNC : InstrItinClass;
def IIC_SprTLBIA : InstrItinClass;
def IIC_SprTLBIEL : InstrItinClass;
def IIC_SprTLBIE : InstrItinClass;
+def IIC_SprABORT : InstrItinClass;
+def IIC_SprMSGSYNC : InstrItinClass;
+def IIC_SprSTOP : InstrItinClass;
//===----------------------------------------------------------------------===//
// Processor instruction itineraries.
diff --git a/lib/Target/PowerPC/PPCSchedule440.td b/lib/Target/PowerPC/PPCSchedule440.td
index 04a43bc03251..2455e5e52de5 100644
--- a/lib/Target/PowerPC/PPCSchedule440.td
+++ b/lib/Target/PowerPC/PPCSchedule440.td
@@ -597,11 +597,12 @@ def PPC440Itineraries : ProcessorItineraries<
def PPC440Model : SchedMachineModel {
let IssueWidth = 2; // 2 instructions are dispatched per cycle.
- let MinLatency = -1; // OperandCycles are interpreted as MinLatency.
let LoadLatency = 5; // Optimistic load latency assuming bypass.
// This is overriden by OperandCycles if the
// Itineraries are queried instead.
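+  // The itineraries above do not describe every PPC instruction, so mark the
+  // model incomplete; this is presumably what keeps TableGen's scheduling
+  // completeness checks from firing for the uncovered opcodes.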
+ let CompleteModel = 0;
+
let Itineraries = PPC440Itineraries;
}
diff --git a/lib/Target/PowerPC/PPCScheduleA2.td b/lib/Target/PowerPC/PPCScheduleA2.td
index 21a357a2efcf..54cfae5d74b7 100644
--- a/lib/Target/PowerPC/PPCScheduleA2.td
+++ b/lib/Target/PowerPC/PPCScheduleA2.td
@@ -1,10 +1,10 @@
//===- PPCScheduleA2.td - PPC A2 Scheduling Definitions --*- tablegen -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
// Primary reference:
@@ -160,12 +160,13 @@ def PPCA2Itineraries : ProcessorItineraries<
def PPCA2Model : SchedMachineModel {
let IssueWidth = 1; // 1 instruction is dispatched per cycle.
- let MinLatency = -1; // OperandCycles are interpreted as MinLatency.
let LoadLatency = 6; // Optimistic load latency assuming bypass.
// This is overriden by OperandCycles if the
// Itineraries are queried instead.
let MispredictPenalty = 13;
+ let CompleteModel = 0;
+
let Itineraries = PPCA2Itineraries;
}
diff --git a/lib/Target/PowerPC/PPCScheduleE500mc.td b/lib/Target/PowerPC/PPCScheduleE500mc.td
index 36b8517dabf1..f687d326b52d 100644
--- a/lib/Target/PowerPC/PPCScheduleE500mc.td
+++ b/lib/Target/PowerPC/PPCScheduleE500mc.td
@@ -7,9 +7,9 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the itinerary class data for the Freescale e500mc 32-bit
+// This file defines the itinerary class data for the Freescale e500mc 32-bit
// Power processor.
-//
+//
// All information is derived from the "e500mc Core Reference Manual",
// Freescale Document Number E500MCRM, Rev. 1, 03/2012.
//
@@ -25,12 +25,12 @@ def E500_DIS1 : FuncUnit; // Dispatch stage - insn 2
// * Execute
// 6 pipelined execution units: SFX0, SFX1, BU, FPU, LSU, CFX.
// Some instructions can only execute in SFX0 but not SFX1.
-// The CFX has a bypass path, allowing non-divide instructions to execute
+// The CFX has a bypass path, allowing non-divide instructions to execute
// while a divide instruction is executed.
def E500_SFX0 : FuncUnit; // Simple unit 0
def E500_SFX1 : FuncUnit; // Simple unit 1
def E500_BU : FuncUnit; // Branch unit
-def E500_CFX_DivBypass
+def E500_CFX_DivBypass
: FuncUnit; // CFX divide bypass path
def E500_CFX_0 : FuncUnit; // CFX pipeline
def E500_LSU_0 : FuncUnit; // LSU pipeline
@@ -271,12 +271,12 @@ def PPCE500mcItineraries : ProcessorItineraries<
[NoBypass, E500_GPR_Bypass]>,
InstrItinData<IIC_FPGeneral, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
InstrStage<2, [E500_FPU_0]>],
- [11, 1, 1], // Latency = 8, Repeat rate = 2
+ [11, 1, 1], // Latency = 8, Repeat rate = 2
[E500_FPR_Bypass,
E500_FPR_Bypass, E500_FPR_Bypass]>,
InstrItinData<IIC_FPAddSub, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
InstrStage<4, [E500_FPU_0]>],
- [13, 1, 1], // Latency = 10, Repeat rate = 4
+ [13, 1, 1], // Latency = 10, Repeat rate = 4
[E500_FPR_Bypass,
E500_FPR_Bypass, E500_FPR_Bypass]>,
InstrItinData<IIC_FPCompare, [InstrStage<1, [E500_DIS0, E500_DIS1], 0>,
@@ -311,10 +311,11 @@ def PPCE500mcItineraries : ProcessorItineraries<
def PPCE500mcModel : SchedMachineModel {
let IssueWidth = 2; // 2 micro-ops are dispatched per cycle.
- let MinLatency = -1; // OperandCycles are interpreted as MinLatency.
let LoadLatency = 5; // Optimistic load latency assuming bypass.
// This is overriden by OperandCycles if the
// Itineraries are queried instead.
+ let CompleteModel = 0;
+
let Itineraries = PPCE500mcItineraries;
}
diff --git a/lib/Target/PowerPC/PPCScheduleE5500.td b/lib/Target/PowerPC/PPCScheduleE5500.td
index 7c2693ef0d4f..5db886cf8f94 100644
--- a/lib/Target/PowerPC/PPCScheduleE5500.td
+++ b/lib/Target/PowerPC/PPCScheduleE5500.td
@@ -7,9 +7,9 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the itinerary class data for the Freescale e5500 64-bit
+// This file defines the itinerary class data for the Freescale e5500 64-bit
// Power processor.
-//
+//
// All information is derived from the "e5500 Core Reference Manual",
// Freescale Document Number e5500RM, Rev. 1, 03/2012.
//
@@ -25,16 +25,16 @@ def E5500_DIS1 : FuncUnit;
// * Execute
// 6 pipelined execution units: SFX0, SFX1, BU, FPU, LSU, CFX.
-// The CFX has a bypass path, allowing non-divide instructions to execute
+// The CFX has a bypass path, allowing non-divide instructions to execute
// while a divide instruction is being executed.
def E5500_SFX0 : FuncUnit; // Simple unit 0
def E5500_SFX1 : FuncUnit; // Simple unit 1
def E5500_BU : FuncUnit; // Branch unit
-def E5500_CFX_DivBypass
+def E5500_CFX_DivBypass
: FuncUnit; // CFX divide bypass path
def E5500_CFX_0 : FuncUnit; // CFX pipeline stage 0
-def E5500_CFX_1 : FuncUnit; // CFX pipeline stage 1
+def E5500_CFX_1 : FuncUnit; // CFX pipeline stage 1
def E5500_LSU_0 : FuncUnit; // LSU pipeline
def E5500_FPU_0 : FuncUnit; // FPU pipeline
@@ -331,12 +331,12 @@ def PPCE5500Itineraries : ProcessorItineraries<
[E5500_GPR_Bypass]>,
InstrItinData<IIC_FPGeneral, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
InstrStage<1, [E5500_FPU_0]>],
- [11, 2, 2], // Latency = 7, Repeat rate = 1
+ [11, 2, 2], // Latency = 7, Repeat rate = 1
[E5500_FPR_Bypass,
E5500_FPR_Bypass, E5500_FPR_Bypass]>,
InstrItinData<IIC_FPAddSub, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
InstrStage<1, [E5500_FPU_0]>],
- [11, 2, 2], // Latency = 7, Repeat rate = 1
+ [11, 2, 2], // Latency = 7, Repeat rate = 1
[E5500_FPR_Bypass,
E5500_FPR_Bypass, E5500_FPR_Bypass]>,
InstrItinData<IIC_FPCompare, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
@@ -351,7 +351,7 @@ def PPCE5500Itineraries : ProcessorItineraries<
E5500_FPR_Bypass, E5500_FPR_Bypass]>,
InstrItinData<IIC_FPDivS, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
InstrStage<16, [E5500_FPU_0]>],
- [24, 2, 2], // Latency = 20, Repeat rate = 16
+ [24, 2, 2], // Latency = 20, Repeat rate = 16
[E5500_FPR_Bypass,
E5500_FPR_Bypass, E5500_FPR_Bypass]>,
InstrItinData<IIC_FPFused, [InstrStage<1, [E5500_DIS0, E5500_DIS1], 0>,
@@ -371,10 +371,11 @@ def PPCE5500Itineraries : ProcessorItineraries<
def PPCE5500Model : SchedMachineModel {
let IssueWidth = 2; // 2 micro-ops are dispatched per cycle.
- let MinLatency = -1; // OperandCycles are interpreted as MinLatency.
let LoadLatency = 6; // Optimistic load latency assuming bypass.
// This is overriden by OperandCycles if the
// Itineraries are queried instead.
+ let CompleteModel = 0;
+
let Itineraries = PPCE5500Itineraries;
}
diff --git a/lib/Target/PowerPC/PPCScheduleG5.td b/lib/Target/PowerPC/PPCScheduleG5.td
index a3b73ab4454f..b5a9f96d45ae 100644
--- a/lib/Target/PowerPC/PPCScheduleG5.td
+++ b/lib/Target/PowerPC/PPCScheduleG5.td
@@ -40,7 +40,7 @@ def G5Itineraries : ProcessorItineraries<
InstrItinData<IIC_IntMulLI , [InstrStage<4, [G5_IU1, G5_IU2]>]>,
InstrItinData<IIC_IntRFID , [InstrStage<1, [G5_IU2]>]>,
InstrItinData<IIC_IntRotateD , [InstrStage<2, [G5_IU1, G5_IU2]>]>,
- InstrItinData<IIC_IntRotateDI , [InstrStage<2, [G5_IU1, G5_IU2]>]>,
+ InstrItinData<IIC_IntRotateDI , [InstrStage<2, [G5_IU1, G5_IU2]>]>,
InstrItinData<IIC_IntRotate , [InstrStage<4, [G5_IU1, G5_IU2]>]>,
InstrItinData<IIC_IntShift , [InstrStage<2, [G5_IU1, G5_IU2]>]>,
InstrItinData<IIC_IntTrapD , [InstrStage<1, [G5_IU1, G5_IU2]>]>,
@@ -51,14 +51,14 @@ def G5Itineraries : ProcessorItineraries<
InstrItinData<IIC_BrMCRX , [InstrStage<3, [G5_BPU]>]>,
InstrItinData<IIC_LdStDCBF , [InstrStage<3, [G5_SLU]>]>,
InstrItinData<IIC_LdStLoad , [InstrStage<3, [G5_SLU]>]>,
- InstrItinData<IIC_LdStLoadUpd , [InstrStage<3, [G5_SLU]>]>,
- InstrItinData<IIC_LdStLoadUpdX, [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLoadUpd , [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLoadUpdX, [InstrStage<3, [G5_SLU]>]>,
InstrItinData<IIC_LdStStore , [InstrStage<3, [G5_SLU]>]>,
- InstrItinData<IIC_LdStStoreUpd, [InstrStage<3, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStStoreUpd, [InstrStage<3, [G5_SLU]>]>,
InstrItinData<IIC_LdStDSS , [InstrStage<10, [G5_SLU]>]>,
InstrItinData<IIC_LdStICBI , [InstrStage<40, [G5_SLU]>]>,
InstrItinData<IIC_LdStSTFD , [InstrStage<4, [G5_SLU]>]>,
- InstrItinData<IIC_LdStSTFDU , [InstrStage<4, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStSTFDU , [InstrStage<4, [G5_SLU]>]>,
InstrItinData<IIC_LdStLD , [InstrStage<3, [G5_SLU]>]>,
InstrItinData<IIC_LdStLDU , [InstrStage<3, [G5_SLU]>]>,
InstrItinData<IIC_LdStLDUX , [InstrStage<3, [G5_SLU]>]>,
@@ -67,8 +67,8 @@ def G5Itineraries : ProcessorItineraries<
InstrItinData<IIC_LdStLFDU , [InstrStage<5, [G5_SLU]>]>,
InstrItinData<IIC_LdStLFDUX , [InstrStage<5, [G5_SLU]>]>,
InstrItinData<IIC_LdStLHA , [InstrStage<5, [G5_SLU]>]>,
- InstrItinData<IIC_LdStLHAU , [InstrStage<5, [G5_SLU]>]>,
- InstrItinData<IIC_LdStLHAUX , [InstrStage<5, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLHAU , [InstrStage<5, [G5_SLU]>]>,
+ InstrItinData<IIC_LdStLHAUX , [InstrStage<5, [G5_SLU]>]>,
InstrItinData<IIC_LdStLMW , [InstrStage<64, [G5_SLU]>]>,
InstrItinData<IIC_LdStLVecX , [InstrStage<3, [G5_SLU]>]>,
InstrItinData<IIC_LdStLWA , [InstrStage<5, [G5_SLU]>]>,
@@ -118,12 +118,13 @@ def G5Itineraries : ProcessorItineraries<
def G5Model : SchedMachineModel {
let IssueWidth = 4; // 4 (non-branch) instructions are dispatched per cycle.
- let MinLatency = 0; // Out-of-order dispatch.
let LoadLatency = 3; // Optimistic load latency assuming bypass.
// This is overriden by OperandCycles if the
// Itineraries are queried instead.
let MispredictPenalty = 16;
+ let CompleteModel = 0;
+
let Itineraries = G5Itineraries;
}
diff --git a/lib/Target/PowerPC/PPCScheduleP7.td b/lib/Target/PowerPC/PPCScheduleP7.td
index 267f56726180..a8678f56900e 100644
--- a/lib/Target/PowerPC/PPCScheduleP7.td
+++ b/lib/Target/PowerPC/PPCScheduleP7.td
@@ -382,7 +382,6 @@ def P7Model : SchedMachineModel {
// branches), but the total internal issue bandwidth per
// cycle (from all queues) is 8.
- let MinLatency = 0; // Out-of-order dispatch.
let LoadLatency = 3; // Optimistic load latency assuming bypass.
// This is overriden by OperandCycles if the
// Itineraries are queried instead.
@@ -391,6 +390,8 @@ def P7Model : SchedMachineModel {
// Try to make sure we have at least 10 dispatch groups in a loop.
let LoopMicroOpBufferSize = 40;
+ let CompleteModel = 0;
+
let Itineraries = P7Itineraries;
}
diff --git a/lib/Target/PowerPC/PPCScheduleP8.td b/lib/Target/PowerPC/PPCScheduleP8.td
index 69e6d05c6604..8e52da583a0d 100644
--- a/lib/Target/PowerPC/PPCScheduleP8.td
+++ b/lib/Target/PowerPC/PPCScheduleP8.td
@@ -391,7 +391,6 @@ def P8Model : SchedMachineModel {
// up to six non-branch instructions.
// up to two branches in a dispatch group.
- let MinLatency = 0; // Out-of-order dispatch.
let LoadLatency = 3; // Optimistic load latency assuming bypass.
// This is overriden by OperandCycles if the
// Itineraries are queried instead.
@@ -400,6 +399,8 @@ def P8Model : SchedMachineModel {
// Try to make sure we have at least 10 dispatch groups in a loop.
let LoopMicroOpBufferSize = 60;
+ let CompleteModel = 0;
+
let Itineraries = P8Itineraries;
}
diff --git a/lib/Target/PowerPC/PPCSubtarget.cpp b/lib/Target/PowerPC/PPCSubtarget.cpp
index c0fcb6cbb9dc..603f0fccc7c6 100644
--- a/lib/Target/PowerPC/PPCSubtarget.cpp
+++ b/lib/Target/PowerPC/PPCSubtarget.cpp
@@ -70,6 +70,8 @@ void PPCSubtarget::initializeEnvironment() {
HasP8Vector = false;
HasP8Altivec = false;
HasP8Crypto = false;
+ HasP9Vector = false;
+ HasP9Altivec = false;
HasFCPSGN = false;
HasFSQRT = false;
HasFRE = false;
@@ -82,7 +84,6 @@ void PPCSubtarget::initializeEnvironment() {
HasFPRND = false;
HasFPCVT = false;
HasISEL = false;
- HasPOPCNTD = false;
HasBPERMD = false;
HasExtDiv = false;
HasCMPB = false;
@@ -103,12 +104,15 @@ void PPCSubtarget::initializeEnvironment() {
HasHTM = false;
HasFusion = false;
HasFloat128 = false;
+ IsISA3_0 = false;
+
+ HasPOPCNTD = POPCNTD_Unavailable;
}
void PPCSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
// Determine default and user specified characteristics
std::string CPUName = CPU;
- if (CPUName.empty()) {
+ if (CPUName.empty() || CPU == "generic") {
// If cross-compiling with -march=ppc64le without -mcpu
if (TargetTriple.getArch() == Triple::ppc64le)
CPUName = "ppc64le";
@@ -142,18 +146,20 @@ void PPCSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
IsLittleEndian = (TargetTriple.getArch() == Triple::ppc64le);
}
-/// hasLazyResolverStub - Return true if accesses to the specified global have
-/// to go through a dyld lazy resolution stub. This means that an extra load
-/// is required to get the address of the global.
+/// Return true if accesses to the specified global have to go through a dyld
+/// lazy resolution stub. This means that an extra load is required to get the
+/// address of the global.
bool PPCSubtarget::hasLazyResolverStub(const GlobalValue *GV) const {
- // We never have stubs if HasLazyResolverStubs=false or if in static mode.
- if (!HasLazyResolverStubs || TM.getRelocationModel() == Reloc::Static)
- return false;
- bool isDecl = GV->isDeclaration();
- if (GV->hasHiddenVisibility() && !isDecl && !GV->hasCommonLinkage())
+ if (!HasLazyResolverStubs)
return false;
- return GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
- GV->hasCommonLinkage() || isDecl;
+ if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
+ return true;
+ // 32 bit macho has no relocation for a-b if a is undefined, even if b is in
+ // the section that is being relocated. This means we have to use a load even
+ // for GVs that are known to be local to the DSO.
+ if (GV->isDeclarationForLinker() || GV->hasCommonLinkage())
+ return true;
+ return false;
}
// Embedded cores need aggressive scheduling (and some others also benefit).
@@ -166,6 +172,8 @@ static bool needsAggressiveScheduling(unsigned Directive) {
case PPC::DIR_E5500:
case PPC::DIR_PWR7:
case PPC::DIR_PWR8:
+ // FIXME: Same as P8 until POWER9 scheduling info is available
+ case PPC::DIR_PWR9:
return true;
}
}
@@ -191,8 +199,6 @@ void PPCSubtarget::getCriticalPathRCs(RegClassVector &CriticalPathRCs) const {
}
void PPCSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
- MachineInstr *begin,
- MachineInstr *end,
unsigned NumRegionInstrs) const {
if (needsAggressiveScheduling(DarwinDirective)) {
Policy.OnlyTopDown = false;
diff --git a/lib/Target/PowerPC/PPCSubtarget.h b/lib/Target/PowerPC/PPCSubtarget.h
index 4f5c95c1483f..9fe286a3b7a9 100644
--- a/lib/Target/PowerPC/PPCSubtarget.h
+++ b/lib/Target/PowerPC/PPCSubtarget.h
@@ -18,9 +18,9 @@
#include "PPCISelLowering.h"
#include "PPCInstrInfo.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCInstrItineraries.h"
-#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <string>
@@ -56,6 +56,7 @@ namespace PPC {
DIR_PWR6X,
DIR_PWR7,
DIR_PWR8,
+ DIR_PWR9,
DIR_64
};
}
@@ -64,6 +65,13 @@ class GlobalValue;
class TargetMachine;
class PPCSubtarget : public PPCGenSubtargetInfo {
+public:
+ enum POPCNTDKind {
+ POPCNTD_Unavailable,
+ POPCNTD_Slow,
+ POPCNTD_Fast
+ };
+
protected:
/// TargetTriple - What processor and OS we're targeting.
Triple TargetTriple;
@@ -92,6 +100,8 @@ protected:
bool HasP8Vector;
bool HasP8Altivec;
bool HasP8Crypto;
+ bool HasP9Vector;
+ bool HasP9Altivec;
bool HasFCPSGN;
bool HasFSQRT;
bool HasFRE, HasFRES, HasFRSQRTE, HasFRSQRTES;
@@ -101,7 +111,6 @@ protected:
bool HasFPRND;
bool HasFPCVT;
bool HasISEL;
- bool HasPOPCNTD;
bool HasBPERMD;
bool HasExtDiv;
bool HasCMPB;
@@ -122,6 +131,9 @@ protected:
bool HasHTM;
bool HasFusion;
bool HasFloat128;
+ bool IsISA3_0;
+
+ POPCNTDKind HasPOPCNTD;
/// When targeting QPX running a stock PPC64 Linux kernel where the stack
/// alignment has not been changed, we need to keep the 16-byte alignment
@@ -132,7 +144,7 @@ protected:
PPCFrameLowering FrameLowering;
PPCInstrInfo InstrInfo;
PPCTargetLowering TLInfo;
- TargetSelectionDAGInfo TSInfo;
+ SelectionDAGTargetInfo TSInfo;
public:
/// This constructor initializes the data members to match that
@@ -167,7 +179,7 @@ public:
const PPCTargetLowering *getTargetLowering() const override {
return &TLInfo;
}
- const TargetSelectionDAGInfo *getSelectionDAGInfo() const override {
+ const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
return &TSInfo;
}
const PPCRegisterInfo *getRegisterInfo() const override {
@@ -230,9 +242,10 @@ public:
bool hasP8Vector() const { return HasP8Vector; }
bool hasP8Altivec() const { return HasP8Altivec; }
bool hasP8Crypto() const { return HasP8Crypto; }
+ bool hasP9Vector() const { return HasP9Vector; }
+ bool hasP9Altivec() const { return HasP9Altivec; }
bool hasMFOCRF() const { return HasMFOCRF; }
bool hasISEL() const { return HasISEL; }
- bool hasPOPCNTD() const { return HasPOPCNTD; }
bool hasBPERMD() const { return HasBPERMD; }
bool hasExtDiv() const { return HasExtDiv; }
bool hasCMPB() const { return HasCMPB; }
@@ -261,6 +274,9 @@ public:
bool hasHTM() const { return HasHTM; }
bool hasFusion() const { return HasFusion; }
bool hasFloat128() const { return HasFloat128; }
+ bool isISA3_0() const { return IsISA3_0; }
+
+ POPCNTDKind hasPOPCNTD() const { return HasPOPCNTD; }
const Triple &getTargetTriple() const { return TargetTriple; }
@@ -271,6 +287,7 @@ public:
bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
+ bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
bool isDarwinABI() const { return isTargetMachO() || isDarwin(); }
bool isSVR4ABI() const { return !isDarwinABI(); }
@@ -286,8 +303,6 @@ public:
void getCriticalPathRCs(RegClassVector &CriticalPathRCs) const override;
void overrideSchedPolicy(MachineSchedPolicy &Policy,
- MachineInstr *begin,
- MachineInstr *end,
unsigned NumRegionInstrs) const override;
bool useAA() const override;
diff --git a/lib/Target/PowerPC/PPCTLSDynamicCall.cpp b/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
index a9d2e888f4b7..61ce48ecd04f 100644
--- a/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
+++ b/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
@@ -73,10 +73,7 @@ protected:
DebugLoc DL = MI->getDebugLoc();
unsigned GPR3 = Is64Bit ? PPC::X3 : PPC::R3;
unsigned Opc1, Opc2;
- SmallVector<unsigned, 4> OrigRegs;
- OrigRegs.push_back(OutReg);
- OrigRegs.push_back(InReg);
- OrigRegs.push_back(GPR3);
+ const unsigned OrigRegs[] = {OutReg, InReg, GPR3};
switch (MI->getOpcode()) {
default:
diff --git a/lib/Target/PowerPC/PPCTOCRegDeps.cpp b/lib/Target/PowerPC/PPCTOCRegDeps.cpp
index bf165c9edc6e..7c53a5601790 100644
--- a/lib/Target/PowerPC/PPCTOCRegDeps.cpp
+++ b/lib/Target/PowerPC/PPCTOCRegDeps.cpp
@@ -61,10 +61,10 @@
//
//===----------------------------------------------------------------------===//
-#include "PPCInstrInfo.h"
-#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
+#include "MCTargetDesc/PPCPredicates.h"
#include "PPCInstrBuilder.h"
+#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
@@ -74,7 +74,6 @@
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
diff --git a/lib/Target/PowerPC/PPCTargetMachine.cpp b/lib/Target/PowerPC/PPCTargetMachine.cpp
index d24b590317f5..a4ff86cb1e21 100644
--- a/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -15,7 +15,9 @@
#include "PPC.h"
#include "PPCTargetObjectFile.h"
#include "PPCTargetTransformInfo.h"
+#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/MC/MCStreamer.h"
@@ -43,6 +45,10 @@ opt<bool> DisableVSXSwapRemoval("disable-ppc-vsx-swap-removal", cl::Hidden,
cl::desc("Disable VSX Swap Removal for PPC"));
static cl::
+opt<bool> DisableQPXLoadSplat("disable-ppc-qpx-load-splat", cl::Hidden,
+ cl::desc("Disable QPX load splat simplification"));
+
+static cl::
opt<bool> DisableMIPeephole("disable-ppc-peephole", cl::Hidden,
cl::desc("Disable machine peepholes for PPC"));
@@ -172,6 +178,16 @@ static PPCTargetMachine::PPCABI computeTargetABI(const Triple &TT,
return PPCTargetMachine::PPC_ABI_UNKNOWN;
}
+static Reloc::Model getEffectiveRelocModel(const Triple &TT,
+ Optional<Reloc::Model> RM) {
+ if (!RM.hasValue()) {
+ if (TT.isOSDarwin())
+ return Reloc::DynamicNoPIC;
+ return Reloc::Static;
+ }
+ return *RM;
+}
+
// The FeatureString here is a little subtle. We are modifying the feature
// string with what are (currently) non-function specific overrides as it goes
// into the LLVMTargetMachine constructor and then using the stored value in the
@@ -179,10 +195,11 @@ static PPCTargetMachine::PPCABI computeTargetABI(const Triple &TT,
PPCTargetMachine::PPCTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL)
: LLVMTargetMachine(T, getDataLayoutString(TT), TT, CPU,
- computeFSAdditions(FS, OL, TT), Options, RM, CM, OL),
+ computeFSAdditions(FS, OL, TT), Options,
+ getEffectiveRelocModel(TT, RM), CM, OL),
TLOF(createTLOF(getTargetTriple())),
TargetABI(computeTargetABI(TT, Options)),
Subtarget(TargetTriple, CPU, computeFSAdditions(FS, OL, TT), *this) {
@@ -214,7 +231,8 @@ void PPC32TargetMachine::anchor() { }
PPC32TargetMachine::PPC32TargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: PPCTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
@@ -223,7 +241,8 @@ void PPC64TargetMachine::anchor() { }
PPC64TargetMachine::PPC64TargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: PPCTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
@@ -245,8 +264,7 @@ PPCTargetMachine::getSubtargetImpl(const Function &F) const {
// it as a key for the subtarget since that can be the only difference
// between two functions.
bool SoftFloat =
- F.hasFnAttribute("use-soft-float") &&
- F.getFnAttribute("use-soft-float").getValueAsString() == "true";
+ F.getFnAttribute("use-soft-float").getValueAsString() == "true";
// If the soft float attribute is set on the function turn on the soft float
// subtarget feature.
if (SoftFloat)
@@ -313,9 +331,9 @@ void PPCPassConfig::addIRPasses() {
if (EnablePrefetch.getNumOccurrences() > 0)
UsePrefetching = EnablePrefetch;
if (UsePrefetching)
- addPass(createPPCLoopDataPrefetchPass());
+ addPass(createLoopDataPrefetchPass());
- if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
+ if (TM->getOptLevel() >= CodeGenOpt::Default && EnableGEPOpt) {
// Call SeparateConstOffsetFromGEP pass to extract constants within indices
// and lower a GEP with multiple indices to either arithmetic operations or
// multiple GEPs with single index.
@@ -379,18 +397,35 @@ void PPCPassConfig::addMachineSSAOptimization() {
}
void PPCPassConfig::addPreRegAlloc() {
- initializePPCVSXFMAMutatePass(*PassRegistry::getPassRegistry());
- insertPass(VSXFMAMutateEarly ? &RegisterCoalescerID : &MachineSchedulerID,
- &PPCVSXFMAMutateID);
- if (getPPCTargetMachine().getRelocationModel() == Reloc::PIC_)
+ if (getOptLevel() != CodeGenOpt::None) {
+ initializePPCVSXFMAMutatePass(*PassRegistry::getPassRegistry());
+ insertPass(VSXFMAMutateEarly ? &RegisterCoalescerID : &MachineSchedulerID,
+ &PPCVSXFMAMutateID);
+ }
+
+ // FIXME: We probably don't need to run these for -fPIE.
+ if (getPPCTargetMachine().isPositionIndependent()) {
+ // FIXME: LiveVariables should not be necessary here!
+ // PPCTLSDynamicCallPass uses LiveIntervals, which previously depended on
+ // LiveVariables. This (unnecessary) dependency has since been removed;
+ // however, a stage-2 clang build fails without LiveVariables computed here.
+ addPass(&LiveVariablesID, false);
addPass(createPPCTLSDynamicCallPass());
+ }
if (EnableExtraTOCRegDeps)
addPass(createPPCTOCRegDepsPass());
}
void PPCPassConfig::addPreSched2() {
- if (getOptLevel() != CodeGenOpt::None)
+ if (getOptLevel() != CodeGenOpt::None) {
addPass(&IfConverterID);
+
+ // This optimization must happen after anything that might do store-to-load
+ // forwarding. Here we're after RA (and, thus, when spills are inserted)
+ // but before post-RA scheduling.
+ if (!DisableQPXLoadSplat)
+ addPass(createPPCQPXLoadSplatPass());
+ }
}
void PPCPassConfig::addPreEmitPass() {
diff --git a/lib/Target/PowerPC/PPCTargetMachine.h b/lib/Target/PowerPC/PPCTargetMachine.h
index 6496339519a1..59b4f1e30c0e 100644
--- a/lib/Target/PowerPC/PPCTargetMachine.h
+++ b/lib/Target/PowerPC/PPCTargetMachine.h
@@ -21,7 +21,7 @@
namespace llvm {
-/// PPCTargetMachine - Common code between 32-bit and 64-bit PowerPC targets.
+/// Common code between 32-bit and 64-bit PowerPC targets.
///
class PPCTargetMachine : public LLVMTargetMachine {
public:
@@ -35,8 +35,9 @@ private:
public:
PPCTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
- StringRef FS, const TargetOptions &Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OL);
+ StringRef FS, const TargetOptions &Options,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
~PPCTargetMachine() override;
@@ -57,25 +58,25 @@ public:
};
};
-/// PPC32TargetMachine - PowerPC 32-bit target machine.
+/// PowerPC 32-bit target machine.
///
class PPC32TargetMachine : public PPCTargetMachine {
virtual void anchor();
public:
PPC32TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
-/// PPC64TargetMachine - PowerPC 64-bit target machine.
+/// PowerPC 64-bit target machine.
///
class PPC64TargetMachine : public PPCTargetMachine {
virtual void anchor();
public:
PPC64TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
diff --git a/lib/Target/PowerPC/PPCTargetObjectFile.cpp b/lib/Target/PowerPC/PPCTargetObjectFile.cpp
index 798bb9d6b892..8f660355c0ac 100644
--- a/lib/Target/PowerPC/PPCTargetObjectFile.cpp
+++ b/lib/Target/PowerPC/PPCTargetObjectFile.cpp
@@ -53,7 +53,7 @@ MCSection *PPC64LinuxTargetObjectFile::SelectSectionForGlobal(
const MCExpr *PPC64LinuxTargetObjectFile::
getDebugThreadLocalSymbol(const MCSymbol *Sym) const {
const MCExpr *Expr =
- MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_PPC_DTPREL, getContext());
+ MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_DTPREL, getContext());
return MCBinaryExpr::createAdd(Expr,
MCConstantExpr::create(0x8000, getContext()),
getContext());
diff --git a/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index cd86dabd5abe..9331e41fb9c1 100644
--- a/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -21,6 +21,12 @@ using namespace llvm;
static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);
+// This is currently only used for the data prefetch pass which is only enabled
+// for BG/Q by default.
+static cl::opt<unsigned>
+CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
+ cl::desc("The loop prefetch cache line size"));
+
//===----------------------------------------------------------------------===//
//
// PPC cost model.
@@ -30,8 +36,9 @@ cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);
TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
- if (ST->hasPOPCNTD() && TyWidth <= 64)
- return TTI::PSK_FastHardware;
+ if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
+ return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
+ TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
return TTI::PSK_Software;
}
@@ -230,6 +237,18 @@ unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) {
}
+unsigned PPCTTIImpl::getCacheLineSize() {
+ // This is currently only used for the data prefetch pass which is only
+ // enabled for BG/Q by default.
+ return CacheLineSize;
+}
+
+unsigned PPCTTIImpl::getPrefetchDistance() {
+ // This seems like a reasonable default for the BG/Q (this pass is enabled, by
+ // default, only on the BG/Q).
+ return 300;
+}
+
unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
unsigned Directive = ST->getDarwinDirective();
// The 440 has no SIMD support, but floating-point instructions
@@ -248,8 +267,9 @@ unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
// For P7 and P8, floating-point instructions have a 6-cycle latency and
// there are two execution units, so unroll by 12x for latency hiding.
- if (Directive == PPC::DIR_PWR7 ||
- Directive == PPC::DIR_PWR8)
+ // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
+ if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
+ Directive == PPC::DIR_PWR9)
return 12;
// For most things, modern systems have two execution units (and
@@ -355,7 +375,7 @@ int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
// If we can use the permutation-based load sequence, then this is also
// relatively cheap (not counting loop-invariant instructions): one load plus
// one permute (the last load in a series has extra cost, but we're
- // neglecting that here). Note that on the P7, we should do unaligned loads
+ // neglecting that here). Note that on the P7, we could do unaligned loads
// for Altivec types using the VSX instructions, but that's more expensive
// than using the permutation-based load sequence. On the P8, that's no
// longer true.
diff --git a/lib/Target/PowerPC/PPCTargetTransformInfo.h b/lib/Target/PowerPC/PPCTargetTransformInfo.h
index 04c1b02235f0..5ea9a543cdb1 100644
--- a/lib/Target/PowerPC/PPCTargetTransformInfo.h
+++ b/lib/Target/PowerPC/PPCTargetTransformInfo.h
@@ -70,6 +70,8 @@ public:
bool enableInterleavedAccessVectorization();
unsigned getNumberOfRegisters(bool Vector);
unsigned getRegisterBitWidth(bool Vector);
+ unsigned getCacheLineSize();
+ unsigned getPrefetchDistance();
unsigned getMaxInterleaveFactor(unsigned VF);
int getArithmeticInstrCost(
unsigned Opcode, Type *Ty,
diff --git a/lib/Target/PowerPC/PPCVSXCopy.cpp b/lib/Target/PowerPC/PPCVSXCopy.cpp
index 782583ce3423..60f1ad5585ff 100644
--- a/lib/Target/PowerPC/PPCVSXCopy.cpp
+++ b/lib/Target/PowerPC/PPCVSXCopy.cpp
@@ -13,11 +13,11 @@
//
//===----------------------------------------------------------------------===//
-#include "PPCInstrInfo.h"
-#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
+#include "MCTargetDesc/PPCPredicates.h"
#include "PPCHazardRecognizers.h"
#include "PPCInstrBuilder.h"
+#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
@@ -28,7 +28,6 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
diff --git a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
index 6b19a2f7118b..7c22cb22bfa5 100644
--- a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
+++ b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
@@ -38,8 +38,14 @@
using namespace llvm;
-static cl::opt<bool> DisableVSXFMAMutate("disable-ppc-vsx-fma-mutation",
-cl::desc("Disable VSX FMA instruction mutation"), cl::Hidden);
+// Temporarily disable FMA mutation by default, since it doesn't handle
+// cross-basic-block intervals well.
+// See: http://lists.llvm.org/pipermail/llvm-dev/2016-February/095669.html
+// http://reviews.llvm.org/D17087
+static cl::opt<bool> DisableVSXFMAMutate(
+ "disable-ppc-vsx-fma-mutation",
+ cl::desc("Disable VSX FMA instruction mutation"), cl::init(true),
+ cl::Hidden);
#define DEBUG_TYPE "ppc-vsx-fma-mutate"
@@ -99,7 +105,7 @@ protected:
// %RM<imp-use>; VSLRC:%vreg16,%vreg18,%vreg9
// and we remove: %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
- SlotIndex FMAIdx = LIS->getInstructionIndex(MI);
+ SlotIndex FMAIdx = LIS->getInstructionIndex(*MI);
VNInfo *AddendValNo =
LIS->getInterval(MI->getOperand(1).getReg()).Query(FMAIdx).valueIn();
@@ -168,21 +174,32 @@ protected:
if (OtherUsers || KillsAddendSrc)
continue;
- // Find one of the product operands that is killed by this instruction.
+ // The transformation doesn't work well with things like:
+ // %vreg5 = A-form-op %vreg5, %vreg11, %vreg5;
+ // unless vreg11 is also a kill, so skip when it is not,
+ // and check operand 3 to see if it is also a kill, to handle the case:
+ // %vreg5 = A-form-op %vreg5, %vreg5, %vreg11;
+ // where vreg5 and vreg11 are both kills. This case would be skipped
+ // otherwise.
+ unsigned OldFMAReg = MI->getOperand(0).getReg();
+
+ // Find one of the product operands that is killed by this instruction.
unsigned KilledProdOp = 0, OtherProdOp = 0;
- if (LIS->getInterval(MI->getOperand(2).getReg())
- .Query(FMAIdx).isKill()) {
+ unsigned Reg2 = MI->getOperand(2).getReg();
+ unsigned Reg3 = MI->getOperand(3).getReg();
+ if (LIS->getInterval(Reg2).Query(FMAIdx).isKill()
+ && Reg2 != OldFMAReg) {
KilledProdOp = 2;
OtherProdOp = 3;
- } else if (LIS->getInterval(MI->getOperand(3).getReg())
- .Query(FMAIdx).isKill()) {
+ } else if (LIS->getInterval(Reg3).Query(FMAIdx).isKill()
+ && Reg3 != OldFMAReg) {
KilledProdOp = 3;
OtherProdOp = 2;
}
- // If there are no killed product operands, then this transformation is
- // likely not profitable.
+ // If there are no usable killed product operands, then this
+ // transformation is likely not profitable.
if (!KilledProdOp)
continue;
@@ -212,14 +229,6 @@ protected:
bool KilledProdRegUndef = MI->getOperand(KilledProdOp).isUndef();
bool OtherProdRegUndef = MI->getOperand(OtherProdOp).isUndef();
- unsigned OldFMAReg = MI->getOperand(0).getReg();
-
- // The transformation doesn't work well with things like:
- // %vreg5 = A-form-op %vreg5, %vreg11, %vreg5;
- // so leave such things alone.
- if (OldFMAReg == KilledProdReg)
- continue;
-
// If there isn't a class that fits, we can't perform the transform.
// This is needed for correctness with a mixture of VSX and Altivec
// instructions to make sure that a low VSX register is not assigned to
@@ -236,23 +245,33 @@ protected:
MI->getOperand(0).setReg(KilledProdReg);
MI->getOperand(1).setReg(KilledProdReg);
MI->getOperand(3).setReg(AddendSrcReg);
- MI->getOperand(2).setReg(OtherProdReg);
MI->getOperand(0).setSubReg(KilledProdSubReg);
MI->getOperand(1).setSubReg(KilledProdSubReg);
MI->getOperand(3).setSubReg(AddSubReg);
- MI->getOperand(2).setSubReg(OtherProdSubReg);
MI->getOperand(1).setIsKill(KilledProdRegKill);
MI->getOperand(3).setIsKill(AddRegKill);
- MI->getOperand(2).setIsKill(OtherProdRegKill);
MI->getOperand(1).setIsUndef(KilledProdRegUndef);
MI->getOperand(3).setIsUndef(AddRegUndef);
- MI->getOperand(2).setIsUndef(OtherProdRegUndef);
MI->setDesc(TII->get(AltOpc));
+ // If the addend is also a multiplicand, replace it with the addend
+ // source in both places.
+ if (OtherProdReg == AddendMI->getOperand(0).getReg()) {
+ MI->getOperand(2).setReg(AddendSrcReg);
+ MI->getOperand(2).setSubReg(AddSubReg);
+ MI->getOperand(2).setIsKill(AddRegKill);
+ MI->getOperand(2).setIsUndef(AddRegUndef);
+ } else {
+ MI->getOperand(2).setReg(OtherProdReg);
+ MI->getOperand(2).setSubReg(OtherProdSubReg);
+ MI->getOperand(2).setIsKill(OtherProdRegKill);
+ MI->getOperand(2).setIsUndef(OtherProdRegUndef);
+ }
+
DEBUG(dbgs() << " -> " << *MI);
// The killed product operand was killed here, so we can reuse it now
@@ -312,7 +331,7 @@ protected:
// Remove the (now unused) copy.
DEBUG(dbgs() << " removing: " << *AddendMI << '\n');
- LIS->RemoveMachineInstrFromMaps(AddendMI);
+ LIS->RemoveMachineInstrFromMaps(*AddendMI);
AddendMI->eraseFromParent();
Changed = true;
@@ -323,6 +342,9 @@ protected:
public:
bool runOnMachineFunction(MachineFunction &MF) override {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
// If we don't have VSX then go ahead and return without doing
// anything.
const PPCSubtarget &STI = MF.getSubtarget<PPCSubtarget>();
diff --git a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
index 27c540fcf211..d53c8e38254f 100644
--- a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
+++ b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
@@ -191,6 +191,9 @@ private:
public:
// Main entry point for this pass.
bool runOnMachineFunction(MachineFunction &MF) override {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
// If we don't have VSX on the subtarget, don't do anything.
const PPCSubtarget &STI = MF.getSubtarget<PPCSubtarget>();
if (!STI.hasVSX())
@@ -404,9 +407,9 @@ bool PPCVSXSwapRemoval::gatherVectorInstructions() {
case PPC::VSPLTB:
case PPC::VSPLTH:
case PPC::VSPLTW:
+ case PPC::XXSPLTW:
// Splats are lane-sensitive, but we can use special handling
- // to adjust the source lane for the splat. This is not yet
- // implemented. When it is, we need to uncomment the following:
+ // to adjust the source lane for the splat.
SwapVector[VecIdx].IsSwappable = 1;
SwapVector[VecIdx].SpecialHandling = SHValues::SH_SPLAT;
break;
@@ -512,7 +515,6 @@ bool PPCVSXSwapRemoval::gatherVectorInstructions() {
// permute control vectors (for shift values 1, 2, 3). However,
// VPERM has a more restrictive register class.
case PPC::XXSLDWI:
- case PPC::XXSPLTW:
break;
}
}
@@ -690,6 +692,7 @@ void PPCVSXSwapRemoval::recordUnoptimizableWebs() {
MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
unsigned UseReg = MI->getOperand(0).getReg();
MachineInstr *DefMI = MRI->getVRegDef(UseReg);
+ unsigned DefReg = DefMI->getOperand(0).getReg();
int DefIdx = SwapMap[DefMI];
if (!SwapVector[DefIdx].IsSwap || SwapVector[DefIdx].IsLoad ||
@@ -705,6 +708,25 @@ void PPCVSXSwapRemoval::recordUnoptimizableWebs() {
DEBUG(MI->dump());
DEBUG(dbgs() << "\n");
}
+
+ // Ensure all uses of the register defined by DefMI feed store
+ // instructions.
+ for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) {
+ int UseIdx = SwapMap[&UseMI];
+
+ if (SwapVector[UseIdx].VSEMI->getOpcode() != MI->getOpcode()) {
+ SwapVector[Repr].WebRejected = 1;
+
+ DEBUG(dbgs() <<
+ format("Web %d rejected for swap not feeding only stores\n",
+ Repr));
+ DEBUG(dbgs() << " def " << " : ");
+ DEBUG(DefMI->dump());
+ DEBUG(dbgs() << " use " << UseIdx << ": ");
+ DEBUG(SwapVector[UseIdx].VSEMI->dump());
+ DEBUG(dbgs() << "\n");
+ }
+ }
}
}
@@ -803,12 +825,21 @@ void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) {
llvm_unreachable("Unexpected splat opcode");
case PPC::VSPLTB: NElts = 16; break;
case PPC::VSPLTH: NElts = 8; break;
- case PPC::VSPLTW: NElts = 4; break;
+ case PPC::VSPLTW:
+ case PPC::XXSPLTW: NElts = 4; break;
}
- unsigned EltNo = MI->getOperand(1).getImm();
+ unsigned EltNo;
+ if (MI->getOpcode() == PPC::XXSPLTW)
+ EltNo = MI->getOperand(2).getImm();
+ else
+ EltNo = MI->getOperand(1).getImm();
+
EltNo = (EltNo + NElts / 2) % NElts;
- MI->getOperand(1).setImm(EltNo);
+ if (MI->getOpcode() == PPC::XXSPLTW)
+ MI->getOperand(2).setImm(EltNo);
+ else
+ MI->getOperand(1).setImm(EltNo);
DEBUG(dbgs() << " Into: ");
DEBUG(MI->dump());
diff --git a/lib/Target/PowerPC/README.txt b/lib/Target/PowerPC/README.txt
index 01233ae4f578..f1d4ca7b7fab 100644
--- a/lib/Target/PowerPC/README.txt
+++ b/lib/Target/PowerPC/README.txt
@@ -589,6 +589,17 @@ entry:
%tmp34 = zext i1 %tmp3 to i32 ; <i32> [#uses=1]
ret i32 %tmp34
}
+
+//===---------------------------------------------------------------------===//
+For the following code:
+
+void foo (float *__restrict__ a, int *__restrict__ b, int n) {
+ a[n] = b[n] * 2.321;
+}
+
+we load b[n] into a GPR, then move it to a VSX register and convert it to
+float. We should use VSX scalar integer load instructions to avoid the direct
+moves.
+
//===----------------------------------------------------------------------===//
; RUN: llvm-as < %s | llc -march=ppc32 | not grep fneg
diff --git a/lib/Target/PowerPC/README_P9.txt b/lib/Target/PowerPC/README_P9.txt
new file mode 100644
index 000000000000..d56f7cca7b21
--- /dev/null
+++ b/lib/Target/PowerPC/README_P9.txt
@@ -0,0 +1,605 @@
+//===- README_P9.txt - Notes for improving Power9 code gen ----------------===//
+
+TODO: Instructions that need intrinsics implemented or mapped to LLVM IR
+
+Altivec:
+- Vector Compare Not Equal (Zero):
+ vcmpneb(.) vcmpneh(.) vcmpnew(.)
+ vcmpnezb(.) vcmpnezh(.) vcmpnezw(.)
+ . Same as other VCMP*, use VCMP/VCMPo form (support intrinsic)
+
+- Vector Extract Unsigned: vextractub vextractuh vextractuw vextractd
+ . Don't use llvm extractelement, because the semantics differ
+ . Use intrinsics:
+ (set v2i64:$vD, (int_ppc_altivec_vextractub v16i8:$vA, imm:$UIMM))
+ (set v2i64:$vD, (int_ppc_altivec_vextractuh v8i16:$vA, imm:$UIMM))
+ (set v2i64:$vD, (int_ppc_altivec_vextractuw v4i32:$vA, imm:$UIMM))
+ (set v2i64:$vD, (int_ppc_altivec_vextractd v2i64:$vA, imm:$UIMM))
+
+- Vector Extract Unsigned Byte Left/Right-Indexed:
+ vextublx vextubrx vextuhlx vextuhrx vextuwlx vextuwrx
+ . Use intrinsics:
+ // Left-Indexed
+ (set i64:$rD, (int_ppc_altivec_vextublx i64:$rA, v16i8:$vB))
+ (set i64:$rD, (int_ppc_altivec_vextuhlx i64:$rA, v8i16:$vB))
+ (set i64:$rD, (int_ppc_altivec_vextuwlx i64:$rA, v4i32:$vB))
+
+ // Right-Indexed
+ (set i64:$rD, (int_ppc_altivec_vextubrx i64:$rA, v16i8:$vB))
+ (set i64:$rD, (int_ppc_altivec_vextuhrx i64:$rA, v8i16:$vB))
+ (set i64:$rD, (int_ppc_altivec_vextuwrx i64:$rA, v4i32:$vB))
+
+- Vector Insert Element Instructions: vinsertb vinserth vinsertw vinsertd
+ (set v16i8:$vD, (int_ppc_altivec_vinsertb v16i8:$vA, imm:$UIMM))
+ (set v8i16:$vD, (int_ppc_altivec_vinserth v8i16:$vA, imm:$UIMM))
+ (set v4i32:$vD, (int_ppc_altivec_vinsertw v4i32:$vA, imm:$UIMM))
+ (set v2i64:$vD, (int_ppc_altivec_vinsertd v2i64:$vA, imm:$UIMM))
+
+- Vector Count Leading/Trailing Zero LSB. Result is placed into GPR[rD]:
+ vclzlsbb vctzlsbb
+ . Use intrinsic:
+ (set i64:$rD, (int_ppc_altivec_vclzlsbb v16i8:$vB))
+ (set i64:$rD, (int_ppc_altivec_vctzlsbb v16i8:$vB))
+
+- Vector Count Trailing Zeros: vctzb vctzh vctzw vctzd
+ . Map to llvm cttz
+ (set v16i8:$vD, (cttz v16i8:$vB)) // vctzb
+ (set v8i16:$vD, (cttz v8i16:$vB)) // vctzh
+ (set v4i32:$vD, (cttz v4i32:$vB)) // vctzw
+ (set v2i64:$vD, (cttz v2i64:$vB)) // vctzd
+
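+ . A minimal IR sketch of the intended mapping (illustrative, assuming the
+   cttz patterns above are wired up); the v4i32 case would then select vctzw:
+
+     ; illustrative sketch, not from the ISA manual
+     define <4 x i32> @test_vctzw(<4 x i32> %a) {
+       %r = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %a, i1 false)
+       ret <4 x i32> %r
+     }
+     declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>, i1)
+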
+- Vector Extend Sign: vextsb2w vextsh2w vextsb2d vextsh2d vextsw2d
+ . vextsb2w:
+ (set v4i32:$vD, (sext v4i8:$vB))
+
+ // PowerISA_V3.0:
+ do i = 0 to 3
+ VR[VRT].word[i] ← EXTS32(VR[VRB].word[i].byte[3])
+ end
+
+ . vextsh2w:
+ (set v4i32:$vD, (sext v4i16:$vB))
+
+ // PowerISA_V3.0:
+ do i = 0 to 3
+ VR[VRT].word[i] ← EXTS32(VR[VRB].word[i].hword[1])
+ end
+
+ . vextsb2d
+ (set v2i64:$vD, (sext v2i8:$vB))
+
+ // PowerISA_V3.0:
+ do i = 0 to 1
+ VR[VRT].dword[i] ← EXTS64(VR[VRB].dword[i].byte[7])
+ end
+
+ . vextsh2d
+ (set v2i64:$vD, (sext v2i16:$vB))
+
+ // PowerISA_V3.0:
+ do i = 0 to 1
+ VR[VRT].dword[i] ← EXTS64(VR[VRB].dword[i].hword[3])
+ end
+
+ . vextsw2d
+ (set v2i64:$vD, (sext v2i32:$vB))
+
+ // PowerISA_V3.0:
+ do i = 0 to 1
+ VR[VRT].dword[i] ← EXTS64(VR[VRB].dword[i].word[1])
+ end
+
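+ . A minimal IR sketch for the byte-to-word case (illustrative, assuming the
+   vextsb2w sext pattern above is implemented):
+
+     ; illustrative sketch
+     define <4 x i32> @test_vextsb2w(<4 x i8> %a) {
+       %r = sext <4 x i8> %a to <4 x i32>
+       ret <4 x i32> %r
+     }
+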
+- Vector Integer Negate: vnegw vnegd
+ . Map to llvm ineg
+ (set v4i32:$rT, (ineg v4i32:$rA)) // vnegw
+ (set v2i64:$rT, (ineg v2i64:$rA)) // vnegd
+
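+ . Note that LLVM IR has no ineg instruction; the ineg node is formed from a
+   subtract from zero, so a minimal sketch (illustrative) for vnegw is:
+
+     ; illustrative sketch; the sub-from-zero becomes an ineg node
+     define <4 x i32> @test_vnegw(<4 x i32> %a) {
+       %r = sub <4 x i32> zeroinitializer, %a
+       ret <4 x i32> %r
+     }
+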
+- Vector Parity Byte: vprtybw vprtybd vprtybq
+ . Use intrinsic:
+ (set v4i32:$rD, (int_ppc_altivec_vprtybw v4i32:$vB))
+ (set v2i64:$rD, (int_ppc_altivec_vprtybd v2i64:$vB))
+ (set v1i128:$rD, (int_ppc_altivec_vprtybq v1i128:$vB))
+
+- Vector (Bit) Permute (Right-indexed):
+ . vbpermd: Same as "vbpermq", use VX1_Int_Ty2:
+ VX1_Int_Ty2<1484, "vbpermd", int_ppc_altivec_vbpermd, v2i64, v2i64>;
+
+ . vpermr: use VA1a_Int_Ty3
+ VA1a_Int_Ty3<59, "vpermr", int_ppc_altivec_vpermr, v16i8, v16i8, v16i8>;
+
+- Vector Rotate Left Mask/Mask-Insert: vrlwnm vrlwmi vrldnm vrldmi
+ . Use intrinsic:
+ VX1_Int_Ty<389, "vrlwnm", int_ppc_altivec_vrlwnm, v4i32>;
+ VX1_Int_Ty<133, "vrlwmi", int_ppc_altivec_vrlwmi, v4i32>;
+ VX1_Int_Ty<453, "vrldnm", int_ppc_altivec_vrldnm, v2i64>;
+ VX1_Int_Ty<197, "vrldmi", int_ppc_altivec_vrldmi, v2i64>;
+
+- Vector Shift Left/Right: vslv vsrv
+ . Use intrinsic, don't map to llvm shl and lshr, because they have different
+ semantics, e.g. vslv:
+
+ do i = 0 to 15
+ sh ← VR[VRB].byte[i].bit[5:7]
+ VR[VRT].byte[i] ← src.byte[i:i+1].bit[sh:sh+7]
+ end
+
+ VR[VRT].byte[i] is composed of 2 bytes from src.byte[i:i+1]
+
+ . VX1_Int_Ty<1860, "vslv", int_ppc_altivec_vslv, v16i8>;
+ VX1_Int_Ty<1796, "vsrv", int_ppc_altivec_vsrv, v16i8>;
+
+- Vector Multiply-by-10 (& Write Carry) Unsigned Quadword:
+ vmul10uq vmul10cuq
+ . Use intrinsic:
+ VX1_Int_Ty<513, "vmul10uq", int_ppc_altivec_vmul10uq, v1i128>;
+ VX1_Int_Ty< 1, "vmul10cuq", int_ppc_altivec_vmul10cuq, v1i128>;
+
+- Vector Multiply-by-10 Extended (& Write Carry) Unsigned Quadword:
+ vmul10euq vmul10ecuq
+ . Use intrinsic:
+ VX1_Int_Ty<577, "vmul10euq", int_ppc_altivec_vmul10euq, v1i128>;
+ VX1_Int_Ty< 65, "vmul10ecuq", int_ppc_altivec_vmul10ecuq, v1i128>;
+
+- Decimal Convert From/to National/Zoned/Signed-QWord:
+ bcdcfn. bcdcfz. bcdctn. bcdctz. bcdcfsq. bcdctsq.
+ . Use intrinsics:
+ (set v1i128:$vD, (int_ppc_altivec_bcdcfno v1i128:$vB, i1:$PS))
+ (set v1i128:$vD, (int_ppc_altivec_bcdcfzo v1i128:$vB, i1:$PS))
+ (set v1i128:$vD, (int_ppc_altivec_bcdctno v1i128:$vB))
+ (set v1i128:$vD, (int_ppc_altivec_bcdctzo v1i128:$vB, i1:$PS))
+ (set v1i128:$vD, (int_ppc_altivec_bcdcfsqo v1i128:$vB, i1:$PS))
+ (set v1i128:$vD, (int_ppc_altivec_bcdctsqo v1i128:$vB))
+
+- Decimal Copy-Sign/Set-Sign: bcdcpsgn. bcdsetsgn.
+ . Use intrinsics:
+ (set v1i128:$vD, (int_ppc_altivec_bcdcpsgno v1i128:$vA, v1i128:$vB))
+ (set v1i128:$vD, (int_ppc_altivec_bcdsetsgno v1i128:$vB, i1:$PS))
+
+- Decimal Shift/Unsigned-Shift/Shift-and-Round: bcds. bcdus. bcdsr.
+ . Use intrinsics:
+ (set v1i128:$vD, (int_ppc_altivec_bcdso v1i128:$vA, v1i128:$vB, i1:$PS))
+ (set v1i128:$vD, (int_ppc_altivec_bcduso v1i128:$vA, v1i128:$vB))
+ (set v1i128:$vD, (int_ppc_altivec_bcdsro v1i128:$vA, v1i128:$vB, i1:$PS))
+
+ . Note! Only 1 byte of VA is accessed, i.e. VA.byte[7]
+
+- Decimal (Unsigned) Truncate: bcdtrunc. bcdutrunc.
+ . Use intrinsics:
+ (set v1i128:$vD, (int_ppc_altivec_bcdso v1i128:$vA, v1i128:$vB, i1:$PS))
+ (set v1i128:$vD, (int_ppc_altivec_bcduso v1i128:$vA, v1i128:$vB))
+
+ . Note! Only 2 bytes of VA are accessed, i.e. VA.hword[3] (VA.bit[48:63])
+
+VSX:
+- QP Copy Sign: xscpsgnqp
+ . Similar to xscpsgndp
+ . (set f128:$vT, (fcopysign f128:$vB, f128:$vA))
+
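+ . A minimal IR sketch (illustrative; llvm.copysign takes the magnitude first
+   and the sign second, matching the operand order of the pattern above):
+
+     ; illustrative sketch
+     define fp128 @test_xscpsgnqp(fp128 %mag, fp128 %sgn) {
+       %r = call fp128 @llvm.copysign.f128(fp128 %mag, fp128 %sgn)
+       ret fp128 %r
+     }
+     declare fp128 @llvm.copysign.f128(fp128, fp128)
+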
+- QP Absolute/Negative-Absolute/Negate: xsabsqp xsnabsqp xsnegqp
+ . Similar to xsabsdp/xsnabsdp/xsnegdp
+ . (set f128:$vT, (fabs f128:$vB)) // xsabsqp
+ (set f128:$vT, (fneg (fabs f128:$vB))) // xsnabsqp
+ (set f128:$vT, (fneg f128:$vB)) // xsnegqp
+
+- QP Add/Divide/Multiply/Subtract/Square-Root:
+ xsaddqp xsdivqp xsmulqp xssubqp xssqrtqp
+ . Similar to xsadddp
+ . isCommutable = 1
+ (set f128:$vT, (fadd f128:$vA, f128:$vB)) // xsaddqp
+ (set f128:$vT, (fmul f128:$vA, f128:$vB)) // xsmulqp
+
+ . isCommutable = 0
+ (set f128:$vT, (fdiv f128:$vA, f128:$vB)) // xsdivqp
+ (set f128:$vT, (fsub f128:$vA, f128:$vB)) // xssubqp
+ (set f128:$vT, (fsqrt f128:$vB)) // xssqrtqp
+
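+ . A minimal IR sketch for the add case (illustrative; fmul/fdiv/fsub and
+   llvm.sqrt would exercise the other patterns the same way):
+
+     ; illustrative sketch
+     define fp128 @test_xsaddqp(fp128 %a, fp128 %b) {
+       %r = fadd fp128 %a, %b
+       ret fp128 %r
+     }
+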
+- Round to Odd of QP Add/Divide/Multiply/Subtract/Square-Root:
+ xsaddqpo xsdivqpo xsmulqpo xssubqpo xssqrtqpo
+ . Similar to xsrsqrtedp??
+ def XSRSQRTEDP : XX2Form<60, 74,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xsrsqrtedp $XT, $XB", IIC_VecFP,
+ [(set f64:$XT, (PPCfrsqrte f64:$XB))]>;
+
+ . Define DAG Node in PPCInstrInfo.td:
+ def PPCfaddrto: SDNode<"PPCISD::FADDRTO", SDTFPBinOp, []>;
+ def PPCfdivrto: SDNode<"PPCISD::FDIVRTO", SDTFPBinOp, []>;
+ def PPCfmulrto: SDNode<"PPCISD::FMULRTO", SDTFPBinOp, []>;
+ def PPCfsubrto: SDNode<"PPCISD::FSUBRTO", SDTFPBinOp, []>;
+ def PPCfsqrtrto: SDNode<"PPCISD::FSQRTRTO", SDTFPUnaryOp, []>;
+
+ DAG patterns of each instruction (PPCInstrVSX.td):
+ . isCommutable = 1
+ (set f128:$vT, (PPCfaddrto f128:$vA, f128:$vB)) // xsaddqpo
+ (set f128:$vT, (PPCfmulrto f128:$vA, f128:$vB)) // xsmulqpo
+
+ . isCommutable = 0
+ (set f128:$vT, (PPCfdivrto f128:$vA, f128:$vB)) // xsdivqpo
+ (set f128:$vT, (PPCfsubrto f128:$vA, f128:$vB)) // xssubqpo
+ (set f128:$vT, (PPCfsqrtrto f128:$vB)) // xssqrtqpo
+
+- QP (Negative) Multiply-{Add/Subtract}: xsmaddqp xsmsubqp xsnmaddqp xsnmsubqp
+ . Ref: xsmaddadp/xsmsubadp/xsnmaddadp/xsnmsubadp
+
+ . isCommutable = 1
+ // xsmaddqp
+ [(set f128:$vT, (fma f128:$vA, f128:$vB, f128:$vTi))]>,
+ RegConstraint<"$vTi = $vT">, NoEncode<"$vTi">,
+ AltVSXFMARel;
+
+ // xsmsubqp
+ [(set f128:$vT, (fma f128:$vA, f128:$vB, (fneg f128:$vTi)))]>,
+ RegConstraint<"$vTi = $vT">, NoEncode<"$vTi">,
+ AltVSXFMARel;
+
+ // xsnmaddqp
+ [(set f128:$vT, (fneg (fma f128:$vA, f128:$vB, f128:$vTi)))]>,
+ RegConstraint<"$vTi = $vT">, NoEncode<"$vTi">,
+ AltVSXFMARel;
+
+ // xsnmsubqp
+ [(set f128:$vT, (fneg (fma f128:$vA, f128:$vB, (fneg f128:$vTi))))]>,
+ RegConstraint<"$vTi = $vT">, NoEncode<"$vTi">,
+ AltVSXFMARel;
+
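+ . A minimal IR sketch for the plain multiply-add case (illustrative, assuming
+   f128 fma is selected to these instructions):
+
+     ; illustrative sketch
+     define fp128 @test_xsmaddqp(fp128 %a, fp128 %b, fp128 %c) {
+       %r = call fp128 @llvm.fma.f128(fp128 %a, fp128 %b, fp128 %c)
+       ret fp128 %r
+     }
+     declare fp128 @llvm.fma.f128(fp128, fp128, fp128)
+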
+- Round to Odd of QP (Negative) Multiply-{Add/Subtract}:
+ xsmaddqpo xsmsubqpo xsnmaddqpo xsnmsubqpo
+ . Similar to xsrsqrtedp??
+
+ . Define DAG Node in PPCInstrInfo.td:
+ def PPCfmarto: SDNode<"PPCISD::FMARTO", SDTFPTernaryOp, []>;
+
+ It looks like we only need to define "PPCfmarto" for these instructions,
+ because according to PowerISA_V3.0, these instructions perform RTO on
+ fma's result:
+ xsmaddqp(o)
+ v ← bfp_MULTIPLY_ADD(src1, src3, src2)
+ rnd ← bfp_ROUND_TO_BFP128(RO, FPSCR.RN, v)
+ result ← bfp_CONVERT_TO_BFP128(rnd)
+
+ xsmsubqp(o)
+ v ← bfp_MULTIPLY_ADD(src1, src3, bfp_NEGATE(src2))
+ rnd ← bfp_ROUND_TO_BFP128(RO, FPSCR.RN, v)
+ result ← bfp_CONVERT_TO_BFP128(rnd)
+
+ xsnmaddqp(o)
+ v ← bfp_MULTIPLY_ADD(src1,src3,src2)
+ rnd ← bfp_NEGATE(bfp_ROUND_TO_BFP128(RO, FPSCR.RN, v))
+ result ← bfp_CONVERT_TO_BFP128(rnd)
+
+ xsnmsubqp(o)
+ v ← bfp_MULTIPLY_ADD(src1, src3, bfp_NEGATE(src2))
+ rnd ← bfp_NEGATE(bfp_ROUND_TO_BFP128(RO, FPSCR.RN, v))
+ result ← bfp_CONVERT_TO_BFP128(rnd)
+
+ DAG patterns of each instruction (PPCInstrVSX.td):
+ . isCommutable = 1
+ // xsmaddqpo
+ [(set f128:$vT, (PPCfmarto f128:$vA, f128:$vB, f128:$vTi))]>,
+ RegConstraint<"$vTi = $vT">, NoEncode<"$vTi">,
+ AltVSXFMARel;
+
+ // xsmsubqpo
+ [(set f128:$vT, (PPCfmarto f128:$vA, f128:$vB, (fneg f128:$vTi)))]>,
+ RegConstraint<"$vTi = $vT">, NoEncode<"$vTi">,
+ AltVSXFMARel;
+
+ // xsnmaddqpo
+ [(set f128:$vT, (fneg (PPCfmarto f128:$vA, f128:$vB, f128:$vTi)))]>,
+ RegConstraint<"$vTi = $vT">, NoEncode<"$vTi">,
+ AltVSXFMARel;
+
+ // xsnmsubqpo
+ [(set f128:$vT, (fneg (PPCfmarto f128:$vA, f128:$vB, (fneg f128:$vTi))))]>,
+ RegConstraint<"$vTi = $vT">, NoEncode<"$vTi">,
+ AltVSXFMARel;
+
+- QP Compare Ordered/Unordered: xscmpoqp xscmpuqp
+ . ref: XSCMPUDP
+ def XSCMPUDP : XX3Form_1<60, 35,
+ (outs crrc:$crD), (ins vsfrc:$XA, vsfrc:$XB),
+ "xscmpudp $crD, $XA, $XB", IIC_FPCompare, []>;
+
+ . No SDAG, intrinsic, builtin are required??
+ Or llvm fcmp order/unorder compare??
+
+- DP/QP Compare Exponents: xscmpexpdp xscmpexpqp
+ . No SDAG, intrinsic, builtin are required?
+
+- DP Compare ==, >=, >, !=: xscmpeqdp xscmpgedp xscmpgtdp xscmpnedp
+ . I checked the existing instruction "XSCMPUDP". They differ in the target
+   register: "XSCMPUDP" writes to a CR field, while xscmp*dp writes to a VSX
+   register
+
+ . Use intrinsic:
+ (set i128:$XT, (int_ppc_vsx_xscmpeqdp f64:$XA, f64:$XB))
+ (set i128:$XT, (int_ppc_vsx_xscmpgedp f64:$XA, f64:$XB))
+ (set i128:$XT, (int_ppc_vsx_xscmpgtdp f64:$XA, f64:$XB))
+ (set i128:$XT, (int_ppc_vsx_xscmpnedp f64:$XA, f64:$XB))
+
+- Vector Compare Not Equal: xvcmpnedp xvcmpnedp. xvcmpnesp xvcmpnesp.
+ . Similar to xvcmpeqdp:
+ defm XVCMPEQDP : XX3Form_Rcr<60, 99,
+ "xvcmpeqdp", "$XT, $XA, $XB", IIC_VecFPCompare,
+ int_ppc_vsx_xvcmpeqdp, v2i64, v2f64>;
+
+ . So we should use "XX3Form_Rcr" to implement the intrinsic
+
+- Convert DP -> QP: xscvdpqp
+ . Similar to XSCVDPSP:
+ def XSCVDPSP : XX2Form<60, 265,
+ (outs vsfrc:$XT), (ins vsfrc:$XB),
+ "xscvdpsp $XT, $XB", IIC_VecFP, []>;
+ . So, No SDAG, intrinsic, builtin are required??
+
+- Round & Convert QP -> DP (dword[1] is set to zero): xscvqpdp xscvqpdpo
+ . Similar to XSCVDPSP
+ . No SDAG, intrinsic, builtin are required??
+
+- Truncate & Convert QP -> (Un)Signed (D)Word (dword[1] is set to zero):
+ xscvqpsdz xscvqpswz xscvqpudz xscvqpuwz
+ . According to PowerISA_V3.0, these are similar to "XSCVDPSXDS", "XSCVDPSXWS",
+ "XSCVDPUXDS", "XSCVDPUXWS"
+
+ . DAG patterns:
+ (set f128:$XT, (PPCfctidz f128:$XB)) // xscvqpsdz
+ (set f128:$XT, (PPCfctiwz f128:$XB)) // xscvqpswz
+ (set f128:$XT, (PPCfctiduz f128:$XB)) // xscvqpudz
+ (set f128:$XT, (PPCfctiwuz f128:$XB)) // xscvqpuwz
+
+- Convert (Un)Signed DWord -> QP: xscvsdqp xscvudqp
+ . Similar to XSCVSXDSP
+ . (set f128:$XT, (PPCfcfids f64:$XB)) // xscvsdqp
+ (set f128:$XT, (PPCfcfidus f64:$XB)) // xscvudqp
+
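+ . A minimal IR sketch (illustrative): the signed case is a plain sitofp, the
+   unsigned case the corresponding uitofp:
+
+     ; illustrative sketch
+     define fp128 @test_xscvsdqp(i64 %a) {
+       %r = sitofp i64 %a to fp128
+       ret fp128 %r
+     }
+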
+- (Round &) Convert DP <-> HP: xscvdphp xscvhpdp
+ . Similar to XSCVDPSP
+ . No SDAG, intrinsic, builtin are required??
+
+- Vector HP -> SP: xvcvhpsp xvcvsphp
+ . Similar to XVCVDPSP:
+ def XVCVDPSP : XX2Form<60, 393,
+ (outs vsrc:$XT), (ins vsrc:$XB),
+ "xvcvdpsp $XT, $XB", IIC_VecFP, []>;
+ . No SDAG, intrinsic, builtin are required??
+
+- Round to Quad-Precision Integer: xsrqpi xsrqpix
+ . These are a combination of "XSRDPI", "XSRDPIC", "XSRDPIM", .., because the
+   rounding mode needs to be assigned in the instruction
+ . Provide builtin?
+ (set f128:$vT, (int_ppc_vsx_xsrqpi f128:$vB))
+ (set f128:$vT, (int_ppc_vsx_xsrqpix f128:$vB))
+
+- Round Quad-Precision to Double-Extended Precision (fp80): xsrqpxp
+ . Provide builtin?
+ (set f128:$vT, (int_ppc_vsx_xsrqpxp f128:$vB))
+
+Fixed Point Facility:
+
+- Exploit cmprb and cmpeqb (perhaps for something like
+ isalpha/isdigit/isupper/islower and isspace respectively). This can
+ perhaps be done through a builtin.
+
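+  An IR sketch of the kind of range check cmprb could cover (illustrative
+  isdigit-style test; whether this would be matched directly or only through a
+  builtin is an open question):
+
+    ; illustrative sketch: '0' <= c <= '9'
+    define i1 @is_digit(i8 %c) {
+      %lo = icmp uge i8 %c, 48
+      %hi = icmp ule i8 %c, 57
+      %r = and i1 %lo, %hi
+      ret i1 %r
+    }
+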
+- Provide testing for cnttz[dw]
+
+- Insert Exponent DP/QP: xsiexpdp xsiexpqp
+ . Use intrinsic?
+ . xsiexpdp:
+ // Note: rA and rB are unsigned integer values.
+ (set f128:$XT, (int_ppc_vsx_xsiexpdp i64:$rA, i64:$rB))
+
+ . xsiexpqp:
+ (set f128:$vT, (int_ppc_vsx_xsiexpqp f128:$vA, f64:$vB))
+
+- Extract Exponent/Significand DP/QP: xsxexpdp xsxsigdp xsxexpqp xsxsigqp
+ . Use intrinsic?
+ . (set i64:$rT, (int_ppc_vsx_xsxexpdp f64:$XB)) // xsxexpdp
+ (set i64:$rT, (int_ppc_vsx_xsxsigdp f64:$XB)) // xsxsigdp
+ (set f128:$vT, (int_ppc_vsx_xsxexpqp f128:$vB)) // xsxexpqp
+ (set f128:$vT, (int_ppc_vsx_xsxsigqp f128:$vB)) // xsxsigqp
+
+- Vector Insert Word: xxinsertw
+ - Useful for inserting f32/i32 elements into vectors (the element to be
+ inserted needs to be prepared)
+ . Note: llvm has insertelement in "Vector Operations"
+ ; yields <n x <ty>>
+ <result> = insertelement <n x <ty>> <val>, <ty> <elt>, <ty2> <idx>
+
+ But how to map to it??
+ [(set v1f128:$XT, (insertelement v1f128:$XTi, f128:$XB, i4:$UIMM))]>,
+ RegConstraint<"$XTi = $XT">, NoEncode<"$XTi">,
+
+ . Or use intrinsic?
+ (set v1f128:$XT, (int_ppc_vsx_xxinsertw v1f128:$XTi, f128:$XB, i4:$UIMM))
+
+- Vector Extract Unsigned Word: xxextractuw
+ - Not useful for extraction of f32 from v4f32 (the current pattern is better -
+ shift->convert)
+ - It is useful for (uint_to_fp (vector_extract v4i32, N))
+ - Unfortunately, it can't be used for (sint_to_fp (vector_extract v4i32, N))
+ . Note: llvm has extractelement in "Vector Operations"
+ ; yields <ty>
+ <result> = extractelement <n x <ty>> <val>, <ty2> <idx>
+
+ How to map to it??
+ [(set f128:$XT, (extractelement v1f128:$XB, i4:$UIMM))]
+
+ . Or use intrinsic?
+ (set f128:$XT, (int_ppc_vsx_xxextractuw v1f128:$XB, i4:$UIMM))
+
+- Vector Insert Exponent DP/SP: xviexpdp xviexpsp
+ . Use intrinsic
+ (set v2f64:$XT, (int_ppc_vsx_xviexpdp v2f64:$XA, v2f64:$XB))
+ (set v4f32:$XT, (int_ppc_vsx_xviexpsp v4f32:$XA, v4f32:$XB))
+
+- Vector Extract Exponent/Significand DP/SP: xvxexpdp xvxexpsp xvxsigdp xvxsigsp
+ . Use intrinsic
+ (set v2f64:$XT, (int_ppc_vsx_xvxexpdp v2f64:$XB))
+ (set v4f32:$XT, (int_ppc_vsx_xvxexpsp v4f32:$XB))
+ (set v2f64:$XT, (int_ppc_vsx_xvxsigdp v2f64:$XB))
+ (set v4f32:$XT, (int_ppc_vsx_xvxsigsp v4f32:$XB))
+
+- Test Data Class SP/DP/QP: xststdcsp xststdcdp xststdcqp
+ . No SDAG, intrinsic, builtin are required?
+ Because it seems that we have no way to map BF field?
+
+ Instruction Form: [PO T XO B XO BX TX]
+ Asm: xststd* BF,XB,DCMX
+
+ BF is an index to CR register field.
+
+- Vector Test Data Class SP/DP: xvtstdcsp xvtstdcdp
+ . Use intrinsic
+ (set v4f32:$XT, (int_ppc_vsx_xvtstdcsp v4f32:$XB, i7:$DCMX))
+ (set v2f64:$XT, (int_ppc_vsx_xvtstdcdp v2f64:$XB, i7:$DCMX))
+
+- Maximum/Minimum Type-C/Type-J DP: xsmaxcdp xsmaxjdp xsmincdp xsminjdp
+ . PowerISA_V3.0:
+ "xsmaxcdp can be used to implement the C/C++/Java conditional operation
+ (x>y)?x:y for single-precision and double-precision arguments."
+
+ Note! c type and j type have different behavior when:
+ 1. Either input is NaN
+ 2. Both inputs are +-Infinity, +-Zero
+
+ . c type maps to llvm fmaxnum/fminnum
+   j type uses an intrinsic
+
+ . xsmaxcdp xsmincdp
+ (set f64:$XT, (fmaxnum f64:$XA, f64:$XB))
+ (set f64:$XT, (fminnum f64:$XA, f64:$XB))
+
+ . xsmaxjdp xsminjdp
+ (set f64:$XT, (int_ppc_vsx_xsmaxjdp f64:$XA, f64:$XB))
+ (set f64:$XT, (int_ppc_vsx_xsminjdp f64:$XA, f64:$XB))
+
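+ . A minimal IR sketch for the c-type maximum (illustrative; whether the
+   llvm.maxnum NaN semantics really line up with xsmaxcdp still needs to be
+   checked):
+
+     ; illustrative sketch
+     define double @test_xsmaxcdp(double %a, double %b) {
+       %r = call double @llvm.maxnum.f64(double %a, double %b)
+       ret double %r
+     }
+     declare double @llvm.maxnum.f64(double, double)
+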
+- Vector Byte-Reverse H/W/D/Q Word: xxbrh xxbrw xxbrd xxbrq
+ . Use intrinsic
+ (set v8i16:$XT, (int_ppc_vsx_xxbrh v8i16:$XB))
+ (set v4i32:$XT, (int_ppc_vsx_xxbrw v4i32:$XB))
+ (set v2i64:$XT, (int_ppc_vsx_xxbrd v2i64:$XB))
+ (set v1i128:$XT, (int_ppc_vsx_xxbrq v1i128:$XB))
+
+- Vector Permute: xxperm xxpermr
+ . I have checked "PPCxxswapd" in PPCInstrVSX.td, but they are different
+ . Use intrinsic
+ (set v16i8:$XT, (int_ppc_vsx_xxperm v16i8:$XA, v16i8:$XB))
+ (set v16i8:$XT, (int_ppc_vsx_xxpermr v16i8:$XA, v16i8:$XB))
+
+- Vector Splat Immediate Byte: xxspltib
+ . Similar to XXSPLTW:
+ def XXSPLTW : XX2Form_2<60, 164,
+ (outs vsrc:$XT), (ins vsrc:$XB, u2imm:$UIM),
+ "xxspltw $XT, $XB, $UIM", IIC_VecPerm, []>;
+
+ . No SDAG, intrinsic, builtin are required?
+
+- Load/Store Vector: lxv stxv
+ . Has likely SDAG match:
+ (set v?:$XT, (load ix16addr:$src))
+ (store v?:$XT, ix16addr:$dst)
+
+ . Need to define ix16addr in PPCInstrInfo.td
+ ix16addr: 16-byte aligned, see "def memrix16" in PPCInstrInfo.td
+
+- Load/Store Vector Indexed: lxvx stxvx
+ . Has likely SDAG match:
+ (set v?:$XT, (load xoaddr:$src))
+ (store v?:$XT, xoaddr:$dst)
+
+- Load/Store DWord: lxsd stxsd
+ . Similar to lxsdx/stxsdx:
+ def LXSDX : XX1Form<31, 588,
+ (outs vsfrc:$XT), (ins memrr:$src),
+ "lxsdx $XT, $src", IIC_LdStLFD,
+ [(set f64:$XT, (load xoaddr:$src))]>;
+
+ . (set f64:$XT, (load ixaddr:$src))
+ (store f64:$XT, ixaddr:$dst)
+
+- Load/Store SP, with conversion from/to DP: lxssp stxssp
+ . Similar to lxsspx/stxsspx:
+ def LXSSPX : XX1Form<31, 524, (outs vssrc:$XT), (ins memrr:$src),
+ "lxsspx $XT, $src", IIC_LdStLFD,
+ [(set f32:$XT, (load xoaddr:$src))]>;
+
+ . (set f32:$XT, (load ixaddr:$src))
+ (store f32:$XT, ixaddr:$dst)
+
+- Load as Integer Byte/Halfword & Zero Indexed: lxsibzx lxsihzx
+ . Similar to lxsiwzx:
+ def LXSIWZX : XX1Form<31, 12, (outs vsfrc:$XT), (ins memrr:$src),
+ "lxsiwzx $XT, $src", IIC_LdStLFD,
+ [(set f64:$XT, (PPClfiwzx xoaddr:$src))]>;
+
+ . (set f64:$XT, (PPClfiwzx xoaddr:$src))
+
+- Store as Integer Byte/Halfword Indexed: stxsibx stxsihx
+ . Similar to stxsiwx:
+ def STXSIWX : XX1Form<31, 140, (outs), (ins vsfrc:$XT, memrr:$dst),
+ "stxsiwx $XT, $dst", IIC_LdStSTFD,
+ [(PPCstfiwx f64:$XT, xoaddr:$dst)]>;
+
+ . (PPCstfiwx f64:$XT, xoaddr:$dst)
+
+- Load Vector Halfword*8/Byte*16 Indexed: lxvh8x lxvb16x
+ . Similar to lxvd2x/lxvw4x:
+ def LXVD2X : XX1Form<31, 844,
+ (outs vsrc:$XT), (ins memrr:$src),
+ "lxvd2x $XT, $src", IIC_LdStLFD,
+ [(set v2f64:$XT, (int_ppc_vsx_lxvd2x xoaddr:$src))]>;
+
+ . (set v8i16:$XT, (int_ppc_vsx_lxvh8x xoaddr:$src))
+ (set v16i8:$XT, (int_ppc_vsx_lxvb16x xoaddr:$src))
+
+- Store Vector Halfword*8/Byte*16 Indexed: stxvh8x stxvb16x
+ . Similar to stxvd2x/stxvw4x:
+ def STXVD2X : XX1Form<31, 972,
+ (outs), (ins vsrc:$XT, memrr:$dst),
+ "stxvd2x $XT, $dst", IIC_LdStSTFD,
+ [(store v2f64:$XT, xoaddr:$dst)]>;
+
+ . (store v8i16:$XT, xoaddr:$dst)
+ (store v16i8:$XT, xoaddr:$dst)
+
+- Load/Store Vector (Left-justified) with Length: lxvl lxvll stxvl stxvll
+ . Likely needs an intrinsic
+ . (set v?:$XT, (int_ppc_vsx_lxvl xoaddr:$src))
+ (set v?:$XT, (int_ppc_vsx_lxvll xoaddr:$src))
+
+ . (int_ppc_vsx_stxvl xoaddr:$dst)
+ (int_ppc_vsx_stxvll xoaddr:$dst)
+
+- Load Vector Word & Splat Indexed: lxvwsx
+ . Likely needs an intrinsic
+ . (set v?:$XT, (int_ppc_vsx_lxvwsx xoaddr:$src))
+
+Atomic operations (l[dw]at, st[dw]at):
+- Provide custom lowering for common atomic operations to use these
+ instructions with the correct Function Code
+- Ensure the operands are in the correct registers (i.e. RT+1, RT+2)
+- Provide builtins since not all FC's necessarily have an existing LLVM
+ atomic operation
+
+Load Doubleword Monitored (ldmx):
+- Investigate whether there are any uses for this. It seems to be related to
+ Garbage Collection so it isn't likely to be all that useful for most
+ languages we deal with.
+
+Move to CR from XER Extended (mcrxrx):
+- Is there a use for this in LLVM?
+
+Fixed Point Facility:
+
+- Copy-Paste Facility: copy copy_first cp_abort paste paste. paste_last
+ . Use intrinsics:
+ (int_ppc_copy_first i32:$rA, i32:$rB)
+ (int_ppc_copy i32:$rA, i32:$rB)
+
+ (int_ppc_paste i32:$rA, i32:$rB)
+ (int_ppc_paste_last i32:$rA, i32:$rB)
+
+ (int_cp_abort)
+
+- Message Synchronize: msgsync
+- SLB*: slbieg slbsync
+- stop
+ . No intrinsics
diff --git a/lib/Target/PowerPC/TargetInfo/Makefile b/lib/Target/PowerPC/TargetInfo/Makefile
deleted file mode 100644
index 2d0560d275f9..000000000000
--- a/lib/Target/PowerPC/TargetInfo/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/PowerPC/TargetInfo/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMPowerPCInfo
-
-# Hack: we need to include 'main' target directory to grab private headers
-override CPPFLAGS += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/PowerPC/p9-instrs.txt b/lib/Target/PowerPC/p9-instrs.txt
new file mode 100644
index 000000000000..a70582aca398
--- /dev/null
+++ b/lib/Target/PowerPC/p9-instrs.txt
@@ -0,0 +1,442 @@
+Content:
+========
+. Remaining Instructions (Total 56 instructions, including 2 unknown instructions)
+. Done (Total 155 Instructions: 101 VSX, 54 Altivec)
+
+//------------------------------------------------------------------------------
+//. Remaining Instructions
+//------------------------------------------------------------------------------
+GCC reference: https://sourceware.org/ml/binutils/2015-11/msg00071.html
+
+// Add PC Immediate Shifted DX-form p69
+[PO RT d1 d0 XO d2] addpcis RT,D
+ subpcis Rx,value = addpcis Rx,-value
+
+// 6.17.2 Decimal Integer Format Conversion Instructions
+
+// Decimal Convert From National VX-form p352
+[PO VRT EO VRB 1 PS XO] bcdcfn. VRT,VRB,PS
+
+// Decimal Convert From Zoned VX-form p353
+[PO VRT EO VRB 1 PS XO] bcdcfz. VRT,VRB,PS
+
+// Decimal Convert To National VX-form p354
+[PO VRT EO VRB 1 / XO] bcdctn. VRT,VRB
+
+// Decimal Convert To Zoned VX-form p355
+[PO VRT EO VRB 1 PS XO] bcdctz. VRT,VRB,PS
+
+// Decimal Convert From Signed Quadword VX-form p356
+[PO VRT EO VRB 1 PS XO] bcdcfsq. VRT,VRB,PS
+
+// Decimal Convert To Signed Quadword VX-form p356
+[PO VRT EO VRB 1 / XO] bcdctsq. VRT,VRB
+
+// 6.17.3 Decimal Integer Sign Manipulation Instructions
+
+// Decimal Copy Sign VX-form p358
+[PO VRT VRA VRB XO] bcdcpsgn. VRT,VRA,VRB
+
+// Decimal Set Sign VX-form p358
+[PO VRT EO VRB 1 PS XO] bcdsetsgn. VRT,VRB,PS
+
+// Decimal Shift VX-form p359
+[PO VRT VRA VRB 1 PS XO] bcds. VRT,VRA,VRB,PS
+
+// Decimal Unsigned Shift VX-form p360
+[PO VRT VRA VRB 1 / XO] bcdus. VRT,VRA,VRB
+
+// Decimal Shift and Round VX-form p361
+[PO VRT VRA VRB 1 PS XO] bcdsr. VRT,VRA,VRB,PS
+
+// 6.17.5 Decimal Integer Truncate Instructions
+
+// Decimal Truncate VX-form p362
+[PO VRT VRA VRB 1 PS XO] bcdtrunc. VRT,VRA,VRB,PS
+
+// Decimal Unsigned Truncate VX-form p363
+[PO VRT VRA VRB 1 / XO] bcdutrunc. VRT,VRA,VRB
+
+// 3.3.10.1 Character-Type Compare Instructions
+
+// Compare Ranged Byte X-form p87
+[PO BF / L RA RB XO /] cmprb BF,L,RA,RB
+
+// Compare Equal Byte X-form p88
+[PO BF // RA RB XO /] cmpeqb BF,RA,RB
+
+// 3.3.13 Fixed-Point Logical Instructions
+
+// Count Trailing Zeros Word X-form p95
+[PO RS RA /// XO Rc] cnttzw(.) RA,RS
+
+// 3.3.13.1 64-bit Fixed-Point Logical Instructions
+
+// Count Trailing Zeros Doubleword X-form p98
+[PO RS RA /// XO Rc] cnttzd(.) RA,RS
+
+// 4.4 Copy-Paste Facility
+
+// Copy X-form p858
+[PO /// L RA RB XO /] copy RA,RB,L
+ copy_first = copy RA, RB, 1
+// CP_Abort p860
+[PO /// /// /// XO /] cp_abort
+
+// Paste p859
+[PO /// L RA RB XO Rc] paste(.) RA,RB,L
+ paste_last = paste RA,RB,1
+
+// 3.3.9 Fixed-Point Arithmetic Instructions
+
+// Deliver A Random Number X-form p79
+[PO RT /// L /// XO /] darn RT,L
+
+// Multiply-Add High Doubleword VA-form p81
+[PO RT RA RB RC XO] maddhd RT,RA,RB,RC
+
+// Multiply-Add High Doubleword Unsigned VA-form p81
+[PO RT RA RB RC XO] maddhdu RT,RA,RB,RC
+
+// Multiply-Add Low Doubleword VA-form p81
+[PO RT RA RB RC XO] maddld RT,RA,RB,RC
+
+// Modulo Signed Word X-form p76
+[PO RT RA RB XO /] modsw RT,RA,RB
+
+// Modulo Unsigned Word X-form p76
+[PO RT RA RB XO /] moduw RT,RA,RB
+
+// Modulo Signed Doubleword X-form p84
+[PO RT RA RB XO /] modsd RT,RA,RB
+
+// Modulo Unsigned Doubleword X-form p84
+[PO RT RA RB XO /] modud RT,RA,RB
+
+
+// DFP Test Significance Immediate [Quad] X-form p204
+[PO BF / UIM FRB XO /] dtstsfi BF,UIM,FRB
+[PO BF / UIM FRBp XO /] dtstsfiq BF,UIM,FRBp
+
+// 3.3.14.2.1 64-bit Fixed-Point Shift Instructions
+
+// Extend-Sign Word and Shift Left Immediate XS-form p109
+[PO RS RA sh XO sh Rc] extswsli(.) RA,RS,SH
+
+// 4.5.1 Load Atomic
+
+// Load Word Atomic X-form p864
+[PO RT RA FC XO /] lwat RT,RA,FC
+
+// Load Doubleword Atomic X-form p864
+[PO RT RA FC XO /] ldat RT,RA,FC
+
+// 4.5.2 Store Atomic
+
+// Store Word Atomic X-form p866
+[PO RS RA FC XO /] stwat RS,RA,FC
+
+// Store Doubleword Atomic X-form p866
+[PO RS RA FC XO /] stdat RS,RA,FC
+
+// 3.3.2.1 64-bit Fixed-Point Load Instructions
+
+// Load Doubleword Monitored Indexed X-form p54
+[PO RT RA RB XO /] ldmx RT,RA,RB
+
+// 3.3.16 Move To/From Vector-Scalar Register Instructions
+
+// Move From VSR Lower Doubleword XX1-form p111
+[PO S RA /// XO SX] mfvsrld RA,XS
+
+// Move To VSR Double Doubleword XX1-form p114
+[PO T RA RB XO TX] mtvsrdd XT,RA,RB
+
+// Move To VSR Word & Splat XX1-form p115
+[PO T RA /// XO TX] mtvsrws XT,RA
+
+// Move to CR from XER Extended X-form p119
+[PO BF // /// /// XO /] mcrxrx BF
+
+// Set Boolean X-form p121
+[PO RT BFA // /// XO /] setb RT,BFA
+
+// Message Synchronize X-form p1126
+[PO /// /// /// XO /] msgsync
+
+// SLB Invalidate Entry Global X-form p1026
+[PO RS /// RB XO /] slbieg RS,RB
+
+// SLB Synchronize X-form p1031
+[PO /// /// /// XO /] slbsync
+
+// 3.3.2.1 Power-Saving Mode Instruction
+
+// stop XL-form p957
+[PO /// /// /// XO /] stop
+
+// 4.6.4 Wait Instruction
+// Wait X-form p880
+[PO /// WC /// /// XO /] wait
+
+// Unknown Instructions:
+urfid
+- gcc's implementation:
+ {"urfid", XL(19,306), 0xffffffff, POWER9, PPCNONE, {0}},
+ (4c 00 02 64|64 02 00 4c) urfid
+
+rmieg
+- gcc's implementation:
+ {"rmieg", X(31,882), XRTRA_MASK, POWER9, PPCNONE, {RB}},
+ (7c 00 f6 e4|e4 f6 00 7c) rmieg r30
+
+//------------------------------------------------------------------------------
+//. Done:
+//------------------------------------------------------------------------------
+
+//======================================
+"vsx instructions"
+
+//--------------------------------------
+"7.6.1.2.1 VSX Scalar Move Instructions"
+// VSX Scalar Quad-Precision Move Instructions
+
+// VSX Scalar Copy Sign Quad-Precision X-form p.553
+[PO VRT VRA VRB XO /] xscpsgnqp
+
+// VSX Scalar Absolute Quad-Precision X-form 531
+// VSX Scalar Negate Quad-Precision X-form 627
+// VSX Scalar Negative Absolute Quad-Precision X-form 626
+[PO VRT XO VRB XO /] xsabsqp xsnegqp xsnabsqp
+
+//--------------------------------------
+"7.6.1.3 VSX Floating-Point Arithmetic Instructions"
+
+// VSX Scalar Quad-Precision Elementary Arithmetic
+
+// VSX Scalar Add Quad-Precision [using round to Odd] X-form 539
+// VSX Scalar Divide Quad-Precision [using round to Odd] X-form 584
+// VSX Scalar Multiply Quad-Precision [using round to Odd] X-form 622
+[PO VRT VRA VRB XO RO] xsaddqp xsaddqpo xsdivqp xsdivqpo xsmulqp xsmulqpo
+
+// VSX Scalar Square Root Quad-Precision [using round to Odd] X-form 662
+// VSX Scalar Subtract Quad-Precision [using round to Odd] X-form 667
+ xssubqp xssubqpo
+
+[PO VRT XO VRB XO RO] xssqrtqp xssqrtqpo
+
+// VSX Scalar Quad-Precision Multiply-Add Arithmetic Instructions
+
+// VSX Scalar Multiply-Add Quad-Precision [using round to Odd] X-form 596
+// VSX Scalar Multiply-Subtract Quad-Precision [using round to Odd] X-form 617
+// VSX Scalar Negative Multiply-Add Quad-Precision [using round to Odd] X-form 636
+// VSX Scalar Negative Multiply-Subtract Quad-Precision [using round to Odd]
+// X-form 645
+[PO VRT VRA VRB XO RO] xsmaddqp xsmaddqpo xsmsubqp xsmsubqpo
+ xsnmaddqp xsnmaddqpo xsnmsubqp xsnmsubqpo
+
+22
+//--------------------------------------
+"7.6.1.4 VSX Floating-Point Compare Instructions"
+
+// VSX Scalar Quad-Precision Compare Instructions
+
+// VSX Scalar Compare Ordered Quad-Precision X-form 549
+// VSX Scalar Compare Unordered Quad-Precision X-form 552
+[PO BF // VRA VRB XO /] xscmpoqp xscmpuqp
+
+"7.6.1.8 VSX Scalar Floating-Point Support Instructions"
+// VSX Scalar Compare Exponents Quad-Precision X-form p. 541 542
+[PO BF // A B XO AX BX /] xscmpexpdp
+[PO BF // VRA VRB XO /] xscmpexpqp
+
+// VSX Scalar Compare DP, XX3-form, p.543 544 545
+// VSX Scalar Compare Equal Double-Precision,
+[PO T A B XO AX BX TX] xscmpeqdp xscmpgedp xscmpgtdp xscmpnedp
+
+// VSX Vector Compare Not Equal Double-Precision XX3-form 691
+[PO T A B Rc XO AX BX TX] xvcmpnedp xvcmpnedp. xvcmpnesp xvcmpnesp.
+
+//--------------------------------------
+"7.6.1.5 VSX FP-FP Conversion Instructions"
+// VSX Scalar Quad-Precision Floating-Point Conversion Instructions
+
+// VSX Scalar round & Convert Quad-Precision format to Double-Precision format
+// [using round to Odd] X-form 567
+[PO VRT XO VRB XO /] xscvqpdp xscvqpdpo (actually [PO VRT XO VRB XO RO])
+[PO VRT XO VRB XO /] xscvdpqp
+
+// VSX Scalar Quad-Precision Convert to Integer Instructions
+
+// VSX Scalar truncate & Convert Quad-Precision format to Signed Doubleword format
+// 568 570 572 574
+[PO VRT XO VRB XO /] xscvqpsdz xscvqpswz xscvqpudz xscvqpuwz
+576 = 580 xscvsdqp xscvudqp
+
+"7.6.1.7 VSX Round to Floating-Point Integer Instructions"
+// VSX Scalar round & Convert Double-Precision format to Half-Precision format
+// XX2-form 554 566
+[PO T XO B XO BX TX] xscvdphp xscvhpdp
+
+// VSX Vector Convert Half-Precision format to Single-Precision format
+// XX2-form 703 705
+[PO T XO B XO BX TX] xvcvhpsp xvcvsphp
+
+// VSX Scalar Round to Quad-Precision Integer [with Inexact] Z23-form 654
+[PO VRT /// R VRB RMC XO EX] xsrqpi xsrqpix
+
+// VSX Scalar Round Quad-Precision to Double-Extended Precision Z23-form 656
+[PO VRT /// R VRB RMC XO /] xsrqpxp
+def XSRQPXP : Z23Form_1<63, 37,
+ (outs vrrc:$vT), (ins u5imm:$R, vrrc:$vB, u2imm:$RMC),
+ "xsrqpxp $vT, $R, $vB, $RMC"), IIC_VecFP, []>;
+
+27~28
+//--------------------------------------
+// VSX Scalar Insert Exponent Double-Precision X-form 588
+// VSX Scalar Insert Exponent Quad-Precision X-form 589
+[PO VT rA rB XO /] xsiexpdp
+[PO VRT VRA VRB XO /] xsiexpqp
+
+// VSX Vector Insert Exponent Double-Precision XX3-form 722
+[PO T A B XO AX BX TX] xviexpdp xviexpsp
+
+// VSX Vector Extract Unsigned Word XX2-form 788
+// VSX Vector Insert Word XX2-form
+[PO T / UIM B XO BX TX] xxextractuw xxinsertw
+
+// VSX Scalar Extract Exponent Double-Precision XX2-form 676
+[PO BF DCMX B XO BX /]
+[PO T XO B XO BX /] xsxexpdp xsxsigdp
+// X-form
+[PO VRT XO VRB XO /] xsxexpqp xsxsigqp
+
+// VSX Vector Extract Exponent Double-Precision XX2-form 784
+[PO T XO B XO BX TX] xvxexpdp xvxexpsp
+
+// VSX Vector Extract Significand Double-Precision XX2-form 785
+[PO T XO B XO BX TX] xvxsigdp xvxsigsp
+
+//--------------------------------------
+// VSX Scalar Test Data Class Double-Precision XX2-form p673
+// VSX Scalar Test Data Class Quad-Precision X-form 674
+// VSX Scalar Test Data Class Single-Precision XX2-form 675
+[PO BF DCMX B XO BX /] xststdcdp xststdcsp
+[PO BF DCMX VRB XO /] xststdcqp
+
+// VSX Vector Test Data Class Double-Precision XX2-form 782 783
+[PO T dx B XO dc XO dm BX TX] xvtstdcdp xvtstdcsp
+
+//--------------------------------------
+// VSX Scalar Maximum Type-C Double-Precision XX3-form 601 ~ 609
+[PO T A B XO AX BX TX] xsmaxcdp xsmaxjdp xsmincdp xsminjdp
+
+//--------------------------------------
+// VSX Vector Byte-Reverse Doubleword XX2-form 786 787
+[PO T XO B XO BX TX] xxbrd xxbrh xxbrq xxbrw
+
+// VSX Vector Permute XX3-form 794
+[PO T A B XO AX BX TX] xxperm xxpermr
+
+// VSX Vector Splat Immediate Byte 796 x-form
+[PO T EO IMM8 XO TX] xxspltib <= signed or unsigned?
+
+30
+//--------------------------------------
+// Load VSX Vector DQ-form 511
+[PO T RA DQ TX XO] lxv
+
+// Store VSX Vector DQ-form 526
+[PO S RA DQ SX XO] stxv
+
+// Load VSX Scalar Doubleword DS-form 499
+// Load VSX Scalar Single DS-form 504
+[PO VRT RA DS XO] lxsd lxssp
+
+// Store VSX Scalar Doubleword DS-form 517
+// Store VSX Scalar Single DS-form 520
+[PO VRT RA DS XO] stxsd stxssp
+
+
+// Load VSX Vector Indexed X-form 511
+// Load VSX Scalar as Integer Byte & Zero Indexed X-form 501
+// Load VSX Vector Byte*16 Indexed X-form 506
+// Load VSX Vector with Length X-form 508
+// Load VSX Vector Left-justified with Length X-form 510
+// Load VSX Vector Halfword*8 Indexed X-form 514
+// Load VSX Vector Word & Splat Indexed X-form 516
+[PO T RA RB XO TX] lxvx lxsibzx lxsihzx lxvb16x lxvl lxvll lxvh8x lxvwsx
+
+// Store VSX Scalar as Integer Byte Indexed X-form 518
+// Store VSX Scalar as Integer Halfword Indexed X-form 518
+// Store VSX Vector Byte*16 Indexed X-form 522
+// Store VSX Vector Halfword*8 Indexed X-form 524
+// Store VSX Vector with Length X-form 526
+// Store VSX Vector Left-justified with Length X-form 528
+// Store VSX Vector Indexed X-form 529
+[PO S RA RB XO SX] stxsibx stxsihx stxvb16x stxvh8x stxvl stxvll stxvx
+
+21
+
+//--------------------------------------
+". vector instructions"
+
+[1] PowerISA-v3.0 p.933 - Table 1, and Chapter 6. Vector Facility (altivec)
+[2] https://sourceware.org/ml/binutils/2015-11/msg00071.html
+
+//--------------------------------------
+New patch:
+// vector bit, p.367, 6.16 Vector Bit Permute Instruction
+[PO VRT VRA VRB XO] vbpermd, (existing: vbpermq)
+
+// vector permute, p.280
+[PO VRT VRA VRB VRC XO] vpermr
+
+// vector rotate left, p.341
+[PO VRT VRA VRB XO] vrlwnm vrlwmi vrldnm vrldmi
+
+// vector shift, p.285
+[PO VRT VRA VRB XO] vslv vsrv
+
+// vector multiply-by-10, p.375
+[PO VRT VRA /// XO] vmul10cuq vmul10uq
+[PO VRT VRA VRB XO] vmul10ecuq vmul10euq
+
+12
+//--------------------------------------
+http://reviews.llvm.org/D15887 + ext + neg + prty - vbpermd
+// vector count leading/trailing zero
+. new vx-form: p.31, 1.6.14 VX-FORM
+[PO RT EO VRB XO] vclzlsbb vctzlsbb (p.363)
+
+// Vector Count Trailing Zeros Instructions, 362
+[PO VRT EO VRB XO] vctzb vctzh vctzw vctzd (v16i8 v8i16 v4i32 v2i64)
+
+// vector extend sign (p.314)
+[PO VRT EO VRB XO] vextsb2w vextsh2w vextsb2d vextsh2d vextsw2d
+
+// vector negate, p.313
+[PO VRT EO VRB XO] vnegd vnegw
+
+// vector parity, p.335
+[PO VRT EO VRB XO] vprtybd vprtybq vprtybw
+
+16
+//--------------------------------------
+// vector compare, p.330
+[PO VRT VRA VRB RC XO] vcmpneb vcmpneb. vcmpneh vcmpneh. vcmpnew vcmpnew.
+ vcmpnezb vcmpnezb. vcmpnezh vcmpnezh. vcmpnezw vcmpnezw.
+12
+//--------------------------------------
+http://reviews.llvm.org/D15917 + insert
+// vector extract (p.287) ref: vspltb (v2.07, p.227)
+// vector insert, p.288
+[PO VRT / UIM VRB XO] vinsertb vinsertd vinserth vinsertw
+
+// Vector Extract Unsigned
+[PO VRT / UIM VRB XO] vextractub vextractuh vextractuw vextractd
+
+// p.364: Vector Extract Unsigned Left/Right-Indexed
+[PO RT RA VRB XO] vextublx vextubrx vextuhlx vextuhrx vextuwlx vextuwrx
+
+14
diff --git a/lib/Target/README.txt b/lib/Target/README.txt
index 7e9888cc13e8..ab9a025930fe 100644
--- a/lib/Target/README.txt
+++ b/lib/Target/README.txt
@@ -2081,7 +2081,7 @@ struct x testfunc() {
}
We currently compile this to:
-$ clang t.c -S -o - -O0 -emit-llvm | opt -scalarrepl -S
+$ clang t.c -S -o - -O0 -emit-llvm | opt -sroa -S
%struct.x = type { i8, [4 x i32] }
diff --git a/lib/Target/Sparc/AsmParser/Makefile b/lib/Target/Sparc/AsmParser/Makefile
deleted file mode 100644
index 46b3e45f2bed..000000000000
--- a/lib/Target/Sparc/AsmParser/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/Sparc/AsmParser/Makefile ------------------*- Makefile-*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMSparcAsmParser
-
-# Hack: we need to include 'main' Sparc target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp b/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
index a55274744fd1..b2003b8f101b 100644
--- a/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
+++ b/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
@@ -7,18 +7,18 @@
//
//===----------------------------------------------------------------------===//
-#include "MCTargetDesc/SparcMCTargetDesc.h"
#include "MCTargetDesc/SparcMCExpr.h"
+#include "MCTargetDesc/SparcMCTargetDesc.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
@@ -150,6 +150,22 @@ public:
Sparc::L0_L1, Sparc::L2_L3, Sparc::L4_L5, Sparc::L6_L7,
Sparc::I0_I1, Sparc::I2_I3, Sparc::I4_I5, Sparc::I6_I7};
+ static const MCPhysReg CoprocRegs[32] = {
+ Sparc::C0, Sparc::C1, Sparc::C2, Sparc::C3,
+ Sparc::C4, Sparc::C5, Sparc::C6, Sparc::C7,
+ Sparc::C8, Sparc::C9, Sparc::C10, Sparc::C11,
+ Sparc::C12, Sparc::C13, Sparc::C14, Sparc::C15,
+ Sparc::C16, Sparc::C17, Sparc::C18, Sparc::C19,
+ Sparc::C20, Sparc::C21, Sparc::C22, Sparc::C23,
+ Sparc::C24, Sparc::C25, Sparc::C26, Sparc::C27,
+ Sparc::C28, Sparc::C29, Sparc::C30, Sparc::C31 };
+
+ static const MCPhysReg CoprocPairRegs[] = {
+ Sparc::C0_C1, Sparc::C2_C3, Sparc::C4_C5, Sparc::C6_C7,
+ Sparc::C8_C9, Sparc::C10_C11, Sparc::C12_C13, Sparc::C14_C15,
+ Sparc::C16_C17, Sparc::C18_C19, Sparc::C20_C21, Sparc::C22_C23,
+ Sparc::C24_C25, Sparc::C26_C27, Sparc::C28_C29, Sparc::C30_C31};
+
/// SparcOperand - Instances of this class represent a parsed Sparc machine
/// instruction.
class SparcOperand : public MCParsedAsmOperand {
@@ -161,6 +177,8 @@ public:
rk_FloatReg,
rk_DoubleReg,
rk_QuadReg,
+ rk_CoprocReg,
+ rk_CoprocPairReg,
rk_Special,
};
@@ -224,6 +242,9 @@ public:
|| Reg.Kind == rk_DoubleReg));
}
+ bool isCoprocReg() const {
+ return (Kind == k_Register && Reg.Kind == rk_CoprocReg);
+ }
StringRef getToken() const {
assert(Kind == k_Token && "Invalid access!");
@@ -398,6 +419,19 @@ public:
return true;
}
+ static bool MorphToCoprocPairReg(SparcOperand &Op) {
+ unsigned Reg = Op.getReg();
+ assert(Op.Reg.Kind == rk_CoprocReg);
+ unsigned regIdx = 32;
+ if (Reg >= Sparc::C0 && Reg <= Sparc::C31)
+ regIdx = Reg - Sparc::C0;
+ if (regIdx % 2 || regIdx > 31)
+ return false;
+ Op.Reg.RegNum = CoprocPairRegs[regIdx / 2];
+ Op.Reg.Kind = rk_CoprocPairReg;
+ return true;
+ }
+
static std::unique_ptr<SparcOperand>
MorphToMEMrr(unsigned Base, std::unique_ptr<SparcOperand> Op) {
unsigned offsetReg = Op->getReg();
@@ -602,8 +636,12 @@ bool SparcAsmParser::ParseInstruction(ParseInstructionInfo &Info,
return Error(Loc, "unexpected token");
}
- while (getLexer().is(AsmToken::Comma)) {
- Parser.Lex(); // Eat the comma.
+ while (getLexer().is(AsmToken::Comma) || getLexer().is(AsmToken::Plus)) {
+ if (getLexer().is(AsmToken::Plus)) {
+ // Plus tokens are significant in software_traps (p83, sparcv8.pdf).
+ // We must capture them.
+ Operands.push_back(SparcOperand::CreateToken("+", Parser.getTok().getLoc()));
+ }
+ Parser.Lex(); // Eat the comma or plus.
// Parse and remember the operand.
if (parseOperand(Operands, Name) != MatchOperand_Success) {
SMLoc Loc = getLexer().getLoc();
@@ -646,6 +684,12 @@ ParseDirective(AsmToken DirectiveID)
Parser.eatToEndOfStatement();
return false;
}
+ if (IDVal == ".proc") {
+ // For compatibility, ignore this directive.
+ // (It's supposed to be an "optimization" in the Sun assembler)
+ Parser.eatToEndOfStatement();
+ return false;
+ }
// Let the MC layer to handle other directives.
return true;
@@ -728,7 +772,7 @@ SparcAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
Parser.getTok().getLoc()));
Parser.Lex(); // Eat the [
- if (Mnemonic == "cas" || Mnemonic == "casx") {
+ if (Mnemonic == "cas" || Mnemonic == "casx" || Mnemonic == "casa") {
SMLoc S = Parser.getTok().getLoc();
if (getLexer().getKind() != AsmToken::Percent)
return MatchOperand_NoMatch;
@@ -809,6 +853,15 @@ SparcAsmParser::parseSparcAsmOperand(std::unique_ptr<SparcOperand> &Op,
case Sparc::FSR:
Op = SparcOperand::CreateToken("%fsr", S);
break;
+ case Sparc::FQ:
+ Op = SparcOperand::CreateToken("%fq", S);
+ break;
+ case Sparc::CPSR:
+ Op = SparcOperand::CreateToken("%csr", S);
+ break;
+ case Sparc::CPQ:
+ Op = SparcOperand::CreateToken("%cq", S);
+ break;
case Sparc::WIM:
Op = SparcOperand::CreateToken("%wim", S);
break;
@@ -846,8 +899,7 @@ SparcAsmParser::parseSparcAsmOperand(std::unique_ptr<SparcOperand> &Op,
const MCExpr *Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None,
getContext());
- if (isCall &&
- getContext().getObjectFileInfo()->getRelocM() == Reloc::PIC_)
+ if (isCall && getContext().getObjectFileInfo()->isPositionIndependent())
Res = SparcMCExpr::create(SparcMCExpr::VK_Sparc_WPLT30, Res,
getContext());
Op = SparcOperand::CreateImm(Res, S, E);
@@ -941,6 +993,24 @@ bool SparcAsmParser::matchRegisterName(const AsmToken &Tok,
return true;
}
+ if (name.equals("fq")) {
+ RegNo = Sparc::FQ;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
+ if (name.equals("csr")) {
+ RegNo = Sparc::CPSR;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
+ if (name.equals("cq")) {
+ RegNo = Sparc::CPQ;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
if (name.equals("wim")) {
RegNo = Sparc::WIM;
RegKind = SparcOperand::rk_Special;
@@ -1025,6 +1095,15 @@ bool SparcAsmParser::matchRegisterName(const AsmToken &Tok,
return true;
}
+ // %c0 - %c31
+ if (name.substr(0, 1).equals_lower("c")
+ && !name.substr(1).getAsInteger(10, intVal)
+ && intVal < 32) {
+ RegNo = CoprocRegs[intVal];
+ RegKind = SparcOperand::rk_CoprocReg;
+ return true;
+ }
+
if (name.equals("tpc")) {
RegNo = Sparc::TPC;
RegKind = SparcOperand::rk_Special;
@@ -1141,7 +1220,7 @@ SparcAsmParser::adjustPICRelocation(SparcMCExpr::VariantKind VK,
// actually a %pc10 or %pc22 relocation. Otherwise, they are interpreted
// as %got10 or %got22 relocation.
- if (getContext().getObjectFileInfo()->getRelocM() == Reloc::PIC_) {
+ if (getContext().getObjectFileInfo()->isPositionIndependent()) {
switch(VK) {
default: break;
case SparcMCExpr::VK_Sparc_LO:
@@ -1215,5 +1294,9 @@ unsigned SparcAsmParser::validateTargetOperandClass(MCParsedAsmOperand &GOp,
if (SparcOperand::MorphToIntPairReg(Op))
return MCTargetAsmParser::Match_Success;
}
+ if (Op.isCoprocReg() && Kind == MCK_CoprocPair) {
+ if (SparcOperand::MorphToCoprocPairReg(Op))
+ return MCTargetAsmParser::Match_Success;
+ }
return Match_InvalidOperand;
}
diff --git a/lib/Target/Sparc/CMakeLists.txt b/lib/Target/Sparc/CMakeLists.txt
index 5b7bfdd28020..312215cf6cde 100644
--- a/lib/Target/Sparc/CMakeLists.txt
+++ b/lib/Target/Sparc/CMakeLists.txt
@@ -13,6 +13,7 @@ add_public_tablegen_target(SparcCommonTableGen)
add_llvm_target(SparcCodeGen
DelaySlotFiller.cpp
+ LeonPasses.cpp
SparcAsmPrinter.cpp
SparcInstrInfo.cpp
SparcISelDAGToDAG.cpp
diff --git a/lib/Target/Sparc/DelaySlotFiller.cpp b/lib/Target/Sparc/DelaySlotFiller.cpp
index c689b7f7201e..944f3551279e 100644
--- a/lib/Target/Sparc/DelaySlotFiller.cpp
+++ b/lib/Target/Sparc/DelaySlotFiller.cpp
@@ -38,14 +38,10 @@ static cl::opt<bool> DisableDelaySlotFiller(
namespace {
struct Filler : public MachineFunctionPass {
- /// Target machine description which we query for reg. names, data
- /// layout, etc.
- ///
- TargetMachine &TM;
const SparcSubtarget *Subtarget;
static char ID;
- Filler(TargetMachine &tm) : MachineFunctionPass(ID), TM(tm) {}
+ Filler() : MachineFunctionPass(ID) {}
const char *getPassName() const override {
return "SPARC Delay Slot Filler";
@@ -66,6 +62,11 @@ namespace {
return Changed;
}
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
void insertCallDefsUses(MachineBasicBlock::iterator MI,
SmallSet<unsigned, 32>& RegDefs,
SmallSet<unsigned, 32>& RegUses);
@@ -98,7 +99,7 @@ namespace {
/// slots in Sparc MachineFunctions
///
FunctionPass *llvm::createSparcDelaySlotFillerPass(TargetMachine &tm) {
- return new Filler(tm);
+ return new Filler;
}
@@ -268,6 +269,22 @@ bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
return true;
}
}
+
+ unsigned Opcode = candidate->getOpcode();
+ // LD and LDD may have NOPs inserted afterwards in the case of some LEON
+ // processors, so we can't use the delay slot if this feature is switched on.
+ if (Subtarget->insertNOPLoad() &&
+ Opcode >= SP::LDDArr && Opcode <= SP::LDrr)
+ return true;
+
+ // Same as above for FDIV and FSQRT on some LEON processors.
+ if (Subtarget->fixAllFDIVSQRT() &&
+ Opcode >= SP::FDIVD && Opcode <= SP::FSQRTD)
+ return true;
+
return false;
}
@@ -290,12 +307,12 @@ void Filler::insertCallDefsUses(MachineBasicBlock::iterator MI,
assert(Reg.isUse() && "CALL first operand is not a use.");
RegUses.insert(Reg.getReg());
- const MachineOperand &RegOrImm = MI->getOperand(1);
- if (RegOrImm.isImm())
+ const MachineOperand &Operand1 = MI->getOperand(1);
+ if (Operand1.isImm() || Operand1.isGlobal())
break;
- assert(RegOrImm.isReg() && "CALLrr second operand is not a register.");
- assert(RegOrImm.isUse() && "CALLrr second operand is not a use.");
- RegUses.insert(RegOrImm.getReg());
+ assert(Operand1.isReg() && "CALLrr second operand is not a register.");
+ assert(Operand1.isUse() && "CALLrr second operand is not a use.");
+ RegUses.insert(Operand1.getReg());
break;
}
}
diff --git a/lib/Target/Sparc/Disassembler/Makefile b/lib/Target/Sparc/Disassembler/Makefile
deleted file mode 100644
index bc17ddc48c7d..000000000000
--- a/lib/Target/Sparc/Disassembler/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/Sparc/Disassembler/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMSparcDisassembler
-
-# Hack: we need to include 'main' Sparc target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp b/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
index 51751ec511c9..1dea379e14ec 100644
--- a/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
+++ b/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
@@ -14,7 +14,7 @@
#include "Sparc.h"
#include "SparcRegisterInfo.h"
#include "SparcSubtarget.h"
-#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCContext.h"
@@ -130,6 +130,25 @@ static const uint16_t IntPairDecoderTable[] = {
SP::I0_I1, SP::I2_I3, SP::I4_I5, SP::I6_I7,
};
+static const unsigned CPRegDecoderTable[] = {
+ SP::C0, SP::C1, SP::C2, SP::C3,
+ SP::C4, SP::C5, SP::C6, SP::C7,
+ SP::C8, SP::C9, SP::C10, SP::C11,
+ SP::C12, SP::C13, SP::C14, SP::C15,
+ SP::C16, SP::C17, SP::C18, SP::C19,
+ SP::C20, SP::C21, SP::C22, SP::C23,
+ SP::C24, SP::C25, SP::C26, SP::C27,
+ SP::C28, SP::C29, SP::C30, SP::C31
+};
+
+
+static const uint16_t CPPairDecoderTable[] = {
+ SP::C0_C1, SP::C2_C3, SP::C4_C5, SP::C6_C7,
+ SP::C8_C9, SP::C10_C11, SP::C12_C13, SP::C14_C15,
+ SP::C16_C17, SP::C18_C19, SP::C20_C21, SP::C22_C23,
+ SP::C24_C25, SP::C26_C27, SP::C28_C29, SP::C30_C31
+};
+
static DecodeStatus DecodeIntRegsRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
@@ -191,6 +210,17 @@ static DecodeStatus DecodeQFPRegsRegisterClass(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeCPRegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+ unsigned Reg = CPRegDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeFCCRegsRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder) {
@@ -233,6 +263,16 @@ static DecodeStatus DecodeIntPairRegisterClass(MCInst &Inst, unsigned RegNo,
return S;
}
+static DecodeStatus DecodeCPPairRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address, const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned RegisterPair = CPPairDecoderTable[RegNo/2];
+ Inst.addOperand(MCOperand::createReg(RegisterPair));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeLoadInt(MCInst &Inst, unsigned insn, uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeLoadIntPair(MCInst &Inst, unsigned insn, uint64_t Address,
@@ -243,6 +283,10 @@ static DecodeStatus DecodeLoadDFP(MCInst &Inst, unsigned insn, uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeLoadQFP(MCInst &Inst, unsigned insn, uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeLoadCP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeLoadCPPair(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
static DecodeStatus DecodeStoreInt(MCInst &Inst, unsigned insn,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeStoreIntPair(MCInst &Inst, unsigned insn,
@@ -253,6 +297,10 @@ static DecodeStatus DecodeStoreDFP(MCInst &Inst, unsigned insn,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeStoreQFP(MCInst &Inst, unsigned insn,
uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeStoreCP(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeStoreCPPair(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
static DecodeStatus DecodeCall(MCInst &Inst, unsigned insn,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeSIMM13(MCInst &Inst, unsigned insn,
@@ -263,6 +311,8 @@ static DecodeStatus DecodeReturn(MCInst &MI, unsigned insn, uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeSWAP(MCInst &Inst, unsigned insn, uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeTRAP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
#include "SparcGenDisassemblerTables.inc"
@@ -298,6 +348,18 @@ DecodeStatus SparcDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
return MCDisassembler::Fail;
// Calling the auto-generated decoder function.
+
+ if (STI.getFeatureBits()[Sparc::FeatureV9]) {
+ Result = decodeInstruction(DecoderTableSparcV932, Instr, Insn, Address,
+ this, STI);
+ } else {
+ Result = decodeInstruction(DecoderTableSparcV832, Instr, Insn, Address,
+ this, STI);
+ }
+ if (Result != MCDisassembler::Fail)
+ return Result;
+
Result =
decodeInstruction(DecoderTableSparc32, Instr, Insn, Address, this, STI);
@@ -390,6 +452,18 @@ static DecodeStatus DecodeLoadQFP(MCInst &Inst, unsigned insn, uint64_t Address,
DecodeQFPRegsRegisterClass);
}
+static DecodeStatus DecodeLoadCP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, true,
+ DecodeCPRegsRegisterClass);
+}
+
+static DecodeStatus DecodeLoadCPPair(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, true,
+ DecodeCPPairRegisterClass);
+}
+
static DecodeStatus DecodeStoreInt(MCInst &Inst, unsigned insn,
uint64_t Address, const void *Decoder) {
return DecodeMem(Inst, insn, Address, Decoder, false,
@@ -420,6 +494,18 @@ static DecodeStatus DecodeStoreQFP(MCInst &Inst, unsigned insn,
DecodeQFPRegsRegisterClass);
}
+static DecodeStatus DecodeStoreCP(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, false,
+ DecodeCPRegsRegisterClass);
+}
+
+static DecodeStatus DecodeStoreCPPair(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, false,
+ DecodeCPPairRegisterClass);
+}
+
static bool tryAddingSymbolicOperand(int64_t Value, bool isBranch,
uint64_t Address, uint64_t Offset,
uint64_t Width, MCInst &MI,
@@ -547,3 +633,36 @@ static DecodeStatus DecodeSWAP(MCInst &MI, unsigned insn, uint64_t Address,
return MCDisassembler::Success;
}
+
+static DecodeStatus DecodeTRAP(MCInst &MI, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+
+ unsigned rs1 = fieldFromInstruction(insn, 14, 5);
+ unsigned isImm = fieldFromInstruction(insn, 13, 1);
+ unsigned cc = fieldFromInstruction(insn, 25, 4);
+ unsigned rs2 = 0;
+ unsigned imm7 = 0;
+ if (isImm)
+ imm7 = fieldFromInstruction(insn, 0, 7);
+ else
+ rs2 = fieldFromInstruction(insn, 0, 5);
+
+ // Decode RS1.
+ DecodeStatus status = DecodeIntRegsRegisterClass(MI, rs1, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+
+ // Decode RS2 | IMM7.
+ if (isImm)
+ MI.addOperand(MCOperand::createImm(imm7));
+ else {
+ status = DecodeIntRegsRegisterClass(MI, rs2, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+ }
+
+ // Decode CC
+ MI.addOperand(MCOperand::createImm(cc));
+
+ return MCDisassembler::Success;
+}
diff --git a/lib/Target/Sparc/InstPrinter/Makefile b/lib/Target/Sparc/InstPrinter/Makefile
deleted file mode 100644
index 2dabd82965f4..000000000000
--- a/lib/Target/Sparc/InstPrinter/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/Sparc/InstPrinter/Makefile ---------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMSparcAsmPrinter
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp b/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp
index 5d714fe4da92..4981deae6af6 100644
--- a/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp
+++ b/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp
@@ -16,6 +16,7 @@
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -115,8 +116,21 @@ void SparcInstPrinter::printOperand(const MCInst *MI, int opNum,
}
if (MO.isImm()) {
- O << (int)MO.getImm();
- return;
+ switch (MI->getOpcode()) {
+ default:
+ O << (int)MO.getImm();
+ return;
+
+ case SP::TICCri: // Fall through
+ case SP::TICCrr: // Fall through
+ case SP::TRAPri: // Fall through
+ case SP::TRAPrr: // Fall through
+ case SP::TXCCri: // Fall through
+ case SP::TXCCrr: // Fall through
+ // Only seven-bit values up to 127.
+ O << ((int) MO.getImm() & 0x7f);
+ return;
+ }
}
assert(MO.isExpr() && "Unknown operand kind in printOperand");
@@ -166,6 +180,11 @@ void SparcInstPrinter::printCCOperand(const MCInst *MI, int opNum,
// Make sure CC is a fp conditional flag.
CC = (CC < 16) ? (CC + 16) : CC;
break;
+ case SP::CBCOND:
+ case SP::CBCONDA:
+ // Make sure CC is a cp conditional flag.
+ CC = (CC < 32) ? (CC + 32) : CC;
+ break;
}
O << SPARCCondCodeToString((SPCC::CondCodes)CC);
}
diff --git a/lib/Target/Sparc/LeonFeatures.td b/lib/Target/Sparc/LeonFeatures.td
new file mode 100755
index 000000000000..63f8b33c80cf
--- /dev/null
+++ b/lib/Target/Sparc/LeonFeatures.td
@@ -0,0 +1,91 @@
+//===-- LeonFeatures.td - Describe the Leon Features -------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// CASA Support differs between LEON3-FT GR712RC and LEON3-FT UT699
+// We need to have the option to switch this on and off.
+//===----------------------------------------------------------------------===//
+
+// Support for the CASA instruction; for LEON3 and LEON4 processors.
+def LeonCASA : SubtargetFeature<
+ "hasleoncasa", "HasLeonCasa", "true",
+ "Enable CASA instruction for LEON3 and LEON4 processors">;
+
+//===----------------------------------------------------------------------===//
+// UMAC and SMAC support for LEON3 and LEON4 processors.
+//===----------------------------------------------------------------------===//
+
+// Support for the UMAC and SMAC instructions; for LEON3 and LEON4 processors.
+def UMACSMACSupport
+ : SubtargetFeature<"hasumacsmac", "HasUmacSmac", "true",
+ "Enable UMAC and SMAC for LEON3 and LEON4 processors">;
+
+//===----------------------------------------------------------------------===//
+// LEON Erratum fixes
+//===----------------------------------------------------------------------===//
+
+def ReplaceSDIV
+ : SubtargetFeature<
+ "replacesdiv", "PerformSDIVReplace", "true",
+ "AT697E erratum fix: Do not emit SDIV, emit SDIVCC instead">;
+
+def FixCALL
+ : SubtargetFeature<"fixcall", "FixCallImmediates", "true",
+ "AT697E erratum fix: Restrict the size of the immediate "
+ "operand of the CALL instruction to 20 bits">;
+
+def IgnoreZeroFlag
+ : SubtargetFeature<"ignrzeroflag", "IgnoreZeroFlag", "true",
+ "AT697E erratum fix: Do not rely on the zero bit flag "
+ "on a divide overflow for SDIVCC and UDIVCC">;
+
+def InsertNOPDoublePrecision
+ : SubtargetFeature<"insrtnopdblprcsn", "InsertNOPDoublePrecision", "true",
+ "LEON2 erratum fix: Insert a NOP before the double "
+ "precision floating point instruction">;
+
+def FixFSMULD : SubtargetFeature<"fixfsmuld", "FixFSMULD", "true",
+ "LEON3 erratum fix: Do not select FSMULD">;
+
+def ReplaceFMULS
+ : SubtargetFeature<"replacefmuls", "ReplaceFMULS", "true",
+ "LEON3 erratum fix: Replace FMULS instruction with a "
+ "routine using conversions/double precision operations "
+ "to replace FMULS">;
+
+def PreventRoundChange
+ : SubtargetFeature<"prvntroundchange", "PreventRoundChange", "true",
+ "LEON3 erratum fix: Prevent any rounding mode change "
+ "request: use only the round-to-nearest rounding mode">;
+
+def FixAllFDIVSQRT
+ : SubtargetFeature<"fixallfdivsqrt", "FixAllFDIVSQRT", "true",
+ "LEON3 erratum fix: Fix FDIVS/FDIVD/FSQRTS/FSQRTD "
+ "instructions with NOPs and floating-point store">;
+
+def InsertNOPLoad
+ : SubtargetFeature<"insertnopload", "InsertNOPLoad", "true",
+ "LEON3 erratum fix: Insert a NOP instruction after "
+ "every single-cycle load instruction when the next "
+ "instruction is another load/store instruction">;
+
+def FlushCacheLineSWAP
+ : SubtargetFeature<"flshcachelineswap", "FlushCacheLineSWAP", "true",
+ "LEON3 erratum fix: Flush cache line containing the "
+ "lock before performing any of the atomic instructions "
+ "SWAP and LDSTUB">;
+
+def InsertNOPsLoadStore
+ : SubtargetFeature<"insertnopsloadstore", "InsertNOPsLoadStore", "true",
+ "LEON3 erratum fix: Insert NOPs between "
+ "single-precision loads and the store, so the number of "
+ "instructions between is 4">;
diff --git a/lib/Target/Sparc/LeonPasses.cpp b/lib/Target/Sparc/LeonPasses.cpp
new file mode 100755
index 000000000000..5d0920892ff0
--- /dev/null
+++ b/lib/Target/Sparc/LeonPasses.cpp
@@ -0,0 +1,933 @@
+//===------ LeonPasses.cpp - Define passes specific to LEON ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+#include "LeonPasses.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+LEONMachineFunctionPass::LEONMachineFunctionPass(TargetMachine &tm, char &ID)
+ : MachineFunctionPass(ID) {}
+
+LEONMachineFunctionPass::LEONMachineFunctionPass(char &ID)
+ : MachineFunctionPass(ID) {}
+
+int LEONMachineFunctionPass::GetRegIndexForOperand(MachineInstr &MI,
+ int OperandIndex) {
+ if (MI.getNumOperands() > 0) {
+ if (OperandIndex == LAST_OPERAND) {
+ OperandIndex = MI.getNumOperands() - 1;
+ }
+
+ if (MI.getNumOperands() > (unsigned)OperandIndex &&
+ MI.getOperand(OperandIndex).isReg()) {
+ return (int)MI.getOperand(OperandIndex).getReg();
+ }
+ }
+
+ static int NotFoundIndex = -10;
+ // Return a different number each time to avoid any comparisons between the
+ // values returned.
+ NotFoundIndex -= 10;
+ return NotFoundIndex;
+}
+
+// finds a new free FP register
+// checks also the AllocatedRegisters vector
+int LEONMachineFunctionPass::getUnusedFPRegister(MachineRegisterInfo &MRI) {
+ for (int RegisterIndex = SP::F0; RegisterIndex <= SP::F31; ++RegisterIndex) {
+ if (!MRI.isPhysRegUsed(RegisterIndex) &&
+ !(std::find(UsedRegisters.begin(), UsedRegisters.end(),
+ RegisterIndex) != UsedRegisters.end())) {
+ return RegisterIndex;
+ }
+ }
+
+ return -1;
+}
+
+//*****************************************************************************
+//**** InsertNOPLoad pass
+//*****************************************************************************
+// This pass fixes the incorrectly working load instructions that exist on
+// some earlier versions of the LEON processor line. NOP instructions must
+// be inserted after the load instruction to ensure that the load instruction
+// behaves as expected for these processors.
+//
+// This pass inserts a NOP after any LD or LDF instruction.
+//
+char InsertNOPLoad::ID = 0;
+
+InsertNOPLoad::InsertNOPLoad(TargetMachine &tm)
+ : LEONMachineFunctionPass(tm, ID) {}
+
+bool InsertNOPLoad::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &MF.getSubtarget<SparcSubtarget>();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+ DebugLoc DL = DebugLoc();
+
+ bool Modified = false;
+ for (auto MFI = MF.begin(), E = MF.end(); MFI != E; ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode >= SP::LDDArr && Opcode <= SP::LDrr) {
+ MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+ BuildMI(MBB, NMBBI, DL, TII.get(SP::NOP));
+ Modified = true;
+ } else if (MI.isInlineAsm()) {
+ // Look for an inline ld or ldf instruction.
+ StringRef AsmString =
+ MI.getOperand(InlineAsm::MIOp_AsmString).getSymbolName();
+ if (AsmString.startswith_lower("ld")) {
+ MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+ BuildMI(MBB, NMBBI, DL, TII.get(SP::NOP));
+ Modified = true;
+ }
+ }
+ }
+ }
+
+ return Modified;
+}
+
+//*****************************************************************************
+//**** FixFSMULD pass
+//*****************************************************************************
+// This pass fixes the incorrectly working FSMULD instruction that exists for
+// some earlier versions of the LEON processor line.
+//
+// The pass should convert the FSMULD operands to double precision in scratch
+// registers, then calculate the result with the FMULD instruction. Therefore,
+// the pass should replace operations of the form:
+// fsmuld %f20,%f21,%f8
+// with the sequence:
+// fstod %f20,%f0
+// fstod %f21,%f2
+// fmuld %f0,%f2,%f8
+//
+char FixFSMULD::ID = 0;
+
+FixFSMULD::FixFSMULD(TargetMachine &tm) : LEONMachineFunctionPass(tm, ID) {}
+
+bool FixFSMULD::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &MF.getSubtarget<SparcSubtarget>();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+ DebugLoc DL = DebugLoc();
+
+ bool Modified = false;
+ for (auto MFI = MF.begin(), E = MF.end(); MFI != E; ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E; ++MBBI) {
+
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+
+ const int UNASSIGNED_INDEX = -1;
+ int Reg1Index = UNASSIGNED_INDEX;
+ int Reg2Index = UNASSIGNED_INDEX;
+ int Reg3Index = UNASSIGNED_INDEX;
+
+ if (Opcode == SP::FSMULD && MI.getNumOperands() == 3) {
+ // take the registers from fsmuld %f20,%f21,%f8
+ Reg1Index = MI.getOperand(0).getReg();
+ Reg2Index = MI.getOperand(1).getReg();
+ Reg3Index = MI.getOperand(2).getReg();
+ } else if (MI.isInlineAsm()) {
+ StringRef AsmString =
+ MI.getOperand(InlineAsm::MIOp_AsmString).getSymbolName();
+ if (AsmString.startswith_lower("fsmuld")) {
+ // this is an inline FSMULD instruction
+
+ unsigned StartOp = InlineAsm::MIOp_FirstOperand;
+
+ // extracts the registers from the inline assembly instruction
+ for (unsigned i = StartOp, e = MI.getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI.getOperand(i);
+ if (MO.isReg()) {
+ if (Reg1Index == UNASSIGNED_INDEX)
+ Reg1Index = MO.getReg();
+ else if (Reg2Index == UNASSIGNED_INDEX)
+ Reg2Index = MO.getReg();
+ else if (Reg3Index == UNASSIGNED_INDEX)
+ Reg3Index = MO.getReg();
+ }
+ if (Reg3Index != UNASSIGNED_INDEX)
+ break;
+ }
+ }
+ }
+
+ if (Reg1Index != UNASSIGNED_INDEX && Reg2Index != UNASSIGNED_INDEX &&
+ Reg3Index != UNASSIGNED_INDEX) {
+ clearUsedRegisterList();
+ MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+ // Whatever Reg3Index is, it hasn't been used yet, so we need to reserve it.
+ markRegisterUsed(Reg3Index);
+ const int ScratchReg1Index = getUnusedFPRegister(MF.getRegInfo());
+ markRegisterUsed(ScratchReg1Index);
+ const int ScratchReg2Index = getUnusedFPRegister(MF.getRegInfo());
+ markRegisterUsed(ScratchReg2Index);
+
+ if (ScratchReg1Index == UNASSIGNED_INDEX ||
+ ScratchReg2Index == UNASSIGNED_INDEX) {
+ errs() << "Cannot allocate free scratch registers for the FixFSMULD "
+ "pass."
+ << "\n";
+ } else {
+ // create fstod %f20,%f0
+ BuildMI(MBB, MBBI, DL, TII.get(SP::FSTOD))
+ .addReg(ScratchReg1Index)
+ .addReg(Reg1Index);
+
+ // create fstod %f21,%f2
+ BuildMI(MBB, MBBI, DL, TII.get(SP::FSTOD))
+ .addReg(ScratchReg2Index)
+ .addReg(Reg2Index);
+
+ // create fmuld %f0,%f2,%f8
+ BuildMI(MBB, MBBI, DL, TII.get(SP::FMULD))
+ .addReg(Reg3Index)
+ .addReg(ScratchReg1Index)
+ .addReg(ScratchReg2Index);
+
+ MI.eraseFromParent();
+ MBBI = NMBBI;
+
+ Modified = true;
+ }
+ }
+ }
+ }
+
+ return Modified;
+}
+
+//*****************************************************************************
+//**** ReplaceFMULS pass
+//*****************************************************************************
+// This pass fixes the incorrectly working FMULS instruction that exists for
+// some earlier versions of the LEON processor line.
+//
+// This pass converts the FMULS operands to double precision in scratch
+// registers, then calculates the result with the FMULD instruction.
+// The pass should replace operations of the form:
+// fmuls %f20,%f21,%f8
+// with the sequence:
+// fstod %f20,%f0
+// fstod %f21,%f2
+// fmuld %f0,%f2,%f8
+//
+char ReplaceFMULS::ID = 0;
+
+ReplaceFMULS::ReplaceFMULS(TargetMachine &tm)
+ : LEONMachineFunctionPass(tm, ID) {}
+
+bool ReplaceFMULS::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &MF.getSubtarget<SparcSubtarget>();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+ DebugLoc DL = DebugLoc();
+
+ bool Modified = false;
+ for (auto MFI = MF.begin(), E = MF.end(); MFI != E; ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+
+ const int UNASSIGNED_INDEX = -1;
+ int Reg1Index = UNASSIGNED_INDEX;
+ int Reg2Index = UNASSIGNED_INDEX;
+ int Reg3Index = UNASSIGNED_INDEX;
+
+ if (Opcode == SP::FMULS && MI.getNumOperands() == 3) {
+ // take the registers from fmuls %f20,%f21,%f8
+ Reg1Index = MI.getOperand(0).getReg();
+ Reg2Index = MI.getOperand(1).getReg();
+ Reg3Index = MI.getOperand(2).getReg();
+ } else if (MI.isInlineAsm()) {
+ StringRef AsmString =
+ MI.getOperand(InlineAsm::MIOp_AsmString).getSymbolName();
+ if (AsmString.startswith_lower("fmuls")) {
+ // this is an inline FMULS instruction
+ unsigned StartOp = InlineAsm::MIOp_FirstOperand;
+
+ // extracts the registers from the inline assembly instruction
+ for (unsigned i = StartOp, e = MI.getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI.getOperand(i);
+ if (MO.isReg()) {
+ if (Reg1Index == UNASSIGNED_INDEX)
+ Reg1Index = MO.getReg();
+ else if (Reg2Index == UNASSIGNED_INDEX)
+ Reg2Index = MO.getReg();
+ else if (Reg3Index == UNASSIGNED_INDEX)
+ Reg3Index = MO.getReg();
+ }
+ if (Reg3Index != UNASSIGNED_INDEX)
+ break;
+ }
+ }
+ }
+
+ if (Reg1Index != UNASSIGNED_INDEX && Reg2Index != UNASSIGNED_INDEX &&
+ Reg3Index != UNASSIGNED_INDEX) {
+ clearUsedRegisterList();
+ MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+ // Whatever Reg3Index is, it hasn't been used yet, so we need to reserve it.
+ markRegisterUsed(Reg3Index);
+ const int ScratchReg1Index = getUnusedFPRegister(MF.getRegInfo());
+ markRegisterUsed(ScratchReg1Index);
+ const int ScratchReg2Index = getUnusedFPRegister(MF.getRegInfo());
+ markRegisterUsed(ScratchReg2Index);
+
+ if (ScratchReg1Index == UNASSIGNED_INDEX ||
+ ScratchReg2Index == UNASSIGNED_INDEX) {
+ errs() << "Cannot allocate free scratch registers for the "
+ "ReplaceFMULS pass."
+ << "\n";
+ } else {
+ // create fstod %f20,%f0
+ BuildMI(MBB, MBBI, DL, TII.get(SP::FSTOD))
+ .addReg(ScratchReg1Index)
+ .addReg(Reg1Index);
+
+ // create fstod %f21,%f2
+ BuildMI(MBB, MBBI, DL, TII.get(SP::FSTOD))
+ .addReg(ScratchReg2Index)
+ .addReg(Reg2Index);
+
+ // create fmuld %f0,%f2,%f8
+ BuildMI(MBB, MBBI, DL, TII.get(SP::FMULD))
+ .addReg(Reg3Index)
+ .addReg(ScratchReg1Index)
+ .addReg(ScratchReg2Index);
+
+ MI.eraseFromParent();
+ MBBI = NMBBI;
+
+ Modified = true;
+ }
+ }
+ }
+ }
+
+ return Modified;
+}
+
+//*****************************************************************************
+//**** FixAllFDIVSQRT pass
+//*****************************************************************************
+// This pass fixes the incorrectly working FDIVx and FSQRTx instructions that
+// exist for some earlier versions of the LEON processor line. Five NOP
+// instructions need to be inserted after these instructions to ensure the
+// correct result is placed in the destination registers before they are used.
+//
+// This pass implements two fixes:
+// 1) fixing the FSQRTS and FSQRTD instructions.
+// 2) fixing the FDIVS and FDIVD instructions.
+//
+// FSQRTS and FDIVS are converted to FSQRTD and FDIVD respectively earlier in
+// the pipeline when this option is enabled, so this pass needs only to deal
+// with the changes that still need implementing for the "double" versions
+// of these instructions.
+//
+char FixAllFDIVSQRT::ID = 0;
+
+FixAllFDIVSQRT::FixAllFDIVSQRT(TargetMachine &tm)
+ : LEONMachineFunctionPass(tm, ID) {}
+
+bool FixAllFDIVSQRT::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &MF.getSubtarget<SparcSubtarget>();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+ DebugLoc DL = DebugLoc();
+
+ bool Modified = false;
+ for (auto MFI = MF.begin(), E = MF.end(); MFI != E; ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+
+ if (MI.isInlineAsm()) {
+ StringRef AsmString =
+ MI.getOperand(InlineAsm::MIOp_AsmString).getSymbolName();
+ if (AsmString.startswith_lower("fsqrtd")) {
+ // this is an inline fsqrtd instruction
+ Opcode = SP::FSQRTD;
+ } else if (AsmString.startswith_lower("fdivd")) {
+ // this is an inline fdivd instruction
+ Opcode = SP::FDIVD;
+ }
+ }
+
+ // Note: FDIVS and FSQRTS cannot be generated when this erratum fix is
+ // switched on so we don't need to check for them here. They will
+ // already have been converted to FSQRTD or FDIVD earlier in the
+ // pipeline.
+ if (Opcode == SP::FSQRTD || Opcode == SP::FDIVD) {
+ // Insert 5 NOPs before FSQRTD,FDIVD.
+ for (int InsertedCount = 0; InsertedCount < 5; InsertedCount++)
+ BuildMI(MBB, MBBI, DL, TII.get(SP::NOP));
+
+ MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+ // ... and insert 28 NOPs after FSQRTD,FDIVD.
+ for (int InsertedCount = 0; InsertedCount < 28; InsertedCount++)
+ BuildMI(MBB, NMBBI, DL, TII.get(SP::NOP));
+
+ Modified = true;
+ }
+ }
+ }
+
+ return Modified;
+}
+
+//*****************************************************************************
+//**** ReplaceSDIV pass
+//*****************************************************************************
+// This pass fixes the incorrectly working SDIV instruction that exists on
+// some earlier versions of the LEON processor line. The instruction is
+// replaced with an SDIVCC instruction, which works correctly.
+//
+char ReplaceSDIV::ID = 0;
+
+ReplaceSDIV::ReplaceSDIV() : LEONMachineFunctionPass(ID) {}
+
+ReplaceSDIV::ReplaceSDIV(TargetMachine &tm) : LEONMachineFunctionPass(tm, ID) {}
+
+bool ReplaceSDIV::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &MF.getSubtarget<SparcSubtarget>();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+
+ bool Modified = false;
+ for (auto MFI = MF.begin(), E = MF.end(); MFI != E; ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode == SP::SDIVrr) {
+ MI.setDesc(TII.get(SP::SDIVCCrr));
+ Modified = true;
+ } else if (Opcode == SP::SDIVri) {
+ MI.setDesc(TII.get(SP::SDIVCCri));
+ Modified = true;
+ }
+ }
+ }
+
+ return Modified;
+}
+
+static RegisterPass<ReplaceSDIV> X("replace-sdiv", "Replace SDIV Pass", false,
+ false);
+
+//*****************************************************************************
+//**** FixCALL pass
+//*****************************************************************************
+// This pass restricts the size of the immediate operand of the CALL
+// instruction, which can cause problems on some earlier versions of the LEON
+// processor that interpret some of the call address bits incorrectly.
+//
+char FixCALL::ID = 0;
+
+FixCALL::FixCALL(TargetMachine &tm) : LEONMachineFunctionPass(tm, ID) {}
+
+bool FixCALL::runOnMachineFunction(MachineFunction &MF) {
+ bool Modified = false;
+
+ for (auto MFI = MF.begin(), E = MF.end(); MFI != E; ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode == SP::CALL || Opcode == SP::CALLrr) {
+ unsigned NumOperands = MI.getNumOperands();
+ for (unsigned OperandIndex = 0; OperandIndex < NumOperands;
+ OperandIndex++) {
+ MachineOperand &MO = MI.getOperand(OperandIndex);
+ if (MO.isImm()) {
+ int64_t Value = MO.getImm();
+ MO.setImm(Value & 0x000fffffL);
+ Modified = true;
+ break;
+ }
+ }
+ } else if (MI.isInlineAsm()) { // inline assembly immediate call
+ StringRef AsmString =
+ MI.getOperand(InlineAsm::MIOp_AsmString).getSymbolName();
+ if (AsmString.startswith_lower("call")) {
+ // this is an inline call instruction
+ unsigned StartOp = InlineAsm::MIOp_FirstOperand;
+
+ // extracts the registers from the inline assembly instruction
+ for (unsigned i = StartOp, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
+ if (MO.isImm()) {
+ int64_t Value = MO.getImm();
+ MO.setImm(Value & 0x000fffffL);
+ Modified = true;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return Modified;
+}
+
+//*****************************************************************************
+//**** IgnoreZeroFlag pass
+//*****************************************************************************
+// This erratum fix corrects the overflow behavior of the SDIVCC and UDIVCC
+// instructions that exists on some earlier LEON processors. Where these
+// instructions are detected, they are replaced by a sequence that will
+// explicitly write the overflow bit flag if this is required.
+//
+char IgnoreZeroFlag::ID = 0;
+
+IgnoreZeroFlag::IgnoreZeroFlag(TargetMachine &tm)
+ : LEONMachineFunctionPass(tm, ID) {}
+
+bool IgnoreZeroFlag::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &MF.getSubtarget<SparcSubtarget>();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+ DebugLoc DL = DebugLoc();
+
+ bool Modified = false;
+ for (auto MFI = MF.begin(), E = MF.end(); MFI != E; ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode == SP::SDIVCCrr || Opcode == SP::SDIVCCri ||
+ Opcode == SP::UDIVCCrr || Opcode == SP::UDIVCCri) {
+
+ // Split the current machine basic block just after the sdivcc/udivcc
+ // instruction and create a label that lets us skip the zero flag update
+ // (of PSR - Processor Status Register) if the conditions are not met.
+ const BasicBlock *LLVM_BB = MBB.getBasicBlock();
+ MachineFunction::iterator It =
+ std::next(MachineFunction::iterator(MBB));
+
+ MachineBasicBlock *dneBB = MF.CreateMachineBasicBlock(LLVM_BB);
+ MF.insert(It, dneBB);
+
+ // Transfer the remainder of MBB and its successor edges to dneBB.
+ dneBB->splice(dneBB->begin(), &MBB,
+ std::next(MachineBasicBlock::iterator(MI)), MBB.end());
+ dneBB->transferSuccessorsAndUpdatePHIs(&MBB);
+
+ MBB.addSuccessor(dneBB);
+
+ MachineBasicBlock::iterator NextMBBI = std::next(MBBI);
+
+ // bvc - branch if overflow flag not set
+ BuildMI(MBB, NextMBBI, DL, TII.get(SP::BCOND))
+ .addMBB(dneBB)
+ .addImm(SPCC::ICC_VS);
+
+ // bnz - branch if not zero
+ BuildMI(MBB, NextMBBI, DL, TII.get(SP::BCOND))
+ .addMBB(dneBB)
+ .addImm(SPCC::ICC_NE);
+
+ // use the WRPSR (Write Processor State Register) instruction to set
+ // the zero flag to 1; create wr %g0, 1, %psr
+ BuildMI(MBB, NextMBBI, DL, TII.get(SP::WRPSRri))
+ .addReg(SP::G0)
+ .addImm(1);
+
+ BuildMI(MBB, NextMBBI, DL, TII.get(SP::NOP));
+
+ Modified = true;
+ } else if (MI.isInlineAsm()) {
+ StringRef AsmString =
+ MI.getOperand(InlineAsm::MIOp_AsmString).getSymbolName();
+ if (AsmString.startswith_lower("sdivcc") ||
+ AsmString.startswith_lower("udivcc")) {
+ // this is an inline SDIVCC or UDIVCC instruction
+
+ // Split the current machine basic block just after the
+ // sdivcc/udivcc instruction and create a successor block that lets
+ // us skip the zero flag update (of the PSR - Processor Status
+ // Register) if the conditions are not met.
+ const BasicBlock *LLVM_BB = MBB.getBasicBlock();
+ MachineFunction::iterator It =
+ std::next(MachineFunction::iterator(MBB));
+
+ MachineBasicBlock *dneBB = MF.CreateMachineBasicBlock(LLVM_BB);
+ MF.insert(It, dneBB);
+
+ // Transfer the remainder of MBB and its successor edges to dneBB.
+ dneBB->splice(dneBB->begin(), &MBB,
+ std::next(MachineBasicBlock::iterator(MI)), MBB.end());
+ dneBB->transferSuccessorsAndUpdatePHIs(&MBB);
+
+ MBB.addSuccessor(dneBB);
+
+ MachineBasicBlock::iterator NextMBBI = std::next(MBBI);
+
+ // bvc - branch if overflow flag not set
+ BuildMI(MBB, NextMBBI, DL, TII.get(SP::BCOND))
+ .addMBB(dneBB)
+ .addImm(SPCC::ICC_VS);
+
+ // bnz - branch if not zero
+ BuildMI(MBB, NextMBBI, DL, TII.get(SP::BCOND))
+ .addMBB(dneBB)
+ .addImm(SPCC::ICC_NE);
+
+ // use the WRPSR (Write Processor State Register) instruction to set
+ // the zero flag to 1; create wr %g0, 1, %psr
+ BuildMI(MBB, NextMBBI, DL, TII.get(SP::WRPSRri))
+ .addReg(SP::G0)
+ .addImm(1);
+
+ BuildMI(MBB, NextMBBI, DL, TII.get(SP::NOP));
+
+ Modified = true;
+ }
+ }
+ }
+ }
+
+ return Modified;
+}
+
+//*****************************************************************************
+//**** InsertNOPDoublePrecision pass
+//*****************************************************************************
+// This erratum fix for some earlier LEON processors addresses a problem where
+// a double precision load will not yield the correct result if used in FMUL,
+// FDIV, FADD, FSUB or FSQRT instructions later. If this sequence is detected,
+// inserting a NOP between the two instructions will fix the erratum. The pass:
+// 1. scans the code after register allocation;
+// 2. checks for the problem conditions as described in the AT697E erratum
+// “Odd-Numbered FPU Register Dependency not Properly Checked in some
+// Double-Precision FPU Operations”;
+// 3. inserts NOPs if the problem exists.
+//
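
The pass checks four symmetric variants of one dependency shape. A minimal sketch of that shape, assuming the SP::F0..SP::F31 enum values are consecutive (helper name hypothetical, illustration only):

    // True when the double-precision load's register is one above the FPU
    // operation's register and that register is also the destination - the
    // odd-numbered register dependency the AT697E erratum describes.
    static bool hasOddRegDependency(int LoadRegIdx, int SrcRegIdx,
                                    int DstRegIdx) {
      return LoadRegIdx == SrcRegIdx + 1 && SrcRegIdx == DstRegIdx;
    }
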
+char InsertNOPDoublePrecision::ID = 0;
+
+InsertNOPDoublePrecision::InsertNOPDoublePrecision(TargetMachine &tm)
+ : LEONMachineFunctionPass(tm, ID) {}
+
+bool InsertNOPDoublePrecision::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &MF.getSubtarget<SparcSubtarget>();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+ DebugLoc DL = DebugLoc();
+
+ bool Modified = false;
+ for (auto MFI = MF.begin(), E = MF.end(); MFI != E; ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode == SP::LDDFri || Opcode == SP::LDDFrr) {
+ MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+ MachineInstr &NMI = *NMBBI;
+
+ unsigned NextOpcode = NMI.getOpcode();
+ if (NextOpcode == SP::FADDD || NextOpcode == SP::FSUBD ||
+ NextOpcode == SP::FMULD || NextOpcode == SP::FDIVD) {
+ int RegAIndex = GetRegIndexForOperand(MI, 0);
+ int RegBIndex = GetRegIndexForOperand(NMI, 0);
+ int RegCIndex =
+ GetRegIndexForOperand(NMI, 2); // Second source operand is index 2
+ int RegDIndex =
+ GetRegIndexForOperand(NMI, 1); // Destination operand is index 1
+
+ if ((RegAIndex == RegBIndex + 1 && RegBIndex == RegDIndex) ||
+ (RegAIndex == RegCIndex + 1 && RegCIndex == RegDIndex) ||
+ (RegAIndex == RegBIndex + 1 && RegCIndex == RegDIndex) ||
+ (RegAIndex == RegCIndex + 1 && RegBIndex == RegDIndex)) {
+ // Insert NOP between the two instructions.
+ BuildMI(MBB, NMBBI, DL, TII.get(SP::NOP));
+ Modified = true;
+ }
+
+ // Check the errata patterns that only happen for FADDD and FMULD
+ if (Modified == false &&
+ (NextOpcode == SP::FADDD || NextOpcode == SP::FMULD)) {
+ RegAIndex = GetRegIndexForOperand(MI, 1);
+ if (RegAIndex == RegBIndex + 1 && RegBIndex == RegCIndex &&
+ RegBIndex == RegDIndex) {
+ // Insert NOP between the two instructions.
+ BuildMI(MBB, NMBBI, DL, TII.get(SP::NOP));
+ Modified = true;
+ }
+ }
+ } else if (NextOpcode == SP::FSQRTD) {
+ int RegAIndex = GetRegIndexForOperand(MI, 1);
+ int RegBIndex = GetRegIndexForOperand(NMI, 0);
+ int RegCIndex = GetRegIndexForOperand(NMI, 1);
+
+ if (RegAIndex == RegBIndex + 1 && RegBIndex == RegCIndex) {
+ // Insert NOP between the two instructions.
+ BuildMI(MBB, NMBBI, DL, TII.get(SP::NOP));
+ Modified = true;
+ }
+ }
+ }
+ }
+ }
+
+ return Modified;
+}
+
+//*****************************************************************************
+//**** PreventRoundChange pass
+//*****************************************************************************
+// To prevent any explicit change of the default rounding mode, this pass
+// detects any call to the fesetround function and removes that call from the
+// generated code.
+//
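
For illustration, user code of the following shape is what the pass acts on; under this erratum fix the call is deleted, so the program keeps the default round-to-nearest mode (a sketch, not part of the patch):

    #include <cfenv>

    void requestUpwardRounding() {
      // On an affected LEON target, PreventRoundChange removes this call
      // during code generation, so it has no effect.
      std::fesetround(FE_UPWARD);
    }
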
+char PreventRoundChange::ID = 0;
+
+PreventRoundChange::PreventRoundChange(TargetMachine &tm)
+ : LEONMachineFunctionPass(tm, ID) {}
+
+bool PreventRoundChange::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &MF.getSubtarget<SparcSubtarget>();
+
+ bool Modified = false;
+ for (auto MFI = MF.begin(), E = MF.end(); MFI != E; ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode == SP::CALL && MI.getNumOperands() > 0) {
+ MachineOperand &MO = MI.getOperand(0);
+
+ if (MO.isGlobal()) {
+ StringRef FuncName = MO.getGlobal()->getName();
+ if (FuncName.compare_lower("fesetround") == 0) {
+ MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+ MI.eraseFromParent();
+ MBBI = NMBBI;
+ Modified = true;
+ }
+ }
+ }
+ }
+ }
+
+ return Modified;
+}
+//*****************************************************************************
+//**** FlushCacheLineSWAP pass
+//*****************************************************************************
+// This pass inserts a FLUSH and five NOPs just before any SWAP or LDSTUB
+// atomic instruction.
+//
+char FlushCacheLineSWAP::ID = 0;
+
+FlushCacheLineSWAP::FlushCacheLineSWAP(TargetMachine &tm)
+ : LEONMachineFunctionPass(tm, ID) {}
+
+bool FlushCacheLineSWAP::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &MF.getSubtarget<SparcSubtarget>();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+ DebugLoc DL = DebugLoc();
+
+ bool Modified = false;
+ for (auto MFI = MF.begin(), E = MF.end(); MFI != E; ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode == SP::SWAPrr || Opcode == SP::SWAPri ||
+ Opcode == SP::LDSTUBrr || Opcode == SP::LDSTUBri) {
+ // insert flush and 5 NOPs before the swap/ldstub instruction
+ BuildMI(MBB, MBBI, DL, TII.get(SP::FLUSH));
+ BuildMI(MBB, MBBI, DL, TII.get(SP::NOP));
+ BuildMI(MBB, MBBI, DL, TII.get(SP::NOP));
+ BuildMI(MBB, MBBI, DL, TII.get(SP::NOP));
+ BuildMI(MBB, MBBI, DL, TII.get(SP::NOP));
+ BuildMI(MBB, MBBI, DL, TII.get(SP::NOP));
+
+ Modified = true;
+ } else if (MI.isInlineAsm()) {
+ StringRef AsmString =
+ MI.getOperand(InlineAsm::MIOp_AsmString).getSymbolName();
+ if (AsmString.startswith_lower("swap") ||
+ AsmString.startswith_lower("ldstub")) {
+ // this is an inline swap or ldstub instruction
+
+ // insert flush and 5 NOPs before the swap/ldstub instruction
+ BuildMI(MBB, MBBI, DL, TII.get(SP::FLUSH));
+ BuildMI(MBB, MBBI, DL, TII.get(SP::NOP));
+ BuildMI(MBB, MBBI, DL, TII.get(SP::NOP));
+ BuildMI(MBB, MBBI, DL, TII.get(SP::NOP));
+ BuildMI(MBB, MBBI, DL, TII.get(SP::NOP));
+ BuildMI(MBB, MBBI, DL, TII.get(SP::NOP));
+
+ Modified = true;
+ }
+ }
+ }
+ }
+
+ return Modified;
+}
+
+//*****************************************************************************
+//**** InsertNOPsLoadStore pass
+//*****************************************************************************
+// This pass inserts NOPs between floating point loads and stores when the
+// following circumstances are present [5]:
+// Pattern 1:
+// 1. single-precision load or single-precision FPOP to register %fX, where X
+// is the same register as the store being checked;
+// 2. single-precision load or single-precision FPOP to register %fY, where Y
+// is the opposite register in the same double-precision pair;
+// 3. 0-3 instructions of any kind, except stores from %fX or %fY or
+// operations with %fX as destination;
+// 4. the store (from register %fX) being considered.
+// Pattern 2:
+// 1. double-precision FPOP;
+// 2. any number of operations of any kind, provided there is no
+// double-precision FPOP among them and at most one single-precision or
+// single-to-double FPOP;
+// 3. the store (from register %fX) being considered.
+//
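
The pairing test used below reduces an %fN register to its double-precision pair index. A small sketch of the arithmetic, assuming SP::F0..SP::F31 are consecutive enum values (helper name hypothetical):

    // %f4 and %f5 both map to pair index 2 (double-precision pair %d2), so a
    // later store from either one must be screened against the pattern above.
    static int doublePairIndex(int FRegIdx, int F0Idx) {
      return (FRegIdx - F0Idx) / 2;
    }
    // e.g. with F0Idx = 0: doublePairIndex(4, 0) == doublePairIndex(5, 0) == 2
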
+char InsertNOPsLoadStore::ID = 0;
+
+InsertNOPsLoadStore::InsertNOPsLoadStore(TargetMachine &tm)
+ : LEONMachineFunctionPass(tm, ID) {}
+
+bool InsertNOPsLoadStore::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &MF.getSubtarget<SparcSubtarget>();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+ DebugLoc DL = DebugLoc();
+
+ MachineInstr *Pattern1FirstInstruction = NULL;
+ MachineInstr *Pattern2FirstInstruction = NULL;
+ unsigned int StoreInstructionsToCheck = 0;
+ int FxRegIndex, FyRegIndex;
+
+ bool Modified = false;
+ for (auto MFI = MF.begin(), E = MF.end(); MFI != E; ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+
+ if (StoreInstructionsToCheck > 0) {
+ if (((MI.getOpcode() == SP::STFrr || MI.getOpcode() == SP::STFri) &&
+ (GetRegIndexForOperand(MI, LAST_OPERAND) == FxRegIndex ||
+ GetRegIndexForOperand(MI, LAST_OPERAND) == FyRegIndex)) ||
+ GetRegIndexForOperand(MI, 0) == FxRegIndex) {
+ // Insert four NOPs
+ for (unsigned InsertedCount = 0; InsertedCount < 4; InsertedCount++) {
+ BuildMI(MBB, MBBI, DL, TII.get(SP::NOP));
+ }
+ Modified = true;
+ }
+ StoreInstructionsToCheck--;
+ }
+
+ switch (MI.getOpcode()) {
+ // Watch for Pattern 1 FPop instructions
+ case SP::LDrr:
+ case SP::LDri:
+ case SP::LDFrr:
+ case SP::LDFri:
+ case SP::FADDS:
+ case SP::FSUBS:
+ case SP::FMULS:
+ case SP::FDIVS:
+ case SP::FSQRTS:
+ case SP::FCMPS:
+ case SP::FMOVS:
+ case SP::FNEGS:
+ case SP::FABSS:
+ case SP::FITOS:
+ case SP::FSTOI:
+ case SP::FITOD:
+ case SP::FDTOI:
+ case SP::FDTOS:
+ if (Pattern1FirstInstruction != NULL) {
+ FxRegIndex = GetRegIndexForOperand(*Pattern1FirstInstruction, 0);
+ FyRegIndex = GetRegIndexForOperand(MI, 0);
+
+ // Check to see if these registers are part of the same double-precision
+ // register pair.
+ int DoublePrecRegIndexForX = (FxRegIndex - SP::F0) / 2;
+ int DoublePrecRegIndexForY = (FyRegIndex - SP::F0) / 2;
+
+ if (DoublePrecRegIndexForX == DoublePrecRegIndexForY)
+ StoreInstructionsToCheck = 4;
+ }
+
+ Pattern1FirstInstruction = &MI;
+ break;
+ // End of Pattern 1
+
+ // Search for Pattern 2
+ case SP::FADDD:
+ case SP::FSUBD:
+ case SP::FMULD:
+ case SP::FDIVD:
+ case SP::FSQRTD:
+ case SP::FCMPD:
+ Pattern2FirstInstruction = &MI;
+ Pattern1FirstInstruction = NULL;
+ break;
+
+ case SP::STFrr:
+ case SP::STFri:
+ case SP::STDFrr:
+ case SP::STDFri:
+ if (Pattern2FirstInstruction != NULL) {
+ if (GetRegIndexForOperand(MI, LAST_OPERAND) ==
+ GetRegIndexForOperand(*Pattern2FirstInstruction, 0)) {
+ // Insert four NOPs
+ for (unsigned InsertedCount = 0; InsertedCount < 4;
+ InsertedCount++) {
+ BuildMI(MBB, MBBI, DL, TII.get(SP::NOP));
+ }
+
+ Pattern2FirstInstruction = NULL;
+ }
+ }
+ Pattern1FirstInstruction = NULL;
+ break;
+ // End of Pattern 2
+
+ default:
+ // Ensure we don't count debug-only values while we're testing for the
+ // patterns.
+ if (!MI.isDebugValue())
+ Pattern1FirstInstruction = NULL;
+ break;
+ }
+ }
+ }
+
+ return Modified;
+}
diff --git a/lib/Target/Sparc/LeonPasses.h b/lib/Target/Sparc/LeonPasses.h
new file mode 100755
index 000000000000..5e21813ed029
--- /dev/null
+++ b/lib/Target/Sparc/LeonPasses.h
@@ -0,0 +1,199 @@
+//===------- LeonPasses.h - Define passes specific to LEON ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LEON-specific passes used by the SPARC backend.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_LEON_PASSES_H
+#define LLVM_LIB_TARGET_SPARC_LEON_PASSES_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/Passes.h"
+
+#include "Sparc.h"
+#include "SparcSubtarget.h"
+
+namespace llvm {
+class LLVM_LIBRARY_VISIBILITY LEONMachineFunctionPass
+ : public MachineFunctionPass {
+protected:
+ const SparcSubtarget *Subtarget;
+ const int LAST_OPERAND = -1;
+
+ // This vector holds the registers that have already been allocated in
+ // groups by some of the LEON passes.
+ std::vector<int> UsedRegisters;
+
+protected:
+ LEONMachineFunctionPass(TargetMachine &tm, char &ID);
+ LEONMachineFunctionPass(char &ID);
+
+ int GetRegIndexForOperand(MachineInstr &MI, int OperandIndex);
+ void clearUsedRegisterList() { UsedRegisters.clear(); }
+
+ void markRegisterUsed(int registerIndex) {
+ UsedRegisters.push_back(registerIndex);
+ }
+ int getUnusedFPRegister(MachineRegisterInfo &MRI);
+};
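
A hypothetical usage sketch of the bookkeeping helpers above, as a subclass pass might call them (the surrounding pass and MRI are assumptions, and error handling is elided):

    // Inside some LEON pass's runOnMachineFunction, with MRI being the
    // function's MachineRegisterInfo:
    clearUsedRegisterList();
    int ScratchFPReg = getUnusedFPRegister(MRI); // pick a free FP register
    markRegisterUsed(ScratchFPReg);              // keep it out of later picks
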
+
+class LLVM_LIBRARY_VISIBILITY ReplaceSDIV : public LEONMachineFunctionPass {
+public:
+ static char ID;
+
+ ReplaceSDIV();
+ ReplaceSDIV(TargetMachine &tm);
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "ReplaceSDIV: Erratum Fix LBR25: do not emit SDIV, but emit SDIVCC "
+ "instead";
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY FixCALL : public LEONMachineFunctionPass {
+public:
+ static char ID;
+
+ FixCALL(TargetMachine &tm);
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "FixCALL: Erratum Fix LBR26: restrict the size of the immediate "
+ "operand of the CALL instruction to 20 bits";
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY IgnoreZeroFlag : public LEONMachineFunctionPass {
+public:
+ static char ID;
+
+ IgnoreZeroFlag(TargetMachine &tm);
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "IgnoreZeroFlag: Erratum Fix LBR28: do not rely on the zero bit "
+ "flag on a divide overflow for SDIVCC and UDIVCC";
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY InsertNOPDoublePrecision
+ : public LEONMachineFunctionPass {
+public:
+ static char ID;
+
+ InsertNOPDoublePrecision(TargetMachine &tm);
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "InsertNOPDoublePrecision: Erratum Fix LBR30: insert a NOP before "
+ "the double precision floating point instruction";
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY FixFSMULD : public LEONMachineFunctionPass {
+public:
+ static char ID;
+
+ FixFSMULD(TargetMachine &tm);
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "FixFSMULD: Erratum Fix LBR31: do not select FSMULD";
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY ReplaceFMULS : public LEONMachineFunctionPass {
+public:
+ static char ID;
+
+ ReplaceFMULS(TargetMachine &tm);
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "ReplaceFMULS: Erratum Fix LBR32: replace FMULS instruction with a "
+ "routine using conversions/double precision operations to replace "
+ "FMULS";
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY PreventRoundChange
+ : public LEONMachineFunctionPass {
+public:
+ static char ID;
+
+ PreventRoundChange(TargetMachine &tm);
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "PreventRoundChange: Erratum Fix LBR33: prevent any rounding mode "
+ "change request: use only the round-to-nearest rounding mode";
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY FixAllFDIVSQRT : public LEONMachineFunctionPass {
+public:
+ static char ID;
+
+ FixAllFDIVSQRT(TargetMachine &tm);
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "FixAllFDIVSQRT: Erratum Fix LBR34: fix FDIVS/FDIVD/FSQRTS/FSQRTD "
+ "instructions with NOPs and floating-point store";
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY InsertNOPLoad : public LEONMachineFunctionPass {
+public:
+ static char ID;
+
+ InsertNOPLoad(TargetMachine &tm);
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "InsertNOPLoad: insert a NOP instruction after "
+ "every single-cycle load instruction when the next instruction is "
+ "another load/store instruction";
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY FlushCacheLineSWAP
+ : public LEONMachineFunctionPass {
+public:
+ static char ID;
+
+ FlushCacheLineSWAP(TargetMachine &tm);
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "FlushCacheLineSWAP: Erratum Fix LBR36: flush cache line containing "
+ "the lock before performing any of the atomic instructions SWAP and "
+ "LDSTUB";
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY InsertNOPsLoadStore
+ : public LEONMachineFunctionPass {
+public:
+ static char ID;
+
+ InsertNOPsLoadStore(TargetMachine &tm);
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ const char *getPassName() const override {
+ return "InsertNOPsLoadStore: Erratum Fix LBR37: insert NOPs between "
+ "single-precision loads and the store, so the number of "
+ "instructions between is 4";
+ }
+};
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_SPARC_LEON_PASSES_H
diff --git a/lib/Target/Sparc/MCTargetDesc/Makefile b/lib/Target/Sparc/MCTargetDesc/Makefile
deleted file mode 100644
index abcbe2da18ec..000000000000
--- a/lib/Target/Sparc/MCTargetDesc/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/Sparc/TargetDesc/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMSparcDesc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp b/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
index d1d7aaa07eab..14a70d862f11 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
+++ b/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
@@ -248,7 +248,8 @@ namespace {
llvm_unreachable("fixupNeedsRelaxation() unimplemented");
return false;
}
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override {
// FIXME.
llvm_unreachable("relaxInstruction() unimplemented");
}
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp b/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
index 0be60fd7a051..d35e45e03466 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
+++ b/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
@@ -29,8 +29,8 @@ namespace {
~SparcELFObjectWriter() override {}
protected:
- unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel) const override;
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
bool needsRelocateWithSymbol(const MCSymbol &Sym,
unsigned Type) const override;
@@ -38,7 +38,8 @@ namespace {
};
}
-unsigned SparcELFObjectWriter::GetRelocType(const MCValue &Target,
+unsigned SparcELFObjectWriter::getRelocType(MCContext &Ctx,
+ const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const {
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp b/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp
index 9171d4dc9c00..45bc4a1de01b 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp
+++ b/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp
@@ -22,6 +22,7 @@
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Support/EndianStream.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp b/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
index 9113e4a46b96..dceaca791aab 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
+++ b/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
@@ -15,7 +15,6 @@
#include "InstPrinter/SparcInstPrinter.h"
#include "SparcMCAsmInfo.h"
#include "SparcTargetStreamer.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
@@ -81,12 +80,8 @@ createSparcMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
//
// All code models require that the text segment is smaller than 2GB.
-static MCCodeGenInfo *createSparcMCCodeGenInfo(const Triple &TT,
- Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
-
+static void adjustCodeGenOpts(const Triple &TT, Reloc::Model RM,
+ CodeModel::Model &CM) {
// The default 32-bit code model is abs32/pic32 and the default 32-bit
// code model for JIT is abs32.
switch (CM) {
@@ -94,17 +89,10 @@ static MCCodeGenInfo *createSparcMCCodeGenInfo(const Triple &TT,
case CodeModel::Default:
case CodeModel::JITDefault: CM = CodeModel::Small; break;
}
-
- X->initMCCodeGenInfo(RM, CM, OL);
- return X;
}
-static MCCodeGenInfo *createSparcV9MCCodeGenInfo(const Triple &TT,
- Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
-
+static void adjustCodeGenOptsV9(const Triple &TT, Reloc::Model RM,
+ CodeModel::Model &CM) {
// The default 64-bit code model is abs44/pic32 and the default 64-bit
// code model for JIT is abs64.
switch (CM) {
@@ -116,9 +104,6 @@ static MCCodeGenInfo *createSparcV9MCCodeGenInfo(const Triple &TT,
CM = CodeModel::Large;
break;
}
-
- X->initMCCodeGenInfo(RM, CM, OL);
- return X;
}
static MCTargetStreamer *
@@ -175,10 +160,10 @@ extern "C" void LLVMInitializeSparcTargetMC() {
}
// Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(TheSparcTarget,
- createSparcMCCodeGenInfo);
- TargetRegistry::RegisterMCCodeGenInfo(TheSparcV9Target,
- createSparcV9MCCodeGenInfo);
- TargetRegistry::RegisterMCCodeGenInfo(TheSparcelTarget,
- createSparcMCCodeGenInfo);
+ TargetRegistry::registerMCAdjustCodeGenOpts(TheSparcTarget,
+ adjustCodeGenOpts);
+ TargetRegistry::registerMCAdjustCodeGenOpts(TheSparcV9Target,
+ adjustCodeGenOptsV9);
+ TargetRegistry::registerMCAdjustCodeGenOpts(TheSparcelTarget,
+ adjustCodeGenOpts);
}
diff --git a/lib/Target/Sparc/Makefile b/lib/Target/Sparc/Makefile
deleted file mode 100644
index c2a95b47151a..000000000000
--- a/lib/Target/Sparc/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-##===- lib/Target/Sparc/Makefile ---------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMSparcCodeGen
-TARGET = Sparc
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = SparcGenRegisterInfo.inc SparcGenInstrInfo.inc \
- SparcGenAsmWriter.inc SparcGenAsmMatcher.inc \
- SparcGenDAGISel.inc SparcGenDisassemblerTables.inc \
- SparcGenSubtargetInfo.inc SparcGenCallingConv.inc \
- SparcGenMCCodeEmitter.inc
-
-DIRS = InstPrinter AsmParser Disassembler TargetInfo MCTargetDesc
-
-include $(LEVEL)/Makefile.common
-
diff --git a/lib/Target/Sparc/README.txt b/lib/Target/Sparc/README.txt
index 647c2763e521..d7686eba7af6 100644
--- a/lib/Target/Sparc/README.txt
+++ b/lib/Target/Sparc/README.txt
@@ -1,4 +1,3 @@
-
To-do
-----
diff --git a/lib/Target/Sparc/Sparc.h b/lib/Target/Sparc/Sparc.h
index 96378d522dc0..0a8272d89297 100644
--- a/lib/Target/Sparc/Sparc.h
+++ b/lib/Target/Sparc/Sparc.h
@@ -72,7 +72,24 @@ namespace llvm {
FCC_UGE = 12+16, // Unordered or Greater or Equal
FCC_LE = 13+16, // Less or Equal
FCC_ULE = 14+16, // Unordered or Less or Equal
- FCC_O = 15+16 // Ordered
+ FCC_O = 15+16, // Ordered
+
+ CPCC_A = 8+32, // Always
+ CPCC_N = 0+32, // Never
+ CPCC_3 = 7+32,
+ CPCC_2 = 6+32,
+ CPCC_23 = 5+32,
+ CPCC_1 = 4+32,
+ CPCC_13 = 3+32,
+ CPCC_12 = 2+32,
+ CPCC_123 = 1+32,
+ CPCC_0 = 9+32,
+ CPCC_03 = 10+32,
+ CPCC_02 = 11+32,
+ CPCC_023 = 12+32,
+ CPCC_01 = 13+32,
+ CPCC_013 = 14+32,
+ CPCC_012 = 15+32
};
}
@@ -110,6 +127,22 @@ namespace llvm {
case SPCC::FCC_LE: return "le";
case SPCC::FCC_ULE: return "ule";
case SPCC::FCC_O: return "o";
+ case SPCC::CPCC_A: return "a";
+ case SPCC::CPCC_N: return "n";
+ case SPCC::CPCC_3: return "3";
+ case SPCC::CPCC_2: return "2";
+ case SPCC::CPCC_23: return "23";
+ case SPCC::CPCC_1: return "1";
+ case SPCC::CPCC_13: return "13";
+ case SPCC::CPCC_12: return "12";
+ case SPCC::CPCC_123: return "123";
+ case SPCC::CPCC_0: return "0";
+ case SPCC::CPCC_03: return "03";
+ case SPCC::CPCC_02: return "02";
+ case SPCC::CPCC_023: return "023";
+ case SPCC::CPCC_01: return "01";
+ case SPCC::CPCC_013: return "013";
+ case SPCC::CPCC_012: return "012";
}
llvm_unreachable("Invalid cond code");
}
diff --git a/lib/Target/Sparc/Sparc.td b/lib/Target/Sparc/Sparc.td
index c34122eef92f..7a3d12448d52 100644
--- a/lib/Target/Sparc/Sparc.td
+++ b/lib/Target/Sparc/Sparc.td
@@ -21,79 +21,133 @@ include "llvm/Target/Target.td"
//
def FeatureV9
- : SubtargetFeature<"v9", "IsV9", "true",
- "Enable SPARC-V9 instructions">;
+ : SubtargetFeature<"v9", "IsV9", "true", "Enable SPARC-V9 instructions">;
def FeatureV8Deprecated
- : SubtargetFeature<"deprecated-v8", "V8DeprecatedInsts", "true",
- "Enable deprecated V8 instructions in V9 mode">;
+ : SubtargetFeature<"deprecated-v8", "V8DeprecatedInsts", "true",
+ "Enable deprecated V8 instructions in V9 mode">;
def FeatureVIS
- : SubtargetFeature<"vis", "IsVIS", "true",
- "Enable UltraSPARC Visual Instruction Set extensions">;
+ : SubtargetFeature<"vis", "IsVIS", "true",
+ "Enable UltraSPARC Visual Instruction Set extensions">;
def FeatureVIS2
- : SubtargetFeature<"vis2", "IsVIS2", "true",
- "Enable Visual Instruction Set extensions II">;
+ : SubtargetFeature<"vis2", "IsVIS2", "true",
+ "Enable Visual Instruction Set extensions II">;
def FeatureVIS3
- : SubtargetFeature<"vis3", "IsVIS3", "true",
- "Enable Visual Instruction Set extensions III">;
+ : SubtargetFeature<"vis3", "IsVIS3", "true",
+ "Enable Visual Instruction Set extensions III">;
+def FeatureLeon
+ : SubtargetFeature<"leon", "IsLeon", "true", "Enable LEON extensions">;
def FeatureHardQuad
- : SubtargetFeature<"hard-quad-float", "HasHardQuad", "true",
- "Enable quad-word floating point instructions">;
+ : SubtargetFeature<"hard-quad-float", "HasHardQuad", "true",
+ "Enable quad-word floating point instructions">;
def UsePopc : SubtargetFeature<"popc", "UsePopc", "true",
"Use the popc (population count) instruction">;
+def FeatureSoftFloat
+ : SubtargetFeature<"soft-float", "UseSoftFloat", "true",
+ "Use software emulation for floating point">;
+
+//==== Features added predominantly for LEON subtarget support
+include "LeonFeatures.td"
+
//===----------------------------------------------------------------------===//
// Register File, Calling Conv, Instruction Descriptions
//===----------------------------------------------------------------------===//
include "SparcRegisterInfo.td"
include "SparcCallingConv.td"
+include "SparcSchedule.td"
include "SparcInstrInfo.td"
def SparcInstrInfo : InstrInfo;
-def SparcAsmParser : AsmParser {
- bit ShouldEmitMatchRegisterName = 0;
-}
+def SparcAsmParser : AsmParser { bit ShouldEmitMatchRegisterName = 0; }
//===----------------------------------------------------------------------===//
// SPARC processors supported.
//===----------------------------------------------------------------------===//
class Proc<string Name, list<SubtargetFeature> Features>
- : Processor<Name, NoItineraries, Features>;
-
-def : Proc<"generic", []>;
-def : Proc<"v7", []>;
-def : Proc<"v8", []>;
-def : Proc<"supersparc", []>;
-def : Proc<"sparclite", []>;
-def : Proc<"f934", []>;
-def : Proc<"hypersparc", []>;
-def : Proc<"sparclite86x", []>;
-def : Proc<"sparclet", []>;
-def : Proc<"tsc701", []>;
-def : Proc<"v9", [FeatureV9]>;
-def : Proc<"ultrasparc", [FeatureV9, FeatureV8Deprecated, FeatureVIS]>;
-def : Proc<"ultrasparc3", [FeatureV9, FeatureV8Deprecated, FeatureVIS,
- FeatureVIS2]>;
-def : Proc<"niagara", [FeatureV9, FeatureV8Deprecated, FeatureVIS,
- FeatureVIS2]>;
-def : Proc<"niagara2", [FeatureV9, FeatureV8Deprecated, UsePopc,
- FeatureVIS, FeatureVIS2]>;
-def : Proc<"niagara3", [FeatureV9, FeatureV8Deprecated, UsePopc,
- FeatureVIS, FeatureVIS2]>;
-def : Proc<"niagara4", [FeatureV9, FeatureV8Deprecated, UsePopc,
- FeatureVIS, FeatureVIS2, FeatureVIS3]>;
-
+ : Processor<Name, NoItineraries, Features>;
+
+def : Proc<"generic", []>;
+def : Proc<"v7", []>;
+def : Proc<"v8", []>;
+def : Proc<"supersparc", []>;
+def : Proc<"sparclite", []>;
+def : Proc<"f934", []>;
+def : Proc<"hypersparc", []>;
+def : Proc<"sparclite86x", []>;
+def : Proc<"sparclet", []>;
+def : Proc<"tsc701", []>;
+def : Proc<"myriad2", []>;
+def : Proc<"myriad2.1", []>;
+def : Proc<"myriad2.2", []>;
+def : Proc<"v9", [ FeatureV9 ]>;
+def : Proc<"ultrasparc", [ FeatureV9, FeatureV8Deprecated, FeatureVIS ]>;
+def : Proc<"ultrasparc3",
+ [ FeatureV9, FeatureV8Deprecated, FeatureVIS, FeatureVIS2 ]>;
+def : Proc<"niagara",
+ [ FeatureV9, FeatureV8Deprecated, FeatureVIS, FeatureVIS2 ]>;
+def : Proc<"niagara2", [
+ FeatureV9, FeatureV8Deprecated, UsePopc, FeatureVIS, FeatureVIS2
+]>;
+def : Proc<"niagara3", [
+ FeatureV9, FeatureV8Deprecated, UsePopc, FeatureVIS, FeatureVIS2
+]>;
+def : Proc<"niagara4", [
+ FeatureV9, FeatureV8Deprecated, UsePopc, FeatureVIS, FeatureVIS2, FeatureVIS3
+]>;
+
+// LEON 2 FT generic
+def : Processor<"leon2", LEON2Itineraries, [ FeatureLeon ]>;
+
+// LEON 2 FT (AT697E)
+// AT697E: covers all the erratum fixes for the LEON2 AT697E.
+def : Processor<"at697e", LEON2Itineraries, [
+ FeatureLeon, ReplaceSDIV, FixCALL, IgnoreZeroFlag, InsertNOPDoublePrecision
+]>;
+
+// LEON 2 FT (AT697F)
+// AT697F: covers all the erratum fixes for the LEON2 AT697F.
+def : Processor<"at697f", LEON2Itineraries,
+ [ FeatureLeon, InsertNOPDoublePrecision ]>;
+
+// LEON 3 FT generic
+def : Processor<"leon3", LEON3Itineraries, [ FeatureLeon, UMACSMACSupport ]>;
+
+// LEON 3 FT (UT699). Provides features for the UT699 processor
+// - covers all the erratum fixes for LEON3, but does not support the CASA
+// instruction.
+def : Processor<"ut699", LEON3Itineraries, [
+ FeatureLeon, FixFSMULD, ReplaceFMULS, PreventRoundChange,
+ FixAllFDIVSQRT, InsertNOPLoad, FlushCacheLineSWAP, InsertNOPsLoadStore
+]>;
+
+// LEON3 FT (GR712RC). Provides features for the GR712RC processor.
+// - covers all the erratum fixes for LEON3 and supports the CASA
+// instruction.
+def : Processor<"gr712rc", LEON3Itineraries,
+ [ FeatureLeon, LeonCASA ]>;
+
+// LEON 4 FT generic
+def : Processor<"leon4", LEON4Itineraries,
+ [ FeatureLeon, LeonCASA ]>;
+
+// GR740: covers all the erratum fixes for LEON3, plus support for CASA and
+// the LEON 4 instruction timings.
+def : Processor<"gr740", LEON4Itineraries,
+ [ FeatureLeon, LeonCASA ]> {}
//===----------------------------------------------------------------------===//
// Declare the target which we are implementing
//===----------------------------------------------------------------------===//
def SparcAsmWriter : AsmWriter {
- string AsmWriterClassName = "InstPrinter";
+ string AsmWriterClassName = "InstPrinter";
int PassSubtarget = 1;
int Variant = 0;
}
@@ -101,6 +155,6 @@ def SparcAsmWriter : AsmWriter {
def Sparc : Target {
// Pull in Instruction Info:
let InstructionSet = SparcInstrInfo;
- let AssemblyParsers = [SparcAsmParser];
- let AssemblyWriters = [SparcAsmWriter];
+ let AssemblyParsers = [ SparcAsmParser ];
+ let AssemblyWriters = [ SparcAsmWriter ];
}
diff --git a/lib/Target/Sparc/SparcAsmPrinter.cpp b/lib/Target/Sparc/SparcAsmPrinter.cpp
index e3b0f5266747..c068440f7c05 100644
--- a/lib/Target/Sparc/SparcAsmPrinter.cpp
+++ b/lib/Target/Sparc/SparcAsmPrinter.cpp
@@ -18,7 +18,6 @@
#include "SparcInstrInfo.h"
#include "SparcTargetMachine.h"
#include "SparcTargetStreamer.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
@@ -54,7 +53,6 @@ namespace {
void printOperand(const MachineInstr *MI, int opNum, raw_ostream &OS);
void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &OS,
const char *Modifier = nullptr);
- void printCCOperand(const MachineInstr *MI, int opNum, raw_ostream &OS);
void EmitFunctionBodyStart() override;
void EmitInstruction(const MachineInstr *MI) override;
@@ -185,7 +183,7 @@ void SparcAsmPrinter::LowerGETPCXAndEmitMCInsts(const MachineInstr *MI,
MCOperand MCRegOP = MCOperand::createReg(MO.getReg());
- if (TM.getRelocationModel() != Reloc::PIC_) {
+ if (!isPositionIndependent()) {
// Just load the address of GOT to MCRegOP.
switch(TM.getCodeModel()) {
default:
@@ -376,6 +374,9 @@ void SparcAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
O << DL.getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << "_"
<< MO.getIndex();
break;
+ case MachineOperand::MO_Metadata:
+ MO.getMetadata()->printAsOperand(O, MMI->getModule());
+ break;
default:
llvm_unreachable("<unknown operand type>");
}
@@ -417,6 +418,7 @@ bool SparcAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
default:
// See if this is a generic print operand
return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
+ case 'f':
case 'r':
break;
}
diff --git a/lib/Target/Sparc/SparcFrameLowering.cpp b/lib/Target/Sparc/SparcFrameLowering.cpp
index 39b5e809c9be..87b01553b37e 100644
--- a/lib/Target/Sparc/SparcFrameLowering.cpp
+++ b/lib/Target/Sparc/SparcFrameLowering.cpp
@@ -146,7 +146,7 @@ void SparcFrameLowering::emitPrologue(MachineFunction &MF,
// Finally, ensure that the size is sufficiently aligned for the
// data on the stack.
if (MFI->getMaxAlignment() > 0) {
- NumBytes = RoundUpToAlignment(NumBytes, MFI->getMaxAlignment());
+ NumBytes = alignTo(NumBytes, MFI->getMaxAlignment());
}
// Update stack size with corrected value.
@@ -183,7 +183,7 @@ void SparcFrameLowering::emitPrologue(MachineFunction &MF,
}
}
-void SparcFrameLowering::
+MachineBasicBlock::iterator SparcFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
if (!hasReservedCallFrame(MF)) {
@@ -195,7 +195,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
if (Size)
emitSPAdjustment(MF, MBB, I, Size, SP::ADDrr, SP::ADDri);
}
- MBB.erase(I);
+ return MBB.erase(I);
}
@@ -350,7 +350,7 @@ void SparcFrameLowering::remapRegsForLeafProc(MachineFunction &MF) const {
}
assert(verifyLeafProcRegUse(&MRI));
-#ifdef XDEBUG
+#ifdef EXPENSIVE_CHECKS
MF.verify(0, "After LeafProc Remapping");
#endif
}
diff --git a/lib/Target/Sparc/SparcFrameLowering.h b/lib/Target/Sparc/SparcFrameLowering.h
index cbb4dc04fc23..ac0e69ccde1e 100644
--- a/lib/Target/Sparc/SparcFrameLowering.h
+++ b/lib/Target/Sparc/SparcFrameLowering.h
@@ -29,7 +29,7 @@ public:
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
- void
+ MachineBasicBlock::iterator
eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const override;
diff --git a/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/lib/Target/Sparc/SparcISelDAGToDAG.cpp
index c4c641659df3..07948a33cde6 100644
--- a/lib/Target/Sparc/SparcISelDAGToDAG.cpp
+++ b/lib/Target/Sparc/SparcISelDAGToDAG.cpp
@@ -15,7 +15,6 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Intrinsics.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -42,7 +41,7 @@ public:
return SelectionDAGISel::runOnMachineFunction(MF);
}
- SDNode *Select(SDNode *N) override;
+ void Select(SDNode *N) override;
// Complex Pattern Selectors.
bool SelectADDRrr(SDValue N, SDValue &R1, SDValue &R2);
@@ -63,7 +62,7 @@ public:
private:
SDNode* getGlobalBaseReg();
- SDNode *SelectInlineAsm(SDNode *N);
+ bool tryInlineAsm(SDNode *N);
};
} // end anonymous namespace
@@ -155,7 +154,7 @@ bool SparcDAGToDAGISel::SelectADDRrr(SDValue Addr, SDValue &R1, SDValue &R2) {
// TODO: fix inline asm support so I can simply tell it that 'i64'
// inputs to asm need to be allocated to the IntPair register type,
// and have that work. Then, delete this function.
-SDNode *SparcDAGToDAGISel::SelectInlineAsm(SDNode *N){
+bool SparcDAGToDAGISel::tryInlineAsm(SDNode *N){
std::vector<SDValue> AsmNodeOperands;
unsigned Flag, Kind;
bool Changed = false;
@@ -310,31 +309,32 @@ SDNode *SparcDAGToDAGISel::SelectInlineAsm(SDNode *N){
if (Glue.getNode())
AsmNodeOperands.push_back(Glue);
if (!Changed)
- return nullptr;
+ return false;
SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
CurDAG->getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
New->setNodeId(-1);
- return New.getNode();
+ ReplaceNode(N, New.getNode());
+ return true;
}
-SDNode *SparcDAGToDAGISel::Select(SDNode *N) {
+void SparcDAGToDAGISel::Select(SDNode *N) {
SDLoc dl(N);
if (N->isMachineOpcode()) {
N->setNodeId(-1);
- return nullptr; // Already selected.
+ return; // Already selected.
}
switch (N->getOpcode()) {
default: break;
- case ISD::INLINEASM: {
- SDNode *ResNode = SelectInlineAsm(N);
- if (ResNode)
- return ResNode;
+ case ISD::INLINEASM: {
+ if (tryInlineAsm(N))
+ return;
break;
}
case SPISD::GLOBAL_BASE_REG:
- return getGlobalBaseReg();
+ ReplaceNode(N, getGlobalBaseReg());
+ return;
case ISD::SDIV:
case ISD::UDIV: {
@@ -360,8 +360,8 @@ SDNode *SparcDAGToDAGISel::Select(SDNode *N) {
// FIXME: Handle div by immediate.
unsigned Opcode = N->getOpcode() == ISD::SDIV ? SP::SDIVrr : SP::UDIVrr;
- return CurDAG->SelectNodeTo(N, Opcode, MVT::i32, DivLHS, DivRHS,
- TopPart);
+ CurDAG->SelectNodeTo(N, Opcode, MVT::i32, DivLHS, DivRHS, TopPart);
+ return;
}
case ISD::MULHU:
case ISD::MULHS: {
@@ -373,11 +373,12 @@ SDNode *SparcDAGToDAGISel::Select(SDNode *N) {
CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::i32, MulLHS, MulRHS);
SDValue ResultHigh = SDValue(Mul, 1);
ReplaceUses(SDValue(N, 0), ResultHigh);
- return nullptr;
+ CurDAG->RemoveDeadNode(N);
+ return;
}
}
- return SelectCode(N);
+ SelectCode(N);
}
@@ -391,6 +392,7 @@ SparcDAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op,
switch (ConstraintID) {
default: return true;
case InlineAsm::Constraint_i:
+ case InlineAsm::Constraint_o:
case InlineAsm::Constraint_m: // memory
if (!SelectADDRrr(Op, Op0, Op1))
SelectADDRri(Op, Op0, Op1);
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index 5e70ffe2223c..8738bc82683d 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -18,6 +18,7 @@
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "SparcTargetObjectFile.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -31,7 +32,6 @@
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
-
//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
@@ -184,29 +184,30 @@ static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
// callee's register window. This function translates registers to the
// corresponding caller window %o register.
static unsigned toCallerWindow(unsigned Reg) {
- assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7 && "Unexpected enum");
+ static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
+ "Unexpected enum");
if (Reg >= SP::I0 && Reg <= SP::I7)
return Reg - SP::I0 + SP::O0;
return Reg;
}
SDValue
-SparcTargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool IsVarArg,
+SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const {
+ const SDLoc &DL, SelectionDAG &DAG) const {
if (Subtarget->is64Bit())
return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}
SDValue
-SparcTargetLowering::LowerReturn_32(SDValue Chain,
- CallingConv::ID CallConv, bool IsVarArg,
+SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const {
+ const SDLoc &DL, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
// CCValAssign - represent the assignment of the return value to locations.
@@ -287,11 +288,11 @@ SparcTargetLowering::LowerReturn_32(SDValue Chain,
// Lower return values for the 64-bit ABI.
// Return values are passed the exactly the same way as function arguments.
SDValue
-SparcTargetLowering::LowerReturn_64(SDValue Chain,
- CallingConv::ID CallConv, bool IsVarArg,
+SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const {
+ const SDLoc &DL, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to locations.
SmallVector<CCValAssign, 16> RVLocs;
@@ -363,14 +364,10 @@ SparcTargetLowering::LowerReturn_64(SDValue Chain,
return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}
-SDValue SparcTargetLowering::
-LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool IsVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc DL,
- SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue SparcTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
if (Subtarget->is64Bit())
return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
DL, DAG, InVals);
@@ -381,14 +378,10 @@ LowerFormalArguments(SDValue Chain,
/// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values. TODO: we should
/// pass FP values in FP registers for fastcc functions.
-SDValue SparcTargetLowering::
-LowerFormalArguments_32(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl,
- SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue SparcTargetLowering::LowerFormalArguments_32(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
@@ -412,9 +405,8 @@ LowerFormalArguments_32(SDValue Chain,
// Get SRet from [%fp+64].
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, 64, true);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
- SDValue Arg = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
- MachinePointerInfo(),
- false, false, false, 0);
+ SDValue Arg =
+ DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
InVals.push_back(Arg);
continue;
}
@@ -435,9 +427,7 @@ LowerFormalArguments_32(SDValue Chain,
int FrameIdx = MF.getFrameInfo()->
CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
- LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
- MachinePointerInfo(),
- false, false, false, 0);
+ LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
} else {
unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
&SP::IntRegsRegClass);
@@ -473,16 +463,15 @@ LowerFormalArguments_32(SDValue Chain,
auto PtrVT = getPointerTy(DAG.getDataLayout());
if (VA.needsCustom()) {
- assert(VA.getValVT() == MVT::f64 || MVT::v2i32);
+ assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
// If it is double-word aligned, just load.
if (Offset % 8 == 0) {
int FI = MF.getFrameInfo()->CreateFixedObject(8,
Offset,
true);
SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
- SDValue Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
- MachinePointerInfo(),
- false,false, false, 0);
+ SDValue Load =
+ DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
InVals.push_back(Load);
continue;
}
@@ -491,17 +480,15 @@ LowerFormalArguments_32(SDValue Chain,
Offset,
true);
SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
- SDValue HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
- MachinePointerInfo(),
- false, false, false, 0);
+ SDValue HiVal =
+ DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
int FI2 = MF.getFrameInfo()->CreateFixedObject(4,
Offset+4,
true);
SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
- SDValue LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr2,
- MachinePointerInfo(),
- false, false, false, 0);
+ SDValue LoVal =
+ DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
if (IsLittleEndian)
std::swap(LoVal, HiVal);
@@ -519,9 +506,7 @@ LowerFormalArguments_32(SDValue Chain,
SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
SDValue Load ;
if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
- Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
- MachinePointerInfo(),
- false, false, false, 0);
+ Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
} else if (VA.getValVT() == MVT::f128) {
report_fatal_error("SPARCv8 does not handle f128 in calls; "
"pass indirectly");
@@ -573,9 +558,8 @@ LowerFormalArguments_32(SDValue Chain,
true);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
- OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr,
- MachinePointerInfo(),
- false, false, 0));
+ OutChains.push_back(
+ DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
ArgOffset += 4;
}
@@ -589,14 +573,10 @@ LowerFormalArguments_32(SDValue Chain,
}
// Lower formal arguments for the 64 bit ABI.
-SDValue SparcTargetLowering::
-LowerFormalArguments_64(SDValue Chain,
- CallingConv::ID CallConv,
- bool IsVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc DL,
- SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue SparcTargetLowering::LowerFormalArguments_64(
+ SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
// Analyze arguments according to CC_Sparc64.
@@ -659,10 +639,10 @@ LowerFormalArguments_64(SDValue Chain,
if (VA.isExtInLoc())
Offset += 8 - ValSize;
int FI = MF.getFrameInfo()->CreateFixedObject(ValSize, Offset, true);
- InVals.push_back(DAG.getLoad(
- VA.getValVT(), DL, Chain,
- DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
- MachinePointerInfo::getFixedStack(MF, FI), false, false, false, 0));
+ InVals.push_back(
+ DAG.getLoad(VA.getValVT(), DL, Chain,
+ DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
+ MachinePointerInfo::getFixedStack(MF, FI)));
}
if (!IsVarArg)
@@ -690,9 +670,9 @@ LowerFormalArguments_64(SDValue Chain,
SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
int FI = MF.getFrameInfo()->CreateFixedObject(8, ArgOffset + ArgArea, true);
auto PtrVT = getPointerTy(MF.getDataLayout());
- OutChains.push_back(DAG.getStore(
- Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
- MachinePointerInfo::getFixedStack(MF, FI), false, false, 0));
+ OutChains.push_back(
+ DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
+ MachinePointerInfo::getFixedStack(MF, FI)));
}
if (!OutChains.empty())
@@ -773,16 +753,22 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
unsigned Size = Flags.getByValSize();
unsigned Align = Flags.getByValAlign();
- int FI = MFI->CreateStackObject(Size, Align, false);
- SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
- SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
-
- Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
- false, // isVolatile,
- (Size <= 32), // AlwaysInline if size <= 32,
- false, // isTailCall
- MachinePointerInfo(), MachinePointerInfo());
- ByValArgs.push_back(FIPtr);
+ if (Size > 0U) {
+ int FI = MFI->CreateStackObject(Size, Align, false);
+ SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+ SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
+
+ Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
+ false, // isVolatile,
+ (Size <= 32), // AlwaysInline if size <= 32,
+ false, // isTailCall
+ MachinePointerInfo(), MachinePointerInfo());
+ ByValArgs.push_back(FIPtr);
+ } else {
+ ByValArgs.push_back(SDValue());
+ }
}
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
@@ -803,8 +789,12 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
// Use local copy if it is a byval arg.
- if (Flags.isByVal())
+ if (Flags.isByVal()) {
Arg = ByValArgs[byvalArgIdx++];
+ if (!Arg) {
+ continue;
+ }
+ }
// Promote the value if needed.
switch (VA.getLocInfo()) {
@@ -830,9 +820,8 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
- MachinePointerInfo(),
- false, false, 0));
+ MemOpChains.push_back(
+ DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
hasStructRetAttr = true;
continue;
}
@@ -847,9 +836,8 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
- MachinePointerInfo(),
- false, false, 0));
+ MemOpChains.push_back(
+ DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
continue;
}
}
@@ -884,9 +872,8 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, dl, Part1, PtrOff,
- MachinePointerInfo(),
- false, false, 0));
+ MemOpChains.push_back(
+ DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
}
} else {
unsigned Offset = VA.getLocMemOffset() + StackOffset;
@@ -894,15 +881,13 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, dl, Part0, PtrOff,
- MachinePointerInfo(),
- false, false, 0));
+ MemOpChains.push_back(
+ DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
// Store the second part.
PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, dl, Part1, PtrOff,
- MachinePointerInfo(),
- false, false, 0));
+ MemOpChains.push_back(
+ DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
}
continue;
}
@@ -926,9 +911,8 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
dl);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
- MachinePointerInfo(),
- false, false, 0));
+ MemOpChains.push_back(
+ DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
}
@@ -953,8 +937,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
// If the callee is a GlobalAddress node (quite common, every direct call is)
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
// Likewise ExternalSymbol -> TargetExternalSymbol.
- unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_)
- ? SparcMCExpr::VK_Sparc_WPLT30 : 0);
+ unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30 : 0;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
@@ -999,15 +982,55 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
- Chain = DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
- RVLocs[i].getValVT(), InFlag).getValue(1);
- InFlag = Chain.getValue(2);
- InVals.push_back(Chain.getValue(0));
+ if (RVLocs[i].getLocVT() == MVT::v2i32) {
+ SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
+ SDValue Lo = DAG.getCopyFromReg(
+ Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
+ Chain = Lo.getValue(1);
+ InFlag = Lo.getValue(2);
+ Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
+ DAG.getConstant(0, dl, MVT::i32));
+ SDValue Hi = DAG.getCopyFromReg(
+ Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
+ Chain = Hi.getValue(1);
+ InFlag = Hi.getValue(2);
+ Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
+ DAG.getConstant(1, dl, MVT::i32));
+ InVals.push_back(Vec);
+ } else {
+ Chain =
+ DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
+ RVLocs[i].getValVT(), InFlag)
+ .getValue(1);
+ InFlag = Chain.getValue(2);
+ InVals.push_back(Chain.getValue(0));
+ }
}
return Chain;
}
+// FIXME? Maybe this could be a TableGen attribute on some registers and
+// this table could be generated automatically from RegInfo.
+unsigned SparcTargetLowering::getRegisterByName(const char* RegName, EVT VT,
+ SelectionDAG &DAG) const {
+ unsigned Reg = StringSwitch<unsigned>(RegName)
+ .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
+ .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
+ .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
+ .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
+ .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
+ .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
+ .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
+ .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
+ .Default(0);
+
+ if (Reg)
+ return Reg;
+
+  report_fatal_error("Invalid register name for global variable");
+}
+
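
For context, getRegisterByName backs the llvm.read_register/llvm.write_register intrinsics and named-register global variables. A minimal sketch of user code that would reach it, assuming Clang as the frontend (the variable name is hypothetical):

    // Hypothetical user code: a global register variable pinned to %g7.
    // Reads of it become llvm.read_register(metadata !"g7"), and the hook
    // above resolves the string "g7" to SP::G7.
    register unsigned long ThreadPtr asm("g7");

    unsigned long readThreadPointer() { return ThreadPtr; }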
 // This function returns true if CalleeName is an ABI function that returns
// a long double (fp128).
static bool isFP128ABICall(const char *CalleeName)
@@ -1131,7 +1154,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());
// Keep stack frames 16-byte aligned.
- ArgsSize = RoundUpToAlignment(ArgsSize, 16);
+ ArgsSize = alignTo(ArgsSize, 16);
// Varargs calls require special treatment.
if (CLI.IsVarArg)
@@ -1194,16 +1217,13 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
// Store to %sp+BIAS+128+Offset
- SDValue Store = DAG.getStore(Chain, DL, Arg, HiPtrOff,
- MachinePointerInfo(),
- false, false, 0);
+ SDValue Store =
+ DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
// Load into Reg and Reg+1
- SDValue Hi64 = DAG.getLoad(MVT::i64, DL, Store, HiPtrOff,
- MachinePointerInfo(),
- false, false, false, 0);
- SDValue Lo64 = DAG.getLoad(MVT::i64, DL, Store, LoPtrOff,
- MachinePointerInfo(),
- false, false, false, 0);
+ SDValue Hi64 =
+ DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
+ SDValue Lo64 =
+ DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
Hi64));
RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
@@ -1242,9 +1262,8 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
Subtarget->getStackPointerBias() +
128, DL);
PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, DL, Arg, PtrOff,
- MachinePointerInfo(),
- false, false, 0));
+ MemOpChains.push_back(
+ DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
}
// Emit all stores, make sure they occur before the call.
@@ -1267,8 +1286,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// Likewise ExternalSymbol -> TargetExternalSymbol.
SDValue Callee = CLI.Callee;
bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
- unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_)
- ? SparcMCExpr::VK_Sparc_WPLT30 : 0);
+ unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30 : 0;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
@@ -1375,6 +1393,14 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
// TargetLowering Implementation
//===----------------------------------------------------------------------===//
+TargetLowering::AtomicExpansionKind
+SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+ if (AI->getOperation() == AtomicRMWInst::Xchg &&
+ AI->getType()->getPrimitiveSizeInBits() == 32)
+ return AtomicExpansionKind::None; // Uses xchg instruction
+
+ return AtomicExpansionKind::CmpXChg;
+}
+
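
For the operations routed to AtomicExpansionKind::CmpXChg, the generic AtomicExpand pass rewrites the atomicrmw into a compare-and-swap retry loop at the IR level. A sketch of the same loop in ordinary C++, for illustration only (std::atomic stands in for the IR-level cmpxchg):

    #include <atomic>

    // Illustrative: atomicrmw add expressed as the CAS loop that
    // AtomicExpansionKind::CmpXChg requests. A 32-bit Xchg skips this and
    // maps directly onto the SPARC swap instruction, per the hook above.
    int atomicAdd(std::atomic<int> &A, int V) {
      int Old = A.load(std::memory_order_relaxed);
      while (!A.compare_exchange_weak(Old, Old + V))
        ; // on failure, Old is refreshed; retry until the CAS lands
      return Old;
    }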
/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
@@ -1421,7 +1447,7 @@ static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
}
}
-SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
+SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
const SparcSubtarget &STI)
: TargetLowering(TM), Subtarget(&STI) {
MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());
@@ -1436,9 +1462,11 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
// Set up the register classes.
addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
- addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
- addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
- addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
+ if (!Subtarget->useSoftFloat()) {
+ addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
+ addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
+ addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
+ }
if (Subtarget->is64Bit()) {
addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
} else {
@@ -1559,6 +1587,9 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
+ setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
+ setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
+
if (Subtarget->is64Bit()) {
setOperationAction(ISD::ADDC, MVT::i64, Custom);
setOperationAction(ISD::ADDE, MVT::i64, Custom);
@@ -1574,9 +1605,7 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
setOperationAction(ISD::CTPOP, MVT::i64,
Subtarget->usePopc() ? Legal : Expand);
setOperationAction(ISD::CTTZ , MVT::i64, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
setOperationAction(ISD::CTLZ , MVT::i64, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
setOperationAction(ISD::BSWAP, MVT::i64, Expand);
setOperationAction(ISD::ROTL , MVT::i64, Expand);
setOperationAction(ISD::ROTR , MVT::i64, Expand);
@@ -1584,15 +1613,17 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
}
// ATOMICs.
- // FIXME: We insert fences for each atomics and generate sub-optimal code
- // for PSO/TSO. Also, implement other atomicrmw operations.
+ // Atomics are supported on SparcV9. 32-bit atomics are also
+ // supported by some Leon SparcV8 variants. Otherwise, atomics
+ // are unsupported.
+ if (Subtarget->isV9() || Subtarget->hasLeonCasa())
+ setMaxAtomicSizeInBitsSupported(64);
+ else
+ setMaxAtomicSizeInBitsSupported(0);
- setInsertFencesForAtomic(true);
+ setMinCmpXchgSizeInBits(32);
setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
- setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32,
- (Subtarget->isV9() ? Legal: Expand));
-
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);
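
In practice this means a plain std::atomic operation stays inline on V9 (or on a Leon part with CASA) but becomes an __atomic_* libcall elsewhere, since a maximum supported size of 0 routes everything through the runtime. A small illustrative example:

    #include <atomic>

    // Illustrative: with setMaxAtomicSizeInBitsSupported(64) this stays an
    // inline CAS/swap sequence; with the 0 setting it is lowered to a
    // libatomic call such as __atomic_fetch_add_8 instead.
    long bumpCounter(std::atomic<long> &Counter) {
      return Counter.fetch_add(1, std::memory_order_seq_cst);
    }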
@@ -1629,9 +1660,7 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
setOperationAction(ISD::FREM , MVT::f32, Expand);
setOperationAction(ISD::FMA , MVT::f32, Expand);
setOperationAction(ISD::CTTZ , MVT::i32, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
setOperationAction(ISD::CTLZ , MVT::i32, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
setOperationAction(ISD::ROTL , MVT::i32, Expand);
setOperationAction(ISD::ROTR , MVT::i32, Expand);
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
@@ -1730,7 +1759,7 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
// Setup Runtime library names.
- if (Subtarget->is64Bit()) {
+ if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
setLibcallName(RTLIB::ADD_F128, "_Qp_add");
setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
@@ -1748,7 +1777,7 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
- } else {
+ } else if (!Subtarget->useSoftFloat()) {
setLibcallName(RTLIB::ADD_F128, "_Q_add");
setLibcallName(RTLIB::SUB_F128, "_Q_sub");
setLibcallName(RTLIB::MUL_F128, "_Q_mul");
@@ -1769,35 +1798,56 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
}
}
+ if (Subtarget->fixAllFDIVSQRT()) {
+    // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead, as
+    // the former instructions trigger errata on LEON processors.
+ setOperationAction(ISD::FDIV, MVT::f32, Promote);
+ setOperationAction(ISD::FSQRT, MVT::f32, Promote);
+ }
+
+ if (Subtarget->replaceFMULS()) {
+    // Promote FMULS to FMULD instructions instead, as
+    // the former instruction triggers errata on LEON processors.
+ setOperationAction(ISD::FMUL, MVT::f32, Promote);
+ }
+
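Promote on an f32 operation makes the legalizer widen to f64, operate there, and truncate the result, which is what sidesteps the single-precision LEON errata. The effect is equivalent to the following source-level rewrite, shown for illustration:

    // Illustrative: what Promote on FDIV f32 amounts to. Only the
    // double-precision FDIVD is ever issued; the FDIVS affected by the
    // LEON errata never appears.
    float safeDiv(float A, float B) {
      return static_cast<float>(static_cast<double>(A) /
                                static_cast<double>(B));
    }
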
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+
setMinFunctionAlignment(2);
computeRegisterProperties(Subtarget->getRegisterInfo());
}
+bool SparcTargetLowering::useSoftFloat() const {
+ return Subtarget->useSoftFloat();
+}
+
const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch ((SPISD::NodeType)Opcode) {
- case SPISD::FIRST_NUMBER: break;
- case SPISD::CMPICC: return "SPISD::CMPICC";
- case SPISD::CMPFCC: return "SPISD::CMPFCC";
- case SPISD::BRICC: return "SPISD::BRICC";
- case SPISD::BRXCC: return "SPISD::BRXCC";
- case SPISD::BRFCC: return "SPISD::BRFCC";
- case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
- case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
- case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
- case SPISD::Hi: return "SPISD::Hi";
- case SPISD::Lo: return "SPISD::Lo";
- case SPISD::FTOI: return "SPISD::FTOI";
- case SPISD::ITOF: return "SPISD::ITOF";
- case SPISD::FTOX: return "SPISD::FTOX";
- case SPISD::XTOF: return "SPISD::XTOF";
- case SPISD::CALL: return "SPISD::CALL";
- case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
+ case SPISD::FIRST_NUMBER: break;
+ case SPISD::CMPICC: return "SPISD::CMPICC";
+ case SPISD::CMPFCC: return "SPISD::CMPFCC";
+ case SPISD::BRICC: return "SPISD::BRICC";
+ case SPISD::BRXCC: return "SPISD::BRXCC";
+ case SPISD::BRFCC: return "SPISD::BRFCC";
+ case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
+ case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
+ case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
+ case SPISD::EH_SJLJ_SETJMP: return "SPISD::EH_SJLJ_SETJMP";
+ case SPISD::EH_SJLJ_LONGJMP: return "SPISD::EH_SJLJ_LONGJMP";
+ case SPISD::Hi: return "SPISD::Hi";
+ case SPISD::Lo: return "SPISD::Lo";
+ case SPISD::FTOI: return "SPISD::FTOI";
+ case SPISD::ITOF: return "SPISD::ITOF";
+ case SPISD::FTOX: return "SPISD::FTOX";
+ case SPISD::XTOF: return "SPISD::XTOF";
+ case SPISD::CALL: return "SPISD::CALL";
+ case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
- case SPISD::FLUSHW: return "SPISD::FLUSHW";
- case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
- case SPISD::TLS_LD: return "SPISD::TLS_LD";
- case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
+ case SPISD::FLUSHW: return "SPISD::FLUSHW";
+ case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
+ case SPISD::TLS_LD: return "SPISD::TLS_LD";
+ case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
}
return nullptr;
}
@@ -1902,8 +1952,8 @@ SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
EVT VT = getPointerTy(DAG.getDataLayout());
- // Handle PIC mode first.
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
+  // Handle PIC mode first. SPARC needs a GOT load for every variable!
+ if (isPositionIndependent()) {
// This is the pic32 code model, the GOT is known to be smaller than 4GB.
SDValue HiLo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22,
SparcMCExpr::VK_Sparc_GOT10, DAG);
@@ -1914,8 +1964,7 @@ SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
MFI->setHasCalls(true);
return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, false, false, 0);
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}
// This is one of the absolute code models.
@@ -2004,16 +2053,15 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
SDValue Symbol = withTargetFlags(Op, callTF, DAG);
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
- SmallVector<SDValue, 4> Ops;
- Ops.push_back(Chain);
- Ops.push_back(Callee);
- Ops.push_back(Symbol);
- Ops.push_back(DAG.getRegister(SP::O0, PtrVT));
const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
DAG.getMachineFunction(), CallingConv::C);
assert(Mask && "Missing call preserved mask for calling convention");
- Ops.push_back(DAG.getRegisterMask(Mask));
- Ops.push_back(InFlag);
+ SDValue Ops[] = {Chain,
+ Callee,
+ Symbol,
+ DAG.getRegister(SP::O0, PtrVT),
+ DAG.getRegisterMask(Mask),
+ InFlag};
Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
InFlag = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
@@ -2068,10 +2116,10 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
DAG.getRegister(SP::G7, PtrVT), Offset);
}
-SDValue
-SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args,
- SDValue Arg, SDLoc DL,
- SelectionDAG &DAG) const {
+SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
+ ArgListTy &Args, SDValue Arg,
+ const SDLoc &DL,
+ SelectionDAG &DAG) const {
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
EVT ArgVT = Arg.getValueType();
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
@@ -2084,14 +2132,8 @@ SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args,
// Create a stack object and pass the pointer to the library function.
int FI = MFI->CreateStackObject(16, 8, false);
SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
- Chain = DAG.getStore(Chain,
- DL,
- Entry.Node,
- FIPtr,
- MachinePointerInfo(),
- false,
- false,
- 8);
+ Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
+ /* Alignment = */ 8);
Entry.Node = FIPtr;
Entry.Ty = PointerType::getUnqual(ArgTy);
@@ -2136,7 +2178,7 @@ SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
}
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
- .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args), 0);
+ .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
@@ -2149,19 +2191,13 @@ SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
Chain = CallInfo.second;
// Load RetPtr to get the return value.
- return DAG.getLoad(Op.getValueType(),
- SDLoc(Op),
- Chain,
- RetPtr,
- MachinePointerInfo(),
- false, false, false, 8);
+ return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
+ MachinePointerInfo(), /* Alignment = */ 8);
}
-SDValue
-SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
- unsigned &SPCC,
- SDLoc DL,
- SelectionDAG &DAG) const {
+SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
+ unsigned &SPCC, const SDLoc &DL,
+ SelectionDAG &DAG) const {
const char *LibCall = nullptr;
bool is64Bit = Subtarget->is64Bit();
@@ -2193,7 +2229,7 @@ SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(DL).setChain(Chain)
- .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
+ .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
@@ -2460,6 +2496,20 @@ static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
+SDValue SparcTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI) const {
+ SDLoc DL(Op);
+ return DAG.getNode(SPISD::EH_SJLJ_SETJMP, DL,
+                     DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
+                     Op.getOperand(1));
+}
+
+SDValue SparcTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI) const {
+ SDLoc DL(Op);
+  return DAG.getNode(SPISD::EH_SJLJ_LONGJMP, DL, MVT::Other, Op.getOperand(0),
+                     Op.getOperand(1));
+}
+
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
const SparcTargetLowering &TLI) {
MachineFunction &MF = DAG.getMachineFunction();
@@ -2477,7 +2527,7 @@ static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
- MachinePointerInfo(SV), false, false, 0);
+ MachinePointerInfo(SV));
}
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
@@ -2488,20 +2538,19 @@ static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
EVT PtrVT = VAListPtr.getValueType();
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
SDLoc DL(Node);
- SDValue VAList = DAG.getLoad(PtrVT, DL, InChain, VAListPtr,
- MachinePointerInfo(SV), false, false, false, 0);
+ SDValue VAList =
+ DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
// Increment the pointer, VAList, to the next vaarg.
SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
DAG.getIntPtrConstant(VT.getSizeInBits()/8,
DL));
// Store the incremented VAList to the legalized pointer.
- InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr,
- VAListPtr, MachinePointerInfo(SV), false, false, 0);
+ InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
+ MachinePointerInfo(SV));
// Load the actual argument out of the pointer VAList.
// We can't count on greater alignment than the word size.
return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(),
- false, false, false,
- std::min(PtrVT.getSizeInBits(), VT.getSizeInBits())/8);
+ std::min(PtrVT.getSizeInBits(), VT.getSizeInBits()) / 8);
}
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
@@ -2564,8 +2613,7 @@ static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
while (depth--) {
SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
DAG.getIntPtrConstant(Offset, dl));
- FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo(),
- false, false, false, 0);
+ FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
}
if (Subtarget->is64Bit())
FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
@@ -2580,7 +2628,6 @@ static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
uint64_t depth = Op.getConstantOperandVal(0);
return getFRAMEADDR(depth, Op, DAG, Subtarget);
-
}
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
@@ -2613,30 +2660,34 @@ static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
dl, VT,
FrameAddr,
DAG.getIntPtrConstant(Offset, dl));
- RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr,
- MachinePointerInfo(), false, false, false, 0);
+ RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
return RetAddr;
}
-static SDValue LowerF64Op(SDValue Op, SelectionDAG &DAG, unsigned opcode)
-{
- SDLoc dl(Op);
-
- assert(Op.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
+static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
+ unsigned opcode) {
+ assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
assert(opcode == ISD::FNEG || opcode == ISD::FABS);
// Lower fneg/fabs on f64 to fneg/fabs on f32.
// fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
// fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
- SDValue SrcReg64 = Op.getOperand(0);
+  // Note: in little-endian, the halves of the floating-point value are
+  // stored in the registers in the opposite order, so the subreg with the
+  // sign bit is the highest-numbered (odd) one, rather than the
+  // lowest-numbered (even) one.
+
SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
SrcReg64);
SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
SrcReg64);
- Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
+ if (DAG.getDataLayout().isLittleEndian())
+ Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
+ else
+ Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
dl, MVT::f64), 0);
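
The trick here is that fneg/fabs on f64 only needs to touch the sign bit, which lives in a single f32 half of the register pair; the other half is copied unchanged. A portable bit-level sketch of the same idea, assuming IEEE-754 layout (host code, for illustration only):

    #include <cstdint>
    #include <cstring>

    // Illustrative: negate a double by flipping the sign bit in whichever
    // 32-bit half holds it -- index 0 on a big-endian layout, index 1 on a
    // little-endian one, mirroring the even/odd subreg choice above.
    double negateViaHalf(double D, bool IsLittleEndian) {
      uint32_t Halves[2];
      std::memcpy(Halves, &D, sizeof(D));
      Halves[IsLittleEndian ? 1 : 0] ^= UINT32_C(0x80000000);
      std::memcpy(&D, Halves, sizeof(D));
      return D;
    }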
@@ -2652,29 +2703,22 @@ static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
{
SDLoc dl(Op);
LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
- assert(LdNode && LdNode->getOffset().getOpcode() == ISD::UNDEF
+ assert(LdNode && LdNode->getOffset().isUndef()
&& "Unexpected node type");
unsigned alignment = LdNode->getAlignment();
if (alignment > 8)
alignment = 8;
- SDValue Hi64 = DAG.getLoad(MVT::f64,
- dl,
- LdNode->getChain(),
- LdNode->getBasePtr(),
- LdNode->getPointerInfo(),
- false, false, false, alignment);
+ SDValue Hi64 =
+ DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
+ LdNode->getPointerInfo(), alignment);
EVT addrVT = LdNode->getBasePtr().getValueType();
SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
LdNode->getBasePtr(),
DAG.getConstant(8, dl, addrVT));
- SDValue Lo64 = DAG.getLoad(MVT::f64,
- dl,
- LdNode->getChain(),
- LoPtr,
- LdNode->getPointerInfo(),
- false, false, false, alignment);
+ SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
+ LdNode->getPointerInfo(), alignment);
SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
@@ -2713,7 +2757,7 @@ static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
SDLoc dl(Op);
StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
- assert(StNode && StNode->getOffset().getOpcode() == ISD::UNDEF
+ assert(StNode && StNode->getOffset().isUndef()
&& "Unexpected node type");
SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
@@ -2734,22 +2778,15 @@ static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
alignment = 8;
SDValue OutChains[2];
- OutChains[0] = DAG.getStore(StNode->getChain(),
- dl,
- SDValue(Hi64, 0),
- StNode->getBasePtr(),
- MachinePointerInfo(),
- false, false, alignment);
+ OutChains[0] =
+ DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
+ StNode->getBasePtr(), MachinePointerInfo(), alignment);
EVT addrVT = StNode->getBasePtr().getValueType();
SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
StNode->getBasePtr(),
DAG.getConstant(8, dl, addrVT));
- OutChains[1] = DAG.getStore(StNode->getChain(),
- dl,
- SDValue(Lo64, 0),
- LoPtr,
- MachinePointerInfo(),
- false, false, alignment);
+ OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
+ MachinePointerInfo(), alignment);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
@@ -2768,8 +2805,7 @@ static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
SDValue Chain = DAG.getStore(
St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
- St->isVolatile(), St->isNonTemporal(), St->getAlignment(),
- St->getAAInfo());
+ St->isVolatile(), St->getMemOperand()->getFlags(), St->getAAInfo());
return Chain;
}
@@ -2780,24 +2816,35 @@ static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
&& "invalid opcode");
+ SDLoc dl(Op);
+
if (Op.getValueType() == MVT::f64)
- return LowerF64Op(Op, DAG, Op.getOpcode());
+ return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
if (Op.getValueType() != MVT::f128)
return Op;
// Lower fabs/fneg on f128 to fabs/fneg on f64
// fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
+ // (As with LowerF64Op, on little-endian, we need to negate the odd
+ // subreg)
- SDLoc dl(Op);
SDValue SrcReg128 = Op.getOperand(0);
SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
SrcReg128);
SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
SrcReg128);
- if (isV9)
- Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
- else
- Hi64 = LowerF64Op(Hi64, DAG, Op.getOpcode());
+
+ if (DAG.getDataLayout().isLittleEndian()) {
+ if (isV9)
+ Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
+ else
+ Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
+ } else {
+ if (isV9)
+ Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
+ else
+ Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
+ }
SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
dl, MVT::f128), 0);
@@ -2906,12 +2953,25 @@ static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
}
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
+ if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
+ // Expand with a fence.
+ return SDValue();
+
// Monotonic load/stores are legal.
- if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
- return Op;
+ return Op;
+}
- // Otherwise, expand with a fence.
- return SDValue();
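
Together with shouldInsertFencesForAtomic (see the header change below), returning SDValue() here makes the generic code bracket anything stronger than monotonic with explicit fences. Illustrative user code, with std::atomic standing in for the IR:

    #include <atomic>

    // Illustrative: the relaxed store is legal as-is (a plain st); the
    // release store returns SDValue() above, so it is expanded with a
    // fence around the plain store instead.
    void publish(std::atomic<int> &Flag) {
      Flag.store(1, std::memory_order_relaxed);
      Flag.store(2, std::memory_order_release);
    }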
+SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
+ SelectionDAG &DAG) const {
+ unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ SDLoc dl(Op);
+ switch (IntNo) {
+ default: return SDValue(); // Don't custom lower most intrinsics.
+ case Intrinsic::thread_pointer: {
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
+ return DAG.getRegister(SP::G7, PtrVT);
+ }
+ }
}
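
The thread_pointer intrinsic thus becomes a plain read of %g7, the SPARC TLS register. A sketch of user code that would hit this path, assuming a frontend that exposes __builtin_thread_pointer (a GCC/Clang extension):

    // Illustrative: __builtin_thread_pointer lowers to the
    // llvm.thread.pointer intrinsic, which the custom lowering above
    // turns into DAG.getRegister(SP::G7, PtrVT).
    void *threadPointer() { return __builtin_thread_pointer(); }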
SDValue SparcTargetLowering::
@@ -2943,6 +3003,8 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const {
hasHardQuad);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this,
hasHardQuad);
+ case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG, *this);
+ case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG, *this);
case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
case ISD::VAARG: return LowerVAARG(Op, DAG);
case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
@@ -2972,14 +3034,15 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
case ISD::ATOMIC_LOAD:
case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
+ case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
}
}
MachineBasicBlock *
-SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
- switch (MI->getOpcode()) {
- default: llvm_unreachable("Unknown SELECT_CC!");
+ switch (MI.getOpcode()) {
+ default: llvm_unreachable("Unknown Custom Instruction!");
case SP::SELECT_CC_Int_ICC:
case SP::SELECT_CC_FP_ICC:
case SP::SELECT_CC_DFP_ICC:
@@ -2990,61 +3053,21 @@ SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case SP::SELECT_CC_DFP_FCC:
case SP::SELECT_CC_QFP_FCC:
return expandSelectCC(MI, BB, SP::FBCOND);
+ case SP::EH_SJLJ_SETJMP32ri:
+ case SP::EH_SJLJ_SETJMP32rr:
+ return emitEHSjLjSetJmp(MI, BB);
+ case SP::EH_SJLJ_LONGJMP32rr:
+ case SP::EH_SJLJ_LONGJMP32ri:
+ return emitEHSjLjLongJmp(MI, BB);
+ }
+}
- case SP::ATOMIC_LOAD_ADD_32:
- return expandAtomicRMW(MI, BB, SP::ADDrr);
- case SP::ATOMIC_LOAD_ADD_64:
- return expandAtomicRMW(MI, BB, SP::ADDXrr);
- case SP::ATOMIC_LOAD_SUB_32:
- return expandAtomicRMW(MI, BB, SP::SUBrr);
- case SP::ATOMIC_LOAD_SUB_64:
- return expandAtomicRMW(MI, BB, SP::SUBXrr);
- case SP::ATOMIC_LOAD_AND_32:
- return expandAtomicRMW(MI, BB, SP::ANDrr);
- case SP::ATOMIC_LOAD_AND_64:
- return expandAtomicRMW(MI, BB, SP::ANDXrr);
- case SP::ATOMIC_LOAD_OR_32:
- return expandAtomicRMW(MI, BB, SP::ORrr);
- case SP::ATOMIC_LOAD_OR_64:
- return expandAtomicRMW(MI, BB, SP::ORXrr);
- case SP::ATOMIC_LOAD_XOR_32:
- return expandAtomicRMW(MI, BB, SP::XORrr);
- case SP::ATOMIC_LOAD_XOR_64:
- return expandAtomicRMW(MI, BB, SP::XORXrr);
- case SP::ATOMIC_LOAD_NAND_32:
- return expandAtomicRMW(MI, BB, SP::ANDrr);
- case SP::ATOMIC_LOAD_NAND_64:
- return expandAtomicRMW(MI, BB, SP::ANDXrr);
-
- case SP::ATOMIC_SWAP_64:
- return expandAtomicRMW(MI, BB, 0);
-
- case SP::ATOMIC_LOAD_MAX_32:
- return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_G);
- case SP::ATOMIC_LOAD_MAX_64:
- return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_G);
- case SP::ATOMIC_LOAD_MIN_32:
- return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LE);
- case SP::ATOMIC_LOAD_MIN_64:
- return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LE);
- case SP::ATOMIC_LOAD_UMAX_32:
- return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_GU);
- case SP::ATOMIC_LOAD_UMAX_64:
- return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_GU);
- case SP::ATOMIC_LOAD_UMIN_32:
- return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LEU);
- case SP::ATOMIC_LOAD_UMIN_64:
- return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LEU);
- }
-}
-
-MachineBasicBlock*
-SparcTargetLowering::expandSelectCC(MachineInstr *MI,
- MachineBasicBlock *BB,
+MachineBasicBlock *
+SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
unsigned BROpcode) const {
const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
- DebugLoc dl = MI->getDebugLoc();
- unsigned CC = (SPCC::CondCodes)MI->getOperand(3).getImm();
+ DebugLoc dl = MI.getDebugLoc();
+ unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
// To "insert" a SELECT_CC instruction, we actually have to insert the diamond
// control-flow pattern. The incoming instruction knows the destination vreg
@@ -3089,107 +3112,211 @@ SparcTargetLowering::expandSelectCC(MachineInstr *MI,
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = sinkMBB;
- BuildMI(*BB, BB->begin(), dl, TII.get(SP::PHI), MI->getOperand(0).getReg())
- .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
- .addReg(MI->getOperand(1).getReg()).addMBB(thisMBB);
+ BuildMI(*BB, BB->begin(), dl, TII.get(SP::PHI), MI.getOperand(0).getReg())
+ .addReg(MI.getOperand(2).getReg())
+ .addMBB(copy0MBB)
+ .addReg(MI.getOperand(1).getReg())
+ .addMBB(thisMBB);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
-MachineBasicBlock*
-SparcTargetLowering::expandAtomicRMW(MachineInstr *MI,
- MachineBasicBlock *MBB,
- unsigned Opcode,
- unsigned CondCode) const {
- const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
- MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
- DebugLoc DL = MI->getDebugLoc();
+MachineBasicBlock *
+SparcTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
+ MachineBasicBlock *MBB) const {
+ DebugLoc DL = MI.getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+
+ MachineFunction *MF = MBB->getParent();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ MachineInstrBuilder MIB;
+
+ MVT PVT = getPointerTy(MF->getDataLayout());
+ unsigned RegSize = PVT.getStoreSize();
+ assert(PVT == MVT::i32 && "Invalid Pointer Size!");
+
+ unsigned Buf = MI.getOperand(0).getReg();
+ unsigned JmpLoc = MRI.createVirtualRegister(&SP::IntRegsRegClass);
+
+  // TODO: If we add 64-bit handling, this should perhaps be FLUSHW, not TA 3.
+  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::TRAPri), SP::G0)
+            .addImm(3)
+            .addImm(SPCC::ICC_A);
+
+ // Instruction to restore FP
+ const unsigned FP = SP::I6;
+ MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
+ .addReg(FP)
+ .addReg(Buf)
+ .addImm(0);
- // MI is an atomic read-modify-write instruction of the form:
+ // Instruction to load jmp location
+ MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
+ .addReg(JmpLoc, RegState::Define)
+ .addReg(Buf)
+ .addImm(RegSize);
+
+ // Instruction to restore SP
+ const unsigned SP = SP::O6;
+ MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
+ .addReg(SP)
+ .addReg(Buf)
+ .addImm(2 * RegSize);
+
+ // Instruction to restore I7
+ MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
+ .addReg(SP::I7)
+ .addReg(Buf, RegState::Kill)
+ .addImm(3 * RegSize);
+
+ // Jump to JmpLoc
+  BuildMI(*MBB, MI, DL, TII->get(SP::JMPLrr))
+      .addReg(SP::G0)
+      .addReg(JmpLoc, RegState::Kill)
+      .addReg(SP::G0);
+
+ MI.eraseFromParent();
+ return MBB;
+}
+
+MachineBasicBlock *
+SparcTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
+ MachineBasicBlock *MBB) const {
+ DebugLoc DL = MI.getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+
+ MachineFunction *MF = MBB->getParent();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ MachineInstrBuilder MIB;
+
+ MVT PVT = getPointerTy(MF->getDataLayout());
+ unsigned RegSize = PVT.getStoreSize();
+ assert(PVT == MVT::i32 && "Invalid Pointer Size!");
+
+ unsigned DstReg = MI.getOperand(0).getReg();
+ const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
+ assert(RC->hasType(MVT::i32) && "Invalid destination!");
+ unsigned mainDstReg = MRI.createVirtualRegister(RC);
+ unsigned restoreDstReg = MRI.createVirtualRegister(RC);
+
+ // For v = setjmp(buf), we generate
//
- // rd = atomicrmw<op> addr, rs2
+ // thisMBB:
+ // buf[0] = FP
+ // buf[RegSize] = restoreMBB <-- takes address of restoreMBB
+ // buf[RegSize * 2] = O6
+ // buf[RegSize * 3] = I7
+ // Ensure restoreMBB remains in the relocations list (done using a bn instruction)
+ // b mainMBB
//
- // All three operands are registers.
- unsigned DestReg = MI->getOperand(0).getReg();
- unsigned AddrReg = MI->getOperand(1).getReg();
- unsigned Rs2Reg = MI->getOperand(2).getReg();
-
- // SelectionDAG has already inserted memory barriers before and after MI, so
- // we simply have to implement the operatiuon in terms of compare-and-swap.
+ // mainMBB:
+ // v_main = 0
+ // b sinkMBB
//
- // %val0 = load %addr
- // loop:
- // %val = phi %val0, %dest
- // %upd = op %val, %rs2
- // %dest = cas %addr, %val, %upd
- // cmp %val, %dest
- // bne loop
- // done:
+ // restoreMBB:
+ // v_restore = 1
+ // --fall through--
//
- bool is64Bit = SP::I64RegsRegClass.hasSubClassEq(MRI.getRegClass(DestReg));
- const TargetRegisterClass *ValueRC =
- is64Bit ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
- unsigned Val0Reg = MRI.createVirtualRegister(ValueRC);
+ // sinkMBB:
+ // v = phi(main, restore)
- BuildMI(*MBB, MI, DL, TII.get(is64Bit ? SP::LDXri : SP::LDri), Val0Reg)
- .addReg(AddrReg).addImm(0);
+ const BasicBlock *BB = MBB->getBasicBlock();
+ MachineFunction::iterator It = ++MBB->getIterator();
+ MachineBasicBlock *thisMBB = MBB;
+ MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
+ MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
+ MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
- // Split the basic block MBB before MI and insert the loop block in the hole.
- MachineFunction::iterator MFI = MBB->getIterator();
- const BasicBlock *LLVM_BB = MBB->getBasicBlock();
- MachineFunction *MF = MBB->getParent();
- MachineBasicBlock *LoopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *DoneMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- ++MFI;
- MF->insert(MFI, LoopMBB);
- MF->insert(MFI, DoneMBB);
-
- // Move MI and following instructions to DoneMBB.
- DoneMBB->splice(DoneMBB->begin(), MBB, MI, MBB->end());
- DoneMBB->transferSuccessorsAndUpdatePHIs(MBB);
-
- // Connect the CFG again.
- MBB->addSuccessor(LoopMBB);
- LoopMBB->addSuccessor(LoopMBB);
- LoopMBB->addSuccessor(DoneMBB);
-
- // Build the loop block.
- unsigned ValReg = MRI.createVirtualRegister(ValueRC);
- // Opcode == 0 means try to write Rs2Reg directly (ATOMIC_SWAP).
- unsigned UpdReg = (Opcode ? MRI.createVirtualRegister(ValueRC) : Rs2Reg);
-
- BuildMI(LoopMBB, DL, TII.get(SP::PHI), ValReg)
- .addReg(Val0Reg).addMBB(MBB)
- .addReg(DestReg).addMBB(LoopMBB);
-
- if (CondCode) {
- // This is one of the min/max operations. We need a CMPrr followed by a
- // MOVXCC/MOVICC.
- BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(Rs2Reg);
- BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
- .addReg(ValReg).addReg(Rs2Reg).addImm(CondCode);
- } else if (Opcode) {
- BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
- .addReg(ValReg).addReg(Rs2Reg);
- }
-
- if (MI->getOpcode() == SP::ATOMIC_LOAD_NAND_32 ||
- MI->getOpcode() == SP::ATOMIC_LOAD_NAND_64) {
- unsigned TmpReg = UpdReg;
- UpdReg = MRI.createVirtualRegister(ValueRC);
- BuildMI(LoopMBB, DL, TII.get(SP::XORri), UpdReg).addReg(TmpReg).addImm(-1);
- }
-
- BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::CASXrr : SP::CASrr), DestReg)
- .addReg(AddrReg).addReg(ValReg).addReg(UpdReg)
- .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
- BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(DestReg);
- BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::BPXCC : SP::BCOND))
- .addMBB(LoopMBB).addImm(SPCC::ICC_NE);
-
- MI->eraseFromParent();
- return DoneMBB;
+ MF->insert(It, mainMBB);
+ MF->insert(It, restoreMBB);
+ MF->insert(It, sinkMBB);
+ restoreMBB->setHasAddressTaken();
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), MBB,
+ std::next(MachineBasicBlock::iterator(MI)),
+ MBB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
+
+ unsigned LabelReg = MRI.createVirtualRegister(&SP::IntRegsRegClass);
+ unsigned LabelReg2 = MRI.createVirtualRegister(&SP::IntRegsRegClass);
+ unsigned BufReg = MI.getOperand(1).getReg();
+
+ // Instruction to store FP
+ const unsigned FP = SP::I6;
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
+ .addReg(BufReg)
+ .addImm(0)
+ .addReg(FP);
+
+ // Instructions to store jmp location
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::SETHIi))
+ .addReg(LabelReg, RegState::Define)
+ .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_HI);
+
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::ORri))
+ .addReg(LabelReg2, RegState::Define)
+ .addReg(LabelReg, RegState::Kill)
+ .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_LO);
+
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
+ .addReg(BufReg)
+ .addImm(RegSize)
+ .addReg(LabelReg2, RegState::Kill);
+
+ // Instruction to store SP
+ const unsigned SP = SP::O6;
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
+ .addReg(BufReg)
+ .addImm(2 * RegSize)
+ .addReg(SP);
+
+ // Instruction to store I7
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
+ .addReg(BufReg)
+ .addImm(3 * RegSize)
+ .addReg(SP::I7);
+
+  // FIXME: This next instruction ensures that the restoreMBB block address
+  // remains valid through optimization passes and serves no other purpose.
+  // The ICC_N condition ensures that the branch is never taken. The
+  // commented-out code below was an alternative attempt to achieve this,
+  // but it brought myriad problems.
+ //MIB = BuildMI(thisMBB, DL, TII->get(SP::EH_SjLj_Setup)).addMBB(restoreMBB, SparcMCExpr::VK_Sparc_None);
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
+ .addMBB(restoreMBB)
+ .addImm(SPCC::ICC_N);
+
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
+ .addMBB(mainMBB)
+ .addImm(SPCC::ICC_A);
+
+ thisMBB->addSuccessor(mainMBB);
+ thisMBB->addSuccessor(restoreMBB);
+
+
+ // mainMBB:
+ MIB = BuildMI(mainMBB, DL, TII->get(SP::ORrr))
+ .addReg(mainDstReg, RegState::Define)
+ .addReg(SP::G0)
+ .addReg(SP::G0);
+ MIB = BuildMI(mainMBB, DL, TII->get(SP::BCOND)).addMBB(sinkMBB).addImm(SPCC::ICC_A);
+
+ mainMBB->addSuccessor(sinkMBB);
+
+
+ // restoreMBB:
+ MIB = BuildMI(restoreMBB, DL, TII->get(SP::ORri))
+ .addReg(restoreDstReg, RegState::Define)
+ .addReg(SP::G0)
+ .addImm(1);
+ //MIB = BuildMI(restoreMBB, DL, TII->get(SP::BCOND)).addMBB(sinkMBB).addImm(SPCC::ICC_A);
+ restoreMBB->addSuccessor(sinkMBB);
+
+ // sinkMBB:
+ MIB = BuildMI(*sinkMBB, sinkMBB->begin(), DL,
+ TII->get(SP::PHI), DstReg)
+ .addReg(mainDstReg).addMBB(mainMBB)
+ .addReg(restoreDstReg).addMBB(restoreMBB);
+
+ MI.eraseFromParent();
+ return sinkMBB;
}
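
These pseudos are reached through the SjLj builtins rather than the libc functions. A minimal sketch of user code that exercises both lowerings above (buffer layout per the setjmp emission: FP, return address, SP, I7):

    // Illustrative: __builtin_setjmp/__builtin_longjmp become the
    // EH_SJLJ_SETJMP/EH_SJLJ_LONGJMP pseudos expanded above. The builtins
    // require a five-word buffer and a longjmp value of 1.
    static void *Buf[5];

    int tryRun(void (*F)()) {
      if (__builtin_setjmp(Buf))
        return 1; // resumed via restoreMBB: v_restore = 1
      F();        // may call __builtin_longjmp(Buf, 1)
      return 0;   // fell through mainMBB: v_main = 0
    }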
//===----------------------------------------------------------------------===//
@@ -3202,8 +3329,11 @@ SparcTargetLowering::ConstraintType
SparcTargetLowering::getConstraintType(StringRef Constraint) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
- default: break;
- case 'r': return C_RegisterClass;
+ default:
+ break;
+ case 'f':
+ case 'r':
+ return C_RegisterClass;
case 'I': // SIMM13
return C_Other;
}
@@ -3277,6 +3407,9 @@ SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
+ case 'f':
+ return std::make_pair(0U, &SP::FPRegsRegClass);
+
case 'r':
if (VT == MVT::v2i32)
return std::make_pair(0U, &SP::IntPairRegClass);
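
With the new 'f' constraint, inline assembly can ask for an operand in a SPARC floating-point register. A small illustrative use (the asm body is just a register-to-register move):

    // Illustrative: "=f"/"f" place Out and In in FPRegs, courtesy of the
    // constraint handling above; "r" keeps its integer-register meaning.
    float fpMove(float In) {
      float Out;
      asm("fmovs %1, %0" : "=f"(Out) : "f"(In));
      return Out;
    }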
@@ -3368,10 +3501,9 @@ void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
SDLoc dl(N);
SDValue LoadRes = DAG.getExtLoad(
- Ld->getExtensionType(), dl, MVT::v2i32,
- Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
- MVT::v2i32, Ld->isVolatile(), Ld->isNonTemporal(),
- Ld->isInvariant(), Ld->getAlignment(), Ld->getAAInfo());
+ Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
+ Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getAlignment(),
+ Ld->getMemOperand()->getFlags(), Ld->getAAInfo());
SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
Results.push_back(Res);
@@ -3380,3 +3512,16 @@ void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
}
}
}
+
+// Override to enable LOAD_STACK_GUARD lowering on Linux.
+bool SparcTargetLowering::useLoadStackGuardNode() const {
+ if (!Subtarget->isTargetLinux())
+ return TargetLowering::useLoadStackGuardNode();
+ return true;
+}
+
+// Override to skip the default SSP guard declarations on Linux, where the
+// stack guard is loaded through LOAD_STACK_GUARD instead.
+void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
+ if (!Subtarget->isTargetLinux())
+ return TargetLowering::insertSSPDeclarations(M);
+}
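
Taken together, the two overrides mean that on Linux the stack-protector canary is fetched through the LOAD_STACK_GUARD pseudo instead of a __stack_chk_guard global. Illustrative user code that would get a guard slot when built with -fstack-protector-strong:

    #include <cstring>

    // Illustrative: the local array makes this frame eligible for a stack
    // guard; the prologue/epilogue canary load goes through
    // LOAD_STACK_GUARD on Linux per the override above.
    void copyName(const char *Src) {
      char Buf[64];
      std::strncpy(Buf, Src, sizeof(Buf) - 1);
      Buf[sizeof(Buf) - 1] = '\0';
    }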
diff --git a/lib/Target/Sparc/SparcISelLowering.h b/lib/Target/Sparc/SparcISelLowering.h
index 4e46709cfc09..e0a421b83712 100644
--- a/lib/Target/Sparc/SparcISelLowering.h
+++ b/lib/Target/Sparc/SparcISelLowering.h
@@ -33,6 +33,9 @@ namespace llvm {
SELECT_XCC, // Select between two values using the current XCC flags.
SELECT_FCC, // Select between two values using the current FCC flags.
+ EH_SJLJ_SETJMP, // builtin setjmp operation
+ EH_SJLJ_LONGJMP, // builtin longjmp operation
+
Hi, Lo, // Hi/Lo operations, typically on a global address.
FTOI, // FP to Int within a FP register.
@@ -54,9 +57,11 @@ namespace llvm {
class SparcTargetLowering : public TargetLowering {
const SparcSubtarget *Subtarget;
public:
- SparcTargetLowering(TargetMachine &TM, const SparcSubtarget &STI);
+ SparcTargetLowering(const TargetMachine &TM, const SparcSubtarget &STI);
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
-
+
+ bool useSoftFloat() const override;
+
/// computeKnownBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
@@ -67,8 +72,8 @@ namespace llvm {
unsigned Depth = 0) const override;
MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const override;
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *MBB) const override;
const char *getTargetNodeName(unsigned Opcode) const override;
@@ -80,6 +85,14 @@ namespace llvm {
std::string &Constraint,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const override;
+
+ unsigned
+ getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
+ if (ConstraintCode == "o")
+ return InlineAsm::Constraint_o;
+ return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
+ }
+
std::pair<unsigned, const TargetRegisterClass *>
getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint, MVT VT) const override;
@@ -89,6 +102,9 @@ namespace llvm {
return MVT::i32;
}
+ unsigned getRegisterByName(const char* RegName, EVT VT,
+ SelectionDAG &DAG) const override;
+
/// If a physical register, this returns the register that receives the
/// exception address on entry to an EH pad.
unsigned
@@ -103,28 +119,28 @@ namespace llvm {
return SP::I1;
}
+ /// Override to support customized stack guard loading.
+ bool useLoadStackGuardNode() const override;
+ void insertSSPDeclarations(Module &M) const override;
+
/// getSetCCResultType - Return the ISD::SETCC ValueType
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
EVT VT) const override;
SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const override;
- SDValue LowerFormalArguments_32(SDValue Chain,
- CallingConv::ID CallConv,
+ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
+ SDValue LowerFormalArguments_32(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
- SDValue LowerFormalArguments_64(SDValue Chain,
- CallingConv::ID CallConv,
+ SDValue LowerFormalArguments_64(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue
@@ -135,44 +151,46 @@ namespace llvm {
SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
- SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const override;
- SDValue LowerReturn_32(SDValue Chain,
- CallingConv::ID CallConv, bool IsVarArg,
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &dl, SelectionDAG &DAG) const override;
+ SDValue LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const;
- SDValue LowerReturn_64(SDValue Chain,
- CallingConv::ID CallConv, bool IsVarArg,
+ const SDLoc &DL, SelectionDAG &DAG) const;
+ SDValue LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const;
+ const SDLoc &DL, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG,
+                                const SparcTargetLowering &TLI) const;
+ SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG,
+                                 const SparcTargetLowering &TLI) const;
+
unsigned getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const;
SDValue withTargetFlags(SDValue Op, unsigned TF, SelectionDAG &DAG) const;
SDValue makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF,
SelectionDAG &DAG) const;
SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args,
- SDValue Arg, SDLoc DL,
- SelectionDAG &DAG) const;
+ SDValue LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args, SDValue Arg,
+ const SDLoc &DL, SelectionDAG &DAG) const;
SDValue LowerF128Op(SDValue Op, SelectionDAG &DAG,
const char *LibFuncName,
unsigned numArgs) const;
- SDValue LowerF128Compare(SDValue LHS, SDValue RHS,
- unsigned &SPCC,
- SDLoc DL,
- SelectionDAG &DAG) const;
+ SDValue LowerF128Compare(SDValue LHS, SDValue RHS, unsigned &SPCC,
+ const SDLoc &DL, SelectionDAG &DAG) const;
+
+ SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
bool ShouldShrinkFPConstant(EVT VT) const override {
// Do not shrink FP constpool if VT == MVT::f128.
@@ -180,16 +198,25 @@ namespace llvm {
return VT != MVT::f128;
}
+ bool shouldInsertFencesForAtomic(const Instruction *I) const override {
+      // FIXME: We insert a fence for each atomic operation and generate
+      // sub-optimal code for PSO/TSO. (Approximately nobody uses any
+      // mode but TSO, which makes this even more silly.)
+ return true;
+ }
+
+ AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+
void ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue>& Results,
SelectionDAG &DAG) const override;
- MachineBasicBlock *expandSelectCC(MachineInstr *MI, MachineBasicBlock *BB,
+ MachineBasicBlock *expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
unsigned BROpcode) const;
- MachineBasicBlock *expandAtomicRMW(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Opcode,
- unsigned CondCode = 0) const;
+ MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
+ MachineBasicBlock *MBB) const;
+ MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
+ MachineBasicBlock *MBB) const;
};
} // end namespace llvm
diff --git a/lib/Target/Sparc/SparcInstr64Bit.td b/lib/Target/Sparc/SparcInstr64Bit.td
index 419e8ccb1024..f6518c936ebc 100644
--- a/lib/Target/Sparc/SparcInstr64Bit.td
+++ b/lib/Target/Sparc/SparcInstr64Bit.td
@@ -492,7 +492,7 @@ let Predicates = [Is64Bit], Constraints = "$swap = $rd", asi = 0b10000000 in {
I64Regs:$swap),
"casx [$rs1], $rs2, $rd",
[(set i64:$rd,
- (atomic_cmp_swap i64:$rs1, i64:$rs2, i64:$swap))]>;
+ (atomic_cmp_swap_64 i64:$rs1, i64:$rs2, i64:$swap))]>;
} // Predicates = [Is64Bit], Constraints = ...
@@ -501,48 +501,15 @@ let Predicates = [Is64Bit] in {
def : Pat<(atomic_fence imm, imm), (MEMBARi 0xf)>;
// atomic_load_64 addr -> load addr
-def : Pat<(i64 (atomic_load ADDRrr:$src)), (LDXrr ADDRrr:$src)>;
-def : Pat<(i64 (atomic_load ADDRri:$src)), (LDXri ADDRri:$src)>;
+def : Pat<(i64 (atomic_load_64 ADDRrr:$src)), (LDXrr ADDRrr:$src)>;
+def : Pat<(i64 (atomic_load_64 ADDRri:$src)), (LDXri ADDRri:$src)>;
// atomic_store_64 val, addr -> store val, addr
-def : Pat<(atomic_store ADDRrr:$dst, i64:$val), (STXrr ADDRrr:$dst, $val)>;
-def : Pat<(atomic_store ADDRri:$dst, i64:$val), (STXri ADDRri:$dst, $val)>;
+def : Pat<(atomic_store_64 ADDRrr:$dst, i64:$val), (STXrr ADDRrr:$dst, $val)>;
+def : Pat<(atomic_store_64 ADDRri:$dst, i64:$val), (STXri ADDRri:$dst, $val)>;
} // Predicates = [Is64Bit]
-let usesCustomInserter = 1, hasCtrlDep = 1, mayLoad = 1, mayStore = 1,
- Defs = [ICC] in
-multiclass AtomicRMW<SDPatternOperator op32, SDPatternOperator op64> {
-
- def _32 : Pseudo<(outs IntRegs:$rd),
- (ins ptr_rc:$addr, IntRegs:$rs2), "",
- [(set i32:$rd, (op32 iPTR:$addr, i32:$rs2))]>;
-
- let Predicates = [Is64Bit] in
- def _64 : Pseudo<(outs I64Regs:$rd),
- (ins ptr_rc:$addr, I64Regs:$rs2), "",
- [(set i64:$rd, (op64 iPTR:$addr, i64:$rs2))]>;
-}
-
-defm ATOMIC_LOAD_ADD : AtomicRMW<atomic_load_add_32, atomic_load_add_64>;
-defm ATOMIC_LOAD_SUB : AtomicRMW<atomic_load_sub_32, atomic_load_sub_64>;
-defm ATOMIC_LOAD_AND : AtomicRMW<atomic_load_and_32, atomic_load_and_64>;
-defm ATOMIC_LOAD_OR : AtomicRMW<atomic_load_or_32, atomic_load_or_64>;
-defm ATOMIC_LOAD_XOR : AtomicRMW<atomic_load_xor_32, atomic_load_xor_64>;
-defm ATOMIC_LOAD_NAND : AtomicRMW<atomic_load_nand_32, atomic_load_nand_64>;
-defm ATOMIC_LOAD_MIN : AtomicRMW<atomic_load_min_32, atomic_load_min_64>;
-defm ATOMIC_LOAD_MAX : AtomicRMW<atomic_load_max_32, atomic_load_max_64>;
-defm ATOMIC_LOAD_UMIN : AtomicRMW<atomic_load_umin_32, atomic_load_umin_64>;
-defm ATOMIC_LOAD_UMAX : AtomicRMW<atomic_load_umax_32, atomic_load_umax_64>;
-
-// There is no 64-bit variant of SWAP, so use a pseudo.
-let usesCustomInserter = 1, hasCtrlDep = 1, mayLoad = 1, mayStore = 1,
- Defs = [ICC], Predicates = [Is64Bit] in
-def ATOMIC_SWAP_64 : Pseudo<(outs I64Regs:$rd),
- (ins ptr_rc:$addr, I64Regs:$rs2), "",
- [(set i64:$rd,
- (atomic_swap_64 iPTR:$addr, i64:$rs2))]>;
-
let Predicates = [Is64Bit], hasSideEffects = 1, Uses = [ICC], cc = 0b10 in
defm TXCC : TRAP<"%xcc">;
diff --git a/lib/Target/Sparc/SparcInstrAliases.td b/lib/Target/Sparc/SparcInstrAliases.td
index 361d21440a97..df570cea8da8 100644
--- a/lib/Target/Sparc/SparcInstrAliases.td
+++ b/lib/Target/Sparc/SparcInstrAliases.td
@@ -136,59 +136,68 @@ multiclass int_cond_alias<string cond, int condVal> {
(FMOVQ_XCC QFPRegs:$rd, QFPRegs:$rs2, condVal)>,
Requires<[Is64Bit, HasHardQuad]>;
- // t<cond> %icc, rs1 + rs2
- def : InstAlias<!strconcat(!strconcat("t", cond), " %icc, $rs1 + $rs2"),
- (TICCrr IntRegs:$rs1, IntRegs:$rs2, condVal)>,
- Requires<[HasV9]>;
-
// t<cond> %icc, rs => t<cond> %icc, G0 + rs
def : InstAlias<!strconcat(!strconcat("t", cond), " %icc, $rs2"),
(TICCrr G0, IntRegs:$rs2, condVal)>,
Requires<[HasV9]>;
-
- // t<cond> %xcc, rs1 + rs2
- def : InstAlias<!strconcat(!strconcat("t", cond), " %xcc, $rs1 + $rs2"),
- (TXCCrr IntRegs:$rs1, IntRegs:$rs2, condVal)>,
+ // t<cond> %icc, rs1 + rs2
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %icc, $rs1 + $rs2"),
+ (TICCrr IntRegs:$rs1, IntRegs:$rs2, condVal)>,
Requires<[HasV9]>;
+
// t<cond> %xcc, rs => t<cond> %xcc, G0 + rs
def : InstAlias<!strconcat(!strconcat("t", cond), " %xcc, $rs2"),
(TXCCrr G0, IntRegs:$rs2, condVal)>,
Requires<[HasV9]>;
+ // t<cond> %xcc, rs1 + rs2
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %xcc, $rs1 + $rs2"),
+ (TXCCrr IntRegs:$rs1, IntRegs:$rs2, condVal)>,
+ Requires<[HasV9]>;
- // t<cond> rs1 + rs2 => t<cond> %icc, rs1 + rs2
- def : InstAlias<!strconcat(!strconcat("t", cond), " $rs1 + $rs2"),
- (TICCrr IntRegs:$rs1, IntRegs:$rs2, condVal)>;
// t<cond> rs=> t<cond> %icc, G0 + rs2
- def : InstAlias<!strconcat(!strconcat("t", cond), " $rs2"),
- (TICCrr G0, IntRegs:$rs2, condVal)>;
+ //def : InstAlias<!strconcat(!strconcat("t", cond), " $rs2"),
+ // (TICCrr G0, IntRegs:$rs2, condVal)>,
+ // Requires<[HasV9]>;
+
+ // t<cond> rs1 + rs2 => t<cond> %icc, rs1 + rs2
+ //def : InstAlias<!strconcat(!strconcat("t", cond), " $rs1 + $rs2"),
+ // (TICCrr IntRegs:$rs1, IntRegs:$rs2, condVal)>,
+ // Requires<[HasV9]>;
- // t<cond> %icc, rs1 + imm
- def : InstAlias<!strconcat(!strconcat("t", cond), " %icc, $rs1 + $imm"),
- (TICCri IntRegs:$rs1, i32imm:$imm, condVal)>,
- Requires<[HasV9]>;
// t<cond> %icc, imm => t<cond> %icc, G0 + imm
def : InstAlias<!strconcat(!strconcat("t", cond), " %icc, $imm"),
(TICCri G0, i32imm:$imm, condVal)>,
Requires<[HasV9]>;
- // t<cond> %xcc, rs1 + imm
- def : InstAlias<!strconcat(!strconcat("t", cond), " %xcc, $rs1 + $imm"),
- (TXCCri IntRegs:$rs1, i32imm:$imm, condVal)>,
+ // t<cond> %icc, rs1 + imm
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %icc, $rs1 + $imm"),
+ (TICCri IntRegs:$rs1, i32imm:$imm, condVal)>,
Requires<[HasV9]>;
// t<cond> %xcc, imm => t<cond> %xcc, G0 + imm
def : InstAlias<!strconcat(!strconcat("t", cond), " %xcc, $imm"),
(TXCCri G0, i32imm:$imm, condVal)>,
Requires<[HasV9]>;
+ // t<cond> %xcc, rs1 + imm
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %xcc, $rs1 + $imm"),
+ (TXCCri IntRegs:$rs1, i32imm:$imm, condVal)>,
+ Requires<[HasV9]>;
+
+ // t<cond> imm => t<cond> G0 + imm
+ def : InstAlias<!strconcat(!strconcat("t", cond), " $imm"),
+ (TRAPri G0, i32imm:$imm, condVal)>;
- // t<cond> rs1 + imm => t<cond> %icc, rs1 + imm
+  // t<cond> rs1 + imm
def : InstAlias<!strconcat(!strconcat("t", cond), " $rs1 + $imm"),
- (TICCri IntRegs:$rs1, i32imm:$imm, condVal)>;
+ (TRAPri IntRegs:$rs1, i32imm:$imm, condVal)>;
- // t<cond> imm => t<cond> %icc, G0 + imm
- def : InstAlias<!strconcat(!strconcat("t", cond), " $imm"),
- (TICCri G0, i32imm:$imm, condVal)>;
+ // t<cond> rs1 => t<cond> G0 + rs1
+ def : InstAlias<!strconcat(!strconcat("t", cond), " $rs1"),
+ (TRAPrr G0, IntRegs:$rs1, condVal)>;
+ // t<cond> rs1 + rs2
+ def : InstAlias<!strconcat(!strconcat("t", cond), " $rs1 + $rs2"),
+ (TRAPrr IntRegs:$rs1, IntRegs:$rs2, condVal)>;
}
@@ -244,14 +253,23 @@ multiclass fp_cond_alias<string cond, int condVal> {
Requires<[HasV9, HasHardQuad]>;
}
+
+// Instruction aliases for co-processor conditional branches.
+multiclass cp_cond_alias<string cond, int condVal> {
+
+ // cb<cond> $imm
+ def : InstAlias<!strconcat(!strconcat("cb", cond), " $imm"),
+ (CBCOND brtarget:$imm, condVal), 0>;
+
+ // cb<cond>,a $imm
+ def : InstAlias<!strconcat(!strconcat("cb", cond), ",a $imm"),
+ (CBCONDA brtarget:$imm, condVal), 0>;
+}
+
defm : int_cond_alias<"a", 0b1000>;
-defm : int_cond_alias<"", 0b1000>; // same as a; gnu asm, not in manual
defm : int_cond_alias<"n", 0b0000>;
defm : int_cond_alias<"ne", 0b1001>;
-defm : int_cond_alias<"nz", 0b1001>; // same as ne
defm : int_cond_alias<"e", 0b0001>;
-defm : int_cond_alias<"eq", 0b0001>; // same as e
-defm : int_cond_alias<"z", 0b0001>; // same as e
defm : int_cond_alias<"g", 0b1010>;
defm : int_cond_alias<"le", 0b0010>;
defm : int_cond_alias<"ge", 0b1011>;
@@ -259,16 +277,21 @@ defm : int_cond_alias<"l", 0b0011>;
defm : int_cond_alias<"gu", 0b1100>;
defm : int_cond_alias<"leu", 0b0100>;
defm : int_cond_alias<"cc", 0b1101>;
-defm : int_cond_alias<"geu", 0b1101>; // same as cc
defm : int_cond_alias<"cs", 0b0101>;
-defm : int_cond_alias<"lu", 0b0101>; // same as cs
defm : int_cond_alias<"pos", 0b1110>;
defm : int_cond_alias<"neg", 0b0110>;
defm : int_cond_alias<"vc", 0b1111>;
defm : int_cond_alias<"vs", 0b0111>;
-
+let EmitPriority = 0 in
+{
+ defm : int_cond_alias<"", 0b1000>; // same as a; gnu asm, not in manual
+ defm : int_cond_alias<"nz", 0b1001>; // same as ne
+ defm : int_cond_alias<"eq", 0b0001>; // same as e
+ defm : int_cond_alias<"z", 0b0001>; // same as e
+ defm : int_cond_alias<"geu", 0b1101>; // same as cc
+ defm : int_cond_alias<"lu", 0b0101>; // same as cs
+}
defm : fp_cond_alias<"a", 0b1000>;
-defm : fp_cond_alias<"", 0b1000>; // same as a; gnu asm, not in manual
defm : fp_cond_alias<"n", 0b0000>;
defm : fp_cond_alias<"u", 0b0111>;
defm : fp_cond_alias<"g", 0b0110>;
@@ -277,15 +300,37 @@ defm : fp_cond_alias<"l", 0b0100>;
defm : fp_cond_alias<"ul", 0b0011>;
defm : fp_cond_alias<"lg", 0b0010>;
defm : fp_cond_alias<"ne", 0b0001>;
-defm : fp_cond_alias<"nz", 0b0001>; // same as ne
defm : fp_cond_alias<"e", 0b1001>;
-defm : fp_cond_alias<"z", 0b1001>; // same as e
defm : fp_cond_alias<"ue", 0b1010>;
defm : fp_cond_alias<"ge", 0b1011>;
defm : fp_cond_alias<"uge", 0b1100>;
defm : fp_cond_alias<"le", 0b1101>;
defm : fp_cond_alias<"ule", 0b1110>;
defm : fp_cond_alias<"o", 0b1111>;
+let EmitPriority = 0 in
+{
+ defm : fp_cond_alias<"", 0b1000>; // same as a; gnu asm, not in manual
+ defm : fp_cond_alias<"nz", 0b0001>; // same as ne
+ defm : fp_cond_alias<"z", 0b1001>; // same as e
+}
+
+defm : cp_cond_alias<"a", 0b1000>;
+defm : cp_cond_alias<"n", 0b0000>;
+defm : cp_cond_alias<"3", 0b0111>;
+defm : cp_cond_alias<"2", 0b0110>;
+defm : cp_cond_alias<"23", 0b0101>;
+defm : cp_cond_alias<"1", 0b0100>;
+defm : cp_cond_alias<"13", 0b0011>;
+defm : cp_cond_alias<"12", 0b0010>;
+defm : cp_cond_alias<"123", 0b0001>;
+defm : cp_cond_alias<"0", 0b1001>;
+defm : cp_cond_alias<"03", 0b1010>;
+defm : cp_cond_alias<"02", 0b1011>;
+defm : cp_cond_alias<"023", 0b1100>;
+defm : cp_cond_alias<"01", 0b1101>;
+defm : cp_cond_alias<"013", 0b1110>;
+defm : cp_cond_alias<"012", 0b1111>;
+let EmitPriority = 0 in defm : cp_cond_alias<"", 0b1000>; // same as a; gnu asm, not in manual
// Section A.3 Synthetic Instructions
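
For reference, the reworked trap aliases above accept bare forms that carry
no %icc/%xcc operand. Below is a minimal, compile-only sketch using C
inline assembly; it assumes an assembler built from this branch, the
operand values are illustrative, and the traps are not meant to run:

    /* "ta 5"        -> TRAPri %g0 + 5    (t<cond> imm)
       "tne %o0 + 1" -> TRAPri %o0 + 1    (t<cond> rs1 + imm)
       "tne %o0"     -> TRAPrr %g0 + %o0  (t<cond> rs1)         */
    void trap_alias_sketch(void) {
      __asm__ volatile("ta 5");
      __asm__ volatile("tne %o0 + 1");
    }
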
diff --git a/lib/Target/Sparc/SparcInstrFormats.td b/lib/Target/Sparc/SparcInstrFormats.td
index 74ccf551e473..76366c6695f4 100644
--- a/lib/Target/Sparc/SparcInstrFormats.td
+++ b/lib/Target/Sparc/SparcInstrFormats.td
@@ -7,8 +7,9 @@
//
//===----------------------------------------------------------------------===//
-class InstSP<dag outs, dag ins, string asmstr, list<dag> pattern>
- : Instruction {
+class InstSP<dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : Instruction {
field bits<32> Inst;
let Namespace = "SP";
@@ -24,6 +25,8 @@ class InstSP<dag outs, dag ins, string asmstr, list<dag> pattern>
let DecoderNamespace = "Sparc";
field bits<32> SoftFail = 0;
+
+ let Itinerary = itin;
}
//===----------------------------------------------------------------------===//
@@ -31,8 +34,9 @@ class InstSP<dag outs, dag ins, string asmstr, list<dag> pattern>
//===----------------------------------------------------------------------===//
// Format 2 instructions
-class F2<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstSP<outs, ins, asmstr, pattern> {
+class F2<dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : InstSP<outs, ins, asmstr, pattern, itin> {
bits<3> op2;
bits<22> imm22;
let op = 0; // op = 0
@@ -42,8 +46,9 @@ class F2<dag outs, dag ins, string asmstr, list<dag> pattern>
// Specific F2 classes: SparcV8 manual, page 44
//
-class F2_1<bits<3> op2Val, dag outs, dag ins, string asmstr, list<dag> pattern>
- : F2<outs, ins, asmstr, pattern> {
+class F2_1<bits<3> op2Val, dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : F2<outs, ins, asmstr, pattern, itin> {
bits<5> rd;
let op2 = op2Val;
@@ -52,7 +57,8 @@ class F2_1<bits<3> op2Val, dag outs, dag ins, string asmstr, list<dag> pattern>
}
class F2_2<bits<3> op2Val, bit annul, dag outs, dag ins, string asmstr,
- list<dag> pattern> : F2<outs, ins, asmstr, pattern> {
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : F2<outs, ins, asmstr, pattern, itin> {
bits<4> cond;
let op2 = op2Val;
@@ -61,8 +67,9 @@ class F2_2<bits<3> op2Val, bit annul, dag outs, dag ins, string asmstr,
}
class F2_3<bits<3> op2Val, bit annul, bit pred,
- dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstSP<outs, ins, asmstr, pattern> {
+ dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : InstSP<outs, ins, asmstr, pattern, itin> {
bits<2> cc;
bits<4> cond;
bits<19> imm19;
@@ -77,9 +84,9 @@ class F2_3<bits<3> op2Val, bit annul, bit pred,
let Inst{18-0} = imm19;
}
-class F2_4<bits<3> cond, bit annul, bit pred,
- dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstSP<outs, ins, asmstr, pattern> {
+class F2_4<bits<3> cond, bit annul, bit pred, dag outs, dag ins,
+ string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : InstSP<outs, ins, asmstr, pattern, itin> {
bits<16> imm16;
bits<5> rs1;
@@ -100,8 +107,9 @@ class F2_4<bits<3> cond, bit annul, bit pred,
// Format #3 instruction classes in the Sparc
//===----------------------------------------------------------------------===//
-class F3<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstSP<outs, ins, asmstr, pattern> {
+class F3<dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : InstSP<outs, ins, asmstr, pattern, itin> {
bits<5> rd;
bits<6> op3;
bits<5> rs1;
@@ -114,7 +122,8 @@ class F3<dag outs, dag ins, string asmstr, list<dag> pattern>
// Specific F3 classes: SparcV8 manual, page 44
//
class F3_1_asi<bits<2> opVal, bits<6> op3val, dag outs, dag ins,
- string asmstr, list<dag> pattern> : F3<outs, ins, asmstr, pattern> {
+ string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : F3<outs, ins, asmstr, pattern, itin> {
bits<8> asi;
bits<5> rs2;
@@ -127,13 +136,14 @@ class F3_1_asi<bits<2> opVal, bits<6> op3val, dag outs, dag ins,
}
class F3_1<bits<2> opVal, bits<6> op3val, dag outs, dag ins, string asmstr,
- list<dag> pattern> : F3_1_asi<opVal, op3val, outs, ins,
- asmstr, pattern> {
+ list<dag> pattern, InstrItinClass itin = IIC_iu_instr>
+ : F3_1_asi<opVal, op3val, outs, ins, asmstr, pattern, itin> {
let asi = 0;
}
class F3_2<bits<2> opVal, bits<6> op3val, dag outs, dag ins,
- string asmstr, list<dag> pattern> : F3<outs, ins, asmstr, pattern> {
+ string asmstr, list<dag> pattern, InstrItinClass itin = IIC_iu_instr>
+ : F3<outs, ins, asmstr, pattern, itin> {
bits<13> simm13;
let op = opVal;
@@ -145,7 +155,8 @@ class F3_2<bits<2> opVal, bits<6> op3val, dag outs, dag ins,
// floating-point
class F3_3<bits<2> opVal, bits<6> op3val, bits<9> opfval, dag outs, dag ins,
- string asmstr, list<dag> pattern> : F3<outs, ins, asmstr, pattern> {
+ string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : F3<outs, ins, asmstr, pattern, itin> {
bits<5> rs2;
let op = opVal;
@@ -157,7 +168,8 @@ class F3_3<bits<2> opVal, bits<6> op3val, bits<9> opfval, dag outs, dag ins,
// floating-point unary operations.
class F3_3u<bits<2> opVal, bits<6> op3val, bits<9> opfval, dag outs, dag ins,
- string asmstr, list<dag> pattern> : F3<outs, ins, asmstr, pattern> {
+ string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : F3<outs, ins, asmstr, pattern, itin> {
bits<5> rs2;
let op = opVal;
@@ -170,7 +182,8 @@ class F3_3u<bits<2> opVal, bits<6> op3val, bits<9> opfval, dag outs, dag ins,
// floating-point compares.
class F3_3c<bits<2> opVal, bits<6> op3val, bits<9> opfval, dag outs, dag ins,
- string asmstr, list<dag> pattern> : F3<outs, ins, asmstr, pattern> {
+ string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : F3<outs, ins, asmstr, pattern, itin> {
bits<5> rs2;
let op = opVal;
@@ -182,7 +195,8 @@ class F3_3c<bits<2> opVal, bits<6> op3val, bits<9> opfval, dag outs, dag ins,
// Shift by register rs2.
class F3_Sr<bits<2> opVal, bits<6> op3val, bit xVal, dag outs, dag ins,
- string asmstr, list<dag> pattern> : F3<outs, ins, asmstr, pattern> {
+ string asmstr, list<dag> pattern, InstrItinClass itin = IIC_iu_instr>
+ : F3<outs, ins, asmstr, pattern, itin> {
bit x = xVal; // 1 for 64-bit shifts.
bits<5> rs2;
@@ -196,7 +210,8 @@ class F3_Sr<bits<2> opVal, bits<6> op3val, bit xVal, dag outs, dag ins,
// Shift by immediate.
class F3_Si<bits<2> opVal, bits<6> op3val, bit xVal, dag outs, dag ins,
- string asmstr, list<dag> pattern> : F3<outs, ins, asmstr, pattern> {
+ string asmstr, list<dag> pattern, InstrItinClass itin = IIC_iu_instr>
+ : F3<outs, ins, asmstr, pattern, itin> {
bit x = xVal; // 1 for 64-bit shifts.
bits<6> shcnt; // shcnt32 / shcnt64.
@@ -210,17 +225,21 @@ class F3_Si<bits<2> opVal, bits<6> op3val, bit xVal, dag outs, dag ins,
// Define rr and ri shift instructions with patterns.
multiclass F3_S<string OpcStr, bits<6> Op3Val, bit XVal, SDNode OpNode,
- ValueType VT, RegisterClass RC> {
+ ValueType VT, RegisterClass RC,
+ InstrItinClass itin = IIC_iu_instr> {
def rr : F3_Sr<2, Op3Val, XVal, (outs RC:$rd), (ins RC:$rs1, IntRegs:$rs2),
!strconcat(OpcStr, " $rs1, $rs2, $rd"),
- [(set VT:$rd, (OpNode VT:$rs1, i32:$rs2))]>;
+ [(set VT:$rd, (OpNode VT:$rs1, i32:$rs2))],
+ itin>;
def ri : F3_Si<2, Op3Val, XVal, (outs RC:$rd), (ins RC:$rs1, i32imm:$shcnt),
!strconcat(OpcStr, " $rs1, $shcnt, $rd"),
- [(set VT:$rd, (OpNode VT:$rs1, (i32 imm:$shcnt)))]>;
+ [(set VT:$rd, (OpNode VT:$rs1, (i32 imm:$shcnt)))],
+ itin>;
}
-class F4<bits<6> op3, dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstSP<outs, ins, asmstr, pattern> {
+class F4<bits<6> op3, dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : InstSP<outs, ins, asmstr, pattern, itin> {
bits<5> rd;
let op = 2;
@@ -230,9 +249,9 @@ class F4<bits<6> op3, dag outs, dag ins, string asmstr, list<dag> pattern>
class F4_1<bits<6> op3, dag outs, dag ins,
- string asmstr, list<dag> pattern>
- : F4<op3, outs, ins, asmstr, pattern> {
-
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : F4<op3, outs, ins, asmstr, pattern, itin> {
bit intcc;
bits<2> cc;
bits<4> cond;
@@ -243,12 +262,12 @@ class F4_1<bits<6> op3, dag outs, dag ins,
let Inst{13} = 0;
let Inst{17-14} = cond;
let Inst{18} = intcc;
-
}
class F4_2<bits<6> op3, dag outs, dag ins,
- string asmstr, list<dag> pattern>
- : F4<op3, outs, ins, asmstr, pattern> {
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : F4<op3, outs, ins, asmstr, pattern, itin> {
bit intcc;
bits<2> cc;
bits<4> cond;
@@ -262,8 +281,9 @@ class F4_2<bits<6> op3, dag outs, dag ins,
}
class F4_3<bits<6> op3, bits<6> opf_low, dag outs, dag ins,
- string asmstr, list<dag> pattern>
- : F4<op3, outs, ins, asmstr, pattern> {
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : F4<op3, outs, ins, asmstr, pattern, itin> {
bits<4> cond;
bit intcc;
bits<2> opf_cc;
@@ -278,8 +298,9 @@ class F4_3<bits<6> op3, bits<6> opf_low, dag outs, dag ins,
}
class F4_4r<bits<6> op3, bits<5> opf_low, bits<3> rcond, dag outs, dag ins,
- string asmstr, list<dag> pattern>
- : F4<op3, outs, ins, asmstr, pattern> {
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : F4<op3, outs, ins, asmstr, pattern, itin> {
bits <5> rs1;
bits <5> rs2;
let Inst{18-14} = rs1;
@@ -291,8 +312,9 @@ class F4_4r<bits<6> op3, bits<5> opf_low, bits<3> rcond, dag outs, dag ins,
class F4_4i<bits<6> op3, bits<3> rcond, dag outs, dag ins,
- string asmstr, list<dag> pattern>
- : F4<op3, outs, ins, asmstr, pattern> {
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : F4<op3, outs, ins, asmstr, pattern, itin> {
bits<5> rs1;
bits<10> simm10;
let Inst{18-14} = rs1;
@@ -302,9 +324,10 @@ class F4_4i<bits<6> op3, bits<3> rcond, dag outs, dag ins,
}
-class TRAPSP<bits<6> op3Val, bit isimm, dag outs, dag ins, string asmstr,
- list<dag> pattern>: F3<outs, ins, asmstr, pattern> {
-
+class TRAPSP<bits<6> op3Val, bit isimm, dag outs, dag ins,
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : F3<outs, ins, asmstr, pattern, itin> {
bits<4> cond;
bits<2> cc;
@@ -317,15 +340,20 @@ class TRAPSP<bits<6> op3Val, bit isimm, dag outs, dag ins, string asmstr,
}
-class TRAPSPrr<bits<6> op3Val, dag outs, dag ins, string asmstr,
- list<dag> pattern>: TRAPSP<op3Val, 0, outs, ins, asmstr, pattern> {
+class TRAPSPrr<bits<6> op3Val, dag outs, dag ins,
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : TRAPSP<op3Val, 0, outs, ins, asmstr, pattern, itin> {
bits<5> rs2;
let Inst{10-5} = 0;
let Inst{4-0} = rs2;
}
-class TRAPSPri<bits<6> op3Val, dag outs, dag ins, string asmstr,
- list<dag> pattern>: TRAPSP<op3Val, 1, outs, ins, asmstr, pattern> {
+
+class TRAPSPri<bits<6> op3Val, dag outs, dag ins,
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : TRAPSP<op3Val, 1, outs, ins, asmstr, pattern, itin> {
bits<8> imm;
let Inst{10-8} = 0;
diff --git a/lib/Target/Sparc/SparcInstrInfo.cpp b/lib/Target/Sparc/SparcInstrInfo.cpp
index 05006ac5772b..cfd342410550 100644
--- a/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -41,17 +41,15 @@ SparcInstrInfo::SparcInstrInfo(SparcSubtarget &ST)
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
-unsigned SparcInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+unsigned SparcInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- if (MI->getOpcode() == SP::LDri ||
- MI->getOpcode() == SP::LDXri ||
- MI->getOpcode() == SP::LDFri ||
- MI->getOpcode() == SP::LDDFri ||
- MI->getOpcode() == SP::LDQFri) {
- if (MI->getOperand(1).isFI() && MI->getOperand(2).isImm() &&
- MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
+ if (MI.getOpcode() == SP::LDri || MI.getOpcode() == SP::LDXri ||
+ MI.getOpcode() == SP::LDFri || MI.getOpcode() == SP::LDDFri ||
+ MI.getOpcode() == SP::LDQFri) {
+ if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
+ MI.getOperand(2).getImm() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
}
}
return 0;
@@ -62,17 +60,15 @@ unsigned SparcInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
-unsigned SparcInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+unsigned SparcInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- if (MI->getOpcode() == SP::STri ||
- MI->getOpcode() == SP::STXri ||
- MI->getOpcode() == SP::STFri ||
- MI->getOpcode() == SP::STDFri ||
- MI->getOpcode() == SP::STQFri) {
- if (MI->getOperand(0).isFI() && MI->getOperand(1).isImm() &&
- MI->getOperand(1).getImm() == 0) {
- FrameIndex = MI->getOperand(0).getIndex();
- return MI->getOperand(2).getReg();
+ if (MI.getOpcode() == SP::STri || MI.getOpcode() == SP::STXri ||
+ MI.getOpcode() == SP::STFri || MI.getOpcode() == SP::STDFri ||
+ MI.getOpcode() == SP::STQFri) {
+ if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
+ MI.getOperand(1).getImm() == 0) {
+ FrameIndex = MI.getOperand(0).getIndex();
+ return MI.getOperand(2).getReg();
}
}
return 0;
@@ -119,6 +115,28 @@ static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
case SPCC::FCC_UE: return SPCC::FCC_LG;
case SPCC::FCC_NE: return SPCC::FCC_E;
case SPCC::FCC_E: return SPCC::FCC_NE;
+
+ case SPCC::CPCC_A: return SPCC::CPCC_N;
+ case SPCC::CPCC_N: return SPCC::CPCC_A;
+ case SPCC::CPCC_3: // Fall through
+ case SPCC::CPCC_2: // Fall through
+ case SPCC::CPCC_23: // Fall through
+ case SPCC::CPCC_1: // Fall through
+ case SPCC::CPCC_13: // Fall through
+ case SPCC::CPCC_12: // Fall through
+ case SPCC::CPCC_123: // Fall through
+ case SPCC::CPCC_0: // Fall through
+ case SPCC::CPCC_03: // Fall through
+ case SPCC::CPCC_02: // Fall through
+ case SPCC::CPCC_023: // Fall through
+ case SPCC::CPCC_01: // Fall through
+ case SPCC::CPCC_013: // Fall through
+ case SPCC::CPCC_012:
+    // An "opposite" code is not meaningful, as we don't know what
+    // the CoProc condition means here. The cond-code will only be
+    // used in inline assembly, so this code should not be reached
+    // in a normal compilation pass.
+ llvm_unreachable("Meaningless inversion of co-processor cond code");
}
llvm_unreachable("Invalid cond code");
}
@@ -139,7 +157,7 @@ static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
Target = LastInst->getOperand(0).getMBB();
}
-bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+bool SparcInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
@@ -148,15 +166,15 @@ bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
if (I == MBB.end())
return false;
- if (!isUnpredicatedTerminator(I))
+ if (!isUnpredicatedTerminator(*I))
return false;
// Get the last instruction in the block.
- MachineInstr *LastInst = I;
+ MachineInstr *LastInst = &*I;
unsigned LastOpc = LastInst->getOpcode();
// If there is only one terminator instruction, process it.
- if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
+ if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
if (isUncondBranchOpcode(LastOpc)) {
TBB = LastInst->getOperand(0).getMBB();
return false;
@@ -170,7 +188,7 @@ bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
}
// Get the instruction before it if it is a terminator.
- MachineInstr *SecondLastInst = I;
+ MachineInstr *SecondLastInst = &*I;
unsigned SecondLastOpc = SecondLastInst->getOpcode();
// If AllowModify is true and the block ends with two or more unconditional
@@ -180,19 +198,19 @@ bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
LastInst->eraseFromParent();
LastInst = SecondLastInst;
LastOpc = LastInst->getOpcode();
- if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
+ if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
      // Return now; the only terminator is an unconditional branch.
TBB = LastInst->getOperand(0).getMBB();
return false;
} else {
- SecondLastInst = I;
+ SecondLastInst = &*I;
SecondLastOpc = SecondLastInst->getOpcode();
}
}
}
// If there are three terminators, we don't know what sort of block this is.
- if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
+ if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
return true;
// If the block ends with a B and a Bcc, handle it.
@@ -222,11 +240,11 @@ bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
return true;
}
-unsigned
-SparcInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const {
+unsigned SparcInstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const {
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
"Sparc branch conditions should have one component!");
@@ -282,9 +300,9 @@ bool SparcInstrInfo::ReverseBranchCondition(
}
void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
unsigned numSubRegs = 0;
unsigned movOpc = 0;
const unsigned *subRegIdx = nullptr;
@@ -469,3 +487,20 @@ unsigned SparcInstrInfo::getGlobalBaseReg(MachineFunction *MF) const
SparcFI->setGlobalBaseReg(GlobalBaseReg);
return GlobalBaseReg;
}
+
+bool SparcInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ case TargetOpcode::LOAD_STACK_GUARD: {
+ assert(Subtarget.isTargetLinux() &&
+ "Only Linux target is expected to contain LOAD_STACK_GUARD");
+ // offsetof(tcbhead_t, stack_guard) from sysdeps/sparc/nptl/tls.h in glibc.
+ const int64_t Offset = Subtarget.is64Bit() ? 0x28 : 0x14;
+ MI.setDesc(get(Subtarget.is64Bit() ? SP::LDXri : SP::LDri));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addReg(SP::G7)
+ .addImm(Offset);
+ return true;
+ }
+ }
+ return false;
+}
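
The new expandPostRAPseudo hook gives LOAD_STACK_GUARD a concrete shape: a
load through the thread pointer (%g7) at the glibc tcbhead_t::stack_guard
offset, 0x14 on 32-bit and 0x28 on 64-bit. A hedged sketch of C code that
should exercise this path (the triple and flags are assumptions, not part
of the change):

    /* Build with: clang -target sparc-unknown-linux-gnu \
         -fstack-protector-all -O1 -S guard.c
       The guard loads should come out as "ld [%g7+20], <reg>"
       (an ldx at offset 0x28 on a 64-bit target). */
    #include <string.h>

    void copy_fixed(const char *src) {
      char buf[16];
      strcpy(buf, src);  /* an overflow here would trip the guard check */
    }
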
diff --git a/lib/Target/Sparc/SparcInstrInfo.h b/lib/Target/Sparc/SparcInstrInfo.h
index 9de624cc9582..8ed97c1479ca 100644
--- a/lib/Target/Sparc/SparcInstrInfo.h
+++ b/lib/Target/Sparc/SparcInstrInfo.h
@@ -54,7 +54,7 @@ public:
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
- unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// isStoreToStackSlot - If the specified machine instruction is a direct
@@ -62,26 +62,25 @@ public:
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
- unsigned isStoreToStackSlot(const MachineInstr *MI,
+ unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
- bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify = false) const override ;
+ bool AllowModify = false) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const override;
+ const DebugLoc &DL) const override;
bool
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
- void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -97,6 +96,9 @@ public:
const TargetRegisterInfo *TRI) const override;
unsigned getGlobalBaseReg(MachineFunction *MF) const;
+
+ // Lower pseudo instructions after register allocation.
+ bool expandPostRAPseudo(MachineInstr &MI) const override;
};
}
diff --git a/lib/Target/Sparc/SparcInstrInfo.td b/lib/Target/Sparc/SparcInstrInfo.td
index ec37c22a5b33..cc55c9c8e032 100644
--- a/lib/Target/Sparc/SparcInstrInfo.td
+++ b/lib/Target/Sparc/SparcInstrInfo.td
@@ -49,6 +49,18 @@ def HasVIS3 : Predicate<"Subtarget->isVIS3()">,
// point instructions.
def HasHardQuad : Predicate<"Subtarget->hasHardQuad()">;
+// HasLeonCASA - This is true when the target processor supports the CASA
+// instruction.
+def HasLeonCASA : Predicate<"Subtarget->hasLeonCasa()">;
+
+// HasUMAC_SMAC - This is true when the target processor supports the
+// UMAC and SMAC instructions.
+def HasUMAC_SMAC : Predicate<"Subtarget->hasUmacSmac()">;
+
+def HasNoFdivSqrtFix : Predicate<"!Subtarget->fixAllFDIVSQRT()">;
+def HasNoFmulsFix : Predicate<"!Subtarget->replaceFMULS()">;
+def HasNoFsmuldFix : Predicate<"!Subtarget->fixFSMULD()">;
+
// UseDeprecatedInsts - This predicate is true when the target processor is a
// V8, or when it is V9 but the V8 deprecated instructions are efficient enough
// to use when appropriate. In either of these cases, the instruction selector
@@ -154,6 +166,9 @@ SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDTSPtlsld :
SDTypeProfile<1, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
+def SDTSPeh_sjlj_setjmp : SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisPtrTy<1>]>;
+def SDTSPeh_sjlj_longjmp: SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
+
def SPcmpicc : SDNode<"SPISD::CMPICC", SDTSPcmpicc, [SDNPOutGlue]>;
def SPcmpfcc : SDNode<"SPISD::CMPFCC", SDTSPcmpfcc, [SDNPOutGlue]>;
def SPbricc : SDNode<"SPISD::BRICC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
@@ -172,6 +187,13 @@ def SPselecticc : SDNode<"SPISD::SELECT_ICC", SDTSPselectcc, [SDNPInGlue]>;
def SPselectxcc : SDNode<"SPISD::SELECT_XCC", SDTSPselectcc, [SDNPInGlue]>;
def SPselectfcc : SDNode<"SPISD::SELECT_FCC", SDTSPselectcc, [SDNPInGlue]>;
+def SPsjlj_setjmp: SDNode<"SPISD::EH_SJLJ_SETJMP",
+ SDTSPeh_sjlj_setjmp,
+ [SDNPHasChain, SDNPSideEffect]>;
+def SPsjlj_longjmp: SDNode<"SPISD::EH_SJLJ_LONGJMP",
+ SDTSPeh_sjlj_longjmp,
+ [SDNPHasChain, SDNPSideEffect]>;
+
// These are target-independent nodes, but have target-specific formats.
def SDT_SPCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
def SDT_SPCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
@@ -235,12 +257,28 @@ def FCC_UL : FCC_VAL<19>; // Unordered or Less
def FCC_LG : FCC_VAL<18>; // Less or Greater
def FCC_NE : FCC_VAL<17>; // Not Equal
def FCC_E : FCC_VAL<25>; // Equal
-def FCC_UE : FCC_VAL<24>; // Unordered or Equal
-def FCC_GE : FCC_VAL<25>; // Greater or Equal
-def FCC_UGE : FCC_VAL<26>; // Unordered or Greater or Equal
-def FCC_LE : FCC_VAL<27>; // Less or Equal
-def FCC_ULE : FCC_VAL<28>; // Unordered or Less or Equal
-def FCC_O : FCC_VAL<29>; // Ordered
+def FCC_UE : FCC_VAL<26>; // Unordered or Equal
+def FCC_GE : FCC_VAL<27>; // Greater or Equal
+def FCC_UGE : FCC_VAL<28>; // Unordered or Greater or Equal
+def FCC_LE : FCC_VAL<29>; // Less or Equal
+def FCC_ULE : FCC_VAL<30>; // Unordered or Less or Equal
+def FCC_O : FCC_VAL<31>; // Ordered
+
+class CPCC_VAL<int N> : PatLeaf<(i32 N)>;
+def CPCC_3 : CPCC_VAL<39>; // 3
+def CPCC_2 : CPCC_VAL<38>; // 2
+def CPCC_23 : CPCC_VAL<37>; // 2 or 3
+def CPCC_1 : CPCC_VAL<36>; // 1
+def CPCC_13 : CPCC_VAL<35>; // 1 or 3
+def CPCC_12 : CPCC_VAL<34>; // 1 or 2
+def CPCC_123 : CPCC_VAL<33>; // 1 or 2 or 3
+def CPCC_0 : CPCC_VAL<41>; // 0
+def CPCC_03 : CPCC_VAL<42>; // 0 or 3
+def CPCC_02 : CPCC_VAL<43>; // 0 or 2
+def CPCC_023 : CPCC_VAL<44>; // 0 or 2 or 3
+def CPCC_01 : CPCC_VAL<45>; // 0 or 1
+def CPCC_013 : CPCC_VAL<46>; // 0 or 1 or 3
+def CPCC_012 : CPCC_VAL<47>; // 0 or 1 or 2
//===----------------------------------------------------------------------===//
// Instruction Class Templates
@@ -248,53 +286,61 @@ def FCC_O : FCC_VAL<29>; // Ordered
/// F3_12 multiclass - Define a normal F3_1/F3_2 pattern in one shot.
multiclass F3_12<string OpcStr, bits<6> Op3Val, SDNode OpNode,
- RegisterClass RC, ValueType Ty, Operand immOp> {
+ RegisterClass RC, ValueType Ty, Operand immOp,
+ InstrItinClass itin = IIC_iu_instr> {
def rr : F3_1<2, Op3Val,
(outs RC:$rd), (ins RC:$rs1, RC:$rs2),
!strconcat(OpcStr, " $rs1, $rs2, $rd"),
- [(set Ty:$rd, (OpNode Ty:$rs1, Ty:$rs2))]>;
+ [(set Ty:$rd, (OpNode Ty:$rs1, Ty:$rs2))],
+ itin>;
def ri : F3_2<2, Op3Val,
(outs RC:$rd), (ins RC:$rs1, immOp:$simm13),
!strconcat(OpcStr, " $rs1, $simm13, $rd"),
- [(set Ty:$rd, (OpNode Ty:$rs1, (Ty simm13:$simm13)))]>;
+ [(set Ty:$rd, (OpNode Ty:$rs1, (Ty simm13:$simm13)))],
+ itin>;
}
/// F3_12np multiclass - Define a normal F3_1/F3_2 pattern in one shot, with no
/// pattern.
-multiclass F3_12np<string OpcStr, bits<6> Op3Val> {
+multiclass F3_12np<string OpcStr, bits<6> Op3Val, InstrItinClass itin = IIC_iu_instr> {
def rr : F3_1<2, Op3Val,
(outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2),
- !strconcat(OpcStr, " $rs1, $rs2, $rd"), []>;
+ !strconcat(OpcStr, " $rs1, $rs2, $rd"), [],
+ itin>;
def ri : F3_2<2, Op3Val,
(outs IntRegs:$rd), (ins IntRegs:$rs1, simm13Op:$simm13),
- !strconcat(OpcStr, " $rs1, $simm13, $rd"), []>;
+ !strconcat(OpcStr, " $rs1, $simm13, $rd"), [],
+ itin>;
}
// Load multiclass - Define both Reg+Reg/Reg+Imm patterns in one shot.
multiclass Load<string OpcStr, bits<6> Op3Val, SDPatternOperator OpNode,
- RegisterClass RC, ValueType Ty> {
+ RegisterClass RC, ValueType Ty, InstrItinClass itin = IIC_iu_instr> {
def rr : F3_1<3, Op3Val,
(outs RC:$dst), (ins MEMrr:$addr),
!strconcat(OpcStr, " [$addr], $dst"),
- [(set Ty:$dst, (OpNode ADDRrr:$addr))]>;
+ [(set Ty:$dst, (OpNode ADDRrr:$addr))],
+ itin>;
def ri : F3_2<3, Op3Val,
(outs RC:$dst), (ins MEMri:$addr),
!strconcat(OpcStr, " [$addr], $dst"),
- [(set Ty:$dst, (OpNode ADDRri:$addr))]>;
+ [(set Ty:$dst, (OpNode ADDRri:$addr))],
+ itin>;
}
// TODO: Instructions of the LoadASI class are currently asm only; hooking up
// CodeGen's address spaces to use these is a future task.
class LoadASI<string OpcStr, bits<6> Op3Val, SDPatternOperator OpNode,
- RegisterClass RC, ValueType Ty> :
+ RegisterClass RC, ValueType Ty, InstrItinClass itin = NoItinerary> :
F3_1_asi<3, Op3Val, (outs RC:$dst), (ins MEMrr:$addr, i8imm:$asi),
!strconcat(OpcStr, "a [$addr] $asi, $dst"),
[]>;
// LoadA multiclass - As above, but also define alternate address space variant
multiclass LoadA<string OpcStr, bits<6> Op3Val, bits<6> LoadAOp3Val,
- SDPatternOperator OpNode, RegisterClass RC, ValueType Ty> :
- Load<OpcStr, Op3Val, OpNode, RC, Ty> {
+ SDPatternOperator OpNode, RegisterClass RC, ValueType Ty,
+ InstrItinClass itin = NoItinerary> :
+ Load<OpcStr, Op3Val, OpNode, RC, Ty, itin> {
def Arr : LoadASI<OpcStr, LoadAOp3Val, OpNode, RC, Ty>;
}
@@ -302,38 +348,43 @@ multiclass LoadA<string OpcStr, bits<6> Op3Val, bits<6> LoadAOp3Val,
// It is unlikely that general-purpose code could make use of it.
// CAS is preferred for sparc v9.
def LDSTUBrr : F3_1<3, 0b001101, (outs IntRegs:$dst), (ins MEMrr:$addr),
- "ldstub [$addr], $dst", []>;
+ "ldstub [$addr], $dst", []>;
def LDSTUBri : F3_2<3, 0b001101, (outs IntRegs:$dst), (ins MEMri:$addr),
- "ldstub [$addr], $dst", []>;
+ "ldstub [$addr], $dst", []>;
def LDSTUBArr : F3_1_asi<3, 0b011101, (outs IntRegs:$dst),
(ins MEMrr:$addr, i8imm:$asi),
"ldstuba [$addr] $asi, $dst", []>;
// Store multiclass - Define both Reg+Reg/Reg+Imm patterns in one shot.
multiclass Store<string OpcStr, bits<6> Op3Val, SDPatternOperator OpNode,
- RegisterClass RC, ValueType Ty> {
+ RegisterClass RC, ValueType Ty, InstrItinClass itin = IIC_st> {
def rr : F3_1<3, Op3Val,
(outs), (ins MEMrr:$addr, RC:$rd),
!strconcat(OpcStr, " $rd, [$addr]"),
- [(OpNode Ty:$rd, ADDRrr:$addr)]>;
+ [(OpNode Ty:$rd, ADDRrr:$addr)],
+ itin>;
def ri : F3_2<3, Op3Val,
(outs), (ins MEMri:$addr, RC:$rd),
!strconcat(OpcStr, " $rd, [$addr]"),
- [(OpNode Ty:$rd, ADDRri:$addr)]>;
+ [(OpNode Ty:$rd, ADDRri:$addr)],
+ itin>;
}
// TODO: Instructions of the StoreASI class are currently asm only; hooking up
// CodeGen's address spaces to use these is a future task.
class StoreASI<string OpcStr, bits<6> Op3Val,
- SDPatternOperator OpNode, RegisterClass RC, ValueType Ty> :
+ SDPatternOperator OpNode, RegisterClass RC, ValueType Ty,
+ InstrItinClass itin = IIC_st> :
F3_1_asi<3, Op3Val, (outs), (ins MEMrr:$addr, RC:$rd, i8imm:$asi),
- !strconcat(OpcStr, "a $rd, [$addr] $asi"),
- []>;
+ !strconcat(OpcStr, "a $rd, [$addr] $asi"),
+ [],
+ itin>;
multiclass StoreA<string OpcStr, bits<6> Op3Val, bits<6> StoreAOp3Val,
- SDPatternOperator OpNode, RegisterClass RC, ValueType Ty> :
+ SDPatternOperator OpNode, RegisterClass RC, ValueType Ty,
+ InstrItinClass itin = IIC_st> :
Store<OpcStr, Op3Val, OpNode, RC, Ty> {
- def Arr : StoreASI<OpcStr, StoreAOp3Val, OpNode, RC, Ty>;
+ def Arr : StoreASI<OpcStr, StoreAOp3Val, OpNode, RC, Ty, itin>;
}
//===----------------------------------------------------------------------===//
@@ -418,6 +469,27 @@ let usesCustomInserter = 1, Uses = [FCC0] in {
[(set f128:$dst, (SPselectfcc f128:$T, f128:$F, imm:$Cond))]>;
}
+let hasSideEffects = 1, isBarrier = 1, usesCustomInserter = 1 in {
+ let Defs = [WIM] in
+ def EH_SJLJ_SETJMP32ri : Pseudo<(outs IntRegs:$dst), (ins MEMri:$buf),
+ "#EH_SJLJ_SETJMP32",
+ [(set i32:$dst, (SPsjlj_setjmp ADDRri:$buf))]>,
+ Requires<[Is32Bit]>;
+ def EH_SJLJ_SETJMP32rr : Pseudo<(outs IntRegs:$dst), (ins MEMrr:$buf),
+ "#EH_SJLJ_SETJMP32",
+ [(set i32:$dst, (SPsjlj_setjmp ADDRrr:$buf))]>,
+ Requires<[Is32Bit]>;
+ let isTerminator = 1 in
+ def EH_SJLJ_LONGJMP32ri : Pseudo<(outs), (ins MEMri:$buf),
+ "#EH_SJLJ_LONGJMP32",
+ [(SPsjlj_longjmp ADDRri:$buf)]>,
+ Requires<[Is32Bit]>;
+ def EH_SJLJ_LONGJMP32rr : Pseudo<(outs), (ins MEMrr:$buf),
+ "#EH_SJLJ_LONGJMP32",
+ [(SPsjlj_longjmp ADDRrr:$buf)]>,
+ Requires<[Is32Bit]>;
+}
+
// Section B.1 - Load Integer Instructions, p. 90
let DecoderMethod = "DecodeLoadInt" in {
defm LDSB : LoadA<"ldsb", 0b001001, 0b011001, sextloadi8, IntRegs, i32>;
@@ -428,16 +500,16 @@ let DecoderMethod = "DecodeLoadInt" in {
}
let DecoderMethod = "DecodeLoadIntPair" in
- defm LDD : LoadA<"ldd", 0b000011, 0b010011, load, IntPair, v2i32>;
+ defm LDD : LoadA<"ldd", 0b000011, 0b010011, load, IntPair, v2i32, IIC_ldd>;
// Section B.2 - Load Floating-point Instructions, p. 92
let DecoderMethod = "DecodeLoadFP" in {
- defm LDF : Load<"ld", 0b100000, load, FPRegs, f32>;
- def LDFArr : LoadASI<"ld", 0b110000, load, FPRegs, f32>,
+ defm LDF : Load<"ld", 0b100000, load, FPRegs, f32, IIC_iu_or_fpu_instr>;
+ def LDFArr : LoadASI<"ld", 0b110000, load, FPRegs, f32, IIC_iu_or_fpu_instr>,
Requires<[HasV9]>;
}
let DecoderMethod = "DecodeLoadDFP" in {
- defm LDDF : Load<"ldd", 0b100011, load, DFPRegs, f64>;
+ defm LDDF : Load<"ldd", 0b100011, load, DFPRegs, f64, IIC_ldd>;
def LDDFArr : LoadASI<"ldd", 0b110011, load, DFPRegs, f64>,
Requires<[HasV9]>;
}
@@ -445,13 +517,27 @@ let DecoderMethod = "DecodeLoadQFP" in
defm LDQF : LoadA<"ldq", 0b100010, 0b110010, load, QFPRegs, f128>,
Requires<[HasV9, HasHardQuad]>;
+let DecoderMethod = "DecodeLoadCP" in
+ defm LDC : Load<"ld", 0b110000, load, CoprocRegs, i32>;
+let DecoderMethod = "DecodeLoadCPPair" in
+ defm LDDC : Load<"ldd", 0b110011, load, CoprocPair, v2i32, IIC_ldd>;
+
+let DecoderMethod = "DecodeLoadCP", Defs = [CPSR] in {
+ let rd = 0 in {
+ def LDCSRrr : F3_1<3, 0b110001, (outs), (ins MEMrr:$addr),
+ "ld [$addr], %csr", []>;
+ def LDCSRri : F3_2<3, 0b110001, (outs), (ins MEMri:$addr),
+ "ld [$addr], %csr", []>;
+ }
+}
+
let DecoderMethod = "DecodeLoadFP" in
let Defs = [FSR] in {
let rd = 0 in {
def LDFSRrr : F3_1<3, 0b100001, (outs), (ins MEMrr:$addr),
- "ld [$addr], %fsr", []>;
+ "ld [$addr], %fsr", [], IIC_iu_or_fpu_instr>;
def LDFSRri : F3_2<3, 0b100001, (outs), (ins MEMri:$addr),
- "ld [$addr], %fsr", []>;
+ "ld [$addr], %fsr", [], IIC_iu_or_fpu_instr>;
}
let rd = 1 in {
def LDXFSRrr : F3_1<3, 0b100001, (outs), (ins MEMrr:$addr),
@@ -469,7 +555,7 @@ let DecoderMethod = "DecodeStoreInt" in {
}
let DecoderMethod = "DecodeStoreIntPair" in
- defm STD : StoreA<"std", 0b000111, 0b010111, store, IntPair, v2i32>;
+ defm STD : StoreA<"std", 0b000111, 0b010111, store, IntPair, v2i32, IIC_std>;
// Section B.5 - Store Floating-point Instructions, p. 97
let DecoderMethod = "DecodeStoreFP" in {
@@ -478,7 +564,7 @@ let DecoderMethod = "DecodeStoreFP" in {
Requires<[HasV9]>;
}
let DecoderMethod = "DecodeStoreDFP" in {
- defm STDF : Store<"std", 0b100111, store, DFPRegs, f64>;
+ defm STDF : Store<"std", 0b100111, store, DFPRegs, f64, IIC_std>;
def STDFArr : StoreASI<"std", 0b110111, store, DFPRegs, f64>,
Requires<[HasV9]>;
}
@@ -486,21 +572,49 @@ let DecoderMethod = "DecodeStoreQFP" in
defm STQF : StoreA<"stq", 0b100110, 0b110110, store, QFPRegs, f128>,
Requires<[HasV9, HasHardQuad]>;
-let DecoderMethod = "DecodeStoreFP" in
- let Defs = [FSR] in {
- let rd = 0 in {
+let DecoderMethod = "DecodeStoreCP" in
+ defm STC : Store<"st", 0b110100, store, CoprocRegs, i32>;
+
+let DecoderMethod = "DecodeStoreCPPair" in
+ defm STDC : Store<"std", 0b110111, store, CoprocPair, v2i32, IIC_std>;
+
+let DecoderMethod = "DecodeStoreCP", rd = 0 in {
+ let Defs = [CPSR] in {
+ def STCSRrr : F3_1<3, 0b110101, (outs MEMrr:$addr), (ins),
+ "st %csr, [$addr]", [], IIC_st>;
+ def STCSRri : F3_2<3, 0b110101, (outs MEMri:$addr), (ins),
+ "st %csr, [$addr]", [], IIC_st>;
+ }
+ let Defs = [CPQ] in {
+ def STDCQrr : F3_1<3, 0b110110, (outs MEMrr:$addr), (ins),
+ "std %cq, [$addr]", [], IIC_std>;
+ def STDCQri : F3_2<3, 0b110110, (outs MEMri:$addr), (ins),
+ "std %cq, [$addr]", [], IIC_std>;
+ }
+}
+
+let DecoderMethod = "DecodeStoreFP" in {
+ let rd = 0 in {
+ let Defs = [FSR] in {
def STFSRrr : F3_1<3, 0b100101, (outs MEMrr:$addr), (ins),
- "st %fsr, [$addr]", []>;
+ "st %fsr, [$addr]", [], IIC_st>;
def STFSRri : F3_2<3, 0b100101, (outs MEMri:$addr), (ins),
- "st %fsr, [$addr]", []>;
+ "st %fsr, [$addr]", [], IIC_st>;
}
- let rd = 1 in {
- def STXFSRrr : F3_1<3, 0b100101, (outs MEMrr:$addr), (ins),
- "stx %fsr, [$addr]", []>, Requires<[HasV9]>;
- def STXFSRri : F3_2<3, 0b100101, (outs MEMri:$addr), (ins),
- "stx %fsr, [$addr]", []>, Requires<[HasV9]>;
+ let Defs = [FQ] in {
+ def STDFQrr : F3_1<3, 0b100110, (outs MEMrr:$addr), (ins),
+ "std %fq, [$addr]", [], IIC_std>;
+ def STDFQri : F3_2<3, 0b100110, (outs MEMri:$addr), (ins),
+ "std %fq, [$addr]", [], IIC_std>;
}
}
+ let rd = 1, Defs = [FSR] in {
+ def STXFSRrr : F3_1<3, 0b100101, (outs MEMrr:$addr), (ins),
+ "stx %fsr, [$addr]", []>, Requires<[HasV9]>;
+ def STXFSRri : F3_2<3, 0b100101, (outs MEMri:$addr), (ins),
+ "stx %fsr, [$addr]", []>, Requires<[HasV9]>;
+ }
+}
// Section B.8 - SWAP Register with Memory Instruction
// (Atomic swap)
@@ -524,7 +638,8 @@ let Constraints = "$val = $dst", DecoderMethod = "DecodeSWAP" in {
def SETHIi: F2_1<0b100,
(outs IntRegs:$rd), (ins i32imm:$imm22),
"sethi $imm22, $rd",
- [(set i32:$rd, SETHIimm:$imm22)]>;
+ [(set i32:$rd, SETHIimm:$imm22)],
+ IIC_iu_instr>;
// Section B.10 - NOP Instruction, p. 105
// (It's a special case of SETHI)
@@ -619,13 +734,13 @@ let Defs = [ICC], rd = 0 in {
// Section B.18 - Multiply Instructions, p. 113
let Defs = [Y] in {
- defm UMUL : F3_12np<"umul", 0b001010>;
- defm SMUL : F3_12 <"smul", 0b001011, mul, IntRegs, i32, simm13Op>;
+ defm UMUL : F3_12np<"umul", 0b001010, IIC_iu_umul>;
+ defm SMUL : F3_12 <"smul", 0b001011, mul, IntRegs, i32, simm13Op, IIC_iu_smul>;
}
let Defs = [Y, ICC] in {
- defm UMULCC : F3_12np<"umulcc", 0b011010>;
- defm SMULCC : F3_12np<"smulcc", 0b011011>;
+ defm UMULCC : F3_12np<"umulcc", 0b011010, IIC_iu_umul>;
+ defm SMULCC : F3_12np<"smulcc", 0b011011, IIC_iu_smul>;
}
let Defs = [Y, ICC], Uses = [Y, ICC] in {
@@ -634,13 +749,13 @@ let Defs = [Y, ICC], Uses = [Y, ICC] in {
// Section B.19 - Divide Instructions, p. 115
let Uses = [Y], Defs = [Y] in {
- defm UDIV : F3_12np<"udiv", 0b001110>;
- defm SDIV : F3_12np<"sdiv", 0b001111>;
+ defm UDIV : F3_12np<"udiv", 0b001110, IIC_iu_div>;
+ defm SDIV : F3_12np<"sdiv", 0b001111, IIC_iu_div>;
}
let Uses = [Y], Defs = [Y, ICC] in {
- defm UDIVCC : F3_12np<"udivcc", 0b011110>;
- defm SDIVCC : F3_12np<"sdivcc", 0b011111>;
+ defm UDIVCC : F3_12np<"udivcc", 0b011110, IIC_iu_div>;
+ defm SDIVCC : F3_12np<"sdivcc", 0b011111, IIC_iu_div>;
}
// Section B.20 - SAVE and RESTORE, p. 117
@@ -666,26 +781,30 @@ let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in {
// conditional branch class:
class BranchSP<dag ins, string asmstr, list<dag> pattern>
- : F2_2<0b010, 0, (outs), ins, asmstr, pattern>;
+ : F2_2<0b010, 0, (outs), ins, asmstr, pattern, IIC_iu_instr>;
// conditional branch with annul class:
class BranchSPA<dag ins, string asmstr, list<dag> pattern>
- : F2_2<0b010, 1, (outs), ins, asmstr, pattern>;
+ : F2_2<0b010, 1, (outs), ins, asmstr, pattern, IIC_iu_instr>;
// Conditional branch class on %icc|%xcc with predication:
multiclass IPredBranch<string regstr, list<dag> CCPattern> {
def CC : F2_3<0b001, 0, 1, (outs), (ins bprtarget:$imm19, CCOp:$cond),
- !strconcat("b$cond ", !strconcat(regstr, ", $imm19")),
- CCPattern>;
+ !strconcat("b$cond ", !strconcat(regstr, ", $imm19")),
+ CCPattern,
+ IIC_iu_instr>;
def CCA : F2_3<0b001, 1, 1, (outs), (ins bprtarget:$imm19, CCOp:$cond),
- !strconcat("b$cond,a ", !strconcat(regstr, ", $imm19")),
- []>;
+ !strconcat("b$cond,a ", !strconcat(regstr, ", $imm19")),
+ [],
+ IIC_iu_instr>;
def CCNT : F2_3<0b001, 0, 0, (outs), (ins bprtarget:$imm19, CCOp:$cond),
!strconcat("b$cond,pn ", !strconcat(regstr, ", $imm19")),
- []>;
+ [],
+ IIC_iu_instr>;
def CCANT : F2_3<0b001, 1, 0, (outs), (ins bprtarget:$imm19, CCOp:$cond),
!strconcat("b$cond,a,pn ", !strconcat(regstr, ", $imm19")),
- []>;
+ [],
+ IIC_iu_instr>;
}
} // let isBranch = 1, isTerminator = 1, hasDelaySlot = 1
@@ -721,26 +840,26 @@ let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in {
// floating-point conditional branch class:
class FPBranchSP<dag ins, string asmstr, list<dag> pattern>
- : F2_2<0b110, 0, (outs), ins, asmstr, pattern>;
+ : F2_2<0b110, 0, (outs), ins, asmstr, pattern, IIC_fpu_normal_instr>;
// floating-point conditional branch with annul class:
class FPBranchSPA<dag ins, string asmstr, list<dag> pattern>
- : F2_2<0b110, 1, (outs), ins, asmstr, pattern>;
+ : F2_2<0b110, 1, (outs), ins, asmstr, pattern, IIC_fpu_normal_instr>;
// Conditional branch class on %fcc0-%fcc3 with predication:
multiclass FPredBranch {
def CC : F2_3<0b101, 0, 1, (outs), (ins bprtarget:$imm19, CCOp:$cond,
FCCRegs:$cc),
- "fb$cond $cc, $imm19", []>;
+ "fb$cond $cc, $imm19", [], IIC_fpu_normal_instr>;
def CCA : F2_3<0b101, 1, 1, (outs), (ins bprtarget:$imm19, CCOp:$cond,
FCCRegs:$cc),
- "fb$cond,a $cc, $imm19", []>;
+ "fb$cond,a $cc, $imm19", [], IIC_fpu_normal_instr>;
def CCNT : F2_3<0b101, 0, 0, (outs), (ins bprtarget:$imm19, CCOp:$cond,
FCCRegs:$cc),
- "fb$cond,pn $cc, $imm19", []>;
+ "fb$cond,pn $cc, $imm19", [], IIC_fpu_normal_instr>;
def CCANT : F2_3<0b101, 1, 0, (outs), (ins bprtarget:$imm19, CCOp:$cond,
FCCRegs:$cc),
- "fb$cond,a,pn $cc, $imm19", []>;
+ "fb$cond,a,pn $cc, $imm19", [], IIC_fpu_normal_instr>;
}
} // let isBranch = 1, isTerminator = 1, hasDelaySlot = 1
@@ -755,13 +874,33 @@ let Uses = [FCC0] in {
let Predicates = [HasV9] in
defm BPF : FPredBranch;
+// Section B.22 - Branch on Co-processor Condition Codes Instructions, p. 123
+let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in {
+
+// co-processor conditional branch class:
+class CPBranchSP<dag ins, string asmstr, list<dag> pattern>
+ : F2_2<0b111, 0, (outs), ins, asmstr, pattern>;
+// co-processor conditional branch with annul class:
+class CPBranchSPA<dag ins, string asmstr, list<dag> pattern>
+ : F2_2<0b111, 1, (outs), ins, asmstr, pattern>;
+
+} // let isBranch = 1, isTerminator = 1, hasDelaySlot = 1
+
+def CBCOND : CPBranchSP<(ins brtarget:$imm22, CCOp:$cond),
+ "cb$cond $imm22",
+ [(SPbrfcc bb:$imm22, imm:$cond)]>;
+def CBCONDA : CPBranchSPA<(ins brtarget:$imm22, CCOp:$cond),
+ "cb$cond,a $imm22", []>;
+
// Section B.24 - Call and Link Instruction, p. 125
// This is the only Format 1 instruction
let Uses = [O6],
hasDelaySlot = 1, isCall = 1 in {
def CALL : InstSP<(outs), (ins calltarget:$disp, variable_ops),
- "call $disp", []> {
+ "call $disp",
+ [],
+ IIC_jmp_or_call> {
bits<30> disp;
let op = 1;
let Inst{29-0} = disp;
@@ -772,11 +911,13 @@ let Uses = [O6],
def CALLrr : F3_1<2, 0b111000,
(outs), (ins MEMrr:$ptr, variable_ops),
"call $ptr",
- [(call ADDRrr:$ptr)]>;
+ [(call ADDRrr:$ptr)],
+ IIC_jmp_or_call>;
def CALLri : F3_2<2, 0b111000,
(outs), (ins MEMri:$ptr, variable_ops),
"call $ptr",
- [(call ADDRri:$ptr)]>;
+ [(call ADDRri:$ptr)],
+ IIC_jmp_or_call>;
}
}
@@ -785,10 +926,16 @@ let Uses = [O6],
// JMPL Instruction.
let isTerminator = 1, hasDelaySlot = 1, isBarrier = 1,
DecoderMethod = "DecodeJMPL" in {
- def JMPLrr: F3_1<2, 0b111000, (outs IntRegs:$dst), (ins MEMrr:$addr),
- "jmpl $addr, $dst", []>;
- def JMPLri: F3_2<2, 0b111000, (outs IntRegs:$dst), (ins MEMri:$addr),
- "jmpl $addr, $dst", []>;
+ def JMPLrr: F3_1<2, 0b111000,
+ (outs IntRegs:$dst), (ins MEMrr:$addr),
+ "jmpl $addr, $dst",
+ [],
+ IIC_jmp_or_call>;
+ def JMPLri: F3_2<2, 0b111000,
+ (outs IntRegs:$dst), (ins MEMri:$addr),
+ "jmpl $addr, $dst",
+ [],
+ IIC_jmp_or_call>;
}
// Section A.3 - Synthetic Instructions, p. 85
@@ -796,37 +943,65 @@ let isTerminator = 1, hasDelaySlot = 1, isBarrier = 1,
let isReturn = 1, isTerminator = 1, hasDelaySlot = 1, isBarrier = 1,
isCodeGenOnly = 1 in {
let rd = 0, rs1 = 15 in
- def RETL: F3_2<2, 0b111000, (outs), (ins i32imm:$val),
- "jmp %o7+$val", [(retflag simm13:$val)]>;
+ def RETL: F3_2<2, 0b111000,
+ (outs), (ins i32imm:$val),
+ "jmp %o7+$val",
+ [(retflag simm13:$val)],
+ IIC_jmp_or_call>;
let rd = 0, rs1 = 31 in
- def RET: F3_2<2, 0b111000, (outs), (ins i32imm:$val),
- "jmp %i7+$val", []>;
+ def RET: F3_2<2, 0b111000,
+ (outs), (ins i32imm:$val),
+ "jmp %i7+$val",
+ [],
+ IIC_jmp_or_call>;
}
// Section B.26 - Return from Trap Instruction
let isReturn = 1, isTerminator = 1, hasDelaySlot = 1,
isBarrier = 1, rd = 0, DecoderMethod = "DecodeReturn" in {
- def RETTrr : F3_1<2, 0b111001, (outs), (ins MEMrr:$addr),
- "rett $addr", []>;
- def RETTri : F3_2<2, 0b111001, (outs), (ins MEMri:$addr),
- "rett $addr", []>;
+ def RETTrr : F3_1<2, 0b111001,
+ (outs), (ins MEMrr:$addr),
+ "rett $addr",
+ [],
+ IIC_jmp_or_call>;
+ def RETTri : F3_2<2, 0b111001,
+ (outs), (ins MEMri:$addr),
+ "rett $addr",
+ [],
+ IIC_jmp_or_call>;
}
// Section B.27 - Trap on Integer Condition Codes Instruction
+// bare trap instruction classes (no explicit %icc/%xcc operand):
+let DecoderNamespace = "SparcV8", DecoderMethod = "DecodeTRAP", hasSideEffects = 1, Uses = [ICC], cc = 0b00 in
+{
+ def TRAPrr : TRAPSPrr<0b111010,
+ (outs), (ins IntRegs:$rs1, IntRegs:$rs2, CCOp:$cond),
+ "t$cond $rs1 + $rs2",
+ []>;
+ def TRAPri : TRAPSPri<0b111010,
+ (outs), (ins IntRegs:$rs1, i32imm:$imm, CCOp:$cond),
+ "t$cond $rs1 + $imm",
+ []>;
+}
+
multiclass TRAP<string regStr> {
- def rr : TRAPSPrr<0b111010, (outs), (ins IntRegs:$rs1, IntRegs:$rs2,
- CCOp:$cond),
- !strconcat(!strconcat("t$cond ", regStr), ", $rs1 + $rs2"), []>;
- def ri : TRAPSPri<0b111010, (outs), (ins IntRegs:$rs1, i32imm:$imm,
- CCOp:$cond),
- !strconcat(!strconcat("t$cond ", regStr), ", $rs1 + $imm"), []>;
+ def rr : TRAPSPrr<0b111010,
+ (outs), (ins IntRegs:$rs1, IntRegs:$rs2, CCOp:$cond),
+ !strconcat(!strconcat("t$cond ", regStr), ", $rs1 + $rs2"),
+ []>;
+ def ri : TRAPSPri<0b111010,
+ (outs), (ins IntRegs:$rs1, i32imm:$imm, CCOp:$cond),
+ !strconcat(!strconcat("t$cond ", regStr), ", $rs1 + $imm"),
+ []>;
}
-let hasSideEffects = 1, Uses = [ICC], cc = 0b00 in
+let DecoderNamespace = "SparcV9", DecoderMethod = "DecodeTRAP", Predicates = [HasV9], hasSideEffects = 1, Uses = [ICC], cc = 0b00 in
defm TICC : TRAP<"%icc">;
+
let isBarrier = 1, isTerminator = 1, rd = 0b01000, rs1 = 0, simm13 = 5 in
def TA5 : F3_2<0b10, 0b111010, (outs), (ins), "ta 5", [(trap)]>;
@@ -922,11 +1097,13 @@ let rd = 0 in {
def FITOS : F3_3u<2, 0b110100, 0b011000100,
(outs FPRegs:$rd), (ins FPRegs:$rs2),
"fitos $rs2, $rd",
- [(set FPRegs:$rd, (SPitof FPRegs:$rs2))]>;
+ [(set FPRegs:$rd, (SPitof FPRegs:$rs2))],
+ IIC_fpu_fast_instr>;
def FITOD : F3_3u<2, 0b110100, 0b011001000,
(outs DFPRegs:$rd), (ins FPRegs:$rs2),
"fitod $rs2, $rd",
- [(set DFPRegs:$rd, (SPitof FPRegs:$rs2))]>;
+ [(set DFPRegs:$rd, (SPitof FPRegs:$rs2))],
+ IIC_fpu_fast_instr>;
def FITOQ : F3_3u<2, 0b110100, 0b011001100,
(outs QFPRegs:$rd), (ins FPRegs:$rs2),
"fitoq $rs2, $rd",
@@ -937,11 +1114,13 @@ def FITOQ : F3_3u<2, 0b110100, 0b011001100,
def FSTOI : F3_3u<2, 0b110100, 0b011010001,
(outs FPRegs:$rd), (ins FPRegs:$rs2),
"fstoi $rs2, $rd",
- [(set FPRegs:$rd, (SPftoi FPRegs:$rs2))]>;
+ [(set FPRegs:$rd, (SPftoi FPRegs:$rs2))],
+ IIC_fpu_fast_instr>;
def FDTOI : F3_3u<2, 0b110100, 0b011010010,
(outs FPRegs:$rd), (ins DFPRegs:$rs2),
"fdtoi $rs2, $rd",
- [(set FPRegs:$rd, (SPftoi DFPRegs:$rs2))]>;
+ [(set FPRegs:$rd, (SPftoi DFPRegs:$rs2))],
+ IIC_fpu_fast_instr>;
def FQTOI : F3_3u<2, 0b110100, 0b011010011,
(outs FPRegs:$rd), (ins QFPRegs:$rs2),
"fqtoi $rs2, $rd",
@@ -952,7 +1131,8 @@ def FQTOI : F3_3u<2, 0b110100, 0b011010011,
def FSTOD : F3_3u<2, 0b110100, 0b011001001,
(outs DFPRegs:$rd), (ins FPRegs:$rs2),
"fstod $rs2, $rd",
- [(set f64:$rd, (fextend f32:$rs2))]>;
+ [(set f64:$rd, (fextend f32:$rs2))],
+ IIC_fpu_stod>;
def FSTOQ : F3_3u<2, 0b110100, 0b011001101,
(outs QFPRegs:$rd), (ins FPRegs:$rs2),
"fstoq $rs2, $rd",
@@ -961,7 +1141,8 @@ def FSTOQ : F3_3u<2, 0b110100, 0b011001101,
def FDTOS : F3_3u<2, 0b110100, 0b011000110,
(outs FPRegs:$rd), (ins DFPRegs:$rs2),
"fdtos $rs2, $rd",
- [(set f32:$rd, (fround f64:$rs2))]>;
+ [(set f32:$rd, (fround f64:$rs2))],
+ IIC_fpu_fast_instr>;
def FDTOQ : F3_3u<2, 0b110100, 0b011001110,
(outs QFPRegs:$rd), (ins DFPRegs:$rs2),
"fdtoq $rs2, $rd",
@@ -985,22 +1166,29 @@ def FMOVS : F3_3u<2, 0b110100, 0b000000001,
def FNEGS : F3_3u<2, 0b110100, 0b000000101,
(outs FPRegs:$rd), (ins FPRegs:$rs2),
"fnegs $rs2, $rd",
- [(set f32:$rd, (fneg f32:$rs2))]>;
+ [(set f32:$rd, (fneg f32:$rs2))],
+ IIC_fpu_negs>;
def FABSS : F3_3u<2, 0b110100, 0b000001001,
(outs FPRegs:$rd), (ins FPRegs:$rs2),
"fabss $rs2, $rd",
- [(set f32:$rd, (fabs f32:$rs2))]>;
+ [(set f32:$rd, (fabs f32:$rs2))],
+ IIC_fpu_abs>;
// Floating-point Square Root Instructions, p.145
+// FSQRTS triggers an erratum on LEON processors; when this instruction is
+// disabled, the operation is promoted to use FSQRTD with doubles instead.
+let Predicates = [HasNoFdivSqrtFix] in
def FSQRTS : F3_3u<2, 0b110100, 0b000101001,
(outs FPRegs:$rd), (ins FPRegs:$rs2),
"fsqrts $rs2, $rd",
- [(set f32:$rd, (fsqrt f32:$rs2))]>;
+ [(set f32:$rd, (fsqrt f32:$rs2))],
+ IIC_fpu_sqrts>;
def FSQRTD : F3_3u<2, 0b110100, 0b000101010,
(outs DFPRegs:$rd), (ins DFPRegs:$rs2),
"fsqrtd $rs2, $rd",
- [(set f64:$rd, (fsqrt f64:$rs2))]>;
+ [(set f64:$rd, (fsqrt f64:$rs2))],
+ IIC_fpu_sqrtd>;
def FSQRTQ : F3_3u<2, 0b110100, 0b000101011,
(outs QFPRegs:$rd), (ins QFPRegs:$rs2),
"fsqrtq $rs2, $rd",
@@ -1013,11 +1201,13 @@ def FSQRTQ : F3_3u<2, 0b110100, 0b000101011,
def FADDS : F3_3<2, 0b110100, 0b001000001,
(outs FPRegs:$rd), (ins FPRegs:$rs1, FPRegs:$rs2),
"fadds $rs1, $rs2, $rd",
- [(set f32:$rd, (fadd f32:$rs1, f32:$rs2))]>;
+ [(set f32:$rd, (fadd f32:$rs1, f32:$rs2))],
+ IIC_fpu_fast_instr>;
def FADDD : F3_3<2, 0b110100, 0b001000010,
(outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
"faddd $rs1, $rs2, $rd",
- [(set f64:$rd, (fadd f64:$rs1, f64:$rs2))]>;
+ [(set f64:$rd, (fadd f64:$rs1, f64:$rs2))],
+ IIC_fpu_fast_instr>;
def FADDQ : F3_3<2, 0b110100, 0b001000011,
(outs QFPRegs:$rd), (ins QFPRegs:$rs1, QFPRegs:$rs2),
"faddq $rs1, $rs2, $rd",
@@ -1027,11 +1217,13 @@ def FADDQ : F3_3<2, 0b110100, 0b001000011,
def FSUBS : F3_3<2, 0b110100, 0b001000101,
(outs FPRegs:$rd), (ins FPRegs:$rs1, FPRegs:$rs2),
"fsubs $rs1, $rs2, $rd",
- [(set f32:$rd, (fsub f32:$rs1, f32:$rs2))]>;
+ [(set f32:$rd, (fsub f32:$rs1, f32:$rs2))],
+ IIC_fpu_fast_instr>;
def FSUBD : F3_3<2, 0b110100, 0b001000110,
(outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
"fsubd $rs1, $rs2, $rd",
- [(set f64:$rd, (fsub f64:$rs1, f64:$rs2))]>;
+ [(set f64:$rd, (fsub f64:$rs1, f64:$rs2))],
+ IIC_fpu_fast_instr>;
def FSUBQ : F3_3<2, 0b110100, 0b001000111,
(outs QFPRegs:$rd), (ins QFPRegs:$rs1, QFPRegs:$rs2),
"fsubq $rs1, $rs2, $rd",
@@ -1040,25 +1232,32 @@ def FSUBQ : F3_3<2, 0b110100, 0b001000111,
// Floating-point Multiply and Divide Instructions, p. 147
+// FMULS triggers an erratum on LEON processors; when this instruction is
+// disabled, the operation is promoted to use FMULD with doubles instead.
+let Predicates = [HasNoFmulsFix] in
def FMULS : F3_3<2, 0b110100, 0b001001001,
(outs FPRegs:$rd), (ins FPRegs:$rs1, FPRegs:$rs2),
"fmuls $rs1, $rs2, $rd",
- [(set f32:$rd, (fmul f32:$rs1, f32:$rs2))]>;
+ [(set f32:$rd, (fmul f32:$rs1, f32:$rs2))],
+ IIC_fpu_muls>;
def FMULD : F3_3<2, 0b110100, 0b001001010,
(outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
"fmuld $rs1, $rs2, $rd",
- [(set f64:$rd, (fmul f64:$rs1, f64:$rs2))]>;
+ [(set f64:$rd, (fmul f64:$rs1, f64:$rs2))],
+ IIC_fpu_muld>;
def FMULQ : F3_3<2, 0b110100, 0b001001011,
(outs QFPRegs:$rd), (ins QFPRegs:$rs1, QFPRegs:$rs2),
"fmulq $rs1, $rs2, $rd",
[(set f128:$rd, (fmul f128:$rs1, f128:$rs2))]>,
Requires<[HasHardQuad]>;
+let Predicates = [HasNoFsmuldFix] in
def FSMULD : F3_3<2, 0b110100, 0b001101001,
(outs DFPRegs:$rd), (ins FPRegs:$rs1, FPRegs:$rs2),
"fsmuld $rs1, $rs2, $rd",
[(set f64:$rd, (fmul (fextend f32:$rs1),
- (fextend f32:$rs2)))]>;
+ (fextend f32:$rs2)))],
+ IIC_fpu_muld>;
def FDMULQ : F3_3<2, 0b110100, 0b001101110,
(outs QFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
"fdmulq $rs1, $rs2, $rd",
@@ -1066,14 +1265,18 @@ def FDMULQ : F3_3<2, 0b110100, 0b001101110,
(fextend f64:$rs2)))]>,
Requires<[HasHardQuad]>;
+// FDIVS triggers an erratum on LEON processors; when this instruction is
+// disabled, the operation is promoted to use FDIVD with doubles instead.
def FDIVS : F3_3<2, 0b110100, 0b001001101,
(outs FPRegs:$rd), (ins FPRegs:$rs1, FPRegs:$rs2),
"fdivs $rs1, $rs2, $rd",
- [(set f32:$rd, (fdiv f32:$rs1, f32:$rs2))]>;
+ [(set f32:$rd, (fdiv f32:$rs1, f32:$rs2))],
+ IIC_fpu_divs>;
def FDIVD : F3_3<2, 0b110100, 0b001001110,
(outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
"fdivd $rs1, $rs2, $rd",
- [(set f64:$rd, (fdiv f64:$rs1, f64:$rs2))]>;
+ [(set f64:$rd, (fdiv f64:$rs1, f64:$rs2))],
+ IIC_fpu_divd>;
def FDIVQ : F3_3<2, 0b110100, 0b001001111,
(outs QFPRegs:$rd), (ins QFPRegs:$rs1, QFPRegs:$rs2),
"fdivq $rs1, $rs2, $rd",
@@ -1091,11 +1294,13 @@ let Defs = [FCC0], rd = 0, isCodeGenOnly = 1 in {
def FCMPS : F3_3c<2, 0b110101, 0b001010001,
(outs), (ins FPRegs:$rs1, FPRegs:$rs2),
"fcmps $rs1, $rs2",
- [(SPcmpfcc f32:$rs1, f32:$rs2)]>;
+ [(SPcmpfcc f32:$rs1, f32:$rs2)],
+ IIC_fpu_fast_instr>;
def FCMPD : F3_3c<2, 0b110101, 0b001010010,
(outs), (ins DFPRegs:$rs1, DFPRegs:$rs2),
"fcmpd $rs1, $rs2",
- [(SPcmpfcc f64:$rs1, f64:$rs2)]>;
+ [(SPcmpfcc f64:$rs1, f64:$rs2)],
+ IIC_fpu_fast_instr>;
def FCMPQ : F3_3c<2, 0b110101, 0b001010011,
(outs), (ins QFPRegs:$rs1, QFPRegs:$rs2),
"fcmpq $rs1, $rs2",
@@ -1125,7 +1330,8 @@ let Uses = [O6], isCall = 1, hasDelaySlot = 1 in
def TLS_CALL : InstSP<(outs),
(ins calltarget:$disp, TLSSym:$sym, variable_ops),
"call $disp, $sym",
- [(tlscall texternalsym:$disp, tglobaltlsaddr:$sym)]> {
+ [(tlscall texternalsym:$disp, tglobaltlsaddr:$sym)],
+ IIC_jmp_or_call> {
bits<30> disp;
let op = 1;
let Inst{29-0} = disp;
@@ -1303,19 +1509,60 @@ let Predicates = [HasV9], hasSideEffects = 1, rd = 0, rs1 = 0b01111 in
def MEMBARi : F3_2<2, 0b101000, (outs), (ins simm13Op:$simm13),
"membar $simm13", []>;
-// TODO: Should add a CASArr variant. In fact, the CAS instruction,
-// unlike other instructions, only comes in a form which requires an
-// ASI be provided. The ASI value hardcoded here is ASI_PRIMARY, the
-// default unprivileged ASI for SparcV9. (Also of note: some modern
-// SparcV8 implementations provide CASA as an extension, but require
-// the use of SparcV8's default ASI, 0xA ("User Data") instead.)
+// The CAS instruction, unlike other instructions, only comes in a
+// form which requires an ASI be provided. The ASI value hardcoded
+// here is ASI_PRIMARY, the default unprivileged ASI for SparcV9.
let Predicates = [HasV9], Constraints = "$swap = $rd", asi = 0b10000000 in
def CASrr: F3_1_asi<3, 0b111100,
(outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2,
IntRegs:$swap),
"cas [$rs1], $rs2, $rd",
[(set i32:$rd,
- (atomic_cmp_swap iPTR:$rs1, i32:$rs2, i32:$swap))]>;
+ (atomic_cmp_swap_32 iPTR:$rs1, i32:$rs2, i32:$swap))]>;
+
+
+// CASA is supported as an instruction on some LEON3 and all LEON4 processors.
+// This version can be automatically lowered from C code, selecting ASI 10.
+let Predicates = [HasLeonCASA], Constraints = "$swap = $rd", asi = 0b00001010 in
+ def CASAasi10: F3_1_asi<3, 0b111100,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2,
+ IntRegs:$swap),
+ "casa [$rs1] 10, $rs2, $rd",
+ [(set i32:$rd,
+ (atomic_cmp_swap_32 iPTR:$rs1, i32:$rs2, i32:$swap))]>;
+
+// CASA is supported on some LEON3 and all LEON4 processors. Same pattern as
+// CASrr, above, but with a different ASI. This version is supported for
+// inline assembly lowering only.
+let Predicates = [HasLeonCASA], Constraints = "$swap = $rd" in
+ def CASArr: F3_1_asi<3, 0b111100,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2,
+ IntRegs:$swap, i8imm:$asi),
+ "casa [$rs1] $asi, $rs2, $rd", []>;
+
+// TODO: Add a DAG sequence to lower these instructions. Currently they
+// are only usable from inline assembly.
+let Predicates = [HasUMAC_SMAC], Defs = [Y, ASR18], Uses = [Y, ASR18] in {
+ def SMACrr : F3_1<2, 0b111111,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2, ASRRegs:$asr18),
+ "smac $rs1, $rs2, $rd",
+ [], IIC_smac_umac>;
+
+ def SMACri : F3_2<2, 0b111111,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, simm13Op:$simm13, ASRRegs:$asr18),
+ "smac $rs1, $simm13, $rd",
+ [], IIC_smac_umac>;
+
+ def UMACrr : F3_1<2, 0b111110,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2, ASRRegs:$asr18),
+ "umac $rs1, $rs2, $rd",
+ [], IIC_smac_umac>;
+
+ def UMACri : F3_2<2, 0b111110,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, simm13Op:$simm13, ASRRegs:$asr18),
+ "umac $rs1, $simm13, $rd",
+ [], IIC_smac_umac>;
+}
let Defs = [ICC] in {
defm TADDCC : F3_12np<"taddcc", 0b100000>;
@@ -1411,13 +1658,21 @@ def : Pat<(store (i32 0), ADDRri:$dst), (STri ADDRri:$dst, (i32 G0))>;
let Predicates = [HasNoV9] in
def : Pat<(atomic_fence imm, imm), (STBAR)>;
-// atomic_load_32 addr -> load addr
-def : Pat<(i32 (atomic_load ADDRrr:$src)), (LDrr ADDRrr:$src)>;
-def : Pat<(i32 (atomic_load ADDRri:$src)), (LDri ADDRri:$src)>;
-
-// atomic_store_32 val, addr -> store val, addr
-def : Pat<(atomic_store ADDRrr:$dst, i32:$val), (STrr ADDRrr:$dst, $val)>;
-def : Pat<(atomic_store ADDRri:$dst, i32:$val), (STri ADDRri:$dst, $val)>;
+// atomic_load addr -> load addr
+def : Pat<(i32 (atomic_load_8 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>;
+def : Pat<(i32 (atomic_load_8 ADDRri:$src)), (LDUBri ADDRri:$src)>;
+def : Pat<(i32 (atomic_load_16 ADDRrr:$src)), (LDUHrr ADDRrr:$src)>;
+def : Pat<(i32 (atomic_load_16 ADDRri:$src)), (LDUHri ADDRri:$src)>;
+def : Pat<(i32 (atomic_load_32 ADDRrr:$src)), (LDrr ADDRrr:$src)>;
+def : Pat<(i32 (atomic_load_32 ADDRri:$src)), (LDri ADDRri:$src)>;
+
+// atomic_store val, addr -> store val, addr
+def : Pat<(atomic_store_8 ADDRrr:$dst, i32:$val), (STBrr ADDRrr:$dst, $val)>;
+def : Pat<(atomic_store_8 ADDRri:$dst, i32:$val), (STBri ADDRri:$dst, $val)>;
+def : Pat<(atomic_store_16 ADDRrr:$dst, i32:$val), (STHrr ADDRrr:$dst, $val)>;
+def : Pat<(atomic_store_16 ADDRri:$dst, i32:$val), (STHri ADDRri:$dst, $val)>;
+def : Pat<(atomic_store_32 ADDRrr:$dst, i32:$val), (STrr ADDRrr:$dst, $val)>;
+def : Pat<(atomic_store_32 ADDRri:$dst, i32:$val), (STri ADDRri:$dst, $val)>;
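+
+// For example, an i8 atomic (monotonic) load selects LDUBrr/LDUBri above,
+// so the loaded byte is zero-extended into the 32-bit result.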
// extract_vector
def : Pat<(extractelt (v2i32 IntPair:$Rn), 0),
diff --git a/lib/Target/Sparc/SparcMCInstLower.cpp b/lib/Target/Sparc/SparcMCInstLower.cpp
index b084d0021ba0..a3cedcbf9dd1 100644
--- a/lib/Target/Sparc/SparcMCInstLower.cpp
+++ b/lib/Target/Sparc/SparcMCInstLower.cpp
@@ -14,7 +14,6 @@
#include "Sparc.h"
#include "MCTargetDesc/SparcMCExpr.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
diff --git a/lib/Target/Sparc/SparcRegisterInfo.cpp b/lib/Target/Sparc/SparcRegisterInfo.cpp
index da31783ba248..37a1fdf4d770 100644
--- a/lib/Target/Sparc/SparcRegisterInfo.cpp
+++ b/lib/Target/Sparc/SparcRegisterInfo.cpp
@@ -105,13 +105,9 @@ SparcRegisterInfo::getPointerRegClass(const MachineFunction &MF,
return Subtarget.is64Bit() ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
}
-static void replaceFI(MachineFunction &MF,
- MachineBasicBlock::iterator II,
- MachineInstr &MI,
- DebugLoc dl,
- unsigned FIOperandNum, int Offset,
- unsigned FramePtr)
-{
+static void replaceFI(MachineFunction &MF, MachineBasicBlock::iterator II,
+ MachineInstr &MI, const DebugLoc &dl,
+ unsigned FIOperandNum, int Offset, unsigned FramePtr) {
// Replace frame index with a frame pointer reference.
if (Offset >= -4096 && Offset <= 4095) {
// If the offset is small enough to fit in the immediate field, directly
diff --git a/lib/Target/Sparc/SparcRegisterInfo.h b/lib/Target/Sparc/SparcRegisterInfo.h
index 32075b1df410..2ac51263957e 100644
--- a/lib/Target/Sparc/SparcRegisterInfo.h
+++ b/lib/Target/Sparc/SparcRegisterInfo.h
@@ -39,9 +39,6 @@ struct SparcRegisterInfo : public SparcGenRegisterInfo {
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS = nullptr) const override;
- void processFunctionBeforeFrameFinalized(MachineFunction &MF,
- RegScavenger *RS = nullptr) const;
-
unsigned getFrameRegister(const MachineFunction &MF) const override;
bool canRealignStack(const MachineFunction &MF) const override;
diff --git a/lib/Target/Sparc/SparcRegisterInfo.td b/lib/Target/Sparc/SparcRegisterInfo.td
index cca9463562a4..d1ef3b19dca7 100644
--- a/lib/Target/Sparc/SparcRegisterInfo.td
+++ b/lib/Target/Sparc/SparcRegisterInfo.td
@@ -62,6 +62,12 @@ foreach I = 0-3 in
def FSR : SparcCtrlReg<0, "FSR">; // Floating-point state register.
+def FQ : SparcCtrlReg<0, "FQ">; // Floating-point deferred-trap queue.
+
+def CPSR : SparcCtrlReg<0, "CPSR">; // Co-processor state register.
+
+def CPQ : SparcCtrlReg<0, "CPQ">; // Co-processor queue.
+
// Y register
def Y : SparcCtrlReg<0, "Y">, DwarfRegNum<[64]>;
// Ancillary state registers (implementation defined)
@@ -204,6 +210,40 @@ def D13 : Rd<26, "F26", [F26, F27]>, DwarfRegNum<[85]>;
def D14 : Rd<28, "F28", [F28, F29]>, DwarfRegNum<[86]>;
def D15 : Rd<30, "F30", [F30, F31]>, DwarfRegNum<[87]>;
+// Co-processor registers
+def C0 : Ri< 0, "C0">;
+def C1 : Ri< 1, "C1">;
+def C2 : Ri< 2, "C2">;
+def C3 : Ri< 3, "C3">;
+def C4 : Ri< 4, "C4">;
+def C5 : Ri< 5, "C5">;
+def C6 : Ri< 6, "C6">;
+def C7 : Ri< 7, "C7">;
+def C8 : Ri< 8, "C8">;
+def C9 : Ri< 9, "C9">;
+def C10 : Ri< 10, "C10">;
+def C11 : Ri< 11, "C11">;
+def C12 : Ri< 12, "C12">;
+def C13 : Ri< 13, "C13">;
+def C14 : Ri< 14, "C14">;
+def C15 : Ri< 15, "C15">;
+def C16 : Ri< 16, "C16">;
+def C17 : Ri< 17, "C17">;
+def C18 : Ri< 18, "C18">;
+def C19 : Ri< 19, "C19">;
+def C20 : Ri< 20, "C20">;
+def C21 : Ri< 21, "C21">;
+def C22 : Ri< 22, "C22">;
+def C23 : Ri< 23, "C23">;
+def C24 : Ri< 24, "C24">;
+def C25 : Ri< 25, "C25">;
+def C26 : Ri< 26, "C26">;
+def C27 : Ri< 27, "C27">;
+def C28 : Ri< 28, "C28">;
+def C29 : Ri< 29, "C29">;
+def C30 : Ri< 30, "C30">;
+def C31 : Ri< 31, "C31">;
+
// Unaliased double precision floating point registers.
// FIXME: Define DwarfRegNum for these registers.
def D16 : SparcReg< 1, "F32">;
@@ -259,6 +299,24 @@ def I2_I3 : Rdi<26, "I2", [I2, I3]>;
def I4_I5 : Rdi<28, "I4", [I4, I5]>;
def I6_I7 : Rdi<30, "I6", [I6, I7]>;
+// Aliases of the co-processor registers used for LDD/STD double-word operations
+def C0_C1 : Rdi<0, "C0", [C0, C1]>;
+def C2_C3 : Rdi<2, "C2", [C2, C3]>;
+def C4_C5 : Rdi<4, "C4", [C4, C5]>;
+def C6_C7 : Rdi<6, "C6", [C6, C7]>;
+def C8_C9 : Rdi<8, "C8", [C8, C9]>;
+def C10_C11 : Rdi<10, "C10", [C10, C11]>;
+def C12_C13 : Rdi<12, "C12", [C12, C13]>;
+def C14_C15 : Rdi<14, "C14", [C14, C15]>;
+def C16_C17 : Rdi<16, "C16", [C16, C17]>;
+def C18_C19 : Rdi<18, "C18", [C18, C19]>;
+def C20_C21 : Rdi<20, "C20", [C20, C21]>;
+def C22_C23 : Rdi<22, "C22", [C22, C23]>;
+def C24_C25 : Rdi<24, "C24", [C24, C25]>;
+def C26_C27 : Rdi<26, "C26", [C26, C27]>;
+def C28_C29 : Rdi<28, "C28", [C28, C29]>;
+def C30_C31 : Rdi<30, "C30", [C30, C31]>;
+
// Register classes.
//
// FIXME: the register order should be defined in terms of the preferred
@@ -273,6 +331,7 @@ def IntRegs : RegisterClass<"SP", [i32, i64], 32,
(sequence "L%u", 0, 7),
(sequence "O%u", 0, 7))>;
+
// Should be in the same order as IntRegs.
def IntPair : RegisterClass<"SP", [v2i32], 64,
(add I0_I1, I2_I3, I4_I5, I6_I7,
@@ -296,10 +355,21 @@ def QFPRegs : RegisterClass<"SP", [f128], 128, (sequence "Q%u", 0, 15)>;
// Floating point control register classes.
def FCCRegs : RegisterClass<"SP", [i1], 1, (sequence "FCC%u", 0, 3)>;
-// Ancillary state registers
-def ASRRegs : RegisterClass<"SP", [i32], 32,
- (add Y, (sequence "ASR%u", 1, 31))> {
- let isAllocatable = 0;
+let isAllocatable = 0 in {
+ // Ancillary state registers
+ def ASRRegs : RegisterClass<"SP", [i32], 32,
+ (add Y, (sequence "ASR%u", 1, 31))>;
+
+ // This register class should not be used to hold i64 values.
+ def CoprocRegs : RegisterClass<"SP", [i32], 32,
+ (add (sequence "C%u", 0, 31))>;
+
+ // Should be in the same order as CoprocRegs.
+ def CoprocPair : RegisterClass<"SP", [v2i32], 64,
+ (add C0_C1, C2_C3, C4_C5, C6_C7,
+ C8_C9, C10_C11, C12_C13, C14_C15,
+ C16_C17, C18_C19, C20_C21, C22_C23,
+ C24_C25, C26_C27, C28_C29, C30_C31)>;
}
// Privileged Registers
diff --git a/lib/Target/Sparc/SparcSchedule.td b/lib/Target/Sparc/SparcSchedule.td
new file mode 100755
index 000000000000..f243546b029b
--- /dev/null
+++ b/lib/Target/Sparc/SparcSchedule.td
@@ -0,0 +1,124 @@
+//===-- SparcSchedule.td - Describe the Sparc Itineraries --*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the instruction itineraries of the LEON processors.
+//
+//===----------------------------------------------------------------------===//
+
+def IIC_iu_or_fpu_instr : InstrItinClass;
+def IIC_iu_instr : InstrItinClass;
+def IIC_fpu_normal_instr : InstrItinClass;
+def IIC_fpu_fast_instr : InstrItinClass;
+def IIC_jmp_or_call : InstrItinClass;
+def IIC_ldd : InstrItinClass;
+def IIC_st : InstrItinClass;
+def IIC_std : InstrItinClass;
+def IIC_iu_smul : InstrItinClass;
+def IIC_iu_umul : InstrItinClass;
+def IIC_iu_div : InstrItinClass;
+def IIC_ticc : InstrItinClass;
+def IIC_ldstub : InstrItinClass;
+def IIC_fpu_muls : InstrItinClass;
+def IIC_fpu_muld : InstrItinClass;
+def IIC_fpu_divs : InstrItinClass;
+def IIC_fpu_divd : InstrItinClass;
+def IIC_fpu_sqrts : InstrItinClass;
+def IIC_fpu_sqrtd : InstrItinClass;
+def IIC_fpu_abs : InstrItinClass;
+def IIC_fpu_movs : InstrItinClass;
+def IIC_fpu_negs : InstrItinClass;
+def IIC_smac_umac : InstrItinClass;
+def IIC_fpu_stod : InstrItinClass;
+
+def LEONIU : FuncUnit; // integer unit
+def LEONFPU : FuncUnit; // floating-point unit
+
+// Ref: http://www.atmel.com/Images/doc4226.pdf
+
+def LEON2Itineraries : ProcessorItineraries<
+[LEONIU, LEONFPU], [], [
+ InstrItinData<IIC_iu_or_fpu_instr, [InstrStage<1, [LEONIU, LEONFPU]>], [1, 1]>,
+ InstrItinData<IIC_iu_instr, [InstrStage<1, [LEONIU]>], [1, 1]>,
+ InstrItinData<IIC_fpu_normal_instr, [InstrStage<1, [LEONFPU]>], [7, 1]>,
+ InstrItinData<IIC_fpu_fast_instr, [InstrStage<1, [LEONFPU]>], [7, 1]>,
+ InstrItinData<IIC_jmp_or_call, [InstrStage<1, [LEONIU, LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_ldd, [InstrStage<1, [LEONIU, LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_st, [InstrStage<1, [LEONIU, LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_std, [InstrStage<1, [LEONIU, LEONFPU]>], [3, 1]>,
+ InstrItinData<IIC_iu_smul, [InstrStage<1, [LEONIU]>], [5, 1]>,
+ InstrItinData<IIC_iu_umul, [InstrStage<1, [LEONIU]>], [5, 1]>,
+ InstrItinData<IIC_iu_div, [InstrStage<1, [LEONIU]>], [35, 1]>,
+ InstrItinData<IIC_ticc, [InstrStage<1, [LEONIU, LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_ldstub, [InstrStage<1, [LEONIU, LEONFPU]>], [3, 1]>,
+ InstrItinData<IIC_fpu_muls, [InstrStage<1, [LEONFPU]>], [16, 1]>,
+ InstrItinData<IIC_fpu_muld, [InstrStage<1, [LEONFPU]>], [21, 1]>,
+ InstrItinData<IIC_fpu_divs, [InstrStage<1, [LEONFPU]>], [20, 1]>,
+ InstrItinData<IIC_fpu_divd, [InstrStage<1, [LEONFPU]>], [36, 1]>,
+ InstrItinData<IIC_fpu_sqrts, [InstrStage<1, [LEONFPU]>], [37, 1]>,
+ InstrItinData<IIC_fpu_sqrtd, [InstrStage<1, [LEONFPU]>], [65, 1]>,
+ InstrItinData<IIC_fpu_abs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_movs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_negs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_stod, [InstrStage<1, [LEONFPU]>], [2, 1]>
+]>;
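+
+// Reading the itinerary data: the first operand-cycle entry is the result
+// (def) latency and the second is the cycle at which source operands are
+// read. E.g. on LEON2, an IIC_fpu_divd result is available after 36 cycles,
+// with sources read at cycle 1.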
+
+def LEON3Itineraries : ProcessorItineraries<
+[LEONIU, LEONFPU], [], [
+ InstrItinData<IIC_iu_or_fpu_instr, [InstrStage<1, [LEONIU, LEONFPU]>], [1, 1]>,
+ InstrItinData<IIC_iu_instr, [InstrStage<1, [LEONIU]>], [1, 1]>,
+ InstrItinData<IIC_fpu_normal_instr, [InstrStage<1, [LEONFPU]>], [7, 1]>,
+ InstrItinData<IIC_fpu_fast_instr, [InstrStage<1, [LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_jmp_or_call, [InstrStage<1, [LEONIU, LEONFPU]>], [3, 1]>,
+ InstrItinData<IIC_ldd, [InstrStage<1, [LEONIU, LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_st, [InstrStage<1, [LEONIU, LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_std, [InstrStage<1, [LEONIU, LEONFPU]>], [5, 1]>,
+ InstrItinData<IIC_iu_smul, [InstrStage<1, [LEONIU]>], [1, 1]>,
+ InstrItinData<IIC_iu_umul, [InstrStage<1, [LEONIU]>], [4, 1]>,
+ InstrItinData<IIC_iu_div, [InstrStage<1, [LEONIU]>], [35, 1]>,
+ InstrItinData<IIC_smac_umac, [InstrStage<1, [LEONIU]>], [2, 1]>,
+ InstrItinData<IIC_ticc, [InstrStage<1, [LEONIU, LEONFPU]>], [5, 1]>,
+ InstrItinData<IIC_ldstub, [InstrStage<1, [LEONIU, LEONFPU]>], [3, 1]>,
+ InstrItinData<IIC_fpu_muls, [InstrStage<1, [LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_fpu_muld, [InstrStage<1, [LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_fpu_divs, [InstrStage<1, [LEONFPU]>], [16, 1]>,
+ InstrItinData<IIC_fpu_divd, [InstrStage<1, [LEONFPU]>], [17, 1]>,
+ InstrItinData<IIC_fpu_sqrts, [InstrStage<1, [LEONFPU]>], [24, 1]>,
+ InstrItinData<IIC_fpu_sqrtd, [InstrStage<1, [LEONFPU]>], [25, 1]>,
+ InstrItinData<IIC_fpu_abs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_movs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_negs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_stod, [InstrStage<1, [LEONFPU]>], [4, 1]>
+]>;
+
+def LEON4Itineraries : ProcessorItineraries<
+[LEONIU, LEONFPU], [], [
+ InstrItinData<IIC_iu_or_fpu_instr, [InstrStage<1, [LEONIU, LEONFPU]>], [1, 1]>,
+ InstrItinData<IIC_iu_instr, [InstrStage<1, [LEONIU]>], [1, 1]>,
+ InstrItinData<IIC_fpu_normal_instr, [InstrStage<1, [LEONFPU]>], [7, 1]>,
+ InstrItinData<IIC_fpu_fast_instr, [InstrStage<1, [LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_jmp_or_call, [InstrStage<1, [LEONIU, LEONFPU]>], [3, 1]>,
+ InstrItinData<IIC_ldd, [InstrStage<1, [LEONIU, LEONFPU]>], [1, 1]>,
+ InstrItinData<IIC_st, [InstrStage<1, [LEONIU, LEONFPU]>], [1, 1]>,
+ InstrItinData<IIC_std, [InstrStage<1, [LEONIU, LEONFPU]>], [1, 1]>,
+ InstrItinData<IIC_iu_smul, [InstrStage<1, [LEONIU]>], [1, 1]>,
+ InstrItinData<IIC_iu_umul, [InstrStage<1, [LEONIU]>], [4, 1]>,
+ InstrItinData<IIC_iu_div, [InstrStage<1, [LEONIU]>], [35, 1]>,
+ InstrItinData<IIC_smac_umac, [InstrStage<1, [LEONIU]>], [2, 1]>,
+ InstrItinData<IIC_ticc, [InstrStage<1, [LEONIU, LEONFPU]>], [5, 1]>,
+ InstrItinData<IIC_ldstub, [InstrStage<1, [LEONIU, LEONFPU]>], [3, 1]>,
+ InstrItinData<IIC_fpu_muls, [InstrStage<1, [LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_fpu_muld, [InstrStage<1, [LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_fpu_divs, [InstrStage<1, [LEONFPU]>], [16, 1]>,
+ InstrItinData<IIC_fpu_divd, [InstrStage<1, [LEONFPU]>], [17, 1]>,
+ InstrItinData<IIC_fpu_sqrts, [InstrStage<1, [LEONFPU]>], [24, 1]>,
+ InstrItinData<IIC_fpu_sqrtd, [InstrStage<1, [LEONFPU]>], [25, 1]>,
+ InstrItinData<IIC_fpu_abs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_movs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_negs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_stod, [InstrStage<1, [LEONFPU]>], [4, 1]>
+]>;
diff --git a/lib/Target/Sparc/SparcSubtarget.cpp b/lib/Target/Sparc/SparcSubtarget.cpp
index d701594d27af..a6a4dc54faed 100644
--- a/lib/Target/Sparc/SparcSubtarget.cpp
+++ b/lib/Target/Sparc/SparcSubtarget.cpp
@@ -29,10 +29,27 @@ void SparcSubtarget::anchor() { }
SparcSubtarget &SparcSubtarget::initializeSubtargetDependencies(StringRef CPU,
StringRef FS) {
IsV9 = false;
+ IsLeon = false;
V8DeprecatedInsts = false;
IsVIS = false;
HasHardQuad = false;
UsePopc = false;
+ UseSoftFloat = false;
+
+ // Leon features
+ HasLeonCasa = false;
+ HasUmacSmac = false;
+ PerformSDIVReplace = false;
+ FixCallImmediates = false;
+ IgnoreZeroFlag = false;
+ InsertNOPDoublePrecision = false;
+ FixFSMULD = false;
+ ReplaceFMULS = false;
+ PreventRoundChange = false;
+ FixAllFDIVSQRT = false;
+ InsertNOPLoad = false;
+ FlushCacheLineSWAP = false;
+ InsertNOPsLoadStore = false;
// Determine default and user specified characteristics
std::string CPUName = CPU;
@@ -50,9 +67,9 @@ SparcSubtarget &SparcSubtarget::initializeSubtargetDependencies(StringRef CPU,
}
SparcSubtarget::SparcSubtarget(const Triple &TT, const std::string &CPU,
- const std::string &FS, TargetMachine &TM,
+ const std::string &FS, const TargetMachine &TM,
bool is64Bit)
- : SparcGenSubtargetInfo(TT, CPU, FS), Is64Bit(is64Bit),
+ : SparcGenSubtargetInfo(TT, CPU, FS), TargetTriple(TT), Is64Bit(is64Bit),
InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
FrameLowering(*this) {}
@@ -64,7 +81,7 @@ int SparcSubtarget::getAdjustedFrameSize(int frameSize) const {
frameSize += 128;
// Frames with calls must also reserve space for 6 outgoing arguments
// whether they are used or not. LowerCall_64 takes care of that.
- frameSize = RoundUpToAlignment(frameSize, 16);
+ frameSize = alignTo(frameSize, 16);
} else {
// Emit the correct save instruction based on the number of bytes in
// the frame. Minimum stack frame size according to V8 ABI is:
@@ -77,7 +94,7 @@ int SparcSubtarget::getAdjustedFrameSize(int frameSize) const {
// Round up to next doubleword boundary -- a double-word boundary
// is required by the ABI.
- frameSize = RoundUpToAlignment(frameSize, 8);
+ frameSize = alignTo(frameSize, 8);
}
return frameSize;
}
diff --git a/lib/Target/Sparc/SparcSubtarget.h b/lib/Target/Sparc/SparcSubtarget.h
index e2fd2f04528a..42d693699994 100644
--- a/lib/Target/Sparc/SparcSubtarget.h
+++ b/lib/Target/Sparc/SparcSubtarget.h
@@ -15,11 +15,11 @@
#define LLVM_LIB_TARGET_SPARC_SPARCSUBTARGET_H
#include "SparcFrameLowering.h"
-#include "SparcInstrInfo.h"
#include "SparcISelLowering.h"
+#include "SparcInstrInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
-#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <string>
@@ -30,21 +30,41 @@ namespace llvm {
class StringRef;
class SparcSubtarget : public SparcGenSubtargetInfo {
+ Triple TargetTriple;
virtual void anchor();
bool IsV9;
+ bool IsLeon;
bool V8DeprecatedInsts;
bool IsVIS, IsVIS2, IsVIS3;
bool Is64Bit;
bool HasHardQuad;
bool UsePopc;
+ bool UseSoftFloat;
+
+ // LEON features
+ bool HasUmacSmac;
+ bool HasLeonCasa;
+ bool InsertNOPLoad;
+ bool FixFSMULD;
+ bool ReplaceFMULS;
+ bool FixAllFDIVSQRT;
+ bool UseSoftFpu;
+ bool PerformSDIVReplace;
+ bool FixCallImmediates;
+ bool IgnoreZeroFlag;
+ bool InsertNOPDoublePrecision;
+ bool PreventRoundChange;
+ bool FlushCacheLineSWAP;
+ bool InsertNOPsLoadStore;
+
SparcInstrInfo InstrInfo;
SparcTargetLowering TLInfo;
- TargetSelectionDAGInfo TSInfo;
+ SelectionDAGTargetInfo TSInfo;
SparcFrameLowering FrameLowering;
public:
SparcSubtarget(const Triple &TT, const std::string &CPU,
- const std::string &FS, TargetMachine &TM, bool is64bit);
+ const std::string &FS, const TargetMachine &TM, bool is64bit);
const SparcInstrInfo *getInstrInfo() const override { return &InstrInfo; }
const TargetFrameLowering *getFrameLowering() const override {
@@ -56,19 +76,37 @@ public:
const SparcTargetLowering *getTargetLowering() const override {
return &TLInfo;
}
- const TargetSelectionDAGInfo *getSelectionDAGInfo() const override {
+ const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
return &TSInfo;
}
bool enableMachineScheduler() const override;
bool isV9() const { return IsV9; }
+ bool isLeon() const { return IsLeon; }
bool isVIS() const { return IsVIS; }
bool isVIS2() const { return IsVIS2; }
bool isVIS3() const { return IsVIS3; }
bool useDeprecatedV8Instructions() const { return V8DeprecatedInsts; }
bool hasHardQuad() const { return HasHardQuad; }
bool usePopc() const { return UsePopc; }
+ bool useSoftFloat() const { return UseSoftFloat; }
+
+ // Leon options
+ bool useSoftFpu() const { return UseSoftFpu; }
+ bool hasLeonCasa() const { return HasLeonCasa; }
+ bool hasUmacSmac() const { return HasUmacSmac; }
+ bool performSDIVReplace() const { return PerformSDIVReplace; }
+ bool fixCallImmediates() const { return FixCallImmediates; }
+ bool ignoreZeroFlag() const { return IgnoreZeroFlag; }
+ bool insertNOPDoublePrecision() const { return InsertNOPDoublePrecision; }
+ bool fixFSMULD() const { return FixFSMULD; }
+ bool replaceFMULS() const { return ReplaceFMULS; }
+ bool preventRoundChange() const { return PreventRoundChange; }
+ bool fixAllFDIVSQRT() const { return FixAllFDIVSQRT; }
+ bool flushCacheLineSWAP() const { return FlushCacheLineSWAP; }
+ bool insertNOPsLoadStore() const { return InsertNOPsLoadStore; }
+ bool insertNOPLoad() const { return InsertNOPLoad; }
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
@@ -87,6 +125,8 @@ public:
/// returns adjusted framesize which includes space for register window
/// spills and arguments.
int getAdjustedFrameSize(int stackSize) const;
+
+ bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
};
} // end namespace llvm
diff --git a/lib/Target/Sparc/SparcTargetMachine.cpp b/lib/Target/Sparc/SparcTargetMachine.cpp
index 725d7f047c47..17fe86a70844 100644
--- a/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -13,7 +13,9 @@
#include "SparcTargetMachine.h"
#include "SparcTargetObjectFile.h"
#include "Sparc.h"
+#include "LeonPasses.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
@@ -52,28 +54,68 @@ static std::string computeDataLayout(const Triple &T, bool is64Bit) {
return Ret;
}
-/// SparcTargetMachine ctor - Create an ILP32 architecture model
-///
+static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
+ if (!RM.hasValue())
+ return Reloc::Static;
+ return *RM;
+}
+
+/// Create an ILP32 architecture model
SparcTargetMachine::SparcTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL, bool is64bit)
: LLVMTargetMachine(T, computeDataLayout(TT, is64bit), TT, CPU, FS, Options,
- RM, CM, OL),
+ getEffectiveRelocModel(RM), CM, OL),
TLOF(make_unique<SparcELFTargetObjectFile>()),
- Subtarget(TT, CPU, FS, *this, is64bit) {
+ Subtarget(TT, CPU, FS, *this, is64bit), is64Bit(is64bit) {
initAsmInfo();
}
SparcTargetMachine::~SparcTargetMachine() {}
+const SparcSubtarget *
+SparcTargetMachine::getSubtargetImpl(const Function &F) const {
+ Attribute CPUAttr = F.getFnAttribute("target-cpu");
+ Attribute FSAttr = F.getFnAttribute("target-features");
+
+ std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
+ ? CPUAttr.getValueAsString().str()
+ : TargetCPU;
+ std::string FS = !FSAttr.hasAttribute(Attribute::None)
+ ? FSAttr.getValueAsString().str()
+ : TargetFS;
+
+ // FIXME: This is related to the code below that resets the target options:
+ // we need to know whether the soft-float flag is set on the function so
+ // that we can enable it as a subtarget feature.
+ bool softFloat =
+ F.hasFnAttribute("use-soft-float") &&
+ F.getFnAttribute("use-soft-float").getValueAsString() == "true";
+
+ if (softFloat)
+ FS += FS.empty() ? "+soft-float" : ",+soft-float";
+
+ auto &I = SubtargetMap[CPU + FS];
+ if (!I) {
+ // This needs to be done before we create a new subtarget since any
+ // creation will depend on the TM and the code generation flags on the
+ // function that reside in TargetOptions.
+ resetTargetOptions(F);
+ I = llvm::make_unique<SparcSubtarget>(TargetTriple, CPU, FS, *this,
+ this->is64Bit);
+ }
+ return I.get();
+}
+
namespace {
/// Sparc Code Generator Pass Configuration Options.
class SparcPassConfig : public TargetPassConfig {
public:
SparcPassConfig(SparcTargetMachine *TM, PassManagerBase &PM)
- : TargetPassConfig(TM, PM) {}
+ : TargetPassConfig(TM, PM) {}
SparcTargetMachine &getSparcTargetMachine() const {
return getTM<SparcTargetMachine>();
@@ -100,25 +142,62 @@ bool SparcPassConfig::addInstSelector() {
return false;
}
-void SparcPassConfig::addPreEmitPass(){
+void SparcPassConfig::addPreEmitPass() {
addPass(createSparcDelaySlotFillerPass(getSparcTargetMachine()));
+ if (this->getSparcTargetMachine().getSubtargetImpl()->ignoreZeroFlag()) {
+ addPass(new IgnoreZeroFlag(getSparcTargetMachine()));
+ }
+ if (this->getSparcTargetMachine().getSubtargetImpl()->performSDIVReplace()) {
+ addPass(new ReplaceSDIV(getSparcTargetMachine()));
+ }
+ if (this->getSparcTargetMachine().getSubtargetImpl()->fixCallImmediates()) {
+ addPass(new FixCALL(getSparcTargetMachine()));
+ }
+ if (this->getSparcTargetMachine().getSubtargetImpl()->fixFSMULD()) {
+ addPass(new FixFSMULD(getSparcTargetMachine()));
+ }
+ if (this->getSparcTargetMachine().getSubtargetImpl()->replaceFMULS()) {
+ addPass(new ReplaceFMULS(getSparcTargetMachine()));
+ }
+ if (this->getSparcTargetMachine().getSubtargetImpl()->preventRoundChange()) {
+ addPass(new PreventRoundChange(getSparcTargetMachine()));
+ }
+ if (this->getSparcTargetMachine().getSubtargetImpl()->fixAllFDIVSQRT()) {
+ addPass(new FixAllFDIVSQRT(getSparcTargetMachine()));
+ }
+ if (this->getSparcTargetMachine().getSubtargetImpl()->insertNOPsLoadStore()) {
+ addPass(new InsertNOPsLoadStore(getSparcTargetMachine()));
+ }
+ if (this->getSparcTargetMachine().getSubtargetImpl()->insertNOPLoad()) {
+ addPass(new InsertNOPLoad(getSparcTargetMachine()));
+ }
+ if (this->getSparcTargetMachine().getSubtargetImpl()->flushCacheLineSWAP()) {
+ addPass(new FlushCacheLineSWAP(getSparcTargetMachine()));
+ }
+ if (this->getSparcTargetMachine()
+ .getSubtargetImpl()
+ ->insertNOPDoublePrecision()) {
+ addPass(new InsertNOPDoublePrecision(getSparcTargetMachine()));
+ }
}
-void SparcV8TargetMachine::anchor() { }
+void SparcV8TargetMachine::anchor() {}
SparcV8TargetMachine::SparcV8TargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: SparcTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
-void SparcV9TargetMachine::anchor() { }
+void SparcV9TargetMachine::anchor() {}
SparcV9TargetMachine::SparcV9TargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: SparcTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
@@ -127,6 +206,7 @@ void SparcelTargetMachine::anchor() {}
SparcelTargetMachine::SparcelTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: SparcTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
diff --git a/lib/Target/Sparc/SparcTargetMachine.h b/lib/Target/Sparc/SparcTargetMachine.h
index 903c2d15629f..48193fe095be 100644
--- a/lib/Target/Sparc/SparcTargetMachine.h
+++ b/lib/Target/Sparc/SparcTargetMachine.h
@@ -23,16 +23,17 @@ namespace llvm {
class SparcTargetMachine : public LLVMTargetMachine {
std::unique_ptr<TargetLoweringObjectFile> TLOF;
SparcSubtarget Subtarget;
+ bool is64Bit;
+ mutable StringMap<std::unique_ptr<SparcSubtarget>> SubtargetMap;
public:
SparcTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL,
- bool is64bit);
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL, bool is64bit);
~SparcTargetMachine() override;
- const SparcSubtarget *getSubtargetImpl(const Function &) const override {
- return &Subtarget;
- }
+ const SparcSubtarget *getSubtargetImpl() const { return &Subtarget; }
+ const SparcSubtarget *getSubtargetImpl(const Function &) const override;
// Pass Pipeline Configuration
TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
@@ -41,25 +42,25 @@ public:
}
};
-/// SparcV8TargetMachine - Sparc 32-bit target machine
+/// Sparc 32-bit target machine
///
class SparcV8TargetMachine : public SparcTargetMachine {
virtual void anchor();
public:
SparcV8TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
-/// SparcV9TargetMachine - Sparc 64-bit target machine
+/// Sparc 64-bit target machine
///
class SparcV9TargetMachine : public SparcTargetMachine {
virtual void anchor();
public:
SparcV9TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
@@ -69,7 +70,7 @@ class SparcelTargetMachine : public SparcTargetMachine {
public:
SparcelTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
diff --git a/lib/Target/Sparc/TargetInfo/Makefile b/lib/Target/Sparc/TargetInfo/Makefile
deleted file mode 100644
index 641ed87160c7..000000000000
--- a/lib/Target/Sparc/TargetInfo/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/Sparc/TargetInfo/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMSparcInfo
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/SystemZ/AsmParser/Makefile b/lib/Target/SystemZ/AsmParser/Makefile
deleted file mode 100644
index 623ae2c4e3ef..000000000000
--- a/lib/Target/SystemZ/AsmParser/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/SystemZ/AsmParser/Makefile ---------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMSystemZAsmParser
-
-# Hack: we need to include 'main' SystemZ target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp b/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
index 9c995bf42b0b..3923614c89d3 100644
--- a/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
+++ b/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
@@ -13,9 +13,9 @@
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
@@ -391,6 +391,9 @@ public:
: MCTargetAsmParser(Options, sti), Parser(parser) {
MCAsmParserExtension::Initialize(Parser);
+ // Alias the .word directive to .short.
+ parser.addAliasForDirective(".word", ".short");
+
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
}
diff --git a/lib/Target/SystemZ/CMakeLists.txt b/lib/Target/SystemZ/CMakeLists.txt
index 336f037bb733..4b849ad6491b 100644
--- a/lib/Target/SystemZ/CMakeLists.txt
+++ b/lib/Target/SystemZ/CMakeLists.txt
@@ -30,6 +30,7 @@ add_llvm_target(SystemZCodeGen
SystemZSubtarget.cpp
SystemZTargetMachine.cpp
SystemZTargetTransformInfo.cpp
+ SystemZTDC.cpp
)
add_subdirectory(AsmParser)
diff --git a/lib/Target/SystemZ/Disassembler/Makefile b/lib/Target/SystemZ/Disassembler/Makefile
deleted file mode 100644
index efc4cc8e9cb0..000000000000
--- a/lib/Target/SystemZ/Disassembler/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===-- lib/Target/SystemZ/Disassembler/Makefile -----------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMSystemZDisassembler
-
-# Hack: we need to include 'main' x86 target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp b/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp
index bf67b75d5337..20e015b42d21 100644
--- a/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp
+++ b/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
#include "SystemZ.h"
-#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
@@ -46,6 +46,34 @@ extern "C" void LLVMInitializeSystemZDisassembler() {
createSystemZDisassembler);
}
+/// tryAddingSymbolicOperand - tries to add a symbolic operand in place of the
+/// immediate Value in the MCInst.
+///
+/// @param Value - The immediate Value; any PC adjustment has already been
+/// made by the caller.
+/// @param isBranch - True if the instruction is a branch instruction.
+/// @param Address - The starting address of the instruction.
+/// @param Offset - The byte offset to this immediate in the instruction.
+/// @param Width - The byte width of this immediate in the instruction.
+///
+/// If the getOpInfo() function was set when setupForSymbolicDisassembly() was
+/// called, then that function is called to get any symbolic information for
+/// the immediate in the instruction using the Address, Offset and Width. If
+/// that returns non-zero, the symbolic information it returns is used to
+/// create an MCExpr which is added as an operand to the MCInst. If getOpInfo()
+/// returns zero and isBranch is true, then a symbol lookup is done for the
+/// immediate Value; if a symbol is found, an MCExpr is created with it,
+/// otherwise an MCExpr with the immediate Value is created. This function
+/// returns true if it adds an operand to the MCInst and false otherwise.
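+///
+/// For example, when a decoded branch target resolves to a known symbol, the
+/// MCInst gets an MCExpr operand naming that symbol instead of a raw
+/// immediate.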
+static bool tryAddingSymbolicOperand(int64_t Value, bool isBranch,
+ uint64_t Address, uint64_t Offset,
+ uint64_t Width, MCInst &MI,
+ const void *Decoder) {
+ const MCDisassembler *Dis = static_cast<const MCDisassembler*>(Decoder);
+ return Dis->tryAddingSymbolicOperand(MI, Value, Address, isBranch,
+ Offset, Width);
+}
+
static DecodeStatus decodeRegisterClass(MCInst &Inst, uint64_t RegNo,
const unsigned *Regs, unsigned Size) {
assert(RegNo < Size && "Invalid register");
@@ -206,22 +234,35 @@ static DecodeStatus decodeS32ImmOperand(MCInst &Inst, uint64_t Imm,
template<unsigned N>
static DecodeStatus decodePCDBLOperand(MCInst &Inst, uint64_t Imm,
- uint64_t Address) {
+ uint64_t Address,
+ bool isBranch,
+ const void *Decoder) {
assert(isUInt<N>(Imm) && "Invalid PC-relative offset");
- Inst.addOperand(MCOperand::createImm(SignExtend64<N>(Imm) * 2 + Address));
+ uint64_t Value = SignExtend64<N>(Imm) * 2 + Address;
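+ // For example, with N = 16, Imm = 0xFFFE (i.e. -2) and Address = 0x1000,
+ // Value is -2 * 2 + 0x1000 = 0xFFC.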
+
+ if (!tryAddingSymbolicOperand(Value, isBranch, Address, 2, N / 8,
+ Inst, Decoder))
+ Inst.addOperand(MCOperand::createImm(Value));
+
return MCDisassembler::Success;
}
-static DecodeStatus decodePC16DBLOperand(MCInst &Inst, uint64_t Imm,
- uint64_t Address,
- const void *Decoder) {
- return decodePCDBLOperand<16>(Inst, Imm, Address);
+static DecodeStatus decodePC16DBLBranchOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodePCDBLOperand<16>(Inst, Imm, Address, true, Decoder);
+}
+
+static DecodeStatus decodePC32DBLBranchOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodePCDBLOperand<32>(Inst, Imm, Address, true, Decoder);
}
static DecodeStatus decodePC32DBLOperand(MCInst &Inst, uint64_t Imm,
uint64_t Address,
const void *Decoder) {
- return decodePCDBLOperand<32>(Inst, Imm, Address);
+ return decodePCDBLOperand<32>(Inst, Imm, Address, false, Decoder);
}
static DecodeStatus decodeBDAddr12Operand(MCInst &Inst, uint64_t Field,
diff --git a/lib/Target/SystemZ/InstPrinter/Makefile b/lib/Target/SystemZ/InstPrinter/Makefile
deleted file mode 100644
index 3ba8126735a5..000000000000
--- a/lib/Target/SystemZ/InstPrinter/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/SystemZ/AsmPrinter/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMSystemZAsmPrinter
-
-# Hack: we need to include 'main' mips target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/SystemZ/MCTargetDesc/Makefile b/lib/Target/SystemZ/MCTargetDesc/Makefile
deleted file mode 100644
index 08f1a9d51fb5..000000000000
--- a/lib/Target/SystemZ/MCTargetDesc/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/SystemZ/TargetDesc/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMSystemZDesc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
index 57eebe19c044..c4d546cb7dff 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
@@ -58,7 +58,8 @@ public:
const MCAsmLayout &Layout) const override {
return false;
}
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override {
llvm_unreachable("SystemZ does not have assembler relaxation");
}
bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp
index ee1af023769e..368c95f7bac2 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp
@@ -24,8 +24,8 @@ public:
protected:
// Override MCELFObjectTargetWriter.
- unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel) const override;
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
};
} // end anonymous namespace
@@ -106,7 +106,8 @@ static unsigned getPLTReloc(unsigned Kind) {
llvm_unreachable("Unsupported absolute address");
}
-unsigned SystemZObjectWriter::GetRelocType(const MCValue &Target,
+unsigned SystemZObjectWriter::getRelocType(MCContext &Ctx,
+ const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const {
MCSymbolRefExpr::VariantKind Modifier = Target.getAccessVariant();
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
index 2115d4480eef..e16ba9e15317 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
@@ -10,7 +10,6 @@
#include "SystemZMCTargetDesc.h"
#include "InstPrinter/SystemZInstPrinter.h"
#include "SystemZMCAsmInfo.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
@@ -159,17 +158,8 @@ createSystemZMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
return createSystemZMCSubtargetInfoImpl(TT, CPU, FS);
}
-static MCCodeGenInfo *createSystemZMCCodeGenInfo(const Triple &TT,
- Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
-
- // Static code is suitable for use in a dynamic executable; there is no
- // separate DynamicNoPIC model.
- if (RM == Reloc::Default || RM == Reloc::DynamicNoPIC)
- RM = Reloc::Static;
-
+static void adjustCodeGenOpts(const Triple &TT, Reloc::Model RM,
+ CodeModel::Model &CM) {
// For SystemZ we define the models as follows:
//
// Small: BRASL can call any function and will use a stub if necessary.
@@ -203,8 +193,6 @@ static MCCodeGenInfo *createSystemZMCCodeGenInfo(const Triple &TT,
CM = CodeModel::Small;
else if (CM == CodeModel::JITDefault)
CM = RM == Reloc::PIC_ ? CodeModel::Small : CodeModel::Medium;
- X->initMCCodeGenInfo(RM, CM, OL);
- return X;
}
static MCInstPrinter *createSystemZMCInstPrinter(const Triple &T,
@@ -220,9 +208,9 @@ extern "C" void LLVMInitializeSystemZTargetMC() {
TargetRegistry::RegisterMCAsmInfo(TheSystemZTarget,
createSystemZMCAsmInfo);
- // Register the MCCodeGenInfo.
- TargetRegistry::RegisterMCCodeGenInfo(TheSystemZTarget,
- createSystemZMCCodeGenInfo);
+ // Register the adjustCodeGenOpts hook.
+ TargetRegistry::registerMCAdjustCodeGenOpts(TheSystemZTarget,
+ adjustCodeGenOpts);
// Register the MCCodeEmitter.
TargetRegistry::RegisterMCCodeEmitter(TheSystemZTarget,
diff --git a/lib/Target/SystemZ/Makefile b/lib/Target/SystemZ/Makefile
deleted file mode 100644
index 732c31725538..000000000000
--- a/lib/Target/SystemZ/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-##===- lib/Target/SystemZ/Makefile -------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMSystemZCodeGen
-TARGET = SystemZ
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = SystemZGenRegisterInfo.inc \
- SystemZGenAsmWriter.inc \
- SystemZGenAsmMatcher.inc \
- SystemZGenDisassemblerTables.inc \
- SystemZGenInstrInfo.inc \
- SystemZGenDAGISel.inc \
- SystemZGenSubtargetInfo.inc \
- SystemZGenCallingConv.inc \
- SystemZGenMCCodeEmitter.inc
-
-DIRS = InstPrinter AsmParser Disassembler TargetInfo MCTargetDesc
-
-include $(LEVEL)/Makefile.common
-
diff --git a/lib/Target/SystemZ/README.txt b/lib/Target/SystemZ/README.txt
index cd367d60bab7..86a1322c9e23 100644
--- a/lib/Target/SystemZ/README.txt
+++ b/lib/Target/SystemZ/README.txt
@@ -7,13 +7,6 @@ for later architectures at some point.
--
-SystemZDAGToDAGISel::SelectInlineAsmMemoryOperand() is passed "m" for all
-inline asm memory constraints; it doesn't get to see the original constraint.
-This means that it must conservatively treat all inline asm constraints
-as the most restricted type, "R".
-
---
-
If an inline asm ties an i32 "r" result to an i64 input, the input
will be treated as an i32, leaving the upper bits uninitialised.
For example:
@@ -43,15 +36,6 @@ We don't use the BRANCH ON INDEX instructions.
--
-We might want to use BRANCH ON CONDITION for conditional indirect calls
-and conditional returns.
-
---
-
-We don't use the TEST DATA CLASS instructions.
-
---
-
We only use MVC, XC and CLC for constant-length block operations.
We could extend them to variable-length operations too,
using EXECUTE RELATIVE LONG.
@@ -79,11 +63,6 @@ via a register.)
--
-We don't use the halfword forms of LOAD REVERSED and STORE REVERSED
-(LRVH and STRVH).
-
---
-
We don't use ICM or STCM.
--
@@ -123,7 +102,7 @@ ought to be implemented as:
ngr %r2, %r0
br %r14
-but two-address optimisations reverse the order of the AND and force:
+but two-address optimizations reverse the order of the AND and force:
lhi %r0, 1
ngr %r0, %r2
@@ -166,3 +145,10 @@ If needed, we can support 16-byte atomics using LPQ, STPQ and CSDG.
We might want to model all access registers and use them to spill
32-bit values.
+
+--
+
+We might want to use the 'overflow' condition of e.g. AR to support
+llvm.sadd.with.overflow.i32 and related intrinsics; the code currently
+generated for signed overflow checks is quite bad. This would improve
+the results of using -ftrapv.
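+
+As a sketch, a checked 32-bit signed addition is expressed in IR as
+
+  %res = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+
+and could be selected to an AR followed by a single branch-on-overflow
+("jo") to a trap, rather than a longer compare-based sequence.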
diff --git a/lib/Target/SystemZ/SystemZ.h b/lib/Target/SystemZ/SystemZ.h
index cafe2c5948c4..c8ea9641fb62 100644
--- a/lib/Target/SystemZ/SystemZ.h
+++ b/lib/Target/SystemZ/SystemZ.h
@@ -87,6 +87,11 @@ const unsigned CCMASK_VCMP_MIXED = CCMASK_1;
const unsigned CCMASK_VCMP_NONE = CCMASK_3;
const unsigned CCMASK_VCMP = CCMASK_0 | CCMASK_1 | CCMASK_3;
+// Condition-code mask assignments for Test Data Class.
+const unsigned CCMASK_TDC_NOMATCH = CCMASK_0;
+const unsigned CCMASK_TDC_MATCH = CCMASK_1;
+const unsigned CCMASK_TDC = CCMASK_TDC_NOMATCH | CCMASK_TDC_MATCH;
+
// The position of the low CC bit in an IPM result.
const unsigned IPM_CC = 28;
@@ -94,6 +99,41 @@ const unsigned IPM_CC = 28;
const unsigned PFD_READ = 1;
const unsigned PFD_WRITE = 2;
+// Mask assignments for TDC
+const unsigned TDCMASK_ZERO_PLUS = 0x800;
+const unsigned TDCMASK_ZERO_MINUS = 0x400;
+const unsigned TDCMASK_NORMAL_PLUS = 0x200;
+const unsigned TDCMASK_NORMAL_MINUS = 0x100;
+const unsigned TDCMASK_SUBNORMAL_PLUS = 0x080;
+const unsigned TDCMASK_SUBNORMAL_MINUS = 0x040;
+const unsigned TDCMASK_INFINITY_PLUS = 0x020;
+const unsigned TDCMASK_INFINITY_MINUS = 0x010;
+const unsigned TDCMASK_QNAN_PLUS = 0x008;
+const unsigned TDCMASK_QNAN_MINUS = 0x004;
+const unsigned TDCMASK_SNAN_PLUS = 0x002;
+const unsigned TDCMASK_SNAN_MINUS = 0x001;
+
+const unsigned TDCMASK_ZERO = TDCMASK_ZERO_PLUS | TDCMASK_ZERO_MINUS;
+const unsigned TDCMASK_POSITIVE = TDCMASK_NORMAL_PLUS |
+ TDCMASK_SUBNORMAL_PLUS |
+ TDCMASK_INFINITY_PLUS;
+const unsigned TDCMASK_NEGATIVE = TDCMASK_NORMAL_MINUS |
+ TDCMASK_SUBNORMAL_MINUS |
+ TDCMASK_INFINITY_MINUS;
+const unsigned TDCMASK_NAN = TDCMASK_QNAN_PLUS |
+ TDCMASK_QNAN_MINUS |
+ TDCMASK_SNAN_PLUS |
+ TDCMASK_SNAN_MINUS;
+const unsigned TDCMASK_PLUS = TDCMASK_POSITIVE |
+ TDCMASK_ZERO_PLUS |
+ TDCMASK_QNAN_PLUS |
+ TDCMASK_SNAN_PLUS;
+const unsigned TDCMASK_MINUS = TDCMASK_NEGATIVE |
+ TDCMASK_ZERO_MINUS |
+ TDCMASK_QNAN_MINUS |
+ TDCMASK_SNAN_MINUS;
+const unsigned TDCMASK_ALL = TDCMASK_PLUS | TDCMASK_MINUS;
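+
+// For reference, the composite masks above work out to TDCMASK_ZERO = 0xc00,
+// TDCMASK_NAN = 0x00f and TDCMASK_ALL = 0xfff (all twelve class bits).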
+
// Number of bits in a vector register.
const unsigned VectorBits = 128;
@@ -138,6 +178,7 @@ FunctionPass *createSystemZElimComparePass(SystemZTargetMachine &TM);
FunctionPass *createSystemZShortenInstPass(SystemZTargetMachine &TM);
FunctionPass *createSystemZLongBranchPass(SystemZTargetMachine &TM);
FunctionPass *createSystemZLDCleanupPass(SystemZTargetMachine &TM);
+FunctionPass *createSystemZTDCPass();
} // end namespace llvm
#endif
diff --git a/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/lib/Target/SystemZ/SystemZAsmPrinter.cpp
index 75273114d62f..9c0f327ff744 100644
--- a/lib/Target/SystemZ/SystemZAsmPrinter.cpp
+++ b/lib/Target/SystemZ/SystemZAsmPrinter.cpp
@@ -109,6 +109,85 @@ void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
LoweredMI = MCInstBuilder(SystemZ::BR).addReg(SystemZ::R14D);
break;
+ case SystemZ::CondReturn:
+ LoweredMI = MCInstBuilder(SystemZ::BCR)
+ .addImm(MI->getOperand(0).getImm())
+ .addImm(MI->getOperand(1).getImm())
+ .addReg(SystemZ::R14D);
+ break;
+
+ case SystemZ::CRBReturn:
+ LoweredMI = MCInstBuilder(SystemZ::CRB)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R14D)
+ .addImm(0);
+ break;
+
+ case SystemZ::CGRBReturn:
+ LoweredMI = MCInstBuilder(SystemZ::CGRB)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R14D)
+ .addImm(0);
+ break;
+
+ case SystemZ::CIBReturn:
+ LoweredMI = MCInstBuilder(SystemZ::CIB)
+ .addReg(MI->getOperand(0).getReg())
+ .addImm(MI->getOperand(1).getImm())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R14D)
+ .addImm(0);
+ break;
+
+ case SystemZ::CGIBReturn:
+ LoweredMI = MCInstBuilder(SystemZ::CGIB)
+ .addReg(MI->getOperand(0).getReg())
+ .addImm(MI->getOperand(1).getImm())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R14D)
+ .addImm(0);
+ break;
+
+ case SystemZ::CLRBReturn:
+ LoweredMI = MCInstBuilder(SystemZ::CLRB)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R14D)
+ .addImm(0);
+ break;
+
+ case SystemZ::CLGRBReturn:
+ LoweredMI = MCInstBuilder(SystemZ::CLGRB)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R14D)
+ .addImm(0);
+ break;
+
+ case SystemZ::CLIBReturn:
+ LoweredMI = MCInstBuilder(SystemZ::CLIB)
+ .addReg(MI->getOperand(0).getReg())
+ .addImm(MI->getOperand(1).getImm())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R14D)
+ .addImm(0);
+ break;
+
+ case SystemZ::CLGIBReturn:
+ LoweredMI = MCInstBuilder(SystemZ::CLGIB)
+ .addReg(MI->getOperand(0).getReg())
+ .addImm(MI->getOperand(1).getImm())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R14D)
+ .addImm(0);
+ break;
+
case SystemZ::CallBRASL:
LoweredMI = MCInstBuilder(SystemZ::BRASL)
.addReg(SystemZ::R14D)
@@ -126,10 +205,96 @@ void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addExpr(Lower.getExpr(MI->getOperand(0), MCSymbolRefExpr::VK_PLT));
break;
+ case SystemZ::CallBRCL:
+ LoweredMI = MCInstBuilder(SystemZ::BRCL)
+ .addImm(MI->getOperand(0).getImm())
+ .addImm(MI->getOperand(1).getImm())
+ .addExpr(Lower.getExpr(MI->getOperand(2), MCSymbolRefExpr::VK_PLT));
+ break;
+
case SystemZ::CallBR:
LoweredMI = MCInstBuilder(SystemZ::BR).addReg(SystemZ::R1D);
break;
+ case SystemZ::CallBCR:
+ LoweredMI = MCInstBuilder(SystemZ::BCR)
+ .addImm(MI->getOperand(0).getImm())
+ .addImm(MI->getOperand(1).getImm())
+ .addReg(SystemZ::R1D);
+ break;
+
+ case SystemZ::CRBCall:
+ LoweredMI = MCInstBuilder(SystemZ::CRB)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R1D)
+ .addImm(0);
+ break;
+
+ case SystemZ::CGRBCall:
+ LoweredMI = MCInstBuilder(SystemZ::CGRB)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R1D)
+ .addImm(0);
+ break;
+
+ case SystemZ::CIBCall:
+ LoweredMI = MCInstBuilder(SystemZ::CIB)
+ .addReg(MI->getOperand(0).getReg())
+ .addImm(MI->getOperand(1).getImm())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R1D)
+ .addImm(0);
+ break;
+
+ case SystemZ::CGIBCall:
+ LoweredMI = MCInstBuilder(SystemZ::CGIB)
+ .addReg(MI->getOperand(0).getReg())
+ .addImm(MI->getOperand(1).getImm())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R1D)
+ .addImm(0);
+ break;
+
+ case SystemZ::CLRBCall:
+ LoweredMI = MCInstBuilder(SystemZ::CLRB)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R1D)
+ .addImm(0);
+ break;
+
+ case SystemZ::CLGRBCall:
+ LoweredMI = MCInstBuilder(SystemZ::CLGRB)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R1D)
+ .addImm(0);
+ break;
+
+ case SystemZ::CLIBCall:
+ LoweredMI = MCInstBuilder(SystemZ::CLIB)
+ .addReg(MI->getOperand(0).getReg())
+ .addImm(MI->getOperand(1).getImm())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R1D)
+ .addImm(0);
+ break;
+
+ case SystemZ::CLGIBCall:
+ LoweredMI = MCInstBuilder(SystemZ::CLGIB)
+ .addReg(MI->getOperand(0).getReg())
+ .addImm(MI->getOperand(1).getImm())
+ .addImm(MI->getOperand(2).getImm())
+ .addReg(SystemZ::R1D)
+ .addImm(0);
+ break;
+
case SystemZ::TLS_GDCALL:
LoweredMI = MCInstBuilder(SystemZ::BRASL)
.addReg(SystemZ::R14D)
@@ -260,6 +425,41 @@ void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addImm(15).addReg(SystemZ::R0D);
break;
+ // For a memory barrier, emit no instruction, just an assembly comment
+ // if we can.
+ case SystemZ::MemBarrier:
+ OutStreamer->emitRawComment("MEMBARRIER");
+ return;
+
+ // We want to emit "j .+2" for traps: the branch targets the relative
+ // immediate field of the jump instruction itself, which decodes as an
+ // illegal instruction. We cannot emit a "." symbol, so create and emit a
+ // temp label before the instruction and use that instead.
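+ //
+ // The emitted sequence is then, e.g. (label name illustrative):
+ //   .LtmpN:
+ //     j .LtmpN+2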
+ case SystemZ::Trap: {
+ MCSymbol *DotSym = OutContext.createTempSymbol();
+ OutStreamer->EmitLabel(DotSym);
+
+ const MCSymbolRefExpr *Expr = MCSymbolRefExpr::create(DotSym, OutContext);
+ const MCConstantExpr *ConstExpr = MCConstantExpr::create(2, OutContext);
+ LoweredMI = MCInstBuilder(SystemZ::J)
+ .addExpr(MCBinaryExpr::createAdd(Expr, ConstExpr, OutContext));
+ }
+ break;
+
+ // Conditional traps create a branch-on-condition instruction that jumps
+ // into the relative immediate field of the instruction itself, e.g. "jo .+2".
+ case SystemZ::CondTrap: {
+ MCSymbol *DotSym = OutContext.createTempSymbol();
+ OutStreamer->EmitLabel(DotSym);
+
+ const MCSymbolRefExpr *Expr = MCSymbolRefExpr::create(DotSym, OutContext);
+ const MCConstantExpr *ConstExpr = MCConstantExpr::create(2, OutContext);
+ LoweredMI = MCInstBuilder(SystemZ::BRC)
+ .addImm(MI->getOperand(0).getImm())
+ .addImm(MI->getOperand(1).getImm())
+ .addExpr(MCBinaryExpr::createAdd(Expr, ConstExpr, OutContext));
+ }
+ break;
+
default:
Lower.lower(MI, LoweredMI);
break;
diff --git a/lib/Target/SystemZ/SystemZCallingConv.cpp b/lib/Target/SystemZ/SystemZCallingConv.cpp
index cc9c84b6a058..72da51f74b10 100644
--- a/lib/Target/SystemZ/SystemZCallingConv.cpp
+++ b/lib/Target/SystemZ/SystemZCallingConv.cpp
@@ -12,10 +12,10 @@
using namespace llvm;
-const unsigned SystemZ::ArgGPRs[SystemZ::NumArgGPRs] = {
+const MCPhysReg SystemZ::ArgGPRs[SystemZ::NumArgGPRs] = {
SystemZ::R2D, SystemZ::R3D, SystemZ::R4D, SystemZ::R5D, SystemZ::R6D
};
-const unsigned SystemZ::ArgFPRs[SystemZ::NumArgFPRs] = {
+const MCPhysReg SystemZ::ArgFPRs[SystemZ::NumArgFPRs] = {
SystemZ::F0D, SystemZ::F2D, SystemZ::F4D, SystemZ::F6D
};
diff --git a/lib/Target/SystemZ/SystemZCallingConv.h b/lib/Target/SystemZ/SystemZCallingConv.h
index bff0706618aa..b5523e586f4c 100644
--- a/lib/Target/SystemZ/SystemZCallingConv.h
+++ b/lib/Target/SystemZ/SystemZCallingConv.h
@@ -12,14 +12,15 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/MC/MCRegisterInfo.h"
namespace llvm {
namespace SystemZ {
const unsigned NumArgGPRs = 5;
- extern const unsigned ArgGPRs[NumArgGPRs];
+ extern const MCPhysReg ArgGPRs[NumArgGPRs];
const unsigned NumArgFPRs = 4;
- extern const unsigned ArgFPRs[NumArgFPRs];
+ extern const MCPhysReg ArgFPRs[NumArgFPRs];
} // end namespace SystemZ
class SystemZCCState : public CCState {
@@ -79,6 +80,51 @@ public:
bool IsShortVector(unsigned ValNo) { return ArgIsShortVector[ValNo]; }
};
+// Handle i128 argument types. These need to be passed by implicit
+// reference. This could be as simple as the following .td line:
+// CCIfType<[i128], CCPassIndirect<i64>>,
+// except that i128 is not a legal type, and therefore gets split by
+// common code into a pair of i64 arguments.
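+//
+// For example, an __int128 argument arrives here as two i64 parts; both
+// parts are assigned the single location (a GPR such as R2, or a stack
+// slot) that holds the pointer to the value in memory.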
+inline bool CC_SystemZ_I128Indirect(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State) {
+ SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();
+
+ // ArgFlags.isSplit() is true on the first part of a i128 argument;
+ // PendingMembers.empty() is false on all subsequent parts.
+ if (!ArgFlags.isSplit() && PendingMembers.empty())
+ return false;
+
+ // Push a pending Indirect value location for each part.
+ LocVT = MVT::i64;
+ LocInfo = CCValAssign::Indirect;
+ PendingMembers.push_back(CCValAssign::getPending(ValNo, ValVT,
+ LocVT, LocInfo));
+ if (!ArgFlags.isSplitEnd())
+ return true;
+
+ // OK, we've collected all parts in the pending list. Allocate
+ // the location (register or stack slot) for the indirect pointer.
+ // (This duplicates the usual i64 calling convention rules.)
+ unsigned Reg = State.AllocateReg(SystemZ::ArgGPRs);
+ unsigned Offset = Reg ? 0 : State.AllocateStack(8, 8);
+
+ // Use that same location for all the pending parts.
+ for (auto &It : PendingMembers) {
+ if (Reg)
+ It.convertToReg(Reg);
+ else
+ It.convertToMem(Offset);
+ State.addLoc(It);
+ }
+
+ PendingMembers.clear();
+
+ return true;
+}
+
} // end namespace llvm
#endif
diff --git a/lib/Target/SystemZ/SystemZCallingConv.td b/lib/Target/SystemZ/SystemZCallingConv.td
index bdd1b1598adb..2bf5ac29865f 100644
--- a/lib/Target/SystemZ/SystemZCallingConv.td
+++ b/lib/Target/SystemZ/SystemZCallingConv.td
@@ -33,6 +33,9 @@ def RetCC_SystemZ : CallingConv<[
// Promote i32 to i64 if it has an explicit extension type.
CCIfType<[i32], CCIfExtend<CCPromoteToType<i64>>>,
+ // A SwiftError is returned in R9.
+ CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R9D]>>>,
+
// ABI-compliant code returns 64-bit integers in R2. Make the other
// call-clobbered argument registers available for code that doesn't
// care about the ABI. (R6 is an argument register too, but is
@@ -65,8 +68,17 @@ def CC_SystemZ : CallingConv<[
// are smaller than 64 bits shouldn't.
CCIfType<[i32], CCIfExtend<CCPromoteToType<i64>>>,
+ // A SwiftSelf is passed in callee-saved R10.
+ CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[R10D]>>>,
+
+ // A SwiftError is passed in callee-saved R9.
+ CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R9D]>>>,
+
// Force long double values to the stack and pass i64 pointers to them.
CCIfType<[f128], CCPassIndirect<i64>>,
+  // Same for i128 values. These have already been split into two i64
+  // parts here, so we have to use a custom handler.
+ CCIfType<[i64], CCCustom<"CC_SystemZ_I128Indirect">>,
// The first 5 integer arguments are passed in R2-R6. Note that R6
// is call-saved.
@@ -105,3 +117,6 @@ def CC_SystemZ : CallingConv<[
//===----------------------------------------------------------------------===//
def CSR_SystemZ : CalleeSavedRegs<(add (sequence "R%dD", 6, 15),
(sequence "F%dD", 8, 15))>;
+
+// R9 is used to return SwiftError; remove it from CSR.
+def CSR_SystemZ_SwiftError : CalleeSavedRegs<(sub CSR_SystemZ, R9D)>;
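
The net ABI effect of the custom i128 rule is that such values travel by
address in both directions. A rough source-level illustration, assuming a
compiler that exposes __int128 on s390x:

// Sketch of the expected lowering, not generated code: each __int128
// argument is spilled to a caller-owned 16-byte slot whose address is
// passed in the next free argument GPR; an __int128 return value is
// likewise returned indirectly, which is why CanLowerReturn rejects
// MVT::i128 later in this patch.
__int128 add128(__int128 a, __int128 b) { return a + b; }
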
diff --git a/lib/Target/SystemZ/SystemZElimCompare.cpp b/lib/Target/SystemZ/SystemZElimCompare.cpp
index 4818ed015522..27350b88554d 100644
--- a/lib/Target/SystemZ/SystemZElimCompare.cpp
+++ b/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -18,7 +18,6 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Function.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
@@ -65,18 +64,22 @@ public:
bool processBlock(MachineBasicBlock &MBB);
bool runOnMachineFunction(MachineFunction &F) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
private:
- Reference getRegReferences(MachineInstr *MI, unsigned Reg);
- bool convertToBRCT(MachineInstr *MI, MachineInstr *Compare,
+ Reference getRegReferences(MachineInstr &MI, unsigned Reg);
+ bool convertToBRCT(MachineInstr &MI, MachineInstr &Compare,
SmallVectorImpl<MachineInstr *> &CCUsers);
- bool convertToLoadAndTest(MachineInstr *MI);
- bool adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare,
+ bool convertToLoadAndTest(MachineInstr &MI);
+ bool adjustCCMasksForInstr(MachineInstr &MI, MachineInstr &Compare,
SmallVectorImpl<MachineInstr *> &CCUsers);
- bool optimizeCompareZero(MachineInstr *Compare,
+ bool optimizeCompareZero(MachineInstr &Compare,
SmallVectorImpl<MachineInstr *> &CCUsers);
- bool fuseCompareAndBranch(MachineInstr *Compare,
- SmallVectorImpl<MachineInstr *> &CCUsers);
+ bool fuseCompareOperations(MachineInstr &Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers);
const SystemZInstrInfo *TII;
const TargetRegisterInfo *TRI;
@@ -98,14 +101,12 @@ static bool isCCLiveOut(MachineBasicBlock &MBB) {
}
// Return true if any CC result of MI would reflect the value of Reg.
-static bool resultTests(MachineInstr *MI, unsigned Reg) {
- if (MI->getNumOperands() > 0 &&
- MI->getOperand(0).isReg() &&
- MI->getOperand(0).isDef() &&
- MI->getOperand(0).getReg() == Reg)
+static bool resultTests(MachineInstr &MI, unsigned Reg) {
+ if (MI.getNumOperands() > 0 && MI.getOperand(0).isReg() &&
+ MI.getOperand(0).isDef() && MI.getOperand(0).getReg() == Reg)
return true;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
case SystemZ::LR:
case SystemZ::LGR:
case SystemZ::LGFR:
@@ -118,7 +119,7 @@ static bool resultTests(MachineInstr *MI, unsigned Reg) {
case SystemZ::LTEBR:
case SystemZ::LTDBR:
case SystemZ::LTXBR:
- if (MI->getOperand(1).getReg() == Reg)
+ if (MI.getOperand(1).getReg() == Reg)
return true;
}
@@ -126,10 +127,10 @@ static bool resultTests(MachineInstr *MI, unsigned Reg) {
}
// Describe the references to Reg or any of its aliases in MI.
-Reference SystemZElimCompare::getRegReferences(MachineInstr *MI, unsigned Reg) {
+Reference SystemZElimCompare::getRegReferences(MachineInstr &MI, unsigned Reg) {
Reference Ref;
- for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
- const MachineOperand &MO = MI->getOperand(I);
+ for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
+ const MachineOperand &MO = MI.getOperand(I);
if (MO.isReg()) {
if (unsigned MOReg = MO.getReg()) {
if (TRI->regsOverlap(MOReg, Reg)) {
@@ -146,23 +147,23 @@ Reference SystemZElimCompare::getRegReferences(MachineInstr *MI, unsigned Reg) {
// Return true if this is a load and test which can be optimized in the
// same way as a compare instruction.
-static bool isLoadAndTestAsCmp(MachineInstr *MI) {
+static bool isLoadAndTestAsCmp(MachineInstr &MI) {
// If during isel we used a load-and-test as a compare with 0, the
// def operand is dead.
- return ((MI->getOpcode() == SystemZ::LTEBR ||
- MI->getOpcode() == SystemZ::LTDBR ||
- MI->getOpcode() == SystemZ::LTXBR) &&
- MI->getOperand(0).isDead());
+ return (MI.getOpcode() == SystemZ::LTEBR ||
+ MI.getOpcode() == SystemZ::LTDBR ||
+ MI.getOpcode() == SystemZ::LTXBR) &&
+ MI.getOperand(0).isDead();
}
// Return the source register of Compare, which is the unknown value
// being tested.
-static unsigned getCompareSourceReg(MachineInstr *Compare) {
+static unsigned getCompareSourceReg(MachineInstr &Compare) {
unsigned reg = 0;
- if (Compare->isCompare())
- reg = Compare->getOperand(0).getReg();
+ if (Compare.isCompare())
+ reg = Compare.getOperand(0).getReg();
else if (isLoadAndTestAsCmp(Compare))
- reg = Compare->getOperand(1).getReg();
+ reg = Compare.getOperand(1).getReg();
assert (reg);
return reg;
@@ -171,11 +172,11 @@ static unsigned getCompareSourceReg(MachineInstr *Compare) {
// Compare compares the result of MI against zero. If MI is an addition
// of -1 and if CCUsers is a single branch on nonzero, eliminate the addition
// and convert the branch to a BRCT(G). Return true on success.
-bool
-SystemZElimCompare::convertToBRCT(MachineInstr *MI, MachineInstr *Compare,
- SmallVectorImpl<MachineInstr *> &CCUsers) {
+bool SystemZElimCompare::convertToBRCT(
+ MachineInstr &MI, MachineInstr &Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers) {
// Check whether we have an addition of -1.
- unsigned Opcode = MI->getOpcode();
+ unsigned Opcode = MI.getOpcode();
unsigned BRCT;
if (Opcode == SystemZ::AHI)
BRCT = SystemZ::BRCT;
@@ -183,7 +184,7 @@ SystemZElimCompare::convertToBRCT(MachineInstr *MI, MachineInstr *Compare,
BRCT = SystemZ::BRCTG;
else
return false;
- if (MI->getOperand(2).getImm() != -1)
+ if (MI.getOperand(2).getImm() != -1)
return false;
// Check whether we have a single JLH.
@@ -201,7 +202,7 @@ SystemZElimCompare::convertToBRCT(MachineInstr *MI, MachineInstr *Compare,
unsigned SrcReg = getCompareSourceReg(Compare);
MachineBasicBlock::iterator MBBI = Compare, MBBE = Branch;
for (++MBBI; MBBI != MBBE; ++MBBI)
- if (getRegReferences(MBBI, SrcReg))
+ if (getRegReferences(*MBBI, SrcReg))
return false;
// The transformation is OK. Rebuild Branch as a BRCT(G).
@@ -210,24 +211,24 @@ SystemZElimCompare::convertToBRCT(MachineInstr *MI, MachineInstr *Compare,
Branch->RemoveOperand(0);
Branch->setDesc(TII->get(BRCT));
MachineInstrBuilder(*Branch->getParent()->getParent(), Branch)
- .addOperand(MI->getOperand(0))
- .addOperand(MI->getOperand(1))
- .addOperand(Target)
- .addReg(SystemZ::CC, RegState::ImplicitDefine);
- MI->eraseFromParent();
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(1))
+ .addOperand(Target)
+ .addReg(SystemZ::CC, RegState::ImplicitDefine | RegState::Dead);
+ MI.eraseFromParent();
return true;
}
// If MI is a load instruction, try to convert it into a LOAD AND TEST.
// Return true on success.
-bool SystemZElimCompare::convertToLoadAndTest(MachineInstr *MI) {
- unsigned Opcode = TII->getLoadAndTest(MI->getOpcode());
+bool SystemZElimCompare::convertToLoadAndTest(MachineInstr &MI) {
+ unsigned Opcode = TII->getLoadAndTest(MI.getOpcode());
if (!Opcode)
return false;
- MI->setDesc(TII->get(Opcode));
- MachineInstrBuilder(*MI->getParent()->getParent(), MI)
- .addReg(SystemZ::CC, RegState::ImplicitDefine);
+ MI.setDesc(TII->get(Opcode));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addReg(SystemZ::CC, RegState::ImplicitDefine);
return true;
}
@@ -236,10 +237,10 @@ bool SystemZElimCompare::convertToLoadAndTest(MachineInstr *MI) {
// would also reflect the value of X. Try to adjust CCUsers so that
// they test the result of MI directly, returning true on success.
// Leave everything unchanged on failure.
-bool SystemZElimCompare::
-adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare,
- SmallVectorImpl<MachineInstr *> &CCUsers) {
- int Opcode = MI->getOpcode();
+bool SystemZElimCompare::adjustCCMasksForInstr(
+ MachineInstr &MI, MachineInstr &Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers) {
+ int Opcode = MI.getOpcode();
const MCInstrDesc &Desc = TII->get(Opcode);
unsigned MIFlags = Desc.TSFlags;
@@ -247,7 +248,7 @@ adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare,
unsigned ReusableCCMask = SystemZII::getCompareZeroCCMask(MIFlags);
// For unsigned comparisons with zero, only equality makes sense.
- unsigned CompareFlags = Compare->getDesc().TSFlags;
+ unsigned CompareFlags = Compare.getDesc().TSFlags;
if (CompareFlags & SystemZII::IsLogical)
ReusableCCMask &= SystemZ::CCMASK_CMP_EQ;
@@ -296,9 +297,9 @@ adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare,
}
// CC is now live after MI.
- int CCDef = MI->findRegisterDefOperandIdx(SystemZ::CC, false, true, TRI);
+ int CCDef = MI.findRegisterDefOperandIdx(SystemZ::CC, false, true, TRI);
assert(CCDef >= 0 && "Couldn't find CC set");
- MI->getOperand(CCDef).setIsDead(false);
+ MI.getOperand(CCDef).setIsDead(false);
// Clear any intervening kills of CC.
MachineBasicBlock::iterator MBBI = MI, MBBE = Compare;
@@ -309,8 +310,8 @@ adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare,
}
// Return true if Compare is a comparison against zero.
-static bool isCompareZero(MachineInstr *Compare) {
- switch (Compare->getOpcode()) {
+static bool isCompareZero(MachineInstr &Compare) {
+ switch (Compare.getOpcode()) {
case SystemZ::LTEBRCompare:
case SystemZ::LTDBRCompare:
case SystemZ::LTXBRCompare:
@@ -321,9 +322,8 @@ static bool isCompareZero(MachineInstr *Compare) {
if (isLoadAndTestAsCmp(Compare))
return true;
- return (Compare->getNumExplicitOperands() == 2 &&
- Compare->getOperand(1).isImm() &&
- Compare->getOperand(1).getImm() == 0);
+ return Compare.getNumExplicitOperands() == 2 &&
+ Compare.getOperand(1).isImm() && Compare.getOperand(1).getImm() == 0;
}
}
@@ -331,21 +331,20 @@ static bool isCompareZero(MachineInstr *Compare) {
// a value against zero. Return true on success, in which case Compare
// should be deleted as dead. CCUsers is the list of instructions that use the CC
// value produced by Compare.
-bool SystemZElimCompare::
-optimizeCompareZero(MachineInstr *Compare,
- SmallVectorImpl<MachineInstr *> &CCUsers) {
+bool SystemZElimCompare::optimizeCompareZero(
+ MachineInstr &Compare, SmallVectorImpl<MachineInstr *> &CCUsers) {
if (!isCompareZero(Compare))
return false;
// Search back for CC results that are based on the first operand.
unsigned SrcReg = getCompareSourceReg(Compare);
- MachineBasicBlock &MBB = *Compare->getParent();
+ MachineBasicBlock &MBB = *Compare.getParent();
MachineBasicBlock::iterator MBBI = Compare, MBBE = MBB.begin();
Reference CCRefs;
Reference SrcRefs;
while (MBBI != MBBE) {
--MBBI;
- MachineInstr *MI = MBBI;
+ MachineInstr &MI = *MBBI;
if (resultTests(MI, SrcReg)) {
// Try to remove both MI and Compare by converting a branch to BRCT(G).
// We don't care in this case whether CC is modified between MI and
@@ -373,54 +372,85 @@ optimizeCompareZero(MachineInstr *Compare,
// Try to fuse comparison instruction Compare into a later branch.
// Return true on success, in which case Compare is redundant.
-bool SystemZElimCompare::
-fuseCompareAndBranch(MachineInstr *Compare,
- SmallVectorImpl<MachineInstr *> &CCUsers) {
- // See whether we have a comparison that can be fused.
- unsigned FusedOpcode = TII->getCompareAndBranch(Compare->getOpcode(),
- Compare);
- if (!FusedOpcode)
- return false;
-
+bool SystemZElimCompare::fuseCompareOperations(
+ MachineInstr &Compare, SmallVectorImpl<MachineInstr *> &CCUsers) {
// See whether we have a single branch with which to fuse.
if (CCUsers.size() != 1)
return false;
MachineInstr *Branch = CCUsers[0];
- if (Branch->getOpcode() != SystemZ::BRC)
+ SystemZII::FusedCompareType Type;
+ switch (Branch->getOpcode()) {
+ case SystemZ::BRC:
+ Type = SystemZII::CompareAndBranch;
+ break;
+ case SystemZ::CondReturn:
+ Type = SystemZII::CompareAndReturn;
+ break;
+ case SystemZ::CallBCR:
+ Type = SystemZII::CompareAndSibcall;
+ break;
+ case SystemZ::CondTrap:
+ Type = SystemZII::CompareAndTrap;
+ break;
+ default:
+ return false;
+ }
+
+ // See whether we have a comparison that can be fused.
+ unsigned FusedOpcode =
+ TII->getFusedCompare(Compare.getOpcode(), Type, &Compare);
+ if (!FusedOpcode)
return false;
// Make sure that the operands are available at the branch.
- unsigned SrcReg = Compare->getOperand(0).getReg();
- unsigned SrcReg2 = (Compare->getOperand(1).isReg() ?
- Compare->getOperand(1).getReg() : 0);
+ unsigned SrcReg = Compare.getOperand(0).getReg();
+ unsigned SrcReg2 =
+ Compare.getOperand(1).isReg() ? Compare.getOperand(1).getReg() : 0;
MachineBasicBlock::iterator MBBI = Compare, MBBE = Branch;
for (++MBBI; MBBI != MBBE; ++MBBI)
if (MBBI->modifiesRegister(SrcReg, TRI) ||
(SrcReg2 && MBBI->modifiesRegister(SrcReg2, TRI)))
return false;
- // Read the branch mask and target.
+  // Read the branch mask, the target (if applicable), and the regmask (if applicable).
MachineOperand CCMask(MBBI->getOperand(1));
- MachineOperand Target(MBBI->getOperand(2));
assert((CCMask.getImm() & ~SystemZ::CCMASK_ICMP) == 0 &&
"Invalid condition-code mask for integer comparison");
+ // This is only valid for CompareAndBranch.
+ MachineOperand Target(MBBI->getOperand(
+ Type == SystemZII::CompareAndBranch ? 2 : 0));
+ const uint32_t *RegMask;
+ if (Type == SystemZII::CompareAndSibcall)
+ RegMask = MBBI->getOperand(2).getRegMask();
// Clear out all current operands.
int CCUse = MBBI->findRegisterUseOperandIdx(SystemZ::CC, false, TRI);
- assert(CCUse >= 0 && "BRC must use CC");
+ assert(CCUse >= 0 && "BRC/BCR must use CC");
Branch->RemoveOperand(CCUse);
- Branch->RemoveOperand(2);
+ // Remove target (branch) or regmask (sibcall).
+ if (Type == SystemZII::CompareAndBranch ||
+ Type == SystemZII::CompareAndSibcall)
+ Branch->RemoveOperand(2);
Branch->RemoveOperand(1);
Branch->RemoveOperand(0);
// Rebuild Branch as a fused compare and branch.
Branch->setDesc(TII->get(FusedOpcode));
- MachineInstrBuilder(*Branch->getParent()->getParent(), Branch)
- .addOperand(Compare->getOperand(0))
- .addOperand(Compare->getOperand(1))
- .addOperand(CCMask)
- .addOperand(Target)
- .addReg(SystemZ::CC, RegState::ImplicitDefine);
+ MachineInstrBuilder MIB(*Branch->getParent()->getParent(), Branch);
+ MIB.addOperand(Compare.getOperand(0))
+ .addOperand(Compare.getOperand(1))
+ .addOperand(CCMask);
+
+ if (Type == SystemZII::CompareAndBranch) {
+ // Only conditional branches define CC, as they may be converted back
+ // to a non-fused branch because of a long displacement. Conditional
+ // returns don't have that problem.
+ MIB.addOperand(Target)
+ .addReg(SystemZ::CC, RegState::ImplicitDefine | RegState::Dead);
+ }
+
+ if (Type == SystemZII::CompareAndSibcall)
+ MIB.addRegMask(RegMask);
// Clear any intervening kills of SrcReg and SrcReg2.
MBBI = Compare;
@@ -445,29 +475,31 @@ bool SystemZElimCompare::processBlock(MachineBasicBlock &MBB) {
SmallVector<MachineInstr *, 4> CCUsers;
MachineBasicBlock::iterator MBBI = MBB.end();
while (MBBI != MBB.begin()) {
- MachineInstr *MI = --MBBI;
- if (CompleteCCUsers &&
- (MI->isCompare() || isLoadAndTestAsCmp(MI)) &&
+ MachineInstr &MI = *--MBBI;
+ if (CompleteCCUsers && (MI.isCompare() || isLoadAndTestAsCmp(MI)) &&
(optimizeCompareZero(MI, CCUsers) ||
- fuseCompareAndBranch(MI, CCUsers))) {
+ fuseCompareOperations(MI, CCUsers))) {
++MBBI;
- MI->eraseFromParent();
+ MI.eraseFromParent();
Changed = true;
CCUsers.clear();
continue;
}
- if (MI->definesRegister(SystemZ::CC)) {
+ if (MI.definesRegister(SystemZ::CC)) {
CCUsers.clear();
CompleteCCUsers = true;
}
- if (MI->readsRegister(SystemZ::CC) && CompleteCCUsers)
- CCUsers.push_back(MI);
+ if (MI.readsRegister(SystemZ::CC) && CompleteCCUsers)
+ CCUsers.push_back(&MI);
}
return Changed;
}
bool SystemZElimCompare::runOnMachineFunction(MachineFunction &F) {
+ if (skipFunction(*F.getFunction()))
+ return false;
+
TII = static_cast<const SystemZInstrInfo *>(F.getSubtarget().getInstrInfo());
TRI = &TII->getRegisterInfo();
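
For orientation, the pattern convertToBRCT targets is the classic count-down
loop: an addition of -1 whose result is compared against zero and branched
on. A sketch (what isel actually produces depends on the pass pipeline):

// Decrement-and-branch: isel produces an AHI/AGHI of -1, a compare of the
// result with 0, and a JLH back edge; the pass collapses these into a
// single BRCT(G). Assumes N > 0 on entry.
void clear(char *P, long N) {
  do {
    *P++ = 0;
  } while (--N != 0);
}
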
diff --git a/lib/Target/SystemZ/SystemZFrameLowering.cpp b/lib/Target/SystemZ/SystemZFrameLowering.cpp
index e1b20d0536d1..ccaed49475ca 100644
--- a/lib/Target/SystemZ/SystemZFrameLowering.cpp
+++ b/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -81,6 +81,12 @@ void SystemZFrameLowering::determineCalleeSaves(MachineFunction &MF,
for (unsigned I = MFI->getVarArgsFirstGPR(); I < SystemZ::NumArgGPRs; ++I)
SavedRegs.set(SystemZ::ArgGPRs[I]);
+ // If there are any landing pads, entering them will modify r6/r7.
+ if (!MF.getMMI().getLandingPads().empty()) {
+ SavedRegs.set(SystemZ::R6D);
+ SavedRegs.set(SystemZ::R7D);
+ }
+
// If the function requires a frame pointer, record that the hard
// frame pointer will be clobbered.
if (HasFP)
@@ -258,7 +264,8 @@ restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
// Do a second scan adding regs as being defined by instruction
for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
unsigned Reg = CSI[I].getReg();
- if (Reg != LowGPR && Reg != HighGPR)
+ if (Reg != LowGPR && Reg != HighGPR &&
+ SystemZ::GR64BitRegClass.contains(Reg))
MIB.addReg(Reg, RegState::ImplicitDefine);
}
}
@@ -353,6 +360,15 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF,
uint64_t StackSize = getAllocatedStackSize(MF);
if (StackSize) {
+ // Determine if we want to store a backchain.
+ bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
+
+ // If we need backchain, save current stack pointer. R1 is free at this
+ // point.
+ if (StoreBackchain)
+ BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::LGR))
+ .addReg(SystemZ::R1D, RegState::Define).addReg(SystemZ::R15D);
+
// Allocate StackSize bytes.
int64_t Delta = -int64_t(StackSize);
emitIncrement(MBB, MBBI, DL, SystemZ::R15D, Delta, ZII);
@@ -363,6 +379,10 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF,
BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex);
SPOffsetFromCFA += Delta;
+
+ if (StoreBackchain)
+ BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::STG))
+ .addReg(SystemZ::R1D, RegState::Kill).addReg(SystemZ::R15D).addImm(0).addReg(0);
}
if (HasFP) {
@@ -511,7 +531,7 @@ SystemZFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
return true;
}
-void SystemZFrameLowering::
+MachineBasicBlock::iterator SystemZFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const {
@@ -520,7 +540,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF,
case SystemZ::ADJCALLSTACKUP:
assert(hasReservedCallFrame(MF) &&
"ADJSTACKDOWN and ADJSTACKUP should be no-ops");
- MBB.erase(MI);
+ return MBB.erase(MI);
break;
default:
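
With the "backchain" attribute (set, for example, when building with
-mbackchain), the first word of each frame now holds the caller's stack
pointer, which is what makes a naive stack walk possible. A sketch that
assumes every frame on the stack was built the same way; walkBackchain is a
hypothetical helper, not a supported API:

#include <cstdint>
#include <cstdio>

// Follow the chain of saved stack pointers that the prologue above stores
// at offset 0 of each frame. Assumes the outermost frame stores a zero
// backchain word; a real walker would also need the starting SP from the
// unwinder or inline asm.
void walkBackchain(std::uintptr_t SP) {
  while (SP) {
    std::printf("frame at %#lx\n", static_cast<unsigned long>(SP));
    SP = *reinterpret_cast<const std::uintptr_t *>(SP);
  }
}
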
diff --git a/lib/Target/SystemZ/SystemZFrameLowering.h b/lib/Target/SystemZ/SystemZFrameLowering.h
index 46bb6b7a7573..d43a176ad874 100644
--- a/lib/Target/SystemZ/SystemZFrameLowering.h
+++ b/lib/Target/SystemZ/SystemZFrameLowering.h
@@ -46,10 +46,9 @@ public:
int getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const override;
bool hasReservedCallFrame(const MachineFunction &MF) const override;
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const
- override;
+ MachineBasicBlock::iterator
+ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override;
// Return the number of bytes in the callee-allocated part of the frame.
uint64_t getAllocatedStackSize(const MachineFunction &MF) const;
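
The new signature follows the standard container idiom: erase hands back the
iterator past the erased element so callers can keep iterating. The same
shape in standalone form:

#include <iterator>
#include <list>

// Same convention as the updated eliminateCallFramePseudoInstr: erase
// returns the next valid iterator instead of leaving the caller with a
// dangling one.
template <typename Pred>
void eraseMatching(std::list<int> &L, Pred P) {
  for (auto I = L.begin(); I != L.end();)
    I = P(*I) ? L.erase(I) : std::next(I);
}
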
diff --git a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index a9093094d884..cd7fcc3070a4 100644
--- a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -113,7 +113,8 @@ static uint64_t allOnes(unsigned int Count) {
// (and (rotl Input, Rotate), Mask)
//
// otherwise. The output value has BitSize bits, although Input may be
-// narrower (in which case the upper bits are don't care).
+// narrower (in which case the upper bits are don't care), or wider (in which
+// case the result will be truncated as part of the operation).
struct RxSBGOperands {
RxSBGOperands(unsigned Op, SDValue N)
: Opcode(Op), BitSize(N.getValueType().getSizeInBits()),
@@ -279,18 +280,18 @@ class SystemZDAGToDAGISel : public SelectionDAGISel {
bool expandRxSBG(RxSBGOperands &RxSBG) const;
// Return an undefined value of type VT.
- SDValue getUNDEF(SDLoc DL, EVT VT) const;
+ SDValue getUNDEF(const SDLoc &DL, EVT VT) const;
// Convert N to VT, if it isn't already.
- SDValue convertTo(SDLoc DL, EVT VT, SDValue N) const;
+ SDValue convertTo(const SDLoc &DL, EVT VT, SDValue N) const;
// Try to implement AND or shift node N using RISBG with the zero flag set.
// Return true on success, otherwise return false.
- SDNode *tryRISBGZero(SDNode *N);
+ bool tryRISBGZero(SDNode *N);
// Try to use RISBG or Opcode to implement OR or XOR node N.
// Return true on success, otherwise return false.
- SDNode *tryRxSBG(SDNode *N, unsigned Opcode);
+ bool tryRxSBG(SDNode *N, unsigned Opcode);
// If Op0 is null, then Node is a constant that can be loaded using:
//
@@ -299,14 +300,14 @@ class SystemZDAGToDAGISel : public SelectionDAGISel {
// If Op0 is nonnull, then Node can be implemented using:
//
// (Opcode (Opcode Op0 UpperVal) LowerVal)
- SDNode *splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0,
- uint64_t UpperVal, uint64_t LowerVal);
+ void splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0,
+ uint64_t UpperVal, uint64_t LowerVal);
// Try to use gather instruction Opcode to implement vector insertion N.
- SDNode *tryGather(SDNode *N, unsigned Opcode);
+ bool tryGather(SDNode *N, unsigned Opcode);
// Try to use scatter instruction Opcode to implement store Store.
- SDNode *tryScatter(StoreSDNode *Store, unsigned Opcode);
+ bool tryScatter(StoreSDNode *Store, unsigned Opcode);
// Return true if Load and Store are loads and stores of the same size
// and are guaranteed not to overlap. Such operations can be implemented
@@ -343,7 +344,7 @@ public:
}
// Override SelectionDAGISel.
- SDNode *Select(SDNode *Node) override;
+ void Select(SDNode *Node) override;
bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
std::vector<SDValue> &OutOps) override;
@@ -554,6 +555,10 @@ bool SystemZDAGToDAGISel::selectAddress(SDValue Addr,
expandDisp(AM, true, SDValue(),
cast<ConstantSDNode>(Addr)->getSExtValue()))
;
+ // Also see if it's a bare ADJDYNALLOC.
+ else if (Addr.getOpcode() == SystemZISD::ADJDYNALLOC &&
+ expandAdjDynAlloc(AM, true, SDValue()))
+ ;
else
// Otherwise try expanding each component.
while (expandAddress(AM, true) ||
@@ -741,6 +746,16 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
SDValue N = RxSBG.Input;
unsigned Opcode = N.getOpcode();
switch (Opcode) {
+ case ISD::TRUNCATE: {
+ if (RxSBG.Opcode == SystemZ::RNSBG)
+ return false;
+ uint64_t BitSize = N.getValueType().getSizeInBits();
+ uint64_t Mask = allOnes(BitSize);
+ if (!refineRxSBGMask(RxSBG, Mask))
+ return false;
+ RxSBG.Input = N.getOperand(0);
+ return true;
+ }
case ISD::AND: {
if (RxSBG.Opcode == SystemZ::RNSBG)
return false;
@@ -888,12 +903,13 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
}
}
-SDValue SystemZDAGToDAGISel::getUNDEF(SDLoc DL, EVT VT) const {
+SDValue SystemZDAGToDAGISel::getUNDEF(const SDLoc &DL, EVT VT) const {
SDNode *N = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
return SDValue(N, 0);
}
-SDValue SystemZDAGToDAGISel::convertTo(SDLoc DL, EVT VT, SDValue N) const {
+SDValue SystemZDAGToDAGISel::convertTo(const SDLoc &DL, EVT VT,
+ SDValue N) const {
if (N.getValueType() == MVT::i32 && VT == MVT::i64)
return CurDAG->getTargetInsertSubreg(SystemZ::subreg_l32,
DL, VT, getUNDEF(DL, MVT::i64), N);
@@ -903,23 +919,27 @@ SDValue SystemZDAGToDAGISel::convertTo(SDLoc DL, EVT VT, SDValue N) const {
return N;
}
-SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
+bool SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
SDLoc DL(N);
EVT VT = N->getValueType(0);
if (!VT.isInteger() || VT.getSizeInBits() > 64)
- return nullptr;
+ return false;
RxSBGOperands RISBG(SystemZ::RISBG, SDValue(N, 0));
unsigned Count = 0;
while (expandRxSBG(RISBG))
- if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND)
+ // The widening or narrowing is expected to be free.
+ // Counting widening or narrowing as a saved operation will result in
+ // preferring an R*SBG over a simple shift/logical instruction.
+ if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND &&
+ RISBG.Input.getOpcode() != ISD::TRUNCATE)
Count += 1;
if (Count == 0)
- return nullptr;
+ return false;
if (Count == 1) {
// Prefer to use normal shift instructions over RISBG, since they can handle
// all cases and are sometimes shorter.
if (N->getOpcode() != ISD::AND)
- return nullptr;
+ return false;
// Prefer register extensions like LLC over RISBG. Also prefer to start
// out with normal ANDs if one instruction would be enough. We can convert
@@ -934,9 +954,10 @@ SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
if (MaskN->getZExtValue() != RISBG.Mask) {
SDValue NewMask = CurDAG->getConstant(RISBG.Mask, DL, VT);
N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), NewMask);
- return SelectCode(N);
+ SelectCode(N);
+ return true;
}
- return nullptr;
+ return false;
}
}
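
A standalone model of the value an RISBG-with-zero computes: rotate left,
then keep a single contiguous big-endian bit range [Start, End] (bit 0 is
the MSB). Wrap-around masks and the insert-into-live-operand form are
deliberately ignored; this is a sketch for intuition only:

#include <cassert>
#include <cstdint>

// (and (rotl Input, Rotate), Mask) with Mask covering big-endian bits
// Start..End, which is the pattern tryRISBGZero matches.
uint64_t risbgZero(uint64_t Input, unsigned Start, unsigned End,
                   unsigned Rotate) {
  assert(Start <= End && End < 64);
  unsigned N = Rotate % 64;
  uint64_t Rot = N ? (Input << N) | (Input >> (64 - N)) : Input;
  uint64_t Mask = (~0ULL >> Start) & (~0ULL << (63 - End));
  return Rot & Mask;
}

int main() {
  // Rotating left by 32 brings bits 16..31 to the bottom; the mask keeps
  // exactly those 16 bits.
  assert(risbgZero(0x0000ABCD00000000ULL, 48, 63, 32) == 0xABCD);
  return 0;
}
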
@@ -952,8 +973,11 @@ SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
}
SDValue In = convertTo(DL, VT, RISBG.Input);
- N = CurDAG->getMachineNode(OpCode, DL, VT, In);
- return convertTo(DL, VT, SDValue(N, 0)).getNode();
+ SDValue New = convertTo(
+ DL, VT, SDValue(CurDAG->getMachineNode(OpCode, DL, VT, In), 0));
+ ReplaceUses(N, New.getNode());
+ CurDAG->RemoveDeadNode(N);
+ return true;
}
unsigned Opcode = SystemZ::RISBG;
@@ -974,15 +998,18 @@ SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
CurDAG->getTargetConstant(RISBG.End | 128, DL, MVT::i32),
CurDAG->getTargetConstant(RISBG.Rotate, DL, MVT::i32)
};
- N = CurDAG->getMachineNode(Opcode, DL, OpcodeVT, Ops);
- return convertTo(DL, VT, SDValue(N, 0)).getNode();
+ SDValue New = convertTo(
+ DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, OpcodeVT, Ops), 0));
+ ReplaceUses(N, New.getNode());
+ CurDAG->RemoveDeadNode(N);
+ return true;
}
-SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
+bool SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
SDLoc DL(N);
EVT VT = N->getValueType(0);
if (!VT.isInteger() || VT.getSizeInBits() > 64)
- return nullptr;
+ return false;
// Try treating each operand of N as the second operand of the RxSBG
// and see which goes deepest.
RxSBGOperands RxSBG[] = {
@@ -992,12 +1019,16 @@ SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
unsigned Count[] = { 0, 0 };
for (unsigned I = 0; I < 2; ++I)
while (expandRxSBG(RxSBG[I]))
- if (RxSBG[I].Input.getOpcode() != ISD::ANY_EXTEND)
+ // The widening or narrowing is expected to be free.
+ // Counting widening or narrowing as a saved operation will result in
+ // preferring an R*SBG over a simple shift/logical instruction.
+ if (RxSBG[I].Input.getOpcode() != ISD::ANY_EXTEND &&
+ RxSBG[I].Input.getOpcode() != ISD::TRUNCATE)
Count[I] += 1;
// Do nothing if neither operand is suitable.
if (Count[0] == 0 && Count[1] == 0)
- return nullptr;
+ return false;
// Pick the deepest second operand.
unsigned I = Count[0] > Count[1] ? 0 : 1;
@@ -1007,7 +1038,7 @@ SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0)
if (auto *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
if (Load->getMemoryVT() == MVT::i8)
- return nullptr;
+ return false;
// See whether we can avoid an AND in the first operand by converting
// ROSBG to RISBG.
@@ -1025,47 +1056,70 @@ SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
CurDAG->getTargetConstant(RxSBG[I].End, DL, MVT::i32),
CurDAG->getTargetConstant(RxSBG[I].Rotate, DL, MVT::i32)
};
- N = CurDAG->getMachineNode(Opcode, DL, MVT::i64, Ops);
- return convertTo(DL, VT, SDValue(N, 0)).getNode();
+ SDValue New = convertTo(
+ DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, MVT::i64, Ops), 0));
+ ReplaceNode(N, New.getNode());
+ return true;
}
-SDNode *SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node,
- SDValue Op0, uint64_t UpperVal,
- uint64_t LowerVal) {
+void SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node,
+ SDValue Op0, uint64_t UpperVal,
+ uint64_t LowerVal) {
EVT VT = Node->getValueType(0);
SDLoc DL(Node);
SDValue Upper = CurDAG->getConstant(UpperVal, DL, VT);
if (Op0.getNode())
Upper = CurDAG->getNode(Opcode, DL, VT, Op0, Upper);
- Upper = SDValue(Select(Upper.getNode()), 0);
+
+ {
+ // When we haven't passed in Op0, Upper will be a constant. In order to
+ // prevent folding back to the large immediate in `Or = getNode(...)` we run
+ // SelectCode first and end up with an opaque machine node. This means that
+ // we need to use a handle to keep track of Upper in case it gets CSE'd by
+ // SelectCode.
+ //
+ // Note that in the case where Op0 is passed in we could just call
+ // SelectCode(Upper) later, along with the SelectCode(Or), and avoid needing
+ // the handle at all, but it's fine to do it here.
+ //
+ // TODO: This is a pretty hacky way to do this. Can we do something that
+ // doesn't require a two paragraph explanation?
+ HandleSDNode Handle(Upper);
+ SelectCode(Upper.getNode());
+ Upper = Handle.getValue();
+ }
SDValue Lower = CurDAG->getConstant(LowerVal, DL, VT);
SDValue Or = CurDAG->getNode(Opcode, DL, VT, Upper, Lower);
- return Or.getNode();
+
+ ReplaceUses(Node, Or.getNode());
+ CurDAG->RemoveDeadNode(Node);
+
+ SelectCode(Or.getNode());
}
-SDNode *SystemZDAGToDAGISel::tryGather(SDNode *N, unsigned Opcode) {
+bool SystemZDAGToDAGISel::tryGather(SDNode *N, unsigned Opcode) {
SDValue ElemV = N->getOperand(2);
auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
if (!ElemN)
- return 0;
+ return false;
unsigned Elem = ElemN->getZExtValue();
EVT VT = N->getValueType(0);
if (Elem >= VT.getVectorNumElements())
- return 0;
+ return false;
auto *Load = dyn_cast<LoadSDNode>(N->getOperand(1));
if (!Load || !Load->hasOneUse())
- return 0;
+ return false;
if (Load->getMemoryVT().getSizeInBits() !=
Load->getValueType(0).getSizeInBits())
- return 0;
+ return false;
SDValue Base, Disp, Index;
if (!selectBDVAddr12Only(Load->getBasePtr(), ElemV, Base, Disp, Index) ||
Index.getValueType() != VT.changeVectorElementTypeToInteger())
- return 0;
+ return false;
SDLoc DL(Load);
SDValue Ops[] = {
@@ -1074,39 +1128,41 @@ SDNode *SystemZDAGToDAGISel::tryGather(SDNode *N, unsigned Opcode) {
};
SDNode *Res = CurDAG->getMachineNode(Opcode, DL, VT, MVT::Other, Ops);
ReplaceUses(SDValue(Load, 1), SDValue(Res, 1));
- return Res;
+ ReplaceNode(N, Res);
+ return true;
}
-SDNode *SystemZDAGToDAGISel::tryScatter(StoreSDNode *Store, unsigned Opcode) {
+bool SystemZDAGToDAGISel::tryScatter(StoreSDNode *Store, unsigned Opcode) {
SDValue Value = Store->getValue();
if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
- return 0;
+ return false;
if (Store->getMemoryVT().getSizeInBits() !=
Value.getValueType().getSizeInBits())
- return 0;
+ return false;
SDValue ElemV = Value.getOperand(1);
auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
if (!ElemN)
- return 0;
+ return false;
SDValue Vec = Value.getOperand(0);
EVT VT = Vec.getValueType();
unsigned Elem = ElemN->getZExtValue();
if (Elem >= VT.getVectorNumElements())
- return 0;
+ return false;
SDValue Base, Disp, Index;
if (!selectBDVAddr12Only(Store->getBasePtr(), ElemV, Base, Disp, Index) ||
Index.getValueType() != VT.changeVectorElementTypeToInteger())
- return 0;
+ return false;
SDLoc DL(Store);
SDValue Ops[] = {
Vec, Base, Disp, Index, CurDAG->getTargetConstant(Elem, DL, MVT::i32),
Store->getChain()
};
- return CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops);
+ ReplaceNode(Store, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
+ return true;
}
bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
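
The splitting arithmetic relied on here and in Select() below, namely
Val - uint32_t(Val) for the upper piece and uint32_t(Val) for the lower one,
recombines exactly under OR. Checked standalone:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Val = 0x123456789ABCDEF0ULL; // both 32-bit halves nonzero
  uint64_t Lower = uint32_t(Val);       // LowerVal in the patch
  uint64_t Upper = Val - Lower;         // UpperVal: high half, in place
  assert((Upper | Lower) == Val);       // the OR opcode recombines them
  return 0;
}
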
@@ -1167,7 +1223,7 @@ bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
return !LoadA->isVolatile() && canUseBlockOperation(StoreA, LoadB);
}
-SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
+void SystemZDAGToDAGISel::Select(SDNode *Node) {
// Dump information about the Node being selected
DEBUG(errs() << "Selecting: "; Node->dump(CurDAG); errs() << "\n");
@@ -1175,43 +1231,47 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
if (Node->isMachineOpcode()) {
DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
- return nullptr;
+ return;
}
unsigned Opcode = Node->getOpcode();
- SDNode *ResNode = nullptr;
switch (Opcode) {
case ISD::OR:
if (Node->getOperand(1).getOpcode() != ISD::Constant)
- ResNode = tryRxSBG(Node, SystemZ::ROSBG);
+ if (tryRxSBG(Node, SystemZ::ROSBG))
+ return;
goto or_xor;
case ISD::XOR:
if (Node->getOperand(1).getOpcode() != ISD::Constant)
- ResNode = tryRxSBG(Node, SystemZ::RXSBG);
+ if (tryRxSBG(Node, SystemZ::RXSBG))
+ return;
// Fall through.
or_xor:
// If this is a 64-bit operation in which both 32-bit halves are nonzero,
// split the operation into two.
- if (!ResNode && Node->getValueType(0) == MVT::i64)
+ if (Node->getValueType(0) == MVT::i64)
if (auto *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
uint64_t Val = Op1->getZExtValue();
- if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val))
- Node = splitLargeImmediate(Opcode, Node, Node->getOperand(0),
- Val - uint32_t(Val), uint32_t(Val));
+ if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val)) {
+ splitLargeImmediate(Opcode, Node, Node->getOperand(0),
+ Val - uint32_t(Val), uint32_t(Val));
+ return;
+ }
}
break;
case ISD::AND:
if (Node->getOperand(1).getOpcode() != ISD::Constant)
- ResNode = tryRxSBG(Node, SystemZ::RNSBG);
+ if (tryRxSBG(Node, SystemZ::RNSBG))
+ return;
// Fall through.
case ISD::ROTL:
case ISD::SHL:
case ISD::SRL:
case ISD::ZERO_EXTEND:
- if (!ResNode)
- ResNode = tryRISBGZero(Node);
+ if (tryRISBGZero(Node))
+ return;
break;
case ISD::Constant:
@@ -1219,9 +1279,11 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
// LLIHF and LGFI, split it into two 32-bit pieces.
if (Node->getValueType(0) == MVT::i64) {
uint64_t Val = cast<ConstantSDNode>(Node)->getZExtValue();
- if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val) && !isInt<32>(Val))
- Node = splitLargeImmediate(ISD::OR, Node, SDValue(),
- Val - uint32_t(Val), uint32_t(Val));
+ if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val) && !isInt<32>(Val)) {
+ splitLargeImmediate(ISD::OR, Node, SDValue(), Val - uint32_t(Val),
+ uint32_t(Val));
+ return;
+ }
}
break;
@@ -1249,63 +1311,75 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
case ISD::INSERT_VECTOR_ELT: {
EVT VT = Node->getValueType(0);
unsigned ElemBitSize = VT.getVectorElementType().getSizeInBits();
- if (ElemBitSize == 32)
- ResNode = tryGather(Node, SystemZ::VGEF);
- else if (ElemBitSize == 64)
- ResNode = tryGather(Node, SystemZ::VGEG);
+ if (ElemBitSize == 32) {
+ if (tryGather(Node, SystemZ::VGEF))
+ return;
+ } else if (ElemBitSize == 64) {
+ if (tryGather(Node, SystemZ::VGEG))
+ return;
+ }
break;
}
case ISD::STORE: {
auto *Store = cast<StoreSDNode>(Node);
unsigned ElemBitSize = Store->getValue().getValueType().getSizeInBits();
- if (ElemBitSize == 32)
- ResNode = tryScatter(Store, SystemZ::VSCEF);
- else if (ElemBitSize == 64)
- ResNode = tryScatter(Store, SystemZ::VSCEG);
+ if (ElemBitSize == 32) {
+ if (tryScatter(Store, SystemZ::VSCEF))
+ return;
+ } else if (ElemBitSize == 64) {
+ if (tryScatter(Store, SystemZ::VSCEG))
+ return;
+ }
break;
}
}
- // Select the default instruction
- if (!ResNode)
- ResNode = SelectCode(Node);
-
- DEBUG(errs() << "=> ";
- if (ResNode == nullptr || ResNode == Node)
- Node->dump(CurDAG);
- else
- ResNode->dump(CurDAG);
- errs() << "\n";
- );
- return ResNode;
+ SelectCode(Node);
}
bool SystemZDAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op,
unsigned ConstraintID,
std::vector<SDValue> &OutOps) {
+ SystemZAddressingMode::AddrForm Form;
+ SystemZAddressingMode::DispRange DispRange;
+ SDValue Base, Disp, Index;
+
switch(ConstraintID) {
default:
llvm_unreachable("Unexpected asm memory constraint");
case InlineAsm::Constraint_i:
- case InlineAsm::Constraint_m:
case InlineAsm::Constraint_Q:
+ // Accept an address with a short displacement, but no index.
+ Form = SystemZAddressingMode::FormBD;
+ DispRange = SystemZAddressingMode::Disp12Only;
+ break;
case InlineAsm::Constraint_R:
+ // Accept an address with a short displacement and an index.
+ Form = SystemZAddressingMode::FormBDXNormal;
+ DispRange = SystemZAddressingMode::Disp12Only;
+ break;
case InlineAsm::Constraint_S:
+ // Accept an address with a long displacement, but no index.
+ Form = SystemZAddressingMode::FormBD;
+ DispRange = SystemZAddressingMode::Disp20Only;
+ break;
case InlineAsm::Constraint_T:
- // Accept addresses with short displacements, which are compatible
- // with Q, R, S and T. But keep the index operand for future expansion.
- SDValue Base, Disp, Index;
- if (selectBDXAddr(SystemZAddressingMode::FormBD,
- SystemZAddressingMode::Disp12Only,
- Op, Base, Disp, Index)) {
- OutOps.push_back(Base);
- OutOps.push_back(Disp);
- OutOps.push_back(Index);
- return false;
- }
+ case InlineAsm::Constraint_m:
+ // Accept an address with a long displacement and an index.
+ // m works the same as T, as this is the most general case.
+ Form = SystemZAddressingMode::FormBDXNormal;
+ DispRange = SystemZAddressingMode::Disp20Only;
break;
}
+
+ if (selectBDXAddr(Form, DispRange, Op, Base, Disp, Index)) {
+ OutOps.push_back(Base);
+ OutOps.push_back(Disp);
+ OutOps.push_back(Index);
+ return false;
+ }
+
return true;
}
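
With the constraints now distinguished, 'Q' only accepts a base register plus
a 12-bit displacement, while 'T' (and 'm') also allow a 20-bit signed
displacement and an index register. A usage sketch in GNU inline asm,
assuming an s390x compiler:

// LG takes a 20-bit displacement and an index register, so 'T' is the
// natural constraint for it; 'Q' would reject addresses that need either.
long loadLong(long *P) {
  long V;
  asm("lg %0, %1" : "=d"(V) : "T"(*P));
  return V;
}
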
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index b0a612764636..14991bbbd365 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -184,8 +184,6 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
// No special instructions for these.
setOperationAction(ISD::CTTZ, VT, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::ROTR, VT, Expand);
// Use *MUL_LOHI where possible instead of MULH*.
@@ -216,6 +214,11 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+
+ // Traps are legal, as we will convert them to "j .+2".
+ setOperationAction(ISD::TRAP, MVT::Other, Legal);
+
// z10 has instructions for signed but not unsigned FP conversion.
// Handle unsigned 32-bit types as signed 64-bit types.
if (!Subtarget.hasFPExtension()) {
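
Marking ISD::TRAP legal lets llvm.trap (for example from __builtin_trap)
select directly rather than expand to an abort libcall. Conceptually:

// Expected to select to the SystemZ trap idiom ("j .+2") described in the
// comment above, not to a call.
void fatal() { __builtin_trap(); }
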
@@ -253,6 +256,7 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
// We need to handle dynamic allocations specially because of the
// 160-byte area at the bottom of the stack.
setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
+ setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);
// Use custom expanders so that we can force the function to use
// a frame pointer.
@@ -310,8 +314,6 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::CTPOP, VT, Custom);
setOperationAction(ISD::CTTZ, VT, Legal);
setOperationAction(ISD::CTLZ, VT, Legal);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
// Convert a GPR scalar to a vector by inserting it into element 0.
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
@@ -437,6 +439,11 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::STORE);
setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
setTargetDAGCombine(ISD::FP_ROUND);
+ setTargetDAGCombine(ISD::BSWAP);
+ setTargetDAGCombine(ISD::SHL);
+ setTargetDAGCombine(ISD::SRA);
+ setTargetDAGCombine(ISD::SRL);
+ setTargetDAGCombine(ISD::ROTL);
// Handle intrinsics.
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
@@ -799,7 +806,7 @@ static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
-static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
+static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
CCValAssign &VA, SDValue Chain,
SDValue Value) {
// If the argument has been promoted from a smaller type, insert an
@@ -813,16 +820,12 @@ static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
if (VA.isExtInLoc())
Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
- else if (VA.getLocInfo() == CCValAssign::Indirect)
- Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
- MachinePointerInfo(), false, false, false, 0);
else if (VA.getLocInfo() == CCValAssign::BCvt) {
// If this is a short vector argument loaded from the stack,
// extend from i64 to full vector size and then bitcast.
assert(VA.getLocVT() == MVT::i64);
assert(VA.getValVT().isVector());
- Value = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i64,
- Value, DAG.getUNDEF(MVT::i64));
+ Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
} else
assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
@@ -832,7 +835,7 @@ static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA. Return a copy of Value converted to
// VA.getValVT(). The caller is responsible for handling indirect values.
-static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
+static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
CCValAssign &VA, SDValue Value) {
switch (VA.getLocInfo()) {
case CCValAssign::SExt:
@@ -856,11 +859,10 @@ static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
}
}
-SDValue SystemZTargetLowering::
-LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue SystemZTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -868,6 +870,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
MF.getInfo<SystemZMachineFunctionInfo>();
auto *TFL =
static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
// Detect unsupported vector argument types.
if (Subtarget.hasVector())
@@ -930,19 +933,34 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
// Create the SelectionDAG nodes corresponding to a load
// from this parameter. Unpromoted ints and floats are
// passed as right-justified 8-byte values.
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
DAG.getIntPtrConstant(4, DL));
ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
- MachinePointerInfo::getFixedStack(MF, FI), false,
- false, false, 0);
+ MachinePointerInfo::getFixedStack(MF, FI));
}
// Convert the value of the argument register into the value that's
// being passed.
- InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
+ if (VA.getLocInfo() == CCValAssign::Indirect) {
+ InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
+ MachinePointerInfo()));
+ // If the original argument was split (e.g. i128), we need
+ // to load all parts of it here (using the same address).
+ unsigned ArgIndex = Ins[I].OrigArgIndex;
+ assert (Ins[I].PartOffset == 0);
+ while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
+ CCValAssign &PartVA = ArgLocs[I + 1];
+ unsigned PartOffset = Ins[I + 1].PartOffset;
+ SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
+ DAG.getIntPtrConstant(PartOffset, DL));
+ InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
+ MachinePointerInfo()));
+ ++I;
+ }
+ } else
+ InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
}
if (IsVarArg) {
@@ -973,8 +991,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
&SystemZ::FP64BitRegClass);
SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
- MachinePointerInfo::getFixedStack(MF, FI),
- false, false, 0);
+ MachinePointerInfo::getFixedStack(MF, FI));
}
// Join the stores, which are independent of one another.
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
@@ -987,9 +1004,11 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
}
static bool canUseSiblingCall(const CCState &ArgCCInfo,
- SmallVectorImpl<CCValAssign> &ArgLocs) {
+ SmallVectorImpl<CCValAssign> &ArgLocs,
+ SmallVectorImpl<ISD::OutputArg> &Outs) {
// Punt if there are any indirect or stack arguments, or if the call
- // needs the call-saved argument register R6.
+ // needs the callee-saved argument register R6, or if the call uses
+ // the callee-saved register arguments SwiftSelf and SwiftError.
for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
CCValAssign &VA = ArgLocs[I];
if (VA.getLocInfo() == CCValAssign::Indirect)
@@ -999,6 +1018,8 @@ static bool canUseSiblingCall(const CCState &ArgCCInfo,
unsigned Reg = VA.getLocReg();
if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
return false;
+ if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
+ return false;
}
return true;
}
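
Because R6 doubles as the fifth argument GPR and a callee-saved register, a
tail call whose arguments reach R6, or that uses the callee-saved SwiftSelf
or SwiftError registers, cannot become a sibling call. For example (a
sketch):

long callee(long, long, long, long, long);

// The fifth integer argument lands in callee-saved R6, so this tail call
// is lowered as a normal call rather than a sibling call.
long caller(long A, long B, long C, long D, long E) {
  return callee(A, B, C, D, E);
}
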
@@ -1032,7 +1053,7 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
// We don't support GuaranteedTailCallOpt, only automatically-detected
// sibling calls.
- if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs))
+ if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
IsTailCall = false;
// Get a count of how many bytes are to be pushed on the stack.
@@ -1054,11 +1075,25 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
if (VA.getLocInfo() == CCValAssign::Indirect) {
// Store the argument in a stack slot and pass its address.
- SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
+ SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
- MemOpChains.push_back(DAG.getStore(
- Chain, DL, ArgValue, SpillSlot,
- MachinePointerInfo::getFixedStack(MF, FI), false, false, 0));
+ MemOpChains.push_back(
+ DAG.getStore(Chain, DL, ArgValue, SpillSlot,
+ MachinePointerInfo::getFixedStack(MF, FI)));
+ // If the original argument was split (e.g. i128), we need
+ // to store all parts of it here (and pass just one address).
+ unsigned ArgIndex = Outs[I].OrigArgIndex;
+ assert (Outs[I].PartOffset == 0);
+ while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
+ SDValue PartValue = OutVals[I + 1];
+ unsigned PartOffset = Outs[I + 1].PartOffset;
+ SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
+ DAG.getIntPtrConstant(PartOffset, DL));
+ MemOpChains.push_back(
+ DAG.getStore(Chain, DL, PartValue, Address,
+ MachinePointerInfo::getFixedStack(MF, FI)));
+ ++I;
+ }
ArgValue = SpillSlot;
} else
ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);
@@ -1080,9 +1115,8 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
DAG.getIntPtrConstant(Offset, DL));
// Emit the store.
- MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
- MachinePointerInfo(),
- false, false, 0));
+ MemOpChains.push_back(
+ DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
}
}
@@ -1180,17 +1214,23 @@ CanLowerReturn(CallingConv::ID CallConv,
if (Subtarget.hasVector())
VerifyVectorTypes(Outs);
+ // Special case that we cannot easily detect in RetCC_SystemZ since
+ // i128 is not a legal type.
+ for (auto &Out : Outs)
+ if (Out.ArgVT == MVT::i128)
+ return false;
+
SmallVector<CCValAssign, 16> RetLocs;
CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
}
SDValue
-SystemZTargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool IsVarArg,
+SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const {
+ const SDLoc &DL, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
// Detect unsupported vector return types.
@@ -1235,8 +1275,8 @@ SystemZTargetLowering::LowerReturn(SDValue Chain,
return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}
-SDValue SystemZTargetLowering::
-prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL, SelectionDAG &DAG) const {
+SDValue SystemZTargetLowering::prepareVolatileOrAtomicLoad(
+ SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const {
return DAG.getNode(SystemZISD::SERIALIZE, DL, MVT::Other, Chain);
}
@@ -1399,6 +1439,11 @@ static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) {
CCValid = SystemZ::CCMASK_VCMP;
return true;
+ case Intrinsic::s390_tdc:
+ Opcode = SystemZISD::TDC;
+ CCValid = SystemZ::CCMASK_TDC;
+ return true;
+
default:
return false;
}
@@ -1538,7 +1583,7 @@ static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
-static void adjustZeroCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
+static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
if (C.ICmpType == SystemZICMP::UnsignedOnly)
return;
@@ -1558,7 +1603,8 @@ static void adjustZeroCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
// adjust the operands as necessary.
-static void adjustSubwordCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
+static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
+ Comparison &C) {
// For us to make any changes, it must be a comparison between a single-use
// load and a constant.
if (!C.Op0.hasOneUse() ||
@@ -1614,11 +1660,10 @@ static void adjustSubwordCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
ISD::ZEXTLOAD);
if (C.Op0.getValueType() != MVT::i32 ||
Load->getExtensionType() != ExtType)
- C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
- Load->getChain(), Load->getBasePtr(),
- Load->getPointerInfo(), Load->getMemoryVT(),
- Load->isVolatile(), Load->isNonTemporal(),
- Load->isInvariant(), Load->getAlignment());
+ C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
+ Load->getBasePtr(), Load->getPointerInfo(),
+ Load->getMemoryVT(), Load->getAlignment(),
+ Load->getMemOperand()->getFlags());
// Make sure that the second operand is an i32 with the right value.
if (C.Op1.getValueType() != MVT::i32 ||
@@ -1719,7 +1764,8 @@ static unsigned reverseCCMask(unsigned CCMask) {
// Check whether C tests for equality between X and Y and whether X - Y
// or Y - X is also computed. In that case it's better to compare the
// result of the subtraction against zero.
-static void adjustForSubtraction(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
+static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL,
+ Comparison &C) {
if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
C.CCMask == SystemZ::CCMASK_CMP_NE) {
for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
@@ -1784,7 +1830,8 @@ static void adjustForLTGFR(Comparison &C) {
// If C compares the truncation of an extending load, try to compare
// the untruncated value instead. This exposes more opportunities to
// reuse CC.
-static void adjustICmpTruncate(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
+static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
+ Comparison &C) {
if (C.Op0.getOpcode() == ISD::TRUNCATE &&
C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
C.Op1.getOpcode() == ISD::Constant &&
@@ -1915,7 +1962,8 @@ static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
// See whether C can be implemented as a TEST UNDER MASK instruction.
// Update the arguments with the TM version if so.
-static void adjustForTestUnderMask(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
+static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,
+ Comparison &C) {
// Check that we have a comparison with a constant.
auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
if (!ConstOp1)
@@ -2036,7 +2084,7 @@ static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
// Decide how to implement a comparison of type Cond between CmpOp0 with CmpOp1.
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
- ISD::CondCode Cond, SDLoc DL) {
+ ISD::CondCode Cond, const SDLoc &DL) {
if (CmpOp1.getOpcode() == ISD::Constant) {
uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
unsigned Opcode, CCValid;
@@ -2089,7 +2137,7 @@ static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
}
// Emit the comparison instruction described by C.
-static SDValue emitCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
+static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
if (!C.Op1.getNode()) {
SDValue Op;
switch (C.Op0.getOpcode()) {
@@ -2119,9 +2167,9 @@ static SDValue emitCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
// Implement a 32-bit *MUL_LOHI operation by extending both operands to
// 64 bits. Extend is the extension type to use. Store the high part
// in Hi and the low part in Lo.
-static void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL,
- unsigned Extend, SDValue Op0, SDValue Op1,
- SDValue &Hi, SDValue &Lo) {
+static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
+ SDValue Op0, SDValue Op1, SDValue &Hi,
+ SDValue &Lo) {
Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
@@ -2136,10 +2184,9 @@ static void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL,
// Extend extends Op0 to a GR128, and Opcode performs the GR128 operation
// on the extended Op0 and (unextended) Op1. Store the even register result
// in Even and the odd register result in Odd.
-static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
- unsigned Extend, unsigned Opcode,
- SDValue Op0, SDValue Op1,
- SDValue &Even, SDValue &Odd) {
+static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
+ unsigned Extend, unsigned Opcode, SDValue Op0,
+ SDValue Op1, SDValue &Even, SDValue &Odd) {
SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0);
SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped,
SDValue(In128, 0), Op1);
@@ -2151,7 +2198,7 @@ static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
// Return an i32 value that is 1 if the CC value produced by Glue is
// in the mask CCMask and 0 otherwise. CC is known to have a value
// in CCValid, so other values can be ignored.
-static SDValue emitSETCC(SelectionDAG &DAG, SDLoc DL, SDValue Glue,
+static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue Glue,
unsigned CCValid, unsigned CCMask) {
IPMConversion Conversion = getIPMConversion(CCValid, CCMask);
SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
@@ -2220,7 +2267,7 @@ static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP,
// Return a v2f64 that contains the extended form of elements Start and Start+1
// of v4f32 value Op.
-static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, SDLoc DL,
+static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
SDValue Op) {
int Mask[] = { Start, -1, Start + 1, -1 };
Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask);
@@ -2229,7 +2276,7 @@ static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, SDLoc DL,
// Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
// producing a result of type VT.
-static SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode, SDLoc DL,
+static SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &DL,
EVT VT, SDValue CmpOp0, SDValue CmpOp1) {
// There is no hardware support for v4f32, so extend the vector into
// two v2f64s and compare those.
@@ -2247,7 +2294,7 @@ static SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode, SDLoc DL,
// Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
// an integer mask of type VT.
-static SDValue lowerVectorSETCC(SelectionDAG &DAG, SDLoc DL, EVT VT,
+static SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
ISD::CondCode CC, SDValue CmpOp0,
SDValue CmpOp1) {
bool IsFP = CmpOp0.getValueType().isFloatingPoint();
@@ -2342,7 +2389,7 @@ static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
}
// Return the absolute or negative absolute of Op; IsNegative decides which.
-static SDValue getAbsolute(SelectionDAG &DAG, SDLoc DL, SDValue Op,
+static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
bool IsNegative) {
Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
if (IsNegative)
@@ -2414,11 +2461,10 @@ SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
const GlobalValue *GV = Node->getGlobal();
int64_t Offset = Node->getOffset();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
- Reloc::Model RM = DAG.getTarget().getRelocationModel();
CodeModel::Model CM = DAG.getTarget().getCodeModel();
SDValue Result;
- if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
+ if (Subtarget.isPC32DBLSymbol(GV, CM)) {
// Assign anchors at 1<<12 byte boundaries.
uint64_t Anchor = Offset & ~uint64_t(0xfff);
Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
@@ -2435,8 +2481,7 @@ SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, false, false, 0);
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}
// If there was a non-zero offset that we didn't fold, create an explicit
@@ -2495,14 +2540,9 @@ SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue);
}
-SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
- SelectionDAG &DAG) const {
- if (DAG.getTarget().Options.EmulatedTLS)
- return LowerToTLSEmulatedModel(Node, DAG);
- SDLoc DL(Node);
- const GlobalValue *GV = Node->getGlobal();
+SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
+ SelectionDAG &DAG) const {
EVT PtrVT = getPointerTy(DAG.getDataLayout());
- TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
// The high part of the thread pointer is in access register 0.
SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
@@ -2517,7 +2557,19 @@ SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
// Merge them into a single 64-bit address.
SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
DAG.getConstant(32, DL, PtrVT));
- SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
+ return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
+}
+
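The two EXTRACT_ACCESS nodes read access registers a0 and a1; the shift and OR splice them into one 64-bit pointer. A user-level sketch of the same computation, using GNU inline asm and the `ear` (extract access register) instruction (illustrative helper, not part of the patch):

    // Compose the s390x thread pointer from access registers a0 (high
    // 32 bits) and a1 (low 32 bits), mirroring the DAG built above.
    static inline unsigned long getThreadPointer() {
      unsigned int Hi, Lo;
      asm("ear %0,%%a0" : "=d"(Hi));
      asm("ear %0,%%a1" : "=d"(Lo));
      return ((unsigned long)Hi << 32) | Lo;
    }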
+SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
+ SelectionDAG &DAG) const {
+ if (DAG.getTarget().Options.EmulatedTLS)
+ return LowerToTLSEmulatedModel(Node, DAG);
+ SDLoc DL(Node);
+ const GlobalValue *GV = Node->getGlobal();
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
+ TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
+
+ SDValue TP = lowerThreadPointer(DL, DAG);
// Get the offset of GA from the thread pointer, based on the TLS model.
SDValue Offset;
@@ -2530,8 +2582,7 @@ SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
Offset = DAG.getConstantPool(CPV, PtrVT, 8);
Offset = DAG.getLoad(
PtrVT, DL, DAG.getEntryNode(), Offset,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
- false, false, 0);
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
// Call __tls_get_offset to retrieve the offset.
Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset);
@@ -2546,8 +2597,7 @@ SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
Offset = DAG.getConstantPool(CPV, PtrVT, 8);
Offset = DAG.getLoad(
PtrVT, DL, DAG.getEntryNode(), Offset,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
- false, false, 0);
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
// Call __tls_get_offset to retrieve the module base offset.
Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset);
@@ -2565,8 +2615,7 @@ SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8);
DTPOffset = DAG.getLoad(
PtrVT, DL, DAG.getEntryNode(), DTPOffset,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
- false, false, 0);
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset);
break;
@@ -2577,9 +2626,9 @@ SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
SystemZII::MO_INDNTPOFF);
Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset);
- Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, false, false, 0);
+ Offset =
+ DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset,
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
break;
}
@@ -2591,8 +2640,7 @@ SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
Offset = DAG.getConstantPool(CPV, PtrVT, 8);
Offset = DAG.getLoad(
PtrVT, DL, DAG.getEntryNode(), Offset,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
- false, false, 0);
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
break;
}
}
@@ -2640,6 +2688,57 @@ SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}
+SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MFI->setFrameAddressIsTaken(true);
+
+ SDLoc DL(Op);
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
+
+ // If the back chain frame index has not been allocated yet, do so.
+ SystemZMachineFunctionInfo *FI = MF.getInfo<SystemZMachineFunctionInfo>();
+ int BackChainIdx = FI->getFramePointerSaveIndex();
+ if (!BackChainIdx) {
+ // By definition, the frame address is the address of the back chain.
+ BackChainIdx = MFI->CreateFixedObject(8, -SystemZMC::CallFrameSize, false);
+ FI->setFramePointerSaveIndex(BackChainIdx);
+ }
+ SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT);
+
+  // FIXME: The frontend should detect this case.
+  if (Depth > 0)
+    report_fatal_error("Unsupported stack frame traversal count");
+
+ return BackChain;
+}
+
+SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MFI->setReturnAddressIsTaken(true);
+
+ if (verifyReturnAddressArgumentIsConstant(Op, DAG))
+ return SDValue();
+
+ SDLoc DL(Op);
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
+
+  // FIXME: The frontend should detect this case.
+  if (Depth > 0)
+    report_fatal_error("Unsupported stack frame traversal count");
+
+ // Return R14D, which has the return address. Mark it an implicit live-in.
+ unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
+ return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
+}
+
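Both new lowerings only accept a traversal depth of zero, which corresponds to the usual builtin forms below; any deeper traversal takes the report_fatal_error path (illustrative usage, not from the patch):

    // Depth 0 is the only supported traversal count on SystemZ.
    void *frameAddr() { return __builtin_frame_address(0); }   // back chain slot
    void *returnAddr() { return __builtin_return_address(0); } // copied from %r14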
SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
@@ -2715,8 +2814,7 @@ SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
DAG.getIntPtrConstant(Offset, DL));
MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
- MachinePointerInfo(SV, Offset),
- false, false, 0);
+ MachinePointerInfo(SV, Offset));
Offset += 8;
}
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
@@ -2740,8 +2838,9 @@ SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
SDValue SystemZTargetLowering::
lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
- bool RealignOpt = !DAG.getMachineFunction().getFunction()->
- hasFnAttribute("no-realign-stack");
+ MachineFunction &MF = DAG.getMachineFunction();
+  bool RealignOpt = !MF.getFunction()->hasFnAttribute("no-realign-stack");
+ bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
@@ -2763,10 +2862,15 @@ lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
// Get a reference to the stack pointer.
SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
+ // If we need a backchain, save it now.
+ SDValue Backchain;
+ if (StoreBackchain)
+ Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
+
// Add extra space for alignment if needed.
if (ExtraAlignSpace)
NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
- DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
+ DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
// Get the new stack pointer value.
SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);
@@ -2790,10 +2894,20 @@ lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64));
}
+ if (StoreBackchain)
+ Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
+
SDValue Ops[2] = { Result, Chain };
return DAG.getMergeValues(Ops, DL);
}
+SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
+ SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+
+ return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
+}
+
SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
@@ -3031,6 +3145,27 @@ SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
return Op;
}
+SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
+ cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
+ SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
+ cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
+
+ // The only fence that needs an instruction is a sequentially-consistent
+ // cross-thread fence.
+ if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
+ FenceScope == CrossThread) {
+ return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
+ Op.getOperand(0)),
+ 0);
+ }
+
+ // MEMBARRIER is a compiler barrier; it codegens to a no-op.
+ return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
+}
+
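In source terms, only the strongest cross-thread fence costs an instruction; everything weaker falls through to the MEMBARRIER no-op. A small usage sketch (standard C++; the mapping comments reflect the logic above and are illustrative):

    #include <atomic>

    void fences() {
      // Sequentially consistent, cross-thread: lowered to SystemZ::Serialize.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      // Weaker ordering: lowered to SystemZISD::MEMBARRIER, which emits no code.
      std::atomic_thread_fence(std::memory_order_acquire);
      // Single-thread scope: also a MEMBARRIER no-op.
      std::atomic_signal_fence(std::memory_order_seq_cst);
    }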
// Op is an atomic load. Lower it into a normal volatile load.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
SelectionDAG &DAG) const {
@@ -3220,8 +3355,24 @@ SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
- return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op),
- SystemZ::R15D, Op.getOperand(1));
+ bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
+
+ SDValue Chain = Op.getOperand(0);
+ SDValue NewSP = Op.getOperand(1);
+ SDValue Backchain;
+ SDLoc DL(Op);
+
+ if (StoreBackchain) {
+ SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64);
+ Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
+ }
+
+ Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);
+
+ if (StoreBackchain)
+ Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
+
+ return Chain;
}
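lowerDYNAMIC_STACKALLOC above and this lowerSTACKRESTORE follow the same recipe when the function has the "backchain" attribute: read the back chain word through the old stack pointer, move the stack pointer, then write the word back through the new one. A pointer-level sketch of that invariant (names are illustrative):

    // The back chain word at offset 0 from the stack pointer must stay
    // valid across any change of the stack pointer.
    void moveStackPointer(char *&SP, char *NewSP) {
      void *Backchain = *(void **)SP; // load back chain via the old SP
      SP = NewSP;                     // adjust the stack pointer
      *(void **)NewSP = Backchain;    // re-store it via the new SP
    }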
SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
@@ -3286,6 +3437,9 @@ SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
switch (Id) {
+ case Intrinsic::thread_pointer:
+ return lowerThreadPointer(SDLoc(Op), DAG);
+
case Intrinsic::s390_vpdi:
return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(),
Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
@@ -3553,7 +3707,7 @@ static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes,
// Create a node that performs P on operands Op0 and Op1, casting the
// operands to the appropriate type. The type of the result is determined by P.
-static SDValue getPermuteNode(SelectionDAG &DAG, SDLoc DL,
+static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
const Permute &P, SDValue Op0, SDValue Op1) {
// VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input
// elements of a PACK are twice as wide as the outputs.
@@ -3582,7 +3736,8 @@ static SDValue getPermuteNode(SelectionDAG &DAG, SDLoc DL,
// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. Implement it on operands Ops[0] and Ops[1] using
// VSLDI or VPERM.
-static SDValue getGeneralPermuteNode(SelectionDAG &DAG, SDLoc DL, SDValue *Ops,
+static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
+ SDValue *Ops,
const SmallVectorImpl<int> &Bytes) {
for (unsigned I = 0; I < 2; ++I)
Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]);
@@ -3600,7 +3755,7 @@ static SDValue getGeneralPermuteNode(SelectionDAG &DAG, SDLoc DL, SDValue *Ops,
IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32);
else
IndexNodes[I] = DAG.getUNDEF(MVT::i32);
- SDValue Op2 = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, IndexNodes);
+ SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2);
}
@@ -3610,7 +3765,7 @@ struct GeneralShuffle {
GeneralShuffle(EVT vt) : VT(vt) {}
void addUndef();
void add(SDValue, unsigned);
- SDValue getNode(SelectionDAG &, SDLoc);
+ SDValue getNode(SelectionDAG &, const SDLoc &);
// The operands of the shuffle.
SmallVector<SDValue, SystemZ::VectorBytes> Ops;
@@ -3667,7 +3822,7 @@ void GeneralShuffle::add(SDValue Op, unsigned Elem) {
}
Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes);
Byte = unsigned(NewByte) % SystemZ::VectorBytes;
- } else if (Op.getOpcode() == ISD::UNDEF) {
+ } else if (Op.isUndef()) {
addUndef();
return;
} else
@@ -3689,7 +3844,7 @@ void GeneralShuffle::add(SDValue Op, unsigned Elem) {
}
// Return SDNodes for the completed shuffle.
-SDValue GeneralShuffle::getNode(SelectionDAG &DAG, SDLoc DL) {
+SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) {
assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector");
if (Ops.size() == 0)
@@ -3770,37 +3925,37 @@ SDValue GeneralShuffle::getNode(SelectionDAG &DAG, SDLoc DL) {
// Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
static bool isScalarToVector(SDValue Op) {
for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
- if (Op.getOperand(I).getOpcode() != ISD::UNDEF)
+ if (!Op.getOperand(I).isUndef())
return false;
return true;
}
// Return a vector of type VT that contains Value in the first element.
// The other elements don't matter.
-static SDValue buildScalarToVector(SelectionDAG &DAG, SDLoc DL, EVT VT,
+static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
SDValue Value) {
// If we have a constant, replicate it to all elements and let the
// BUILD_VECTOR lowering take care of it.
if (Value.getOpcode() == ISD::Constant ||
Value.getOpcode() == ISD::ConstantFP) {
SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value);
- return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
+ return DAG.getBuildVector(VT, DL, Ops);
}
- if (Value.getOpcode() == ISD::UNDEF)
+ if (Value.isUndef())
return DAG.getUNDEF(VT);
return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
}
// Return a vector of type VT in which Op0 is in element 0 and Op1 is in
// element 1. Used for cases in which replication is cheap.
-static SDValue buildMergeScalars(SelectionDAG &DAG, SDLoc DL, EVT VT,
+static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
SDValue Op0, SDValue Op1) {
- if (Op0.getOpcode() == ISD::UNDEF) {
- if (Op1.getOpcode() == ISD::UNDEF)
+ if (Op0.isUndef()) {
+ if (Op1.isUndef())
return DAG.getUNDEF(VT);
return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1);
}
- if (Op1.getOpcode() == ISD::UNDEF)
+ if (Op1.isUndef())
return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0);
return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT,
buildScalarToVector(DAG, DL, VT, Op0),
@@ -3809,15 +3964,15 @@ static SDValue buildMergeScalars(SelectionDAG &DAG, SDLoc DL, EVT VT,
// Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64
// vector for them.
-static SDValue joinDwords(SelectionDAG &DAG, SDLoc DL, SDValue Op0,
+static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0,
SDValue Op1) {
- if (Op0.getOpcode() == ISD::UNDEF && Op1.getOpcode() == ISD::UNDEF)
+ if (Op0.isUndef() && Op1.isUndef())
return DAG.getUNDEF(MVT::v2i64);
// If one of the two inputs is undefined then replicate the other one,
// in order to avoid using another register unnecessarily.
- if (Op0.getOpcode() == ISD::UNDEF)
+ if (Op0.isUndef())
Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
- else if (Op1.getOpcode() == ISD::UNDEF)
+ else if (Op1.isUndef())
Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
else {
Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
@@ -3834,7 +3989,7 @@ static bool tryBuildVectorByteMask(BuildVectorSDNode *BVN, uint64_t &Mask) {
unsigned BytesPerElement = ElemVT.getStoreSize();
for (unsigned I = 0, E = BVN->getNumOperands(); I != E; ++I) {
SDValue Op = BVN->getOperand(I);
- if (Op.getOpcode() != ISD::UNDEF) {
+ if (!Op.isUndef()) {
uint64_t Value;
if (Op.getOpcode() == ISD::Constant)
Value = dyn_cast<ConstantSDNode>(Op)->getZExtValue();
@@ -3862,7 +4017,7 @@ static bool tryBuildVectorByteMask(BuildVectorSDNode *BVN, uint64_t &Mask) {
// an empty value.
static SDValue tryBuildVectorReplicate(SelectionDAG &DAG,
const SystemZInstrInfo *TII,
- SDLoc DL, EVT VT, uint64_t Value,
+ const SDLoc &DL, EVT VT, uint64_t Value,
unsigned BitsPerElement) {
// Signed 16-bit values can be replicated using VREPI.
int64_t SignedValue = SignExtend64(Value, BitsPerElement);
@@ -3919,7 +4074,7 @@ static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
GS.add(Op.getOperand(0), Elem);
FoundOne = true;
- } else if (Op.getOpcode() == ISD::UNDEF) {
+ } else if (Op.isUndef()) {
GS.addUndef();
} else {
GS.add(SDValue(), ResidueOps.size());
@@ -3937,7 +4092,7 @@ static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType()));
for (auto &Op : GS.Ops) {
if (!Op.getNode()) {
- Op = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(BVN), VT, ResidueOps);
+ Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps);
break;
}
}
@@ -3946,14 +4101,14 @@ static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
}
// Combine GPR scalar values Elems into a vector of type VT.
-static SDValue buildVector(SelectionDAG &DAG, SDLoc DL, EVT VT,
+static SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
SmallVectorImpl<SDValue> &Elems) {
// See whether there is a single replicated value.
SDValue Single;
unsigned int NumElements = Elems.size();
unsigned int Count = 0;
for (auto Elem : Elems) {
- if (Elem.getOpcode() != ISD::UNDEF) {
+ if (!Elem.isUndef()) {
if (!Single.getNode())
Single = Elem;
else if (Elem != Single) {
@@ -3998,9 +4153,9 @@ static SDValue buildVector(SelectionDAG &DAG, SDLoc DL, EVT VT,
SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]);
// Avoid unnecessary undefs by reusing the other operand.
- if (Op01.getOpcode() == ISD::UNDEF)
+ if (Op01.isUndef())
Op01 = Op23;
- else if (Op23.getOpcode() == ISD::UNDEF)
+ else if (Op23.isUndef())
Op23 = Op01;
// Merging identical replications is a no-op.
if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
@@ -4034,7 +4189,7 @@ static SDValue buildVector(SelectionDAG &DAG, SDLoc DL, EVT VT,
for (unsigned I = 0; I < NumElements; ++I)
if (!Constants[I].getNode())
Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
- Result = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Constants);
+ Result = DAG.getBuildVector(VT, DL, Constants);
} else {
// Otherwise try to use VLVGP to start the sequence in order to
// avoid a false dependency on any previous contents of the vector
@@ -4042,8 +4197,8 @@ static SDValue buildVector(SelectionDAG &DAG, SDLoc DL, EVT VT,
// is defined.
unsigned I1 = NumElements / 2 - 1;
unsigned I2 = NumElements - 1;
- bool Def1 = (Elems[I1].getOpcode() != ISD::UNDEF);
- bool Def2 = (Elems[I2].getOpcode() != ISD::UNDEF);
+ bool Def1 = !Elems[I1].isUndef();
+ bool Def2 = !Elems[I2].isUndef();
if (Def1 || Def2) {
SDValue Elem1 = Elems[Def1 ? I1 : I2];
SDValue Elem2 = Elems[Def2 ? I2 : I1];
@@ -4057,7 +4212,7 @@ static SDValue buildVector(SelectionDAG &DAG, SDLoc DL, EVT VT,
// Use VLVGx to insert the other elements.
for (unsigned I = 0; I < NumElements; ++I)
- if (!Done[I] && Elems[I].getOpcode() != ISD::UNDEF)
+ if (!Done[I] && !Elems[I].isUndef())
Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I],
DAG.getConstant(I, DL, MVT::i32));
return Result;
@@ -4120,8 +4275,7 @@ SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op,
}
// See if we should use shuffles to construct the vector from other vectors.
- SDValue Res = tryBuildVectorShuffle(DAG, BVN);
- if (Res.getNode())
+ if (SDValue Res = tryBuildVectorShuffle(DAG, BVN))
return Res;
// Detect SCALAR_TO_VECTOR conversions.
@@ -4312,6 +4466,10 @@ SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG,
SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
+ case ISD::FRAMEADDR:
+ return lowerFRAMEADDR(Op, DAG);
+ case ISD::RETURNADDR:
+ return lowerRETURNADDR(Op, DAG);
case ISD::BR_CC:
return lowerBR_CC(Op, DAG);
case ISD::SELECT_CC:
@@ -4336,6 +4494,8 @@ SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
return lowerVACOPY(Op, DAG);
case ISD::DYNAMIC_STACKALLOC:
return lowerDYNAMIC_STACKALLOC(Op, DAG);
+ case ISD::GET_DYNAMIC_AREA_OFFSET:
+ return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
case ISD::SMUL_LOHI:
return lowerSMUL_LOHI(Op, DAG);
case ISD::UMUL_LOHI:
@@ -4348,12 +4508,8 @@ SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
return lowerOR(Op, DAG);
case ISD::CTPOP:
return lowerCTPOP(Op, DAG);
- case ISD::CTLZ_ZERO_UNDEF:
- return DAG.getNode(ISD::CTLZ, SDLoc(Op),
- Op.getValueType(), Op.getOperand(0));
- case ISD::CTTZ_ZERO_UNDEF:
- return DAG.getNode(ISD::CTTZ, SDLoc(Op),
- Op.getValueType(), Op.getOperand(0));
+ case ISD::ATOMIC_FENCE:
+ return lowerATOMIC_FENCE(Op, DAG);
case ISD::ATOMIC_SWAP:
return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
case ISD::ATOMIC_STORE:
@@ -4457,6 +4613,7 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
OPCODE(SEARCH_STRING);
OPCODE(IPM);
OPCODE(SERIALIZE);
+ OPCODE(MEMBARRIER);
OPCODE(TBEGIN);
OPCODE(TBEGIN_NOFLOAT);
OPCODE(TEND);
@@ -4506,6 +4663,7 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
OPCODE(VISTR_CC);
OPCODE(VSTRC_CC);
OPCODE(VSTRCZ_CC);
+ OPCODE(TDC);
OPCODE(ATOMIC_SWAPW);
OPCODE(ATOMIC_LOADW_ADD);
OPCODE(ATOMIC_LOADW_SUB);
@@ -4518,6 +4676,8 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
OPCODE(ATOMIC_LOADW_UMIN);
OPCODE(ATOMIC_LOADW_UMAX);
OPCODE(ATOMIC_CMP_SWAPW);
+ OPCODE(LRV);
+ OPCODE(STRV);
OPCODE(PREFETCH);
}
return nullptr;
@@ -4535,8 +4695,9 @@ static bool canTreatAsByteVector(EVT VT) {
// of the input vector and Index is the index (based on type VecVT) that
// should be extracted. Return the new extraction if a simplification
// was possible or if Force is true.
-SDValue SystemZTargetLowering::combineExtract(SDLoc DL, EVT ResVT, EVT VecVT,
- SDValue Op, unsigned Index,
+SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
+ EVT VecVT, SDValue Op,
+ unsigned Index,
DAGCombinerInfo &DCI,
bool Force) const {
SelectionDAG &DAG = DCI.DAG;
@@ -4639,9 +4800,8 @@ SDValue SystemZTargetLowering::combineExtract(SDLoc DL, EVT ResVT, EVT VecVT,
// Optimize vector operations in scalar value Op on the basis that Op
// is truncated to TruncVT.
-SDValue
-SystemZTargetLowering::combineTruncateExtract(SDLoc DL, EVT TruncVT, SDValue Op,
- DAGCombinerInfo &DCI) const {
+SDValue SystemZTargetLowering::combineTruncateExtract(
+ const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const {
// If we have (trunc (extract_vector_elt X, Y)), try to turn it into
// (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements
// of type TruncVT.
@@ -4675,145 +4835,295 @@ SystemZTargetLowering::combineTruncateExtract(SDLoc DL, EVT TruncVT, SDValue Op,
return SDValue();
}
-SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
- DAGCombinerInfo &DCI) const {
+SDValue SystemZTargetLowering::combineSIGN_EXTEND(
+ SDNode *N, DAGCombinerInfo &DCI) const {
+ // Convert (sext (ashr (shl X, C1), C2)) to
+ // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as
+ // cheap as narrower ones.
SelectionDAG &DAG = DCI.DAG;
- unsigned Opcode = N->getOpcode();
- if (Opcode == ISD::SIGN_EXTEND) {
- // Convert (sext (ashr (shl X, C1), C2)) to
- // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as
- // cheap as narrower ones.
- SDValue N0 = N->getOperand(0);
- EVT VT = N->getValueType(0);
- if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
- auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
- SDValue Inner = N0.getOperand(0);
- if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
- if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
- unsigned Extra = (VT.getSizeInBits() -
- N0.getValueType().getSizeInBits());
- unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
- unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
- EVT ShiftVT = N0.getOperand(1).getValueType();
- SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
- Inner.getOperand(0));
- SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
- DAG.getConstant(NewShlAmt, SDLoc(Inner),
- ShiftVT));
- return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
- DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT));
- }
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+ if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
+ auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+ SDValue Inner = N0.getOperand(0);
+ if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
+ if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
+ unsigned Extra = (VT.getSizeInBits() -
+ N0.getValueType().getSizeInBits());
+ unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
+ unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
+ EVT ShiftVT = N0.getOperand(1).getValueType();
+ SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
+ Inner.getOperand(0));
+ SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
+ DAG.getConstant(NewShlAmt, SDLoc(Inner),
+ ShiftVT));
+ return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
+ DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT));
}
}
}
- if (Opcode == SystemZISD::MERGE_HIGH ||
- Opcode == SystemZISD::MERGE_LOW) {
- SDValue Op0 = N->getOperand(0);
- SDValue Op1 = N->getOperand(1);
- if (Op0.getOpcode() == ISD::BITCAST)
- Op0 = Op0.getOperand(0);
- if (Op0.getOpcode() == SystemZISD::BYTE_MASK &&
- cast<ConstantSDNode>(Op0.getOperand(0))->getZExtValue() == 0) {
- // (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF
- // for v4f32.
- if (Op1 == N->getOperand(0))
- return Op1;
- // (z_merge_? 0, X) -> (z_unpackl_? 0, X).
- EVT VT = Op1.getValueType();
- unsigned ElemBytes = VT.getVectorElementType().getStoreSize();
- if (ElemBytes <= 4) {
- Opcode = (Opcode == SystemZISD::MERGE_HIGH ?
- SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
- EVT InVT = VT.changeVectorElementTypeToInteger();
- EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16),
- SystemZ::VectorBytes / ElemBytes / 2);
- if (VT != InVT) {
- Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1);
- DCI.AddToWorklist(Op1.getNode());
- }
- SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1);
- DCI.AddToWorklist(Op.getNode());
- return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
+ return SDValue();
+}
+
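A worked instance of this rewrite for an i32 sign-extended to i64 with C1 == C2 == 24: Extra is 32, so both new shift amounts become 56. The two C++ functions below compute the same value (illustrative only; unsigned left shifts avoid undefined behaviour in the sketch):

    #include <cstdint>

    // (sext (ashr (shl x, 24), 24)): narrow shifts, then widen.
    int64_t narrowForm(int32_t X) {
      return (int64_t)((int32_t)((uint32_t)X << 24) >> 24);
    }

    // (ashr (shl (anyext x), 56), 56): widen first, shift by 24 + 32.
    int64_t wideForm(int32_t X) {
      return (int64_t)((uint64_t)(uint32_t)X << 56) >> 56;
    }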
+SDValue SystemZTargetLowering::combineMERGE(
+ SDNode *N, DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ unsigned Opcode = N->getOpcode();
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+ if (Op0.getOpcode() == ISD::BITCAST)
+ Op0 = Op0.getOperand(0);
+ if (Op0.getOpcode() == SystemZISD::BYTE_MASK &&
+ cast<ConstantSDNode>(Op0.getOperand(0))->getZExtValue() == 0) {
+ // (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF
+ // for v4f32.
+ if (Op1 == N->getOperand(0))
+ return Op1;
+ // (z_merge_? 0, X) -> (z_unpackl_? 0, X).
+ EVT VT = Op1.getValueType();
+ unsigned ElemBytes = VT.getVectorElementType().getStoreSize();
+ if (ElemBytes <= 4) {
+ Opcode = (Opcode == SystemZISD::MERGE_HIGH ?
+ SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
+ EVT InVT = VT.changeVectorElementTypeToInteger();
+ EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16),
+ SystemZ::VectorBytes / ElemBytes / 2);
+ if (VT != InVT) {
+ Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1);
+ DCI.AddToWorklist(Op1.getNode());
}
+ SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1);
+ DCI.AddToWorklist(Op.getNode());
+ return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
}
}
+ return SDValue();
+}
+
+SDValue SystemZTargetLowering::combineSTORE(
+ SDNode *N, DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ auto *SN = cast<StoreSDNode>(N);
+ auto &Op1 = N->getOperand(1);
+ EVT MemVT = SN->getMemoryVT();
// If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better
// for the extraction to be done on a vMiN value, so that we can use VSTE.
// If X has wider elements then convert it to:
// (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z).
- if (Opcode == ISD::STORE) {
- auto *SN = cast<StoreSDNode>(N);
- EVT MemVT = SN->getMemoryVT();
- if (MemVT.isInteger()) {
- SDValue Value = combineTruncateExtract(SDLoc(N), MemVT,
- SN->getValue(), DCI);
- if (Value.getNode()) {
- DCI.AddToWorklist(Value.getNode());
-
- // Rewrite the store with the new form of stored value.
- return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value,
- SN->getBasePtr(), SN->getMemoryVT(),
- SN->getMemOperand());
- }
+ if (MemVT.isInteger()) {
+ if (SDValue Value =
+ combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
+ DCI.AddToWorklist(Value.getNode());
+
+ // Rewrite the store with the new form of stored value.
+ return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value,
+ SN->getBasePtr(), SN->getMemoryVT(),
+ SN->getMemOperand());
}
}
- // Try to simplify a vector extraction.
- if (Opcode == ISD::EXTRACT_VECTOR_ELT) {
- if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
- SDValue Op0 = N->getOperand(0);
- EVT VecVT = Op0.getValueType();
- return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
- IndexN->getZExtValue(), DCI, false);
+ // Combine STORE (BSWAP) into STRVH/STRV/STRVG
+ // See comment in combineBSWAP about volatile accesses.
+ if (!SN->isVolatile() &&
+ Op1.getOpcode() == ISD::BSWAP &&
+ Op1.getNode()->hasOneUse() &&
+ (Op1.getValueType() == MVT::i16 ||
+ Op1.getValueType() == MVT::i32 ||
+ Op1.getValueType() == MVT::i64)) {
+
+ SDValue BSwapOp = Op1.getOperand(0);
+
+ if (BSwapOp.getValueType() == MVT::i16)
+ BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp);
+
+ SDValue Ops[] = {
+ N->getOperand(0), BSwapOp, N->getOperand(2),
+ DAG.getValueType(Op1.getValueType())
+ };
+
+    return DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N),
+                                   DAG.getVTList(MVT::Other), Ops, MemVT,
+                                   SN->getMemOperand());
}
+ return SDValue();
+}
+
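This store-side combine pairs with the BSWAP (LOAD) combine below; together they fold explicit byte swaps around memory accesses into the byte-reversing access instructions. Source like the following should now select a single STRV rather than a swap plus store (hypothetical helper, not from the patch):

    #include <cstdint>

    // Expected to select STRV (store with reversed bytes).
    void storeSwapped32(uint32_t *P, uint32_t V) { *P = __builtin_bswap32(V); }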
+SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
+ SDNode *N, DAGCombinerInfo &DCI) const {
+ // Try to simplify a vector extraction.
+ if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
+ SDValue Op0 = N->getOperand(0);
+ EVT VecVT = Op0.getValueType();
+ return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
+ IndexN->getZExtValue(), DCI, false);
}
+ return SDValue();
+}
+
+SDValue SystemZTargetLowering::combineJOIN_DWORDS(
+ SDNode *N, DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
// (join_dwords X, X) == (replicate X)
- if (Opcode == SystemZISD::JOIN_DWORDS &&
- N->getOperand(0) == N->getOperand(1))
+ if (N->getOperand(0) == N->getOperand(1))
return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0),
N->getOperand(0));
+ return SDValue();
+}
+
+SDValue SystemZTargetLowering::combineFP_ROUND(
+ SDNode *N, DAGCombinerInfo &DCI) const {
// (fround (extract_vector_elt X 0))
// (fround (extract_vector_elt X 1)) ->
// (extract_vector_elt (VROUND X) 0)
// (extract_vector_elt (VROUND X) 1)
//
// This is a special case since the target doesn't really support v2f32s.
- if (Opcode == ISD::FP_ROUND) {
- SDValue Op0 = N->getOperand(0);
- if (N->getValueType(0) == MVT::f32 &&
- Op0.hasOneUse() &&
- Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
- Op0.getOperand(0).getValueType() == MVT::v2f64 &&
- Op0.getOperand(1).getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
- SDValue Vec = Op0.getOperand(0);
- for (auto *U : Vec->uses()) {
- if (U != Op0.getNode() &&
- U->hasOneUse() &&
- U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
- U->getOperand(0) == Vec &&
- U->getOperand(1).getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
- SDValue OtherRound = SDValue(*U->use_begin(), 0);
- if (OtherRound.getOpcode() == ISD::FP_ROUND &&
- OtherRound.getOperand(0) == SDValue(U, 0) &&
- OtherRound.getValueType() == MVT::f32) {
- SDValue VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N),
- MVT::v4f32, Vec);
- DCI.AddToWorklist(VRound.getNode());
- SDValue Extract1 =
- DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32,
- VRound, DAG.getConstant(2, SDLoc(U), MVT::i32));
- DCI.AddToWorklist(Extract1.getNode());
- DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1);
- SDValue Extract0 =
- DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32,
- VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
- return Extract0;
- }
+ SelectionDAG &DAG = DCI.DAG;
+ SDValue Op0 = N->getOperand(0);
+ if (N->getValueType(0) == MVT::f32 &&
+ Op0.hasOneUse() &&
+ Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+ Op0.getOperand(0).getValueType() == MVT::v2f64 &&
+ Op0.getOperand(1).getOpcode() == ISD::Constant &&
+ cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
+ SDValue Vec = Op0.getOperand(0);
+ for (auto *U : Vec->uses()) {
+ if (U != Op0.getNode() &&
+ U->hasOneUse() &&
+ U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+ U->getOperand(0) == Vec &&
+ U->getOperand(1).getOpcode() == ISD::Constant &&
+ cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
+ SDValue OtherRound = SDValue(*U->use_begin(), 0);
+ if (OtherRound.getOpcode() == ISD::FP_ROUND &&
+ OtherRound.getOperand(0) == SDValue(U, 0) &&
+ OtherRound.getValueType() == MVT::f32) {
+ SDValue VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N),
+ MVT::v4f32, Vec);
+ DCI.AddToWorklist(VRound.getNode());
+ SDValue Extract1 =
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32,
+ VRound, DAG.getConstant(2, SDLoc(U), MVT::i32));
+ DCI.AddToWorklist(Extract1.getNode());
+ DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1);
+ SDValue Extract0 =
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32,
+ VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
+ return Extract0;
+ }
+ }
+ }
+ }
+ return SDValue();
+}
+
+SDValue SystemZTargetLowering::combineBSWAP(
+ SDNode *N, DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ // Combine BSWAP (LOAD) into LRVH/LRV/LRVG
+  // These loads are allowed to access memory multiple times, so we must
+  // check that the loads are not volatile before performing the combine.
+ if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
+ N->getOperand(0).hasOneUse() &&
+ (N->getValueType(0) == MVT::i16 || N->getValueType(0) == MVT::i32 ||
+ N->getValueType(0) == MVT::i64) &&
+ !cast<LoadSDNode>(N->getOperand(0))->isVolatile()) {
+ SDValue Load = N->getOperand(0);
+ LoadSDNode *LD = cast<LoadSDNode>(Load);
+
+ // Create the byte-swapping load.
+ SDValue Ops[] = {
+ LD->getChain(), // Chain
+ LD->getBasePtr(), // Ptr
+ DAG.getValueType(N->getValueType(0)) // VT
+ };
+ SDValue BSLoad =
+ DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N),
+ DAG.getVTList(N->getValueType(0) == MVT::i64 ?
+ MVT::i64 : MVT::i32, MVT::Other),
+ Ops, LD->getMemoryVT(), LD->getMemOperand());
+
+ // If this is an i16 load, insert the truncate.
+ SDValue ResVal = BSLoad;
+ if (N->getValueType(0) == MVT::i16)
+ ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad);
+
+ // First, combine the bswap away. This makes the value produced by the
+ // load dead.
+ DCI.CombineTo(N, ResVal);
+
+ // Next, combine the load away, we give it a bogus result value but a real
+ // chain result. The result value is dead because the bswap is dead.
+ DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
+
+ // Return N so it doesn't get rechecked!
+ return SDValue(N, 0);
+ }
+ return SDValue();
+}
+
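The matching load side: a non-volatile load whose only use is a bswap becomes an LRVH/LRV/LRVG (hypothetical helper, not from the patch):

    #include <cstdint>

    // Expected to select LRV (load with reversed bytes); a volatile
    // pointer here would block the combine.
    uint32_t loadSwapped32(const uint32_t *P) { return __builtin_bswap32(*P); }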
+SDValue SystemZTargetLowering::combineSHIFTROT(
+ SDNode *N, DAGCombinerInfo &DCI) const {
+
+ SelectionDAG &DAG = DCI.DAG;
+
+ // Shift/rotate instructions only use the last 6 bits of the second operand
+ // register. If the second operand is the result of an AND with an immediate
+ // value that has its last 6 bits set, we can safely remove the AND operation.
+ SDValue N1 = N->getOperand(1);
+ if (N1.getOpcode() == ISD::AND) {
+ auto *AndMask = dyn_cast<ConstantSDNode>(N1.getOperand(1));
+
+ // The AND mask is constant
+ if (AndMask) {
+ auto AmtVal = AndMask->getZExtValue();
+
+ // Bottom 6 bits are set
+ if ((AmtVal & 0x3f) == 0x3f) {
+ SDValue AndOp = N1->getOperand(0);
+
+ // This is the only use, so remove the node
+ if (N1.hasOneUse()) {
+ // Combine the AND away
+ DCI.CombineTo(N1.getNode(), AndOp);
+
+ // Return N so it isn't rechecked
+ return SDValue(N, 0);
+
+        } else {
+          // The node will be reused, so create a new node for this one use.
+ SDValue Replace = DAG.getNode(N->getOpcode(), SDLoc(N),
+ N->getValueType(0), N->getOperand(0),
+ AndOp);
+ DCI.AddToWorklist(Replace.getNode());
+
+ return Replace;
}
}
}
}
+
+ return SDValue();
+}
+
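The combine is sound because the hardware already masks shift and rotate amounts to their low 6 bits, making a source-level `& 63` redundant. Both helpers below should therefore compile to the same shift instruction, with no separate AND for the first (illustrative; the second assumes N < 64 to avoid undefined behaviour in C++):

    #include <cstdint>

    uint64_t shiftMasked(uint64_t X, unsigned N) { return X << (N & 63); }
    uint64_t shiftPlain(uint64_t X, unsigned N) { return X << N; }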
+SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+  switch (N->getOpcode()) {
+ default: break;
+ case ISD::SIGN_EXTEND: return combineSIGN_EXTEND(N, DCI);
+ case SystemZISD::MERGE_HIGH:
+ case SystemZISD::MERGE_LOW: return combineMERGE(N, DCI);
+ case ISD::STORE: return combineSTORE(N, DCI);
+ case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI);
+ case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI);
+ case ISD::FP_ROUND: return combineFP_ROUND(N, DCI);
+ case ISD::BSWAP: return combineBSWAP(N, DCI);
+ case ISD::SHL:
+ case ISD::SRA:
+ case ISD::SRL:
+ case ISD::ROTL: return combineSHIFTROT(N, DCI);
+ }
+
return SDValue();
}
@@ -4831,7 +5141,7 @@ static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
// Split MBB after MI and return the new block (the one that contains
// instructions after MI).
-static MachineBasicBlock *splitBlockAfter(MachineInstr *MI,
+static MachineBasicBlock *splitBlockAfter(MachineBasicBlock::iterator MI,
MachineBasicBlock *MBB) {
MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
NewMBB->splice(NewMBB->begin(), MBB,
@@ -4841,7 +5151,7 @@ static MachineBasicBlock *splitBlockAfter(MachineInstr *MI,
}
// Split MBB before MI and return the new block (the one that contains MI).
-static MachineBasicBlock *splitBlockBefore(MachineInstr *MI,
+static MachineBasicBlock *splitBlockBefore(MachineBasicBlock::iterator MI,
MachineBasicBlock *MBB) {
MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
@@ -4850,34 +5160,36 @@ static MachineBasicBlock *splitBlockBefore(MachineInstr *MI,
}
// Force base value Base into a register before MI. Return the register.
-static unsigned forceReg(MachineInstr *MI, MachineOperand &Base,
+static unsigned forceReg(MachineInstr &MI, MachineOperand &Base,
const SystemZInstrInfo *TII) {
if (Base.isReg())
return Base.getReg();
- MachineBasicBlock *MBB = MI->getParent();
+ MachineBasicBlock *MBB = MI.getParent();
MachineFunction &MF = *MBB->getParent();
MachineRegisterInfo &MRI = MF.getRegInfo();
unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
- BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LA), Reg)
- .addOperand(Base).addImm(0).addReg(0);
+ BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
+ .addOperand(Base)
+ .addImm(0)
+ .addReg(0);
return Reg;
}
// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
MachineBasicBlock *
-SystemZTargetLowering::emitSelect(MachineInstr *MI,
+SystemZTargetLowering::emitSelect(MachineInstr &MI,
MachineBasicBlock *MBB) const {
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
- unsigned DestReg = MI->getOperand(0).getReg();
- unsigned TrueReg = MI->getOperand(1).getReg();
- unsigned FalseReg = MI->getOperand(2).getReg();
- unsigned CCValid = MI->getOperand(3).getImm();
- unsigned CCMask = MI->getOperand(4).getImm();
- DebugLoc DL = MI->getDebugLoc();
+ unsigned DestReg = MI.getOperand(0).getReg();
+ unsigned TrueReg = MI.getOperand(1).getReg();
+ unsigned FalseReg = MI.getOperand(2).getReg();
+ unsigned CCValid = MI.getOperand(3).getImm();
+ unsigned CCMask = MI.getOperand(4).getImm();
+ DebugLoc DL = MI.getDebugLoc();
MachineBasicBlock *StartMBB = MBB;
MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
@@ -4905,7 +5217,7 @@ SystemZTargetLowering::emitSelect(MachineInstr *MI,
.addReg(TrueReg).addMBB(StartMBB)
.addReg(FalseReg).addMBB(FalseMBB);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return JoinMBB;
}
@@ -4913,21 +5225,21 @@ SystemZTargetLowering::emitSelect(MachineInstr *MI,
// StoreOpcode is the store to use and Invert says whether the store should
// happen when the condition is false rather than true. If a STORE ON
// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
-MachineBasicBlock *
-SystemZTargetLowering::emitCondStore(MachineInstr *MI,
- MachineBasicBlock *MBB,
- unsigned StoreOpcode, unsigned STOCOpcode,
- bool Invert) const {
+MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
+ MachineBasicBlock *MBB,
+ unsigned StoreOpcode,
+ unsigned STOCOpcode,
+ bool Invert) const {
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
- unsigned SrcReg = MI->getOperand(0).getReg();
- MachineOperand Base = MI->getOperand(1);
- int64_t Disp = MI->getOperand(2).getImm();
- unsigned IndexReg = MI->getOperand(3).getReg();
- unsigned CCValid = MI->getOperand(4).getImm();
- unsigned CCMask = MI->getOperand(5).getImm();
- DebugLoc DL = MI->getDebugLoc();
+ unsigned SrcReg = MI.getOperand(0).getReg();
+ MachineOperand Base = MI.getOperand(1);
+ int64_t Disp = MI.getOperand(2).getImm();
+ unsigned IndexReg = MI.getOperand(3).getReg();
+ unsigned CCValid = MI.getOperand(4).getImm();
+ unsigned CCMask = MI.getOperand(5).getImm();
+ DebugLoc DL = MI.getDebugLoc();
StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
@@ -4940,7 +5252,7 @@ SystemZTargetLowering::emitCondStore(MachineInstr *MI,
BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
.addReg(SrcReg).addOperand(Base).addImm(Disp)
.addImm(CCValid).addImm(CCMask);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return MBB;
}
@@ -4969,7 +5281,7 @@ SystemZTargetLowering::emitCondStore(MachineInstr *MI,
.addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg);
MBB->addSuccessor(JoinMBB);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return JoinMBB;
}
@@ -4980,12 +5292,9 @@ SystemZTargetLowering::emitCondStore(MachineInstr *MI,
// ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
// is one of the operands. Invert says whether the field should be
// inverted after performing BinOpcode (e.g. for NAND).
-MachineBasicBlock *
-SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
- MachineBasicBlock *MBB,
- unsigned BinOpcode,
- unsigned BitSize,
- bool Invert) const {
+MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
+ MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode,
+ unsigned BitSize, bool Invert) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
@@ -4994,15 +5303,15 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
// Extract the operands. Base can be a register or a frame index.
// Src2 can be a register or immediate.
- unsigned Dest = MI->getOperand(0).getReg();
- MachineOperand Base = earlyUseOperand(MI->getOperand(1));
- int64_t Disp = MI->getOperand(2).getImm();
- MachineOperand Src2 = earlyUseOperand(MI->getOperand(3));
- unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
- unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
- DebugLoc DL = MI->getDebugLoc();
+ unsigned Dest = MI.getOperand(0).getReg();
+ MachineOperand Base = earlyUseOperand(MI.getOperand(1));
+ int64_t Disp = MI.getOperand(2).getImm();
+ MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
+ unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
+ unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
+ DebugLoc DL = MI.getDebugLoc();
if (IsSubWord)
- BitSize = MI->getOperand(6).getImm();
+ BitSize = MI.getOperand(6).getImm();
// Subword operations use 32-bit registers.
const TargetRegisterClass *RC = (BitSize <= 32 ?
@@ -5090,7 +5399,7 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
MBB->addSuccessor(LoopMBB);
MBB->addSuccessor(DoneMBB);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return DoneMBB;
}
@@ -5100,12 +5409,9 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
// minimum or maximum value. KeepOldMask is the BRC condition-code mask
// for when the current field should be kept. BitSize is the width of
// the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
-MachineBasicBlock *
-SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
- MachineBasicBlock *MBB,
- unsigned CompareOpcode,
- unsigned KeepOldMask,
- unsigned BitSize) const {
+MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
+ MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode,
+ unsigned KeepOldMask, unsigned BitSize) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
@@ -5113,15 +5419,15 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
bool IsSubWord = (BitSize < 32);
// Extract the operands. Base can be a register or a frame index.
- unsigned Dest = MI->getOperand(0).getReg();
- MachineOperand Base = earlyUseOperand(MI->getOperand(1));
- int64_t Disp = MI->getOperand(2).getImm();
- unsigned Src2 = MI->getOperand(3).getReg();
- unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
- unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
- DebugLoc DL = MI->getDebugLoc();
+ unsigned Dest = MI.getOperand(0).getReg();
+ MachineOperand Base = earlyUseOperand(MI.getOperand(1));
+ int64_t Disp = MI.getOperand(2).getImm();
+ unsigned Src2 = MI.getOperand(3).getReg();
+ unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
+ unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
+ DebugLoc DL = MI.getDebugLoc();
if (IsSubWord)
- BitSize = MI->getOperand(6).getImm();
+ BitSize = MI.getOperand(6).getImm();
// Subword operations use 32-bit registers.
const TargetRegisterClass *RC = (BitSize <= 32 ?
@@ -5209,30 +5515,31 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
MBB->addSuccessor(LoopMBB);
MBB->addSuccessor(DoneMBB);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return DoneMBB;
}
// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
// instruction MI.
MachineBasicBlock *
-SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
+SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
MachineBasicBlock *MBB) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
// Extract the operands. Base can be a register or a frame index.
- unsigned Dest = MI->getOperand(0).getReg();
- MachineOperand Base = earlyUseOperand(MI->getOperand(1));
- int64_t Disp = MI->getOperand(2).getImm();
- unsigned OrigCmpVal = MI->getOperand(3).getReg();
- unsigned OrigSwapVal = MI->getOperand(4).getReg();
- unsigned BitShift = MI->getOperand(5).getReg();
- unsigned NegBitShift = MI->getOperand(6).getReg();
- int64_t BitSize = MI->getOperand(7).getImm();
- DebugLoc DL = MI->getDebugLoc();
+ unsigned Dest = MI.getOperand(0).getReg();
+ MachineOperand Base = earlyUseOperand(MI.getOperand(1));
+ int64_t Disp = MI.getOperand(2).getImm();
+ unsigned OrigCmpVal = MI.getOperand(3).getReg();
+ unsigned OrigSwapVal = MI.getOperand(4).getReg();
+ unsigned BitShift = MI.getOperand(5).getReg();
+ unsigned NegBitShift = MI.getOperand(6).getReg();
+ int64_t BitSize = MI.getOperand(7).getImm();
+ DebugLoc DL = MI.getDebugLoc();
const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
@@ -5323,7 +5630,7 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
MBB->addSuccessor(LoopMBB);
MBB->addSuccessor(DoneMBB);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return DoneMBB;
}
@@ -5331,18 +5638,18 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
// if the high register of the GR128 value must be cleared or false if
// it's "don't care". SubReg is subreg_l32 when extending a GR32
// and subreg_l64 when extending a GR64.
-MachineBasicBlock *
-SystemZTargetLowering::emitExt128(MachineInstr *MI,
- MachineBasicBlock *MBB,
- bool ClearEven, unsigned SubReg) const {
+MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
+ MachineBasicBlock *MBB,
+ bool ClearEven,
+ unsigned SubReg) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
- unsigned Dest = MI->getOperand(0).getReg();
- unsigned Src = MI->getOperand(1).getReg();
+ unsigned Dest = MI.getOperand(0).getReg();
+ unsigned Src = MI.getOperand(1).getReg();
unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
@@ -5359,25 +5666,23 @@ SystemZTargetLowering::emitExt128(MachineInstr *MI,
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
.addReg(In128).addReg(Src).addImm(SubReg);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return MBB;
}
-MachineBasicBlock *
-SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
- MachineBasicBlock *MBB,
- unsigned Opcode) const {
+MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(
+ MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
- MachineOperand DestBase = earlyUseOperand(MI->getOperand(0));
- uint64_t DestDisp = MI->getOperand(1).getImm();
- MachineOperand SrcBase = earlyUseOperand(MI->getOperand(2));
- uint64_t SrcDisp = MI->getOperand(3).getImm();
- uint64_t Length = MI->getOperand(4).getImm();
+ MachineOperand DestBase = earlyUseOperand(MI.getOperand(0));
+ uint64_t DestDisp = MI.getOperand(1).getImm();
+ MachineOperand SrcBase = earlyUseOperand(MI.getOperand(2));
+ uint64_t SrcDisp = MI.getOperand(3).getImm();
+ uint64_t Length = MI.getOperand(4).getImm();
// When generating more than one CLC, all but the last will need to
// branch to the end when a difference is found.
@@ -5385,10 +5690,10 @@ SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
splitBlockAfter(MI, MBB) : nullptr);
// Check for the loop form, in which operand 5 is the trip count.
- if (MI->getNumExplicitOperands() > 5) {
+ if (MI.getNumExplicitOperands() > 5) {
bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);
- uint64_t StartCountReg = MI->getOperand(5).getReg();
+ uint64_t StartCountReg = MI.getOperand(5).getReg();
uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
forceReg(MI, DestBase, TII));
@@ -5491,15 +5796,19 @@ SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
// Apply them using LAY if so.
if (!isUInt<12>(DestDisp)) {
unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
- BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
- .addOperand(DestBase).addImm(DestDisp).addReg(0);
+ BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
+ .addOperand(DestBase)
+ .addImm(DestDisp)
+ .addReg(0);
DestBase = MachineOperand::CreateReg(Reg, false);
DestDisp = 0;
}
if (!isUInt<12>(SrcDisp)) {
unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
- BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
- .addOperand(SrcBase).addImm(SrcDisp).addReg(0);
+ BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
+ .addOperand(SrcBase)
+ .addImm(SrcDisp)
+ .addReg(0);
SrcBase = MachineOperand::CreateReg(Reg, false);
SrcDisp = 0;
}
@@ -5527,26 +5836,24 @@ SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
MBB->addLiveIn(SystemZ::CC);
}
- MI->eraseFromParent();
+ MI.eraseFromParent();
return MBB;
}
// Decompose string pseudo-instruction MI into a loop that continually performs
// Opcode until CC != 3.
-MachineBasicBlock *
-SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
- MachineBasicBlock *MBB,
- unsigned Opcode) const {
+MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
+ MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
MachineFunction &MF = *MBB->getParent();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
- uint64_t End1Reg = MI->getOperand(0).getReg();
- uint64_t Start1Reg = MI->getOperand(1).getReg();
- uint64_t Start2Reg = MI->getOperand(2).getReg();
- uint64_t CharReg = MI->getOperand(3).getReg();
+ uint64_t End1Reg = MI.getOperand(0).getReg();
+ uint64_t Start1Reg = MI.getOperand(1).getReg();
+ uint64_t Start2Reg = MI.getOperand(2).getReg();
+ uint64_t CharReg = MI.getOperand(3).getReg();
const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
uint64_t This1Reg = MRI.createVirtualRegister(RC);
@@ -5589,26 +5896,24 @@ SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
DoneMBB->addLiveIn(SystemZ::CC);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return DoneMBB;
}
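
The loop emitStringWrapper builds can be read as a retry loop: each issue of the string instruction processes a CPU-determined amount of data and reports CC 3 when it stopped early. A hedged scalar sketch, with runToCompletion and the CC enum as illustrative names:

    // Reissue one bounded string-op step until it reports completion,
    // mirroring "repeat Opcode while CC == 3".
    enum class CC { Done = 1, NotDone = 3 };

    template <typename Op> void runToCompletion(Op StringOpStep) {
      CC Cond;
      do {
        Cond = StringOpStep();   // one MVST/CLST/SRST-style step
      } while (Cond == CC::NotDone);
    }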
// Update TBEGIN instruction with final opcode and register clobbers.
-MachineBasicBlock *
-SystemZTargetLowering::emitTransactionBegin(MachineInstr *MI,
- MachineBasicBlock *MBB,
- unsigned Opcode,
- bool NoFloat) const {
+MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
+ MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode,
+ bool NoFloat) const {
MachineFunction &MF = *MBB->getParent();
const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
// Update opcode.
- MI->setDesc(TII->get(Opcode));
+ MI.setDesc(TII->get(Opcode));
// We cannot handle a TBEGIN that clobbers the stack or frame pointer.
// Make sure to add the corresponding GRSM bits if they are missing.
- uint64_t Control = MI->getOperand(2).getImm();
+ uint64_t Control = MI.getOperand(2).getImm();
static const unsigned GPRControlBit[16] = {
0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
@@ -5616,13 +5921,13 @@ SystemZTargetLowering::emitTransactionBegin(MachineInstr *MI,
Control |= GPRControlBit[15];
if (TFI->hasFP(MF))
Control |= GPRControlBit[11];
- MI->getOperand(2).setImm(Control);
+ MI.getOperand(2).setImm(Control);
// Add GPR clobbers.
for (int I = 0; I < 16; I++) {
if ((Control & GPRControlBit[I]) == 0) {
unsigned Reg = SystemZMC::GR64Regs[I];
- MI->addOperand(MachineOperand::CreateReg(Reg, true, true));
+ MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
}
}
@@ -5631,12 +5936,12 @@ SystemZTargetLowering::emitTransactionBegin(MachineInstr *MI,
if (Subtarget.hasVector()) {
for (int I = 0; I < 32; I++) {
unsigned Reg = SystemZMC::VR128Regs[I];
- MI->addOperand(MachineOperand::CreateReg(Reg, true, true));
+ MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
}
} else {
for (int I = 0; I < 16; I++) {
unsigned Reg = SystemZMC::FP64Regs[I];
- MI->addOperand(MachineOperand::CreateReg(Reg, true, true));
+ MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
}
}
}
@@ -5644,17 +5949,15 @@ SystemZTargetLowering::emitTransactionBegin(MachineInstr *MI,
return MBB;
}
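
The clobber loops above follow directly from the GRSM semantics: a set bit means the register pair is saved and restored on abort, a clear bit means its contents are unpredictable afterwards, so the instruction must list those registers as clobbered defs. A standalone sketch of that mapping, reusing the same bit table (clobberedGPRs is an illustrative name):

    #include <cstdint>
    #include <vector>

    // Registers whose GRSM pair bit is clear are not restored on
    // transaction abort, so they are clobbered across TBEGIN.
    std::vector<int> clobberedGPRs(uint16_t Control) {
      static const uint16_t GPRControlBit[16] = {
          0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
          0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100};
      std::vector<int> Regs;
      for (int I = 0; I < 16; ++I)
        if ((Control & GPRControlBit[I]) == 0)
          Regs.push_back(I);   // %rI is clobbered
      return Regs;
    }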
-MachineBasicBlock *
-SystemZTargetLowering::emitLoadAndTestCmp0(MachineInstr *MI,
- MachineBasicBlock *MBB,
- unsigned Opcode) const {
+MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0(
+ MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
MachineFunction &MF = *MBB->getParent();
MachineRegisterInfo *MRI = &MF.getRegInfo();
const SystemZInstrInfo *TII =
static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
- unsigned SrcReg = MI->getOperand(0).getReg();
+ unsigned SrcReg = MI.getOperand(0).getReg();
// Create new virtual register of the same class as source.
const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
@@ -5664,14 +5967,14 @@ SystemZTargetLowering::emitLoadAndTestCmp0(MachineInstr *MI,
// well.
BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg)
.addReg(SrcReg);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return MBB;
}
-MachineBasicBlock *SystemZTargetLowering::
-EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
- switch (MI->getOpcode()) {
+MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
+ MachineInstr &MI, MachineBasicBlock *MBB) const {
+ switch (MI.getOpcode()) {
case SystemZ::Select32Mux:
case SystemZ::Select32:
case SystemZ::SelectF32:
diff --git a/lib/Target/SystemZ/SystemZISelLowering.h b/lib/Target/SystemZ/SystemZISelLowering.h
index 391636e5467f..b1de8936beed 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/lib/Target/SystemZ/SystemZISelLowering.h
@@ -146,6 +146,9 @@ enum NodeType : unsigned {
// Perform a serialization operation. (BCR 15,0 or BCR 14,0.)
SERIALIZE,
+ // Compiler barrier only; generate a no-op.
+ MEMBARRIER,
+
// Transaction begin. The first operand is the chain, the second
// the TDB pointer, and the third the immediate control field.
// Returns chain and glue.
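
MEMBARRIER constrains the compiler without emitting a machine instruction. The closest portable analogue, assuming that reading of the comment, is a signal fence, which orders surrounding accesses for the compiler but typically compiles to nothing:

    #include <atomic>

    int Data, Flag;

    void publish() {
      Data = 42;
      // Keeps the compiler from reordering the two stores, but emits no
      // instruction -- the role MEMBARRIER plays in the DAG.
      std::atomic_signal_fence(std::memory_order_seq_cst);
      Flag = 1;
    }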
@@ -275,6 +278,12 @@ enum NodeType : unsigned {
VSTRC_CC,
VSTRCZ_CC,
+ // Test Data Class.
+ //
+ // Operand 0: the value to test
+ // Operand 1: the bit mask
+ TDC,
+
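
As a rough model of TDC: every value belongs to exactly one class (zero, normal, subnormal, infinity, NaN, each split by sign), and the instruction tests that class's bit against the mask operand. A standalone sketch; the bit numbering below is illustrative and does not reproduce the architected TDC mask layout:

    #include <cmath>
    #include <cstdint>

    // Derive a one-hot class bit for V, then test it against Mask.
    bool testDataClass(double V, uint32_t Mask) {
      int Base;
      switch (std::fpclassify(V)) {
      case FP_ZERO:      Base = 0; break;
      case FP_NORMAL:    Base = 2; break;
      case FP_SUBNORMAL: Base = 4; break;
      case FP_INFINITE:  Base = 6; break;
      default:           Base = 8; break;   // NaN
      }
      uint32_t ClassBit = 1u << (Base + (std::signbit(V) ? 1 : 0));
      return (Mask & ClassBit) != 0;   // TDC sets CC from this test
    }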
// Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or
// ATOMIC_LOAD_<op>.
//
@@ -308,6 +317,19 @@ enum NodeType : unsigned {
// Operand 5: the width of the field in bits (8 or 16)
ATOMIC_CMP_SWAPW,
+ // Byte swapping load.
+ //
+ // Operand 0: the address to load from
+ // Operand 1: the type of load (i16, i32, i64)
+ LRV,
+
+ // Byte swapping store.
+ //
+ // Operand 0: the value to store
+ // Operand 1: the address to store to
+ // Operand 2: the type of store (i16, i32, i64)
+ STRV,
+
// Prefetch from the second operand using the 4-bit control code in
// the first operand. The code is 1 for a load prefetch and 2 for
// a store prefetch.
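
LRV and STRV fold the byte reversal into the memory access, so a bswap-of-load (or store-of-bswap) pattern becomes a single instruction. A portable sketch of the load semantics, with the manual swap standing in for the fused operation:

    #include <cstdint>
    #include <cstring>

    // Semantics of a byte-swapping i32 load: load, then reverse bytes.
    uint32_t loadReversed32(const void *P) {
      uint32_t V;
      std::memcpy(&V, P, sizeof V);
      return (V & 0x000000FFu) << 24 | (V & 0x0000FF00u) << 8 |
             (V & 0x00FF0000u) >> 8  | (V & 0xFF000000u) >> 24;
    }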
@@ -423,16 +445,23 @@ public:
return SystemZ::R7D;
}
- MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB) const
- override;
+ /// Override to support customized stack guard loading.
+ bool useLoadStackGuardNode() const override {
+ return true;
+ }
+ void insertSSPDeclarations(Module &M) const override {
+ }
+
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *BB) const override;
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
bool allowTruncateForTailCall(Type *, Type *) const override;
bool mayBeEmittedAsTailCall(CallInst *CI) const override;
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc DL, SelectionDAG &DAG,
+ const SDLoc &DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
@@ -443,12 +472,20 @@ public:
LLVMContext &Context) const override;
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const override;
- SDValue prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL,
+ const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
+ SelectionDAG &DAG) const override;
+ SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
SelectionDAG &DAG) const override;
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
+ ISD::NodeType getExtendForAtomicOps() const override {
+ return ISD::ANY_EXTEND;
+ }
+
+ bool supportSwiftError() const override {
+ return true;
+ }
+
private:
const SystemZSubtarget &Subtarget;
@@ -461,15 +498,19 @@ private:
SDValue lowerTLSGetOffset(GlobalAddressSDNode *Node,
SelectionDAG &DAG, unsigned Opcode,
SDValue GOTOffset) const;
+ SDValue lowerThreadPointer(const SDLoc &DL, SelectionDAG &DAG) const;
SDValue lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
SelectionDAG &DAG) const;
SDValue lowerBlockAddress(BlockAddressSDNode *Node,
SelectionDAG &DAG) const;
SDValue lowerJumpTable(JumpTableSDNode *JT, SelectionDAG &DAG) const;
SDValue lowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const;
+ SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
@@ -477,6 +518,7 @@ private:
SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerOR(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerATOMIC_LOAD_OP(SDValue Op, SelectionDAG &DAG,
@@ -498,11 +540,19 @@ private:
unsigned UnpackHigh) const;
SDValue lowerShift(SDValue Op, SelectionDAG &DAG, unsigned ByScalar) const;
- SDValue combineExtract(SDLoc DL, EVT ElemVT, EVT VecVT, SDValue OrigOp,
+ SDValue combineExtract(const SDLoc &DL, EVT ElemVT, EVT VecVT, SDValue OrigOp,
unsigned Index, DAGCombinerInfo &DCI,
bool Force) const;
- SDValue combineTruncateExtract(SDLoc DL, EVT TruncVT, SDValue Op,
+ SDValue combineTruncateExtract(const SDLoc &DL, EVT TruncVT, SDValue Op,
DAGCombinerInfo &DCI) const;
+ SDValue combineSIGN_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue combineMERGE(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue combineSTORE(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue combineEXTRACT_VECTOR_ELT(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue combineJOIN_DWORDS(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue combineFP_ROUND(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue combineBSWAP(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue combineSHIFTROT(SDNode *N, DAGCombinerInfo &DCI) const;
// If the last instruction before MBBI in MBB was some form of COMPARE,
// try to replace it with a COMPARE AND BRANCH just before MBBI.
@@ -514,40 +564,33 @@ private:
MachineBasicBlock *Target) const;
// Implement EmitInstrWithCustomInserter for individual operation types.
- MachineBasicBlock *emitSelect(MachineInstr *MI,
- MachineBasicBlock *BB) const;
- MachineBasicBlock *emitCondStore(MachineInstr *MI,
- MachineBasicBlock *BB,
+ MachineBasicBlock *emitSelect(MachineInstr &MI, MachineBasicBlock *BB) const;
+ MachineBasicBlock *emitCondStore(MachineInstr &MI, MachineBasicBlock *BB,
unsigned StoreOpcode, unsigned STOCOpcode,
bool Invert) const;
- MachineBasicBlock *emitExt128(MachineInstr *MI,
- MachineBasicBlock *MBB,
+ MachineBasicBlock *emitExt128(MachineInstr &MI, MachineBasicBlock *MBB,
bool ClearEven, unsigned SubReg) const;
- MachineBasicBlock *emitAtomicLoadBinary(MachineInstr *MI,
+ MachineBasicBlock *emitAtomicLoadBinary(MachineInstr &MI,
MachineBasicBlock *BB,
unsigned BinOpcode, unsigned BitSize,
bool Invert = false) const;
- MachineBasicBlock *emitAtomicLoadMinMax(MachineInstr *MI,
+ MachineBasicBlock *emitAtomicLoadMinMax(MachineInstr &MI,
MachineBasicBlock *MBB,
unsigned CompareOpcode,
unsigned KeepOldMask,
unsigned BitSize) const;
- MachineBasicBlock *emitAtomicCmpSwapW(MachineInstr *MI,
+ MachineBasicBlock *emitAtomicCmpSwapW(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *emitMemMemWrapper(MachineInstr *MI,
- MachineBasicBlock *BB,
+ MachineBasicBlock *emitMemMemWrapper(MachineInstr &MI, MachineBasicBlock *BB,
unsigned Opcode) const;
- MachineBasicBlock *emitStringWrapper(MachineInstr *MI,
- MachineBasicBlock *BB,
+ MachineBasicBlock *emitStringWrapper(MachineInstr &MI, MachineBasicBlock *BB,
unsigned Opcode) const;
- MachineBasicBlock *emitTransactionBegin(MachineInstr *MI,
+ MachineBasicBlock *emitTransactionBegin(MachineInstr &MI,
MachineBasicBlock *MBB,
- unsigned Opcode,
- bool NoFloat) const;
- MachineBasicBlock *emitLoadAndTestCmp0(MachineInstr *MI,
+ unsigned Opcode, bool NoFloat) const;
+ MachineBasicBlock *emitLoadAndTestCmp0(MachineInstr &MI,
MachineBasicBlock *MBB,
unsigned Opcode) const;
-
};
} // end namespace llvm
diff --git a/lib/Target/SystemZ/SystemZInstrBuilder.h b/lib/Target/SystemZ/SystemZInstrBuilder.h
index 5a1c874dfa36..2cb8aba1b322 100644
--- a/lib/Target/SystemZ/SystemZInstrBuilder.h
+++ b/lib/Target/SystemZ/SystemZInstrBuilder.h
@@ -29,7 +29,7 @@ addFrameReference(const MachineInstrBuilder &MIB, int FI) {
MachineFunction &MF = *MI->getParent()->getParent();
MachineFrameInfo *MFFrame = MF.getFrameInfo();
const MCInstrDesc &MCID = MI->getDesc();
- unsigned Flags = 0;
+ auto Flags = MachineMemOperand::MONone;
if (MCID.mayLoad())
Flags |= MachineMemOperand::MOLoad;
if (MCID.mayStore())
diff --git a/lib/Target/SystemZ/SystemZInstrFP.td b/lib/Target/SystemZ/SystemZInstrFP.td
index 0cb267290cc1..8b32047e08e3 100644
--- a/lib/Target/SystemZ/SystemZInstrFP.td
+++ b/lib/Target/SystemZ/SystemZInstrFP.td
@@ -37,6 +37,10 @@ let hasSideEffects = 0 in {
def LER : UnaryRR <"le", 0x38, null_frag, FP32, FP32>;
def LDR : UnaryRR <"ld", 0x28, null_frag, FP64, FP64>;
def LXR : UnaryRRE<"lx", 0xB365, null_frag, FP128, FP128>;
+
+ // For z13 we prefer LDR over LER to avoid partial register dependencies.
+ let isCodeGenOnly = 1 in
+ def LDR32 : UnaryRR<"ld", 0x28, null_frag, FP32, FP32>;
}
// Moves between two floating-point registers that also set the condition
@@ -443,6 +447,13 @@ let Defs = [CC], CCValues = 0xF in {
def CDB : CompareRXE<"cdb", 0xED19, z_fcmp, FP64, load, 8>;
}
+// Test Data Class.
+let Defs = [CC], CCValues = 0xC in {
+ def TCEB : TestRXE<"tceb", 0xED10, z_tdc, FP32>;
+ def TCDB : TestRXE<"tcdb", 0xED11, z_tdc, FP64>;
+ def TCXB : TestRXE<"tcxb", 0xED12, z_tdc, FP128>;
+}
+
//===----------------------------------------------------------------------===//
// Peepholes
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/SystemZ/SystemZInstrFormats.td b/lib/Target/SystemZ/SystemZInstrFormats.td
index 01f4cdec05cb..973894d5c001 100644
--- a/lib/Target/SystemZ/SystemZInstrFormats.td
+++ b/lib/Target/SystemZ/SystemZInstrFormats.td
@@ -158,6 +158,17 @@ def getThreeOperandOpcode : InstrMapping {
//
//===----------------------------------------------------------------------===//
+class InstI<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<2, outs, ins, asmstr, pattern> {
+ field bits<16> Inst;
+ field bits<16> SoftFail = 0;
+
+ bits<8> I1;
+
+ let Inst{15-8} = op;
+ let Inst{7-0} = I1;
+}
+
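
The I format above is the smallest SystemZ encoding: two bytes, opcode in the high byte, immediate I1 in the low byte. A standalone encoder sketch of that layout; the wider formats added below (RIE-a, RIS, RRF-c, RRS) apply the same bit-slicing at 32 or 48 bits:

    #include <cstdint>

    // Inst{15-8} = op, Inst{7-0} = I1.
    uint16_t encodeInstI(uint8_t Op, uint8_t I1) {
      return uint16_t(Op) << 8 | I1;
    }
    // e.g. encodeInstI(0x0A, 3) == 0x0A03, the encoding of "svc 3".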
class InstRI<bits<12> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<4, outs, ins, asmstr, pattern> {
field bits<32> Inst;
@@ -172,6 +183,24 @@ class InstRI<bits<12> op, dag outs, dag ins, string asmstr, list<dag> pattern>
let Inst{15-0} = I2;
}
+class InstRIEa<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<4> R1;
+ bits<16> I2;
+ bits<4> M3;
+
+ let Inst{47-40} = op{15-8};
+ let Inst{39-36} = R1;
+ let Inst{35-32} = 0;
+ let Inst{31-16} = I2;
+ let Inst{15-12} = M3;
+ let Inst{11-8} = 0;
+ let Inst{7-0} = op{7-0};
+}
+
class InstRIEb<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<6, outs, ins, asmstr, pattern> {
field bits<48> Inst;
@@ -260,6 +289,24 @@ class InstRIL<bits<12> op, dag outs, dag ins, string asmstr, list<dag> pattern>
let Inst{31-0} = I2;
}
+class InstRIS<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<4> R1;
+ bits<8> I2;
+ bits<4> M3;
+ bits<16> BD4;
+
+ let Inst{47-40} = op{15-8};
+ let Inst{39-36} = R1;
+ let Inst{35-32} = M3;
+ let Inst{31-16} = BD4;
+ let Inst{15-8} = I2;
+ let Inst{7-0} = op{7-0};
+}
+
class InstRR<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<2, outs, ins, asmstr, pattern> {
field bits<16> Inst;
@@ -320,6 +367,41 @@ class InstRRF<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
let Inst{3-0} = R2;
}
+class InstRRFc<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<4, outs, ins, asmstr, pattern> {
+ field bits<32> Inst;
+ field bits<32> SoftFail = 0;
+
+ bits<4> R1;
+ bits<4> R2;
+ bits<4> M3;
+
+ let Inst{31-16} = op;
+ let Inst{15-12} = M3;
+ let Inst{11-8} = 0;
+ let Inst{7-4} = R1;
+ let Inst{3-0} = R2;
+}
+
+class InstRRS<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<4> R1;
+ bits<4> R2;
+ bits<4> M3;
+ bits<16> BD4;
+
+ let Inst{47-40} = op{15-8};
+ let Inst{39-36} = R1;
+ let Inst{35-32} = R2;
+ let Inst{31-16} = BD4;
+ let Inst{15-12} = M3;
+ let Inst{11-8} = 0;
+ let Inst{7-0} = op{7-0};
+}
+
class InstRX<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
: InstSystemZ<4, outs, ins, asmstr, pattern> {
field bits<32> Inst;
@@ -919,6 +1001,10 @@ class InstVRX<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
// Compare:
// Two input operands and an implicit CC output operand.
//
+// Test:
+// Two input operands and an implicit CC output operand. The second
+// input operand is an "address" operand used as a test class mask.
+//
// Ternary:
// One register output operand and three input operands.
//
@@ -974,12 +1060,30 @@ class BranchUnaryRI<string mnemonic, bits<12> opcode, RegisterOperand cls>
let DisableEncoding = "$R1src";
}
-class LoadMultipleRSY<string mnemonic, bits<16> opcode, RegisterOperand cls>
- : InstRSY<opcode, (outs cls:$R1, cls:$R3), (ins bdaddr20only:$BD2),
+class LoadMultipleRS<string mnemonic, bits<8> opcode, RegisterOperand cls,
+ AddressingMode mode = bdaddr12only>
+ : InstRS<opcode, (outs cls:$R1, cls:$R3), (ins mode:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2", []> {
+ let mayLoad = 1;
+}
+
+class LoadMultipleRSY<string mnemonic, bits<16> opcode, RegisterOperand cls,
+ AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs cls:$R1, cls:$R3), (ins mode:$BD2),
mnemonic#"\t$R1, $R3, $BD2", []> {
let mayLoad = 1;
}
+multiclass LoadMultipleRSPair<string mnemonic, bits<8> rsOpcode,
+ bits<16> rsyOpcode, RegisterOperand cls> {
+ let DispKey = mnemonic ## #cls in {
+ let DispSize = "12" in
+ def "" : LoadMultipleRS<mnemonic, rsOpcode, cls, bdaddr12pair>;
+ let DispSize = "20" in
+ def Y : LoadMultipleRSY<mnemonic#"y", rsyOpcode, cls, bdaddr20pair>;
+ }
+}
+
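
The RS/RSY pairing above (and the StoreMultipleRSPair and TernaryRSPair variants later in this file) exists because the short form carries a 12-bit unsigned displacement and the Y form a 20-bit signed one; DispKey/DispSize let later passes switch between the two. A standalone sketch of the selection rule, with illustrative names:

    #include <cstdint>

    enum class Form { RS, RSY, NeedsAddressMaterialized };

    // Pick the short form when the displacement fits in unsigned 12 bits,
    // the long form when it fits in signed 20 bits.
    Form pickForm(int64_t Disp) {
      if (Disp >= 0 && Disp < (1 << 12))
        return Form::RS;    // e.g. lm
      if (Disp >= -(1 << 19) && Disp < (1 << 19))
        return Form::RSY;   // e.g. lmy
      return Form::NeedsAddressMaterialized;
    }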
class LoadMultipleVRSa<string mnemonic, bits<16> opcode>
: InstVRSa<opcode, (outs VR128:$V1, VR128:$V3), (ins bdaddr12only:$BD2),
mnemonic#"\t$V1, $V3, $BD2", []> {
@@ -1055,12 +1159,30 @@ class StoreLengthVRSb<string mnemonic, bits<16> opcode,
let AccessBytes = bytes;
}
-class StoreMultipleRSY<string mnemonic, bits<16> opcode, RegisterOperand cls>
- : InstRSY<opcode, (outs), (ins cls:$R1, cls:$R3, bdaddr20only:$BD2),
+class StoreMultipleRS<string mnemonic, bits<8> opcode, RegisterOperand cls,
+ AddressingMode mode = bdaddr12only>
+ : InstRS<opcode, (outs), (ins cls:$R1, cls:$R3, mode:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2", []> {
+ let mayStore = 1;
+}
+
+class StoreMultipleRSY<string mnemonic, bits<16> opcode, RegisterOperand cls,
+ AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs), (ins cls:$R1, cls:$R3, mode:$BD2),
mnemonic#"\t$R1, $R3, $BD2", []> {
let mayStore = 1;
}
+multiclass StoreMultipleRSPair<string mnemonic, bits<8> rsOpcode,
+ bits<16> rsyOpcode, RegisterOperand cls> {
+ let DispKey = mnemonic ## #cls in {
+ let DispSize = "12" in
+ def "" : StoreMultipleRS<mnemonic, rsOpcode, cls, bdaddr12pair>;
+ let DispSize = "20" in
+ def Y : StoreMultipleRSY<mnemonic#"y", rsyOpcode, cls, bdaddr20pair>;
+ }
+}
+
class StoreMultipleVRSa<string mnemonic, bits<16> opcode>
: InstVRSa<opcode, (outs), (ins VR128:$V1, VR128:$V3, bdaddr12only:$BD2),
mnemonic#"\t$V1, $V3, $BD2", []> {
@@ -1186,6 +1308,15 @@ class CondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
let R4 = 0;
}
+class CondUnaryRIE<string mnemonic, bits<16> opcode, RegisterOperand cls,
+ Immediate imm>
+ : InstRIEd<opcode, (outs cls:$R1),
+ (ins imm:$I2, cond4:$valid, cond4:$R3),
+ mnemonic#"$R3\t$R1, $I2", []>,
+ Requires<[FeatureLoadStoreOnCond2]> {
+ let CCMaskLast = 1;
+}
+
// Like CondUnaryRRF, but used for the raw assembly form. The condition-code
// mask is the third operand rather than being part of the mnemonic.
class AsmCondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
@@ -1198,6 +1329,16 @@ class AsmCondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
let R4 = 0;
}
+class AsmCondUnaryRIE<string mnemonic, bits<16> opcode, RegisterOperand cls,
+ Immediate imm>
+ : InstRIEd<opcode, (outs cls:$R1),
+ (ins cls:$R1src, imm:$I2, imm32zx4:$R3),
+ mnemonic#"\t$R1, $I2, $R3", []>,
+ Requires<[FeatureLoadStoreOnCond2]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
// Like CondUnaryRRF, but with a fixed CC mask.
class FixedCondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
RegisterOperand cls2, bits<4> ccmask>
@@ -1210,6 +1351,17 @@ class FixedCondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
let R4 = 0;
}
+class FixedCondUnaryRIE<string mnemonic, bits<16> opcode, RegisterOperand cls,
+ Immediate imm, bits<4> ccmask>
+ : InstRIEd<opcode, (outs cls:$R1),
+ (ins cls:$R1src, imm:$I2),
+ mnemonic#"\t$R1, $I2", []>,
+ Requires<[FeatureLoadStoreOnCond2]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let R3 = ccmask;
+}
+
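
These RIE-d condition classes model the load-halfword-immediate-on-condition instructions (LOCHI/LOCGHI, used by getConditionalLoadImmediate further down); the tied $R1 = $R1src constraint makes them read-modify-write. Their effect is a branchless select against the register's previous value, sketched here in scalar C++ (loadImmOnCond is an illustrative name):

    #include <cstdint>

    // If the condition holds, R1 receives Imm; otherwise it keeps its
    // previous value -- the tied-operand semantics of LOCHI.
    int32_t loadImmOnCond(bool CondHolds, int32_t R1Src, int16_t Imm) {
      return CondHolds ? int32_t(Imm) : R1Src;
    }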
class UnaryRI<string mnemonic, bits<12> opcode, SDPatternOperator operator,
RegisterOperand cls, Immediate imm>
: InstRI<opcode, (outs cls:$R1), (ins imm:$I2),
@@ -1391,9 +1543,9 @@ class BinaryRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
class BinaryRRF<string mnemonic, bits<16> opcode, SDPatternOperator operator,
RegisterOperand cls1, RegisterOperand cls2>
- : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R3, cls2:$R2),
+ : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R2, cls2:$R3),
mnemonic#"r\t$R1, $R3, $R2",
- [(set cls1:$R1, (operator cls1:$R3, cls2:$R2))]> {
+ [(set cls1:$R1, (operator cls1:$R2, cls2:$R3))]> {
let OpKey = mnemonic ## cls1;
let OpType = "reg";
let R4 = 0;
@@ -1874,6 +2026,14 @@ class CompareVRRa<string mnemonic, bits<16> opcode, SDPatternOperator operator,
let M5 = 0;
}
+class TestRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls>
+ : InstRXE<opcode, (outs), (ins cls:$R1, bdxaddr12only:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(operator cls:$R1, bdxaddr12only:$XBD2)]> {
+ let M3 = 0;
+}
+
class TernaryRRD<string mnemonic, bits<16> opcode,
SDPatternOperator operator, RegisterOperand cls>
: InstRRD<opcode, (outs cls:$R1), (ins cls:$R1src, cls:$R3, cls:$R2),
@@ -1885,6 +2045,40 @@ class TernaryRRD<string mnemonic, bits<16> opcode,
let DisableEncoding = "$R1src";
}
+class TernaryRS<string mnemonic, bits<8> opcode, RegisterOperand cls,
+ bits<5> bytes, AddressingMode mode = bdaddr12only>
+ : InstRS<opcode, (outs cls:$R1),
+ (ins cls:$R1src, imm32zx4:$R3, mode:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2", []> {
+
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+}
+
+class TernaryRSY<string mnemonic, bits<16> opcode, RegisterOperand cls,
+ bits<5> bytes, AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs cls:$R1),
+ (ins cls:$R1src, imm32zx4:$R3, mode:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2", []> {
+
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+}
+
+multiclass TernaryRSPair<string mnemonic, bits<8> rsOpcode, bits<16> rsyOpcode,
+ RegisterOperand cls, bits<5> bytes> {
+ let DispKey = mnemonic ## #cls in {
+ let DispSize = "12" in
+ def "" : TernaryRS<mnemonic, rsOpcode, cls, bytes, bdaddr12pair>;
+ let DispSize = "20" in
+ def Y : TernaryRSY<mnemonic#"y", rsyOpcode, cls, bytes, bdaddr20pair>;
+ }
+}
+
class TernaryRXF<string mnemonic, bits<16> opcode, SDPatternOperator operator,
RegisterOperand cls, SDPatternOperator load, bits<5> bytes>
: InstRXF<opcode, (outs cls:$R1),
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp
index e6b5fc8e6235..4084e93e5acb 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -15,6 +15,7 @@
#include "SystemZInstrBuilder.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;
@@ -54,7 +55,7 @@ void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
// Get two load or store instructions. Use the original instruction for one
// of them (arbitrarily the second here) and create a clone for the other.
- MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
+ MachineInstr *EarlierMI = MF.CloneMachineInstr(&*MI);
MBB->insert(MI, EarlierMI);
// Set up the two 64-bit registers.
@@ -69,8 +70,8 @@ void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
MachineOperand &LowOffsetOp = MI->getOperand(2);
LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);
- // Clear the kill flags for the base and index registers in the first
- // instruction.
+ // Clear the kill flags for the base and index registers in the first
+ // instruction.
EarlierMI->getOperand(1).setIsKill(false);
EarlierMI->getOperand(3).setIsKill(false);
@@ -105,59 +106,89 @@ void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
// and HighOpcode takes an unsigned 32-bit operand. In those cases,
// MI has the same kind of operand as LowOpcode, so needs to be converted
// if HighOpcode is used.
-void SystemZInstrInfo::expandRIPseudo(MachineInstr *MI, unsigned LowOpcode,
+void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned HighOpcode,
bool ConvertHigh) const {
- unsigned Reg = MI->getOperand(0).getReg();
+ unsigned Reg = MI.getOperand(0).getReg();
bool IsHigh = isHighReg(Reg);
- MI->setDesc(get(IsHigh ? HighOpcode : LowOpcode));
+ MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
if (IsHigh && ConvertHigh)
- MI->getOperand(1).setImm(uint32_t(MI->getOperand(1).getImm()));
+ MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
}
// MI is a three-operand RIE-style pseudo instruction. Replace it with
// LowOpcodeK if the registers are both low GR32s, otherwise use a move
// followed by HighOpcode or LowOpcode, depending on whether the target
// is a high or low GR32.
-void SystemZInstrInfo::expandRIEPseudo(MachineInstr *MI, unsigned LowOpcode,
+void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned LowOpcodeK,
unsigned HighOpcode) const {
- unsigned DestReg = MI->getOperand(0).getReg();
- unsigned SrcReg = MI->getOperand(1).getReg();
+ unsigned DestReg = MI.getOperand(0).getReg();
+ unsigned SrcReg = MI.getOperand(1).getReg();
bool DestIsHigh = isHighReg(DestReg);
bool SrcIsHigh = isHighReg(SrcReg);
if (!DestIsHigh && !SrcIsHigh)
- MI->setDesc(get(LowOpcodeK));
+ MI.setDesc(get(LowOpcodeK));
else {
- emitGRX32Move(*MI->getParent(), MI, MI->getDebugLoc(),
- DestReg, SrcReg, SystemZ::LR, 32,
- MI->getOperand(1).isKill());
- MI->setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
- MI->getOperand(1).setReg(DestReg);
- MI->tieOperands(0, 1);
+ emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
+ SystemZ::LR, 32, MI.getOperand(1).isKill());
+ MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
+ MI.getOperand(1).setReg(DestReg);
+ MI.tieOperands(0, 1);
}
}
// MI is an RXY-style pseudo instruction. Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32.
-void SystemZInstrInfo::expandRXYPseudo(MachineInstr *MI, unsigned LowOpcode,
+void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned HighOpcode) const {
- unsigned Reg = MI->getOperand(0).getReg();
+ unsigned Reg = MI.getOperand(0).getReg();
unsigned Opcode = getOpcodeForOffset(isHighReg(Reg) ? HighOpcode : LowOpcode,
- MI->getOperand(2).getImm());
- MI->setDesc(get(Opcode));
+ MI.getOperand(2).getImm());
+ MI.setDesc(get(Opcode));
}
// MI is an RR-style pseudo instruction that zero-extends the low Size bits
// of one GRX32 into another. Replace it with LowOpcode if both operands
// are low registers, otherwise use RISB[LH]G.
-void SystemZInstrInfo::expandZExtPseudo(MachineInstr *MI, unsigned LowOpcode,
+void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned Size) const {
- emitGRX32Move(*MI->getParent(), MI, MI->getDebugLoc(),
- MI->getOperand(0).getReg(), MI->getOperand(1).getReg(),
- LowOpcode, Size, MI->getOperand(1).isKill());
- MI->eraseFromParent();
+ emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
+ MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), LowOpcode,
+ Size, MI.getOperand(1).isKill());
+ MI.eraseFromParent();
+}
+
+void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineFunction &MF = *MBB->getParent();
+ const unsigned Reg = MI->getOperand(0).getReg();
+
+ // Conveniently, all 4 instructions are cloned from LOAD_STACK_GUARD,
+ // so they already have operand 0 set to reg.
+
+ // ear <reg>, %a0
+ MachineInstr *Ear1MI = MF.CloneMachineInstr(MI);
+ MBB->insert(MI, Ear1MI);
+ Ear1MI->setDesc(get(SystemZ::EAR));
+ MachineInstrBuilder(MF, Ear1MI).addImm(0);
+
+ // sllg <reg>, <reg>, 32
+ MachineInstr *SllgMI = MF.CloneMachineInstr(MI);
+ MBB->insert(MI, SllgMI);
+ SllgMI->setDesc(get(SystemZ::SLLG));
+ MachineInstrBuilder(MF, SllgMI).addReg(Reg).addReg(0).addImm(32);
+
+ // ear <reg>, %a1
+ MachineInstr *Ear2MI = MF.CloneMachineInstr(MI);
+ MBB->insert(MI, Ear2MI);
+ Ear2MI->setDesc(get(SystemZ::EAR));
+ MachineInstrBuilder(MF, Ear2MI).addImm(1);
+
+ // lg <reg>, 40(<reg>)
+ MI->setDesc(get(SystemZ::LG));
+ MachineInstrBuilder(MF, MI).addReg(Reg).addImm(40).addReg(0);
}
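
Read as straight-line code, the four instructions above rebuild the 64-bit thread pointer from access registers %a0/%a1, then load the guard word at offset 40 from it. A hedged standalone sketch; the two register-read helpers are hypothetical stand-ins for EAR, and the offset is taken from the lg above:

    #include <cstdint>

    uint32_t readAccessReg0();   // hypothetical: "ear <reg>, %a0"
    uint32_t readAccessReg1();   // hypothetical: "ear <reg>, %a1"

    uint64_t loadStackGuard() {
      uint64_t TP = uint64_t(readAccessReg0()) << 32;       // ear + sllg
      TP |= readAccessReg1();                               // ear
      return *reinterpret_cast<const uint64_t *>(TP + 40);  // lg 40(<reg>)
    }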
// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
@@ -167,7 +198,7 @@ void SystemZInstrInfo::expandZExtPseudo(MachineInstr *MI, unsigned LowOpcode,
// KillSrc is true if this move is the last use of SrcReg.
void SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL, unsigned DestReg,
+ const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, unsigned LowLowOpcode,
unsigned Size, bool KillSrc) const {
unsigned Opcode;
@@ -196,45 +227,41 @@ void SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
-static int isSimpleMove(const MachineInstr *MI, int &FrameIndex,
+static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
unsigned Flag) {
- const MCInstrDesc &MCID = MI->getDesc();
- if ((MCID.TSFlags & Flag) &&
- MI->getOperand(1).isFI() &&
- MI->getOperand(2).getImm() == 0 &&
- MI->getOperand(3).getReg() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
+ const MCInstrDesc &MCID = MI.getDesc();
+ if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
+ MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
}
return 0;
}
-unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}
-unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}
-bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr *MI,
+bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
int &DestFrameIndex,
int &SrcFrameIndex) const {
// Check for MVC 0(Length,FI1),0(FI2)
- const MachineFrameInfo *MFI = MI->getParent()->getParent()->getFrameInfo();
- if (MI->getOpcode() != SystemZ::MVC ||
- !MI->getOperand(0).isFI() ||
- MI->getOperand(1).getImm() != 0 ||
- !MI->getOperand(3).isFI() ||
- MI->getOperand(4).getImm() != 0)
+ const MachineFrameInfo *MFI = MI.getParent()->getParent()->getFrameInfo();
+ if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
+ MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
+ MI.getOperand(4).getImm() != 0)
return false;
// Check that Length covers the full slots.
- int64_t Length = MI->getOperand(2).getImm();
- unsigned FI1 = MI->getOperand(0).getIndex();
- unsigned FI2 = MI->getOperand(3).getIndex();
+ int64_t Length = MI.getOperand(2).getImm();
+ unsigned FI1 = MI.getOperand(0).getIndex();
+ unsigned FI2 = MI.getOperand(3).getIndex();
if (MFI->getObjectSize(FI1) != Length ||
MFI->getObjectSize(FI2) != Length)
return false;
@@ -244,7 +271,7 @@ bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr *MI,
return true;
}
-bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
@@ -261,7 +288,7 @@ bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// Working from the bottom, when we see a non-terminator instruction, we're
// done.
- if (!isUnpredicatedTerminator(I))
+ if (!isUnpredicatedTerminator(*I))
break;
// A terminator that isn't a branch can't easily be handled by this
@@ -270,7 +297,7 @@ bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
return true;
// Can't handle indirect branches.
- SystemZII::Branch Branch(getBranchInfo(I));
+ SystemZII::Branch Branch(getBranchInfo(*I));
if (!Branch.Target->isMBB())
return true;
@@ -347,7 +374,7 @@ unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
continue;
if (!I->isBranch())
break;
- if (!getBranchInfo(I).Target->isMBB())
+ if (!getBranchInfo(*I).Target->isMBB())
break;
// Remove the branch.
I->eraseFromParent();
@@ -365,11 +392,11 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
return false;
}
-unsigned
-SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const {
+unsigned SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const {
// In this function we output 32-bit branches, which should always
// have enough range. They can be shortened and relaxed by later code
// in the pipeline, if desired.
@@ -402,17 +429,16 @@ SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
return Count;
}
-bool SystemZInstrInfo::analyzeCompare(const MachineInstr *MI,
- unsigned &SrcReg, unsigned &SrcReg2,
- int &Mask, int &Value) const {
- assert(MI->isCompare() && "Caller should have checked for a comparison");
+bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &Mask,
+ int &Value) const {
+ assert(MI.isCompare() && "Caller should have checked for a comparison");
- if (MI->getNumExplicitOperands() == 2 &&
- MI->getOperand(0).isReg() &&
- MI->getOperand(1).isImm()) {
- SrcReg = MI->getOperand(0).getReg();
+ if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() &&
+ MI.getOperand(1).isImm()) {
+ SrcReg = MI.getOperand(0).getReg();
SrcReg2 = 0;
- Value = MI->getOperand(1).getImm();
+ Value = MI.getOperand(1).getImm();
Mask = ~0;
return true;
}
@@ -445,7 +471,7 @@ static void eraseIfDead(MachineInstr *MI, const MachineRegisterInfo *MRI) {
// the result of an IPM sequence whose input CC survives until Compare,
// and whether Compare is therefore redundant. Delete it and return
// true if so.
-static bool removeIPMBasedCompare(MachineInstr *Compare, unsigned SrcReg,
+static bool removeIPMBasedCompare(MachineInstr &Compare, unsigned SrcReg,
const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI) {
MachineInstr *LGFR = nullptr;
@@ -466,16 +492,16 @@ static bool removeIPMBasedCompare(MachineInstr *Compare, unsigned SrcReg,
return false;
// Check that there are no assignments to CC between the IPM and Compare,
- if (IPM->getParent() != Compare->getParent())
+ if (IPM->getParent() != Compare.getParent())
return false;
- MachineBasicBlock::iterator MBBI = IPM, MBBE = Compare;
+ MachineBasicBlock::iterator MBBI = IPM, MBBE = Compare.getIterator();
for (++MBBI; MBBI != MBBE; ++MBBI) {
- MachineInstr *MI = MBBI;
- if (MI->modifiesRegister(SystemZ::CC, TRI))
+ MachineInstr &MI = *MBBI;
+ if (MI.modifiesRegister(SystemZ::CC, TRI))
return false;
}
- Compare->eraseFromParent();
+ Compare.eraseFromParent();
if (LGFR)
eraseIfDead(LGFR, MRI);
eraseIfDead(RLL, MRI);
@@ -485,13 +511,11 @@ static bool removeIPMBasedCompare(MachineInstr *Compare, unsigned SrcReg,
return true;
}
-bool
-SystemZInstrInfo::optimizeCompareInstr(MachineInstr *Compare,
- unsigned SrcReg, unsigned SrcReg2,
- int Mask, int Value,
- const MachineRegisterInfo *MRI) const {
+bool SystemZInstrInfo::optimizeCompareInstr(
+ MachineInstr &Compare, unsigned SrcReg, unsigned SrcReg2, int Mask,
+ int Value, const MachineRegisterInfo *MRI) const {
assert(!SrcReg2 && "Only optimizing constant comparisons so far");
- bool IsLogical = (Compare->getDesc().TSFlags & SystemZII::IsLogical) != 0;
+ bool IsLogical = (Compare.getDesc().TSFlags & SystemZII::IsLogical) != 0;
return Value == 0 && !IsLogical &&
removeIPMBasedCompare(Compare, SrcReg, MRI, &RI);
}
@@ -506,15 +530,43 @@ static unsigned getConditionalMove(unsigned Opcode) {
}
}
-bool SystemZInstrInfo::isPredicable(MachineInstr *MI) const {
- unsigned Opcode = MI->getOpcode();
- return STI.hasLoadStoreOnCond() && getConditionalMove(Opcode);
+static unsigned getConditionalLoadImmediate(unsigned Opcode) {
+ switch (Opcode) {
+ case SystemZ::LHI: return SystemZ::LOCHI;
+ case SystemZ::LGHI: return SystemZ::LOCGHI;
+ default: return 0;
+ }
+}
+
+bool SystemZInstrInfo::isPredicable(MachineInstr &MI) const {
+ unsigned Opcode = MI.getOpcode();
+ if (STI.hasLoadStoreOnCond() && getConditionalMove(Opcode))
+ return true;
+ if (STI.hasLoadStoreOnCond2() && getConditionalLoadImmediate(Opcode))
+ return true;
+ if (Opcode == SystemZ::Return ||
+ Opcode == SystemZ::Trap ||
+ Opcode == SystemZ::CallJG ||
+ Opcode == SystemZ::CallBR)
+ return true;
+ return false;
}
bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
unsigned NumCycles, unsigned ExtraPredCycles,
BranchProbability Probability) const {
+ // Avoid using conditional returns at the end of a loop (since then
+ // we'd need to emit an unconditional branch to the beginning anyway,
+ // making the loop body longer). This doesn't apply for low-probability
+ // loops (eg. compare-and-swap retry), so just decide based on branch
+ // probability instead of looping structure.
+ // However, since Compare and Trap instructions cost the same as a regular
+ // Compare instruction, we should allow the if conversion to convert this
+ // into a Conditional Compare regardless of the branch probability.
+ if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap &&
+ MBB.succ_empty() && Probability < BranchProbability(1, 8))
+ return false;
// For now only convert single instructions.
return NumCycles == 1;
}
@@ -530,27 +582,82 @@ isProfitableToIfCvt(MachineBasicBlock &TMBB,
}
bool SystemZInstrInfo::
-PredicateInstruction(MachineInstr *MI, ArrayRef<MachineOperand> Pred) const {
+isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ BranchProbability Probability) const {
+ // For now only duplicate single instructions.
+ return NumCycles == 1;
+}
+
+bool SystemZInstrInfo::PredicateInstruction(
+ MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
assert(Pred.size() == 2 && "Invalid condition");
unsigned CCValid = Pred[0].getImm();
unsigned CCMask = Pred[1].getImm();
assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
- unsigned Opcode = MI->getOpcode();
+ unsigned Opcode = MI.getOpcode();
if (STI.hasLoadStoreOnCond()) {
if (unsigned CondOpcode = getConditionalMove(Opcode)) {
- MI->setDesc(get(CondOpcode));
- MachineInstrBuilder(*MI->getParent()->getParent(), MI)
- .addImm(CCValid).addImm(CCMask)
- .addReg(SystemZ::CC, RegState::Implicit);
+ MI.setDesc(get(CondOpcode));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addImm(CCValid)
+ .addImm(CCMask)
+ .addReg(SystemZ::CC, RegState::Implicit);
return true;
}
}
+ if (STI.hasLoadStoreOnCond2()) {
+ if (unsigned CondOpcode = getConditionalLoadImmediate(Opcode)) {
+ MI.setDesc(get(CondOpcode));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addImm(CCValid)
+ .addImm(CCMask)
+ .addReg(SystemZ::CC, RegState::Implicit);
+ return true;
+ }
+ }
+ if (Opcode == SystemZ::Trap) {
+ MI.setDesc(get(SystemZ::CondTrap));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addImm(CCValid).addImm(CCMask)
+ .addReg(SystemZ::CC, RegState::Implicit);
+ return true;
+ }
+ if (Opcode == SystemZ::Return) {
+ MI.setDesc(get(SystemZ::CondReturn));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addImm(CCValid).addImm(CCMask)
+ .addReg(SystemZ::CC, RegState::Implicit);
+ return true;
+ }
+ if (Opcode == SystemZ::CallJG) {
+ MachineOperand FirstOp = MI.getOperand(0);
+ const uint32_t *RegMask = MI.getOperand(1).getRegMask();
+ MI.RemoveOperand(1);
+ MI.RemoveOperand(0);
+ MI.setDesc(get(SystemZ::CallBRCL));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addImm(CCValid).addImm(CCMask)
+ .addOperand(FirstOp)
+ .addRegMask(RegMask)
+ .addReg(SystemZ::CC, RegState::Implicit);
+ return true;
+ }
+ if (Opcode == SystemZ::CallBR) {
+ const uint32_t *RegMask = MI.getOperand(0).getRegMask();
+ MI.RemoveOperand(0);
+ MI.setDesc(get(SystemZ::CallBCR));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addImm(CCValid).addImm(CCMask)
+ .addRegMask(RegMask)
+ .addReg(SystemZ::CC, RegState::Implicit);
+ return true;
+ }
return false;
}
void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL, unsigned DestReg,
+ const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
// Split 128-bit GPR moves into two 64-bit moves. This handles ADDR128 too.
if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
@@ -571,7 +678,8 @@ void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
Opcode = SystemZ::LGR;
else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
- Opcode = SystemZ::LER;
+ // For z13 we prefer LDR over LER to avoid partial register dependencies.
+ Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
Opcode = SystemZ::LDR;
else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
@@ -654,6 +762,14 @@ static LogicOp interpretAndImmediate(unsigned Opcode) {
}
}
+static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) {
+ if (OldMI->registerDefIsDead(SystemZ::CC)) {
+ MachineOperand *CCDef = NewMI->findRegisterDefOperand(SystemZ::CC);
+ if (CCDef != nullptr)
+ CCDef->setIsDead(true);
+ }
+}
+
// Used to return from convertToThreeAddress after replacing two-address
// instruction OldMI with three-address instruction NewMI.
static MachineInstr *finishConvertToThreeAddress(MachineInstr *OldMI,
@@ -664,31 +780,29 @@ static MachineInstr *finishConvertToThreeAddress(MachineInstr *OldMI,
for (unsigned I = 1; I < NumOps; ++I) {
MachineOperand &Op = OldMI->getOperand(I);
if (Op.isReg() && Op.isKill())
- LV->replaceKillInstruction(Op.getReg(), OldMI, NewMI);
+ LV->replaceKillInstruction(Op.getReg(), *OldMI, *NewMI);
}
}
+ transferDeadCC(OldMI, NewMI);
return NewMI;
}
-MachineInstr *
-SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const {
- MachineInstr *MI = MBBI;
- MachineBasicBlock *MBB = MI->getParent();
+MachineInstr *SystemZInstrInfo::convertToThreeAddress(
+ MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const {
+ MachineBasicBlock *MBB = MI.getParent();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
- unsigned Opcode = MI->getOpcode();
- unsigned NumOps = MI->getNumOperands();
+ unsigned Opcode = MI.getOpcode();
+ unsigned NumOps = MI.getNumOperands();
// Try to convert something like SLL into SLLK, if supported.
// We prefer to keep the two-operand form where possible both
// because it tends to be shorter and because some instructions
// have memory forms that can be used during spilling.
if (STI.hasDistinctOps()) {
- MachineOperand &Dest = MI->getOperand(0);
- MachineOperand &Src = MI->getOperand(1);
+ MachineOperand &Dest = MI.getOperand(0);
+ MachineOperand &Src = MI.getOperand(1);
unsigned DestReg = Dest.getReg();
unsigned SrcReg = Src.getReg();
// AHIMux is only really a three-operand instruction when both operands
@@ -707,23 +821,23 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
// Create three address instruction without adding the implicit
// operands. Those will instead be copied over from the original
// instruction by the loop below.
- MachineInstrBuilder MIB(*MF,
- MF->CreateMachineInstr(get(ThreeOperandOpcode),
- MI->getDebugLoc(), /*NoImplicit=*/true));
+ MachineInstrBuilder MIB(
+ *MF, MF->CreateMachineInstr(get(ThreeOperandOpcode), MI.getDebugLoc(),
+ /*NoImplicit=*/true));
MIB.addOperand(Dest);
// Keep the kill state, but drop the tied flag.
MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg());
// Keep the remaining operands as-is.
for (unsigned I = 2; I < NumOps; ++I)
- MIB.addOperand(MI->getOperand(I));
+ MIB.addOperand(MI.getOperand(I));
MBB->insert(MI, MIB);
- return finishConvertToThreeAddress(MI, MIB, LV);
+ return finishConvertToThreeAddress(&MI, MIB, LV);
}
}
// Try to convert an AND into an RISBG-type instruction.
if (LogicOp And = interpretAndImmediate(Opcode)) {
- uint64_t Imm = MI->getOperand(2).getImm() << And.ImmLSB;
+ uint64_t Imm = MI.getOperand(2).getImm() << And.ImmLSB;
// AND IMMEDIATE leaves the other bits of the register unchanged.
Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
unsigned Start, End;
@@ -739,36 +853,55 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
Start &= 31;
End &= 31;
}
- MachineOperand &Dest = MI->getOperand(0);
- MachineOperand &Src = MI->getOperand(1);
+ MachineOperand &Dest = MI.getOperand(0);
+ MachineOperand &Src = MI.getOperand(1);
MachineInstrBuilder MIB =
- BuildMI(*MBB, MI, MI->getDebugLoc(), get(NewOpcode))
- .addOperand(Dest).addReg(0)
- .addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg())
- .addImm(Start).addImm(End + 128).addImm(0);
- return finishConvertToThreeAddress(MI, MIB, LV);
+ BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
+ .addOperand(Dest)
+ .addReg(0)
+ .addReg(Src.getReg(), getKillRegState(Src.isKill()),
+ Src.getSubReg())
+ .addImm(Start)
+ .addImm(End + 128)
+ .addImm(0);
+ return finishConvertToThreeAddress(&MI, MIB, LV);
}
}
return nullptr;
}
MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
- MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+ MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, int FrameIndex,
+ LiveIntervals *LIS) const {
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
const MachineFrameInfo *MFI = MF.getFrameInfo();
unsigned Size = MFI->getObjectSize(FrameIndex);
- unsigned Opcode = MI->getOpcode();
+ unsigned Opcode = MI.getOpcode();
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
- if ((Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
- isInt<8>(MI->getOperand(2).getImm()) &&
- !MI->getOperand(3).getReg()) {
- // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
- return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
- get(SystemZ::AGSI))
- .addFrameIndex(FrameIndex)
- .addImm(0)
- .addImm(MI->getOperand(2).getImm());
+ if (LIS != nullptr && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
+ isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {
+
+ // Check CC liveness, since new instruction introduces a dead
+ // def of CC.
+ MCRegUnitIterator CCUnit(SystemZ::CC, TRI);
+ LiveRange &CCLiveRange = LIS->getRegUnit(*CCUnit);
+ ++CCUnit;
+ assert (!CCUnit.isValid() && "CC only has one reg unit.");
+ SlotIndex MISlot =
+ LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
+ if (!CCLiveRange.liveAt(MISlot)) {
+ // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
+ MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
+ MI.getDebugLoc(), get(SystemZ::AGSI))
+ .addFrameIndex(FrameIndex)
+ .addImm(0)
+ .addImm(MI.getOperand(2).getImm());
+ BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
+ CCLiveRange.createDeadDef(MISlot, LIS->getVNInfoAllocator());
+ return BuiltMI;
+ }
}
return nullptr;
}
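
The extra liveness check exists because LA/LAY leave CC alone while AGSI defines it, so the fold is only sound where CC is dead. The rule, sketched standalone over a linear block (Inst and ccDeadAfter are illustrative names):

    #include <cstddef>
    #include <vector>

    struct Inst { bool DefsCC, UsesCC; };

    // An instruction may be replaced by a CC-clobbering form only if no
    // later instruction reads CC before the next definition of it.
    bool ccDeadAfter(const std::vector<Inst> &Block, size_t I) {
      for (size_t J = I + 1; J < Block.size(); ++J) {
        if (Block[J].UsesCC) return false;  // CC still read: live
        if (Block[J].DefsCC) return true;   // redefined first: dead
      }
      return true;   // sketch: assume CC not live out of the block
    }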
@@ -778,20 +911,23 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
return nullptr;
unsigned OpNum = Ops[0];
- assert(Size == MF.getRegInfo()
- .getRegClass(MI->getOperand(OpNum).getReg())->getSize() &&
+ assert(Size ==
+ MF.getRegInfo()
+ .getRegClass(MI.getOperand(OpNum).getReg())
+ ->getSize() &&
"Invalid size combination");
- if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) &&
- OpNum == 0 &&
- isInt<8>(MI->getOperand(2).getImm())) {
+ if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
+ isInt<8>(MI.getOperand(2).getImm())) {
// A(G)HI %reg, CONST -> A(G)SI %mem, CONST
Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
- return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
- get(Opcode))
- .addFrameIndex(FrameIndex)
- .addImm(0)
- .addImm(MI->getOperand(2).getImm());
+ MachineInstr *BuiltMI =
+ BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
+ .addFrameIndex(FrameIndex)
+ .addImm(0)
+ .addImm(MI.getOperand(2).getImm());
+ transferDeadCC(&MI, BuiltMI);
+ return BuiltMI;
}
if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
@@ -801,9 +937,9 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
// source register instead.
if (OpNum == 0) {
unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
- return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+ return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
get(StoreOpcode))
- .addOperand(MI->getOperand(1))
+ .addOperand(MI.getOperand(1))
.addFrameIndex(FrameIndex)
.addImm(0)
.addReg(0);
@@ -812,8 +948,8 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
// destination register instead.
if (OpNum == 1) {
unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
- unsigned Dest = MI->getOperand(0).getReg();
- return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+ unsigned Dest = MI.getOperand(0).getReg();
+ return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
get(LoadOpcode), Dest)
.addFrameIndex(FrameIndex)
.addImm(0)
@@ -834,26 +970,26 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
// might be equal. We don't worry about that case here, because spill slot
// coloring happens later, and because we have special code to remove
// MVCs that turn out to be redundant.
- if (OpNum == 0 && MI->hasOneMemOperand()) {
- MachineMemOperand *MMO = *MI->memoperands_begin();
+ if (OpNum == 0 && MI.hasOneMemOperand()) {
+ MachineMemOperand *MMO = *MI.memoperands_begin();
if (MMO->getSize() == Size && !MMO->isVolatile()) {
// Handle conversion of loads.
- if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
- return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+ if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXLoad)) {
+ return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
get(SystemZ::MVC))
.addFrameIndex(FrameIndex)
.addImm(0)
.addImm(Size)
- .addOperand(MI->getOperand(1))
- .addImm(MI->getOperand(2).getImm())
+ .addOperand(MI.getOperand(1))
+ .addImm(MI.getOperand(2).getImm())
.addMemOperand(MMO);
}
// Handle conversion of stores.
- if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
- return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+ if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) {
+ return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
get(SystemZ::MVC))
- .addOperand(MI->getOperand(1))
- .addImm(MI->getOperand(2).getImm())
+ .addOperand(MI.getOperand(1))
+ .addImm(MI.getOperand(2).getImm())
.addImm(Size)
.addFrameIndex(FrameIndex)
.addImm(0)
@@ -866,7 +1002,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
// into <INSN>.
int MemOpcode = SystemZ::getMemOpcode(Opcode);
if (MemOpcode >= 0) {
- unsigned NumOps = MI->getNumExplicitOperands();
+ unsigned NumOps = MI.getNumExplicitOperands();
if (OpNum == NumOps - 1) {
const MCInstrDesc &MemDesc = get(MemOpcode);
uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
@@ -874,12 +1010,13 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
assert(AccessBytes <= Size && "Access outside the frame index");
uint64_t Offset = Size - AccessBytes;
MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
- MI->getDebugLoc(), get(MemOpcode));
+ MI.getDebugLoc(), get(MemOpcode));
for (unsigned I = 0; I < OpNum; ++I)
- MIB.addOperand(MI->getOperand(I));
+ MIB.addOperand(MI.getOperand(I));
MIB.addFrameIndex(FrameIndex).addImm(Offset);
if (MemDesc.TSFlags & SystemZII::HasIndex)
MIB.addReg(0);
+ transferDeadCC(&MI, MIB);
return MIB;
}
}
@@ -888,14 +1025,14 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
}
MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
- MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
+ MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
+ LiveIntervals *LIS) const {
return nullptr;
}
-bool
-SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
- switch (MI->getOpcode()) {
+bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
case SystemZ::L128:
splitMove(MI, SystemZ::LG);
return true;
@@ -1033,13 +1170,13 @@ SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
return true;
case SystemZ::RISBMux: {
- bool DestIsHigh = isHighReg(MI->getOperand(0).getReg());
- bool SrcIsHigh = isHighReg(MI->getOperand(2).getReg());
+ bool DestIsHigh = isHighReg(MI.getOperand(0).getReg());
+ bool SrcIsHigh = isHighReg(MI.getOperand(2).getReg());
if (SrcIsHigh == DestIsHigh)
- MI->setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
+ MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
else {
- MI->setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
- MI->getOperand(5).setImm(MI->getOperand(5).getImm() ^ 32);
+ MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
+ MI.getOperand(5).setImm(MI.getOperand(5).getImm() ^ 32);
}
return true;
}
@@ -1048,62 +1185,65 @@ SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
splitAdjDynAlloc(MI);
return true;
+ case TargetOpcode::LOAD_STACK_GUARD:
+ expandLoadStackGuard(&MI);
+ return true;
+
default:
return false;
}
}
-uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr *MI) const {
- if (MI->getOpcode() == TargetOpcode::INLINEASM) {
- const MachineFunction *MF = MI->getParent()->getParent();
- const char *AsmStr = MI->getOperand(0).getSymbolName();
+uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
+ if (MI.getOpcode() == TargetOpcode::INLINEASM) {
+ const MachineFunction *MF = MI.getParent()->getParent();
+ const char *AsmStr = MI.getOperand(0).getSymbolName();
return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
}
- return MI->getDesc().getSize();
+ return MI.getDesc().getSize();
}
SystemZII::Branch
-SystemZInstrInfo::getBranchInfo(const MachineInstr *MI) const {
- switch (MI->getOpcode()) {
+SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
case SystemZ::BR:
case SystemZ::J:
case SystemZ::JG:
return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
- SystemZ::CCMASK_ANY, &MI->getOperand(0));
+ SystemZ::CCMASK_ANY, &MI.getOperand(0));
case SystemZ::BRC:
case SystemZ::BRCL:
- return SystemZII::Branch(SystemZII::BranchNormal,
- MI->getOperand(0).getImm(),
- MI->getOperand(1).getImm(), &MI->getOperand(2));
+ return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(0).getImm(),
+ MI.getOperand(1).getImm(), &MI.getOperand(2));
case SystemZ::BRCT:
return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
- SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));
+ SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
case SystemZ::BRCTG:
return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
- SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));
+ SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
case SystemZ::CIJ:
case SystemZ::CRJ:
return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
- MI->getOperand(2).getImm(), &MI->getOperand(3));
+ MI.getOperand(2).getImm(), &MI.getOperand(3));
case SystemZ::CLIJ:
case SystemZ::CLRJ:
return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
- MI->getOperand(2).getImm(), &MI->getOperand(3));
+ MI.getOperand(2).getImm(), &MI.getOperand(3));
case SystemZ::CGIJ:
case SystemZ::CGRJ:
return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
- MI->getOperand(2).getImm(), &MI->getOperand(3));
+ MI.getOperand(2).getImm(), &MI.getOperand(3));
case SystemZ::CLGIJ:
case SystemZ::CLGRJ:
return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
- MI->getOperand(2).getImm(), &MI->getOperand(3));
+ MI.getOperand(2).getImm(), &MI.getOperand(3));
default:
llvm_unreachable("Unrecognized branch opcode");
@@ -1250,28 +1390,107 @@ bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
return false;
}
-unsigned SystemZInstrInfo::getCompareAndBranch(unsigned Opcode,
- const MachineInstr *MI) const {
+unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode,
+ SystemZII::FusedCompareType Type,
+ const MachineInstr *MI) const {
switch (Opcode) {
- case SystemZ::CR:
- return SystemZ::CRJ;
- case SystemZ::CGR:
- return SystemZ::CGRJ;
case SystemZ::CHI:
- return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CIJ : 0;
case SystemZ::CGHI:
- return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CGIJ : 0;
- case SystemZ::CLR:
- return SystemZ::CLRJ;
- case SystemZ::CLGR:
- return SystemZ::CLGRJ;
+ if (!(MI && isInt<8>(MI->getOperand(1).getImm())))
+ return 0;
+ break;
case SystemZ::CLFI:
- return MI && isUInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CLIJ : 0;
case SystemZ::CLGFI:
- return MI && isUInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CLGIJ : 0;
- default:
- return 0;
+ if (!(MI && isUInt<8>(MI->getOperand(1).getImm())))
+ return 0;
}
+ switch (Type) {
+ case SystemZII::CompareAndBranch:
+ switch (Opcode) {
+ case SystemZ::CR:
+ return SystemZ::CRJ;
+ case SystemZ::CGR:
+ return SystemZ::CGRJ;
+ case SystemZ::CHI:
+ return SystemZ::CIJ;
+ case SystemZ::CGHI:
+ return SystemZ::CGIJ;
+ case SystemZ::CLR:
+ return SystemZ::CLRJ;
+ case SystemZ::CLGR:
+ return SystemZ::CLGRJ;
+ case SystemZ::CLFI:
+ return SystemZ::CLIJ;
+ case SystemZ::CLGFI:
+ return SystemZ::CLGIJ;
+ default:
+ return 0;
+ }
+ case SystemZII::CompareAndReturn:
+ switch (Opcode) {
+ case SystemZ::CR:
+ return SystemZ::CRBReturn;
+ case SystemZ::CGR:
+ return SystemZ::CGRBReturn;
+ case SystemZ::CHI:
+ return SystemZ::CIBReturn;
+ case SystemZ::CGHI:
+ return SystemZ::CGIBReturn;
+ case SystemZ::CLR:
+ return SystemZ::CLRBReturn;
+ case SystemZ::CLGR:
+ return SystemZ::CLGRBReturn;
+ case SystemZ::CLFI:
+ return SystemZ::CLIBReturn;
+ case SystemZ::CLGFI:
+ return SystemZ::CLGIBReturn;
+ default:
+ return 0;
+ }
+ case SystemZII::CompareAndSibcall:
+ switch (Opcode) {
+ case SystemZ::CR:
+ return SystemZ::CRBCall;
+ case SystemZ::CGR:
+ return SystemZ::CGRBCall;
+ case SystemZ::CHI:
+ return SystemZ::CIBCall;
+ case SystemZ::CGHI:
+ return SystemZ::CGIBCall;
+ case SystemZ::CLR:
+ return SystemZ::CLRBCall;
+ case SystemZ::CLGR:
+ return SystemZ::CLGRBCall;
+ case SystemZ::CLFI:
+ return SystemZ::CLIBCall;
+ case SystemZ::CLGFI:
+ return SystemZ::CLGIBCall;
+ default:
+ return 0;
+ }
+ case SystemZII::CompareAndTrap:
+ switch (Opcode) {
+ case SystemZ::CR:
+ return SystemZ::CRT;
+ case SystemZ::CGR:
+ return SystemZ::CGRT;
+ case SystemZ::CHI:
+ return SystemZ::CIT;
+ case SystemZ::CGHI:
+ return SystemZ::CGIT;
+ case SystemZ::CLR:
+ return SystemZ::CLRT;
+ case SystemZ::CLGR:
+ return SystemZ::CLGRT;
+ case SystemZ::CLFI:
+ return SystemZ::CLFIT;
+ case SystemZ::CLGFI:
+ return SystemZ::CLGIT;
+ default:
+ return 0;
+ }
+ }
+ return 0;
}
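For orientation, a hypothetical caller of the reworked hook: given a compare instruction and the conditional branch that consumes its CC result, a fusion pass could query getFusedCompare and, when it returns a nonzero opcode, build the fused form. Everything here except the getFusedCompare call itself (the variable names and BuildMI plumbing) is illustrative, not code from this patch:

    // Illustrative fragment: Compare sets CC, Branch consumes it; CCMask and
    // TargetMBB are taken from the branch being replaced.
    unsigned FusedOpcode = TII->getFusedCompare(
        Compare->getOpcode(), SystemZII::CompareAndBranch, Compare);
    if (FusedOpcode) {
      BuildMI(*MBB, *Branch, Branch->getDebugLoc(), TII->get(FusedOpcode))
          .addOperand(Compare->getOperand(0))
          .addOperand(Compare->getOperand(1))
          .addImm(CCMask)
          .addMBB(TargetMBB);
      Compare->eraseFromParent();
      Branch->eraseFromParent();
    }

Note that the immediate-range pre-check at the top of getFusedCompare is what lets all four switch arms assume the CHI/CGHI/CLFI/CLGFI operands already fit the 8-bit immediate fields of the fused forms.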
void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.h b/lib/Target/SystemZ/SystemZInstrInfo.h
index d9094ba93658..010010b89dc8 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -111,6 +111,22 @@ struct Branch {
const MachineOperand *target)
: Type(type), CCValid(ccValid), CCMask(ccMask), Target(target) {}
};
+// Kinds of fused compares in compare-and-* instructions. Together with the
+// type of the converted compare, this identifies the compare-and-*
+// instruction.
+enum FusedCompareType {
+ // Relative branch - CRJ etc.
+ CompareAndBranch,
+
+ // Indirect branch, used for return - CRBReturn etc.
+ CompareAndReturn,
+
+ // Indirect branch, used for sibcall - CRBCall etc.
+ CompareAndSibcall,
+
+ // Trap
+ CompareAndTrap
+};
} // end namespace SystemZII
class SystemZSubtarget;
@@ -120,16 +136,17 @@ class SystemZInstrInfo : public SystemZGenInstrInfo {
void splitMove(MachineBasicBlock::iterator MI, unsigned NewOpcode) const;
void splitAdjDynAlloc(MachineBasicBlock::iterator MI) const;
- void expandRIPseudo(MachineInstr *MI, unsigned LowOpcode,
- unsigned HighOpcode, bool ConvertHigh) const;
- void expandRIEPseudo(MachineInstr *MI, unsigned LowOpcode,
+ void expandRIPseudo(MachineInstr &MI, unsigned LowOpcode, unsigned HighOpcode,
+ bool ConvertHigh) const;
+ void expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned LowOpcodeK, unsigned HighOpcode) const;
- void expandRXYPseudo(MachineInstr *MI, unsigned LowOpcode,
+ void expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned HighOpcode) const;
- void expandZExtPseudo(MachineInstr *MI, unsigned LowOpcode,
+ void expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
unsigned Size) const;
+ void expandLoadStackGuard(MachineInstr *MI) const;
void emitGRX32Move(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
- DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
unsigned LowLowOpcode, unsigned Size, bool KillSrc) const;
virtual void anchor();
@@ -137,26 +154,26 @@ public:
explicit SystemZInstrInfo(SystemZSubtarget &STI);
// Override TargetInstrInfo.
- unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
- unsigned isStoreToStackSlot(const MachineInstr *MI,
+ unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
- bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex,
+ bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
int &SrcFrameIndex) const override;
- bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const override;
- bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+ const DebugLoc &DL) const override;
+ bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &Mask, int &Value) const override;
- bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
+ bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int Mask, int Value,
const MachineRegisterInfo *MRI) const override;
- bool isPredicable(MachineInstr *MI) const override;
+ bool isPredicable(MachineInstr &MI) const override;
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
unsigned ExtraPredCycles,
BranchProbability Probability) const override;
@@ -165,10 +182,12 @@ public:
MachineBasicBlock &FMBB,
unsigned NumCyclesF, unsigned ExtraPredCyclesF,
BranchProbability Probability) const override;
- bool PredicateInstruction(MachineInstr *MI,
+ bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ BranchProbability Probability) const override;
+ bool PredicateInstruction(MachineInstr &MI,
ArrayRef<MachineOperand> Pred) const override;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
- DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -181,17 +200,18 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
+ MachineInstr &MI,
LiveVariables *LV) const override;
- MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt,
- int FrameIndex) const override;
- MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt,
- MachineInstr *LoadMI) const override;
- bool expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const override;
+ MachineInstr *
+ foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
+ ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, int FrameIndex,
+ LiveIntervals *LIS = nullptr) const override;
+ MachineInstr *foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
+ LiveIntervals *LIS = nullptr) const override;
+ bool expandPostRAPseudo(MachineInstr &MI) const override;
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
override;
@@ -199,14 +219,14 @@ public:
const SystemZRegisterInfo &getRegisterInfo() const { return RI; }
// Return the size in bytes of MI.
- uint64_t getInstSizeInBytes(const MachineInstr *MI) const;
+ uint64_t getInstSizeInBytes(const MachineInstr &MI) const;
// Return true if MI is a conditional or unconditional branch.
// When returning true, set Cond to the mask of condition-code
// values on which the instruction will branch, and set Target
// to the operand that contains the branch target. This target
// can be a register or a basic block.
- SystemZII::Branch getBranchInfo(const MachineInstr *MI) const;
+ SystemZII::Branch getBranchInfo(const MachineInstr &MI) const;
// Get the load and store opcodes for a given register class.
void getLoadStoreOpcodes(const TargetRegisterClass *RC,
@@ -229,11 +249,12 @@ public:
bool isRxSBGMask(uint64_t Mask, unsigned BitSize,
unsigned &Start, unsigned &End) const;
- // If Opcode is a COMPARE opcode for which an associated COMPARE AND
- // BRANCH exists, return the opcode for the latter, otherwise return 0.
+ // If Opcode is a COMPARE opcode for which an associated fused COMPARE AND *
+ // operation exists, return the opcode for the latter, otherwise return 0.
// MI, if nonnull, is the compare instruction.
- unsigned getCompareAndBranch(unsigned Opcode,
- const MachineInstr *MI = nullptr) const;
+ unsigned getFusedCompare(unsigned Opcode,
+ SystemZII::FusedCompareType Type,
+ const MachineInstr *MI = nullptr) const;
// Emit code before MBBI in MI to move immediate value Value into
// physical register Reg.
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.td b/lib/Target/SystemZ/SystemZInstrInfo.td
index d5dabc2cd6ab..c510ca774be3 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -36,6 +36,22 @@ let hasSideEffects = 0 in {
let isReturn = 1, isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in
def Return : Alias<2, (outs), (ins), [(z_retflag)]>;
+// A conditional return instruction (bcr <cond>, %r14).
+let isReturn = 1, isTerminator = 1, hasCtrlDep = 1, CCMaskFirst = 1,
+    Uses = [CC] in
+ def CondReturn : Alias<2, (outs), (ins cond4:$valid, cond4:$R1), []>;
+
+// Fused compare and conditional returns.
+let isReturn = 1, isTerminator = 1, hasCtrlDep = 1 in {
+ def CRBReturn : Alias<6, (outs), (ins GR32:$R1, GR32:$R2, cond4:$M3), []>;
+ def CGRBReturn : Alias<6, (outs), (ins GR64:$R1, GR64:$R2, cond4:$M3), []>;
+ def CIBReturn : Alias<6, (outs), (ins GR32:$R1, imm32sx8:$I2, cond4:$M3), []>;
+ def CGIBReturn : Alias<6, (outs), (ins GR64:$R1, imm64sx8:$I2, cond4:$M3), []>;
+ def CLRBReturn : Alias<6, (outs), (ins GR32:$R1, GR32:$R2, cond4:$M3), []>;
+ def CLGRBReturn : Alias<6, (outs), (ins GR64:$R1, GR64:$R2, cond4:$M3), []>;
+ def CLIBReturn : Alias<6, (outs), (ins GR32:$R1, imm32zx8:$I2, cond4:$M3), []>;
+ def CLGIBReturn : Alias<6, (outs), (ins GR64:$R1, imm64zx8:$I2, cond4:$M3), []>;
+}
+
// Unconditional branches. R1 is the condition-code mask (all 1s).
let isBranch = 1, isTerminator = 1, isBarrier = 1, R1 = 15 in {
let isIndirectBranch = 1 in
@@ -51,6 +67,17 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, R1 = 15 in {
def JG : InstRIL<0xC04, (outs), (ins brtarget32:$I2), "jg\t$I2", []>;
}
+// FIXME: This trap instruction should be marked as isTerminator, but there is
+// currently a general bug that allows non-terminators to be placed between
+// terminators. Temporarily leave this unmarked until the bug is fixed.
+let isBarrier = 1, hasCtrlDep = 1 in {
+ def Trap : Alias<4, (outs), (ins), [(trap)]>;
+}
+
+let isTerminator = 1, hasCtrlDep = 1, Uses = [CC] in {
+ def CondTrap : Alias<4, (outs), (ins cond4:$valid, cond4:$R1), []>;
+}
+
// Conditional branches. It's easier for LLVM to handle these branches
// in their raw BRC/BRCL form, with the 4-bit condition-code mask being
// the first operand. It seems friendlier to use mnemonic forms like
@@ -62,15 +89,25 @@ let isBranch = 1, isTerminator = 1, Uses = [CC] in {
[(z_br_ccmask cond4:$valid, cond4:$R1, bb:$I2)]>;
def BRCL : InstRIL<0xC04, (outs), (ins cond4:$valid, cond4:$R1,
brtarget32:$I2), "jg$R1\t$I2", []>;
+ let isIndirectBranch = 1 in
+ def BCR : InstRR<0x07, (outs), (ins cond4:$valid, cond4:$R1, GR64:$R2),
+ "b${R1}r\t$R2", []>;
}
def AsmBRC : InstRI<0xA74, (outs), (ins imm32zx4:$R1, brtarget16:$I2),
"brc\t$R1, $I2", []>;
def AsmBRCL : InstRIL<0xC04, (outs), (ins imm32zx4:$R1, brtarget32:$I2),
"brcl\t$R1, $I2", []>;
- def AsmBCR : InstRR<0x07, (outs), (ins imm32zx4:$R1, GR64:$R2),
- "bcr\t$R1, $R2", []>;
+ let isIndirectBranch = 1 in {
+ def AsmBC : InstRX<0x47, (outs), (ins imm32zx4:$R1, bdxaddr12only:$XBD2),
+ "bc\t$R1, $XBD2", []>;
+ def AsmBCR : InstRR<0x07, (outs), (ins imm32zx4:$R1, GR64:$R2),
+ "bcr\t$R1, $R2", []>;
+ }
}
+def AsmNop : InstAlias<"nop\t$XBD", (AsmBC 0, bdxaddr12only:$XBD), 0>;
+def AsmNopR : InstAlias<"nopr\t$R", (AsmBCR 0, GR64:$R), 0>;
+
// Fused compare-and-branch instructions. As for normal branches,
// we handle these instructions internally in their raw CRJ-like form,
// but use assembly macros like CRJE when writing them out.
@@ -83,38 +120,83 @@ multiclass CompareBranches<Operand ccmask, string pos1, string pos2> {
let isBranch = 1, isTerminator = 1, Defs = [CC] in {
def RJ : InstRIEb<0xEC76, (outs), (ins GR32:$R1, GR32:$R2, ccmask:$M3,
brtarget16:$RI4),
- "crj"##pos1##"\t$R1, $R2, "##pos2##"$RI4", []>;
+ "crj"##pos1##"\t$R1, $R2"##pos2##", $RI4", []>;
def GRJ : InstRIEb<0xEC64, (outs), (ins GR64:$R1, GR64:$R2, ccmask:$M3,
brtarget16:$RI4),
- "cgrj"##pos1##"\t$R1, $R2, "##pos2##"$RI4", []>;
+ "cgrj"##pos1##"\t$R1, $R2"##pos2##", $RI4", []>;
def IJ : InstRIEc<0xEC7E, (outs), (ins GR32:$R1, imm32sx8:$I2, ccmask:$M3,
brtarget16:$RI4),
- "cij"##pos1##"\t$R1, $I2, "##pos2##"$RI4", []>;
+ "cij"##pos1##"\t$R1, $I2"##pos2##", $RI4", []>;
def GIJ : InstRIEc<0xEC7C, (outs), (ins GR64:$R1, imm64sx8:$I2, ccmask:$M3,
brtarget16:$RI4),
- "cgij"##pos1##"\t$R1, $I2, "##pos2##"$RI4", []>;
+ "cgij"##pos1##"\t$R1, $I2"##pos2##", $RI4", []>;
def LRJ : InstRIEb<0xEC77, (outs), (ins GR32:$R1, GR32:$R2, ccmask:$M3,
brtarget16:$RI4),
- "clrj"##pos1##"\t$R1, $R2, "##pos2##"$RI4", []>;
+ "clrj"##pos1##"\t$R1, $R2"##pos2##", $RI4", []>;
def LGRJ : InstRIEb<0xEC65, (outs), (ins GR64:$R1, GR64:$R2, ccmask:$M3,
brtarget16:$RI4),
- "clgrj"##pos1##"\t$R1, $R2, "##pos2##"$RI4", []>;
+ "clgrj"##pos1##"\t$R1, $R2"##pos2##", $RI4", []>;
def LIJ : InstRIEc<0xEC7F, (outs), (ins GR32:$R1, imm32zx8:$I2, ccmask:$M3,
brtarget16:$RI4),
- "clij"##pos1##"\t$R1, $I2, "##pos2##"$RI4", []>;
+ "clij"##pos1##"\t$R1, $I2"##pos2##", $RI4", []>;
def LGIJ : InstRIEc<0xEC7D, (outs), (ins GR64:$R1, imm64zx8:$I2, ccmask:$M3,
brtarget16:$RI4),
- "clgij"##pos1##"\t$R1, $I2, "##pos2##"$RI4", []>;
+ "clgij"##pos1##"\t$R1, $I2"##pos2##", $RI4", []>;
+ let isIndirectBranch = 1 in {
+ def RB : InstRRS<0xECF6, (outs), (ins GR32:$R1, GR32:$R2, ccmask:$M3,
+ bdaddr12only:$BD4),
+ "crb"##pos1##"\t$R1, $R2"##pos2##", $BD4", []>;
+ def GRB : InstRRS<0xECE4, (outs), (ins GR64:$R1, GR64:$R2, ccmask:$M3,
+ bdaddr12only:$BD4),
+ "cgrb"##pos1##"\t$R1, $R2"##pos2##", $BD4", []>;
+ def IB : InstRIS<0xECFE, (outs), (ins GR32:$R1, imm32sx8:$I2, ccmask:$M3,
+ bdaddr12only:$BD4),
+ "cib"##pos1##"\t$R1, $I2"##pos2##", $BD4", []>;
+ def GIB : InstRIS<0xECFC, (outs), (ins GR64:$R1, imm64sx8:$I2, ccmask:$M3,
+ bdaddr12only:$BD4),
+ "cgib"##pos1##"\t$R1, $I2"##pos2##", $BD4", []>;
+ def LRB : InstRRS<0xECF7, (outs), (ins GR32:$R1, GR32:$R2, ccmask:$M3,
+ bdaddr12only:$BD4),
+ "clrb"##pos1##"\t$R1, $R2"##pos2##", $BD4", []>;
+ def LGRB : InstRRS<0xECE5, (outs), (ins GR64:$R1, GR64:$R2, ccmask:$M3,
+ bdaddr12only:$BD4),
+ "clgrb"##pos1##"\t$R1, $R2"##pos2##", $BD4", []>;
+ def LIB : InstRIS<0xECFF, (outs), (ins GR32:$R1, imm32zx8:$I2, ccmask:$M3,
+ bdaddr12only:$BD4),
+ "clib"##pos1##"\t$R1, $I2"##pos2##", $BD4", []>;
+ def LGIB : InstRIS<0xECFD, (outs), (ins GR64:$R1, imm64zx8:$I2, ccmask:$M3,
+ bdaddr12only:$BD4),
+ "clgib"##pos1##"\t$R1, $I2"##pos2##", $BD4", []>;
+ }
+ }
+
+ let isTerminator = 1, hasCtrlDep = 1 in {
+ def RT : InstRRFc<0xB972, (outs), (ins GR32:$R1, GR32:$R2, ccmask:$M3),
+ "crt"##pos1##"\t$R1, $R2"##pos2, []>;
+ def GRT : InstRRFc<0xB960, (outs), (ins GR64:$R1, GR64:$R2, ccmask:$M3),
+ "cgrt"##pos1##"\t$R1, $R2"##pos2, []>;
+ def LRT : InstRRFc<0xB973, (outs), (ins GR32:$R1, GR32:$R2, ccmask:$M3),
+ "clrt"##pos1##"\t$R1, $R2"##pos2, []>;
+ def LGRT : InstRRFc<0xB961, (outs), (ins GR64:$R1, GR64:$R2, ccmask:$M3),
+ "clgrt"##pos1##"\t$R1, $R2"##pos2, []>;
+ def IT : InstRIEa<0xEC72, (outs), (ins GR32:$R1, imm32sx16:$I2, ccmask:$M3),
+ "cit"##pos1##"\t$R1, $I2"##pos2, []>;
+ def GIT : InstRIEa<0xEC70, (outs), (ins GR64:$R1, imm32sx16:$I2, ccmask:$M3),
+ "cgit"##pos1##"\t$R1, $I2"##pos2, []>;
+ def LFIT : InstRIEa<0xEC73, (outs), (ins GR32:$R1, imm32zx16:$I2, ccmask:$M3),
+ "clfit"##pos1##"\t$R1, $I2"##pos2, []>;
+ def LGIT : InstRIEa<0xEC71, (outs), (ins GR64:$R1, imm32zx16:$I2, ccmask:$M3),
+ "clgit"##pos1##"\t$R1, $I2"##pos2, []>;
}
}
let isCodeGenOnly = 1 in
defm C : CompareBranches<cond4, "$M3", "">;
-defm AsmC : CompareBranches<imm32zx4, "", "$M3, ">;
+defm AsmC : CompareBranches<imm32zx4, "", ", $M3">;
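To see what the pos1/pos2 splice yields, and why this hunk moves the mask splice from just before $RI4 to directly after the register operands, the resulting assembly strings can be reproduced in a few lines of standalone C++ (scaffolding only; the strings come from the multiclass above):

    #include <cstdio>
    #include <string>

    // Mirrors the TableGen "##" concatenation for the two defm instantiations.
    int main() {
      auto crj = [](const std::string &pos1, const std::string &pos2) {
        return "crj" + pos1 + "\t$R1, $R2" + pos2 + ", $RI4";
      };
      auto crt = [](const std::string &pos1, const std::string &pos2) {
        return "crt" + pos1 + "\t$R1, $R2" + pos2;
      };
      std::printf("%s\n", crj("$M3", "").c_str());   // codegen: crj$M3  $R1, $R2, $RI4
      std::printf("%s\n", crj("", ", $M3").c_str()); // asm:     crj  $R1, $R2, $M3, $RI4
      std::printf("%s\n", crt("$M3", "").c_str());   // codegen: crt$M3  $R1, $R2
      std::printf("%s\n", crt("", ", $M3").c_str()); // asm:     crt  $R1, $R2, $M3
    }

The old ordering could only splice the mask immediately before a trailing $RI4 operand, which the new compare-and-trap forms do not have; appending pos2 after the register operands works for both.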
// Define AsmParser mnemonics for each general condition-code mask
// (integer or floating-point)
-multiclass CondExtendedMnemonic<bits<4> ccmask, string name> {
- let R1 = ccmask in {
+multiclass CondExtendedMnemonicA<bits<4> ccmask, string name> {
+ let isBranch = 1, isTerminator = 1, R1 = ccmask in {
def J : InstRI<0xA74, (outs), (ins brtarget16:$I2),
"j"##name##"\t$I2", []>;
def JG : InstRIL<0xC04, (outs), (ins brtarget32:$I2),
@@ -123,25 +205,36 @@ multiclass CondExtendedMnemonic<bits<4> ccmask, string name> {
}
def LOCR : FixedCondUnaryRRF<"locr"##name, 0xB9F2, GR32, GR32, ccmask>;
def LOCGR : FixedCondUnaryRRF<"locgr"##name, 0xB9E2, GR64, GR64, ccmask>;
+ def LOCHI : FixedCondUnaryRIE<"lochi"##name, 0xEC42, GR32, imm32sx16,
+ ccmask>;
+ def LOCGHI : FixedCondUnaryRIE<"locghi"##name, 0xEC46, GR64, imm64sx16,
+ ccmask>;
def LOC : FixedCondUnaryRSY<"loc"##name, 0xEBF2, GR32, ccmask, 4>;
def LOCG : FixedCondUnaryRSY<"locg"##name, 0xEBE2, GR64, ccmask, 8>;
def STOC : FixedCondStoreRSY<"stoc"##name, 0xEBF3, GR32, ccmask, 4>;
def STOCG : FixedCondStoreRSY<"stocg"##name, 0xEBE3, GR64, ccmask, 8>;
}
-defm AsmO : CondExtendedMnemonic<1, "o">;
-defm AsmH : CondExtendedMnemonic<2, "h">;
-defm AsmNLE : CondExtendedMnemonic<3, "nle">;
-defm AsmL : CondExtendedMnemonic<4, "l">;
-defm AsmNHE : CondExtendedMnemonic<5, "nhe">;
-defm AsmLH : CondExtendedMnemonic<6, "lh">;
-defm AsmNE : CondExtendedMnemonic<7, "ne">;
-defm AsmE : CondExtendedMnemonic<8, "e">;
-defm AsmNLH : CondExtendedMnemonic<9, "nlh">;
-defm AsmHE : CondExtendedMnemonic<10, "he">;
-defm AsmNL : CondExtendedMnemonic<11, "nl">;
-defm AsmLE : CondExtendedMnemonic<12, "le">;
-defm AsmNH : CondExtendedMnemonic<13, "nh">;
-defm AsmNO : CondExtendedMnemonic<14, "no">;
+
+multiclass CondExtendedMnemonic<bits<4> ccmask, string name1, string name2>
+ : CondExtendedMnemonicA<ccmask, name1> {
+ let isAsmParserOnly = 1 in
+ defm Alt : CondExtendedMnemonicA<ccmask, name2>;
+}
+
+defm AsmO : CondExtendedMnemonicA<1, "o">;
+defm AsmH : CondExtendedMnemonic<2, "h", "p">;
+defm AsmNLE : CondExtendedMnemonicA<3, "nle">;
+defm AsmL : CondExtendedMnemonic<4, "l", "m">;
+defm AsmNHE : CondExtendedMnemonicA<5, "nhe">;
+defm AsmLH : CondExtendedMnemonicA<6, "lh">;
+defm AsmNE : CondExtendedMnemonic<7, "ne", "nz">;
+defm AsmE : CondExtendedMnemonic<8, "e", "z">;
+defm AsmNLH : CondExtendedMnemonicA<9, "nlh">;
+defm AsmHE : CondExtendedMnemonicA<10, "he">;
+defm AsmNL : CondExtendedMnemonic<11, "nl", "nm">;
+defm AsmLE : CondExtendedMnemonicA<12, "le">;
+defm AsmNH : CondExtendedMnemonic<13, "nh", "np">;
+defm AsmNO : CondExtendedMnemonicA<14, "no">;
// Define AsmParser mnemonics for each integer condition-code mask.
// This is like the list above, except that condition 3 is not possible
@@ -151,31 +244,76 @@ defm AsmNO : CondExtendedMnemonic<14, "no">;
// We don't make one of the two names an alias of the other because
// we need the custom parsing routines to select the correct register class.
multiclass IntCondExtendedMnemonicA<bits<4> ccmask, string name> {
- let M3 = ccmask in {
- def CR : InstRIEb<0xEC76, (outs), (ins GR32:$R1, GR32:$R2,
- brtarget16:$RI4),
- "crj"##name##"\t$R1, $R2, $RI4", []>;
- def CGR : InstRIEb<0xEC64, (outs), (ins GR64:$R1, GR64:$R2,
- brtarget16:$RI4),
- "cgrj"##name##"\t$R1, $R2, $RI4", []>;
- def CI : InstRIEc<0xEC7E, (outs), (ins GR32:$R1, imm32sx8:$I2,
- brtarget16:$RI4),
- "cij"##name##"\t$R1, $I2, $RI4", []>;
- def CGI : InstRIEc<0xEC7C, (outs), (ins GR64:$R1, imm64sx8:$I2,
- brtarget16:$RI4),
- "cgij"##name##"\t$R1, $I2, $RI4", []>;
- def CLR : InstRIEb<0xEC77, (outs), (ins GR32:$R1, GR32:$R2,
- brtarget16:$RI4),
- "clrj"##name##"\t$R1, $R2, $RI4", []>;
- def CLGR : InstRIEb<0xEC65, (outs), (ins GR64:$R1, GR64:$R2,
+ let isBranch = 1, isTerminator = 1, M3 = ccmask in {
+ def CRJ : InstRIEb<0xEC76, (outs), (ins GR32:$R1, GR32:$R2,
+ brtarget16:$RI4),
+ "crj"##name##"\t$R1, $R2, $RI4", []>;
+ def CGRJ : InstRIEb<0xEC64, (outs), (ins GR64:$R1, GR64:$R2,
+ brtarget16:$RI4),
+ "cgrj"##name##"\t$R1, $R2, $RI4", []>;
+ def CIJ : InstRIEc<0xEC7E, (outs), (ins GR32:$R1, imm32sx8:$I2,
brtarget16:$RI4),
- "clgrj"##name##"\t$R1, $R2, $RI4", []>;
- def CLI : InstRIEc<0xEC7F, (outs), (ins GR32:$R1, imm32zx8:$I2,
+ "cij"##name##"\t$R1, $I2, $RI4", []>;
+ def CGIJ : InstRIEc<0xEC7C, (outs), (ins GR64:$R1, imm64sx8:$I2,
brtarget16:$RI4),
- "clij"##name##"\t$R1, $I2, $RI4", []>;
- def CLGI : InstRIEc<0xEC7D, (outs), (ins GR64:$R1, imm64zx8:$I2,
+ "cgij"##name##"\t$R1, $I2, $RI4", []>;
+ def CLRJ : InstRIEb<0xEC77, (outs), (ins GR32:$R1, GR32:$R2,
brtarget16:$RI4),
- "clgij"##name##"\t$R1, $I2, $RI4", []>;
+ "clrj"##name##"\t$R1, $R2, $RI4", []>;
+ def CLGRJ : InstRIEb<0xEC65, (outs), (ins GR64:$R1, GR64:$R2,
+ brtarget16:$RI4),
+ "clgrj"##name##"\t$R1, $R2, $RI4", []>;
+ def CLIJ : InstRIEc<0xEC7F, (outs), (ins GR32:$R1, imm32zx8:$I2,
+ brtarget16:$RI4),
+ "clij"##name##"\t$R1, $I2, $RI4", []>;
+ def CLGIJ : InstRIEc<0xEC7D, (outs), (ins GR64:$R1, imm64zx8:$I2,
+ brtarget16:$RI4),
+ "clgij"##name##"\t$R1, $I2, $RI4", []>;
+ let isIndirectBranch = 1 in {
+ def CRB : InstRRS<0xECF6, (outs), (ins GR32:$R1, GR32:$R2,
+ bdaddr12only:$BD4),
+ "crb"##name##"\t$R1, $R2, $BD4", []>;
+ def CGRB : InstRRS<0xECE4, (outs), (ins GR64:$R1, GR64:$R2,
+ bdaddr12only:$BD4),
+ "cgrb"##name##"\t$R1, $R2, $BD4", []>;
+ def CIB : InstRIS<0xECFE, (outs), (ins GR32:$R1, imm32sx8:$I2,
+ bdaddr12only:$BD4),
+ "cib"##name##"\t$R1, $I2, $BD4", []>;
+ def CGIB : InstRIS<0xECFC, (outs), (ins GR64:$R1, imm64sx8:$I2,
+ bdaddr12only:$BD4),
+ "cgib"##name##"\t$R1, $I2, $BD4", []>;
+ def CLRB : InstRRS<0xECF7, (outs), (ins GR32:$R1, GR32:$R2,
+ bdaddr12only:$BD4),
+ "clrb"##name##"\t$R1, $R2, $BD4", []>;
+ def CLGRB : InstRRS<0xECE5, (outs), (ins GR64:$R1, GR64:$R2,
+ bdaddr12only:$BD4),
+ "clgrb"##name##"\t$R1, $R2, $BD4", []>;
+ def CLIB : InstRIS<0xECFF, (outs), (ins GR32:$R1, imm32zx8:$I2,
+ bdaddr12only:$BD4),
+ "clib"##name##"\t$R1, $I2, $BD4", []>;
+ def CLGIB : InstRIS<0xECFD, (outs), (ins GR64:$R1, imm64zx8:$I2,
+ bdaddr12only:$BD4),
+ "clgib"##name##"\t$R1, $I2, $BD4", []>;
+ }
+ }
+
+ let hasCtrlDep = 1, isTerminator = 1, M3 = ccmask in {
+ def CRT : InstRRFc<0xB972, (outs), (ins GR32:$R1, GR32:$R2),
+ "crt"##name##"\t$R1, $R2", []>;
+ def CGRT : InstRRFc<0xB960, (outs), (ins GR64:$R1, GR64:$R2),
+ "cgrt"##name##"\t$R1, $R2", []>;
+ def CLRT : InstRRFc<0xB973, (outs), (ins GR32:$R1, GR32:$R2),
+ "clrt"##name##"\t$R1, $R2", []>;
+ def CLGRT : InstRRFc<0xB961, (outs), (ins GR64:$R1, GR64:$R2),
+ "clgrt"##name##"\t$R1, $R2", []>;
+ def CIT : InstRIEa<0xEC72, (outs), (ins GR32:$R1, imm32sx16:$I2),
+ "cit"##name##"\t$R1, $I2", []>;
+ def CGIT : InstRIEa<0xEC70, (outs), (ins GR64:$R1, imm32sx16:$I2),
+ "cgit"##name##"\t$R1, $I2", []>;
+ def CLFIT : InstRIEa<0xEC73, (outs), (ins GR32:$R1, imm32zx16:$I2),
+ "clfit"##name##"\t$R1, $I2", []>;
+ def CLGIT : InstRIEa<0xEC71, (outs), (ins GR64:$R1, imm32zx16:$I2),
+ "clgit"##name##"\t$R1, $I2", []>;
}
}
multiclass IntCondExtendedMnemonic<bits<4> ccmask, string name1, string name2>
@@ -249,6 +387,26 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
def CallBR : Alias<2, (outs), (ins), [(z_sibcall R1D)]>;
}
+let CCMaskFirst = 1, isCall = 1, isTerminator = 1, isReturn = 1 in {
+ def CallBRCL : Alias<6, (outs), (ins cond4:$valid, cond4:$R1,
+ pcrel32:$I2), []>;
+
+ let Uses = [R1D] in
+ def CallBCR : Alias<2, (outs), (ins cond4:$valid, cond4:$R1), []>;
+}
+
+// Fused compare and conditional sibling calls.
+let isCall = 1, isTerminator = 1, isReturn = 1, Uses = [R1D] in {
+ def CRBCall : Alias<6, (outs), (ins GR32:$R1, GR32:$R2, cond4:$M3), []>;
+ def CGRBCall : Alias<6, (outs), (ins GR64:$R1, GR64:$R2, cond4:$M3), []>;
+ def CIBCall : Alias<6, (outs), (ins GR32:$R1, imm32sx8:$I2, cond4:$M3), []>;
+ def CGIBCall : Alias<6, (outs), (ins GR64:$R1, imm64sx8:$I2, cond4:$M3), []>;
+ def CLRBCall : Alias<6, (outs), (ins GR32:$R1, GR32:$R2, cond4:$M3), []>;
+ def CLGRBCall : Alias<6, (outs), (ins GR64:$R1, GR64:$R2, cond4:$M3), []>;
+ def CLIBCall : Alias<6, (outs), (ins GR32:$R1, imm32zx8:$I2, cond4:$M3), []>;
+ def CLGIBCall : Alias<6, (outs), (ins GR64:$R1, imm64zx8:$I2, cond4:$M3), []>;
+}
+
// TLS calls. These will be lowered into a call to __tls_get_offset,
// with an extra relocation specifying the TLS symbol.
let isCall = 1, Defs = [R14D, CC] in {
@@ -261,12 +419,14 @@ let isCall = 1, Defs = [R14D, CC] in {
// Define the general form of the call instructions for the asm parser.
// These instructions don't hard-code %r14 as the return address register.
// Allow an optional TLS marker symbol to generate TLS call relocations.
-def BRAS : InstRI<0xA75, (outs), (ins GR64:$R1, brtarget16tls:$I2),
- "bras\t$R1, $I2", []>;
-def BRASL : InstRIL<0xC05, (outs), (ins GR64:$R1, brtarget32tls:$I2),
- "brasl\t$R1, $I2", []>;
-def BASR : InstRR<0x0D, (outs), (ins GR64:$R1, ADDR64:$R2),
- "basr\t$R1, $R2", []>;
+let isCall = 1, Defs = [CC] in {
+ def BRAS : InstRI<0xA75, (outs), (ins GR64:$R1, brtarget16tls:$I2),
+ "bras\t$R1, $I2", []>;
+ def BRASL : InstRIL<0xC05, (outs), (ins GR64:$R1, brtarget32tls:$I2),
+ "brasl\t$R1, $I2", []>;
+ def BASR : InstRR<0x0D, (outs), (ins GR64:$R1, ADDR64:$R2),
+ "basr\t$R1, $R2", []>;
+}
//===----------------------------------------------------------------------===//
// Move instructions
@@ -294,6 +454,14 @@ let Uses = [CC] in {
def AsmLOCR : AsmCondUnaryRRF<"loc", 0xB9F2, GR32, GR32>;
def AsmLOCGR : AsmCondUnaryRRF<"locg", 0xB9E2, GR64, GR64>;
}
+let isCodeGenOnly = 1, Uses = [CC] in {
+ def LOCHI : CondUnaryRIE<"lochi", 0xEC42, GR32, imm32sx16>;
+ def LOCGHI : CondUnaryRIE<"locghi", 0xEC46, GR64, imm64sx16>;
+}
+let Uses = [CC] in {
+ def AsmLOCHI : AsmCondUnaryRIE<"lochi", 0xEC42, GR32, imm32sx16>;
+ def AsmLOCGHI : AsmCondUnaryRIE<"locghi", 0xEC46, GR64, imm64sx16>;
+}
// Immediate moves.
let hasSideEffects = 0, isAsCheapAsAMove = 1, isMoveImm = 1,
@@ -546,10 +714,14 @@ def : StoreGR64PC<STRL, aligned_truncstorei32>;
//===----------------------------------------------------------------------===//
// Multi-register loads.
+defm LM : LoadMultipleRSPair<"lm", 0x98, 0xEB98, GR32>;
def LMG : LoadMultipleRSY<"lmg", 0xEB04, GR64>;
+def LMH : LoadMultipleRSY<"lmh", 0xEB96, GRH32>;
// Multi-register stores.
+defm STM : StoreMultipleRSPair<"stm", 0x90, 0xEB90, GR32>;
def STMG : StoreMultipleRSY<"stmg", 0xEB24, GR64>;
+def STMH : StoreMultipleRSY<"stmh", 0xEB26, GRH32>;
//===----------------------------------------------------------------------===//
// Byte swaps
@@ -563,13 +735,14 @@ let hasSideEffects = 0 in {
// Byte-swapping loads. Unlike normal loads, these instructions are
// allowed to access storage more than once.
-def LRV : UnaryRXY<"lrv", 0xE31E, loadu<bswap, nonvolatile_load>, GR32, 4>;
-def LRVG : UnaryRXY<"lrvg", 0xE30F, loadu<bswap, nonvolatile_load>, GR64, 8>;
+def LRVH : UnaryRXY<"lrvh", 0xE31F, z_lrvh, GR32, 2>;
+def LRV : UnaryRXY<"lrv", 0xE31E, z_lrv, GR32, 4>;
+def LRVG : UnaryRXY<"lrvg", 0xE30F, z_lrvg, GR64, 8>;
// Likewise byte-swapping stores.
-def STRV : StoreRXY<"strv", 0xE33E, storeu<bswap, nonvolatile_store>, GR32, 4>;
-def STRVG : StoreRXY<"strvg", 0xE32F, storeu<bswap, nonvolatile_store>,
- GR64, 8>;
+def STRVH : StoreRXY<"strvh", 0xE33F, z_strvh, GR32, 2>;
+def STRV : StoreRXY<"strv", 0xE33E, z_strv, GR32, 4>;
+def STRVG : StoreRXY<"strvg", 0xE32F, z_strvg, GR64, 8>;
//===----------------------------------------------------------------------===//
// Load address instructions
@@ -657,6 +830,11 @@ defm : InsertMem<"inserti8", IC32Y, GR32, azextloadi8, bdxaddr20pair>;
defm : InsertMem<"inserti8", IC, GR64, azextloadi8, bdxaddr12pair>;
defm : InsertMem<"inserti8", ICY, GR64, azextloadi8, bdxaddr20pair>;
+let Defs = [CC] in {
+ defm ICM : TernaryRSPair<"icm", 0xBF, 0xEB81, GR32, 0>;
+ def ICMH : TernaryRSY<"icmh", 0xEB80, GRH32, 0>;
+}
+
// Insertions of a 16-bit immediate, leaving other bits unaffected.
// We don't have or_as_insert equivalents of these operations because
// OI is available instead.
@@ -812,7 +990,7 @@ defm : ZXB<subc, GR64, SLGFR>;
let Defs = [CC], Uses = [CC] in {
// Subtraction of a register.
def SLBR : BinaryRRE<"slb", 0xB999, sube, GR32, GR32>;
- def SLGBR : BinaryRRE<"slbg", 0xB989, sube, GR64, GR64>;
+ def SLBGR : BinaryRRE<"slbg", 0xB989, sube, GR64, GR64>;
// Subtraction of memory.
def SLB : BinaryRXY<"slb", 0xE399, sube, GR32, load, 4>;
@@ -865,7 +1043,7 @@ let Defs = [CC] in {
// ANDs of memory.
let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
defm N : BinaryRXPair<"n", 0x54, 0xE354, and, GR32, load, 4>;
- def NG : BinaryRXY<"ng", 0xE380, and, GR64, load, 8>;
+ def NG : BinaryRXY<"ng", 0xE380, and, GR64, load, 8>;
}
// AND to memory
@@ -1030,6 +1208,7 @@ def DLG : BinaryRXY<"dlg", 0xE387, z_udivrem64, GR128, load, 8>;
// Shift left.
let hasSideEffects = 0 in {
defm SLL : BinaryRSAndK<"sll", 0x89, 0xEBDF, shl, GR32>;
+ defm SLA : BinaryRSAndK<"sla", 0x8B, 0xEBDD, null_frag, GR32>;
def SLLG : BinaryRSY<"sllg", 0xEB0D, shl, GR64>;
}
@@ -1208,6 +1387,9 @@ let Defs = [CC] in {
defm TM : CompareSIPair<"tm", 0x91, 0xEB51, z_tm_mem, anyextloadi8, imm32zx8>;
}
+def TML : InstAlias<"tml\t$R, $I", (TMLL GR32:$R, imm32ll16:$I), 0>;
+def TMH : InstAlias<"tmh\t$R, $I", (TMLH GR32:$R, imm32lh16:$I), 0>;
+
//===----------------------------------------------------------------------===//
// Prefetch
//===----------------------------------------------------------------------===//
@@ -1224,6 +1406,10 @@ def PFDRL : PrefetchRILPC<"pfdrl", 0xC62, z_prefetch>;
let hasSideEffects = 1 in
def Serialize : Alias<2, (outs), (ins), [(z_serialize)]>;
+// A pseudo instruction that serves as a compiler barrier.
+let hasSideEffects = 1 in
+def MemBarrier : Pseudo<(outs), (ins), [(z_membarrier)]>;
+
let Predicates = [FeatureInterlockedAccess1], Defs = [CC] in {
def LAA : LoadAndOpRSY<"laa", 0xEBF8, atomic_load_add_32, GR32>;
def LAAG : LoadAndOpRSY<"laag", 0xEBE8, atomic_load_add_64, GR64>;
@@ -1466,6 +1652,10 @@ let mayLoad = 1, Defs = [CC] in
defm SRST : StringRRE<"srst", 0xb25e, z_search_string>;
// Other instructions for inline assembly
+let hasSideEffects = 1, Defs = [CC], isCall = 1 in
+ def SVC : InstI<0x0A, (outs), (ins imm32zx8:$I1),
+ "svc\t$I1",
+ []>;
let hasSideEffects = 1, Defs = [CC], mayStore = 1 in
def STCK : InstS<0xB205, (outs), (ins bdaddr12only:$BD2),
"stck\t$BD2",
@@ -1483,6 +1673,12 @@ let hasSideEffects = 1, Defs = [CC], mayStore = 1 in
"stfle\t$BD2",
[]>;
+let hasSideEffects = 1 in {
+ def EX : InstRX<0x44, (outs), (ins GR64:$R1, bdxaddr12only:$XBD2),
+ "ex\t$R1, $XBD2", []>;
+ def EXRL : InstRIL<0xC60, (outs), (ins GR64:$R1, pcrel32:$I2),
+ "exrl\t$R1, $I2", []>;
+}
//===----------------------------------------------------------------------===//
@@ -1515,6 +1711,42 @@ def : Pat<(sra (shl (i64 (anyext (i32 (z_select_ccmask 1, 0, imm32zx4:$valid,
(i32 63)),
(Select64 (LGHI -1), (LGHI 0), imm32zx4:$valid, imm32zx4:$cc)>;
+// Avoid generating 2 XOR instructions. (xor (and x, y), y) is
+// equivalent to (and (xor x, -1), y).
+def : Pat<(and (xor GR64:$x, (i64 -1)), GR64:$y),
+ (XGR GR64:$y, (NGR GR64:$y, GR64:$x))>;
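The identity behind this pattern is easy to spot-check. A throwaway standalone verification (not part of the patch):

    #include <cassert>
    #include <cstdint>

    // Checks y ^ (y & x) == ~x & y, i.e. that NGR followed by XGR computes
    // the same value as (and (xor x, -1), y).
    int main() {
      for (uint64_t x : {0ull, 1ull, 0xF0F0ull, 0x0123456789abcdefull, ~0ull})
        for (uint64_t y : {0ull, 7ull, 0xFFFFull, ~0ull})
          assert((y ^ (y & x)) == (~x & y));
    }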
+
+// Shift/rotate instructions only use the last 6 bits of the second operand
+// register, so we can safely use NILL (16 fewer bits than NILF) to only AND the
+// last 16 bits.
+// Complexity is added so that we match this before we match NILF on the AND
+// operation alone.
+let AddedComplexity = 4 in {
+ def : Pat<(shl GR32:$val, (and GR32:$shift, uimm32:$imm)),
+ (SLL GR32:$val, (NILL GR32:$shift, uimm32:$imm), 0)>;
+
+ def : Pat<(sra GR32:$val, (and GR32:$shift, uimm32:$imm)),
+ (SRA GR32:$val, (NILL GR32:$shift, uimm32:$imm), 0)>;
+
+ def : Pat<(srl GR32:$val, (and GR32:$shift, uimm32:$imm)),
+ (SRL GR32:$val, (NILL GR32:$shift, uimm32:$imm), 0)>;
+
+ def : Pat<(shl GR64:$val, (and GR32:$shift, uimm32:$imm)),
+ (SLLG GR64:$val, (NILL GR32:$shift, uimm32:$imm), 0)>;
+
+ def : Pat<(sra GR64:$val, (and GR32:$shift, uimm32:$imm)),
+ (SRAG GR64:$val, (NILL GR32:$shift, uimm32:$imm), 0)>;
+
+ def : Pat<(srl GR64:$val, (and GR32:$shift, uimm32:$imm)),
+ (SRLG GR64:$val, (NILL GR32:$shift, uimm32:$imm), 0)>;
+
+ def : Pat<(rotl GR32:$val, (and GR32:$shift, uimm32:$imm)),
+ (RLL GR32:$val, (NILL GR32:$shift, uimm32:$imm), 0)>;
+
+ def : Pat<(rotl GR64:$val, (and GR32:$shift, uimm32:$imm)),
+ (RLLG GR64:$val, (NILL GR32:$shift, uimm32:$imm), 0)>;
+}
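The safety argument in the comment above can be checked mechanically: NILF ANDs the whole 32-bit shift amount, NILL ANDs only its low 16 bits and leaves bits 31:16 alone, and the two always agree in the 6 bits the shifter reads. A standalone check, independent of the patch:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t amt = 0; amt < (1u << 18); amt += 97) {
        for (uint32_t mask : {0xFFFFFFFFu, 0x0000003Fu, 0x12345678u}) {
          uint32_t nilf = amt & mask;                                   // full AND
          uint32_t nill = (amt & 0xFFFF0000u) | (amt & mask & 0xFFFFu); // low-16 AND
          assert((nilf & 63) == (nill & 63));  // same effective shift count
        }
      }
    }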
+
// Peepholes for turning scalar operations into block operations.
defm : BlockLoadStore<anyextloadi8, i32, MVCSequence, NCSequence, OCSequence,
XCSequence, 1>;
diff --git a/lib/Target/SystemZ/SystemZLDCleanup.cpp b/lib/Target/SystemZ/SystemZLDCleanup.cpp
index 24165be29ae7..2cdf2f9bf990 100644
--- a/lib/Target/SystemZ/SystemZLDCleanup.cpp
+++ b/lib/Target/SystemZ/SystemZLDCleanup.cpp
@@ -64,6 +64,9 @@ void SystemZLDCleanup::getAnalysisUsage(AnalysisUsage &AU) const {
}
bool SystemZLDCleanup::runOnMachineFunction(MachineFunction &F) {
+ if (skipFunction(*F.getFunction()))
+ return false;
+
TII = static_cast<const SystemZInstrInfo *>(F.getSubtarget().getInstrInfo());
MF = &F;
@@ -92,9 +95,9 @@ bool SystemZLDCleanup::VisitNode(MachineDomTreeNode *Node,
switch (I->getOpcode()) {
case SystemZ::TLS_LDCALL:
if (TLSBaseAddrReg)
- I = ReplaceTLSCall(I, TLSBaseAddrReg);
+ I = ReplaceTLSCall(&*I, TLSBaseAddrReg);
else
- I = SetRegister(I, &TLSBaseAddrReg);
+ I = SetRegister(&*I, &TLSBaseAddrReg);
Changed = true;
break;
default:
diff --git a/lib/Target/SystemZ/SystemZLongBranch.cpp b/lib/Target/SystemZ/SystemZLongBranch.cpp
index 8dab44e7f8af..a24d47d2d16b 100644
--- a/lib/Target/SystemZ/SystemZLongBranch.cpp
+++ b/lib/Target/SystemZ/SystemZLongBranch.cpp
@@ -58,7 +58,6 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Function.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
@@ -139,12 +138,16 @@ public:
}
bool runOnMachineFunction(MachineFunction &F) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
private:
void skipNonTerminators(BlockPosition &Position, MBBInfo &Block);
void skipTerminator(BlockPosition &Position, TerminatorInfo &Terminator,
bool AssumeRelaxed);
- TerminatorInfo describeTerminator(MachineInstr *MI);
+ TerminatorInfo describeTerminator(MachineInstr &MI);
uint64_t initMBBInfo();
bool mustRelaxBranch(const TerminatorInfo &Terminator, uint64_t Address);
bool mustRelaxABranch();
@@ -207,11 +210,11 @@ void SystemZLongBranch::skipTerminator(BlockPosition &Position,
}
// Return a description of terminator instruction MI.
-TerminatorInfo SystemZLongBranch::describeTerminator(MachineInstr *MI) {
+TerminatorInfo SystemZLongBranch::describeTerminator(MachineInstr &MI) {
TerminatorInfo Terminator;
Terminator.Size = TII->getInstSizeInBytes(MI);
- if (MI->isConditionalBranch() || MI->isUnconditionalBranch()) {
- switch (MI->getOpcode()) {
+ if (MI.isConditionalBranch() || MI.isUnconditionalBranch()) {
+ switch (MI.getOpcode()) {
case SystemZ::J:
// Relaxes to JG, which is 2 bytes longer.
Terminator.ExtraRelaxSize = 2;
@@ -248,7 +251,7 @@ TerminatorInfo SystemZLongBranch::describeTerminator(MachineInstr *MI) {
default:
llvm_unreachable("Unrecognized branch instruction");
}
- Terminator.Branch = MI;
+ Terminator.Branch = &MI;
Terminator.TargetBlock =
TII->getBranchInfo(MI).Target->getMBB()->getNumber();
}
@@ -280,7 +283,7 @@ uint64_t SystemZLongBranch::initMBBInfo() {
MachineBasicBlock::iterator MI = MBB->begin();
MachineBasicBlock::iterator End = MBB->end();
while (MI != End && !MI->isTerminator()) {
- Block.Size += TII->getInstSizeInBytes(MI);
+ Block.Size += TII->getInstSizeInBytes(*MI);
++MI;
}
skipNonTerminators(Position, Block);
@@ -289,7 +292,7 @@ uint64_t SystemZLongBranch::initMBBInfo() {
while (MI != End) {
if (!MI->isDebugValue()) {
assert(MI->isTerminator() && "Terminator followed by non-terminator");
- Terminators.push_back(describeTerminator(MI));
+ Terminators.push_back(describeTerminator(*MI));
skipTerminator(Position, Terminators.back(), false);
++Block.NumTerminators;
}
diff --git a/lib/Target/SystemZ/SystemZMachineFunctionInfo.h b/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
index f4a517bd54df..4f64f4c65f1d 100644
--- a/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
+++ b/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
@@ -22,14 +22,15 @@ class SystemZMachineFunctionInfo : public MachineFunctionInfo {
unsigned VarArgsFirstFPR;
unsigned VarArgsFrameIndex;
unsigned RegSaveFrameIndex;
+ int FramePointerSaveIndex;
bool ManipulatesSP;
unsigned NumLocalDynamics;
public:
explicit SystemZMachineFunctionInfo(MachineFunction &MF)
: LowSavedGPR(0), HighSavedGPR(0), VarArgsFirstGPR(0), VarArgsFirstFPR(0),
- VarArgsFrameIndex(0), RegSaveFrameIndex(0), ManipulatesSP(false),
- NumLocalDynamics(0) {}
+ VarArgsFrameIndex(0), RegSaveFrameIndex(0), FramePointerSaveIndex(0),
+ ManipulatesSP(false), NumLocalDynamics(0) {}
// Get and set the first call-saved GPR that should be saved and restored
// by this function. This is 0 if no GPRs need to be saved or restored.
@@ -59,6 +60,10 @@ public:
unsigned getRegSaveFrameIndex() const { return RegSaveFrameIndex; }
void setRegSaveFrameIndex(unsigned FI) { RegSaveFrameIndex = FI; }
+ // Get and set the frame index where the old frame pointer is stored.
+ int getFramePointerSaveIndex() const { return FramePointerSaveIndex; }
+ void setFramePointerSaveIndex(int Idx) { FramePointerSaveIndex = Idx; }
+
// Get and set whether the function directly manipulates the stack pointer,
// e.g. through STACKSAVE or STACKRESTORE.
bool getManipulatesSP() const { return ManipulatesSP; }
diff --git a/lib/Target/SystemZ/SystemZOperands.td b/lib/Target/SystemZ/SystemZOperands.td
index 9af90d492cf8..17b076d88a34 100644
--- a/lib/Target/SystemZ/SystemZOperands.td
+++ b/lib/Target/SystemZ/SystemZOperands.td
@@ -451,11 +451,11 @@ def PCRelTLS32 : PCRelTLSAsmOperand<"32">;
// and multiplied by 2.
def brtarget16 : PCRelOperand<OtherVT, PCRel16> {
let EncoderMethod = "getPC16DBLEncoding";
- let DecoderMethod = "decodePC16DBLOperand";
+ let DecoderMethod = "decodePC16DBLBranchOperand";
}
def brtarget32 : PCRelOperand<OtherVT, PCRel32> {
let EncoderMethod = "getPC32DBLEncoding";
- let DecoderMethod = "decodePC32DBLOperand";
+ let DecoderMethod = "decodePC32DBLBranchOperand";
}
// Variants of brtarget16/32 with an optional additional TLS symbol.
@@ -464,12 +464,12 @@ def tlssym : Operand<i64> { }
def brtarget16tls : PCRelTLSOperand<OtherVT, PCRelTLS16> {
let MIOperandInfo = (ops brtarget16:$func, tlssym:$sym);
let EncoderMethod = "getPC16DBLTLSEncoding";
- let DecoderMethod = "decodePC16DBLOperand";
+ let DecoderMethod = "decodePC16DBLBranchOperand";
}
def brtarget32tls : PCRelTLSOperand<OtherVT, PCRelTLS32> {
let MIOperandInfo = (ops brtarget32:$func, tlssym:$sym);
let EncoderMethod = "getPC32DBLTLSEncoding";
- let DecoderMethod = "decodePC32DBLOperand";
+ let DecoderMethod = "decodePC32DBLBranchOperand";
}
// A PC-relative offset of a global value. The offset is sign-extended
diff --git a/lib/Target/SystemZ/SystemZOperators.td b/lib/Target/SystemZ/SystemZOperators.td
index 3c95a1e11b45..8d031f1ea05d 100644
--- a/lib/Target/SystemZ/SystemZOperators.td
+++ b/lib/Target/SystemZ/SystemZOperators.td
@@ -79,6 +79,14 @@ def SDT_ZI32Intrinsic : SDTypeProfile<1, 0, [SDTCisVT<0, i32>]>;
def SDT_ZPrefetch : SDTypeProfile<0, 2,
[SDTCisVT<0, i32>,
SDTCisPtrTy<1>]>;
+def SDT_ZLoadBSwap : SDTypeProfile<1, 2,
+ [SDTCisInt<0>,
+ SDTCisPtrTy<1>,
+ SDTCisVT<2, OtherVT>]>;
+def SDT_ZStoreBSwap : SDTypeProfile<0, 3,
+ [SDTCisInt<0>,
+ SDTCisPtrTy<1>,
+ SDTCisVT<2, OtherVT>]>;
def SDT_ZTBegin : SDTypeProfile<0, 2,
[SDTCisPtrTy<0>,
SDTCisVT<1, i32>]>;
@@ -137,6 +145,7 @@ def SDT_ZVecQuaternaryInt : SDTypeProfile<1, 4,
SDTCisSameAs<0, 2>,
SDTCisSameAs<0, 3>,
SDTCisVT<4, i32>]>;
+def SDT_ZTest : SDTypeProfile<0, 2, [SDTCisVT<1, i64>]>;
//===----------------------------------------------------------------------===//
// Node definitions
@@ -188,6 +197,15 @@ def z_udivrem64 : SDNode<"SystemZISD::UDIVREM64", SDT_ZGR128Binary64>;
def z_serialize : SDNode<"SystemZISD::SERIALIZE", SDTNone,
[SDNPHasChain, SDNPMayStore]>;
+def z_membarrier : SDNode<"SystemZISD::MEMBARRIER", SDTNone,
+ [SDNPHasChain, SDNPSideEffect]>;
+
+def z_loadbswap : SDNode<"SystemZISD::LRV", SDT_ZLoadBSwap,
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+def z_storebswap : SDNode<"SystemZISD::STRV", SDT_ZStoreBSwap,
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+
+def z_tdc : SDNode<"SystemZISD::TDC", SDT_ZTest, [SDNPOutGlue]>;
// Defined because the index is an i32 rather than a pointer.
def z_vector_insert : SDNode<"ISD::INSERT_VECTOR_ELT",
@@ -329,6 +347,17 @@ def z_vsrl : SDNode<"ISD::SRL", SDT_ZVecBinary>;
// Pattern fragments
//===----------------------------------------------------------------------===//
+def z_lrvh : PatFrag<(ops node:$addr), (z_loadbswap node:$addr, i16)>;
+def z_lrv : PatFrag<(ops node:$addr), (z_loadbswap node:$addr, i32)>;
+def z_lrvg : PatFrag<(ops node:$addr), (z_loadbswap node:$addr, i64)>;
+
+def z_strvh : PatFrag<(ops node:$src, node:$addr),
+ (z_storebswap node:$src, node:$addr, i16)>;
+def z_strv : PatFrag<(ops node:$src, node:$addr),
+ (z_storebswap node:$src, node:$addr, i32)>;
+def z_strvg : PatFrag<(ops node:$src, node:$addr),
+ (z_storebswap node:$src, node:$addr, i64)>;
+
// Signed and unsigned comparisons.
def z_scmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, imm), [{
unsigned Type = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
diff --git a/lib/Target/SystemZ/SystemZProcessors.td b/lib/Target/SystemZ/SystemZProcessors.td
index 32fbe5ae9ef9..9adc0189e650 100644
--- a/lib/Target/SystemZ/SystemZProcessors.td
+++ b/lib/Target/SystemZ/SystemZProcessors.td
@@ -29,6 +29,11 @@ def FeatureLoadStoreOnCond : SystemZFeature<
"Assume that the load/store-on-condition facility is installed"
>;
+def FeatureLoadStoreOnCond2 : SystemZFeature<
+ "load-store-on-cond-2", "LoadStoreOnCond2",
+ "Assume that the load/store-on-condition facility 2 is installed"
+>;
+
def FeatureHighWord : SystemZFeature<
"high-word", "HighWord",
"Assume that the high-word facility is installed"
@@ -92,5 +97,6 @@ def : Processor<"z13", NoItineraries,
[FeatureDistinctOps, FeatureLoadStoreOnCond, FeatureHighWord,
FeatureFPExtension, FeaturePopulationCount,
FeatureFastSerialization, FeatureInterlockedAccess1,
+ FeatureMiscellaneousExtensions,
FeatureTransactionalExecution, FeatureProcessorAssist,
- FeatureVector]>;
+ FeatureVector, FeatureLoadStoreOnCond2]>;
diff --git a/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/lib/Target/SystemZ/SystemZRegisterInfo.cpp
index 6fd24e3df625..b5e5fd4bfc4f 100644
--- a/lib/Target/SystemZ/SystemZRegisterInfo.cpp
+++ b/lib/Target/SystemZ/SystemZRegisterInfo.cpp
@@ -24,12 +24,20 @@ SystemZRegisterInfo::SystemZRegisterInfo()
const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
+ if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
+ MF->getFunction()->getAttributes().hasAttrSomewhere(
+ Attribute::SwiftError))
+ return CSR_SystemZ_SwiftError_SaveList;
return CSR_SystemZ_SaveList;
}
const uint32_t *
SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID CC) const {
+ if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
+ MF.getFunction()->getAttributes().hasAttrSomewhere(
+ Attribute::SwiftError))
+ return CSR_SystemZ_SwiftError_RegMask;
return CSR_SystemZ_RegMask;
}
@@ -84,8 +92,14 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
// accepts the offset exists.
unsigned Opcode = MI->getOpcode();
unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
- if (OpcodeForOffset)
+ if (OpcodeForOffset) {
+ if (OpcodeForOffset == SystemZ::LE &&
+ MF.getSubtarget<SystemZSubtarget>().hasVector()) {
+ // If LE is OK for the offset, use LDE32 instead on z13.
+ OpcodeForOffset = SystemZ::LDE32;
+ }
MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
+ }
else {
// Create an anchor point that is in range. Start at 0xffff so that
// can use LLILH to load the immediate.
diff --git a/lib/Target/SystemZ/SystemZRegisterInfo.h b/lib/Target/SystemZ/SystemZRegisterInfo.h
index a0db5a9c188f..e41c06c98af2 100644
--- a/lib/Target/SystemZ/SystemZRegisterInfo.h
+++ b/lib/Target/SystemZ/SystemZRegisterInfo.h
@@ -33,6 +33,15 @@ struct SystemZRegisterInfo : public SystemZGenRegisterInfo {
public:
SystemZRegisterInfo();
+ /// getPointerRegClass - Return the register class to use to hold pointers.
+ /// This is currently only used by LOAD_STACK_GUARD, which requires a non-%r0
+ /// register, hence ADDR64.
+ const TargetRegisterClass *
+ getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind=0) const override {
+ return &SystemZ::ADDR64BitRegClass;
+ }
+
// Override TargetRegisterInfo.h.
bool requiresRegisterScavenging(const MachineFunction &MF) const override {
return true;
diff --git a/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp b/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
index 178aa3817311..657482504045 100644
--- a/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
+++ b/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
@@ -23,7 +23,7 @@ using namespace llvm;
// address Dest. Sequence is the opcode to use for straight-line code
// (such as MVC) and Loop is the opcode to use for loops (such as MVC_LOOP).
// Return the chain for the completed operation.
-static SDValue emitMemMem(SelectionDAG &DAG, SDLoc DL, unsigned Sequence,
+static SDValue emitMemMem(SelectionDAG &DAG, const SDLoc &DL, unsigned Sequence,
unsigned Loop, SDValue Chain, SDValue Dst,
SDValue Src, uint64_t Size) {
EVT PtrVT = Src.getValueType();
@@ -46,12 +46,10 @@ static SDValue emitMemMem(SelectionDAG &DAG, SDLoc DL, unsigned Sequence,
DAG.getConstant(Size, DL, PtrVT));
}
-SDValue SystemZSelectionDAGInfo::
-EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
- SDValue Dst, SDValue Src, SDValue Size, unsigned Align,
- bool IsVolatile, bool AlwaysInline,
- MachinePointerInfo DstPtrInfo,
- MachinePointerInfo SrcPtrInfo) const {
+SDValue SystemZSelectionDAGInfo::EmitTargetCodeForMemcpy(
+ SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool IsVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
if (IsVolatile)
return SDValue();
@@ -64,24 +62,21 @@ EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
// Handle a memset of 1, 2, 4 or 8 bytes with the operands given by
// Chain, Dst, ByteVal and Size. These cases are expected to use
// MVI, MVHHI, MVHI and MVGHI respectively.
-static SDValue memsetStore(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+static SDValue memsetStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
SDValue Dst, uint64_t ByteVal, uint64_t Size,
- unsigned Align,
- MachinePointerInfo DstPtrInfo) {
+ unsigned Align, MachinePointerInfo DstPtrInfo) {
uint64_t StoreVal = ByteVal;
for (unsigned I = 1; I < Size; ++I)
StoreVal |= ByteVal << (I * 8);
- return DAG.getStore(Chain, DL,
- DAG.getConstant(StoreVal, DL,
- MVT::getIntegerVT(Size * 8)),
- Dst, DstPtrInfo, false, false, Align);
+ return DAG.getStore(
+ Chain, DL, DAG.getConstant(StoreVal, DL, MVT::getIntegerVT(Size * 8)),
+ Dst, DstPtrInfo, Align);
}
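A worked example of the splat loop above (standalone, not patch code): for ByteVal 0xAB and Size 4, the loop builds 0xABABABAB, which the single MVHI-style store then writes.

    #include <cstdint>
    #include <cstdio>

    // Reproduces memsetStore's byte-replication loop.
    int main() {
      uint64_t ByteVal = 0xAB, Size = 4;
      uint64_t StoreVal = ByteVal;
      for (unsigned I = 1; I < Size; ++I)
        StoreVal |= ByteVal << (I * 8);
      std::printf("%#llx\n", (unsigned long long)StoreVal); // 0xabababab
    }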
-SDValue SystemZSelectionDAGInfo::
-EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
- SDValue Dst, SDValue Byte, SDValue Size,
- unsigned Align, bool IsVolatile,
- MachinePointerInfo DstPtrInfo) const {
+SDValue SystemZSelectionDAGInfo::EmitTargetCodeForMemset(
+ SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dst,
+ SDValue Byte, SDValue Size, unsigned Align, bool IsVolatile,
+ MachinePointerInfo DstPtrInfo) const {
EVT PtrVT = Dst.getValueType();
if (IsVolatile)
@@ -116,15 +111,14 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
} else {
// Handle one and two bytes using STC.
if (Bytes <= 2) {
- SDValue Chain1 = DAG.getStore(Chain, DL, Byte, Dst, DstPtrInfo,
- false, false, Align);
+ SDValue Chain1 = DAG.getStore(Chain, DL, Byte, Dst, DstPtrInfo, Align);
if (Bytes == 1)
return Chain1;
SDValue Dst2 = DAG.getNode(ISD::ADD, DL, PtrVT, Dst,
DAG.getConstant(1, DL, PtrVT));
- SDValue Chain2 = DAG.getStore(Chain, DL, Byte, Dst2,
- DstPtrInfo.getWithOffset(1),
- false, false, 1);
+ SDValue Chain2 =
+ DAG.getStore(Chain, DL, Byte, Dst2, DstPtrInfo.getWithOffset(1),
+ /* Alignment = */ 1);
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chain1, Chain2);
}
}
@@ -138,8 +132,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
// Copy the byte to the first location and then use MVC to copy
// it to the rest.
- Chain = DAG.getStore(Chain, DL, Byte, Dst, DstPtrInfo,
- false, false, Align);
+ Chain = DAG.getStore(Chain, DL, Byte, Dst, DstPtrInfo, Align);
SDValue DstPlus1 = DAG.getNode(ISD::ADD, DL, PtrVT, Dst,
DAG.getConstant(1, DL, PtrVT));
return emitMemMem(DAG, DL, SystemZISD::MVC, SystemZISD::MVC_LOOP,
@@ -150,7 +143,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
// Use CLC to compare [Src1, Src1 + Size) with [Src2, Src2 + Size),
// deciding whether to use a loop or straight-line code.
-static SDValue emitCLC(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+static SDValue emitCLC(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
SDValue Src1, SDValue Src2, uint64_t Size) {
SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
EVT PtrVT = Src1.getValueType();
@@ -174,7 +167,8 @@ static SDValue emitCLC(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
// less than zero if CC == 1 and greater than zero if CC >= 2.
// The sequence starts with IPM, which puts CC into bits 29 and 28
// of an integer and clears bits 30 and 31.
-static SDValue addIPMSequence(SDLoc DL, SDValue Glue, SelectionDAG &DAG) {
+static SDValue addIPMSequence(const SDLoc &DL, SDValue Glue,
+ SelectionDAG &DAG) {
SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
@@ -183,11 +177,10 @@ static SDValue addIPMSequence(SDLoc DL, SDValue Glue, SelectionDAG &DAG) {
return ROTL;
}
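A worked example of the IPM/SRL/ROTL sequence for the three CC values CLC can produce. The rotate amount of 31 is an assumption here, since the hunk elides the line that builds the ROTL node; with it, CC=0 maps to zero, CC=1 to a negative value, and CC=2 to a positive one, matching the comment above.

    #include <cstdint>
    #include <cstdio>

    static uint32_t rotl32(uint32_t v, unsigned n) {
      return (v << n) | (v >> (32 - n));
    }

    int main() {
      for (uint32_t cc : {0u, 1u, 2u}) {
        uint32_t ipm = cc << 28;              // IPM: CC lands in bits 29:28
        uint32_t res = rotl32(ipm >> 28, 31); // SRL by IPM_CC (28), then ROTL
        std::printf("CC=%u -> %d\n", cc, (int32_t)res); // 0, -2147483648, 1
      }
    }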
-std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
-EmitTargetCodeForMemcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
- SDValue Src1, SDValue Src2, SDValue Size,
- MachinePointerInfo Op1PtrInfo,
- MachinePointerInfo Op2PtrInfo) const {
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::EmitTargetCodeForMemcmp(
+ SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src1,
+ SDValue Src2, SDValue Size, MachinePointerInfo Op1PtrInfo,
+ MachinePointerInfo Op2PtrInfo) const {
if (auto *CSize = dyn_cast<ConstantSDNode>(Size)) {
uint64_t Bytes = CSize->getZExtValue();
assert(Bytes > 0 && "Caller should have handled 0-size case");
@@ -198,10 +191,9 @@ EmitTargetCodeForMemcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
return std::make_pair(SDValue(), SDValue());
}
-std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
-EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
- SDValue Src, SDValue Char, SDValue Length,
- MachinePointerInfo SrcPtrInfo) const {
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::EmitTargetCodeForMemchr(
+ SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src,
+ SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const {
// Use SRST to find the character. End is its address on success.
EVT PtrVT = Src.getValueType();
SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other, MVT::Glue);
@@ -226,22 +218,20 @@ EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
return std::make_pair(End, Chain);
}
-std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
-EmitTargetCodeForStrcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
- SDValue Dest, SDValue Src,
- MachinePointerInfo DestPtrInfo,
- MachinePointerInfo SrcPtrInfo, bool isStpcpy) const {
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::EmitTargetCodeForStrcpy(
+ SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest,
+ SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo,
+ bool isStpcpy) const {
SDVTList VTs = DAG.getVTList(Dest.getValueType(), MVT::Other);
SDValue EndDest = DAG.getNode(SystemZISD::STPCPY, DL, VTs, Chain, Dest, Src,
DAG.getConstant(0, DL, MVT::i32));
return std::make_pair(isStpcpy ? EndDest : Dest, EndDest.getValue(1));
}
-std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
-EmitTargetCodeForStrcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
- SDValue Src1, SDValue Src2,
- MachinePointerInfo Op1PtrInfo,
- MachinePointerInfo Op2PtrInfo) const {
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::EmitTargetCodeForStrcmp(
+ SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src1,
+ SDValue Src2, MachinePointerInfo Op1PtrInfo,
+ MachinePointerInfo Op2PtrInfo) const {
SDVTList VTs = DAG.getVTList(Src1.getValueType(), MVT::Other, MVT::Glue);
SDValue Unused = DAG.getNode(SystemZISD::STRCMP, DL, VTs, Chain, Src1, Src2,
DAG.getConstant(0, DL, MVT::i32));
@@ -255,7 +245,8 @@ EmitTargetCodeForStrcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
// and the second being the out chain.
//
// This can be used for strlen by setting Limit to 0.
-static std::pair<SDValue, SDValue> getBoundedStrlen(SelectionDAG &DAG, SDLoc DL,
+static std::pair<SDValue, SDValue> getBoundedStrlen(SelectionDAG &DAG,
+ const SDLoc &DL,
SDValue Chain, SDValue Src,
SDValue Limit) {
EVT PtrVT = Src.getValueType();
@@ -265,19 +256,18 @@ static std::pair<SDValue, SDValue> getBoundedStrlen(SelectionDAG &DAG, SDLoc DL,
Chain = End.getValue(1);
SDValue Len = DAG.getNode(ISD::SUB, DL, PtrVT, End, Src);
return std::make_pair(Len, Chain);
-}
+}
-std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
-EmitTargetCodeForStrlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
- SDValue Src, MachinePointerInfo SrcPtrInfo) const {
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::EmitTargetCodeForStrlen(
+ SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src,
+ MachinePointerInfo SrcPtrInfo) const {
EVT PtrVT = Src.getValueType();
return getBoundedStrlen(DAG, DL, Chain, Src, DAG.getConstant(0, DL, PtrVT));
}
-std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
-EmitTargetCodeForStrnlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
- SDValue Src, SDValue MaxLength,
- MachinePointerInfo SrcPtrInfo) const {
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::EmitTargetCodeForStrnlen(
+ SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src,
+ SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const {
EVT PtrVT = Src.getValueType();
MaxLength = DAG.getZExtOrTrunc(MaxLength, DL, PtrVT);
SDValue Limit = DAG.getNode(ISD::ADD, DL, PtrVT, Src, MaxLength);
diff --git a/lib/Target/SystemZ/SystemZSelectionDAGInfo.h b/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
index 246fa3e5e656..93cd970c30c6 100644
--- a/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
+++ b/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
@@ -7,66 +7,64 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the SystemZ subclass for TargetSelectionDAGInfo.
+// This file defines the SystemZ subclass for SelectionDAGTargetInfo.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZSELECTIONDAGINFO_H
#define LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZSELECTIONDAGINFO_H
-#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
namespace llvm {
class SystemZTargetMachine;
-class SystemZSelectionDAGInfo : public TargetSelectionDAGInfo {
+class SystemZSelectionDAGInfo : public SelectionDAGTargetInfo {
public:
explicit SystemZSelectionDAGInfo() = default;
- SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool IsVolatile, bool AlwaysInline,
+ SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &DL,
+ SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool IsVolatile,
+ bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const override;
- SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL,
+ SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &DL,
SDValue Chain, SDValue Dst, SDValue Byte,
SDValue Size, unsigned Align, bool IsVolatile,
MachinePointerInfo DstPtrInfo) const override;
std::pair<SDValue, SDValue>
- EmitTargetCodeForMemcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
SDValue Src1, SDValue Src2, SDValue Size,
MachinePointerInfo Op1PtrInfo,
MachinePointerInfo Op2PtrInfo) const override;
std::pair<SDValue, SDValue>
- EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
SDValue Src, SDValue Char, SDValue Length,
MachinePointerInfo SrcPtrInfo) const override;
- std::pair<SDValue, SDValue>
- EmitTargetCodeForStrcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
- SDValue Dest, SDValue Src,
- MachinePointerInfo DestPtrInfo,
- MachinePointerInfo SrcPtrInfo,
- bool isStpcpy) const override;
+ std::pair<SDValue, SDValue> EmitTargetCodeForStrcpy(
+ SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest,
+ SDValue Src, MachinePointerInfo DestPtrInfo,
+ MachinePointerInfo SrcPtrInfo, bool isStpcpy) const override;
std::pair<SDValue, SDValue>
- EmitTargetCodeForStrcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
SDValue Src1, SDValue Src2,
MachinePointerInfo Op1PtrInfo,
MachinePointerInfo Op2PtrInfo) const override;
std::pair<SDValue, SDValue>
- EmitTargetCodeForStrlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
SDValue Src,
MachinePointerInfo SrcPtrInfo) const override;
std::pair<SDValue, SDValue>
- EmitTargetCodeForStrnlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
SDValue Src, SDValue MaxLength,
MachinePointerInfo SrcPtrInfo) const override;
};
diff --git a/lib/Target/SystemZ/SystemZShortenInst.cpp b/lib/Target/SystemZ/SystemZShortenInst.cpp
index 846edd51341a..7f26a3519e50 100644
--- a/lib/Target/SystemZ/SystemZShortenInst.cpp
+++ b/lib/Target/SystemZ/SystemZShortenInst.cpp
@@ -35,6 +35,10 @@ public:
bool processBlock(MachineBasicBlock &MBB);
bool runOnMachineFunction(MachineFunction &F) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
private:
bool shortenIIF(MachineInstr &MI, unsigned LLIxL, unsigned LLIxH);
@@ -68,18 +72,20 @@ static void tieOpsIfNeeded(MachineInstr &MI) {
// MI loads one word of a GPR using an IIxF instruction and LLIxL and LLIxH
// are the halfword immediate loads for the same word. Try to use one of them
-// instead of IIxF.
-bool SystemZShortenInst::shortenIIF(MachineInstr &MI,
- unsigned LLIxL, unsigned LLIxH) {
+// instead of IIxF.
+bool SystemZShortenInst::shortenIIF(MachineInstr &MI, unsigned LLIxL,
+ unsigned LLIxH) {
unsigned Reg = MI.getOperand(0).getReg();
// The new opcode will clear the other half of the GR64 reg, so
// cancel if that is live.
- unsigned thisSubRegIdx = (SystemZ::GRH32BitRegClass.contains(Reg) ?
- SystemZ::subreg_h32 : SystemZ::subreg_l32);
- unsigned otherSubRegIdx = (thisSubRegIdx == SystemZ::subreg_l32 ?
- SystemZ::subreg_h32 : SystemZ::subreg_l32);
- unsigned GR64BitReg = TRI->getMatchingSuperReg(Reg, thisSubRegIdx,
- &SystemZ::GR64BitRegClass);
+ unsigned thisSubRegIdx =
+ (SystemZ::GRH32BitRegClass.contains(Reg) ? SystemZ::subreg_h32
+ : SystemZ::subreg_l32);
+ unsigned otherSubRegIdx =
+ (thisSubRegIdx == SystemZ::subreg_l32 ? SystemZ::subreg_h32
+ : SystemZ::subreg_l32);
+ unsigned GR64BitReg =
+ TRI->getMatchingSuperReg(Reg, thisSubRegIdx, &SystemZ::GR64BitRegClass);
unsigned OtherReg = TRI->getSubReg(GR64BitReg, otherSubRegIdx);
if (LiveRegs.contains(OtherReg))
return false;
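As a side note on the shortening above: an IIxF-style 32-bit immediate insert can only become a halfword load-logical when the immediate fits entirely in one halfword, since LLIxL/LLIxH clear every remaining bit of the GR64 (hence the liveness check). A hedged standalone model of that halfword decision, with string names standing in for the real opcodes; the in-tree logic operates on MachineInstr descriptors:

    // Sketch: choose a halfword "load logical immediate" replacement for a
    // 32-bit IIxF-style immediate insert. Decision model only; it omits the
    // requirement (checked above) that the other word of the GR64 be dead.
    #include <cassert>
    #include <cstdint>

    const char *pickShortenedOpcode(uint32_t Imm) {
      uint32_t Lo = Imm & 0xffff;
      uint32_t Hi = Imm >> 16;
      if (Hi == 0)
        return "LLIxL"; // immediate lives in the low halfword
      if (Lo == 0)
        return "LLIxH"; // immediate lives in the high halfword
      return nullptr;   // both halfwords used: keep the IIxF form
    }

    int main() {
      assert(pickShortenedOpcode(0x0000beef) != nullptr); // -> LLIxL
      assert(pickShortenedOpcode(0xbeef0000) != nullptr); // -> LLIxH
      assert(pickShortenedOpcode(0xdeadbeef) == nullptr); // not shortenable
      return 0;
    }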
@@ -135,11 +141,10 @@ bool SystemZShortenInst::shortenOn001(MachineInstr &MI, unsigned Opcode) {
// Calls shortenOn001 if CCLive is false. CC def operand is added in
// case of success.
-bool SystemZShortenInst::shortenOn001AddCC(MachineInstr &MI,
- unsigned Opcode) {
+bool SystemZShortenInst::shortenOn001AddCC(MachineInstr &MI, unsigned Opcode) {
if (!LiveRegs.contains(SystemZ::CC) && shortenOn001(MI, Opcode)) {
MachineInstrBuilder(*MI.getParent()->getParent(), &MI)
- .addReg(SystemZ::CC, RegState::ImplicitDefine);
+ .addReg(SystemZ::CC, RegState::ImplicitDefine | RegState::Dead);
return true;
}
return false;
@@ -177,7 +182,7 @@ bool SystemZShortenInst::processBlock(MachineBasicBlock &MBB) {
// Set up the set of live registers at the end of MBB (live out)
LiveRegs.clear();
- LiveRegs.addLiveOuts(&MBB);
+ LiveRegs.addLiveOuts(MBB);
// Iterate backwards through the block looking for instructions to change.
for (auto MBBI = MBB.rbegin(), MBBE = MBB.rend(); MBBI != MBBE; ++MBBI) {
@@ -264,6 +269,9 @@ bool SystemZShortenInst::processBlock(MachineBasicBlock &MBB) {
}
bool SystemZShortenInst::runOnMachineFunction(MachineFunction &F) {
+ if (skipFunction(*F.getFunction()))
+ return false;
+
const SystemZSubtarget &ST = F.getSubtarget<SystemZSubtarget>();
TII = ST.getInstrInfo();
TRI = ST.getRegisterInfo();
diff --git a/lib/Target/SystemZ/SystemZSubtarget.cpp b/lib/Target/SystemZ/SystemZSubtarget.cpp
index 0b49fcdd8f78..67d5e0179fe2 100644
--- a/lib/Target/SystemZ/SystemZSubtarget.cpp
+++ b/lib/Target/SystemZ/SystemZSubtarget.cpp
@@ -40,21 +40,11 @@ SystemZSubtarget::SystemZSubtarget(const Triple &TT, const std::string &CPU,
HasPopulationCount(false), HasFastSerialization(false),
HasInterlockedAccess1(false), HasMiscellaneousExtensions(false),
HasTransactionalExecution(false), HasProcessorAssist(false),
- HasVector(false), TargetTriple(TT),
+ HasVector(false), HasLoadStoreOnCond2(false), TargetTriple(TT),
InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
TSInfo(), FrameLowering() {}
-// Return true if GV binds locally under reloc model RM.
-static bool bindsLocally(const GlobalValue *GV, Reloc::Model RM) {
- // For non-PIC, all symbols bind locally.
- if (RM == Reloc::Static)
- return true;
-
- return GV->hasLocalLinkage() || !GV->hasDefaultVisibility();
-}
-
bool SystemZSubtarget::isPC32DBLSymbol(const GlobalValue *GV,
- Reloc::Model RM,
CodeModel::Model CM) const {
// PC32DBL accesses require the low bit to be clear. Note that a zero
// value selects the default alignment and is therefore OK.
@@ -63,7 +53,7 @@ bool SystemZSubtarget::isPC32DBLSymbol(const GlobalValue *GV,
// For the small model, all locally-binding symbols are in range.
if (CM == CodeModel::Small)
- return bindsLocally(GV, RM);
+ return TLInfo.getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
// For Medium and above, assume that the symbol is not within the 4GB range.
// Taking the address of locally-defined text would be OK, but that
diff --git a/lib/Target/SystemZ/SystemZSubtarget.h b/lib/Target/SystemZ/SystemZSubtarget.h
index f7eaf01cb77e..6007f6fc9c4c 100644
--- a/lib/Target/SystemZ/SystemZSubtarget.h
+++ b/lib/Target/SystemZ/SystemZSubtarget.h
@@ -45,6 +45,7 @@ protected:
bool HasTransactionalExecution;
bool HasProcessorAssist;
bool HasVector;
+ bool HasLoadStoreOnCond2;
private:
Triple TargetTriple;
@@ -69,7 +70,7 @@ public:
const SystemZTargetLowering *getTargetLowering() const override {
return &TLInfo;
}
- const TargetSelectionDAGInfo *getSelectionDAGInfo() const override {
+ const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
return &TSInfo;
}
@@ -85,6 +86,9 @@ public:
// Return true if the target has the load/store-on-condition facility.
bool hasLoadStoreOnCond() const { return HasLoadStoreOnCond; }
+ // Return true if the target has the load/store-on-condition facility 2.
+ bool hasLoadStoreOnCond2() const { return HasLoadStoreOnCond2; }
+
// Return true if the target has the high-word facility.
bool hasHighWord() const { return HasHighWord; }
@@ -116,8 +120,7 @@ public:
// Return true if GV can be accessed using LARL for reloc model RM
// and code model CM.
- bool isPC32DBLSymbol(const GlobalValue *GV, Reloc::Model RM,
- CodeModel::Model CM) const;
+ bool isPC32DBLSymbol(const GlobalValue *GV, CodeModel::Model CM) const;
bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
};
diff --git a/lib/Target/SystemZ/SystemZTDC.cpp b/lib/Target/SystemZ/SystemZTDC.cpp
new file mode 100644
index 000000000000..96a9ef82c125
--- /dev/null
+++ b/lib/Target/SystemZ/SystemZTDC.cpp
@@ -0,0 +1,382 @@
+//===-- SystemZTDC.cpp - Utilize Test Data Class instruction --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass looks for instructions that can be replaced by a Test Data Class
+// instruction, and replaces them when profitable.
+//
+// Roughly, the following rules are recognized:
+//
+// 1: fcmp pred X, 0 -> tdc X, mask
+// 2: fcmp pred X, +-inf -> tdc X, mask
+// 3: fcmp pred X, +-minnorm -> tdc X, mask
+// 4: tdc (fabs X), mask -> tdc X, newmask
+// 5: icmp slt (bitcast float X to int), 0 -> tdc X, mask [i.e. signbit]
+// 6: icmp sgt (bitcast float X to int), -1 -> tdc X, mask
+// 7: icmp ne/eq (call @llvm.s390.tdc.*(X, mask)) -> tdc X, mask/~mask
+// 8: and i1 (tdc X, M1), (tdc X, M2) -> tdc X, (M1 & M2)
+// 9: or i1 (tdc X, M1), (tdc X, M2) -> tdc X, (M1 | M2)
+// 10: xor i1 (tdc X, M1), (tdc X, M2) -> tdc X, (M1 ^ M2)
+//
+// The pass works in 4 steps:
+//
+// 1. All fcmp and icmp instructions in a function are checked for a match
+// with rules 1-3 and 5-7. Their TDC equivalents are stored in
+// the ConvertedInsts mapping. If the operand of a fcmp instruction is
+// a fabs, it's also folded according to rule 4.
+// 2. All and/or/xor i1 instructions both of whose operands have already
+// been mapped are mapped according to rules 8-10. LogicOpsWorklist is
+// used as a queue of instructions to check.
+// 3. All mapped instructions that are considered worthy of conversion (i.e.
+// replacing them will actually simplify the final code) are replaced
+// with a call to the s390.tdc intrinsic.
+// 4. All intermediate results of replaced instructions are removed if unused.
+//
+// Instructions that match rules 1-3 are considered unworthy of conversion
+// on their own (since a comparison instruction is superior), but are mapped
+// in the hopes of folding the result using rules 4 and 8-10 (likely removing
+// the original comparison in the process).
+//
+//===----------------------------------------------------------------------===//
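A worked instance of the mask algebra in rules 4 and 8-10 above, as a standalone sketch under one assumption: the class-bit values below mirror the S/390 Test Data Class encoding behind SystemZ::TDCMASK_* (each "+" class bit sits one position above its "-" twin, which is what makes the fabs fold's "Mask |= Mask >> 1" work). The authoritative constants live in SystemZ.h:

    // tdc_mask_sketch.cpp: standalone illustration of the mask folding in
    // rules 4 and 8-10. The class-bit values are an assumption mirroring
    // the S/390 Test Data Class encoding, not copied from SystemZ.h.
    #include <cassert>

    namespace tdc {
    const int ZERO_PLUS      = 0x800, ZERO_MINUS      = 0x400;
    const int NORMAL_PLUS    = 0x200, NORMAL_MINUS    = 0x100;
    const int SUBNORMAL_PLUS = 0x080, SUBNORMAL_MINUS = 0x040;
    const int INFINITY_PLUS  = 0x020, INFINITY_MINUS  = 0x010;
    // NaN classes occupy the low four bits (0x008..0x001).
    const int PLUS = ZERO_PLUS | NORMAL_PLUS | SUBNORMAL_PLUS |
                     INFINITY_PLUS | 0x008 | 0x002; // every "+" class bit
    } // namespace tdc

    int main() {
      using namespace tdc;
      // Rule 1: fcmp ogt X, 0.0 maps to the positive nonzero classes.
      int Mask = NORMAL_PLUS | SUBNORMAL_PLUS | INFINITY_PLUS;
      // Rule 4: tdc (fabs X), M -> tdc X, M' where each "+" bit also sets
      // its "-" neighbor, because fabs erases the sign distinction.
      Mask &= PLUS;
      Mask |= Mask >> 1;
      assert(Mask == (NORMAL_PLUS | NORMAL_MINUS | SUBNORMAL_PLUS |
                      SUBNORMAL_MINUS | INFINITY_PLUS | INFINITY_MINUS));
      // Rule 9: or i1 (tdc X, M1), (tdc X, M2) -> tdc X, (M1 | M2);
      // "fabs(X) > 0 or X == 0" covers every class except the NaNs.
      assert((Mask | ZERO_PLUS | ZERO_MINUS) == 0xff0);
      return 0;
    }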
+
+#include "SystemZ.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include <deque>
+#include <set>
+
+using namespace llvm;
+
+namespace llvm {
+ void initializeSystemZTDCPassPass(PassRegistry&);
+}
+
+namespace {
+
+class SystemZTDCPass : public FunctionPass {
+public:
+ static char ID;
+ SystemZTDCPass() : FunctionPass(ID) {
+ initializeSystemZTDCPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override;
+private:
+ // Maps seen instructions that can be mapped to a TDC; values are
+ // (TDC operand, TDC mask, worthy flag) triples.
+ MapVector<Instruction *, std::tuple<Value *, int, bool>> ConvertedInsts;
+ // The queue of and/or/xor i1 instructions to be potentially folded.
+ std::vector<BinaryOperator *> LogicOpsWorklist;
+ // Instructions matched while folding, to be removed at the end if unused.
+ std::set<Instruction *> PossibleJunk;
+
+ // Tries to convert a fcmp instruction.
+ void convertFCmp(CmpInst &I);
+
+ // Tries to convert an icmp instruction.
+ void convertICmp(CmpInst &I);
+
+ // Tries to convert an i1 and/or/xor instruction both of whose operands
+ // have already been converted.
+ void convertLogicOp(BinaryOperator &I);
+
+ // Marks an instruction as converted - adds it to ConvertedInsts and adds
+ // any and/or/xor i1 users to the queue.
+ void converted(Instruction *I, Value *V, int Mask, bool Worthy) {
+ ConvertedInsts[I] = std::make_tuple(V, Mask, Worthy);
+ auto &M = *I->getFunction()->getParent();
+ auto &Ctx = M.getContext();
+ for (auto *U : I->users()) {
+ auto *LI = dyn_cast<BinaryOperator>(U);
+ if (LI && LI->getType() == Type::getInt1Ty(Ctx) &&
+ (LI->getOpcode() == Instruction::And ||
+ LI->getOpcode() == Instruction::Or ||
+ LI->getOpcode() == Instruction::Xor)) {
+ LogicOpsWorklist.push_back(LI);
+ }
+ }
+ }
+};
+
+} // end anonymous namespace
+
+char SystemZTDCPass::ID = 0;
+INITIALIZE_PASS(SystemZTDCPass, "systemz-tdc",
+ "SystemZ Test Data Class optimization", false, false)
+
+FunctionPass *llvm::createSystemZTDCPass() {
+ return new SystemZTDCPass();
+}
+
+void SystemZTDCPass::convertFCmp(CmpInst &I) {
+ Value *Op0 = I.getOperand(0);
+ auto *Const = dyn_cast<ConstantFP>(I.getOperand(1));
+ auto Pred = I.getPredicate();
+ // Only comparisons with consts are interesting.
+ if (!Const)
+ return;
+ // Compute the smallest normal number (and its negation).
+ auto &Sem = Op0->getType()->getFltSemantics();
+ APFloat Smallest = APFloat::getSmallestNormalized(Sem);
+ APFloat NegSmallest = Smallest;
+ NegSmallest.changeSign();
+ // Check if Const is one of our recognized consts.
+ int WhichConst;
+ if (Const->isZero()) {
+ // All comparisons with 0 can be converted.
+ WhichConst = 0;
+ } else if (Const->isInfinity()) {
+ // Likewise for infinities.
+ WhichConst = Const->isNegative() ? 2 : 1;
+ } else if (Const->isExactlyValue(Smallest)) {
+ // For Smallest, we cannot do EQ separately from GT.
+ if ((Pred & CmpInst::FCMP_OGE) != CmpInst::FCMP_OGE &&
+ (Pred & CmpInst::FCMP_OGE) != 0)
+ return;
+ WhichConst = 3;
+ } else if (Const->isExactlyValue(NegSmallest)) {
+ // Likewise for NegSmallest, we cannot do EQ separately from LT.
+ if ((Pred & CmpInst::FCMP_OLE) != CmpInst::FCMP_OLE &&
+ (Pred & CmpInst::FCMP_OLE) != 0)
+ return;
+ WhichConst = 4;
+ } else {
+ // Not one of our special constants.
+ return;
+ }
+ // Partial masks to use for EQ, GT, LT, UN comparisons, respectively.
+ static const int Masks[][4] = {
+ { // 0
+ SystemZ::TDCMASK_ZERO, // eq
+ SystemZ::TDCMASK_POSITIVE, // gt
+ SystemZ::TDCMASK_NEGATIVE, // lt
+ SystemZ::TDCMASK_NAN, // un
+ },
+ { // inf
+ SystemZ::TDCMASK_INFINITY_PLUS, // eq
+ 0, // gt
+ (SystemZ::TDCMASK_ZERO |
+ SystemZ::TDCMASK_NEGATIVE |
+ SystemZ::TDCMASK_NORMAL_PLUS |
+ SystemZ::TDCMASK_SUBNORMAL_PLUS), // lt
+ SystemZ::TDCMASK_NAN, // un
+ },
+ { // -inf
+ SystemZ::TDCMASK_INFINITY_MINUS, // eq
+ (SystemZ::TDCMASK_ZERO |
+ SystemZ::TDCMASK_POSITIVE |
+ SystemZ::TDCMASK_NORMAL_MINUS |
+ SystemZ::TDCMASK_SUBNORMAL_MINUS), // gt
+ 0, // lt
+ SystemZ::TDCMASK_NAN, // un
+ },
+ { // minnorm
+ 0, // eq (unsupported)
+ (SystemZ::TDCMASK_NORMAL_PLUS |
+ SystemZ::TDCMASK_INFINITY_PLUS), // gt (actually ge)
+ (SystemZ::TDCMASK_ZERO |
+ SystemZ::TDCMASK_NEGATIVE |
+ SystemZ::TDCMASK_SUBNORMAL_PLUS), // lt
+ SystemZ::TDCMASK_NAN, // un
+ },
+ { // -minnorm
+ 0, // eq (unsupported)
+ (SystemZ::TDCMASK_ZERO |
+ SystemZ::TDCMASK_POSITIVE |
+ SystemZ::TDCMASK_SUBNORMAL_MINUS), // gt
+ (SystemZ::TDCMASK_NORMAL_MINUS |
+ SystemZ::TDCMASK_INFINITY_MINUS), // lt (actually le)
+ SystemZ::TDCMASK_NAN, // un
+ }
+ };
+ // Construct the mask as a combination of the partial masks.
+ int Mask = 0;
+ if (Pred & CmpInst::FCMP_OEQ)
+ Mask |= Masks[WhichConst][0];
+ if (Pred & CmpInst::FCMP_OGT)
+ Mask |= Masks[WhichConst][1];
+ if (Pred & CmpInst::FCMP_OLT)
+ Mask |= Masks[WhichConst][2];
+ if (Pred & CmpInst::FCMP_UNO)
+ Mask |= Masks[WhichConst][3];
+ // A lone fcmp is unworthy of tdc conversion on its own, but may become
+ // worthy if combined with fabs.
+ bool Worthy = false;
+ if (CallInst *CI = dyn_cast<CallInst>(Op0)) {
+ Function *F = CI->getCalledFunction();
+ if (F && F->getIntrinsicID() == Intrinsic::fabs) {
+ // Fold with fabs - adjust the mask appropriately.
+ Mask &= SystemZ::TDCMASK_PLUS;
+ Mask |= Mask >> 1;
+ Op0 = CI->getArgOperand(0);
+ // A combination of fcmp with fabs is a win, unless the constant
+ // involved is 0 (which is handled by later passes).
+ Worthy = WhichConst != 0;
+ PossibleJunk.insert(CI);
+ }
+ }
+ converted(&I, Op0, Mask, Worthy);
+}
+
+void SystemZTDCPass::convertICmp(CmpInst &I) {
+ Value *Op0 = I.getOperand(0);
+ auto *Const = dyn_cast<ConstantInt>(I.getOperand(1));
+ auto Pred = I.getPredicate();
+ // All our icmp rules involve comparisons with consts.
+ if (!Const)
+ return;
+ if (auto *Cast = dyn_cast<BitCastInst>(Op0)) {
+ // Check for icmp+bitcast used for signbit.
+ if (!Cast->getSrcTy()->isFloatTy() &&
+ !Cast->getSrcTy()->isDoubleTy() &&
+ !Cast->getSrcTy()->isFP128Ty())
+ return;
+ Value *V = Cast->getOperand(0);
+ int Mask;
+ if (Pred == CmpInst::ICMP_SLT && Const->isZero()) {
+ // icmp slt (bitcast X), 0 - set if sign bit true
+ Mask = SystemZ::TDCMASK_MINUS;
+ } else if (Pred == CmpInst::ICMP_SGT && Const->isMinusOne()) {
+ // icmp sgt (bitcast X), -1 - set if sign bit false
+ Mask = SystemZ::TDCMASK_PLUS;
+ } else {
+ // Not a sign bit check.
+ return;
+ }
+ PossibleJunk.insert(Cast);
+ converted(&I, V, Mask, true);
+ } else if (auto *CI = dyn_cast<CallInst>(Op0)) {
+ // Check if this is a pre-existing call of our tdc intrinsic.
+ Function *F = CI->getCalledFunction();
+ if (!F || F->getIntrinsicID() != Intrinsic::s390_tdc)
+ return;
+ if (!Const->isZero())
+ return;
+ Value *V = CI->getArgOperand(0);
+ auto *MaskC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
+ // Bail if the mask is not a constant.
+ if (!MaskC)
+ return;
+ int Mask = MaskC->getZExtValue();
+ Mask &= SystemZ::TDCMASK_ALL;
+ if (Pred == CmpInst::ICMP_NE) {
+ // icmp ne (call llvm.s390.tdc(...)), 0 -> simple TDC
+ } else if (Pred == CmpInst::ICMP_EQ) {
+ // icmp eq (call llvm.s390.tdc(...)), 0 -> TDC with inverted mask
+ Mask ^= SystemZ::TDCMASK_ALL;
+ } else {
+ // An unknown comparison - ignore.
+ return;
+ }
+ PossibleJunk.insert(CI);
+ converted(&I, V, Mask, false);
+ }
+}
+
+void SystemZTDCPass::convertLogicOp(BinaryOperator &I) {
+ Value *Op0, *Op1;
+ int Mask0, Mask1;
+ bool Worthy0, Worthy1;
+ std::tie(Op0, Mask0, Worthy0) = ConvertedInsts[cast<Instruction>(I.getOperand(0))];
+ std::tie(Op1, Mask1, Worthy1) = ConvertedInsts[cast<Instruction>(I.getOperand(1))];
+ if (Op0 != Op1)
+ return;
+ int Mask;
+ switch (I.getOpcode()) {
+ case Instruction::And:
+ Mask = Mask0 & Mask1;
+ break;
+ case Instruction::Or:
+ Mask = Mask0 | Mask1;
+ break;
+ case Instruction::Xor:
+ Mask = Mask0 ^ Mask1;
+ break;
+ default:
+ llvm_unreachable("Unknown op in convertLogicOp");
+ }
+ converted(&I, Op0, Mask, true);
+}
+
+bool SystemZTDCPass::runOnFunction(Function &F) {
+ ConvertedInsts.clear();
+ LogicOpsWorklist.clear();
+ PossibleJunk.clear();
+
+ // Look for icmp+fcmp instructions.
+ for (auto &I : instructions(F)) {
+ if (I.getOpcode() == Instruction::FCmp)
+ convertFCmp(cast<CmpInst>(I));
+ else if (I.getOpcode() == Instruction::ICmp)
+ convertICmp(cast<CmpInst>(I));
+ }
+
+ // If none were found, bail out early.
+ if (ConvertedInsts.empty())
+ return false;
+
+ // Process the queue of logic instructions.
+ while (!LogicOpsWorklist.empty()) {
+ BinaryOperator *Op = LogicOpsWorklist.back();
+ LogicOpsWorklist.pop_back();
+ // If both operands mapped, and the instruction itself not yet mapped,
+ // convert it.
+ if (ConvertedInsts.count(dyn_cast<Instruction>(Op->getOperand(0))) &&
+ ConvertedInsts.count(dyn_cast<Instruction>(Op->getOperand(1))) &&
+ !ConvertedInsts.count(Op))
+ convertLogicOp(*Op);
+ }
+
+ // Time to actually replace the instructions. Do it in the reverse order
+ // of finding them, since there's a good chance the earlier ones will be
+ // unused (due to being folded into later ones).
+ Module &M = *F.getParent();
+ auto &Ctx = M.getContext();
+ Value *Zero32 = ConstantInt::get(Type::getInt32Ty(Ctx), 0);
+ bool MadeChange = false;
+ for (auto &It : reverse(ConvertedInsts)) {
+ Instruction *I = It.first;
+ Value *V;
+ int Mask;
+ bool Worthy;
+ std::tie(V, Mask, Worthy) = It.second;
+ if (!I->user_empty()) {
+ // If used and unworthy of conversion, skip it.
+ if (!Worthy)
+ continue;
+ // Call the intrinsic, compare result with 0.
+ Value *TDCFunc = Intrinsic::getDeclaration(&M, Intrinsic::s390_tdc,
+ V->getType());
+ IRBuilder<> IRB(I);
+ Value *MaskVal = ConstantInt::get(Type::getInt64Ty(Ctx), Mask);
+ Instruction *TDC = IRB.CreateCall(TDCFunc, {V, MaskVal});
+ Value *ICmp = IRB.CreateICmp(CmpInst::ICMP_NE, TDC, Zero32);
+ I->replaceAllUsesWith(ICmp);
+ }
+ // If unused, or used and converted, remove it.
+ I->eraseFromParent();
+ MadeChange = true;
+ }
+
+ if (!MadeChange)
+ return false;
+
+ // We've actually done something - now clear misc accumulated junk (fabs,
+ // bitcast).
+ for (auto *I : PossibleJunk)
+ if (I->user_empty())
+ I->eraseFromParent();
+
+ return true;
+}
diff --git a/lib/Target/SystemZ/SystemZTargetMachine.cpp b/lib/Target/SystemZ/SystemZTargetMachine.cpp
index f305e85f6cfe..85a3f6f4a8be 100644
--- a/lib/Target/SystemZ/SystemZTargetMachine.cpp
+++ b/lib/Target/SystemZ/SystemZTargetMachine.cpp
@@ -10,6 +10,7 @@
#include "SystemZTargetMachine.h"
#include "SystemZTargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
@@ -79,13 +80,22 @@ static std::string computeDataLayout(const Triple &TT, StringRef CPU,
return Ret;
}
+static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
+ // Static code is suitable for use in a dynamic executable; there is no
+ // separate DynamicNoPIC model.
+ if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
+ return Reloc::Static;
+ return *RM;
+}
+
SystemZTargetMachine::SystemZTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: LLVMTargetMachine(T, computeDataLayout(TT, CPU, FS), TT, CPU, FS, Options,
- RM, CM, OL),
+ getEffectiveRelocModel(RM), CM, OL),
TLOF(make_unique<TargetLoweringObjectFileELF>()),
Subtarget(TT, CPU, FS, *this) {
initAsmInfo();
@@ -112,6 +122,9 @@ public:
} // end anonymous namespace
void SystemZPassConfig::addIRPasses() {
+ if (getOptLevel() != CodeGenOpt::None)
+ addPass(createSystemZTDCPass());
+
TargetPassConfig::addIRPasses();
}
@@ -125,8 +138,7 @@ bool SystemZPassConfig::addInstSelector() {
}
void SystemZPassConfig::addPreSched2() {
- if (getOptLevel() != CodeGenOpt::None &&
- getSystemZTargetMachine().getSubtargetImpl()->hasLoadStoreOnCond())
+ if (getOptLevel() != CodeGenOpt::None)
addPass(&IfConverterID);
}
diff --git a/lib/Target/SystemZ/SystemZTargetMachine.h b/lib/Target/SystemZ/SystemZTargetMachine.h
index 1a8f1f7f3aaa..69cf9bc6e525 100644
--- a/lib/Target/SystemZ/SystemZTargetMachine.h
+++ b/lib/Target/SystemZ/SystemZTargetMachine.h
@@ -29,7 +29,7 @@ class SystemZTargetMachine : public LLVMTargetMachine {
public:
SystemZTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
~SystemZTargetMachine() override;
diff --git a/lib/Target/SystemZ/TargetInfo/Makefile b/lib/Target/SystemZ/TargetInfo/Makefile
deleted file mode 100644
index 0be80eb4e6ad..000000000000
--- a/lib/Target/SystemZ/TargetInfo/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/SystemZ/TargetInfo/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMSystemZInfo
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/Target.cpp b/lib/Target/Target.cpp
index 1b74e8cba4fe..5d1616d03779 100644
--- a/lib/Target/Target.cpp
+++ b/lib/Target/Target.cpp
@@ -24,6 +24,9 @@
using namespace llvm;
+// Avoid including "llvm-c/Core.h" to keep compile time down; fwd-declare this instead.
+extern "C" LLVMContextRef LLVMGetGlobalContext(void);
+
inline TargetLibraryInfoImpl *unwrap(LLVMTargetLibraryInfoRef P) {
return reinterpret_cast<TargetLibraryInfoImpl*>(P);
}
@@ -42,11 +45,20 @@ void LLVMInitializeTarget(LLVMPassRegistryRef R) {
initializeTarget(*unwrap(R));
}
+LLVMTargetDataRef LLVMGetModuleDataLayout(LLVMModuleRef M) {
+ return wrap(&unwrap(M)->getDataLayout());
+}
+
+void LLVMSetModuleDataLayout(LLVMModuleRef M, LLVMTargetDataRef DL) {
+ unwrap(M)->setDataLayout(*unwrap(DL));
+}
+
LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep) {
return wrap(new DataLayout(StringRep));
}
-void LLVMAddTargetData(LLVMTargetDataRef TD, LLVMPassManagerRef PM) {
+void LLVMDisposeTargetData(LLVMTargetDataRef TD) {
+ delete unwrap(TD);
}
void LLVMAddTargetLibraryInfo(LLVMTargetLibraryInfoRef TLI,
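The new pair replaces the retired pass-based LLVMAddTargetData flow: a data layout is now attached to, and read back from, the module itself. A minimal usage sketch against the C API, compiled as C++ here for consistency (the layout string is an arbitrary example):

    // Sketch: using the module-level data layout accessors added above.
    #include "llvm-c/Core.h"
    #include "llvm-c/Target.h"

    int main() {
      LLVMModuleRef M = LLVMModuleCreateWithName("demo");
      LLVMTargetDataRef DL = LLVMCreateTargetData("e-m:e-i64:64-n32:64");
      // Copies the layout into the module.
      LLVMSetModuleDataLayout(M, DL);
      // Returns a pointer to the module's own copy; do not dispose it.
      LLVMTargetDataRef Borrowed = LLVMGetModuleDataLayout(M);
      (void)Borrowed;
      LLVMDisposeTargetData(DL); // our handle still needs explicit disposal
      LLVMDisposeModule(M);
      return 0;
    }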
@@ -72,11 +84,11 @@ unsigned LLVMPointerSizeForAS(LLVMTargetDataRef TD, unsigned AS) {
}
LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef TD) {
- return wrap(unwrap(TD)->getIntPtrType(getGlobalContext()));
+ return wrap(unwrap(TD)->getIntPtrType(*unwrap(LLVMGetGlobalContext())));
}
LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef TD, unsigned AS) {
- return wrap(unwrap(TD)->getIntPtrType(getGlobalContext(), AS));
+ return wrap(unwrap(TD)->getIntPtrType(*unwrap(LLVMGetGlobalContext()), AS));
}
LLVMTypeRef LLVMIntPtrTypeInContext(LLVMContextRef C, LLVMTargetDataRef TD) {
@@ -127,7 +139,3 @@ unsigned long long LLVMOffsetOfElement(LLVMTargetDataRef TD, LLVMTypeRef StructT
StructType *STy = unwrap<StructType>(StructTy);
return unwrap(TD)->getStructLayout(STy)->getElementOffset(Element);
}
-
-void LLVMDisposeTargetData(LLVMTargetDataRef TD) {
- delete unwrap(TD);
-}
diff --git a/lib/Target/TargetLoweringObjectFile.cpp b/lib/Target/TargetLoweringObjectFile.cpp
index a0b0d8f24046..f863f429f43c 100644
--- a/lib/Target/TargetLoweringObjectFile.cpp
+++ b/lib/Target/TargetLoweringObjectFile.cpp
@@ -43,7 +43,7 @@ using namespace llvm;
void TargetLoweringObjectFile::Initialize(MCContext &ctx,
const TargetMachine &TM) {
Ctx = &ctx;
- InitMCObjectFileInfo(TM.getTargetTriple(), TM.getRelocationModel(),
+ InitMCObjectFileInfo(TM.getTargetTriple(), TM.isPositionIndependent(),
TM.getCodeModel(), *Ctx);
}
@@ -173,7 +173,7 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV,
// If the global is required to have a unique address, it can't be put
// into a mergable section: just drop it into the general read-only
// section instead.
- if (!GVar->hasUnnamedAddr())
+ if (!GVar->hasGlobalUnnamedAddr())
return SectionKind::getReadOnly();
// If initializer is a null-terminated string, put it in a "cstring"
@@ -202,6 +202,7 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV,
case 4: return SectionKind::getMergeableConst4();
case 8: return SectionKind::getMergeableConst8();
case 16: return SectionKind::getMergeableConst16();
+ case 32: return SectionKind::getMergeableConst32();
default:
return SectionKind::getReadOnly();
}
@@ -221,16 +222,7 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV,
}
}
- // Okay, this isn't a constant. If the initializer for the global is going
- // to require a runtime relocation by the dynamic linker, put it into a more
- // specific section to improve startup time of the app. This coalesces these
- // globals together onto fewer pages, improving the locality of the dynamic
- // linker.
- if (ReloModel == Reloc::Static)
- return SectionKind::getData();
-
- if (C->needsRelocation())
- return SectionKind::getData();
+ // Okay, this isn't a constant.
return SectionKind::getData();
}
@@ -252,8 +244,10 @@ TargetLoweringObjectFile::SectionForGlobal(const GlobalValue *GV,
MCSection *TargetLoweringObjectFile::getSectionForJumpTable(
const Function &F, Mangler &Mang, const TargetMachine &TM) const {
+ unsigned Align = 0;
return getSectionForConstant(F.getParent()->getDataLayout(),
- SectionKind::getReadOnly(), /*C=*/nullptr);
+ SectionKind::getReadOnly(), /*C=*/nullptr,
+ Align);
}
bool TargetLoweringObjectFile::shouldPutJumpTableInFunctionSection(
@@ -277,7 +271,8 @@ bool TargetLoweringObjectFile::shouldPutJumpTableInFunctionSection(
/// Given a mergable constant with the specified size and relocation
/// information, return a section that it should be placed in.
MCSection *TargetLoweringObjectFile::getSectionForConstant(
- const DataLayout &DL, SectionKind Kind, const Constant *C) const {
+ const DataLayout &DL, SectionKind Kind, const Constant *C,
+ unsigned &Align) const {
if (Kind.isReadOnly() && ReadOnlySection != nullptr)
return ReadOnlySection;
diff --git a/lib/Target/TargetMachine.cpp b/lib/Target/TargetMachine.cpp
index 850c93cb21b8..82c68505c4e1 100644
--- a/lib/Target/TargetMachine.cpp
+++ b/lib/Target/TargetMachine.cpp
@@ -18,21 +18,23 @@
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/SectionKind.h"
-#include "llvm/IR/LegacyPassManager.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
+cl::opt<bool> EnableIPRA("enable-ipra", cl::init(false), cl::Hidden,
+ cl::desc("Enable interprocedural register allocation "
+ "to reduce load/store at procedure calls."));
+
//---------------------------------------------------------------------------
// TargetMachine Class
//
@@ -41,18 +43,23 @@ TargetMachine::TargetMachine(const Target &T, StringRef DataLayoutString,
const Triple &TT, StringRef CPU, StringRef FS,
const TargetOptions &Options)
: TheTarget(T), DL(DataLayoutString), TargetTriple(TT), TargetCPU(CPU),
- TargetFS(FS), CodeGenInfo(nullptr), AsmInfo(nullptr), MRI(nullptr),
- MII(nullptr), STI(nullptr), RequireStructuredCFG(false),
- Options(Options) {}
+ TargetFS(FS), AsmInfo(nullptr), MRI(nullptr), MII(nullptr), STI(nullptr),
+ RequireStructuredCFG(false), Options(Options) {
+ if (EnableIPRA.getNumOccurrences())
+ this->Options.EnableIPRA = EnableIPRA;
+}
TargetMachine::~TargetMachine() {
- delete CodeGenInfo;
delete AsmInfo;
delete MRI;
delete MII;
delete STI;
}
+bool TargetMachine::isPositionIndependent() const {
+ return getRelocationModel() == Reloc::PIC_;
+}
+
/// \brief Reset the target options based on the function's attributes.
// FIXME: This function needs to go away for a number of reasons:
// a) global state on the TargetMachine is terrible in general,
@@ -72,21 +79,13 @@ void TargetMachine::resetTargetOptions(const Function &F) const {
RESET_OPTION(NoNaNsFPMath, "no-nans-fp-math");
}
-/// getRelocationModel - Returns the code generation relocation model. The
-/// choices are static, PIC, and dynamic-no-pic, and target default.
-Reloc::Model TargetMachine::getRelocationModel() const {
- if (!CodeGenInfo)
- return Reloc::Default;
- return CodeGenInfo->getRelocationModel();
-}
+/// Returns the code generation relocation model. The choices are static, PIC,
+/// and dynamic-no-pic.
+Reloc::Model TargetMachine::getRelocationModel() const { return RM; }
-/// getCodeModel - Returns the code model. The choices are small, kernel,
-/// medium, large, and target default.
-CodeModel::Model TargetMachine::getCodeModel() const {
- if (!CodeGenInfo)
- return CodeModel::Default;
- return CodeGenInfo->getCodeModel();
-}
+/// Returns the code model. The choices are small, kernel, medium, large, and
+/// target default.
+CodeModel::Model TargetMachine::getCodeModel() const { return CMModel; }
/// Get the IR-specified TLS model for Var.
static TLSModel::Model getSelectedTLSModel(const GlobalValue *GV) {
@@ -106,23 +105,65 @@ static TLSModel::Model getSelectedTLSModel(const GlobalValue *GV) {
llvm_unreachable("invalid TLS model");
}
+// FIXME: make this a proper option
+static bool CanUseCopyRelocWithPIE = false;
+
+bool TargetMachine::shouldAssumeDSOLocal(const Module &M,
+ const GlobalValue *GV) const {
+ Reloc::Model RM = getRelocationModel();
+ const Triple &TT = getTargetTriple();
+
+ // DLLImport explicitly marks the GV as external.
+ if (GV && GV->hasDLLImportStorageClass())
+ return false;
+
+ // Every other GV is local on COFF
+ if (TT.isOSBinFormatCOFF())
+ return true;
+
+ if (GV && (GV->hasLocalLinkage() || !GV->hasDefaultVisibility()))
+ return true;
+
+ if (TT.isOSBinFormatMachO()) {
+ if (RM == Reloc::Static)
+ return true;
+ return GV && GV->isStrongDefinitionForLinker();
+ }
+
+ assert(TT.isOSBinFormatELF());
+ assert(RM != Reloc::DynamicNoPIC);
+
+ bool IsExecutable =
+ RM == Reloc::Static || M.getPIELevel() != PIELevel::Default;
+ if (IsExecutable) {
+ // If the symbol is defined, it cannot be preempted.
+ if (GV && !GV->isDeclarationForLinker())
+ return true;
+
+ bool IsTLS = GV && GV->isThreadLocal();
+ // Check if we can use copy relocations.
+ if (!IsTLS && (RM == Reloc::Static || CanUseCopyRelocWithPIE))
+ return true;
+ }
+
+ // ELF supports preemption of other symbols.
+ return false;
+}
+
TLSModel::Model TargetMachine::getTLSModel(const GlobalValue *GV) const {
- bool isLocal = GV->hasLocalLinkage();
- bool isDeclaration = GV->isDeclaration();
- bool isPIC = getRelocationModel() == Reloc::PIC_;
- bool isPIE = Options.PositionIndependentExecutable;
- // FIXME: what should we do for protected and internal visibility?
- // For variables, is internal different from hidden?
- bool isHidden = GV->hasHiddenVisibility();
+ bool IsPIE = GV->getParent()->getPIELevel() != PIELevel::Default;
+ Reloc::Model RM = getRelocationModel();
+ bool IsSharedLibrary = RM == Reloc::PIC_ && !IsPIE;
+ bool IsLocal = shouldAssumeDSOLocal(*GV->getParent(), GV);
TLSModel::Model Model;
- if (isPIC && !isPIE) {
- if (isLocal || isHidden)
+ if (IsSharedLibrary) {
+ if (IsLocal)
Model = TLSModel::LocalDynamic;
else
Model = TLSModel::GeneralDynamic;
} else {
- if (!isDeclaration || isHidden)
+ if (IsLocal)
Model = TLSModel::LocalExec;
else
Model = TLSModel::InitialExec;
@@ -136,18 +177,10 @@ TLSModel::Model TargetMachine::getTLSModel(const GlobalValue *GV) const {
return Model;
}
-/// getOptLevel - Returns the optimization level: None, Less,
-/// Default, or Aggressive.
-CodeGenOpt::Level TargetMachine::getOptLevel() const {
- if (!CodeGenInfo)
- return CodeGenOpt::Default;
- return CodeGenInfo->getOptLevel();
-}
+/// Returns the optimization level: None, Less, Default, or Aggressive.
+CodeGenOpt::Level TargetMachine::getOptLevel() const { return OptLevel; }
-void TargetMachine::setOptLevel(CodeGenOpt::Level Level) const {
- if (CodeGenInfo)
- CodeGenInfo->setOptLevel(Level);
-}
+void TargetMachine::setOptLevel(CodeGenOpt::Level Level) { OptLevel = Level; }
TargetIRAnalysis TargetMachine::getTargetIRAnalysis() {
return TargetIRAnalysis([this](const Function &F) {
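The rewritten getTLSModel above reduces to a small decision table: PIC code that is not a PIE is treated as a shared library and gets the *Dynamic models, everything else gets the *Exec models, and dso-local symbols take the cheaper variant of each pair. A standalone model of that table (the IR-level tls model attribute, applied afterwards through getSelectedTLSModel, is omitted):

    // Sketch of the TLS model selection implemented in getTLSModel above.
    #include <cassert>

    enum class TLS { GeneralDynamic, LocalDynamic, InitialExec, LocalExec };

    TLS pickTLSModel(bool IsPIC, bool IsPIE, bool IsDSOLocal) {
      bool IsSharedLibrary = IsPIC && !IsPIE;
      if (IsSharedLibrary)
        return IsDSOLocal ? TLS::LocalDynamic : TLS::GeneralDynamic;
      return IsDSOLocal ? TLS::LocalExec : TLS::InitialExec;
    }

    int main() {
      // -fPIC shared object, preemptible symbol: fully general access.
      assert(pickTLSModel(true, false, false) == TLS::GeneralDynamic);
      // Static or PIE executable, dso-local symbol: direct TP offset.
      assert(pickTLSModel(false, false, true) == TLS::LocalExec);
      return 0;
    }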
diff --git a/lib/Target/TargetMachineC.cpp b/lib/Target/TargetMachineC.cpp
index f82566c37baa..02836eaf08e5 100644
--- a/lib/Target/TargetMachineC.cpp
+++ b/lib/Target/TargetMachineC.cpp
@@ -18,7 +18,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/LegacyPassManager.h"
-#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CodeGenCWrappers.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/Host.h"
@@ -32,15 +32,6 @@
using namespace llvm;
-namespace llvm {
-// Friend to the TargetMachine, access legacy API that are made private in C++
-struct C_API_PRIVATE_ACCESS {
- static const DataLayout &getDataLayout(const TargetMachine &T) {
- return T.getDataLayout();
- }
-};
-}
-
static TargetMachine *unwrap(LLVMTargetMachineRef P) {
return reinterpret_cast<TargetMachine *>(P);
}
@@ -114,7 +105,7 @@ LLVMTargetMachineRef LLVMCreateTargetMachine(LLVMTargetRef T,
const char* Triple, const char* CPU, const char* Features,
LLVMCodeGenOptLevel Level, LLVMRelocMode Reloc,
LLVMCodeModel CodeModel) {
- Reloc::Model RM;
+ Optional<Reloc::Model> RM;
switch (Reloc){
case LLVMRelocStatic:
RM = Reloc::Static;
@@ -126,7 +117,6 @@ LLVMTargetMachineRef LLVMCreateTargetMachine(LLVMTargetRef T,
RM = Reloc::DynamicNoPIC;
break;
default:
- RM = Reloc::Default;
break;
}
@@ -175,16 +165,15 @@ char* LLVMGetTargetMachineFeatureString(LLVMTargetMachineRef T) {
return strdup(StringRep.c_str());
}
-/** Deprecated: use LLVMGetDataLayout(LLVMModuleRef M) instead. */
-LLVMTargetDataRef LLVMGetTargetMachineData(LLVMTargetMachineRef T) {
- return wrap(&C_API_PRIVATE_ACCESS::getDataLayout(*unwrap(T)));
-}
-
void LLVMSetTargetMachineAsmVerbosity(LLVMTargetMachineRef T,
LLVMBool VerboseAsm) {
unwrap(T)->Options.MCOptions.AsmVerbose = VerboseAsm;
}
+LLVMTargetDataRef LLVMCreateTargetDataLayout(LLVMTargetMachineRef T) {
+ return wrap(new DataLayout(unwrap(T)->createDataLayout()));
+}
+
static LLVMBool LLVMTargetMachineEmit(LLVMTargetMachineRef T, LLVMModuleRef M,
raw_pwrite_stream &OS,
LLVMCodeGenFileType codegen,
diff --git a/lib/Target/TargetRecip.cpp b/lib/Target/TargetRecip.cpp
index d41b6436928b..183fa5062eab 100644
--- a/lib/Target/TargetRecip.cpp
+++ b/lib/Target/TargetRecip.cpp
@@ -14,11 +14,10 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/ADT/StringRef.h"
+#include "llvm/Target/TargetRecip.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetRecip.h"
-#include <map>
using namespace llvm;
@@ -157,9 +156,10 @@ void TargetRecip::parseIndividualParams(const std::vector<std::string> &Args) {
// If the precision was not specified, the double entry is also initialized.
if (Val.back() != 'f' && Val.back() != 'd') {
- RecipMap[Val.str() + 'd'].Enabled = !IsDisabled;
+ RecipParams &Params = RecipMap[Val.str() + 'd'];
+ Params.Enabled = !IsDisabled;
if (!RefStepString.empty())
- RecipMap[Val.str() + 'd'].RefinementSteps = RefSteps;
+ Params.RefinementSteps = RefSteps;
}
}
}
diff --git a/lib/Target/TargetSubtargetInfo.cpp b/lib/Target/TargetSubtargetInfo.cpp
index 6a61fcdf0f86..c3f94a99b4ca 100644
--- a/lib/Target/TargetSubtargetInfo.cpp
+++ b/lib/Target/TargetSubtargetInfo.cpp
@@ -11,8 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Support/CommandLine.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
diff --git a/lib/Target/WebAssembly/CMakeLists.txt b/lib/Target/WebAssembly/CMakeLists.txt
index e5c68e598479..b2865f1a0f9e 100644
--- a/lib/Target/WebAssembly/CMakeLists.txt
+++ b/lib/Target/WebAssembly/CMakeLists.txt
@@ -10,11 +10,11 @@ tablegen(LLVM WebAssemblyGenSubtargetInfo.inc -gen-subtarget)
add_public_tablegen_target(WebAssemblyCommonTableGen)
add_llvm_target(WebAssemblyCodeGen
- Relooper.cpp
WebAssemblyArgumentMove.cpp
WebAssemblyAsmPrinter.cpp
WebAssemblyCFGStackify.cpp
WebAssemblyFastISel.cpp
+ WebAssemblyFixIrreducibleControlFlow.cpp
WebAssemblyFrameLowering.cpp
WebAssemblyISelDAGToDAG.cpp
WebAssemblyISelLowering.cpp
@@ -22,14 +22,17 @@ add_llvm_target(WebAssemblyCodeGen
WebAssemblyLowerBrUnless.cpp
WebAssemblyMachineFunctionInfo.cpp
WebAssemblyMCInstLower.cpp
+ WebAssemblyOptimizeLiveIntervals.cpp
WebAssemblyOptimizeReturned.cpp
WebAssemblyPeephole.cpp
- WebAssemblyPEI.cpp
+ WebAssemblyPrepareForLiveIntervals.cpp
WebAssemblyRegisterInfo.cpp
WebAssemblyRegColoring.cpp
WebAssemblyRegNumbering.cpp
WebAssemblyRegStackify.cpp
+ WebAssemblyReplacePhysRegs.cpp
WebAssemblySelectionDAGInfo.cpp
+ WebAssemblySetP2AlignOperands.cpp
WebAssemblyStoreResults.cpp
WebAssemblySubtarget.cpp
WebAssemblyTargetMachine.cpp
diff --git a/lib/Target/WebAssembly/Disassembler/Makefile b/lib/Target/WebAssembly/Disassembler/Makefile
deleted file mode 100644
index bcd36ba6f01f..000000000000
--- a/lib/Target/WebAssembly/Disassembler/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===-- lib/Target/WebAssembly/Disassembler/Makefile -------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMWebAssemblyDisassembler
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp b/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp
index 0143b10c0ab1..c0355aef0b35 100644
--- a/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp
+++ b/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp
@@ -18,7 +18,7 @@
#include "WebAssembly.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
@@ -93,6 +93,7 @@ MCDisassembler::DecodeStatus WebAssemblyDisassembler::getInstruction(
const MCOperandInfo &Info = Desc.OpInfo[i];
switch (Info.OperandType) {
case MCOI::OPERAND_IMMEDIATE:
+ case WebAssembly::OPERAND_P2ALIGN:
case WebAssembly::OPERAND_BASIC_BLOCK: {
if (Pos + sizeof(uint64_t) > Bytes.size())
return MCDisassembler::Fail;
@@ -109,7 +110,8 @@ MCDisassembler::DecodeStatus WebAssemblyDisassembler::getInstruction(
MI.addOperand(MCOperand::createReg(Reg));
break;
}
- case WebAssembly::OPERAND_FPIMM: {
+ case WebAssembly::OPERAND_FP32IMM:
+ case WebAssembly::OPERAND_FP64IMM: {
// TODO: MC converts all floating point immediate operands to double.
// This is fine for numeric values, but may cause NaNs to change bits.
if (Pos + sizeof(uint64_t) > Bytes.size())
diff --git a/lib/Target/WebAssembly/InstPrinter/Makefile b/lib/Target/WebAssembly/InstPrinter/Makefile
deleted file mode 100644
index 87534379f796..000000000000
--- a/lib/Target/WebAssembly/InstPrinter/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/WebAssembly/AsmPrinter/Makefile ----------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMWebAssemblyAsmPrinter
-
-# Hack: we need to include 'main' wasm target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp b/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp
index 9a95150cb557..267d716dd1d0 100644
--- a/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp
+++ b/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp
@@ -110,14 +110,22 @@ void WebAssemblyInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
}
static std::string toString(const APFloat &FP) {
+ // Print NaNs with custom payloads specially.
+ if (FP.isNaN() &&
+ !FP.bitwiseIsEqual(APFloat::getQNaN(FP.getSemantics())) &&
+ !FP.bitwiseIsEqual(APFloat::getQNaN(FP.getSemantics(), /*Negative=*/true))) {
+ APInt AI = FP.bitcastToAPInt();
+ return
+ std::string(AI.isNegative() ? "-" : "") + "nan:0x" +
+ utohexstr(AI.getZExtValue() &
+ (AI.getBitWidth() == 32 ? INT64_C(0x007fffff) :
+ INT64_C(0x000fffffffffffff)),
+ /*LowerCase=*/true);
+ }
+
+ // Use C99's hexadecimal floating-point representation.
static const size_t BufBytes = 128;
char buf[BufBytes];
- if (FP.isNaN())
- assert((FP.bitwiseIsEqual(APFloat::getQNaN(FP.getSemantics())) ||
- FP.bitwiseIsEqual(
- APFloat::getQNaN(FP.getSemantics(), /*Negative=*/true))) &&
- "convertToHexString handles neither SNaN nor NaN payloads");
- // Use C99's hexadecimal floating-point representation.
auto Written = FP.convertToHexString(
buf, /*hexDigits=*/0, /*upperCase=*/false, APFloat::rmNearestTiesToEven);
(void)Written;
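For doubles, the payload mask above keeps the 52 trailing significand bits and the sign bit becomes a leading minus. A hedged standalone sketch of the same formatting, using memcpy in place of bitcastToAPInt (where std::nan places the requested payload is libc-dependent):

    // Sketch: print a double NaN as "nan:0x<payload>", mirroring the
    // printer's format. Assumes IEEE-754 binary64; 0x000fffffffffffff
    // keeps the 52 trailing significand bits, as in the code above.
    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    void printNaN(double D) {
      uint64_t Bits;
      std::memcpy(&Bits, &D, sizeof(Bits));
      bool Neg = Bits >> 63;
      uint64_t Payload = Bits & UINT64_C(0x000fffffffffffff);
      std::printf("%snan:0x%llx\n", Neg ? "-" : "",
                  (unsigned long long)Payload);
    }

    int main() {
      printNaN(std::nan("0x1234")); // e.g. "nan:0x1234" with glibc
      return 0;
    }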
@@ -137,11 +145,11 @@ void WebAssemblyInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
if (int(WAReg) >= 0)
printRegName(O, WAReg);
else if (OpNo >= MII.get(MI->getOpcode()).getNumDefs())
- O << "$pop" << (WAReg & INT32_MAX);
+ O << "$pop" << WebAssemblyFunctionInfo::getWARegStackId(WAReg);
else if (WAReg != WebAssemblyFunctionInfo::UnusedReg)
- O << "$push" << (WAReg & INT32_MAX);
+ O << "$push" << WebAssemblyFunctionInfo::getWARegStackId(WAReg);
else
- O << "$discard";
+ O << "$drop";
// Add a '=' suffix if this is a def.
if (OpNo < MII.get(MI->getOpcode()).getNumDefs())
O << '=';
@@ -157,10 +165,20 @@ void WebAssemblyInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
// control flow stack, and it may be nice to pretty-print.
O << Op.getImm();
} else if (Op.isFPImm()) {
- assert((OpNo < MII.get(MI->getOpcode()).getNumOperands() ||
- MII.get(MI->getOpcode()).TSFlags == 0) &&
+ const MCInstrDesc &Desc = MII.get(MI->getOpcode());
+ assert(OpNo < Desc.getNumOperands() &&
+ "Unexpected floating-point immediate as a non-fixed operand");
+ assert(Desc.TSFlags == 0 &&
"WebAssembly variable_ops floating point ops don't use TSFlags");
- O << toString(APFloat(Op.getFPImm()));
+ const MCOperandInfo &Info = Desc.OpInfo[OpNo];
+ if (Info.OperandType == WebAssembly::OPERAND_FP32IMM) {
+ // TODO: MC converts all floating point immediate operands to double.
+ // This is fine for numeric values, but may cause NaNs to change bits.
+ O << toString(APFloat(float(Op.getFPImm())));
+ } else {
+ assert(Info.OperandType == WebAssembly::OPERAND_FP64IMM);
+ O << toString(APFloat(Op.getFPImm()));
+ }
} else {
assert((OpNo < MII.get(MI->getOpcode()).getNumOperands() ||
(MII.get(MI->getOpcode()).TSFlags &
@@ -172,6 +190,16 @@ void WebAssemblyInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
}
}
+void
+WebAssemblyInstPrinter::printWebAssemblyP2AlignOperand(const MCInst *MI,
+ unsigned OpNo,
+ raw_ostream &O) {
+ int64_t Imm = MI->getOperand(OpNo).getImm();
+ if (Imm == WebAssembly::GetDefaultP2Align(MI->getOpcode()))
+ return;
+ O << ":p2align=" << Imm;
+}
+
const char *llvm::WebAssembly::TypeToString(MVT Ty) {
switch (Ty.SimpleTy) {
case MVT::i32:
diff --git a/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h b/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h
index cd6c59a41c33..07b0f914e447 100644
--- a/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h
+++ b/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h
@@ -15,8 +15,9 @@
#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_INSTPRINTER_WEBASSEMBLYINSTPRINTER_H
#define LLVM_LIB_TARGET_WEBASSEMBLY_INSTPRINTER_WEBASSEMBLYINSTPRINTER_H
-#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/MC/MCInstPrinter.h"
namespace llvm {
@@ -36,6 +37,8 @@ public:
// Used by tblegen code.
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printWebAssemblyP2AlignOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O);
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
diff --git a/lib/Target/WebAssembly/MCTargetDesc/Makefile b/lib/Target/WebAssembly/MCTargetDesc/Makefile
deleted file mode 100644
index 11dcb4ff6075..000000000000
--- a/lib/Target/WebAssembly/MCTargetDesc/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/WebAssembly/TargetDesc/Makefile ----------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMWebAssemblyDesc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp
index bba06f65e169..df6fb8968d56 100644
--- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp
+++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp
@@ -55,7 +55,8 @@ public:
bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {}
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override {}
bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
};
@@ -73,8 +74,10 @@ void WebAssemblyAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
unsigned DataSize, uint64_t Value,
bool IsPCRel) const {
const MCFixupKindInfo &Info = getFixupKindInfo(Fixup.getKind());
- unsigned NumBytes = RoundUpToAlignment(Info.TargetSize, 8);
- if (!Value)
+ assert(Info.Flags == 0 && "WebAssembly does not use MCFixupKindInfo flags");
+
+ unsigned NumBytes = (Info.TargetSize + 7) / 8;
+ if (Value == 0)
return; // Doesn't change encoding.
// Shift the value into position.
diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp
index 2bb58b33934e..2146f67959b8 100644
--- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp
+++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp
@@ -25,8 +25,8 @@ public:
WebAssemblyELFObjectWriter(bool Is64Bit, uint8_t OSABI);
protected:
- unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel) const override;
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
};
} // end anonymous namespace
@@ -35,7 +35,8 @@ WebAssemblyELFObjectWriter::WebAssemblyELFObjectWriter(bool Is64Bit,
: MCELFObjectTargetWriter(Is64Bit, OSABI, ELF::EM_WEBASSEMBLY,
/*HasRelocationAddend=*/false) {}
-unsigned WebAssemblyELFObjectWriter::GetRelocType(const MCValue &Target,
+unsigned WebAssemblyELFObjectWriter::getRelocType(MCContext &Ctx,
+ const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const {
// WebAssembly functions are not allocated in the address space. To resolve a
diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp
index 02c717a92101..d8c39216c53b 100644
--- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp
+++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp
@@ -15,7 +15,6 @@
#include "WebAssemblyMCAsmInfo.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/Support/CommandLine.h"
using namespace llvm;
#define DEBUG_TYPE "wasm-mc-asm-info"
@@ -48,4 +47,7 @@ WebAssemblyMCAsmInfo::WebAssemblyMCAsmInfo(const Triple &T) {
ExceptionsType = ExceptionHandling::None;
// TODO: UseIntegratedAssembler?
+
+ // WebAssembly's stack is never executable.
+ UsesNonexecutableStackSection = false;
}
diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
index f409bd77442c..23f8b3d0e827 100644
--- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
+++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
@@ -22,6 +22,7 @@
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/EndianStream.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -33,7 +34,6 @@ STATISTIC(MCNumFixups, "Number of MC fixups created.");
namespace {
class WebAssemblyMCCodeEmitter final : public MCCodeEmitter {
const MCInstrInfo &MCII;
- const MCContext &Ctx;
// Implementation generated by tablegen.
uint64_t getBinaryCodeForInstr(const MCInst &MI,
@@ -45,14 +45,12 @@ class WebAssemblyMCCodeEmitter final : public MCCodeEmitter {
const MCSubtargetInfo &STI) const override;
public:
- WebAssemblyMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
- : MCII(mcii), Ctx(ctx) {}
+ WebAssemblyMCCodeEmitter(const MCInstrInfo &mcii) : MCII(mcii) {}
};
} // end anonymous namespace
-MCCodeEmitter *llvm::createWebAssemblyMCCodeEmitter(const MCInstrInfo &MCII,
- MCContext &Ctx) {
- return new WebAssemblyMCCodeEmitter(MCII, Ctx);
+MCCodeEmitter *llvm::createWebAssemblyMCCodeEmitter(const MCInstrInfo &MCII) {
+ return new WebAssemblyMCCodeEmitter(MCII);
}
void WebAssemblyMCCodeEmitter::encodeInstruction(
@@ -78,7 +76,8 @@ void WebAssemblyMCCodeEmitter::encodeInstruction(
support::endian::Writer<support::little>(OS).write<uint64_t>(0);
Fixups.push_back(MCFixup::create(
(1 + MCII.get(MI.getOpcode()).isVariadic() + i) * sizeof(uint64_t),
- MO.getExpr(), STI.getTargetTriple().isArch64Bit() ? FK_Data_8 : FK_Data_4,
+ MO.getExpr(),
+ STI.getTargetTriple().isArch64Bit() ? FK_Data_8 : FK_Data_4,
MI.getLoc()));
++MCNumFixups;
} else {
diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp
index 37000f1cd571..ac11a64086f2 100644
--- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp
+++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp
@@ -16,7 +16,6 @@
#include "InstPrinter/WebAssemblyInstPrinter.h"
#include "WebAssemblyMCAsmInfo.h"
#include "WebAssemblyTargetStreamer.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
@@ -40,6 +39,15 @@ static MCAsmInfo *createMCAsmInfo(const MCRegisterInfo & /*MRI*/,
return new WebAssemblyMCAsmInfo(TT);
}
+static void adjustCodeGenOpts(const Triple & /*TT*/, Reloc::Model /*RM*/,
+ CodeModel::Model &CM) {
+ CodeModel::Model M = (CM == CodeModel::Default || CM == CodeModel::JITDefault)
+ ? CodeModel::Large
+ : CM;
+ if (M != CodeModel::Large)
+ report_fatal_error("Non-large code models are not supported yet");
+}
+
static MCInstrInfo *createMCInstrInfo() {
MCInstrInfo *X = new MCInstrInfo();
InitWebAssemblyMCInstrInfo(X);
@@ -57,14 +65,14 @@ static MCInstPrinter *createMCInstPrinter(const Triple & /*T*/,
const MCAsmInfo &MAI,
const MCInstrInfo &MII,
const MCRegisterInfo &MRI) {
- assert(SyntaxVariant == 0);
+ assert(SyntaxVariant == 0 && "WebAssembly only has one syntax variant");
return new WebAssemblyInstPrinter(MAI, MII, MRI);
}
static MCCodeEmitter *createCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo & /*MRI*/,
- MCContext &Ctx) {
- return createWebAssemblyMCCodeEmitter(MCII, Ctx);
+ MCContext & /*Ctx*/) {
+ return createWebAssemblyMCCodeEmitter(MCII);
}
static MCAsmBackend *createAsmBackend(const Target & /*T*/,
@@ -99,6 +107,9 @@ extern "C" void LLVMInitializeWebAssemblyTargetMC() {
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(*T, createMCInstrInfo);
+ // Register the MC codegen info.
+ TargetRegistry::registerMCAdjustCodeGenOpts(*T, adjustCodeGenOpts);
+
// Register the MC register info.
TargetRegistry::RegisterMCRegInfo(*T, createMCRegisterInfo);
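In miniature, the adjustment registered above maps both default code models to
Large and rejects everything else; a hedged sketch using a stand-in enum
rather than LLVM's CodeModel:

  enum Model { Default, JITDefault, Small, Large };

  // Mirrors adjustCodeGenOpts: both defaults resolve to Large, and only
  // the large code model is accepted.
  bool isSupportedCodeModel(Model CM) {
    Model M = (CM == Default || CM == JITDefault) ? Large : CM;
    return M == Large;
  }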
diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
index 9bac4f82822a..001bd7f1fc43 100644
--- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
+++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
@@ -33,8 +33,7 @@ class raw_pwrite_stream;
extern Target TheWebAssemblyTarget32;
extern Target TheWebAssemblyTarget64;
-MCCodeEmitter *createWebAssemblyMCCodeEmitter(const MCInstrInfo &MCII,
- MCContext &Ctx);
+MCCodeEmitter *createWebAssemblyMCCodeEmitter(const MCInstrInfo &MCII);
MCAsmBackend *createWebAssemblyAsmBackend(const Triple &TT);
@@ -45,8 +44,12 @@ namespace WebAssembly {
enum OperandType {
/// Basic block label in a branch construct.
OPERAND_BASIC_BLOCK = MCOI::OPERAND_FIRST_TARGET,
- /// Floating-point immediate.
- OPERAND_FPIMM
+ /// 32-bit floating-point immediates.
+ OPERAND_FP32IMM,
+ /// 64-bit floating-point immediates.
+ OPERAND_FP64IMM,
+ /// p2align immediate for load and store address alignment.
+ OPERAND_P2ALIGN
};
/// WebAssembly-specific directive identifiers.
@@ -87,4 +90,49 @@ enum {
#define GET_SUBTARGETINFO_ENUM
#include "WebAssemblyGenSubtargetInfo.inc"
+namespace llvm {
+namespace WebAssembly {
+
+/// Return the default p2align value for a load or store with the given opcode.
+inline unsigned GetDefaultP2Align(unsigned Opcode) {
+ switch (Opcode) {
+ case WebAssembly::LOAD8_S_I32:
+ case WebAssembly::LOAD8_U_I32:
+ case WebAssembly::LOAD8_S_I64:
+ case WebAssembly::LOAD8_U_I64:
+ case WebAssembly::STORE8_I32:
+ case WebAssembly::STORE8_I64:
+ return 0;
+ case WebAssembly::LOAD16_S_I32:
+ case WebAssembly::LOAD16_U_I32:
+ case WebAssembly::LOAD16_S_I64:
+ case WebAssembly::LOAD16_U_I64:
+ case WebAssembly::STORE16_I32:
+ case WebAssembly::STORE16_I64:
+ return 1;
+ case WebAssembly::LOAD_I32:
+ case WebAssembly::LOAD_F32:
+ case WebAssembly::STORE_I32:
+ case WebAssembly::STORE_F32:
+ case WebAssembly::LOAD32_S_I64:
+ case WebAssembly::LOAD32_U_I64:
+ case WebAssembly::STORE32_I64:
+ return 2;
+ case WebAssembly::LOAD_I64:
+ case WebAssembly::LOAD_F64:
+ case WebAssembly::STORE_I64:
+ case WebAssembly::STORE_F64:
+ return 3;
+ default: llvm_unreachable("Only loads and stores have p2align values");
+ }
+}
+
+/// The operand number of the load or store address in load/store instructions.
+static const unsigned MemOpAddressOperandNo = 2;
+/// The operand number of the stored value in a store instruction.
+static const unsigned StoreValueOperandNo = 4;
+
+} // end namespace WebAssembly
+} // end namespace llvm
+
#endif
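A minimal sketch of how a pass might consume GetDefaultP2Align, assuming (as
the comment above implies) that an access's encoded alignment must not exceed
the opcode's natural alignment; the enum and helper here are hypothetical
stand-ins for the generated opcode list:

  #include <algorithm>
  #include <cstdint>

  // Hypothetical stand-ins for the generated opcodes and the helper above.
  enum Opcode { LOAD_I32, LOAD8_U_I32 };
  unsigned GetDefaultP2Align(Opcode Op) { return Op == LOAD_I32 ? 2 : 0; }

  // p2align is the log2 of the byte alignment; clamp it to the opcode's
  // natural alignment so the encoded value stays legal.
  unsigned encodeP2Align(Opcode Op, uint64_t ByteAlign) {
    unsigned Log2 = 0;
    while ((2ull << Log2) <= ByteAlign)
      ++Log2;
    return std::min(Log2, GetDefaultP2Align(Op));
  }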
diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
index 1d2822869a15..3d61c15717b4 100644
--- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
+++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
@@ -16,12 +16,10 @@
#include "WebAssemblyTargetStreamer.h"
#include "InstPrinter/WebAssemblyInstPrinter.h"
#include "WebAssemblyMCTargetDesc.h"
-#include "WebAssemblyTargetObjectFile.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
@@ -66,6 +64,16 @@ void WebAssemblyTargetAsmStreamer::emitLocal(ArrayRef<MVT> Types) {
void WebAssemblyTargetAsmStreamer::emitEndFunc() { OS << "\t.endfunc\n"; }
+void WebAssemblyTargetAsmStreamer::emitIndirectFunctionType(
+ StringRef name, SmallVectorImpl<MVT> &SignatureVTs, size_t NumResults) {
+ OS << "\t.functype\t" << name;
+ if (NumResults == 0) OS << ", void";
+ for (auto Ty : SignatureVTs) {
+ OS << ", " << WebAssembly::TypeToString(Ty);
+ }
+ OS << "\n";
+}
+
// FIXME: What follows is not the real binary encoding.
static void EncodeTypes(MCStreamer &Streamer, ArrayRef<MVT> Types) {
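As an illustration of the directive format produced above (inferred from the
code, so treat it as a sketch): a hypothetical declaration
double fma(double, double, double) has one result, so no ", void" is printed,
and the result type precedes the parameter types:

  .functype fma, f64, f64, f64, f64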
diff --git a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h
index c66a51574efb..51354ef22d71 100644
--- a/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h
+++ b/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h
@@ -37,6 +37,12 @@ public:
virtual void emitLocal(ArrayRef<MVT> Types) = 0;
/// .endfunc
virtual void emitEndFunc() = 0;
+ /// .functype
+ virtual void emitIndirectFunctionType(StringRef name,
+ SmallVectorImpl<MVT> &SignatureVTs,
+ size_t NumResults) {
+ llvm_unreachable("emitIndirectFunctionType not implemented");
+ }
};
/// This part is for ascii assembly output
@@ -50,6 +56,9 @@ public:
void emitResult(ArrayRef<MVT> Types) override;
void emitLocal(ArrayRef<MVT> Types) override;
void emitEndFunc() override;
+ void emitIndirectFunctionType(StringRef name,
+ SmallVectorImpl<MVT> &SignatureVTs,
+ size_t NumResults) override;
};
/// This part is for ELF object output
diff --git a/lib/Target/WebAssembly/Makefile b/lib/Target/WebAssembly/Makefile
deleted file mode 100644
index c501a2b1ab15..000000000000
--- a/lib/Target/WebAssembly/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-##===- lib/Target/WebAssembly/Makefile ---------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMWebAssemblyCodeGen
-TARGET = WebAssembly
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = \
- WebAssemblyGenAsmWriter.inc \
- WebAssemblyGenDAGISel.inc \
- WebAssemblyGenFastISel.inc \
- WebAssemblyGenInstrInfo.inc \
- WebAssemblyGenMCCodeEmitter.inc \
- WebAssemblyGenRegisterInfo.inc \
- WebAssemblyGenSubtargetInfo.inc
-
-DIRS = InstPrinter TargetInfo MCTargetDesc Disassembler
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/WebAssembly/README.txt b/lib/Target/WebAssembly/README.txt
index b97ea454165c..a6c2eefc0578 100644
--- a/lib/Target/WebAssembly/README.txt
+++ b/lib/Target/WebAssembly/README.txt
@@ -13,32 +13,18 @@ binary encoding of WebAssembly itself:
* https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md
The backend is built, tested and archived on the following waterfall:
- https://build.chromium.org/p/client.wasm.llvm/console
+ https://wasm-stat.us
The backend's bringup is done using the GCC torture test suite first since it
doesn't require C library support. Current known failures are in
known_gcc_test_failures.txt; all other tests should pass. The waterfall will
turn red if not. Once most of these pass, further testing will use LLVM's own
test suite. The tests can be run locally using:
- github.com/WebAssembly/experimental/blob/master/buildbot/torture_test.py
-
-Interesting work that remains to be done:
-* Write a pass to restructurize irreducible control flow. This needs to be done
- before register allocation to be efficient, because it may duplicate basic
- blocks and WebAssembly performs register allocation at a whole-function
- level. Note that LLVM's GPU code has such a pass, but it linearizes control
- flow (e.g. both sides of branches execute and are masked) which is undesirable
- for WebAssembly.
+ https://github.com/WebAssembly/waterfall/blob/master/src/compile_torture_tests.py
//===---------------------------------------------------------------------===//
-set_local instructions have a return value. We should (a) model this,
-and (b) write optimizations which take advantage of it. Keep in mind that
-many set_local instructions are implicit!
-
-//===---------------------------------------------------------------------===//
-
-Br, br_if, and tableswitch instructions can support having a value on the
+Br, br_if, and br_table instructions can support having a value on the
expression stack across the jump (sometimes). We should (a) model this, and
(b) extend the stackifier to utilize it.
@@ -58,10 +44,6 @@ us too?
//===---------------------------------------------------------------------===//
-When is it profitable to set isAsCheapAsAMove on instructions in WebAssembly?
-
-//===---------------------------------------------------------------------===//
-
Register stackification uses the EXPR_STACK physical register to impose
ordering dependencies on instructions with stack operands. This is pessimistic;
we should consider alternate ways to model stack dependencies.
@@ -82,7 +64,74 @@ stores.
//===---------------------------------------------------------------------===//
-Memset/memcpy/memmove should be marked with the "returned" attribute somehow,
-even when they are translated through intrinsics.
+Consider implementing optimizeSelect, optimizeCompareInstr, optimizeCondBranch,
+optimizeLoadInstr, and/or getMachineCombinerPatterns.
+
+//===---------------------------------------------------------------------===//
+
+Find a clean way to fix the problem that causes the Shrink Wrapping pass to
+be run after the WebAssembly PEI pass.
+
+//===---------------------------------------------------------------------===//
+
+When setting multiple local variables to the same constant, we currently get
+code like this:
+
+ i32.const $4=, 0
+ i32.const $3=, 0
+
+It could be done with a smaller encoding like this:
+
+ i32.const $push5=, 0
+ tee_local $push6=, $4=, $pop5
+ copy_local $3=, $pop6
+
+//===---------------------------------------------------------------------===//
+
+WebAssembly registers are implicitly initialized to zero. Explicit zeroing is
+therefore often redundant and could be optimized away.
+
+//===---------------------------------------------------------------------===//
+
+Small indices have smaller (LEB128) encodings than large indices.
+WebAssemblyRegColoring and/or WebAssemblyRegRenumbering should sort registers
+by usage frequency so that the most-used registers receive the smallest
+indices; see the sketch following these notes.
+
+//===---------------------------------------------------------------------===//
+
+When the last statement in a function body computes the return value, it can
+just let that value be the exit value of the outermost block, rather than
+needing an explicit return operation.
+
+//===---------------------------------------------------------------------===//
+
+Many cases of irreducible control flow could be transformed more efficiently
+than they are by the transform in WebAssemblyFixIrreducibleControlFlow.cpp.
+
+It may also be worthwhile to do transforms before register coloring,
+particularly when duplicating code, to allow register coloring to be aware of
+the duplication.
+
+//===---------------------------------------------------------------------===//
+
+WebAssemblyRegStackify could use AliasAnalysis to reorder loads and stores more
+aggressively.
+
+//===---------------------------------------------------------------------===//
+
+WebAssemblyRegStackify is currently a greedy algorithm. This means that, for
+example, a binary operator will stackify with its user before its operands.
+However, if moving the binary operator to its user moves it to a place where
+its operands can't be moved to, it would be better to leave it in place, or
+perhaps move it up, so that it can stackify its operands. A binary operator
+has two operands and one result, so in such cases there could be a net win by
+preferring the operands.
+
+//===---------------------------------------------------------------------===//
+
+Instruction ordering has a significant influence on register stackification and
+coloring. Consider experimenting with the MachineScheduler (enabled via
+enableMachineScheduler) to determine whether it can be configured to schedule
+instructions advantageously for this purpose.
//===---------------------------------------------------------------------===//
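A minimal sketch of the frequency-sorted renumbering suggested in the note
above; the data layout and function name are hypothetical, not the actual
pass structures:

  #include <algorithm>
  #include <cstdint>
  #include <utility>
  #include <vector>

  // Map old register numbers to new indices so that the most frequently
  // used registers receive the smallest (shortest-LEB128) indices.
  std::vector<uint32_t> renumberByFrequency(
      const std::vector<std::pair<uint32_t, uint64_t>> &RegUseCounts) {
    auto Sorted = RegUseCounts;
    std::stable_sort(Sorted.begin(), Sorted.end(),
                     [](const auto &A, const auto &B) {
                       return A.second > B.second; // most-used first
                     });
    uint32_t MaxReg = 0;
    for (const auto &P : Sorted)
      MaxReg = std::max(MaxReg, P.first);
    std::vector<uint32_t> NewIndex(MaxReg + 1, 0);
    for (uint32_t I = 0, E = static_cast<uint32_t>(Sorted.size()); I != E; ++I)
      NewIndex[Sorted[I].first] = I;
    return NewIndex;
  }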
diff --git a/lib/Target/WebAssembly/Relooper.cpp b/lib/Target/WebAssembly/Relooper.cpp
deleted file mode 100644
index 9b718ef094aa..000000000000
--- a/lib/Target/WebAssembly/Relooper.cpp
+++ /dev/null
@@ -1,984 +0,0 @@
-//===-- Relooper.cpp - Top-level interface for WebAssembly ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===---------------------------------------------------------------------===//
-///
-/// \file
-/// \brief This implements the Relooper algorithm. This implementation includes
-/// optimizations added since the original academic paper [1] was published.
-///
-/// [1] Alon Zakai. 2011. Emscripten: an LLVM-to-JavaScript compiler. In
-/// Proceedings of the ACM international conference companion on Object
-/// oriented programming systems languages and applications companion
-/// (SPLASH '11). ACM, New York, NY, USA, 301-312. DOI=10.1145/2048147.2048224
-/// http://doi.acm.org/10.1145/2048147.2048224
-///
-//===-------------------------------------------------------------------===//
-
-#include "Relooper.h"
-#include "WebAssembly.h"
-
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/IR/CFG.h"
-#include "llvm/IR/Function.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-
-#include <cstring>
-#include <cstdlib>
-#include <functional>
-#include <list>
-#include <stack>
-#include <string>
-
-#define DEBUG_TYPE "relooper"
-
-using namespace llvm;
-using namespace Relooper;
-
-static cl::opt<int> RelooperSplittingFactor(
- "relooper-splitting-factor",
- cl::desc(
- "How much to discount code size when deciding whether to split a node"),
- cl::init(5));
-
-static cl::opt<unsigned> RelooperMultipleSwitchThreshold(
- "relooper-multiple-switch-threshold",
- cl::desc(
- "How many entries to allow in a multiple before we use a switch"),
- cl::init(10));
-
-static cl::opt<unsigned> RelooperNestingLimit(
- "relooper-nesting-limit",
- cl::desc(
- "How much nesting is acceptable"),
- cl::init(20));
-
-
-namespace {
-///
-/// Implements the relooper algorithm for a function's blocks.
-///
-/// Implementation details: The Relooper instance has
-/// ownership of the blocks and shapes, and frees them when done.
-///
-struct RelooperAlgorithm {
- std::deque<Block *> Blocks;
- std::deque<Shape *> Shapes;
- Shape *Root;
- bool MinSize;
- int BlockIdCounter;
- int ShapeIdCounter;
-
- RelooperAlgorithm();
- ~RelooperAlgorithm();
-
- void AddBlock(Block *New, int Id = -1);
-
- // Calculates the shapes
- void Calculate(Block *Entry);
-
- // Sets us to try to minimize size
- void SetMinSize(bool MinSize_) { MinSize = MinSize_; }
-};
-
-struct RelooperAnalysis final : public FunctionPass {
- static char ID;
- RelooperAnalysis() : FunctionPass(ID) {}
- const char *getPassName() const override { return "relooper"; }
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.setPreservesAll();
- }
- bool runOnFunction(Function &F) override;
-};
-}
-
-// RelooperAnalysis
-
-char RelooperAnalysis::ID = 0;
-FunctionPass *llvm::createWebAssemblyRelooper() {
- return new RelooperAnalysis();
-}
-
-bool RelooperAnalysis::runOnFunction(Function &F) {
- DEBUG(dbgs() << "Relooping function '" << F.getName() << "'\n");
- RelooperAlgorithm R;
- // FIXME: remove duplication between relooper's and LLVM's BBs.
- std::map<const BasicBlock *, Block *> BB2B;
- std::map<const Block *, const BasicBlock *> B2BB;
- for (const BasicBlock &BB : F) {
- // FIXME: getName is wrong here, Code is meant to represent amount of code.
- // FIXME: use BranchVarInit for switch.
- Block *B = new Block(BB.getName().str().data(), /*BranchVarInit=*/nullptr);
- R.AddBlock(B);
- assert(BB2B.find(&BB) == BB2B.end() && "Inserting the same block twice");
- assert(B2BB.find(B) == B2BB.end() && "Inserting the same block twice");
- BB2B[&BB] = B;
- B2BB[B] = &BB;
- }
- for (Block *B : R.Blocks) {
- const BasicBlock *BB = B2BB[B];
- for (const BasicBlock *Successor : successors(BB))
- // FIXME: add branch's Condition and Code below.
- B->AddBranchTo(BB2B[Successor], /*Condition=*/nullptr, /*Code=*/nullptr);
- }
- R.Calculate(BB2B[&F.getEntryBlock()]);
- return false; // Analysis passes don't modify anything.
-}
-
-// Helpers
-
-typedef MapVector<Block *, BlockSet> BlockBlockSetMap;
-typedef std::list<Block *> BlockList;
-
-template <class T, class U>
-static bool contains(const T &container, const U &contained) {
- return container.count(contained);
-}
-
-
-// Branch
-
-Branch::Branch(const char *ConditionInit, const char *CodeInit)
- : Ancestor(nullptr), Labeled(true) {
- // FIXME: move from char* to LLVM data structures
- Condition = ConditionInit ? strdup(ConditionInit) : nullptr;
- Code = CodeInit ? strdup(CodeInit) : nullptr;
-}
-
-Branch::~Branch() {
- // FIXME: move from char* to LLVM data structures
- free(static_cast<void *>(const_cast<char *>(Condition)));
- free(static_cast<void *>(const_cast<char *>(Code)));
-}
-
-// Block
-
-Block::Block(const char *CodeInit, const char *BranchVarInit)
- : Parent(nullptr), Id(-1), IsCheckedMultipleEntry(false) {
- // FIXME: move from char* to LLVM data structures
- Code = strdup(CodeInit);
- BranchVar = BranchVarInit ? strdup(BranchVarInit) : nullptr;
-}
-
-Block::~Block() {
- // FIXME: move from char* to LLVM data structures
- free(static_cast<void *>(const_cast<char *>(Code)));
- free(static_cast<void *>(const_cast<char *>(BranchVar)));
-}
-
-void Block::AddBranchTo(Block *Target, const char *Condition,
- const char *Code) {
- assert(!contains(BranchesOut, Target) &&
- "cannot add more than one branch to the same target");
- BranchesOut[Target] = make_unique<Branch>(Condition, Code);
-}
-
-// Relooper
-
-RelooperAlgorithm::RelooperAlgorithm()
- : Root(nullptr), MinSize(false), BlockIdCounter(1),
- ShapeIdCounter(0) { // block ID 0 is reserved for clearings
-}
-
-RelooperAlgorithm::~RelooperAlgorithm() {
- for (auto Curr : Blocks)
- delete Curr;
- for (auto Curr : Shapes)
- delete Curr;
-}
-
-void RelooperAlgorithm::AddBlock(Block *New, int Id) {
- New->Id = Id == -1 ? BlockIdCounter++ : Id;
- Blocks.push_back(New);
-}
-
-struct RelooperRecursor {
- RelooperAlgorithm *Parent;
- RelooperRecursor(RelooperAlgorithm *ParentInit) : Parent(ParentInit) {}
-};
-
-void RelooperAlgorithm::Calculate(Block *Entry) {
- // Scan and optimize the input
- struct PreOptimizer : public RelooperRecursor {
- PreOptimizer(RelooperAlgorithm *Parent) : RelooperRecursor(Parent) {}
- BlockSet Live;
-
- void FindLive(Block *Root) {
- BlockList ToInvestigate;
- ToInvestigate.push_back(Root);
- while (!ToInvestigate.empty()) {
- Block *Curr = ToInvestigate.front();
- ToInvestigate.pop_front();
- if (contains(Live, Curr))
- continue;
- Live.insert(Curr);
- for (const auto &iter : Curr->BranchesOut)
- ToInvestigate.push_back(iter.first);
- }
- }
-
- // If a block has multiple entries but no exits, and it is small enough, it
- // is useful to split it. A common example is a C++ function where
- // everything ends up at a final exit block and does some RAII cleanup.
- // Without splitting, we will be forced to introduce labelled loops to
- // allow reaching the final block
- void SplitDeadEnds() {
- unsigned TotalCodeSize = 0;
- for (const auto &Curr : Live) {
- TotalCodeSize += strlen(Curr->Code);
- }
- BlockSet Splits;
- BlockSet Removed;
- for (const auto &Original : Live) {
- if (Original->BranchesIn.size() <= 1 ||
- !Original->BranchesOut.empty())
- continue; // only dead ends, for now
- if (contains(Original->BranchesOut, Original))
- continue; // cannot split a looping node
- if (strlen(Original->Code) * (Original->BranchesIn.size() - 1) >
- TotalCodeSize / RelooperSplittingFactor)
- continue; // if splitting increases raw code size by a significant
- // amount, abort
- // Split the node (for simplicity, we replace all the blocks, even
- // though we could have reused the original)
- DEBUG(dbgs() << " Splitting '" << Original->Code << "'\n");
- for (const auto &Prior : Original->BranchesIn) {
- Block *Split = new Block(Original->Code, Original->BranchVar);
- Parent->AddBlock(Split, Original->Id);
- Split->BranchesIn.insert(Prior);
- std::unique_ptr<Branch> Details;
- Details.swap(Prior->BranchesOut[Original]);
- Prior->BranchesOut[Split] = make_unique<Branch>(Details->Condition,
- Details->Code);
- for (const auto &iter : Original->BranchesOut) {
- Block *Post = iter.first;
- Branch *Details = iter.second.get();
- Split->BranchesOut[Post] = make_unique<Branch>(Details->Condition,
- Details->Code);
- Post->BranchesIn.insert(Split);
- }
- Splits.insert(Split);
- Removed.insert(Original);
- }
- for (const auto &iter : Original->BranchesOut) {
- Block *Post = iter.first;
- Post->BranchesIn.remove(Original);
- }
- }
- for (const auto &iter : Splits)
- Live.insert(iter);
- for (const auto &iter : Removed)
- Live.remove(iter);
- }
- };
- PreOptimizer Pre(this);
- Pre.FindLive(Entry);
-
- // Add incoming branches from live blocks, ignoring dead code
- for (unsigned i = 0; i < Blocks.size(); i++) {
- Block *Curr = Blocks[i];
- if (!contains(Pre.Live, Curr))
- continue;
- for (const auto &iter : Curr->BranchesOut)
- iter.first->BranchesIn.insert(Curr);
- }
-
- if (!MinSize)
- Pre.SplitDeadEnds();
-
- // Recursively process the graph
-
- struct Analyzer : public RelooperRecursor {
- Analyzer(RelooperAlgorithm *Parent) : RelooperRecursor(Parent) {}
-
- // Add a shape to the list of shapes in this Relooper calculation
- void Notice(Shape *New) {
- New->Id = Parent->ShapeIdCounter++;
- Parent->Shapes.push_back(New);
- }
-
- // Create a list of entries from a block. If LimitTo is provided, only
- // results in that set will appear
- void GetBlocksOut(Block *Source, BlockSet &Entries,
- BlockSet *LimitTo = nullptr) {
- for (const auto &iter : Source->BranchesOut)
- if (!LimitTo || contains(*LimitTo, iter.first))
- Entries.insert(iter.first);
- }
-
- // Converts/processes all branchings to a specific target
- void Solipsize(Block *Target, Branch::FlowType Type, Shape *Ancestor,
- BlockSet &From) {
- DEBUG(dbgs() << " Solipsize '" << Target->Code << "' type " << Type
- << "\n");
- for (auto iter = Target->BranchesIn.begin();
- iter != Target->BranchesIn.end();) {
- Block *Prior = *iter;
- if (!contains(From, Prior)) {
- iter++;
- continue;
- }
- std::unique_ptr<Branch> PriorOut;
- PriorOut.swap(Prior->BranchesOut[Target]);
- PriorOut->Ancestor = Ancestor;
- PriorOut->Type = Type;
- if (MultipleShape *Multiple = dyn_cast<MultipleShape>(Ancestor))
- Multiple->Breaks++; // We are breaking out of this Multiple, so need a
- // loop
- iter++; // carefully increment iter before erasing
- Target->BranchesIn.remove(Prior);
- Target->ProcessedBranchesIn.insert(Prior);
- Prior->ProcessedBranchesOut[Target].swap(PriorOut);
- }
- }
-
- Shape *MakeSimple(BlockSet &Blocks, Block *Inner, BlockSet &NextEntries) {
- DEBUG(dbgs() << " MakeSimple inner block '" << Inner->Code << "'\n");
- SimpleShape *Simple = new SimpleShape;
- Notice(Simple);
- Simple->Inner = Inner;
- Inner->Parent = Simple;
- if (Blocks.size() > 1) {
- Blocks.remove(Inner);
- GetBlocksOut(Inner, NextEntries, &Blocks);
- BlockSet JustInner;
- JustInner.insert(Inner);
- for (const auto &iter : NextEntries)
- Solipsize(iter, Branch::Direct, Simple, JustInner);
- }
- return Simple;
- }
-
- Shape *MakeLoop(BlockSet &Blocks, BlockSet &Entries,
- BlockSet &NextEntries) {
- // Find the inner blocks in this loop. Proceed backwards from the entries
- // until
- // you reach a seen block, collecting as you go.
- BlockSet InnerBlocks;
- BlockSet Queue = Entries;
- while (!Queue.empty()) {
- Block *Curr = *(Queue.begin());
- Queue.remove(*Queue.begin());
- if (!contains(InnerBlocks, Curr)) {
- // This element is new, mark it as inner and remove from outer
- InnerBlocks.insert(Curr);
- Blocks.remove(Curr);
- // Add the elements prior to it
- for (const auto &iter : Curr->BranchesIn)
- Queue.insert(iter);
- }
- }
- assert(!InnerBlocks.empty());
-
- for (const auto &Curr : InnerBlocks) {
- for (const auto &iter : Curr->BranchesOut) {
- Block *Possible = iter.first;
- if (!contains(InnerBlocks, Possible))
- NextEntries.insert(Possible);
- }
- }
-
- LoopShape *Loop = new LoopShape();
- Notice(Loop);
-
- // Solipsize the loop, replacing with break/continue and marking branches
- // as Processed (will not affect later calculations)
- // A. Branches to the loop entries become a continue to this shape
- for (const auto &iter : Entries)
- Solipsize(iter, Branch::Continue, Loop, InnerBlocks);
- // B. Branches to outside the loop (a next entry) become breaks on this
- // shape
- for (const auto &iter : NextEntries)
- Solipsize(iter, Branch::Break, Loop, InnerBlocks);
- // Finish up
- Shape *Inner = Process(InnerBlocks, Entries, nullptr);
- Loop->Inner = Inner;
- return Loop;
- }
-
- // For each entry, find the independent group reachable by it. The
- // independent group is the entry itself, plus all the blocks it can
- // reach that cannot be directly reached by another entry. Note that we
- // ignore directly reaching the entry itself by another entry.
- // @param Ignore - previous blocks that are irrelevant
- void FindIndependentGroups(BlockSet &Entries,
- BlockBlockSetMap &IndependentGroups,
- BlockSet *Ignore = nullptr) {
- typedef std::map<Block *, Block *> BlockBlockMap;
-
- struct HelperClass {
- BlockBlockSetMap &IndependentGroups;
- BlockBlockMap Ownership; // For each block, which entry it belongs to.
- // We have reached it from there.
-
- HelperClass(BlockBlockSetMap &IndependentGroupsInit)
- : IndependentGroups(IndependentGroupsInit) {}
- void InvalidateWithChildren(Block *New) {
- // Being in the list means you need to be invalidated
- BlockList ToInvalidate;
- ToInvalidate.push_back(New);
- while (!ToInvalidate.empty()) {
- Block *Invalidatee = ToInvalidate.front();
- ToInvalidate.pop_front();
- Block *Owner = Ownership[Invalidatee];
- // Owner may have been invalidated, do not add to
- // IndependentGroups!
- if (contains(IndependentGroups, Owner))
- IndependentGroups[Owner].remove(Invalidatee);
- if (Ownership[Invalidatee]) { // may have been seen before and
- // invalidated already
- Ownership[Invalidatee] = nullptr;
- for (const auto &iter : Invalidatee->BranchesOut) {
- Block *Target = iter.first;
- BlockBlockMap::iterator Known = Ownership.find(Target);
- if (Known != Ownership.end()) {
- Block *TargetOwner = Known->second;
- if (TargetOwner)
- ToInvalidate.push_back(Target);
- }
- }
- }
- }
- }
- };
- HelperClass Helper(IndependentGroups);
-
- // We flow out from each of the entries, simultaneously.
- // When we reach a new block, we add it as belonging to the one we got to
- // it from.
- // If we reach a new block that is already marked as belonging to someone,
- // it is reachable by two entries and is not valid for any of them.
- // Remove it and all it can reach that have been visited.
-
- // Being in the queue means we just added this item, and
- // we need to add its children
- BlockList Queue;
- for (const auto &Entry : Entries) {
- Helper.Ownership[Entry] = Entry;
- IndependentGroups[Entry].insert(Entry);
- Queue.push_back(Entry);
- }
- while (!Queue.empty()) {
- Block *Curr = Queue.front();
- Queue.pop_front();
- Block *Owner = Helper.Ownership[Curr]; // Curr must be in the ownership
- // map if we are in the queue
- if (!Owner)
- continue; // we have been invalidated meanwhile after being reached
- // from two entries
- // Add all children
- for (const auto &iter : Curr->BranchesOut) {
- Block *New = iter.first;
- BlockBlockMap::iterator Known = Helper.Ownership.find(New);
- if (Known == Helper.Ownership.end()) {
- // New node. Add it, and put it in the queue
- Helper.Ownership[New] = Owner;
- IndependentGroups[Owner].insert(New);
- Queue.push_back(New);
- continue;
- }
- Block *NewOwner = Known->second;
- if (!NewOwner)
- continue; // We reached an invalidated node
- if (NewOwner != Owner)
- // Invalidate this and all reachable that we have seen - we reached
- // this from two locations
- Helper.InvalidateWithChildren(New);
- // otherwise, we have the same owner, so do nothing
- }
- }
-
- // Having processed all the interesting blocks, we remain with just one
- // potential issue:
- // If a->b, and a was invalidated, but then b was later reached by
- // someone else, we must invalidate b. To check for this, we go over all
- // elements in the independent groups, if an element has a parent which
- // does *not* have the same owner, we/ must remove it and all its
- // children.
-
- for (const auto &iter : Entries) {
- BlockSet &CurrGroup = IndependentGroups[iter];
- BlockList ToInvalidate;
- for (const auto &iter : CurrGroup) {
- Block *Child = iter;
- for (const auto &iter : Child->BranchesIn) {
- Block *Parent = iter;
- if (Ignore && contains(*Ignore, Parent))
- continue;
- if (Helper.Ownership[Parent] != Helper.Ownership[Child])
- ToInvalidate.push_back(Child);
- }
- }
- while (!ToInvalidate.empty()) {
- Block *Invalidatee = ToInvalidate.front();
- ToInvalidate.pop_front();
- Helper.InvalidateWithChildren(Invalidatee);
- }
- }
-
- // Remove empty groups
- for (const auto &iter : Entries)
- if (IndependentGroups[iter].empty())
- IndependentGroups.erase(iter);
- }
-
- Shape *MakeMultiple(BlockSet &Blocks, BlockSet &Entries,
- BlockBlockSetMap &IndependentGroups, Shape *Prev,
- BlockSet &NextEntries) {
- bool Fused = isa<SimpleShape>(Prev);
- MultipleShape *Multiple = new MultipleShape();
- Notice(Multiple);
- BlockSet CurrEntries;
- for (auto &iter : IndependentGroups) {
- Block *CurrEntry = iter.first;
- BlockSet &CurrBlocks = iter.second;
- // Create inner block
- CurrEntries.clear();
- CurrEntries.insert(CurrEntry);
- for (const auto &CurrInner : CurrBlocks) {
- // Remove the block from the remaining blocks
- Blocks.remove(CurrInner);
- // Find new next entries and fix branches to them
- for (auto iter = CurrInner->BranchesOut.begin();
- iter != CurrInner->BranchesOut.end();) {
- Block *CurrTarget = iter->first;
- auto Next = iter;
- Next++;
- if (!contains(CurrBlocks, CurrTarget)) {
- NextEntries.insert(CurrTarget);
- Solipsize(CurrTarget, Branch::Break, Multiple, CurrBlocks);
- }
- iter = Next; // increment carefully because Solipsize can remove us
- }
- }
- Multiple->InnerMap[CurrEntry->Id] =
- Process(CurrBlocks, CurrEntries, nullptr);
- // If we are not fused, then our entries will actually be checked
- if (!Fused)
- CurrEntry->IsCheckedMultipleEntry = true;
- }
- // Add entries not handled as next entries, they are deferred
- for (const auto &Entry : Entries)
- if (!contains(IndependentGroups, Entry))
- NextEntries.insert(Entry);
- // The multiple has been created, we can decide how to implement it
- if (Multiple->InnerMap.size() >= RelooperMultipleSwitchThreshold) {
- Multiple->UseSwitch = true;
- Multiple->Breaks++; // switch captures breaks
- }
- return Multiple;
- }
-
- // Main function.
- // Process a set of blocks with specified entries, returns a shape
- // The Make* functions receive a NextEntries. If they fill it with data,
- // those are the entries for the ->Next block on them, and the blocks
- // are what remains in Blocks (which Make* modify). In this way
- // we avoid recursing on Next (imagine a long chain of Simples, if we
- // recursed we could blow the stack).
- Shape *Process(BlockSet &Blocks, BlockSet &InitialEntries, Shape *Prev) {
- BlockSet *Entries = &InitialEntries;
- BlockSet TempEntries[2];
- int CurrTempIndex = 0;
- BlockSet *NextEntries;
- Shape *Ret = nullptr;
-
- auto Make = [&](Shape *Temp) {
- if (Prev)
- Prev->Next = Temp;
- if (!Ret)
- Ret = Temp;
- Prev = Temp;
- Entries = NextEntries;
- };
-
- while (1) {
- CurrTempIndex = 1 - CurrTempIndex;
- NextEntries = &TempEntries[CurrTempIndex];
- NextEntries->clear();
-
- if (Entries->empty())
- return Ret;
- if (Entries->size() == 1) {
- Block *Curr = *(Entries->begin());
- if (Curr->BranchesIn.empty()) {
- // One entry, no looping ==> Simple
- Make(MakeSimple(Blocks, Curr, *NextEntries));
- if (NextEntries->empty())
- return Ret;
- continue;
- }
- // One entry, looping ==> Loop
- Make(MakeLoop(Blocks, *Entries, *NextEntries));
- if (NextEntries->empty())
- return Ret;
- continue;
- }
-
- // More than one entry, try to eliminate through a Multiple groups of
- // independent blocks from an entry/ies. It is important to remove
- // through multiples as opposed to looping since the former is more
- // performant.
- BlockBlockSetMap IndependentGroups;
- FindIndependentGroups(*Entries, IndependentGroups);
-
- if (!IndependentGroups.empty()) {
- // We can handle a group in a multiple if its entry cannot be reached
- // by another group.
- // Note that it might be reachable by itself - a loop. But that is
- // fine, we will create a loop inside the multiple block (which
- // is the performant order to do it).
- for (auto iter = IndependentGroups.begin();
- iter != IndependentGroups.end();) {
- Block *Entry = iter->first;
- BlockSet &Group = iter->second;
- auto curr = iter++; // iterate carefully, we may delete
- for (BlockSet::iterator iterBranch = Entry->BranchesIn.begin();
- iterBranch != Entry->BranchesIn.end(); iterBranch++) {
- Block *Origin = *iterBranch;
- if (!contains(Group, Origin)) {
- // Reached from outside the group, so we cannot handle this
- IndependentGroups.erase(curr);
- break;
- }
- }
- }
-
- // As an optimization, if we have 2 independent groups, and one is a
- // small dead end, we can handle only that dead end.
- // The other then becomes a Next - without nesting in the code and
- // recursion in the analysis.
- // TODO: if the larger is the only dead end, handle that too
- // TODO: handle >2 groups
- // TODO: handle not just dead ends, but also that do not branch to the
- // NextEntries. However, must be careful there since we create a
- // Next, and that Next can prevent eliminating a break (since we no
- // longer naturally reach the same place), which may necessitate a
- // one-time loop, which makes the unnesting pointless.
- if (IndependentGroups.size() == 2) {
- // Find the smaller one
- auto iter = IndependentGroups.begin();
- Block *SmallEntry = iter->first;
- auto SmallSize = iter->second.size();
- iter++;
- Block *LargeEntry = iter->first;
- auto LargeSize = iter->second.size();
- if (SmallSize != LargeSize) { // ignore the case where they are
- // identical - keep things symmetrical
- // there
- if (SmallSize > LargeSize) {
- Block *Temp = SmallEntry;
- SmallEntry = LargeEntry;
- LargeEntry = Temp; // Note: we did not flip the Sizes too, they
- // are now invalid. TODO: use the smaller
- // size as a limit?
- }
- // Check if dead end
- bool DeadEnd = true;
- BlockSet &SmallGroup = IndependentGroups[SmallEntry];
- for (const auto &Curr : SmallGroup) {
- for (const auto &iter : Curr->BranchesOut) {
- Block *Target = iter.first;
- if (!contains(SmallGroup, Target)) {
- DeadEnd = false;
- break;
- }
- }
- if (!DeadEnd)
- break;
- }
- if (DeadEnd)
- IndependentGroups.erase(LargeEntry);
- }
- }
-
- if (!IndependentGroups.empty())
- // Some groups removable ==> Multiple
- Make(MakeMultiple(Blocks, *Entries, IndependentGroups, Prev,
- *NextEntries));
- if (NextEntries->empty())
- return Ret;
- continue;
- }
- // No independent groups, must be loopable ==> Loop
- Make(MakeLoop(Blocks, *Entries, *NextEntries));
- if (NextEntries->empty())
- return Ret;
- continue;
- }
- }
- };
-
- // Main
-
- BlockSet AllBlocks;
- for (const auto &Curr : Pre.Live) {
- AllBlocks.insert(Curr);
- }
-
- BlockSet Entries;
- Entries.insert(Entry);
- Root = Analyzer(this).Process(AllBlocks, Entries, nullptr);
- assert(Root);
-
- ///
- /// Relooper post-optimizer
- ///
- struct PostOptimizer {
- RelooperAlgorithm *Parent;
- std::stack<Shape *> LoopStack;
-
- PostOptimizer(RelooperAlgorithm *ParentInit) : Parent(ParentInit) {}
-
- void ShapeSwitch(Shape* var,
- std::function<void (SimpleShape*)> simple,
- std::function<void (MultipleShape*)> multiple,
- std::function<void (LoopShape*)> loop) {
- switch (var->getKind()) {
- case Shape::SK_Simple: {
- simple(cast<SimpleShape>(var));
- break;
- }
- case Shape::SK_Multiple: {
- multiple(cast<MultipleShape>(var));
- break;
- }
- case Shape::SK_Loop: {
- loop(cast<LoopShape>(var));
- break;
- }
- }
- }
-
- // Find the blocks that natural control flow can get us directly to, or
- // through a multiple that we ignore
- void FollowNaturalFlow(Shape *S, BlockSet &Out) {
- ShapeSwitch(S, [&](SimpleShape* Simple) {
- Out.insert(Simple->Inner);
- }, [&](MultipleShape* Multiple) {
- for (const auto &iter : Multiple->InnerMap) {
- FollowNaturalFlow(iter.second, Out);
- }
- FollowNaturalFlow(Multiple->Next, Out);
- }, [&](LoopShape* Loop) {
- FollowNaturalFlow(Loop->Inner, Out);
- });
- }
-
- void FindNaturals(Shape *Root, Shape *Otherwise = nullptr) {
- if (Root->Next) {
- Root->Natural = Root->Next;
- FindNaturals(Root->Next, Otherwise);
- } else {
- Root->Natural = Otherwise;
- }
-
- ShapeSwitch(Root, [](SimpleShape* Simple) {
- }, [&](MultipleShape* Multiple) {
- for (const auto &iter : Multiple->InnerMap) {
- FindNaturals(iter.second, Root->Natural);
- }
- }, [&](LoopShape* Loop){
- FindNaturals(Loop->Inner, Loop->Inner);
- });
- }
-
- // Remove unneeded breaks and continues.
- // A flow operation is trivially unneeded if the shape we naturally get to
- // by normal code execution is the same as the flow forces us to.
- void RemoveUnneededFlows(Shape *Root, Shape *Natural = nullptr,
- LoopShape *LastLoop = nullptr,
- unsigned Depth = 0) {
- BlockSet NaturalBlocks;
- FollowNaturalFlow(Natural, NaturalBlocks);
- Shape *Next = Root;
- while (Next) {
- Root = Next;
- Next = nullptr;
- ShapeSwitch(
- Root,
- [&](SimpleShape* Simple) {
- if (Simple->Inner->BranchVar)
- LastLoop =
- nullptr; // a switch clears out the loop (TODO: only for
- // breaks, not continue)
-
- if (Simple->Next) {
- if (!Simple->Inner->BranchVar &&
- Simple->Inner->ProcessedBranchesOut.size() == 2 &&
- Depth < RelooperNestingLimit) {
- // If there is a next block, we already know at Simple
- // creation time to make direct branches, and we can do
- // nothing more in general. But, we try to optimize the
- // case of a break and a direct: This would normally be
- // if (break?) { break; } ..
- // but if we make sure to nest the else, we can save the
- // break,
- // if (!break?) { .. }
- // This is also better because the more canonical nested
- // form is easier to further optimize later. The
- // downside is more nesting, which adds to size in builds with
- // whitespace.
- // Note that we avoid switches, as it complicates control flow
- // and is not relevant for the common case we optimize here.
- bool Found = false;
- bool Abort = false;
- for (const auto &iter : Simple->Inner->ProcessedBranchesOut) {
- Block *Target = iter.first;
- Branch *Details = iter.second.get();
- if (Details->Type == Branch::Break) {
- Found = true;
- if (!contains(NaturalBlocks, Target))
- Abort = true;
- } else if (Details->Type != Branch::Direct)
- Abort = true;
- }
- if (Found && !Abort) {
- for (const auto &iter : Simple->Inner->ProcessedBranchesOut) {
- Branch *Details = iter.second.get();
- if (Details->Type == Branch::Break) {
- Details->Type = Branch::Direct;
- if (MultipleShape *Multiple =
- dyn_cast<MultipleShape>(Details->Ancestor))
- Multiple->Breaks--;
- } else {
- assert(Details->Type == Branch::Direct);
- Details->Type = Branch::Nested;
- }
- }
- }
- Depth++; // this optimization increases depth, for us and all
- // our next chain (i.e., until this call returns)
- }
- Next = Simple->Next;
- } else {
- // If there is no next then Natural is where we will
- // go to by doing nothing, so we can potentially optimize some
- // branches to direct.
- for (const auto &iter : Simple->Inner->ProcessedBranchesOut) {
- Block *Target = iter.first;
- Branch *Details = iter.second.get();
- if (Details->Type != Branch::Direct &&
- contains(NaturalBlocks,
- Target)) { // note: cannot handle split blocks
- Details->Type = Branch::Direct;
- if (MultipleShape *Multiple =
- dyn_cast<MultipleShape>(Details->Ancestor))
- Multiple->Breaks--;
- } else if (Details->Type == Branch::Break && LastLoop &&
- LastLoop->Natural == Details->Ancestor->Natural) {
- // it is important to simplify breaks, as simpler breaks
- // enable other optimizations
- Details->Labeled = false;
- if (MultipleShape *Multiple =
- dyn_cast<MultipleShape>(Details->Ancestor))
- Multiple->Breaks--;
- }
- }
- }
- }, [&](MultipleShape* Multiple)
- {
- for (const auto &iter : Multiple->InnerMap) {
- RemoveUnneededFlows(iter.second, Multiple->Next,
- Multiple->Breaks ? nullptr : LastLoop,
- Depth + 1);
- }
- Next = Multiple->Next;
- }, [&](LoopShape* Loop)
- {
- RemoveUnneededFlows(Loop->Inner, Loop->Inner, Loop, Depth + 1);
- Next = Loop->Next;
- });
- }
- }
-
- // After we know which loops exist, we can calculate which need to be
- // labeled
- void FindLabeledLoops(Shape *Root) {
- Shape *Next = Root;
- while (Next) {
- Root = Next;
- Next = nullptr;
-
- ShapeSwitch(
- Root,
- [&](SimpleShape *Simple) {
- MultipleShape *Fused = dyn_cast<MultipleShape>(Root->Next);
- // If we are fusing a Multiple with a loop into this Simple, then
- // visit it now
- if (Fused && Fused->Breaks)
- LoopStack.push(Fused);
- if (Simple->Inner->BranchVar)
- LoopStack.push(nullptr); // a switch means breaks are now useless,
- // push a dummy
- if (Fused) {
- if (Fused->UseSwitch)
- LoopStack.push(nullptr); // a switch means breaks are now
- // useless, push a dummy
- for (const auto &iter : Fused->InnerMap) {
- FindLabeledLoops(iter.second);
- }
- }
- for (const auto &iter : Simple->Inner->ProcessedBranchesOut) {
- Branch *Details = iter.second.get();
- if (Details->Type == Branch::Break ||
- Details->Type == Branch::Continue) {
- assert(!LoopStack.empty());
- if (Details->Ancestor != LoopStack.top() && Details->Labeled) {
- if (MultipleShape *Multiple =
- dyn_cast<MultipleShape>(Details->Ancestor)) {
- Multiple->Labeled = true;
- } else {
- LoopShape *Loop = cast<LoopShape>(Details->Ancestor);
- Loop->Labeled = true;
- }
- } else {
- Details->Labeled = false;
- }
- }
- if (Fused && Fused->UseSwitch)
- LoopStack.pop();
- if (Simple->Inner->BranchVar)
- LoopStack.pop();
- if (Fused && Fused->Breaks)
- LoopStack.pop();
- if (Fused)
- Next = Fused->Next;
- else
- Next = Root->Next;
- }
- }
- , [&](MultipleShape* Multiple) {
- if (Multiple->Breaks)
- LoopStack.push(Multiple);
- for (const auto &iter : Multiple->InnerMap)
- FindLabeledLoops(iter.second);
- if (Multiple->Breaks)
- LoopStack.pop();
- Next = Root->Next;
- }
- , [&](LoopShape* Loop) {
- LoopStack.push(Loop);
- FindLabeledLoops(Loop->Inner);
- LoopStack.pop();
- Next = Root->Next;
- });
- }
- }
-
- void Process(Shape * Root) {
- FindNaturals(Root);
- RemoveUnneededFlows(Root);
- FindLabeledLoops(Root);
- }
- };
-
- PostOptimizer(this).Process(Root);
-}
diff --git a/lib/Target/WebAssembly/Relooper.h b/lib/Target/WebAssembly/Relooper.h
deleted file mode 100644
index 7c564de82f34..000000000000
--- a/lib/Target/WebAssembly/Relooper.h
+++ /dev/null
@@ -1,186 +0,0 @@
-//===-- Relooper.h - Top-level interface for WebAssembly ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===-------------------------------------------------------------------===//
-///
-/// \file
-/// \brief This defines an optimized C++ implemention of the Relooper
-/// algorithm, originally developed as part of Emscripten, which
-/// generates a structured AST from arbitrary control flow.
-///
-//===-------------------------------------------------------------------===//
-
-#include "llvm/ADT/MapVector.h"
-#include "llvm/ADT/SetVector.h"
-#include "llvm/Support/Casting.h"
-
-#include <cassert>
-#include <cstdarg>
-#include <cstdio>
-#include <deque>
-#include <list>
-#include <map>
-#include <memory>
-#include <set>
-
-namespace llvm {
-
-namespace Relooper {
-
-struct Block;
-struct Shape;
-
-///
-/// Info about a branching from one block to another
-///
-struct Branch {
- enum FlowType {
- Direct = 0, // We will directly reach the right location through other
- // means, no need for continue or break
- Break = 1,
- Continue = 2,
- Nested = 3 // This code is directly reached, but we must be careful to
- // ensure it is nested in an if - it is not reached
- // unconditionally, other code paths exist alongside it that we need to make
- // sure do not intertwine
- };
- Shape
- *Ancestor; // If not nullptr, this shape is the relevant one for purposes
- // of getting to the target block. We break or continue on it
- Branch::FlowType
- Type; // If Ancestor is not nullptr, this says whether to break or
- // continue
- bool Labeled; // If a break or continue, whether we need to use a label
- const char *Condition; // The condition for which we branch. For example,
- // "my_var == 1". Conditions are checked one by one.
- // One of the conditions should have nullptr as the
- // condition, in which case it is the default
- // FIXME: move from char* to LLVM data structures
- const char *Code; // If provided, code that is run right before the branch is
- // taken. This is useful for phis
- // FIXME: move from char* to LLVM data structures
-
- Branch(const char *ConditionInit, const char *CodeInit = nullptr);
- ~Branch();
-};
-
-typedef SetVector<Block *> BlockSet;
-typedef MapVector<Block *, Branch *> BlockBranchMap;
-typedef MapVector<Block *, std::unique_ptr<Branch>> OwningBlockBranchMap;
-
-///
-/// Represents a basic block of code - some instructions that end with a
-/// control flow modifier (a branch, return or throw).
-///
-struct Block {
- // Branches become processed after we finish the shape relevant to them. For
- // example, when we recreate a loop, branches to the loop start become
- // continues and are now processed. When we calculate what shape to generate
- // from a set of blocks, we ignore processed branches. Blocks own the Branch
- // objects they use, and destroy them when done.
- OwningBlockBranchMap BranchesOut;
- BlockSet BranchesIn;
- OwningBlockBranchMap ProcessedBranchesOut;
- BlockSet ProcessedBranchesIn;
- Shape *Parent; // The shape we are directly inside
- int Id; // A unique identifier, defined when added to relooper. Note that this
- // uniquely identifies a *logical* block - if we split it, the two
- // instances have the same content *and* the same Id
- const char *Code; // The string representation of the code in this block.
- // Owning pointer (we copy the input)
- // FIXME: move from char* to LLVM data structures
- const char *BranchVar; // A variable whose value determines where we go; if
- // this is not nullptr, emit a switch on that variable
- // FIXME: move from char* to LLVM data structures
- bool IsCheckedMultipleEntry; // If true, we are a multiple entry, so reaching
- // us requires setting the label variable
-
- Block(const char *CodeInit, const char *BranchVarInit);
- ~Block();
-
- void AddBranchTo(Block *Target, const char *Condition,
- const char *Code = nullptr);
-};
-
-///
-/// Represents a structured control flow shape
-///
-struct Shape {
- int Id; // A unique identifier. Used to identify loops, labels are Lx where x
- // is the Id. Defined when added to relooper
- Shape *Next; // The shape that will appear in the code right after this one
- Shape *Natural; // The shape that control flow gets to naturally (if there is
- // Next, then this is Next)
-
- /// Discriminator for LLVM-style RTTI (dyn_cast<> et al.)
- enum ShapeKind { SK_Simple, SK_Multiple, SK_Loop };
-
-private:
- ShapeKind Kind;
-
-public:
- ShapeKind getKind() const { return Kind; }
-
- Shape(ShapeKind KindInit) : Id(-1), Next(nullptr), Kind(KindInit) {}
-};
-
-///
-/// Simple: No control flow at all, just instructions.
-///
-struct SimpleShape : public Shape {
- Block *Inner;
-
- SimpleShape() : Shape(SK_Simple), Inner(nullptr) {}
-
- static bool classof(const Shape *S) { return S->getKind() == SK_Simple; }
-};
-
-///
-/// A shape that may be implemented with a labeled loop.
-///
-struct LabeledShape : public Shape {
- bool Labeled; // If we have a loop, whether it needs to be labeled
-
- LabeledShape(ShapeKind KindInit) : Shape(KindInit), Labeled(false) {}
-};
-
-// Blocks with the same id were split and are identical, so we just care about
-// ids in Multiple entries
-typedef std::map<int, Shape *> IdShapeMap;
-
-///
-/// Multiple: A shape with more than one entry. If the next block to
-/// be entered is among them, we run it and continue to
-/// the next shape, otherwise we continue immediately to the
-/// next shape.
-///
-struct MultipleShape : public LabeledShape {
- IdShapeMap InnerMap; // entry block ID -> shape
- int Breaks; // If we have branches on us, we need a loop (or a switch). This
- // is a counter of requirements,
- // if we optimize it to 0, the loop is unneeded
- bool UseSwitch; // Whether to switch on label as opposed to an if-else chain
-
- MultipleShape() : LabeledShape(SK_Multiple), Breaks(0), UseSwitch(false) {}
-
- static bool classof(const Shape *S) { return S->getKind() == SK_Multiple; }
-};
-
-///
-/// Loop: An infinite loop.
-///
-struct LoopShape : public LabeledShape {
- Shape *Inner;
-
- LoopShape() : LabeledShape(SK_Loop), Inner(nullptr) {}
-
- static bool classof(const Shape *S) { return S->getKind() == SK_Loop; }
-};
-
-} // namespace Relooper
-
-} // namespace llvm
diff --git a/lib/Target/WebAssembly/TargetInfo/Makefile b/lib/Target/WebAssembly/TargetInfo/Makefile
deleted file mode 100644
index b021eb6d9455..000000000000
--- a/lib/Target/WebAssembly/TargetInfo/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/WebAssembly/TargetInfo/Makefile ----------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMWebAssemblyInfo
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/WebAssembly/WebAssembly.h b/lib/Target/WebAssembly/WebAssembly.h
index e972da5af74f..957f31cae222 100644
--- a/lib/Target/WebAssembly/WebAssembly.h
+++ b/lib/Target/WebAssembly/WebAssembly.h
@@ -23,23 +23,28 @@ namespace llvm {
class WebAssemblyTargetMachine;
class FunctionPass;
+// LLVM IR passes.
FunctionPass *createWebAssemblyOptimizeReturned();
+// ISel and immediate followup passes.
FunctionPass *createWebAssemblyISelDag(WebAssemblyTargetMachine &TM,
CodeGenOpt::Level OptLevel);
FunctionPass *createWebAssemblyArgumentMove();
+FunctionPass *createWebAssemblySetP2AlignOperands();
+// Late passes.
+FunctionPass *createWebAssemblyReplacePhysRegs();
+FunctionPass *createWebAssemblyPrepareForLiveIntervals();
+FunctionPass *createWebAssemblyOptimizeLiveIntervals();
FunctionPass *createWebAssemblyStoreResults();
FunctionPass *createWebAssemblyRegStackify();
FunctionPass *createWebAssemblyRegColoring();
-FunctionPass *createWebAssemblyPEI();
+FunctionPass *createWebAssemblyFixIrreducibleControlFlow();
FunctionPass *createWebAssemblyCFGStackify();
FunctionPass *createWebAssemblyLowerBrUnless();
FunctionPass *createWebAssemblyRegNumbering();
FunctionPass *createWebAssemblyPeephole();
-FunctionPass *createWebAssemblyRelooper();
-
} // end namespace llvm
#endif
diff --git a/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp b/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp
index 3893c408cf63..5887f45371fc 100644
--- a/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp
@@ -65,8 +65,8 @@ FunctionPass *llvm::createWebAssemblyArgumentMove() {
}
/// Test whether the given instruction is an ARGUMENT.
-static bool IsArgument(const MachineInstr *MI) {
- switch (MI->getOpcode()) {
+static bool IsArgument(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
case WebAssembly::ARGUMENT_I32:
case WebAssembly::ARGUMENT_I64:
case WebAssembly::ARGUMENT_F32:
@@ -88,20 +88,18 @@ bool WebAssemblyArgumentMove::runOnMachineFunction(MachineFunction &MF) {
MachineBasicBlock::iterator InsertPt = EntryMBB.end();
// Look for the first NonArg instruction.
- for (auto MII = EntryMBB.begin(), MIE = EntryMBB.end(); MII != MIE; ++MII) {
- MachineInstr *MI = MII;
+ for (MachineInstr &MI : EntryMBB) {
if (!IsArgument(MI)) {
- InsertPt = MII;
+ InsertPt = MI;
break;
}
}
// Now move any argument instructions later in the block
// to before our first NonArg instruction.
- for (auto I = InsertPt, E = EntryMBB.end(); I != E; ++I) {
- MachineInstr *MI = I;
+ for (MachineInstr &MI : llvm::make_range(InsertPt, EntryMBB.end())) {
if (IsArgument(MI)) {
- EntryMBB.insert(InsertPt, MI->removeFromParent());
+ EntryMBB.insert(InsertPt, MI.removeFromParent());
Changed = true;
}
}
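The net effect of the two loops above, in miniature: ARGUMENT instructions end
up grouped at the top of the entry block while every instruction keeps its
relative order. A self-contained analogy using plain integers in place of
machine instructions:

  #include <algorithm>
  #include <cassert>
  #include <vector>

  int main() {
    // Negative values stand in for ARGUMENT instructions.
    std::vector<int> Block = {7, -1, 8, -2, 9};
    std::stable_partition(Block.begin(), Block.end(),
                          [](int I) { return I < 0; });
    assert((Block == std::vector<int>{-1, -2, 7, 8, 9}));
    return 0;
  }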
diff --git a/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp b/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
index 45ac99d90ed9..54e9f7f52901 100644
--- a/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
@@ -67,6 +67,7 @@ private:
// AsmPrinter Implementation.
//===------------------------------------------------------------------===//
+ void EmitEndOfAsmFile(Module &M) override;
void EmitJumpTableInfo() override;
void EmitConstantPool() override;
void EmitFunctionBodyStart() override;
@@ -93,10 +94,7 @@ private:
//===----------------------------------------------------------------------===//
MVT WebAssemblyAsmPrinter::getRegType(unsigned RegNo) const {
- const TargetRegisterClass *TRC =
- TargetRegisterInfo::isVirtualRegister(RegNo)
- ? MRI->getRegClass(RegNo)
- : MRI->getTargetRegisterInfo()->getMinimalPhysRegClass(RegNo);
+ const TargetRegisterClass *TRC = MRI->getRegClass(RegNo);
for (MVT T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
if (TRC->hasType(T))
return T;
@@ -119,8 +117,7 @@ std::string WebAssemblyAsmPrinter::regToString(const MachineOperand &MO) {
return '$' + utostr(WAReg);
}
-WebAssemblyTargetStreamer *
-WebAssemblyAsmPrinter::getTargetStreamer() {
+WebAssemblyTargetStreamer *WebAssemblyAsmPrinter::getTargetStreamer() {
MCTargetStreamer *TS = OutStreamer->getTargetStreamer();
return static_cast<WebAssemblyTargetStreamer *>(TS);
}
@@ -128,16 +125,6 @@ WebAssemblyAsmPrinter::getTargetStreamer() {
//===----------------------------------------------------------------------===//
// WebAssemblyAsmPrinter Implementation.
//===----------------------------------------------------------------------===//
-
-void WebAssemblyAsmPrinter::EmitConstantPool() {
- assert(MF->getConstantPool()->getConstants().empty() &&
- "WebAssembly disables constant pools");
-}
-
-void WebAssemblyAsmPrinter::EmitJumpTableInfo() {
- // Nothing to do; jump tables are incorporated into the instruction stream.
-}
-
static void ComputeLegalValueVTs(const Function &F, const TargetMachine &TM,
Type *Ty, SmallVectorImpl<MVT> &ValueVTs) {
const DataLayout &DL(F.getParent()->getDataLayout());
@@ -154,6 +141,42 @@ static void ComputeLegalValueVTs(const Function &F, const TargetMachine &TM,
}
}
+void WebAssemblyAsmPrinter::EmitEndOfAsmFile(Module &M) {
+ for (const auto &F : M) {
+ // Emit function type info for all undefined functions
+ if (F.isDeclarationForLinker() && !F.isIntrinsic()) {
+ SmallVector<MVT, 4> SignatureVTs;
+ ComputeLegalValueVTs(F, TM, F.getReturnType(), SignatureVTs);
+ size_t NumResults = SignatureVTs.size();
+ if (SignatureVTs.size() > 1) {
+ // WebAssembly currently can't lower returns of multiple values without
+ // demoting to sret (see WebAssemblyTargetLowering::CanLowerReturn). So
+ // replace multiple return values with a pointer parameter.
+ SignatureVTs.clear();
+ SignatureVTs.push_back(
+ MVT::getIntegerVT(M.getDataLayout().getPointerSizeInBits()));
+ NumResults = 0;
+ }
+
+ for (auto &Arg : F.args()) {
+ ComputeLegalValueVTs(F, TM, Arg.getType(), SignatureVTs);
+ }
+
+ getTargetStreamer()->emitIndirectFunctionType(F.getName(), SignatureVTs,
+ NumResults);
+ }
+ }
+}
+
+void WebAssemblyAsmPrinter::EmitConstantPool() {
+ assert(MF->getConstantPool()->getConstants().empty() &&
+ "WebAssembly disables constant pools");
+}
+
+void WebAssemblyAsmPrinter::EmitJumpTableInfo() {
+ // Nothing to do; jump tables are incorporated into the instruction stream.
+}
+
void WebAssemblyAsmPrinter::EmitFunctionBodyStart() {
if (!MFI->getParams().empty())
getTargetStreamer()->emitParam(MFI->getParams());
@@ -184,13 +207,6 @@ void WebAssemblyAsmPrinter::EmitFunctionBodyStart() {
LocalTypes.push_back(getRegType(VReg));
AnyWARegs = true;
}
- auto &PhysRegs = MFI->getPhysRegs();
- for (unsigned PReg = 0; PReg < PhysRegs.size(); ++PReg) {
- if (PhysRegs[PReg] == -1U)
- continue;
- LocalTypes.push_back(getRegType(PReg));
- AnyWARegs = true;
- }
if (AnyWARegs)
getTargetStreamer()->emitLocal(LocalTypes);
@@ -212,6 +228,30 @@ void WebAssemblyAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// These represent values which are live into the function entry, so there's
// no instruction to emit.
break;
+ case WebAssembly::FALLTHROUGH_RETURN_I32:
+ case WebAssembly::FALLTHROUGH_RETURN_I64:
+ case WebAssembly::FALLTHROUGH_RETURN_F32:
+ case WebAssembly::FALLTHROUGH_RETURN_F64: {
+ // These instructions represent the implicit return at the end of a
+ // function body. The operand is always a pop.
+ assert(MFI->isVRegStackified(MI->getOperand(0).getReg()));
+
+ if (isVerbose()) {
+ OutStreamer->AddComment("fallthrough-return: $pop" +
+ utostr(MFI->getWARegStackId(
+ MFI->getWAReg(MI->getOperand(0).getReg()))));
+ OutStreamer->AddBlankLine();
+ }
+ break;
+ }
+ case WebAssembly::FALLTHROUGH_RETURN_VOID:
+ // This instruction represents the implicit return at the end of a
+ // function body with no return value.
+ if (isVerbose()) {
+ OutStreamer->AddComment("fallthrough-return");
+ OutStreamer->AddBlankLine();
+ }
+ break;
default: {
WebAssemblyMCInstLower MCInstLowering(OutContext, *this);
MCInst TmpInst;
diff --git a/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp b/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
index a39349c562fd..33166f5b554f 100644
--- a/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
@@ -10,10 +10,10 @@
/// \file
/// \brief This file implements a CFG stacking pass.
///
-/// This pass reorders the blocks in a function to put them into a reverse
-/// post-order [0], with special care to keep the order as similar as possible
-/// to the original order, and to keep loops contiguous even in the case of
-/// split backedges.
+/// This pass reorders the blocks in a function into a topological order that
+/// ignores loop backedges, never interrupts a loop with a block not dominated
+/// by the loop header, and otherwise stays as close as possible to the
+/// original order.
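+///
+/// For example, if blocks A and B form a loop with header A, and block C is
+/// not dominated by A, the order A, C, B is rejected because C would
+/// interrupt the loop; A, B, C keeps the loop contiguous.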
///
/// Then, it inserts BLOCK and LOOP markers to mark the start of scopes, since
/// scope boundaries serve as the labels for WebAssembly's control transfers.
@@ -21,14 +21,13 @@
/// This is sufficient to convert arbitrary CFGs into a form that works on
/// WebAssembly, provided that all loops are single-entry.
///
-/// [0] https://en.wikipedia.org/wiki/Depth-first_search#Vertex_orderings
-///
//===----------------------------------------------------------------------===//
#include "WebAssembly.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
-#include "llvm/ADT/SCCIterator.h"
+#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -70,90 +69,6 @@ FunctionPass *llvm::createWebAssemblyCFGStackify() {
return new WebAssemblyCFGStackify();
}
-static void EliminateMultipleEntryLoops(MachineFunction &MF,
- const MachineLoopInfo &MLI) {
- SmallPtrSet<MachineBasicBlock *, 8> InSet;
- for (scc_iterator<MachineFunction *> I = scc_begin(&MF), E = scc_end(&MF);
- I != E; ++I) {
- const std::vector<MachineBasicBlock *> &CurrentSCC = *I;
-
- // Skip trivial SCCs.
- if (CurrentSCC.size() == 1)
- continue;
-
- InSet.insert(CurrentSCC.begin(), CurrentSCC.end());
- MachineBasicBlock *Header = nullptr;
- for (MachineBasicBlock *MBB : CurrentSCC) {
- for (MachineBasicBlock *Pred : MBB->predecessors()) {
- if (InSet.count(Pred))
- continue;
- if (!Header) {
- Header = MBB;
- break;
- }
- // TODO: Implement multiple-entry loops.
- report_fatal_error("multiple-entry loops are not supported yet");
- }
- }
- assert(MLI.isLoopHeader(Header));
-
- InSet.clear();
- }
-}
-
-namespace {
-/// Post-order traversal stack entry.
-struct POStackEntry {
- MachineBasicBlock *MBB;
- SmallVector<MachineBasicBlock *, 0> Succs;
-
- POStackEntry(MachineBasicBlock *MBB, MachineFunction &MF,
- const MachineLoopInfo &MLI);
-};
-} // end anonymous namespace
-
-static bool LoopContains(const MachineLoop *Loop,
- const MachineBasicBlock *MBB) {
- return Loop ? Loop->contains(MBB) : true;
-}
-
-POStackEntry::POStackEntry(MachineBasicBlock *MBB, MachineFunction &MF,
- const MachineLoopInfo &MLI)
- : MBB(MBB), Succs(MBB->successors()) {
- // RPO is not a unique form, since at every basic block with multiple
- // successors, the DFS has to pick which order to visit the successors in.
- // Sort them strategically (see below).
- MachineLoop *Loop = MLI.getLoopFor(MBB);
- MachineFunction::iterator Next = next(MachineFunction::iterator(MBB));
- MachineBasicBlock *LayoutSucc = Next == MF.end() ? nullptr : &*Next;
- std::stable_sort(
- Succs.begin(), Succs.end(),
- [=, &MLI](const MachineBasicBlock *A, const MachineBasicBlock *B) {
- if (A == B)
- return false;
-
- // Keep loops contiguous by preferring the block that's in the same
- // loop.
- bool LoopContainsA = LoopContains(Loop, A);
- bool LoopContainsB = LoopContains(Loop, B);
- if (LoopContainsA && !LoopContainsB)
- return true;
- if (!LoopContainsA && LoopContainsB)
- return false;
-
- // Minimize perturbation by preferring the block which is the immediate
- // layout successor.
- if (A == LayoutSucc)
- return true;
- if (B == LayoutSucc)
- return false;
-
- // TODO: More sophisticated orderings may be profitable here.
-
- return false;
- });
-}
-
/// Return the "bottom" block of a loop. This differs from
/// MachineLoop::getBottomBlock in that it works even if the loop is
/// discontiguous.
@@ -165,53 +80,166 @@ static MachineBasicBlock *LoopBottom(const MachineLoop *Loop) {
return Bottom;
}
-/// Sort the blocks in RPO, taking special care to make sure that loops are
-/// contiguous even in the case of split backedges.
-///
-/// TODO: Determine whether RPO is actually worthwhile, or whether we should
-/// move to just a stable-topological-sort-based approach that would preserve
-/// more of the original order.
-static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI) {
- // Note that we do our own RPO rather than using
- // "llvm/ADT/PostOrderIterator.h" because we want control over the order that
- // successors are visited in (see above). Also, we can sort the blocks in the
- // MachineFunction as we go.
- SmallPtrSet<MachineBasicBlock *, 16> Visited;
- SmallVector<POStackEntry, 16> Stack;
-
- MachineBasicBlock *EntryBlock = &*MF.begin();
- Visited.insert(EntryBlock);
- Stack.push_back(POStackEntry(EntryBlock, MF, MLI));
-
- for (;;) {
- POStackEntry &Entry = Stack.back();
- SmallVectorImpl<MachineBasicBlock *> &Succs = Entry.Succs;
- if (!Succs.empty()) {
- MachineBasicBlock *Succ = Succs.pop_back_val();
- if (Visited.insert(Succ).second)
- Stack.push_back(POStackEntry(Succ, MF, MLI));
- continue;
- }
+static void MaybeUpdateTerminator(MachineBasicBlock *MBB) {
+#ifndef NDEBUG
+ bool AnyBarrier = false;
+#endif
+ bool AllAnalyzable = true;
+ for (const MachineInstr &Term : MBB->terminators()) {
+#ifndef NDEBUG
+ AnyBarrier |= Term.isBarrier();
+#endif
+ AllAnalyzable &= Term.isBranch() && !Term.isIndirectBranch();
+ }
+ assert((AnyBarrier || AllAnalyzable) &&
+ "AnalyzeBranch needs to analyze any block with a fallthrough");
+ if (AllAnalyzable)
+ MBB->updateTerminator();
+}
- // Put the block in its position in the MachineFunction.
- MachineBasicBlock &MBB = *Entry.MBB;
- MBB.moveBefore(&*MF.begin());
-
- // Branch instructions may utilize a fallthrough, so update them if a
- // fallthrough has been added or removed.
- if (!MBB.empty() && MBB.back().isTerminator() && !MBB.back().isBranch() &&
- !MBB.back().isBarrier())
- report_fatal_error(
- "Non-branch terminator with fallthrough cannot yet be rewritten");
- if (MBB.empty() || !MBB.back().isTerminator() || MBB.back().isBranch())
- MBB.updateTerminator();
-
- Stack.pop_back();
- if (Stack.empty())
- break;
+namespace {
+/// Sort blocks by their number.
+struct CompareBlockNumbers {
+ bool operator()(const MachineBasicBlock *A,
+ const MachineBasicBlock *B) const {
+ return A->getNumber() > B->getNumber();
+ }
+};
+/// Sort blocks by their number in the opposite order.
+struct CompareBlockNumbersBackwards {
+ bool operator()(const MachineBasicBlock *A,
+ const MachineBasicBlock *B) const {
+ return A->getNumber() < B->getNumber();
}
+};
+/// Bookkeeping for a loop: helps ensure that blocks not dominated by the loop
+/// header are not mixed in among the loop's blocks.
+struct Entry {
+ const MachineLoop *Loop;
+ unsigned NumBlocksLeft;
+
+ /// List of blocks not dominated by Loop's header that are deferred until
+ /// after all of Loop's blocks have been seen.
+ std::vector<MachineBasicBlock *> Deferred;
+
+ explicit Entry(const MachineLoop *L)
+ : Loop(L), NumBlocksLeft(L->getNumBlocks()) {}
+};
+} // end anonymous namespace
- // Now that we've sorted the blocks in RPO, renumber them.
+/// Sort the blocks, taking special care to make sure that loops are not
+/// interrupted by blocks not dominated by their header.
+/// TODO: There are many opportunities for improving the heuristics here.
+/// Explore them.
+static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
+ const MachineDominatorTree &MDT) {
+ // Prepare for a topological sort: Record the number of predecessors each
+ // block has, ignoring loop backedges.
+ MF.RenumberBlocks();
+ SmallVector<unsigned, 16> NumPredsLeft(MF.getNumBlockIDs(), 0);
+ for (MachineBasicBlock &MBB : MF) {
+ unsigned N = MBB.pred_size();
+ if (MachineLoop *L = MLI.getLoopFor(&MBB))
+ if (L->getHeader() == &MBB)
+ for (const MachineBasicBlock *Pred : MBB.predecessors())
+ if (L->contains(Pred))
+ --N;
+ NumPredsLeft[MBB.getNumber()] = N;
+ }
+
+ // Topological sort the CFG, with additional constraints:
+ // - Between a loop header and the last block in the loop, there can be
+ // no blocks not dominated by the loop header.
+ // - It's desirable to preserve the original block order when possible.
+  // We use two ready lists: Preferred and Ready. Preferred holds recently
+  // processed successors, to help preserve block sequences from the original
+  // order. Ready holds the remaining ready blocks.
+ PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
+ CompareBlockNumbers>
+ Preferred;
+ PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
+ CompareBlockNumbersBackwards>
+ Ready;
+ SmallVector<Entry, 4> Loops;
+ for (MachineBasicBlock *MBB = &MF.front();;) {
+ const MachineLoop *L = MLI.getLoopFor(MBB);
+ if (L) {
+      // If MBB is a loop header, add it to the active loop list. We can't
+      // place any blocks it doesn't dominate until we see the end of the loop.
+ if (L->getHeader() == MBB)
+ Loops.push_back(Entry(L));
+ // For each active loop the block is in, decrement the count. If MBB is
+ // the last block in an active loop, take it off the list and pick up any
+ // blocks deferred because the header didn't dominate them.
+ for (Entry &E : Loops)
+ if (E.Loop->contains(MBB) && --E.NumBlocksLeft == 0)
+ for (auto DeferredBlock : E.Deferred)
+ Ready.push(DeferredBlock);
+ while (!Loops.empty() && Loops.back().NumBlocksLeft == 0)
+ Loops.pop_back();
+ }
+ // The main topological sort logic.
+ for (MachineBasicBlock *Succ : MBB->successors()) {
+ // Ignore backedges.
+ if (MachineLoop *SuccL = MLI.getLoopFor(Succ))
+ if (SuccL->getHeader() == Succ && SuccL->contains(MBB))
+ continue;
+ // Decrement the predecessor count. If it's now zero, it's ready.
+ if (--NumPredsLeft[Succ->getNumber()] == 0)
+ Preferred.push(Succ);
+ }
+ // Determine the block to follow MBB. First try to find a preferred block,
+ // to preserve the original block order when possible.
+ MachineBasicBlock *Next = nullptr;
+ while (!Preferred.empty()) {
+ Next = Preferred.top();
+ Preferred.pop();
+      // If Next isn't dominated by the top active loop header, defer it until
+      // that loop is done.
+ if (!Loops.empty() &&
+ !MDT.dominates(Loops.back().Loop->getHeader(), Next)) {
+ Loops.back().Deferred.push_back(Next);
+ Next = nullptr;
+ continue;
+ }
+      // If Next was originally ordered before MBB, and that isn't because it
+      // was loop-rotated above the header, it's not preferred.
+ if (Next->getNumber() < MBB->getNumber() &&
+ (!L || !L->contains(Next) ||
+ L->getHeader()->getNumber() < Next->getNumber())) {
+ Ready.push(Next);
+ Next = nullptr;
+ continue;
+ }
+ break;
+ }
+ // If we didn't find a suitable block in the Preferred list, check the
+ // general Ready list.
+ if (!Next) {
+ // If there are no more blocks to process, we're done.
+ if (Ready.empty()) {
+ MaybeUpdateTerminator(MBB);
+ break;
+ }
+ for (;;) {
+ Next = Ready.top();
+ Ready.pop();
+ // If Next isn't dominated by the top active loop header, defer it until
+ // that loop is done.
+ if (!Loops.empty() &&
+ !MDT.dominates(Loops.back().Loop->getHeader(), Next)) {
+ Loops.back().Deferred.push_back(Next);
+ continue;
+ }
+ break;
+ }
+ }
+ // Move the next block into place and iterate.
+ Next->moveAfter(MBB);
+ MaybeUpdateTerminator(MBB);
+ MBB = Next;
+ }
+ assert(Loops.empty() && "Active loop list not finished");
MF.RenumberBlocks();
#ifndef NDEBUG
@@ -266,12 +294,26 @@ static bool ExplicitlyBranchesTo(MachineBasicBlock *Pred,
return false;
}
+/// Test whether MI is a child of some other node in an expression tree.
+static bool IsChild(const MachineInstr &MI,
+ const WebAssemblyFunctionInfo &MFI) {
+ if (MI.getNumOperands() == 0)
+ return false;
+ const MachineOperand &MO = MI.getOperand(0);
+ if (!MO.isReg() || MO.isImplicit() || !MO.isDef())
+ return false;
+ unsigned Reg = MO.getReg();
+ return TargetRegisterInfo::isVirtualRegister(Reg) &&
+ MFI.isVRegStackified(Reg);
+}
+
/// Insert a BLOCK marker for branches to MBB (if needed).
static void PlaceBlockMarker(MachineBasicBlock &MBB, MachineFunction &MF,
SmallVectorImpl<MachineBasicBlock *> &ScopeTops,
const WebAssemblyInstrInfo &TII,
const MachineLoopInfo &MLI,
- MachineDominatorTree &MDT) {
+ MachineDominatorTree &MDT,
+ WebAssemblyFunctionInfo &MFI) {
// First compute the nearest common dominator of all forward non-fallthrough
// predecessors so that we minimize the time that the BLOCK is on the stack,
// which reduces overall stack height.
@@ -319,14 +361,15 @@ static void PlaceBlockMarker(MachineBasicBlock &MBB, MachineFunction &MF,
MachineLoop *HeaderLoop = MLI.getLoopFor(Header);
if (HeaderLoop && MBB.getNumber() > LoopBottom(HeaderLoop)->getNumber()) {
// Header is the header of a loop that does not lexically contain MBB, so
- // the BLOCK needs to be above the LOOP.
+ // the BLOCK needs to be above the LOOP, after any END constructs.
InsertPos = Header->begin();
+ while (InsertPos->getOpcode() != WebAssembly::LOOP)
+ ++InsertPos;
} else {
// Otherwise, insert the BLOCK as late in Header as we can, but before the
// beginning of the local expression tree and any nested BLOCKs.
InsertPos = Header->getFirstTerminator();
- while (InsertPos != Header->begin() &&
- prev(InsertPos)->definesRegister(WebAssembly::EXPR_STACK) &&
+ while (InsertPos != Header->begin() && IsChild(*prev(InsertPos), MFI) &&
prev(InsertPos)->getOpcode() != WebAssembly::LOOP &&
prev(InsertPos)->getOpcode() != WebAssembly::END_BLOCK &&
prev(InsertPos)->getOpcode() != WebAssembly::END_LOOP)
@@ -388,7 +431,7 @@ static void PlaceLoopMarker(
assert((!ScopeTops[AfterLoop->getNumber()] ||
ScopeTops[AfterLoop->getNumber()]->getNumber() < MBB.getNumber()) &&
- "With RPO we should visit the outer-most loop for a block first.");
+ "With block sorting the outermost loop for a block should be first.");
if (!ScopeTops[AfterLoop->getNumber()])
ScopeTops[AfterLoop->getNumber()] = &MBB;
}
@@ -409,7 +452,8 @@ GetDepth(const SmallVectorImpl<const MachineBasicBlock *> &Stack,
/// Insert LOOP and BLOCK markers at appropriate places.
static void PlaceMarkers(MachineFunction &MF, const MachineLoopInfo &MLI,
const WebAssemblyInstrInfo &TII,
- MachineDominatorTree &MDT) {
+ MachineDominatorTree &MDT,
+ WebAssemblyFunctionInfo &MFI) {
// For each block whose label represents the end of a scope, record the block
// which holds the beginning of the scope. This will allow us to quickly skip
// over scoped regions when walking blocks. We allocate one more than the
@@ -425,7 +469,7 @@ static void PlaceMarkers(MachineFunction &MF, const MachineLoopInfo &MLI,
PlaceLoopMarker(MBB, MF, ScopeTops, LoopTops, TII, MLI);
// Place the BLOCK for MBB if MBB is branched to from above.
- PlaceBlockMarker(MBB, MF, ScopeTops, TII, MLI, MDT);
+ PlaceBlockMarker(MBB, MF, ScopeTops, TII, MLI, MDT, MFI);
}
// Now rewrite references to basic blocks to be depth immediates.
@@ -478,16 +522,14 @@ bool WebAssemblyCFGStackify::runOnMachineFunction(MachineFunction &MF) {
auto &MDT = getAnalysis<MachineDominatorTree>();
// Liveness is not tracked for EXPR_STACK physreg.
const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
MF.getRegInfo().invalidateLiveness();
- // RPO sorting needs all loops to be single-entry.
- EliminateMultipleEntryLoops(MF, MLI);
-
- // Sort the blocks in RPO, with contiguous loops.
- SortBlocks(MF, MLI);
+ // Sort the blocks, with contiguous loops.
+ SortBlocks(MF, MLI, MDT);
// Place the BLOCK and LOOP markers to indicate the beginnings of scopes.
- PlaceMarkers(MF, MLI, TII, MDT);
+ PlaceMarkers(MF, MLI, TII, MDT, MFI);
return true;
}
diff --git a/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
index 1b761b1a9d73..7bfa4074849b 100644
--- a/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -12,10 +12,13 @@
/// class. Some of the target-specific code is generated by tablegen in the file
/// WebAssemblyGenFastISel.inc, which is #included here.
///
+/// TODO: kill flags
+///
//===----------------------------------------------------------------------===//
#include "WebAssembly.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
@@ -41,13 +44,122 @@ using namespace llvm;
namespace {
class WebAssemblyFastISel final : public FastISel {
+ // All possible address modes.
+ class Address {
+ public:
+ typedef enum { RegBase, FrameIndexBase } BaseKind;
+
+ private:
+ BaseKind Kind;
+ union {
+ unsigned Reg;
+ int FI;
+ } Base;
+
+ int64_t Offset;
+
+ const GlobalValue *GV;
+
+ public:
+ // Innocuous defaults for our address.
+    Address() : Kind(RegBase), Offset(0), GV(nullptr) { Base.Reg = 0; }
+ void setKind(BaseKind K) { Kind = K; }
+ BaseKind getKind() const { return Kind; }
+ bool isRegBase() const { return Kind == RegBase; }
+ bool isFIBase() const { return Kind == FrameIndexBase; }
+ void setReg(unsigned Reg) {
+ assert(isRegBase() && "Invalid base register access!");
+ Base.Reg = Reg;
+ }
+ unsigned getReg() const {
+ assert(isRegBase() && "Invalid base register access!");
+ return Base.Reg;
+ }
+ void setFI(unsigned FI) {
+ assert(isFIBase() && "Invalid base frame index access!");
+ Base.FI = FI;
+ }
+ unsigned getFI() const {
+ assert(isFIBase() && "Invalid base frame index access!");
+ return Base.FI;
+ }
+
+ void setOffset(int64_t Offset_) { Offset = Offset_; }
+ int64_t getOffset() const { return Offset; }
+ void setGlobalValue(const GlobalValue *G) { GV = G; }
+ const GlobalValue *getGlobalValue() const { return GV; }
+ };
+
/// Keep a pointer to the WebAssemblySubtarget around so that we can make the
/// right decision when generating code for different targets.
const WebAssemblySubtarget *Subtarget;
LLVMContext *Context;
- // Call handling routines.
private:
+ // Utility helper routines
+ MVT::SimpleValueType getSimpleType(Type *Ty) {
+ EVT VT = TLI.getValueType(DL, Ty, /*HandleUnknown=*/true);
+ return VT.isSimple() ? VT.getSimpleVT().SimpleTy :
+ MVT::INVALID_SIMPLE_VALUE_TYPE;
+ }
+ MVT::SimpleValueType getLegalType(MVT::SimpleValueType VT) {
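+    // WebAssembly values are only i32, i64, f32, and f64; narrower integer
+    // types are promoted to i32.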
+ switch (VT) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ return MVT::i32;
+ case MVT::i32:
+ case MVT::i64:
+ case MVT::f32:
+ case MVT::f64:
+ return VT;
+ default:
+ break;
+ }
+ return MVT::INVALID_SIMPLE_VALUE_TYPE;
+ }
+ bool computeAddress(const Value *Obj, Address &Addr);
+ void materializeLoadStoreOperands(Address &Addr);
+ void addLoadStoreOperands(const Address &Addr, const MachineInstrBuilder &MIB,
+ MachineMemOperand *MMO);
+ unsigned maskI1Value(unsigned Reg, const Value *V);
+ unsigned getRegForI1Value(const Value *V, bool &Not);
+ unsigned zeroExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From);
+ unsigned signExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From);
+ unsigned zeroExtend(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From,
+ MVT::SimpleValueType To);
+ unsigned signExtend(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From,
+ MVT::SimpleValueType To);
+ unsigned getRegForUnsignedValue(const Value *V);
+ unsigned getRegForSignedValue(const Value *V);
+ unsigned getRegForPromotedValue(const Value *V, bool IsSigned);
+ unsigned notValue(unsigned Reg);
+ unsigned copyValue(unsigned Reg);
+
+ // Backend specific FastISel code.
+ unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
+ unsigned fastMaterializeConstant(const Constant *C) override;
+ bool fastLowerArguments() override;
+
+ // Selection routines.
+ bool selectCall(const Instruction *I);
+ bool selectSelect(const Instruction *I);
+ bool selectTrunc(const Instruction *I);
+ bool selectZExt(const Instruction *I);
+ bool selectSExt(const Instruction *I);
+ bool selectICmp(const Instruction *I);
+ bool selectFCmp(const Instruction *I);
+ bool selectBitCast(const Instruction *I);
+ bool selectLoad(const Instruction *I);
+ bool selectStore(const Instruction *I);
+ bool selectBr(const Instruction *I);
+ bool selectRet(const Instruction *I);
+ bool selectUnreachable(const Instruction *I);
+
public:
// Backend specific FastISel code.
WebAssemblyFastISel(FunctionLoweringInfo &FuncInfo,
@@ -64,11 +176,1001 @@ public:
} // end anonymous namespace
+bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
+ const User *U = nullptr;
+ unsigned Opcode = Instruction::UserOp1;
+ if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
+ // Don't walk into other basic blocks unless the object is an alloca from
+ // another block, otherwise it may not have a virtual register assigned.
+ if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
+ FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
+ Opcode = I->getOpcode();
+ U = I;
+ }
+ } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
+ Opcode = C->getOpcode();
+ U = C;
+ }
+
+ if (auto *Ty = dyn_cast<PointerType>(Obj->getType()))
+ if (Ty->getAddressSpace() > 255)
+ // Fast instruction selection doesn't support the special
+ // address spaces.
+ return false;
+
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
+ if (Addr.getGlobalValue())
+ return false;
+ Addr.setGlobalValue(GV);
+ return true;
+ }
+
+ switch (Opcode) {
+ default:
+ break;
+ case Instruction::BitCast: {
+ // Look through bitcasts.
+ return computeAddress(U->getOperand(0), Addr);
+ }
+ case Instruction::IntToPtr: {
+ // Look past no-op inttoptrs.
+ if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
+ TLI.getPointerTy(DL))
+ return computeAddress(U->getOperand(0), Addr);
+ break;
+ }
+ case Instruction::PtrToInt: {
+ // Look past no-op ptrtoints.
+ if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
+ return computeAddress(U->getOperand(0), Addr);
+ break;
+ }
+ case Instruction::GetElementPtr: {
+ Address SavedAddr = Addr;
+ uint64_t TmpOffset = Addr.getOffset();
+ // Iterate through the GEP folding the constants into offsets where
+ // we can.
+ for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
+ GTI != E; ++GTI) {
+ const Value *Op = GTI.getOperand();
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ const StructLayout *SL = DL.getStructLayout(STy);
+ unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
+ TmpOffset += SL->getElementOffset(Idx);
+ } else {
+ uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
+ for (;;) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+ // Constant-offset addressing.
+ TmpOffset += CI->getSExtValue() * S;
+ break;
+ }
+ if (S == 1 && Addr.isRegBase() && Addr.getReg() == 0) {
+ // An unscaled add of a register. Set it as the new base.
+ Addr.setReg(getRegForValue(Op));
+ break;
+ }
+ if (canFoldAddIntoGEP(U, Op)) {
+ // A compatible add with a constant operand. Fold the constant.
+ ConstantInt *CI =
+ cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
+ TmpOffset += CI->getSExtValue() * S;
+ // Iterate on the other operand.
+ Op = cast<AddOperator>(Op)->getOperand(0);
+ continue;
+ }
+ // Unsupported
+ goto unsupported_gep;
+ }
+ }
+ }
+ // Try to grab the base operand now.
+ Addr.setOffset(TmpOffset);
+ if (computeAddress(U->getOperand(0), Addr))
+ return true;
+ // We failed, restore everything and try the other options.
+ Addr = SavedAddr;
+ unsupported_gep:
+ break;
+ }
+ case Instruction::Alloca: {
+ const AllocaInst *AI = cast<AllocaInst>(Obj);
+ DenseMap<const AllocaInst *, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ Addr.setKind(Address::FrameIndexBase);
+ Addr.setFI(SI->second);
+ return true;
+ }
+ break;
+ }
+ case Instruction::Add: {
+ // Adds of constants are common and easy enough.
+ const Value *LHS = U->getOperand(0);
+ const Value *RHS = U->getOperand(1);
+
+ if (isa<ConstantInt>(LHS))
+ std::swap(LHS, RHS);
+
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+ Addr.setOffset(Addr.getOffset() + CI->getSExtValue());
+ return computeAddress(LHS, Addr);
+ }
+
+ Address Backup = Addr;
+ if (computeAddress(LHS, Addr) && computeAddress(RHS, Addr))
+ return true;
+ Addr = Backup;
+
+ break;
+ }
+ case Instruction::Sub: {
+ // Subs of constants are common and easy enough.
+ const Value *LHS = U->getOperand(0);
+ const Value *RHS = U->getOperand(1);
+
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+ Addr.setOffset(Addr.getOffset() - CI->getSExtValue());
+ return computeAddress(LHS, Addr);
+ }
+ break;
+ }
+ }
+ Addr.setReg(getRegForValue(Obj));
+ return Addr.getReg() != 0;
+}
+
+void WebAssemblyFastISel::materializeLoadStoreOperands(Address &Addr) {
+ if (Addr.isRegBase()) {
+ unsigned Reg = Addr.getReg();
+ if (Reg == 0) {
+ Reg = createResultReg(Subtarget->hasAddr64() ?
+ &WebAssembly::I64RegClass :
+ &WebAssembly::I32RegClass);
+ unsigned Opc = Subtarget->hasAddr64() ?
+ WebAssembly::CONST_I64 :
+ WebAssembly::CONST_I32;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), Reg)
+ .addImm(0);
+ Addr.setReg(Reg);
+ }
+ }
+}
+
+void WebAssemblyFastISel::addLoadStoreOperands(const Address &Addr,
+ const MachineInstrBuilder &MIB,
+ MachineMemOperand *MMO) {
+ if (const GlobalValue *GV = Addr.getGlobalValue())
+ MIB.addGlobalAddress(GV, Addr.getOffset());
+ else
+ MIB.addImm(Addr.getOffset());
+
+ if (Addr.isRegBase())
+ MIB.addReg(Addr.getReg());
+ else
+ MIB.addFrameIndex(Addr.getFI());
+
+ // Set the alignment operand (this is rewritten in SetP2AlignOperands).
+ // TODO: Disable SetP2AlignOperands for FastISel and just do it here.
+ MIB.addImm(0);
+
+ MIB.addMemOperand(MMO);
+}
+
+unsigned WebAssemblyFastISel::maskI1Value(unsigned Reg, const Value *V) {
+ return zeroExtendToI32(Reg, V, MVT::i1);
+}
+
+unsigned WebAssemblyFastISel::getRegForI1Value(const Value *V, bool &Not) {
+ if (const ICmpInst *ICmp = dyn_cast<ICmpInst>(V))
+ if (const ConstantInt *C = dyn_cast<ConstantInt>(ICmp->getOperand(1)))
+ if (ICmp->isEquality() && C->isZero() && C->getType()->isIntegerTy(32)) {
+ Not = ICmp->isTrueWhenEqual();
+ return getRegForValue(ICmp->getOperand(0));
+ }
+
+ if (BinaryOperator::isNot(V)) {
+ Not = true;
+ return getRegForValue(BinaryOperator::getNotArgument(V));
+ }
+
+ Not = false;
+ return maskI1Value(getRegForValue(V), V);
+}
+
+unsigned WebAssemblyFastISel::zeroExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From) {
+ switch (From) {
+ case MVT::i1:
+ // If the value is naturally an i1, we don't need to mask it.
+ // TODO: Recursively examine selects, phis, and, or, xor, constants.
+    if (V != nullptr &&
+        (isa<CmpInst>(V) ||
+         (isa<Argument>(V) && cast<Argument>(V)->hasZExtAttr())))
+      return copyValue(Reg);
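+    // Otherwise, fall through and mask the value like i8 and i16.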
+ case MVT::i8:
+ case MVT::i16:
+ break;
+ case MVT::i32:
+ return copyValue(Reg);
+ default:
+ return 0;
+ }
+
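+  // Materialize the mask for the narrow type (e.g. 0xFF for i8, 0xFFFF for
+  // i16) and AND the value with it.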
+ unsigned Imm = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::CONST_I32), Imm)
+ .addImm(~(~uint64_t(0) << MVT(From).getSizeInBits()));
+
+ unsigned Result = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::AND_I32), Result)
+ .addReg(Reg)
+ .addReg(Imm);
+
+ return Result;
+}
+
+unsigned WebAssemblyFastISel::signExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From) {
+ switch (From) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ break;
+ case MVT::i32:
+ return copyValue(Reg);
+ default:
+ return 0;
+ }
+
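+  // Sign-extend by shifting left and then arithmetic-shifting right by
+  // 32 minus the narrow width (e.g. 24 for i8), replicating the sign bit
+  // into the high bits.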
+ unsigned Imm = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::CONST_I32), Imm)
+ .addImm(32 - MVT(From).getSizeInBits());
+
+ unsigned Left = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::SHL_I32), Left)
+ .addReg(Reg)
+ .addReg(Imm);
+
+ unsigned Right = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::SHR_S_I32), Right)
+ .addReg(Left)
+ .addReg(Imm);
+
+ return Right;
+}
+
+unsigned WebAssemblyFastISel::zeroExtend(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From,
+ MVT::SimpleValueType To) {
+ if (To == MVT::i64) {
+ if (From == MVT::i64)
+ return copyValue(Reg);
+
+ Reg = zeroExtendToI32(Reg, V, From);
+
+ unsigned Result = createResultReg(&WebAssembly::I64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::I64_EXTEND_U_I32), Result)
+ .addReg(Reg);
+ return Result;
+ }
+
+ return zeroExtendToI32(Reg, V, From);
+}
+
+unsigned WebAssemblyFastISel::signExtend(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From,
+ MVT::SimpleValueType To) {
+ if (To == MVT::i64) {
+ if (From == MVT::i64)
+ return copyValue(Reg);
+
+ Reg = signExtendToI32(Reg, V, From);
+
+ unsigned Result = createResultReg(&WebAssembly::I64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::I64_EXTEND_S_I32), Result)
+ .addReg(Reg);
+ return Result;
+ }
+
+ return signExtendToI32(Reg, V, From);
+}
+
+unsigned WebAssemblyFastISel::getRegForUnsignedValue(const Value *V) {
+ MVT::SimpleValueType From = getSimpleType(V->getType());
+ MVT::SimpleValueType To = getLegalType(From);
+ return zeroExtend(getRegForValue(V), V, From, To);
+}
+
+unsigned WebAssemblyFastISel::getRegForSignedValue(const Value *V) {
+ MVT::SimpleValueType From = getSimpleType(V->getType());
+ MVT::SimpleValueType To = getLegalType(From);
+  return signExtend(getRegForValue(V), V, From, To);
+}
+
+unsigned WebAssemblyFastISel::getRegForPromotedValue(const Value *V,
+ bool IsSigned) {
+ return IsSigned ? getRegForSignedValue(V) :
+ getRegForUnsignedValue(V);
+}
+
+unsigned WebAssemblyFastISel::notValue(unsigned Reg) {
+ assert(MRI.getRegClass(Reg) == &WebAssembly::I32RegClass);
+
+ unsigned NotReg = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::EQZ_I32), NotReg)
+ .addReg(Reg);
+ return NotReg;
+}
+
+unsigned WebAssemblyFastISel::copyValue(unsigned Reg) {
+ unsigned ResultReg = createResultReg(MRI.getRegClass(Reg));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::COPY), ResultReg)
+ .addReg(Reg);
+ return ResultReg;
+}
+
+unsigned WebAssemblyFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
+ DenseMap<const AllocaInst *, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ unsigned ResultReg = createResultReg(Subtarget->hasAddr64() ?
+ &WebAssembly::I64RegClass :
+ &WebAssembly::I32RegClass);
+ unsigned Opc = Subtarget->hasAddr64() ?
+ WebAssembly::COPY_LOCAL_I64 :
+ WebAssembly::COPY_LOCAL_I32;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addFrameIndex(SI->second);
+ return ResultReg;
+ }
+
+ return 0;
+}
+
+unsigned WebAssemblyFastISel::fastMaterializeConstant(const Constant *C) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
+ unsigned ResultReg = createResultReg(Subtarget->hasAddr64() ?
+ &WebAssembly::I64RegClass :
+ &WebAssembly::I32RegClass);
+ unsigned Opc = Subtarget->hasAddr64() ?
+ WebAssembly::CONST_I64 :
+ WebAssembly::CONST_I32;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addGlobalAddress(GV);
+ return ResultReg;
+ }
+
+ // Let target-independent code handle it.
+ return 0;
+}
+
+bool WebAssemblyFastISel::fastLowerArguments() {
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ const Function *F = FuncInfo.Fn;
+ if (F->isVarArg())
+ return false;
+
+ unsigned i = 0;
+ for (auto const &Arg : F->args()) {
+ const AttributeSet &Attrs = F->getAttributes();
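+    // Attribute indices are biased by one: index 0 is the return value, so
+    // parameter i uses index i+1.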
+ if (Attrs.hasAttribute(i+1, Attribute::ByVal) ||
+ Attrs.hasAttribute(i+1, Attribute::SwiftSelf) ||
+ Attrs.hasAttribute(i+1, Attribute::SwiftError) ||
+ Attrs.hasAttribute(i+1, Attribute::InAlloca) ||
+ Attrs.hasAttribute(i+1, Attribute::Nest))
+ return false;
+
+ Type *ArgTy = Arg.getType();
+ if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
+ return false;
+
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ switch (getSimpleType(ArgTy)) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ Opc = WebAssembly::ARGUMENT_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::ARGUMENT_I64;
+ RC = &WebAssembly::I64RegClass;
+ break;
+ case MVT::f32:
+ Opc = WebAssembly::ARGUMENT_F32;
+ RC = &WebAssembly::F32RegClass;
+ break;
+ case MVT::f64:
+ Opc = WebAssembly::ARGUMENT_F64;
+ RC = &WebAssembly::F64RegClass;
+ break;
+ default:
+ return false;
+ }
+ unsigned ResultReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addImm(i);
+ updateValueMap(&Arg, ResultReg);
+
+ ++i;
+ }
+
+ MRI.addLiveIn(WebAssembly::ARGUMENTS);
+
+ auto *MFI = MF->getInfo<WebAssemblyFunctionInfo>();
+ for (auto const &Arg : F->args())
+ MFI->addParam(getLegalType(getSimpleType(Arg.getType())));
+
+ return true;
+}
+
+bool WebAssemblyFastISel::selectCall(const Instruction *I) {
+ const CallInst *Call = cast<CallInst>(I);
+
+ if (Call->isMustTailCall() || Call->isInlineAsm() ||
+ Call->getFunctionType()->isVarArg())
+ return false;
+
+ Function *Func = Call->getCalledFunction();
+ if (Func && Func->isIntrinsic())
+ return false;
+
+ FunctionType *FuncTy = Call->getFunctionType();
+ unsigned Opc;
+ bool IsDirect = Func != nullptr;
+ bool IsVoid = FuncTy->getReturnType()->isVoidTy();
+ unsigned ResultReg;
+ if (IsVoid) {
+ Opc = IsDirect ? WebAssembly::CALL_VOID : WebAssembly::CALL_INDIRECT_VOID;
+ } else {
+ MVT::SimpleValueType RetTy = getSimpleType(Call->getType());
+ switch (RetTy) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ Opc = IsDirect ? WebAssembly::CALL_I32 : WebAssembly::CALL_INDIRECT_I32;
+ ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ break;
+ case MVT::i64:
+ Opc = IsDirect ? WebAssembly::CALL_I64 : WebAssembly::CALL_INDIRECT_I64;
+ ResultReg = createResultReg(&WebAssembly::I64RegClass);
+ break;
+ case MVT::f32:
+ Opc = IsDirect ? WebAssembly::CALL_F32 : WebAssembly::CALL_INDIRECT_F32;
+ ResultReg = createResultReg(&WebAssembly::F32RegClass);
+ break;
+ case MVT::f64:
+ Opc = IsDirect ? WebAssembly::CALL_F64 : WebAssembly::CALL_INDIRECT_F64;
+ ResultReg = createResultReg(&WebAssembly::F64RegClass);
+ break;
+ default:
+ return false;
+ }
+ }
+
+ SmallVector<unsigned, 8> Args;
+ for (unsigned i = 0, e = Call->getNumArgOperands(); i < e; ++i) {
+ Value *V = Call->getArgOperand(i);
+ MVT::SimpleValueType ArgTy = getSimpleType(V->getType());
+ if (ArgTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
+ return false;
+
+ const AttributeSet &Attrs = Call->getAttributes();
+ if (Attrs.hasAttribute(i+1, Attribute::ByVal) ||
+ Attrs.hasAttribute(i+1, Attribute::SwiftSelf) ||
+ Attrs.hasAttribute(i+1, Attribute::SwiftError) ||
+ Attrs.hasAttribute(i+1, Attribute::InAlloca) ||
+ Attrs.hasAttribute(i+1, Attribute::Nest))
+ return false;
+
+ unsigned Reg;
+
+ if (Attrs.hasAttribute(i+1, Attribute::SExt))
+ Reg = getRegForSignedValue(V);
+ else if (Attrs.hasAttribute(i+1, Attribute::ZExt))
+ Reg = getRegForUnsignedValue(V);
+ else
+ Reg = getRegForValue(V);
+
+ if (Reg == 0)
+ return false;
+
+ Args.push_back(Reg);
+ }
+
+ auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
+
+ if (!IsVoid)
+ MIB.addReg(ResultReg, RegState::Define);
+
+ if (IsDirect)
+ MIB.addGlobalAddress(Func);
+ else
+ MIB.addReg(getRegForValue(Call->getCalledValue()));
+
+ for (unsigned ArgReg : Args)
+ MIB.addReg(ArgReg);
+
+ if (!IsVoid)
+ updateValueMap(Call, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectSelect(const Instruction *I) {
+ const SelectInst *Select = cast<SelectInst>(I);
+
+ bool Not;
+ unsigned CondReg = getRegForI1Value(Select->getCondition(), Not);
+ if (CondReg == 0)
+ return false;
+
+ unsigned TrueReg = getRegForValue(Select->getTrueValue());
+ if (TrueReg == 0)
+ return false;
+
+ unsigned FalseReg = getRegForValue(Select->getFalseValue());
+ if (FalseReg == 0)
+ return false;
+
+ if (Not)
+ std::swap(TrueReg, FalseReg);
+
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ switch (getSimpleType(Select->getType())) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ Opc = WebAssembly::SELECT_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::SELECT_I64;
+ RC = &WebAssembly::I64RegClass;
+ break;
+ case MVT::f32:
+ Opc = WebAssembly::SELECT_F32;
+ RC = &WebAssembly::F32RegClass;
+ break;
+ case MVT::f64:
+ Opc = WebAssembly::SELECT_F64;
+ RC = &WebAssembly::F64RegClass;
+ break;
+ default:
+ return false;
+ }
+
+ unsigned ResultReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(TrueReg)
+ .addReg(FalseReg)
+ .addReg(CondReg);
+
+ updateValueMap(Select, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectTrunc(const Instruction *I) {
+ const TruncInst *Trunc = cast<TruncInst>(I);
+
+ unsigned Reg = getRegForValue(Trunc->getOperand(0));
+ if (Reg == 0)
+ return false;
+
+ if (Trunc->getOperand(0)->getType()->isIntegerTy(64)) {
+ unsigned Result = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::I32_WRAP_I64), Result)
+ .addReg(Reg);
+ Reg = Result;
+ }
+
+ updateValueMap(Trunc, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectZExt(const Instruction *I) {
+ const ZExtInst *ZExt = cast<ZExtInst>(I);
+
+ const Value *Op = ZExt->getOperand(0);
+ MVT::SimpleValueType From = getSimpleType(Op->getType());
+ MVT::SimpleValueType To = getLegalType(getSimpleType(ZExt->getType()));
+ unsigned Reg = zeroExtend(getRegForValue(Op), Op, From, To);
+ if (Reg == 0)
+ return false;
+
+ updateValueMap(ZExt, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectSExt(const Instruction *I) {
+ const SExtInst *SExt = cast<SExtInst>(I);
+
+ const Value *Op = SExt->getOperand(0);
+ MVT::SimpleValueType From = getSimpleType(Op->getType());
+ MVT::SimpleValueType To = getLegalType(getSimpleType(SExt->getType()));
+ unsigned Reg = signExtend(getRegForValue(Op), Op, From, To);
+ if (Reg == 0)
+ return false;
+
+ updateValueMap(SExt, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectICmp(const Instruction *I) {
+ const ICmpInst *ICmp = cast<ICmpInst>(I);
+
+ bool I32 = getSimpleType(ICmp->getOperand(0)->getType()) != MVT::i64;
+ unsigned Opc;
+ bool isSigned = false;
+ switch (ICmp->getPredicate()) {
+ case ICmpInst::ICMP_EQ:
+ Opc = I32 ? WebAssembly::EQ_I32 : WebAssembly::EQ_I64;
+ break;
+ case ICmpInst::ICMP_NE:
+ Opc = I32 ? WebAssembly::NE_I32 : WebAssembly::NE_I64;
+ break;
+ case ICmpInst::ICMP_UGT:
+ Opc = I32 ? WebAssembly::GT_U_I32 : WebAssembly::GT_U_I64;
+ break;
+ case ICmpInst::ICMP_UGE:
+ Opc = I32 ? WebAssembly::GE_U_I32 : WebAssembly::GE_U_I64;
+ break;
+ case ICmpInst::ICMP_ULT:
+ Opc = I32 ? WebAssembly::LT_U_I32 : WebAssembly::LT_U_I64;
+ break;
+ case ICmpInst::ICMP_ULE:
+ Opc = I32 ? WebAssembly::LE_U_I32 : WebAssembly::LE_U_I64;
+ break;
+ case ICmpInst::ICMP_SGT:
+ Opc = I32 ? WebAssembly::GT_S_I32 : WebAssembly::GT_S_I64;
+ isSigned = true;
+ break;
+ case ICmpInst::ICMP_SGE:
+ Opc = I32 ? WebAssembly::GE_S_I32 : WebAssembly::GE_S_I64;
+ isSigned = true;
+ break;
+ case ICmpInst::ICMP_SLT:
+ Opc = I32 ? WebAssembly::LT_S_I32 : WebAssembly::LT_S_I64;
+ isSigned = true;
+ break;
+ case ICmpInst::ICMP_SLE:
+ Opc = I32 ? WebAssembly::LE_S_I32 : WebAssembly::LE_S_I64;
+ isSigned = true;
+ break;
+ default: return false;
+ }
+
+ unsigned LHS = getRegForPromotedValue(ICmp->getOperand(0), isSigned);
+ if (LHS == 0)
+ return false;
+
+ unsigned RHS = getRegForPromotedValue(ICmp->getOperand(1), isSigned);
+ if (RHS == 0)
+ return false;
+
+ unsigned ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(LHS)
+ .addReg(RHS);
+ updateValueMap(ICmp, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectFCmp(const Instruction *I) {
+ const FCmpInst *FCmp = cast<FCmpInst>(I);
+
+ unsigned LHS = getRegForValue(FCmp->getOperand(0));
+ if (LHS == 0)
+ return false;
+
+ unsigned RHS = getRegForValue(FCmp->getOperand(1));
+ if (RHS == 0)
+ return false;
+
+ bool F32 = getSimpleType(FCmp->getOperand(0)->getType()) != MVT::f64;
+ unsigned Opc;
+ bool Not = false;
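+  // Unordered predicates (except une, which maps directly to ne below) are
+  // lowered as the inverse ordered compare with the result inverted via EQZ;
+  // ordered compares are false when an operand is NaN, so the inverted result
+  // is true, as unordered semantics require.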
+ switch (FCmp->getPredicate()) {
+ case FCmpInst::FCMP_OEQ:
+ Opc = F32 ? WebAssembly::EQ_F32 : WebAssembly::EQ_F64;
+ break;
+ case FCmpInst::FCMP_UNE:
+ Opc = F32 ? WebAssembly::NE_F32 : WebAssembly::NE_F64;
+ break;
+ case FCmpInst::FCMP_OGT:
+ Opc = F32 ? WebAssembly::GT_F32 : WebAssembly::GT_F64;
+ break;
+ case FCmpInst::FCMP_OGE:
+ Opc = F32 ? WebAssembly::GE_F32 : WebAssembly::GE_F64;
+ break;
+ case FCmpInst::FCMP_OLT:
+ Opc = F32 ? WebAssembly::LT_F32 : WebAssembly::LT_F64;
+ break;
+ case FCmpInst::FCMP_OLE:
+ Opc = F32 ? WebAssembly::LE_F32 : WebAssembly::LE_F64;
+ break;
+ case FCmpInst::FCMP_UGT:
+ Opc = F32 ? WebAssembly::LE_F32 : WebAssembly::LE_F64;
+ Not = true;
+ break;
+ case FCmpInst::FCMP_UGE:
+ Opc = F32 ? WebAssembly::LT_F32 : WebAssembly::LT_F64;
+ Not = true;
+ break;
+ case FCmpInst::FCMP_ULT:
+ Opc = F32 ? WebAssembly::GE_F32 : WebAssembly::GE_F64;
+ Not = true;
+ break;
+ case FCmpInst::FCMP_ULE:
+ Opc = F32 ? WebAssembly::GT_F32 : WebAssembly::GT_F64;
+ Not = true;
+ break;
+ default:
+ return false;
+ }
+
+ unsigned ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(LHS)
+ .addReg(RHS);
+
+ if (Not)
+ ResultReg = notValue(ResultReg);
+
+ updateValueMap(FCmp, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectBitCast(const Instruction *I) {
+ // Target-independent code can handle this, except it doesn't set the dead
+ // flag on the ARGUMENTS clobber, so we have to do that manually in order
+ // to satisfy code that expects this of isBitcast() instructions.
+ EVT VT = TLI.getValueType(DL, I->getOperand(0)->getType());
+ EVT RetVT = TLI.getValueType(DL, I->getType());
+ if (!VT.isSimple() || !RetVT.isSimple())
+ return false;
+
+ if (VT == RetVT) {
+ // No-op bitcast.
+ updateValueMap(I, getRegForValue(I->getOperand(0)));
+ return true;
+ }
+
+ unsigned Reg = fastEmit_ISD_BITCAST_r(VT.getSimpleVT(), RetVT.getSimpleVT(),
+ getRegForValue(I->getOperand(0)),
+ I->getOperand(0)->hasOneUse());
+ if (!Reg)
+ return false;
+ MachineBasicBlock::iterator Iter = FuncInfo.InsertPt;
+ --Iter;
+ assert(Iter->isBitcast());
+ Iter->setPhysRegsDeadExcept(ArrayRef<unsigned>(), TRI);
+ updateValueMap(I, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectLoad(const Instruction *I) {
+ const LoadInst *Load = cast<LoadInst>(I);
+ if (Load->isAtomic())
+ return false;
+
+ Address Addr;
+ if (!computeAddress(Load->getPointerOperand(), Addr))
+ return false;
+
+ // TODO: Fold a following sign-/zero-extend into the load instruction.
+
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ switch (getSimpleType(Load->getType())) {
+ case MVT::i1:
+ case MVT::i8:
+ Opc = WebAssembly::LOAD8_U_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i16:
+ Opc = WebAssembly::LOAD16_U_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i32:
+ Opc = WebAssembly::LOAD_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::LOAD_I64;
+ RC = &WebAssembly::I64RegClass;
+ break;
+ case MVT::f32:
+ Opc = WebAssembly::LOAD_F32;
+ RC = &WebAssembly::F32RegClass;
+ break;
+ case MVT::f64:
+ Opc = WebAssembly::LOAD_F64;
+ RC = &WebAssembly::F64RegClass;
+ break;
+ default:
+ return false;
+ }
+
+ materializeLoadStoreOperands(Addr);
+
+ unsigned ResultReg = createResultReg(RC);
+ auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+ ResultReg);
+
+ addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Load));
+
+ updateValueMap(Load, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectStore(const Instruction *I) {
+ const StoreInst *Store = cast<StoreInst>(I);
+ if (Store->isAtomic())
+ return false;
+
+ Address Addr;
+ if (!computeAddress(Store->getPointerOperand(), Addr))
+ return false;
+
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ bool VTIsi1 = false;
+ switch (getSimpleType(Store->getValueOperand()->getType())) {
+ case MVT::i1:
+ VTIsi1 = true;
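+    // Fall through; an i1 value is masked and stored as a byte.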
+ case MVT::i8:
+ Opc = WebAssembly::STORE8_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i16:
+ Opc = WebAssembly::STORE16_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i32:
+ Opc = WebAssembly::STORE_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::STORE_I64;
+ RC = &WebAssembly::I64RegClass;
+ break;
+ case MVT::f32:
+ Opc = WebAssembly::STORE_F32;
+ RC = &WebAssembly::F32RegClass;
+ break;
+ case MVT::f64:
+ Opc = WebAssembly::STORE_F64;
+ RC = &WebAssembly::F64RegClass;
+ break;
+ default: return false;
+ }
+
+ materializeLoadStoreOperands(Addr);
+
+ unsigned ValueReg = getRegForValue(Store->getValueOperand());
+ if (VTIsi1)
+ ValueReg = maskI1Value(ValueReg, Store->getValueOperand());
+
+ unsigned ResultReg = createResultReg(RC);
+ auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+ ResultReg);
+
+ addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Store));
+
+ MIB.addReg(ValueReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectBr(const Instruction *I) {
+ const BranchInst *Br = cast<BranchInst>(I);
+ if (Br->isUnconditional()) {
+ MachineBasicBlock *MSucc = FuncInfo.MBBMap[Br->getSuccessor(0)];
+ fastEmitBranch(MSucc, Br->getDebugLoc());
+ return true;
+ }
+
+ MachineBasicBlock *TBB = FuncInfo.MBBMap[Br->getSuccessor(0)];
+ MachineBasicBlock *FBB = FuncInfo.MBBMap[Br->getSuccessor(1)];
+
+ bool Not;
+ unsigned CondReg = getRegForI1Value(Br->getCondition(), Not);
+
+ unsigned Opc = WebAssembly::BR_IF;
+ if (Not)
+ Opc = WebAssembly::BR_UNLESS;
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
+ .addMBB(TBB)
+ .addReg(CondReg);
+
+ finishCondBranch(Br->getParent(), TBB, FBB);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectRet(const Instruction *I) {
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ const ReturnInst *Ret = cast<ReturnInst>(I);
+
+ if (Ret->getNumOperands() == 0) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::RETURN_VOID));
+ return true;
+ }
+
+ Value *RV = Ret->getOperand(0);
+ unsigned Opc;
+ switch (getSimpleType(RV->getType())) {
+ case MVT::i1: case MVT::i8:
+ case MVT::i16: case MVT::i32:
+ Opc = WebAssembly::RETURN_I32;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::RETURN_I64;
+ break;
+ case MVT::f32: Opc = WebAssembly::RETURN_F32; break;
+ case MVT::f64: Opc = WebAssembly::RETURN_F64; break;
+ default: return false;
+ }
+
+ unsigned Reg;
+ if (FuncInfo.Fn->getAttributes().hasAttribute(0, Attribute::SExt))
+ Reg = getRegForSignedValue(RV);
+ else if (FuncInfo.Fn->getAttributes().hasAttribute(0, Attribute::ZExt))
+ Reg = getRegForUnsignedValue(RV);
+ else
+ Reg = getRegForValue(RV);
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)).addReg(Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectUnreachable(const Instruction *I) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::UNREACHABLE));
+ return true;
+}
+
bool WebAssemblyFastISel::fastSelectInstruction(const Instruction *I) {
switch (I->getOpcode()) {
- default:
+ case Instruction::Call:
+ if (selectCall(I))
+ return true;
break;
- // TODO: add fast-isel selection cases here...
+ case Instruction::Select: return selectSelect(I);
+ case Instruction::Trunc: return selectTrunc(I);
+ case Instruction::ZExt: return selectZExt(I);
+ case Instruction::SExt: return selectSExt(I);
+ case Instruction::ICmp: return selectICmp(I);
+ case Instruction::FCmp: return selectFCmp(I);
+ case Instruction::BitCast: return selectBitCast(I);
+ case Instruction::Load: return selectLoad(I);
+ case Instruction::Store: return selectStore(I);
+ case Instruction::Br: return selectBr(I);
+ case Instruction::Ret: return selectRet(I);
+ case Instruction::Unreachable: return selectUnreachable(I);
+ default: break;
}
// Fall back to target-independent instruction selection.
diff --git a/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp b/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
new file mode 100644
index 000000000000..5dc90920e310
--- /dev/null
+++ b/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
@@ -0,0 +1,296 @@
+//=- WebAssemblyFixIrreducibleControlFlow.cpp - Fix irreducible control flow -//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements a pass that transforms irreducible control flow
+/// into reducible control flow. Irreducible control flow means multiple-entry
+/// loops; they appear as CFG cycles that are not recorded in MachineLoopInfo
+/// due to being unnatural.
+///
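+/// For example, if both B and C are reachable from A and they branch to each
+/// other:
+///
+///      A -> B, A -> C, B -> C, C -> B
+///
+/// then the cycle between B and C has two entries, so neither entry dominates
+/// the other and MachineLoopInfo records no loop. This pass routes the
+/// offending edges through a new dispatch block whose br_table transfers
+/// control to the intended successor.
+///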
+/// Note that LLVM has a generic pass that lowers irreducible control flow, but
+/// it linearizes control flow, turning diamonds into two triangles, which is
+/// both unnecessary and undesirable for WebAssembly.
+///
+/// TODO: The transformation implemented here handles all irreducible control
+/// flow, without exponential code-size expansion, though it does so by creating
+/// inefficient code in many cases. Ideally, we should add other
+/// transformations, including code-duplicating cases, which can be more
+/// efficient in common cases, and they can fall back to this conservative
+/// implementation as needed.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/ADT/PriorityQueue.h"
+#include "llvm/ADT/SCCIterator.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-fix-irreducible-control-flow"
+
+namespace {
+class WebAssemblyFixIrreducibleControlFlow final : public MachineFunctionPass {
+ const char *getPassName() const override {
+ return "WebAssembly Fix Irreducible Control Flow";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ bool VisitLoop(MachineFunction &MF, MachineLoopInfo &MLI, MachineLoop *Loop);
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyFixIrreducibleControlFlow() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyFixIrreducibleControlFlow::ID = 0;
+FunctionPass *llvm::createWebAssemblyFixIrreducibleControlFlow() {
+ return new WebAssemblyFixIrreducibleControlFlow();
+}
+
+namespace {
+
+/// A utility for walking the blocks of a loop, handling a nested inner
+/// loop as a monolithic conceptual block.
+class MetaBlock {
+ MachineBasicBlock *Block;
+ SmallVector<MachineBasicBlock *, 2> Preds;
+ SmallVector<MachineBasicBlock *, 2> Succs;
+
+public:
+ explicit MetaBlock(MachineBasicBlock *MBB)
+ : Block(MBB), Preds(MBB->pred_begin(), MBB->pred_end()),
+ Succs(MBB->succ_begin(), MBB->succ_end()) {}
+
+ explicit MetaBlock(MachineLoop *Loop) : Block(Loop->getHeader()) {
+ Loop->getExitBlocks(Succs);
+ for (MachineBasicBlock *Pred : Block->predecessors())
+ if (!Loop->contains(Pred))
+ Preds.push_back(Pred);
+ }
+
+ MachineBasicBlock *getBlock() const { return Block; }
+
+ const SmallVectorImpl<MachineBasicBlock *> &predecessors() const {
+ return Preds;
+ }
+ const SmallVectorImpl<MachineBasicBlock *> &successors() const {
+ return Succs;
+ }
+
+  bool operator==(const MetaBlock &MBB) const { return Block == MBB.Block; }
+  bool operator!=(const MetaBlock &MBB) const { return Block != MBB.Block; }
+};
+
+class SuccessorList final : public MetaBlock {
+ size_t Index;
+ size_t Num;
+
+public:
+ explicit SuccessorList(MachineBasicBlock *MBB)
+ : MetaBlock(MBB), Index(0), Num(successors().size()) {}
+
+ explicit SuccessorList(MachineLoop *Loop)
+ : MetaBlock(Loop), Index(0), Num(successors().size()) {}
+
+ bool HasNext() const { return Index != Num; }
+
+ MachineBasicBlock *Next() {
+ assert(HasNext());
+ return successors()[Index++];
+ }
+};
+
+} // end anonymous namespace
+
+bool WebAssemblyFixIrreducibleControlFlow::VisitLoop(MachineFunction &MF,
+ MachineLoopInfo &MLI,
+ MachineLoop *Loop) {
+ MachineBasicBlock *Header = Loop ? Loop->getHeader() : &*MF.begin();
+ SetVector<MachineBasicBlock *> RewriteSuccs;
+
+  // DFS through Loop's body, looking for irreducible control flow. Loop is
+  // natural, we stay within its body, and we treat any nested loops
+  // monolithically, so any cycle we encounter indicates irreducibility.
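+  //
+  // For example, in a CFG like
+  //
+  //   Header -> A, Header -> B, A -> B, B -> A
+  //
+  // the cycle {A, B} has two entries, so neither A nor B dominates the other.
+  // The DFS below eventually revisits a block that is still on the stack and
+  // records the offending predecessor in RewriteSuccs.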
+ SmallPtrSet<MachineBasicBlock *, 8> OnStack;
+ SmallPtrSet<MachineBasicBlock *, 8> Visited;
+ SmallVector<SuccessorList, 4> LoopWorklist;
+ LoopWorklist.push_back(SuccessorList(Header));
+ OnStack.insert(Header);
+ Visited.insert(Header);
+ while (!LoopWorklist.empty()) {
+ SuccessorList &Top = LoopWorklist.back();
+ if (Top.HasNext()) {
+ MachineBasicBlock *Next = Top.Next();
+ if (Next == Header || (Loop && !Loop->contains(Next)))
+ continue;
+ if (LLVM_LIKELY(OnStack.insert(Next).second)) {
+ if (!Visited.insert(Next).second) {
+ OnStack.erase(Next);
+ continue;
+ }
+ MachineLoop *InnerLoop = MLI.getLoopFor(Next);
+ if (InnerLoop != Loop)
+ LoopWorklist.push_back(SuccessorList(InnerLoop));
+ else
+ LoopWorklist.push_back(SuccessorList(Next));
+ } else {
+ RewriteSuccs.insert(Top.getBlock());
+ }
+ continue;
+ }
+ OnStack.erase(Top.getBlock());
+ LoopWorklist.pop_back();
+ }
+
+ // Most likely, we didn't find any irreducible control flow.
+ if (LLVM_LIKELY(RewriteSuccs.empty()))
+ return false;
+
+ DEBUG(dbgs() << "Irreducible control flow detected!\n");
+
+  // Ok. We have irreducible control flow! Create a dispatch block which will
+  // contain a jump table targeting any block in the problematic set of blocks.
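+  //
+  // For the {A, B} example above, every entry into the cycle is routed
+  // through this dispatch block: predecessors set an index register and
+  // branch here, and the br_table below transfers control to A or B. The
+  // cycle then has a single header, making the control flow reducible.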
+ MachineBasicBlock *Dispatch = MF.CreateMachineBasicBlock();
+ MF.insert(MF.end(), Dispatch);
+ MLI.changeLoopFor(Dispatch, Loop);
+
+ // Add the jump table.
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ MachineInstrBuilder MIB = BuildMI(*Dispatch, Dispatch->end(), DebugLoc(),
+ TII.get(WebAssembly::BR_TABLE_I32));
+
+ // Add the register which will be used to tell the jump table which block to
+ // jump to.
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ unsigned Reg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
+ MIB.addReg(Reg);
+
+ // Collect all the blocks which need to have their successors rewritten,
+ // add the successors to the jump table, and remember their index.
+ DenseMap<MachineBasicBlock *, unsigned> Indices;
+ SmallVector<MachineBasicBlock *, 4> SuccWorklist(RewriteSuccs.begin(),
+ RewriteSuccs.end());
+ while (!SuccWorklist.empty()) {
+ MachineBasicBlock *MBB = SuccWorklist.pop_back_val();
+ auto Pair = Indices.insert(std::make_pair(MBB, 0));
+ if (!Pair.second)
+ continue;
+
+ unsigned Index = MIB.getInstr()->getNumExplicitOperands() - 1;
+ DEBUG(dbgs() << "MBB#" << MBB->getNumber() << " has index " << Index
+ << "\n");
+
+ Pair.first->second = Index;
+ for (auto Pred : MBB->predecessors())
+ RewriteSuccs.insert(Pred);
+
+ MIB.addMBB(MBB);
+ Dispatch->addSuccessor(MBB);
+
+ MetaBlock Meta(MBB);
+ for (auto *Succ : Meta.successors())
+ if (Succ != Header && (!Loop || Loop->contains(Succ)))
+ SuccWorklist.push_back(Succ);
+ }
+
+ // Rewrite the problematic successors for every block in RewriteSuccs.
+ // For simplicity, we just introduce a new block for every edge we need to
+ // rewrite. Fancier things are possible.
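+  //
+  // That is, an edge MBB -> Succ becomes MBB -> Split -> Dispatch, where
+  // Split sets the index register to Succ's jump-table index; the dispatch
+  // block's br_table then completes the transfer to Succ.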
+ for (MachineBasicBlock *MBB : RewriteSuccs) {
+ DenseMap<MachineBasicBlock *, MachineBasicBlock *> Map;
+ for (auto *Succ : MBB->successors()) {
+ if (!Indices.count(Succ))
+ continue;
+
+ MachineBasicBlock *Split = MF.CreateMachineBasicBlock();
+ MF.insert(MBB->isLayoutSuccessor(Succ) ? MachineFunction::iterator(Succ)
+ : MF.end(),
+ Split);
+ MLI.changeLoopFor(Split, Loop);
+
+      // Set the jump table's register to the index of the block we wish to
+      // jump to, then branch to the jump table.
+ BuildMI(*Split, Split->end(), DebugLoc(), TII.get(WebAssembly::CONST_I32),
+ Reg)
+ .addImm(Indices[Succ]);
+ BuildMI(*Split, Split->end(), DebugLoc(), TII.get(WebAssembly::BR))
+ .addMBB(Dispatch);
+ Split->addSuccessor(Dispatch);
+ Map[Succ] = Split;
+ }
+ // Remap the terminator operands and the successor list.
+ for (MachineInstr &Term : MBB->terminators())
+ for (auto &Op : Term.explicit_uses())
+ if (Op.isMBB() && Indices.count(Op.getMBB()))
+ Op.setMBB(Map[Op.getMBB()]);
+ for (auto Rewrite : Map)
+ MBB->replaceSuccessor(Rewrite.first, Rewrite.second);
+ }
+
+ // Create a fake default label, because br_table requires one.
+ MIB.addMBB(MIB.getInstr()
+ ->getOperand(MIB.getInstr()->getNumExplicitOperands() - 1)
+ .getMBB());
+
+ return true;
+}
+
+bool WebAssemblyFixIrreducibleControlFlow::runOnMachineFunction(
+ MachineFunction &MF) {
+ DEBUG(dbgs() << "********** Fixing Irreducible Control Flow **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ bool Changed = false;
+ auto &MLI = getAnalysis<MachineLoopInfo>();
+
+ // Visit the function body, which is identified as a null loop.
+ Changed |= VisitLoop(MF, MLI, nullptr);
+
+ // Visit all the loops.
+ SmallVector<MachineLoop *, 8> Worklist(MLI.begin(), MLI.end());
+ while (!Worklist.empty()) {
+ MachineLoop *CurLoop = Worklist.pop_back_val();
+ Worklist.append(CurLoop->begin(), CurLoop->end());
+ Changed |= VisitLoop(MF, MLI, CurLoop);
+ }
+
+ // If we made any changes, completely recompute everything.
+ if (LLVM_UNLIKELY(Changed)) {
+ DEBUG(dbgs() << "Recomputing dominators and loops.\n");
+ MF.getRegInfo().invalidateLiveness();
+ MF.RenumberBlocks();
+ getAnalysis<MachineDominatorTree>().runOnMachineFunction(MF);
+ MLI.runOnMachineFunction(MF);
+ }
+
+ return Changed;
+}
diff --git a/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp b/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp
index 0eefd57f1f2c..0a5782e5c287 100644
--- a/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp
@@ -34,10 +34,7 @@ using namespace llvm;
#define DEBUG_TYPE "wasm-frame-info"
-// TODO: Implement a red zone?
// TODO: wasm64
-// TODO: Prolog/epilog should be stackified too. This pass runs after register
-// stackification, so we'll have to do it manually.
// TODO: Emit TargetOpcode::CFI_INSTRUCTION instructions
/// Return true if the specified function should have a dedicated frame pointer
@@ -46,7 +43,7 @@ bool WebAssemblyFrameLowering::hasFP(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const auto *RegInfo =
MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo();
- return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken() ||
+ return MFI->isFrameAddressTaken() || MFI->hasVarSizedObjects() ||
MFI->hasStackMap() || MFI->hasPatchPoint() ||
RegInfo->needsStackRealignment(MF);
}
@@ -62,63 +59,64 @@ bool WebAssemblyFrameLowering::hasReservedCallFrame(
}
-/// Adjust the stack pointer by a constant amount.
-static void adjustStackPointer(unsigned StackSize,
- bool AdjustUp,
- MachineFunction& MF,
- MachineBasicBlock& MBB,
- const TargetInstrInfo* TII,
- MachineBasicBlock::iterator InsertPt,
- const DebugLoc& DL) {
- auto &MRI = MF.getRegInfo();
- unsigned SPReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
- auto *SPSymbol = MF.createExternalSymbolName("__stack_pointer");
- BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), SPReg)
- .addExternalSymbol(SPSymbol);
- // This MachinePointerInfo should reference __stack_pointer as well but
- // doesn't because MachinePointerInfo() takes a GV which we don't have for
- // __stack_pointer. TODO: check if PseudoSourceValue::ExternalSymbolCallEntry
- // is appropriate instead. (likewise for EmitEpologue below)
- auto *LoadMMO = new MachineMemOperand(MachinePointerInfo(),
- MachineMemOperand::MOLoad, 4, 4);
- BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::LOAD_I32), SPReg)
- .addImm(0)
- .addReg(SPReg)
- .addMemOperand(LoadMMO);
- // Add/Subtract the frame size
- unsigned OffsetReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
- BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg)
- .addImm(StackSize);
- BuildMI(MBB, InsertPt, DL,
- TII->get(AdjustUp ? WebAssembly::ADD_I32 : WebAssembly::SUB_I32),
- WebAssembly::SP32)
- .addReg(SPReg)
- .addReg(OffsetReg);
- // The SP32 register now has the new stacktop. Also write it back to memory.
- BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg)
- .addExternalSymbol(SPSymbol);
- auto *MMO = new MachineMemOperand(MachinePointerInfo(),
+/// Returns true if this function needs a local user-space stack pointer.
+/// Unlike a machine stack pointer, the wasm user stack pointer is a global
+/// variable, so it is loaded into a register in the prolog.
+bool WebAssemblyFrameLowering::needsSP(const MachineFunction &MF,
+ const MachineFrameInfo &MFI) const {
+ return MFI.getStackSize() || MFI.adjustsStack() || hasFP(MF);
+}
+
+/// Returns true if the local user-space stack pointer needs to be written back
+/// to memory by this function (this is not meaningful if needsSP is false). If
+/// false, the stack red zone can be used and only a local SP is needed.
+bool WebAssemblyFrameLowering::needsSPWriteback(
+ const MachineFunction &MF, const MachineFrameInfo &MFI) const {
+ assert(needsSP(MF, MFI));
+ return MFI.getStackSize() > RedZoneSize || MFI.hasCalls() ||
+ MF.getFunction()->hasFnAttribute(Attribute::NoRedZone);
+}
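+
+// For example, a leaf function with 64 bytes of locals, no calls, and no
+// noredzone attribute keeps its frame entirely within the 128-byte red zone:
+// it computes a local SP in the prolog but never stores it back to
+// __stack_pointer.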
+
+static void writeSPToMemory(unsigned SrcReg, MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &InsertAddr,
+ MachineBasicBlock::iterator &InsertStore,
+ const DebugLoc &DL) {
+ const char *ES = "__stack_pointer";
+ auto *SPSymbol = MF.createExternalSymbolName(ES);
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const TargetRegisterClass *PtrRC =
+ MRI.getTargetRegisterInfo()->getPointerRegClass(MF);
+ unsigned Zero = MRI.createVirtualRegister(PtrRC);
+ unsigned Drop = MRI.createVirtualRegister(PtrRC);
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+
+ BuildMI(MBB, InsertAddr, DL, TII->get(WebAssembly::CONST_I32), Zero)
+ .addImm(0);
+ auto *MMO = new MachineMemOperand(MachinePointerInfo(MF.getPSVManager()
+ .getExternalSymbolCallEntry(ES)),
MachineMemOperand::MOStore, 4, 4);
- BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::STORE_I32), WebAssembly::SP32)
- .addImm(0)
- .addReg(OffsetReg)
- .addReg(WebAssembly::SP32)
+ BuildMI(MBB, InsertStore, DL, TII->get(WebAssembly::STORE_I32), Drop)
+ .addExternalSymbol(SPSymbol)
+ .addReg(Zero)
+ .addImm(2) // p2align
+ .addReg(SrcReg)
.addMemOperand(MMO);
}
-void WebAssemblyFrameLowering::eliminateCallFramePseudoInstr(
+MachineBasicBlock::iterator
+WebAssemblyFrameLowering::eliminateCallFramePseudoInstr(
MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- const auto *TII =
- static_cast<const WebAssemblyInstrInfo*>(MF.getSubtarget().getInstrInfo());
- DebugLoc DL = I->getDebugLoc();
- unsigned Opc = I->getOpcode();
- bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
- unsigned Amount = I->getOperand(0).getImm();
- if (Amount)
- adjustStackPointer(Amount, IsDestroy, MF, MBB,
- TII, I, DL);
- MBB.erase(I);
+ assert(!I->getOperand(0).getImm() && hasFP(MF) &&
+ "Call frame pseudos should only be used for dynamic stack adjustment");
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ if (I->getOpcode() == TII->getCallFrameDestroyOpcode() &&
+ needsSPWriteback(MF, *MF.getFrameInfo())) {
+ DebugLoc DL = I->getDebugLoc();
+ writeSPToMemory(WebAssembly::SP32, MF, MBB, I, I, DL);
+ }
+ return MBB.erase(I);
}
void WebAssemblyFrameLowering::emitPrologue(MachineFunction &MF,
@@ -127,49 +125,91 @@ void WebAssemblyFrameLowering::emitPrologue(MachineFunction &MF,
auto *MFI = MF.getFrameInfo();
assert(MFI->getCalleeSavedInfo().empty() &&
"WebAssembly should not have callee-saved registers");
- assert(!hasFP(MF) && "Functions needing frame pointers not yet supported");
+
+ if (!needsSP(MF, *MFI)) return;
uint64_t StackSize = MFI->getStackSize();
- if (!StackSize && (!MFI->adjustsStack() || MFI->getMaxCallFrameSize() == 0))
- return;
- const auto *TII = MF.getSubtarget().getInstrInfo();
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ auto &MRI = MF.getRegInfo();
auto InsertPt = MBB.begin();
DebugLoc DL;
- adjustStackPointer(StackSize, false, MF, MBB, TII, InsertPt, DL);
+ const TargetRegisterClass *PtrRC =
+ MRI.getTargetRegisterInfo()->getPointerRegClass(MF);
+ unsigned Zero = MRI.createVirtualRegister(PtrRC);
+ unsigned SPReg = MRI.createVirtualRegister(PtrRC);
+ const char *ES = "__stack_pointer";
+ auto *SPSymbol = MF.createExternalSymbolName(ES);
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), Zero)
+ .addImm(0);
+ auto *LoadMMO = new MachineMemOperand(MachinePointerInfo(MF.getPSVManager()
+ .getExternalSymbolCallEntry(ES)),
+ MachineMemOperand::MOLoad, 4, 4);
+ // Load the SP value.
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::LOAD_I32),
+ StackSize ? SPReg : (unsigned)WebAssembly::SP32)
+ .addExternalSymbol(SPSymbol)
+ .addReg(Zero) // addr
+ .addImm(2) // p2align
+ .addMemOperand(LoadMMO);
+
+ if (StackSize) {
+ // Subtract the frame size
+ unsigned OffsetReg = MRI.createVirtualRegister(PtrRC);
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg)
+ .addImm(StackSize);
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::SUB_I32),
+ WebAssembly::SP32)
+ .addReg(SPReg)
+ .addReg(OffsetReg);
+ }
+ if (hasFP(MF)) {
+ // Unlike most conventional targets (where FP points to the saved FP),
+ // FP points to the bottom of the fixed-size locals, so we can use positive
+ // offsets in load/store instructions.
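+    //
+    // Sketch of the resulting frame (the stack grows down):
+    //
+    //   incoming SP -> +----------------------+
+    //                  |   fixed-size locals  |  StackSize bytes
+    //   FP32 = SP32 -> +----------------------+
+    //
+    // so a local at offset N is addressed as FP + N.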
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::COPY),
+ WebAssembly::FP32)
+ .addReg(WebAssembly::SP32);
+ }
+ if (StackSize && needsSPWriteback(MF, *MFI)) {
+ writeSPToMemory(WebAssembly::SP32, MF, MBB, InsertPt, InsertPt, DL);
+ }
}
void WebAssemblyFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
- uint64_t StackSize = MF.getFrameInfo()->getStackSize();
- if (!StackSize)
- return;
- const auto *TII = MF.getSubtarget().getInstrInfo();
+ auto *MFI = MF.getFrameInfo();
+ uint64_t StackSize = MFI->getStackSize();
+ if (!needsSP(MF, *MFI) || !needsSPWriteback(MF, *MFI)) return;
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
auto &MRI = MF.getRegInfo();
- unsigned OffsetReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
auto InsertPt = MBB.getFirstTerminator();
DebugLoc DL;
- if (InsertPt != MBB.end()) {
+ if (InsertPt != MBB.end())
DL = InsertPt->getDebugLoc();
+
+ // Restore the stack pointer. If we had fixed-size locals, add the offset
+ // subtracted in the prolog.
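+  // For example, with StackSize == 32 and no frame pointer, this computes
+  // SPReg = SP32 + 32, and the writeSPToMemory call below stores SPReg back
+  // to __stack_pointer.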
+ unsigned SPReg = 0;
+ MachineBasicBlock::iterator InsertAddr = InsertPt;
+ if (StackSize) {
+ const TargetRegisterClass *PtrRC =
+ MRI.getTargetRegisterInfo()->getPointerRegClass(MF);
+ unsigned OffsetReg = MRI.createVirtualRegister(PtrRC);
+ InsertAddr =
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg)
+ .addImm(StackSize);
+ // In the epilog we don't need to write the result back to the SP32 physreg
+ // because it won't be used again. We can use a stackified register instead.
+ SPReg = MRI.createVirtualRegister(PtrRC);
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::ADD_I32), SPReg)
+ .addReg(hasFP(MF) ? WebAssembly::FP32 : WebAssembly::SP32)
+ .addReg(OffsetReg);
+ } else {
+ SPReg = hasFP(MF) ? WebAssembly::FP32 : WebAssembly::SP32;
}
- // Restore the stack pointer. Without FP its value is just SP32 - stacksize
- BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg)
- .addImm(StackSize);
- auto *SPSymbol = MF.createExternalSymbolName("__stack_pointer");
- BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::ADD_I32), WebAssembly::SP32)
- .addReg(WebAssembly::SP32)
- .addReg(OffsetReg);
- // Re-use OffsetReg to hold the address of the stacktop
- BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg)
- .addExternalSymbol(SPSymbol);
- auto *MMO = new MachineMemOperand(MachinePointerInfo(),
- MachineMemOperand::MOStore, 4, 4);
- BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::STORE_I32), WebAssembly::SP32)
- .addImm(0)
- .addReg(OffsetReg)
- .addReg(WebAssembly::SP32)
- .addMemOperand(MMO);
+ writeSPToMemory(SPReg, MF, MBB, InsertAddr, InsertPt, DL);
}
diff --git a/lib/Target/WebAssembly/WebAssemblyFrameLowering.h b/lib/Target/WebAssembly/WebAssemblyFrameLowering.h
index 5f4708fe77ed..e20fc5df7443 100644
--- a/lib/Target/WebAssembly/WebAssemblyFrameLowering.h
+++ b/lib/Target/WebAssembly/WebAssemblyFrameLowering.h
@@ -19,18 +19,24 @@
#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
+class MachineFrameInfo;
class WebAssemblyFrameLowering final : public TargetFrameLowering {
-public:
+ public:
+ /// Size of the red zone for the user stack (leaf functions can use this much
+ /// space below the stack pointer without writing it back to memory).
+ // TODO: (ABI) Revisit and decide how large it should be.
+ static const size_t RedZoneSize = 128;
+
WebAssemblyFrameLowering()
: TargetFrameLowering(StackGrowsDown, /*StackAlignment=*/16,
/*LocalAreaOffset=*/0,
/*TransientStackAlignment=*/16,
/*StackRealignable=*/true) {}
- void
- eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const override;
+ MachineBasicBlock::iterator eliminateCallFramePseudoInstr(
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
/// These methods insert prolog and epilog code into the function.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
@@ -38,8 +44,13 @@ public:
bool hasFP(const MachineFunction &MF) const override;
bool hasReservedCallFrame(const MachineFunction &MF) const override;
+
+ private:
+ bool needsSP(const MachineFunction &MF, const MachineFrameInfo &MFI) const;
+ bool needsSPWriteback(const MachineFunction &MF,
+ const MachineFrameInfo &MFI) const;
};
-} // end namespace llvm
+} // end namespace llvm
#endif
diff --git a/lib/Target/WebAssembly/WebAssemblyISD.def b/lib/Target/WebAssembly/WebAssemblyISD.def
index 3a03fa55b220..2f0f106ef5b7 100644
--- a/lib/Target/WebAssembly/WebAssemblyISD.def
+++ b/lib/Target/WebAssembly/WebAssemblyISD.def
@@ -20,6 +20,6 @@ HANDLE_NODETYPE(RETURN)
HANDLE_NODETYPE(ARGUMENT)
HANDLE_NODETYPE(Wrapper)
HANDLE_NODETYPE(BR_IF)
-HANDLE_NODETYPE(TABLESWITCH)
+HANDLE_NODETYPE(BR_TABLE)
// add memory opcodes starting at ISD::FIRST_TARGET_MEMORY_OPCODE here...
diff --git a/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp b/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
index 8390f797c43e..88c38b3602b9 100644
--- a/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
@@ -54,7 +54,7 @@ public:
return SelectionDAGISel::runOnMachineFunction(MF);
}
- SDNode *Select(SDNode *Node) override;
+ void Select(SDNode *Node) override;
bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
std::vector<SDValue> &OutOps) override;
@@ -67,7 +67,7 @@ private:
};
} // end anonymous namespace
-SDNode *WebAssemblyDAGToDAGISel::Select(SDNode *Node) {
+void WebAssemblyDAGToDAGISel::Select(SDNode *Node) {
// Dump information about the Node being selected.
DEBUG(errs() << "Selecting: ");
DEBUG(Node->dump(CurDAG));
@@ -77,11 +77,10 @@ SDNode *WebAssemblyDAGToDAGISel::Select(SDNode *Node) {
if (Node->isMachineOpcode()) {
DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
- return nullptr;
+ return;
}
// Few custom selection stuff.
- SDNode *ResNode = nullptr;
EVT VT = Node->getValueType(0);
switch (Node->getOpcode()) {
@@ -92,16 +91,7 @@ SDNode *WebAssemblyDAGToDAGISel::Select(SDNode *Node) {
}
// Select the default instruction.
- ResNode = SelectCode(Node);
-
- DEBUG(errs() << "=> ");
- if (ResNode == nullptr || ResNode == Node)
- DEBUG(Node->dump(CurDAG));
- else
- DEBUG(ResNode->dump(CurDAG));
- DEBUG(errs() << "\n");
-
- return ResNode;
+ SelectCode(Node);
}
bool WebAssemblyDAGToDAGISel::SelectInlineAsmMemoryOperand(
diff --git a/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index e9933b092988..9e7731997d58 100644
--- a/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -26,7 +26,6 @@
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -35,61 +34,6 @@ using namespace llvm;
#define DEBUG_TYPE "wasm-lower"
-namespace {
-// Diagnostic information for unimplemented or unsupported feature reporting.
-// TODO: This code is copied from BPF and AMDGPU; consider factoring it out
-// and sharing code.
-class DiagnosticInfoUnsupported final : public DiagnosticInfo {
-private:
- // Debug location where this diagnostic is triggered.
- DebugLoc DLoc;
- const Twine &Description;
- const Function &Fn;
- SDValue Value;
-
- static int KindID;
-
- static int getKindID() {
- if (KindID == 0)
- KindID = llvm::getNextAvailablePluginDiagnosticKind();
- return KindID;
- }
-
-public:
- DiagnosticInfoUnsupported(SDLoc DLoc, const Function &Fn, const Twine &Desc,
- SDValue Value)
- : DiagnosticInfo(getKindID(), DS_Error), DLoc(DLoc.getDebugLoc()),
- Description(Desc), Fn(Fn), Value(Value) {}
-
- void print(DiagnosticPrinter &DP) const override {
- std::string Str;
- raw_string_ostream OS(Str);
-
- if (DLoc) {
- auto DIL = DLoc.get();
- StringRef Filename = DIL->getFilename();
- unsigned Line = DIL->getLine();
- unsigned Column = DIL->getColumn();
- OS << Filename << ':' << Line << ':' << Column << ' ';
- }
-
- OS << "in function " << Fn.getName() << ' ' << *Fn.getFunctionType() << '\n'
- << Description;
- if (Value)
- Value->print(OS);
- OS << '\n';
- OS.flush();
- DP << Str;
- }
-
- static bool classof(const DiagnosticInfo *DI) {
- return DI->getKind() == getKindID();
- }
-};
-
-int DiagnosticInfoUnsupported::KindID = 0;
-} // end anonymous namespace
-
WebAssemblyTargetLowering::WebAssemblyTargetLowering(
const TargetMachine &TM, const WebAssemblySubtarget &STI)
: TargetLowering(TM), Subtarget(&STI) {
@@ -116,6 +60,8 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
setOperationAction(ISD::JumpTable, MVTPtr, Custom);
+ setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
+ setOperationAction(ISD::BRIND, MVT::Other, Custom);
// Take the default expansion for va_arg, va_copy, and va_end. There is no
// default action for va_start, so we do that custom.
@@ -148,7 +94,7 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
for (auto T : {MVT::i32, MVT::i64}) {
// Expand unavailable integer operations.
for (auto Op :
- {ISD::BSWAP, ISD::ROTL, ISD::ROTR, ISD::SMUL_LOHI, ISD::UMUL_LOHI,
+ {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI,
ISD::MULHS, ISD::MULHU, ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS,
ISD::SRA_PARTS, ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC,
ISD::SUBE}) {
@@ -167,6 +113,7 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);
setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
+ setOperationAction(ISD::CopyToReg, MVT::Other, Custom);
// Expand these forms; we pattern-match the forms that we can handle in isel.
for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
@@ -204,13 +151,14 @@ bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
EVT VT) const {
unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
- if (BitWidth > 1 && BitWidth < 8)
- BitWidth = 8;
+ if (BitWidth > 1 && BitWidth < 8) BitWidth = 8;
if (BitWidth > 64) {
- BitWidth = 64;
+ // The shift will be lowered to a libcall, and compiler-rt libcalls expect
+ // the count to be an i32.
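+    // (For example, a 128-bit shift is lowered to a call to a function like
+    // compiler-rt's __ashlti3, whose shift-count parameter is a plain int.)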
+ BitWidth = 32;
assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
- "64-bit shift counts ought to be enough for anyone");
+ "32-bit shift counts ought to be enough for anyone");
}
MVT Result = MVT::getIntegerVT(BitWidth);
@@ -219,13 +167,13 @@ MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
return Result;
}
-const char *
-WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
+const char *WebAssemblyTargetLowering::getTargetNodeName(
+ unsigned Opcode) const {
switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
- case WebAssemblyISD::FIRST_NUMBER:
- break;
-#define HANDLE_NODETYPE(NODE) \
- case WebAssemblyISD::NODE: \
+ case WebAssemblyISD::FIRST_NUMBER:
+ break;
+#define HANDLE_NODETYPE(NODE) \
+ case WebAssemblyISD::NODE: \
return "WebAssemblyISD::" #NODE;
#include "WebAssemblyISD.def"
#undef HANDLE_NODETYPE
@@ -240,17 +188,17 @@ WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
// WebAssembly register class.
if (Constraint.size() == 1) {
switch (Constraint[0]) {
- case 'r':
- assert(VT != MVT::iPTR && "Pointer MVT not expected here");
- if (VT.isInteger() && !VT.isVector()) {
- if (VT.getSizeInBits() <= 32)
- return std::make_pair(0U, &WebAssembly::I32RegClass);
- if (VT.getSizeInBits() <= 64)
- return std::make_pair(0U, &WebAssembly::I64RegClass);
- }
- break;
- default:
- break;
+ case 'r':
+ assert(VT != MVT::iPTR && "Pointer MVT not expected here");
+ if (VT.isInteger() && !VT.isVector()) {
+ if (VT.getSizeInBits() <= 32)
+ return std::make_pair(0U, &WebAssembly::I32RegClass);
+ if (VT.getSizeInBits() <= 64)
+ return std::make_pair(0U, &WebAssembly::I64RegClass);
+ }
+ break;
+ default:
+ break;
}
}
@@ -274,17 +222,33 @@ bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
// WebAssembly offsets are added as unsigned without wrapping. The
// isLegalAddressingMode gives us no way to determine if wrapping could be
// happening, so we approximate this by accepting only non-negative offsets.
- if (AM.BaseOffs < 0)
- return false;
+ if (AM.BaseOffs < 0) return false;
// WebAssembly has no scale register operands.
- if (AM.Scale != 0)
- return false;
+ if (AM.Scale != 0) return false;
// Everything else is legal.
return true;
}
+bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
+ EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/, bool *Fast) const {
+  // WebAssembly supports unaligned accesses, though they should be declared
+  // via the p2align attribute on the loads and stores that perform them, and
+  // there may be a performance impact. We tell LLVM they're "fast" because
+  // for the kinds of things that LLVM uses this for (merging adjacent stores
+  // of constants, etc.), WebAssembly implementations will either want the
+  // unaligned access or they'll split anyway.
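+  // (For example, this lets two adjacent byte stores of constants be merged
+  // into a single unaligned 16-bit store.)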
+ if (Fast) *Fast = true;
+ return true;
+}
+
+bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const {
+ // The current thinking is that wasm engines will perform this optimization,
+ // so we can save on code size.
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//
@@ -293,10 +257,10 @@ bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
// Lowering Code
//===----------------------------------------------------------------------===//
-static void fail(SDLoc DL, SelectionDAG &DAG, const char *msg) {
+static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *msg) {
MachineFunction &MF = DAG.getMachineFunction();
DAG.getContext()->diagnose(
- DiagnosticInfoUnsupported(DL, *MF.getFunction(), msg, SDValue()));
+ DiagnosticInfoUnsupported(*MF.getFunction(), msg, DL.getDebugLoc()));
}
// Test whether the given calling convention is supported.
@@ -312,14 +276,14 @@ static bool CallingConvSupported(CallingConv::ID CallConv) {
CallConv == CallingConv::CXX_FAST_TLS;
}
-SDValue
-WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue WebAssemblyTargetLowering::LowerCall(
+ CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
SDLoc DL = CLI.DL;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
MachineFunction &MF = DAG.getMachineFunction();
+ auto Layout = MF.getDataLayout();
CallingConv::ID CallConv = CLI.CallConv;
if (!CallingConvSupported(CallConv))
@@ -337,16 +301,15 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
fail(DL, DAG, "WebAssembly doesn't support tail call yet");
CLI.IsTailCall = false;
- SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
-
SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
if (Ins.size() > 1)
fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet");
SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
- for (const ISD::OutputArg &Out : Outs) {
- if (Out.Flags.isByVal())
- fail(DL, DAG, "WebAssembly hasn't implemented byval arguments");
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ for (unsigned i = 0; i < Outs.size(); ++i) {
+ const ISD::OutputArg &Out = Outs[i];
+ SDValue &OutVal = OutVals[i];
if (Out.Flags.isNest())
fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
if (Out.Flags.isInAlloca())
@@ -355,28 +318,41 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
if (Out.Flags.isInConsecutiveRegsLast())
fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
+ if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
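+      // Lower byval by copying the argument into a stack temporary in the
+      // caller and passing the temporary's address in place of the value.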
+ auto *MFI = MF.getFrameInfo();
+ int FI = MFI->CreateStackObject(Out.Flags.getByValSize(),
+ Out.Flags.getByValAlign(),
+ /*isSS=*/false);
+ SDValue SizeNode =
+ DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
+ SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
+ Chain = DAG.getMemcpy(
+ Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getByValAlign(),
+ /*isVolatile*/ false, /*AlwaysInline=*/false,
+ /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
+ OutVal = FINode;
+ }
}
bool IsVarArg = CLI.IsVarArg;
unsigned NumFixedArgs = CLI.NumFixedArgs;
- auto PtrVT = getPointerTy(MF.getDataLayout());
+
+ auto PtrVT = getPointerTy(Layout);
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
if (IsVarArg) {
- // Outgoing non-fixed arguments are placed at the top of the stack. First
- // compute their offsets and the total amount of argument stack space
- // needed.
+ // Outgoing non-fixed arguments are placed in a buffer. First
+ // compute their offsets and the total amount of buffer space needed.
for (SDValue Arg :
make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
EVT VT = Arg.getValueType();
assert(VT != MVT::iPTR && "Legalized args should be concrete");
Type *Ty = VT.getTypeForEVT(*DAG.getContext());
- unsigned Offset =
- CCInfo.AllocateStack(MF.getDataLayout().getTypeAllocSize(Ty),
- MF.getDataLayout().getABITypeAlignment(Ty));
+ unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
+ Layout.getABITypeAlignment(Ty));
CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
Offset, VT.getSimpleVT(),
CCValAssign::Full));
@@ -385,17 +361,13 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
- SDValue NB;
- if (NumBytes) {
- NB = DAG.getConstant(NumBytes, DL, PtrVT, true);
- Chain = DAG.getCALLSEQ_START(Chain, NB, DL);
- }
-
- if (IsVarArg) {
+ SDValue FINode;
+ if (IsVarArg && NumBytes) {
// For non-fixed arguments, next emit stores to store the argument values
- // to the stack at the offsets computed above.
- SDValue SP = DAG.getCopyFromReg(
- Chain, DL, getStackPointerRegisterToSaveRestore(), PtrVT);
+ // to the stack buffer at the offsets computed above.
+ int FI = MF.getFrameInfo()->CreateStackObject(NumBytes,
+ Layout.getStackAlignment(),
+ /*isSS=*/false);
unsigned ValNo = 0;
SmallVector<SDValue, 8> Chains;
for (SDValue Arg :
@@ -403,14 +375,17 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
assert(ArgLocs[ValNo].getValNo() == ValNo &&
"ArgLocs should remain in order and only hold varargs args");
unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
- SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, SP,
+ FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
+ SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
DAG.getConstant(Offset, DL, PtrVT));
- Chains.push_back(DAG.getStore(Chain, DL, Arg, Add,
- MachinePointerInfo::getStack(MF, Offset),
- false, false, 0));
+ Chains.push_back(DAG.getStore(
+ Chain, DL, Arg, Add,
+ MachinePointerInfo::getFixedStack(MF, FI, Offset), 0));
}
if (!Chains.empty())
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
+ } else if (IsVarArg) {
+ FINode = DAG.getIntPtrConstant(0, DL);
}
// Compute the operands for the CALLn node.
@@ -422,8 +397,10 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
// isn't reliable.
Ops.append(OutVals.begin(),
IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
+ // Add a pointer to the vararg buffer.
+ if (IsVarArg) Ops.push_back(FINode);
- SmallVector<EVT, 8> Tys;
+ SmallVector<EVT, 8> InTys;
for (const auto &In : Ins) {
assert(!In.Flags.isByVal() && "byval is not valid for return values");
assert(!In.Flags.isNest() && "nest is not valid for return values");
@@ -436,13 +413,13 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
"WebAssembly hasn't implemented cons regs last return values");
// Ignore In.getOrigAlign() because all our arguments are passed in
// registers.
- Tys.push_back(In.VT);
+ InTys.push_back(In.VT);
}
- Tys.push_back(MVT::Other);
- SDVTList TyList = DAG.getVTList(Tys);
+ InTys.push_back(MVT::Other);
+ SDVTList InTyList = DAG.getVTList(InTys);
SDValue Res =
DAG.getNode(Ins.empty() ? WebAssemblyISD::CALL0 : WebAssemblyISD::CALL1,
- DL, TyList, Ops);
+ DL, InTyList, Ops);
if (Ins.empty()) {
Chain = Res;
} else {
@@ -450,11 +427,6 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
Chain = Res.getValue(1);
}
- if (NumBytes) {
- SDValue Unused = DAG.getTargetConstant(0, DL, PtrVT);
- Chain = DAG.getCALLSEQ_END(Chain, NB, Unused, SDValue(), DL);
- }
-
return Chain;
}
@@ -469,7 +441,7 @@ bool WebAssemblyTargetLowering::CanLowerReturn(
SDValue WebAssemblyTargetLowering::LowerReturn(
SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
+ const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
SelectionDAG &DAG) const {
assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
if (!CallingConvSupported(CallConv))
@@ -496,10 +468,11 @@ SDValue WebAssemblyTargetLowering::LowerReturn(
}
SDValue WebAssemblyTargetLowering::LowerFormalArguments(
- SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+ SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
+ auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
if (!CallingConvSupported(CallConv))
fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
@@ -509,8 +482,6 @@ SDValue WebAssemblyTargetLowering::LowerFormalArguments(
MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
for (const ISD::InputArg &In : Ins) {
- if (In.Flags.isByVal())
- fail(DL, DAG, "WebAssembly hasn't implemented byval arguments");
if (In.Flags.isInAlloca())
fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
if (In.Flags.isNest())
@@ -528,11 +499,22 @@ SDValue WebAssemblyTargetLowering::LowerFormalArguments(
: DAG.getUNDEF(In.VT));
// Record the number and types of arguments.
- MF.getInfo<WebAssemblyFunctionInfo>()->addParam(In.VT);
+ MFI->addParam(In.VT);
}
- // Incoming varargs arguments are on the stack and will be accessed through
- // va_arg, so we don't need to do anything for them here.
+ // Varargs are copied into a buffer allocated by the caller, and a pointer to
+ // the buffer is passed as an argument.
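+  // For example, for a variadic callee reached as f(fmt, 1, 2.0), the caller
+  // stores 1 and 2.0 into a stack buffer and passes the buffer's address as
+  // a hidden trailing argument; va_start later reads that address through
+  // the vararg-buffer vreg recorded below.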
+ if (IsVarArg) {
+ MVT PtrVT = getPointerTy(MF.getDataLayout());
+ unsigned VarargVreg =
+ MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
+ MFI->setVarargBufferVreg(VarargVreg);
+ Chain = DAG.getCopyToReg(
+ Chain, DL, VarargVreg,
+ DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
+ DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
+ MFI->addParam(PtrVT);
+ }
return Chain;
}
@@ -543,31 +525,85 @@ SDValue WebAssemblyTargetLowering::LowerFormalArguments(
SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
SelectionDAG &DAG) const {
+ SDLoc DL(Op);
switch (Op.getOpcode()) {
- default:
- llvm_unreachable("unimplemented operation lowering");
- return SDValue();
- case ISD::FrameIndex:
- return LowerFrameIndex(Op, DAG);
- case ISD::GlobalAddress:
- return LowerGlobalAddress(Op, DAG);
- case ISD::ExternalSymbol:
- return LowerExternalSymbol(Op, DAG);
- case ISD::JumpTable:
- return LowerJumpTable(Op, DAG);
- case ISD::BR_JT:
- return LowerBR_JT(Op, DAG);
- case ISD::VASTART:
- return LowerVASTART(Op, DAG);
+ default:
+ llvm_unreachable("unimplemented operation lowering");
+ return SDValue();
+ case ISD::FrameIndex:
+ return LowerFrameIndex(Op, DAG);
+ case ISD::GlobalAddress:
+ return LowerGlobalAddress(Op, DAG);
+ case ISD::ExternalSymbol:
+ return LowerExternalSymbol(Op, DAG);
+ case ISD::JumpTable:
+ return LowerJumpTable(Op, DAG);
+ case ISD::BR_JT:
+ return LowerBR_JT(Op, DAG);
+ case ISD::VASTART:
+ return LowerVASTART(Op, DAG);
+ case ISD::BlockAddress:
+ case ISD::BRIND:
+ fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
+ return SDValue();
+ case ISD::RETURNADDR: // Probably nothing meaningful can be returned here.
+ fail(DL, DAG, "WebAssembly hasn't implemented __builtin_return_address");
+ return SDValue();
+ case ISD::FRAMEADDR:
+ return LowerFRAMEADDR(Op, DAG);
+ case ISD::CopyToReg:
+ return LowerCopyToReg(Op, DAG);
}
}
+SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Src = Op.getOperand(2);
+ if (isa<FrameIndexSDNode>(Src.getNode())) {
+    // CopyToReg nodes don't support FrameIndex operands. Other targets select
+    // the FI to some LEA-like instruction, but since we don't have that, we
+    // need to insert some kind of instruction that can take an FI operand and
+    // produce a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
+    // copy_local between Op and its FI operand.
+ SDValue Chain = Op.getOperand(0);
+ SDLoc DL(Op);
+ unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
+ EVT VT = Src.getValueType();
+ SDValue Copy(
+ DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_LOCAL_I32
+ : WebAssembly::COPY_LOCAL_I64,
+ DL, VT, Src),
+ 0);
+ return Op.getNode()->getNumValues() == 1
+ ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
+ : DAG.getCopyToReg(Chain, DL, Reg, Copy, Op.getNumOperands() == 4
+ ? Op.getOperand(3)
+ : SDValue());
+ }
+ return SDValue();
+}
+
SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
SelectionDAG &DAG) const {
int FI = cast<FrameIndexSDNode>(Op)->getIndex();
return DAG.getTargetFrameIndex(FI, Op.getValueType());
}
+SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ // Non-zero depths are not supported by WebAssembly currently. Use the
+ // legalizer's default expansion, which is to return 0 (what this function is
+ // documented to do).
+ if (Op.getConstantOperandVal(0) > 0)
+ return SDValue();
+
+ DAG.getMachineFunction().getFrameInfo()->setFrameAddressIsTaken(true);
+ EVT VT = Op.getValueType();
+ unsigned FP =
+ Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
+ return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
+}
+
SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
@@ -582,9 +618,8 @@ SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset()));
}
-SDValue
-WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
- SelectionDAG &DAG) const {
+SDValue WebAssemblyTargetLowering::LowerExternalSymbol(
+ SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
const auto *ES = cast<ExternalSymbolSDNode>(Op);
EVT VT = Op.getValueType();
@@ -603,7 +638,7 @@ WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
SelectionDAG &DAG) const {
// There's no need for a Wrapper node because we always incorporate a jump
- // table operand into a TABLESWITCH instruction, rather than ever
+ // table operand into a BR_TABLE instruction, rather than ever
// materializing it in a register.
const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
@@ -625,16 +660,15 @@ SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
+ // Add an operand for each case.
+ for (auto MBB : MBBs) Ops.push_back(DAG.getBasicBlock(MBB));
+
   // TODO: For now, we just pick something arbitrary for the default case.
// We really want to sniff out the guard and put in the real default case (and
// delete the guard).
Ops.push_back(DAG.getBasicBlock(MBBs[0]));
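+  // The operand list is thus the index, one label per case, and finally the
+  // default label, matching the order BR_TABLE expects.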
- // Add an operand for each case.
- for (auto MBB : MBBs)
- Ops.push_back(DAG.getBasicBlock(MBB));
-
- return DAG.getNode(WebAssemblyISD::TABLESWITCH, DL, MVT::Other, Ops);
+ return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
}
SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
@@ -642,16 +676,13 @@ SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
SDLoc DL(Op);
EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
- // The incoming non-fixed arguments are placed on the top of the stack, with
- // natural alignment, at the point of the call, so the base pointer is just
- // the current frame pointer.
- DAG.getMachineFunction().getFrameInfo()->setFrameAddressIsTaken(true);
- unsigned FP =
- Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
- SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FP, PtrVT);
+ auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- return DAG.getStore(Op.getOperand(0), DL, FrameAddr, Op.getOperand(1),
- MachinePointerInfo(SV), false, false, 0);
+
+ SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
+ MFI->getVarargBufferVreg(), PtrVT);
+ return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
+ MachinePointerInfo(SV), 0);
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/WebAssembly/WebAssemblyISelLowering.h b/lib/Target/WebAssembly/WebAssemblyISelLowering.h
index e7232a042e12..5bc723028e63 100644
--- a/lib/Target/WebAssembly/WebAssemblyISelLowering.h
+++ b/lib/Target/WebAssembly/WebAssemblyISelLowering.h
@@ -29,17 +29,17 @@ enum NodeType : unsigned {
#undef HANDLE_NODETYPE
};
-} // end namespace WebAssemblyISD
+} // end namespace WebAssemblyISD
class WebAssemblySubtarget;
class WebAssemblyTargetMachine;
class WebAssemblyTargetLowering final : public TargetLowering {
-public:
+ public:
WebAssemblyTargetLowering(const TargetMachine &TM,
const WebAssemblySubtarget &STI);
-private:
+ private:
/// Keep a pointer to the WebAssemblySubtarget around so that we can make the
/// right decision when generating code for different targets.
const WebAssemblySubtarget *Subtarget;
@@ -49,13 +49,16 @@ private:
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;
const char *getTargetNodeName(unsigned Opcode) const override;
- std::pair<unsigned, const TargetRegisterClass *>
- getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
- StringRef Constraint, MVT VT) const override;
+ std::pair<unsigned, const TargetRegisterClass *> getRegForInlineAsmConstraint(
+ const TargetRegisterInfo *TRI, StringRef Constraint,
+ MVT VT) const override;
bool isCheapToSpeculateCttz() const override;
bool isCheapToSpeculateCtlz() const override;
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
unsigned AS) const override;
+ bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace, unsigned Align,
+ bool *Fast) const override;
+ bool isIntDivCheap(EVT VT, AttributeSet Attr) const override;
SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
@@ -65,29 +68,31 @@ private:
LLVMContext &Context) const override;
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals, SDLoc dl,
+ const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl,
SelectionDAG &DAG) const override;
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc DL, SelectionDAG &DAG,
+ const SDLoc &DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const override;
// Custom lowering hooks.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerCopyToReg(SDValue Op, SelectionDAG &DAG) const;
};
namespace WebAssembly {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo);
-} // end namespace WebAssembly
+} // end namespace WebAssembly
-} // end namespace llvm
+} // end namespace llvm
#endif
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrControl.td b/lib/Target/WebAssembly/WebAssemblyInstrControl.td
index fda95953db81..444e275c6ebf 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrControl.td
+++ b/lib/Target/WebAssembly/WebAssemblyInstrControl.td
@@ -16,12 +16,12 @@ let Defs = [ARGUMENTS] in {
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1 in {
// The condition operand is a boolean value which WebAssembly represents as i32.
-def BR_IF : I<(outs), (ins I32:$cond, bb_op:$dst),
+def BR_IF : I<(outs), (ins bb_op:$dst, I32:$cond),
[(brcond I32:$cond, bb:$dst)],
- "br_if \t$cond, $dst">;
+ "br_if \t$dst, $cond">;
let isCodeGenOnly = 1 in
-def BR_UNLESS : I<(outs), (ins I32:$cond, bb_op:$dst), [],
- "br_unless\t$cond, $dst">;
+def BR_UNLESS : I<(outs), (ins bb_op:$dst, I32:$cond), [],
+ "br_unless\t$dst, $cond">;
let isBarrier = 1 in {
def BR : I<(outs), (ins bb_op:$dst),
[(br bb:$dst)],
@@ -32,27 +32,27 @@ def BR : I<(outs), (ins bb_op:$dst),
} // Defs = [ARGUMENTS]
def : Pat<(brcond (i32 (setne I32:$cond, 0)), bb:$dst),
- (BR_IF I32:$cond, bb_op:$dst)>;
+ (BR_IF bb_op:$dst, I32:$cond)>;
def : Pat<(brcond (i32 (seteq I32:$cond, 0)), bb:$dst),
- (BR_UNLESS I32:$cond, bb_op:$dst)>;
+ (BR_UNLESS bb_op:$dst, I32:$cond)>;
let Defs = [ARGUMENTS] in {
// TODO: SelectionDAG's lowering insists on using a pointer as the index for
-// jump tables, so in practice we don't ever use TABLESWITCH_I64 in wasm32 mode
+// jump tables, so in practice we don't ever use BR_TABLE_I64 in wasm32 mode
// currently.
// Set TSFlags{0} to 1 to indicate that the variable_ops are immediates.
// Set TSFlags{1} to 1 to indicate that the immediates represent labels.
let isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in {
-def TABLESWITCH_I32 : I<(outs), (ins I32:$index, bb_op:$default, variable_ops),
- [(WebAssemblytableswitch I32:$index, bb:$default)],
- "tableswitch\t$index, $default"> {
+def BR_TABLE_I32 : I<(outs), (ins I32:$index, variable_ops),
+ [(WebAssemblybr_table I32:$index)],
+ "br_table \t$index"> {
let TSFlags{0} = 1;
let TSFlags{1} = 1;
}
-def TABLESWITCH_I64 : I<(outs), (ins I64:$index, bb_op:$default, variable_ops),
- [(WebAssemblytableswitch I64:$index, bb:$default)],
- "tableswitch\t$index, $default"> {
+def BR_TABLE_I64 : I<(outs), (ins I64:$index, variable_ops),
+ [(WebAssemblybr_table I64:$index)],
+ "br_table \t$index"> {
let TSFlags{0} = 1;
let TSFlags{1} = 1;
}
@@ -71,6 +71,10 @@ def END_LOOP : I<(outs), (ins), [], "end_loop">;
multiclass RETURN<WebAssemblyRegClass vt> {
def RETURN_#vt : I<(outs), (ins vt:$val), [(WebAssemblyreturn vt:$val)],
"return \t$val">;
+ // Equivalent to RETURN_#vt, for use at the end of a function when wasm
+ // semantics return by falling off the end of the block.
+ let isCodeGenOnly = 1 in
+ def FALLTHROUGH_RETURN_#vt : I<(outs), (ins vt:$val), []>;
}
let isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in {
@@ -80,6 +84,10 @@ let isReturn = 1 in {
defm : RETURN<F32>;
defm : RETURN<F64>;
def RETURN_VOID : I<(outs), (ins), [(WebAssemblyreturn)], "return">;
+
+ // This is to RETURN_VOID what FALLTHROUGH_RETURN_#vt is to RETURN_#vt.
+ let isCodeGenOnly = 1 in
+ def FALLTHROUGH_RETURN_VOID : I<(outs), (ins), []>;
} // isReturn = 1
def UNREACHABLE : I<(outs), (ins), [(trap)], "unreachable">;
} // isTerminator = 1, hasCtrlDep = 1, isBarrier = 1
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrFloat.td b/lib/Target/WebAssembly/WebAssemblyInstrFloat.td
index 5520c6de6732..64569720375c 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrFloat.td
+++ b/lib/Target/WebAssembly/WebAssemblyInstrFloat.td
@@ -77,12 +77,12 @@ def : Pat<(setge f64:$lhs, f64:$rhs), (GE_F64 f64:$lhs, f64:$rhs)>;
let Defs = [ARGUMENTS] in {
-def SELECT_F32 : I<(outs F32:$dst), (ins I32:$cond, F32:$lhs, F32:$rhs),
+def SELECT_F32 : I<(outs F32:$dst), (ins F32:$lhs, F32:$rhs, I32:$cond),
[(set F32:$dst, (select I32:$cond, F32:$lhs, F32:$rhs))],
- "f32.select\t$dst, $cond, $lhs, $rhs">;
-def SELECT_F64 : I<(outs F64:$dst), (ins I32:$cond, F64:$lhs, F64:$rhs),
+ "f32.select\t$dst, $lhs, $rhs, $cond">;
+def SELECT_F64 : I<(outs F64:$dst), (ins F64:$lhs, F64:$rhs, I32:$cond),
[(set F64:$dst, (select I32:$cond, F64:$lhs, F64:$rhs))],
- "f64.select\t$dst, $cond, $lhs, $rhs">;
+ "f64.select\t$dst, $lhs, $rhs, $cond">;
} // Defs = [ARGUMENTS]
@@ -90,12 +90,12 @@ def SELECT_F64 : I<(outs F64:$dst), (ins I32:$cond, F64:$lhs, F64:$rhs),
// WebAssembly's select interprets any non-zero value as true, so we can fold
// a setne with 0 into a select.
def : Pat<(select (i32 (setne I32:$cond, 0)), F32:$lhs, F32:$rhs),
- (SELECT_F32 I32:$cond, F32:$lhs, F32:$rhs)>;
+ (SELECT_F32 F32:$lhs, F32:$rhs, I32:$cond)>;
def : Pat<(select (i32 (setne I32:$cond, 0)), F64:$lhs, F64:$rhs),
- (SELECT_F64 I32:$cond, F64:$lhs, F64:$rhs)>;
+ (SELECT_F64 F64:$lhs, F64:$rhs, I32:$cond)>;
// And again, this time with seteq instead of setne and the arms reversed.
def : Pat<(select (i32 (seteq I32:$cond, 0)), F32:$lhs, F32:$rhs),
- (SELECT_F32 I32:$cond, F32:$rhs, F32:$lhs)>;
+ (SELECT_F32 F32:$rhs, F32:$lhs, I32:$cond)>;
def : Pat<(select (i32 (seteq I32:$cond, 0)), F64:$lhs, F64:$rhs),
- (SELECT_F64 I32:$cond, F64:$rhs, F64:$lhs)>;
+ (SELECT_F64 F64:$rhs, F64:$lhs, I32:$cond)>;
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp b/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
index 028e9af0834f..2fd3eab99d78 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
@@ -15,6 +15,7 @@
#include "WebAssemblyInstrInfo.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -32,16 +33,32 @@ WebAssemblyInstrInfo::WebAssemblyInstrInfo(const WebAssemblySubtarget &STI)
WebAssembly::ADJCALLSTACKUP),
RI(STI.getTargetTriple()) {}
+bool WebAssemblyInstrInfo::isReallyTriviallyReMaterializable(
+ const MachineInstr &MI, AliasAnalysis *AA) const {
+ switch (MI.getOpcode()) {
+ case WebAssembly::CONST_I32:
+ case WebAssembly::CONST_I64:
+ case WebAssembly::CONST_F32:
+ case WebAssembly::CONST_F64:
+    // isReallyTriviallyReMaterializableGeneric misses these because of the
+    // ARGUMENTS implicit def, so we manually override it here.
+ return true;
+ default:
+ return false;
+ }
+}
+
void WebAssemblyInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
- DebugLoc DL, unsigned DestReg,
+ const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
   // This method is called by post-RA expansion, which expects only pregs to
   // exist. However, we need to handle both here.
auto &MRI = MBB.getParent()->getRegInfo();
- const TargetRegisterClass *RC = TargetRegisterInfo::isVirtualRegister(DestReg) ?
- MRI.getRegClass(DestReg) :
- MRI.getTargetRegisterInfo()->getMinimalPhysRegClass(SrcReg);
+ const TargetRegisterClass *RC =
+ TargetRegisterInfo::isVirtualRegister(DestReg)
+ ? MRI.getRegClass(DestReg)
+ : MRI.getTargetRegisterInfo()->getMinimalPhysRegClass(DestReg);
unsigned CopyLocalOpcode;
if (RC == &WebAssembly::I32RegClass)
@@ -59,8 +76,23 @@ void WebAssemblyInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
.addReg(SrcReg, KillSrc ? RegState::Kill : 0);
}
+MachineInstr *
+WebAssemblyInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
+ unsigned OpIdx1,
+ unsigned OpIdx2) const {
+ // If the operands are stackified, we can't reorder them.
+ WebAssemblyFunctionInfo &MFI =
+ *MI.getParent()->getParent()->getInfo<WebAssemblyFunctionInfo>();
+ if (MFI.isVRegStackified(MI.getOperand(OpIdx1).getReg()) ||
+ MFI.isVRegStackified(MI.getOperand(OpIdx2).getReg()))
+ return nullptr;
+
+ // Otherwise use the default implementation.
+ return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
+}
+
// Branch analysis.
-bool WebAssemblyInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+bool WebAssemblyInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
@@ -75,22 +107,22 @@ bool WebAssemblyInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
if (HaveCond)
return true;
// If we're running after CFGStackify, we can't optimize further.
- if (!MI.getOperand(1).isMBB())
+ if (!MI.getOperand(0).isMBB())
return true;
Cond.push_back(MachineOperand::CreateImm(true));
- Cond.push_back(MI.getOperand(0));
- TBB = MI.getOperand(1).getMBB();
+ Cond.push_back(MI.getOperand(1));
+ TBB = MI.getOperand(0).getMBB();
HaveCond = true;
break;
case WebAssembly::BR_UNLESS:
if (HaveCond)
return true;
// If we're running after CFGStackify, we can't optimize further.
- if (!MI.getOperand(1).isMBB())
+ if (!MI.getOperand(0).isMBB())
return true;
Cond.push_back(MachineOperand::CreateImm(false));
- Cond.push_back(MI.getOperand(0));
- TBB = MI.getOperand(1).getMBB();
+ Cond.push_back(MI.getOperand(1));
+ TBB = MI.getOperand(0).getMBB();
HaveCond = true;
break;
case WebAssembly::BR:
@@ -133,7 +165,7 @@ unsigned WebAssemblyInstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const {
+ const DebugLoc &DL) const {
if (Cond.empty()) {
if (!TBB)
return 0;
@@ -145,13 +177,11 @@ unsigned WebAssemblyInstrInfo::InsertBranch(MachineBasicBlock &MBB,
assert(Cond.size() == 2 && "Expected a flag and a successor block");
if (Cond[0].getImm()) {
- BuildMI(&MBB, DL, get(WebAssembly::BR_IF))
- .addOperand(Cond[1])
- .addMBB(TBB);
+ BuildMI(&MBB, DL, get(WebAssembly::BR_IF)).addMBB(TBB).addOperand(Cond[1]);
} else {
BuildMI(&MBB, DL, get(WebAssembly::BR_UNLESS))
- .addOperand(Cond[1])
- .addMBB(TBB);
+ .addMBB(TBB)
+ .addOperand(Cond[1]);
}
if (!FBB)
return 1;
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrInfo.h b/lib/Target/WebAssembly/WebAssemblyInstrInfo.h
index 5ddd9b36f243..d93f958ca4cd 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrInfo.h
+++ b/lib/Target/WebAssembly/WebAssemblyInstrInfo.h
@@ -34,18 +34,24 @@ public:
const WebAssemblyRegisterInfo &getRegisterInfo() const { return RI; }
+ bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
+ AliasAnalysis *AA) const override;
+
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
- DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
+ MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
+ unsigned OpIdx1,
+ unsigned OpIdx2) const override;
- bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify = false) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const override;
+ const DebugLoc &DL) const override;
bool
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
};
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrInfo.td b/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
index 2e682a475471..4b319871cf16 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
+++ b/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
@@ -30,7 +30,7 @@ def SDT_WebAssemblyCallSeqEnd :
SDCallSeqEnd<[SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
def SDT_WebAssemblyCall0 : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
def SDT_WebAssemblyCall1 : SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>;
-def SDT_WebAssemblyTableswitch : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
+def SDT_WebAssemblyBrTable : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
def SDT_WebAssemblyArgument : SDTypeProfile<1, 1, [SDTCisVT<1, i32>]>;
def SDT_WebAssemblyReturn : SDTypeProfile<0, -1, []>;
def SDT_WebAssemblyWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
@@ -52,9 +52,9 @@ def WebAssemblycall0 : SDNode<"WebAssemblyISD::CALL0",
def WebAssemblycall1 : SDNode<"WebAssemblyISD::CALL1",
SDT_WebAssemblyCall1,
[SDNPHasChain, SDNPVariadic]>;
-def WebAssemblytableswitch : SDNode<"WebAssemblyISD::TABLESWITCH",
- SDT_WebAssemblyTableswitch,
- [SDNPHasChain, SDNPVariadic]>;
+def WebAssemblybr_table : SDNode<"WebAssemblyISD::BR_TABLE",
+ SDT_WebAssemblyBrTable,
+ [SDNPHasChain, SDNPVariadic]>;
def WebAssemblyargument : SDNode<"WebAssemblyISD::ARGUMENT",
SDT_WebAssemblyArgument>;
def WebAssemblyreturn : SDNode<"WebAssemblyISD::RETURN",
@@ -71,10 +71,17 @@ let OperandNamespace = "WebAssembly" in {
let OperandType = "OPERAND_BASIC_BLOCK" in
def bb_op : Operand<OtherVT>;
-let OperandType = "OPERAND_FPIMM" in {
+let OperandType = "OPERAND_FP32IMM" in
def f32imm_op : Operand<f32>;
+
+let OperandType = "OPERAND_FP64IMM" in
def f64imm_op : Operand<f64>;
-} // OperandType = "OPERAND_FPIMM"
+
+let OperandType = "OPERAND_P2ALIGN" in {
+def P2Align : Operand<i32> {
+ let PrintMethod = "printWebAssemblyP2AlignOperand";
+}
+} // OperandType = "OPERAND_P2ALIGN"
} // OperandNamespace = "WebAssembly"
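P2Align carries the access alignment as a base-2 logarithm, matching wasm's memarg encoding, and the load/store patterns below simply pass 0 for it. A minimal sketch of that encoding, using a hypothetical helper (not the printWebAssemblyP2AlignOperand printer itself):

    #include <cassert>
    #include <cstdint>

    // p2align encodes an alignment of (1 << p2align) bytes, e.g. 2 => 4-byte
    // aligned. This hypothetical helper derives it from a byte alignment.
    static unsigned byteAlignToP2(uint32_t AlignBytes) {
      unsigned P2 = 0;
      while ((1u << P2) < AlignBytes)
        ++P2;
      return P2;
    }

    int main() {
      assert(byteAlignToP2(1) == 0);
      assert(byteAlignToP2(4) == 2);
      assert(byteAlignToP2(8) == 3);
      return 0;
    }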
@@ -101,15 +108,9 @@ defm : ARGUMENT<F64>;
let Defs = [ARGUMENTS] in {
// get_local and set_local are not generated by instruction selection; they
-// are implied by virtual register uses and defs in most contexts. However,
-// they are explicitly emitted for special purposes.
+// are implied by virtual register uses and defs.
multiclass LOCAL<WebAssemblyRegClass vt> {
- def GET_LOCAL_#vt : I<(outs vt:$res), (ins i32imm:$regno), [],
- "get_local\t$res, $regno">;
- // TODO: set_local returns its operand value
- def SET_LOCAL_#vt : I<(outs), (ins i32imm:$regno, vt:$src), [],
- "set_local\t$regno, $src">;
-
+let hasSideEffects = 0 in {
// COPY_LOCAL is not an actual instruction in wasm, but since we allow
// get_local and set_local to be implicit, we can have a COPY_LOCAL which
// is actually a no-op because all the work is done in the implied
@@ -117,13 +118,21 @@ multiclass LOCAL<WebAssemblyRegClass vt> {
let isAsCheapAsAMove = 1 in
def COPY_LOCAL_#vt : I<(outs vt:$res), (ins vt:$src), [],
"copy_local\t$res, $src">;
+
+ // TEE_LOCAL is similar to COPY_LOCAL, but writes two copies of its result.
+ // Typically this would be used to stackify one result and write the other
+ // result to a local.
+ let isAsCheapAsAMove = 1 in
+ def TEE_LOCAL_#vt : I<(outs vt:$res, vt:$also), (ins vt:$src), [],
+ "tee_local\t$res, $also, $src">;
+} // hasSideEffects = 0
}
defm : LOCAL<I32>;
defm : LOCAL<I64>;
defm : LOCAL<F32>;
defm : LOCAL<F64>;
-let isMoveImm = 1 in {
+let isMoveImm = 1, isAsCheapAsAMove = 1, isReMaterializable = 1 in {
def CONST_I32 : I<(outs I32:$res), (ins i32imm:$imm),
[(set I32:$res, imm:$imm)],
"i32.const\t$res, $imm">;
@@ -136,7 +145,7 @@ def CONST_F32 : I<(outs F32:$res), (ins f32imm_op:$imm),
def CONST_F64 : I<(outs F64:$res), (ins f64imm_op:$imm),
[(set F64:$res, fpimm:$imm)],
"f64.const\t$res, $imm">;
-} // isMoveImm = 1
+} // isMoveImm = 1, isAsCheapAsAMove = 1, isReMaterializable = 1
} // Defs = [ARGUMENTS]
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrInteger.td b/lib/Target/WebAssembly/WebAssemblyInstrInteger.td
index 09e5eafb85e9..7eaa57bb217e 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrInteger.td
+++ b/lib/Target/WebAssembly/WebAssemblyInstrInteger.td
@@ -36,6 +36,8 @@ defm XOR : BinaryInt<xor, "xor ">;
defm SHL : BinaryInt<shl, "shl ">;
defm SHR_U : BinaryInt<srl, "shr_u">;
defm SHR_S : BinaryInt<sra, "shr_s">;
+defm ROTL : BinaryInt<rotl, "rotl">;
+defm ROTR : BinaryInt<rotr, "rotr">;
let isCommutable = 1 in {
defm EQ : ComparisonInt<SETEQ, "eq ">;
@@ -54,22 +56,29 @@ defm CLZ : UnaryInt<ctlz, "clz ">;
defm CTZ : UnaryInt<cttz, "ctz ">;
defm POPCNT : UnaryInt<ctpop, "popcnt">;
+def EQZ_I32 : I<(outs I32:$dst), (ins I32:$src),
+ [(set I32:$dst, (setcc I32:$src, 0, SETEQ))],
+ "i32.eqz \t$dst, $src">;
+def EQZ_I64 : I<(outs I32:$dst), (ins I64:$src),
+ [(set I32:$dst, (setcc I64:$src, 0, SETEQ))],
+ "i64.eqz \t$dst, $src">;
+
} // Defs = [ARGUMENTS]
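eqz gives the backend a one-instruction compare against zero: it yields an i32 that is 1 exactly when its operand is 0 (the LowerBrUnless change further down switches to it when inverting a condition). A trivial standalone model of the semantics:

    #include <cassert>
    #include <cstdint>

    // i64.eqz modeled in C++: a 0/1-valued i32 result, 1 iff the input is zero.
    static uint32_t eqz64(uint64_t V) { return V == 0; }

    int main() {
      assert(eqz64(0) == 1);
      assert(eqz64(1) == 0);
      assert(eqz64(UINT64_MAX) == 0);
      return 0;
    }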
-// Expand the "don't care" operations to supported operations.
-def : Pat<(ctlz_zero_undef I32:$src), (CLZ_I32 I32:$src)>;
-def : Pat<(ctlz_zero_undef I64:$src), (CLZ_I64 I64:$src)>;
-def : Pat<(cttz_zero_undef I32:$src), (CTZ_I32 I32:$src)>;
-def : Pat<(cttz_zero_undef I64:$src), (CTZ_I64 I64:$src)>;
+// Optimize away an explicit mask on a rotate count.
+def : Pat<(rotl I32:$lhs, (and I32:$rhs, 31)), (ROTL_I32 I32:$lhs, I32:$rhs)>;
+def : Pat<(rotr I32:$lhs, (and I32:$rhs, 31)), (ROTR_I32 I32:$lhs, I32:$rhs)>;
+def : Pat<(rotl I64:$lhs, (and I64:$rhs, 63)), (ROTL_I64 I64:$lhs, I64:$rhs)>;
+def : Pat<(rotr I64:$lhs, (and I64:$rhs, 63)), (ROTR_I64 I64:$lhs, I64:$rhs)>;
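These folds are sound because wasm's rotates already reduce the count modulo the bit width, so an explicit 'and' with width-1 on the count is a no-op. A quick standalone check, with rotl modeled the way wasm defines it:

    #include <cassert>
    #include <cstdint>

    // Rotate-left with the count taken modulo 32, as wasm's i32.rotl defines it.
    static uint32_t rotl32(uint32_t V, uint32_t N) {
      N &= 31;
      return N == 0 ? V : (V << N) | (V >> (32 - N));
    }

    int main() {
      // Masking the count first never changes the result.
      for (uint32_t N = 0; N < 128; ++N)
        assert(rotl32(0x12345678u, N & 31) == rotl32(0x12345678u, N));
      return 0;
    }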
let Defs = [ARGUMENTS] in {
-def SELECT_I32 : I<(outs I32:$dst), (ins I32:$cond, I32:$lhs, I32:$rhs),
+def SELECT_I32 : I<(outs I32:$dst), (ins I32:$lhs, I32:$rhs, I32:$cond),
[(set I32:$dst, (select I32:$cond, I32:$lhs, I32:$rhs))],
- "i32.select\t$dst, $cond, $lhs, $rhs">;
-def SELECT_I64 : I<(outs I64:$dst), (ins I32:$cond, I64:$lhs, I64:$rhs),
+ "i32.select\t$dst, $lhs, $rhs, $cond">;
+def SELECT_I64 : I<(outs I64:$dst), (ins I64:$lhs, I64:$rhs, I32:$cond),
[(set I64:$dst, (select I32:$cond, I64:$lhs, I64:$rhs))],
- "i64.select\t$dst, $cond, $lhs, $rhs">;
+ "i64.select\t$dst, $lhs, $rhs, $cond">;
} // Defs = [ARGUMENTS]
@@ -77,12 +86,12 @@ def SELECT_I64 : I<(outs I64:$dst), (ins I32:$cond, I64:$lhs, I64:$rhs),
// WebAssembly's select interprets any non-zero value as true, so we can fold
// a setne with 0 into a select.
def : Pat<(select (i32 (setne I32:$cond, 0)), I32:$lhs, I32:$rhs),
- (SELECT_I32 I32:$cond, I32:$lhs, I32:$rhs)>;
+ (SELECT_I32 I32:$lhs, I32:$rhs, I32:$cond)>;
def : Pat<(select (i32 (setne I32:$cond, 0)), I64:$lhs, I64:$rhs),
- (SELECT_I64 I32:$cond, I64:$lhs, I64:$rhs)>;
+ (SELECT_I64 I64:$lhs, I64:$rhs, I32:$cond)>;
// And again, this time with seteq instead of setne and the arms reversed.
def : Pat<(select (i32 (seteq I32:$cond, 0)), I32:$lhs, I32:$rhs),
- (SELECT_I32 I32:$cond, I32:$rhs, I32:$lhs)>;
+ (SELECT_I32 I32:$rhs, I32:$lhs, I32:$cond)>;
def : Pat<(select (i32 (seteq I32:$cond, 0)), I64:$lhs, I64:$rhs),
- (SELECT_I64 I32:$cond, I64:$rhs, I64:$lhs)>;
+ (SELECT_I64 I64:$rhs, I64:$lhs, I32:$cond)>;
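Both folds follow from select's any-nonzero-is-true semantics: comparing the condition against zero with setne adds nothing, and with seteq it merely swaps the arms. The same identities in plain C++:

    #include <cassert>

    int main() {
      int A = 10, B = 20;
      for (int C = -2; C <= 2; ++C) {
        assert(((C != 0) ? A : B) == (C ? A : B)); // setne-with-0 folds away
        assert(((C == 0) ? A : B) == (C ? B : A)); // seteq-with-0 swaps the arms
      }
      return 0;
    }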
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrMemory.td b/lib/Target/WebAssembly/WebAssemblyInstrMemory.td
index b39ac5212f87..521c664ca4ab 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrMemory.td
+++ b/lib/Target/WebAssembly/WebAssemblyInstrMemory.td
@@ -28,6 +28,18 @@ def regPlusImm : PatFrag<(ops node:$addr, node:$off),
(add node:$addr, node:$off),
[{ return N->getFlags()->hasNoUnsignedWrap(); }]>;
+// Treat an 'or' node as an 'add' when the operands are known to have no set
+// bits in common, since without overlapping bits no carries can occur.
+def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
+ return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
+
+ APInt KnownZero0, KnownOne0;
+ CurDAG->computeKnownBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
+ APInt KnownZero1, KnownOne1;
+ CurDAG->computeKnownBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
+ return (~KnownZero0 & ~KnownZero1) == 0;
+}]>;
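The justification: when no bit position is set in both operands, addition produces no carries, so 'or' and 'add' compute the same value and the 'or' can reuse the add-based addressing patterns. A self-contained check of the identity, independent of the SelectionDAG calls above:

    #include <cassert>
    #include <cstdint>

    // (A & B) == 0 guarantees no column carries, hence (A | B) == A + B.
    static bool orActsAsAdd(uint32_t A, uint32_t B) { return (A & B) == 0; }

    int main() {
      uint32_t Base = 0x1000; // low 12 bits known zero
      uint32_t Off = 0x7ff;   // lives entirely in those low bits
      assert(orActsAsAdd(Base, Off));
      assert((Base | Off) == Base + Off);
      return 0;
    }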
+
// GlobalAddresses are conceptually unsigned values, so we can also fold them
// into immediate values as long as their offsets are non-negative.
def regPlusGA : PatFrag<(ops node:$addr, node:$off),
@@ -46,325 +58,392 @@ def regPlusGA : PatFrag<(ops node:$addr, node:$off),
let Defs = [ARGUMENTS] in {
// Basic load.
-def LOAD_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr), [],
- "i32.load\t$dst, ${off}(${addr})">;
-def LOAD_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr), [],
- "i64.load\t$dst, ${off}(${addr})">;
-def LOAD_F32 : I<(outs F32:$dst), (ins i32imm:$off, I32:$addr), [],
- "f32.load\t$dst, ${off}(${addr})">;
-def LOAD_F64 : I<(outs F64:$dst), (ins i32imm:$off, I32:$addr), [],
- "f64.load\t$dst, ${off}(${addr})">;
+def LOAD_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align), [],
+ "i32.load\t$dst, ${off}(${addr})${p2align}">;
+def LOAD_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align), [],
+ "i64.load\t$dst, ${off}(${addr})${p2align}">;
+def LOAD_F32 : I<(outs F32:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align), [],
+ "f32.load\t$dst, ${off}(${addr})${p2align}">;
+def LOAD_F64 : I<(outs F64:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align), [],
+ "f64.load\t$dst, ${off}(${addr})${p2align}">;
} // Defs = [ARGUMENTS]
// Select loads with no constant offset.
-def : Pat<(i32 (load I32:$addr)), (LOAD_I32 0, $addr)>;
-def : Pat<(i64 (load I32:$addr)), (LOAD_I64 0, $addr)>;
-def : Pat<(f32 (load I32:$addr)), (LOAD_F32 0, $addr)>;
-def : Pat<(f64 (load I32:$addr)), (LOAD_F64 0, $addr)>;
+def : Pat<(i32 (load I32:$addr)), (LOAD_I32 0, $addr, 0)>;
+def : Pat<(i64 (load I32:$addr)), (LOAD_I64 0, $addr, 0)>;
+def : Pat<(f32 (load I32:$addr)), (LOAD_F32 0, $addr, 0)>;
+def : Pat<(f64 (load I32:$addr)), (LOAD_F64 0, $addr, 0)>;
// Select loads with a constant offset.
def : Pat<(i32 (load (regPlusImm I32:$addr, imm:$off))),
- (LOAD_I32 imm:$off, $addr)>;
+ (LOAD_I32 imm:$off, $addr, 0)>;
def : Pat<(i64 (load (regPlusImm I32:$addr, imm:$off))),
- (LOAD_I64 imm:$off, $addr)>;
+ (LOAD_I64 imm:$off, $addr, 0)>;
def : Pat<(f32 (load (regPlusImm I32:$addr, imm:$off))),
- (LOAD_F32 imm:$off, $addr)>;
+ (LOAD_F32 imm:$off, $addr, 0)>;
def : Pat<(f64 (load (regPlusImm I32:$addr, imm:$off))),
- (LOAD_F64 imm:$off, $addr)>;
+ (LOAD_F64 imm:$off, $addr, 0)>;
+def : Pat<(i32 (load (or_is_add I32:$addr, imm:$off))),
+ (LOAD_I32 imm:$off, $addr, 0)>;
+def : Pat<(i64 (load (or_is_add I32:$addr, imm:$off))),
+ (LOAD_I64 imm:$off, $addr, 0)>;
+def : Pat<(f32 (load (or_is_add I32:$addr, imm:$off))),
+ (LOAD_F32 imm:$off, $addr, 0)>;
+def : Pat<(f64 (load (or_is_add I32:$addr, imm:$off))),
+ (LOAD_F64 imm:$off, $addr, 0)>;
def : Pat<(i32 (load (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD_I32 tglobaladdr:$off, $addr)>;
+ (LOAD_I32 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i64 (load (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD_I64 tglobaladdr:$off, $addr)>;
+ (LOAD_I64 tglobaladdr:$off, $addr, 0)>;
def : Pat<(f32 (load (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD_F32 tglobaladdr:$off, $addr)>;
+ (LOAD_F32 tglobaladdr:$off, $addr, 0)>;
def : Pat<(f64 (load (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD_F64 tglobaladdr:$off, $addr)>;
+ (LOAD_F64 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i32 (load (add I32:$addr, (WebAssemblywrapper texternalsym:$off)))),
- (LOAD_I32 texternalsym:$off, $addr)>;
+ (LOAD_I32 texternalsym:$off, $addr, 0)>;
def : Pat<(i64 (load (add I32:$addr, (WebAssemblywrapper texternalsym:$off)))),
- (LOAD_I64 texternalsym:$off, $addr)>;
+ (LOAD_I64 texternalsym:$off, $addr, 0)>;
def : Pat<(f32 (load (add I32:$addr, (WebAssemblywrapper texternalsym:$off)))),
- (LOAD_F32 texternalsym:$off, $addr)>;
+ (LOAD_F32 texternalsym:$off, $addr, 0)>;
def : Pat<(f64 (load (add I32:$addr, (WebAssemblywrapper texternalsym:$off)))),
- (LOAD_F64 texternalsym:$off, $addr)>;
+ (LOAD_F64 texternalsym:$off, $addr, 0)>;
// Select loads with just a constant offset.
-def : Pat<(i32 (load imm:$off)), (LOAD_I32 imm:$off, (CONST_I32 0))>;
-def : Pat<(i64 (load imm:$off)), (LOAD_I64 imm:$off, (CONST_I32 0))>;
-def : Pat<(f32 (load imm:$off)), (LOAD_F32 imm:$off, (CONST_I32 0))>;
-def : Pat<(f64 (load imm:$off)), (LOAD_F64 imm:$off, (CONST_I32 0))>;
+def : Pat<(i32 (load imm:$off)), (LOAD_I32 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(i64 (load imm:$off)), (LOAD_I64 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(f32 (load imm:$off)), (LOAD_F32 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(f64 (load imm:$off)), (LOAD_F64 imm:$off, (CONST_I32 0), 0)>;
def : Pat<(i32 (load (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD_I32 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD_I32 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (load (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD_I64 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD_I64 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(f32 (load (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD_F32 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD_F32 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(f64 (load (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD_F64 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD_F64 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i32 (load (WebAssemblywrapper texternalsym:$off))),
- (LOAD_I32 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD_I32 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (load (WebAssemblywrapper texternalsym:$off))),
- (LOAD_I64 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD_I64 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(f32 (load (WebAssemblywrapper texternalsym:$off))),
- (LOAD_F32 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD_F32 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(f64 (load (WebAssemblywrapper texternalsym:$off))),
- (LOAD_F64 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD_F64 texternalsym:$off, (CONST_I32 0), 0)>;
let Defs = [ARGUMENTS] in {
// Extending load.
-def LOAD8_S_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr), [],
- "i32.load8_s\t$dst, ${off}(${addr})">;
-def LOAD8_U_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr), [],
- "i32.load8_u\t$dst, ${off}(${addr})">;
-def LOAD16_S_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr), [],
- "i32.load16_s\t$dst, ${off}(${addr})">;
-def LOAD16_U_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr), [],
- "i32.load16_u\t$dst, ${off}(${addr})">;
-def LOAD8_S_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr), [],
- "i64.load8_s\t$dst, ${off}(${addr})">;
-def LOAD8_U_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr), [],
- "i64.load8_u\t$dst, ${off}(${addr})">;
-def LOAD16_S_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr), [],
- "i64.load16_s\t$dst, ${off}(${addr})">;
-def LOAD16_U_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr), [],
- "i64.load16_u\t$dst, ${off}(${addr})">;
-def LOAD32_S_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr), [],
- "i64.load32_s\t$dst, ${off}(${addr})">;
-def LOAD32_U_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr), [],
- "i64.load32_u\t$dst, ${off}(${addr})">;
+def LOAD8_S_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align), [],
+ "i32.load8_s\t$dst, ${off}(${addr})${p2align}">;
+def LOAD8_U_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align), [],
+ "i32.load8_u\t$dst, ${off}(${addr})${p2align}">;
+def LOAD16_S_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align), [],
+ "i32.load16_s\t$dst, ${off}(${addr})${p2align}">;
+def LOAD16_U_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align), [],
+ "i32.load16_u\t$dst, ${off}(${addr})${p2align}">;
+def LOAD8_S_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align), [],
+ "i64.load8_s\t$dst, ${off}(${addr})${p2align}">;
+def LOAD8_U_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align), [],
+ "i64.load8_u\t$dst, ${off}(${addr})${p2align}">;
+def LOAD16_S_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align), [],
+ "i64.load16_s\t$dst, ${off}(${addr})${p2align}">;
+def LOAD16_U_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align), [],
+ "i64.load16_u\t$dst, ${off}(${addr})${p2align}">;
+def LOAD32_S_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align), [],
+ "i64.load32_s\t$dst, ${off}(${addr})${p2align}">;
+def LOAD32_U_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align), [],
+ "i64.load32_u\t$dst, ${off}(${addr})${p2align}">;
} // Defs = [ARGUMENTS]
// Select extending loads with no constant offset.
-def : Pat<(i32 (sextloadi8 I32:$addr)), (LOAD8_S_I32 0, $addr)>;
-def : Pat<(i32 (zextloadi8 I32:$addr)), (LOAD8_U_I32 0, $addr)>;
-def : Pat<(i32 (sextloadi16 I32:$addr)), (LOAD16_S_I32 0, $addr)>;
-def : Pat<(i32 (zextloadi16 I32:$addr)), (LOAD16_U_I32 0, $addr)>;
-def : Pat<(i64 (sextloadi8 I32:$addr)), (LOAD8_S_I64 0, $addr)>;
-def : Pat<(i64 (zextloadi8 I32:$addr)), (LOAD8_U_I64 0, $addr)>;
-def : Pat<(i64 (sextloadi16 I32:$addr)), (LOAD16_S_I64 0, $addr)>;
-def : Pat<(i64 (zextloadi16 I32:$addr)), (LOAD16_U_I64 0, $addr)>;
-def : Pat<(i64 (sextloadi32 I32:$addr)), (LOAD32_S_I64 0, $addr)>;
-def : Pat<(i64 (zextloadi32 I32:$addr)), (LOAD32_U_I64 0, $addr)>;
+def : Pat<(i32 (sextloadi8 I32:$addr)), (LOAD8_S_I32 0, $addr, 0)>;
+def : Pat<(i32 (zextloadi8 I32:$addr)), (LOAD8_U_I32 0, $addr, 0)>;
+def : Pat<(i32 (sextloadi16 I32:$addr)), (LOAD16_S_I32 0, $addr, 0)>;
+def : Pat<(i32 (zextloadi16 I32:$addr)), (LOAD16_U_I32 0, $addr, 0)>;
+def : Pat<(i64 (sextloadi8 I32:$addr)), (LOAD8_S_I64 0, $addr, 0)>;
+def : Pat<(i64 (zextloadi8 I32:$addr)), (LOAD8_U_I64 0, $addr, 0)>;
+def : Pat<(i64 (sextloadi16 I32:$addr)), (LOAD16_S_I64 0, $addr, 0)>;
+def : Pat<(i64 (zextloadi16 I32:$addr)), (LOAD16_U_I64 0, $addr, 0)>;
+def : Pat<(i64 (sextloadi32 I32:$addr)), (LOAD32_S_I64 0, $addr, 0)>;
+def : Pat<(i64 (zextloadi32 I32:$addr)), (LOAD32_U_I64 0, $addr, 0)>;
// Select extending loads with a constant offset.
def : Pat<(i32 (sextloadi8 (regPlusImm I32:$addr, imm:$off))),
- (LOAD8_S_I32 imm:$off, $addr)>;
+ (LOAD8_S_I32 imm:$off, $addr, 0)>;
def : Pat<(i32 (zextloadi8 (regPlusImm I32:$addr, imm:$off))),
- (LOAD8_U_I32 imm:$off, $addr)>;
+ (LOAD8_U_I32 imm:$off, $addr, 0)>;
def : Pat<(i32 (sextloadi16 (regPlusImm I32:$addr, imm:$off))),
- (LOAD16_S_I32 imm:$off, $addr)>;
+ (LOAD16_S_I32 imm:$off, $addr, 0)>;
def : Pat<(i32 (zextloadi16 (regPlusImm I32:$addr, imm:$off))),
- (LOAD16_U_I32 imm:$off, $addr)>;
+ (LOAD16_U_I32 imm:$off, $addr, 0)>;
def : Pat<(i64 (sextloadi8 (regPlusImm I32:$addr, imm:$off))),
- (LOAD8_S_I64 imm:$off, $addr)>;
+ (LOAD8_S_I64 imm:$off, $addr, 0)>;
def : Pat<(i64 (zextloadi8 (regPlusImm I32:$addr, imm:$off))),
- (LOAD8_U_I64 imm:$off, $addr)>;
+ (LOAD8_U_I64 imm:$off, $addr, 0)>;
def : Pat<(i64 (sextloadi16 (regPlusImm I32:$addr, imm:$off))),
- (LOAD16_S_I64 imm:$off, $addr)>;
+ (LOAD16_S_I64 imm:$off, $addr, 0)>;
def : Pat<(i64 (zextloadi16 (regPlusImm I32:$addr, imm:$off))),
- (LOAD16_U_I64 imm:$off, $addr)>;
+ (LOAD16_U_I64 imm:$off, $addr, 0)>;
def : Pat<(i64 (sextloadi32 (regPlusImm I32:$addr, imm:$off))),
- (LOAD32_S_I64 imm:$off, $addr)>;
+ (LOAD32_S_I64 imm:$off, $addr, 0)>;
def : Pat<(i64 (zextloadi32 (regPlusImm I32:$addr, imm:$off))),
- (LOAD32_U_I64 imm:$off, $addr)>;
+ (LOAD32_U_I64 imm:$off, $addr, 0)>;
+def : Pat<(i32 (sextloadi8 (or_is_add I32:$addr, imm:$off))),
+ (LOAD8_S_I32 imm:$off, $addr, 0)>;
+def : Pat<(i32 (zextloadi8 (or_is_add I32:$addr, imm:$off))),
+ (LOAD8_U_I32 imm:$off, $addr, 0)>;
+def : Pat<(i32 (sextloadi16 (or_is_add I32:$addr, imm:$off))),
+ (LOAD16_S_I32 imm:$off, $addr, 0)>;
+def : Pat<(i32 (zextloadi16 (or_is_add I32:$addr, imm:$off))),
+ (LOAD16_U_I32 imm:$off, $addr, 0)>;
+def : Pat<(i64 (sextloadi8 (or_is_add I32:$addr, imm:$off))),
+ (LOAD8_S_I64 imm:$off, $addr, 0)>;
+def : Pat<(i64 (zextloadi8 (or_is_add I32:$addr, imm:$off))),
+ (LOAD8_U_I64 imm:$off, $addr, 0)>;
+def : Pat<(i64 (sextloadi16 (or_is_add I32:$addr, imm:$off))),
+ (LOAD16_S_I64 imm:$off, $addr, 0)>;
+def : Pat<(i64 (zextloadi16 (or_is_add I32:$addr, imm:$off))),
+ (LOAD16_U_I64 imm:$off, $addr, 0)>;
+def : Pat<(i64 (sextloadi32 (or_is_add I32:$addr, imm:$off))),
+ (LOAD32_S_I64 imm:$off, $addr, 0)>;
+def : Pat<(i64 (zextloadi32 (or_is_add I32:$addr, imm:$off))),
+ (LOAD32_U_I64 imm:$off, $addr, 0)>;
def : Pat<(i32 (sextloadi8 (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD8_S_I32 tglobaladdr:$off, $addr)>;
+ (LOAD8_S_I32 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i32 (zextloadi8 (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD8_U_I32 tglobaladdr:$off, $addr)>;
+ (LOAD8_U_I32 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i32 (sextloadi16 (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD16_S_I32 tglobaladdr:$off, $addr)>;
+ (LOAD16_S_I32 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i32 (zextloadi16 (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD16_U_I32 tglobaladdr:$off, $addr)>;
+ (LOAD16_U_I32 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i64 (sextloadi8 (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD8_S_I64 tglobaladdr:$off, $addr)>;
+ (LOAD8_S_I64 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i64 (zextloadi8 (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD8_U_I64 tglobaladdr:$off, $addr)>;
+ (LOAD8_U_I64 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i64 (sextloadi16 (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD16_S_I64 tglobaladdr:$off, $addr)>;
+ (LOAD16_S_I64 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i64 (zextloadi16 (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD16_U_I64 tglobaladdr:$off, $addr)>;
+ (LOAD16_U_I64 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i64 (sextloadi32 (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD32_S_I64 tglobaladdr:$off, $addr)>;
+ (LOAD32_S_I64 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i64 (zextloadi32 (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD32_U_I64 tglobaladdr:$off, $addr)>;
+ (LOAD32_U_I64 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i32 (sextloadi8 (add I32:$addr,
(WebAssemblywrapper texternalsym:$off)))),
- (LOAD8_S_I32 texternalsym:$off, $addr)>;
+ (LOAD8_S_I32 texternalsym:$off, $addr, 0)>;
def : Pat<(i32 (zextloadi8 (add I32:$addr,
(WebAssemblywrapper texternalsym:$off)))),
- (LOAD8_U_I32 texternalsym:$off, $addr)>;
+ (LOAD8_U_I32 texternalsym:$off, $addr, 0)>;
def : Pat<(i32 (sextloadi16 (add I32:$addr,
(WebAssemblywrapper texternalsym:$off)))),
- (LOAD16_S_I32 texternalsym:$off, $addr)>;
+ (LOAD16_S_I32 texternalsym:$off, $addr, 0)>;
def : Pat<(i32 (zextloadi16 (add I32:$addr,
(WebAssemblywrapper texternalsym:$off)))),
- (LOAD16_U_I32 texternalsym:$off, $addr)>;
+ (LOAD16_U_I32 texternalsym:$off, $addr, 0)>;
def : Pat<(i64 (sextloadi8 (add I32:$addr,
(WebAssemblywrapper texternalsym:$off)))),
- (LOAD8_S_I64 texternalsym:$off, $addr)>;
+ (LOAD8_S_I64 texternalsym:$off, $addr, 0)>;
def : Pat<(i64 (zextloadi8 (add I32:$addr,
(WebAssemblywrapper texternalsym:$off)))),
- (LOAD8_U_I64 texternalsym:$off, $addr)>;
+ (LOAD8_U_I64 texternalsym:$off, $addr, 0)>;
def : Pat<(i64 (sextloadi16 (add I32:$addr,
(WebAssemblywrapper texternalsym:$off)))),
- (LOAD16_S_I64 texternalsym:$off, $addr)>;
+ (LOAD16_S_I64 texternalsym:$off, $addr, 0)>;
def : Pat<(i64 (zextloadi16 (add I32:$addr,
(WebAssemblywrapper texternalsym:$off)))),
- (LOAD16_U_I64 texternalsym:$off, $addr)>;
+ (LOAD16_U_I64 texternalsym:$off, $addr, 0)>;
def : Pat<(i64 (sextloadi32 (add I32:$addr,
(WebAssemblywrapper texternalsym:$off)))),
- (LOAD32_S_I64 texternalsym:$off, $addr)>;
+ (LOAD32_S_I64 texternalsym:$off, $addr, 0)>;
def : Pat<(i64 (zextloadi32 (add I32:$addr,
(WebAssemblywrapper texternalsym:$off)))),
- (LOAD32_U_I64 texternalsym:$off, $addr)>;
+ (LOAD32_U_I64 texternalsym:$off, $addr, 0)>;
// Select extending loads with just a constant offset.
-def : Pat<(i32 (sextloadi8 imm:$off)), (LOAD8_S_I32 imm:$off, (CONST_I32 0))>;
-def : Pat<(i32 (zextloadi8 imm:$off)), (LOAD8_U_I32 imm:$off, (CONST_I32 0))>;
-def : Pat<(i32 (sextloadi16 imm:$off)), (LOAD16_S_I32 imm:$off, (CONST_I32 0))>;
-def : Pat<(i32 (zextloadi16 imm:$off)), (LOAD16_U_I32 imm:$off, (CONST_I32 0))>;
-def : Pat<(i64 (sextloadi8 imm:$off)), (LOAD8_S_I64 imm:$off, (CONST_I32 0))>;
-def : Pat<(i64 (zextloadi8 imm:$off)), (LOAD8_U_I64 imm:$off, (CONST_I32 0))>;
-def : Pat<(i64 (sextloadi16 imm:$off)), (LOAD16_S_I64 imm:$off, (CONST_I32 0))>;
-def : Pat<(i64 (zextloadi16 imm:$off)), (LOAD16_U_I64 imm:$off, (CONST_I32 0))>;
-def : Pat<(i64 (sextloadi32 imm:$off)), (LOAD32_S_I64 imm:$off, (CONST_I32 0))>;
-def : Pat<(i64 (zextloadi32 imm:$off)), (LOAD32_U_I64 imm:$off, (CONST_I32 0))>;
+def : Pat<(i32 (sextloadi8 imm:$off)),
+ (LOAD8_S_I32 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(i32 (zextloadi8 imm:$off)),
+ (LOAD8_U_I32 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(i32 (sextloadi16 imm:$off)),
+ (LOAD16_S_I32 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(i32 (zextloadi16 imm:$off)),
+ (LOAD16_U_I32 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(i64 (sextloadi8 imm:$off)),
+ (LOAD8_S_I64 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(i64 (zextloadi8 imm:$off)),
+ (LOAD8_U_I64 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(i64 (sextloadi16 imm:$off)),
+ (LOAD16_S_I64 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(i64 (zextloadi16 imm:$off)),
+ (LOAD16_U_I64 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(i64 (sextloadi32 imm:$off)),
+ (LOAD32_S_I64 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(i64 (zextloadi32 imm:$off)),
+ (LOAD32_U_I64 imm:$off, (CONST_I32 0), 0)>;
def : Pat<(i32 (sextloadi8 (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD8_S_I32 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD8_S_I32 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i32 (zextloadi8 (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD8_U_I32 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD8_U_I32 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i32 (sextloadi16 (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD16_S_I32 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD16_S_I32 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i32 (zextloadi16 (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD16_U_I32 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD16_U_I32 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (sextloadi8 (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD8_S_I64 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD8_S_I64 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (zextloadi8 (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD8_U_I64 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD8_U_I64 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (sextloadi16 (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD16_S_I64 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD16_S_I64 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (zextloadi16 (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD16_U_I64 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD16_U_I64 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (sextloadi32 (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD32_S_I64 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD32_S_I64 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (zextloadi32 (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD32_U_I64 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD32_U_I64 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i32 (sextloadi8 (WebAssemblywrapper texternalsym:$off))),
- (LOAD8_S_I32 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD8_S_I32 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(i32 (zextloadi8 (WebAssemblywrapper texternalsym:$off))),
- (LOAD8_U_I32 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD8_U_I32 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(i32 (sextloadi16 (WebAssemblywrapper texternalsym:$off))),
- (LOAD16_S_I32 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD16_S_I32 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(i32 (zextloadi16 (WebAssemblywrapper texternalsym:$off))),
- (LOAD16_U_I32 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD16_U_I32 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (sextloadi8 (WebAssemblywrapper texternalsym:$off))),
- (LOAD8_S_I64 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD8_S_I64 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (zextloadi8 (WebAssemblywrapper texternalsym:$off))),
- (LOAD8_U_I64 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD8_U_I64 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (sextloadi16 (WebAssemblywrapper texternalsym:$off))),
- (LOAD16_S_I64 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD16_S_I64 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (zextloadi16 (WebAssemblywrapper texternalsym:$off))),
- (LOAD16_U_I64 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD16_U_I64 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (sextloadi32 (WebAssemblywrapper texternalsym:$off))),
- (LOAD32_S_I64 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD32_S_I64 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (zextloadi32 (WebAssemblywrapper texternalsym:$off))),
- (LOAD32_U_I64 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD32_U_I64 texternalsym:$off, (CONST_I32 0), 0)>;
// Resolve "don't care" extending loads to zero-extending loads. This is
// somewhat arbitrary, but zero-extending is conceptually simpler.
// Select "don't care" extending loads with no constant offset.
-def : Pat<(i32 (extloadi8 I32:$addr)), (LOAD8_U_I32 0, $addr)>;
-def : Pat<(i32 (extloadi16 I32:$addr)), (LOAD16_U_I32 0, $addr)>;
-def : Pat<(i64 (extloadi8 I32:$addr)), (LOAD8_U_I64 0, $addr)>;
-def : Pat<(i64 (extloadi16 I32:$addr)), (LOAD16_U_I64 0, $addr)>;
-def : Pat<(i64 (extloadi32 I32:$addr)), (LOAD32_U_I64 0, $addr)>;
+def : Pat<(i32 (extloadi8 I32:$addr)), (LOAD8_U_I32 0, $addr, 0)>;
+def : Pat<(i32 (extloadi16 I32:$addr)), (LOAD16_U_I32 0, $addr, 0)>;
+def : Pat<(i64 (extloadi8 I32:$addr)), (LOAD8_U_I64 0, $addr, 0)>;
+def : Pat<(i64 (extloadi16 I32:$addr)), (LOAD16_U_I64 0, $addr, 0)>;
+def : Pat<(i64 (extloadi32 I32:$addr)), (LOAD32_U_I64 0, $addr, 0)>;
// Select "don't care" extending loads with a constant offset.
def : Pat<(i32 (extloadi8 (regPlusImm I32:$addr, imm:$off))),
- (LOAD8_U_I32 imm:$off, $addr)>;
+ (LOAD8_U_I32 imm:$off, $addr, 0)>;
def : Pat<(i32 (extloadi16 (regPlusImm I32:$addr, imm:$off))),
- (LOAD16_U_I32 imm:$off, $addr)>;
+ (LOAD16_U_I32 imm:$off, $addr, 0)>;
def : Pat<(i64 (extloadi8 (regPlusImm I32:$addr, imm:$off))),
- (LOAD8_U_I64 imm:$off, $addr)>;
+ (LOAD8_U_I64 imm:$off, $addr, 0)>;
def : Pat<(i64 (extloadi16 (regPlusImm I32:$addr, imm:$off))),
- (LOAD16_U_I64 imm:$off, $addr)>;
+ (LOAD16_U_I64 imm:$off, $addr, 0)>;
def : Pat<(i64 (extloadi32 (regPlusImm I32:$addr, imm:$off))),
- (LOAD32_U_I64 imm:$off, $addr)>;
+ (LOAD32_U_I64 imm:$off, $addr, 0)>;
+def : Pat<(i32 (extloadi8 (or_is_add I32:$addr, imm:$off))),
+ (LOAD8_U_I32 imm:$off, $addr, 0)>;
+def : Pat<(i32 (extloadi16 (or_is_add I32:$addr, imm:$off))),
+ (LOAD16_U_I32 imm:$off, $addr, 0)>;
+def : Pat<(i64 (extloadi8 (or_is_add I32:$addr, imm:$off))),
+ (LOAD8_U_I64 imm:$off, $addr, 0)>;
+def : Pat<(i64 (extloadi16 (or_is_add I32:$addr, imm:$off))),
+ (LOAD16_U_I64 imm:$off, $addr, 0)>;
+def : Pat<(i64 (extloadi32 (or_is_add I32:$addr, imm:$off))),
+ (LOAD32_U_I64 imm:$off, $addr, 0)>;
def : Pat<(i32 (extloadi8 (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD8_U_I32 tglobaladdr:$off, $addr)>;
+ (LOAD8_U_I32 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i32 (extloadi16 (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD16_U_I32 tglobaladdr:$off, $addr)>;
+ (LOAD16_U_I32 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i64 (extloadi8 (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD8_U_I64 tglobaladdr:$off, $addr)>;
+ (LOAD8_U_I64 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i64 (extloadi16 (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD16_U_I64 tglobaladdr:$off, $addr)>;
+ (LOAD16_U_I64 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i64 (extloadi32 (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off)))),
- (LOAD32_U_I64 tglobaladdr:$off, $addr)>;
+ (LOAD32_U_I64 tglobaladdr:$off, $addr, 0)>;
def : Pat<(i32 (extloadi8 (add I32:$addr,
(WebAssemblywrapper texternalsym:$off)))),
- (LOAD8_U_I32 texternalsym:$off, $addr)>;
+ (LOAD8_U_I32 texternalsym:$off, $addr, 0)>;
def : Pat<(i32 (extloadi16 (add I32:$addr,
(WebAssemblywrapper texternalsym:$off)))),
- (LOAD16_U_I32 texternalsym:$off, $addr)>;
+ (LOAD16_U_I32 texternalsym:$off, $addr, 0)>;
def : Pat<(i64 (extloadi8 (add I32:$addr,
(WebAssemblywrapper texternalsym:$off)))),
- (LOAD8_U_I64 texternalsym:$off, $addr)>;
+ (LOAD8_U_I64 texternalsym:$off, $addr, 0)>;
def : Pat<(i64 (extloadi16 (add I32:$addr,
(WebAssemblywrapper texternalsym:$off)))),
- (LOAD16_U_I64 texternalsym:$off, $addr)>;
+ (LOAD16_U_I64 texternalsym:$off, $addr, 0)>;
def : Pat<(i64 (extloadi32 (add I32:$addr,
(WebAssemblywrapper texternalsym:$off)))),
- (LOAD32_U_I64 texternalsym:$off, $addr)>;
+ (LOAD32_U_I64 texternalsym:$off, $addr, 0)>;
// Select "don't care" extending loads with just a constant offset.
-def : Pat<(i32 (extloadi8 imm:$off)), (LOAD8_U_I32 imm:$off, (CONST_I32 0))>;
-def : Pat<(i32 (extloadi16 imm:$off)), (LOAD16_U_I32 imm:$off, (CONST_I32 0))>;
-def : Pat<(i64 (extloadi8 imm:$off)), (LOAD8_U_I64 imm:$off, (CONST_I32 0))>;
-def : Pat<(i64 (extloadi16 imm:$off)), (LOAD16_U_I64 imm:$off, (CONST_I32 0))>;
-def : Pat<(i64 (extloadi32 imm:$off)), (LOAD32_U_I64 imm:$off, (CONST_I32 0))>;
+def : Pat<(i32 (extloadi8 imm:$off)),
+ (LOAD8_U_I32 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(i32 (extloadi16 imm:$off)),
+ (LOAD16_U_I32 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(i64 (extloadi8 imm:$off)),
+ (LOAD8_U_I64 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(i64 (extloadi16 imm:$off)),
+ (LOAD16_U_I64 imm:$off, (CONST_I32 0), 0)>;
+def : Pat<(i64 (extloadi32 imm:$off)),
+ (LOAD32_U_I64 imm:$off, (CONST_I32 0), 0)>;
def : Pat<(i32 (extloadi8 (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD8_U_I32 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD8_U_I32 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i32 (extloadi16 (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD16_U_I32 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD16_U_I32 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (extloadi8 (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD8_U_I64 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD8_U_I64 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (extloadi16 (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD16_U_I64 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD16_U_I64 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (extloadi32 (WebAssemblywrapper tglobaladdr:$off))),
- (LOAD32_U_I64 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD32_U_I64 tglobaladdr:$off, (CONST_I32 0), 0)>;
def : Pat<(i32 (extloadi8 (WebAssemblywrapper texternalsym:$off))),
- (LOAD8_U_I32 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD8_U_I32 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(i32 (extloadi16 (WebAssemblywrapper texternalsym:$off))),
- (LOAD16_U_I32 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD16_U_I32 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (extloadi8 (WebAssemblywrapper texternalsym:$off))),
- (LOAD8_U_I64 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD8_U_I64 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (extloadi16 (WebAssemblywrapper texternalsym:$off))),
- (LOAD16_U_I64 texternalsym:$off, (CONST_I32 0))>;
+ (LOAD16_U_I64 texternalsym:$off, (CONST_I32 0), 0)>;
def : Pat<(i64 (extloadi32 (WebAssemblywrapper texternalsym:$off))),
- (LOAD32_U_I64 tglobaladdr:$off, (CONST_I32 0))>;
+ (LOAD32_U_I64 texternalsym:$off, (CONST_I32 0), 0)>;
let Defs = [ARGUMENTS] in {
@@ -374,205 +453,232 @@ let Defs = [ARGUMENTS] in {
// instruction definition patterns that don't reference all of the output
// operands.
// Note: WebAssembly inverts SelectionDAG's usual operand order.
-def STORE_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr, I32:$val), [],
- "i32.store\t$dst, ${off}(${addr}), $val">;
-def STORE_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr, I64:$val), [],
- "i64.store\t$dst, ${off}(${addr}), $val">;
-def STORE_F32 : I<(outs F32:$dst), (ins i32imm:$off, I32:$addr, F32:$val), [],
- "f32.store\t$dst, ${off}(${addr}), $val">;
-def STORE_F64 : I<(outs F64:$dst), (ins i32imm:$off, I32:$addr, F64:$val), [],
- "f64.store\t$dst, ${off}(${addr}), $val">;
+def STORE_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align, I32:$val), [],
+ "i32.store\t$dst, ${off}(${addr})${p2align}, $val">;
+def STORE_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align, I64:$val), [],
+ "i64.store\t$dst, ${off}(${addr})${p2align}, $val">;
+def STORE_F32 : I<(outs F32:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align, F32:$val), [],
+ "f32.store\t$dst, ${off}(${addr})${p2align}, $val">;
+def STORE_F64 : I<(outs F64:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align, F64:$val), [],
+ "f64.store\t$dst, ${off}(${addr})${p2align}, $val">;
} // Defs = [ARGUMENTS]
// Select stores with no constant offset.
-def : Pat<(store I32:$val, I32:$addr), (STORE_I32 0, I32:$addr, I32:$val)>;
-def : Pat<(store I64:$val, I32:$addr), (STORE_I64 0, I32:$addr, I64:$val)>;
-def : Pat<(store F32:$val, I32:$addr), (STORE_F32 0, I32:$addr, F32:$val)>;
-def : Pat<(store F64:$val, I32:$addr), (STORE_F64 0, I32:$addr, F64:$val)>;
+def : Pat<(store I32:$val, I32:$addr), (STORE_I32 0, I32:$addr, 0, I32:$val)>;
+def : Pat<(store I64:$val, I32:$addr), (STORE_I64 0, I32:$addr, 0, I64:$val)>;
+def : Pat<(store F32:$val, I32:$addr), (STORE_F32 0, I32:$addr, 0, F32:$val)>;
+def : Pat<(store F64:$val, I32:$addr), (STORE_F64 0, I32:$addr, 0, F64:$val)>;
// Select stores with a constant offset.
def : Pat<(store I32:$val, (regPlusImm I32:$addr, imm:$off)),
- (STORE_I32 imm:$off, I32:$addr, I32:$val)>;
+ (STORE_I32 imm:$off, I32:$addr, 0, I32:$val)>;
def : Pat<(store I64:$val, (regPlusImm I32:$addr, imm:$off)),
- (STORE_I64 imm:$off, I32:$addr, I64:$val)>;
+ (STORE_I64 imm:$off, I32:$addr, 0, I64:$val)>;
def : Pat<(store F32:$val, (regPlusImm I32:$addr, imm:$off)),
- (STORE_F32 imm:$off, I32:$addr, F32:$val)>;
+ (STORE_F32 imm:$off, I32:$addr, 0, F32:$val)>;
def : Pat<(store F64:$val, (regPlusImm I32:$addr, imm:$off)),
- (STORE_F64 imm:$off, I32:$addr, F64:$val)>;
+ (STORE_F64 imm:$off, I32:$addr, 0, F64:$val)>;
+def : Pat<(store I32:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE_I32 imm:$off, I32:$addr, 0, I32:$val)>;
+def : Pat<(store I64:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE_I64 imm:$off, I32:$addr, 0, I64:$val)>;
+def : Pat<(store F32:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE_F32 imm:$off, I32:$addr, 0, F32:$val)>;
+def : Pat<(store F64:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE_F64 imm:$off, I32:$addr, 0, F64:$val)>;
def : Pat<(store I32:$val, (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off))),
- (STORE_I32 tglobaladdr:$off, I32:$addr, I32:$val)>;
+ (STORE_I32 tglobaladdr:$off, I32:$addr, 0, I32:$val)>;
def : Pat<(store I64:$val, (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off))),
- (STORE_I64 tglobaladdr:$off, I32:$addr, I64:$val)>;
+ (STORE_I64 tglobaladdr:$off, I32:$addr, 0, I64:$val)>;
def : Pat<(store F32:$val, (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off))),
- (STORE_F32 tglobaladdr:$off, I32:$addr, F32:$val)>;
+ (STORE_F32 tglobaladdr:$off, I32:$addr, 0, F32:$val)>;
def : Pat<(store F64:$val, (regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off))),
- (STORE_F64 tglobaladdr:$off, I32:$addr, F64:$val)>;
+ (STORE_F64 tglobaladdr:$off, I32:$addr, 0, F64:$val)>;
def : Pat<(store I32:$val, (add I32:$addr,
(WebAssemblywrapper texternalsym:$off))),
- (STORE_I32 texternalsym:$off, I32:$addr, I32:$val)>;
+ (STORE_I32 texternalsym:$off, I32:$addr, 0, I32:$val)>;
def : Pat<(store I64:$val, (add I32:$addr,
(WebAssemblywrapper texternalsym:$off))),
- (STORE_I64 texternalsym:$off, I32:$addr, I64:$val)>;
+ (STORE_I64 texternalsym:$off, I32:$addr, 0, I64:$val)>;
def : Pat<(store F32:$val, (add I32:$addr,
(WebAssemblywrapper texternalsym:$off))),
- (STORE_F32 texternalsym:$off, I32:$addr, F32:$val)>;
+ (STORE_F32 texternalsym:$off, I32:$addr, 0, F32:$val)>;
def : Pat<(store F64:$val, (add I32:$addr,
(WebAssemblywrapper texternalsym:$off))),
- (STORE_F64 texternalsym:$off, I32:$addr, F64:$val)>;
+ (STORE_F64 texternalsym:$off, I32:$addr, 0, F64:$val)>;
// Select stores with just a constant offset.
def : Pat<(store I32:$val, imm:$off),
- (STORE_I32 imm:$off, (CONST_I32 0), I32:$val)>;
+ (STORE_I32 imm:$off, (CONST_I32 0), 0, I32:$val)>;
def : Pat<(store I64:$val, imm:$off),
- (STORE_I64 imm:$off, (CONST_I32 0), I64:$val)>;
+ (STORE_I64 imm:$off, (CONST_I32 0), 0, I64:$val)>;
def : Pat<(store F32:$val, imm:$off),
- (STORE_F32 imm:$off, (CONST_I32 0), F32:$val)>;
+ (STORE_F32 imm:$off, (CONST_I32 0), 0, F32:$val)>;
def : Pat<(store F64:$val, imm:$off),
- (STORE_F64 imm:$off, (CONST_I32 0), F64:$val)>;
+ (STORE_F64 imm:$off, (CONST_I32 0), 0, F64:$val)>;
def : Pat<(store I32:$val, (WebAssemblywrapper tglobaladdr:$off)),
- (STORE_I32 tglobaladdr:$off, (CONST_I32 0), I32:$val)>;
+ (STORE_I32 tglobaladdr:$off, (CONST_I32 0), 0, I32:$val)>;
def : Pat<(store I64:$val, (WebAssemblywrapper tglobaladdr:$off)),
- (STORE_I64 tglobaladdr:$off, (CONST_I32 0), I64:$val)>;
+ (STORE_I64 tglobaladdr:$off, (CONST_I32 0), 0, I64:$val)>;
def : Pat<(store F32:$val, (WebAssemblywrapper tglobaladdr:$off)),
- (STORE_F32 tglobaladdr:$off, (CONST_I32 0), F32:$val)>;
+ (STORE_F32 tglobaladdr:$off, (CONST_I32 0), 0, F32:$val)>;
def : Pat<(store F64:$val, (WebAssemblywrapper tglobaladdr:$off)),
- (STORE_F64 tglobaladdr:$off, (CONST_I32 0), F64:$val)>;
+ (STORE_F64 tglobaladdr:$off, (CONST_I32 0), 0, F64:$val)>;
def : Pat<(store I32:$val, (WebAssemblywrapper texternalsym:$off)),
- (STORE_I32 texternalsym:$off, (CONST_I32 0), I32:$val)>;
+ (STORE_I32 texternalsym:$off, (CONST_I32 0), 0, I32:$val)>;
def : Pat<(store I64:$val, (WebAssemblywrapper texternalsym:$off)),
- (STORE_I64 texternalsym:$off, (CONST_I32 0), I64:$val)>;
+ (STORE_I64 texternalsym:$off, (CONST_I32 0), 0, I64:$val)>;
def : Pat<(store F32:$val, (WebAssemblywrapper texternalsym:$off)),
- (STORE_F32 texternalsym:$off, (CONST_I32 0), F32:$val)>;
+ (STORE_F32 texternalsym:$off, (CONST_I32 0), 0, F32:$val)>;
def : Pat<(store F64:$val, (WebAssemblywrapper texternalsym:$off)),
- (STORE_F64 texternalsym:$off, (CONST_I32 0), F64:$val)>;
+ (STORE_F64 texternalsym:$off, (CONST_I32 0), 0, F64:$val)>;
let Defs = [ARGUMENTS] in {
// Truncating store.
-def STORE8_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr, I32:$val), [],
- "i32.store8\t$dst, ${off}(${addr}), $val">;
-def STORE16_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr, I32:$val), [],
- "i32.store16\t$dst, ${off}(${addr}), $val">;
-def STORE8_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr, I64:$val), [],
- "i64.store8\t$dst, ${off}(${addr}), $val">;
-def STORE16_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr, I64:$val), [],
- "i64.store16\t$dst, ${off}(${addr}), $val">;
-def STORE32_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr, I64:$val), [],
- "i64.store32\t$dst, ${off}(${addr}), $val">;
+def STORE8_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align, I32:$val), [],
+ "i32.store8\t$dst, ${off}(${addr})${p2align}, $val">;
+def STORE16_I32 : I<(outs I32:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align, I32:$val), [],
+ "i32.store16\t$dst, ${off}(${addr})${p2align}, $val">;
+def STORE8_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align, I64:$val), [],
+ "i64.store8\t$dst, ${off}(${addr})${p2align}, $val">;
+def STORE16_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align, I64:$val), [],
+ "i64.store16\t$dst, ${off}(${addr})${p2align}, $val">;
+def STORE32_I64 : I<(outs I64:$dst), (ins i32imm:$off, I32:$addr,
+ P2Align:$p2align, I64:$val), [],
+ "i64.store32\t$dst, ${off}(${addr})${p2align}, $val">;
} // Defs = [ARGUMENTS]
// Select truncating stores with no constant offset.
def : Pat<(truncstorei8 I32:$val, I32:$addr),
- (STORE8_I32 0, I32:$addr, I32:$val)>;
+ (STORE8_I32 0, I32:$addr, 0, I32:$val)>;
def : Pat<(truncstorei16 I32:$val, I32:$addr),
- (STORE16_I32 0, I32:$addr, I32:$val)>;
+ (STORE16_I32 0, I32:$addr, 0, I32:$val)>;
def : Pat<(truncstorei8 I64:$val, I32:$addr),
- (STORE8_I64 0, I32:$addr, I64:$val)>;
+ (STORE8_I64 0, I32:$addr, 0, I64:$val)>;
def : Pat<(truncstorei16 I64:$val, I32:$addr),
- (STORE16_I64 0, I32:$addr, I64:$val)>;
+ (STORE16_I64 0, I32:$addr, 0, I64:$val)>;
def : Pat<(truncstorei32 I64:$val, I32:$addr),
- (STORE32_I64 0, I32:$addr, I64:$val)>;
+ (STORE32_I64 0, I32:$addr, 0, I64:$val)>;
// Select truncating stores with a constant offset.
def : Pat<(truncstorei8 I32:$val, (regPlusImm I32:$addr, imm:$off)),
- (STORE8_I32 imm:$off, I32:$addr, I32:$val)>;
+ (STORE8_I32 imm:$off, I32:$addr, 0, I32:$val)>;
def : Pat<(truncstorei16 I32:$val, (regPlusImm I32:$addr, imm:$off)),
- (STORE16_I32 imm:$off, I32:$addr, I32:$val)>;
+ (STORE16_I32 imm:$off, I32:$addr, 0, I32:$val)>;
def : Pat<(truncstorei8 I64:$val, (regPlusImm I32:$addr, imm:$off)),
- (STORE8_I64 imm:$off, I32:$addr, I64:$val)>;
+ (STORE8_I64 imm:$off, I32:$addr, 0, I64:$val)>;
def : Pat<(truncstorei16 I64:$val, (regPlusImm I32:$addr, imm:$off)),
- (STORE16_I64 imm:$off, I32:$addr, I64:$val)>;
+ (STORE16_I64 imm:$off, I32:$addr, 0, I64:$val)>;
def : Pat<(truncstorei32 I64:$val, (regPlusImm I32:$addr, imm:$off)),
- (STORE32_I64 imm:$off, I32:$addr, I64:$val)>;
+ (STORE32_I64 imm:$off, I32:$addr, 0, I64:$val)>;
+def : Pat<(truncstorei8 I32:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE8_I32 imm:$off, I32:$addr, 0, I32:$val)>;
+def : Pat<(truncstorei16 I32:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE16_I32 imm:$off, I32:$addr, 0, I32:$val)>;
+def : Pat<(truncstorei8 I64:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE8_I64 imm:$off, I32:$addr, 0, I64:$val)>;
+def : Pat<(truncstorei16 I64:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE16_I64 imm:$off, I32:$addr, 0, I64:$val)>;
+def : Pat<(truncstorei32 I64:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE32_I64 imm:$off, I32:$addr, 0, I64:$val)>;
def : Pat<(truncstorei8 I32:$val,
(regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off))),
- (STORE8_I32 tglobaladdr:$off, I32:$addr, I32:$val)>;
+ (STORE8_I32 tglobaladdr:$off, I32:$addr, 0, I32:$val)>;
def : Pat<(truncstorei16 I32:$val,
(regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off))),
- (STORE16_I32 tglobaladdr:$off, I32:$addr, I32:$val)>;
+ (STORE16_I32 tglobaladdr:$off, I32:$addr, 0, I32:$val)>;
def : Pat<(truncstorei8 I64:$val,
(regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off))),
- (STORE8_I64 tglobaladdr:$off, I32:$addr, I64:$val)>;
+ (STORE8_I64 tglobaladdr:$off, I32:$addr, 0, I64:$val)>;
def : Pat<(truncstorei16 I64:$val,
(regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off))),
- (STORE16_I64 tglobaladdr:$off, I32:$addr, I64:$val)>;
+ (STORE16_I64 tglobaladdr:$off, I32:$addr, 0, I64:$val)>;
def : Pat<(truncstorei32 I64:$val,
(regPlusGA I32:$addr,
(WebAssemblywrapper tglobaladdr:$off))),
- (STORE32_I64 tglobaladdr:$off, I32:$addr, I64:$val)>;
+ (STORE32_I64 tglobaladdr:$off, I32:$addr, 0, I64:$val)>;
def : Pat<(truncstorei8 I32:$val, (add I32:$addr,
(WebAssemblywrapper texternalsym:$off))),
- (STORE8_I32 texternalsym:$off, I32:$addr, I32:$val)>;
+ (STORE8_I32 texternalsym:$off, I32:$addr, 0, I32:$val)>;
def : Pat<(truncstorei16 I32:$val,
(add I32:$addr,
(WebAssemblywrapper texternalsym:$off))),
- (STORE16_I32 texternalsym:$off, I32:$addr, I32:$val)>;
+ (STORE16_I32 texternalsym:$off, I32:$addr, 0, I32:$val)>;
def : Pat<(truncstorei8 I64:$val,
(add I32:$addr,
(WebAssemblywrapper texternalsym:$off))),
- (STORE8_I64 texternalsym:$off, I32:$addr, I64:$val)>;
+ (STORE8_I64 texternalsym:$off, I32:$addr, 0, I64:$val)>;
def : Pat<(truncstorei16 I64:$val,
(add I32:$addr,
(WebAssemblywrapper texternalsym:$off))),
- (STORE16_I64 texternalsym:$off, I32:$addr, I64:$val)>;
+ (STORE16_I64 texternalsym:$off, I32:$addr, 0, I64:$val)>;
def : Pat<(truncstorei32 I64:$val,
(add I32:$addr,
(WebAssemblywrapper texternalsym:$off))),
- (STORE32_I64 texternalsym:$off, I32:$addr, I64:$val)>;
+ (STORE32_I64 texternalsym:$off, I32:$addr, 0, I64:$val)>;
// Select truncating stores with just a constant offset.
def : Pat<(truncstorei8 I32:$val, imm:$off),
- (STORE8_I32 imm:$off, (CONST_I32 0), I32:$val)>;
+ (STORE8_I32 imm:$off, (CONST_I32 0), 0, I32:$val)>;
def : Pat<(truncstorei16 I32:$val, imm:$off),
- (STORE16_I32 imm:$off, (CONST_I32 0), I32:$val)>;
+ (STORE16_I32 imm:$off, (CONST_I32 0), 0, I32:$val)>;
def : Pat<(truncstorei8 I64:$val, imm:$off),
- (STORE8_I64 imm:$off, (CONST_I32 0), I64:$val)>;
+ (STORE8_I64 imm:$off, (CONST_I32 0), 0, I64:$val)>;
def : Pat<(truncstorei16 I64:$val, imm:$off),
- (STORE16_I64 imm:$off, (CONST_I32 0), I64:$val)>;
+ (STORE16_I64 imm:$off, (CONST_I32 0), 0, I64:$val)>;
def : Pat<(truncstorei32 I64:$val, imm:$off),
- (STORE32_I64 imm:$off, (CONST_I32 0), I64:$val)>;
+ (STORE32_I64 imm:$off, (CONST_I32 0), 0, I64:$val)>;
def : Pat<(truncstorei8 I32:$val, (WebAssemblywrapper tglobaladdr:$off)),
- (STORE8_I32 tglobaladdr:$off, (CONST_I32 0), I32:$val)>;
+ (STORE8_I32 tglobaladdr:$off, (CONST_I32 0), 0, I32:$val)>;
def : Pat<(truncstorei16 I32:$val, (WebAssemblywrapper tglobaladdr:$off)),
- (STORE16_I32 tglobaladdr:$off, (CONST_I32 0), I32:$val)>;
+ (STORE16_I32 tglobaladdr:$off, (CONST_I32 0), 0, I32:$val)>;
def : Pat<(truncstorei8 I64:$val, (WebAssemblywrapper tglobaladdr:$off)),
- (STORE8_I64 tglobaladdr:$off, (CONST_I32 0), I64:$val)>;
+ (STORE8_I64 tglobaladdr:$off, (CONST_I32 0), 0, I64:$val)>;
def : Pat<(truncstorei16 I64:$val, (WebAssemblywrapper tglobaladdr:$off)),
- (STORE16_I64 tglobaladdr:$off, (CONST_I32 0), I64:$val)>;
+ (STORE16_I64 tglobaladdr:$off, (CONST_I32 0), 0, I64:$val)>;
def : Pat<(truncstorei32 I64:$val, (WebAssemblywrapper tglobaladdr:$off)),
- (STORE32_I64 tglobaladdr:$off, (CONST_I32 0), I64:$val)>;
+ (STORE32_I64 tglobaladdr:$off, (CONST_I32 0), 0, I64:$val)>;
def : Pat<(truncstorei8 I32:$val, (WebAssemblywrapper texternalsym:$off)),
- (STORE8_I32 texternalsym:$off, (CONST_I32 0), I32:$val)>;
+ (STORE8_I32 texternalsym:$off, (CONST_I32 0), 0, I32:$val)>;
def : Pat<(truncstorei16 I32:$val, (WebAssemblywrapper texternalsym:$off)),
- (STORE16_I32 texternalsym:$off, (CONST_I32 0), I32:$val)>;
+ (STORE16_I32 texternalsym:$off, (CONST_I32 0), 0, I32:$val)>;
def : Pat<(truncstorei8 I64:$val, (WebAssemblywrapper texternalsym:$off)),
- (STORE8_I64 texternalsym:$off, (CONST_I32 0), I64:$val)>;
+ (STORE8_I64 texternalsym:$off, (CONST_I32 0), 0, I64:$val)>;
def : Pat<(truncstorei16 I64:$val, (WebAssemblywrapper texternalsym:$off)),
- (STORE16_I64 texternalsym:$off, (CONST_I32 0), I64:$val)>;
+ (STORE16_I64 texternalsym:$off, (CONST_I32 0), 0, I64:$val)>;
def : Pat<(truncstorei32 I64:$val, (WebAssemblywrapper texternalsym:$off)),
- (STORE32_I64 texternalsym:$off, (CONST_I32 0), I64:$val)>;
+ (STORE32_I64 texternalsym:$off, (CONST_I32 0), 0, I64:$val)>;
let Defs = [ARGUMENTS] in {
-// Memory size.
-def MEMORY_SIZE_I32 : I<(outs I32:$dst), (ins),
- [(set I32:$dst, (int_wasm_memory_size))],
- "memory_size\t$dst">,
- Requires<[HasAddr32]>;
-def MEMORY_SIZE_I64 : I<(outs I64:$dst), (ins),
- [(set I64:$dst, (int_wasm_memory_size))],
- "memory_size\t$dst">,
- Requires<[HasAddr64]>;
+// Current memory size.
+def CURRENT_MEMORY_I32 : I<(outs I32:$dst), (ins),
+ [(set I32:$dst, (int_wasm_current_memory))],
+ "current_memory\t$dst">,
+ Requires<[HasAddr32]>;
+def CURRENT_MEMORY_I64 : I<(outs I64:$dst), (ins),
+ [(set I64:$dst, (int_wasm_current_memory))],
+ "current_memory\t$dst">,
+ Requires<[HasAddr64]>;
// Grow memory.
def GROW_MEMORY_I32 : I<(outs), (ins I32:$delta),
diff --git a/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp b/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
index b009a4e054cc..af53f3db967b 100644
--- a/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
@@ -16,9 +16,9 @@
//===----------------------------------------------------------------------===//
#include "WebAssembly.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
-#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/Debug.h"
@@ -61,12 +61,12 @@ bool WebAssemblyLowerBrUnless::runOnMachineFunction(MachineFunction &MF) {
auto &MRI = MF.getRegInfo();
for (auto &MBB : MF) {
- for (auto MII = MBB.begin(); MII != MBB.end(); ) {
+ for (auto MII = MBB.begin(); MII != MBB.end();) {
MachineInstr *MI = &*MII++;
if (MI->getOpcode() != WebAssembly::BR_UNLESS)
continue;
- unsigned Cond = MI->getOperand(0).getReg();
+ unsigned Cond = MI->getOperand(1).getReg();
bool Inverted = false;
// Attempt to invert the condition in place.
@@ -74,7 +74,7 @@ bool WebAssemblyLowerBrUnless::runOnMachineFunction(MachineFunction &MF) {
assert(MRI.hasOneDef(Cond));
MachineInstr *Def = MRI.getVRegDef(Cond);
switch (Def->getOpcode()) {
- using namespace WebAssembly;
+ using namespace WebAssembly;
case EQ_I32: Def->setDesc(TII.get(NE_I32)); Inverted = true; break;
case NE_I32: Def->setDesc(TII.get(EQ_I32)); Inverted = true; break;
case GT_S_I32: Def->setDesc(TII.get(LE_S_I32)); Inverted = true; break;
@@ -106,15 +106,10 @@ bool WebAssemblyLowerBrUnless::runOnMachineFunction(MachineFunction &MF) {
// If we weren't able to invert the condition in place, insert an
// expression to invert it.
if (!Inverted) {
- unsigned ZeroReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
- MFI.stackifyVReg(ZeroReg);
- BuildMI(MBB, MI, MI->getDebugLoc(), TII.get(WebAssembly::CONST_I32), ZeroReg)
- .addImm(0);
unsigned Tmp = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
MFI.stackifyVReg(Tmp);
- BuildMI(MBB, MI, MI->getDebugLoc(), TII.get(WebAssembly::EQ_I32), Tmp)
- .addReg(Cond)
- .addReg(ZeroReg);
+ BuildMI(MBB, MI, MI->getDebugLoc(), TII.get(WebAssembly::EQZ_I32), Tmp)
+ .addReg(Cond);
Cond = Tmp;
Inverted = true;
}
@@ -123,8 +118,8 @@ bool WebAssemblyLowerBrUnless::runOnMachineFunction(MachineFunction &MF) {
// delete the br_unless.
assert(Inverted);
BuildMI(MBB, MI, MI->getDebugLoc(), TII.get(WebAssembly::BR_IF))
- .addReg(Cond)
- .addOperand(MI->getOperand(1));
+ .addOperand(MI->getOperand(0))
+ .addReg(Cond);
MBB.erase(MI);
}
}
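When the comparison feeding the condition can't be inverted in place, the pass now materializes eqz(cond) and branches on that, relying on the identity that br_unless, taken on zero, equals br_if on the zero test. A minimal model of the equivalence:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t Cond = 0; Cond < 4; ++Cond) {
        bool BrUnlessTaken = (Cond == 0);                 // br_unless branches on zero
        bool BrIfOnEqzTaken = ((Cond == 0) ? 1 : 0) != 0; // br_if on eqz(cond)
        assert(BrUnlessTaken == BrIfOnEqzTaken);
      }
      return 0;
    }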
diff --git a/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h b/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h
index 6a60280900a9..89f607d84b71 100644
--- a/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h
+++ b/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h
@@ -39,18 +39,24 @@ class WebAssemblyFunctionInfo final : public MachineFunctionInfo {
/// - defined and used in LIFO order with other stack registers
BitVector VRegStackified;
- // One entry for each possible target reg. we expect it to be small.
- std::vector<unsigned> PhysRegs;
+ // A virtual register holding the pointer to the vararg buffer for vararg
+ // functions. It is created and set in TLI::LowerFormalArguments and read by
+ // TLI::LowerVASTART.
+ unsigned VarargVreg = -1U;
-public:
- explicit WebAssemblyFunctionInfo(MachineFunction &MF) : MF(MF) {
- PhysRegs.resize(WebAssembly::NUM_TARGET_REGS, -1U);
- }
+ public:
+ explicit WebAssemblyFunctionInfo(MachineFunction &MF) : MF(MF) {}
~WebAssemblyFunctionInfo() override;
void addParam(MVT VT) { Params.push_back(VT); }
const std::vector<MVT> &getParams() const { return Params; }
+ unsigned getVarargBufferVreg() const {
+ assert(VarargVreg != -1U && "Vararg vreg hasn't been set");
+ return VarargVreg;
+ }
+ void setVarargBufferVreg(unsigned Reg) { VarargVreg = Reg; }
+
static const unsigned UnusedReg = -1u;
void stackifyVReg(unsigned VReg) {
@@ -71,25 +77,15 @@ public:
WARegs[TargetRegisterInfo::virtReg2Index(VReg)] = WAReg;
}
unsigned getWAReg(unsigned Reg) const {
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
- assert(TargetRegisterInfo::virtReg2Index(Reg) < WARegs.size());
- return WARegs[TargetRegisterInfo::virtReg2Index(Reg)];
- }
- return PhysRegs[Reg];
- }
- // If new virtual registers are created after initWARegs has been called,
- // this function can be used to add WebAssembly register mappings for them.
- void addWAReg(unsigned VReg, unsigned WAReg) {
- assert(VReg = WARegs.size());
- WARegs.push_back(WAReg);
+ assert(TargetRegisterInfo::virtReg2Index(Reg) < WARegs.size());
+ return WARegs[TargetRegisterInfo::virtReg2Index(Reg)];
}
- void addPReg(unsigned PReg, unsigned WAReg) {
- assert(PReg < WebAssembly::NUM_TARGET_REGS);
- assert(WAReg < -1U);
- PhysRegs[PReg] = WAReg;
+ // For a given stackified WAReg, return the id number to print with push/pop.
+ static unsigned getWARegStackId(unsigned Reg) {
+ assert(Reg & INT32_MIN);
+ return Reg & INT32_MAX;
}
- const std::vector<unsigned> &getPhysRegs() const { return PhysRegs; }
};
} // end namespace llvm
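
The new getWARegStackId pins down the WAReg encoding this header uses: the sign bit marks a stackified value and the low 31 bits carry its push/pop id, while -1u (UnusedReg) is a sentinel. A minimal sketch of that encoding, inferred from this header rather than from any documented interface:

    #include <cstdint>

    // Stackified WARegs set the sign bit; the low 31 bits are the stack id.
    // UnusedReg (-1u) has all bits set, so test for it before decoding.
    static unsigned encodeStackified(unsigned StackId) {
      return INT32_MIN | StackId;
    }
    static bool isStackifiedWAReg(unsigned WAReg) {
      return (WAReg & INT32_MIN) != 0;
    }
    static unsigned stackIdOf(unsigned WAReg) { return WAReg & INT32_MAX; }
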
diff --git a/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp b/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
new file mode 100644
index 000000000000..473de7ddae7e
--- /dev/null
+++ b/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
@@ -0,0 +1,105 @@
+//===--- WebAssemblyOptimizeLiveIntervals.cpp - LiveInterval processing ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Optimize LiveIntervals for use in a post-RA context.
+///
+/// LiveIntervals normally runs before register allocation when the code is
+/// only recently lowered out of SSA form, so it's uncommon for registers to
+/// have multiple defs, and when they do, the defs are usually closely related.
+/// Later, after coalescing, tail duplication, and other optimizations, it's
+/// more common to see registers with multiple unrelated defs. This pass
+/// updates LiveIntervalAnalysis to distribute the value numbers across separate
+/// LiveIntervals.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-optimize-live-intervals"
+
+namespace {
+class WebAssemblyOptimizeLiveIntervals final : public MachineFunctionPass {
+ const char *getPassName() const override {
+ return "WebAssembly Optimize Live Intervals";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addPreserved<LiveIntervals>();
+ AU.addPreservedID(LiveVariablesID);
+ AU.addPreservedID(MachineDominatorsID);
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyOptimizeLiveIntervals() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyOptimizeLiveIntervals::ID = 0;
+FunctionPass *llvm::createWebAssemblyOptimizeLiveIntervals() {
+ return new WebAssemblyOptimizeLiveIntervals();
+}
+
+bool WebAssemblyOptimizeLiveIntervals::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "********** Optimize LiveIntervals **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ LiveIntervals &LIS = getAnalysis<LiveIntervals>();
+
+ // We don't preserve SSA form.
+ MRI.leaveSSA();
+
+ assert(MRI.tracksLiveness() &&
+ "OptimizeLiveIntervals expects liveness");
+
+ // Split multiple-VN LiveIntervals into multiple LiveIntervals.
+ SmallVector<LiveInterval*, 4> SplitLIs;
+ for (unsigned i = 0, e = MRI.getNumVirtRegs(); i < e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ if (MRI.reg_nodbg_empty(Reg))
+ continue;
+
+ LIS.splitSeparateComponents(LIS.getInterval(Reg), SplitLIs);
+ SplitLIs.clear();
+ }
+
+ // In PrepareForLiveIntervals, we conservatively inserted IMPLICIT_DEF
+ // instructions to satisfy LiveIntervals' requirement that all uses be
+ // dominated by defs. Now that LiveIntervals has computed which of these
+ // defs are actually needed and which are dead, remove the dead ones.
+ for (auto MII = MF.begin()->begin(), MIE = MF.begin()->end(); MII != MIE; ) {
+ MachineInstr *MI = &*MII++;
+ if (MI->isImplicitDef() && MI->getOperand(0).isDead()) {
+ LiveInterval &LI = LIS.getInterval(MI->getOperand(0).getReg());
+ LIS.removeVRegDefAt(LI, LIS.getInstructionIndex(*MI).getRegSlot());
+ LIS.RemoveMachineInstrFromMaps(*MI);
+ MI->eraseFromParent();
+ }
+ }
+
+ return false;
+}
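
The split loop above is the heart of the pass. As a loose analogy only (hypothetical C++, not LLVM API): a register whose live interval has two disconnected components behaves like one variable reused for two unrelated lifetimes, and splitting renames the second lifetime:

    #include <cstdio>

    static void use(int V) { std::printf("%d\n", V); }

    int main() {
      int T = 1 + 2; // lifetime #1
      use(T);
      T = 3 * 4;     // lifetime #2, unrelated to #1, reusing the same name
      use(T);
      // splitSeparateComponents performs the analogous renaming on vregs:
      // the disconnected second component of T's interval would get a fresh
      // vreg, leaving each LiveInterval a single connected component.
      return 0;
    }
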
diff --git a/lib/Target/WebAssembly/WebAssemblyPEI.cpp b/lib/Target/WebAssembly/WebAssemblyPEI.cpp
deleted file mode 100644
index d570d4266110..000000000000
--- a/lib/Target/WebAssembly/WebAssemblyPEI.cpp
+++ /dev/null
@@ -1,1066 +0,0 @@
-//===-- WebAssemblyPEI.cpp - Insert Prolog/Epilog code in function --===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass is responsible for finalizing the functions frame layout, saving
-// callee saved registers, and for emitting prolog & epilog code for the
-// function.
-//
-// This pass must be run after register allocation. After this pass is
-// executed, it is illegal to construct MO_FrameIndex operands.
-//
-// This is a copy of lib/CodeGen/PrologEpilogInserter.cpp except that it does
-// not assert that all virtual registers are gone (because WebAssembly currently
-// uses virtual rather than physical registers), and only runs
-// MRI.clearVirtRegs() if scavenging happened (which it never does). It also
-// uses a different class name so it can be registered via INITIALIZE_PASS.
-// It is otherwise unmodified, so any changes to the target-independent PEI
-// can be easily applied.
-//===----------------------------------------------------------------------===//
-
-#include "llvm/ADT/IndexedMap.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SetVector.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/CodeGen/StackProtector.h"
-#include "llvm/CodeGen/WinEHFuncInfo.h"
-#include "llvm/IR/DiagnosticInfo.h"
-#include "llvm/IR/InlineAsm.h"
-#include "llvm/IR/LLVMContext.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetFrameLowering.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
-#include <climits>
-
-using namespace llvm;
-
-#define DEBUG_TYPE "pei"
-namespace llvm {
-void initializeWasmPEIPass(PassRegistry&);
-}
-namespace {
-class WasmPEI : public MachineFunctionPass {
-public:
- static char ID;
- WasmPEI() : MachineFunctionPass(ID) {
- initializeWasmPEIPass(*PassRegistry::getPassRegistry());
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const override;
-
- /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
- /// frame indexes with appropriate references.
- ///
- bool runOnMachineFunction(MachineFunction &Fn) override;
-
-private:
- RegScavenger *RS;
-
- // MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
- // stack frame indexes.
- unsigned MinCSFrameIndex, MaxCSFrameIndex;
-
- // Save and Restore blocks of the current function. Typically there is a
- // single save block, unless Windows EH funclets are involved.
- SmallVector<MachineBasicBlock *, 1> SaveBlocks;
- SmallVector<MachineBasicBlock *, 4> RestoreBlocks;
-
- // Flag to control whether to use the register scavenger to resolve
- // frame index materialization registers. Set according to
- // TRI->requiresFrameIndexScavenging() for the current function.
- bool FrameIndexVirtualScavenging;
-
- void calculateSets(MachineFunction &Fn);
- void calculateCallsInformation(MachineFunction &Fn);
- void assignCalleeSavedSpillSlots(MachineFunction &Fn,
- const BitVector &SavedRegs);
- void insertCSRSpillsAndRestores(MachineFunction &Fn);
- void calculateFrameObjectOffsets(MachineFunction &Fn);
- void replaceFrameIndices(MachineFunction &Fn);
- void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
- int &SPAdj);
- void scavengeFrameVirtualRegs(MachineFunction &Fn);
- void insertPrologEpilogCode(MachineFunction &Fn);
-};
-} // namespace
-
-char WasmPEI::ID = 0;
-
-namespace llvm {
-FunctionPass *createWebAssemblyPEI() {
- return new WasmPEI();
-}
-}
-
-static cl::opt<unsigned>
-WarnStackSize("wasm-warn-stack-size", cl::Hidden, cl::init((unsigned)-1),
- cl::desc("Warn for stack size bigger than the given"
- " number"));
-
-INITIALIZE_PASS_BEGIN(WasmPEI, "wasmprologepilog",
- "Wasm Prologue/Epilogue Insertion", false, false)
-INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
-INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
-INITIALIZE_PASS_DEPENDENCY(StackProtector)
-INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_END(WasmPEI, "wasmprologepilog",
- "Wasm Prologue/Epilogue Insertion & Frame Finalization",
- false, false)
-
-STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged");
-STATISTIC(NumBytesStackSpace,
- "Number of bytes used for stack in all functions");
-
-void WasmPEI::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- AU.addPreserved<MachineLoopInfo>();
- AU.addPreserved<MachineDominatorTree>();
- AU.addRequired<StackProtector>();
- AU.addRequired<TargetPassConfig>();
- MachineFunctionPass::getAnalysisUsage(AU);
-}
-
-/// Compute the set of return blocks
-void WasmPEI::calculateSets(MachineFunction &Fn) {
- const MachineFrameInfo *MFI = Fn.getFrameInfo();
-
- // Even when we do not change any CSR, we still want to insert the
- // prologue and epilogue of the function.
- // So set the save points for those.
-
- // Use the points found by shrink-wrapping, if any.
- if (MFI->getSavePoint()) {
- SaveBlocks.push_back(MFI->getSavePoint());
- assert(MFI->getRestorePoint() && "Both restore and save must be set");
- MachineBasicBlock *RestoreBlock = MFI->getRestorePoint();
- // If RestoreBlock does not have any successor and is not a return block
- // then the end point is unreachable and we do not need to insert any
- // epilogue.
- if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
- RestoreBlocks.push_back(RestoreBlock);
- return;
- }
-
- // Save refs to entry and return blocks.
- SaveBlocks.push_back(&Fn.front());
- for (MachineBasicBlock &MBB : Fn) {
- if (MBB.isEHFuncletEntry())
- SaveBlocks.push_back(&MBB);
- if (MBB.isReturnBlock())
- RestoreBlocks.push_back(&MBB);
- }
-}
-
-/// StackObjSet - A set of stack object indexes
-typedef SmallSetVector<int, 8> StackObjSet;
-
-/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
-/// frame indexes with appropriate references.
-///
-bool WasmPEI::runOnMachineFunction(MachineFunction &Fn) {
- const Function* F = Fn.getFunction();
- const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
- const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
-
- // LOCALMOD: assert removed from target-independent PEI
- //assert(!Fn.getRegInfo().getNumVirtRegs() && "Regalloc must assign all vregs");
-
- RS = TRI->requiresRegisterScavenging(Fn) ? new RegScavenger() : nullptr;
- FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(Fn);
-
- // Calculate the MaxCallFrameSize and AdjustsStack variables for the
- // function's frame information. Also eliminates call frame pseudo
- // instructions.
- calculateCallsInformation(Fn);
-
- // Determine which of the registers in the callee save list should be saved.
- BitVector SavedRegs;
- TFI->determineCalleeSaves(Fn, SavedRegs, RS);
-
- // Insert spill code for any callee saved registers that are modified.
- assignCalleeSavedSpillSlots(Fn, SavedRegs);
-
- // Determine placement of CSR spill/restore code:
- // place all spills in the entry block, all restores in return blocks.
- calculateSets(Fn);
-
- // Add the code to save and restore the callee saved registers.
- if (!F->hasFnAttribute(Attribute::Naked))
- insertCSRSpillsAndRestores(Fn);
-
- // Allow the target machine to make final modifications to the function
- // before the frame layout is finalized.
- TFI->processFunctionBeforeFrameFinalized(Fn, RS);
-
- // Calculate actual frame offsets for all abstract stack objects...
- calculateFrameObjectOffsets(Fn);
-
- // Add prolog and epilog code to the function. This function is required
- // to align the stack frame as necessary for any stack variables or
- // called functions. Because of this, calculateCalleeSavedRegisters()
- // must be called before this function in order to set the AdjustsStack
- // and MaxCallFrameSize variables.
- if (!F->hasFnAttribute(Attribute::Naked))
- insertPrologEpilogCode(Fn);
-
- // Replace all MO_FrameIndex operands with physical register references
- // and actual offsets.
- //
- replaceFrameIndices(Fn);
-
- // If register scavenging is needed, as we've enabled doing it as a
- // post-pass, scavenge the virtual registers that frame index elimination
- // inserted.
- if (TRI->requiresRegisterScavenging(Fn) && FrameIndexVirtualScavenging) {
- scavengeFrameVirtualRegs(Fn);
- // Clear any vregs created by virtual scavenging.
- // LOCALMOD: made this call conditional with scavengeFrameVirtualregs()
- Fn.getRegInfo().clearVirtRegs();
- }
-
- // Warn on stack size when we exceeds the given limit.
- MachineFrameInfo *MFI = Fn.getFrameInfo();
- uint64_t StackSize = MFI->getStackSize();
- if (WarnStackSize.getNumOccurrences() > 0 && WarnStackSize < StackSize) {
- DiagnosticInfoStackSize DiagStackSize(*F, StackSize);
- F->getContext().diagnose(DiagStackSize);
- }
-
- delete RS;
- SaveBlocks.clear();
- RestoreBlocks.clear();
- return true;
-}
-
-/// calculateCallsInformation - Calculate the MaxCallFrameSize and AdjustsStack
-/// variables for the function's frame information and eliminate call frame
-/// pseudo instructions.
-void WasmPEI::calculateCallsInformation(MachineFunction &Fn) {
- const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
- const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
- MachineFrameInfo *MFI = Fn.getFrameInfo();
-
- unsigned MaxCallFrameSize = 0;
- bool AdjustsStack = MFI->adjustsStack();
-
- // Get the function call frame set-up and tear-down instruction opcode
- unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
- unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
-
- // Early exit for targets which have no call frame setup/destroy pseudo
- // instructions.
- if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
- return;
-
- std::vector<MachineBasicBlock::iterator> FrameSDOps;
- for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB)
- for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
- if (I->getOpcode() == FrameSetupOpcode ||
- I->getOpcode() == FrameDestroyOpcode) {
- assert(I->getNumOperands() >= 1 && "Call Frame Setup/Destroy Pseudo"
- " instructions should have a single immediate argument!");
- unsigned Size = I->getOperand(0).getImm();
- if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
- AdjustsStack = true;
- FrameSDOps.push_back(I);
- } else if (I->isInlineAsm()) {
- // Some inline asm's need a stack frame, as indicated by operand 1.
- unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
- if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
- AdjustsStack = true;
- }
-
- MFI->setAdjustsStack(AdjustsStack);
- MFI->setMaxCallFrameSize(MaxCallFrameSize);
-
- for (std::vector<MachineBasicBlock::iterator>::iterator
- i = FrameSDOps.begin(), e = FrameSDOps.end(); i != e; ++i) {
- MachineBasicBlock::iterator I = *i;
-
- // If call frames are not being included as part of the stack frame, and
- // the target doesn't indicate otherwise, remove the call frame pseudos
- // here. The sub/add sp instruction pairs are still inserted, but we don't
- // need to track the SP adjustment for frame index elimination.
- if (TFI->canSimplifyCallFramePseudos(Fn))
- TFI->eliminateCallFramePseudoInstr(Fn, *I->getParent(), I);
- }
-}
-
-void WasmPEI::assignCalleeSavedSpillSlots(MachineFunction &F,
- const BitVector &SavedRegs) {
- // These are used to keep track the callee-save area. Initialize them.
- MinCSFrameIndex = INT_MAX;
- MaxCSFrameIndex = 0;
-
- if (SavedRegs.empty())
- return;
-
- const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
- const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&F);
-
- std::vector<CalleeSavedInfo> CSI;
- for (unsigned i = 0; CSRegs[i]; ++i) {
- unsigned Reg = CSRegs[i];
- if (SavedRegs.test(Reg))
- CSI.push_back(CalleeSavedInfo(Reg));
- }
-
- const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
- MachineFrameInfo *MFI = F.getFrameInfo();
- if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI)) {
- // If target doesn't implement this, use generic code.
-
- if (CSI.empty())
- return; // Early exit if no callee saved registers are modified!
-
- unsigned NumFixedSpillSlots;
- const TargetFrameLowering::SpillSlot *FixedSpillSlots =
- TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);
-
- // Now that we know which registers need to be saved and restored, allocate
- // stack slots for them.
- for (std::vector<CalleeSavedInfo>::iterator I = CSI.begin(), E = CSI.end();
- I != E; ++I) {
- unsigned Reg = I->getReg();
- const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
-
- int FrameIdx;
- if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
- I->setFrameIdx(FrameIdx);
- continue;
- }
-
- // Check to see if this physreg must be spilled to a particular stack slot
- // on this target.
- const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
- while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
- FixedSlot->Reg != Reg)
- ++FixedSlot;
-
- if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
- // Nope, just spill it anywhere convenient.
- unsigned Align = RC->getAlignment();
- unsigned StackAlign = TFI->getStackAlignment();
-
- // We may not be able to satisfy the desired alignment specification of
- // the TargetRegisterClass if the stack alignment is smaller. Use the
- // min.
- Align = std::min(Align, StackAlign);
- FrameIdx = MFI->CreateStackObject(RC->getSize(), Align, true);
- if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
- if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
- } else {
- // Spill it to the stack where we must.
- FrameIdx =
- MFI->CreateFixedSpillStackObject(RC->getSize(), FixedSlot->Offset);
- }
-
- I->setFrameIdx(FrameIdx);
- }
- }
-
- MFI->setCalleeSavedInfo(CSI);
-}
-
-/// Helper function to update the liveness information for the callee-saved
-/// registers.
-static void updateLiveness(MachineFunction &MF) {
- MachineFrameInfo *MFI = MF.getFrameInfo();
- // Visited will contain all the basic blocks that are in the region
- // where the callee saved registers are alive:
- // - Anything that is not Save or Restore -> LiveThrough.
- // - Save -> LiveIn.
- // - Restore -> LiveOut.
- // The live-out is not attached to the block, so no need to keep
- // Restore in this set.
- SmallPtrSet<MachineBasicBlock *, 8> Visited;
- SmallVector<MachineBasicBlock *, 8> WorkList;
- MachineBasicBlock *Entry = &MF.front();
- MachineBasicBlock *Save = MFI->getSavePoint();
-
- if (!Save)
- Save = Entry;
-
- if (Entry != Save) {
- WorkList.push_back(Entry);
- Visited.insert(Entry);
- }
- Visited.insert(Save);
-
- MachineBasicBlock *Restore = MFI->getRestorePoint();
- if (Restore)
- // By construction Restore cannot be visited, otherwise it
- // means there exists a path to Restore that does not go
- // through Save.
- WorkList.push_back(Restore);
-
- while (!WorkList.empty()) {
- const MachineBasicBlock *CurBB = WorkList.pop_back_val();
- // By construction, the region that is after the save point is
- // dominated by the Save and post-dominated by the Restore.
- if (CurBB == Save && Save != Restore)
- continue;
- // Enqueue all the successors not already visited.
- // Those are by construction either before Save or after Restore.
- for (MachineBasicBlock *SuccBB : CurBB->successors())
- if (Visited.insert(SuccBB).second)
- WorkList.push_back(SuccBB);
- }
-
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
-
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- for (MachineBasicBlock *MBB : Visited) {
- MCPhysReg Reg = CSI[i].getReg();
- // Add the callee-saved register as live-in.
- // It's killed at the spill.
- if (!MBB->isLiveIn(Reg))
- MBB->addLiveIn(Reg);
- }
- }
-}
-
-/// insertCSRSpillsAndRestores - Insert spill and restore code for
-/// callee saved registers used in the function.
-///
-void WasmPEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
- // Get callee saved register information.
- MachineFrameInfo *MFI = Fn.getFrameInfo();
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
-
- MFI->setCalleeSavedInfoValid(true);
-
- // Early exit if no callee saved registers are modified!
- if (CSI.empty())
- return;
-
- const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
- const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
- const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
- MachineBasicBlock::iterator I;
-
- // Spill using target interface.
- for (MachineBasicBlock *SaveBlock : SaveBlocks) {
- I = SaveBlock->begin();
- if (!TFI->spillCalleeSavedRegisters(*SaveBlock, I, CSI, TRI)) {
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- // Insert the spill to the stack frame.
- unsigned Reg = CSI[i].getReg();
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.storeRegToStackSlot(*SaveBlock, I, Reg, true, CSI[i].getFrameIdx(),
- RC, TRI);
- }
- }
- // Update the live-in information of all the blocks up to the save point.
- updateLiveness(Fn);
- }
-
- // Restore using target interface.
- for (MachineBasicBlock *MBB : RestoreBlocks) {
- I = MBB->end();
-
- // Skip over all terminator instructions, which are part of the return
- // sequence.
- MachineBasicBlock::iterator I2 = I;
- while (I2 != MBB->begin() && (--I2)->isTerminator())
- I = I2;
-
- bool AtStart = I == MBB->begin();
- MachineBasicBlock::iterator BeforeI = I;
- if (!AtStart)
- --BeforeI;
-
- // Restore all registers immediately before the return and any
- // terminators that precede it.
- if (!TFI->restoreCalleeSavedRegisters(*MBB, I, CSI, TRI)) {
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.loadRegFromStackSlot(*MBB, I, Reg, CSI[i].getFrameIdx(), RC, TRI);
- assert(I != MBB->begin() &&
- "loadRegFromStackSlot didn't insert any code!");
- // Insert in reverse order. loadRegFromStackSlot can insert
- // multiple instructions.
- if (AtStart)
- I = MBB->begin();
- else {
- I = BeforeI;
- ++I;
- }
- }
- }
- }
-}
-
-/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
-static inline void
-AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx,
- bool StackGrowsDown, int64_t &Offset,
- unsigned &MaxAlign, unsigned Skew) {
- // If the stack grows down, add the object size to find the lowest address.
- if (StackGrowsDown)
- Offset += MFI->getObjectSize(FrameIdx);
-
- unsigned Align = MFI->getObjectAlignment(FrameIdx);
-
- // If the alignment of this object is greater than that of the stack, then
- // increase the stack alignment to match.
- MaxAlign = std::max(MaxAlign, Align);
-
- // Adjust to alignment boundary.
- Offset = RoundUpToAlignment(Offset, Align, Skew);
-
- if (StackGrowsDown) {
- DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
- MFI->setObjectOffset(FrameIdx, -Offset); // Set the computed offset
- } else {
- DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset << "]\n");
- MFI->setObjectOffset(FrameIdx, Offset);
- Offset += MFI->getObjectSize(FrameIdx);
- }
-}
-
-/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
-/// those required to be close to the Stack Protector) to stack offsets.
-static void
-AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
- SmallSet<int, 16> &ProtectedObjs,
- MachineFrameInfo *MFI, bool StackGrowsDown,
- int64_t &Offset, unsigned &MaxAlign, unsigned Skew) {
-
- for (StackObjSet::const_iterator I = UnassignedObjs.begin(),
- E = UnassignedObjs.end(); I != E; ++I) {
- int i = *I;
- AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
- ProtectedObjs.insert(i);
- }
-}
-
-/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
-/// abstract stack objects.
-///
-void WasmPEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
- const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
- StackProtector *SP = &getAnalysis<StackProtector>();
-
- bool StackGrowsDown =
- TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
-
- // Loop over all of the stack objects, assigning sequential addresses...
- MachineFrameInfo *MFI = Fn.getFrameInfo();
-
- // Start at the beginning of the local area.
- // The Offset is the distance from the stack top in the direction
- // of stack growth -- so it's always nonnegative.
- int LocalAreaOffset = TFI.getOffsetOfLocalArea();
- if (StackGrowsDown)
- LocalAreaOffset = -LocalAreaOffset;
- assert(LocalAreaOffset >= 0
- && "Local area offset should be in direction of stack growth");
- int64_t Offset = LocalAreaOffset;
-
- // Skew to be applied to alignment.
- unsigned Skew = TFI.getStackAlignmentSkew(Fn);
-
- // If there are fixed sized objects that are preallocated in the local area,
- // non-fixed objects can't be allocated right at the start of local area.
- // We currently don't support filling in holes in between fixed sized
- // objects, so we adjust 'Offset' to point to the end of last fixed sized
- // preallocated object.
- for (int i = MFI->getObjectIndexBegin(); i != 0; ++i) {
- int64_t FixedOff;
- if (StackGrowsDown) {
- // The maximum distance from the stack pointer is at lower address of
- // the object -- which is given by offset. For down growing stack
- // the offset is negative, so we negate the offset to get the distance.
- FixedOff = -MFI->getObjectOffset(i);
- } else {
- // The maximum distance from the start pointer is at the upper
- // address of the object.
- FixedOff = MFI->getObjectOffset(i) + MFI->getObjectSize(i);
- }
- if (FixedOff > Offset) Offset = FixedOff;
- }
-
- // First assign frame offsets to stack objects that are used to spill
- // callee saved registers.
- if (StackGrowsDown) {
- for (unsigned i = MinCSFrameIndex; i <= MaxCSFrameIndex; ++i) {
- // If the stack grows down, we need to add the size to find the lowest
- // address of the object.
- Offset += MFI->getObjectSize(i);
-
- unsigned Align = MFI->getObjectAlignment(i);
- // Adjust to alignment boundary
- Offset = RoundUpToAlignment(Offset, Align, Skew);
-
- MFI->setObjectOffset(i, -Offset); // Set the computed offset
- }
- } else {
- int MaxCSFI = MaxCSFrameIndex, MinCSFI = MinCSFrameIndex;
- for (int i = MaxCSFI; i >= MinCSFI ; --i) {
- unsigned Align = MFI->getObjectAlignment(i);
- // Adjust to alignment boundary
- Offset = RoundUpToAlignment(Offset, Align, Skew);
-
- MFI->setObjectOffset(i, Offset);
- Offset += MFI->getObjectSize(i);
- }
- }
-
- unsigned MaxAlign = MFI->getMaxAlignment();
-
- // Make sure the special register scavenging spill slot is closest to the
- // incoming stack pointer if a frame pointer is required and is closer
- // to the incoming rather than the final stack pointer.
- const TargetRegisterInfo *RegInfo = Fn.getSubtarget().getRegisterInfo();
- bool EarlyScavengingSlots = (TFI.hasFP(Fn) &&
- TFI.isFPCloseToIncomingSP() &&
- RegInfo->useFPForScavengingIndex(Fn) &&
- !RegInfo->needsStackRealignment(Fn));
- if (RS && EarlyScavengingSlots) {
- SmallVector<int, 2> SFIs;
- RS->getScavengingFrameIndices(SFIs);
- for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
- IE = SFIs.end(); I != IE; ++I)
- AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign, Skew);
- }
-
- // FIXME: Once this is working, then enable flag will change to a target
- // check for whether the frame is large enough to want to use virtual
- // frame index registers. Functions which don't want/need this optimization
- // will continue to use the existing code path.
- if (MFI->getUseLocalStackAllocationBlock()) {
- unsigned Align = MFI->getLocalFrameMaxAlign();
-
- // Adjust to alignment boundary.
- Offset = RoundUpToAlignment(Offset, Align, Skew);
-
- DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
-
- // Resolve offsets for objects in the local block.
- for (unsigned i = 0, e = MFI->getLocalFrameObjectCount(); i != e; ++i) {
- std::pair<int, int64_t> Entry = MFI->getLocalFrameObjectMap(i);
- int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
- DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" <<
- FIOffset << "]\n");
- MFI->setObjectOffset(Entry.first, FIOffset);
- }
- // Allocate the local block
- Offset += MFI->getLocalFrameSize();
-
- MaxAlign = std::max(Align, MaxAlign);
- }
-
- // Make sure that the stack protector comes before the local variables on the
- // stack.
- SmallSet<int, 16> ProtectedObjs;
- if (MFI->getStackProtectorIndex() >= 0) {
- StackObjSet LargeArrayObjs;
- StackObjSet SmallArrayObjs;
- StackObjSet AddrOfObjs;
-
- AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), StackGrowsDown,
- Offset, MaxAlign, Skew);
-
- // Assign large stack objects first.
- for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
- if (MFI->isObjectPreAllocated(i) &&
- MFI->getUseLocalStackAllocationBlock())
- continue;
- if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
- continue;
- if (RS && RS->isScavengingFrameIndex((int)i))
- continue;
- if (MFI->isDeadObjectIndex(i))
- continue;
- if (MFI->getStackProtectorIndex() == (int)i)
- continue;
-
- switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
- case StackProtector::SSPLK_None:
- continue;
- case StackProtector::SSPLK_SmallArray:
- SmallArrayObjs.insert(i);
- continue;
- case StackProtector::SSPLK_AddrOf:
- AddrOfObjs.insert(i);
- continue;
- case StackProtector::SSPLK_LargeArray:
- LargeArrayObjs.insert(i);
- continue;
- }
- llvm_unreachable("Unexpected SSPLayoutKind.");
- }
-
- AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
- Offset, MaxAlign, Skew);
- AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
- Offset, MaxAlign, Skew);
- AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
- Offset, MaxAlign, Skew);
- }
-
- // Then assign frame offsets to stack objects that are not used to spill
- // callee saved registers.
- for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
- if (MFI->isObjectPreAllocated(i) &&
- MFI->getUseLocalStackAllocationBlock())
- continue;
- if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
- continue;
- if (RS && RS->isScavengingFrameIndex((int)i))
- continue;
- if (MFI->isDeadObjectIndex(i))
- continue;
- if (MFI->getStackProtectorIndex() == (int)i)
- continue;
- if (ProtectedObjs.count(i))
- continue;
-
- AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
- }
-
- // Make sure the special register scavenging spill slot is closest to the
- // stack pointer.
- if (RS && !EarlyScavengingSlots) {
- SmallVector<int, 2> SFIs;
- RS->getScavengingFrameIndices(SFIs);
- for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
- IE = SFIs.end(); I != IE; ++I)
- AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign, Skew);
- }
-
- if (!TFI.targetHandlesStackFrameRounding()) {
- // If we have reserved argument space for call sites in the function
- // immediately on entry to the current function, count it as part of the
- // overall stack size.
- if (MFI->adjustsStack() && TFI.hasReservedCallFrame(Fn))
- Offset += MFI->getMaxCallFrameSize();
-
- // Round up the size to a multiple of the alignment. If the function has
- // any calls or alloca's, align to the target's StackAlignment value to
- // ensure that the callee's frame or the alloca data is suitably aligned;
- // otherwise, for leaf functions, align to the TransientStackAlignment
- // value.
- unsigned StackAlign;
- if (MFI->adjustsStack() || MFI->hasVarSizedObjects() ||
- (RegInfo->needsStackRealignment(Fn) && MFI->getObjectIndexEnd() != 0))
- StackAlign = TFI.getStackAlignment();
- else
- StackAlign = TFI.getTransientStackAlignment();
-
- // If the frame pointer is eliminated, all frame offsets will be relative to
- // SP not FP. Align to MaxAlign so this works.
- StackAlign = std::max(StackAlign, MaxAlign);
- Offset = RoundUpToAlignment(Offset, StackAlign, Skew);
- }
-
- // Update frame info to pretend that this is part of the stack...
- int64_t StackSize = Offset - LocalAreaOffset;
- MFI->setStackSize(StackSize);
- NumBytesStackSpace += StackSize;
-}
-
-/// insertPrologEpilogCode - Scan the function for modified callee saved
-/// registers, insert spill code for these callee saved registers, then add
-/// prolog and epilog code to the function.
-///
-void WasmPEI::insertPrologEpilogCode(MachineFunction &Fn) {
- const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
-
- // Add prologue to the function...
- for (MachineBasicBlock *SaveBlock : SaveBlocks)
- TFI.emitPrologue(Fn, *SaveBlock);
-
- // Add epilogue to restore the callee-save registers in each exiting block.
- for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
- TFI.emitEpilogue(Fn, *RestoreBlock);
-
- for (MachineBasicBlock *SaveBlock : SaveBlocks)
- TFI.inlineStackProbe(Fn, *SaveBlock);
-
- // Emit additional code that is required to support segmented stacks, if
- // we've been asked for it. This, when linked with a runtime with support
- // for segmented stacks (libgcc is one), will result in allocating stack
- // space in small chunks instead of one large contiguous block.
- if (Fn.shouldSplitStack()) {
- for (MachineBasicBlock *SaveBlock : SaveBlocks)
- TFI.adjustForSegmentedStacks(Fn, *SaveBlock);
- }
-
- // Emit additional code that is required to explicitly handle the stack in
- // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
- // approach is rather similar to that of Segmented Stacks, but it uses a
- // different conditional check and another BIF for allocating more stack
- // space.
- if (Fn.getFunction()->getCallingConv() == CallingConv::HiPE)
- for (MachineBasicBlock *SaveBlock : SaveBlocks)
- TFI.adjustForHiPEPrologue(Fn, *SaveBlock);
-}
-
-/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
-/// register references and actual offsets.
-///
-void WasmPEI::replaceFrameIndices(MachineFunction &Fn) {
- const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
- if (!TFI.needsFrameIndexResolution(Fn)) return;
-
- // Store SPAdj at exit of a basic block.
- SmallVector<int, 8> SPState;
- SPState.resize(Fn.getNumBlockIDs());
- SmallPtrSet<MachineBasicBlock*, 8> Reachable;
-
- // Iterate over the reachable blocks in DFS order.
- for (auto DFI = df_ext_begin(&Fn, Reachable), DFE = df_ext_end(&Fn, Reachable);
- DFI != DFE; ++DFI) {
- int SPAdj = 0;
- // Check the exit state of the DFS stack predecessor.
- if (DFI.getPathLength() >= 2) {
- MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
- assert(Reachable.count(StackPred) &&
- "DFS stack predecessor is already visited.\n");
- SPAdj = SPState[StackPred->getNumber()];
- }
- MachineBasicBlock *BB = *DFI;
- replaceFrameIndices(BB, Fn, SPAdj);
- SPState[BB->getNumber()] = SPAdj;
- }
-
- // Handle the unreachable blocks.
- for (auto &BB : Fn) {
- if (Reachable.count(&BB))
- // Already handled in DFS traversal.
- continue;
- int SPAdj = 0;
- replaceFrameIndices(&BB, Fn, SPAdj);
- }
-}
-
-void WasmPEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
- int &SPAdj) {
- assert(Fn.getSubtarget().getRegisterInfo() &&
- "getRegisterInfo() must be implemented!");
- const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
- const TargetRegisterInfo &TRI = *Fn.getSubtarget().getRegisterInfo();
- const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
- unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
- unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
-
- if (RS && !FrameIndexVirtualScavenging) RS->enterBasicBlock(BB);
-
- bool InsideCallSequence = false;
-
- for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
-
- if (I->getOpcode() == FrameSetupOpcode ||
- I->getOpcode() == FrameDestroyOpcode) {
- InsideCallSequence = (I->getOpcode() == FrameSetupOpcode);
- SPAdj += TII.getSPAdjust(I);
-
- MachineBasicBlock::iterator PrevI = BB->end();
- if (I != BB->begin()) PrevI = std::prev(I);
- TFI->eliminateCallFramePseudoInstr(Fn, *BB, I);
-
- // Visit the instructions created by eliminateCallFramePseudoInstr().
- if (PrevI == BB->end())
- I = BB->begin(); // The replaced instr was the first in the block.
- else
- I = std::next(PrevI);
- continue;
- }
-
- MachineInstr *MI = I;
- bool DoIncr = true;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- if (!MI->getOperand(i).isFI())
- continue;
-
- // Frame indices in debug values are encoded in a target independent
- // way with simply the frame index and offset rather than any
- // target-specific addressing mode.
- if (MI->isDebugValue()) {
- assert(i == 0 && "Frame indices can only appear as the first "
- "operand of a DBG_VALUE machine instruction");
- unsigned Reg;
- MachineOperand &Offset = MI->getOperand(1);
- Offset.setImm(Offset.getImm() +
- TFI->getFrameIndexReference(
- Fn, MI->getOperand(0).getIndex(), Reg));
- MI->getOperand(0).ChangeToRegister(Reg, false /*isDef*/);
- continue;
- }
-
- // TODO: This code should be commoned with the code for
- // PATCHPOINT. There's no good reason for the difference in
- // implementation other than historical accident. The only
- // remaining difference is the unconditional use of the stack
- // pointer as the base register.
- if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
- assert((!MI->isDebugValue() || i == 0) &&
- "Frame indicies can only appear as the first operand of a "
- "DBG_VALUE machine instruction");
- unsigned Reg;
- MachineOperand &Offset = MI->getOperand(i + 1);
- const unsigned refOffset =
- TFI->getFrameIndexReferenceFromSP(Fn, MI->getOperand(i).getIndex(),
- Reg);
-
- Offset.setImm(Offset.getImm() + refOffset);
- MI->getOperand(i).ChangeToRegister(Reg, false /*isDef*/);
- continue;
- }
-
- // Some instructions (e.g. inline asm instructions) can have
- // multiple frame indices and/or cause eliminateFrameIndex
- // to insert more than one instruction. We need the register
- // scavenger to go through all of these instructions so that
- // it can update its register information. We keep the
- // iterator at the point before insertion so that we can
- // revisit them in full.
- bool AtBeginning = (I == BB->begin());
- if (!AtBeginning) --I;
-
- // If this instruction has a FrameIndex operand, we need to
- // use that target machine register info object to eliminate
- // it.
- TRI.eliminateFrameIndex(MI, SPAdj, i,
- FrameIndexVirtualScavenging ? nullptr : RS);
-
- // Reset the iterator if we were at the beginning of the BB.
- if (AtBeginning) {
- I = BB->begin();
- DoIncr = false;
- }
-
- MI = nullptr;
- break;
- }
-
- // If we are looking at a call sequence, we need to keep track of
- // the SP adjustment made by each instruction in the sequence.
- // This includes both the frame setup/destroy pseudos (handled above),
- // as well as other instructions that have side effects w.r.t the SP.
- // Note that this must come after eliminateFrameIndex, because
- // if I itself referred to a frame index, we shouldn't count its own
- // adjustment.
- if (MI && InsideCallSequence)
- SPAdj += TII.getSPAdjust(MI);
-
- if (DoIncr && I != BB->end()) ++I;
-
- // Update register states.
- if (RS && !FrameIndexVirtualScavenging && MI) RS->forward(MI);
- }
-}
-
-/// scavengeFrameVirtualRegs - Replace all frame index virtual registers
-/// with physical registers. Use the register scavenger to find an
-/// appropriate register to use.
-///
-/// FIXME: Iterating over the instruction stream is unnecessary. We can simply
-/// iterate over the vreg use list, which at this point only contains machine
-/// operands for which eliminateFrameIndex need a new scratch reg.
-void
-WasmPEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
- // Run through the instructions and find any virtual registers.
- for (MachineFunction::iterator BB = Fn.begin(),
- E = Fn.end(); BB != E; ++BB) {
- RS->enterBasicBlock(&*BB);
-
- int SPAdj = 0;
-
- // The instruction stream may change in the loop, so check BB->end()
- // directly.
- for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
- // We might end up here again with a NULL iterator if we scavenged a
- // register for which we inserted spill code for definition by what was
- // originally the first instruction in BB.
- if (I == MachineBasicBlock::iterator(nullptr))
- I = BB->begin();
-
- MachineInstr *MI = I;
- MachineBasicBlock::iterator J = std::next(I);
- MachineBasicBlock::iterator P =
- I == BB->begin() ? MachineBasicBlock::iterator(nullptr)
- : std::prev(I);
-
- // RS should process this instruction before we might scavenge at this
- // location. This is because we might be replacing a virtual register
- // defined by this instruction, and if so, registers killed by this
- // instruction are available, and defined registers are not.
- RS->forward(I);
-
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- if (MI->getOperand(i).isReg()) {
- MachineOperand &MO = MI->getOperand(i);
- unsigned Reg = MO.getReg();
- if (Reg == 0)
- continue;
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
- continue;
-
- // When we first encounter a new virtual register, it
- // must be a definition.
- assert(MI->getOperand(i).isDef() &&
- "frame index virtual missing def!");
- // Scavenge a new scratch register
- const TargetRegisterClass *RC = Fn.getRegInfo().getRegClass(Reg);
- unsigned ScratchReg = RS->scavengeRegister(RC, J, SPAdj);
-
- ++NumScavengedRegs;
-
- // Replace this reference to the virtual register with the
- // scratch register.
- assert (ScratchReg && "Missing scratch register!");
- Fn.getRegInfo().replaceRegWith(Reg, ScratchReg);
-
- // Because this instruction was processed by the RS before this
- // register was allocated, make sure that the RS now records the
- // register as being used.
- RS->setRegUsed(ScratchReg);
- }
- }
-
- // If the scavenger needed to use one of its spill slots, the
- // spill code will have been inserted in between I and J. This is a
- // problem because we need the spill code before I: Move I to just
- // prior to J.
- if (I != std::prev(J)) {
- BB->splice(J, &*BB, I);
-
- // Before we move I, we need to prepare the RS to visit I again.
- // Specifically, RS will assert if it sees uses of registers that
- // it believes are undefined. Because we have already processed
- // register kills in I, when it visits I again, it will believe that
- // those registers are undefined. To avoid this situation, unprocess
- // the instruction I.
- assert(RS->getCurrentPosition() == I &&
- "The register scavenger has an unexpected position");
- I = P;
- RS->unprocess(P);
- } else
- ++I;
- }
- }
-}
diff --git a/lib/Target/WebAssembly/WebAssemblyPeephole.cpp b/lib/Target/WebAssembly/WebAssemblyPeephole.cpp
index 4ad6eed7385b..56d44e6466eb 100644
--- a/lib/Target/WebAssembly/WebAssemblyPeephole.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyPeephole.cpp
@@ -12,14 +12,23 @@
///
//===----------------------------------------------------------------------===//
-#include "WebAssembly.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;
#define DEBUG_TYPE "wasm-peephole"
+static cl::opt<bool> DisableWebAssemblyFallthroughReturnOpt(
+ "disable-wasm-fallthrough-return-opt", cl::Hidden,
+ cl::desc("WebAssembly: Disable fallthrough-return optimizations."),
+ cl::init(false));
+
namespace {
class WebAssemblyPeephole final : public MachineFunctionPass {
const char *getPassName() const override {
@@ -28,6 +37,7 @@ class WebAssemblyPeephole final : public MachineFunctionPass {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -44,11 +54,65 @@ FunctionPass *llvm::createWebAssemblyPeephole() {
return new WebAssemblyPeephole();
}
-bool WebAssemblyPeephole::runOnMachineFunction(MachineFunction &MF) {
+/// If the result register is reused as an input (OldReg == NewReg), rewrite
+/// the result to a fresh dead register so it can be emitted as a $drop.
+static bool MaybeRewriteToDrop(unsigned OldReg, unsigned NewReg,
+ MachineOperand &MO, WebAssemblyFunctionInfo &MFI,
+ MachineRegisterInfo &MRI) {
bool Changed = false;
+ if (OldReg == NewReg) {
+ Changed = true;
+ unsigned FreshReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
+ MO.setReg(FreshReg);
+ MO.setIsDead();
+ MFI.stackifyVReg(FreshReg);
+ }
+ return Changed;
+}
+
+static bool MaybeRewriteToFallthrough(MachineInstr &MI, MachineBasicBlock &MBB,
+ const MachineFunction &MF,
+ WebAssemblyFunctionInfo &MFI,
+ MachineRegisterInfo &MRI,
+ const WebAssemblyInstrInfo &TII,
+ unsigned FallthroughOpc,
+ unsigned CopyLocalOpc) {
+ if (DisableWebAssemblyFallthroughReturnOpt)
+ return false;
+ if (&MBB != &MF.back())
+ return false;
+ if (&MI != &MBB.back())
+ return false;
+
+ // If the operand isn't stackified, insert a COPY_LOCAL to read the operand
+ // and stackify it.
+ MachineOperand &MO = MI.getOperand(0);
+ unsigned Reg = MO.getReg();
+ if (!MFI.isVRegStackified(Reg)) {
+ unsigned NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg));
+ BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(CopyLocalOpc), NewReg)
+ .addReg(Reg);
+ MO.setReg(NewReg);
+ MFI.stackifyVReg(NewReg);
+ }
+
+ // Rewrite the return.
+ MI.setDesc(TII.get(FallthroughOpc));
+ return true;
+}
+
+bool WebAssemblyPeephole::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG({
+ dbgs() << "********** Peephole **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
MachineRegisterInfo &MRI = MF.getRegInfo();
WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ const WebAssemblyTargetLowering &TLI =
+ *MF.getSubtarget<WebAssemblySubtarget>().getTargetLowering();
+ auto &LibInfo = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+ bool Changed = false;
for (auto &MBB : MF)
for (auto &MI : MBB)
@@ -66,20 +130,67 @@ bool WebAssemblyPeephole::runOnMachineFunction(MachineFunction &MF) {
case WebAssembly::STORE_I64: {
// Store instructions return their value operand. If we ended up using
// the same register for both, replace it with a dead def so that it
- // can use $discard instead.
+ // can use $drop instead.
MachineOperand &MO = MI.getOperand(0);
unsigned OldReg = MO.getReg();
- // TODO: Handle SP/physregs
- if (OldReg == MI.getOperand(3).getReg()
- && TargetRegisterInfo::isVirtualRegister(MI.getOperand(3).getReg())) {
- Changed = true;
- unsigned NewReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
- MO.setReg(NewReg);
- MO.setIsDead();
- MFI.stackifyVReg(NewReg);
- MFI.addWAReg(NewReg, WebAssemblyFunctionInfo::UnusedReg);
+ unsigned NewReg =
+ MI.getOperand(WebAssembly::StoreValueOperandNo).getReg();
+ Changed |= MaybeRewriteToDrop(OldReg, NewReg, MO, MFI, MRI);
+ break;
+ }
+ case WebAssembly::CALL_I32:
+ case WebAssembly::CALL_I64: {
+ MachineOperand &Op1 = MI.getOperand(1);
+ if (Op1.isSymbol()) {
+ StringRef Name(Op1.getSymbolName());
+ if (Name == TLI.getLibcallName(RTLIB::MEMCPY) ||
+ Name == TLI.getLibcallName(RTLIB::MEMMOVE) ||
+ Name == TLI.getLibcallName(RTLIB::MEMSET)) {
+ LibFunc::Func Func;
+ if (LibInfo.getLibFunc(Name, Func)) {
+ const auto &Op2 = MI.getOperand(2);
+ if (!Op2.isReg())
+ report_fatal_error("Peephole: call to builtin function with "
+ "wrong signature, not consuming reg");
+ MachineOperand &MO = MI.getOperand(0);
+ unsigned OldReg = MO.getReg();
+ unsigned NewReg = Op2.getReg();
+
+ if (MRI.getRegClass(NewReg) != MRI.getRegClass(OldReg))
+ report_fatal_error("Peephole: call to builtin function with "
+ "wrong signature, from/to mismatch");
+ Changed |= MaybeRewriteToDrop(OldReg, NewReg, MO, MFI, MRI);
+ }
+ }
}
+ break;
}
+ // Rewrite explicit returns at the end of the function into fallthroughs.
+ case WebAssembly::RETURN_I32:
+ Changed |= MaybeRewriteToFallthrough(
+ MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_I32,
+ WebAssembly::COPY_LOCAL_I32);
+ break;
+ case WebAssembly::RETURN_I64:
+ Changed |= MaybeRewriteToFallthrough(
+ MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_I64,
+ WebAssembly::COPY_LOCAL_I64);
+ break;
+ case WebAssembly::RETURN_F32:
+ Changed |= MaybeRewriteToFallthrough(
+ MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_F32,
+ WebAssembly::COPY_LOCAL_F32);
+ break;
+ case WebAssembly::RETURN_F64:
+ Changed |= MaybeRewriteToFallthrough(
+ MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_F64,
+ WebAssembly::COPY_LOCAL_F64);
+ break;
+ case WebAssembly::RETURN_VOID:
+ if (!DisableWebAssemblyFallthroughReturnOpt &&
+ &MBB == &MF.back() && &MI == &MBB.back())
+ MI.setDesc(TII.get(WebAssembly::FALLTHROUGH_RETURN_VOID));
+ break;
}
return Changed;
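
Two of the rewrites above share one trick: when an instruction's result register merely duplicates one of its inputs (a store's value operand, or a memcpy/memmove/memset-style call returning its destination), the result can be retargeted to a fresh dead vreg so the printer emits $drop. A sketch of that helper, assuming the same CodeGen types this file uses:

    // Mirrors MaybeRewriteToDrop above: fires only when the result register
    // is the same vreg as the forwarded input.
    static bool rewriteResultToDrop(MachineOperand &ResultMO, unsigned InputReg,
                                    WebAssemblyFunctionInfo &MFI,
                                    MachineRegisterInfo &MRI) {
      if (ResultMO.getReg() != InputReg)
        return false; // result is independently used; leave it alone
      unsigned Fresh = MRI.createVirtualRegister(MRI.getRegClass(InputReg));
      ResultMO.setReg(Fresh);
      ResultMO.setIsDead(); // nothing reads it, so it prints as $drop
      MFI.stackifyVReg(Fresh);
      return true;
    }
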
diff --git a/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp b/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
new file mode 100644
index 000000000000..30444ac598a4
--- /dev/null
+++ b/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
@@ -0,0 +1,136 @@
+//===- WebAssemblyPrepareForLiveIntervals.cpp - Prepare for LiveIntervals -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Fix up code to meet LiveIntervals' requirements.
+///
+/// Some CodeGen passes don't preserve LiveIntervals' requirements, because
+/// they run after register allocation, where those requirements normally no
+/// longer matter. However, WebAssembly runs LiveIntervals in a late pass.
+/// This pass transforms code to meet LiveIntervals' requirements; primarily,
+/// it ensures that all virtual register uses have definitions (IMPLICIT_DEF
+/// definitions if nothing else).
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-prepare-for-live-intervals"
+
+namespace {
+class WebAssemblyPrepareForLiveIntervals final : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyPrepareForLiveIntervals() : MachineFunctionPass(ID) {}
+
+private:
+ const char *getPassName() const override {
+ return "WebAssembly Prepare For LiveIntervals";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // end anonymous namespace
+
+char WebAssemblyPrepareForLiveIntervals::ID = 0;
+FunctionPass *llvm::createWebAssemblyPrepareForLiveIntervals() {
+ return new WebAssemblyPrepareForLiveIntervals();
+}
+
+/// Test whether the given instruction is an ARGUMENT.
+static bool IsArgument(const MachineInstr *MI) {
+ switch (MI->getOpcode()) {
+ case WebAssembly::ARGUMENT_I32:
+ case WebAssembly::ARGUMENT_I64:
+ case WebAssembly::ARGUMENT_F32:
+ case WebAssembly::ARGUMENT_F64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/// Test whether the given register has an ARGUMENT def.
+static bool HasArgumentDef(unsigned Reg, const MachineRegisterInfo &MRI) {
+ for (auto &Def : MRI.def_instructions(Reg))
+ if (IsArgument(&Def))
+ return true;
+ return false;
+}
+
+bool WebAssemblyPrepareForLiveIntervals::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG({
+ dbgs() << "********** Prepare For LiveIntervals **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ bool Changed = false;
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ MachineBasicBlock &Entry = *MF.begin();
+
+ assert(!mustPreserveAnalysisID(LiveIntervalsID) &&
+ "LiveIntervals shouldn't be active yet!");
+
+ // We don't preserve SSA form.
+ MRI.leaveSSA();
+
+ // BranchFolding and perhaps other passes don't preserve IMPLICIT_DEF
+ // instructions. LiveIntervals requires that all paths to virtual register
+ // uses provide a definition. Insert IMPLICIT_DEFs in the entry block to
+ // conservatively satisfy this.
+ //
+ // TODO: This is fairly heavy-handed; find a better approach.
+ //
+ for (unsigned i = 0, e = MRI.getNumVirtRegs(); i < e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+
+ // Skip unused registers.
+ if (MRI.use_nodbg_empty(Reg))
+ continue;
+
+ // Skip registers that have an ARGUMENT definition.
+ if (HasArgumentDef(Reg, MRI))
+ continue;
+
+ BuildMI(Entry, Entry.begin(), DebugLoc(),
+ TII.get(WebAssembly::IMPLICIT_DEF), Reg);
+ Changed = true;
+ }
+
+ // Move ARGUMENT_* instructions to the top of the entry block, so that their
+ // liveness reflects the fact that these really are live-in values.
+ for (auto MII = Entry.begin(), MIE = Entry.end(); MII != MIE; ) {
+ MachineInstr *MI = &*MII++;
+ if (IsArgument(MI)) {
+ MI->removeFromParent();
+ Entry.insert(Entry.begin(), MI);
+ }
+ }
+
+ // Ok, we're now ready to run LiveIntervalAnalysis again.
+ MF.getProperties().set(MachineFunctionProperties::Property::TracksLiveness);
+
+ return Changed;
+}
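
The IMPLICIT_DEF insertion above restores LiveIntervals' core invariant: every path to a use must pass through a def. A loose C++ analogy (hypothetical code, not LLVM API): a variable assigned on only one branch gets a dummy unconditional definition up front:

    #include <cstdio>

    int main(int argc, char **) {
      int X = 0;  // the moral equivalent of IMPLICIT_DEF: a conservative
                  // definition dominating every use
      if (argc > 1)
        X = 42;   // the "real" def, reached on only one path
      // Without the up-front definition, this use would be reachable along
      // a def-free path -- exactly what LiveIntervals forbids.
      std::printf("%d\n", X);
      return 0;
    }
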
diff --git a/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp b/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
index 9ec66595d8da..dedd9108dfd5 100644
--- a/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
@@ -66,7 +66,7 @@ static float computeWeight(const MachineRegisterInfo *MRI,
float weight = 0.0f;
for (MachineOperand &MO : MRI->reg_nodbg_operands(VReg))
weight += LiveIntervals::getSpillWeight(MO.isDef(), MO.isUse(), MBFI,
- MO.getParent());
+ *MO.getParent());
return weight;
}
@@ -99,7 +99,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
unsigned VReg = TargetRegisterInfo::index2VirtReg(i);
if (MFI.isVRegStackified(VReg))
continue;
- // Skip unused registers, which can use $discard.
+ // Skip unused registers, which can use $drop.
if (MRI->use_empty(VReg))
continue;
diff --git a/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp b/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
index f621db070b5b..4a8fd96f8324 100644
--- a/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
@@ -18,8 +18,8 @@
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "llvm/ADT/SCCIterator.h"
-#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -61,7 +61,6 @@ bool WebAssemblyRegNumbering::runOnMachineFunction(MachineFunction &MF) {
WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
MachineRegisterInfo &MRI = MF.getRegInfo();
- const MachineFrameInfo &FrameInfo = *MF.getFrameInfo();
MFI.initWARegs();
@@ -73,9 +72,13 @@ bool WebAssemblyRegNumbering::runOnMachineFunction(MachineFunction &MF) {
case WebAssembly::ARGUMENT_I32:
case WebAssembly::ARGUMENT_I64:
case WebAssembly::ARGUMENT_F32:
- case WebAssembly::ARGUMENT_F64:
- MFI.setWAReg(MI.getOperand(0).getReg(), MI.getOperand(1).getImm());
+ case WebAssembly::ARGUMENT_F64: {
+ int64_t Imm = MI.getOperand(1).getImm();
+ DEBUG(dbgs() << "Arg VReg " << MI.getOperand(0).getReg() << " -> WAReg "
+ << Imm << "\n");
+ MFI.setWAReg(MI.getOperand(0).getReg(), Imm);
break;
+ }
default:
break;
}
@@ -84,26 +87,27 @@ bool WebAssemblyRegNumbering::runOnMachineFunction(MachineFunction &MF) {
// Then assign regular WebAssembly registers for all remaining used
// virtual registers. TODO: Consider sorting the registers by frequency of
// use, to maximize usage of small immediate fields.
- unsigned NumArgRegs = MFI.getParams().size();
unsigned NumVRegs = MF.getRegInfo().getNumVirtRegs();
unsigned NumStackRegs = 0;
- unsigned CurReg = 0;
+ // Start the numbering for locals after the arg regs
+ unsigned CurReg = MFI.getParams().size();
for (unsigned VRegIdx = 0; VRegIdx < NumVRegs; ++VRegIdx) {
unsigned VReg = TargetRegisterInfo::index2VirtReg(VRegIdx);
+ // Skip unused registers.
+ if (MRI.use_empty(VReg))
+ continue;
// Handle stackified registers.
if (MFI.isVRegStackified(VReg)) {
+ DEBUG(dbgs() << "VReg " << VReg << " -> WAReg "
+ << (INT32_MIN | NumStackRegs) << "\n");
MFI.setWAReg(VReg, INT32_MIN | NumStackRegs++);
continue;
}
- // Skip unused registers.
- if (MRI.use_empty(VReg))
- continue;
- if (MFI.getWAReg(VReg) == WebAssemblyFunctionInfo::UnusedReg)
- MFI.setWAReg(VReg, NumArgRegs + CurReg++);
+ if (MFI.getWAReg(VReg) == WebAssemblyFunctionInfo::UnusedReg) {
+ DEBUG(dbgs() << "VReg " << VReg << " -> WAReg " << CurReg << "\n");
+ MFI.setWAReg(VReg, CurReg++);
+ }
}
- // Allocate locals for used physical registers
- if (FrameInfo.getStackSize() > 0)
- MFI.addPReg(WebAssembly::SP32, CurReg++);
return true;
}
diff --git a/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
index 537c147e6142..0aa3b621da32 100644
--- a/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
@@ -23,9 +23,12 @@
#include "WebAssembly.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h" // for WebAssembly::ARGUMENT_*
#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
@@ -43,12 +46,13 @@ class WebAssemblyRegStackify final : public MachineFunctionPass {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<AAResultsWrapperPass>();
+ AU.addRequired<MachineDominatorTree>();
AU.addRequired<LiveIntervals>();
AU.addPreserved<MachineBlockFrequencyInfo>();
AU.addPreserved<SlotIndexes>();
AU.addPreserved<LiveIntervals>();
- AU.addPreservedID(MachineDominatorsID);
AU.addPreservedID(LiveVariablesID);
+ AU.addPreserved<MachineDominatorTree>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -82,17 +86,197 @@ static void ImposeStackOrdering(MachineInstr *MI) {
/*isImp=*/true));
}
+// Determine whether a call to the callee referenced by
+// MI.getOperand(CalleeOpNo) reads memory, writes memory, and/or has side
+// effects.
+static void QueryCallee(const MachineInstr &MI, unsigned CalleeOpNo, bool &Read,
+ bool &Write, bool &Effects, bool &StackPointer) {
+ // All calls can use the stack pointer.
+ StackPointer = true;
+
+ const MachineOperand &MO = MI.getOperand(CalleeOpNo);
+ if (MO.isGlobal()) {
+ const Constant *GV = MO.getGlobal();
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
+ if (!GA->isInterposable())
+ GV = GA->getAliasee();
+
+ if (const Function *F = dyn_cast<Function>(GV)) {
+ if (!F->doesNotThrow())
+ Effects = true;
+ if (F->doesNotAccessMemory())
+ return;
+ if (F->onlyReadsMemory()) {
+ Read = true;
+ return;
+ }
+ }
+ }
+
+ // Assume the worst.
+ Write = true;
+ Read = true;
+ Effects = true;
+}
+
+// Determine whether MI reads memory, writes memory, has side effects,
+// and/or uses the __stack_pointer value.
+static void Query(const MachineInstr &MI, AliasAnalysis &AA, bool &Read,
+ bool &Write, bool &Effects, bool &StackPointer) {
+ assert(!MI.isPosition());
+ assert(!MI.isTerminator());
+
+ if (MI.isDebugValue())
+ return;
+
+ // Check for loads.
+ if (MI.mayLoad() && !MI.isInvariantLoad(&AA))
+ Read = true;
+
+ // Check for stores.
+ if (MI.mayStore()) {
+ Write = true;
+
+ // Check for stores to __stack_pointer.
+ for (auto MMO : MI.memoperands()) {
+ const MachinePointerInfo &MPI = MMO->getPointerInfo();
+ if (MPI.V.is<const PseudoSourceValue *>()) {
+ auto PSV = MPI.V.get<const PseudoSourceValue *>();
+ if (const ExternalSymbolPseudoSourceValue *EPSV =
+ dyn_cast<ExternalSymbolPseudoSourceValue>(PSV))
+ if (StringRef(EPSV->getSymbol()) == "__stack_pointer")
+ StackPointer = true;
+ }
+ }
+ } else if (MI.hasOrderedMemoryRef()) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::DIV_S_I32: case WebAssembly::DIV_S_I64:
+ case WebAssembly::REM_S_I32: case WebAssembly::REM_S_I64:
+ case WebAssembly::DIV_U_I32: case WebAssembly::DIV_U_I64:
+ case WebAssembly::REM_U_I32: case WebAssembly::REM_U_I64:
+ case WebAssembly::I32_TRUNC_S_F32: case WebAssembly::I64_TRUNC_S_F32:
+ case WebAssembly::I32_TRUNC_S_F64: case WebAssembly::I64_TRUNC_S_F64:
+ case WebAssembly::I32_TRUNC_U_F32: case WebAssembly::I64_TRUNC_U_F32:
+ case WebAssembly::I32_TRUNC_U_F64: case WebAssembly::I64_TRUNC_U_F64:
+ // These instructions have hasUnmodeledSideEffects() returning true
+ // because they trap on overflow and invalid, so they can't be arbitrarily
+ // moved; however, hasOrderedMemoryRef() interprets this plus their lack
+ // of memoperands as a potential unknown memory reference.
+ break;
+ default:
+ // Record volatile accesses, unless it's a call, as calls are handled
+ // specially below.
+ if (!MI.isCall()) {
+ Write = true;
+ Effects = true;
+ }
+ break;
+ }
+ }
+
+ // Check for side effects.
+ if (MI.hasUnmodeledSideEffects()) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::DIV_S_I32: case WebAssembly::DIV_S_I64:
+ case WebAssembly::REM_S_I32: case WebAssembly::REM_S_I64:
+ case WebAssembly::DIV_U_I32: case WebAssembly::DIV_U_I64:
+ case WebAssembly::REM_U_I32: case WebAssembly::REM_U_I64:
+ case WebAssembly::I32_TRUNC_S_F32: case WebAssembly::I64_TRUNC_S_F32:
+ case WebAssembly::I32_TRUNC_S_F64: case WebAssembly::I64_TRUNC_S_F64:
+ case WebAssembly::I32_TRUNC_U_F32: case WebAssembly::I64_TRUNC_U_F32:
+ case WebAssembly::I32_TRUNC_U_F64: case WebAssembly::I64_TRUNC_U_F64:
+ // These instructions have hasUnmodeledSideEffects() returning true
+ // because they trap on overflow and invalid, so they can't be arbitrarily
+ // moved; however, in the specific case of register stackifying, it is safe
+ // to move them because overflow and invalid are Undefined Behavior.
+ break;
+ default:
+ Effects = true;
+ break;
+ }
+ }
+
+ // Analyze calls.
+ if (MI.isCall()) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::CALL_VOID:
+ case WebAssembly::CALL_INDIRECT_VOID:
+ QueryCallee(MI, 0, Read, Write, Effects, StackPointer);
+ break;
+ case WebAssembly::CALL_I32: case WebAssembly::CALL_I64:
+ case WebAssembly::CALL_F32: case WebAssembly::CALL_F64:
+ case WebAssembly::CALL_INDIRECT_I32: case WebAssembly::CALL_INDIRECT_I64:
+ case WebAssembly::CALL_INDIRECT_F32: case WebAssembly::CALL_INDIRECT_F64:
+ QueryCallee(MI, 1, Read, Write, Effects, StackPointer);
+ break;
+ default:
+ llvm_unreachable("unexpected call opcode");
+ }
+ }
+}
+
+// Test whether Def is safe and profitable to rematerialize.
+static bool ShouldRematerialize(const MachineInstr &Def, AliasAnalysis &AA,
+ const WebAssemblyInstrInfo *TII) {
+ return Def.isAsCheapAsAMove() && TII->isTriviallyReMaterializable(Def, &AA);
+}
+
+// Identify the definition for this register at this point. This is a
+// generalization of MachineRegisterInfo::getUniqueVRegDef that uses
+// LiveIntervals to handle complex cases.
+static MachineInstr *GetVRegDef(unsigned Reg, const MachineInstr *Insert,
+ const MachineRegisterInfo &MRI,
+ const LiveIntervals &LIS) {
+ // Most registers are in SSA form here so we try a quick MRI query first.
+ if (MachineInstr *Def = MRI.getUniqueVRegDef(Reg))
+ return Def;
+
+ // MRI doesn't know what the Def is. Try asking LIS.
+ if (const VNInfo *ValNo = LIS.getInterval(Reg).getVNInfoBefore(
+ LIS.getInstructionIndex(*Insert)))
+ return LIS.getInstructionFromIndex(ValNo->def);
+
+ return nullptr;
+}
+
+// Test whether Reg, as defined at Def, has exactly one use. This is a
+// generalization of MachineRegisterInfo::hasOneUse that uses LiveIntervals
+// to handle complex cases.
+static bool HasOneUse(unsigned Reg, MachineInstr *Def,
+ MachineRegisterInfo &MRI, MachineDominatorTree &MDT,
+ LiveIntervals &LIS) {
+ // Most registers are in SSA form here so we try a quick MRI query first.
+ if (MRI.hasOneUse(Reg))
+ return true;
+
+ bool HasOne = false;
+ const LiveInterval &LI = LIS.getInterval(Reg);
+ const VNInfo *DefVNI = LI.getVNInfoAt(
+ LIS.getInstructionIndex(*Def).getRegSlot());
+ assert(DefVNI);
+ for (auto &I : MRI.use_nodbg_operands(Reg)) {
+ const auto &Result = LI.Query(LIS.getInstructionIndex(*I.getParent()));
+ if (Result.valueIn() == DefVNI) {
+ if (!Result.isKill())
+ return false;
+ if (HasOne)
+ return false;
+ HasOne = true;
+ }
+ }
+ return HasOne;
+}
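
The subtlety HasOneUse handles is that once SSA form is left, a register can
have several defs, so "one use" must be asked per value rather than per
register. A toy model under that assumption, with straight-line code and
integer positions standing in for SlotIndexes:

    #include <iostream>
    #include <vector>

    int main() {
      struct Ref {
        int pos;
        bool isDef;
      };
      // %r is defined at 0 and again at 2; it is used at 1, 3, and 4.
      std::vector<Ref> refs = {
          {0, true}, {1, false}, {2, true}, {3, false}, {4, false}};
      const int defPos = 2;  // which def are we asking about?
      int uses = 0, live = -1;
      for (const Ref &r : refs) {
        if (r.isDef)
          live = r.pos;  // this def's value is live from here on
        else if (live == defPos)
          ++uses;        // a use that actually reads the queried def
      }
      // The def at 0 has one use, but the def at 2 has two, so the answer
      // depends on the value number, not just the register.
      std::cout << "uses of def@" << defPos << ": " << uses << '\n';  // 2
    }
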
+
// Test whether it's safe to move Def to just before Insert.
// TODO: Compute memory dependencies in a way that doesn't require always
// walking the block.
// TODO: Compute memory dependencies in a way that uses AliasAnalysis to be
// more precise.
static bool IsSafeToMove(const MachineInstr *Def, const MachineInstr *Insert,
- AliasAnalysis &AA, LiveIntervals &LIS,
- MachineRegisterInfo &MRI) {
+ AliasAnalysis &AA, const LiveIntervals &LIS,
+ const MachineRegisterInfo &MRI) {
assert(Def->getParent() == Insert->getParent());
- bool SawStore = false, SawSideEffects = false;
- MachineBasicBlock::const_iterator D(Def), I(Insert);
// Check for register dependencies.
for (const MachineOperand &MO : Def->operands()) {
@@ -106,6 +290,10 @@ static bool IsSafeToMove(const MachineInstr *Def, const MachineInstr *Insert,
continue;
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ // Ignore ARGUMENTS; it's just used to keep the ARGUMENT_* instructions
+ // from moving down, and we've already checked for that.
+ if (Reg == WebAssembly::ARGUMENTS)
+ continue;
// If the physical register is never modified, ignore it.
if (!MRI.isPhysRegModified(Reg))
continue;
@@ -114,24 +302,404 @@ static bool IsSafeToMove(const MachineInstr *Def, const MachineInstr *Insert,
}
// Ask LiveIntervals whether moving this virtual register use or def to
- // Insert will change value numbers are seen.
+ // Insert will change which value numbers are seen.
+ //
+ // If the operand is a use of a register that is also defined in the same
+ // instruction, test that the newly defined value reaches the insert point,
+ // since the operand will be moving along with the def.
const LiveInterval &LI = LIS.getInterval(Reg);
- VNInfo *DefVNI = MO.isDef() ?
- LI.getVNInfoAt(LIS.getInstructionIndex(Def).getRegSlot()) :
- LI.getVNInfoBefore(LIS.getInstructionIndex(Def));
+ VNInfo *DefVNI =
+ (MO.isDef() || Def->definesRegister(Reg)) ?
+ LI.getVNInfoAt(LIS.getInstructionIndex(*Def).getRegSlot()) :
+ LI.getVNInfoBefore(LIS.getInstructionIndex(*Def));
assert(DefVNI && "Instruction input missing value number");
- VNInfo *InsVNI = LI.getVNInfoBefore(LIS.getInstructionIndex(Insert));
+ VNInfo *InsVNI = LI.getVNInfoBefore(LIS.getInstructionIndex(*Insert));
if (InsVNI && DefVNI != InsVNI)
return false;
}
- // Check for memory dependencies and side effects.
- for (--I; I != D; --I)
- SawSideEffects |= I->isSafeToMove(&AA, SawStore);
- return !(SawStore && Def->mayLoad() && !Def->isInvariantLoad(&AA)) &&
- !(SawSideEffects && !Def->isSafeToMove(&AA, SawStore));
+ bool Read = false, Write = false, Effects = false, StackPointer = false;
+ Query(*Def, AA, Read, Write, Effects, StackPointer);
+
+ // If the instruction does not access memory and has no side effects, it has
+ // no additional dependencies.
+ if (!Read && !Write && !Effects && !StackPointer)
+ return true;
+
+ // Scan through the intervening instructions between Def and Insert.
+ MachineBasicBlock::const_iterator D(Def), I(Insert);
+ for (--I; I != D; --I) {
+ bool InterveningRead = false;
+ bool InterveningWrite = false;
+ bool InterveningEffects = false;
+ bool InterveningStackPointer = false;
+ Query(*I, AA, InterveningRead, InterveningWrite, InterveningEffects,
+ InterveningStackPointer);
+ if (Effects && InterveningEffects)
+ return false;
+ if (Read && InterveningWrite)
+ return false;
+ if (Write && (InterveningRead || InterveningWrite))
+ return false;
+ if (StackPointer && InterveningStackPointer)
+ return false;
+ }
+
+ return true;
+}
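
The memory-dependence half of IsSafeToMove reduces to a small predicate over
per-instruction effect summaries, i.e. the read/write/effects/stack-pointer
bits that Query computes. A self-contained sketch with plain structs rather
than the real MachineInstr interface:

    #include <iostream>
    #include <vector>

    struct Effects {
      bool Read, Write, Side, SP;  // SP: touches __stack_pointer
    };

    // Reject the move if hoisting the moved instruction past any intervening
    // instruction could reorder two observable events.
    bool safeToMoveAcross(const Effects &Def,
                          const std::vector<Effects> &Between) {
      if (!Def.Read && !Def.Write && !Def.Side && !Def.SP)
        return true;  // no memory or effect dependencies at all
      for (const Effects &I : Between) {
        if (Def.Side && I.Side)
          return false;
        if (Def.Read && I.Write)
          return false;
        if (Def.Write && (I.Read || I.Write))
          return false;
        if (Def.SP && I.SP)
          return false;
      }
      return true;
    }

    int main() {
      Effects load = {true, false, false, false};
      Effects store = {false, true, false, false};
      std::cout << safeToMoveAcross(load, {store}) << '\n';  // 0: blocked
      std::cout << safeToMoveAcross(load, {load}) << '\n';   // 1: reads commute
    }
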
+
+/// Test whether OneUse, a use of Reg, dominates all of Reg's other uses.
+static bool OneUseDominatesOtherUses(unsigned Reg, const MachineOperand &OneUse,
+ const MachineBasicBlock &MBB,
+ const MachineRegisterInfo &MRI,
+ const MachineDominatorTree &MDT,
+ LiveIntervals &LIS,
+ WebAssemblyFunctionInfo &MFI) {
+ const LiveInterval &LI = LIS.getInterval(Reg);
+
+ const MachineInstr *OneUseInst = OneUse.getParent();
+ VNInfo *OneUseVNI = LI.getVNInfoBefore(LIS.getInstructionIndex(*OneUseInst));
+
+ for (const MachineOperand &Use : MRI.use_operands(Reg)) {
+ if (&Use == &OneUse)
+ continue;
+
+ const MachineInstr *UseInst = Use.getParent();
+ VNInfo *UseVNI = LI.getVNInfoBefore(LIS.getInstructionIndex(*UseInst));
+
+ if (UseVNI != OneUseVNI)
+ continue;
+
+ if (UseInst == OneUseInst) {
+ // Another use in the same instruction. We need to ensure that the one
+ // selected use happens "before" it.
+ if (&OneUse > &Use)
+ return false;
+ } else {
+ // Test that the use is dominated by the one selected use.
+ while (!MDT.dominates(OneUseInst, UseInst)) {
+ // Actually, dominating is over-conservative. Test that the use would
+ // happen after the one selected use in the stack evaluation order.
+ //
+ // This is needed as a consequence of using implicit get_locals for
+ // uses and implicit set_locals for defs.
+ if (UseInst->getDesc().getNumDefs() == 0)
+ return false;
+ const MachineOperand &MO = UseInst->getOperand(0);
+ if (!MO.isReg())
+ return false;
+ unsigned DefReg = MO.getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(DefReg) ||
+ !MFI.isVRegStackified(DefReg))
+ return false;
+ assert(MRI.hasOneUse(DefReg));
+ const MachineOperand &NewUse = *MRI.use_begin(DefReg);
+ const MachineInstr *NewUseInst = NewUse.getParent();
+ if (NewUseInst == OneUseInst) {
+ if (&OneUse > &NewUse)
+ return false;
+ break;
+ }
+ UseInst = NewUseInst;
+ }
+ }
+ }
+ return true;
+}
+
+/// Get the appropriate tee_local opcode for the given register class.
+static unsigned GetTeeLocalOpcode(const TargetRegisterClass *RC) {
+ if (RC == &WebAssembly::I32RegClass)
+ return WebAssembly::TEE_LOCAL_I32;
+ if (RC == &WebAssembly::I64RegClass)
+ return WebAssembly::TEE_LOCAL_I64;
+ if (RC == &WebAssembly::F32RegClass)
+ return WebAssembly::TEE_LOCAL_F32;
+ if (RC == &WebAssembly::F64RegClass)
+ return WebAssembly::TEE_LOCAL_F64;
+ llvm_unreachable("Unexpected register class");
+}
+
+// Shrink LI to its uses, cleaning up LI.
+static void ShrinkToUses(LiveInterval &LI, LiveIntervals &LIS) {
+ if (LIS.shrinkToUses(&LI)) {
+ SmallVector<LiveInterval*, 4> SplitLIs;
+ LIS.splitSeparateComponents(LI, SplitLIs);
+ }
+}
+
+/// A single-use def in the same block with no intervening memory or register
+/// dependencies; move the def down and nest it with the current instruction.
+static MachineInstr *MoveForSingleUse(unsigned Reg, MachineOperand &Op,
+ MachineInstr *Def,
+ MachineBasicBlock &MBB,
+ MachineInstr *Insert, LiveIntervals &LIS,
+ WebAssemblyFunctionInfo &MFI,
+ MachineRegisterInfo &MRI) {
+ DEBUG(dbgs() << "Move for single use: "; Def->dump());
+
+ MBB.splice(Insert, &MBB, Def);
+ LIS.handleMove(*Def);
+
+ if (MRI.hasOneDef(Reg) && MRI.hasOneUse(Reg)) {
+ // No one else is using this register for anything so we can just stackify
+ // it in place.
+ MFI.stackifyVReg(Reg);
+ } else {
+ // The register may have unrelated uses or defs; create a new register for
+ // just our one def and use so that we can stackify it.
+ unsigned NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg));
+ Def->getOperand(0).setReg(NewReg);
+ Op.setReg(NewReg);
+
+ // Tell LiveIntervals about the new register.
+ LIS.createAndComputeVirtRegInterval(NewReg);
+
+ // Tell LiveIntervals about the changes to the old register.
+ LiveInterval &LI = LIS.getInterval(Reg);
+ LI.removeSegment(LIS.getInstructionIndex(*Def).getRegSlot(),
+ LIS.getInstructionIndex(*Op.getParent()).getRegSlot(),
+ /*RemoveDeadValNo=*/true);
+
+ MFI.stackifyVReg(NewReg);
+
+ DEBUG(dbgs() << " - Replaced register: "; Def->dump());
+ }
+
+ ImposeStackOrdering(Def);
+ return Def;
+}
+
+/// A trivially cloneable instruction; clone it and nest the new copy with the
+/// current instruction.
+static MachineInstr *RematerializeCheapDef(
+ unsigned Reg, MachineOperand &Op, MachineInstr &Def, MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator Insert, LiveIntervals &LIS,
+ WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI,
+ const WebAssemblyInstrInfo *TII, const WebAssemblyRegisterInfo *TRI) {
+ DEBUG(dbgs() << "Rematerializing cheap def: "; Def.dump());
+ DEBUG(dbgs() << " - for use in "; Op.getParent()->dump());
+
+ unsigned NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg));
+ TII->reMaterialize(MBB, Insert, NewReg, 0, Def, *TRI);
+ Op.setReg(NewReg);
+ MachineInstr *Clone = &*std::prev(Insert);
+ LIS.InsertMachineInstrInMaps(*Clone);
+ LIS.createAndComputeVirtRegInterval(NewReg);
+ MFI.stackifyVReg(NewReg);
+ ImposeStackOrdering(Clone);
+
+ DEBUG(dbgs() << " - Cloned to "; Clone->dump());
+
+ // Shrink the interval.
+ bool IsDead = MRI.use_empty(Reg);
+ if (!IsDead) {
+ LiveInterval &LI = LIS.getInterval(Reg);
+ ShrinkToUses(LI, LIS);
+ IsDead = !LI.liveAt(LIS.getInstructionIndex(Def).getDeadSlot());
+ }
+
+ // If that was the last use of the original, delete the original.
+ if (IsDead) {
+ DEBUG(dbgs() << " - Deleting original\n");
+ SlotIndex Idx = LIS.getInstructionIndex(Def).getRegSlot();
+ LIS.removePhysRegDefAt(WebAssembly::ARGUMENTS, Idx);
+ LIS.removeInterval(Reg);
+ LIS.RemoveMachineInstrFromMaps(Def);
+ Def.eraseFromParent();
+ }
+
+ return Clone;
}
+/// A multiple-use def in the same block with no intervening memory or register
+/// dependencies; move the def down, nest it with the current instruction, and
+/// insert a tee_local to satisfy the rest of the uses. As an illustration,
+/// rewrite this:
+///
+/// Reg = INST ... // Def
+/// INST ..., Reg, ... // Insert
+/// INST ..., Reg, ...
+/// INST ..., Reg, ...
+///
+/// to this:
+///
+/// DefReg = INST ... // Def (to become the new Insert)
+/// TeeReg, Reg = TEE_LOCAL_... DefReg
+/// INST ..., TeeReg, ... // Insert
+/// INST ..., Reg, ...
+/// INST ..., Reg, ...
+///
+/// with DefReg and TeeReg stackified. This eliminates a get_local from the
+/// resulting code.
+static MachineInstr *MoveAndTeeForMultiUse(
+ unsigned Reg, MachineOperand &Op, MachineInstr *Def, MachineBasicBlock &MBB,
+ MachineInstr *Insert, LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI,
+ MachineRegisterInfo &MRI, const WebAssemblyInstrInfo *TII) {
+ DEBUG(dbgs() << "Move and tee for multi-use:"; Def->dump());
+
+ // Move Def into place.
+ MBB.splice(Insert, &MBB, Def);
+ LIS.handleMove(*Def);
+
+ // Create the Tee and attach the registers.
+ const auto *RegClass = MRI.getRegClass(Reg);
+ unsigned TeeReg = MRI.createVirtualRegister(RegClass);
+ unsigned DefReg = MRI.createVirtualRegister(RegClass);
+ MachineOperand &DefMO = Def->getOperand(0);
+ MachineInstr *Tee = BuildMI(MBB, Insert, Insert->getDebugLoc(),
+ TII->get(GetTeeLocalOpcode(RegClass)), TeeReg)
+ .addReg(Reg, RegState::Define)
+ .addReg(DefReg, getUndefRegState(DefMO.isDead()));
+ Op.setReg(TeeReg);
+ DefMO.setReg(DefReg);
+ SlotIndex TeeIdx = LIS.InsertMachineInstrInMaps(*Tee).getRegSlot();
+ SlotIndex DefIdx = LIS.getInstructionIndex(*Def).getRegSlot();
+
+ // Tell LiveIntervals we moved the original vreg def from Def to Tee.
+ LiveInterval &LI = LIS.getInterval(Reg);
+ LiveInterval::iterator I = LI.FindSegmentContaining(DefIdx);
+ VNInfo *ValNo = LI.getVNInfoAt(DefIdx);
+ I->start = TeeIdx;
+ ValNo->def = TeeIdx;
+ ShrinkToUses(LI, LIS);
+
+ // Finish stackifying the new regs.
+ LIS.createAndComputeVirtRegInterval(TeeReg);
+ LIS.createAndComputeVirtRegInterval(DefReg);
+ MFI.stackifyVReg(DefReg);
+ MFI.stackifyVReg(TeeReg);
+ ImposeStackOrdering(Def);
+ ImposeStackOrdering(Tee);
+
+ DEBUG(dbgs() << " - Replaced register: "; Def->dump());
+ DEBUG(dbgs() << " - Tee instruction: "; Tee->dump());
+ return Def;
+}
+
+namespace {
+/// A stack for walking the tree of instructions being built, visiting the
+/// MachineOperands in DFS order.
+class TreeWalkerState {
+ typedef MachineInstr::mop_iterator mop_iterator;
+ typedef std::reverse_iterator<mop_iterator> mop_reverse_iterator;
+ typedef iterator_range<mop_reverse_iterator> RangeTy;
+ SmallVector<RangeTy, 4> Worklist;
+
+public:
+ explicit TreeWalkerState(MachineInstr *Insert) {
+ const iterator_range<mop_iterator> &Range = Insert->explicit_uses();
+ if (Range.begin() != Range.end())
+ Worklist.push_back(reverse(Range));
+ }
+
+ bool Done() const { return Worklist.empty(); }
+
+ MachineOperand &Pop() {
+ RangeTy &Range = Worklist.back();
+ MachineOperand &Op = *Range.begin();
+ Range = drop_begin(Range, 1);
+ if (Range.begin() == Range.end())
+ Worklist.pop_back();
+ assert((Worklist.empty() ||
+ Worklist.back().begin() != Worklist.back().end()) &&
+ "Empty ranges shouldn't remain in the worklist");
+ return Op;
+ }
+
+ /// Push Instr's operands onto the stack to be visited.
+ void PushOperands(MachineInstr *Instr) {
+ const iterator_range<mop_iterator> &Range(Instr->explicit_uses());
+ if (Range.begin() != Range.end())
+ Worklist.push_back(reverse(Range));
+ }
+
+ /// Some of Instr's operands are on the top of the stack; remove them and
+ /// re-insert them starting from the beginning (because we've commuted them).
+ void ResetTopOperands(MachineInstr *Instr) {
+ assert(HasRemainingOperands(Instr) &&
+ "Reseting operands should only be done when the instruction has "
+ "an operand still on the stack");
+ Worklist.back() = reverse(Instr->explicit_uses());
+ }
+
+ /// Test whether Instr has operands remaining to be visited at the top of
+ /// the stack.
+ bool HasRemainingOperands(const MachineInstr *Instr) const {
+ if (Worklist.empty())
+ return false;
+ const RangeTy &Range = Worklist.back();
+ return Range.begin() != Range.end() && Range.begin()->getParent() == Instr;
+ }
+
+ /// Test whether the given register is present on the stack, indicating an
+ /// operand in the tree that we haven't visited yet. Moving a definition of
+ /// Reg to a point in the tree after that would change its value.
+ ///
+ /// This is needed as a consequence of using implicit get_locals for
+ /// uses and implicit set_locals for defs.
+ bool IsOnStack(unsigned Reg) const {
+ for (const RangeTy &Range : Worklist)
+ for (const MachineOperand &MO : Range)
+ if (MO.isReg() && MO.getReg() == Reg)
+ return true;
+ return false;
+ }
+};
+
+/// State to keep track of whether commuting is in flight or whether it's been
+/// tried for the current instruction and didn't work.
+class CommutingState {
+ /// There are effectively three states: the initial state where we haven't
+ /// started commuting anything and we don't know anything yet, the tentative
+ /// state where we've commuted the operands of the current instruction and are
+ /// revisiting it, and the declined state where we've reverted the operands
+ /// back to their original order and will no longer commute it further.
+ bool TentativelyCommuting;
+ bool Declined;
+
+ /// During the tentative state, these hold the operand indices of the commuted
+ /// operands.
+ unsigned Operand0, Operand1;
+
+public:
+ CommutingState() : TentativelyCommuting(false), Declined(false) {}
+
+ /// Stackification for an operand was not successful due to ordering
+ /// constraints. If possible, and if we haven't already tried it and declined
+ /// it, commute Insert's operands and prepare to revisit it.
+ void MaybeCommute(MachineInstr *Insert, TreeWalkerState &TreeWalker,
+ const WebAssemblyInstrInfo *TII) {
+ if (TentativelyCommuting) {
+ assert(!Declined &&
+ "Don't decline commuting until you've finished trying it");
+ // Commuting didn't help. Revert it.
+ TII->commuteInstruction(*Insert, /*NewMI=*/false, Operand0, Operand1);
+ TentativelyCommuting = false;
+ Declined = true;
+ } else if (!Declined && TreeWalker.HasRemainingOperands(Insert)) {
+ Operand0 = TargetInstrInfo::CommuteAnyOperandIndex;
+ Operand1 = TargetInstrInfo::CommuteAnyOperandIndex;
+ if (TII->findCommutedOpIndices(*Insert, Operand0, Operand1)) {
+ // Tentatively commute the operands and try again.
+ TII->commuteInstruction(*Insert, /*NewMI=*/false, Operand0, Operand1);
+ TreeWalker.ResetTopOperands(Insert);
+ TentativelyCommuting = true;
+ Declined = false;
+ }
+ }
+ }
+
+ /// Stackification for some operand was successful. Reset to the default
+ /// state.
+ void Reset() {
+ TentativelyCommuting = false;
+ Declined = false;
+ }
+};
+} // end anonymous namespace
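
TreeWalkerState is essentially a DFS over the expression tree with an
explicit stack of reversed operand ranges, which is why operands come off in
LIFO order. A toy rendering with integer operand indices in place of
MachineOperands:

    #include <iostream>
    #include <vector>

    struct Node {
      char name;
      std::vector<int> ops;  // indices of operand nodes
    };

    int main() {
      // add(a, mul(b, c)); node 0 plays the role of the insert point.
      std::vector<Node> nodes = {
          {'+', {1, 2}}, {'a', {}}, {'*', {3, 4}}, {'b', {}}, {'c', {}}};
      std::vector<std::vector<int>> worklist = {nodes[0].ops};
      while (!worklist.empty()) {         // TreeWalkerState::Done()
        int op = worklist.back().back();  // Pop(): last operand first (LIFO)
        worklist.back().pop_back();
        if (worklist.back().empty())
          worklist.pop_back();
        std::cout << nodes[op].name << ' ';
        if (!nodes[op].ops.empty())       // PushOperands() for the new node
          worklist.push_back(nodes[op].ops);
      }
      std::cout << '\n';  // prints "* c b a": the stack evaluation order
    }
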
+
bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** Register Stackifying **********\n"
"********** Function: "
@@ -140,7 +708,10 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
MachineRegisterInfo &MRI = MF.getRegInfo();
WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ const auto *TRI = MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo();
AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
+ MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
LiveIntervals &LIS = getAnalysis<LiveIntervals>();
// Walk the instructions from the bottom up. Currently we don't look past
@@ -151,33 +722,37 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
// iterating over it and the end iterator may change.
for (auto MII = MBB.rbegin(); MII != MBB.rend(); ++MII) {
MachineInstr *Insert = &*MII;
- // Don't nest anything inside a phi.
- if (Insert->getOpcode() == TargetOpcode::PHI)
- break;
-
// Don't nest anything inside an inline asm, because we don't have
// constraints for $push inputs.
if (Insert->getOpcode() == TargetOpcode::INLINEASM)
- break;
+ continue;
+
+ // Ignore debugging intrinsics.
+ if (Insert->getOpcode() == TargetOpcode::DBG_VALUE)
+ continue;
// Iterate through the inputs in reverse order, since we'll be pulling
// operands off the stack in LIFO order.
- bool AnyStackified = false;
- for (MachineOperand &Op : reverse(Insert->uses())) {
+ CommutingState Commuting;
+ TreeWalkerState TreeWalker(Insert);
+ while (!TreeWalker.Done()) {
+ MachineOperand &Op = TreeWalker.Pop();
+
// We're only interested in explicit virtual register operands.
- if (!Op.isReg() || Op.isImplicit() || !Op.isUse())
+ if (!Op.isReg())
continue;
unsigned Reg = Op.getReg();
-
- // Only consider registers with a single definition.
- // TODO: Eventually we may relax this, to stackify phi transfers.
- MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
- if (!Def)
+ assert(Op.isUse() && "explicit_uses() should only iterate over uses");
+ assert(!Op.isImplicit() &&
+ "explicit_uses() should only iterate over explicit operands");
+ if (TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
- // There's no use in nesting implicit defs inside anything.
- if (Def->getOpcode() == TargetOpcode::IMPLICIT_DEF)
+ // Identify the definition for this register at this point. Most
+ // registers are in SSA form here so we try a quick MRI query first.
+ MachineInstr *Def = GetVRegDef(Reg, Insert, MRI, LIS);
+ if (!Def)
continue;
// Don't nest an INLINE_ASM def into anything, because we don't have
@@ -185,10 +760,6 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
if (Def->getOpcode() == TargetOpcode::INLINEASM)
continue;
- // Don't nest PHIs inside of anything.
- if (Def->getOpcode() == TargetOpcode::PHI)
- continue;
-
// Argument instructions represent live-in registers and not real
// instructions.
if (Def->getOpcode() == WebAssembly::ARGUMENT_I32 ||
@@ -197,38 +768,53 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
Def->getOpcode() == WebAssembly::ARGUMENT_F64)
continue;
- // Single-use expression trees require defs that have one use.
- // TODO: Eventually we'll relax this, to take advantage of set_local
- // returning its result.
- if (!MRI.hasOneUse(Reg))
- continue;
-
- // For now, be conservative and don't look across block boundaries.
- // TODO: Be more aggressive?
- if (Def->getParent() != &MBB)
+ // Decide which strategy to take. Prefer to move a single-use value
+ // over cloning it, and prefer cloning over introducing a tee_local.
+ // For moving, we require the def to be in the same block as the use;
+ // this makes things simpler (LiveIntervals' handleMove function only
+ // supports intra-block moves) and it's MachineSink's job to catch all
+ // the sinking opportunities anyway.
+ bool SameBlock = Def->getParent() == &MBB;
+ bool CanMove = SameBlock && IsSafeToMove(Def, Insert, AA, LIS, MRI) &&
+ !TreeWalker.IsOnStack(Reg);
+ if (CanMove && HasOneUse(Reg, Def, MRI, MDT, LIS)) {
+ Insert = MoveForSingleUse(Reg, Op, Def, MBB, Insert, LIS, MFI, MRI);
+ } else if (ShouldRematerialize(*Def, AA, TII)) {
+ Insert =
+ RematerializeCheapDef(Reg, Op, *Def, MBB, Insert->getIterator(),
+ LIS, MFI, MRI, TII, TRI);
+ } else if (CanMove &&
+ OneUseDominatesOtherUses(Reg, Op, MBB, MRI, MDT, LIS, MFI)) {
+ Insert = MoveAndTeeForMultiUse(Reg, Op, Def, MBB, Insert, LIS, MFI,
+ MRI, TII);
+ } else {
+ // We failed to stackify the operand. If the problem was ordering
+ // constraints, Commuting may be able to help.
+ if (!CanMove && SameBlock)
+ Commuting.MaybeCommute(Insert, TreeWalker, TII);
+ // Proceed to the next operand.
continue;
+ }
- // Don't move instructions that have side effects or memory dependencies
- // or other complications.
- if (!IsSafeToMove(Def, Insert, AA, LIS, MRI))
- continue;
+ // We stackified an operand. Add the defining instruction's operands to
+ // the worklist stack now to continue to build an ever deeper tree.
+ Commuting.Reset();
+ TreeWalker.PushOperands(Insert);
+ }
+ // If we stackified any operands, skip over the tree to start looking for
+ // the next instruction we can build a tree on.
+ if (Insert != &*MII) {
+ ImposeStackOrdering(&*MII);
+ MII = std::prev(
+ llvm::make_reverse_iterator(MachineBasicBlock::iterator(Insert)));
Changed = true;
- AnyStackified = true;
- // Move the def down and nest it in the current instruction.
- MBB.splice(Insert, &MBB, Def);
- LIS.handleMove(Def);
- MFI.stackifyVReg(Reg);
- ImposeStackOrdering(Def);
- Insert = Def;
}
- if (AnyStackified)
- ImposeStackOrdering(&*MII);
}
}
- // If we used EXPR_STACK anywhere, add it to the live-in sets everywhere
- // so that it never looks like a use-before-def.
+ // If we used EXPR_STACK anywhere, add it to the live-in sets everywhere so
+ // that it never looks like a use-before-def.
if (Changed) {
MF.getRegInfo().addLiveIn(WebAssembly::EXPR_STACK);
for (MachineBasicBlock &MBB : MF)
@@ -236,30 +822,30 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
}
#ifndef NDEBUG
- // Verify that pushes and pops are performed in FIFO order.
+ // Verify that pushes and pops are performed in LIFO order.
SmallVector<unsigned, 0> Stack;
for (MachineBasicBlock &MBB : MF) {
for (MachineInstr &MI : MBB) {
+ if (MI.isDebugValue())
+ continue;
for (MachineOperand &MO : reverse(MI.explicit_operands())) {
if (!MO.isReg())
continue;
- unsigned VReg = MO.getReg();
-
- // Don't stackify physregs like SP or FP.
- if (!TargetRegisterInfo::isVirtualRegister(VReg))
- continue;
+ unsigned Reg = MO.getReg();
- if (MFI.isVRegStackified(VReg)) {
+ if (MFI.isVRegStackified(Reg)) {
if (MO.isDef())
- Stack.push_back(VReg);
+ Stack.push_back(Reg);
else
- assert(Stack.pop_back_val() == VReg);
+ assert(Stack.pop_back_val() == Reg &&
+ "Register stack pop should be paired with a push");
}
}
}
// TODO: Generalize this code to support keeping values on the stack across
// basic block boundaries.
- assert(Stack.empty());
+ assert(Stack.empty() &&
+ "Register stack pushes and pops should be balanced");
}
#endif
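
The NDEBUG check above can be replayed in isolation: walk the def/use events
of stackified registers and require every pop to match the most recent push.
A hedged sketch using toy (register, isDef) events instead of real operands:

    #include <iostream>
    #include <utility>
    #include <vector>

    int main() {
      // Defs push their register; uses must pop it in LIFO order.
      std::vector<std::pair<int, bool>> events = {
          {1, true}, {2, true}, {2, false}, {1, false}};  // properly nested
      std::vector<int> stack;
      bool ok = true;
      for (const auto &e : events) {
        if (e.second) {
          stack.push_back(e.first);
        } else if (stack.empty() || stack.back() != e.first) {
          ok = false;  // a pop without a matching push violates LIFO order
          break;
        } else {
          stack.pop_back();
        }
      }
      std::cout << (ok && stack.empty() ? "balanced\n" : "unbalanced\n");
    }
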
diff --git a/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp b/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp
index 90d8dda530ba..239fe89b7ef9 100644
--- a/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp
@@ -52,43 +52,74 @@ WebAssemblyRegisterInfo::getReservedRegs(const MachineFunction & /*MF*/) const {
}
void WebAssemblyRegisterInfo::eliminateFrameIndex(
- MachineBasicBlock::iterator II, int SPAdj,
- unsigned FIOperandNum, RegScavenger * /*RS*/) const {
+ MachineBasicBlock::iterator II, int SPAdj, unsigned FIOperandNum,
+ RegScavenger * /*RS*/) const {
assert(SPAdj == 0);
MachineInstr &MI = *II;
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction &MF = *MBB.getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
- const MachineFrameInfo& MFI = *MF.getFrameInfo();
+ const MachineFrameInfo &MFI = *MF.getFrameInfo();
int64_t FrameOffset = MFI.getStackSize() + MFI.getObjectOffset(FrameIndex);
- if (MI.mayLoadOrStore()) {
- // If this is a load or store, make it relative to SP and fold the frame
- // offset directly in.
+ // If this is the address operand of a load or store, make it relative to SP
+ // and fold the frame offset directly in.
+ if (MI.mayLoadOrStore() &&
+ FIOperandNum == WebAssembly::MemOpAddressOperandNo) {
assert(FrameOffset >= 0 && MI.getOperand(1).getImm() >= 0);
int64_t Offset = MI.getOperand(1).getImm() + FrameOffset;
- if (static_cast<uint64_t>(Offset) > std::numeric_limits<uint32_t>::max()) {
- // If this happens the program is invalid, but better to error here than
- // generate broken code.
- report_fatal_error("Memory offset field overflow");
+ if (static_cast<uint64_t>(Offset) <= std::numeric_limits<uint32_t>::max()) {
+ MI.getOperand(FIOperandNum - 1).setImm(Offset);
+ MI.getOperand(FIOperandNum)
+ .ChangeToRegister(WebAssembly::SP32, /*IsDef=*/false);
+ return;
}
- MI.getOperand(1).setImm(Offset);
- MI.getOperand(2).ChangeToRegister(WebAssembly::SP32, /*IsDef=*/false);
- } else {
- // Otherwise create an i32.add SP, offset and make it the operand.
- auto &MRI = MF.getRegInfo();
- const auto *TII = MF.getSubtarget().getInstrInfo();
+ }
+
+ // If this is an address being added to a constant, fold the frame offset
+ // into the constant.
+ if (MI.getOpcode() == WebAssembly::ADD_I32) {
+ MachineOperand &OtherMO = MI.getOperand(3 - FIOperandNum);
+ if (OtherMO.isReg()) {
+ unsigned OtherMOReg = OtherMO.getReg();
+ if (TargetRegisterInfo::isVirtualRegister(OtherMOReg)) {
+ MachineInstr *Def = MF.getRegInfo().getUniqueVRegDef(OtherMOReg);
+ // TODO: For now we just opportunistically do this in the case where
+ // the CONST_I32 happens to have exactly one def and one use. We
+ // should generalize this to optimize in more cases.
+ if (Def && Def->getOpcode() == WebAssembly::CONST_I32 &&
+ MRI.hasOneNonDBGUse(Def->getOperand(0).getReg())) {
+ MachineOperand &ImmMO = Def->getOperand(1);
+ ImmMO.setImm(ImmMO.getImm() + uint32_t(FrameOffset));
+ MI.getOperand(FIOperandNum)
+ .ChangeToRegister(WebAssembly::SP32, /*IsDef=*/false);
+ return;
+ }
+ }
+ }
+ }
+
+ // Otherwise create an i32.add SP, offset and make it the operand.
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
- unsigned OffsetReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
- BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(WebAssembly::CONST_I32), OffsetReg)
+ unsigned FIRegOperand = WebAssembly::SP32;
+ if (FrameOffset) {
+ // Create i32.add SP, offset and make it the operand.
+ const TargetRegisterClass *PtrRC =
+ MRI.getTargetRegisterInfo()->getPointerRegClass(MF);
+ unsigned OffsetOp = MRI.createVirtualRegister(PtrRC);
+ BuildMI(MBB, *II, II->getDebugLoc(), TII->get(WebAssembly::CONST_I32),
+ OffsetOp)
.addImm(FrameOffset);
- BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(WebAssembly::ADD_I32), OffsetReg)
+ FIRegOperand = MRI.createVirtualRegister(PtrRC);
+ BuildMI(MBB, *II, II->getDebugLoc(), TII->get(WebAssembly::ADD_I32),
+ FIRegOperand)
.addReg(WebAssembly::SP32)
- .addReg(OffsetReg);
- MI.getOperand(FIOperandNum).ChangeToRegister(OffsetReg, /*IsDef=*/false);
+ .addReg(OffsetOp);
}
+ MI.getOperand(FIOperandNum).ChangeToRegister(FIRegOperand, /*IsDef=*/false);
}
unsigned
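
The common case in eliminateFrameIndex is pure arithmetic: fold stack size
plus object offset into the access's unsigned 32-bit offset field when it
fits, and otherwise fall back to materializing an explicit SP-plus-offset
address. A simplified standalone sketch of that decision, with toy values
(the ADD_I32/CONST_I32 folding shown above is elided):

    #include <cstdint>
    #include <iostream>
    #include <limits>

    struct Lowered {
      bool folded;           // true: offset fits in the load/store field
      uint32_t offsetField;  // value for the offset immediate
      int64_t addExpr;       // nonzero: emit i32.add SP, addExpr instead
    };

    Lowered lowerFrameAccess(int64_t stackSize, int64_t objectOffset,
                             int64_t loadImm) {
      int64_t frameOffset = stackSize + objectOffset;
      int64_t total = loadImm + frameOffset;
      if (total >= 0 && static_cast<uint64_t>(total) <=
                            std::numeric_limits<uint32_t>::max())
        return {true, static_cast<uint32_t>(total), 0};
      return {false, static_cast<uint32_t>(loadImm), frameOffset};
    }

    int main() {
      Lowered L = lowerFrameAccess(/*stackSize=*/64, /*objectOffset=*/8,
                                   /*loadImm=*/4);
      std::cout << (L.folded ? "folded, offset " : "needs add, offset ")
                << L.offsetField << '\n';  // folded, offset 76
    }
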
diff --git a/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp b/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp
new file mode 100644
index 000000000000..11bda47eac56
--- /dev/null
+++ b/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp
@@ -0,0 +1,97 @@
+//===-- WebAssemblyReplacePhysRegs.cpp - Replace phys regs with virt regs -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements a pass that replaces physical registers with
+/// virtual registers.
+///
+/// LLVM expects certain physical registers, such as a stack pointer. However,
+/// WebAssembly doesn't actually have such physical registers. This pass is run
+/// once LLVM no longer needs these registers, and replaces them with virtual
+/// registers, so they can participate in register stackifying and coloring in
+/// the normal way.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-replace-phys-regs"
+
+namespace {
+class WebAssemblyReplacePhysRegs final : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyReplacePhysRegs() : MachineFunctionPass(ID) {}
+
+private:
+ const char *getPassName() const override {
+ return "WebAssembly Replace Physical Registers";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // end anonymous namespace
+
+char WebAssemblyReplacePhysRegs::ID = 0;
+FunctionPass *llvm::createWebAssemblyReplacePhysRegs() {
+ return new WebAssemblyReplacePhysRegs();
+}
+
+bool WebAssemblyReplacePhysRegs::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG({
+ dbgs() << "********** Replace Physical Registers **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const auto &TRI = *MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo();
+ bool Changed = false;
+
+ assert(!mustPreserveAnalysisID(LiveIntervalsID) &&
+ "LiveIntervals shouldn't be active yet!");
+ // We don't preserve SSA or liveness.
+ MRI.leaveSSA();
+ MRI.invalidateLiveness();
+
+ for (unsigned PReg = WebAssembly::NoRegister + 1;
+ PReg < WebAssembly::NUM_TARGET_REGS; ++PReg) {
+ // Skip fake registers that are never used explicitly.
+ if (PReg == WebAssembly::EXPR_STACK || PReg == WebAssembly::ARGUMENTS)
+ continue;
+
+ // Replace explicit uses of the physical register with a virtual register.
+ const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(PReg);
+ unsigned VReg = WebAssembly::NoRegister;
+ for (auto I = MRI.reg_begin(PReg), E = MRI.reg_end(); I != E; ) {
+ MachineOperand &MO = *I++;
+ if (!MO.isImplicit()) {
+ if (VReg == WebAssembly::NoRegister)
+ VReg = MRI.createVirtualRegister(RC);
+ MO.setReg(VReg);
+ Changed = true;
+ }
+ }
+ }
+
+ return Changed;
+}
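
The replacement loop boils down to allocating one fresh virtual register per
physical register on first explicit occurrence and rewriting explicit
operands only, leaving implicit ones alone. A toy model with integer register
ids (assumed values, not real WebAssembly register numbers):

    #include <iostream>
    #include <map>
    #include <vector>

    int main() {
      struct Operand {
        int reg;
        bool implicit;
      };
      std::vector<Operand> ops = {{100, false}, {100, true}, {100, false}};
      int nextVirt = 1000;  // toy virtual register numbering
      std::map<int, int> physToVirt;
      for (Operand &op : ops) {
        if (op.implicit)
          continue;  // implicit operands keep the physical register
        auto it = physToVirt.find(op.reg);
        if (it == physToVirt.end())
          it = physToVirt.emplace(op.reg, nextVirt++).first;
        op.reg = it->second;
      }
      for (const Operand &op : ops)
        std::cout << op.reg << ' ';
      std::cout << '\n';  // 1000 100 1000
    }
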
diff --git a/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h b/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h
index 13d96671276d..533c66b7a22f 100644
--- a/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h
+++ b/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h
@@ -9,18 +9,18 @@
///
/// \file
/// \brief This file defines the WebAssembly subclass for
-/// TargetSelectionDAGInfo.
+/// SelectionDAGTargetInfo.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYSELECTIONDAGINFO_H
#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYSELECTIONDAGINFO_H
-#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
namespace llvm {
-class WebAssemblySelectionDAGInfo final : public TargetSelectionDAGInfo {
+class WebAssemblySelectionDAGInfo final : public SelectionDAGTargetInfo {
public:
~WebAssemblySelectionDAGInfo() override;
};
diff --git a/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp b/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
new file mode 100644
index 000000000000..4ebea68c58a5
--- /dev/null
+++ b/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
@@ -0,0 +1,114 @@
+//=- WebAssemblySetP2AlignOperands.cpp - Set alignments on loads and stores -=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file sets the p2align operands on load and store instructions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-set-p2align-operands"
+
+namespace {
+class WebAssemblySetP2AlignOperands final : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblySetP2AlignOperands() : MachineFunctionPass(ID) {}
+
+ const char *getPassName() const override {
+ return "WebAssembly Set p2align Operands";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ AU.addPreservedID(MachineDominatorsID);
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // end anonymous namespace
+
+char WebAssemblySetP2AlignOperands::ID = 0;
+FunctionPass *llvm::createWebAssemblySetP2AlignOperands() {
+ return new WebAssemblySetP2AlignOperands();
+}
+
+bool WebAssemblySetP2AlignOperands::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG({
+ dbgs() << "********** Set p2align Operands **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ bool Changed = false;
+
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::LOAD_I32:
+ case WebAssembly::LOAD_I64:
+ case WebAssembly::LOAD_F32:
+ case WebAssembly::LOAD_F64:
+ case WebAssembly::LOAD8_S_I32:
+ case WebAssembly::LOAD8_U_I32:
+ case WebAssembly::LOAD16_S_I32:
+ case WebAssembly::LOAD16_U_I32:
+ case WebAssembly::LOAD8_S_I64:
+ case WebAssembly::LOAD8_U_I64:
+ case WebAssembly::LOAD16_S_I64:
+ case WebAssembly::LOAD16_U_I64:
+ case WebAssembly::LOAD32_S_I64:
+ case WebAssembly::LOAD32_U_I64:
+ case WebAssembly::STORE_I32:
+ case WebAssembly::STORE_I64:
+ case WebAssembly::STORE_F32:
+ case WebAssembly::STORE_F64:
+ case WebAssembly::STORE8_I32:
+ case WebAssembly::STORE16_I32:
+ case WebAssembly::STORE8_I64:
+ case WebAssembly::STORE16_I64:
+ case WebAssembly::STORE32_I64: {
+ assert(MI.getOperand(3).getImm() == 0 &&
+ "ISel should set p2align operands to 0");
+ assert(MI.hasOneMemOperand() &&
+ "Load and store instructions have exactly one mem operand");
+ assert((*MI.memoperands_begin())->getSize() ==
+ (UINT64_C(1)
+ << WebAssembly::GetDefaultP2Align(MI.getOpcode())) &&
+ "Default p2align value should be natural");
+ assert(MI.getDesc().OpInfo[3].OperandType ==
+ WebAssembly::OPERAND_P2ALIGN &&
+ "Load and store instructions should have a p2align operand");
+ uint64_t P2Align = Log2_64((*MI.memoperands_begin())->getAlignment());
+
+ // WebAssembly does not currently support supernatural alignment.
+ P2Align = std::min(
+ P2Align, uint64_t(WebAssembly::GetDefaultP2Align(MI.getOpcode())));
+
+ MI.getOperand(3).setImm(P2Align);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ }
+
+ return Changed;
+}
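
The p2align computation is a clamped log2: the encoded alignment is the log2
of the memory operand's alignment, capped at the access's natural alignment
because WebAssembly can't currently express supernatural alignment. A
standalone sketch with toy values:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    uint64_t log2u(uint64_t v) {
      uint64_t r = 0;
      while (v >>= 1)
        ++r;
      return r;
    }

    uint64_t p2AlignFor(uint64_t alignBytes, uint64_t naturalP2Align) {
      return std::min(log2u(alignBytes), naturalP2Align);
    }

    int main() {
      // A 4-byte (i32) access has natural p2align 2.
      std::cout << p2AlignFor(4, 2) << '\n';   // 2: naturally aligned
      std::cout << p2AlignFor(1, 2) << '\n';   // 0: byte-aligned access
      std::cout << p2AlignFor(16, 2) << '\n';  // 2: clamped, no supernatural
    }
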
diff --git a/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp b/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp
index 4e08b2b079eb..1e9a773ae628 100644
--- a/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp
@@ -17,12 +17,19 @@
/// potentially also exposing the store to register stackifying. These both can
/// reduce get_local/set_local traffic.
///
+/// This pass also performs this optimization for memcpy, memmove, and memset
+/// calls: since the LLVM intrinsics for these return void, they can't use the
+/// returned attribute and consequently aren't handled by the OptimizeReturned
+/// pass.
+///
//===----------------------------------------------------------------------===//
#include "WebAssembly.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -49,6 +56,10 @@ public:
AU.addPreserved<MachineBlockFrequencyInfo>();
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addPreserved<LiveIntervals>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -63,17 +74,127 @@ FunctionPass *llvm::createWebAssemblyStoreResults() {
return new WebAssemblyStoreResults();
}
+// Replace uses of FromReg with ToReg if they are dominated by MI.
+static bool ReplaceDominatedUses(MachineBasicBlock &MBB, MachineInstr &MI,
+ unsigned FromReg, unsigned ToReg,
+ const MachineRegisterInfo &MRI,
+ MachineDominatorTree &MDT,
+ LiveIntervals &LIS) {
+ bool Changed = false;
+
+ LiveInterval *FromLI = &LIS.getInterval(FromReg);
+ LiveInterval *ToLI = &LIS.getInterval(ToReg);
+
+ SlotIndex FromIdx = LIS.getInstructionIndex(MI).getRegSlot();
+ VNInfo *FromVNI = FromLI->getVNInfoAt(FromIdx);
+
+ SmallVector<SlotIndex, 4> Indices;
+
+ for (auto I = MRI.use_begin(FromReg), E = MRI.use_end(); I != E;) {
+ MachineOperand &O = *I++;
+ MachineInstr *Where = O.getParent();
+
+ // Check that MI dominates the instruction in the normal way.
+ if (&MI == Where || !MDT.dominates(&MI, Where))
+ continue;
+
+ // If this use gets a different value, skip it.
+ SlotIndex WhereIdx = LIS.getInstructionIndex(*Where);
+ VNInfo *WhereVNI = FromLI->getVNInfoAt(WhereIdx);
+ if (WhereVNI && WhereVNI != FromVNI)
+ continue;
+
+ // Make sure ToReg isn't clobbered before it gets there.
+ VNInfo *ToVNI = ToLI->getVNInfoAt(WhereIdx);
+ if (ToVNI && ToVNI != FromVNI)
+ continue;
+
+ Changed = true;
+ DEBUG(dbgs() << "Setting operand " << O << " in " << *Where << " from "
+ << MI << "\n");
+ O.setReg(ToReg);
+
+ // If the store's def was previously dead, it is no longer.
+ if (!O.isUndef()) {
+ MI.getOperand(0).setIsDead(false);
+
+ Indices.push_back(WhereIdx.getRegSlot());
+ }
+ }
+
+ if (Changed) {
+ // Extend ToReg's liveness.
+ LIS.extendToIndices(*ToLI, Indices);
+
+ // Shrink FromReg's liveness.
+ LIS.shrinkToUses(FromLI);
+
+ // If we replaced all dominated uses, FromReg is now killed at MI.
+ if (!FromLI->liveAt(FromIdx.getDeadSlot()))
+ MI.addRegisterKilled(FromReg,
+ MBB.getParent()->getSubtarget<WebAssemblySubtarget>()
+ .getRegisterInfo());
+ }
+
+ return Changed;
+}
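
For a single straight-line block, ReplaceDominatedUses degenerates to
"rewrite every later use," since earlier instructions dominate later ones.
The toy below shows only that rewrite shape; real dominance queries and the
LiveIntervals bookkeeping are elided:

    #include <iostream>
    #include <vector>

    int main() {
      struct Use {
        int pos, reg;
      };
      const int storePos = 2, fromReg = 5, toReg = 9;
      std::vector<Use> uses = {{1, fromReg}, {3, fromReg}, {4, fromReg}};
      for (Use &U : uses)
        if (U.pos > storePos)  // dominated by the store's result def
          U.reg = toReg;       // reuse the store result instead
      for (const Use &U : uses)
        std::cout << '%' << U.reg << ' ';
      std::cout << '\n';  // %5 %9 %9
    }
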
+
+static bool optimizeStore(MachineBasicBlock &MBB, MachineInstr &MI,
+ const MachineRegisterInfo &MRI,
+ MachineDominatorTree &MDT,
+ LiveIntervals &LIS) {
+ unsigned ToReg = MI.getOperand(0).getReg();
+ unsigned FromReg = MI.getOperand(WebAssembly::StoreValueOperandNo).getReg();
+ return ReplaceDominatedUses(MBB, MI, FromReg, ToReg, MRI, MDT, LIS);
+}
+
+static bool optimizeCall(MachineBasicBlock &MBB, MachineInstr &MI,
+ const MachineRegisterInfo &MRI,
+ MachineDominatorTree &MDT,
+ LiveIntervals &LIS,
+ const WebAssemblyTargetLowering &TLI,
+ const TargetLibraryInfo &LibInfo) {
+ MachineOperand &Op1 = MI.getOperand(1);
+ if (!Op1.isSymbol())
+ return false;
+
+ StringRef Name(Op1.getSymbolName());
+ bool callReturnsInput = Name == TLI.getLibcallName(RTLIB::MEMCPY) ||
+ Name == TLI.getLibcallName(RTLIB::MEMMOVE) ||
+ Name == TLI.getLibcallName(RTLIB::MEMSET);
+ if (!callReturnsInput)
+ return false;
+
+ LibFunc::Func Func;
+ if (!LibInfo.getLibFunc(Name, Func))
+ return false;
+
+ unsigned FromReg = MI.getOperand(2).getReg();
+ unsigned ToReg = MI.getOperand(0).getReg();
+ if (MRI.getRegClass(FromReg) != MRI.getRegClass(ToReg))
+ report_fatal_error("Store results: call to builtin function with wrong "
+ "signature, from/to mismatch");
+ return ReplaceDominatedUses(MBB, MI, FromReg, ToReg, MRI, MDT, LIS);
+}
+
bool WebAssemblyStoreResults::runOnMachineFunction(MachineFunction &MF) {
DEBUG({
dbgs() << "********** Store Results **********\n"
<< "********** Function: " << MF.getName() << '\n';
});
- const MachineRegisterInfo &MRI = MF.getRegInfo();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
+ const WebAssemblyTargetLowering &TLI =
+ *MF.getSubtarget<WebAssemblySubtarget>().getTargetLowering();
+ const auto &LibInfo = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+ LiveIntervals &LIS = getAnalysis<LiveIntervals>();
bool Changed = false;
- assert(MRI.isSSA() && "StoreResults depends on SSA form");
+ // We don't preserve SSA form.
+ MRI.leaveSSA();
+
+ assert(MRI.tracksLiveness() && "StoreResults expects liveness tracking");
for (auto &MBB : MF) {
DEBUG(dbgs() << "Basic Block: " << MBB.getName() << '\n');
@@ -90,33 +211,12 @@ bool WebAssemblyStoreResults::runOnMachineFunction(MachineFunction &MF) {
case WebAssembly::STORE_F64:
case WebAssembly::STORE_I32:
case WebAssembly::STORE_I64:
- unsigned ToReg = MI.getOperand(0).getReg();
- unsigned FromReg = MI.getOperand(3).getReg();
- for (auto I = MRI.use_begin(FromReg), E = MRI.use_end(); I != E;) {
- MachineOperand &O = *I++;
- MachineInstr *Where = O.getParent();
- if (Where->getOpcode() == TargetOpcode::PHI) {
- // PHIs use their operands on their incoming CFG edges rather than
- // in their parent blocks. Get the basic block paired with this use
- // of FromReg and check that MI's block dominates it.
- MachineBasicBlock *Pred =
- Where->getOperand(&O - &Where->getOperand(0) + 1).getMBB();
- if (!MDT.dominates(&MBB, Pred))
- continue;
- } else {
- // For a non-PHI, check that MI dominates the instruction in the
- // normal way.
- if (&MI == Where || !MDT.dominates(&MI, Where))
- continue;
- }
- Changed = true;
- DEBUG(dbgs() << "Setting operand " << O << " in " << *Where
- << " from " << MI << "\n");
- O.setReg(ToReg);
- // If the store's def was previously dead, it is no longer. But the
- // dead flag shouldn't be set yet.
- assert(!MI.getOperand(0).isDead() && "Dead flag set on store result");
- }
+ Changed |= optimizeStore(MBB, MI, MRI, MDT, LIS);
+ break;
+ case WebAssembly::CALL_I32:
+ case WebAssembly::CALL_I64:
+ Changed |= optimizeCall(MBB, MI, MRI, MDT, LIS, TLI, LibInfo);
+ break;
}
}
diff --git a/lib/Target/WebAssembly/WebAssemblySubtarget.cpp b/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
index cb2d5a63a19f..ce39051b0555 100644
--- a/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
+++ b/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
@@ -13,9 +13,9 @@
///
//===----------------------------------------------------------------------===//
-#include "WebAssemblyInstrInfo.h"
-#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssemblySubtarget.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyInstrInfo.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
@@ -45,5 +45,11 @@ WebAssemblySubtarget::WebAssemblySubtarget(const Triple &TT,
InstrInfo(initializeSubtargetDependencies(FS)), TSInfo(),
TLInfo(TM, *this) {}
-bool WebAssemblySubtarget::enableMachineScheduler() const { return true; }
+bool WebAssemblySubtarget::enableMachineScheduler() const {
+ // Disable the MachineScheduler for now. Even with ShouldTrackPressure set and
+ // enableMachineSchedDefaultSched overridden, it appears to have an overall
+ // negative effect on the kinds of register optimizations we're doing.
+ return false;
+}
+
bool WebAssemblySubtarget::useAA() const { return true; }
diff --git a/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
index b290b4bf7440..32154af3c1c2 100644
--- a/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
@@ -20,8 +20,8 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
@@ -39,16 +39,23 @@ extern "C" void LLVMInitializeWebAssemblyTarget() {
// WebAssembly Lowering public interface.
//===----------------------------------------------------------------------===//
+static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
+ if (!RM.hasValue())
+ return Reloc::PIC_;
+ return *RM;
+}
+
 /// Create a WebAssembly architecture model.
///
WebAssemblyTargetMachine::WebAssemblyTargetMachine(
const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
+ const TargetOptions &Options, Optional<Reloc::Model> RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL)
: LLVMTargetMachine(T,
TT.isArch64Bit() ? "e-m:e-p:64:64-i64:64-n32:64-S128"
: "e-m:e-p:32:32-i64:64-n32:64-S128",
- TT, CPU, FS, Options, RM, CM, OL),
+ TT, CPU, FS, Options, getEffectiveRelocModel(RM),
+ CM, OL),
TLOF(make_unique<WebAssemblyTargetObjectFile>()) {
// WebAssembly type-checks expressions, but a noreturn function with a return
// type that doesn't match the context will cause a check failure. So we lower
@@ -58,9 +65,9 @@ WebAssemblyTargetMachine::WebAssemblyTargetMachine(
initAsmInfo();
- // We need a reducible CFG, so disable some optimizations which tend to
- // introduce irreducibility.
- setRequiresStructuredCFG(true);
+ // Note that we don't use setRequiresStructuredCFG(true). It disables
+ // optimizations that we're ok with, and want, such as critical edge
+ // splitting and tail merging.
}
WebAssemblyTargetMachine::~WebAssemblyTargetMachine() {}
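A minimal sketch of the defaulting rule added above, assuming LLVM 3.9's Optional API (llvm/ADT/Optional.h) and llvm/Support/CodeGen.h for Reloc::Model; an unset relocation model falls back to PIC:

    static llvm::Reloc::Model
    effectiveRelocModel(llvm::Optional<llvm::Reloc::Model> RM) {
      // Absent an explicit choice, default to position-independent code.
      return RM.hasValue() ? *RM : llvm::Reloc::PIC_;
    }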
@@ -103,9 +110,8 @@ public:
void addIRPasses() override;
bool addInstSelector() override;
- bool addILPOpts() override;
- void addPreRegAlloc() override;
void addPostRegAlloc() override;
+ bool addGCPasses() override { return false; }
void addPreEmitPass() override;
};
} // end anonymous namespace
@@ -140,7 +146,8 @@ void WebAssemblyPassConfig::addIRPasses() {
addPass(createAtomicExpandPass(TM));
// Optimize "returned" function attributes.
- addPass(createWebAssemblyOptimizeReturned());
+ if (getOptLevel() != CodeGenOpt::None)
+ addPass(createWebAssemblyOptimizeReturned());
TargetPassConfig::addIRPasses();
}
@@ -153,58 +160,75 @@ bool WebAssemblyPassConfig::addInstSelector() {
// so that we can fix up the ARGUMENT instructions before anything else
// sees them in the wrong place.
addPass(createWebAssemblyArgumentMove());
+ // Set the p2align operands. This information is present during ISel;
+ // however, it's inconvenient to collect there. Collect it now, and update
+ // the immediate operands.
+ addPass(createWebAssemblySetP2AlignOperands());
return false;
}
-bool WebAssemblyPassConfig::addILPOpts() {
- (void)TargetPassConfig::addILPOpts();
- return true;
-}
-
-void WebAssemblyPassConfig::addPreRegAlloc() {
- TargetPassConfig::addPreRegAlloc();
-
- // Prepare store instructions for register stackifying.
- addPass(createWebAssemblyStoreResults());
-}
-
void WebAssemblyPassConfig::addPostRegAlloc() {
// TODO: The following CodeGen passes don't currently support code containing
// virtual registers. Consider removing their restrictions and re-enabling
// them.
- //
- // We use our own PrologEpilogInserter which is very slightly modified to
- // tolerate virtual registers.
- disablePass(&PrologEpilogCodeInserterID);
- // Fails with: should be run after register allocation.
- disablePass(&MachineCopyPropagationID);
- // Mark registers as representing wasm's expression stack.
- addPass(createWebAssemblyRegStackify());
+ // Has no asserts of its own, but was not written to handle virtual regs.
+ disablePass(&ShrinkWrapID);
- // Run the register coloring pass to reduce the total number of registers.
- addPass(createWebAssemblyRegColoring());
+ // These passes all require the AllVRegsAllocated property.
+ disablePass(&MachineCopyPropagationID);
+ disablePass(&PostRASchedulerID);
+ disablePass(&FuncletLayoutID);
+ disablePass(&StackMapLivenessID);
+ disablePass(&LiveDebugValuesID);
+ disablePass(&PatchableFunctionID);
TargetPassConfig::addPostRegAlloc();
-
- // Run WebAssembly's version of the PrologEpilogInserter. Target-independent
- // PEI runs after PostRegAlloc and after ShrinkWrap. Putting it here will run
- // PEI before ShrinkWrap but otherwise in the same position in the order.
- addPass(createWebAssemblyPEI());
}
void WebAssemblyPassConfig::addPreEmitPass() {
TargetPassConfig::addPreEmitPass();
+ // Now that we have a prologue and epilogue and all frame indices are
+ // rewritten, eliminate SP and FP. This allows them to be stackified,
+ // colored, and numbered with the rest of the registers.
+ addPass(createWebAssemblyReplacePhysRegs());
+
+ if (getOptLevel() != CodeGenOpt::None) {
+ // LiveIntervals isn't commonly run this late. Re-establish preconditions.
+ addPass(createWebAssemblyPrepareForLiveIntervals());
+
+ // Depend on LiveIntervals and perform some optimizations on it.
+ addPass(createWebAssemblyOptimizeLiveIntervals());
+
+ // Prepare store instructions for register stackifying.
+ addPass(createWebAssemblyStoreResults());
+
+ // Mark registers as representing wasm's expression stack. This is a key
+ // code-compression technique in WebAssembly. We run this pass (and
+ // StoreResults above) very late, so that it sees as much code as possible,
+ // including code emitted by PEI and expanded by late tail duplication.
+ addPass(createWebAssemblyRegStackify());
+
+ // Run the register coloring pass to reduce the total number of registers.
+ // This runs after stackification so that it doesn't consider registers
+ // that become stackified.
+ addPass(createWebAssemblyRegColoring());
+ }
+
+ // Eliminate multiple-entry loops.
+ addPass(createWebAssemblyFixIrreducibleControlFlow());
+
// Put the CFG in structured form; insert BLOCK and LOOP markers.
addPass(createWebAssemblyCFGStackify());
// Lower br_unless into br_if.
addPass(createWebAssemblyLowerBrUnless());
+ // Perform the very last peephole optimizations on the code.
+ if (getOptLevel() != CodeGenOpt::None)
+ addPass(createWebAssemblyPeephole());
+
// Create a mapping from LLVM CodeGen virtual registers to wasm registers.
addPass(createWebAssemblyRegNumbering());
-
- // Perform the very last peephole optimizations on the code.
- addPass(createWebAssemblyPeephole());
}
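For orientation, a condensed view of the late pipeline the hunk above establishes; bracketed entries run only when optimizing (names abbreviated from the create* calls, not actual pass identifiers):

    static const char *LatePipeline[] = {
        "ReplacePhysRegs",
        "[PrepareForLiveIntervals]", "[OptimizeLiveIntervals]",
        "[StoreResults]", "[RegStackify]", "[RegColoring]",
        "FixIrreducibleControlFlow", "CFGStackify", "LowerBrUnless",
        "[Peephole]", "RegNumbering",
    };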
diff --git a/lib/Target/WebAssembly/WebAssemblyTargetMachine.h b/lib/Target/WebAssembly/WebAssemblyTargetMachine.h
index 3226edcdc614..52a2ef78736a 100644
--- a/lib/Target/WebAssembly/WebAssemblyTargetMachine.h
+++ b/lib/Target/WebAssembly/WebAssemblyTargetMachine.h
@@ -28,7 +28,7 @@ class WebAssemblyTargetMachine final : public LLVMTargetMachine {
public:
WebAssemblyTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
~WebAssemblyTargetMachine() override;
@@ -44,6 +44,8 @@ public:
/// \brief Get the TargetIRAnalysis for this target.
TargetIRAnalysis getTargetIRAnalysis() override;
+
+ bool usesPhysRegsForPEI() const override { return false; }
};
} // end namespace llvm
diff --git a/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp b/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
index 356631711921..bf546dab5fbb 100644
--- a/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
@@ -25,3 +25,59 @@ WebAssemblyTTIImpl::getPopcntSupport(unsigned TyWidth) const {
assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
return TargetTransformInfo::PSK_FastHardware;
}
+
+unsigned WebAssemblyTTIImpl::getNumberOfRegisters(bool Vector) {
+ unsigned Result = BaseT::getNumberOfRegisters(Vector);
+
+ // For SIMD, use at least 16 registers, as a rough guess.
+ if (Vector)
+ Result = std::max(Result, 16u);
+
+ return Result;
+}
+
+unsigned WebAssemblyTTIImpl::getRegisterBitWidth(bool Vector) {
+ if (Vector && getST()->hasSIMD128())
+ return 128;
+
+ return 64;
+}
+
+unsigned WebAssemblyTTIImpl::getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
+ TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
+ TTI::OperandValueProperties Opd2PropInfo) {
+
+ unsigned Cost = BasicTTIImplBase<WebAssemblyTTIImpl>::getArithmeticInstrCost(
+ Opcode, Ty, Opd1Info, Opd2Info, Opd1PropInfo, Opd2PropInfo);
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ switch (Opcode) {
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::Shl:
+ // SIMD128's shifts currently only accept a scalar shift count. For each
+ // element, we'll need to extract, op, insert. The following is a rough
+ // approximation.
+ if (Opd2Info != TTI::OK_UniformValue &&
+ Opd2Info != TTI::OK_UniformConstantValue)
+ Cost = VTy->getNumElements() *
+ (TargetTransformInfo::TCC_Basic +
+ getArithmeticInstrCost(Opcode, VTy->getElementType()) +
+ TargetTransformInfo::TCC_Basic);
+ break;
+ }
+ }
+ return Cost;
+}
+
+unsigned WebAssemblyTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) {
+ unsigned Cost = BasicTTIImplBase::getVectorInstrCost(Opcode, Val, Index);
+
+ // SIMD128's insert/extract currently only take constant indices.
+ if (Index == -1u)
+ return Cost + 25 * TargetTransformInfo::TCC_Expensive;
+
+ return Cost;
+}
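A back-of-the-envelope version of the non-uniform vector-shift cost above, assuming TCC_Basic == 1 and a scalar shift cost of 1 (illustrative values, not the tuned model):

    static unsigned nonUniformShiftCost(unsigned NumElements,
                                        unsigned ScalarOpCost) {
      const unsigned ExtractCost = 1, InsertCost = 1; // TCC_Basic each
      // Per element: extract the lane, shift it as a scalar, reinsert it.
      return NumElements * (ExtractCost + ScalarOpCost + InsertCost);
    }
    // e.g. nonUniformShiftCost(4, 1) == 12 for a <4 x i32> shl whose
    // count varies per lane.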
diff --git a/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h b/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
index 26dc388cc922..fe99e96eb3b8 100644
--- a/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
+++ b/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
@@ -61,7 +61,15 @@ public:
/// \name Vector TTI Implementations
/// @{
- // TODO: Implement Vector TTI for WebAssembly
+ unsigned getNumberOfRegisters(bool Vector);
+ unsigned getRegisterBitWidth(bool Vector);
+ unsigned getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty,
+ TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
+ TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
+ TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
+ TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None);
+ unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
/// @}
};
diff --git a/lib/Target/WebAssembly/known_gcc_test_failures.txt b/lib/Target/WebAssembly/known_gcc_test_failures.txt
index 91b3fff05dca..f07400021dc1 100644
--- a/lib/Target/WebAssembly/known_gcc_test_failures.txt
+++ b/lib/Target/WebAssembly/known_gcc_test_failures.txt
@@ -1,253 +1,15 @@
# Tests which are known to fail from the GCC torture test suite.
-# Core dump.
-920908-1.c
-pr38151.c
-va-arg-22.c
-
-# TargetRegisterInfo.h:315: static unsigned int llvm::TargetRegisterInfo::virtReg2Index(unsigned int): Assertion `isVirtualRegister(Reg) && "Not a virtual register"' failed.
-struct-ret-1.c
-va-arg-11.c
-va-arg-21.c
-va-arg-24.c
-va-arg-trap-1.c
-
-# WebAssemblyCFGStackify.cpp:211: void SortBlocks(llvm::MachineFunction&, const llvm::MachineLoopInfo&): Assertion `L->contains( MLI.getLoopFor(&*prev(MachineFunction::iterator(&MBB)))) && "Loop isn't contiguous"' failed.
-20000815-1.c
-20010129-1.c
-930628-1.c
-980707-1.c
-
-# WebAssemblyISelLowering.cpp:316: virtual llvm::SDValue llvm::WebAssemblyTargetLowering::LowerCall(llvm::TargetLowering::CallLoweringInfo&, llvm::SmallVectorImpl<llvm::SDValue>&) const: Assertion `!Out.Flags.isByVal() && "byval is not valid for return values"' failed.
-20030914-2.c
-20040703-1.c
-20081117-1.c
-920625-1.c
-931004-11.c
-931004-13.c
-980223.c
-bitfld-5.c
-complex-7.c
-pr38969.c
-pr51323.c
-pr52129.c
-pr57130.c
-
-# These were previously "Cannot select FrameIndex." Now most of them fail
-# because they contain call frame pseudos (e.g. call a vararg func),
-# frame pointers, or similar. This list will be updated again soon.
-20000519-1.c
-20000706-4.c
-20000706-5.c
-20000801-2.c
-20000801-4.c
-20011126-2.c
-
-20020529-1.c
-20021024-1.c
-
-20030828-1.c
-20030914-1.c
-
+# Computed gotos are not supported (Cannot select BlockAddress/BRIND)
20040302-1.c
-20040625-1.c
-20040823-1.c
-
-20041113-1.c
-
-20041214-1.c
-
-20050826-2.c
-
-20071213-1.c
-
-20080506-2.c
-20080519-1.c
-
-20081103-1.c
-20090113-1.c
-20090113-2.c
-20090113-3.c
-
-20090623-1.c
-
-920501-6.c
-920501-8.c
-920726-1.c
-930518-1.c
-
-931004-10.c
-931004-12.c
-931004-14.c
-931004-2.c
-931004-4.c
-931004-6.c
-931004-8.c
-
-980205.c
-980608-1.c
-980709-1.c
-980716-1.c
-990127-1.c
-
-991216-2.c
-
-#cbrt.c
-complex-5.c
-complex-6.c
-
-enum-3.c
-fprintf-chk-1.c
-frame-address.c
-loop-15.c
-loop-ivopts-2.c
-mayalias-3.c
-
-multi-ix.c
-
-pr20466-1.c
-
-
-pr28778.c
-pr28982b.c
-
-pr30778.c
-pr31448-2.c
-pr31448.c
-
-pr33870-1.c
-pr33870.c
-
-pr38051.c
-
-pr39100.c
-
-pr39339.c
-
-pr43987.c
-
-pr44575.c
-
-pr44942.c
-pr46309.c
-pr47538.c
-pr47925.c
-
-pr49390.c
-pr49419.c
-
-#pr51877.c
-
-#pr52979-1.c
-#pr52979-2.c
-pr53645-2.c
-pr53645.c
-
-pr56205.c
-
-pr56866.c
-
-pr57876.c
-pr58277-1.c
-
-pr59643.c
-
-printf-chk-1.c
-pta-field-1.c
-pta-field-2.c
-
-stdarg-1.c
-stdarg-2.c
-stdarg-3.c
-stdarg-4.c
-strct-stdarg-1.c
-strct-varg-1.c
-
-va-arg-1.c
-va-arg-10.c
-va-arg-12.c
-va-arg-13.c
-va-arg-14.c
-va-arg-15.c
-va-arg-16.c
-va-arg-17.c
-va-arg-18.c
-va-arg-19.c
-va-arg-2.c
-va-arg-20.c
-va-arg-23.c
-va-arg-26.c
-va-arg-4.c
-va-arg-5.c
-va-arg-6.c
-va-arg-7.c
-va-arg-8.c
-va-arg-9.c
-va-arg-pack-1.c
-vfprintf-1.c
-vfprintf-chk-1.c
-vprintf-1.c
-vprintf-chk-1.c
-
-# Cannot select callseq_end.
-20040811-1.c
-pr43220.c
-vla-dealloc-1.c
-
-# Cannot select brind.
20071210-1.c
920501-4.c
920501-5.c
-
-# Cannot select BlockAddress.
comp-goto-1.c
980526-1.c
990208-1.c
-# WebAssembly hasn't implemented byval arguments.
-20000412-3.c
-20000419-1.c
-20000706-1.c
-20000706-2.c
-20000707-1.c
-20000717-1.c
-20000717-5.c
-20000808-1.c
-20010605-2.c
-20011113-1.c
-20020215-1.c
-20020810-1.c
-20021118-1.c
-20040707-1.c
-20040709-1.c
-20040709-2.c
-20041201-1.c
-20050713-1.c
-20070614-1.c
-920908-2.c
-921112-1.c
-921117-1.c
-921123-2.c
-921204-1.c
-930126-1.c
-930208-1.c
-931004-5.c
-931004-9.c
-931031-1.c
-950607-2.c
-960416-1.c
-990525-1.c
-991118-1.c
-bf64-1.c
-complex-1.c
-complex-2.c
-pr15262-2.c
-pr20621-1.c
-pr23135.c
-pr30185.c
-pr42248.c
-
-# unimplemented operation lowering.
+# WebAssembly hasn't implemented __builtin_return_address (and may never)
20010122-1.c
20030323-1.c
20030811-1.c
@@ -255,7 +17,6 @@ pr17377.c
# Error: invalid output constraint '=t' in asm.
990413-2.c
-990826-0.c
# Error: __builtin_setjmp / __builtin_longjmp is not supported for the current target.
built-in-setjmp.c
@@ -300,10 +61,9 @@ pr51447.c
20070919-1.c
align-nest.c
pr41935.c
-20050107-1.c
-20050119-1.c
-20050119-2.c
920302-1.c
920501-3.c
920728-1.c
pr28865.c
+widechar-2.c
+pr41463.c
diff --git a/lib/Target/X86/AsmParser/Makefile b/lib/Target/X86/AsmParser/Makefile
deleted file mode 100644
index f834dfc300a1..000000000000
--- a/lib/Target/X86/AsmParser/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/X86/AsmParser/Makefile -------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMX86AsmParser
-
-# Hack: we need to include 'main' X86 target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp b/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
index 09cc53a8e6d3..c38a7d1dd44d 100644
--- a/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#include "MCTargetDesc/X86BaseInfo.h"
#include "X86AsmInstrumentation.h"
+#include "MCTargetDesc/X86BaseInfo.h"
#include "X86Operand.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
@@ -18,9 +18,9 @@
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 4d8ffac1a82b..4e0ad8bfe1f1 100644
--- a/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -11,7 +11,6 @@
#include "X86AsmInstrumentation.h"
#include "X86AsmParserCommon.h"
#include "X86Operand.h"
-#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
@@ -24,12 +23,12 @@
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
@@ -683,9 +682,14 @@ private:
std::unique_ptr<X86Operand> DefaultMemSIOperand(SMLoc Loc);
std::unique_ptr<X86Operand> DefaultMemDIOperand(SMLoc Loc);
- void AddDefaultSrcDestOperands(
- OperandVector& Operands, std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
- std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst);
+ bool IsSIReg(unsigned Reg);
+ unsigned GetSIDIForRegClass(unsigned RegClassID, unsigned Reg, bool IsSIReg);
+ void
+ AddDefaultSrcDestOperands(OperandVector &Operands,
+ std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
+ std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst);
+ bool VerifyAndAdjustOperands(OperandVector &OrigOperands,
+ OperandVector &FinalOperands);
std::unique_ptr<X86Operand> ParseOperand();
std::unique_ptr<X86Operand> ParseATTOperand();
std::unique_ptr<X86Operand> ParseIntelOperand();
@@ -747,11 +751,6 @@ private:
bool OmitRegisterFromClobberLists(unsigned RegNo) override;
- /// doSrcDstMatch - Returns true if operands are matching in their
- /// word size (%si and %di, %esi and %edi, etc.). Order depends on
- /// the parsing mode (Intel vs. AT&T).
- bool doSrcDstMatch(X86Operand &Op1, X86Operand &Op2);
-
/// Parses AVX512-specific operand primitives: masked registers ({%k<NUM>}, {z})
/// and memory broadcasting ({1to<NUM>}) primitives, updating the Operands vector if required.
/// \return \c true if no parsing errors occurred, \c false otherwise.
@@ -867,27 +866,6 @@ static bool CheckBaseRegAndIndexReg(unsigned BaseReg, unsigned IndexReg,
return false;
}
-bool X86AsmParser::doSrcDstMatch(X86Operand &Op1, X86Operand &Op2)
-{
- // Return true and let a normal complaint about bogus operands happen.
- if (!Op1.isMem() || !Op2.isMem())
- return true;
-
- // Actually these might be the other way round if Intel syntax is
- // being used. It doesn't matter.
- unsigned diReg = Op1.Mem.BaseReg;
- unsigned siReg = Op2.Mem.BaseReg;
-
- if (X86MCRegisterClasses[X86::GR16RegClassID].contains(siReg))
- return X86MCRegisterClasses[X86::GR16RegClassID].contains(diReg);
- if (X86MCRegisterClasses[X86::GR32RegClassID].contains(siReg))
- return X86MCRegisterClasses[X86::GR32RegClassID].contains(diReg);
- if (X86MCRegisterClasses[X86::GR64RegClassID].contains(siReg))
- return X86MCRegisterClasses[X86::GR64RegClassID].contains(diReg);
- // Again, return true and let another error happen.
- return true;
-}
-
bool X86AsmParser::ParseRegister(unsigned &RegNo,
SMLoc &StartLoc, SMLoc &EndLoc) {
MCAsmParser &Parser = getParser();
@@ -929,10 +907,16 @@ bool X86AsmParser::ParseRegister(unsigned &RegNo,
if (RegNo == X86::RIZ ||
X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) ||
X86II::isX86_64NonExtLowByteReg(RegNo) ||
- X86II::isX86_64ExtendedReg(RegNo))
+ X86II::isX86_64ExtendedReg(RegNo) ||
+ X86II::is32ExtendedReg(RegNo))
return Error(StartLoc, "register %"
+ Tok.getString() + " is only available in 64-bit mode",
SMRange(StartLoc, EndLoc));
+ } else if (!getSTI().getFeatureBits()[X86::FeatureAVX512]) {
+ if (X86II::is32ExtendedReg(RegNo))
+ return Error(StartLoc, "register %"
+ + Tok.getString() + " is only available with AVX512",
+ SMRange(StartLoc, EndLoc));
}
// Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens.
@@ -1025,6 +1009,33 @@ std::unique_ptr<X86Operand> X86AsmParser::DefaultMemDIOperand(SMLoc Loc) {
Loc, Loc, 0);
}
+bool X86AsmParser::IsSIReg(unsigned Reg) {
+ switch (Reg) {
+ default: llvm_unreachable("Only (R|E)SI and (R|E)DI are expected!");
+ case X86::RSI:
+ case X86::ESI:
+ case X86::SI:
+ return true;
+ case X86::RDI:
+ case X86::EDI:
+ case X86::DI:
+ return false;
+ }
+}
+
+unsigned X86AsmParser::GetSIDIForRegClass(unsigned RegClassID, unsigned Reg,
+ bool IsSIReg) {
+ switch (RegClassID) {
+ default: llvm_unreachable("Unexpected register class");
+ case X86::GR64RegClassID:
+ return IsSIReg ? X86::RSI : X86::RDI;
+ case X86::GR32RegClassID:
+ return IsSIReg ? X86::ESI : X86::EDI;
+ case X86::GR16RegClassID:
+ return IsSIReg ? X86::SI : X86::DI;
+ }
+}
+
void X86AsmParser::AddDefaultSrcDestOperands(
OperandVector& Operands, std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst) {
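A standalone mirror of the two helpers above, using placeholder enums rather than the generated X86 register numbers, to show the canonicalization they perform:

    enum Reg { SI, ESI, RSI, DI, EDI, RDI };
    enum RegClass { GR16, GR32, GR64 };
    static Reg sidiForClass(RegClass RC, bool IsSI) {
      switch (RC) {
      case GR64: return IsSI ? RSI : RDI;
      case GR32: return IsSI ? ESI : EDI;
      case GR16: return IsSI ? SI : DI;
      }
      return SI; // unreachable; all classes handled above
    }
    // sidiForClass(GR64, /*IsSI=*/true) yields RSI.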
@@ -1038,6 +1049,88 @@ void X86AsmParser::AddDefaultSrcDestOperands(
}
}
+bool X86AsmParser::VerifyAndAdjustOperands(OperandVector &OrigOperands,
+ OperandVector &FinalOperands) {
+
+ if (OrigOperands.size() > 1) {
+ // Check if sizes match; OrigOperands also contains the instruction name
+ assert(OrigOperands.size() == FinalOperands.size() + 1 &&
+ "Operand size mismatch");
+
+ SmallVector<std::pair<SMLoc, std::string>, 2> Warnings;
+ // Verify types match
+ int RegClassID = -1;
+ for (unsigned int i = 0; i < FinalOperands.size(); ++i) {
+ X86Operand &OrigOp = static_cast<X86Operand &>(*OrigOperands[i + 1]);
+ X86Operand &FinalOp = static_cast<X86Operand &>(*FinalOperands[i]);
+
+ if (FinalOp.isReg() &&
+ (!OrigOp.isReg() || FinalOp.getReg() != OrigOp.getReg()))
+ // Return false and let a normal complaint about bogus operands happen
+ return false;
+
+ if (FinalOp.isMem()) {
+
+ if (!OrigOp.isMem())
+ // Return false and let a normal complaint about bogus operands happen
+ return false;
+
+ unsigned OrigReg = OrigOp.Mem.BaseReg;
+ unsigned FinalReg = FinalOp.Mem.BaseReg;
+
+ // If we've already encountered a register class, make sure all register
+ // bases are of the same register class
+ if (RegClassID != -1 &&
+ !X86MCRegisterClasses[RegClassID].contains(OrigReg)) {
+ return Error(OrigOp.getStartLoc(),
+ "mismatching source and destination index registers");
+ }
+
+ if (X86MCRegisterClasses[X86::GR64RegClassID].contains(OrigReg))
+ RegClassID = X86::GR64RegClassID;
+ else if (X86MCRegisterClasses[X86::GR32RegClassID].contains(OrigReg))
+ RegClassID = X86::GR32RegClassID;
+ else if (X86MCRegisterClasses[X86::GR16RegClassID].contains(OrigReg))
+ RegClassID = X86::GR16RegClassID;
+ else
+ // Unexpected register class type
+ // Return false and let a normal complaint about bogus operands happen
+ return false;
+
+ bool IsSI = IsSIReg(FinalReg);
+ FinalReg = GetSIDIForRegClass(RegClassID, FinalReg, IsSI);
+
+ if (FinalReg != OrigReg) {
+ std::string RegName = IsSI ? "ES:(R|E)SI" : "ES:(R|E)DI";
+ Warnings.push_back(std::make_pair(
+ OrigOp.getStartLoc(),
+ "memory operand is only for determining the size, " + RegName +
+ " will be used for the location"));
+ }
+
+ FinalOp.Mem.Size = OrigOp.Mem.Size;
+ FinalOp.Mem.SegReg = OrigOp.Mem.SegReg;
+ FinalOp.Mem.BaseReg = FinalReg;
+ }
+ }
+
+ // Produce warnings only if all the operands passed the adjustment, to keep
+ // legal cases like "movsd (%rax), %xmm0" from mistakenly producing warnings
+ for (auto &WarningMsg : Warnings) {
+ Warning(WarningMsg.first, WarningMsg.second);
+ }
+
+ // Remove old operands
+ for (unsigned int i = 0; i < FinalOperands.size(); ++i)
+ OrigOperands.pop_back();
+ }
+ // Move the adjusted operands over one by one (unique_ptrs can't be copied).
+ for (unsigned int i = 0; i < FinalOperands.size(); ++i)
+ OrigOperands.push_back(std::move(FinalOperands[i]));
+
+ return false;
+}
+
std::unique_ptr<X86Operand> X86AsmParser::ParseOperand() {
if (isParsingIntelSyntax())
return ParseIntelOperand();
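The behavior VerifyAndAdjustOperands implies for the string instructions, shown on hypothetical AT&T inputs (outcomes inferred from the code above, not observed parser output):

    static const char *StringInsnCases[] = {
        "movsl (%esi), (%edi)", // bases match the defaults: accepted silently
        "movsl (%edx), (%edi)", // wrong base: warns that ES:(R|E)SI is used
        "movsl (%si), (%edi)",  // GR16 vs. GR32 bases: mismatching-index error
    };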
@@ -1301,7 +1394,7 @@ X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
return ErrorOperand(BracLoc, "Expected '[' token!");
Parser.Lex(); // Eat '['
- SMLoc StartInBrac = Tok.getLoc();
+ SMLoc StartInBrac = Parser.getTok().getLoc();
// Parse [ Symbol + ImmDisp ] and [ BaseReg + Scale*IndexReg + ImmDisp ]. We
// may have already parsed an immediate displacement before the bracketed
// expression.
@@ -1330,7 +1423,10 @@ X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
// Parse struct field access. Intel requires a dot, but MSVC doesn't. MSVC
// will in fact do a global lookup of the field name inside all global typedefs,
// but we don't emulate that.
- if (Tok.getString().find('.') != StringRef::npos) {
+ if ((Parser.getTok().getKind() == AsmToken::Identifier ||
+ Parser.getTok().getKind() == AsmToken::Dot ||
+ Parser.getTok().getKind() == AsmToken::Real) &&
+ Parser.getTok().getString().find('.') != StringRef::npos) {
const MCExpr *NewDisp;
if (ParseIntelDotOperator(Disp, NewDisp))
return nullptr;
@@ -2087,22 +2183,36 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
unsigned ComparisonCode = StringSwitch<unsigned>(
PatchedName.slice(CCIdx, PatchedName.size() - 2))
.Case("eq", 0x00)
+ .Case("eq_oq", 0x00)
.Case("lt", 0x01)
+ .Case("lt_os", 0x01)
.Case("le", 0x02)
+ .Case("le_os", 0x02)
.Case("unord", 0x03)
+ .Case("unord_q", 0x03)
.Case("neq", 0x04)
+ .Case("neq_uq", 0x04)
.Case("nlt", 0x05)
+ .Case("nlt_us", 0x05)
.Case("nle", 0x06)
+ .Case("nle_us", 0x06)
.Case("ord", 0x07)
+ .Case("ord_q", 0x07)
/* AVX only from here */
.Case("eq_uq", 0x08)
.Case("nge", 0x09)
+ .Case("nge_us", 0x09)
.Case("ngt", 0x0A)
+ .Case("ngt_us", 0x0A)
.Case("false", 0x0B)
+ .Case("false_oq", 0x0B)
.Case("neq_oq", 0x0C)
.Case("ge", 0x0D)
+ .Case("ge_os", 0x0D)
.Case("gt", 0x0E)
+ .Case("gt_os", 0x0E)
.Case("true", 0x0F)
+ .Case("true_uq", 0x0F)
.Case("eq_os", 0x10)
.Case("lt_oq", 0x11)
.Case("le_oq", 0x12)
@@ -2196,6 +2306,7 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Name == "repne" || Name == "repnz" ||
Name == "rex64" || Name == "data16";
+ bool CurlyAsEndOfStatement = false;
// This does the actual operand parsing. Don't parse any more if we have a
// prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
// just want to parse the "lock" as the first instruction and the "incl" as
@@ -2223,7 +2334,12 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
break;
}
- if (getLexer().isNot(AsmToken::EndOfStatement))
+ // In MS inline asm, curly braces mark the beginning/end of a block;
+ // therefore they should be interpreted as the end of a statement
+ CurlyAsEndOfStatement =
+ isParsingIntelSyntax() && isParsingInlineAsm() &&
+ (getLexer().is(AsmToken::LCurly) || getLexer().is(AsmToken::RCurly));
+ if (getLexer().isNot(AsmToken::EndOfStatement) && !CurlyAsEndOfStatement)
return ErrorAndEatStatement(getLexer().getLoc(),
"unexpected token in argument list");
}
@@ -2232,6 +2348,10 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
if (getLexer().is(AsmToken::EndOfStatement) ||
(isPrefix && getLexer().is(AsmToken::Slash)))
Parser.Lex();
+ else if (CurlyAsEndOfStatement)
+ // Add an actual EndOfStatement before the curly brace
+ Info.AsmRewrites->emplace_back(AOK_EndOfStatement,
+ getLexer().getTok().getLoc(), 0);
// This is for gas compatibility and cannot be done in td.
// Adding "p" for some floating point with no argument.
@@ -2247,10 +2367,11 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
static_cast<X86Operand &>(*Operands[0]).setTokenValue(Repl);
}
- // This is a terrible hack to handle "out[bwl]? %al, (%dx)" ->
+ // This is a terrible hack to handle "out[s]?[bwl]? %al, (%dx)" ->
// "outb %al, %dx". Out doesn't take a memory form, but this is a widely
// documented form in various unofficial manuals, so a lot of code uses it.
- if ((Name == "outb" || Name == "outw" || Name == "outl" || Name == "out") &&
+ if ((Name == "outb" || Name == "outsb" || Name == "outw" || Name == "outsw" ||
+ Name == "outl" || Name == "outsl" || Name == "out" || Name == "outs") &&
Operands.size() == 3) {
X86Operand &Op = (X86Operand &)*Operands.back();
if (Op.isMem() && Op.Mem.SegReg == 0 &&
@@ -2261,8 +2382,9 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Operands.back() = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc);
}
}
- // Same hack for "in[bwl]? (%dx), %al" -> "inb %dx, %al".
- if ((Name == "inb" || Name == "inw" || Name == "inl" || Name == "in") &&
+ // Same hack for "in[s]?[bwl]? (%dx), %al" -> "inb %dx, %al".
+ if ((Name == "inb" || Name == "insb" || Name == "inw" || Name == "insw" ||
+ Name == "inl" || Name == "insl" || Name == "in" || Name == "ins") &&
Operands.size() == 3) {
X86Operand &Op = (X86Operand &)*Operands[1];
if (Op.isMem() && Op.Mem.SegReg == 0 &&
@@ -2274,84 +2396,92 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
}
}
+ SmallVector<std::unique_ptr<MCParsedAsmOperand>, 2> TmpOperands;
+ bool HadVerifyError = false;
+
// Append default arguments to "ins[bwld]"
- if (Name.startswith("ins") && Operands.size() == 1 &&
- (Name == "insb" || Name == "insw" || Name == "insl" || Name == "insd")) {
- AddDefaultSrcDestOperands(Operands,
+ if (Name.startswith("ins") &&
+ (Operands.size() == 1 || Operands.size() == 3) &&
+ (Name == "insb" || Name == "insw" || Name == "insl" || Name == "insd" ||
+ Name == "ins")) {
+
+ AddDefaultSrcDestOperands(TmpOperands,
X86Operand::CreateReg(X86::DX, NameLoc, NameLoc),
DefaultMemDIOperand(NameLoc));
+ HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
}
// Append default arguments to "outs[bwld]"
- if (Name.startswith("outs") && Operands.size() == 1 &&
+ if (Name.startswith("outs") &&
+ (Operands.size() == 1 || Operands.size() == 3) &&
(Name == "outsb" || Name == "outsw" || Name == "outsl" ||
- Name == "outsd" )) {
- AddDefaultSrcDestOperands(Operands,
- DefaultMemSIOperand(NameLoc),
+ Name == "outsd" || Name == "outs")) {
+ AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
X86Operand::CreateReg(X86::DX, NameLoc, NameLoc));
+ HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
}
// Transform "lods[bwlq]" into "lods[bwlq] ($SIREG)" for appropriate
// values of $SIREG according to the mode. It would be nice if this
// could be achieved with InstAlias in the tables.
- if (Name.startswith("lods") && Operands.size() == 1 &&
+ if (Name.startswith("lods") &&
+ (Operands.size() == 1 || Operands.size() == 2) &&
(Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
- Name == "lodsl" || Name == "lodsd" || Name == "lodsq"))
- Operands.push_back(DefaultMemSIOperand(NameLoc));
+ Name == "lodsl" || Name == "lodsd" || Name == "lodsq")) {
+ TmpOperands.push_back(DefaultMemSIOperand(NameLoc));
+ HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
+ }
// Transform "stos[bwlq]" into "stos[bwlq] ($DIREG)" for appropriate
// values of $DIREG according to the mode. It would be nice if this
// could be achieved with InstAlias in the tables.
- if (Name.startswith("stos") && Operands.size() == 1 &&
+ if (Name.startswith("stos") &&
+ (Operands.size() == 1 || Operands.size() == 2) &&
(Name == "stos" || Name == "stosb" || Name == "stosw" ||
- Name == "stosl" || Name == "stosd" || Name == "stosq"))
- Operands.push_back(DefaultMemDIOperand(NameLoc));
+ Name == "stosl" || Name == "stosd" || Name == "stosq")) {
+ TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
+ HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
+ }
// Transform "scas[bwlq]" into "scas[bwlq] ($DIREG)" for appropriate
// values of $DIREG according to the mode. It would be nice if this
// could be achieved with InstAlias in the tables.
- if (Name.startswith("scas") && Operands.size() == 1 &&
+ if (Name.startswith("scas") &&
+ (Operands.size() == 1 || Operands.size() == 2) &&
(Name == "scas" || Name == "scasb" || Name == "scasw" ||
- Name == "scasl" || Name == "scasd" || Name == "scasq"))
- Operands.push_back(DefaultMemDIOperand(NameLoc));
+ Name == "scasl" || Name == "scasd" || Name == "scasq")) {
+ TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
+ HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
+ }
// Add default SI and DI operands to "cmps[bwlq]".
if (Name.startswith("cmps") &&
+ (Operands.size() == 1 || Operands.size() == 3) &&
(Name == "cmps" || Name == "cmpsb" || Name == "cmpsw" ||
Name == "cmpsl" || Name == "cmpsd" || Name == "cmpsq")) {
- if (Operands.size() == 1) {
- AddDefaultSrcDestOperands(Operands,
- DefaultMemDIOperand(NameLoc),
- DefaultMemSIOperand(NameLoc));
- } else if (Operands.size() == 3) {
- X86Operand &Op = (X86Operand &)*Operands[1];
- X86Operand &Op2 = (X86Operand &)*Operands[2];
- if (!doSrcDstMatch(Op, Op2))
- return Error(Op.getStartLoc(),
- "mismatching source and destination index registers");
- }
+ AddDefaultSrcDestOperands(TmpOperands, DefaultMemDIOperand(NameLoc),
+ DefaultMemSIOperand(NameLoc));
+ HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
}
// Add default SI and DI operands to "movs[bwlq]".
- if ((Name.startswith("movs") &&
- (Name == "movs" || Name == "movsb" || Name == "movsw" ||
- Name == "movsl" || Name == "movsd" || Name == "movsq")) ||
- (Name.startswith("smov") &&
- (Name == "smov" || Name == "smovb" || Name == "smovw" ||
- Name == "smovl" || Name == "smovd" || Name == "smovq"))) {
- if (Operands.size() == 1) {
- if (Name == "movsd")
- Operands.back() = X86Operand::CreateToken("movsl", NameLoc);
- AddDefaultSrcDestOperands(Operands,
- DefaultMemSIOperand(NameLoc),
- DefaultMemDIOperand(NameLoc));
- } else if (Operands.size() == 3) {
- X86Operand &Op = (X86Operand &)*Operands[1];
- X86Operand &Op2 = (X86Operand &)*Operands[2];
- if (!doSrcDstMatch(Op, Op2))
- return Error(Op.getStartLoc(),
- "mismatching source and destination index registers");
- }
+ if (((Name.startswith("movs") &&
+ (Name == "movs" || Name == "movsb" || Name == "movsw" ||
+ Name == "movsl" || Name == "movsd" || Name == "movsq")) ||
+ (Name.startswith("smov") &&
+ (Name == "smov" || Name == "smovb" || Name == "smovw" ||
+ Name == "smovl" || Name == "smovd" || Name == "smovq"))) &&
+ (Operands.size() == 1 || Operands.size() == 3)) {
+ if (Name == "movsd" && Operands.size() == 1)
+ Operands.back() = X86Operand::CreateToken("movsl", NameLoc);
+ AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
+ DefaultMemDIOperand(NameLoc));
+ HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
+ }
+
+ // Check if we encountered an error for one of the string instructions
+ if (HadVerifyError) {
+ return HadVerifyError;
}
// FIXME: Hack to handle recognize s{hr,ar,hl} $1, <op>. Canonicalize to
@@ -2387,64 +2517,22 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
}
}
+ // Transforms "xlat mem8" into "xlatb"
+ if ((Name == "xlat" || Name == "xlatb") && Operands.size() == 2) {
+ X86Operand &Op1 = static_cast<X86Operand &>(*Operands[1]);
+ if (Op1.isMem8()) {
+ Warning(Op1.getStartLoc(), "memory operand is only for determining the "
+ "size, (R|E)BX will be used for the location");
+ Operands.pop_back();
+ static_cast<X86Operand &>(*Operands[0]).setTokenValue("xlatb");
+ }
+ }
+
return false;
}
bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) {
- switch (Inst.getOpcode()) {
- default: return false;
- case X86::VMOVZPQILo2PQIrr:
- case X86::VMOVAPDrr:
- case X86::VMOVAPDYrr:
- case X86::VMOVAPSrr:
- case X86::VMOVAPSYrr:
- case X86::VMOVDQArr:
- case X86::VMOVDQAYrr:
- case X86::VMOVDQUrr:
- case X86::VMOVDQUYrr:
- case X86::VMOVUPDrr:
- case X86::VMOVUPDYrr:
- case X86::VMOVUPSrr:
- case X86::VMOVUPSYrr: {
- if (X86II::isX86_64ExtendedReg(Inst.getOperand(0).getReg()) ||
- !X86II::isX86_64ExtendedReg(Inst.getOperand(1).getReg()))
- return false;
-
- unsigned NewOpc;
- switch (Inst.getOpcode()) {
- default: llvm_unreachable("Invalid opcode");
- case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr; break;
- case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV; break;
- case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
- case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV; break;
- case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
- case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV; break;
- case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
- case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV; break;
- case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
- case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV; break;
- case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
- case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV; break;
- case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
- }
- Inst.setOpcode(NewOpc);
- return true;
- }
- case X86::VMOVSDrr:
- case X86::VMOVSSrr: {
- if (X86II::isX86_64ExtendedReg(Inst.getOperand(0).getReg()) ||
- !X86II::isX86_64ExtendedReg(Inst.getOperand(2).getReg()))
- return false;
- unsigned NewOpc;
- switch (Inst.getOpcode()) {
- default: llvm_unreachable("Invalid opcode");
- case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
- case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
- }
- Inst.setOpcode(NewOpc);
- return true;
- }
- }
+ return false;
}
static const char *getSubtargetFeatureName(uint64_t Val);
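Hypothetical inputs exercising the new xlat handling (warning text per the code above):

    static const char *XlatCases[] = {
        "xlat (%ebx)", // mem8 operand: warns, then rewritten to plain "xlatb"
        "xlatb",       // already canonical, left unchanged
    };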
diff --git a/lib/Target/X86/AsmParser/X86AsmParserCommon.h b/lib/Target/X86/AsmParser/X86AsmParserCommon.h
index 54538c804a03..c45a3f14ef11 100644
--- a/lib/Target/X86/AsmParser/X86AsmParserCommon.h
+++ b/lib/Target/X86/AsmParser/X86AsmParserCommon.h
@@ -10,6 +10,8 @@
#ifndef LLVM_LIB_TARGET_X86_ASMPARSER_X86ASMPARSERCOMMON_H
#define LLVM_LIB_TARGET_X86_ASMPARSER_X86ASMPARSERCOMMON_H
+#include "llvm/Support/MathExtras.h"
+
namespace llvm {
inline bool isImmSExti16i8Value(uint64_t Value) {
diff --git a/lib/Target/X86/AsmParser/X86Operand.h b/lib/Target/X86/AsmParser/X86Operand.h
index 7ec02408ffa4..a04c2f5c84a5 100644
--- a/lib/Target/X86/AsmParser/X86Operand.h
+++ b/lib/Target/X86/AsmParser/X86Operand.h
@@ -233,46 +233,47 @@ struct X86Operand : public MCParsedAsmOperand {
bool isMem512() const {
return Kind == Memory && (!Mem.Size || Mem.Size == 512);
}
+ bool isMemIndexReg(unsigned LowR, unsigned HighR) const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.IndexReg >= LowR && Mem.IndexReg <= HighR;
+ }
- bool isMemVX32() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
- getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
+ bool isMem64_RC128() const {
+ return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM15);
+ }
+ bool isMem128_RC128() const {
+ return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM15);
}
- bool isMemVX32X() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
- getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM31;
+ bool isMem128_RC256() const {
+ return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM15);
}
- bool isMemVY32() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
- getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
+ bool isMem256_RC128() const {
+ return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM15);
}
- bool isMemVY32X() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
- getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM31;
+ bool isMem256_RC256() const {
+ return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM15);
+ }
+
+ bool isMem64_RC128X() const {
+ return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM31);
}
- bool isMemVX64() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
- getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
+ bool isMem128_RC128X() const {
+ return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM31);
}
- bool isMemVX64X() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
- getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM31;
+ bool isMem128_RC256X() const {
+ return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM31);
}
- bool isMemVY64() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
- getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
+ bool isMem256_RC128X() const {
+ return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM31);
}
- bool isMemVY64X() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
- getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM31;
+ bool isMem256_RC256X() const {
+ return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM31);
}
- bool isMemVZ32() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
- getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31;
+ bool isMem512_RC256X() const {
+ return isMem512() && isMemIndexReg(X86::YMM0, X86::YMM31);
}
- bool isMemVZ64() const {
- return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
- getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31;
+ bool isMem512_RC512() const {
+ return isMem512() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
}
bool isAbsMem() const {
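The refactor above composes each predicate from an operand-size check and an index-register-class range check; a minimal standalone equivalent (register numbers assumed contiguous, as in the generated enums):

    static bool isMemIndexInRange(unsigned Idx, unsigned Lo, unsigned Hi) {
      return Idx >= Lo && Idx <= Hi;
    }
    static bool isMem256RC128X(unsigned MemSize, unsigned Idx,
                               unsigned XMM0, unsigned XMM31) {
      // Unsized (0) or 256-bit memory with an XMM0..XMM31 index register.
      return (MemSize == 0 || MemSize == 256) &&
             isMemIndexInRange(Idx, XMM0, XMM31);
    }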
diff --git a/lib/Target/X86/CMakeLists.txt b/lib/Target/X86/CMakeLists.txt
index 55949155da9e..894090f78977 100644
--- a/lib/Target/X86/CMakeLists.txt
+++ b/lib/Target/X86/CMakeLists.txt
@@ -17,6 +17,9 @@ set(sources
X86CallFrameOptimization.cpp
X86ExpandPseudo.cpp
X86FastISel.cpp
+ X86FixupBWInsts.cpp
+ X86FixupLEAs.cpp
+ X86FixupSetCC.cpp
X86FloatingPoint.cpp
X86FrameLowering.cpp
X86ISelDAGToDAG.cpp
@@ -24,6 +27,7 @@ set(sources
X86InstrInfo.cpp
X86MCInstLower.cpp
X86MachineFunctionInfo.cpp
+ X86OptimizeLEAs.cpp
X86PadShortFunction.cpp
X86RegisterInfo.cpp
X86SelectionDAGInfo.cpp
@@ -33,9 +37,8 @@ set(sources
X86TargetObjectFile.cpp
X86TargetTransformInfo.cpp
X86VZeroUpper.cpp
- X86FixupLEAs.cpp
+ X86WinAllocaExpander.cpp
X86WinEHState.cpp
- X86OptimizeLEAs.cpp
)
add_llvm_target(X86CodeGen ${sources})
diff --git a/lib/Target/X86/Disassembler/Makefile b/lib/Target/X86/Disassembler/Makefile
deleted file mode 100644
index 51e7b828cf2a..000000000000
--- a/lib/Target/X86/Disassembler/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-##===- lib/Target/X86/Disassembler/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMX86Disassembler
-
-# Hack: we need to include 'main' x86 target directory to grab private headers.
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
-
-.PHONY: $(PROJ_SRC_DIR)/X86DisassemblerDecoder.c
diff --git a/lib/Target/X86/Disassembler/X86Disassembler.cpp b/lib/Target/X86/Disassembler/X86Disassembler.cpp
index ce8fcf164668..008dead5d0a5 100644
--- a/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -10,14 +10,74 @@
// This file is part of the X86 Disassembler.
// It contains code to translate the data produced by the decoder into
// MCInsts.
-// Documentation for the disassembler can be found in X86Disassembler.h.
+//
+//
+// The X86 disassembler is a table-driven disassembler for the 16-, 32-, and
+// 64-bit X86 instruction sets. The main decode sequence for an assembly
+// instruction in this disassembler is:
+//
+// 1. Read the prefix bytes and determine the attributes of the instruction.
+// These attributes, recorded in enum attributeBits
+// (X86DisassemblerDecoderCommon.h), form a bitmask. The table CONTEXTS_SYM
+// provides a mapping from bitmasks to contexts, which are represented by
+// enum InstructionContext (ibid.).
+//
+// 2. Read the opcode, and determine what kind of opcode it is. The
+// disassembler distinguishes four kinds of opcodes, which are enumerated in
+// OpcodeType (X86DisassemblerDecoderCommon.h): one-byte (0xnn), two-byte
+// (0x0f 0xnn), three-byte-38 (0x0f 0x38 0xnn), or three-byte-3a
+// (0x0f 0x3a 0xnn). Mandatory prefixes are treated as part of the context.
+//
+// 3. Depending on the opcode type, look in one of four ClassDecision structures
+// (X86DisassemblerDecoderCommon.h). Use the opcode class to determine which
+// OpcodeDecision (ibid.) to look the opcode up in. Look up the opcode to get
+// a ModRMDecision (ibid.).
+//
+// 4. Some instructions, such as escape opcodes or extended opcodes, or even
+// instructions that have ModRM*Reg / ModRM*Mem forms in LLVM, need the
+// ModR/M byte to complete decode. The ModRMDecision's type is an entry from
+// ModRMDecisionType (X86DisassemblerDecoderCommon.h) that indicates if the
+// ModR/M byte is required and how to interpret it.
+//
+// 5. After resolving the ModRMDecision, the disassembler has a unique ID
+// of type InstrUID (X86DisassemblerDecoderCommon.h). Looking this ID up in
+// INSTRUCTIONS_SYM yields the name of the instruction and the encodings and
+// meanings of its operands.
+//
+// 6. For each operand, its encoding is an entry from OperandEncoding
+// (X86DisassemblerDecoderCommon.h) and its type is an entry from
+// OperandType (ibid.). The encoding indicates how to read it from the
+// instruction; the type indicates how to interpret the value once it has
+// been read. For example, a register operand could be stored in the R/M
+// field of the ModR/M byte, the REG field of the ModR/M byte, or added to
+// the main opcode. This is orthogonal to its meaning (a GPR or an XMM
+// register, for instance). Given this information, the operands can be
+// extracted and interpreted.
+//
+// 7. As the last step, the disassembler translates the instruction information
+// and operands into a format understandable by the client - in this case, an
+// MCInst for use by the MC infrastructure.
+//
+// The disassembler is broken broadly into two parts: the table emitter that
+// emits the instruction decode tables discussed above during compilation, and
+// the disassembler itself. The table emitter is documented in more detail in
+// utils/TableGen/X86DisassemblerEmitter.h.
+//
+// X86Disassembler.cpp contains the code responsible for step 7, and for
+// invoking the decoder to execute steps 1-6.
+// X86DisassemblerDecoderCommon.h contains the definitions needed by both the
+// table emitter and the disassembler.
+// X86DisassemblerDecoder.h contains the public interface of the decoder,
+// factored out into C for possible use by other projects.
+// X86DisassemblerDecoder.c contains the source code of the decoder, which is
+// responsible for steps 1-6.
//
//===----------------------------------------------------------------------===//
-#include "X86Disassembler.h"
#include "X86DisassemblerDecoder.h"
+#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
@@ -31,13 +91,6 @@ using namespace llvm::X86Disassembler;
#define DEBUG_TYPE "x86-disassembler"
-#define GET_REGINFO_ENUM
-#include "X86GenRegisterInfo.inc"
-#define GET_INSTRINFO_ENUM
-#include "X86GenInstrInfo.inc"
-#define GET_SUBTARGETINFO_ENUM
-#include "X86GenSubtargetInfo.inc"
-
void llvm::X86Disassembler::Debug(const char *file, unsigned line,
const char *s) {
dbgs() << file << ":" << line << ": " << s;
@@ -67,14 +120,34 @@ namespace X86 {
};
}
-extern Target TheX86_32Target, TheX86_64Target;
-
}
static bool translateInstruction(MCInst &target,
InternalInstruction &source,
const MCDisassembler *Dis);
+namespace {
+
+/// Generic disassembler for all X86 platforms. All that each platform class should
+/// have to do is subclass the constructor, and provide a different
+/// disassemblerMode value.
+class X86GenericDisassembler : public MCDisassembler {
+ std::unique_ptr<const MCInstrInfo> MII;
+public:
+ X86GenericDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx,
+ std::unique_ptr<const MCInstrInfo> MII);
+public:
+ DecodeStatus getInstruction(MCInst &instr, uint64_t &size,
+ ArrayRef<uint8_t> Bytes, uint64_t Address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const override;
+
+private:
+ DisassemblerMode fMode;
+};
+
+}
+
X86GenericDisassembler::X86GenericDisassembler(
const MCSubtargetInfo &STI,
MCContext &Ctx,
@@ -826,7 +899,6 @@ static bool translateRM(MCInst &mcInst, const OperandSpecifier &operand,
case TYPE_R64:
case TYPE_Rv:
case TYPE_MM64:
- case TYPE_XMM:
case TYPE_XMM32:
case TYPE_XMM64:
case TYPE_XMM128:
@@ -911,14 +983,6 @@ static bool translateOperand(MCInst &mcInst, const OperandSpecifier &operand,
return translateMaskRegister(mcInst, insn.writemask);
CASE_ENCODING_RM:
return translateRM(mcInst, operand, insn, Dis);
- case ENCODING_CB:
- case ENCODING_CW:
- case ENCODING_CD:
- case ENCODING_CP:
- case ENCODING_CO:
- case ENCODING_CT:
- debug("Translation of code offsets isn't supported.");
- return true;
case ENCODING_IB:
case ENCODING_IW:
case ENCODING_ID:
@@ -997,7 +1061,7 @@ static MCDisassembler *createX86Disassembler(const Target &T,
const MCSubtargetInfo &STI,
MCContext &Ctx) {
std::unique_ptr<const MCInstrInfo> MII(T.createMCInstrInfo());
- return new X86Disassembler::X86GenericDisassembler(STI, Ctx, std::move(MII));
+ return new X86GenericDisassembler(STI, Ctx, std::move(MII));
}
extern "C" void LLVMInitializeX86Disassembler() {
diff --git a/lib/Target/X86/Disassembler/X86Disassembler.h b/lib/Target/X86/Disassembler/X86Disassembler.h
deleted file mode 100644
index d7f426b2641d..000000000000
--- a/lib/Target/X86/Disassembler/X86Disassembler.h
+++ /dev/null
@@ -1,112 +0,0 @@
-//===-- X86Disassembler.h - Disassembler for x86 and x86_64 -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// The X86 disassembler is a table-driven disassembler for the 16-, 32-, and
-// 64-bit X86 instruction sets. The main decode sequence for an assembly
-// instruction in this disassembler is:
-//
-// 1. Read the prefix bytes and determine the attributes of the instruction.
-// These attributes, recorded in enum attributeBits
-// (X86DisassemblerDecoderCommon.h), form a bitmask. The table CONTEXTS_SYM
-// provides a mapping from bitmasks to contexts, which are represented by
-// enum InstructionContext (ibid.).
-//
-// 2. Read the opcode, and determine what kind of opcode it is. The
-// disassembler distinguishes four kinds of opcodes, which are enumerated in
-// OpcodeType (X86DisassemblerDecoderCommon.h): one-byte (0xnn), two-byte
-// (0x0f 0xnn), three-byte-38 (0x0f 0x38 0xnn), or three-byte-3a
-// (0x0f 0x3a 0xnn). Mandatory prefixes are treated as part of the context.
-//
-// 3. Depending on the opcode type, look in one of four ClassDecision structures
-// (X86DisassemblerDecoderCommon.h). Use the opcode class to determine which
-// OpcodeDecision (ibid.) to look the opcode in. Look up the opcode, to get
-// a ModRMDecision (ibid.).
-//
-// 4. Some instructions, such as escape opcodes or extended opcodes, or even
-// instructions that have ModRM*Reg / ModRM*Mem forms in LLVM, need the
-// ModR/M byte to complete decode. The ModRMDecision's type is an entry from
-// ModRMDecisionType (X86DisassemblerDecoderCommon.h) that indicates if the
-// ModR/M byte is required and how to interpret it.
-//
-// 5. After resolving the ModRMDecision, the disassembler has a unique ID
-// of type InstrUID (X86DisassemblerDecoderCommon.h). Looking this ID up in
-// INSTRUCTIONS_SYM yields the name of the instruction and the encodings and
-// meanings of its operands.
-//
-// 6. For each operand, its encoding is an entry from OperandEncoding
-// (X86DisassemblerDecoderCommon.h) and its type is an entry from
-// OperandType (ibid.). The encoding indicates how to read it from the
-// instruction; the type indicates how to interpret the value once it has
-// been read. For example, a register operand could be stored in the R/M
-// field of the ModR/M byte, the REG field of the ModR/M byte, or added to
-// the main opcode. This is orthogonal from its meaning (an GPR or an XMM
-// register, for instance). Given this information, the operands can be
-// extracted and interpreted.
-//
-// 7. As the last step, the disassembler translates the instruction information
-// and operands into a format understandable by the client - in this case, an
-// MCInst for use by the MC infrastructure.
-//
-// The disassembler is broken broadly into two parts: the table emitter that
-// emits the instruction decode tables discussed above during compilation, and
-// the disassembler itself. The table emitter is documented in more detail in
-// utils/TableGen/X86DisassemblerEmitter.h.
-//
-// X86Disassembler.h contains the public interface for the disassembler,
-// adhering to the MCDisassembler interface.
-// X86Disassembler.cpp contains the code responsible for step 7, and for
-// invoking the decoder to execute steps 1-6.
-// X86DisassemblerDecoderCommon.h contains the definitions needed by both the
-// table emitter and the disassembler.
-// X86DisassemblerDecoder.h contains the public interface of the decoder,
-// factored out into C for possible use by other projects.
-// X86DisassemblerDecoder.c contains the source code of the decoder, which is
-// responsible for steps 1-6.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_X86_DISASSEMBLER_X86DISASSEMBLER_H
-#define LLVM_LIB_TARGET_X86_DISASSEMBLER_X86DISASSEMBLER_H
-
-#include "X86DisassemblerDecoderCommon.h"
-#include "llvm/MC/MCDisassembler.h"
-
-namespace llvm {
-
-class MCInst;
-class MCInstrInfo;
-class MCSubtargetInfo;
-class MemoryObject;
-class raw_ostream;
-
-namespace X86Disassembler {
-
-/// Generic disassembler for all X86 platforms. All each platform class should
-/// have to do is subclass the constructor, and provide a different
-/// disassemblerMode value.
-class X86GenericDisassembler : public MCDisassembler {
- std::unique_ptr<const MCInstrInfo> MII;
-public:
- X86GenericDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx,
- std::unique_ptr<const MCInstrInfo> MII);
-public:
- DecodeStatus getInstruction(MCInst &instr, uint64_t &size,
- ArrayRef<uint8_t> Bytes, uint64_t Address,
- raw_ostream &vStream,
- raw_ostream &cStream) const override;
-
-private:
- DisassemblerMode fMode;
-};
-
-} // namespace X86Disassembler
-
-} // namespace llvm
-
-#endif
diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp
index 040143b15587..b0a150ab564d 100644
--- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp
+++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.cpp
@@ -53,7 +53,6 @@ struct ContextDecision {
#define debug(s) do { } while (0)
#endif
-
/*
* contextForAttrs - Client for the instruction context table. Takes a set of
* attributes and returns the appropriate decode context.
@@ -276,8 +275,6 @@ static void dbgprintf(struct InternalInstruction* insn,
va_end(ap);
insn->dlog(insn->dlogArg, buffer);
-
- return;
}
/*
@@ -1453,10 +1450,10 @@ static int readModRM(struct InternalInstruction* insn) {
}
#define GENERIC_FIXUP_FUNC(name, base, prefix) \
- static uint8_t name(struct InternalInstruction *insn, \
- OperandType type, \
- uint8_t index, \
- uint8_t *valid) { \
+ static uint16_t name(struct InternalInstruction *insn, \
+ OperandType type, \
+ uint8_t index, \
+ uint8_t *valid) { \
*valid = 1; \
switch (type) { \
default: \
@@ -1485,7 +1482,6 @@ static int readModRM(struct InternalInstruction* insn) {
case TYPE_XMM128: \
case TYPE_XMM64: \
case TYPE_XMM32: \
- case TYPE_XMM: \
return prefix##_XMM0 + index; \
case TYPE_VK1: \
case TYPE_VK2: \
@@ -1507,6 +1503,10 @@ static int readModRM(struct InternalInstruction* insn) {
return prefix##_DR0 + index; \
case TYPE_CONTROLREG: \
return prefix##_CR0 + index; \
+ case TYPE_BNDR: \
+ if (index > 3) \
+ *valid = 0; \
+ return prefix##_BND0 + index; \
} \
}
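The TYPE_BNDR case added above accepts only indices 0-3, one per MPX bound register; a standalone sketch of that check:

    #include <cstdint>
    static uint16_t bndFixup(uint8_t Index, uint8_t *Valid) {
      *Valid = (Index <= 3); // only BND0..BND3 exist
      return /*BND0*/ 0 + Index;
    }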
@@ -1763,14 +1763,6 @@ static int readOperands(struct InternalInstruction* insn) {
if (Op.encoding != ENCODING_REG && insn->eaDisplacement == EA_DISP_8)
insn->displacement *= 1 << (Op.encoding - ENCODING_RM);
break;
- case ENCODING_CB:
- case ENCODING_CW:
- case ENCODING_CD:
- case ENCODING_CP:
- case ENCODING_CO:
- case ENCODING_CT:
- dbgprintf(insn, "We currently don't hande code-offset encodings");
- return -1;
case ENCODING_IB:
if (sawRegImm) {
/* Saw a register immediate so don't read again and instead split the
diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
index 28a628e5066b..24d24a265b49 100644
--- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
+++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
@@ -369,6 +369,12 @@ namespace X86Disassembler {
ENTRY(CR14) \
ENTRY(CR15)
+#define REGS_BOUND \
+ ENTRY(BND0) \
+ ENTRY(BND1) \
+ ENTRY(BND2) \
+ ENTRY(BND3)
+
#define ALL_EA_BASES \
EA_BASES_16BIT \
EA_BASES_32BIT \
@@ -391,6 +397,7 @@ namespace X86Disassembler {
REGS_SEGMENT \
REGS_DEBUG \
REGS_CONTROL \
+ REGS_BOUND \
ENTRY(RIP)
/// \brief All possible values of the base field for effective-address
diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
index 301db72feafb..0a835b876d90 100644
--- a/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
+++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
@@ -352,12 +352,6 @@ enum ModRMDecisionType {
ENUM_ENTRY(ENCODING_RM_CD64,"R/M operand with CDisp scaling of 64") \
ENUM_ENTRY(ENCODING_VVVV, "Register operand in VEX.vvvv byte.") \
ENUM_ENTRY(ENCODING_WRITEMASK, "Register operand in EVEX.aaa byte.") \
- ENUM_ENTRY(ENCODING_CB, "1-byte code offset (possible new CS value)") \
- ENUM_ENTRY(ENCODING_CW, "2-byte") \
- ENUM_ENTRY(ENCODING_CD, "4-byte") \
- ENUM_ENTRY(ENCODING_CP, "6-byte") \
- ENUM_ENTRY(ENCODING_CO, "8-byte") \
- ENUM_ENTRY(ENCODING_CT, "10-byte") \
ENUM_ENTRY(ENCODING_IB, "1-byte immediate") \
ENUM_ENTRY(ENCODING_IW, "2-byte") \
ENUM_ENTRY(ENCODING_ID, "4-byte") \
@@ -436,14 +430,11 @@ enum OperandEncoding {
ENUM_ENTRY(TYPE_MOFFS16, "2-byte") \
ENUM_ENTRY(TYPE_MOFFS32, "4-byte") \
ENUM_ENTRY(TYPE_MOFFS64, "8-byte") \
- ENUM_ENTRY(TYPE_SREG, "Byte with single bit set: 0 = ES, 1 = CS, " \
- "2 = SS, 3 = DS, 4 = FS, 5 = GS") \
ENUM_ENTRY(TYPE_M32FP, "32-bit IEEE 754 memory floating-point operand") \
ENUM_ENTRY(TYPE_M64FP, "64-bit") \
ENUM_ENTRY(TYPE_M80FP, "80-bit extended") \
ENUM_ENTRY(TYPE_ST, "Position on the floating-point stack") \
ENUM_ENTRY(TYPE_MM64, "8-byte MMX register") \
- ENUM_ENTRY(TYPE_XMM, "XMM register operand") \
ENUM_ENTRY(TYPE_XMM32, "4-byte XMM register or memory operand") \
ENUM_ENTRY(TYPE_XMM64, "8-byte") \
ENUM_ENTRY(TYPE_XMM128, "16-byte") \
@@ -456,7 +447,6 @@ enum OperandEncoding {
ENUM_ENTRY(TYPE_VK16, "16-bit") \
ENUM_ENTRY(TYPE_VK32, "32-bit") \
ENUM_ENTRY(TYPE_VK64, "64-bit") \
- ENUM_ENTRY(TYPE_XMM0, "Implicit use of XMM0") \
ENUM_ENTRY(TYPE_SEGMENTREG, "Segment register operand") \
ENUM_ENTRY(TYPE_DEBUGREG, "Debug register operand") \
ENUM_ENTRY(TYPE_CONTROLREG, "Control register operand") \
diff --git a/lib/Target/X86/InstPrinter/Makefile b/lib/Target/X86/InstPrinter/Makefile
deleted file mode 100644
index c82aa330a20c..000000000000
--- a/lib/Target/X86/InstPrinter/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/X86/AsmPrinter/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMX86AsmPrinter
-
-# Hack: we need to include 'main' x86 target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
index b4c0bc4cd4d9..3a5d056888a1 100644
--- a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
+++ b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
@@ -25,7 +25,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormattedStream.h"
-#include <map>
using namespace llvm;
#define DEBUG_TYPE "asm-printer"
@@ -166,17 +165,25 @@ void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
if (Op.isReg()) {
printRegName(O, Op.getReg());
} else if (Op.isImm()) {
- // Print X86 immediates as signed values.
- O << markup("<imm:") << '$' << formatImm((int64_t)Op.getImm())
- << markup(">");
+ // Print immediates as signed values.
+ int64_t Imm = Op.getImm();
+ O << markup("<imm:") << '$' << formatImm(Imm) << markup(">");
+
+ // TODO: This should be in a helper function in the base class, so it can
+ // be used by other printers.
// If there are no instruction-specific comments, add a comment clarifying
// the hex value of the immediate operand when it isn't in the range
// [-256,255].
- if (CommentStream && !HasCustomInstComment &&
- (Op.getImm() > 255 || Op.getImm() < -256))
- *CommentStream << format("imm = 0x%" PRIX64 "\n", (uint64_t)Op.getImm());
-
+ if (CommentStream && !HasCustomInstComment && (Imm > 255 || Imm < -256)) {
+ // Don't print unnecessary hex sign bits.
+ if (Imm == (int16_t)(Imm))
+ *CommentStream << format("imm = 0x%" PRIX16 "\n", (uint16_t)Imm);
+ else if (Imm == (int32_t)(Imm))
+ *CommentStream << format("imm = 0x%" PRIX32 "\n", (uint32_t)Imm);
+ else
+ *CommentStream << format("imm = 0x%" PRIX64 "\n", (uint64_t)Imm);
+ }
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
O << markup("<imm:") << '$';
diff --git a/lib/Target/X86/InstPrinter/X86InstComments.cpp b/lib/Target/X86/InstPrinter/X86InstComments.cpp
index 73f654cba38c..f5379566b619 100644
--- a/lib/Target/X86/InstPrinter/X86InstComments.cpp
+++ b/lib/Target/X86/InstPrinter/X86InstComments.cpp
@@ -21,6 +21,143 @@
using namespace llvm;
+#define CASE_SSE_INS_COMMON(Inst, src) \
+ case X86::Inst##src:
+
+#define CASE_AVX_INS_COMMON(Inst, Suffix, src) \
+ case X86::V##Inst##Suffix##src:
+
+#define CASE_MASK_INS_COMMON(Inst, Suffix, src) \
+ case X86::V##Inst##Suffix##src##k:
+
+#define CASE_MASKZ_INS_COMMON(Inst, Suffix, src) \
+ case X86::V##Inst##Suffix##src##kz:
+
+#define CASE_AVX512_INS_COMMON(Inst, Suffix, src) \
+ CASE_AVX_INS_COMMON(Inst, Suffix, src) \
+ CASE_MASK_INS_COMMON(Inst, Suffix, src) \
+ CASE_MASKZ_INS_COMMON(Inst, Suffix, src)
+
+#define CASE_MOVDUP(Inst, src) \
+ CASE_AVX512_INS_COMMON(Inst, Z, r##src) \
+ CASE_AVX512_INS_COMMON(Inst, Z256, r##src) \
+ CASE_AVX512_INS_COMMON(Inst, Z128, r##src) \
+ CASE_AVX_INS_COMMON(Inst, , r##src) \
+ CASE_AVX_INS_COMMON(Inst, Y, r##src) \
+ CASE_SSE_INS_COMMON(Inst, r##src)
+
+#define CASE_MASK_MOVDUP(Inst, src) \
+ CASE_MASK_INS_COMMON(Inst, Z, r##src) \
+ CASE_MASK_INS_COMMON(Inst, Z256, r##src) \
+ CASE_MASK_INS_COMMON(Inst, Z128, r##src)
+
+#define CASE_MASKZ_MOVDUP(Inst, src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z, r##src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z256, r##src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z128, r##src)
+
+#define CASE_PMOVZX(Inst, src) \
+ CASE_AVX512_INS_COMMON(Inst, Z, r##src) \
+ CASE_AVX512_INS_COMMON(Inst, Z256, r##src) \
+ CASE_AVX512_INS_COMMON(Inst, Z128, r##src) \
+ CASE_AVX_INS_COMMON(Inst, , r##src) \
+ CASE_AVX_INS_COMMON(Inst, Y, r##src) \
+ CASE_SSE_INS_COMMON(Inst, r##src)
+
+#define CASE_MASK_PMOVZX(Inst, src) \
+ CASE_MASK_INS_COMMON(Inst, Z, r##src) \
+ CASE_MASK_INS_COMMON(Inst, Z256, r##src) \
+ CASE_MASK_INS_COMMON(Inst, Z128, r##src)
+
+#define CASE_MASKZ_PMOVZX(Inst, src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z, r##src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z256, r##src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z128, r##src)
+
+#define CASE_UNPCK(Inst, src) \
+ CASE_AVX512_INS_COMMON(Inst, Z, r##src) \
+ CASE_AVX512_INS_COMMON(Inst, Z256, r##src) \
+ CASE_AVX512_INS_COMMON(Inst, Z128, r##src) \
+ CASE_AVX_INS_COMMON(Inst, , r##src) \
+ CASE_AVX_INS_COMMON(Inst, Y, r##src) \
+ CASE_SSE_INS_COMMON(Inst, r##src)
+
+#define CASE_MASK_UNPCK(Inst, src) \
+ CASE_MASK_INS_COMMON(Inst, Z, r##src) \
+ CASE_MASK_INS_COMMON(Inst, Z256, r##src) \
+ CASE_MASK_INS_COMMON(Inst, Z128, r##src)
+
+#define CASE_MASKZ_UNPCK(Inst, src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z, r##src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z256, r##src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z128, r##src)
+
+#define CASE_SHUF(Inst, suf) \
+ CASE_AVX512_INS_COMMON(Inst, Z, suf) \
+ CASE_AVX512_INS_COMMON(Inst, Z256, suf) \
+ CASE_AVX512_INS_COMMON(Inst, Z128, suf) \
+ CASE_AVX_INS_COMMON(Inst, , suf) \
+ CASE_AVX_INS_COMMON(Inst, Y, suf) \
+ CASE_SSE_INS_COMMON(Inst, suf)
+
+#define CASE_MASK_SHUF(Inst, src) \
+ CASE_MASK_INS_COMMON(Inst, Z, r##src##i) \
+ CASE_MASK_INS_COMMON(Inst, Z256, r##src##i) \
+ CASE_MASK_INS_COMMON(Inst, Z128, r##src##i)
+
+#define CASE_MASKZ_SHUF(Inst, src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z, r##src##i) \
+ CASE_MASKZ_INS_COMMON(Inst, Z256, r##src##i) \
+ CASE_MASKZ_INS_COMMON(Inst, Z128, r##src##i)
+
+#define CASE_VPERMILPI(Inst, src) \
+ CASE_AVX512_INS_COMMON(Inst, Z, src##i) \
+ CASE_AVX512_INS_COMMON(Inst, Z256, src##i) \
+ CASE_AVX512_INS_COMMON(Inst, Z128, src##i) \
+ CASE_AVX_INS_COMMON(Inst, , src##i) \
+ CASE_AVX_INS_COMMON(Inst, Y, src##i)
+
+#define CASE_MASK_VPERMILPI(Inst, src) \
+ CASE_MASK_INS_COMMON(Inst, Z, src##i) \
+ CASE_MASK_INS_COMMON(Inst, Z256, src##i) \
+ CASE_MASK_INS_COMMON(Inst, Z128, src##i)
+
+#define CASE_MASKZ_VPERMILPI(Inst, src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z, src##i) \
+ CASE_MASKZ_INS_COMMON(Inst, Z256, src##i) \
+ CASE_MASKZ_INS_COMMON(Inst, Z128, src##i)
+
+#define CASE_VPERM(Inst, src) \
+ CASE_AVX512_INS_COMMON(Inst, Z, src##i) \
+ CASE_AVX512_INS_COMMON(Inst, Z256, src##i) \
+ CASE_AVX_INS_COMMON(Inst, Y, src##i)
+
+#define CASE_MASK_VPERM(Inst, src) \
+ CASE_MASK_INS_COMMON(Inst, Z, src##i) \
+ CASE_MASK_INS_COMMON(Inst, Z256, src##i)
+
+#define CASE_MASKZ_VPERM(Inst, src) \
+ CASE_MASKZ_INS_COMMON(Inst, Z, src##i) \
+ CASE_MASKZ_INS_COMMON(Inst, Z256, src##i)
+
+#define CASE_VSHUF(Inst, src) \
+ CASE_AVX512_INS_COMMON(SHUFF##Inst, Z, r##src##i) \
+ CASE_AVX512_INS_COMMON(SHUFI##Inst, Z, r##src##i) \
+ CASE_AVX512_INS_COMMON(SHUFF##Inst, Z256, r##src##i) \
+ CASE_AVX512_INS_COMMON(SHUFI##Inst, Z256, r##src##i)
+
+#define CASE_MASK_VSHUF(Inst, src) \
+ CASE_MASK_INS_COMMON(SHUFF##Inst, Z, r##src##i) \
+ CASE_MASK_INS_COMMON(SHUFI##Inst, Z, r##src##i) \
+ CASE_MASK_INS_COMMON(SHUFF##Inst, Z256, r##src##i) \
+ CASE_MASK_INS_COMMON(SHUFI##Inst, Z256, r##src##i)
+
+#define CASE_MASKZ_VSHUF(Inst, src) \
+ CASE_MASKZ_INS_COMMON(SHUFF##Inst, Z, r##src##i) \
+ CASE_MASKZ_INS_COMMON(SHUFI##Inst, Z, r##src##i) \
+ CASE_MASKZ_INS_COMMON(SHUFF##Inst, Z256, r##src##i) \
+ CASE_MASKZ_INS_COMMON(SHUFI##Inst, Z256, r##src##i)
+
static unsigned getVectorRegSize(unsigned RegNo) {
if (X86::ZMM0 <= RegNo && RegNo <= X86::ZMM31)
return 512;
@@ -41,159 +178,184 @@ static MVT getRegOperandVectorVT(const MCInst *MI, const MVT &ScalarVT,
getVectorRegSize(OpReg)/ScalarVT.getSizeInBits());
}
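
The CASE_* families above rely on token pasting: one macro invocation fans out into every opcode variant (SSE, VEX, VEX.256, EVEX at the three vector lengths, plus the masked forms), which is what lets the switch bodies later in this patch collapse so dramatically. A toy illustration of the technique (opcode names invented, not LLVM's):

#include <cstdio>

enum Opcode { MOVDDUPrr, VMOVDDUPrr, VMOVDDUPZ128rr, VMOVDDUPZ128rrk };

// Each macro pastes its arguments into one case label; empty Suf is allowed.
#define CASE_SSE(Inst, src)       case Inst##src:
#define CASE_AVX(Inst, Suf, src)  case V##Inst##Suf##src:
#define CASE_MASK(Inst, Suf, src) case V##Inst##Suf##src##k:

static const char *classify(Opcode Op) {
  switch (Op) {
  CASE_SSE(MOVDDUP, rr)
  CASE_AVX(MOVDDUP, , rr)
  CASE_AVX(MOVDDUP, Z128, rr)
  CASE_MASK(MOVDDUP, Z128, rr)
    return "movddup family";
  }
  return "other";
}

int main() {
  std::printf("%s\n", classify(VMOVDDUPZ128rrk)); // movddup family
  return 0;
}
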
-/// \brief Extracts the src/dst types for a given zero extension instruction.
-/// \note While the number of elements in the DstVT type is correct, the
-/// number in the SrcVT type is expanded to fill the src xmm register and the
-/// upper elements may not be included in the dst xmm/ymm register.
-static void getZeroExtensionTypes(const MCInst *MI, MVT &SrcVT, MVT &DstVT) {
+/// \brief Extracts the dst type for a given zero extension instruction.
+static MVT getZeroExtensionResultType(const MCInst *MI) {
switch (MI->getOpcode()) {
default:
llvm_unreachable("Unknown zero extension instruction");
- // i8 zero extension
- case X86::PMOVZXBWrm:
- case X86::PMOVZXBWrr:
- case X86::VPMOVZXBWrm:
- case X86::VPMOVZXBWrr:
- SrcVT = MVT::v16i8;
- DstVT = MVT::v8i16;
- break;
- case X86::VPMOVZXBWYrm:
- case X86::VPMOVZXBWYrr:
- SrcVT = MVT::v16i8;
- DstVT = MVT::v16i16;
- break;
- case X86::PMOVZXBDrm:
- case X86::PMOVZXBDrr:
- case X86::VPMOVZXBDrm:
- case X86::VPMOVZXBDrr:
- SrcVT = MVT::v16i8;
- DstVT = MVT::v4i32;
- break;
- case X86::VPMOVZXBDYrm:
- case X86::VPMOVZXBDYrr:
- SrcVT = MVT::v16i8;
- DstVT = MVT::v8i32;
- break;
- case X86::PMOVZXBQrm:
- case X86::PMOVZXBQrr:
- case X86::VPMOVZXBQrm:
- case X86::VPMOVZXBQrr:
- SrcVT = MVT::v16i8;
- DstVT = MVT::v2i64;
- break;
- case X86::VPMOVZXBQYrm:
- case X86::VPMOVZXBQYrr:
- SrcVT = MVT::v16i8;
- DstVT = MVT::v4i64;
- break;
- // i16 zero extension
- case X86::PMOVZXWDrm:
- case X86::PMOVZXWDrr:
- case X86::VPMOVZXWDrm:
- case X86::VPMOVZXWDrr:
- SrcVT = MVT::v8i16;
- DstVT = MVT::v4i32;
- break;
- case X86::VPMOVZXWDYrm:
- case X86::VPMOVZXWDYrr:
- SrcVT = MVT::v8i16;
- DstVT = MVT::v8i32;
- break;
- case X86::PMOVZXWQrm:
- case X86::PMOVZXWQrr:
- case X86::VPMOVZXWQrm:
- case X86::VPMOVZXWQrr:
- SrcVT = MVT::v8i16;
- DstVT = MVT::v2i64;
- break;
- case X86::VPMOVZXWQYrm:
- case X86::VPMOVZXWQYrr:
- SrcVT = MVT::v8i16;
- DstVT = MVT::v4i64;
- break;
- // i32 zero extension
- case X86::PMOVZXDQrm:
- case X86::PMOVZXDQrr:
- case X86::VPMOVZXDQrm:
- case X86::VPMOVZXDQrr:
- SrcVT = MVT::v4i32;
- DstVT = MVT::v2i64;
- break;
- case X86::VPMOVZXDQYrm:
- case X86::VPMOVZXDQYrr:
- SrcVT = MVT::v4i32;
- DstVT = MVT::v4i64;
- break;
+ // zero extension to i16
+ CASE_PMOVZX(PMOVZXBW, m)
+ CASE_PMOVZX(PMOVZXBW, r)
+ return getRegOperandVectorVT(MI, MVT::i16, 0);
+ // zero extension to i32
+ CASE_PMOVZX(PMOVZXBD, m)
+ CASE_PMOVZX(PMOVZXBD, r)
+ CASE_PMOVZX(PMOVZXWD, m)
+ CASE_PMOVZX(PMOVZXWD, r)
+ return getRegOperandVectorVT(MI, MVT::i32, 0);
+ // zero extension to i64
+ CASE_PMOVZX(PMOVZXBQ, m)
+ CASE_PMOVZX(PMOVZXBQ, r)
+ CASE_PMOVZX(PMOVZXWQ, m)
+ CASE_PMOVZX(PMOVZXWQ, r)
+ CASE_PMOVZX(PMOVZXDQ, m)
+ CASE_PMOVZX(PMOVZXDQ, r)
+ return getRegOperandVectorVT(MI, MVT::i64, 0);
}
}
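
Collapsing the per-opcode SrcVT/DstVT table works because the destination register's class already encodes the vector width, so the element count is just the register width divided by the element size. A sketch of that computation (plain structs rather than MVT):

#include <cstdio>

struct SimpleVT { unsigned EltBits, NumElts; };

// e.g. PMOVZXBW into a 256-bit register: 256 / 16 lanes = v16i16.
static SimpleVT zextResultType(unsigned RegBits, unsigned EltBits) {
  return SimpleVT{EltBits, RegBits / EltBits};
}

int main() {
  SimpleVT VT = zextResultType(256, 16);
  std::printf("v%ui%u\n", VT.NumElts, VT.EltBits); // v16i16
  return 0;
}
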
-#define CASE_MASK_INS_COMMON(Inst, Suffix, src) \
- case X86::V##Inst##Suffix##src: \
- case X86::V##Inst##Suffix##src##k: \
- case X86::V##Inst##Suffix##src##kz:
-
-#define CASE_SSE_INS_COMMON(Inst, src) \
- case X86::Inst##src:
-
-#define CASE_AVX_INS_COMMON(Inst, Suffix, src) \
- case X86::V##Inst##Suffix##src:
+/// Wraps the destination register name with AVX512 mask/maskz filtering.
+static std::string getMaskName(const MCInst *MI, const char *DestName,
+ const char *(*getRegName)(unsigned)) {
+ std::string OpMaskName(DestName);
-#define CASE_MOVDUP(Inst, src) \
- CASE_MASK_INS_COMMON(Inst, Z, r##src) \
- CASE_MASK_INS_COMMON(Inst, Z256, r##src) \
- CASE_MASK_INS_COMMON(Inst, Z128, r##src) \
- CASE_AVX_INS_COMMON(Inst, , r##src) \
- CASE_AVX_INS_COMMON(Inst, Y, r##src) \
- CASE_SSE_INS_COMMON(Inst, r##src) \
-
-#define CASE_UNPCK(Inst, src) \
- CASE_MASK_INS_COMMON(Inst, Z, r##src) \
- CASE_MASK_INS_COMMON(Inst, Z256, r##src) \
- CASE_MASK_INS_COMMON(Inst, Z128, r##src) \
- CASE_AVX_INS_COMMON(Inst, , r##src) \
- CASE_AVX_INS_COMMON(Inst, Y, r##src) \
- CASE_SSE_INS_COMMON(Inst, r##src) \
-
-#define CASE_SHUF(Inst, src) \
- CASE_MASK_INS_COMMON(Inst, Z, r##src##i) \
- CASE_MASK_INS_COMMON(Inst, Z256, r##src##i) \
- CASE_MASK_INS_COMMON(Inst, Z128, r##src##i) \
- CASE_AVX_INS_COMMON(Inst, , r##src##i) \
- CASE_AVX_INS_COMMON(Inst, Y, r##src##i) \
- CASE_SSE_INS_COMMON(Inst, r##src##i) \
-
-#define CASE_VPERM(Inst, src) \
- CASE_MASK_INS_COMMON(Inst, Z, src##i) \
- CASE_MASK_INS_COMMON(Inst, Z256, src##i) \
- CASE_MASK_INS_COMMON(Inst, Z128, src##i) \
- CASE_AVX_INS_COMMON(Inst, , src##i) \
- CASE_AVX_INS_COMMON(Inst, Y, src##i) \
+ bool MaskWithZero = false;
+ const char *MaskRegName = nullptr;
-#define CASE_VSHUF(Inst, src) \
- CASE_MASK_INS_COMMON(SHUFF##Inst, Z, r##src##i) \
- CASE_MASK_INS_COMMON(SHUFI##Inst, Z, r##src##i) \
- CASE_MASK_INS_COMMON(SHUFF##Inst, Z256, r##src##i) \
- CASE_MASK_INS_COMMON(SHUFI##Inst, Z256, r##src##i) \
-
-/// \brief Extracts the type and whether it has a memory operand for a given
-/// (SHUFF32x4/SHUFF64x2/SHUFI32x4/SHUFI64x2) instruction.
-static void getVSHUF64x2FamilyInfo(const MCInst *MI, MVT &VT, bool &HasMemOp) {
- HasMemOp = false;
switch (MI->getOpcode()) {
default:
- llvm_unreachable("Unknown VSHUF64x2 family instructions.");
+ return OpMaskName;
+ CASE_MASKZ_MOVDUP(MOVDDUP, m)
+ CASE_MASKZ_MOVDUP(MOVDDUP, r)
+ CASE_MASKZ_MOVDUP(MOVSHDUP, m)
+ CASE_MASKZ_MOVDUP(MOVSHDUP, r)
+ CASE_MASKZ_MOVDUP(MOVSLDUP, m)
+ CASE_MASKZ_MOVDUP(MOVSLDUP, r)
+ CASE_MASKZ_PMOVZX(PMOVZXBD, m)
+ CASE_MASKZ_PMOVZX(PMOVZXBD, r)
+ CASE_MASKZ_PMOVZX(PMOVZXBQ, m)
+ CASE_MASKZ_PMOVZX(PMOVZXBQ, r)
+ CASE_MASKZ_PMOVZX(PMOVZXBW, m)
+ CASE_MASKZ_PMOVZX(PMOVZXBW, r)
+ CASE_MASKZ_PMOVZX(PMOVZXDQ, m)
+ CASE_MASKZ_PMOVZX(PMOVZXDQ, r)
+ CASE_MASKZ_PMOVZX(PMOVZXWD, m)
+ CASE_MASKZ_PMOVZX(PMOVZXWD, r)
+ CASE_MASKZ_PMOVZX(PMOVZXWQ, m)
+ CASE_MASKZ_PMOVZX(PMOVZXWQ, r)
+ CASE_MASKZ_UNPCK(PUNPCKHBW, m)
+ CASE_MASKZ_UNPCK(PUNPCKHBW, r)
+ CASE_MASKZ_UNPCK(PUNPCKHWD, m)
+ CASE_MASKZ_UNPCK(PUNPCKHWD, r)
+ CASE_MASKZ_UNPCK(PUNPCKHDQ, m)
+ CASE_MASKZ_UNPCK(PUNPCKHDQ, r)
+ CASE_MASKZ_UNPCK(PUNPCKLBW, m)
+ CASE_MASKZ_UNPCK(PUNPCKLBW, r)
+ CASE_MASKZ_UNPCK(PUNPCKLWD, m)
+ CASE_MASKZ_UNPCK(PUNPCKLWD, r)
+ CASE_MASKZ_UNPCK(PUNPCKLDQ, m)
+ CASE_MASKZ_UNPCK(PUNPCKLDQ, r)
+ CASE_MASKZ_UNPCK(UNPCKHPD, m)
+ CASE_MASKZ_UNPCK(UNPCKHPD, r)
+ CASE_MASKZ_UNPCK(UNPCKHPS, m)
+ CASE_MASKZ_UNPCK(UNPCKHPS, r)
+ CASE_MASKZ_UNPCK(UNPCKLPD, m)
+ CASE_MASKZ_UNPCK(UNPCKLPD, r)
+ CASE_MASKZ_UNPCK(UNPCKLPS, m)
+ CASE_MASKZ_UNPCK(UNPCKLPS, r)
+ CASE_MASKZ_SHUF(PALIGNR, r)
+ CASE_MASKZ_SHUF(PALIGNR, m)
+ CASE_MASKZ_SHUF(SHUFPD, m)
+ CASE_MASKZ_SHUF(SHUFPD, r)
+ CASE_MASKZ_SHUF(SHUFPS, m)
+ CASE_MASKZ_SHUF(SHUFPS, r)
+ CASE_MASKZ_VPERMILPI(PERMILPD, m)
+ CASE_MASKZ_VPERMILPI(PERMILPD, r)
+ CASE_MASKZ_VPERMILPI(PERMILPS, m)
+ CASE_MASKZ_VPERMILPI(PERMILPS, r)
+ CASE_MASKZ_VPERMILPI(PSHUFD, m)
+ CASE_MASKZ_VPERMILPI(PSHUFD, r)
+ CASE_MASKZ_VPERMILPI(PSHUFHW, m)
+ CASE_MASKZ_VPERMILPI(PSHUFHW, r)
+ CASE_MASKZ_VPERMILPI(PSHUFLW, m)
+ CASE_MASKZ_VPERMILPI(PSHUFLW, r)
+ CASE_MASKZ_VPERM(PERMPD, m)
+ CASE_MASKZ_VPERM(PERMPD, r)
+ CASE_MASKZ_VPERM(PERMQ, m)
+ CASE_MASKZ_VPERM(PERMQ, r)
+ CASE_MASKZ_VSHUF(64X2, m)
+ CASE_MASKZ_VSHUF(64X2, r)
+ CASE_MASKZ_VSHUF(32X4, m)
+ CASE_MASKZ_VSHUF(32X4, r)
+ MaskWithZero = true;
+ MaskRegName = getRegName(MI->getOperand(1).getReg());
break;
- CASE_VSHUF(64X2, m)
- HasMemOp = true; // FALL THROUGH.
- CASE_VSHUF(64X2, r)
- VT = getRegOperandVectorVT(MI, MVT::i64, 0);
- break;
- CASE_VSHUF(32X4, m)
- HasMemOp = true; // FALL THROUGH.
- CASE_VSHUF(32X4, r)
- VT = getRegOperandVectorVT(MI, MVT::i32, 0);
+ CASE_MASK_MOVDUP(MOVDDUP, m)
+ CASE_MASK_MOVDUP(MOVDDUP, r)
+ CASE_MASK_MOVDUP(MOVSHDUP, m)
+ CASE_MASK_MOVDUP(MOVSHDUP, r)
+ CASE_MASK_MOVDUP(MOVSLDUP, m)
+ CASE_MASK_MOVDUP(MOVSLDUP, r)
+ CASE_MASK_PMOVZX(PMOVZXBD, m)
+ CASE_MASK_PMOVZX(PMOVZXBD, r)
+ CASE_MASK_PMOVZX(PMOVZXBQ, m)
+ CASE_MASK_PMOVZX(PMOVZXBQ, r)
+ CASE_MASK_PMOVZX(PMOVZXBW, m)
+ CASE_MASK_PMOVZX(PMOVZXBW, r)
+ CASE_MASK_PMOVZX(PMOVZXDQ, m)
+ CASE_MASK_PMOVZX(PMOVZXDQ, r)
+ CASE_MASK_PMOVZX(PMOVZXWD, m)
+ CASE_MASK_PMOVZX(PMOVZXWD, r)
+ CASE_MASK_PMOVZX(PMOVZXWQ, m)
+ CASE_MASK_PMOVZX(PMOVZXWQ, r)
+ CASE_MASK_UNPCK(PUNPCKHBW, m)
+ CASE_MASK_UNPCK(PUNPCKHBW, r)
+ CASE_MASK_UNPCK(PUNPCKHWD, m)
+ CASE_MASK_UNPCK(PUNPCKHWD, r)
+ CASE_MASK_UNPCK(PUNPCKHDQ, m)
+ CASE_MASK_UNPCK(PUNPCKHDQ, r)
+ CASE_MASK_UNPCK(PUNPCKLBW, m)
+ CASE_MASK_UNPCK(PUNPCKLBW, r)
+ CASE_MASK_UNPCK(PUNPCKLWD, m)
+ CASE_MASK_UNPCK(PUNPCKLWD, r)
+ CASE_MASK_UNPCK(PUNPCKLDQ, m)
+ CASE_MASK_UNPCK(PUNPCKLDQ, r)
+ CASE_MASK_UNPCK(UNPCKHPD, m)
+ CASE_MASK_UNPCK(UNPCKHPD, r)
+ CASE_MASK_UNPCK(UNPCKHPS, m)
+ CASE_MASK_UNPCK(UNPCKHPS, r)
+ CASE_MASK_UNPCK(UNPCKLPD, m)
+ CASE_MASK_UNPCK(UNPCKLPD, r)
+ CASE_MASK_UNPCK(UNPCKLPS, m)
+ CASE_MASK_UNPCK(UNPCKLPS, r)
+ CASE_MASK_SHUF(PALIGNR, r)
+ CASE_MASK_SHUF(PALIGNR, m)
+ CASE_MASK_SHUF(SHUFPD, m)
+ CASE_MASK_SHUF(SHUFPD, r)
+ CASE_MASK_SHUF(SHUFPS, m)
+ CASE_MASK_SHUF(SHUFPS, r)
+ CASE_MASK_VPERMILPI(PERMILPD, m)
+ CASE_MASK_VPERMILPI(PERMILPD, r)
+ CASE_MASK_VPERMILPI(PERMILPS, m)
+ CASE_MASK_VPERMILPI(PERMILPS, r)
+ CASE_MASK_VPERMILPI(PSHUFD, m)
+ CASE_MASK_VPERMILPI(PSHUFD, r)
+ CASE_MASK_VPERMILPI(PSHUFHW, m)
+ CASE_MASK_VPERMILPI(PSHUFHW, r)
+ CASE_MASK_VPERMILPI(PSHUFLW, m)
+ CASE_MASK_VPERMILPI(PSHUFLW, r)
+ CASE_MASK_VPERM(PERMPD, m)
+ CASE_MASK_VPERM(PERMPD, r)
+ CASE_MASK_VPERM(PERMQ, m)
+ CASE_MASK_VPERM(PERMQ, r)
+ CASE_MASK_VSHUF(64X2, m)
+ CASE_MASK_VSHUF(64X2, r)
+ CASE_MASK_VSHUF(32X4, m)
+ CASE_MASK_VSHUF(32X4, r)
+ MaskRegName = getRegName(MI->getOperand(2).getReg());
break;
}
+
+ // MASK: zmmX {%kY}
+ OpMaskName += " {%";
+ OpMaskName += MaskRegName;
+ OpMaskName += "}";
+
+ // MASKZ: zmmX {%kY} {z}
+ if (MaskWithZero)
+ OpMaskName += " {z}";
+
+ return OpMaskName;
}
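
The decorated name follows AVX-512 assembly syntax: merge-masking appends " {%kN}" and zero-masking additionally appends " {z}". A small sketch of the string construction, detached from MCInst:

#include <cstdio>
#include <string>

// Build "dst {%kN}" for merge-masking, "dst {%kN} {z}" for zero-masking.
static std::string decorate(const std::string &Dest, const std::string &Mask,
                            bool ZeroMask) {
  std::string S = Dest + " {%" + Mask + "}";
  if (ZeroMask)
    S += " {z}";
  return S;
}

int main() {
  std::printf("%s\n", decorate("zmm0", "k1", false).c_str()); // zmm0 {%k1}
  std::printf("%s\n", decorate("zmm0", "k1", true).c_str());  // zmm0 {%k1} {z}
  return 0;
}
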
//===----------------------------------------------------------------------===//
@@ -208,6 +370,8 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
// If this is a shuffle operation, the switch should fill in this state.
SmallVector<int, 8> ShuffleMask;
const char *DestName = nullptr, *Src1Name = nullptr, *Src2Name = nullptr;
+ unsigned NumOperands = MI->getNumOperands();
+ bool RegForm = false;
switch (MI->getOpcode()) {
default:
@@ -222,9 +386,9 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::BLENDPDrmi:
case X86::VBLENDPDrmi:
case X86::VBLENDPDYrmi:
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodeBLENDMask(getRegOperandVectorVT(MI, MVT::f64, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
@@ -238,9 +402,9 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::BLENDPSrmi:
case X86::VBLENDPSrmi:
case X86::VBLENDPSYrmi:
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodeBLENDMask(getRegOperandVectorVT(MI, MVT::f32, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
@@ -254,9 +418,9 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::PBLENDWrmi:
case X86::VPBLENDWrmi:
case X86::VPBLENDWYrmi:
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodeBLENDMask(getRegOperandVectorVT(MI, MVT::i16, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
@@ -268,9 +432,9 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
// FALL THROUGH.
case X86::VPBLENDDrmi:
case X86::VPBLENDDYrmi:
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodeBLENDMask(getRegOperandVectorVT(MI, MVT::i32, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
@@ -278,14 +442,16 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::INSERTPSrr:
case X86::VINSERTPSrr:
+ case X86::VINSERTPSzrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::INSERTPSrm:
case X86::VINSERTPSrm:
+ case X86::VINSERTPSzrm:
DestName = getRegName(MI->getOperand(0).getReg());
Src1Name = getRegName(MI->getOperand(1).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
- DecodeINSERTPSMask(MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ if (MI->getOperand(NumOperands - 1).isImm())
+ DecodeINSERTPSMask(MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
@@ -307,8 +473,40 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
DecodeMOVHLPSMask(2, ShuffleMask);
break;
+ case X86::MOVHPDrm:
+ case X86::VMOVHPDrm:
+ case X86::VMOVHPDZ128rm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeInsertElementMask(MVT::v2f64, 1, 1, ShuffleMask);
+ break;
+
+ case X86::MOVHPSrm:
+ case X86::VMOVHPSrm:
+ case X86::VMOVHPSZ128rm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeInsertElementMask(MVT::v4f32, 2, 2, ShuffleMask);
+ break;
+
+ case X86::MOVLPDrm:
+ case X86::VMOVLPDrm:
+ case X86::VMOVLPDZ128rm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeInsertElementMask(MVT::v2f64, 0, 1, ShuffleMask);
+ break;
+
+ case X86::MOVLPSrm:
+ case X86::VMOVLPSrm:
+ case X86::VMOVLPSZ128rm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeInsertElementMask(MVT::v4f32, 0, 2, ShuffleMask);
+ break;
+
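
The four MOVH/MOVL cases decode to an insert-element shuffle: identity lanes from the first source, with Len lanes starting at Idx taken from the second source and numbered from NumElts upward. A sketch of the mask such a decode plausibly produces (an assumption about DecodeInsertElementMask's convention, not a quote of it):

#include <cstdio>
#include <vector>

static std::vector<int> insertElementMask(unsigned NumElts, unsigned Idx,
                                          unsigned Len) {
  std::vector<int> Mask(NumElts);
  for (unsigned i = 0; i != NumElts; ++i)
    Mask[i] = (int)i;                   // identity lanes from source 1
  for (unsigned i = 0; i != Len; ++i)
    Mask[Idx + i] = (int)(NumElts + i); // inserted lanes from source 2
  return Mask;
}

int main() {
  // MOVHPS-like: v4f32, insert 2 lanes at index 2 -> 0 1 4 5
  for (int M : insertElementMask(4, 2, 2))
    std::printf("%d ", M);
  std::printf("\n");
  return 0;
}
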
CASE_MOVDUP(MOVSLDUP, r)
- Src1Name = getRegName(MI->getOperand(MI->getNumOperands() - 1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
// FALL THROUGH.
CASE_MOVDUP(MOVSLDUP, m)
DestName = getRegName(MI->getOperand(0).getReg());
@@ -316,7 +514,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
break;
CASE_MOVDUP(MOVSHDUP, r)
- Src1Name = getRegName(MI->getOperand(MI->getNumOperands() - 1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
// FALL THROUGH.
CASE_MOVDUP(MOVSHDUP, m)
DestName = getRegName(MI->getOperand(0).getReg());
@@ -324,7 +522,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
break;
CASE_MOVDUP(MOVDDUP, r)
- Src1Name = getRegName(MI->getOperand(MI->getNumOperands() - 1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
// FALL THROUGH.
CASE_MOVDUP(MOVDDUP, m)
DestName = getRegName(MI->getOperand(0).getReg());
@@ -334,83 +532,80 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::PSLLDQri:
case X86::VPSLLDQri:
case X86::VPSLLDQYri:
+ case X86::VPSLLDQZ128rr:
+ case X86::VPSLLDQZ256rr:
+ case X86::VPSLLDQZ512rr:
Src1Name = getRegName(MI->getOperand(1).getReg());
+ case X86::VPSLLDQZ128rm:
+ case X86::VPSLLDQZ256rm:
+ case X86::VPSLLDQZ512rm:
DestName = getRegName(MI->getOperand(0).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSLLDQMask(getRegOperandVectorVT(MI, MVT::i8, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
case X86::PSRLDQri:
case X86::VPSRLDQri:
case X86::VPSRLDQYri:
+ case X86::VPSRLDQZ128rr:
+ case X86::VPSRLDQZ256rr:
+ case X86::VPSRLDQZ512rr:
Src1Name = getRegName(MI->getOperand(1).getReg());
+ case X86::VPSRLDQZ128rm:
+ case X86::VPSRLDQZ256rm:
+ case X86::VPSRLDQZ512rm:
DestName = getRegName(MI->getOperand(0).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSRLDQMask(getRegOperandVectorVT(MI, MVT::i8, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
- case X86::PALIGNR128rr:
- case X86::VPALIGNR128rr:
- case X86::VPALIGNR256rr:
- Src1Name = getRegName(MI->getOperand(2).getReg());
+ CASE_SHUF(PALIGNR, rri)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+ RegForm = true;
// FALL THROUGH.
- case X86::PALIGNR128rm:
- case X86::VPALIGNR128rm:
- case X86::VPALIGNR256rm:
- Src2Name = getRegName(MI->getOperand(1).getReg());
+ CASE_SHUF(PALIGNR, rmi)
+ Src2Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePALIGNRMask(getRegOperandVectorVT(MI, MVT::i8, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
- case X86::PSHUFDri:
- case X86::VPSHUFDri:
- case X86::VPSHUFDYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ CASE_SHUF(PSHUFD, ri)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
// FALL THROUGH.
- case X86::PSHUFDmi:
- case X86::VPSHUFDmi:
- case X86::VPSHUFDYmi:
+ CASE_SHUF(PSHUFD, mi)
DestName = getRegName(MI->getOperand(0).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSHUFMask(getRegOperandVectorVT(MI, MVT::i32, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
- case X86::PSHUFHWri:
- case X86::VPSHUFHWri:
- case X86::VPSHUFHWYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ CASE_SHUF(PSHUFHW, ri)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
// FALL THROUGH.
- case X86::PSHUFHWmi:
- case X86::VPSHUFHWmi:
- case X86::VPSHUFHWYmi:
+ CASE_SHUF(PSHUFHW, mi)
DestName = getRegName(MI->getOperand(0).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSHUFHWMask(getRegOperandVectorVT(MI, MVT::i16, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
- case X86::PSHUFLWri:
- case X86::VPSHUFLWri:
- case X86::VPSHUFLWYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ CASE_SHUF(PSHUFLW, ri)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
// FALL THROUGH.
- case X86::PSHUFLWmi:
- case X86::VPSHUFLWmi:
- case X86::VPSHUFLWYmi:
+ CASE_SHUF(PSHUFLW, mi)
DestName = getRegName(MI->getOperand(0).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSHUFLWMask(getRegOperandVectorVT(MI, MVT::i16, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
@@ -419,9 +614,9 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
// FALL THROUGH.
case X86::MMX_PSHUFWmi:
DestName = getRegName(MI->getOperand(0).getReg());
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSHUFMask(MVT::v4i16,
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
break;
@@ -435,188 +630,204 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
CASE_UNPCK(PUNPCKHBW, r)
case X86::MMX_PUNPCKHBWirr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKHBW, m)
case X86::MMX_PUNPCKHBWirm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKHMask(getRegOperandVectorVT(MI, MVT::i8, 0), ShuffleMask);
break;
CASE_UNPCK(PUNPCKHWD, r)
case X86::MMX_PUNPCKHWDirr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKHWD, m)
case X86::MMX_PUNPCKHWDirm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKHMask(getRegOperandVectorVT(MI, MVT::i16, 0), ShuffleMask);
break;
CASE_UNPCK(PUNPCKHDQ, r)
case X86::MMX_PUNPCKHDQirr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKHDQ, m)
case X86::MMX_PUNPCKHDQirm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKHMask(getRegOperandVectorVT(MI, MVT::i32, 0), ShuffleMask);
break;
CASE_UNPCK(PUNPCKHQDQ, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKHQDQ, m)
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKHMask(getRegOperandVectorVT(MI, MVT::i64, 0), ShuffleMask);
break;
CASE_UNPCK(PUNPCKLBW, r)
case X86::MMX_PUNPCKLBWirr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKLBW, m)
case X86::MMX_PUNPCKLBWirm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKLMask(getRegOperandVectorVT(MI, MVT::i8, 0), ShuffleMask);
break;
CASE_UNPCK(PUNPCKLWD, r)
case X86::MMX_PUNPCKLWDirr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKLWD, m)
case X86::MMX_PUNPCKLWDirm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKLMask(getRegOperandVectorVT(MI, MVT::i16, 0), ShuffleMask);
break;
CASE_UNPCK(PUNPCKLDQ, r)
case X86::MMX_PUNPCKLDQirr:
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKLDQ, m)
case X86::MMX_PUNPCKLDQirm:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKLMask(getRegOperandVectorVT(MI, MVT::i32, 0), ShuffleMask);
break;
CASE_UNPCK(PUNPCKLQDQ, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(PUNPCKLQDQ, m)
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
DecodeUNPCKLMask(getRegOperandVectorVT(MI, MVT::i64, 0), ShuffleMask);
break;
- CASE_SHUF(SHUFPD, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ CASE_SHUF(SHUFPD, rri)
+ Src2Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+ RegForm = true;
// FALL THROUGH.
- CASE_SHUF(SHUFPD, m)
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ CASE_SHUF(SHUFPD, rmi)
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodeSHUFPMask(getRegOperandVectorVT(MI, MVT::f64, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
- CASE_SHUF(SHUFPS, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ CASE_SHUF(SHUFPS, rri)
+ Src2Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+ RegForm = true;
// FALL THROUGH.
- CASE_SHUF(SHUFPS, m)
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ CASE_SHUF(SHUFPS, rmi)
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodeSHUFPMask(getRegOperandVectorVT(MI, MVT::f32, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
CASE_VSHUF(64X2, r)
+ Src2Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+ RegForm = true;
+ // FALL THROUGH.
CASE_VSHUF(64X2, m)
+ decodeVSHUF64x2FamilyMask(getRegOperandVectorVT(MI, MVT::i64, 0),
+ MI->getOperand(NumOperands - 1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
CASE_VSHUF(32X4, r)
- CASE_VSHUF(32X4, m) {
- MVT VT;
- bool HasMemOp;
- unsigned NumOp = MI->getNumOperands();
- getVSHUF64x2FamilyInfo(MI, VT, HasMemOp);
- decodeVSHUF64x2FamilyMask(VT, MI->getOperand(NumOp - 1).getImm(),
+ Src2Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+ RegForm = true;
+ // FALL THROUGH.
+ CASE_VSHUF(32X4, m)
+ decodeVSHUF64x2FamilyMask(getRegOperandVectorVT(MI, MVT::i32, 0),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
- if (HasMemOp) {
- assert((NumOp >= 8) && "Expected at least 8 operands!");
- Src1Name = getRegName(MI->getOperand(NumOp - 7).getReg());
- } else {
- assert((NumOp >= 4) && "Expected at least 4 operands!");
- Src2Name = getRegName(MI->getOperand(NumOp - 2).getReg());
- Src1Name = getRegName(MI->getOperand(NumOp - 3).getReg());
- }
break;
- }
CASE_UNPCK(UNPCKLPD, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(UNPCKLPD, m)
DecodeUNPCKLMask(getRegOperandVectorVT(MI, MVT::f64, 0), ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
CASE_UNPCK(UNPCKLPS, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(UNPCKLPS, m)
DecodeUNPCKLMask(getRegOperandVectorVT(MI, MVT::f32, 0), ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
CASE_UNPCK(UNPCKHPD, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(UNPCKHPD, m)
DecodeUNPCKHMask(getRegOperandVectorVT(MI, MVT::f64, 0), ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
CASE_UNPCK(UNPCKHPS, r)
- Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src2Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ RegForm = true;
// FALL THROUGH.
CASE_UNPCK(UNPCKHPS, m)
DecodeUNPCKHMask(getRegOperandVectorVT(MI, MVT::f32, 0), ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ Src1Name = getRegName(MI->getOperand(NumOperands-(RegForm?2:6)).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
- CASE_VPERM(PERMILPS, r)
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ CASE_VPERMILPI(PERMILPS, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
// FALL THROUGH.
- CASE_VPERM(PERMILPS, m)
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ CASE_VPERMILPI(PERMILPS, m)
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSHUFMask(getRegOperandVectorVT(MI, MVT::f32, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
- CASE_VPERM(PERMILPD, r)
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ CASE_VPERMILPI(PERMILPD, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
// FALL THROUGH.
- CASE_VPERM(PERMILPD, m)
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ CASE_VPERMILPI(PERMILPD, m)
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodePSHUFMask(getRegOperandVectorVT(MI, MVT::f64, 0),
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
@@ -628,44 +839,58 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::VPERM2F128rm:
case X86::VPERM2I128rm:
// For instruction comments purpose, assume the 256-bit vector is v4i64.
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
+ if (MI->getOperand(NumOperands - 1).isImm())
DecodeVPERM2X128Mask(MVT::v4i64,
- MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
DestName = getRegName(MI->getOperand(0).getReg());
break;
- case X86::VPERMQYri:
- case X86::VPERMPDYri:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ CASE_VPERM(PERMPD, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
// FALL THROUGH.
- case X86::VPERMQYmi:
- case X86::VPERMPDYmi:
- if (MI->getOperand(MI->getNumOperands() - 1).isImm())
- DecodeVPERMMask(MI->getOperand(MI->getNumOperands() - 1).getImm(),
+ CASE_VPERM(PERMPD, m)
+ if (MI->getOperand(NumOperands - 1).isImm())
+ DecodeVPERMMask(getRegOperandVectorVT(MI, MVT::f64, 0),
+ MI->getOperand(NumOperands - 1).getImm(),
+ ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ CASE_VPERM(PERMQ, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+ // FALL THROUGH.
+ CASE_VPERM(PERMQ, m)
+ if (MI->getOperand(NumOperands - 1).isImm())
+ DecodeVPERMMask(getRegOperandVectorVT(MI, MVT::i64, 0),
+ MI->getOperand(NumOperands - 1).getImm(),
ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::MOVSDrr:
case X86::VMOVSDrr:
+ case X86::VMOVSDZrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::MOVSDrm:
case X86::VMOVSDrm:
+ case X86::VMOVSDZrm:
DecodeScalarMoveMask(MVT::v2f64, nullptr == Src2Name, ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::MOVSSrr:
case X86::VMOVSSrr:
+ case X86::VMOVSSZrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::MOVSSrm:
case X86::VMOVSSrm:
+ case X86::VMOVSSZrm:
DecodeScalarMoveMask(MVT::v4f32, nullptr == Src2Name, ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
@@ -681,6 +906,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::MOVZQI2PQIrm:
case X86::MOVZPQILo2PQIrm:
case X86::VMOVQI2PQIrm:
+ case X86::VMOVQI2PQIZrm:
case X86::VMOVZQI2PQIrm:
case X86::VMOVZPQILo2PQIrm:
case X86::VMOVZPQILo2PQIZrm:
@@ -690,6 +916,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::MOVDI2PDIrm:
case X86::VMOVDI2PDIrm:
+ case X86::VMOVDI2PDIZrm:
DecodeZeroMoveLowMask(MVT::v4i32, ShuffleMask);
DestName = getRegName(MI->getOperand(0).getReg());
break;
@@ -717,49 +944,41 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
Src2Name = getRegName(MI->getOperand(2).getReg());
break;
- case X86::PMOVZXBWrr:
- case X86::PMOVZXBDrr:
- case X86::PMOVZXBQrr:
- case X86::PMOVZXWDrr:
- case X86::PMOVZXWQrr:
- case X86::PMOVZXDQrr:
- case X86::VPMOVZXBWrr:
- case X86::VPMOVZXBDrr:
- case X86::VPMOVZXBQrr:
- case X86::VPMOVZXWDrr:
- case X86::VPMOVZXWQrr:
- case X86::VPMOVZXDQrr:
- case X86::VPMOVZXBWYrr:
- case X86::VPMOVZXBDYrr:
- case X86::VPMOVZXBQYrr:
- case X86::VPMOVZXWDYrr:
- case X86::VPMOVZXWQYrr:
- case X86::VPMOVZXDQYrr:
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ case X86::VBROADCASTF128:
+ case X86::VBROADCASTI128:
+ DecodeSubVectorBroadcast(MVT::v4f64, MVT::v2f64, ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ CASE_PMOVZX(PMOVZXBW, r)
+ CASE_PMOVZX(PMOVZXBD, r)
+ CASE_PMOVZX(PMOVZXBQ, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ // FALL THROUGH.
+ CASE_PMOVZX(PMOVZXBW, m)
+ CASE_PMOVZX(PMOVZXBD, m)
+ CASE_PMOVZX(PMOVZXBQ, m)
+ DecodeZeroExtendMask(MVT::i8, getZeroExtensionResultType(MI), ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ CASE_PMOVZX(PMOVZXWD, r)
+ CASE_PMOVZX(PMOVZXWQ, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
// FALL THROUGH.
- case X86::PMOVZXBWrm:
- case X86::PMOVZXBDrm:
- case X86::PMOVZXBQrm:
- case X86::PMOVZXWDrm:
- case X86::PMOVZXWQrm:
- case X86::PMOVZXDQrm:
- case X86::VPMOVZXBWrm:
- case X86::VPMOVZXBDrm:
- case X86::VPMOVZXBQrm:
- case X86::VPMOVZXWDrm:
- case X86::VPMOVZXWQrm:
- case X86::VPMOVZXDQrm:
- case X86::VPMOVZXBWYrm:
- case X86::VPMOVZXBDYrm:
- case X86::VPMOVZXBQYrm:
- case X86::VPMOVZXWDYrm:
- case X86::VPMOVZXWQYrm:
- case X86::VPMOVZXDQYrm: {
- MVT SrcVT, DstVT;
- getZeroExtensionTypes(MI, SrcVT, DstVT);
- DecodeZeroExtendMask(SrcVT, DstVT, ShuffleMask);
- DestName = getRegName(MI->getOperand(0).getReg());
- } break;
+ CASE_PMOVZX(PMOVZXWD, m)
+ CASE_PMOVZX(PMOVZXWQ, m)
+ DecodeZeroExtendMask(MVT::i16, getZeroExtensionResultType(MI), ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+
+ CASE_PMOVZX(PMOVZXDQ, r)
+ Src1Name = getRegName(MI->getOperand(NumOperands - 1).getReg());
+ // FALL THROUGH.
+ CASE_PMOVZX(PMOVZXDQ, m)
+ DecodeZeroExtendMask(MVT::i32, getZeroExtensionResultType(MI), ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
}
// The only comments we decode are shuffles, so give up if we were unable to
@@ -768,7 +987,7 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
return false;
if (!DestName) DestName = Src1Name;
- OS << (DestName ? DestName : "mem") << " = ";
+ OS << (DestName ? getMaskName(MI, DestName, getRegName) : "mem") << " = ";
// If the two sources are the same, canonicalize the input elements to be
// from the first src so that we get larger element spans.
diff --git a/lib/Target/X86/MCTargetDesc/CMakeLists.txt b/lib/Target/X86/MCTargetDesc/CMakeLists.txt
index 129c28d804ef..33df9ec7dcde 100644
--- a/lib/Target/X86/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/X86/MCTargetDesc/CMakeLists.txt
@@ -7,6 +7,4 @@ add_llvm_library(LLVMX86Desc
X86ELFObjectWriter.cpp
X86WinCOFFStreamer.cpp
X86WinCOFFObjectWriter.cpp
- X86MachORelocationInfo.cpp
- X86ELFRelocationInfo.cpp
)
diff --git a/lib/Target/X86/MCTargetDesc/Makefile b/lib/Target/X86/MCTargetDesc/Makefile
deleted file mode 100644
index b19774ee379e..000000000000
--- a/lib/Target/X86/MCTargetDesc/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/X86/TargetDesc/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMX86Desc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 135c32bf8c3b..e77a0dc9bc27 100644
--- a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -21,7 +21,7 @@
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
-#include "llvm/Support/CommandLine.h"
+#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachO.h"
@@ -43,8 +43,11 @@ static unsigned getFixupKindLog2Size(unsigned Kind) {
return 1;
case FK_PCRel_4:
case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_relax:
+ case X86::reloc_riprel_4byte_relax_rex:
case X86::reloc_riprel_4byte_movq_load:
case X86::reloc_signed_4byte:
+ case X86::reloc_signed_4byte_relax:
case X86::reloc_global_offset_table:
case FK_SecRel_4:
case FK_Data_4:
@@ -72,7 +75,8 @@ class X86AsmBackend : public MCAsmBackend {
const uint64_t MaxNopLength;
public:
X86AsmBackend(const Target &T, StringRef CPU)
- : MCAsmBackend(), CPU(CPU), MaxNopLength(CPU == "slm" ? 7 : 15) {
+ : MCAsmBackend(), CPU(CPU),
+ MaxNopLength((CPU == "slm" || CPU == "lakemont") ? 7 : 15) {
HasNopl = CPU != "generic" && CPU != "i386" && CPU != "i486" &&
CPU != "i586" && CPU != "pentium" && CPU != "pentium-mmx" &&
CPU != "i686" && CPU != "k6" && CPU != "k6-2" && CPU != "k6-3" &&
@@ -86,10 +90,14 @@ public:
const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
- { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
- { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel},
- { "reloc_signed_4byte", 0, 4 * 8, 0},
- { "reloc_global_offset_table", 0, 4 * 8, 0}
+ {"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"reloc_riprel_4byte_movq_load", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"reloc_riprel_4byte_relax", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"reloc_riprel_4byte_relax_rex", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"reloc_signed_4byte", 0, 32, 0},
+ {"reloc_signed_4byte_relax", 0, 32, 0},
+ {"reloc_global_offset_table", 0, 32, 0},
+ {"reloc_global_offset_table8", 0, 64, 0},
};
if (Kind < FirstTargetFixupKind)
@@ -124,38 +132,57 @@ public:
const MCRelaxableFragment *DF,
const MCAsmLayout &Layout) const override;
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const override;
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override;
bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
};
} // end anonymous namespace
-static unsigned getRelaxedOpcodeBranch(unsigned Op) {
+static unsigned getRelaxedOpcodeBranch(const MCInst &Inst, bool is16BitMode) {
+ unsigned Op = Inst.getOpcode();
switch (Op) {
default:
return Op;
-
- case X86::JAE_1: return X86::JAE_4;
- case X86::JA_1: return X86::JA_4;
- case X86::JBE_1: return X86::JBE_4;
- case X86::JB_1: return X86::JB_4;
- case X86::JE_1: return X86::JE_4;
- case X86::JGE_1: return X86::JGE_4;
- case X86::JG_1: return X86::JG_4;
- case X86::JLE_1: return X86::JLE_4;
- case X86::JL_1: return X86::JL_4;
- case X86::JMP_1: return X86::JMP_4;
- case X86::JNE_1: return X86::JNE_4;
- case X86::JNO_1: return X86::JNO_4;
- case X86::JNP_1: return X86::JNP_4;
- case X86::JNS_1: return X86::JNS_4;
- case X86::JO_1: return X86::JO_4;
- case X86::JP_1: return X86::JP_4;
- case X86::JS_1: return X86::JS_4;
+ case X86::JAE_1:
+ return (is16BitMode) ? X86::JAE_2 : X86::JAE_4;
+ case X86::JA_1:
+ return (is16BitMode) ? X86::JA_2 : X86::JA_4;
+ case X86::JBE_1:
+ return (is16BitMode) ? X86::JBE_2 : X86::JBE_4;
+ case X86::JB_1:
+ return (is16BitMode) ? X86::JB_2 : X86::JB_4;
+ case X86::JE_1:
+ return (is16BitMode) ? X86::JE_2 : X86::JE_4;
+ case X86::JGE_1:
+ return (is16BitMode) ? X86::JGE_2 : X86::JGE_4;
+ case X86::JG_1:
+ return (is16BitMode) ? X86::JG_2 : X86::JG_4;
+ case X86::JLE_1:
+ return (is16BitMode) ? X86::JLE_2 : X86::JLE_4;
+ case X86::JL_1:
+ return (is16BitMode) ? X86::JL_2 : X86::JL_4;
+ case X86::JMP_1:
+ return (is16BitMode) ? X86::JMP_2 : X86::JMP_4;
+ case X86::JNE_1:
+ return (is16BitMode) ? X86::JNE_2 : X86::JNE_4;
+ case X86::JNO_1:
+ return (is16BitMode) ? X86::JNO_2 : X86::JNO_4;
+ case X86::JNP_1:
+ return (is16BitMode) ? X86::JNP_2 : X86::JNP_4;
+ case X86::JNS_1:
+ return (is16BitMode) ? X86::JNS_2 : X86::JNS_4;
+ case X86::JO_1:
+ return (is16BitMode) ? X86::JO_2 : X86::JO_4;
+ case X86::JP_1:
+ return (is16BitMode) ? X86::JP_2 : X86::JP_4;
+ case X86::JS_1:
+ return (is16BitMode) ? X86::JS_2 : X86::JS_4;
}
}
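
Threading is16BitMode through lets each Jcc_1 relax to the 2-byte-displacement form when assembling 16-bit code, where the 4-byte encodings are unavailable, and to the 4-byte form otherwise. The shape of the mapping, reduced to one opcode (placeholder names, not LLVM's enums):

#include <cstdio>

enum Opc { JMP_1, JMP_2, JMP_4, NOP };

// Relax a short branch to the widest displacement the current mode supports.
static Opc relaxBranch(Opc Op, bool Is16BitMode) {
  switch (Op) {
  default:
    return Op;                        // not a relaxable branch
  case JMP_1:
    return Is16BitMode ? JMP_2 : JMP_4;
  }
}

int main() {
  std::printf("%d %d %d\n",
              relaxBranch(JMP_1, true),   // JMP_2
              relaxBranch(JMP_1, false),  // JMP_4
              relaxBranch(NOP, false));   // unchanged
  return 0;
}
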
-static unsigned getRelaxedOpcodeArith(unsigned Op) {
+static unsigned getRelaxedOpcodeArith(const MCInst &Inst) {
+ unsigned Op = Inst.getOpcode();
switch (Op) {
default:
return Op;
@@ -239,20 +266,20 @@ static unsigned getRelaxedOpcodeArith(unsigned Op) {
}
}
-static unsigned getRelaxedOpcode(unsigned Op) {
- unsigned R = getRelaxedOpcodeArith(Op);
- if (R != Op)
+static unsigned getRelaxedOpcode(const MCInst &Inst, bool is16BitMode) {
+ unsigned R = getRelaxedOpcodeArith(Inst);
+ if (R != Inst.getOpcode())
return R;
- return getRelaxedOpcodeBranch(Op);
+ return getRelaxedOpcodeBranch(Inst, is16BitMode);
}
bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
- // Branches can always be relaxed.
- if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode())
+ // Branches can always be relaxed in either mode.
+ if (getRelaxedOpcodeBranch(Inst, false) != Inst.getOpcode())
return true;
// Check if this instruction is ever relaxable.
- if (getRelaxedOpcodeArith(Inst.getOpcode()) == Inst.getOpcode())
+ if (getRelaxedOpcodeArith(Inst) == Inst.getOpcode())
return false;
@@ -275,9 +302,12 @@ bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
-void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
+void X86AsmBackend::relaxInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI,
+ MCInst &Res) const {
// The only relaxations X86 does are from a 1-byte pcrel to a 2-byte or 4-byte pcrel.
- unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());
+ bool is16BitMode = STI.getFeatureBits()[X86::Mode16Bit];
+ unsigned RelaxedOp = getRelaxedOpcode(Inst, is16BitMode);
if (RelaxedOp == Inst.getOpcode()) {
SmallString<256> Tmp;
@@ -405,6 +435,14 @@ public:
, Is64Bit(is64Bit) {
}
+ Optional<MCFixupKind> getFixupKind(StringRef Name) const override {
+ return StringSwitch<Optional<MCFixupKind>>(Name)
+ .Case("dir32", FK_Data_4)
+ .Case("secrel32", FK_SecRel_4)
+ .Case("secidx", FK_SecRel_2)
+ .Default(MCAsmBackend::getFixupKind(Name));
+ }
+
MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
return createX86WinCOFFObjectWriter(OS, Is64Bit);
}
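
The getFixupKind override lets assembly source name COFF fixups directly, falling back to the generic lookup for anything unrecognized. A plain-C++ stand-in for the StringSwitch chain (kind values illustrative):

#include <cstdio>
#include <cstring>
#include <optional>

enum FixupKind { FK_Data_4, FK_SecRel_4, FK_SecRel_2 };

// Return the fixup kind for a directive name, or nothing to defer to the
// generic table, mirroring the Default(...) fallback above.
static std::optional<FixupKind> fixupKindByName(const char *Name) {
  if (!std::strcmp(Name, "dir32"))    return FK_Data_4;
  if (!std::strcmp(Name, "secrel32")) return FK_SecRel_4;
  if (!std::strcmp(Name, "secidx"))   return FK_SecRel_2;
  return std::nullopt;
}

int main() {
  if (auto K = fixupKindByName("secrel32"))
    std::printf("kind = %d\n", *K);   // kind = 1
  return 0;
}
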
@@ -803,7 +841,7 @@ MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
if (TheTriple.isOSBinFormatMachO())
return new DarwinX86_32AsmBackend(T, MRI, CPU);
- if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF())
+ if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
return new WindowsX86AsmBackend(T, false, CPU);
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
@@ -826,7 +864,7 @@ MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
return new DarwinX86_64AsmBackend(T, MRI, CPU, CS);
}
- if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF())
+ if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
return new WindowsX86AsmBackend(T, true, CPU);
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
diff --git a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index 9ff85b9154f8..b4195176f904 100644
--- a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -186,11 +186,6 @@ namespace X86II {
/// dllimport linkage on windows.
MO_DLLIMPORT,
- /// MO_DARWIN_STUB - On a symbol operand "FOO", this indicates that the
- /// reference is actually to the "FOO$stub" symbol. This is used for calls
- /// and jumps to external functions on Tiger and earlier.
- MO_DARWIN_STUB,
-
/// MO_DARWIN_NONLAZY - On a symbol operand "FOO", this indicates that the
/// reference is actually to the "FOO$non_lazy_ptr" symbol, which is a
/// non-PIC-base-relative reference to a non-hidden dyld lazy pointer stub.
@@ -201,12 +196,6 @@ namespace X86II {
/// a PIC-base-relative reference to a non-hidden dyld lazy pointer stub.
MO_DARWIN_NONLAZY_PIC_BASE,
- /// MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE - On a symbol operand "FOO", this
- /// indicates that the reference is actually to "FOO$non_lazy_ptr -PICBASE",
- /// which is a PIC-base-relative reference to a hidden dyld lazy pointer
- /// stub.
- MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE,
-
/// MO_TLVP - On a symbol operand this indicates that the immediate is
/// some TLS offset.
///
@@ -667,7 +656,7 @@ namespace X86II {
/// is duplicated in the MCInst (e.g. "EAX = addl EAX, [mem]") it is only
/// counted as one operand.
///
- inline int getMemoryOperandNo(uint64_t TSFlags, unsigned Opcode) {
+ inline int getMemoryOperandNo(uint64_t TSFlags) {
bool HasVEX_4V = TSFlags & X86II::VEX_4V;
bool HasMemOp4 = TSFlags & X86II::MemOp4;
bool HasEVEX_K = TSFlags & X86II::EVEX_K;
@@ -734,12 +723,12 @@ namespace X86II {
/// isX86_64ExtendedReg - Is the MachineOperand a x86-64 extended (r8 or
/// higher) register? e.g. r8, xmm8, xmm13, etc.
inline bool isX86_64ExtendedReg(unsigned RegNo) {
- if ((RegNo > X86::XMM7 && RegNo <= X86::XMM15) ||
- (RegNo > X86::XMM23 && RegNo <= X86::XMM31) ||
- (RegNo > X86::YMM7 && RegNo <= X86::YMM15) ||
- (RegNo > X86::YMM23 && RegNo <= X86::YMM31) ||
- (RegNo > X86::ZMM7 && RegNo <= X86::ZMM15) ||
- (RegNo > X86::ZMM23 && RegNo <= X86::ZMM31))
+ if ((RegNo >= X86::XMM8 && RegNo <= X86::XMM15) ||
+ (RegNo >= X86::XMM24 && RegNo <= X86::XMM31) ||
+ (RegNo >= X86::YMM8 && RegNo <= X86::YMM15) ||
+ (RegNo >= X86::YMM24 && RegNo <= X86::YMM31) ||
+ (RegNo >= X86::ZMM8 && RegNo <= X86::ZMM15) ||
+ (RegNo >= X86::ZMM24 && RegNo <= X86::ZMM31))
return true;
switch (RegNo) {
@@ -762,9 +751,9 @@ namespace X86II {
/// is32ExtendedReg - Is the MemoryOperand a 32 extended (zmm16 or higher)
/// registers? e.g. zmm21, etc.
static inline bool is32ExtendedReg(unsigned RegNo) {
- return ((RegNo > X86::XMM15 && RegNo <= X86::XMM31) ||
- (RegNo > X86::YMM15 && RegNo <= X86::YMM31) ||
- (RegNo > X86::ZMM15 && RegNo <= X86::ZMM31));
+ return ((RegNo >= X86::XMM16 && RegNo <= X86::XMM31) ||
+ (RegNo >= X86::YMM16 && RegNo <= X86::YMM31) ||
+ (RegNo >= X86::ZMM16 && RegNo <= X86::ZMM31));
}
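
The predicates above switch from exclusive bounds (`RegNo > X86::XMM7`) to inclusive ones (`RegNo >= X86::XMM8`). Both rely on tablegen laying the registers out contiguously, but the inclusive form states the intended lower bound directly. A minimal sketch with a hypothetical contiguous enum, not the generated LLVM register numbers:

    #include <cassert>

    // Hypothetical contiguous numbering, mirroring the assumption the real
    // code makes about tablegen's layout of X86::XMM0..XMM15.
    enum Reg {
      XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
      XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15
    };

    // Inclusive bounds say "XMM8 and up" directly instead of "strictly above
    // XMM7"; the two forms are equivalent on a contiguous enum.
    static bool isExtended(unsigned RegNo) {
      return RegNo >= XMM8 && RegNo <= XMM15;
    }

    int main() {
      assert(!isExtended(XMM7));
      assert(isExtended(XMM8) && isExtended(XMM15));
    }

An equivalent formulation, once the raw encoding is in hand, is to test bit 3 of the 5-bit register encoding, which is exactly what the new `isX86_64ExtendedReg(MI, OpNum)` helper later in this patch does.
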
diff --git a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
index 736c39dfb6f1..da69da51df10 100644
--- a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
@@ -9,6 +9,8 @@
#include "MCTargetDesc/X86FixupKinds.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCValue.h"
@@ -25,8 +27,8 @@ namespace {
~X86ELFObjectWriter() override;
protected:
- unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel) const override;
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
};
}
@@ -56,6 +58,7 @@ static X86_64RelType getType64(unsigned Kind,
case FK_Data_8:
return RT64_64;
case X86::reloc_signed_4byte:
+ case X86::reloc_signed_4byte_relax:
if (Modifier == MCSymbolRefExpr::VK_None && !IsPCRel)
return RT64_32S;
return RT64_32;
@@ -66,6 +69,8 @@ static X86_64RelType getType64(unsigned Kind,
case FK_Data_4:
case FK_PCRel_4:
case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_relax:
+ case X86::reloc_riprel_4byte_relax_rex:
case X86::reloc_riprel_4byte_movq_load:
return RT64_32;
case FK_PCRel_2:
@@ -77,8 +82,16 @@ static X86_64RelType getType64(unsigned Kind,
}
}
-static unsigned getRelocType64(MCSymbolRefExpr::VariantKind Modifier,
- X86_64RelType Type, bool IsPCRel) {
+static void checkIs32(MCContext &Ctx, SMLoc Loc, X86_64RelType Type) {
+ if (Type != RT64_32)
+ Ctx.reportError(Loc,
+ "32 bit reloc applied to a field with a different size");
+}
+
+static unsigned getRelocType64(MCContext &Ctx, SMLoc Loc,
+ MCSymbolRefExpr::VariantKind Modifier,
+ X86_64RelType Type, bool IsPCRel,
+ unsigned Kind) {
switch (Modifier) {
default:
llvm_unreachable("Unimplemented");
@@ -146,21 +159,38 @@ static unsigned getRelocType64(MCSymbolRefExpr::VariantKind Modifier,
case RT64_8:
llvm_unreachable("Unimplemented");
}
+ case MCSymbolRefExpr::VK_TLSCALL:
+ return ELF::R_X86_64_TLSDESC_CALL;
+ case MCSymbolRefExpr::VK_TLSDESC:
+ return ELF::R_X86_64_GOTPC32_TLSDESC;
case MCSymbolRefExpr::VK_TLSGD:
- assert(Type == RT64_32);
+ checkIs32(Ctx, Loc, Type);
return ELF::R_X86_64_TLSGD;
case MCSymbolRefExpr::VK_GOTTPOFF:
- assert(Type == RT64_32);
+ checkIs32(Ctx, Loc, Type);
return ELF::R_X86_64_GOTTPOFF;
case MCSymbolRefExpr::VK_TLSLD:
- assert(Type == RT64_32);
+ checkIs32(Ctx, Loc, Type);
return ELF::R_X86_64_TLSLD;
case MCSymbolRefExpr::VK_PLT:
- assert(Type == RT64_32);
+ checkIs32(Ctx, Loc, Type);
return ELF::R_X86_64_PLT32;
case MCSymbolRefExpr::VK_GOTPCREL:
- assert(Type == RT64_32);
- return ELF::R_X86_64_GOTPCREL;
+ checkIs32(Ctx, Loc, Type);
+    // Older versions of ld.bfd/ld.gold/lld do not support
+    // GOTPCRELX/REX_GOTPCRELX, and we want to keep backward
+    // compatibility.
+ if (!Ctx.getAsmInfo()->canRelaxRelocations())
+ return ELF::R_X86_64_GOTPCREL;
+ switch (Kind) {
+ default:
+ return ELF::R_X86_64_GOTPCREL;
+ case X86::reloc_riprel_4byte_relax:
+ return ELF::R_X86_64_GOTPCRELX;
+ case X86::reloc_riprel_4byte_relax_rex:
+ case X86::reloc_riprel_4byte_movq_load:
+ return ELF::R_X86_64_REX_GOTPCRELX;
+ }
}
}
@@ -181,8 +211,10 @@ static X86_32RelType getType32(X86_64RelType T) {
llvm_unreachable("unexpected relocation type!");
}
-static unsigned getRelocType32(MCSymbolRefExpr::VariantKind Modifier,
- X86_32RelType Type, bool IsPCRel) {
+static unsigned getRelocType32(MCContext &Ctx,
+ MCSymbolRefExpr::VariantKind Modifier,
+ X86_32RelType Type, bool IsPCRel,
+ unsigned Kind) {
switch (Modifier) {
default:
llvm_unreachable("Unimplemented");
@@ -197,7 +229,15 @@ static unsigned getRelocType32(MCSymbolRefExpr::VariantKind Modifier,
}
case MCSymbolRefExpr::VK_GOT:
assert(Type == RT32_32);
- return IsPCRel ? ELF::R_386_GOTPC : ELF::R_386_GOT32;
+ if (IsPCRel)
+ return ELF::R_386_GOTPC;
+ // Older versions of ld.bfd/ld.gold/lld do not support R_386_GOT32X and we
+ // want to maintain compatibility.
+ if (!Ctx.getAsmInfo()->canRelaxRelocations())
+ return ELF::R_386_GOT32;
+
+ return Kind == X86::reloc_signed_4byte_relax ? ELF::R_386_GOT32X
+ : ELF::R_386_GOT32;
case MCSymbolRefExpr::VK_GOTOFF:
assert(Type == RT32_32);
assert(!IsPCRel);
@@ -240,17 +280,18 @@ static unsigned getRelocType32(MCSymbolRefExpr::VariantKind Modifier,
}
}
-unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
+unsigned X86ELFObjectWriter::getRelocType(MCContext &Ctx, const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const {
MCSymbolRefExpr::VariantKind Modifier = Target.getAccessVariant();
- X86_64RelType Type = getType64(Fixup.getKind(), Modifier, IsPCRel);
+ unsigned Kind = Fixup.getKind();
+ X86_64RelType Type = getType64(Kind, Modifier, IsPCRel);
if (getEMachine() == ELF::EM_X86_64)
- return getRelocType64(Modifier, Type, IsPCRel);
+ return getRelocType64(Ctx, Fixup.getLoc(), Modifier, Type, IsPCRel, Kind);
assert((getEMachine() == ELF::EM_386 || getEMachine() == ELF::EM_IAMCU) &&
"Unsupported ELF machine type.");
- return getRelocType32(Modifier, getType32(Type), IsPCRel);
+ return getRelocType32(Ctx, Modifier, getType32(Type), IsPCRel, Kind);
}
MCObjectWriter *llvm::createX86ELFObjectWriter(raw_pwrite_stream &OS,
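
The GOTPCREL case above decides in two steps: a global gate (`canRelaxRelocations`) that preserves compatibility with older ld.bfd/gold/lld, then a per-fixup refinement that only upgrades fixups coming from relaxable instruction encodings. A compact sketch of that decision table, with hypothetical enumerators standing in for the LLVM fixup kinds and ELF relocation constants:

    #include <cassert>

    // Illustrative stand-ins for the X86 fixup kinds and ELF reloc types.
    enum FixupKind { Plain, RipRelax, RipRelaxRex, RipMovqLoad };
    enum RelocType { GOTPCREL, GOTPCRELX, REX_GOTPCRELX };

    // Two-step selection: a compatibility gate first, then a per-fixup
    // upgrade for encodings the linker knows how to rewrite.
    RelocType pickGotReloc(bool CanRelax, FixupKind Kind) {
      if (!CanRelax)
        return GOTPCREL; // older ld.bfd/gold/lld only understand this one
      switch (Kind) {
      case RipRelax:
        return GOTPCRELX;
      case RipRelaxRex:
      case RipMovqLoad:
        return REX_GOTPCRELX;
      default:
        return GOTPCREL;
      }
    }

    int main() {
      assert(pickGotReloc(false, RipMovqLoad) == GOTPCREL);
      assert(pickGotReloc(true, RipMovqLoad) == REX_GOTPCRELX);
    }
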
diff --git a/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp b/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp
deleted file mode 100644
index ddb764facdbf..000000000000
--- a/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-//===-- X86ELFRelocationInfo.cpp ----------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/X86MCTargetDesc.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCRelocationInfo.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Object/ELFObjectFile.h"
-#include "llvm/Support/ELF.h"
-
-using namespace llvm;
-using namespace object;
-using namespace ELF;
-
-namespace {
-class X86_64ELFRelocationInfo : public MCRelocationInfo {
-public:
- X86_64ELFRelocationInfo(MCContext &Ctx) : MCRelocationInfo(Ctx) {}
-
- const MCExpr *createExprForRelocation(RelocationRef Rel) override {
- uint64_t RelType = Rel.getType();
- elf_symbol_iterator SymI = Rel.getSymbol();
-
- ErrorOr<StringRef> SymNameOrErr = SymI->getName();
- if (std::error_code EC = SymNameOrErr.getError())
- report_fatal_error(EC.message());
- StringRef SymName = *SymNameOrErr;
-
- ErrorOr<uint64_t> SymAddr = SymI->getAddress();
- if (std::error_code EC = SymAddr.getError())
- report_fatal_error(EC.message());
- uint64_t SymSize = SymI->getSize();
- int64_t Addend = *ELFRelocationRef(Rel).getAddend();
-
- MCSymbol *Sym = Ctx.getOrCreateSymbol(SymName);
- // FIXME: check that the value is actually the same.
- if (!Sym->isVariable())
- Sym->setVariableValue(MCConstantExpr::create(*SymAddr, Ctx));
-
- const MCExpr *Expr = nullptr;
- // If hasAddend is true, then we need to add Addend (r_addend) to Expr.
- bool hasAddend = false;
-
- // The AMD64 SysV ABI says:
- // A: the addend used to compute the value of the relocatable field.
- // B: the base address at which a shared object has been loaded into memory
- // during execution. Generally, a shared object is built with a 0 base
- // virtual address, but the execution address will be different.
- // G: the offset into the global offset table at which the relocation
- // entry's symbol will reside during execution.
- // GOT: the address of the global offset table.
- // L: the place (section offset or address) of the Procedure Linkage Table
- // entry for a symbol.
- // P: the place (section offset or address) of the storage unit being
- // relocated (computed using r_offset).
- // S: the value of the symbol whose index resides in the relocation entry.
- // Z: the size of the symbol whose index resides in the relocation entry.
-
- switch(RelType) {
- case R_X86_64_NONE:
- case R_X86_64_COPY:
- // none
- break;
- case R_X86_64_64:
- case R_X86_64_16:
- case R_X86_64_8:
- // S + A
- case R_X86_64_32:
- case R_X86_64_32S:
- // S + A (We don't care about the result not fitting in 32 bits.)
- case R_X86_64_PC32:
- case R_X86_64_PC16:
- case R_X86_64_PC8:
- case R_X86_64_PC64:
- // S + A - P (P/pcrel is implicit)
- hasAddend = true;
- Expr = MCSymbolRefExpr::create(Sym, Ctx);
- break;
- case R_X86_64_GOT32:
- case R_X86_64_GOT64:
- case R_X86_64_GOTPC32:
- case R_X86_64_GOTPC64:
- case R_X86_64_GOTPLT64:
- // G + A
- hasAddend = true;
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_GOT, Ctx);
- break;
- case R_X86_64_PLT32:
- // L + A - P -> S@PLT + A
- hasAddend = true;
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_PLT, Ctx);
- break;
- case R_X86_64_GLOB_DAT:
- case R_X86_64_JUMP_SLOT:
- // S
- Expr = MCSymbolRefExpr::create(Sym, Ctx);
- break;
- case R_X86_64_GOTPCREL:
- case R_X86_64_GOTPCREL64:
- // G + GOT + A - P -> S@GOTPCREL + A
- hasAddend = true;
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_GOTPCREL, Ctx);
- break;
- case R_X86_64_GOTOFF64:
- // S + A - GOT
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_GOTOFF, Ctx);
- break;
- case R_X86_64_PLTOFF64:
- // L + A - GOT
- break;
- case R_X86_64_SIZE32:
- case R_X86_64_SIZE64:
- // Z + A
- Expr = MCConstantExpr::create(SymSize, Ctx);
- break;
- default:
- Expr = MCSymbolRefExpr::create(Sym, Ctx);
- break;
- }
- if (Expr && hasAddend && Addend != 0)
- Expr = MCBinaryExpr::createAdd(Expr,
- MCConstantExpr::create(Addend, Ctx),
- Ctx);
- return Expr;
- }
-};
-} // End unnamed namespace
-
-/// createX86ELFRelocationInfo - Construct an X86 Mach-O RelocationInfo.
-MCRelocationInfo *llvm::createX86_64ELFRelocationInfo(MCContext &Ctx) {
- // We only handle x86-64 for now.
- return new X86_64ELFRelocationInfo(Ctx);
-}
diff --git a/lib/Target/X86/MCTargetDesc/X86FixupKinds.h b/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
index 4899900dcef9..dfdc9ec29aec 100644
--- a/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
+++ b/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
@@ -17,9 +17,15 @@ namespace X86 {
enum Fixups {
reloc_riprel_4byte = FirstTargetFixupKind, // 32-bit rip-relative
reloc_riprel_4byte_movq_load, // 32-bit rip-relative in movq
+ reloc_riprel_4byte_relax, // 32-bit rip-relative in relaxable
+ // instruction
+ reloc_riprel_4byte_relax_rex, // 32-bit rip-relative in relaxable
+ // instruction with rex prefix
reloc_signed_4byte, // 32-bit signed. Unlike FK_Data_4
// this will be sign extended at
// runtime.
+ reloc_signed_4byte_relax, // like reloc_signed_4byte, but
+ // in a relaxable instruction.
reloc_global_offset_table, // 32-bit, relative to the start
// of the instruction. Used only
// for _GLOBAL_OFFSET_TABLE_.
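
The new `_relax` fixup kinds exist so the object writer can tell that a 4-byte field sits inside an instruction the linker is allowed to rewrite. A hedged sketch of how an emitter might choose among them; the opcode names here are an illustrative subset, and the authoritative list lives in `emitMemModRMByte` further down this patch:

    #include <cassert>

    // Local stand-ins: the real kinds are the X86::reloc_riprel_* values
    // declared above.
    enum Fixup { RipRel4, RipRel4MovqLoad, RipRel4Relax, RipRel4RelaxRex };
    enum Op { MOV64rm, CALL64m, TEST64rm, OTHER };

    // movq loads keep their dedicated kind; other relaxable memory forms
    // pick the REX or non-REX relax variant depending on the emitted
    // prefix; anything else stays on the plain rip-relative fixup.
    Fixup pickRipFixup(Op Opcode, bool HasRex) {
      switch (Opcode) {
      case MOV64rm:
        return RipRel4MovqLoad;
      case CALL64m:
      case TEST64rm:
        return HasRex ? RipRel4RelaxRex : RipRel4Relax;
      default:
        return RipRel4;
      }
    }

    int main() {
      assert(pickRipFixup(TEST64rm, true) == RipRel4RelaxRex);
      assert(pickRipFixup(OTHER, false) == RipRel4);
    }
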
diff --git a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
index fc0b0f89e23d..b7c56cec2db8 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
@@ -35,7 +35,7 @@ AsmWriterFlavor("x86-asm-syntax", cl::init(ATT),
clEnumValEnd));
static cl::opt<bool>
-MarkedJTDataRegions("mark-data-regions", cl::init(false),
+MarkedJTDataRegions("mark-data-regions", cl::init(true),
cl::desc("Mark code section jump table data regions."),
cl::Hidden);
diff --git a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index dfab6ec10775..96c2e81c332a 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -76,36 +76,16 @@ public:
return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
}
- // On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
- // 0-7 and the difference between the 2 groups is given by the REX prefix.
- // In the VEX prefix, registers are seen sequencially from 0-15 and encoded
- // in 1's complement form, example:
- //
- // ModRM field => XMM9 => 1
- // VEX.VVVV => XMM9 => ~9
- //
- // See table 4-35 of Intel AVX Programming Reference for details.
- unsigned char getVEXRegisterEncoding(const MCInst &MI,
- unsigned OpNum) const {
- unsigned SrcReg = MI.getOperand(OpNum).getReg();
- unsigned SrcRegNum = GetX86RegNum(MI.getOperand(OpNum));
- if (X86II::isX86_64ExtendedReg(SrcReg))
- SrcRegNum |= 8;
-
- // The registers represented through VEX_VVVV should
- // be encoded in 1's complement form.
- return (~SrcRegNum) & 0xf;
+ unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const {
+ return Ctx.getRegisterInfo()->getEncodingValue(
+ MI.getOperand(OpNum).getReg());
}
- unsigned char getWriteMaskRegisterEncoding(const MCInst &MI,
- unsigned OpNum) const {
- assert(X86::K0 != MI.getOperand(OpNum).getReg() &&
- "Invalid mask register as write-mask!");
- unsigned MaskRegNum = GetX86RegNum(MI.getOperand(OpNum));
- return MaskRegNum;
+ bool isX86_64ExtendedReg(const MCInst &MI, unsigned OpNum) const {
+ return (getX86RegEncoding(MI, OpNum) >> 3) & 1;
}
- void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
+ void EmitByte(uint8_t C, unsigned &CurByte, raw_ostream &OS) const {
OS << (char)C;
++CurByte;
}
@@ -125,8 +105,8 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
int ImmOffset = 0) const;
- inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
- unsigned RM) {
+ inline static uint8_t ModRMByte(unsigned Mod, unsigned RegOpcode,
+ unsigned RM) {
assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
return RM | (RegOpcode << 3) | (Mod << 6);
}
@@ -142,11 +122,9 @@ public:
EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
}
-
- void EmitMemModRMByte(const MCInst &MI, unsigned Op,
- unsigned RegOpcodeField,
- uint64_t TSFlags, unsigned &CurByte, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups,
+ void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
+ uint64_t TSFlags, bool Rex, unsigned &CurByte,
+ raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
void encodeInstruction(const MCInst &MI, raw_ostream &OS,
@@ -160,10 +138,12 @@ public:
void EmitSegmentOverridePrefix(unsigned &CurByte, unsigned SegOperand,
const MCInst &MI, raw_ostream &OS) const;
- void EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
+ bool emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
const MCInst &MI, const MCInstrDesc &Desc,
- const MCSubtargetInfo &STI,
- raw_ostream &OS) const;
+ const MCSubtargetInfo &STI, raw_ostream &OS) const;
+
+ uint8_t DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
+ int MemOperand, const MCInstrDesc &Desc) const;
};
} // end anonymous namespace
@@ -177,7 +157,7 @@ MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
/// isDisp8 - Return true if this signed displacement fits in an 8-bit
/// sign-extended field.
static bool isDisp8(int Value) {
- return Value == (signed char)Value;
+ return Value == (int8_t)Value;
}
/// isCDisp8 - Return true if this signed displacement fits in an 8-bit
@@ -198,7 +178,7 @@ static bool isCDisp8(uint64_t TSFlags, int Value, int& CValue) {
if (Value & Mask) // Unaligned offset
return false;
Value /= (int)CD8_Scale;
- bool Ret = (Value == (signed char)Value);
+ bool Ret = (Value == (int8_t)Value);
if (Ret)
CValue = Value;
@@ -231,6 +211,10 @@ static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
(IndexReg.getReg() != 0 &&
X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
return true;
+ if (BaseReg.getReg() == X86::EIP) {
+ assert(IndexReg.getReg() == 0 && "Invalid eip-based address.");
+ return true;
+ }
return false;
}
@@ -343,7 +327,9 @@ EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
// the start of the field, not the end of the field.
if (FixupKind == FK_PCRel_4 ||
FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
- FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load))
+ FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
+ FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
+ FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex))
ImmOffset -= 4;
if (FixupKind == FK_PCRel_2)
ImmOffset -= 2;
@@ -359,12 +345,12 @@ EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
EmitConstant(0, Size, CurByte, OS);
}
-void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
+void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
unsigned RegOpcodeField,
- uint64_t TSFlags, unsigned &CurByte,
- raw_ostream &OS,
+ uint64_t TSFlags, bool Rex,
+ unsigned &CurByte, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const{
+ const MCSubtargetInfo &STI) const {
const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
const MCOperand &Base = MI.getOperand(Op+X86::AddrBaseReg);
const MCOperand &Scale = MI.getOperand(Op+X86::AddrScaleAmt);
@@ -373,18 +359,38 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;
// Handle %rip relative addressing.
- if (BaseReg == X86::RIP) { // [disp32+RIP] in X86-64 mode
+ if (BaseReg == X86::RIP ||
+ BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
assert(is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode");
assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
- unsigned FixupKind = X86::reloc_riprel_4byte;
-
+ unsigned Opcode = MI.getOpcode();
// movq loads are handled with a special relocation form which allows the
// linker to eliminate some loads for GOT references which end up in the
// same linkage unit.
- if (MI.getOpcode() == X86::MOV64rm)
- FixupKind = X86::reloc_riprel_4byte_movq_load;
+ unsigned FixupKind = [=]() {
+ switch (Opcode) {
+ default:
+ return X86::reloc_riprel_4byte;
+ case X86::MOV64rm:
+ assert(Rex);
+ return X86::reloc_riprel_4byte_movq_load;
+ case X86::CALL64m:
+ case X86::JMP64m:
+ case X86::TEST64rm:
+ case X86::ADC64rm:
+ case X86::ADD64rm:
+ case X86::AND64rm:
+ case X86::CMP64rm:
+ case X86::OR64rm:
+ case X86::SBB64rm:
+ case X86::SUB64rm:
+ case X86::XOR64rm:
+ return Rex ? X86::reloc_riprel_4byte_relax_rex
+ : X86::reloc_riprel_4byte_relax;
+ }
+ }();
// rip-relative addressing is actually relative to the *next* instruction.
// Since an immediate can follow the mod/rm byte for an instruction, this
@@ -510,8 +516,11 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
// Otherwise, emit the most general non-SIB encoding: [REG+disp32]
EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
- EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
- CurByte, OS, Fixups);
+ unsigned Opcode = MI.getOpcode();
+ unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
+ : X86::reloc_signed_4byte;
+ EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), CurByte, OS,
+ Fixups);
return;
}
@@ -603,26 +612,26 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// 1: Same as REX_R=0 (must be 1 in 32-bit mode)
// 0: Same as REX_R=1 (64 bit mode only)
//
- unsigned char VEX_R = 0x1;
- unsigned char EVEX_R2 = 0x1;
+ uint8_t VEX_R = 0x1;
+ uint8_t EVEX_R2 = 0x1;
// VEX_X: equivalent to REX.X, only used when a
// register is used for index in SIB Byte.
//
// 1: Same as REX.X=0 (must be 1 in 32-bit mode)
// 0: Same as REX.X=1 (64-bit mode only)
- unsigned char VEX_X = 0x1;
+ uint8_t VEX_X = 0x1;
// VEX_B:
//
// 1: Same as REX_B=0 (ignored in 32-bit mode)
// 0: Same as REX_B=1 (64 bit mode only)
//
- unsigned char VEX_B = 0x1;
+ uint8_t VEX_B = 0x1;
// VEX_W: opcode specific (use like REX.W, or used for
// opcode extension, or ignored, depending on the opcode byte)
- unsigned char VEX_W = 0;
+ uint8_t VEX_W = (TSFlags & X86II::VEX_W) ? 1 : 0;
// VEX_5M (VEX m-mmmmm field):
//
@@ -634,20 +643,31 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// 0b01000: XOP map select - 08h instructions with imm byte
// 0b01001: XOP map select - 09h instructions with no imm byte
// 0b01010: XOP map select - 0Ah instructions with imm dword
- unsigned char VEX_5M = 0;
+ uint8_t VEX_5M;
+ switch (TSFlags & X86II::OpMapMask) {
+ default: llvm_unreachable("Invalid prefix!");
+ case X86II::TB: VEX_5M = 0x1; break; // 0F
+ case X86II::T8: VEX_5M = 0x2; break; // 0F 38
+ case X86II::TA: VEX_5M = 0x3; break; // 0F 3A
+ case X86II::XOP8: VEX_5M = 0x8; break;
+ case X86II::XOP9: VEX_5M = 0x9; break;
+ case X86II::XOPA: VEX_5M = 0xA; break;
+ }
// VEX_4V (VEX vvvv field): a register specifier
// (in 1's complement form) or 1111 if unused.
- unsigned char VEX_4V = 0xf;
- unsigned char EVEX_V2 = 0x1;
+ uint8_t VEX_4V = 0xf;
+ uint8_t EVEX_V2 = 0x1;
- // VEX_L (Vector Length):
+ // EVEX_L2/VEX_L (Vector Length):
//
- // 0: scalar or 128-bit vector
- // 1: 256-bit vector
+ // L2 L
+ // 0 0: scalar or 128-bit vector
+ // 0 1: 256-bit vector
+ // 1 0: 512-bit vector
//
- unsigned char VEX_L = 0;
- unsigned char EVEX_L2 = 0;
+ uint8_t VEX_L = (TSFlags & X86II::VEX_L) ? 1 : 0;
+ uint8_t EVEX_L2 = (TSFlags & X86II::EVEX_L2) ? 1 : 0;
// VEX_PP: opcode extension providing equivalent
// functionality of a SIMD prefix
@@ -657,56 +677,32 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// 0b10: F3
// 0b11: F2
//
- unsigned char VEX_PP = 0;
+ uint8_t VEX_PP;
+ switch (TSFlags & X86II::OpPrefixMask) {
+ default: llvm_unreachable("Invalid op prefix!");
+ case X86II::PS: VEX_PP = 0x0; break; // none
+ case X86II::PD: VEX_PP = 0x1; break; // 66
+ case X86II::XS: VEX_PP = 0x2; break; // F3
+ case X86II::XD: VEX_PP = 0x3; break; // F2
+ }
// EVEX_U
- unsigned char EVEX_U = 1; // Always '1' so far
+ uint8_t EVEX_U = 1; // Always '1' so far
// EVEX_z
- unsigned char EVEX_z = 0;
+ uint8_t EVEX_z = (HasEVEX_K && (TSFlags & X86II::EVEX_Z)) ? 1 : 0;
// EVEX_b
- unsigned char EVEX_b = 0;
+ uint8_t EVEX_b = (TSFlags & X86II::EVEX_B) ? 1 : 0;
// EVEX_rc
- unsigned char EVEX_rc = 0;
+ uint8_t EVEX_rc = 0;
// EVEX_aaa
- unsigned char EVEX_aaa = 0;
+ uint8_t EVEX_aaa = 0;
bool EncodeRC = false;
- if (TSFlags & X86II::VEX_W)
- VEX_W = 1;
-
- if (TSFlags & X86II::VEX_L)
- VEX_L = 1;
- if (TSFlags & X86II::EVEX_L2)
- EVEX_L2 = 1;
-
- if (HasEVEX_K && (TSFlags & X86II::EVEX_Z))
- EVEX_z = 1;
-
- if ((TSFlags & X86II::EVEX_B))
- EVEX_b = 1;
-
- switch (TSFlags & X86II::OpPrefixMask) {
- default: break; // VEX_PP already correct
- case X86II::PD: VEX_PP = 0x1; break; // 66
- case X86II::XS: VEX_PP = 0x2; break; // F3
- case X86II::XD: VEX_PP = 0x3; break; // F2
- }
-
- switch (TSFlags & X86II::OpMapMask) {
- default: llvm_unreachable("Invalid prefix!");
- case X86II::TB: VEX_5M = 0x1; break; // 0F
- case X86II::T8: VEX_5M = 0x2; break; // 0F 38
- case X86II::TA: VEX_5M = 0x3; break; // 0F 3A
- case X86II::XOP8: VEX_5M = 0x8; break;
- case X86II::XOP9: VEX_5M = 0x9; break;
- case X86II::XOPA: VEX_5M = 0xA; break;
- }
-
// Classify VEX_B, VEX_4V, VEX_R, VEX_X
unsigned NumOps = Desc.getNumOperands();
unsigned CurOp = X86II::getOperandBias(Desc);
@@ -721,38 +717,30 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// MemAddr, src1(VEX_4V), src2(ModR/M)
// MemAddr, src1(ModR/M), imm8
//
- if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
- X86::AddrBaseReg).getReg()))
- VEX_B = 0x0;
- if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
- X86::AddrIndexReg).getReg()))
- VEX_X = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(MemOperand +
- X86::AddrIndexReg).getReg()))
- EVEX_V2 = 0x0;
+ unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
+ VEX_B = ~(BaseRegEnc >> 3) & 1;
+ unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
+ VEX_X = ~(IndexRegEnc >> 3) & 1;
+      if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
+ EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
CurOp += X86::AddrNumOperands;
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
- const MCOperand &MO = MI.getOperand(CurOp);
- if (MO.isReg()) {
- if (X86II::isX86_64ExtendedReg(MO.getReg()))
- VEX_R = 0x0;
- if (X86II::is32ExtendedReg(MO.getReg()))
- EVEX_R2 = 0x0;
- }
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_R = ~(RegEnc >> 3) & 1;
+ EVEX_R2 = ~(RegEnc >> 4) & 1;
break;
}
- case X86II::MRMSrcMem:
+ case X86II::MRMSrcMem: {
    // MRMSrcMem instruction forms:
// src1(ModR/M), MemAddr
// src1(ModR/M), src2(VEX_4V), MemAddr
@@ -762,31 +750,25 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// FMA4:
// dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
// dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_R = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_R2 = 0x0;
- CurOp++;
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_R = ~(RegEnc >> 3) & 1;
+ EVEX_R2 = ~(RegEnc >> 4) & 1;
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
- if (X86II::isX86_64ExtendedReg(
- MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
- VEX_B = 0x0;
- if (X86II::isX86_64ExtendedReg(
- MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
- VEX_X = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(MemOperand +
- X86::AddrIndexReg).getReg()))
- EVEX_V2 = 0x0;
+ unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
+ VEX_B = ~(BaseRegEnc >> 3) & 1;
+ unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
+ VEX_X = ~(IndexRegEnc >> 3) & 1;
+      if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
+ EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
if (HasVEX_4VOp3)
// Instruction format for 4VOp3:
@@ -794,8 +776,9 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// CurOp points to start of the MemoryOperand,
// it skips TIED_TO operands if exist, then increments past src1.
// CurOp + X86::AddrNumOperands will point to src3.
- VEX_4V = getVEXRegisterEncoding(MI, CurOp+X86::AddrNumOperands);
+ VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf;
break;
+ }
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
@@ -804,24 +787,21 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// MemAddr
// src1(VEX_4V), MemAddr
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
-
- if (X86II::isX86_64ExtendedReg(
- MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
- VEX_B = 0x0;
- if (X86II::isX86_64ExtendedReg(
- MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
- VEX_X = 0x0;
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
+
+ unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
+ VEX_B = ~(BaseRegEnc >> 3) & 1;
+ unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
+ VEX_X = ~(IndexRegEnc >> 3) & 1;
break;
}
- case X86II::MRMSrcReg:
+ case X86II::MRMSrcReg: {
    // MRMSrcReg instruction forms:
// dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
// dst(ModR/M), src1(ModR/M)
@@ -830,32 +810,27 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// FMA4:
// dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
// dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_R = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_R2 = 0x0;
- CurOp++;
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_R = ~(RegEnc >> 3) & 1;
+ EVEX_R2 = ~(RegEnc >> 4) & 1;
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
if (HasMemOp4) // Skip second register source (encoded in I8IMM)
CurOp++;
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_B = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_X = 0x0;
- CurOp++;
+ RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_B = ~(RegEnc >> 3) & 1;
+ VEX_X = ~(RegEnc >> 4) & 1;
if (HasVEX_4VOp3)
- VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
+ VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf;
if (EVEX_b) {
if (HasEVEX_RC) {
unsigned RcOperand = NumOps-1;
@@ -865,55 +840,52 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
EncodeRC = true;
}
break;
- case X86II::MRMDestReg:
+ }
+ case X86II::MRMDestReg: {
    // MRMDestReg instruction forms:
// dst(ModR/M), src(ModR/M)
// dst(ModR/M), src(ModR/M), imm8
// dst(ModR/M), src1(VEX_4V), src2(ModR/M)
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_B = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_X = 0x0;
- CurOp++;
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_B = ~(RegEnc >> 3) & 1;
+ VEX_X = ~(RegEnc >> 4) & 1;
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_R = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_R2 = 0x0;
+ RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_R = ~(RegEnc >> 3) & 1;
+ EVEX_R2 = ~(RegEnc >> 4) & 1;
if (EVEX_b)
EncodeRC = true;
break;
+ }
case X86II::MRM0r: case X86II::MRM1r:
case X86II::MRM2r: case X86II::MRM3r:
case X86II::MRM4r: case X86II::MRM5r:
- case X86II::MRM6r: case X86II::MRM7r:
+ case X86II::MRM6r: case X86II::MRM7r: {
    // MRM0r-MRM7r instruction forms:
// dst(VEX_4V), src(ModR/M), imm8
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_B = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_X = 0x0;
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_B = ~(RegEnc >> 3) & 1;
+ VEX_X = ~(RegEnc >> 4) & 1;
break;
}
+ }
if (Encoding == X86II::VEX || Encoding == X86II::XOP) {
// VEX opcode prefix can have 2 or 3 bytes
@@ -931,7 +903,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// +-----+ +--------------+ +-------------------+
// | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
// +-----+ +--------------+ +-------------------+
- unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
+ uint8_t LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
// Can we use the 2 byte VEX prefix?
if (Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
@@ -954,8 +926,6 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
assert((VEX_5M & 0x3) == VEX_5M
&& "More than 2 significant bits in VEX.m-mmmm fields for EVEX!");
- VEX_5M &= 0x3;
-
EmitByte(0x62, CurByte, OS);
EmitByte((VEX_R << 7) |
(VEX_X << 6) |
@@ -968,26 +938,27 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
VEX_PP, CurByte, OS);
if (EncodeRC)
EmitByte((EVEX_z << 7) |
- (EVEX_rc << 5) |
- (EVEX_b << 4) |
- (EVEX_V2 << 3) |
- EVEX_aaa, CurByte, OS);
+ (EVEX_rc << 5) |
+ (EVEX_b << 4) |
+ (EVEX_V2 << 3) |
+ EVEX_aaa, CurByte, OS);
else
EmitByte((EVEX_z << 7) |
- (EVEX_L2 << 6) |
- (VEX_L << 5) |
- (EVEX_b << 4) |
- (EVEX_V2 << 3) |
- EVEX_aaa, CurByte, OS);
+ (EVEX_L2 << 6) |
+ (VEX_L << 5) |
+ (EVEX_b << 4) |
+ (EVEX_V2 << 3) |
+ EVEX_aaa, CurByte, OS);
}
}
/// DetermineREXPrefix - Determine if the MCInst has to be encoded with an X86-64
/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
/// size, and 3) use of X86-64 extended registers.
-static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
- const MCInstrDesc &Desc) {
- unsigned REX = 0;
+uint8_t X86MCCodeEmitter::DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
+ int MemOperand,
+ const MCInstrDesc &Desc) const {
+ uint8_t REX = 0;
bool UsesHighByteReg = false;
if (TSFlags & X86II::REX_W)
@@ -996,13 +967,10 @@ static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
if (MI.getNumOperands() == 0) return REX;
unsigned NumOps = MI.getNumOperands();
- // FIXME: MCInst should explicitize the two-addrness.
- bool isTwoAddr = NumOps > 1 &&
- Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1;
+ unsigned CurOp = X86II::getOperandBias(Desc);
// If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
- unsigned i = isTwoAddr ? 1 : 0;
- for (; i != NumOps; ++i) {
+ for (unsigned i = CurOp; i != NumOps; ++i) {
const MCOperand &MO = MI.getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
@@ -1016,65 +984,44 @@ static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
}
switch (TSFlags & X86II::FormMask) {
+ case X86II::AddRegFrm:
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
+ break;
case X86II::MRMSrcReg:
- if (MI.getOperand(0).isReg() &&
- X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 2; // set REX.R
- i = isTwoAddr ? 2 : 1;
- for (; i != NumOps; ++i) {
- const MCOperand &MO = MI.getOperand(i);
- if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << 0; // set REX.B
- }
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
break;
case X86II::MRMSrcMem: {
- if (MI.getOperand(0).isReg() &&
- X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 2; // set REX.R
- unsigned Bit = 0;
- i = isTwoAddr ? 2 : 1;
- for (; i != NumOps; ++i) {
- const MCOperand &MO = MI.getOperand(i);
- if (MO.isReg()) {
- if (X86II::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << Bit; // set REX.B (Bit=0) and REX.X (Bit=1)
- Bit++;
- }
- }
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
+ CurOp += X86::AddrNumOperands;
break;
}
+ case X86II::MRMDestReg:
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
+ break;
+ case X86II::MRMDestMem:
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
+ CurOp += X86::AddrNumOperands;
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
+ break;
case X86II::MRMXm:
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m:
- case X86II::MRMDestMem: {
- unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
- i = isTwoAddr ? 1 : 0;
- if (NumOps > e && MI.getOperand(e).isReg() &&
- X86II::isX86_64ExtendedReg(MI.getOperand(e).getReg()))
- REX |= 1 << 2; // set REX.R
- unsigned Bit = 0;
- for (; i != e; ++i) {
- const MCOperand &MO = MI.getOperand(i);
- if (MO.isReg()) {
- if (X86II::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << Bit; // REX.B (Bit=0) and REX.X (Bit=1)
- Bit++;
- }
- }
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
break;
- }
- default:
- if (MI.getOperand(0).isReg() &&
- X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 0; // set REX.B
- i = isTwoAddr ? 2 : 1;
- for (unsigned e = NumOps; i != e; ++i) {
- const MCOperand &MO = MI.getOperand(i);
- if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << 2; // set REX.R
- }
+ case X86II::MRMXr:
+ case X86II::MRM0r: case X86II::MRM1r:
+ case X86II::MRM2r: case X86II::MRM3r:
+ case X86II::MRM4r: case X86II::MRM5r:
+ case X86II::MRM6r: case X86II::MRM7r:
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
break;
}
if (REX && UsesHighByteReg)
@@ -1101,16 +1048,18 @@ void X86MCCodeEmitter::EmitSegmentOverridePrefix(unsigned &CurByte,
}
}
-/// EmitOpcodePrefix - Emit all instruction prefixes prior to the opcode.
+/// Emit all instruction prefixes prior to the opcode.
///
/// MemOperand is the operand # of the start of a memory operand if present. If
/// Not present, it is -1.
-void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
+///
+/// Returns true if a REX prefix was used.
+bool X86MCCodeEmitter::emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
int MemOperand, const MCInst &MI,
const MCInstrDesc &Desc,
const MCSubtargetInfo &STI,
raw_ostream &OS) const {
-
+ bool Ret = false;
// Emit the operand size opcode prefix as needed.
if ((TSFlags & X86II::OpSizeMask) == (is16BitMode(STI) ? X86II::OpSize32
: X86II::OpSize16))
@@ -1135,8 +1084,10 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// Handle REX prefix.
// FIXME: Can this come before F2 etc to simplify emission?
if (is64BitMode(STI)) {
- if (unsigned REX = DetermineREXPrefix(MI, TSFlags, Desc))
+ if (uint8_t REX = DetermineREXPrefix(MI, TSFlags, MemOperand, Desc)) {
EmitByte(0x40 | REX, CurByte, OS);
+ Ret = true;
+ }
}
// 0x0F escape code must be emitted just before the opcode.
@@ -1156,6 +1107,7 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
EmitByte(0x3A, CurByte, OS);
break;
}
+ return Ret;
}
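
With `getOperandBias` supplying the starting operand and `isX86_64ExtendedReg(MI, OpNum)` returning encoding bit 3, the rewritten `DetermineREXPrefix` reduces to OR-ing one bit per operand position: ModR/M.reg feeds REX.R (bit 2), the SIB index feeds REX.X (bit 1), and the base or ModR/M.r/m register feeds REX.B (bit 0). A stand-alone sketch under the assumption of plain integer register encodings:

    #include <cassert>
    #include <cstdint>

    // Bit 3 of a register encoding distinguishes r8-r15 (and xmm8-xmm15)
    // from the base set; this is what isX86_64ExtendedReg(MI, OpNum) reads.
    static unsigned extBit(unsigned Enc) { return (Enc >> 3) & 1; }

    // Assemble the REX payload one operand position at a time, the way the
    // rewritten DetermineREXPrefix walks each instruction form.
    uint8_t buildRex(bool W, unsigned RegEnc, unsigned IndexEnc,
                     unsigned BaseEnc) {
      uint8_t REX = W ? 0x8 : 0x0;  // REX.W: 64-bit operand size
      REX |= extBit(RegEnc) << 2;   // REX.R: ModR/M reg field extension
      REX |= extBit(IndexEnc) << 1; // REX.X: SIB index extension
      REX |= extBit(BaseEnc) << 0;  // REX.B: base / ModR/M r/m extension
      return REX;
    }

    int main() {
      // e.g. mov rax, [r9 + r10*4]: rax encodes as 0, r10 as 10, r9 as 9.
      assert(buildRex(true, 0, 10, 9) == 0xB);
    }

`emitOpcodePrefix` then writes `0x40 | REX` only when some bit is set, and reports that fact back to the caller so the rip-relative fixup choice can distinguish the REX and non-REX relax forms.
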
void X86MCCodeEmitter::
@@ -1183,14 +1135,18 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
bool HasVEX_4V = TSFlags & X86II::VEX_4V;
bool HasVEX_4VOp3 = TSFlags & X86II::VEX_4VOp3;
bool HasMemOp4 = TSFlags & X86II::MemOp4;
- const unsigned MemOp4_I8IMMOperand = 2;
+ bool HasVEX_I8IMM = TSFlags & X86II::VEX_I8IMM;
+ assert((!HasMemOp4 || HasVEX_I8IMM) && "MemOp4 should imply VEX_I8IMM");
// It uses the EVEX.aaa field?
bool HasEVEX_K = TSFlags & X86II::EVEX_K;
bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
+ // Used if a register is encoded in 7:4 of immediate.
+ unsigned I8RegNum = 0;
+
// Determine where the memory operand starts, if present.
- int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
+ int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
if (MemoryOperand != -1) MemoryOperand += CurOp;
// Emit segment override opcode prefix as needed.
@@ -1226,19 +1182,20 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
if (need_address_override)
EmitByte(0x67, CurByte, OS);
+ bool Rex = false;
if (Encoding == 0)
- EmitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
+ Rex = emitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
else
EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
- unsigned char BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
+ uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
if (TSFlags & X86II::Has3DNow0F0FOpcode)
BaseOpcode = 0x0F; // Weird 3DNow! encoding.
- unsigned SrcRegNum = 0;
- switch (TSFlags & X86II::FormMask) {
- default: errs() << "FORM: " << (TSFlags & X86II::FormMask) << "\n";
+ uint64_t Form = TSFlags & X86II::FormMask;
+ switch (Form) {
+ default: errs() << "FORM: " << Form << "\n";
llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
case X86II::Pseudo:
llvm_unreachable("Pseudo instruction shouldn't be emitted");
@@ -1315,12 +1272,12 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
break;
- case X86II::MRMDestReg:
+ case X86II::MRMDestReg: {
EmitByte(BaseOpcode, CurByte, OS);
- SrcRegNum = CurOp + 1;
+ unsigned SrcRegNum = CurOp + 1;
if (HasEVEX_K) // Skip writemask
- SrcRegNum++;
+ ++SrcRegNum;
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
++SrcRegNum;
@@ -1329,71 +1286,68 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
GetX86RegNum(MI.getOperand(SrcRegNum)), CurByte, OS);
CurOp = SrcRegNum + 1;
break;
-
- case X86II::MRMDestMem:
+ }
+ case X86II::MRMDestMem: {
EmitByte(BaseOpcode, CurByte, OS);
- SrcRegNum = CurOp + X86::AddrNumOperands;
+ unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
if (HasEVEX_K) // Skip writemask
- SrcRegNum++;
+ ++SrcRegNum;
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
++SrcRegNum;
- EmitMemModRMByte(MI, CurOp,
- GetX86RegNum(MI.getOperand(SrcRegNum)),
- TSFlags, CurByte, OS, Fixups, STI);
+ emitMemModRMByte(MI, CurOp, GetX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
+ Rex, CurByte, OS, Fixups, STI);
CurOp = SrcRegNum + 1;
break;
-
- case X86II::MRMSrcReg:
+ }
+ case X86II::MRMSrcReg: {
EmitByte(BaseOpcode, CurByte, OS);
- SrcRegNum = CurOp + 1;
+ unsigned SrcRegNum = CurOp + 1;
if (HasEVEX_K) // Skip writemask
- SrcRegNum++;
+ ++SrcRegNum;
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
++SrcRegNum;
- if (HasMemOp4) // Skip 2nd src (which is encoded in I8IMM)
- ++SrcRegNum;
+ if (HasMemOp4) // Capture 2nd src (which is encoded in I8IMM)
+ I8RegNum = getX86RegEncoding(MI, SrcRegNum++);
EmitRegModRMByte(MI.getOperand(SrcRegNum),
GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
-
- // 2 operands skipped with HasMemOp4, compensate accordingly
- CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1;
+ CurOp = SrcRegNum + 1;
if (HasVEX_4VOp3)
++CurOp;
+ if (!HasMemOp4 && HasVEX_I8IMM)
+ I8RegNum = getX86RegEncoding(MI, CurOp++);
// do not count the rounding control operand
if (HasEVEX_RC)
- NumOps--;
+ --NumOps;
break;
-
+ }
case X86II::MRMSrcMem: {
- int AddrOperands = X86::AddrNumOperands;
unsigned FirstMemOp = CurOp+1;
- if (HasEVEX_K) { // Skip writemask
- ++AddrOperands;
+ if (HasEVEX_K) // Skip writemask
++FirstMemOp;
- }
- if (HasVEX_4V) {
- ++AddrOperands;
+ if (HasVEX_4V)
++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
- }
- if (HasMemOp4) // Skip second register source (encoded in I8IMM)
- ++FirstMemOp;
+
+ if (HasMemOp4) // Capture second register source (encoded in I8IMM)
+ I8RegNum = getX86RegEncoding(MI, FirstMemOp++);
EmitByte(BaseOpcode, CurByte, OS);
- EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
- TSFlags, CurByte, OS, Fixups, STI);
- CurOp += AddrOperands + 1;
+ emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
+ TSFlags, Rex, CurByte, OS, Fixups, STI);
+ CurOp = FirstMemOp + X86::AddrNumOperands;
if (HasVEX_4VOp3)
++CurOp;
+ if (!HasMemOp4 && HasVEX_I8IMM)
+ I8RegNum = getX86RegEncoding(MI, CurOp++);
break;
}
@@ -1407,7 +1361,6 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
if (HasEVEX_K) // Skip writemask
++CurOp;
EmitByte(BaseOpcode, CurByte, OS);
- uint64_t Form = TSFlags & X86II::FormMask;
EmitRegModRMByte(MI.getOperand(CurOp++),
(Form == X86II::MRMXr) ? 0 : Form-X86II::MRM0r,
CurByte, OS);
@@ -1424,9 +1377,9 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
if (HasEVEX_K) // Skip writemask
++CurOp;
EmitByte(BaseOpcode, CurByte, OS);
- uint64_t Form = TSFlags & X86II::FormMask;
- EmitMemModRMByte(MI, CurOp, (Form == X86II::MRMXm) ? 0 : Form-X86II::MRM0m,
- TSFlags, CurByte, OS, Fixups, STI);
+ emitMemModRMByte(MI, CurOp,
+ (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
+ Rex, CurByte, OS, Fixups, STI);
CurOp += X86::AddrNumOperands;
break;
}
@@ -1453,38 +1406,27 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
case X86II::MRM_FC: case X86II::MRM_FD: case X86II::MRM_FE:
case X86II::MRM_FF:
EmitByte(BaseOpcode, CurByte, OS);
-
- uint64_t Form = TSFlags & X86II::FormMask;
EmitByte(0xC0 + Form - X86II::MRM_C0, CurByte, OS);
break;
}
- // If there is a remaining operand, it must be a trailing immediate. Emit it
- // according to the right size for the instruction. Some instructions
- // (SSE4a extrq and insertq) have two trailing immediates.
- while (CurOp != NumOps && NumOps - CurOp <= 2) {
+ if (HasVEX_I8IMM) {
    // The last source register of a 4-operand instruction in AVX is encoded
    // in bits[7:4] of an immediate byte.
- if (TSFlags & X86II::VEX_I8IMM) {
- const MCOperand &MO = MI.getOperand(HasMemOp4 ? MemOp4_I8IMMOperand
- : CurOp);
- ++CurOp;
- unsigned RegNum = GetX86RegNum(MO) << 4;
- if (X86II::isX86_64ExtendedReg(MO.getReg()))
- RegNum |= 1 << 7;
- // If there is an additional 5th operand it must be an immediate, which
- // is encoded in bits[3:0]
- if (CurOp != NumOps) {
- const MCOperand &MIMM = MI.getOperand(CurOp++);
- if (MIMM.isImm()) {
- unsigned Val = MIMM.getImm();
- assert(Val < 16 && "Immediate operand value out of range");
- RegNum |= Val;
- }
- }
- EmitImmediate(MCOperand::createImm(RegNum), MI.getLoc(), 1, FK_Data_1,
- CurByte, OS, Fixups);
- } else {
+ assert(I8RegNum < 16 && "Register encoding out of range");
+ I8RegNum <<= 4;
+ if (CurOp != NumOps) {
+ unsigned Val = MI.getOperand(CurOp++).getImm();
+ assert(Val < 16 && "Immediate operand value out of range");
+ I8RegNum |= Val;
+ }
+ EmitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
+ CurByte, OS, Fixups);
+ } else {
+ // If there is a remaining operand, it must be a trailing immediate. Emit it
+ // according to the right size for the instruction. Some instructions
+ // (SSE4a extrq and insertq) have two trailing immediates.
+ while (CurOp != NumOps && NumOps - CurOp <= 2) {
EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
CurByte, OS, Fixups);
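
The recurring pattern in the rewritten VEX/EVEX classification above is to fetch the full 5-bit register encoding once and derive every inverted prefix bit from it: complemented bit 3 becomes VEX.R/X/B, complemented bit 4 becomes the EVEX R2/V2/X extensions, and the complemented low nibble becomes VEX.vvvv. A small self-contained sketch of that derivation, detached from MCRegisterInfo:

    #include <cassert>

    // Illustrative: Enc is the full 5-bit register encoding the patch reads
    // once via getX86RegEncoding(); the rest is pure bit arithmetic.
    struct PrefixBits {
      unsigned R;    // goes into VEX.R (likewise X and B elsewhere)
      unsigned R2;   // goes into EVEX.R' (likewise V2 elsewhere)
      unsigned VVVV; // goes into VEX.vvvv
    };

    PrefixBits classify(unsigned Enc) {
      PrefixBits B;
      B.R = ~(Enc >> 3) & 1;  // complemented bit 3
      B.R2 = ~(Enc >> 4) & 1; // complemented bit 4
      B.VVVV = ~Enc & 0xf;    // complemented low nibble (1's complement)
      return B;
    }

    int main() {
      PrefixBits B = classify(9); // e.g. xmm9: bit 3 set, bit 4 clear
      assert(B.R == 0 && B.R2 == 1);
      assert(B.VVVV == 6); // ~9 & 0xf
    }

Folding everything into one encoding lookup is what lets the patch delete the separate `getVEXRegisterEncoding`/`is32ExtendedReg` call sites in each MRM form.
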
diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index 53a6550acdd5..311a8d677eea 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -16,7 +16,6 @@
#include "InstPrinter/X86IntelInstPrinter.h"
#include "X86MCAsmInfo.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
@@ -66,12 +65,59 @@ unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
return DWARFFlavour::X86_32_Generic;
}
-void X86_MC::InitLLVM2SEHRegisterMapping(MCRegisterInfo *MRI) {
+void X86_MC::initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI) {
// FIXME: TableGen these.
- for (unsigned Reg = X86::NoRegister+1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
+ for (unsigned Reg = X86::NoRegister + 1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
unsigned SEH = MRI->getEncodingValue(Reg);
MRI->mapLLVMRegToSEHReg(Reg, SEH);
}
+
+ // These CodeView registers are numbered sequentially starting at value 1.
+ static const MCPhysReg LowCVRegs[] = {
+ X86::AL, X86::CL, X86::DL, X86::BL, X86::AH, X86::CH,
+ X86::DH, X86::BH, X86::AX, X86::CX, X86::DX, X86::BX,
+ X86::SP, X86::BP, X86::SI, X86::DI, X86::EAX, X86::ECX,
+ X86::EDX, X86::EBX, X86::ESP, X86::EBP, X86::ESI, X86::EDI,
+ };
+ unsigned CVLowRegStart = 1;
+ for (unsigned I = 0; I < array_lengthof(LowCVRegs); ++I)
+ MRI->mapLLVMRegToCVReg(LowCVRegs[I], I + CVLowRegStart);
+
+ MRI->mapLLVMRegToCVReg(X86::EFLAGS, 34);
+
+ // The x87 registers start at 128 and are numbered sequentially.
+ unsigned FP0Start = 128;
+ for (unsigned I = 0; I < 8; ++I)
+ MRI->mapLLVMRegToCVReg(X86::FP0 + I, FP0Start + I);
+
+ // The low 8 XMM registers start at 154 and are numbered sequentially.
+ unsigned CVXMM0Start = 154;
+ for (unsigned I = 0; I < 8; ++I)
+ MRI->mapLLVMRegToCVReg(X86::XMM0 + I, CVXMM0Start + I);
+
+ // The high 8 XMM registers start at 252 and are numbered sequentially.
+ unsigned CVXMM8Start = 252;
+ for (unsigned I = 0; I < 8; ++I)
+ MRI->mapLLVMRegToCVReg(X86::XMM8 + I, CVXMM8Start + I);
+
+ // FIXME: XMM16 and above from AVX512 not yet documented.
+
+ // AMD64 registers start at 324 and count up.
+ unsigned CVX64RegStart = 324;
+ static const MCPhysReg CVX64Regs[] = {
+ X86::SIL, X86::DIL, X86::BPL, X86::SPL, X86::RAX, X86::RBX,
+ X86::RCX, X86::RDX, X86::RSI, X86::RDI, X86::RBP, X86::RSP,
+ X86::R8, X86::R9, X86::R10, X86::R11, X86::R12, X86::R13,
+ X86::R14, X86::R15, X86::R8B, X86::R9B, X86::R10B, X86::R11B,
+ X86::R12B, X86::R13B, X86::R14B, X86::R15B, X86::R8W, X86::R9W,
+ X86::R10W, X86::R11W, X86::R12W, X86::R13W, X86::R14W, X86::R15W,
+ X86::R8D, X86::R9D, X86::R10D, X86::R11D, X86::R12D, X86::R13D,
+ X86::R14D, X86::R15D, X86::YMM0, X86::YMM1, X86::YMM2, X86::YMM3,
+ X86::YMM4, X86::YMM5, X86::YMM6, X86::YMM7, X86::YMM8, X86::YMM9,
+ X86::YMM10, X86::YMM11, X86::YMM12, X86::YMM13, X86::YMM14, X86::YMM15,
+ };
+ for (unsigned I = 0; I < array_lengthof(CVX64Regs); ++I)
+ MRI->mapLLVMRegToCVReg(CVX64Regs[I], CVX64RegStart + I);
}
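
Each block in the new CodeView mapping is a "base number plus sequential run" over a contiguous register range. A tiny sketch of that pattern with a hypothetical map in place of `MCRegisterInfo`; the 128 base for the x87 run is taken from the code above, while the FP0 enum value is invented:

    #include <cassert>
    #include <map>

    // Hypothetical stand-in for MCRegisterInfo's LLVM-to-CodeView table.
    static std::map<unsigned, unsigned> CVMap;

    // Map a contiguous run of registers to CodeView numbers counting up
    // from Base, the shape shared by the x87, XMM0-7 and XMM8-15 runs.
    static void mapRun(unsigned FirstReg, unsigned Count, unsigned Base) {
      for (unsigned I = 0; I < Count; ++I)
        CVMap[FirstReg + I] = Base + I;
    }

    int main() {
      const unsigned FP0 = 100; // invented value standing in for X86::FP0
      mapRun(FP0, 8, 128);      // the x87 run starts at CodeView number 128
      assert(CVMap[FP0 + 3] == 131);
    }
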
MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(const Triple &TT,
@@ -105,7 +151,7 @@ static MCRegisterInfo *createX86MCRegisterInfo(const Triple &TT) {
MCRegisterInfo *X = new MCRegisterInfo();
InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
X86_MC::getDwarfRegFlavour(TT, true), RA);
- X86_MC::InitLLVM2SEHRegisterMapping(X);
+ X86_MC::initLLVMToSEHAndCVRegMapping(X);
return X;
}
@@ -152,53 +198,16 @@ static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
return MAI;
}
-static MCCodeGenInfo *createX86MCCodeGenInfo(const Triple &TT, Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
-
+static void adjustCodeGenOpts(const Triple &TT, Reloc::Model RM,
+ CodeModel::Model &CM) {
bool is64Bit = TT.getArch() == Triple::x86_64;
- if (RM == Reloc::Default) {
- // Darwin defaults to PIC in 64 bit mode and dynamic-no-pic in 32 bit mode.
- // Win64 requires rip-rel addressing, thus we force it to PIC. Otherwise we
- // use static relocation model by default.
- if (TT.isOSDarwin()) {
- if (is64Bit)
- RM = Reloc::PIC_;
- else
- RM = Reloc::DynamicNoPIC;
- } else if (TT.isOSWindows() && is64Bit)
- RM = Reloc::PIC_;
- else
- RM = Reloc::Static;
- }
-
- // ELF and X86-64 don't have a distinct DynamicNoPIC model. DynamicNoPIC
- // is defined as a model for code which may be used in static or dynamic
- // executables but not necessarily a shared library. On X86-32 we just
- // compile in -static mode, in x86-64 we use PIC.
- if (RM == Reloc::DynamicNoPIC) {
- if (is64Bit)
- RM = Reloc::PIC_;
- else if (!TT.isOSDarwin())
- RM = Reloc::Static;
- }
-
- // If we are on Darwin, disallow static relocation model in X86-64 mode, since
- // the Mach-O file format doesn't support it.
- if (RM == Reloc::Static && TT.isOSDarwin() && is64Bit)
- RM = Reloc::PIC_;
-
// For static codegen, if we're not already set, use Small codegen.
if (CM == CodeModel::Default)
CM = CodeModel::Small;
else if (CM == CodeModel::JITDefault)
// 64-bit JIT places everything in the same buffer except external funcs.
CM = is64Bit ? CodeModel::Large : CodeModel::Small;
-
- X->initMCCodeGenInfo(RM, CM, OL);
- return X;
}
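
After the rewrite, only the code-model defaulting survives in this hook; the relocation-model defaulting that used to live in `createX86MCCodeGenInfo` is simply deleted here. A sketch of the remaining decision, with a local enum standing in for `CodeModel::Model`:

    #include <cassert>

    // Local stand-in for CodeModel::Model; only the defaulting is shown.
    enum class CM { Default, JITDefault, Small, Large };

    // Explicit choices pass through; the two "pick for me" values resolve
    // based on whether 64-bit JIT placement rules apply.
    CM resolveCodeModel(CM Model, bool Is64Bit) {
      if (Model == CM::Default)
        return CM::Small;
      if (Model == CM::JITDefault)
        return Is64Bit ? CM::Large : CM::Small;
      return Model;
    }

    int main() {
      assert(resolveCodeModel(CM::Default, true) == CM::Small);
      assert(resolveCodeModel(CM::JITDefault, true) == CM::Large);
      assert(resolveCodeModel(CM::Large, false) == CM::Large);
    }
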
static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
@@ -215,10 +224,6 @@ static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
static MCRelocationInfo *createX86MCRelocationInfo(const Triple &TheTriple,
MCContext &Ctx) {
- if (TheTriple.isOSBinFormatMachO() && TheTriple.getArch() == Triple::x86_64)
- return createX86_64MachORelocationInfo(Ctx);
- else if (TheTriple.isOSBinFormatELF())
- return createX86_64ELFRelocationInfo(Ctx);
// Default to the stock relocation info.
return llvm::createMCRelocationInfo(TheTriple, Ctx);
}
@@ -234,7 +239,7 @@ extern "C" void LLVMInitializeX86TargetMC() {
RegisterMCAsmInfoFn X(*T, createX86MCAsmInfo);
// Register the MC codegen info.
- RegisterMCCodeGenInfoFn Y(*T, createX86MCCodeGenInfo);
+ RegisterMCAdjustCodeGenOptsFn Y(*T, adjustCodeGenOpts);
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(*T, createX86MCInstrInfo);
diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
index 2d2836ff07c5..ca4f0d3e17d5 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
+++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
@@ -14,6 +14,7 @@
#ifndef LLVM_LIB_TARGET_X86_MCTARGETDESC_X86MCTARGETDESC_H
#define LLVM_LIB_TARGET_X86_MCTARGETDESC_X86MCTARGETDESC_H
+#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/DataTypes.h"
#include <string>
@@ -26,7 +27,6 @@ class MCObjectWriter;
class MCRegisterInfo;
class MCSubtargetInfo;
class MCRelocationInfo;
-class MCStreamer;
class Target;
class Triple;
class StringRef;
@@ -56,7 +56,7 @@ std::string ParseX86Triple(const Triple &TT);
unsigned getDwarfRegFlavour(const Triple &TT, bool isEH);
-void InitLLVM2SEHRegisterMapping(MCRegisterInfo *MRI);
+void initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI);
/// Create a X86 MCSubtargetInfo instance. This is exposed so Asm parser, etc.
/// do not need to go through TargetRegistry.
@@ -93,12 +93,6 @@ MCObjectWriter *createX86ELFObjectWriter(raw_pwrite_stream &OS, bool IsELF64,
MCObjectWriter *createX86WinCOFFObjectWriter(raw_pwrite_stream &OS,
bool Is64Bit);
-/// Construct X86-64 Mach-O relocation info.
-MCRelocationInfo *createX86_64MachORelocationInfo(MCContext &Ctx);
-
-/// Construct X86-64 ELF relocation info.
-MCRelocationInfo *createX86_64ELFRelocationInfo(MCContext &Ctx);
-
/// Returns the sub or super register of a specific X86 register.
/// e.g. getX86SubSuperRegister(X86::EAX, 16) returns X86::AX.
/// Aborts on error.
diff --git a/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp b/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp
deleted file mode 100644
index 9bfe999424fa..000000000000
--- a/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-//===-- X86MachORelocationInfo.cpp ----------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/X86MCTargetDesc.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCRelocationInfo.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Object/MachO.h"
-
-using namespace llvm;
-using namespace object;
-using namespace MachO;
-
-namespace {
-class X86_64MachORelocationInfo : public MCRelocationInfo {
-public:
- X86_64MachORelocationInfo(MCContext &Ctx) : MCRelocationInfo(Ctx) {}
-
- const MCExpr *createExprForRelocation(RelocationRef Rel) override {
- const MachOObjectFile *Obj = cast<MachOObjectFile>(Rel.getObject());
-
- uint64_t RelType = Rel.getType();
- symbol_iterator SymI = Rel.getSymbol();
-
- ErrorOr<StringRef> SymNameOrErr = SymI->getName();
- if (std::error_code EC = SymNameOrErr.getError())
- report_fatal_error(EC.message());
- StringRef SymName = *SymNameOrErr;
- uint64_t SymAddr = SymI->getValue();
-
- any_relocation_info RE = Obj->getRelocation(Rel.getRawDataRefImpl());
- bool isPCRel = Obj->getAnyRelocationPCRel(RE);
-
- MCSymbol *Sym = Ctx.getOrCreateSymbol(SymName);
- // FIXME: check that the value is actually the same.
- if (!Sym->isVariable())
- Sym->setVariableValue(MCConstantExpr::create(SymAddr, Ctx));
- const MCExpr *Expr = nullptr;
-
- switch(RelType) {
- case X86_64_RELOC_TLV:
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
- break;
- case X86_64_RELOC_SIGNED_4:
- Expr = MCBinaryExpr::createAdd(MCSymbolRefExpr::create(Sym, Ctx),
- MCConstantExpr::create(4, Ctx),
- Ctx);
- break;
- case X86_64_RELOC_SIGNED_2:
- Expr = MCBinaryExpr::createAdd(MCSymbolRefExpr::create(Sym, Ctx),
- MCConstantExpr::create(2, Ctx),
- Ctx);
- break;
- case X86_64_RELOC_SIGNED_1:
- Expr = MCBinaryExpr::createAdd(MCSymbolRefExpr::create(Sym, Ctx),
- MCConstantExpr::create(1, Ctx),
- Ctx);
- break;
- case X86_64_RELOC_GOT_LOAD:
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_GOTPCREL, Ctx);
- break;
- case X86_64_RELOC_GOT:
- Expr = MCSymbolRefExpr::create(Sym, isPCRel ?
- MCSymbolRefExpr::VK_GOTPCREL :
- MCSymbolRefExpr::VK_GOT,
- Ctx);
- break;
- case X86_64_RELOC_SUBTRACTOR:
- {
- Rel.moveNext();
- any_relocation_info RENext =
- Obj->getRelocation(Rel.getRawDataRefImpl());
-
- // X86_64_SUBTRACTOR must be followed by a relocation of type
- // X86_64_RELOC_UNSIGNED.
- // NOTE: Scattered relocations don't exist on x86_64.
- unsigned RType = Obj->getAnyRelocationType(RENext);
- if (RType != X86_64_RELOC_UNSIGNED)
- report_fatal_error("Expected X86_64_RELOC_UNSIGNED after "
- "X86_64_RELOC_SUBTRACTOR.");
-
- const MCExpr *LHS = MCSymbolRefExpr::create(Sym, Ctx);
-
- symbol_iterator RSymI = Rel.getSymbol();
- uint64_t RSymAddr = RSymI->getValue();
- ErrorOr<StringRef> RSymName = RSymI->getName();
- if (std::error_code EC = RSymName.getError())
- report_fatal_error(EC.message());
-
- MCSymbol *RSym = Ctx.getOrCreateSymbol(*RSymName);
- if (!RSym->isVariable())
- RSym->setVariableValue(MCConstantExpr::create(RSymAddr, Ctx));
-
- const MCExpr *RHS = MCSymbolRefExpr::create(RSym, Ctx);
-
- Expr = MCBinaryExpr::createSub(LHS, RHS, Ctx);
- break;
- }
- default:
- Expr = MCSymbolRefExpr::create(Sym, Ctx);
- break;
- }
- return Expr;
- }
-};
-} // End unnamed namespace
-
-/// createX86_64MachORelocationInfo - Construct an X86-64 Mach-O RelocationInfo.
-MCRelocationInfo *llvm::createX86_64MachORelocationInfo(MCContext &Ctx) {
- return new X86_64MachORelocationInfo(Ctx);
-}
diff --git a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
index 191ebeac7265..297926ddcfda 100644
--- a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
@@ -73,7 +73,9 @@ public:
static bool isFixupKindRIPRel(unsigned Kind) {
return Kind == X86::reloc_riprel_4byte ||
- Kind == X86::reloc_riprel_4byte_movq_load;
+ Kind == X86::reloc_riprel_4byte_movq_load ||
+ Kind == X86::reloc_riprel_4byte_relax ||
+ Kind == X86::reloc_riprel_4byte_relax_rex;
}
static unsigned getFixupKindLog2Size(unsigned Kind) {
@@ -87,8 +89,11 @@ static unsigned getFixupKindLog2Size(unsigned Kind) {
case FK_PCRel_4:
// FIXME: Remove these!!!
case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_relax:
+ case X86::reloc_riprel_4byte_relax_rex:
case X86::reloc_riprel_4byte_movq_load:
case X86::reloc_signed_4byte:
+ case X86::reloc_signed_4byte_relax:
case FK_Data_4: return 2;
case FK_Data_8: return 3;
}
diff --git a/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
index bd1bc9943b6d..33376b6d1b90 100644
--- a/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
@@ -53,11 +53,16 @@ unsigned X86WinCOFFObjectWriter::getRelocType(const MCValue &Target,
case FK_PCRel_4:
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_movq_load:
+ case X86::reloc_riprel_4byte_relax:
+ case X86::reloc_riprel_4byte_relax_rex:
return COFF::IMAGE_REL_AMD64_REL32;
case FK_Data_4:
case X86::reloc_signed_4byte:
+ case X86::reloc_signed_4byte_relax:
if (Modifier == MCSymbolRefExpr::VK_COFF_IMGREL32)
return COFF::IMAGE_REL_AMD64_ADDR32NB;
+ if (Modifier == MCSymbolRefExpr::VK_SECREL)
+ return COFF::IMAGE_REL_AMD64_SECREL;
return COFF::IMAGE_REL_AMD64_ADDR32;
case FK_Data_8:
return COFF::IMAGE_REL_AMD64_ADDR64;
@@ -76,8 +81,11 @@ unsigned X86WinCOFFObjectWriter::getRelocType(const MCValue &Target,
return COFF::IMAGE_REL_I386_REL32;
case FK_Data_4:
case X86::reloc_signed_4byte:
+ case X86::reloc_signed_4byte_relax:
if (Modifier == MCSymbolRefExpr::VK_COFF_IMGREL32)
return COFF::IMAGE_REL_I386_DIR32NB;
+ if (Modifier == MCSymbolRefExpr::VK_SECREL)
+ return COFF::IMAGE_REL_I386_SECREL;
return COFF::IMAGE_REL_I386_DIR32;
case FK_SecRel_2:
return COFF::IMAGE_REL_I386_SECTION;
diff --git a/lib/Target/X86/Makefile b/lib/Target/X86/Makefile
deleted file mode 100644
index e518fecf044f..000000000000
--- a/lib/Target/X86/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-##===- lib/Target/X86/Makefile -----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMX86CodeGen
-TARGET = X86
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = X86GenRegisterInfo.inc X86GenInstrInfo.inc \
- X86GenAsmWriter.inc X86GenAsmMatcher.inc \
- X86GenAsmWriter1.inc X86GenDAGISel.inc \
- X86GenDisassemblerTables.inc X86GenFastISel.inc \
- X86GenCallingConv.inc X86GenSubtargetInfo.inc
-
-DIRS = InstPrinter AsmParser Disassembler TargetInfo MCTargetDesc Utils
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/README-X86-64.txt b/lib/Target/X86/README-X86-64.txt
index bcfdf0bc56b2..09626e13849d 100644
--- a/lib/Target/X86/README-X86-64.txt
+++ b/lib/Target/X86/README-X86-64.txt
@@ -170,7 +170,7 @@ generated for it. The primary issue with the result is that it doesn't do any
of the optimizations which are possible if we know the address of a va_list
in the current function is never taken:
1. We shouldn't spill the XMM registers because we only call va_arg with "int".
-2. It would be nice if we could scalarrepl the va_list.
+2. It would be nice if we could sroa the va_list.
3. Probably overkill, but it'd be cool if we could peel off the first five
iterations of the loop.
diff --git a/lib/Target/X86/README.txt b/lib/Target/X86/README.txt
index 19a183201755..799157c926e6 100644
--- a/lib/Target/X86/README.txt
+++ b/lib/Target/X86/README.txt
@@ -50,8 +50,8 @@ Some isel ideas:
2. Code duplication (addressing mode) during isel.
3. Other ideas from "Register-Sensitive Selection, Duplication, and
Sequencing of Instructions".
-4. Scheduling for reduced register pressure. E.g. "Minimum Register
- Instruction Sequence Problem: Revisiting Optimal Code Generation for DAGs"
+4. Scheduling for reduced register pressure. E.g. "Minimum Register
+ Instruction Sequence Problem: Revisiting Optimal Code Generation for DAGs"
and other related papers.
http://citeseer.ist.psu.edu/govindarajan01minimum.html
@@ -73,7 +73,7 @@ It appears icc use push for parameter passing. Need to investigate.
//===---------------------------------------------------------------------===//
The instruction selector sometimes misses folding a load into a compare. The
-pattern is written as (cmp reg, (load p)). Because the compare isn't
+pattern is written as (cmp reg, (load p)). Because the compare isn't
commutative, it is not matched with the load on both sides. The dag combiner
should be made smart enough to canonicalize the load into the RHS of a compare
when it can invert the result of the compare for free.
diff --git a/lib/Target/X86/TargetInfo/Makefile b/lib/Target/X86/TargetInfo/Makefile
deleted file mode 100644
index ee91982df0c8..000000000000
--- a/lib/Target/X86/TargetInfo/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/X86/TargetInfo/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMX86Info
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/Utils/Makefile b/lib/Target/X86/Utils/Makefile
deleted file mode 100644
index 1df6f0f561d4..000000000000
--- a/lib/Target/X86/Utils/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/X86/Utils/Makefile -----------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMX86Utils
-
-# Hack: we need to include 'main' x86 target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/Utils/X86ShuffleDecode.cpp b/lib/Target/X86/Utils/X86ShuffleDecode.cpp
index 619f7c8d25df..18f71675437b 100644
--- a/lib/Target/X86/Utils/X86ShuffleDecode.cpp
+++ b/lib/Target/X86/Utils/X86ShuffleDecode.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "X86ShuffleDecode.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/MachineValueType.h"
//===----------------------------------------------------------------------===//
@@ -44,6 +45,17 @@ void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
if (ZMask & 8) ShuffleMask[3] = SM_SentinelZero;
}
+void DecodeInsertElementMask(MVT VT, unsigned Idx, unsigned Len,
+ SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
+ assert((Idx + Len) <= NumElts && "Insertion out of range");
+
+ for (unsigned i = 0; i != NumElts; ++i)
+ ShuffleMask.push_back(i);
+ for (unsigned i = 0; i != Len; ++i)
+ ShuffleMask[Idx + i] = NumElts + i;
+}
+
// <3,1> or <6,7,2,3>
void DecodeMOVHLPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask) {
for (unsigned i = NElts / 2; i != NElts; ++i)
@@ -263,6 +275,25 @@ void DecodeUNPCKLMask(MVT VT, SmallVectorImpl<int> &ShuffleMask) {
}
}
+/// Decodes a broadcast of the first element of a vector.
+void DecodeVectorBroadcast(MVT DstVT, SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = DstVT.getVectorNumElements();
+ ShuffleMask.append(NumElts, 0);
+}
+
+/// Decodes a broadcast of a subvector to a larger vector type.
+void DecodeSubVectorBroadcast(MVT DstVT, MVT SrcVT,
+ SmallVectorImpl<int> &ShuffleMask) {
+ assert(SrcVT.getScalarType() == DstVT.getScalarType() &&
+ "Non matching vector element types");
+ unsigned NumElts = SrcVT.getVectorNumElements();
+ unsigned Scale = DstVT.getSizeInBits() / SrcVT.getSizeInBits();
+
+ for (unsigned i = 0; i != Scale; ++i)
+ for (unsigned j = 0; j != NumElts; ++j)
+ ShuffleMask.push_back(j);
+}
+
/// \brief Decode a shuffle packed values at 128-bit granularity
/// (SHUFF32x4/SHUFF64x2/SHUFI32x4/SHUFI64x2)
/// immediate mask into a shuffle mask.
@@ -303,9 +334,9 @@ void DecodePSHUFBMask(ArrayRef<uint64_t> RawMask,
ShuffleMask.push_back(M);
continue;
}
- // For AVX vectors with 32 bytes the base of the shuffle is the half of
- // the vector we're inside.
- int Base = i < 16 ? 0 : 16;
+ // For 256/512-bit vectors, the base of the shuffle is the 128-bit
+ // subvector we're inside.
+ int Base = (i / 16) * 16;
// If the high bit (7) of the byte is set, the element is zeroed.
if (M & (1 << 7))
ShuffleMask.push_back(SM_SentinelZero);
@@ -331,23 +362,62 @@ void DecodeBLENDMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
}
}
-/// DecodeVPERMMask - this decodes the shuffle masks for VPERMQ/VPERMPD.
-/// No VT provided since it only works on 256-bit, 4 element vectors.
-void DecodeVPERMMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
- for (unsigned i = 0; i != 4; ++i) {
- ShuffleMask.push_back((Imm >> (2 * i)) & 3);
+void DecodeVPPERMMask(ArrayRef<uint64_t> RawMask,
+ SmallVectorImpl<int> &ShuffleMask) {
+ assert(RawMask.size() == 16 && "Illegal VPPERM shuffle mask size");
+
+ // VPPERM Operation
+ // Bits[4:0] - Byte Index (0 - 31)
+ // Bits[7:5] - Permute Operation
+ //
+ // Permute Operation:
+ // 0 - Source byte (no logical operation).
+ // 1 - Invert source byte.
+ // 2 - Bit reverse of source byte.
+ // 3 - Bit reverse of inverted source byte.
+ // 4 - 00h (zero - fill).
+ // 5 - FFh (ones - fill).
+ // 6 - Most significant bit of source byte replicated in all bit positions.
+ // 7 - Invert most significant bit of source byte and replicate in all bit positions.
+ for (int i = 0, e = RawMask.size(); i < e; ++i) {
+ uint64_t M = RawMask[i];
+ if (M == (uint64_t)SM_SentinelUndef) {
+ ShuffleMask.push_back(M);
+ continue;
+ }
+
+ uint64_t PermuteOp = (M >> 5) & 0x7;
+ if (PermuteOp == 4) {
+ ShuffleMask.push_back(SM_SentinelZero);
+ continue;
+ }
+ if (PermuteOp != 0) {
+ ShuffleMask.clear();
+ return;
+ }
+
+ uint64_t Index = M & 0x1F;
+ ShuffleMask.push_back((int)Index);
}
}
-void DecodeZeroExtendMask(MVT SrcVT, MVT DstVT, SmallVectorImpl<int> &Mask) {
+/// Decodes the shuffle masks for VPERMQ/VPERMPD.
+void DecodeVPERMMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
+ assert((VT.is256BitVector() || VT.is512BitVector()) &&
+ (VT.getScalarSizeInBits() == 64) && "Unexpected vector value type");
+ unsigned NumElts = VT.getVectorNumElements();
+ for (unsigned l = 0; l != NumElts; l += 4)
+ for (unsigned i = 0; i != 4; ++i)
+ ShuffleMask.push_back(l + ((Imm >> (2 * i)) & 3));
+}
+
+void DecodeZeroExtendMask(MVT SrcScalarVT, MVT DstVT, SmallVectorImpl<int> &Mask) {
unsigned NumDstElts = DstVT.getVectorNumElements();
- unsigned SrcScalarBits = SrcVT.getScalarSizeInBits();
+ unsigned SrcScalarBits = SrcScalarVT.getSizeInBits();
unsigned DstScalarBits = DstVT.getScalarSizeInBits();
unsigned Scale = DstScalarBits / SrcScalarBits;
assert(SrcScalarBits < DstScalarBits &&
"Expected zero extension mask to increase scalar size");
- assert(SrcVT.getVectorNumElements() >= NumDstElts &&
- "Too many zero extension lanes");
for (unsigned i = 0; i != NumDstElts; i++) {
Mask.push_back(i);
@@ -445,18 +515,78 @@ void DecodeINSERTQIMask(int Len, int Idx,
ShuffleMask.push_back(SM_SentinelUndef);
}
+void DecodeVPERMILPMask(MVT VT, ArrayRef<uint64_t> RawMask,
+ SmallVectorImpl<int> &ShuffleMask) {
+ unsigned VecSize = VT.getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
+ unsigned NumLanes = VecSize / 128;
+ unsigned NumEltsPerLane = VT.getVectorNumElements() / NumLanes;
+ assert((VecSize == 128 || VecSize == 256 || VecSize == 512) &&
+ "Unexpected vector size");
+ assert((EltSize == 32 || EltSize == 64) && "Unexpected element size");
+
+ for (unsigned i = 0, e = RawMask.size(); i < e; ++i) {
+ uint64_t M = RawMask[i];
+ M = (EltSize == 64 ? ((M >> 1) & 0x1) : (M & 0x3));
+ unsigned LaneOffset = i & ~(NumEltsPerLane - 1);
+ ShuffleMask.push_back((int)(LaneOffset + M));
+ }
+}
+
+void DecodeVPERMIL2PMask(MVT VT, unsigned M2Z, ArrayRef<uint64_t> RawMask,
+ SmallVectorImpl<int> &ShuffleMask) {
+ unsigned VecSize = VT.getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
+ unsigned NumLanes = VecSize / 128;
+ unsigned NumEltsPerLane = VT.getVectorNumElements() / NumLanes;
+ assert((VecSize == 128 || VecSize == 256) &&
+ "Unexpected vector size");
+ assert((EltSize == 32 || EltSize == 64) && "Unexpected element size");
+
+ for (unsigned i = 0, e = RawMask.size(); i < e; ++i) {
+ // VPERMIL2 Operation.
+ // Bits[3] - Match Bit.
+ // Bits[2:1] - (Per Lane) PD Shuffle Mask.
+ // Bits[2:0] - (Per Lane) PS Shuffle Mask.
+ uint64_t Selector = RawMask[i];
+ unsigned MatchBit = (Selector >> 3) & 0x1;
+
+ // M2Z[0:1] MatchBit
+ // 0Xb X Source selected by Selector index.
+ // 10b 0 Source selected by Selector index.
+ // 10b 1 Zero.
+ // 11b 0 Zero.
+ // 11b 1 Source selected by Selector index.
+ if ((M2Z & 0x2) != 0 && MatchBit != (M2Z & 0x1)) {
+ ShuffleMask.push_back(SM_SentinelZero);
+ continue;
+ }
+
+ unsigned Index = i & ~(NumEltsPerLane - 1);
+ if (EltSize == 64)
+ Index += (Selector >> 1) & 0x1;
+ else
+ Index += Selector & 0x3;
+
+ unsigned SrcOffset = (Selector >> 2) & 1;
+ ShuffleMask.push_back((int)(SrcOffset + Index));
+ }
+}
+
void DecodeVPERMVMask(ArrayRef<uint64_t> RawMask,
SmallVectorImpl<int> &ShuffleMask) {
- for (int i = 0, e = RawMask.size(); i < e; ++i) {
- uint64_t M = RawMask[i];
+ uint64_t EltMaskSize = RawMask.size() - 1;
+ for (auto M : RawMask) {
+ M &= EltMaskSize;
ShuffleMask.push_back((int)M);
}
}
void DecodeVPERMV3Mask(ArrayRef<uint64_t> RawMask,
SmallVectorImpl<int> &ShuffleMask) {
- for (int i = 0, e = RawMask.size(); i < e; ++i) {
- uint64_t M = RawMask[i];
+ uint64_t EltMaskSize = (RawMask.size() * 2) - 1;
+ for (auto M : RawMask) {
+ M &= EltMaskSize;
ShuffleMask.push_back((int)M);
}
}
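The VPPERM decoder above only recovers the byte-move and zero-fill selector operations; the remaining variants (invert, bit-reverse, fill-with-ones, sign-replicate) cannot be expressed as a shuffle mask, so the decoder bails out by clearing the mask. A minimal standalone sketch of the per-byte rule, with illustrative names rather than the in-tree API:

#include <cstdint>

enum DecodedByte : int { SentinelZero = -2, NotAShuffle = -3 };

// Decode one VPPERM control byte: bits [4:0] index one of the 32 source
// bytes (two 16-byte operands), bits [7:5] select the permute operation.
int decodeVPPERMByte(uint8_t Ctrl) {
  unsigned PermuteOp = (Ctrl >> 5) & 0x7;
  if (PermuteOp == 4)
    return SentinelZero;   // Operation 4: 00h zero-fill.
  if (PermuteOp != 0)
    return NotAShuffle;    // Invert/bit-reverse/fill variants: not a shuffle.
  return Ctrl & 0x1F;      // Operation 0: plain byte move.
}

// e.g. decodeVPPERMByte(0x12) == 18 (byte 2 of the second source operand),
// while decodeVPPERMByte(0x80) yields a zeroed lane.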
diff --git a/lib/Target/X86/Utils/X86ShuffleDecode.h b/lib/Target/X86/Utils/X86ShuffleDecode.h
index 72db6a81912b..dc21c19752c3 100644
--- a/lib/Target/X86/Utils/X86ShuffleDecode.h
+++ b/lib/Target/X86/Utils/X86ShuffleDecode.h
@@ -16,23 +16,31 @@
#define LLVM_LIB_TARGET_X86_UTILS_X86SHUFFLEDECODE_H
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/ArrayRef.h"
//===----------------------------------------------------------------------===//
// Vector Mask Decoding
//===----------------------------------------------------------------------===//
namespace llvm {
+template <typename T> class ArrayRef;
class MVT;
enum { SM_SentinelUndef = -1, SM_SentinelZero = -2 };
+/// Decode a 128-bit INSERTPS instruction as a v4f32 shuffle mask.
void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-// <3,1> or <6,7,2,3>
+/// Insert the bottom Len elements from a second source into a vector,
+/// starting at element Idx.
+void DecodeInsertElementMask(MVT VT, unsigned Idx, unsigned Len,
+ SmallVectorImpl<int> &ShuffleMask);
+
+/// Decode a MOVHLPS instruction as a v2f64/v4f32 shuffle mask.
+/// i.e. <3,1> or <6,7,2,3>
void DecodeMOVHLPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask);
-// <0,2> or <0,1,4,5>
+/// Decode a MOVLHPS instruction as a v2f64/v4f32 shuffle mask.
+/// i.e. <0,2> or <0,1,4,5>
void DecodeMOVLHPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask);
void DecodeMOVSLDUPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
@@ -47,74 +55,104 @@ void DecodePSRLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
void DecodePALIGNRMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+/// Decodes the shuffle masks for pshufd/pshufw/vpermilpd/vpermilps.
+/// VT indicates the type of the vector allowing it to handle different
+/// datatypes and vector widths.
void DecodePSHUFMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+/// Decodes the shuffle masks for pshufhw.
+/// VT indicates the type of the vector allowing it to handle different
+/// datatypes and vector widths.
void DecodePSHUFHWMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-void DecodePSHUFLWMask(MVT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+/// Decodes the shuffle masks for pshuflw.
+/// VT indicates the type of the vector allowing it to handle different
+/// datatypes and vector widths.
+void DecodePSHUFLWMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decodes a PSWAPD 3DNow! instruction.
+/// Decodes a PSWAPD 3DNow! instruction.
void DecodePSWAPMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
-/// DecodeSHUFPMask - This decodes the shuffle masks for shufp*. VT indicates
-/// the type of the vector allowing it to handle different datatypes and vector
-/// widths.
+/// Decodes the shuffle masks for shufp*.
+/// VT indicates the type of the vector allowing it to handle different
+/// datatypes and vector widths.
void DecodeSHUFPMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-/// DecodeUNPCKHMask - This decodes the shuffle masks for unpckhps/unpckhpd
-/// and punpckh*. VT indicates the type of the vector allowing it to handle
-/// different datatypes and vector widths.
+/// Decodes the shuffle masks for unpckhps/unpckhpd and punpckh*.
+/// VT indicates the type of the vector allowing it to handle different
+/// datatypes and vector widths.
void DecodeUNPCKHMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
-/// DecodeUNPCKLMask - This decodes the shuffle masks for unpcklps/unpcklpd
-/// and punpckl*. VT indicates the type of the vector allowing it to handle
-/// different datatypes and vector widths.
+/// Decodes the shuffle masks for unpcklps/unpcklpd and punpckl*.
+/// VT indicates the type of the vector allowing it to handle different
+/// datatypes and vector widths.
void DecodeUNPCKLMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a PSHUFB mask from a raw array of constants such as from
+/// Decodes a broadcast of the first element of a vector.
+void DecodeVectorBroadcast(MVT DstVT, SmallVectorImpl<int> &ShuffleMask);
+
+/// Decodes a broadcast of a subvector to a larger vector type.
+void DecodeSubVectorBroadcast(MVT DstVT, MVT SrcVT,
+ SmallVectorImpl<int> &ShuffleMask);
+
+/// Decode a PSHUFB mask from a raw array of constants such as from
/// BUILD_VECTOR.
void DecodePSHUFBMask(ArrayRef<uint64_t> RawMask,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a BLEND immediate mask into a shuffle mask.
+/// Decode a BLEND immediate mask into a shuffle mask.
void DecodeBLENDMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
void DecodeVPERM2X128Mask(MVT VT, unsigned Imm,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a shuffle packed values at 128-bit granularity
+/// Decode a shuffle packed values at 128-bit granularity
/// immediate mask into a shuffle mask.
void decodeVSHUF64x2FamilyMask(MVT VT, unsigned Imm,
SmallVectorImpl<int> &ShuffleMask);
-/// DecodeVPERMMask - this decodes the shuffle masks for VPERMQ/VPERMPD.
-/// No VT provided since it only works on 256-bit, 4 element vectors.
-void DecodeVPERMMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+/// Decodes the shuffle masks for VPERMQ/VPERMPD.
+void DecodeVPERMMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a zero extension instruction as a shuffle mask.
-void DecodeZeroExtendMask(MVT SrcVT, MVT DstVT,
+/// Decode a VPPERM mask from a raw array of constants such as from
+/// BUILD_VECTOR.
+/// This can only decode basic masks (permutes + zeros), not any of the other
+/// operations that VPPERM can perform.
+void DecodeVPPERMMask(ArrayRef<uint64_t> RawMask,
+ SmallVectorImpl<int> &ShuffleMask);
+
+/// Decode a zero extension instruction as a shuffle mask.
+void DecodeZeroExtendMask(MVT SrcScalarVT, MVT DstVT,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a move lower and zero upper instruction as a shuffle mask.
+/// Decode a move lower and zero upper instruction as a shuffle mask.
void DecodeZeroMoveLowMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a scalar float move instruction as a shuffle mask.
+/// Decode a scalar float move instruction as a shuffle mask.
void DecodeScalarMoveMask(MVT VT, bool IsLoad,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a SSE4A EXTRQ instruction as a v16i8 shuffle mask.
+/// Decode a SSE4A EXTRQ instruction as a v16i8 shuffle mask.
void DecodeEXTRQIMask(int Len, int Idx,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a SSE4A INSERTQ instruction as a v16i8 shuffle mask.
+/// Decode a SSE4A INSERTQ instruction as a v16i8 shuffle mask.
void DecodeINSERTQIMask(int Len, int Idx,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a VPERM W/D/Q/PS/PD mask from a raw array of constants.
+/// Decode a VPERMILPD/VPERMILPS variable mask from a raw array of constants.
+void DecodeVPERMILPMask(MVT VT, ArrayRef<uint64_t> RawMask,
+ SmallVectorImpl<int> &ShuffleMask);
+
+/// Decode a VPERMIL2PD/VPERMIL2PS variable mask from a raw array of constants.
+void DecodeVPERMIL2PMask(MVT VT, unsigned M2Z, ArrayRef<uint64_t> RawMask,
+ SmallVectorImpl<int> &ShuffleMask);
+
+/// Decode a VPERM W/D/Q/PS/PD mask from a raw array of constants.
void DecodeVPERMVMask(ArrayRef<uint64_t> RawMask,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a VPERMT2 W/D/Q/PS/PD mask from a raw array of constants.
+/// Decode a VPERMT2 W/D/Q/PS/PD mask from a raw array of constants.
void DecodeVPERMV3Mask(ArrayRef<uint64_t> RawMask,
SmallVectorImpl<int> &ShuffleMask);
} // llvm namespace
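For DecodeVPERMIL2PMask, the whole zeroing table in the implementation collapses to one comparison: when bit 1 of the M2Z immediate is set, a lane is zeroed exactly when the selector's match bit differs from bit 0 of M2Z. A small sketch of just that predicate (the helper name is assumed, not part of the in-tree API):

#include <cstdint>

// True if this lane of a VPERMIL2PD/VPERMIL2PS result is forced to zero.
// M2Z is the 2-bit immediate field; Selector is the per-lane control value.
bool isVPERMIL2LaneZeroed(unsigned M2Z, uint64_t Selector) {
  unsigned MatchBit = (Selector >> 3) & 0x1;
  return (M2Z & 0x2) != 0 && MatchBit != (M2Z & 0x1);
}

// M2Z = 0b0x: never zeroed; M2Z = 0b10: zeroed when MatchBit is 1;
// M2Z = 0b11: zeroed when MatchBit is 0.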
diff --git a/lib/Target/X86/X86.h b/lib/Target/X86/X86.h
index 01e65b89f480..23d6c7120a4b 100644
--- a/lib/Target/X86/X86.h
+++ b/lib/Target/X86/X86.h
@@ -21,6 +21,7 @@ namespace llvm {
class FunctionPass;
class ImmutablePass;
+class PassRegistry;
class X86TargetMachine;
/// This pass converts a legalized DAG into a X86-specific DAG, ready for
@@ -58,6 +59,12 @@ FunctionPass *createX86FixupLEAs();
/// recalculations.
FunctionPass *createX86OptimizeLEAs();
+/// Return a pass that transforms setcc + movzx pairs into xor + setcc.
+FunctionPass *createX86FixupSetCC();
+
+/// Return a pass that expands WinAlloca pseudo-instructions.
+FunctionPass *createX86WinAllocaExpander();
+
/// Return a pass that optimizes the code-size of x86 call sequences. This is
/// done by replacing esp-relative movs with pushes.
FunctionPass *createX86CallFrameOptimization();
@@ -72,6 +79,14 @@ FunctionPass *createX86WinEHStatePass();
/// must run after prologue/epilogue insertion and before lowering
/// the MachineInstr to MC.
FunctionPass *createX86ExpandPseudoPass();
+
+/// Return a Machine IR pass that selectively replaces
+/// certain byte and word instructions with equivalent 32-bit instructions,
+/// in order to eliminate partial-register usage, break false dependences on
+/// the upper portions of registers, and save code size.
+FunctionPass *createX86FixupBWInsts();
+
+void initializeFixupBWInstPassPass(PassRegistry &);
} // End llvm namespace
#endif
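These factory functions follow the backend's usual pattern: X86TargetMachine's pass configuration decides where each pass runs. A rough, fragmentary sketch of how a late fixup pass such as createX86FixupBWInsts is typically scheduled (the hook name and gating here are assumptions; the real wiring lives in X86TargetMachine.cpp):

// Sketch only: pre-emit passes run after register allocation, which is
// where partial-register fixups have the liveness information they need.
void X86PassConfig::addPreEmitPass() {
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createX86FixupBWInsts());
}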
diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td
index 8902a8534256..8267a84518fc 100644
--- a/lib/Target/X86/X86.td
+++ b/lib/Target/X86/X86.td
@@ -31,6 +31,9 @@ def Mode16Bit : SubtargetFeature<"16bit-mode", "In16BitMode", "true",
// X86 Subtarget features
//===----------------------------------------------------------------------===//
+def FeatureX87 : SubtargetFeature<"x87", "HasX87", "true",
+ "Enable X87 float instructions">;
+
def FeatureCMOV : SubtargetFeature<"cmov","HasCMov", "true",
"Enable conditional move instructions">;
@@ -125,6 +128,9 @@ def FeatureCDI : SubtargetFeature<"avx512cd", "HasCDI", "true",
def FeaturePFI : SubtargetFeature<"avx512pf", "HasPFI", "true",
"Enable AVX-512 PreFetch Instructions",
[FeatureAVX512]>;
+def FeaturePREFETCHWT1 : SubtargetFeature<"prefetchwt1", "HasPFPREFETCHWT1",
+ "true",
+ "Prefetch with Intent to Write and T1 Hint">;
def FeatureDQI : SubtargetFeature<"avx512dq", "HasDQI", "true",
"Enable AVX-512 Doubleword and Quadword Instructions",
[FeatureAVX512]>;
@@ -134,6 +140,12 @@ def FeatureBWI : SubtargetFeature<"avx512bw", "HasBWI", "true",
def FeatureVLX : SubtargetFeature<"avx512vl", "HasVLX", "true",
"Enable AVX-512 Vector Length eXtensions",
[FeatureAVX512]>;
+def FeatureVBMI : SubtargetFeature<"avx512vbmi", "HasVBMI", "true",
+ "Enable AVX-512 Vector Bit Manipulation Instructions",
+ [FeatureAVX512]>;
+def FeatureIFMA : SubtargetFeature<"avx512ifma", "HasIFMA", "true",
+ "Enable AVX-512 Integer Fused Multiple-Add",
+ [FeatureAVX512]>;
def FeaturePKU : SubtargetFeature<"pku", "HasPKU", "true",
"Enable protection keys">;
def FeaturePCLMUL : SubtargetFeature<"pclmul", "HasPCLMUL", "true",
@@ -186,6 +198,8 @@ def FeatureRDSEED : SubtargetFeature<"rdseed", "HasRDSEED", "true",
"Support RDSEED instruction">;
def FeatureLAHFSAHF : SubtargetFeature<"sahf", "HasLAHFSAHF", "true",
"Support LAHF and SAHF instructions">;
+def FeatureMWAITX : SubtargetFeature<"mwaitx", "HasMWAITX", "true",
+ "Enable MONITORX/MWAITX timer functionality">;
def FeatureMPX : SubtargetFeature<"mpx", "HasMPX", "true",
"Support MPX instructions">;
def FeatureLEAForSP : SubtargetFeature<"lea-sp", "UseLeaForSP", "true",
@@ -199,6 +213,20 @@ def FeatureSlowDivide64 : SubtargetFeature<"idivq-to-divw",
def FeaturePadShortFunctions : SubtargetFeature<"pad-short-functions",
"PadShortFunctions", "true",
"Pad short functions">;
+def FeatureINVPCID : SubtargetFeature<"invpcid", "HasInvPCId", "true",
+ "Invalidate Process-Context Identifier">;
+def FeatureVMFUNC : SubtargetFeature<"vmfunc", "HasVMFUNC", "true",
+ "VM Functions">;
+def FeatureSMAP : SubtargetFeature<"smap", "HasSMAP", "true",
+ "Supervisor Mode Access Protection">;
+def FeatureSGX : SubtargetFeature<"sgx", "HasSGX", "true",
+ "Enable Software Guard Extensions">;
+def FeatureCLFLUSHOPT : SubtargetFeature<"clflushopt", "HasCLFLUSHOPT", "true",
+ "Flush A Cache Line Optimized">;
+def FeaturePCOMMIT : SubtargetFeature<"pcommit", "HasPCOMMIT", "true",
+ "Enable Persistent Commit">;
+def FeatureCLWB : SubtargetFeature<"clwb", "HasCLWB", "true",
+ "Cache Line Write Back">;
// TODO: This feature ought to be renamed.
// What it really refers to are CPUs for which certain instructions
// (which ones besides the example below?) are microcoded.
@@ -216,6 +244,11 @@ def FeatureSlowIncDec : SubtargetFeature<"slow-incdec", "SlowIncDec", "true",
def FeatureSoftFloat
: SubtargetFeature<"soft-float", "UseSoftFloat", "true",
"Use software floating point features.">;
+// On at least some AMD processors, there is no performance hazard to writing
+// only the lower parts of a YMM register without clearing the upper part.
+def FeatureFastPartialYMMWrite
+ : SubtargetFeature<"fast-partial-ymm-write", "HasFastPartialYMMWrite",
+ "true", "Partial writes to YMM registers are fast">;
//===----------------------------------------------------------------------===//
// X86 processors supported.
@@ -231,37 +264,57 @@ def ProcIntelSLM : SubtargetFeature<"slm", "X86ProcFamily", "IntelSLM",
class Proc<string Name, list<SubtargetFeature> Features>
: ProcessorModel<Name, GenericModel, Features>;
-def : Proc<"generic", [FeatureSlowUAMem16]>;
-def : Proc<"i386", [FeatureSlowUAMem16]>;
-def : Proc<"i486", [FeatureSlowUAMem16]>;
-def : Proc<"i586", [FeatureSlowUAMem16]>;
-def : Proc<"pentium", [FeatureSlowUAMem16]>;
-def : Proc<"pentium-mmx", [FeatureSlowUAMem16, FeatureMMX]>;
-def : Proc<"i686", [FeatureSlowUAMem16]>;
-def : Proc<"pentiumpro", [FeatureSlowUAMem16, FeatureCMOV]>;
-def : Proc<"pentium2", [FeatureSlowUAMem16, FeatureMMX, FeatureCMOV,
- FeatureFXSR]>;
-def : Proc<"pentium3", [FeatureSlowUAMem16, FeatureMMX, FeatureSSE1,
- FeatureFXSR]>;
-def : Proc<"pentium3m", [FeatureSlowUAMem16, FeatureMMX, FeatureSSE1,
- FeatureFXSR, FeatureSlowBTMem]>;
-def : Proc<"pentium-m", [FeatureSlowUAMem16, FeatureMMX, FeatureSSE2,
- FeatureFXSR, FeatureSlowBTMem]>;
-def : Proc<"pentium4", [FeatureSlowUAMem16, FeatureMMX, FeatureSSE2,
- FeatureFXSR]>;
-def : Proc<"pentium4m", [FeatureSlowUAMem16, FeatureMMX, FeatureSSE2,
- FeatureFXSR, FeatureSlowBTMem]>;
+def : Proc<"generic", [FeatureX87, FeatureSlowUAMem16]>;
+def : Proc<"i386", [FeatureX87, FeatureSlowUAMem16]>;
+def : Proc<"i486", [FeatureX87, FeatureSlowUAMem16]>;
+def : Proc<"i586", [FeatureX87, FeatureSlowUAMem16]>;
+def : Proc<"pentium", [FeatureX87, FeatureSlowUAMem16]>;
+def : Proc<"pentium-mmx", [FeatureX87, FeatureSlowUAMem16, FeatureMMX]>;
+def : Proc<"i686", [FeatureX87, FeatureSlowUAMem16]>;
+def : Proc<"pentiumpro", [FeatureX87, FeatureSlowUAMem16, FeatureCMOV]>;
+def : Proc<"pentium2", [FeatureX87, FeatureSlowUAMem16, FeatureMMX,
+ FeatureCMOV, FeatureFXSR]>;
+def : Proc<"pentium3", [FeatureX87, FeatureSlowUAMem16, FeatureMMX,
+ FeatureSSE1, FeatureFXSR]>;
+def : Proc<"pentium3m", [FeatureX87, FeatureSlowUAMem16, FeatureMMX,
+ FeatureSSE1, FeatureFXSR, FeatureSlowBTMem]>;
+
+// Enable the PostRAScheduler for SSE2 and SSE3 class cpus.
+// The intent is to enable it for pentium4, which is the current default
+// processor in a vanilla 32-bit clang compilation when no specific
+// architecture is specified. This generally gives a nice performance
+// increase on Silvermont, with largely neutral behavior on other
+// contemporary large-core processors.
+// pentium-m, pentium4m, prescott and nocona are included as a preventative
+// measure to avoid performance surprises, in case clang's default cpu
+// changes slightly.
+
+def : ProcessorModel<"pentium-m", GenericPostRAModel,
+ [FeatureX87, FeatureSlowUAMem16, FeatureMMX,
+ FeatureSSE2, FeatureFXSR, FeatureSlowBTMem]>;
+
+def : ProcessorModel<"pentium4", GenericPostRAModel,
+ [FeatureX87, FeatureSlowUAMem16, FeatureMMX,
+ FeatureSSE2, FeatureFXSR]>;
+
+def : ProcessorModel<"pentium4m", GenericPostRAModel,
+ [FeatureX87, FeatureSlowUAMem16, FeatureMMX,
+ FeatureSSE2, FeatureFXSR, FeatureSlowBTMem]>;
+
+// Intel Quark.
+def : Proc<"lakemont", []>;
// Intel Core Duo.
def : ProcessorModel<"yonah", SandyBridgeModel,
- [FeatureSlowUAMem16, FeatureMMX, FeatureSSE3, FeatureFXSR,
- FeatureSlowBTMem]>;
+ [FeatureX87, FeatureSlowUAMem16, FeatureMMX, FeatureSSE3,
+ FeatureFXSR, FeatureSlowBTMem]>;
// NetBurst.
-def : Proc<"prescott",
- [FeatureSlowUAMem16, FeatureMMX, FeatureSSE3, FeatureFXSR,
- FeatureSlowBTMem]>;
-def : Proc<"nocona", [
+def : ProcessorModel<"prescott", GenericPostRAModel,
+ [FeatureX87, FeatureSlowUAMem16, FeatureMMX, FeatureSSE3,
+ FeatureFXSR, FeatureSlowBTMem]>;
+def : ProcessorModel<"nocona", GenericPostRAModel, [
+ FeatureX87,
FeatureSlowUAMem16,
FeatureMMX,
FeatureSSE3,
@@ -272,6 +325,7 @@ def : Proc<"nocona", [
// Intel Core 2 Solo/Duo.
def : ProcessorModel<"core2", SandyBridgeModel, [
+ FeatureX87,
FeatureSlowUAMem16,
FeatureMMX,
FeatureSSSE3,
@@ -281,6 +335,7 @@ def : ProcessorModel<"core2", SandyBridgeModel, [
FeatureLAHFSAHF
]>;
def : ProcessorModel<"penryn", SandyBridgeModel, [
+ FeatureX87,
FeatureSlowUAMem16,
FeatureMMX,
FeatureSSE41,
@@ -293,6 +348,7 @@ def : ProcessorModel<"penryn", SandyBridgeModel, [
// Atom CPUs.
class BonnellProc<string Name> : ProcessorModel<Name, AtomModel, [
ProcIntelAtom,
+ FeatureX87,
FeatureSlowUAMem16,
FeatureMMX,
FeatureSSSE3,
@@ -313,6 +369,7 @@ def : BonnellProc<"atom">; // Pin the generic name to the baseline.
class SilvermontProc<string Name> : ProcessorModel<Name, SLMModel, [
ProcIntelSLM,
+ FeatureX87,
FeatureMMX,
FeatureSSE42,
FeatureFXSR,
@@ -334,6 +391,7 @@ def : SilvermontProc<"slm">; // Legacy alias.
// "Arrandale" along with corei3 and corei5
class NehalemProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
+ FeatureX87,
FeatureMMX,
FeatureSSE42,
FeatureFXSR,
@@ -348,6 +406,7 @@ def : NehalemProc<"corei7">;
// Westmere is a similar machine to nehalem with some additional features.
// Westmere is the corei3/i5/i7 path from nehalem to sandybridge
class WestmereProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
+ FeatureX87,
FeatureMMX,
FeatureSSE42,
FeatureFXSR,
@@ -360,15 +419,24 @@ class WestmereProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
]>;
def : WestmereProc<"westmere">;
+class ProcessorFeatures<list<SubtargetFeature> Inherited,
+ list<SubtargetFeature> NewFeatures> {
+ list<SubtargetFeature> Value = !listconcat(Inherited, NewFeatures);
+}
+
+class ProcModel<string Name, SchedMachineModel Model,
+ list<SubtargetFeature> ProcFeatures,
+ list<SubtargetFeature> OtherFeatures> :
+ ProcessorModel<Name, Model, !listconcat(ProcFeatures, OtherFeatures)>;
+
// SSE is not listed here since llvm treats AVX as a reimplementation of SSE,
// rather than a superset.
-class SandyBridgeProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
+def SNBFeatures : ProcessorFeatures<[], [
+ FeatureX87,
FeatureMMX,
FeatureAVX,
FeatureFXSR,
FeatureCMPXCHG16B,
- FeatureSlowBTMem,
- FeatureSlowUAMem32,
FeaturePOPCNT,
FeatureAES,
FeaturePCLMUL,
@@ -376,198 +444,166 @@ class SandyBridgeProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
FeatureXSAVEOPT,
FeatureLAHFSAHF
]>;
+
+class SandyBridgeProc<string Name> : ProcModel<Name, SandyBridgeModel,
+ SNBFeatures.Value, [
+ FeatureSlowBTMem,
+ FeatureSlowUAMem32
+]>;
def : SandyBridgeProc<"sandybridge">;
def : SandyBridgeProc<"corei7-avx">; // Legacy alias.
-class IvyBridgeProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
- FeatureMMX,
- FeatureAVX,
- FeatureFXSR,
- FeatureCMPXCHG16B,
- FeatureSlowBTMem,
- FeatureSlowUAMem32,
- FeaturePOPCNT,
- FeatureAES,
- FeaturePCLMUL,
- FeatureXSAVE,
- FeatureXSAVEOPT,
+def IVBFeatures : ProcessorFeatures<SNBFeatures.Value, [
FeatureRDRAND,
FeatureF16C,
- FeatureFSGSBase,
- FeatureLAHFSAHF
+ FeatureFSGSBase
+]>;
+
+class IvyBridgeProc<string Name> : ProcModel<Name, SandyBridgeModel,
+ IVBFeatures.Value, [
+ FeatureSlowBTMem,
+ FeatureSlowUAMem32
]>;
def : IvyBridgeProc<"ivybridge">;
def : IvyBridgeProc<"core-avx-i">; // Legacy alias.
-class HaswellProc<string Name> : ProcessorModel<Name, HaswellModel, [
- FeatureMMX,
+def HSWFeatures : ProcessorFeatures<IVBFeatures.Value, [
FeatureAVX2,
- FeatureFXSR,
- FeatureCMPXCHG16B,
- FeatureSlowBTMem,
- FeaturePOPCNT,
- FeatureAES,
- FeaturePCLMUL,
- FeatureRDRAND,
- FeatureXSAVE,
- FeatureXSAVEOPT,
- FeatureF16C,
- FeatureFSGSBase,
- FeatureMOVBE,
- FeatureLZCNT,
FeatureBMI,
FeatureBMI2,
FeatureFMA,
+ FeatureLZCNT,
+ FeatureMOVBE,
+ FeatureINVPCID,
+ FeatureVMFUNC,
FeatureRTM,
FeatureHLE,
- FeatureSlowIncDec,
- FeatureLAHFSAHF
+ FeatureSlowIncDec
]>;
+
+class HaswellProc<string Name> : ProcModel<Name, HaswellModel,
+ HSWFeatures.Value, []>;
def : HaswellProc<"haswell">;
def : HaswellProc<"core-avx2">; // Legacy alias.
-class BroadwellProc<string Name> : ProcessorModel<Name, HaswellModel, [
- FeatureMMX,
- FeatureAVX2,
- FeatureFXSR,
- FeatureCMPXCHG16B,
- FeatureSlowBTMem,
- FeaturePOPCNT,
- FeatureAES,
- FeaturePCLMUL,
- FeatureXSAVE,
- FeatureXSAVEOPT,
- FeatureRDRAND,
- FeatureF16C,
- FeatureFSGSBase,
- FeatureMOVBE,
- FeatureLZCNT,
- FeatureBMI,
- FeatureBMI2,
- FeatureFMA,
- FeatureRTM,
- FeatureHLE,
+def BDWFeatures : ProcessorFeatures<HSWFeatures.Value, [
FeatureADX,
FeatureRDSEED,
- FeatureSlowIncDec,
- FeatureLAHFSAHF
+ FeatureSMAP
]>;
+class BroadwellProc<string Name> : ProcModel<Name, HaswellModel,
+ BDWFeatures.Value, []>;
def : BroadwellProc<"broadwell">;
+def SKLFeatures : ProcessorFeatures<BDWFeatures.Value, [
+ FeatureMPX,
+ FeatureXSAVEC,
+ FeatureXSAVES,
+ FeatureSGX,
+ FeatureCLFLUSHOPT
+]>;
+
+// FIXME: define SKL model
+class SkylakeClientProc<string Name> : ProcModel<Name, HaswellModel,
+ SKLFeatures.Value, []>;
+def : SkylakeClientProc<"skylake">;
+
// FIXME: define KNL model
-class KnightsLandingProc<string Name> : ProcessorModel<Name, HaswellModel, [
- FeatureMMX,
+class KnightsLandingProc<string Name> : ProcModel<Name, HaswellModel,
+ IVBFeatures.Value, [
FeatureAVX512,
- FeatureFXSR,
FeatureERI,
FeatureCDI,
FeaturePFI,
- FeatureCMPXCHG16B,
- FeaturePOPCNT,
- FeatureAES,
- FeaturePCLMUL,
- FeatureXSAVE,
- FeatureXSAVEOPT,
- FeatureRDRAND,
- FeatureF16C,
- FeatureFSGSBase,
+ FeaturePREFETCHWT1,
+ FeatureADX,
+ FeatureRDSEED,
FeatureMOVBE,
FeatureLZCNT,
FeatureBMI,
FeatureBMI2,
- FeatureFMA,
- FeatureRTM,
- FeatureHLE,
- FeatureSlowIncDec,
- FeatureMPX,
- FeatureLAHFSAHF
+ FeatureFMA
]>;
def : KnightsLandingProc<"knl">;
-// FIXME: define SKX model
-class SkylakeProc<string Name> : ProcessorModel<Name, HaswellModel, [
- FeatureMMX,
+def SKXFeatures : ProcessorFeatures<SKLFeatures.Value, [
FeatureAVX512,
- FeatureFXSR,
FeatureCDI,
FeatureDQI,
FeatureBWI,
FeatureVLX,
FeaturePKU,
- FeatureCMPXCHG16B,
- FeatureSlowBTMem,
- FeaturePOPCNT,
- FeatureAES,
- FeaturePCLMUL,
- FeatureXSAVE,
- FeatureXSAVEOPT,
- FeatureRDRAND,
- FeatureF16C,
- FeatureFSGSBase,
- FeatureMOVBE,
- FeatureLZCNT,
- FeatureBMI,
- FeatureBMI2,
- FeatureFMA,
- FeatureRTM,
- FeatureHLE,
- FeatureADX,
- FeatureRDSEED,
- FeatureSlowIncDec,
- FeatureMPX,
- FeatureXSAVEC,
- FeatureXSAVES,
- FeatureLAHFSAHF
+ FeaturePCOMMIT,
+ FeatureCLWB
]>;
-def : SkylakeProc<"skylake">;
-def : SkylakeProc<"skx">; // Legacy alias.
+// FIXME: define SKX model
+class SkylakeServerProc<string Name> : ProcModel<Name, HaswellModel,
+ SKXFeatures.Value, []>;
+def : SkylakeServerProc<"skylake-avx512">;
+def : SkylakeServerProc<"skx">; // Legacy alias.
+
+def CNLFeatures : ProcessorFeatures<SKXFeatures.Value, [
+ FeatureVBMI,
+ FeatureIFMA,
+ FeatureSHA
+]>;
+
+class CannonlakeProc<string Name> : ProcModel<Name, HaswellModel,
+ CNLFeatures.Value, []>;
+def : CannonlakeProc<"cannonlake">;
// AMD CPUs.
-def : Proc<"k6", [FeatureSlowUAMem16, FeatureMMX]>;
-def : Proc<"k6-2", [FeatureSlowUAMem16, Feature3DNow]>;
-def : Proc<"k6-3", [FeatureSlowUAMem16, Feature3DNow]>;
-def : Proc<"athlon", [FeatureSlowUAMem16, Feature3DNowA,
+def : Proc<"k6", [FeatureX87, FeatureSlowUAMem16, FeatureMMX]>;
+def : Proc<"k6-2", [FeatureX87, FeatureSlowUAMem16, Feature3DNow]>;
+def : Proc<"k6-3", [FeatureX87, FeatureSlowUAMem16, Feature3DNow]>;
+def : Proc<"athlon", [FeatureX87, FeatureSlowUAMem16, Feature3DNowA,
FeatureSlowBTMem, FeatureSlowSHLD]>;
-def : Proc<"athlon-tbird", [FeatureSlowUAMem16, Feature3DNowA,
+def : Proc<"athlon-tbird", [FeatureX87, FeatureSlowUAMem16, Feature3DNowA,
FeatureSlowBTMem, FeatureSlowSHLD]>;
-def : Proc<"athlon-4", [FeatureSlowUAMem16, FeatureSSE1, Feature3DNowA,
- FeatureFXSR, FeatureSlowBTMem, FeatureSlowSHLD]>;
-def : Proc<"athlon-xp", [FeatureSlowUAMem16, FeatureSSE1, Feature3DNowA,
- FeatureFXSR, FeatureSlowBTMem, FeatureSlowSHLD]>;
-def : Proc<"athlon-mp", [FeatureSlowUAMem16, FeatureSSE1, Feature3DNowA,
- FeatureFXSR, FeatureSlowBTMem, FeatureSlowSHLD]>;
-def : Proc<"k8", [FeatureSlowUAMem16, FeatureSSE2, Feature3DNowA,
- FeatureFXSR, Feature64Bit, FeatureSlowBTMem,
- FeatureSlowSHLD]>;
-def : Proc<"opteron", [FeatureSlowUAMem16, FeatureSSE2, Feature3DNowA,
- FeatureFXSR, Feature64Bit, FeatureSlowBTMem,
- FeatureSlowSHLD]>;
-def : Proc<"athlon64", [FeatureSlowUAMem16, FeatureSSE2, Feature3DNowA,
- FeatureFXSR, Feature64Bit, FeatureSlowBTMem,
+def : Proc<"athlon-4", [FeatureX87, FeatureSlowUAMem16, FeatureSSE1,
+ Feature3DNowA, FeatureFXSR, FeatureSlowBTMem,
FeatureSlowSHLD]>;
-def : Proc<"athlon-fx", [FeatureSlowUAMem16, FeatureSSE2, Feature3DNowA,
- FeatureFXSR, Feature64Bit, FeatureSlowBTMem,
+def : Proc<"athlon-xp", [FeatureX87, FeatureSlowUAMem16, FeatureSSE1,
+ Feature3DNowA, FeatureFXSR, FeatureSlowBTMem,
FeatureSlowSHLD]>;
-def : Proc<"k8-sse3", [FeatureSlowUAMem16, FeatureSSE3, Feature3DNowA,
- FeatureFXSR, FeatureCMPXCHG16B, FeatureSlowBTMem,
+def : Proc<"athlon-mp", [FeatureX87, FeatureSlowUAMem16, FeatureSSE1,
+ Feature3DNowA, FeatureFXSR, FeatureSlowBTMem,
FeatureSlowSHLD]>;
-def : Proc<"opteron-sse3", [FeatureSlowUAMem16, FeatureSSE3, Feature3DNowA,
- FeatureFXSR, FeatureCMPXCHG16B, FeatureSlowBTMem,
- FeatureSlowSHLD]>;
-def : Proc<"athlon64-sse3", [FeatureSlowUAMem16, FeatureSSE3, Feature3DNowA,
- FeatureFXSR, FeatureCMPXCHG16B, FeatureSlowBTMem,
- FeatureSlowSHLD]>;
-def : Proc<"amdfam10", [FeatureSSE4A, Feature3DNowA, FeatureFXSR,
- FeatureCMPXCHG16B, FeatureLZCNT, FeaturePOPCNT,
- FeatureSlowBTMem, FeatureSlowSHLD, FeatureLAHFSAHF]>;
-def : Proc<"barcelona", [FeatureSSE4A, Feature3DNowA, FeatureFXSR,
- FeatureCMPXCHG16B, FeatureLZCNT, FeaturePOPCNT,
- FeatureSlowBTMem, FeatureSlowSHLD, FeatureLAHFSAHF]>;
+def : Proc<"k8", [FeatureX87, FeatureSlowUAMem16, FeatureSSE2,
+ Feature3DNowA, FeatureFXSR, Feature64Bit,
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"opteron", [FeatureX87, FeatureSlowUAMem16, FeatureSSE2,
+ Feature3DNowA, FeatureFXSR, Feature64Bit,
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"athlon64", [FeatureX87, FeatureSlowUAMem16, FeatureSSE2,
+ Feature3DNowA, FeatureFXSR, Feature64Bit,
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"athlon-fx", [FeatureX87, FeatureSlowUAMem16, FeatureSSE2,
+ Feature3DNowA, FeatureFXSR, Feature64Bit,
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"k8-sse3", [FeatureX87, FeatureSlowUAMem16, FeatureSSE3,
+ Feature3DNowA, FeatureFXSR, FeatureCMPXCHG16B,
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"opteron-sse3", [FeatureX87, FeatureSlowUAMem16, FeatureSSE3,
+ Feature3DNowA, FeatureFXSR, FeatureCMPXCHG16B,
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"athlon64-sse3", [FeatureX87, FeatureSlowUAMem16, FeatureSSE3,
+ Feature3DNowA, FeatureFXSR, FeatureCMPXCHG16B,
+ FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"amdfam10", [FeatureX87, FeatureSSE4A, Feature3DNowA,
+ FeatureFXSR, FeatureCMPXCHG16B, FeatureLZCNT,
+ FeaturePOPCNT, FeatureSlowBTMem, FeatureSlowSHLD,
+ FeatureLAHFSAHF]>;
+def : Proc<"barcelona", [FeatureX87, FeatureSSE4A, Feature3DNowA,
+ FeatureFXSR, FeatureCMPXCHG16B, FeatureLZCNT,
+ FeaturePOPCNT, FeatureSlowBTMem, FeatureSlowSHLD,
+ FeatureLAHFSAHF]>;
// Bobcat
def : Proc<"btver1", [
+ FeatureX87,
FeatureMMX,
FeatureSSSE3,
FeatureSSE4A,
@@ -576,13 +612,13 @@ def : Proc<"btver1", [
FeaturePRFCHW,
FeatureLZCNT,
FeaturePOPCNT,
- FeatureXSAVE,
FeatureSlowSHLD,
FeatureLAHFSAHF
]>;
// Jaguar
def : ProcessorModel<"btver2", BtVer2Model, [
+ FeatureX87,
FeatureMMX,
FeatureAVX,
FeatureFXSR,
@@ -599,11 +635,13 @@ def : ProcessorModel<"btver2", BtVer2Model, [
FeatureXSAVE,
FeatureXSAVEOPT,
FeatureSlowSHLD,
- FeatureLAHFSAHF
+ FeatureLAHFSAHF,
+ FeatureFastPartialYMMWrite
]>;
// Bulldozer
def : Proc<"bdver1", [
+ FeatureX87,
FeatureXOP,
FeatureFMA4,
FeatureCMPXCHG16B,
@@ -622,6 +660,7 @@ def : Proc<"bdver1", [
]>;
// Piledriver
def : Proc<"bdver2", [
+ FeatureX87,
FeatureXOP,
FeatureFMA4,
FeatureCMPXCHG16B,
@@ -645,6 +684,7 @@ def : Proc<"bdver2", [
// Steamroller
def : Proc<"bdver3", [
+ FeatureX87,
FeatureXOP,
FeatureFMA4,
FeatureCMPXCHG16B,
@@ -670,6 +710,7 @@ def : Proc<"bdver3", [
// Excavator
def : Proc<"bdver4", [
+ FeatureX87,
FeatureMMX,
FeatureAVX2,
FeatureFXSR,
@@ -689,15 +730,17 @@ def : Proc<"bdver4", [
FeatureFMA,
FeatureXSAVEOPT,
FeatureFSGSBase,
- FeatureLAHFSAHF
+ FeatureLAHFSAHF,
+ FeatureMWAITX
]>;
-def : Proc<"geode", [FeatureSlowUAMem16, Feature3DNowA]>;
+def : Proc<"geode", [FeatureX87, FeatureSlowUAMem16, Feature3DNowA]>;
-def : Proc<"winchip-c6", [FeatureSlowUAMem16, FeatureMMX]>;
-def : Proc<"winchip2", [FeatureSlowUAMem16, Feature3DNow]>;
-def : Proc<"c3", [FeatureSlowUAMem16, Feature3DNow]>;
-def : Proc<"c3-2", [FeatureSlowUAMem16, FeatureMMX, FeatureSSE1, FeatureFXSR]>;
+def : Proc<"winchip-c6", [FeatureX87, FeatureSlowUAMem16, FeatureMMX]>;
+def : Proc<"winchip2", [FeatureX87, FeatureSlowUAMem16, Feature3DNow]>;
+def : Proc<"c3", [FeatureX87, FeatureSlowUAMem16, Feature3DNow]>;
+def : Proc<"c3-2", [FeatureX87, FeatureSlowUAMem16, FeatureMMX,
+ FeatureSSE1, FeatureFXSR]>;
// We also provide a generic 64-bit specific x86 processor model which tries to
// be good for modern chips without enabling instruction set encodings past the
@@ -710,8 +753,8 @@ def : Proc<"c3-2", [FeatureSlowUAMem16, FeatureMMX, FeatureSSE1, FeatureFXSR]>;
// knobs which need to be tuned differently for AMD chips, we might consider
// forming a common base for them.
def : ProcessorModel<"x86-64", SandyBridgeModel,
- [FeatureMMX, FeatureSSE2, FeatureFXSR, Feature64Bit,
- FeatureSlowBTMem ]>;
+ [FeatureX87, FeatureMMX, FeatureSSE2, FeatureFXSR,
+ Feature64Bit, FeatureSlowBTMem ]>;
//===----------------------------------------------------------------------===//
// Register File Description
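The net effect of the ProcessorFeatures/ProcModel refactoring above is that each Intel generation's ISA list is defined as the previous generation's list plus its additions, while per-CPU tuning flags (FeatureSlowBTMem, FeatureSlowUAMem32, the scheduler model) stay out of the inherited list. A rough C++ analogue of the !listconcat semantics, using the -mattr feature strings; this is illustrative only, as the real composition happens at TableGen evaluation time:

#include <initializer_list>
#include <string>
#include <vector>

using FeatureList = std::vector<std::string>;

// Mirrors ProcessorFeatures<Inherited, NewFeatures>:
//   Value = !listconcat(Inherited, NewFeatures)
FeatureList inherit(FeatureList Base, std::initializer_list<std::string> New) {
  Base.insert(Base.end(), New.begin(), New.end());
  return Base;
}

const FeatureList SNB = {"x87", "mmx", "avx", "fxsr", "cx16", "popcnt",
                         "aes", "pclmul", "xsave", "xsaveopt", "sahf"};
const FeatureList IVB = inherit(SNB, {"rdrnd", "f16c", "fsgsbase"});
const FeatureList HSW = inherit(IVB, {"avx2", "bmi", "bmi2", "fma", "lzcnt",
                                      "movbe", "invpcid", "vmfunc", "rtm",
                                      "hle", "slow-incdec"});
// BDW, SKL, SKX and CNL keep stacking the same way, matching the defs above.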
diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp
index 2170e62e30fd..67e51f1e9194 100644
--- a/lib/Target/X86/X86AsmPrinter.cpp
+++ b/lib/Target/X86/X86AsmPrinter.cpp
@@ -17,7 +17,6 @@
#include "MCTargetDesc/X86BaseInfo.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineValueType.h"
@@ -28,6 +27,7 @@
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSectionCOFF.h"
@@ -50,6 +50,9 @@ bool X86AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
Subtarget = &MF.getSubtarget<X86Subtarget>();
SMShadowTracker.startFunction(MF);
+ CodeEmitter.reset(TM.getTarget().createMCCodeEmitter(
+ *MF.getSubtarget().getInstrInfo(), *MF.getSubtarget().getRegisterInfo(),
+ MF.getContext()));
SetupMachineFunction(MF);
@@ -66,6 +69,9 @@ bool X86AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// Emit the rest of the function body.
EmitFunctionBody();
+ // Emit the XRay table for this function.
+ EmitXRayTable();
+
// We didn't modify anything.
return false;
}
@@ -85,11 +91,8 @@ static void printSymbolOperand(X86AsmPrinter &P, const MachineOperand &MO,
const GlobalValue *GV = MO.getGlobal();
MCSymbol *GVSym;
- if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB)
- GVSym = P.getSymbolWithGlobalValueBase(GV, "$stub");
- else if (MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
- MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE ||
- MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
+ if (MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
+ MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE)
GVSym = P.getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
else
GVSym = P.getSymbol(GV);
@@ -107,21 +110,6 @@ static void printSymbolOperand(X86AsmPrinter &P, const MachineOperand &MO,
if (!StubSym.getPointer())
StubSym = MachineModuleInfoImpl::
StubValueTy(P.getSymbol(GV), !GV->hasInternalLinkage());
- } else if (MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE){
- MCSymbol *Sym = P.getSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
- MachineModuleInfoImpl::StubValueTy &StubSym =
- P.MMI->getObjFileInfo<MachineModuleInfoMachO>().getHiddenGVStubEntry(
- Sym);
- if (!StubSym.getPointer())
- StubSym = MachineModuleInfoImpl::
- StubValueTy(P.getSymbol(GV), !GV->hasInternalLinkage());
- } else if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB) {
- MCSymbol *Sym = P.getSymbolWithGlobalValueBase(GV, "$stub");
- MachineModuleInfoImpl::StubValueTy &StubSym =
- P.MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
- if (!StubSym.getPointer())
- StubSym = MachineModuleInfoImpl::
- StubValueTy(P.getSymbol(GV), !GV->hasInternalLinkage());
}
// If the name begins with a dollar-sign, enclose it in parens. We do this
@@ -145,7 +133,6 @@ static void printSymbolOperand(X86AsmPrinter &P, const MachineOperand &MO,
break;
case X86II::MO_DARWIN_NONLAZY:
case X86II::MO_DLLIMPORT:
- case X86II::MO_DARWIN_STUB:
// These affect the name of the symbol, not any suffix.
break;
case X86II::MO_GOT_ABSOLUTE_ADDRESS:
@@ -155,7 +142,6 @@ static void printSymbolOperand(X86AsmPrinter &P, const MachineOperand &MO,
break;
case X86II::MO_PIC_BASE_OFFSET:
case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
- case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
O << '-';
P.MF->getPICBaseSymbol()->print(O, P.MAI);
break;
@@ -294,7 +280,7 @@ static void printLeaMemReference(X86AsmPrinter &P, const MachineInstr *MI,
static void printMemReference(X86AsmPrinter &P, const MachineInstr *MI,
unsigned Op, raw_ostream &O,
const char *Modifier = nullptr) {
- assert(isMem(MI, Op) && "Invalid memory reference!");
+ assert(isMem(*MI, Op) && "Invalid memory reference!");
const MachineOperand &Segment = MI->getOperand(Op+X86::AddrSegmentReg);
if (Segment.getReg()) {
printOperand(P, MI, Op+X86::AddrSegmentReg, O, Modifier);
@@ -535,6 +521,12 @@ void X86AsmPrinter::EmitStartOfAsmFile(Module &M) {
}
}
OutStreamer->EmitSyntaxDirective();
+
+ // If this is not inline asm and we're in 16-bit mode,
+ // prefix the assembly with .code16.
+ bool is16 = TT.getEnvironment() == Triple::CODE16;
+ if (M.getModuleInlineAsm().empty() && is16)
+ OutStreamer->EmitAssemblerFlag(MCAF_Code16);
}
static void
@@ -568,8 +560,9 @@ MCSymbol *X86AsmPrinter::GetCPISymbol(unsigned CPID) const {
const DataLayout &DL = MF->getDataLayout();
SectionKind Kind = CPE.getSectionKind(&DL);
const Constant *C = CPE.Val.ConstVal;
+ unsigned Align = CPE.Alignment;
if (const MCSectionCOFF *S = dyn_cast<MCSectionCOFF>(
- getObjFileLowering().getSectionForConstant(DL, Kind, C))) {
+ getObjFileLowering().getSectionForConstant(DL, Kind, C, Align))) {
if (MCSymbol *Sym = S->getCOMDATSymbol()) {
if (Sym->isUndefined())
OutStreamer->EmitSymbolAttribute(Sym, MCSA_Global);
@@ -593,30 +586,6 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
// Output stubs for dynamically-linked functions.
MachineModuleInfoMachO::SymbolListTy Stubs;
- Stubs = MMIMacho.GetFnStubList();
- if (!Stubs.empty()) {
- MCSection *TheSection = OutContext.getMachOSection(
- "__IMPORT", "__jump_table",
- MachO::S_SYMBOL_STUBS | MachO::S_ATTR_SELF_MODIFYING_CODE |
- MachO::S_ATTR_PURE_INSTRUCTIONS,
- 5, SectionKind::getMetadata());
- OutStreamer->SwitchSection(TheSection);
-
- for (const auto &Stub : Stubs) {
- // L_foo$stub:
- OutStreamer->EmitLabel(Stub.first);
- // .indirect_symbol _foo
- OutStreamer->EmitSymbolAttribute(Stub.second.getPointer(),
- MCSA_IndirectSymbol);
- // hlt; hlt; hlt; hlt; hlt hlt = 0xf4.
- const char HltInsts[] = "\xf4\xf4\xf4\xf4\xf4";
- OutStreamer->EmitBytes(StringRef(HltInsts, 5));
- }
-
- Stubs.clear();
- OutStreamer->AddBlankLine();
- }
-
// Output stubs for external and common global variables.
Stubs = MMIMacho.GetGVStubList();
if (!Stubs.empty()) {
@@ -632,20 +601,6 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
OutStreamer->AddBlankLine();
}
- Stubs = MMIMacho.GetHiddenGVStubList();
- if (!Stubs.empty()) {
- MCSection *TheSection = OutContext.getMachOSection(
- "__IMPORT", "__pointers", MachO::S_NON_LAZY_SYMBOL_POINTERS,
- SectionKind::getMetadata());
- OutStreamer->SwitchSection(TheSection);
-
- for (auto &Stub : Stubs)
- emitNonLazySymbolPointer(*OutStreamer, Stub.first, Stub.second);
-
- Stubs.clear();
- OutStreamer->AddBlankLine();
- }
-
SM.serializeToStackMapSection();
FM.serializeToFaultMapSection();
diff --git a/lib/Target/X86/X86AsmPrinter.h b/lib/Target/X86/X86AsmPrinter.h
index 9c8bd98dbade..dcb7b5a3466f 100644
--- a/lib/Target/X86/X86AsmPrinter.h
+++ b/lib/Target/X86/X86AsmPrinter.h
@@ -29,6 +29,7 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
const X86Subtarget *Subtarget;
StackMaps SM;
FaultMaps FM;
+ std::unique_ptr<MCCodeEmitter> CodeEmitter;
// This utility class tracks the length of a stackmap instruction's 'shadow'.
// It is used by the X86AsmPrinter to ensure that the stackmap shadow
@@ -40,10 +41,11 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
// few instruction bytes to cover the shadow are NOPs used for padding.
class StackMapShadowTracker {
public:
- StackMapShadowTracker(TargetMachine &TM);
- ~StackMapShadowTracker();
- void startFunction(MachineFunction &MF);
- void count(MCInst &Inst, const MCSubtargetInfo &STI);
+ void startFunction(MachineFunction &MF) {
+ this->MF = &MF;
+ }
+ void count(MCInst &Inst, const MCSubtargetInfo &STI,
+ MCCodeEmitter *CodeEmitter);
// Called to signal the start of a shadow of RequiredSize bytes.
void reset(unsigned RequiredSize) {
@@ -56,21 +58,40 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
// to emit any necessary padding-NOPs.
void emitShadowPadding(MCStreamer &OutStreamer, const MCSubtargetInfo &STI);
private:
- TargetMachine &TM;
const MachineFunction *MF;
- std::unique_ptr<MCCodeEmitter> CodeEmitter;
- bool InShadow;
+ bool InShadow = false;
// RequiredShadowSize holds the length of the shadow specified in the most
// recently encountered STACKMAP instruction.
// CurrentShadowSize counts the number of bytes encoded since the most
// recently encountered STACKMAP, stopping when that number is greater than
// or equal to RequiredShadowSize.
- unsigned RequiredShadowSize, CurrentShadowSize;
+ unsigned RequiredShadowSize = 0, CurrentShadowSize = 0;
};
StackMapShadowTracker SMShadowTracker;
+ // This describes the kind of sled we're storing in the XRay table.
+ enum class SledKind : uint8_t {
+ FUNCTION_ENTER = 0,
+ FUNCTION_EXIT = 1,
+ TAIL_CALL = 2,
+ };
+
+ // The table will contain these structs that point to the sled, the function
+ // containing the sled, and what kind of sled (and whether they should always
+ // be instrumented).
+ struct XRayFunctionEntry {
+ const MCSymbol *Sled;
+ const MCSymbol *Function;
+ SledKind Kind;
+ bool AlwaysInstrument;
+ const class Function *Fn;
+ };
+
+ // All the sleds to be emitted.
+ std::vector<XRayFunctionEntry> Sleds;
+
// All instructions emitted by the X86AsmPrinter should use this helper
// method.
//
@@ -82,14 +103,26 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
void LowerPATCHPOINT(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerSTATEPOINT(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerFAULTING_LOAD_OP(const MachineInstr &MI, X86MCInstLower &MCIL);
+ void LowerPATCHABLE_OP(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerTlsAddr(X86MCInstLower &MCInstLowering, const MachineInstr &MI);
- public:
- explicit X86AsmPrinter(TargetMachine &TM,
- std::unique_ptr<MCStreamer> Streamer)
- : AsmPrinter(TM, std::move(Streamer)), SM(*this), FM(*this),
- SMShadowTracker(TM) {}
+ // XRay-specific lowering for X86.
+ void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
+ X86MCInstLower &MCIL);
+ void LowerPATCHABLE_RET(const MachineInstr &MI, X86MCInstLower &MCIL);
+ void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
+
+ // Helper function that emits the XRay sleds we've collected for a particular
+ // function.
+ void EmitXRayTable();
+
+ // Helper function to record a given XRay sled.
+ void recordSled(MCSymbol *Sled, const MachineInstr &MI, SledKind Kind);
+public:
+ explicit X86AsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)), SM(*this), FM(*this) {}
const char *getPassName() const override {
return "X86 Assembly / Object Emitter";
diff --git a/lib/Target/X86/X86CallFrameOptimization.cpp b/lib/Target/X86/X86CallFrameOptimization.cpp
index fc6ee1752f1f..b16fa76c73fa 100644
--- a/lib/Target/X86/X86CallFrameOptimization.cpp
+++ b/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -10,9 +10,9 @@
// This file defines a pass that optimizes call sequences on x86.
// Currently, it converts movs of function parameters onto the stack into
// pushes. This is beneficial for two main reasons:
-// 1) The push instruction encoding is much smaller than an esp-relative mov
+// 1) The push instruction encoding is much smaller than a stack-ptr-based mov.
// 2) It is possible to push memory arguments directly. So, if the
-// the transformation is preformed pre-reg-alloc, it can help relieve
+// transformation is performed pre-reg-alloc, it can help relieve
// register pressure.
//
//===----------------------------------------------------------------------===//
@@ -21,8 +21,8 @@
#include "X86.h"
#include "X86InstrInfo.h"
-#include "X86Subtarget.h"
#include "X86MachineFunctionInfo.h"
+#include "X86Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -55,7 +55,7 @@ private:
struct CallContext {
CallContext()
: FrameSetup(nullptr), Call(nullptr), SPCopy(nullptr), ExpectedDist(0),
- MovVector(4, nullptr), NoStackParams(false), UsePush(false){}
+ MovVector(4, nullptr), NoStackParams(false), UsePush(false) {}
// Iterator referring to the frame setup instruction
MachineBasicBlock::iterator FrameSetup;
@@ -75,7 +75,7 @@ private:
// True if this call site has no stack parameters
bool NoStackParams;
- // True of this callsite can use push instructions
+ // True if this call site can use push instructions
bool UsePush;
};
@@ -88,7 +88,7 @@ private:
void collectCallInfo(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, CallContext &Context);
- bool adjustCallSequence(MachineFunction &MF, const CallContext &Context);
+ void adjustCallSequence(MachineFunction &MF, const CallContext &Context);
MachineInstr *canFoldIntoRegPush(MachineBasicBlock::iterator FrameSetup,
unsigned Reg);
@@ -105,12 +105,14 @@ private:
const TargetInstrInfo *TII;
const X86FrameLowering *TFL;
const X86Subtarget *STI;
- const MachineRegisterInfo *MRI;
+ MachineRegisterInfo *MRI;
+ unsigned SlotSize;
+ unsigned Log2SlotSize;
static char ID;
};
char X86CallFrameOptimization::ID = 0;
-}
+} // end anonymous namespace
FunctionPass *llvm::createX86CallFrameOptimization() {
return new X86CallFrameOptimization();
@@ -123,22 +125,19 @@ bool X86CallFrameOptimization::isLegal(MachineFunction &MF) {
if (NoX86CFOpt.getValue())
return false;
- // We currently only support call sequences where *all* parameters.
- // are passed on the stack.
- // No point in running this in 64-bit mode, since some arguments are
- // passed in-register in all common calling conventions, so the pattern
- // we're looking for will never match.
- if (STI->is64Bit())
- return false;
-
// We can't encode multiple DW_CFA_GNU_args_size or DW_CFA_def_cfa_offset
// in the compact unwind encoding that Darwin uses. So, bail if there
// is a danger of that being generated.
- if (STI->isTargetDarwin() &&
- (!MF.getMMI().getLandingPads().empty() ||
+ if (STI->isTargetDarwin() &&
+ (!MF.getMMI().getLandingPads().empty() ||
(MF.getFunction()->needsUnwindTableEntry() && !TFL->hasFP(MF))))
return false;
+ // It is not valid to change the stack pointer outside the prolog/epilog
+ // on 64-bit Windows.
+ if (STI->isTargetWin64())
+ return false;
+
// You would expect straight-line code between call-frame setup and
// call-frame destroy. You would be wrong. There are circumstances (e.g.
// CMOV_GR8 expansion of a select that feeds a function call!) where we can
@@ -169,10 +168,10 @@ bool X86CallFrameOptimization::isLegal(MachineFunction &MF) {
return true;
}
-// Check whether this trasnformation is profitable for a particular
+// Check whether this transformation is profitable for a particular
// function - in terms of code size.
-bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
- ContextVector &CallSeqVector) {
+bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
+ ContextVector &CallSeqVector) {
// This transformation is always a win when we do not expect to have
// a reserved call frame. Under other circumstances, it may be either
// a win or a loss, and requires a heuristic.
@@ -180,10 +179,6 @@ bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
if (CannotReserveFrame)
return true;
- // Don't do this when not optimizing for size.
- if (!MF.getFunction()->optForSize())
- return false;
-
unsigned StackAlign = TFL->getStackAlignment();
int64_t Advantage = 0;
@@ -206,16 +201,16 @@ bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
// We can use pushes. First, account for the fixed costs.
// We'll need a add after the call.
Advantage -= 3;
- // If we have to realign the stack, we'll also need and sub before
+ // If we have to realign the stack, we'll also need a sub before
if (CC.ExpectedDist % StackAlign)
Advantage -= 3;
// Now, for each push, we save ~3 bytes. For small constants, we actually
// save more (up to 5 bytes), but 3 should be a good approximation.
- Advantage += (CC.ExpectedDist / 4) * 3;
+ Advantage += (CC.ExpectedDist >> Log2SlotSize) * 3;
}
}
- return (Advantage >= 0);
+ return Advantage >= 0;
}
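As a worked example of the size heuristic above: the constants mirror the comments in the code (about 3 bytes saved per converted push, 3 bytes for the ADD after the call, 3 more for a realignment SUB). This is a standalone model of one call site, not the pass itself.

#include <cstdint>

// Byte advantage of converting one call site's MOVs to PUSHes, assuming a
// reserved call frame. ExpectedDist is the outgoing-argument area in bytes.
int64_t pushAdvantageInBytes(uint32_t ExpectedDist, uint32_t StackAlign,
                             uint32_t Log2SlotSize) {
  int64_t Advantage = 0;
  Advantage -= 3; // the ADD needed after the call to pop the pushed args
  if (ExpectedDist % StackAlign)
    Advantage -= 3; // an extra SUB if the stack must be realigned
  Advantage += (int64_t(ExpectedDist) >> Log2SlotSize) * 3; // ~3 bytes/push
  return Advantage; // the pass sums this over all calls and requires >= 0
}
// Example: 16 bytes of args, 16-byte alignment, 4-byte slots -> -3 + 4*3 = 9.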
bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
@@ -224,6 +219,12 @@ bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
TFL = STI->getFrameLowering();
MRI = &MF.getRegInfo();
+ const X86RegisterInfo &RegInfo =
+ *static_cast<const X86RegisterInfo *>(STI->getRegisterInfo());
+ SlotSize = RegInfo.getSlotSize();
+ assert(isPowerOf2_32(SlotSize) && "Expect power of 2 stack slot size");
+ Log2SlotSize = Log2_32(SlotSize);
+
if (!isLegal(MF))
return false;
@@ -233,20 +234,23 @@ bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
ContextVector CallSeqVector;
- for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
- for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
- if (I->getOpcode() == FrameSetupOpcode) {
+ for (auto &MBB : MF)
+ for (auto &MI : MBB)
+ if (MI.getOpcode() == FrameSetupOpcode) {
CallContext Context;
- collectCallInfo(MF, *BB, I, Context);
+ collectCallInfo(MF, MBB, MI, Context);
CallSeqVector.push_back(Context);
}
if (!isProfitable(MF, CallSeqVector))
return false;
- for (auto CC : CallSeqVector)
- if (CC.UsePush)
- Changed |= adjustCallSequence(MF, CC);
+ for (auto CC : CallSeqVector) {
+ if (CC.UsePush) {
+ adjustCallSequence(MF, CC);
+ Changed = true;
+ }
+ }
return Changed;
}
@@ -260,7 +264,8 @@ X86CallFrameOptimization::classifyInstruction(
// The instructions we actually care about are movs onto the stack
int Opcode = MI->getOpcode();
- if (Opcode == X86::MOV32mi || Opcode == X86::MOV32mr)
+ if (Opcode == X86::MOV32mi || Opcode == X86::MOV32mr ||
+ Opcode == X86::MOV64mi32 || Opcode == X86::MOV64mr)
return Convert;
// Not all calling conventions have only stack MOVs between the stack
@@ -315,8 +320,8 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
CallContext &Context) {
// Check that this particular call sequence is amenable to the
// transformation.
- const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>(
- STI->getRegisterInfo());
+ const X86RegisterInfo &RegInfo =
+ *static_cast<const X86RegisterInfo *>(STI->getRegisterInfo());
unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
// We expect to enter this at the beginning of a call sequence
@@ -326,7 +331,8 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
// How much do we adjust the stack? This puts an upper bound on
// the number of parameters actually passed on it.
- unsigned int MaxAdjust = FrameSetup->getOperand(0).getImm() / 4;
+ unsigned int MaxAdjust =
+ FrameSetup->getOperand(0).getImm() >> Log2SlotSize;
// A zero adjustment means no stack parameters
if (!MaxAdjust) {
@@ -340,19 +346,19 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
while (I->getOpcode() == X86::LEA32r)
++I;
- // We expect a copy instruction here.
- // TODO: The copy instruction is a lowering artifact.
- // We should also support a copy-less version, where the stack
- // pointer is used directly.
- if (!I->isCopy() || !I->getOperand(0).isReg())
- return;
- Context.SPCopy = I++;
-
- unsigned StackPtr = Context.SPCopy->getOperand(0).getReg();
+ unsigned StackPtr = RegInfo.getStackRegister();
+ // SelectionDAG (but not FastISel) inserts a copy of ESP into a virtual
+ // register here. If it's there, use that virtual register as stack pointer
+ // instead.
+ if (I->isCopy() && I->getOperand(0).isReg() && I->getOperand(1).isReg() &&
+ I->getOperand(1).getReg() == StackPtr) {
+ Context.SPCopy = &*I++;
+ StackPtr = Context.SPCopy->getOperand(0).getReg();
+ }
// Scan the call setup sequence for the pattern we're looking for.
- // We only handle a simple case - a sequence of MOV32mi or MOV32mr
- // instructions, that push a sequence of 32-bit values onto the stack, with
+ // We only handle a simple case - a sequence of store instructions that
+ // push a sequence of stack-slot-aligned values onto the stack, with
// no gaps between them.
if (MaxAdjust > 4)
Context.MovVector.resize(MaxAdjust, nullptr);
@@ -367,9 +373,9 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
continue;
}
- // We know the instruction is a MOV32mi/MOV32mr.
+ // We know the instruction has a supported store opcode.
// We only want movs of the form:
- // movl imm/r32, k(%esp)
+ // mov imm/reg, k(%StackPtr)
// If we run into something else, bail.
// Note that AddrBaseReg may, counter to its name, not be a register,
// but rather a frame index.
@@ -390,9 +396,9 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
"Negative stack displacement when passing parameters");
// We really don't want to consider the unaligned case.
- if (StackDisp % 4)
+ if (StackDisp & (SlotSize - 1))
return;
- StackDisp /= 4;
+ StackDisp >>= Log2SlotSize;
assert((size_t)StackDisp < Context.MovVector.size() &&
"Function call has more parameters than the stack is adjusted for.");
@@ -400,7 +406,7 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
// If the same stack slot is being filled twice, something's fishy.
if (Context.MovVector[StackDisp] != nullptr)
return;
- Context.MovVector[StackDisp] = I;
+ Context.MovVector[StackDisp] = &*I;
for (const MachineOperand &MO : I->uses()) {
if (!MO.isReg())
@@ -418,14 +424,14 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
if (I == MBB.end() || !I->isCall())
return;
- Context.Call = I;
+ Context.Call = &*I;
if ((++I)->getOpcode() != FrameDestroyOpcode)
return;
// Now, go through the vector, and see that we don't have any gaps,
- // but only a series of 32-bit MOVs.
+ // but only a series of MOVs.
auto MMI = Context.MovVector.begin(), MME = Context.MovVector.end();
- for (; MMI != MME; ++MMI, Context.ExpectedDist += 4)
+ for (; MMI != MME; ++MMI, Context.ExpectedDist += SlotSize)
if (*MMI == nullptr)
break;
@@ -440,10 +446,9 @@ void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
return;
Context.UsePush = true;
- return;
}
-bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
+void X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
const CallContext &Context) {
// Ok, we can in fact do the transformation for this call.
// Do not remove the FrameSetup instruction, but adjust the parameters.
@@ -453,15 +458,21 @@ bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
FrameSetup->getOperand(1).setImm(Context.ExpectedDist);
DebugLoc DL = FrameSetup->getDebugLoc();
+ bool Is64Bit = STI->is64Bit();
// Now, iterate through the vector in reverse order, and replace the movs
// with pushes. MOVmi/MOVmr doesn't have any defs, so no need to
// replace uses.
- for (int Idx = (Context.ExpectedDist / 4) - 1; Idx >= 0; --Idx) {
+ for (int Idx = (Context.ExpectedDist >> Log2SlotSize) - 1; Idx >= 0; --Idx) {
MachineBasicBlock::iterator MOV = *Context.MovVector[Idx];
MachineOperand PushOp = MOV->getOperand(X86::AddrNumOperands);
MachineBasicBlock::iterator Push = nullptr;
- if (MOV->getOpcode() == X86::MOV32mi) {
- unsigned PushOpcode = X86::PUSHi32;
+ unsigned PushOpcode;
+ switch (MOV->getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected Opcode!");
+ case X86::MOV32mi:
+ case X86::MOV64mi32:
+ PushOpcode = Is64Bit ? X86::PUSH64i32 : X86::PUSHi32;
// If the operand is a small (8-bit) immediate, we can use a
// PUSH instruction with a shorter encoding.
// Note that isImm() may fail even though this is a MOVmi, because
@@ -469,13 +480,27 @@ bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
if (PushOp.isImm()) {
int64_t Val = PushOp.getImm();
if (isInt<8>(Val))
- PushOpcode = X86::PUSH32i8;
+ PushOpcode = Is64Bit ? X86::PUSH64i8 : X86::PUSH32i8;
}
Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode))
- .addOperand(PushOp);
- } else {
+ .addOperand(PushOp);
+ break;
+ case X86::MOV32mr:
+ case X86::MOV64mr:
unsigned int Reg = PushOp.getReg();
+ // If storing a 32-bit vreg on 64-bit targets, extend to a 64-bit vreg
+ // in preparation for the PUSH64. The upper 32 bits can be undef.
+ if (Is64Bit && MOV->getOpcode() == X86::MOV32mr) {
+ unsigned UndefReg = MRI->createVirtualRegister(&X86::GR64RegClass);
+ Reg = MRI->createVirtualRegister(&X86::GR64RegClass);
+ BuildMI(MBB, Context.Call, DL, TII->get(X86::IMPLICIT_DEF), UndefReg);
+ BuildMI(MBB, Context.Call, DL, TII->get(X86::INSERT_SUBREG), Reg)
+ .addReg(UndefReg)
+ .addOperand(PushOp)
+ .addImm(X86::sub_32bit);
+ }
+
// If PUSHrmm is not slow on this target, try to fold the source of the
// push into the instruction.
bool SlowPUSHrmm = STI->isAtom() || STI->isSLM();
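In the immediate case of the hunk above, the PUSH opcode is narrowed when the value fits a sign-extended 8-bit immediate; a small sketch of that size trade-off (the byte counts are the usual x86 encodings, the enum is a placeholder):

#include <cstdint>

enum class PushImmForm { Imm8, Imm32 };

// isInt<8>(Val) in LLVM: Val is representable as a sign-extended int8_t.
// PUSH imm8 (6A ib) is 2 bytes; PUSH imm32 (68 id) is 5 bytes, so the
// narrow form is preferred whenever it is legal.
PushImmForm choosePushImmForm(int64_t Val) {
  return (Val >= -128 && Val <= 127) ? PushImmForm::Imm8 : PushImmForm::Imm32;
}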
@@ -484,7 +509,8 @@ bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
// conservative about that.
MachineInstr *DefMov = nullptr;
if (!SlowPUSHrmm && (DefMov = canFoldIntoRegPush(FrameSetup, Reg))) {
- Push = BuildMI(MBB, Context.Call, DL, TII->get(X86::PUSH32rmm));
+ PushOpcode = Is64Bit ? X86::PUSH64rmm : X86::PUSH32rmm;
+ Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode));
unsigned NumOps = DefMov->getDesc().getNumOperands();
for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
@@ -492,33 +518,34 @@ bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
DefMov->eraseFromParent();
} else {
- Push = BuildMI(MBB, Context.Call, DL, TII->get(X86::PUSH32r))
- .addReg(Reg)
- .getInstr();
+ PushOpcode = Is64Bit ? X86::PUSH64r : X86::PUSH32r;
+ Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode))
+ .addReg(Reg)
+ .getInstr();
}
+ break;
}
// For debugging, when using SP-based CFA, we need to adjust the CFA
// offset after each push.
// TODO: This is needed only if we require precise CFA.
if (!TFL->hasFP(MF))
- TFL->BuildCFI(MBB, std::next(Push), DL,
- MCCFIInstruction::createAdjustCfaOffset(nullptr, 4));
+ TFL->BuildCFI(
+ MBB, std::next(Push), DL,
+ MCCFIInstruction::createAdjustCfaOffset(nullptr, SlotSize));
MBB.erase(MOV);
}
// The stack-pointer copy is no longer used in the call sequences.
// There should not be any other users, but we can't commit to that, so:
- if (MRI->use_empty(Context.SPCopy->getOperand(0).getReg()))
+ if (Context.SPCopy && MRI->use_empty(Context.SPCopy->getOperand(0).getReg()))
Context.SPCopy->eraseFromParent();
// Once we've done this, we need to make sure PEI doesn't assume a reserved
// frame.
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
FuncInfo->setHasPushSequences(true);
-
- return true;
}
MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
@@ -540,19 +567,20 @@ MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
if (!MRI->hasOneNonDBGUse(Reg))
return nullptr;
- MachineBasicBlock::iterator DefMI = MRI->getVRegDef(Reg);
+ MachineInstr &DefMI = *MRI->getVRegDef(Reg);
// Make sure the def is a MOV from memory.
- // If the def is an another block, give up.
- if (DefMI->getOpcode() != X86::MOV32rm ||
- DefMI->getParent() != FrameSetup->getParent())
+ // If the def is in another block, give up.
+ if ((DefMI.getOpcode() != X86::MOV32rm &&
+ DefMI.getOpcode() != X86::MOV64rm) ||
+ DefMI.getParent() != FrameSetup->getParent())
return nullptr;
// Make sure we don't have any instructions between DefMI and the
// push that make folding the load illegal.
- for (auto I = DefMI; I != FrameSetup; ++I)
+ for (MachineBasicBlock::iterator I = DefMI; I != FrameSetup; ++I)
if (I->isLoadFoldBarrier())
return nullptr;
- return DefMI;
+ return &DefMI;
}
diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td
index ed2e88067168..4cb62b56bce4 100644
--- a/lib/Target/X86/X86CallingConv.td
+++ b/lib/Target/X86/X86CallingConv.td
@@ -162,6 +162,9 @@ def RetCC_X86_64_C : CallingConv<[
// MMX vector types are always returned in XMM0.
CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,
+
+ CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,
+
CCDelegateTo<RetCC_X86Common>
]>;
@@ -192,6 +195,24 @@ def RetCC_X86_64_WebKit_JS : CallingConv<[
CCIfType<[i64], CCAssignToReg<[RAX]>>
]>;
+def RetCC_X86_64_Swift : CallingConv<[
+ // For integers, ECX, R8D can be used as extra return registers.
+ CCIfType<[i1], CCPromoteToType<i8>>,
+ CCIfType<[i8], CCAssignToReg<[AL, DL, CL, R8B]>>,
+ CCIfType<[i16], CCAssignToReg<[AX, DX, CX, R8W]>>,
+ CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX, R8D]>>,
+ CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX, R8]>>,
+
+ // XMM0, XMM1, XMM2 and XMM3 can be used to return FP values.
+ CCIfType<[f32], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
+ CCIfType<[f64], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
+ CCIfType<[f128], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
+
+ // MMX vector types are returned in XMM0, XMM1, XMM2 and XMM3.
+ CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
+ CCDelegateTo<RetCC_X86Common>
+]>;
+
// X86-64 AnyReg return-value convention. No explicit register is specified for
// the return-value. The register allocator is allowed and expected to choose
// any free register.
@@ -234,6 +255,9 @@ def RetCC_X86_64 : CallingConv<[
CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<RetCC_X86_64_WebKit_JS>>,
CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>,
+ // Handle Swift calls.
+ CCIfCC<"CallingConv::Swift", CCDelegateTo<RetCC_X86_64_Swift>>,
+
// Handle explicit CC selection
CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,
@@ -273,6 +297,16 @@ def CC_X86_64_C : CallingConv<[
CCIfNest<CCIfSubtarget<"isTarget64BitILP32()", CCAssignToReg<[R10D]>>>,
CCIfNest<CCAssignToReg<[R10]>>,
+ // Pass SwiftSelf in a callee saved register.
+ CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[R13]>>>,
+
+ // A SwiftError is passed in R12.
+ CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,
+
+ // For Swift Calling Convention, pass sret in %RAX.
+ CCIfCC<"CallingConv::Swift",
+ CCIfSRet<CCIfType<[i64], CCAssignToReg<[RAX]>>>>,
+
// The first 6 integer arguments are passed in integer registers.
CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>,
CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>,
@@ -770,6 +804,9 @@ def CC_X86_64_Intr : CallingConv<[
// This is the root argument convention for the X86-32 backend.
def CC_X86_32 : CallingConv<[
+ // X86_INTR calling convention is valid in MCU target and should override the
+ // MCU calling convention. Thus, this should be checked before isTargetMCU().
+ CCIfCC<"CallingConv::X86_INTR", CCDelegateTo<CC_X86_32_Intr>>,
CCIfSubtarget<"isTargetMCU()", CCDelegateTo<CC_X86_32_MCU>>,
CCIfCC<"CallingConv::X86_FastCall", CCDelegateTo<CC_X86_32_FastCall>>,
CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_32_VectorCall>>,
@@ -777,7 +814,6 @@ def CC_X86_32 : CallingConv<[
CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,
CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_32_HiPE>>,
- CCIfCC<"CallingConv::X86_INTR", CCDelegateTo<CC_X86_32_Intr>>,
// Otherwise, drop to normal X86-32 CC
CCDelegateTo<CC_X86_32_C>
@@ -819,6 +855,8 @@ def CSR_NoRegs : CalleeSavedRegs<(add)>;
def CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
def CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>;
+def CSR_64_SwiftError : CalleeSavedRegs<(sub CSR_64, R12)>;
+
def CSR_32EHRet : CalleeSavedRegs<(add EAX, EDX, CSR_32)>;
def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;
@@ -852,15 +890,23 @@ def CSR_64_MostRegs : CalleeSavedRegs<(add RBX, RCX, RDX, RSI, RDI, R8, R9, R10,
(sequence "XMM%u", 0, 15))>;
def CSR_32_AllRegs : CalleeSavedRegs<(add EAX, EBX, ECX, EDX, EBP, ESI,
- EDI, ESP)>;
+ EDI)>;
def CSR_32_AllRegs_SSE : CalleeSavedRegs<(add CSR_32_AllRegs,
(sequence "XMM%u", 0, 7))>;
-
-def CSR_64_AllRegs : CalleeSavedRegs<(add CSR_64_MostRegs, RAX, RSP,
- (sequence "XMM%u", 16, 31))>;
-def CSR_64_AllRegs_AVX : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX, RSP,
- (sequence "YMM%u", 0, 31)),
+def CSR_32_AllRegs_AVX : CalleeSavedRegs<(add CSR_32_AllRegs,
+ (sequence "YMM%u", 0, 7))>;
+def CSR_32_AllRegs_AVX512 : CalleeSavedRegs<(add CSR_32_AllRegs,
+ (sequence "ZMM%u", 0, 7),
+ (sequence "K%u", 0, 7))>;
+
+def CSR_64_AllRegs : CalleeSavedRegs<(add CSR_64_MostRegs, RAX)>;
+def CSR_64_AllRegs_AVX : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX,
+ (sequence "YMM%u", 0, 15)),
(sequence "XMM%u", 0, 15))>;
+def CSR_64_AllRegs_AVX512 : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX,
+ (sequence "ZMM%u", 0, 31),
+ (sequence "K%u", 0, 7)),
+ (sequence "XMM%u", 0, 15))>;
// Standard C + YMM6-15
def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,
diff --git a/lib/Target/X86/X86ExpandPseudo.cpp b/lib/Target/X86/X86ExpandPseudo.cpp
index a09d06519376..093fed7276f7 100644
--- a/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/lib/Target/X86/X86ExpandPseudo.cpp
@@ -44,10 +44,16 @@ public:
const X86Subtarget *STI;
const X86InstrInfo *TII;
const X86RegisterInfo *TRI;
+ const X86MachineFunctionInfo *X86FI;
const X86FrameLowering *X86FL;
bool runOnMachineFunction(MachineFunction &Fn) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return "X86 pseudo instruction expansion pass";
}
@@ -83,11 +89,18 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
// Adjust stack pointer.
int StackAdj = StackAdjust.getImm();
+ int MaxTCDelta = X86FI->getTCReturnAddrDelta();
+ int Offset = 0;
+ assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
+
+ // Incorporate the retaddr area.
+ Offset = StackAdj - MaxTCDelta;
+ assert(Offset >= 0 && "Offset should never be negative");
- if (StackAdj) {
+ if (Offset) {
// Check for possible merge with preceding ADD instruction.
- StackAdj += X86FL->mergeSPUpdates(MBB, MBBI, true);
- X86FL->emitSPUpdate(MBB, MBBI, StackAdj, /*InEpilogue=*/true);
+ Offset += X86FL->mergeSPUpdates(MBB, MBBI, true);
+ X86FL->emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
}
// Jump to label or value in register.
@@ -121,8 +134,8 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
.addReg(JumpTarget.getReg(), RegState::Kill);
}
- MachineInstr *NewMI = std::prev(MBBI);
- NewMI->copyImplicitOps(*MBBI->getParent()->getParent(), MBBI);
+ MachineInstr &NewMI = *std::prev(MBBI);
+ NewMI.copyImplicitOps(*MBBI->getParent()->getParent(), *MBBI);
// Delete the pseudo instruction TCRETURN.
MBB.erase(MBBI);
@@ -152,6 +165,32 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MBB.erase(MBBI);
return true;
}
+ case X86::RET: {
+ // Adjust stack to erase error code
+ int64_t StackAdj = MBBI->getOperand(0).getImm();
+ MachineInstrBuilder MIB;
+ if (StackAdj == 0) {
+ MIB = BuildMI(MBB, MBBI, DL,
+ TII->get(STI->is64Bit() ? X86::RETQ : X86::RETL));
+ } else if (isUInt<16>(StackAdj)) {
+ MIB = BuildMI(MBB, MBBI, DL,
+ TII->get(STI->is64Bit() ? X86::RETIQ : X86::RETIL))
+ .addImm(StackAdj);
+ } else {
+ assert(!STI->is64Bit() &&
+ "shouldn't need to do this for x86_64 targets!");
+ // A ret can only handle immediates as big as 2**16-1. If we need to pop
+ // off bytes before the return address, we must do it manually.
+ BuildMI(MBB, MBBI, DL, TII->get(X86::POP32r)).addReg(X86::ECX, RegState::Define);
+ X86FL->emitSPUpdate(MBB, MBBI, StackAdj, /*InEpilogue=*/true);
+ BuildMI(MBB, MBBI, DL, TII->get(X86::PUSH32r)).addReg(X86::ECX);
+ MIB = BuildMI(MBB, MBBI, DL, TII->get(X86::RETL));
+ }
+ for (unsigned I = 1, E = MBBI->getNumOperands(); I != E; ++I)
+ MIB.addOperand(MBBI->getOperand(I));
+ MBB.erase(MBBI);
+ return true;
+ }
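The last branch of the RET expansion above is easiest to follow on a model stack; the sketch below simulates the POP/adjust/PUSH sequence with a vector standing in for 4-byte stack slots (a toy model, not code from the pass):

#include <cassert>
#include <cstdint>
#include <vector>

// Simulate: POP %ecx saves the return address, the SP update frees StackAdj
// bytes of argument space, PUSH %ecx puts the return address back on top,
// and a plain RETL then works even though StackAdj exceeded 16 bits.
uint32_t retWithLargePop(std::vector<uint32_t> &Stack, uint32_t StackAdj) {
  assert(StackAdj % 4 == 0 && "x86-32 stack slots are 4 bytes");
  uint32_t RetAddr = Stack.back(); // POP32r %ecx
  Stack.pop_back();
  for (uint32_t I = 0; I != StackAdj / 4; ++I) // emitSPUpdate(StackAdj)
    Stack.pop_back();
  Stack.push_back(RetAddr); // PUSH32r %ecx
  Stack.pop_back();         // RETL consumes the return address
  return RetAddr;
}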
case X86::EH_RESTORE: {
// Restore ESP and EBP, and optionally ESI if required.
bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(
@@ -160,6 +199,38 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MBBI->eraseFromParent();
return true;
}
+ case X86::LCMPXCHG8B_SAVE_EBX:
+ case X86::LCMPXCHG16B_SAVE_RBX: {
+ // Perform the following transformation.
+ // SaveRbx = pseudocmpxchg Addr, <4 opds for the address>, InArg, SaveRbx
+ // =>
+ // [E|R]BX = InArg
+ // actualcmpxchg Addr
+ // [E|R]BX = SaveRbx
+ const MachineOperand &InArg = MBBI->getOperand(6);
+ unsigned SaveRbx = MBBI->getOperand(7).getReg();
+
+ unsigned ActualInArg =
+ Opcode == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
+ // Copy the input argument of the pseudo into the argument of the
+ // actual instruction.
+ TII->copyPhysReg(MBB, MBBI, DL, ActualInArg, InArg.getReg(),
+ InArg.isKill());
+ // Create the actual instruction.
+ unsigned ActualOpc =
+ Opcode == X86::LCMPXCHG8B_SAVE_EBX ? X86::LCMPXCHG8B : X86::LCMPXCHG16B;
+ MachineInstr *NewInstr = BuildMI(MBB, MBBI, DL, TII->get(ActualOpc));
+ // Copy the operands related to the address.
+ for (unsigned Idx = 1; Idx < 6; ++Idx)
+ NewInstr->addOperand(MBBI->getOperand(Idx));
+ // Finally, restore the value of RBX.
+ TII->copyPhysReg(MBB, MBBI, DL, ActualInArg, SaveRbx,
+ /*SrcIsKill*/ true);
+
+ // Delete the pseudo.
+ MBBI->eraseFromParent();
+ return true;
+ }
}
llvm_unreachable("Previous switch has a fallthrough?");
}
@@ -184,6 +255,7 @@ bool X86ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
STI = &static_cast<const X86Subtarget &>(MF.getSubtarget());
TII = STI->getInstrInfo();
TRI = STI->getRegisterInfo();
+ X86FI = MF.getInfo<X86MachineFunctionInfo>();
X86FL = STI->getFrameLowering();
bool Modified = false;
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index f48b47934e03..dfe3c80be21d 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -22,7 +22,6 @@
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
-#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -30,6 +29,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
@@ -82,7 +82,8 @@ public:
#include "X86GenFastISel.inc"
private:
- bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT, DebugLoc DL);
+ bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT,
+ const DebugLoc &DL);
bool X86FastEmitLoad(EVT VT, X86AddressMode &AM, MachineMemOperand *MMO,
unsigned &ResultReg, unsigned Alignment = 1);
@@ -347,6 +348,11 @@ bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
bool X86FastISel::X86FastEmitLoad(EVT VT, X86AddressMode &AM,
MachineMemOperand *MMO, unsigned &ResultReg,
unsigned Alignment) {
+ bool HasSSE41 = Subtarget->hasSSE41();
+ bool HasAVX = Subtarget->hasAVX();
+ bool HasAVX2 = Subtarget->hasAVX2();
+ bool IsNonTemporal = MMO && MMO->isNonTemporal();
+
// Get opcode and regclass of the output for the given load instruction.
unsigned Opc = 0;
const TargetRegisterClass *RC = nullptr;
@@ -372,7 +378,7 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, X86AddressMode &AM,
break;
case MVT::f32:
if (X86ScalarSSEf32) {
- Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
+ Opc = HasAVX ? X86::VMOVSSrm : X86::MOVSSrm;
RC = &X86::FR32RegClass;
} else {
Opc = X86::LD_Fp32m;
@@ -381,7 +387,7 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, X86AddressMode &AM,
break;
case MVT::f64:
if (X86ScalarSSEf64) {
- Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
+ Opc = HasAVX ? X86::VMOVSDrm : X86::MOVSDrm;
RC = &X86::FR64RegClass;
} else {
Opc = X86::LD_Fp64m;
@@ -392,29 +398,91 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, X86AddressMode &AM,
// No f80 support yet.
return false;
case MVT::v4f32:
- if (Alignment >= 16)
- Opc = Subtarget->hasAVX() ? X86::VMOVAPSrm : X86::MOVAPSrm;
+ if (IsNonTemporal && Alignment >= 16 && HasSSE41)
+ Opc = HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
+ else if (Alignment >= 16)
+ Opc = HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm;
else
- Opc = Subtarget->hasAVX() ? X86::VMOVUPSrm : X86::MOVUPSrm;
+ Opc = HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm;
RC = &X86::VR128RegClass;
break;
case MVT::v2f64:
- if (Alignment >= 16)
- Opc = Subtarget->hasAVX() ? X86::VMOVAPDrm : X86::MOVAPDrm;
+ if (IsNonTemporal && Alignment >= 16 && HasSSE41)
+ Opc = HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
+ else if (Alignment >= 16)
+ Opc = HasAVX ? X86::VMOVAPDrm : X86::MOVAPDrm;
else
- Opc = Subtarget->hasAVX() ? X86::VMOVUPDrm : X86::MOVUPDrm;
+ Opc = HasAVX ? X86::VMOVUPDrm : X86::MOVUPDrm;
RC = &X86::VR128RegClass;
break;
case MVT::v4i32:
case MVT::v2i64:
case MVT::v8i16:
case MVT::v16i8:
- if (Alignment >= 16)
- Opc = Subtarget->hasAVX() ? X86::VMOVDQArm : X86::MOVDQArm;
+ if (IsNonTemporal && Alignment >= 16)
+ Opc = HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
+ else if (Alignment >= 16)
+ Opc = HasAVX ? X86::VMOVDQArm : X86::MOVDQArm;
else
- Opc = Subtarget->hasAVX() ? X86::VMOVDQUrm : X86::MOVDQUrm;
+ Opc = HasAVX ? X86::VMOVDQUrm : X86::MOVDQUrm;
RC = &X86::VR128RegClass;
break;
+ case MVT::v8f32:
+ assert(HasAVX);
+ if (IsNonTemporal && Alignment >= 32 && HasAVX2)
+ Opc = X86::VMOVNTDQAYrm;
+ else
+ Opc = (Alignment >= 32) ? X86::VMOVAPSYrm : X86::VMOVUPSYrm;
+ RC = &X86::VR256RegClass;
+ break;
+ case MVT::v4f64:
+ assert(HasAVX);
+ if (IsNonTemporal && Alignment >= 32 && HasAVX2)
+ Opc = X86::VMOVNTDQAYrm;
+ else
+ Opc = (Alignment >= 32) ? X86::VMOVAPDYrm : X86::VMOVUPDYrm;
+ RC = &X86::VR256RegClass;
+ break;
+ case MVT::v8i32:
+ case MVT::v4i64:
+ case MVT::v16i16:
+ case MVT::v32i8:
+ assert(HasAVX);
+ if (IsNonTemporal && Alignment >= 32 && HasAVX2)
+ Opc = X86::VMOVNTDQAYrm;
+ else
+ Opc = (Alignment >= 32) ? X86::VMOVDQAYrm : X86::VMOVDQUYrm;
+ RC = &X86::VR256RegClass;
+ break;
+ case MVT::v16f32:
+ assert(Subtarget->hasAVX512());
+ if (IsNonTemporal && Alignment >= 64)
+ Opc = X86::VMOVNTDQAZrm;
+ else
+ Opc = (Alignment >= 64) ? X86::VMOVAPSZrm : X86::VMOVUPSZrm;
+ RC = &X86::VR512RegClass;
+ break;
+ case MVT::v8f64:
+ assert(Subtarget->hasAVX512());
+ if (IsNonTemporal && Alignment >= 64)
+ Opc = X86::VMOVNTDQAZrm;
+ else
+ Opc = (Alignment >= 64) ? X86::VMOVAPDZrm : X86::VMOVUPDZrm;
+ RC = &X86::VR512RegClass;
+ break;
+ case MVT::v8i64:
+ case MVT::v16i32:
+ case MVT::v32i16:
+ case MVT::v64i8:
+ assert(Subtarget->hasAVX512());
+ // Note: There are a lot more choices based on type with AVX-512, but
+ // there's really no advantage when the load isn't masked.
+ if (IsNonTemporal && Alignment >= 64)
+ Opc = X86::VMOVNTDQAZrm;
+ else
+ Opc = (Alignment >= 64) ? X86::VMOVDQA64Zrm : X86::VMOVDQU64Zrm;
+ RC = &X86::VR512RegClass;
+ break;
}
ResultReg = createResultReg(RC);
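The vector cases in the switch above all follow the same pattern; below is a condensed model of the 128-bit integer branch, with strings standing in for the X86:: opcode enums:

#include <string>

// Prefer a streaming (non-temporal) load when the hint is present and the
// address is sufficiently aligned, then an aligned move, then an unaligned
// move. This mirrors the v4i32/v2i64/v8i16/v16i8 case above.
std::string pick128BitIntLoad(unsigned Alignment, bool IsNonTemporal,
                              bool HasAVX) {
  if (IsNonTemporal && Alignment >= 16)
    return HasAVX ? "VMOVNTDQArm" : "MOVNTDQArm";
  if (Alignment >= 16)
    return HasAVX ? "VMOVDQArm" : "MOVDQArm";
  return HasAVX ? "VMOVDQUrm" : "MOVDQUrm";
}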
@@ -507,12 +575,70 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
else
Opc = HasAVX ? X86::VMOVDQAmr : X86::MOVDQAmr;
} else
- Opc = Subtarget->hasAVX() ? X86::VMOVDQUmr : X86::MOVDQUmr;
+ Opc = HasAVX ? X86::VMOVDQUmr : X86::MOVDQUmr;
+ break;
+ case MVT::v8f32:
+ assert(HasAVX);
+ if (Aligned)
+ Opc = IsNonTemporal ? X86::VMOVNTPSYmr : X86::VMOVAPSYmr;
+ else
+ Opc = X86::VMOVUPSYmr;
+ break;
+ case MVT::v4f64:
+ assert(HasAVX);
+ if (Aligned) {
+ Opc = IsNonTemporal ? X86::VMOVNTPDYmr : X86::VMOVAPDYmr;
+ } else
+ Opc = X86::VMOVUPDYmr;
+ break;
+ case MVT::v8i32:
+ case MVT::v4i64:
+ case MVT::v16i16:
+ case MVT::v32i8:
+ assert(HasAVX);
+ if (Aligned)
+ Opc = IsNonTemporal ? X86::VMOVNTDQYmr : X86::VMOVDQAYmr;
+ else
+ Opc = X86::VMOVDQUYmr;
+ break;
+ case MVT::v16f32:
+ assert(Subtarget->hasAVX512());
+ if (Aligned)
+ Opc = IsNonTemporal ? X86::VMOVNTPSZmr : X86::VMOVAPSZmr;
+ else
+ Opc = X86::VMOVUPSZmr;
+ break;
+ case MVT::v8f64:
+ assert(Subtarget->hasAVX512());
+ if (Aligned) {
+ Opc = IsNonTemporal ? X86::VMOVNTPDZmr : X86::VMOVAPDZmr;
+ } else
+ Opc = X86::VMOVUPDZmr;
+ break;
+ case MVT::v8i64:
+ case MVT::v16i32:
+ case MVT::v32i16:
+ case MVT::v64i8:
+ assert(Subtarget->hasAVX512());
+ // Note: There are a lot more choices based on type with AVX-512, but
+ // there's really no advantage when the store isn't masked.
+ if (Aligned)
+ Opc = IsNonTemporal ? X86::VMOVNTDQZmr : X86::VMOVDQA64Zmr;
+ else
+ Opc = X86::VMOVDQU64Zmr;
break;
}
+ const MCInstrDesc &Desc = TII.get(Opc);
+ // Some of the instructions in the previous switch use FR128 instead
+ // of FR32 for ValReg. Make sure the register we feed the instruction
+ // matches its register class constraints.
+ // Note: a copy from FR32 to FR128 is fine; these are the same registers
+ // behind the scenes, which is why this never triggered any bugs before.
+ ValReg = constrainOperandRegClass(Desc, ValReg, Desc.getNumOperands() - 1);
MachineInstrBuilder MIB =
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, Desc);
addFullAddress(MIB, AM).addReg(ValReg, getKillRegState(ValIsKill));
if (MMO)
MIB->addMemOperand(*FuncInfo.MF, MMO);
@@ -598,7 +724,7 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
AM.GV = GV;
// Allow the subtarget to classify the global.
- unsigned char GVFlags = Subtarget->ClassifyGlobalReference(GV, TM);
+ unsigned char GVFlags = Subtarget->classifyGlobalReference(GV);
// If this reference is relative to the pic base, set it now.
if (isGlobalRelativeToPICBase(GVFlags)) {
@@ -831,9 +957,8 @@ redo_gep:
// our address and just match the value instead of completely failing.
AM = SavedAM;
- for (SmallVectorImpl<const Value *>::reverse_iterator
- I = GEPs.rbegin(), E = GEPs.rend(); I != E; ++I)
- if (handleConstantAddresses(*I, AM))
+ for (const Value *I : reverse(GEPs))
+ if (handleConstantAddresses(I, AM))
return true;
return false;
@@ -938,10 +1063,8 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
// base and index registers are unused.
assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
AM.Base.Reg = X86::RIP;
- } else if (Subtarget->isPICStyleStubPIC()) {
- AM.GVOpFlags = X86II::MO_PIC_BASE_OFFSET;
- } else if (Subtarget->isPICStyleGOT()) {
- AM.GVOpFlags = X86II::MO_GOTOFF;
+ } else {
+ AM.GVOpFlags = Subtarget->classifyLocalReference(nullptr);
}
return true;
@@ -972,6 +1095,21 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
if (S->isAtomic())
return false;
+ const Value *PtrV = I->getOperand(1);
+ if (TLI.supportSwiftError()) {
+ // Swifterror values can come from either a function parameter with
+ // swifterror attribute or an alloca with swifterror attribute.
+ if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
+ if (Arg->hasSwiftErrorAttr())
+ return false;
+ }
+
+ if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
+ if (Alloca->isSwiftError())
+ return false;
+ }
+ }
+
const Value *Val = S->getValueOperand();
const Value *Ptr = S->getPointerOperand();
@@ -1002,6 +1140,10 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
if (!FuncInfo.CanLowerReturn)
return false;
+ if (TLI.supportSwiftError() &&
+ F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
+ return false;
+
if (TLI.supportSplitCSR(FuncInfo.MF))
return false;
@@ -1009,14 +1151,14 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
if (CC != CallingConv::C &&
CC != CallingConv::Fast &&
CC != CallingConv::X86_FastCall &&
- CC != CallingConv::X86_64_SysV)
+ CC != CallingConv::X86_StdCall &&
+ CC != CallingConv::X86_ThisCall &&
+ CC != CallingConv::X86_64_SysV &&
+ CC != CallingConv::X86_64_Win64)
return false;
- if (Subtarget->isCallingConvWin64(CC))
- return false;
-
- // Don't handle popping bytes on return for now.
- if (X86MFInfo->getBytesToPopOnReturn() != 0)
+ // Don't handle popping bytes if they don't fit the ret's immediate.
+ if (!isUInt<16>(X86MFInfo->getBytesToPopOnReturn()))
return false;
// fastcc with -tailcallopt is intended to provide a guaranteed
@@ -1101,11 +1243,14 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
RetRegs.push_back(VA.getLocReg());
}
+ // Swift calling convention does not require we copy the sret argument
+ // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
+
// All x86 ABIs require that for returning structs by value we copy
// the sret argument into %rax/%eax (depending on ABI) for the return.
// We saved the argument into a virtual register in the entry block,
// so now we copy the value out and into %rax/%eax.
- if (F.hasStructRetAttr()) {
+ if (F.hasStructRetAttr() && CC != CallingConv::Swift) {
unsigned Reg = X86MFInfo->getSRetReturnReg();
assert(Reg &&
"SRetReturnReg should have been set in LowerFormalArguments()!");
@@ -1116,9 +1261,15 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
}
// Now emit the RET.
- MachineInstrBuilder MIB =
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
+ MachineInstrBuilder MIB;
+ if (X86MFInfo->getBytesToPopOnReturn()) {
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Subtarget->is64Bit() ? X86::RETIQ : X86::RETIL))
+ .addImm(X86MFInfo->getBytesToPopOnReturn());
+ } else {
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
+ }
for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
MIB.addReg(RetRegs[i], RegState::Implicit);
return true;
@@ -1133,6 +1284,21 @@ bool X86FastISel::X86SelectLoad(const Instruction *I) {
if (LI->isAtomic())
return false;
+ const Value *SV = I->getOperand(0);
+ if (TLI.supportSwiftError()) {
+ // Swifterror values can come from either a function parameter with
+ // swifterror attribute or an alloca with swifterror attribute.
+ if (const Argument *Arg = dyn_cast<Argument>(SV)) {
+ if (Arg->hasSwiftErrorAttr())
+ return false;
+ }
+
+ if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
+ if (Alloca->isSwiftError())
+ return false;
+ }
+ }
+
MVT VT;
if (!isTypeLegal(LI->getType(), VT, /*AllowI1=*/true))
return false;
@@ -1204,8 +1370,8 @@ static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
}
}
-bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
- EVT VT, DebugLoc CurDbgLoc) {
+bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT,
+ const DebugLoc &CurDbgLoc) {
unsigned Op0Reg = getRegForValue(Op0);
if (Op0Reg == 0) return false;
@@ -1244,6 +1410,9 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
if (!isTypeLegal(I->getOperand(0)->getType(), VT))
return false;
+ if (I->getType()->isIntegerTy(1) && Subtarget->hasAVX512())
+ return false;
+
// Try to optimize or fold the cmp.
CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
unsigned ResultReg = 0;
@@ -2294,8 +2463,10 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// register class VR128 by method 'constrainOperandRegClass' which is
// directly called by 'fastEmitInst_ri'.
// Instruction VCVTPS2PHrr takes an extra immediate operand which is
- // used to provide rounding control.
- InputReg = fastEmitInst_ri(X86::VCVTPS2PHrr, RC, InputReg, false, 0);
+ // used to provide rounding control: use MXCSR.RC, encoded as 0b100.
+ // It's consistent with the other FP instructions, which are usually
+ // controlled by MXCSR.
+ InputReg = fastEmitInst_ri(X86::VCVTPS2PHrr, RC, InputReg, false, 4);
// Move the lower 32-bits of ResultReg to another register of class GR32.
ResultReg = createResultReg(&X86::GR32RegClass);
@@ -2477,7 +2648,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT
// is not generated by FastISel yet.
// FIXME: Update this code once tablegen can handle it.
- static const unsigned SqrtOpc[2][2] = {
+ static const uint16_t SqrtOpc[2][2] = {
{X86::SQRTSSr, X86::VSQRTSSr},
{X86::SQRTSDr, X86::VSQRTSDr}
};
@@ -2577,7 +2748,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
unsigned ResultReg = 0;
// Check if we have an immediate version.
if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
- static const unsigned Opc[2][4] = {
+ static const uint16_t Opc[2][4] = {
{ X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
{ X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }
};
@@ -2607,9 +2778,9 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit
// it manually.
if (BaseOpc == X86ISD::UMUL && !ResultReg) {
- static const unsigned MULOpc[] =
+ static const uint16_t MULOpc[] =
{ X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r };
- static const unsigned Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
+ static const MCPhysReg Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
// First copy the first operand into RAX, which is an implicit input to
// the X86::MUL*r instruction.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -2618,7 +2789,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
TLI.getRegClassFor(VT), RHSReg, RHSIsKill);
} else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
- static const unsigned MULOpc[] =
+ static const uint16_t MULOpc[] =
{ X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };
if (VT == MVT::i8) {
// Copy the first operand into AL, which is an implicit input to the
@@ -2671,7 +2842,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
if (!isTypeLegal(RetTy, VT))
return false;
- static const unsigned CvtOpc[2][2][2] = {
+ static const uint16_t CvtOpc[2][2][2] = {
{ { X86::CVTTSS2SIrr, X86::VCVTTSS2SIrr },
{ X86::CVTTSS2SI64rr, X86::VCVTTSS2SI64rr } },
{ { X86::CVTTSD2SIrr, X86::VCVTTSD2SIrr },
@@ -2742,6 +2913,8 @@ bool X86FastISel::fastLowerArguments() {
if (F->getAttributes().hasAttribute(Idx, Attribute::ByVal) ||
F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
+ F->getAttributes().hasAttribute(Idx, Attribute::SwiftSelf) ||
+ F->getAttributes().hasAttribute(Idx, Attribute::SwiftError) ||
F->getAttributes().hasAttribute(Idx, Attribute::Nest))
return false;
@@ -2809,9 +2982,9 @@ bool X86FastISel::fastLowerArguments() {
return true;
}
-static unsigned computeBytesPoppedByCallee(const X86Subtarget *Subtarget,
- CallingConv::ID CC,
- ImmutableCallSite *CS) {
+static unsigned computeBytesPoppedByCalleeForSRet(const X86Subtarget *Subtarget,
+ CallingConv::ID CC,
+ ImmutableCallSite *CS) {
if (Subtarget->is64Bit())
return 0;
if (Subtarget->getTargetTriple().isOSMSVCRT())
@@ -2849,7 +3022,10 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
case CallingConv::C:
case CallingConv::Fast:
case CallingConv::WebKit_JS:
+ case CallingConv::Swift:
case CallingConv::X86_FastCall:
+ case CallingConv::X86_StdCall:
+ case CallingConv::X86_ThisCall:
case CallingConv::X86_64_Win64:
case CallingConv::X86_64_SysV:
break;
@@ -2873,10 +3049,9 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
if (CLI.CS && CLI.CS->hasInAllocaArgument())
return false;
- // Fast-isel doesn't know about callee-pop yet.
- if (X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg,
- TM.Options.GuaranteedTailCallOpt))
- return false;
+ for (auto Flag : CLI.OutFlags)
+ if (Flag.isSwiftError())
+ return false;
SmallVector<MVT, 16> OutVTs;
SmallVector<unsigned, 16> ArgRegs;
@@ -2964,6 +3139,10 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
case CCValAssign::SExt: {
assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
"Unexpected extend");
+
+ if (ArgVT.SimpleTy == MVT::i1)
+ return false;
+
bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
ArgVT, ArgReg);
assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
@@ -2973,6 +3152,17 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
case CCValAssign::ZExt: {
assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
"Unexpected extend");
+
+ // Handle zero-extension from i1 to i8, which is common.
+ if (ArgVT.SimpleTy == MVT::i1) {
+ // Set the high bits to zero.
+ ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg, /*TODO: Kill=*/false);
+ ArgVT = MVT::i8;
+
+ if (ArgReg == 0)
+ return false;
+ }
+
bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
ArgVT, ArgReg);
assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
@@ -3113,25 +3303,10 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
// See if we need any target-specific flags on the GV operand.
- unsigned char OpFlags = 0;
-
- // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
- // external symbols most go through the PLT in PIC mode. If the symbol
- // has hidden or protected visibility, or if it is static or local, then
- // we don't need to use the PLT - we can directly call it.
- if (Subtarget->isTargetELF() &&
- TM.getRelocationModel() == Reloc::PIC_ &&
- GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
- OpFlags = X86II::MO_PLT;
- } else if (Subtarget->isPICStyleStubAny() &&
- !GV->isStrongDefinitionForLinker() &&
- (!Subtarget->getTargetTriple().isMacOSX() ||
- Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
- // PC-relative references to external symbols should go through $stub,
- // unless we're building with the leopard linker or later, which
- // automatically synthesizes these stubs.
- OpFlags = X86II::MO_DARWIN_STUB;
- }
+ unsigned char OpFlags = Subtarget->classifyGlobalFunctionReference(GV);
+ // Ignore NonLazyBind attribute in FastISel
+ if (OpFlags == X86II::MO_GOTPCREL)
+ OpFlags = 0;
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc));
if (Symbol)
@@ -3157,7 +3332,10 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
// Issue CALLSEQ_END
unsigned NumBytesForCalleeToPop =
- computeBytesPoppedByCallee(Subtarget, CC, CLI.CS);
+ X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg,
+ TM.Options.GuaranteedTailCallOpt)
+ ? NumBytes // Callee pops everything.
+ : computeBytesPoppedByCalleeForSRet(Subtarget, CC, CLI.CS);
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
.addImm(NumBytes).addImm(NumBytesForCalleeToPop);
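The CALLSEQ_END change above merges two distinct reasons the callee may pop bytes; the sketch below folds them into one computation, with the subtarget and calling-convention queries modeled as plain flags:

// Callee-pop conventions (stdcall, thiscall, fastcall, ...) pop the whole
// argument area. Otherwise, on 32-bit non-MSVCRT targets, a callee that
// returns a struct through a hidden sret pointer pops those 4 bytes.
unsigned calleePoppedBytes(bool IsCalleePopCC, unsigned NumBytes,
                           bool Is64Bit, bool IsOSMSVCRT, bool HasSRet) {
  if (IsCalleePopCC)
    return NumBytes;
  if (Is64Bit || IsOSMSVCRT)
    return 0;
  return HasSRet ? 4 : 0;
}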
@@ -3398,17 +3576,13 @@ unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
// x86-32 PIC requires a PIC base register for constant pools.
unsigned PICBase = 0;
- unsigned char OpFlag = 0;
- if (Subtarget->isPICStyleStubPIC()) { // Not dynamic-no-pic
- OpFlag = X86II::MO_PIC_BASE_OFFSET;
+ unsigned char OpFlag = Subtarget->classifyLocalReference(nullptr);
+ if (OpFlag == X86II::MO_PIC_BASE_OFFSET)
PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
- } else if (Subtarget->isPICStyleGOT()) {
- OpFlag = X86II::MO_GOTOFF;
+ else if (OpFlag == X86II::MO_GOTOFF)
PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
- } else if (Subtarget->isPICStyleRIPRel() &&
- TM.getCodeModel() == CodeModel::Small) {
+ else if (Subtarget->is64Bit() && TM.getCodeModel() == CodeModel::Small)
PICBase = X86::RIP;
- }
// Create the load from the constant pool.
unsigned CPI = MCP.getConstantPoolIndex(CFP, Align);
@@ -3572,7 +3746,7 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
AM.getFullAddress(AddrOps);
MachineInstr *Result = XII.foldMemoryOperandImpl(
- *FuncInfo.MF, MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment,
+ *FuncInfo.MF, *MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment,
/*AllowCommute=*/true);
if (!Result)
return false;
diff --git a/lib/Target/X86/X86FixupBWInsts.cpp b/lib/Target/X86/X86FixupBWInsts.cpp
new file mode 100644
index 000000000000..90e758dc2e02
--- /dev/null
+++ b/lib/Target/X86/X86FixupBWInsts.cpp
@@ -0,0 +1,371 @@
+//===-- X86FixupBWInsts.cpp - Fixup Byte or Word instructions -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines the pass that looks through the machine instructions
+/// late in the compilation, and finds byte or word instructions that
+/// can be profitably replaced with 32 bit instructions that give equivalent
+/// results for the bits of the results that are used. There are two possible
+/// reasons to do this.
+///
+/// One reason is to avoid false-dependences on the upper portions
+/// of the registers. Only instructions that have a destination register
+/// which is not in any of the source registers can be affected by this.
+/// Any instruction where one of the source registers is also the destination
+/// register is unaffected, because it has a true dependence on the source
+/// register already. So, this consideration primarily affects load
+/// instructions and register-to-register moves. It would
+/// seem like cmov(s) would also be affected, but because of the way cmov is
+/// really implemented by most machines as reading both the destination and
+/// source registers, and then "merging" the two based on a condition,
+/// it really already should be considered as having a true dependence on the
+/// destination register as well.
+///
+/// The other reason to do this is for potential code size savings. Word
+/// operations need an extra override byte compared to their 32 bit
+/// versions. So this can convert many word operations to their larger
+/// size, saving a byte in encoding. This could introduce partial register
+/// dependences where none existed however. As an example take:
+/// orw ax, $0x1000
+/// addw ax, $3
+/// now if this were to get transformed into
+/// orw ax, $0x1000
+/// addl eax, $3
+/// because the addl encodes shorter than the addw, this would introduce
+/// a use of a register that was only partially written earlier. On older
+/// Intel processors this can be quite a performance penalty, so this should
+/// probably only be done when it can be proven that a new partial dependence
+/// wouldn't be created, or when you know a newer processor is being
+/// targeted, or when optimizing for minimum code size.
+///
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86InstrInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+using namespace llvm;
+
+#define FIXUPBW_DESC "X86 Byte/Word Instruction Fixup"
+#define FIXUPBW_NAME "x86-fixup-bw-insts"
+
+#define DEBUG_TYPE FIXUPBW_NAME
+
+// Option to allow this optimization pass to have fine-grained control.
+// It is on by default; the flag mainly exists so the transformation can be
+// disabled, e.g. to avoid perturbing a large number of existing lit tests.
+static cl::opt<bool>
+ FixupBWInsts("fixup-byte-word-insts",
+ cl::desc("Change byte and word instructions to larger sizes"),
+ cl::init(true), cl::Hidden);
+
+namespace {
+class FixupBWInstPass : public MachineFunctionPass {
+ /// Loop over all of the instructions in the basic block replacing applicable
+ /// byte or word instructions with better alternatives.
+ void processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB);
+
+ /// This sets the \p SuperDestReg to the 32 bit super reg of the original
+ /// destination register of the MachineInstr passed in. It returns true if
+ /// that super register is dead just prior to \p OrigMI, and false if not.
+ bool getSuperRegDestIfDead(MachineInstr *OrigMI,
+ unsigned &SuperDestReg) const;
+
+ /// Change the MachineInstr \p MI into the equivalent extending load to 32 bit
+ /// register if it is safe to do so. Return the replacement instruction if
+ /// OK, otherwise return nullptr.
+ MachineInstr *tryReplaceLoad(unsigned New32BitOpcode, MachineInstr *MI) const;
+
+ /// Change the MachineInstr \p MI into the equivalent 32-bit copy if it is
+ /// safe to do so. Return the replacement instruction if OK, otherwise return
+ /// nullptr.
+ MachineInstr *tryReplaceCopy(MachineInstr *MI) const;
+
+ // Change the MachineInstr \p MI into an equivalent 32 bit instruction if
+ // possible. Return the replacement instruction if OK, return nullptr
+ // otherwise. Set WasCandidate to true or false depending on whether the
+ // MI was a candidate for this sort of transformation.
+ MachineInstr *tryReplaceInstr(MachineInstr *MI, MachineBasicBlock &MBB,
+ bool &WasCandidate) const;
+public:
+ static char ID;
+
+ const char *getPassName() const override {
+ return FIXUPBW_DESC;
+ }
+
+ FixupBWInstPass() : MachineFunctionPass(ID) {
+ initializeFixupBWInstPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<MachineLoopInfo>(); // Machine loop info is used to
+ // guide some heuristics.
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ /// Loop over all of the basic blocks, replacing byte and word instructions by
+ /// equivalent 32 bit instructions where performance or code size can be
+ /// improved.
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
+private:
+ MachineFunction *MF;
+
+ /// Machine instruction info used throughout the class.
+ const X86InstrInfo *TII;
+
+ /// Local member for function's OptForSize attribute.
+ bool OptForSize;
+
+ /// Machine loop info used for guiding some heuristics.
+ MachineLoopInfo *MLI;
+
+ /// Register Liveness information after the current instruction.
+ LivePhysRegs LiveRegs;
+};
+char FixupBWInstPass::ID = 0;
+}
+
+INITIALIZE_PASS(FixupBWInstPass, FIXUPBW_NAME, FIXUPBW_DESC, false, false)
+
+FunctionPass *llvm::createX86FixupBWInsts() { return new FixupBWInstPass(); }
+
+bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) {
+ if (!FixupBWInsts || skipFunction(*MF.getFunction()))
+ return false;
+
+ this->MF = &MF;
+ TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
+ OptForSize = MF.getFunction()->optForSize();
+ MLI = &getAnalysis<MachineLoopInfo>();
+ LiveRegs.init(&TII->getRegisterInfo());
+
+ DEBUG(dbgs() << "Start X86FixupBWInsts\n";);
+
+ // Process all basic blocks.
+ for (auto &MBB : MF)
+ processBasicBlock(MF, MBB);
+
+ DEBUG(dbgs() << "End X86FixupBWInsts\n";);
+
+ return true;
+}
+
+// TODO: This method of analysis can miss some legal cases, because the
+// super-register could be live into the address expression for a memory
+// reference for the instruction, and still be killed/last used by the
+// instruction. However, the existing query interfaces don't seem to
+// easily allow that to be checked.
+//
+// What we'd really like to know is whether after OrigMI, the
+// only portion of SuperDestReg that is alive is the portion that
+// was the destination register of OrigMI.
+bool FixupBWInstPass::getSuperRegDestIfDead(MachineInstr *OrigMI,
+ unsigned &SuperDestReg) const {
+ auto *TRI = &TII->getRegisterInfo();
+
+ unsigned OrigDestReg = OrigMI->getOperand(0).getReg();
+ SuperDestReg = getX86SubSuperRegister(OrigDestReg, 32);
+
+ const auto SubRegIdx = TRI->getSubRegIndex(SuperDestReg, OrigDestReg);
+
+ // Make sure that the sub-register that this instruction has as its
+ // destination is the lowest order sub-register of the super-register.
+ // If it isn't, then the register isn't really dead even if the
+ // super-register is considered dead.
+ if (SubRegIdx == X86::sub_8bit_hi)
+ return false;
+
+ if (LiveRegs.contains(SuperDestReg))
+ return false;
+
+ if (SubRegIdx == X86::sub_8bit) {
+    // In the case of byte registers, we also have to check that the upper
+    // byte register is dead. Its liveness is tracked independently of
+    // whether the super-register is dead.
+ unsigned UpperByteReg =
+ getX86SubSuperRegister(SuperDestReg, 8, /*High=*/true);
+
+ if (LiveRegs.contains(UpperByteReg))
+ return false;
+ }
+
+ return true;
+}
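+
+// Worked example (hypothetical operands, not taken from this patch): for
+//   movb (%rdi), %al
+// the 32-bit super-register is %eax. Rewriting the load to
+//   movzbl (%rdi), %eax
+// is rejected here if %eax as a whole is live after the load, and also if
+// %ah is live, because the wider write would clobber bits a later use reads.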
+
+MachineInstr *FixupBWInstPass::tryReplaceLoad(unsigned New32BitOpcode,
+ MachineInstr *MI) const {
+ unsigned NewDestReg;
+
+  // We are going to try to rewrite this load to a larger zero-extending
+  // load. This is safe if all portions of the 32-bit super-register of
+  // the original destination register, except for the original destination
+  // register itself, are dead. getSuperRegDestIfDead checks that.
+ if (!getSuperRegDestIfDead(MI, NewDestReg))
+ return nullptr;
+
+ // Safe to change the instruction.
+ MachineInstrBuilder MIB =
+ BuildMI(*MF, MI->getDebugLoc(), TII->get(New32BitOpcode), NewDestReg);
+
+ unsigned NumArgs = MI->getNumOperands();
+ for (unsigned i = 1; i < NumArgs; ++i)
+ MIB.addOperand(MI->getOperand(i));
+
+ MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+
+ return MIB;
+}
+
+MachineInstr *FixupBWInstPass::tryReplaceCopy(MachineInstr *MI) const {
+ assert(MI->getNumExplicitOperands() == 2);
+ auto &OldDest = MI->getOperand(0);
+ auto &OldSrc = MI->getOperand(1);
+
+ unsigned NewDestReg;
+ if (!getSuperRegDestIfDead(MI, NewDestReg))
+ return nullptr;
+
+ unsigned NewSrcReg = getX86SubSuperRegister(OldSrc.getReg(), 32);
+
+ // This is only correct if we access the same subregister index: otherwise,
+ // we could try to replace "movb %ah, %al" with "movl %eax, %eax".
+ auto *TRI = &TII->getRegisterInfo();
+ if (TRI->getSubRegIndex(NewSrcReg, OldSrc.getReg()) !=
+ TRI->getSubRegIndex(NewDestReg, OldDest.getReg()))
+ return nullptr;
+
+ // Safe to change the instruction.
+ // Don't set src flags, as we don't know if we're also killing the superreg.
+ // However, the superregister might not be defined; make it explicit that
+ // we don't care about the higher bits by reading it as Undef, and adding
+ // an imp-use on the original subregister.
+ MachineInstrBuilder MIB =
+ BuildMI(*MF, MI->getDebugLoc(), TII->get(X86::MOV32rr), NewDestReg)
+ .addReg(NewSrcReg, RegState::Undef)
+ .addReg(OldSrc.getReg(), RegState::Implicit);
+
+ // Drop imp-defs/uses that would be redundant with the new def/use.
+ for (auto &Op : MI->implicit_operands())
+ if (Op.getReg() != (Op.isDef() ? NewDestReg : NewSrcReg))
+ MIB.addOperand(Op);
+
+ return MIB;
+}
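+
+// Illustrative result (hypothetical registers): "movw %cx, %ax" becomes
+//   %EAX<def> = MOV32rr %ECX<undef>, %CX<imp-use>
+// The undef flag states that the upper bits of %ecx need not hold anything
+// meaningful, and the imp-use of %cx keeps the value actually read visible
+// to later liveness analysis.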
+
+MachineInstr *FixupBWInstPass::tryReplaceInstr(
+ MachineInstr *MI, MachineBasicBlock &MBB,
+ bool &WasCandidate) const {
+ MachineInstr *NewMI = nullptr;
+ WasCandidate = false;
+
+ // See if this is an instruction of the type we are currently looking for.
+ switch (MI->getOpcode()) {
+
+ case X86::MOV8rm:
+    // Only replace 8-bit loads with the zero-extending versions if
+    // in an innermost loop and not optimizing for size. This takes
+    // an extra byte to encode, and provides limited performance upside.
+ if (MachineLoop *ML = MLI->getLoopFor(&MBB)) {
+ if (ML->begin() == ML->end() && !OptForSize) {
+ NewMI = tryReplaceLoad(X86::MOVZX32rm8, MI);
+ WasCandidate = true;
+ }
+ }
+ break;
+
+ case X86::MOV16rm:
+    // Always try to replace a 16-bit load with a 32-bit zero-extending
+    // load. Code size is the same, and there is sometimes a perf advantage
+    // from eliminating a false dependence on the upper portion of
+    // the register.
+ NewMI = tryReplaceLoad(X86::MOVZX32rm16, MI);
+ WasCandidate = true;
+ break;
+
+ case X86::MOV8rr:
+ case X86::MOV16rr:
+    // Always try to replace 8/16-bit copies with a 32-bit copy.
+    // Code size is either smaller (for 16-bit) or equal (for 8-bit), and
+    // there is sometimes a perf advantage from eliminating a false
+    // dependence on the upper portion of the register.
+ NewMI = tryReplaceCopy(MI);
+ WasCandidate = true;
+ break;
+
+ default:
+ // nothing to do here.
+ break;
+ }
+
+ return NewMI;
+}
+
+void FixupBWInstPass::processBasicBlock(MachineFunction &MF,
+ MachineBasicBlock &MBB) {
+
+  // This algorithm doesn't delete the instructions it is replacing
+  // right away. By leaving the existing instructions in place, the
+  // register liveness information doesn't change, which makes the
+  // analysis that follows more precise than it would be if the replaced
+  // instructions were removed immediately.
+  //
+  // Instead, the algorithm always creates a replacement instruction,
+  // records it together with the original in a data structure, and only
+  // commits the replacements once the whole BB has been analyzed. This
+  // keeps the replacement instructions from making it seem as if the
+  // larger register might be live.
+ SmallVector<std::pair<MachineInstr *, MachineInstr *>, 8> MIReplacements;
+
+ // Start computing liveness for this block. We iterate from the end to be able
+ // to update this for each instruction.
+ LiveRegs.clear();
+ // We run after PEI, so we need to AddPristinesAndCSRs.
+ LiveRegs.addLiveOuts(MBB);
+
+ bool WasCandidate = false;
+
+ for (auto I = MBB.rbegin(); I != MBB.rend(); ++I) {
+ MachineInstr *MI = &*I;
+
+ MachineInstr *NewMI = tryReplaceInstr(MI, MBB, WasCandidate);
+
+ // Add this to replacements if it was a candidate, even if NewMI is
+ // nullptr. We will revisit that in a bit.
+ if (WasCandidate) {
+ MIReplacements.push_back(std::make_pair(MI, NewMI));
+ }
+
+ // We're done with this instruction, update liveness for the next one.
+ LiveRegs.stepBackward(*MI);
+ }
+
+ while (!MIReplacements.empty()) {
+ MachineInstr *MI = MIReplacements.back().first;
+ MachineInstr *NewMI = MIReplacements.back().second;
+ MIReplacements.pop_back();
+ if (NewMI) {
+ MBB.insert(MI, NewMI);
+ MBB.erase(MI);
+ }
+ }
+}
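+
+// Net effect of the pass, as a sketch (operands and byte counts are
+// illustrative of the common register/memory forms):
+//
+//   movw (%rdi), %ax  ->  movzwl (%rdi), %eax  ; 3 bytes -> 3 bytes; kills
+//                                              ; the false dependence on the
+//                                              ; upper half of %eax
+//   movb (%rdi), %al  ->  movzbl (%rdi), %eax  ; 2 bytes -> 3 bytes; only in
+//                                              ; innermost loops, not at -Os
+//   movw %cx, %ax     ->  movl %ecx, %eax      ; 3 bytes -> 2 bytes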
diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp
index 1dd69e8a6a5f..013ee249a60f 100644
--- a/lib/Target/X86/X86FixupLEAs.cpp
+++ b/lib/Target/X86/X86FixupLEAs.cpp
@@ -92,6 +92,12 @@ public:
/// if needed and when possible.
bool runOnMachineFunction(MachineFunction &MF) override;
+ // This pass runs after regalloc and doesn't support VReg operands.
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
private:
MachineFunction *MF;
const X86InstrInfo *TII; // Machine instruction info.
@@ -104,22 +110,22 @@ char FixupLEAPass::ID = 0;
MachineInstr *
FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI) const {
- MachineInstr *MI = MBBI;
- MachineInstr *NewMI;
- switch (MI->getOpcode()) {
+ MachineInstr &MI = *MBBI;
+ switch (MI.getOpcode()) {
case X86::MOV32rr:
case X86::MOV64rr: {
- const MachineOperand &Src = MI->getOperand(1);
- const MachineOperand &Dest = MI->getOperand(0);
- NewMI = BuildMI(*MF, MI->getDebugLoc(),
- TII->get(MI->getOpcode() == X86::MOV32rr ? X86::LEA32r
- : X86::LEA64r))
- .addOperand(Dest)
- .addOperand(Src)
- .addImm(1)
- .addReg(0)
- .addImm(0)
- .addReg(0);
+ const MachineOperand &Src = MI.getOperand(1);
+ const MachineOperand &Dest = MI.getOperand(0);
+ MachineInstr *NewMI =
+ BuildMI(*MF, MI.getDebugLoc(),
+ TII->get(MI.getOpcode() == X86::MOV32rr ? X86::LEA32r
+ : X86::LEA64r))
+ .addOperand(Dest)
+ .addOperand(Src)
+ .addImm(1)
+ .addReg(0)
+ .addImm(0)
+ .addReg(0);
MFI->insert(MBBI, NewMI); // Insert the new inst
return NewMI;
}
@@ -135,7 +141,7 @@ FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
case X86::ADD16ri8:
case X86::ADD16ri_DB:
case X86::ADD16ri8_DB:
- if (!MI->getOperand(2).isImm()) {
+ if (!MI.getOperand(2).isImm()) {
// convertToThreeAddress will call getImm()
// which requires isImm() to be true
return nullptr;
@@ -143,19 +149,22 @@ FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
break;
case X86::ADD16rr:
case X86::ADD16rr_DB:
- if (MI->getOperand(1).getReg() != MI->getOperand(2).getReg()) {
+ if (MI.getOperand(1).getReg() != MI.getOperand(2).getReg()) {
// if src1 != src2, then convertToThreeAddress will
// need to create a Virtual register, which we cannot do
// after register allocation.
return nullptr;
}
}
- return TII->convertToThreeAddress(MFI, MBBI, nullptr);
+ return TII->convertToThreeAddress(MFI, MI, nullptr);
}
FunctionPass *llvm::createX86FixupLEAs() { return new FixupLEAPass(); }
bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
+ if (skipFunction(*Func.getFunction()))
+ return false;
+
MF = &Func;
const X86Subtarget &ST = Func.getSubtarget<X86Subtarget>();
OptIncDec = !ST.slowIncDec() || Func.getFunction()->optForMinSize();
@@ -178,10 +187,10 @@ bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
FixupLEAPass::RegUsageState
FixupLEAPass::usesRegister(MachineOperand &p, MachineBasicBlock::iterator I) {
RegUsageState RegUsage = RU_NotUsed;
- MachineInstr *MI = I;
+ MachineInstr &MI = *I;
- for (unsigned int i = 0; i < MI->getNumOperands(); ++i) {
- MachineOperand &opnd = MI->getOperand(i);
+ for (unsigned int i = 0; i < MI.getNumOperands(); ++i) {
+ MachineOperand &opnd = MI.getOperand(i);
if (opnd.isReg() && opnd.getReg() == p.getReg()) {
if (opnd.isDef())
return RU_Write;
@@ -227,10 +236,10 @@ FixupLEAPass::searchBackwards(MachineOperand &p, MachineBasicBlock::iterator &I,
return CurInst;
}
InstrDistance += TII->getInstrLatency(
- MF->getSubtarget().getInstrItineraryData(), CurInst);
+ MF->getSubtarget().getInstrItineraryData(), *CurInst);
Found = getPreviousInstr(CurInst, MFI);
}
- return nullptr;
+ return MachineBasicBlock::iterator();
}
static inline bool isLEA(const int opcode) {
@@ -241,28 +250,28 @@ static inline bool isLEA(const int opcode) {
/// isLEASimpleIncOrDec - Does this LEA have one of these forms:
/// lea %reg, 1(%reg)
/// lea %reg, -1(%reg)
-static inline bool isLEASimpleIncOrDec(MachineInstr *LEA) {
- unsigned SrcReg = LEA->getOperand(1 + X86::AddrBaseReg).getReg();
- unsigned DstReg = LEA->getOperand(0).getReg();
+static inline bool isLEASimpleIncOrDec(MachineInstr &LEA) {
+ unsigned SrcReg = LEA.getOperand(1 + X86::AddrBaseReg).getReg();
+ unsigned DstReg = LEA.getOperand(0).getReg();
unsigned AddrDispOp = 1 + X86::AddrDisp;
return SrcReg == DstReg &&
- LEA->getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
- LEA->getOperand(1 + X86::AddrSegmentReg).getReg() == 0 &&
- LEA->getOperand(AddrDispOp).isImm() &&
- (LEA->getOperand(AddrDispOp).getImm() == 1 ||
- LEA->getOperand(AddrDispOp).getImm() == -1);
+ LEA.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
+ LEA.getOperand(1 + X86::AddrSegmentReg).getReg() == 0 &&
+ LEA.getOperand(AddrDispOp).isImm() &&
+ (LEA.getOperand(AddrDispOp).getImm() == 1 ||
+ LEA.getOperand(AddrDispOp).getImm() == -1);
}
bool FixupLEAPass::fixupIncDec(MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI) const {
- MachineInstr *MI = I;
- int Opcode = MI->getOpcode();
+ MachineInstr &MI = *I;
+ int Opcode = MI.getOpcode();
if (!isLEA(Opcode))
return false;
if (isLEASimpleIncOrDec(MI) && TII->isSafeToClobberEFLAGS(*MFI, I)) {
int NewOpcode;
- bool isINC = MI->getOperand(4).getImm() == 1;
+ bool isINC = MI.getOperand(4).getImm() == 1;
switch (Opcode) {
case X86::LEA16r:
NewOpcode = isINC ? X86::INC16r : X86::DEC16r;
@@ -277,9 +286,9 @@ bool FixupLEAPass::fixupIncDec(MachineBasicBlock::iterator &I,
}
MachineInstr *NewMI =
- BuildMI(*MFI, I, MI->getDebugLoc(), TII->get(NewOpcode))
- .addOperand(MI->getOperand(0))
- .addOperand(MI->getOperand(1));
+ BuildMI(*MFI, I, MI.getDebugLoc(), TII->get(NewOpcode))
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(1));
MFI->erase(I);
I = static_cast<MachineBasicBlock::iterator>(NewMI);
return true;
@@ -290,17 +299,16 @@ bool FixupLEAPass::fixupIncDec(MachineBasicBlock::iterator &I,
void FixupLEAPass::processInstruction(MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI) {
// Process a load, store, or LEA instruction.
- MachineInstr *MI = I;
- int opcode = MI->getOpcode();
- const MCInstrDesc &Desc = MI->getDesc();
- int AddrOffset = X86II::getMemoryOperandNo(Desc.TSFlags, opcode);
+ MachineInstr &MI = *I;
+ const MCInstrDesc &Desc = MI.getDesc();
+ int AddrOffset = X86II::getMemoryOperandNo(Desc.TSFlags);
if (AddrOffset >= 0) {
AddrOffset += X86II::getOperandBias(Desc);
- MachineOperand &p = MI->getOperand(AddrOffset + X86::AddrBaseReg);
+ MachineOperand &p = MI.getOperand(AddrOffset + X86::AddrBaseReg);
if (p.isReg() && p.getReg() != X86::ESP) {
seekLEAFixup(p, I, MFI);
}
- MachineOperand &q = MI->getOperand(AddrOffset + X86::AddrIndexReg);
+ MachineOperand &q = MI.getOperand(AddrOffset + X86::AddrIndexReg);
if (q.isReg() && q.getReg() != X86::ESP) {
seekLEAFixup(q, I, MFI);
}
@@ -311,7 +319,7 @@ void FixupLEAPass::seekLEAFixup(MachineOperand &p,
MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI) {
MachineBasicBlock::iterator MBI = searchBackwards(p, I, MFI);
- if (MBI) {
+ if (MBI != MachineBasicBlock::iterator()) {
MachineInstr *NewMI = postRAConvertToLEA(MFI, MBI);
if (NewMI) {
++NumLEAs;
@@ -328,19 +336,19 @@ void FixupLEAPass::seekLEAFixup(MachineOperand &p,
void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI) {
- MachineInstr *MI = I;
- const int opcode = MI->getOpcode();
+ MachineInstr &MI = *I;
+ const int opcode = MI.getOpcode();
if (!isLEA(opcode))
return;
- if (MI->getOperand(5).getReg() != 0 || !MI->getOperand(4).isImm() ||
+ if (MI.getOperand(5).getReg() != 0 || !MI.getOperand(4).isImm() ||
!TII->isSafeToClobberEFLAGS(*MFI, I))
return;
- const unsigned DstR = MI->getOperand(0).getReg();
- const unsigned SrcR1 = MI->getOperand(1).getReg();
- const unsigned SrcR2 = MI->getOperand(3).getReg();
+ const unsigned DstR = MI.getOperand(0).getReg();
+ const unsigned SrcR1 = MI.getOperand(1).getReg();
+ const unsigned SrcR2 = MI.getOperand(3).getReg();
if ((SrcR1 == 0 || SrcR1 != DstR) && (SrcR2 == 0 || SrcR2 != DstR))
return;
- if (MI->getOperand(2).getImm() > 1)
+ if (MI.getOperand(2).getImm() > 1)
return;
int addrr_opcode, addri_opcode;
switch (opcode) {
@@ -363,12 +371,12 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
DEBUG(dbgs() << "FixLEA: Candidate to replace:"; I->dump(););
DEBUG(dbgs() << "FixLEA: Replaced by: ";);
MachineInstr *NewMI = nullptr;
- const MachineOperand &Dst = MI->getOperand(0);
+ const MachineOperand &Dst = MI.getOperand(0);
// Make ADD instruction for two registers writing to LEA's destination
if (SrcR1 != 0 && SrcR2 != 0) {
- const MachineOperand &Src1 = MI->getOperand(SrcR1 == DstR ? 1 : 3);
- const MachineOperand &Src2 = MI->getOperand(SrcR1 == DstR ? 3 : 1);
- NewMI = BuildMI(*MF, MI->getDebugLoc(), TII->get(addrr_opcode))
+ const MachineOperand &Src1 = MI.getOperand(SrcR1 == DstR ? 1 : 3);
+ const MachineOperand &Src2 = MI.getOperand(SrcR1 == DstR ? 3 : 1);
+ NewMI = BuildMI(*MF, MI.getDebugLoc(), TII->get(addrr_opcode))
.addOperand(Dst)
.addOperand(Src1)
.addOperand(Src2);
@@ -376,12 +384,12 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
DEBUG(NewMI->dump(););
}
// Make ADD instruction for immediate
- if (MI->getOperand(4).getImm() != 0) {
- const MachineOperand &SrcR = MI->getOperand(SrcR1 == DstR ? 1 : 3);
- NewMI = BuildMI(*MF, MI->getDebugLoc(), TII->get(addri_opcode))
+ if (MI.getOperand(4).getImm() != 0) {
+ const MachineOperand &SrcR = MI.getOperand(SrcR1 == DstR ? 1 : 3);
+ NewMI = BuildMI(*MF, MI.getDebugLoc(), TII->get(addri_opcode))
.addOperand(Dst)
.addOperand(SrcR)
- .addImm(MI->getOperand(4).getImm());
+ .addImm(MI.getOperand(4).getImm());
MFI->insert(I, NewMI);
DEBUG(NewMI->dump(););
}
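
// For Silvermont, the overall rewrite performed by processInstructionForSLM
// splits a slow three-operand LEA whose destination matches one of its
// sources into cheap ADDs. A sketch (registers illustrative, not taken from
// this patch):
//
//   leal 4(%edi,%esi), %edi   ->   addl %esi, %edi
//                                  addl $4, %edi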
diff --git a/lib/Target/X86/X86FixupSetCC.cpp b/lib/Target/X86/X86FixupSetCC.cpp
new file mode 100644
index 000000000000..fb317da95355
--- /dev/null
+++ b/lib/Target/X86/X86FixupSetCC.cpp
@@ -0,0 +1,186 @@
+//===---- X86FixupSetCC.cpp - fix zero-extension of setcc patterns -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a pass that fixes zero-extension of setcc patterns.
+// X86 setcc instructions are modeled to have no input arguments, and a single
+// GR8 output argument. This is consistent with other similar instructions
+// (e.g. movb), but means it is impossible to directly generate a setcc into
+// the lower GR8 of a specified GR32.
+// This means that ISel must select (zext (setcc)) into something like
+// seta %al; movzbl %al, %eax.
+// Unfortunately, this can cause a stall due to the partial register write
+// performed by the setcc. Instead, we can use:
+// xor %eax, %eax; seta %al
+// This both avoids the stall and is shorter to encode.
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86InstrInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "x86-fixup-setcc"
+
+STATISTIC(NumSubstZexts, "Number of setcc + zext pairs substituted");
+
+namespace {
+class X86FixupSetCCPass : public MachineFunctionPass {
+public:
+ X86FixupSetCCPass() : MachineFunctionPass(ID) {}
+
+ const char *getPassName() const override { return "X86 Fixup SetCC"; }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+ // Find the preceding instruction that imp-defs eflags.
+ MachineInstr *findFlagsImpDef(MachineBasicBlock *MBB,
+ MachineBasicBlock::reverse_iterator MI);
+
+ // Return true if MI imp-uses eflags.
+ bool impUsesFlags(MachineInstr *MI);
+
+ // Return true if this is the opcode of a SetCC instruction with a register
+ // output.
+  bool isSetCCr(unsigned Opcode);
+
+ MachineRegisterInfo *MRI;
+ const X86InstrInfo *TII;
+
+ enum { SearchBound = 16 };
+
+ static char ID;
+};
+
+char X86FixupSetCCPass::ID = 0;
+}
+
+FunctionPass *llvm::createX86FixupSetCC() { return new X86FixupSetCCPass(); }
+
+bool X86FixupSetCCPass::isSetCCr(unsigned Opcode) {
+ switch (Opcode) {
+ default:
+ return false;
+ case X86::SETOr:
+ case X86::SETNOr:
+ case X86::SETBr:
+ case X86::SETAEr:
+ case X86::SETEr:
+ case X86::SETNEr:
+ case X86::SETBEr:
+ case X86::SETAr:
+ case X86::SETSr:
+ case X86::SETNSr:
+ case X86::SETPr:
+ case X86::SETNPr:
+ case X86::SETLr:
+ case X86::SETGEr:
+ case X86::SETLEr:
+ case X86::SETGr:
+ return true;
+ }
+}
+
+// We expect the instruction *immediately* before the setcc to imp-def
+// EFLAGS (because of scheduling glue). To make this less brittle w.r.t.
+// scheduling, look backwards until we hit the beginning of the
+// basic block, or a small bound (to avoid quadratic behavior).
+MachineInstr *
+X86FixupSetCCPass::findFlagsImpDef(MachineBasicBlock *MBB,
+ MachineBasicBlock::reverse_iterator MI) {
+ auto MBBStart = MBB->instr_rend();
+ for (int i = 0; (i < SearchBound) && (MI != MBBStart); ++i, ++MI)
+ for (auto &Op : MI->implicit_operands())
+ if ((Op.getReg() == X86::EFLAGS) && (Op.isDef()))
+ return &*MI;
+
+ return nullptr;
+}
+
+bool X86FixupSetCCPass::impUsesFlags(MachineInstr *MI) {
+ for (auto &Op : MI->implicit_operands())
+ if ((Op.getReg() == X86::EFLAGS) && (Op.isUse()))
+ return true;
+
+ return false;
+}
+
+bool X86FixupSetCCPass::runOnMachineFunction(MachineFunction &MF) {
+ bool Changed = false;
+ MRI = &MF.getRegInfo();
+ TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
+
+ SmallVector<MachineInstr*, 4> ToErase;
+
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ // Find a setcc that is used by a zext.
+      // This doesn't have to be the only use; the transformation is safe
+      // regardless.
+ if (!isSetCCr(MI.getOpcode()))
+ continue;
+
+ MachineInstr *ZExt = nullptr;
+ for (auto &Use : MRI->use_instructions(MI.getOperand(0).getReg()))
+ if (Use.getOpcode() == X86::MOVZX32rr8)
+ ZExt = &Use;
+
+ if (!ZExt)
+ continue;
+
+ // Find the preceding instruction that imp-defs eflags.
+ MachineInstr *FlagsDefMI = findFlagsImpDef(
+ MI.getParent(), MachineBasicBlock::reverse_iterator(&MI));
+ if (!FlagsDefMI)
+ continue;
+
+      // We'd like to put something that clobbers eflags directly before
+      // FlagsDefMI. This can't hurt anything after FlagsDefMI, because
+      // FlagsDefMI itself, by definition, clobbers eflags. But it may
+      // happen that FlagsDefMI also *uses* eflags, in which case the
+      // transformation is invalid.
+ if (impUsesFlags(FlagsDefMI))
+ continue;
+
+ ++NumSubstZexts;
+ Changed = true;
+
+ // On 32-bit, we need to be careful to force an ABCD register.
+ const TargetRegisterClass *RC = MF.getSubtarget<X86Subtarget>().is64Bit()
+ ? &X86::GR32RegClass
+ : &X86::GR32_ABCDRegClass;
+ unsigned ZeroReg = MRI->createVirtualRegister(RC);
+ unsigned InsertReg = MRI->createVirtualRegister(RC);
+
+      // Initialize a register with 0. This must go before the eflags def.
+ BuildMI(MBB, FlagsDefMI, MI.getDebugLoc(), TII->get(X86::MOV32r0),
+ ZeroReg);
+
+ // X86 setcc only takes an output GR8, so fake a GR32 input by inserting
+ // the setcc result into the low byte of the zeroed register.
+ BuildMI(*ZExt->getParent(), ZExt, ZExt->getDebugLoc(),
+ TII->get(X86::INSERT_SUBREG), InsertReg)
+ .addReg(ZeroReg)
+ .addReg(MI.getOperand(0).getReg())
+ .addImm(X86::sub_8bit);
+ MRI->replaceRegWith(ZExt->getOperand(0).getReg(), InsertReg);
+ ToErase.push_back(ZExt);
+ }
+ }
+
+ for (auto &I : ToErase)
+ I->eraseFromParent();
+
+ return Changed;
+}
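+
+// End-to-end sketch of the rewrite (virtual register numbers are
+// illustrative):
+//
+//   before:  CMP32rr ...                        ; imp-defs EFLAGS
+//            %vreg1<def> = SETAr
+//            %vreg2<def> = MOVZX32rr8 %vreg1
+//
+//   after:   %vreg3<def> = MOV32r0              ; inserted before flags def
+//            CMP32rr ...
+//            %vreg1<def> = SETAr
+//            %vreg4<def> = INSERT_SUBREG %vreg3, %vreg1, sub_8bit
+//            ; uses of %vreg2 are rewritten to %vreg4; the MOVZX is erased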
diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp
index 97bb8ab653a6..55c1bff2bc18 100644
--- a/lib/Target/X86/X86FloatingPoint.cpp
+++ b/lib/Target/X86/X86FloatingPoint.cpp
@@ -76,6 +76,11 @@ namespace {
bool runOnMachineFunction(MachineFunction &MF) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override { return "X86 FP Stackifier"; }
private:
@@ -222,7 +227,8 @@ namespace {
++NumFXCH;
}
- void duplicateToTop(unsigned RegNo, unsigned AsReg, MachineInstr *I) {
+ void duplicateToTop(unsigned RegNo, unsigned AsReg,
+ MachineBasicBlock::iterator I) {
DebugLoc dl = I == MBB->end() ? DebugLoc() : I->getDebugLoc();
unsigned STReg = getSTReg(RegNo);
pushReg(AsReg); // New register on top of stack
@@ -257,6 +263,7 @@ namespace {
bool processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB);
void handleCall(MachineBasicBlock::iterator &I);
+ void handleReturn(MachineBasicBlock::iterator &I);
void handleZeroArgFP(MachineBasicBlock::iterator &I);
void handleOneArgFP(MachineBasicBlock::iterator &I);
void handleOneArgFPRW(MachineBasicBlock::iterator &I);
@@ -266,9 +273,9 @@ namespace {
void handleSpecialFP(MachineBasicBlock::iterator &I);
// Check if a COPY instruction is using FP registers.
- static bool isFPCopy(MachineInstr *MI) {
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned SrcReg = MI->getOperand(1).getReg();
+ static bool isFPCopy(MachineInstr &MI) {
+ unsigned DstReg = MI.getOperand(0).getReg();
+ unsigned SrcReg = MI.getOperand(1).getReg();
return X86::RFP80RegClass.contains(DstReg) ||
X86::RFP80RegClass.contains(SrcReg);
@@ -367,21 +374,21 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
setupBlockStack();
for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) {
- MachineInstr *MI = I;
- uint64_t Flags = MI->getDesc().TSFlags;
+ MachineInstr &MI = *I;
+ uint64_t Flags = MI.getDesc().TSFlags;
unsigned FPInstClass = Flags & X86II::FPTypeMask;
- if (MI->isInlineAsm())
+ if (MI.isInlineAsm())
FPInstClass = X86II::SpecialFP;
- if (MI->isCopy() && isFPCopy(MI))
+ if (MI.isCopy() && isFPCopy(MI))
FPInstClass = X86II::SpecialFP;
- if (MI->isImplicitDef() &&
- X86::RFP80RegClass.contains(MI->getOperand(0).getReg()))
+ if (MI.isImplicitDef() &&
+ X86::RFP80RegClass.contains(MI.getOperand(0).getReg()))
FPInstClass = X86II::SpecialFP;
- if (MI->isCall())
+ if (MI.isCall())
FPInstClass = X86II::SpecialFP;
if (FPInstClass == X86II::NotFP)
@@ -389,16 +396,16 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
MachineInstr *PrevMI = nullptr;
if (I != BB.begin())
- PrevMI = std::prev(I);
+ PrevMI = &*std::prev(I);
++NumFP; // Keep track of # of pseudo instrs
- DEBUG(dbgs() << "\nFPInst:\t" << *MI);
+ DEBUG(dbgs() << "\nFPInst:\t" << MI);
// Get dead variables list now because the MI pointer may be deleted as part
// of processing!
SmallVector<unsigned, 8> DeadRegs;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() && MO.isDead())
DeadRegs.push_back(MO.getReg());
}
@@ -427,20 +434,22 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
}
// Print out all of the instructions expanded to if -debug
- DEBUG(
- MachineBasicBlock::iterator PrevI(PrevMI);
+ DEBUG({
+ MachineBasicBlock::iterator PrevI = PrevMI;
if (I == PrevI) {
dbgs() << "Just deleted pseudo instruction\n";
} else {
MachineBasicBlock::iterator Start = I;
// Rewind to first instruction newly inserted.
- while (Start != BB.begin() && std::prev(Start) != PrevI) --Start;
+ while (Start != BB.begin() && std::prev(Start) != PrevI)
+ --Start;
dbgs() << "Inserted instructions:\n\t";
Start->print(dbgs());
- while (++Start != std::next(I)) {}
+ while (++Start != std::next(I)) {
+ }
}
dumpStack();
- );
+ });
(void)PrevMI;
Changed = true;
@@ -779,8 +788,8 @@ static const TableEntry PopTable[] = {
/// instruction if it was modified in place.
///
void FPS::popStackAfter(MachineBasicBlock::iterator &I) {
- MachineInstr* MI = I;
- DebugLoc dl = MI->getDebugLoc();
+ MachineInstr &MI = *I;
+ const DebugLoc &dl = MI.getDebugLoc();
ASSERT_SORTED(PopTable);
if (StackTop == 0)
report_fatal_error("Cannot pop empty stack!");
@@ -943,15 +952,102 @@ void FPS::handleCall(MachineBasicBlock::iterator &I) {
pushReg(N - I - 1);
}
+/// If RET has an FP register use operand, pass the first one in ST(0) and
+/// the second one in ST(1).
+void FPS::handleReturn(MachineBasicBlock::iterator &I) {
+ MachineInstr &MI = *I;
+
+ // Find the register operands.
+ unsigned FirstFPRegOp = ~0U, SecondFPRegOp = ~0U;
+ unsigned LiveMask = 0;
+
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &Op = MI.getOperand(i);
+ if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
+ continue;
+ // FP Register uses must be kills unless there are two uses of the same
+ // register, in which case only one will be a kill.
+ assert(Op.isUse() &&
+ (Op.isKill() || // Marked kill.
+ getFPReg(Op) == FirstFPRegOp || // Second instance.
+ MI.killsRegister(Op.getReg())) && // Later use is marked kill.
+ "Ret only defs operands, and values aren't live beyond it");
+
+ if (FirstFPRegOp == ~0U)
+ FirstFPRegOp = getFPReg(Op);
+ else {
+ assert(SecondFPRegOp == ~0U && "More than two fp operands!");
+ SecondFPRegOp = getFPReg(Op);
+ }
+ LiveMask |= (1 << getFPReg(Op));
+
+ // Remove the operand so that later passes don't see it.
+ MI.RemoveOperand(i);
+ --i;
+ --e;
+ }
+
+ // We may have been carrying spurious live-ins, so make sure only the
+ // returned registers are left live.
+ adjustLiveRegs(LiveMask, MI);
+ if (!LiveMask) return; // Quick check to see if any are possible.
+
+ // There are only four possibilities here:
+ // 1) we are returning a single FP value. In this case, it has to be in
+ // ST(0) already, so just declare success by removing the value from the
+ // FP Stack.
+ if (SecondFPRegOp == ~0U) {
+ // Assert that the top of stack contains the right FP register.
+ assert(StackTop == 1 && FirstFPRegOp == getStackEntry(0) &&
+ "Top of stack not the right register for RET!");
+
+ // Ok, everything is good, mark the value as not being on the stack
+ // anymore so that our assertion about the stack being empty at end of
+ // block doesn't fire.
+ StackTop = 0;
+ return;
+ }
+
+ // Otherwise, we are returning two values:
+ // 2) If returning the same value for both, we only have one thing in the FP
+ // stack. Consider: RET FP1, FP1
+ if (StackTop == 1) {
+ assert(FirstFPRegOp == SecondFPRegOp && FirstFPRegOp == getStackEntry(0)&&
+ "Stack misconfiguration for RET!");
+
+ // Duplicate the TOS so that we return it twice. Just pick some other FPx
+ // register to hold it.
+ unsigned NewReg = ScratchFPReg;
+ duplicateToTop(FirstFPRegOp, NewReg, MI);
+ FirstFPRegOp = NewReg;
+ }
+
+  // Okay, we know we have two different FPx operands now:
+ assert(StackTop == 2 && "Must have two values live!");
+
+  // 3) If SecondFPRegOp is currently in ST(0) and FirstFPRegOp is in ST(1),
+  // emit an fxch so that FirstFPRegOp ends up in ST(0).
+ if (getStackEntry(0) == SecondFPRegOp) {
+ assert(getStackEntry(1) == FirstFPRegOp && "Unknown regs live");
+ moveToTop(FirstFPRegOp, MI);
+ }
+
+  // 4) Finally, FirstFPRegOp must be in ST(0) and SecondFPRegOp must be in
+  // ST(1). Just remove both from our understanding of the stack and return.
+ assert(getStackEntry(0) == FirstFPRegOp && "Unknown regs live");
+ assert(getStackEntry(1) == SecondFPRegOp && "Unknown regs live");
+ StackTop = 0;
+}
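+
+// Illustrative example: for a return carrying two x87 values, e.g.
+//   RET %FP0<kill>, %FP1<kill>
+// the operands are stripped above, and cases 2-4 arrange for the first
+// value to end up in ST(0) and the second in ST(1) (emitting an fxch if
+// they arrive swapped) before the stack bookkeeping is cleared.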
+
/// handleZeroArgFP - ST(0) = fld0 ST(0) = flds <mem>
///
void FPS::handleZeroArgFP(MachineBasicBlock::iterator &I) {
- MachineInstr *MI = I;
- unsigned DestReg = getFPReg(MI->getOperand(0));
+ MachineInstr &MI = *I;
+ unsigned DestReg = getFPReg(MI.getOperand(0));
// Change from the pseudo instruction to the concrete instruction.
- MI->RemoveOperand(0); // Remove the explicit ST(0) operand
- MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
+ MI.RemoveOperand(0); // Remove the explicit ST(0) operand
+ MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
// Result gets pushed on the stack.
pushReg(DestReg);
@@ -960,14 +1056,14 @@ void FPS::handleZeroArgFP(MachineBasicBlock::iterator &I) {
/// handleOneArgFP - fst <mem>, ST(0)
///
void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
- MachineInstr *MI = I;
- unsigned NumOps = MI->getDesc().getNumOperands();
+ MachineInstr &MI = *I;
+ unsigned NumOps = MI.getDesc().getNumOperands();
assert((NumOps == X86::AddrNumOperands + 1 || NumOps == 1) &&
"Can only handle fst* & ftst instructions!");
// Is this the last use of the source register?
- unsigned Reg = getFPReg(MI->getOperand(NumOps-1));
- bool KillsSrc = MI->killsRegister(X86::FP0+Reg);
+ unsigned Reg = getFPReg(MI.getOperand(NumOps - 1));
+ bool KillsSrc = MI.killsRegister(X86::FP0 + Reg);
// FISTP64m is strange because there isn't a non-popping version.
// If we have one _and_ we don't want to pop the operand, duplicate the value
@@ -975,34 +1071,31 @@ void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
// always ok.
// Ditto FISTTP16m, FISTTP32m, FISTTP64m, ST_FpP80m.
//
- if (!KillsSrc &&
- (MI->getOpcode() == X86::IST_Fp64m32 ||
- MI->getOpcode() == X86::ISTT_Fp16m32 ||
- MI->getOpcode() == X86::ISTT_Fp32m32 ||
- MI->getOpcode() == X86::ISTT_Fp64m32 ||
- MI->getOpcode() == X86::IST_Fp64m64 ||
- MI->getOpcode() == X86::ISTT_Fp16m64 ||
- MI->getOpcode() == X86::ISTT_Fp32m64 ||
- MI->getOpcode() == X86::ISTT_Fp64m64 ||
- MI->getOpcode() == X86::IST_Fp64m80 ||
- MI->getOpcode() == X86::ISTT_Fp16m80 ||
- MI->getOpcode() == X86::ISTT_Fp32m80 ||
- MI->getOpcode() == X86::ISTT_Fp64m80 ||
- MI->getOpcode() == X86::ST_FpP80m)) {
+ if (!KillsSrc && (MI.getOpcode() == X86::IST_Fp64m32 ||
+ MI.getOpcode() == X86::ISTT_Fp16m32 ||
+ MI.getOpcode() == X86::ISTT_Fp32m32 ||
+ MI.getOpcode() == X86::ISTT_Fp64m32 ||
+ MI.getOpcode() == X86::IST_Fp64m64 ||
+ MI.getOpcode() == X86::ISTT_Fp16m64 ||
+ MI.getOpcode() == X86::ISTT_Fp32m64 ||
+ MI.getOpcode() == X86::ISTT_Fp64m64 ||
+ MI.getOpcode() == X86::IST_Fp64m80 ||
+ MI.getOpcode() == X86::ISTT_Fp16m80 ||
+ MI.getOpcode() == X86::ISTT_Fp32m80 ||
+ MI.getOpcode() == X86::ISTT_Fp64m80 ||
+ MI.getOpcode() == X86::ST_FpP80m)) {
duplicateToTop(Reg, ScratchFPReg, I);
} else {
moveToTop(Reg, I); // Move to the top of the stack...
}
// Convert from the pseudo instruction to the concrete instruction.
- MI->RemoveOperand(NumOps-1); // Remove explicit ST(0) operand
- MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
-
- if (MI->getOpcode() == X86::IST_FP64m ||
- MI->getOpcode() == X86::ISTT_FP16m ||
- MI->getOpcode() == X86::ISTT_FP32m ||
- MI->getOpcode() == X86::ISTT_FP64m ||
- MI->getOpcode() == X86::ST_FP80m) {
+ MI.RemoveOperand(NumOps - 1); // Remove explicit ST(0) operand
+ MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
+
+ if (MI.getOpcode() == X86::IST_FP64m || MI.getOpcode() == X86::ISTT_FP16m ||
+ MI.getOpcode() == X86::ISTT_FP32m || MI.getOpcode() == X86::ISTT_FP64m ||
+ MI.getOpcode() == X86::ST_FP80m) {
if (StackTop == 0)
report_fatal_error("Stack empty??");
--StackTop;
@@ -1021,15 +1114,15 @@ void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
/// R1 = fadd R2, [mem]
///
void FPS::handleOneArgFPRW(MachineBasicBlock::iterator &I) {
- MachineInstr *MI = I;
+ MachineInstr &MI = *I;
#ifndef NDEBUG
- unsigned NumOps = MI->getDesc().getNumOperands();
+ unsigned NumOps = MI.getDesc().getNumOperands();
assert(NumOps >= 2 && "FPRW instructions must have 2 ops!!");
#endif
// Is this the last use of the source register?
- unsigned Reg = getFPReg(MI->getOperand(1));
- bool KillsSrc = MI->killsRegister(X86::FP0+Reg);
+ unsigned Reg = getFPReg(MI.getOperand(1));
+ bool KillsSrc = MI.killsRegister(X86::FP0 + Reg);
if (KillsSrc) {
// If this is the last use of the source register, just make sure it's on
@@ -1038,17 +1131,17 @@ void FPS::handleOneArgFPRW(MachineBasicBlock::iterator &I) {
if (StackTop == 0)
report_fatal_error("Stack cannot be empty!");
--StackTop;
- pushReg(getFPReg(MI->getOperand(0)));
+ pushReg(getFPReg(MI.getOperand(0)));
} else {
// If this is not the last use of the source register, _copy_ it to the top
// of the stack.
- duplicateToTop(Reg, getFPReg(MI->getOperand(0)), I);
+ duplicateToTop(Reg, getFPReg(MI.getOperand(0)), I);
}
// Change from the pseudo instruction to the concrete instruction.
- MI->RemoveOperand(1); // Drop the source operand.
- MI->RemoveOperand(0); // Drop the destination operand.
- MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
+ MI.RemoveOperand(1); // Drop the source operand.
+ MI.RemoveOperand(0); // Drop the destination operand.
+ MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
}
@@ -1132,16 +1225,16 @@ static const TableEntry ReverseSTiTable[] = {
void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
ASSERT_SORTED(ForwardST0Table); ASSERT_SORTED(ReverseST0Table);
ASSERT_SORTED(ForwardSTiTable); ASSERT_SORTED(ReverseSTiTable);
- MachineInstr *MI = I;
+ MachineInstr &MI = *I;
- unsigned NumOperands = MI->getDesc().getNumOperands();
+ unsigned NumOperands = MI.getDesc().getNumOperands();
assert(NumOperands == 3 && "Illegal TwoArgFP instruction!");
- unsigned Dest = getFPReg(MI->getOperand(0));
- unsigned Op0 = getFPReg(MI->getOperand(NumOperands-2));
- unsigned Op1 = getFPReg(MI->getOperand(NumOperands-1));
- bool KillsOp0 = MI->killsRegister(X86::FP0+Op0);
- bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
- DebugLoc dl = MI->getDebugLoc();
+ unsigned Dest = getFPReg(MI.getOperand(0));
+ unsigned Op0 = getFPReg(MI.getOperand(NumOperands - 2));
+ unsigned Op1 = getFPReg(MI.getOperand(NumOperands - 1));
+ bool KillsOp0 = MI.killsRegister(X86::FP0 + Op0);
+ bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1);
+ DebugLoc dl = MI.getDebugLoc();
unsigned TOS = getStackEntry(0);
@@ -1198,14 +1291,14 @@ void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
InstTable = ReverseSTiTable;
}
- int Opcode = Lookup(InstTable, MI->getOpcode());
+ int Opcode = Lookup(InstTable, MI.getOpcode());
assert(Opcode != -1 && "Unknown TwoArgFP pseudo instruction!");
// NotTOS - The register which is not on the top of stack...
unsigned NotTOS = (TOS == Op0) ? Op1 : Op0;
// Replace the old instruction with a new instruction
- MBB->remove(I++);
+ MBB->remove(&*I++);
I = BuildMI(*MBB, I, dl, TII->get(Opcode)).addReg(getSTReg(NotTOS));
// If both operands are killed, pop one off of the stack in addition to
@@ -1221,7 +1314,7 @@ void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
assert(UpdatedSlot < StackTop && Dest < 7);
Stack[UpdatedSlot] = Dest;
RegMap[Dest] = UpdatedSlot;
- MBB->getParent()->DeleteMachineInstr(MI); // Remove the old instruction
+ MBB->getParent()->DeleteMachineInstr(&MI); // Remove the old instruction
}
/// handleCompareFP - Handle FUCOM and FUCOMI instructions, which have two FP
@@ -1230,23 +1323,23 @@ void FPS::handleTwoArgFP(MachineBasicBlock::iterator &I) {
void FPS::handleCompareFP(MachineBasicBlock::iterator &I) {
ASSERT_SORTED(ForwardST0Table); ASSERT_SORTED(ReverseST0Table);
ASSERT_SORTED(ForwardSTiTable); ASSERT_SORTED(ReverseSTiTable);
- MachineInstr *MI = I;
+ MachineInstr &MI = *I;
- unsigned NumOperands = MI->getDesc().getNumOperands();
+ unsigned NumOperands = MI.getDesc().getNumOperands();
assert(NumOperands == 2 && "Illegal FUCOM* instruction!");
- unsigned Op0 = getFPReg(MI->getOperand(NumOperands-2));
- unsigned Op1 = getFPReg(MI->getOperand(NumOperands-1));
- bool KillsOp0 = MI->killsRegister(X86::FP0+Op0);
- bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
+ unsigned Op0 = getFPReg(MI.getOperand(NumOperands - 2));
+ unsigned Op1 = getFPReg(MI.getOperand(NumOperands - 1));
+ bool KillsOp0 = MI.killsRegister(X86::FP0 + Op0);
+ bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1);
// Make sure the first operand is on the top of stack, the other one can be
// anywhere.
moveToTop(Op0, I);
// Change from the pseudo instruction to the concrete instruction.
- MI->getOperand(0).setReg(getSTReg(Op1));
- MI->RemoveOperand(1);
- MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
+ MI.getOperand(0).setReg(getSTReg(Op1));
+ MI.RemoveOperand(1);
+ MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
// If any of the operands are killed by this instruction, free them.
if (KillsOp0) freeStackSlotAfter(I, Op0);
@@ -1258,21 +1351,21 @@ void FPS::handleCompareFP(MachineBasicBlock::iterator &I) {
/// instructions require that the first operand is at the top of the stack, but
/// otherwise don't modify the stack at all.
void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
- MachineInstr *MI = I;
+ MachineInstr &MI = *I;
- unsigned Op0 = getFPReg(MI->getOperand(0));
- unsigned Op1 = getFPReg(MI->getOperand(2));
- bool KillsOp1 = MI->killsRegister(X86::FP0+Op1);
+ unsigned Op0 = getFPReg(MI.getOperand(0));
+ unsigned Op1 = getFPReg(MI.getOperand(2));
+ bool KillsOp1 = MI.killsRegister(X86::FP0 + Op1);
// The first operand *must* be on the top of the stack.
moveToTop(Op0, I);
// Change the second operand to the stack register that the operand is in.
// Change from the pseudo instruction to the concrete instruction.
- MI->RemoveOperand(0);
- MI->RemoveOperand(1);
- MI->getOperand(0).setReg(getSTReg(Op1));
- MI->setDesc(TII->get(getConcreteOpcode(MI->getOpcode())));
+ MI.RemoveOperand(0);
+ MI.RemoveOperand(1);
+ MI.getOperand(0).setReg(getSTReg(Op1));
+ MI.setDesc(TII->get(getConcreteOpcode(MI.getOpcode())));
// If we kill the second operand, make sure to pop it from the stack.
if (Op0 != Op1 && KillsOp1) {
@@ -1287,20 +1380,25 @@ void FPS::handleCondMovFP(MachineBasicBlock::iterator &I) {
/// instructions.
///
void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
- MachineInstr *MI = Inst;
+ MachineInstr &MI = *Inst;
- if (MI->isCall()) {
+ if (MI.isCall()) {
handleCall(Inst);
return;
}
- switch (MI->getOpcode()) {
+ if (MI.isReturn()) {
+ handleReturn(Inst);
+ return;
+ }
+
+ switch (MI.getOpcode()) {
default: llvm_unreachable("Unknown SpecialFP instruction!");
case TargetOpcode::COPY: {
// We handle three kinds of copies: FP <- FP, FP <- ST, and ST <- FP.
- const MachineOperand &MO1 = MI->getOperand(1);
- const MachineOperand &MO0 = MI->getOperand(0);
- bool KillsSrc = MI->killsRegister(MO1.getReg());
+ const MachineOperand &MO1 = MI.getOperand(1);
+ const MachineOperand &MO0 = MI.getOperand(0);
+ bool KillsSrc = MI.killsRegister(MO1.getReg());
// FP <- FP copy.
unsigned DstFP = getFPReg(MO0);
@@ -1322,9 +1420,9 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
case TargetOpcode::IMPLICIT_DEF: {
// All FP registers must be explicitly defined, so load a 0 instead.
- unsigned Reg = MI->getOperand(0).getReg() - X86::FP0;
+ unsigned Reg = MI.getOperand(0).getReg() - X86::FP0;
DEBUG(dbgs() << "Emitting LD_F0 for implicit FP" << Reg << '\n');
- BuildMI(*MBB, Inst, MI->getDebugLoc(), TII->get(X86::LD_F0));
+ BuildMI(*MBB, Inst, MI.getDebugLoc(), TII->get(X86::LD_F0));
pushReg(Reg);
break;
}
@@ -1368,14 +1466,14 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
SmallSet<unsigned, 1> FRegIdx;
unsigned RCID;
- for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
- i != e && MI->getOperand(i).isImm(); i += 1 + NumOps) {
- unsigned Flags = MI->getOperand(i).getImm();
+ for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI.getNumOperands();
+ i != e && MI.getOperand(i).isImm(); i += 1 + NumOps) {
+ unsigned Flags = MI.getOperand(i).getImm();
NumOps = InlineAsm::getNumOperandRegisters(Flags);
if (NumOps != 1)
continue;
- const MachineOperand &MO = MI->getOperand(i + 1);
+ const MachineOperand &MO = MI.getOperand(i + 1);
if (!MO.isReg())
continue;
unsigned STReg = MO.getReg() - X86::FP0;
@@ -1408,24 +1506,24 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
}
if (STUses && !isMask_32(STUses))
- MI->emitError("fixed input regs must be last on the x87 stack");
+ MI.emitError("fixed input regs must be last on the x87 stack");
unsigned NumSTUses = countTrailingOnes(STUses);
// Defs must be contiguous from the stack top. ST0-STn.
if (STDefs && !isMask_32(STDefs)) {
- MI->emitError("output regs must be last on the x87 stack");
+ MI.emitError("output regs must be last on the x87 stack");
STDefs = NextPowerOf2(STDefs) - 1;
}
unsigned NumSTDefs = countTrailingOnes(STDefs);
// So must the clobbered stack slots. ST0-STm, m >= n.
if (STClobbers && !isMask_32(STDefs | STClobbers))
- MI->emitError("clobbers must be last on the x87 stack");
+ MI.emitError("clobbers must be last on the x87 stack");
// Popped inputs are the ones that are also clobbered or defined.
unsigned STPopped = STUses & (STDefs | STClobbers);
if (STPopped && !isMask_32(STPopped))
- MI->emitError("implicitly popped regs must be last on the x87 stack");
+ MI.emitError("implicitly popped regs must be last on the x87 stack");
unsigned NumSTPopped = countTrailingOnes(STPopped);
DEBUG(dbgs() << "Asm uses " << NumSTUses << " fixed regs, pops "
@@ -1434,9 +1532,9 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
#ifndef NDEBUG
// If any input operand uses constraint "f", all output register
// constraints must be early-clobber defs.
- for (unsigned I = 0, E = MI->getNumOperands(); I < E; ++I)
+ for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I)
if (FRegIdx.count(I)) {
- assert((1 << getFPReg(MI->getOperand(I)) & STDefs) == 0 &&
+ assert((1 << getFPReg(MI.getOperand(I)) & STDefs) == 0 &&
"Operands with constraint \"f\" cannot overlap with defs");
}
#endif
@@ -1444,8 +1542,8 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
// Collect all FP registers (register operands with constraints "t", "u",
// and "f") to kill afer the instruction.
unsigned FPKills = ((1u << NumFPRegs) - 1) & ~0xff;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &Op = MI->getOperand(i);
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &Op = MI.getOperand(i);
if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
continue;
unsigned FPReg = getFPReg(Op);
@@ -1470,8 +1568,8 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
DEBUG({dbgs() << "Before asm: "; dumpStack();});
// With the stack layout fixed, rewrite the FP registers.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &Op = MI->getOperand(i);
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &Op = MI.getOperand(i);
if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
continue;
@@ -1508,94 +1606,6 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) {
// Don't delete the inline asm!
return;
}
-
- case X86::RETQ:
- case X86::RETL:
- case X86::RETIL:
- case X86::RETIQ:
- // If RET has an FP register use operand, pass the first one in ST(0) and
- // the second one in ST(1).
-
- // Find the register operands.
- unsigned FirstFPRegOp = ~0U, SecondFPRegOp = ~0U;
- unsigned LiveMask = 0;
-
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &Op = MI->getOperand(i);
- if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
- continue;
- // FP Register uses must be kills unless there are two uses of the same
- // register, in which case only one will be a kill.
- assert(Op.isUse() &&
- (Op.isKill() || // Marked kill.
- getFPReg(Op) == FirstFPRegOp || // Second instance.
- MI->killsRegister(Op.getReg())) && // Later use is marked kill.
- "Ret only defs operands, and values aren't live beyond it");
-
- if (FirstFPRegOp == ~0U)
- FirstFPRegOp = getFPReg(Op);
- else {
- assert(SecondFPRegOp == ~0U && "More than two fp operands!");
- SecondFPRegOp = getFPReg(Op);
- }
- LiveMask |= (1 << getFPReg(Op));
-
- // Remove the operand so that later passes don't see it.
- MI->RemoveOperand(i);
- --i, --e;
- }
-
- // We may have been carrying spurious live-ins, so make sure only the returned
- // registers are left live.
- adjustLiveRegs(LiveMask, MI);
- if (!LiveMask) return; // Quick check to see if any are possible.
-
- // There are only four possibilities here:
- // 1) we are returning a single FP value. In this case, it has to be in
- // ST(0) already, so just declare success by removing the value from the
- // FP Stack.
- if (SecondFPRegOp == ~0U) {
- // Assert that the top of stack contains the right FP register.
- assert(StackTop == 1 && FirstFPRegOp == getStackEntry(0) &&
- "Top of stack not the right register for RET!");
-
- // Ok, everything is good, mark the value as not being on the stack
- // anymore so that our assertion about the stack being empty at end of
- // block doesn't fire.
- StackTop = 0;
- return;
- }
-
- // Otherwise, we are returning two values:
- // 2) If returning the same value for both, we only have one thing in the FP
- // stack. Consider: RET FP1, FP1
- if (StackTop == 1) {
- assert(FirstFPRegOp == SecondFPRegOp && FirstFPRegOp == getStackEntry(0)&&
- "Stack misconfiguration for RET!");
-
- // Duplicate the TOS so that we return it twice. Just pick some other FPx
- // register to hold it.
- unsigned NewReg = ScratchFPReg;
- duplicateToTop(FirstFPRegOp, NewReg, MI);
- FirstFPRegOp = NewReg;
- }
-
- /// Okay we know we have two different FPx operands now:
- assert(StackTop == 2 && "Must have two values live!");
-
- /// 3) If SecondFPRegOp is currently in ST(0) and FirstFPRegOp is currently
- /// in ST(1). In this case, emit an fxch.
- if (getStackEntry(0) == SecondFPRegOp) {
- assert(getStackEntry(1) == FirstFPRegOp && "Unknown regs live");
- moveToTop(FirstFPRegOp, MI);
- }
-
- /// 4) Finally, FirstFPRegOp must be in ST(0) and SecondFPRegOp must be in
- /// ST(1). Just remove both from our understanding of the stack and return.
- assert(getStackEntry(0) == FirstFPRegOp && "Unknown regs live");
- assert(getStackEntry(1) == SecondFPRegOp && "Unknown regs live");
- StackTop = 0;
- return;
}
Inst = MBB->erase(Inst); // Remove the pseudo instruction
@@ -1614,7 +1624,7 @@ void FPS::setKillFlags(MachineBasicBlock &MBB) const {
MBB.getParent()->getSubtarget().getRegisterInfo();
LivePhysRegs LPR(TRI);
- LPR.addLiveOuts(&MBB);
+ LPR.addLiveOuts(MBB);
for (MachineBasicBlock::reverse_iterator I = MBB.rbegin(), E = MBB.rend();
I != E; ++I) {
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index f5ffe0cf7e88..03d925692adf 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -159,6 +159,8 @@ static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
unsigned Opc = MBBI->getOpcode();
switch (Opc) {
default: return 0;
+ case TargetOpcode::PATCHABLE_RET:
+ case X86::RET:
case X86::RETL:
case X86::RETQ:
case X86::RETIL:
@@ -314,8 +316,8 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
}
MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
- int64_t Offset, bool InEpilogue) const {
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
assert(Offset != 0 && "zero offset stack adjustment requested");
// On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
@@ -374,16 +376,33 @@ int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
unsigned Opc = PI->getOpcode();
int Offset = 0;
+ if (!doMergeWithPrevious && NI != MBB.end() &&
+ NI->getOpcode() == TargetOpcode::CFI_INSTRUCTION) {
+ // Don't merge with the next instruction if it has CFI.
+ return Offset;
+ }
+
if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
- Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
- Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
PI->getOperand(0).getReg() == StackPtr){
+ assert(PI->getOperand(1).getReg() == StackPtr);
Offset += PI->getOperand(2).getImm();
MBB.erase(PI);
if (!doMergeWithPrevious) MBBI = NI;
+ } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
+ PI->getOperand(0).getReg() == StackPtr &&
+ PI->getOperand(1).getReg() == StackPtr &&
+ PI->getOperand(2).getImm() == 1 &&
+ PI->getOperand(3).getReg() == X86::NoRegister &&
+ PI->getOperand(5).getReg() == X86::NoRegister) {
+ // For LEAs we have: def = lea SP, FI, noreg, Offset, noreg.
+ Offset += PI->getOperand(4).getImm();
+ MBB.erase(PI);
+ if (!doMergeWithPrevious) MBBI = NI;
} else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
PI->getOperand(0).getReg() == StackPtr) {
+ assert(PI->getOperand(1).getReg() == StackPtr);
Offset -= PI->getOperand(2).getImm();
MBB.erase(PI);
if (!doMergeWithPrevious) MBBI = NI;
@@ -393,18 +412,18 @@ int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
}
void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL,
- MCCFIInstruction CFIInst) const {
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL,
+ const MCCFIInstruction &CFIInst) const {
MachineFunction &MF = *MBB.getParent();
unsigned CFIIndex = MF.getMMI().addFrameInst(CFIInst);
BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex);
}
-void
-X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- DebugLoc DL) const {
+void X86FrameLowering::emitCalleeSavedFrameMoves(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineModuleInfo &MMI = MF.getMMI();
@@ -429,7 +448,7 @@ X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
MachineInstr *X86FrameLowering::emitStackProbe(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL,
+ const DebugLoc &DL,
bool InProlog) const {
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
if (STI.isTargetWindowsCoreCLR()) {
@@ -457,6 +476,8 @@ void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
}
if (ChkStkStub != nullptr) {
+ assert(!ChkStkStub->isBundled() &&
+ "Not expecting bundled instructions here");
MachineBasicBlock::iterator MBBI = std::next(ChkStkStub->getIterator());
assert(std::prev(MBBI).operator==(ChkStkStub) &&
"MBBI expected after __chkstk_stub.");
@@ -467,8 +488,8 @@ void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
}
MachineInstr *X86FrameLowering::emitStackProbeInline(
- MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
assert(STI.is64Bit() && "different expansion needed for 32 bit");
assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
@@ -679,12 +700,12 @@ MachineInstr *X86FrameLowering::emitStackProbeInline(
// Possible TODO: physreg liveness for InProlog case.
- return ContinueMBBI;
+ return &*ContinueMBBI;
}
MachineInstr *X86FrameLowering::emitStackProbeCall(
MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
unsigned CallOp;
@@ -743,19 +764,19 @@ MachineInstr *X86FrameLowering::emitStackProbeCall(
ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
}
- return MBBI;
+ return &*MBBI;
}
MachineInstr *X86FrameLowering::emitStackProbeInlineStub(
MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL, bool InProlog) const {
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
assert(InProlog && "ChkStkStub called outside prolog!");
BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
.addExternalSymbol("__chkstk_stub");
- return MBBI;
+ return &*MBBI;
}
static unsigned calculateSetFPREG(uint64_t SPAdjust) {
@@ -786,7 +807,7 @@ uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) con
void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL, unsigned Reg,
+ const DebugLoc &DL, unsigned Reg,
uint64_t MaxAlign) const {
uint64_t Val = -MaxAlign;
unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);
@@ -950,6 +971,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
!MF.shouldSplitStack()) { // Regular stack
uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
if (HasFP) MinSize += SlotSize;
+ X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
MFI->setStackSize(StackSize);
}
@@ -1009,7 +1031,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// Callee-saved registers are pushed on stack before the stack is realigned.
if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
- NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);
+ NumBytes = alignTo(NumBytes, MaxAlign);
// Get the offset of the stack slot for the EBP register, which is
// guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
@@ -1130,7 +1152,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// virtual memory manager are allocated in correct sequence.
uint64_t AlignedNumBytes = NumBytes;
if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
- AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
+ AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
// Check whether EAX is livein for this block.
bool isEAXAlive = isEAXLiveIn(MBB);
@@ -1260,7 +1282,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
}
while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
- const MachineInstr *FrameInstr = &*MBBI;
+ const MachineInstr &FrameInstr = *MBBI;
++MBBI;
if (NeedsWinCFI) {
@@ -1360,6 +1382,18 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
if (PushedRegs)
emitCalleeSavedFrameMoves(MBB, MBBI, DL);
}
+
+ // An X86 interrupt handling function cannot assume anything about the
+ // direction flag (DF in the EFLAGS register). Clear this flag by emitting a
+ // "cld" instruction in the prologue of each interrupt handler.
+ //
+ // FIXME: Create "cld" instruction only in these cases:
+ // 1. The interrupt handling function uses any of the "rep" instructions.
+ // 2. Interrupt handling function calls another function.
+ //
+ if (Fn->getCallingConv() == CallingConv::X86_INTR)
+ BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
+ .setMIFlag(MachineInstr::FrameSetup);
}
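For context, X86_INTR is the calling convention produced by Clang's x86 interrupt-handler attribute, so the new cld lands at the top of handlers shaped like the following sketch (the names are illustrative, not from this patch):

    struct interrupt_frame;

    __attribute__((interrupt))
    void isr(struct interrupt_frame *frame) {
      // With the change above, the emitted prologue begins with cld, so any
      // inlined memcpy/memset lowered to rep movs/rep stos can assume DF=0.
    }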
bool X86FrameLowering::canUseLEAForSPInEpilogue(
@@ -1373,8 +1407,8 @@ bool X86FrameLowering::canUseLEAForSPInEpilogue(
return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
}
-static bool isFuncletReturnInstr(MachineInstr *MI) {
- switch (MI->getOpcode()) {
+static bool isFuncletReturnInstr(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
case X86::CATCHRET:
case X86::CLEANUPRET:
return true;
@@ -1400,11 +1434,10 @@ static bool isFuncletReturnInstr(MachineInstr *MI) {
unsigned
X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo();
- // getFrameIndexReferenceFromSP has an out ref parameter for the stack
- // pointer register; pass a dummy that we ignore
unsigned SPReg;
- int Offset = getFrameIndexReferenceFromSP(MF, Info.PSPSymFrameIdx, SPReg);
- assert(Offset >= 0);
+ int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg,
+ /*IgnoreSPUpdates*/ true);
+ assert(Offset >= 0 && SPReg == TRI->getStackRegister());
return static_cast<unsigned>(Offset);
}
@@ -1429,18 +1462,25 @@ X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
// RBP is not included in the callee saved register block. After pushing RBP,
// everything is 16 byte aligned. Everything we allocate before an outgoing
// call must also be 16 byte aligned.
- unsigned FrameSizeMinusRBP =
- RoundUpToAlignment(CSSize + UsedSize, getStackAlignment());
+ unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlignment());
// Subtract out the size of the callee saved registers. This is how much stack
// each funclet will allocate.
return FrameSizeMinusRBP - CSSize;
}
+static bool isTailCallOpcode(unsigned Opc) {
+ return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
+ Opc == X86::TCRETURNmi ||
+ Opc == X86::TCRETURNri64 || Opc == X86::TCRETURNdi64 ||
+ Opc == X86::TCRETURNmi64;
+}
+
void X86FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
+ unsigned RetOpcode = MBBI->getOpcode();
DebugLoc DL;
if (MBBI != MBB.end())
DL = MBBI->getDebugLoc();
@@ -1453,7 +1493,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsWinCFI =
IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();
- bool IsFunclet = isFuncletReturnInstr(MBBI);
+ bool IsFunclet = isFuncletReturnInstr(*MBBI);
MachineBasicBlock *TargetMBB = nullptr;
// Get the number of bytes to allocate from the FrameInfo.
@@ -1490,7 +1530,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// Callee-saved registers were pushed on stack before the stack was
// realigned.
if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
- NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);
+ NumBytes = alignTo(FrameSize, MaxAlign);
// Pop EBP.
BuildMI(MBB, MBBI, DL,
@@ -1589,15 +1629,17 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
if (NeedsWinCFI)
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
- // Add the return addr area delta back since we are not tail calling.
- int Offset = -1 * X86FI->getTCReturnAddrDelta();
- assert(Offset >= 0 && "TCDelta should never be positive");
- if (Offset) {
- MBBI = MBB.getFirstTerminator();
+ if (!isTailCallOpcode(RetOpcode)) {
+ // Add the return addr area delta back since we are not tail calling.
+ int Offset = -1 * X86FI->getTCReturnAddrDelta();
+ assert(Offset >= 0 && "TCDelta should never be positive");
+ if (Offset) {
+ MBBI = MBB.getFirstTerminator();
- // Check for possible merge with preceding ADD instruction.
- Offset += mergeSPUpdates(MBB, MBBI, true);
- emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
+ // Check for possible merge with preceding ADD instruction.
+ Offset += mergeSPUpdates(MBB, MBBI, true);
+ emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
+ }
}
}
@@ -1689,58 +1731,61 @@ int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
return Offset + FPDelta;
}
-// Simplified from getFrameIndexReference keeping only StackPointer cases
-int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
- int FI,
- unsigned &FrameReg) const {
+int
+X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF,
+ int FI, unsigned &FrameReg,
+ bool IgnoreSPUpdates) const {
+
const MachineFrameInfo *MFI = MF.getFrameInfo();
// Does not include any dynamic realign.
const uint64_t StackSize = MFI->getStackSize();
- {
-#ifndef NDEBUG
- // LLVM arranges the stack as follows:
- // ...
- // ARG2
- // ARG1
- // RETADDR
- // PUSH RBP <-- RBP points here
- // PUSH CSRs
- // ~~~~~~~ <-- possible stack realignment (non-win64)
- // ...
- // STACK OBJECTS
- // ... <-- RSP after prologue points here
- // ~~~~~~~ <-- possible stack realignment (win64)
- //
- // if (hasVarSizedObjects()):
- // ... <-- "base pointer" (ESI/RBX) points here
- // DYNAMIC ALLOCAS
- // ... <-- RSP points here
- //
- // Case 1: In the simple case of no stack realignment and no dynamic
- // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
- // with fixed offsets from RSP.
- //
- // Case 2: In the case of stack realignment with no dynamic allocas, fixed
- // stack objects are addressed with RBP and regular stack objects with RSP.
- //
- // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
- // to address stack arguments for outgoing calls and nothing else. The "base
- // pointer" points to local variables, and RBP points to fixed objects.
- //
- // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
- // answer we give is relative to the SP after the prologue, and not the
- // SP in the middle of the function.
-
- assert((!MFI->isFixedObjectIndex(FI) || !TRI->needsStackRealignment(MF) ||
- STI.isTargetWin64()) &&
- "offset from fixed object to SP is not static");
-
- // We don't handle tail calls, and shouldn't be seeing them either.
- int TailCallReturnAddrDelta =
- MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();
- assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");
-#endif
- }
+ // LLVM arranges the stack as follows:
+ // ...
+ // ARG2
+ // ARG1
+ // RETADDR
+ // PUSH RBP <-- RBP points here
+ // PUSH CSRs
+ // ~~~~~~~ <-- possible stack realignment (non-win64)
+ // ...
+ // STACK OBJECTS
+ // ... <-- RSP after prologue points here
+ // ~~~~~~~ <-- possible stack realignment (win64)
+ //
+ // if (hasVarSizedObjects()):
+ // ... <-- "base pointer" (ESI/RBX) points here
+ // DYNAMIC ALLOCAS
+ // ... <-- RSP points here
+ //
+ // Case 1: In the simple case of no stack realignment and no dynamic
+ // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
+ // with fixed offsets from RSP.
+ //
+ // Case 2: In the case of stack realignment with no dynamic allocas, fixed
+ // stack objects are addressed with RBP and regular stack objects with RSP.
+ //
+ // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
+ // to address stack arguments for outgoing calls and nothing else. The "base
+ // pointer" points to local variables, and RBP points to fixed objects.
+ //
+ // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
+ // answer we give is relative to the SP after the prologue, and not the
+ // SP in the middle of the function.
+
+ if (MFI->isFixedObjectIndex(FI) && TRI->needsStackRealignment(MF) &&
+ !STI.isTargetWin64())
+ return getFrameIndexReference(MF, FI, FrameReg);
+
+ // If !hasReservedCallFrame, the function might have an SP adjustment in the
+ // body. So, even though the offset is statically known, it depends on where
+ // we are in the function.
+ const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
+ if (!IgnoreSPUpdates && !TFI->hasReservedCallFrame(MF))
+ return getFrameIndexReference(MF, FI, FrameReg);
+
+ // We don't handle tail calls, and shouldn't be seeing them either.
+ assert(MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta() >= 0 &&
+ "we don't handle this case!");
// Fill in FrameReg output argument.
FrameReg = TRI->getStackRegister();
@@ -1851,16 +1896,37 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
return true;
// Push GPRs. It increases frame size.
+ const MachineFunction &MF = *MBB.getParent();
unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
for (unsigned i = CSI.size(); i != 0; --i) {
unsigned Reg = CSI[i - 1].getReg();
if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
continue;
- // Add the callee-saved register as live-in. It's killed at the spill.
- MBB.addLiveIn(Reg);
- BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ bool isLiveIn = MRI.isLiveIn(Reg);
+ if (!isLiveIn)
+ MBB.addLiveIn(Reg);
+
+ // Decide whether we can add a kill flag to the use.
+ bool CanKill = !isLiveIn;
+ // Check if any subregister is live-in
+ if (CanKill) {
+ for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg) {
+ if (MRI.isLiveIn(*AReg)) {
+ CanKill = false;
+ break;
+ }
+ }
+ }
+
+ // Do not set a kill flag on values that are also marked as live-in. This
+ // happens with the @llvm.returnaddress intrinsic and with arguments
+ // passed in callee-saved registers.
+ // Omitting the kill flags is conservatively correct even if the live-in
+ // is not used after all.
+ BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, getKillRegState(CanKill))
.setMIFlag(MachineInstr::FrameSetup);
}
@@ -1891,7 +1957,7 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
if (CSI.empty())
return false;
- if (isFuncletReturnInstr(MI) && STI.isOSWindows()) {
+ if (isFuncletReturnInstr(*MI) && STI.isOSWindows()) {
// Don't restore CSRs in 32-bit EH funclets. Matches
// spillCalleeSavedRegisters.
if (STI.is32Bit())
@@ -2250,11 +2316,33 @@ void X86FrameLowering::adjustForSegmentedStacks(
checkMBB->addSuccessor(allocMBB);
checkMBB->addSuccessor(&PrologueMBB);
-#ifdef XDEBUG
+#ifdef EXPENSIVE_CHECKS
MF.verify();
#endif
}
+/// Lookup an ERTS parameter in the !hipe.literals named metadata node.
+/// HiPE provides Erlang Runtime System-internal parameters, such as PCB offsets
+/// to fields it needs, through a named metadata node "hipe.literals" containing
+/// name-value pairs.
+static unsigned getHiPELiteral(
+ NamedMDNode *HiPELiteralsMD, const StringRef LiteralName) {
+ for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {
+ MDNode *Node = HiPELiteralsMD->getOperand(i);
+ if (Node->getNumOperands() != 2) continue;
+ MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0));
+ ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1));
+ if (!NodeName || !NodeVal) continue;
+ ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue());
+ if (ValConst && NodeName->getString() == LiteralName) {
+ return ValConst->getZExtValue();
+ }
+ }
+
+ report_fatal_error("HiPE literal " + LiteralName
+ + " required but not provided");
+}
+
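For reference, a sketch of how a front end could populate this metadata through the C++ IR API; the two values mirror the 64-bit defaults this patch removes (24 leaf words, 0x90 for P_NSP_LIMIT), so treat the snippet as illustrative rather than HiPE's actual emission code:

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Metadata.h"
    #include "llvm/IR/Module.h"

    static void addHiPELiterals(llvm::Module &M) {
      llvm::LLVMContext &C = M.getContext();
      llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("hipe.literals");
      auto AddLiteral = [&](llvm::StringRef Name, uint64_t Value) {
        llvm::Metadata *Ops[] = {
            llvm::MDString::get(C, Name),
            llvm::ConstantAsMetadata::get(
                llvm::ConstantInt::get(llvm::Type::getInt32Ty(C), Value))};
        // Each operand is a (name, value) pair, as getHiPELiteral expects.
        MD->addOperand(llvm::MDNode::get(C, Ops));
      };
      AddLiteral("AMD64_LEAF_WORDS", 24);  // was the hard-coded HipeLeafWords
      AddLiteral("P_NSP_LIMIT", 0x90);     // was the hard-coded 64-bit offset
    }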
/// Erlang programs may need a special prologue to handle the stack size they
/// might need at runtime. That is because Erlang/OTP does not implement a C
/// stack but uses a custom implementation of hybrid stack/heap architecture.
@@ -2280,7 +2368,14 @@ void X86FrameLowering::adjustForHiPEPrologue(
assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
// HiPE-specific values
- const unsigned HipeLeafWords = 24;
+ NamedMDNode *HiPELiteralsMD = MF.getMMI().getModule()
+ ->getNamedMetadata("hipe.literals");
+ if (!HiPELiteralsMD)
+ report_fatal_error(
+ "Can't generate HiPE prologue without runtime parameters");
+ const unsigned HipeLeafWords
+ = getHiPELiteral(HiPELiteralsMD,
+ Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
const unsigned Guaranteed = HipeLeafWords * SlotSize;
unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
@@ -2300,15 +2395,13 @@ void X86FrameLowering::adjustForHiPEPrologue(
if (MFI->hasCalls()) {
unsigned MoreStackForCalls = 0;
- for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
- MBBI != MBBE; ++MBBI)
- for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
- MI != ME; ++MI) {
- if (!MI->isCall())
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ if (!MI.isCall())
continue;
// Get callee operand.
- const MachineOperand &MO = MI->getOperand(0);
+ const MachineOperand &MO = MI.getOperand(0);
// Only take account of global function calls (no closures etc.).
if (!MO.isGlobal())
@@ -2334,6 +2427,7 @@ void X86FrameLowering::adjustForHiPEPrologue(
MoreStackForCalls = std::max(MoreStackForCalls,
(HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
}
+ }
MaxStack += MoreStackForCalls;
}
@@ -2353,20 +2447,19 @@ void X86FrameLowering::adjustForHiPEPrologue(
unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
unsigned LEAop, CMPop, CALLop;
+ SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
if (Is64Bit) {
SPReg = X86::RSP;
PReg = X86::RBP;
LEAop = X86::LEA64r;
CMPop = X86::CMP64rm;
CALLop = X86::CALL64pcrel32;
- SPLimitOffset = 0x90;
} else {
SPReg = X86::ESP;
PReg = X86::EBP;
LEAop = X86::LEA32r;
CMPop = X86::CMP32rm;
CALLop = X86::CALLpcrel32;
- SPLimitOffset = 0x4c;
}
ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
@@ -2395,13 +2488,15 @@ void X86FrameLowering::adjustForHiPEPrologue(
incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
incStackMBB->addSuccessor(incStackMBB, {1, 100});
}
-#ifdef XDEBUG
+#ifdef EXPENSIVE_CHECKS
MF.verify();
#endif
}
bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL, int Offset) const {
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL,
+ int Offset) const {
if (Offset <= 0)
return false;
@@ -2440,7 +2535,8 @@ bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
bool IsDef = false;
for (const MachineOperand &MO : Prev->implicit_operands()) {
- if (MO.isReg() && MO.isDef() && MO.getReg() == Candidate) {
+ if (MO.isReg() && MO.isDef() &&
+ TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
IsDef = true;
break;
}
@@ -2468,7 +2564,7 @@ bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
return true;
}
-void X86FrameLowering::
+MachineBasicBlock::iterator X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
bool reserveCallFrame = hasReservedCallFrame(MF);
@@ -2488,7 +2584,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
unsigned StackAlign = getStackAlignment();
- Amount = RoundUpToAlignment(Amount, StackAlign);
+ Amount = alignTo(Amount, StackAlign);
MachineModuleInfo &MMI = MF.getMMI();
const Function *Fn = MF.getFunction();
@@ -2512,7 +2608,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
if (Amount == 0)
- return;
+ return I;
// Factor out the amount that gets handled inside the sequence
// (Pushes of argument for frame setup, callee pops for frame destroy)
@@ -2525,13 +2621,23 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
BuildCFI(MBB, I, DL,
MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));
- if (Amount) {
- // Add Amount to SP to destroy a frame, and subtract to setup.
- int Offset = isDestroy ? Amount : -Amount;
-
- if (!(Fn->optForMinSize() &&
- adjustStackWithPops(MBB, I, DL, Offset)))
- BuildStackAdjustment(MBB, I, DL, Offset, /*InEpilogue=*/false);
+ // Add Amount to SP to destroy a frame, or subtract to setup.
+ int64_t StackAdjustment = isDestroy ? Amount : -Amount;
+ int64_t CfaAdjustment = -StackAdjustment;
+
+ if (StackAdjustment) {
+ // Merge with any previous or following adjustment instruction. Note: the
+ // instructions merged with here do not have CFI, so their stack
+ // adjustments do not feed into CfaAdjustment.
+ StackAdjustment += mergeSPUpdates(MBB, I, true);
+ StackAdjustment += mergeSPUpdates(MBB, I, false);
+
+ if (StackAdjustment) {
+ if (!(Fn->optForMinSize() &&
+ adjustStackWithPops(MBB, I, DL, StackAdjustment)))
+ BuildStackAdjustment(MBB, I, DL, StackAdjustment,
+ /*InEpilogue=*/false);
+ }
}
if (DwarfCFI && !hasFP(MF)) {
@@ -2541,18 +2647,16 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// CFI only for EH purposes or for debugging. EH only requires the CFA
// offset to be correct at each call site, while for debugging we want
// it to be more precise.
- int CFAOffset = Amount;
+
// TODO: When not using precise CFA, we also need to adjust for the
// InternalAmt here.
-
- if (CFAOffset) {
- CFAOffset = isDestroy ? -CFAOffset : CFAOffset;
- BuildCFI(MBB, I, DL,
- MCCFIInstruction::createAdjustCfaOffset(nullptr, CFAOffset));
+ if (CfaAdjustment) {
+ BuildCFI(MBB, I, DL, MCCFIInstruction::createAdjustCfaOffset(
+ nullptr, CfaAdjustment));
}
}
- return;
+ return I;
}
if (isDestroy && InternalAmt) {
@@ -2562,11 +2666,20 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// We are not tracking the stack pointer adjustment by the callee, so make
// sure we restore the stack pointer immediately after the call, there may
// be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
+ MachineBasicBlock::iterator CI = I;
MachineBasicBlock::iterator B = MBB.begin();
- while (I != B && !std::prev(I)->isCall())
- --I;
- BuildStackAdjustment(MBB, I, DL, -InternalAmt, /*InEpilogue=*/false);
+ while (CI != B && !std::prev(CI)->isCall())
+ --CI;
+ BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false);
}
+
+ return I;
+}
+
+bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
+ assert(MBB.getParent() && "Block is not attached to a function!");
+ const MachineFunction &MF = *MBB.getParent();
+ return !TRI->needsStackRealignment(MF) || !MBB.isLiveIn(X86::EFLAGS);
}
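The EFLAGS live-in test is there because realignment goes through BuildStackAlignAND (see the prologue code above), and the AND it emits defines EFLAGS; planting the prologue in a block where EFLAGS is live-in would clobber a value that block still needs, so such blocks are rejected as prologue candidates.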
bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
@@ -2604,7 +2717,7 @@ bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
- DebugLoc DL, bool RestoreSP) const {
+ const DebugLoc &DL, bool RestoreSP) const {
assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
assert(STI.is32Bit() && !Uses64BitFramePtr &&
@@ -2664,6 +2777,150 @@ MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
return MBBI;
}
+namespace {
+// Struct used by orderFrameObjects to help sort the stack objects.
+struct X86FrameSortingObject {
+ bool IsValid = false; // true if we care about this Object.
+ unsigned ObjectIndex = 0; // Index of Object into MFI list.
+ unsigned ObjectSize = 0; // Size of Object in bytes.
+ unsigned ObjectAlignment = 1; // Alignment of Object in bytes.
+ unsigned ObjectNumUses = 0; // Object static number of uses.
+};
+
+// The comparison function we use for std::sort to order our local
+// stack symbols. The current algorithm is to use an estimated
+// "density". This takes into consideration the size and number of
+// uses each object has in order to roughly minimize code size.
+// So, for example, an object of size 16B that is referenced 5 times
+// will get higher priority than 4 4B objects referenced 1 time each.
+// It's not perfect and we may be able to squeeze a few more bytes out of
+// it (for example: 0(esp) requires fewer bytes, symbols allocated at the
+// fringe end can have special consideration, given their size is less
+// important, etc.), but the algorithmic complexity grows too much to be
+// worth the extra gains we get. This gets us pretty close.
+// The final order leaves us with objects with highest priority going
+// at the end of our list.
+struct X86FrameSortingComparator {
+ inline bool operator()(const X86FrameSortingObject &A,
+ const X86FrameSortingObject &B) {
+ uint64_t DensityAScaled, DensityBScaled;
+
+ // For consistency in our comparison, all invalid objects are placed
+ // at the end. This also allows us to stop walking when we hit the
+ // first invalid item after it's all sorted.
+ if (!A.IsValid)
+ return false;
+ if (!B.IsValid)
+ return true;
+
+ // The density is calculated by doing:
+ // (double)DensityA = A.ObjectNumUses / A.ObjectSize
+ // (double)DensityB = B.ObjectNumUses / B.ObjectSize
+ // Since this approach may cause inconsistencies in
+ // the floating point <, >, == comparisons, depending on the floating
+ // point model with which the compiler was built, we're going
+ // to scale both sides by multiplying with
+ // A.ObjectSize * B.ObjectSize. This ends up factoring away
+ // the division and, with it, the need for any floating point
+ // arithmetic.
+ DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
+ static_cast<uint64_t>(B.ObjectSize);
+ DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
+ static_cast<uint64_t>(A.ObjectSize);
+
+ // If the two densities are equal, prioritize highest alignment
+ // objects. This allows for similar alignment objects
+ // to be packed together (given the same density).
+ // There's room for improvement here, also, since we can pack
+ // similar alignment (different density) objects next to each
+ // other to save padding. This will also require further
+ // complexity/iterations, and the overall gain isn't worth it,
+ // in general. Something to keep in mind, though.
+ if (DensityAScaled == DensityBScaled)
+ return A.ObjectAlignment < B.ObjectAlignment;
+
+ return DensityAScaled < DensityBScaled;
+ }
+};
+} // namespace
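A quick worked check of the scaled comparison, with assumed numbers:

    X86FrameSortingComparator Cmp;
    X86FrameSortingObject A, B;              // hypothetical objects
    A.IsValid = B.IsValid = true;
    A.ObjectNumUses = 5; A.ObjectSize = 16;  // true density 0.3125
    B.ObjectNumUses = 1; B.ObjectSize = 4;   // true density 0.25
    // Cmp(B, A) compares 1*16 = 16 against 5*4 = 20, so it returns true:
    // B sorts before A, and the denser A lands nearer the end of the
    // list, i.e. at higher allocation priority.

The scaling reproduces exactly the ordering of NumUses/Size without ever dividing.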
+
+// Order the symbols in the local stack.
+// We want to place the local stack objects in some sort of sensible order.
+// The heuristic we use is to try and pack them according to static number
+// of uses and size of object in order to minimize code size.
+void X86FrameLowering::orderFrameObjects(
+ const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ // Don't waste time if there's nothing to do.
+ if (ObjectsToAllocate.empty())
+ return;
+
+ // Create an array of all MFI objects. We won't need all of these
+ // objects, but we're going to create a full array of them to make
+ // it easier to index into when we're counting "uses" down below.
+ // We want to be able to easily/cheaply access an object by simply
+ // indexing into it, instead of having to search for it every time.
+ std::vector<X86FrameSortingObject> SortingObjects(MFI->getObjectIndexEnd());
+
+ // Walk the objects we care about and mark them as such in our working
+ // struct.
+ for (auto &Obj : ObjectsToAllocate) {
+ SortingObjects[Obj].IsValid = true;
+ SortingObjects[Obj].ObjectIndex = Obj;
+ SortingObjects[Obj].ObjectAlignment = MFI->getObjectAlignment(Obj);
+ // Set the size.
+ int ObjectSize = MFI->getObjectSize(Obj);
+ if (ObjectSize == 0)
+ // Variable size. Just use 4.
+ SortingObjects[Obj].ObjectSize = 4;
+ else
+ SortingObjects[Obj].ObjectSize = ObjectSize;
+ }
+
+ // Count the number of uses for each object.
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ if (MI.isDebugValue())
+ continue;
+ for (const MachineOperand &MO : MI.operands()) {
+ // Check to see if it's a local stack symbol.
+ if (!MO.isFI())
+ continue;
+ int Index = MO.getIndex();
+ // Check to see if it falls within our range, and is tagged
+ // to require ordering.
+ if (Index >= 0 && Index < MFI->getObjectIndexEnd() &&
+ SortingObjects[Index].IsValid)
+ SortingObjects[Index].ObjectNumUses++;
+ }
+ }
+ }
+
+ // Sort the objects using the X86FrameSortingComparator (see its comment
+ // for info).
+ std::stable_sort(SortingObjects.begin(), SortingObjects.end(),
+ X86FrameSortingComparator());
+
+ // Now modify the original list to represent the final order that
+ // we want. The order will depend on whether we're going to access them
+ // from the stack pointer or the frame pointer. For SP, the objects we
+ // want at the smallest offsets should end up at the end of the list.
+ // For FP, the order should be flipped.
+ int i = 0;
+ for (auto &Obj : SortingObjects) {
+ // All invalid items are sorted at the end, so it's safe to stop.
+ if (!Obj.IsValid)
+ break;
+ ObjectsToAllocate[i++] = Obj.ObjectIndex;
+ }
+
+ // Flip it if we're accessing off of the FP.
+ if (!TRI->needsStackRealignment(MF) && hasFP(MF))
+ std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
+}
+
+
unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
// RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
unsigned Offset = 16;
@@ -2691,14 +2948,30 @@ void X86FrameLowering::processFunctionBeforeFrameFinalized(
// were no fixed objects, use offset -SlotSize, which is immediately after the
// return address. Fixed objects have negative frame indices.
MachineFrameInfo *MFI = MF.getFrameInfo();
+ WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
int64_t MinFixedObjOffset = -SlotSize;
for (int I = MFI->getObjectIndexBegin(); I < 0; ++I)
MinFixedObjOffset = std::min(MinFixedObjOffset, MFI->getObjectOffset(I));
+ for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
+ for (WinEHHandlerType &H : TBME.HandlerArray) {
+ int FrameIndex = H.CatchObj.FrameIndex;
+ if (FrameIndex != INT_MAX) {
+ // Ensure alignment.
+ unsigned Align = MFI->getObjectAlignment(FrameIndex);
+ MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
+ MinFixedObjOffset -= MFI->getObjectSize(FrameIndex);
+ MFI->setObjectOffset(FrameIndex, MinFixedObjOffset);
+ }
+ }
+ }
+
+ // Ensure alignment.
+ MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
int UnwindHelpFI =
MFI->CreateFixedObject(SlotSize, UnwindHelpOffset, /*Immutable=*/false);
- MF.getWinEHFuncInfo()->UnwindHelpFrameIdx = UnwindHelpFI;
+ EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;
// Store -2 into UnwindHelp on function entry. We have to scan forwards past
// other frame setup instructions.
diff --git a/lib/Target/X86/X86FrameLowering.h b/lib/Target/X86/X86FrameLowering.h
index 3ab41b4a5789..4a01014ee545 100644
--- a/lib/Target/X86/X86FrameLowering.h
+++ b/lib/Target/X86/X86FrameLowering.h
@@ -52,8 +52,8 @@ public:
/// the number of bytes to probe in RAX/EAX. Returns instruction just
/// after the expansion.
MachineInstr *emitStackProbe(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL,
- bool InProlog) const;
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, bool InProlog) const;
/// Replace a StackProbe inline-stub with the actual probe code inline.
void inlineStackProbe(MachineFunction &MF,
@@ -61,7 +61,7 @@ public:
void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL) const;
+ const DebugLoc &DL) const;
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
@@ -100,12 +100,13 @@ public:
int getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const override;
- int getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI,
- unsigned &FrameReg) const override;
+ int getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI,
+ unsigned &FrameReg,
+ bool IgnoreSPUpdates) const override;
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const override;
+ MachineBasicBlock::iterator
+ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override;
unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override;
@@ -127,6 +128,16 @@ public:
/// Check that LEA can be used on SP in an epilogue sequence for \p MF.
bool canUseLEAForSPInEpilogue(const MachineFunction &MF) const;
+ /// Check whether or not the given \p MBB can be used as a prologue
+ /// for the target.
+ /// The prologue will be inserted first in this basic block.
+ /// This method is used by the shrink-wrapping pass to decide if
+ /// \p MBB will be correctly handled by the target.
+ /// As soon as the target enables shrink-wrapping without overriding
+ /// this method, we assume that each basic block is a valid
+ /// prologue.
+ bool canUseAsPrologue(const MachineBasicBlock &MBB) const override;
+
/// Check whether or not the given \p MBB can be used as an epilogue
/// for the target.
/// The epilogue will be inserted before the first terminator of that block.
@@ -137,6 +148,13 @@ public:
/// Returns true if the target will correctly handle shrink wrapping.
bool enableShrinkWrapping(const MachineFunction &MF) const override;
+ /// Order the symbols in the local stack.
+ /// We want to place the local stack objects in some sort of sensible order.
+ /// The heuristic we use is to try and pack them according to static number
+ /// of uses and size in order to minimize code size.
+ void orderFrameObjects(const MachineFunction &MF,
+ SmallVectorImpl<int> &ObjectsToAllocate) const override;
+
/// convertArgMovsToPushes - This method tries to convert a call sequence
/// that uses sub and mov instructions to put the argument onto the stack
/// into a series of pushes.
@@ -148,14 +166,14 @@ public:
/// Wraps up getting a CFI index and building a MachineInstr for it.
void BuildCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
- DebugLoc DL, MCCFIInstruction CFIInst) const;
+ const DebugLoc &DL, const MCCFIInstruction &CFIInst) const;
/// Sets up EBP and optionally ESI based on the incoming EBP value. Only
/// needed for 32-bit. Used in funclet prologues and at catchret destinations.
MachineBasicBlock::iterator
restoreWin32EHStackPointers(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL,
- bool RestoreSP = false) const;
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &DL, bool RestoreSP = false) const;
private:
uint64_t calculateMaxStackAlign(const MachineFunction &MF) const;
@@ -163,34 +181,35 @@ private:
/// Emit target stack probe as a call to a helper function
MachineInstr *emitStackProbeCall(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL, bool InProlog) const;
+ const DebugLoc &DL, bool InProlog) const;
/// Emit target stack probe as an inline sequence.
MachineInstr *emitStackProbeInline(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL, bool InProlog) const;
+ const DebugLoc &DL, bool InProlog) const;
/// Emit a stub to later inline the target stack probe.
MachineInstr *emitStackProbeInlineStub(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL, bool InProlog) const;
+ const DebugLoc &DL,
+ bool InProlog) const;
/// Aligns the stack pointer by ANDing it with -MaxAlign.
void BuildStackAlignAND(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL,
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
unsigned Reg, uint64_t MaxAlign) const;
/// Make small positive stack adjustments using POPs.
bool adjustStackWithPops(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL,
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
int Offset) const;
/// Adjusts the stack pointer using LEA, SUB, or ADD.
MachineInstrBuilder BuildStackAdjustment(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL, int64_t Offset,
+ const DebugLoc &DL, int64_t Offset,
bool InEpilogue) const;
unsigned getPSPSlotOffsetFromSP(const MachineFunction &MF) const;
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index 868ae4e19e55..7d53b3db6175 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -157,9 +157,13 @@ namespace {
/// performance.
bool OptForSize;
+ /// If true, selector should try to optimize for minimum code size.
+ bool OptForMinSize;
+
public:
explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
- : SelectionDAGISel(tm, OptLevel), OptForSize(false) {}
+ : SelectionDAGISel(tm, OptLevel), OptForSize(false),
+ OptForMinSize(false) {}
const char *getPassName() const override {
return "X86 DAG->DAG Instruction Selection";
@@ -192,9 +196,8 @@ namespace {
#include "X86GenDAGISel.inc"
private:
- SDNode *Select(SDNode *N) override;
- SDNode *selectGather(SDNode *N, unsigned Opc);
- SDNode *selectAtomicLoadArith(SDNode *Node, MVT NVT);
+ void Select(SDNode *N) override;
+ bool tryGather(SDNode *N, unsigned Opc);
bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
@@ -238,7 +241,7 @@ namespace {
void emitSpecialCodeForMain();
- inline void getAddressOperands(X86ISelAddressMode &AM, SDLoc DL,
+ inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
SDValue &Base, SDValue &Scale,
SDValue &Index, SDValue &Disp,
SDValue &Segment) {
@@ -323,7 +326,7 @@ namespace {
// types.
if (User->getNumOperands() != 2)
continue;
-
+
// Immediates that are used for offsets as part of stack
// manipulation should be left alone. These are typically
// used to indicate SP offsets for argument passing and
@@ -357,12 +360,12 @@ namespace {
}
/// Return a target constant with the specified value of type i8.
- inline SDValue getI8Imm(unsigned Imm, SDLoc DL) {
+ inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
}
/// Return a target constant with the specified value, of type i32.
- inline SDValue getI32Imm(unsigned Imm, SDLoc DL) {
+ inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
}
@@ -531,8 +534,10 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
}
void X86DAGToDAGISel::PreprocessISelDAG() {
- // OptForSize is used in pattern predicates that isel is matching.
+ // OptFor[Min]Size are used in pattern predicates that isel is matching.
OptForSize = MF->getFunction()->optForSize();
+ OptForMinSize = MF->getFunction()->optForMinSize();
+ assert((!OptForMinSize || OptForSize) && "OptForMinSize implies OptForSize");
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
E = CurDAG->allnodes_end(); I != E; ) {
@@ -545,7 +550,7 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
(N->getOpcode() == X86ISD::TC_RETURN &&
// Only does this if load can be folded into TC_RETURN.
(Subtarget->is64Bit() ||
- getTargetMachine().getRelocationModel() != Reloc::PIC_)))) {
+ !getTargetMachine().isPositionIndependent())))) {
/// Also try moving call address load from outside callseq_start to just
/// before the call to allow it to be folded.
///
@@ -624,13 +629,11 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
SDLoc dl(N);
// FIXME: optimize the case where the src/dest is a load or store?
- SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
- N->getOperand(0),
- MemTmp, MachinePointerInfo(), MemVT,
- false, false, 0);
+ SDValue Store =
+ CurDAG->getTruncStore(CurDAG->getEntryNode(), dl, N->getOperand(0),
+ MemTmp, MachinePointerInfo(), MemVT);
SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
- MachinePointerInfo(),
- MemVT, false, false, false, 0);
+ MachinePointerInfo(), MemVT);
// We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
// extload we created. This will cause general havok on the dag because
@@ -657,7 +660,7 @@ void X86DAGToDAGISel::emitSpecialCodeForMain() {
CLI.setChain(CurDAG->getRoot())
.setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
- std::move(Args), 0);
+ std::move(Args));
const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
CurDAG->setRoot(Result.second);
@@ -714,7 +717,7 @@ bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
// For more information see http://people.redhat.com/drepper/tls.pdf
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
- Subtarget->isTargetLinux())
+ Subtarget->isTargetGlibc())
switch (N->getPointerInfo().getAddrSpace()) {
case 256:
AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
@@ -722,6 +725,8 @@ bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
case 257:
AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
return false;
+ // Address space 258 is not handled here, because it is not used to
+ // address TLS areas.
}
return true;
@@ -1419,11 +1424,13 @@ bool X86DAGToDAGISel::selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
return false;
X86ISelAddressMode AM;
unsigned AddrSpace = Mgs->getPointerInfo().getAddrSpace();
- // AddrSpace 256 -> GS, 257 -> FS.
+ // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
if (AddrSpace == 256)
AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
if (AddrSpace == 257)
AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
+ if (AddrSpace == 258)
+ AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
SDLoc DL(N);
Base = Mgs->getBasePtr();
@@ -1468,11 +1475,13 @@ bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
unsigned AddrSpace =
cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
- // AddrSpace 256 -> GS, 257 -> FS.
+ // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
if (AddrSpace == 256)
AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
if (AddrSpace == 257)
AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
+ if (AddrSpace == 258)
+ AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
}
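These numbers match the segment address spaces Clang exposes on x86 (256 for GS, 257 for FS, and now 258 for SS); a hedged illustration of how source code reaches this path:

    // Illustrative only: a %gs-relative load via address space 256.
    typedef __attribute__((address_space(256))) int gs_int;
    int load_gs_slot(gs_int *p) { return *p; }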
if (matchAddress(N, AM))
@@ -1569,10 +1578,12 @@ bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index,
SDValue &Disp, SDValue &Segment) {
+ // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
+ SDLoc DL(N);
+
if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
return false;
- SDLoc DL(N);
RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
if (RN && RN->getReg() == 0)
Base = CurDAG->getRegister(0, MVT::i64);
@@ -1612,6 +1623,10 @@ bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
SDValue &Segment) {
X86ISelAddressMode AM;
+ // Save the DL and VT before calling matchAddress, as it can invalidate N.
+ SDLoc DL(N);
+ MVT VT = N.getSimpleValueType();
+
// Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
// segments.
SDValue Copy = AM.Segment;
@@ -1622,7 +1637,6 @@ bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
assert (T == AM.Segment);
AM.Segment = Copy;
- MVT VT = N.getSimpleValueType();
unsigned Complexity = 0;
if (AM.BaseType == X86ISelAddressMode::RegBase)
if (AM.Base_Reg.getNode())
@@ -1662,7 +1676,7 @@ bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
if (Complexity <= 2)
return false;
- getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
+ getAddressOperands(AM, DL, Base, Scale, Index, Disp, Segment);
return true;
}
@@ -1713,295 +1727,6 @@ SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
}
-/// Atomic opcode table
-///
-enum AtomicOpc {
- ADD,
- SUB,
- INC,
- DEC,
- OR,
- AND,
- XOR,
- AtomicOpcEnd
-};
-
-enum AtomicSz {
- ConstantI8,
- I8,
- SextConstantI16,
- ConstantI16,
- I16,
- SextConstantI32,
- ConstantI32,
- I32,
- SextConstantI64,
- ConstantI64,
- I64,
- AtomicSzEnd
-};
-
-static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
- {
- X86::LOCK_ADD8mi,
- X86::LOCK_ADD8mr,
- X86::LOCK_ADD16mi8,
- X86::LOCK_ADD16mi,
- X86::LOCK_ADD16mr,
- X86::LOCK_ADD32mi8,
- X86::LOCK_ADD32mi,
- X86::LOCK_ADD32mr,
- X86::LOCK_ADD64mi8,
- X86::LOCK_ADD64mi32,
- X86::LOCK_ADD64mr,
- },
- {
- X86::LOCK_SUB8mi,
- X86::LOCK_SUB8mr,
- X86::LOCK_SUB16mi8,
- X86::LOCK_SUB16mi,
- X86::LOCK_SUB16mr,
- X86::LOCK_SUB32mi8,
- X86::LOCK_SUB32mi,
- X86::LOCK_SUB32mr,
- X86::LOCK_SUB64mi8,
- X86::LOCK_SUB64mi32,
- X86::LOCK_SUB64mr,
- },
- {
- 0,
- X86::LOCK_INC8m,
- 0,
- 0,
- X86::LOCK_INC16m,
- 0,
- 0,
- X86::LOCK_INC32m,
- 0,
- 0,
- X86::LOCK_INC64m,
- },
- {
- 0,
- X86::LOCK_DEC8m,
- 0,
- 0,
- X86::LOCK_DEC16m,
- 0,
- 0,
- X86::LOCK_DEC32m,
- 0,
- 0,
- X86::LOCK_DEC64m,
- },
- {
- X86::LOCK_OR8mi,
- X86::LOCK_OR8mr,
- X86::LOCK_OR16mi8,
- X86::LOCK_OR16mi,
- X86::LOCK_OR16mr,
- X86::LOCK_OR32mi8,
- X86::LOCK_OR32mi,
- X86::LOCK_OR32mr,
- X86::LOCK_OR64mi8,
- X86::LOCK_OR64mi32,
- X86::LOCK_OR64mr,
- },
- {
- X86::LOCK_AND8mi,
- X86::LOCK_AND8mr,
- X86::LOCK_AND16mi8,
- X86::LOCK_AND16mi,
- X86::LOCK_AND16mr,
- X86::LOCK_AND32mi8,
- X86::LOCK_AND32mi,
- X86::LOCK_AND32mr,
- X86::LOCK_AND64mi8,
- X86::LOCK_AND64mi32,
- X86::LOCK_AND64mr,
- },
- {
- X86::LOCK_XOR8mi,
- X86::LOCK_XOR8mr,
- X86::LOCK_XOR16mi8,
- X86::LOCK_XOR16mi,
- X86::LOCK_XOR16mr,
- X86::LOCK_XOR32mi8,
- X86::LOCK_XOR32mi,
- X86::LOCK_XOR32mr,
- X86::LOCK_XOR64mi8,
- X86::LOCK_XOR64mi32,
- X86::LOCK_XOR64mr,
- }
-};
-
-// Return the target constant operand for atomic-load-op and do simple
-// translations, such as from atomic-load-add to lock-sub. The return value is
-// one of the following 3 cases:
-// + target-constant, the operand could be supported as a target constant.
-// + empty, the operand is not needed any more with the new op selected.
-// + non-empty, otherwise.
-static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
- SDLoc dl,
- enum AtomicOpc &Op, MVT NVT,
- SDValue Val,
- const X86Subtarget *Subtarget) {
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) {
- int64_t CNVal = CN->getSExtValue();
- // Quit if not 32-bit imm.
- if ((int32_t)CNVal != CNVal)
- return Val;
- // Quit if INT32_MIN: it would be negated as it is negative and overflow,
- // producing an immediate that does not fit in the 32 bits available for
- // an immediate operand to sub. However, it still fits in 32 bits for the
- // add (since it is not negated) so we can return target-constant.
- if (CNVal == INT32_MIN)
- return CurDAG->getTargetConstant(CNVal, dl, NVT);
- // For atomic-load-add, we could do some optimizations.
- if (Op == ADD) {
- // Translate to INC/DEC if ADD by 1 or -1.
- if (((CNVal == 1) || (CNVal == -1)) && !Subtarget->slowIncDec()) {
- Op = (CNVal == 1) ? INC : DEC;
- // No more constant operand after being translated into INC/DEC.
- return SDValue();
- }
- // Translate to SUB if ADD by negative value.
- if (CNVal < 0) {
- Op = SUB;
- CNVal = -CNVal;
- }
- }
- return CurDAG->getTargetConstant(CNVal, dl, NVT);
- }
-
- // If the value operand is single-used, try to optimize it.
- if (Op == ADD && Val.hasOneUse()) {
- // Translate (atomic-load-add ptr (sub 0 x)) back to (lock-sub x).
- if (Val.getOpcode() == ISD::SUB && X86::isZeroNode(Val.getOperand(0))) {
- Op = SUB;
- return Val.getOperand(1);
- }
- // A special case for i16, which needs truncating as, in most cases, it's
- // promoted to i32. We will translate
- // (atomic-load-add (truncate (sub 0 x))) to (lock-sub (EXTRACT_SUBREG x))
- if (Val.getOpcode() == ISD::TRUNCATE && NVT == MVT::i16 &&
- Val.getOperand(0).getOpcode() == ISD::SUB &&
- X86::isZeroNode(Val.getOperand(0).getOperand(0))) {
- Op = SUB;
- Val = Val.getOperand(0);
- return CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl, NVT,
- Val.getOperand(1));
- }
- }
-
- return Val;
-}
-
-SDNode *X86DAGToDAGISel::selectAtomicLoadArith(SDNode *Node, MVT NVT) {
- if (Node->hasAnyUseOfValue(0))
- return nullptr;
-
- SDLoc dl(Node);
-
- // Optimize common patterns for __sync_or_and_fetch and similar arith
- // operations where the result is not used. This allows us to use the "lock"
- // version of the arithmetic instruction.
- SDValue Chain = Node->getOperand(0);
- SDValue Ptr = Node->getOperand(1);
- SDValue Val = Node->getOperand(2);
- SDValue Base, Scale, Index, Disp, Segment;
- if (!selectAddr(Node, Ptr, Base, Scale, Index, Disp, Segment))
- return nullptr;
-
- // Which index into the table.
- enum AtomicOpc Op;
- switch (Node->getOpcode()) {
- default:
- return nullptr;
- case ISD::ATOMIC_LOAD_OR:
- Op = OR;
- break;
- case ISD::ATOMIC_LOAD_AND:
- Op = AND;
- break;
- case ISD::ATOMIC_LOAD_XOR:
- Op = XOR;
- break;
- case ISD::ATOMIC_LOAD_ADD:
- Op = ADD;
- break;
- }
-
- Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val, Subtarget);
- bool isUnOp = !Val.getNode();
- bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant);
-
- unsigned Opc = 0;
- switch (NVT.SimpleTy) {
- default: return nullptr;
- case MVT::i8:
- if (isCN)
- Opc = AtomicOpcTbl[Op][ConstantI8];
- else
- Opc = AtomicOpcTbl[Op][I8];
- break;
- case MVT::i16:
- if (isCN) {
- if (immSext8(Val.getNode()))
- Opc = AtomicOpcTbl[Op][SextConstantI16];
- else
- Opc = AtomicOpcTbl[Op][ConstantI16];
- } else
- Opc = AtomicOpcTbl[Op][I16];
- break;
- case MVT::i32:
- if (isCN) {
- if (immSext8(Val.getNode()))
- Opc = AtomicOpcTbl[Op][SextConstantI32];
- else
- Opc = AtomicOpcTbl[Op][ConstantI32];
- } else
- Opc = AtomicOpcTbl[Op][I32];
- break;
- case MVT::i64:
- if (isCN) {
- if (immSext8(Val.getNode()))
- Opc = AtomicOpcTbl[Op][SextConstantI64];
- else if (i64immSExt32(Val.getNode()))
- Opc = AtomicOpcTbl[Op][ConstantI64];
- else
- llvm_unreachable("True 64 bits constant in SelectAtomicLoadArith");
- } else
- Opc = AtomicOpcTbl[Op][I64];
- break;
- }
-
- assert(Opc != 0 && "Invalid arith lock transform!");
-
- // Building the new node.
- SDValue Ret;
- if (isUnOp) {
- SDValue Ops[] = { Base, Scale, Index, Disp, Segment, Chain };
- Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0);
- } else {
- SDValue Ops[] = { Base, Scale, Index, Disp, Segment, Val, Chain };
- Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0);
- }
-
- // Copying the MachineMemOperand.
- MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
- MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
- cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
-
- // We need to have two outputs as that is what the original instruction had.
- // So we add a dummy, undefined output. This is safe as we checked first
- // that no-one uses our output anyway.
- SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, NVT), 0);
- SDValue RetVals[] = { Undef, Ret };
- return CurDAG->getMergeValues(RetVals, dl).getNode();
-}
-
/// Test whether the given X86ISD::CMP node has any uses which require the SF
/// or OF bits to be accurate.
static bool hasNoSignedComparisonUses(SDNode *N) {
@@ -2168,7 +1893,7 @@ static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
}
/// Customized ISel for GATHER operations.
-SDNode *X86DAGToDAGISel::selectGather(SDNode *Node, unsigned Opc) {
+bool X86DAGToDAGISel::tryGather(SDNode *Node, unsigned Opc) {
// Operands of Gather: VSrc, Base, VIdx, VMask, Scale
SDValue Chain = Node->getOperand(0);
SDValue VSrc = Node->getOperand(2);
@@ -2177,7 +1902,7 @@ SDNode *X86DAGToDAGISel::selectGather(SDNode *Node, unsigned Opc) {
SDValue VMask = Node->getOperand(5);
ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
if (!Scale)
- return nullptr;
+ return false;
SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
MVT::Other);
@@ -2196,10 +1921,11 @@ SDNode *X86DAGToDAGISel::selectGather(SDNode *Node, unsigned Opc) {
// of ResNode.
ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2));
- return ResNode;
+ CurDAG->RemoveDeadNode(Node);
+ return true;
}
-SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
+void X86DAGToDAGISel::Select(SDNode *Node) {
MVT NVT = Node->getSimpleValueType(0);
unsigned Opc, MOpc;
unsigned Opcode = Node->getOpcode();
@@ -2210,7 +1936,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
if (Node->isMachineOpcode()) {
DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
Node->setNodeId(-1);
- return nullptr; // Already selected.
+ return; // Already selected.
}
switch (Opcode) {
@@ -2229,10 +1955,10 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, EVT(MVT::i64));
SDValue Brind = CurDAG->getNode(ISD::BRIND, dl, MVT::Other,
Node->getOperand(0), ZextTarget);
- ReplaceUses(SDValue(Node, 0), Brind);
+ ReplaceNode(Node, Brind.getNode());
SelectCode(ZextTarget.getNode());
SelectCode(Brind.getNode());
- return nullptr;
+ return;
}
break;
}
@@ -2278,17 +2004,16 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::x86_avx2_gather_q_d: Opc = X86::VPGATHERQDrm; break;
case Intrinsic::x86_avx2_gather_q_d_256: Opc = X86::VPGATHERQDYrm; break;
}
- SDNode *RetVal = selectGather(Node, Opc);
- if (RetVal)
- // We already called ReplaceUses inside SelectGather.
- return nullptr;
+ if (tryGather(Node, Opc))
+ return;
break;
}
}
break;
}
case X86ISD::GlobalBaseReg:
- return getGlobalBaseReg();
+ ReplaceNode(Node, getGlobalBaseReg());
+ return;
case X86ISD::SHRUNKBLEND: {
// SHRUNKBLEND selects like a regular VSELECT.
@@ -2298,18 +2023,9 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
ReplaceUses(SDValue(Node, 0), VSelect);
SelectCode(VSelect.getNode());
// We already called ReplaceUses.
- return nullptr;
+ return;
}
- case ISD::ATOMIC_LOAD_XOR:
- case ISD::ATOMIC_LOAD_AND:
- case ISD::ATOMIC_LOAD_OR:
- case ISD::ATOMIC_LOAD_ADD: {
- SDNode *RetVal = selectAtomicLoadArith(Node, NVT);
- if (RetVal)
- return RetVal;
- break;
- }
case ISD::AND:
case ISD::OR:
case ISD::XOR: {
@@ -2387,10 +2103,12 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, dl, CstVT);
SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst);
if (ShlVal == 1)
- return CurDAG->SelectNodeTo(Node, AddOp, NVT, SDValue(New, 0),
- SDValue(New, 0));
- return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
- getI8Imm(ShlVal, dl));
+ CurDAG->SelectNodeTo(Node, AddOp, NVT, SDValue(New, 0),
+ SDValue(New, 0));
+ else
+ CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
+ getI8Imm(ShlVal, dl));
+ return;
}
case X86ISD::UMUL8:
case X86ISD::SMUL8: {
@@ -2406,9 +2124,8 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue Ops[] = {N1, InFlag};
SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
- ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
- ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
- return nullptr;
+ ReplaceNode(Node, CNode);
+ return;
}
case X86ISD::UMUL: {
@@ -2431,10 +2148,8 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue Ops[] = {N1, InFlag};
SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
- ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
- ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
- ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
- return nullptr;
+ ReplaceNode(Node, CNode);
+ return;
}
case ISD::SMUL_LOHI:
@@ -2506,24 +2221,32 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
if (foldedLoad) {
SDValue Chain;
+ MachineSDNode *CNode = nullptr;
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
InFlag };
if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
- SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
+ CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
ResHi = SDValue(CNode, 0);
ResLo = SDValue(CNode, 1);
Chain = SDValue(CNode, 2);
InFlag = SDValue(CNode, 3);
} else {
SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
- SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
+ CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
Chain = SDValue(CNode, 0);
InFlag = SDValue(CNode, 1);
}
// Update the chain.
ReplaceUses(N1.getValue(1), Chain);
+ // Record the mem-refs
+ LoadSDNode *LoadNode = cast<LoadSDNode>(N1);
+ if (LoadNode) {
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LoadNode->getMemOperand();
+ CNode->setMemRefs(MemOp, MemOp + 1);
+ }
} else {
SDValue Ops[] = { N1, InFlag };
if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
@@ -2583,7 +2306,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
}
- return nullptr;
+ return;
}
case ISD::SDIVREM:
@@ -2767,7 +2490,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
ReplaceUses(SDValue(Node, 1), Result);
DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
}
- return nullptr;
+ return;
}
case X86ISD::CMP:
@@ -2825,7 +2548,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// one, do not call ReplaceAllUsesWith.
ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
SDValue(NewNode, 0));
- return nullptr;
+ return;
}
// For example, "testl %eax, $2048" to "testb %ah, $8".
@@ -2862,7 +2585,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// one, do not call ReplaceAllUsesWith.
ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
SDValue(NewNode, 0));
- return nullptr;
+ return;
}
// For example, "testl %eax, $32776" to "testw %ax, $32776".
@@ -2885,7 +2608,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// one, do not call ReplaceAllUsesWith.
ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
SDValue(NewNode, 0));
- return nullptr;
+ return;
}
// For example, "testq %rax, $268468232" to "testl %eax, $268468232".
@@ -2908,7 +2631,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// one, do not call ReplaceAllUsesWith.
ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
SDValue(NewNode, 0));
- return nullptr;
+ return;
}
}
break;
@@ -2959,21 +2682,12 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
-
- return Result;
+ CurDAG->RemoveDeadNode(Node);
+ return;
}
}
- SDNode *ResNode = SelectCode(Node);
-
- DEBUG(dbgs() << "=> ";
- if (ResNode == nullptr || ResNode == Node)
- Node->dump(CurDAG);
- else
- ResNode->dump(CurDAG);
- dbgs() << '\n');
-
- return ResNode;
+ SelectCode(Node);
}
bool X86DAGToDAGISel::
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index dd9966f9e179..e54711195900 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -71,9 +71,10 @@ static cl::opt<bool> ExperimentalVectorWideningLegalization(
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
const X86Subtarget &STI)
- : TargetLowering(TM), Subtarget(&STI) {
- X86ScalarSSEf64 = Subtarget->hasSSE2();
- X86ScalarSSEf32 = Subtarget->hasSSE1();
+ : TargetLowering(TM), Subtarget(STI) {
+ bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
+ X86ScalarSSEf64 = Subtarget.hasSSE2();
+ X86ScalarSSEf32 = Subtarget.hasSSE1();
MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());
// Set up the TargetLowering object.
@@ -86,24 +87,24 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// For 64-bit, since we have so many registers, use the ILP scheduler.
// For 32-bit, use the register pressure specific scheduling.
// For Atom, always use ILP scheduling.
- if (Subtarget->isAtom())
+ if (Subtarget.isAtom())
setSchedulingPreference(Sched::ILP);
- else if (Subtarget->is64Bit())
+ else if (Subtarget.is64Bit())
setSchedulingPreference(Sched::ILP);
else
setSchedulingPreference(Sched::RegPressure);
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
// Bypass expensive divides on Atom when compiling with O2.
if (TM.getOptLevel() >= CodeGenOpt::Default) {
- if (Subtarget->hasSlowDivide32())
+ if (Subtarget.hasSlowDivide32())
addBypassSlowDiv(32, 8);
- if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
+ if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
addBypassSlowDiv(64, 16);
}
- if (Subtarget->isTargetKnownWindowsMSVC()) {
+ if (Subtarget.isTargetKnownWindowsMSVC()) {
// Setup Windows compiler runtime calls.
setLibcallName(RTLIB::SDIV_I64, "_alldiv");
setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
@@ -117,11 +118,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
}
- if (Subtarget->isTargetDarwin()) {
+ if (Subtarget.isTargetDarwin()) {
// Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
setUseUnderscoreSetJmp(false);
setUseUnderscoreLongJmp(false);
- } else if (Subtarget->isTargetWindowsGNU()) {
+ } else if (Subtarget.isTargetWindowsGNU()) {
// MS runtime is weird: it exports _setjmp, but longjmp (no leading underscore)!
setUseUnderscoreSetJmp(true);
setUseUnderscoreLongJmp(false);
@@ -134,7 +135,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
addRegisterClass(MVT::i8, &X86::GR8RegClass);
addRegisterClass(MVT::i16, &X86::GR16RegClass);
addRegisterClass(MVT::i32, &X86::GR32RegClass);
- if (Subtarget->is64Bit())
+ if (Subtarget.is64Bit())
addRegisterClass(MVT::i64, &X86::GR64RegClass);
for (MVT VT : MVT::integer_valuetypes())
@@ -164,14 +165,14 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
- if (Subtarget->is64Bit()) {
- if (!Subtarget->useSoftFloat() && Subtarget->hasAVX512())
+ if (Subtarget.is64Bit()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512())
// f32/f64 are legal, f80 is custom.
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
else
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
- } else if (!Subtarget->useSoftFloat()) {
+ } else if (!Subtarget.useSoftFloat()) {
// We have an algorithm for SSE2->double, and we turn this into a
// 64-bit FILD followed by conditional FADD for other targets.
setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
@@ -185,8 +186,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
- if (!Subtarget->useSoftFloat()) {
- // SSE has no i16 to fp conversion, only i32
+ if (!Subtarget.useSoftFloat()) {
+ // SSE has no i16 to fp conversion, only i32.
if (X86ScalarSSEf32) {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
// f32 and f64 cases are Legal, f80 case is not
@@ -205,7 +206,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
- if (!Subtarget->useSoftFloat()) {
+ if (!Subtarget.useSoftFloat()) {
// In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
// are Legal, f80 is custom lowered.
setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
@@ -231,8 +232,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
- if (Subtarget->is64Bit()) {
- if (!Subtarget->useSoftFloat() && Subtarget->hasAVX512()) {
+ if (Subtarget.is64Bit()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
// FP_TO_UINT-i32/i64 is legal for f32/f64, but custom for f80.
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
@@ -240,9 +241,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
}
- } else if (!Subtarget->useSoftFloat()) {
+ } else if (!Subtarget.useSoftFloat()) {
// Since AVX is a superset of SSE3, only check for SSE here.
- if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
+ if (Subtarget.hasSSE1() && !Subtarget.hasSSE3())
// Expand FP_TO_UINT into a select.
// FIXME: We would like to use a Custom expander here eventually to do
// the optimal thing for SSE vs. the default expansion in the legalizer.
@@ -260,12 +261,12 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
if (!X86ScalarSSEf64) {
setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
// Without SSE, i64->f64 goes through memory.
setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
}
- } else if (!Subtarget->is64Bit())
+ } else if (!Subtarget.is64Bit())
setOperationAction(ISD::BITCAST , MVT::i64 , Custom);
// Scalar integer divide and remainder are lowered to use operations that
@@ -295,72 +296,43 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::BR_JT , MVT::Other, Expand);
setOperationAction(ISD::BRCOND , MVT::Other, Custom);
- setOperationAction(ISD::BR_CC , MVT::f32, Expand);
- setOperationAction(ISD::BR_CC , MVT::f64, Expand);
- setOperationAction(ISD::BR_CC , MVT::f80, Expand);
- setOperationAction(ISD::BR_CC , MVT::f128, Expand);
- setOperationAction(ISD::BR_CC , MVT::i8, Expand);
- setOperationAction(ISD::BR_CC , MVT::i16, Expand);
- setOperationAction(ISD::BR_CC , MVT::i32, Expand);
- setOperationAction(ISD::BR_CC , MVT::i64, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::f32, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::f64, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::f80, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::f128, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::i8, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::i16, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::i32, Expand);
- setOperationAction(ISD::SELECT_CC , MVT::i64, Expand);
- if (Subtarget->is64Bit())
+ for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
+ MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
+ setOperationAction(ISD::BR_CC, VT, Expand);
+ setOperationAction(ISD::SELECT_CC, VT, Expand);
+ }
+ if (Subtarget.is64Bit())
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
- if (Subtarget->is32Bit() && Subtarget->isTargetKnownWindowsMSVC()) {
- // On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
- // is. We should promote the value to 64-bits to solve this.
- // This is what the CRT headers do - `fmodf` is an inline header
- // function casting to f64 and calling `fmod`.
- setOperationAction(ISD::FREM , MVT::f32 , Promote);
- } else {
- setOperationAction(ISD::FREM , MVT::f32 , Expand);
- }
-
+ setOperationAction(ISD::FREM , MVT::f32 , Expand);
setOperationAction(ISD::FREM , MVT::f64 , Expand);
setOperationAction(ISD::FREM , MVT::f80 , Expand);
setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
// Promote the i8 variants and force them up to i32, which has a shorter
// encoding.
- setOperationAction(ISD::CTTZ , MVT::i8 , Promote);
- AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote);
- AddPromotedToType (ISD::CTTZ_ZERO_UNDEF , MVT::i8 , MVT::i32);
- if (Subtarget->hasBMI()) {
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Expand);
- if (Subtarget->is64Bit())
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
- } else {
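+ // setOperationPromotedToType(Op, OrigVT, DestVT) is shorthand for the
+ // removed pair: setOperationAction(Op, OrigVT, Promote) followed by
+ // AddPromotedToType(Op, OrigVT, DestVT).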
+ setOperationPromotedToType(ISD::CTTZ , MVT::i8 , MVT::i32);
+ setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
+ if (!Subtarget.hasBMI()) {
setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
- if (Subtarget->is64Bit())
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Legal);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Legal);
+ if (Subtarget.is64Bit()) {
setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
+ }
}
- if (Subtarget->hasLZCNT()) {
+ if (Subtarget.hasLZCNT()) {
// When promoting the i8 variants, force them to i32 for a shorter
// encoding.
- setOperationAction(ISD::CTLZ , MVT::i8 , Promote);
- AddPromotedToType (ISD::CTLZ , MVT::i8 , MVT::i32);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Promote);
- AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Expand);
- if (Subtarget->is64Bit())
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
+ setOperationPromotedToType(ISD::CTLZ , MVT::i8 , MVT::i32);
+ setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
} else {
setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
@@ -368,7 +340,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom);
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
}
@@ -377,7 +349,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// Special handling for half-precision floating point conversions.
// If we don't have F16C support, then lower half float conversions
// into library calls.
- if (Subtarget->useSoftFloat() || !Subtarget->hasF16C()) {
+ if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
}
@@ -395,45 +367,34 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setTruncStoreAction(MVT::f64, MVT::f16, Expand);
setTruncStoreAction(MVT::f80, MVT::f16, Expand);
- if (Subtarget->hasPOPCNT()) {
+ if (Subtarget.hasPOPCNT()) {
setOperationAction(ISD::CTPOP , MVT::i8 , Promote);
} else {
setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
- if (Subtarget->is64Bit())
+ if (Subtarget.is64Bit())
setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
}
setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
- if (!Subtarget->hasMOVBE())
+ if (!Subtarget.hasMOVBE())
setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
// These should be promoted to a larger select which is supported.
setOperationAction(ISD::SELECT , MVT::i1 , Promote);
// X86 wants to expand cmov itself.
- setOperationAction(ISD::SELECT , MVT::i8 , Custom);
- setOperationAction(ISD::SELECT , MVT::i16 , Custom);
- setOperationAction(ISD::SELECT , MVT::i32 , Custom);
- setOperationAction(ISD::SELECT , MVT::f32 , Custom);
- setOperationAction(ISD::SELECT , MVT::f64 , Custom);
- setOperationAction(ISD::SELECT , MVT::f80 , Custom);
- setOperationAction(ISD::SELECT , MVT::f128 , Custom);
- setOperationAction(ISD::SETCC , MVT::i8 , Custom);
- setOperationAction(ISD::SETCC , MVT::i16 , Custom);
- setOperationAction(ISD::SETCC , MVT::i32 , Custom);
- setOperationAction(ISD::SETCC , MVT::f32 , Custom);
- setOperationAction(ISD::SETCC , MVT::f64 , Custom);
- setOperationAction(ISD::SETCC , MVT::f80 , Custom);
- setOperationAction(ISD::SETCC , MVT::f128 , Custom);
- setOperationAction(ISD::SETCCE , MVT::i8 , Custom);
- setOperationAction(ISD::SETCCE , MVT::i16 , Custom);
- setOperationAction(ISD::SETCCE , MVT::i32 , Custom);
- if (Subtarget->is64Bit()) {
- setOperationAction(ISD::SELECT , MVT::i64 , Custom);
- setOperationAction(ISD::SETCC , MVT::i64 , Custom);
- setOperationAction(ISD::SETCCE , MVT::i64 , Custom);
+ for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
+ setOperationAction(ISD::SELECT, VT, Custom);
+ setOperationAction(ISD::SETCC, VT, Custom);
+ }
+ for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
+ if (VT == MVT::i64 && !Subtarget.is64Bit())
+ continue;
+ setOperationAction(ISD::SELECT, VT, Custom);
+ setOperationAction(ISD::SETCC, VT, Custom);
+ setOperationAction(ISD::SETCCE, VT, Custom);
}
setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
// NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
@@ -444,34 +405,31 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// LLVM/Clang supports zero-cost DWARF exception handling.
setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
+ setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
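+ // Under setjmp/longjmp exception handling, unwinding must resume through
+ // the SjLj-specific runtime entry point rather than _Unwind_Resume.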
+ if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
+ setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
// Darwin ABI issue.
- setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
- setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
- setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
- setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom);
- if (Subtarget->is64Bit())
- setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
- setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
- setOperationAction(ISD::BlockAddress , MVT::i32 , Custom);
- if (Subtarget->is64Bit()) {
- setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
- setOperationAction(ISD::JumpTable , MVT::i64 , Custom);
- setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
- setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom);
- setOperationAction(ISD::BlockAddress , MVT::i64 , Custom);
+ for (auto VT : { MVT::i32, MVT::i64 }) {
+ if (VT == MVT::i64 && !Subtarget.is64Bit())
+ continue;
+ setOperationAction(ISD::ConstantPool , VT, Custom);
+ setOperationAction(ISD::JumpTable , VT, Custom);
+ setOperationAction(ISD::GlobalAddress , VT, Custom);
+ setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
+ setOperationAction(ISD::ExternalSymbol , VT, Custom);
+ setOperationAction(ISD::BlockAddress , VT, Custom);
}
// 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
- setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
- setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
- setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
- if (Subtarget->is64Bit()) {
- setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom);
- setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom);
- setOperationAction(ISD::SRL_PARTS , MVT::i64 , Custom);
+ for (auto VT : { MVT::i32, MVT::i64 }) {
+ if (VT == MVT::i64 && !Subtarget.is64Bit())
+ continue;
+ setOperationAction(ISD::SHL_PARTS, VT, Custom);
+ setOperationAction(ISD::SRA_PARTS, VT, Custom);
+ setOperationAction(ISD::SRL_PARTS, VT, Custom);
}
- if (Subtarget->hasSSE1())
+ if (Subtarget.hasSSE1())
setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
@@ -480,16 +438,21 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
}
- if (Subtarget->hasCmpxchg16b()) {
+ if (Subtarget.hasCmpxchg16b()) {
setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
}
// FIXME - use subtarget debug flags
- if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
- !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
+ if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
+ !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
+ TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
}
@@ -505,14 +468,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
- if (Subtarget->is64Bit()) {
- setOperationAction(ISD::VAARG , MVT::Other, Custom);
- setOperationAction(ISD::VACOPY , MVT::Other, Custom);
- } else {
- // TargetInfo::CharPtrBuiltinVaList
- setOperationAction(ISD::VAARG , MVT::Other, Expand);
- setOperationAction(ISD::VACOPY , MVT::Other, Expand);
- }
+ bool Is64Bit = Subtarget.is64Bit();
+ setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
+ setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
@@ -523,41 +481,37 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
- if (!Subtarget->useSoftFloat() && X86ScalarSSEf64) {
+ if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
// f32 and f64 use SSE.
// Set up the FP register classes.
addRegisterClass(MVT::f32, &X86::FR32RegClass);
addRegisterClass(MVT::f64, &X86::FR64RegClass);
- // Use ANDPD to simulate FABS.
- setOperationAction(ISD::FABS , MVT::f64, Custom);
- setOperationAction(ISD::FABS , MVT::f32, Custom);
+ for (auto VT : { MVT::f32, MVT::f64 }) {
+ // Use ANDPD to simulate FABS.
+ setOperationAction(ISD::FABS, VT, Custom);
- // Use XORP to simulate FNEG.
- setOperationAction(ISD::FNEG , MVT::f64, Custom);
- setOperationAction(ISD::FNEG , MVT::f32, Custom);
+ // Use XORP to simulate FNEG.
+ setOperationAction(ISD::FNEG, VT, Custom);
- // Use ANDPD and ORPD to simulate FCOPYSIGN.
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
+ // Use ANDPD and ORPD to simulate FCOPYSIGN.
+ setOperationAction(ISD::FCOPYSIGN, VT, Custom);
+
+ // We don't support sin/cos/fmod
+ setOperationAction(ISD::FSIN , VT, Expand);
+ setOperationAction(ISD::FCOS , VT, Expand);
+ setOperationAction(ISD::FSINCOS, VT, Expand);
+ }
- // Lower this to FGETSIGNx86 plus an AND.
+ // Lower this to MOVMSK plus an AND.
setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
- // We don't support sin/cos/fmod
- setOperationAction(ISD::FSIN , MVT::f64, Expand);
- setOperationAction(ISD::FCOS , MVT::f64, Expand);
- setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
- setOperationAction(ISD::FSIN , MVT::f32, Expand);
- setOperationAction(ISD::FCOS , MVT::f32, Expand);
- setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
-
// Expand FP immediates into loads from the stack, except for the special
// cases we handle.
addLegalFPImmediate(APFloat(+0.0)); // xorpd
addLegalFPImmediate(APFloat(+0.0f)); // xorps
- } else if (!Subtarget->useSoftFloat() && X86ScalarSSEf32) {
+ } else if (UseX87 && X86ScalarSSEf32) {
// Use SSE for f32, x87 for f64.
// Set up the FP register classes.
addRegisterClass(MVT::f32, &X86::FR32RegClass);
@@ -592,24 +546,21 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
}
- } else if (!Subtarget->useSoftFloat()) {
+ } else if (UseX87) {
// f32 and f64 in x87.
// Set up the FP register classes.
addRegisterClass(MVT::f64, &X86::RFP64RegClass);
addRegisterClass(MVT::f32, &X86::RFP32RegClass);
- setOperationAction(ISD::UNDEF, MVT::f64, Expand);
- setOperationAction(ISD::UNDEF, MVT::f32, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ for (auto VT : { MVT::f32, MVT::f64 }) {
+ setOperationAction(ISD::UNDEF, VT, Expand);
+ setOperationAction(ISD::FCOPYSIGN, VT, Expand);
- if (!TM.Options.UnsafeFPMath) {
- setOperationAction(ISD::FSIN , MVT::f64, Expand);
- setOperationAction(ISD::FSIN , MVT::f32, Expand);
- setOperationAction(ISD::FCOS , MVT::f64, Expand);
- setOperationAction(ISD::FCOS , MVT::f32, Expand);
- setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
- setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
+ if (!TM.Options.UnsafeFPMath) {
+ setOperationAction(ISD::FSIN , VT, Expand);
+ setOperationAction(ISD::FCOS , VT, Expand);
+ setOperationAction(ISD::FSINCOS, VT, Expand);
+ }
}
addLegalFPImmediate(APFloat(+0.0)); // FLD0
addLegalFPImmediate(APFloat(+1.0)); // FLD1
@@ -626,8 +577,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FMA, MVT::f32, Expand);
// Long double always uses X87, except f128 in MMX.
- if (!Subtarget->useSoftFloat()) {
- if (Subtarget->is64Bit() && Subtarget->hasMMX()) {
+ if (UseX87) {
+ if (Subtarget.is64Bit() && Subtarget.hasMMX()) {
addRegisterClass(MVT::f128, &X86::FR128RegClass);
ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
setOperationAction(ISD::FABS , MVT::f128, Custom);
@@ -680,38 +631,36 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
+ // Some FP actions are always expanded for vector types.
+ for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
+ MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
+ setOperationAction(ISD::FSIN, VT, Expand);
+ setOperationAction(ISD::FSINCOS, VT, Expand);
+ setOperationAction(ISD::FCOS, VT, Expand);
+ setOperationAction(ISD::FREM, VT, Expand);
+ setOperationAction(ISD::FPOWI, VT, Expand);
+ setOperationAction(ISD::FCOPYSIGN, VT, Expand);
+ setOperationAction(ISD::FPOW, VT, Expand);
+ setOperationAction(ISD::FLOG, VT, Expand);
+ setOperationAction(ISD::FLOG2, VT, Expand);
+ setOperationAction(ISD::FLOG10, VT, Expand);
+ setOperationAction(ISD::FEXP, VT, Expand);
+ setOperationAction(ISD::FEXP2, VT, Expand);
+ }
+
// First set operation action for all vector types to either promote
// (for widening) or expand (for scalarization). Then we will selectively
// turn on ones that can be effectively codegen'd.
for (MVT VT : MVT::vector_valuetypes()) {
- setOperationAction(ISD::ADD , VT, Expand);
- setOperationAction(ISD::SUB , VT, Expand);
- setOperationAction(ISD::FADD, VT, Expand);
- setOperationAction(ISD::FNEG, VT, Expand);
- setOperationAction(ISD::FSUB, VT, Expand);
- setOperationAction(ISD::MUL , VT, Expand);
- setOperationAction(ISD::FMUL, VT, Expand);
setOperationAction(ISD::SDIV, VT, Expand);
setOperationAction(ISD::UDIV, VT, Expand);
- setOperationAction(ISD::FDIV, VT, Expand);
setOperationAction(ISD::SREM, VT, Expand);
setOperationAction(ISD::UREM, VT, Expand);
- setOperationAction(ISD::LOAD, VT, Expand);
- setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
- setOperationAction(ISD::FABS, VT, Expand);
- setOperationAction(ISD::FSIN, VT, Expand);
- setOperationAction(ISD::FSINCOS, VT, Expand);
- setOperationAction(ISD::FCOS, VT, Expand);
- setOperationAction(ISD::FSINCOS, VT, Expand);
- setOperationAction(ISD::FREM, VT, Expand);
setOperationAction(ISD::FMA, VT, Expand);
- setOperationAction(ISD::FPOWI, VT, Expand);
- setOperationAction(ISD::FSQRT, VT, Expand);
- setOperationAction(ISD::FCOPYSIGN, VT, Expand);
setOperationAction(ISD::FFLOOR, VT, Expand);
setOperationAction(ISD::FCEIL, VT, Expand);
setOperationAction(ISD::FTRUNC, VT, Expand);
@@ -723,24 +672,13 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::MULHU, VT, Expand);
setOperationAction(ISD::SDIVREM, VT, Expand);
setOperationAction(ISD::UDIVREM, VT, Expand);
- setOperationAction(ISD::FPOW, VT, Expand);
setOperationAction(ISD::CTPOP, VT, Expand);
setOperationAction(ISD::CTTZ, VT, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::CTLZ, VT, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
- setOperationAction(ISD::SHL, VT, Expand);
- setOperationAction(ISD::SRA, VT, Expand);
- setOperationAction(ISD::SRL, VT, Expand);
setOperationAction(ISD::ROTL, VT, Expand);
setOperationAction(ISD::ROTR, VT, Expand);
setOperationAction(ISD::BSWAP, VT, Expand);
setOperationAction(ISD::SETCC, VT, Expand);
- setOperationAction(ISD::FLOG, VT, Expand);
- setOperationAction(ISD::FLOG2, VT, Expand);
- setOperationAction(ISD::FLOG10, VT, Expand);
- setOperationAction(ISD::FEXP, VT, Expand);
- setOperationAction(ISD::FEXP2, VT, Expand);
setOperationAction(ISD::FP_TO_UINT, VT, Expand);
setOperationAction(ISD::FP_TO_SINT, VT, Expand);
setOperationAction(ISD::UINT_TO_FP, VT, Expand);
@@ -750,7 +688,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
setOperationAction(ISD::ANY_EXTEND, VT, Expand);
- setOperationAction(ISD::VSELECT, VT, Expand);
setOperationAction(ISD::SELECT_CC, VT, Expand);
for (MVT InnerVT : MVT::vector_valuetypes()) {
setTruncStoreAction(InnerVT, VT, Expand);
@@ -774,35 +711,16 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// FIXME: In order to prevent SSE instructions being expanded to MMX ones
// with -msoft-float, disable use of MMX as well.
- if (!Subtarget->useSoftFloat() && Subtarget->hasMMX()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
// No operations on x86mmx are supported; everything uses intrinsics.
}
- // MMX-sized vectors (other than x86mmx) are expected to be expanded
- // into smaller operations.
- for (MVT MMXTy : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v1i64}) {
- setOperationAction(ISD::MULHS, MMXTy, Expand);
- setOperationAction(ISD::AND, MMXTy, Expand);
- setOperationAction(ISD::OR, MMXTy, Expand);
- setOperationAction(ISD::XOR, MMXTy, Expand);
- setOperationAction(ISD::SCALAR_TO_VECTOR, MMXTy, Expand);
- setOperationAction(ISD::SELECT, MMXTy, Expand);
- setOperationAction(ISD::BITCAST, MMXTy, Expand);
- }
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
-
- if (!Subtarget->useSoftFloat() && Subtarget->hasSSE1()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
addRegisterClass(MVT::v4f32, &X86::VR128RegClass);
- setOperationAction(ISD::FADD, MVT::v4f32, Legal);
- setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
- setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
- setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
- setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
setOperationAction(ISD::FABS, MVT::v4f32, Custom);
- setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
@@ -811,7 +729,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
}
- if (!Subtarget->useSoftFloat() && Subtarget->hasSSE2()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
addRegisterClass(MVT::v2f64, &X86::VR128RegClass);
// FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
@@ -821,27 +739,16 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
addRegisterClass(MVT::v2i64, &X86::VR128RegClass);
- setOperationAction(ISD::ADD, MVT::v16i8, Legal);
- setOperationAction(ISD::ADD, MVT::v8i16, Legal);
- setOperationAction(ISD::ADD, MVT::v4i32, Legal);
- setOperationAction(ISD::ADD, MVT::v2i64, Legal);
setOperationAction(ISD::MUL, MVT::v16i8, Custom);
setOperationAction(ISD::MUL, MVT::v4i32, Custom);
setOperationAction(ISD::MUL, MVT::v2i64, Custom);
setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
+ setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
+ setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
- setOperationAction(ISD::SUB, MVT::v16i8, Legal);
- setOperationAction(ISD::SUB, MVT::v8i16, Legal);
- setOperationAction(ISD::SUB, MVT::v4i32, Legal);
- setOperationAction(ISD::SUB, MVT::v2i64, Legal);
setOperationAction(ISD::MUL, MVT::v8i16, Legal);
- setOperationAction(ISD::FADD, MVT::v2f64, Legal);
- setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
- setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
- setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
- setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
setOperationAction(ISD::FABS, MVT::v2f64, Custom);
@@ -870,10 +777,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
// ISD::CTTZ v2i64 - scalarization is faster.
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
- // ISD::CTTZ_ZERO_UNDEF v2i64 - scalarization is faster.
// Custom lower build_vector, vector_shuffle, and extract_vector_elt.
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
@@ -899,37 +802,28 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
}
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
- setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
- setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
+ for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::VSELECT, VT, Custom);
+
+ if (VT == MVT::v2i64 && !Subtarget.is64Bit())
+ continue;
- if (Subtarget->is64Bit()) {
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
}
// Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
- setOperationAction(ISD::AND, VT, Promote);
- AddPromotedToType (ISD::AND, VT, MVT::v2i64);
- setOperationAction(ISD::OR, VT, Promote);
- AddPromotedToType (ISD::OR, VT, MVT::v2i64);
- setOperationAction(ISD::XOR, VT, Promote);
- AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
- setOperationAction(ISD::LOAD, VT, Promote);
- AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
- setOperationAction(ISD::SELECT, VT, Promote);
- AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
+ setOperationPromotedToType(ISD::AND, VT, MVT::v2i64);
+ setOperationPromotedToType(ISD::OR, VT, MVT::v2i64);
+ setOperationPromotedToType(ISD::XOR, VT, MVT::v2i64);
+ setOperationPromotedToType(ISD::LOAD, VT, MVT::v2i64);
+ setOperationPromotedToType(ISD::SELECT, VT, MVT::v2i64);
}
// Custom lower v2i64 and v2f64 selects.
- setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
- setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
@@ -942,7 +836,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
// As there is no 64-bit GPR available, we need to build a special custom
// sequence to convert from v2i32 to v2f32.
- if (!Subtarget->is64Bit())
+ if (!Subtarget.is64Bit())
setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
@@ -954,9 +848,35 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
+
+ setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
+
+ for (auto VT : { MVT::v8i16, MVT::v16i8 }) {
+ setOperationAction(ISD::SRL, VT, Custom);
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
+ }
+
+ // In the customized shift lowering, the legal cases in AVX2 will be
+ // recognized.
+ for (auto VT : { MVT::v4i32, MVT::v2i64 }) {
+ setOperationAction(ISD::SRL, VT, Custom);
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
+ }
+ }
+
+ if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
+ setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
+ setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
+ setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
+ // ISD::CTLZ v4i32 - scalarization is faster.
+ // ISD::CTLZ v2i64 - scalarization is faster.
}
- if (!Subtarget->useSoftFloat() && Subtarget->hasSSE41()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
setOperationAction(ISD::FCEIL, RoundedTy, Legal);
@@ -1004,66 +924,28 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
- // i8 and i16 vectors are custom because the source register and source
- // source memory operand types are not the same width. f32 vectors are
- // custom since the immediate controlling the insert encodes additional
- // information.
+ // i8 vectors are custom because the source register and source
+ // memory operand types are not the same width.
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
-
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
-
- // FIXME: these should be Legal, but that's only for the case where
- // the index is constant. For now custom expand to deal with that.
- if (Subtarget->is64Bit()) {
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
- }
}
- if (Subtarget->hasSSE2()) {
- setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
- setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
- setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
-
- setOperationAction(ISD::SRL, MVT::v8i16, Custom);
- setOperationAction(ISD::SRL, MVT::v16i8, Custom);
+ if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
+ for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
+ MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
+ setOperationAction(ISD::ROTL, VT, Custom);
- setOperationAction(ISD::SHL, MVT::v8i16, Custom);
- setOperationAction(ISD::SHL, MVT::v16i8, Custom);
-
- setOperationAction(ISD::SRA, MVT::v8i16, Custom);
- setOperationAction(ISD::SRA, MVT::v16i8, Custom);
-
- // In the customized shift lowering, the legal cases in AVX2 will be
- // recognized.
- setOperationAction(ISD::SRL, MVT::v2i64, Custom);
- setOperationAction(ISD::SRL, MVT::v4i32, Custom);
+ // XOP can efficiently perform BITREVERSE with VPPERM.
+ for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
+ setOperationAction(ISD::BITREVERSE, VT, Custom);
- setOperationAction(ISD::SHL, MVT::v2i64, Custom);
- setOperationAction(ISD::SHL, MVT::v4i32, Custom);
-
- setOperationAction(ISD::SRA, MVT::v2i64, Custom);
- setOperationAction(ISD::SRA, MVT::v4i32, Custom);
+ for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
+ MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
+ setOperationAction(ISD::BITREVERSE, VT, Custom);
}
- if (Subtarget->hasXOP()) {
- setOperationAction(ISD::ROTL, MVT::v16i8, Custom);
- setOperationAction(ISD::ROTL, MVT::v8i16, Custom);
- setOperationAction(ISD::ROTL, MVT::v4i32, Custom);
- setOperationAction(ISD::ROTL, MVT::v2i64, Custom);
- setOperationAction(ISD::ROTL, MVT::v32i8, Custom);
- setOperationAction(ISD::ROTL, MVT::v16i16, Custom);
- setOperationAction(ISD::ROTL, MVT::v8i32, Custom);
- setOperationAction(ISD::ROTL, MVT::v4i64, Custom);
- }
+ if (!Subtarget.useSoftFloat() && Subtarget.hasFp256()) {
+ bool HasInt256 = Subtarget.hasInt256();
- if (!Subtarget->useSoftFloat() && Subtarget->hasFp256()) {
addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
@@ -1071,35 +953,15 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
addRegisterClass(MVT::v4f64, &X86::VR256RegClass);
- setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
- setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
- setOperationAction(ISD::LOAD, MVT::v4i64, Legal);
-
- setOperationAction(ISD::FADD, MVT::v8f32, Legal);
- setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
- setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
- setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
- setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
- setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
- setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
- setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
- setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
- setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
- setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
- setOperationAction(ISD::FABS, MVT::v8f32, Custom);
-
- setOperationAction(ISD::FADD, MVT::v4f64, Legal);
- setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
- setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
- setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
- setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
- setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
- setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
- setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
- setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
- setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
- setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
- setOperationAction(ISD::FABS, MVT::v4f64, Custom);
+ for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
+ setOperationAction(ISD::FFLOOR, VT, Legal);
+ setOperationAction(ISD::FCEIL, VT, Legal);
+ setOperationAction(ISD::FTRUNC, VT, Legal);
+ setOperationAction(ISD::FRINT, VT, Legal);
+ setOperationAction(ISD::FNEARBYINT, VT, Legal);
+ setOperationAction(ISD::FNEG, VT, Custom);
+ setOperationAction(ISD::FABS, VT, Custom);
+ }
// (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
// even though v8i16 is a legal type.
@@ -1117,14 +979,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
for (MVT VT : MVT::fp_vector_valuetypes())
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);
- setOperationAction(ISD::SRL, MVT::v16i16, Custom);
- setOperationAction(ISD::SRL, MVT::v32i8, Custom);
-
- setOperationAction(ISD::SHL, MVT::v16i16, Custom);
- setOperationAction(ISD::SHL, MVT::v32i8, Custom);
-
- setOperationAction(ISD::SRA, MVT::v16i16, Custom);
- setOperationAction(ISD::SRA, MVT::v32i8, Custom);
+ for (auto VT : { MVT::v32i8, MVT::v16i16 }) {
+ setOperationAction(ISD::SRL, VT, Custom);
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
+ }
setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
@@ -1147,63 +1006,57 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
+ setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);
+
+ for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
+ setOperationAction(ISD::CTPOP, VT, Custom);
+ setOperationAction(ISD::CTTZ, VT, Custom);
+ }
+
+ // ISD::CTLZ v8i32/v4i64 - scalarization is faster without AVX2
+ // as we end up splitting the 256-bit vectors.
+ for (auto VT : { MVT::v32i8, MVT::v16i16 })
+ setOperationAction(ISD::CTLZ, VT, Custom);
+
+ if (HasInt256)
+ for (auto VT : { MVT::v8i32, MVT::v4i64 })
+ setOperationAction(ISD::CTLZ, VT, Custom);
- setOperationAction(ISD::CTPOP, MVT::v32i8, Custom);
- setOperationAction(ISD::CTPOP, MVT::v16i16, Custom);
- setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
- setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
-
- setOperationAction(ISD::CTTZ, MVT::v32i8, Custom);
- setOperationAction(ISD::CTTZ, MVT::v16i16, Custom);
- setOperationAction(ISD::CTTZ, MVT::v8i32, Custom);
- setOperationAction(ISD::CTTZ, MVT::v4i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v32i8, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i16, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i32, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i64, Custom);
-
- if (Subtarget->hasAnyFMA()) {
- setOperationAction(ISD::FMA, MVT::v8f32, Legal);
- setOperationAction(ISD::FMA, MVT::v4f64, Legal);
- setOperationAction(ISD::FMA, MVT::v4f32, Legal);
- setOperationAction(ISD::FMA, MVT::v2f64, Legal);
- setOperationAction(ISD::FMA, MVT::f32, Legal);
- setOperationAction(ISD::FMA, MVT::f64, Legal);
- }
-
- if (Subtarget->hasInt256()) {
- setOperationAction(ISD::ADD, MVT::v4i64, Legal);
- setOperationAction(ISD::ADD, MVT::v8i32, Legal);
- setOperationAction(ISD::ADD, MVT::v16i16, Legal);
- setOperationAction(ISD::ADD, MVT::v32i8, Legal);
-
- setOperationAction(ISD::SUB, MVT::v4i64, Legal);
- setOperationAction(ISD::SUB, MVT::v8i32, Legal);
- setOperationAction(ISD::SUB, MVT::v16i16, Legal);
- setOperationAction(ISD::SUB, MVT::v32i8, Legal);
-
- setOperationAction(ISD::MUL, MVT::v4i64, Custom);
- setOperationAction(ISD::MUL, MVT::v8i32, Legal);
- setOperationAction(ISD::MUL, MVT::v16i16, Legal);
- setOperationAction(ISD::MUL, MVT::v32i8, Custom);
-
- setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
- setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
- setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
- setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
-
- setOperationAction(ISD::SMAX, MVT::v32i8, Legal);
- setOperationAction(ISD::SMAX, MVT::v16i16, Legal);
- setOperationAction(ISD::SMAX, MVT::v8i32, Legal);
- setOperationAction(ISD::UMAX, MVT::v32i8, Legal);
- setOperationAction(ISD::UMAX, MVT::v16i16, Legal);
- setOperationAction(ISD::UMAX, MVT::v8i32, Legal);
- setOperationAction(ISD::SMIN, MVT::v32i8, Legal);
- setOperationAction(ISD::SMIN, MVT::v16i16, Legal);
- setOperationAction(ISD::SMIN, MVT::v8i32, Legal);
- setOperationAction(ISD::UMIN, MVT::v32i8, Legal);
- setOperationAction(ISD::UMIN, MVT::v16i16, Legal);
- setOperationAction(ISD::UMIN, MVT::v8i32, Legal);
+ if (Subtarget.hasAnyFMA()) {
+ for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
+ MVT::v2f64, MVT::v4f64 })
+ setOperationAction(ISD::FMA, VT, Legal);
+ }
+
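+ // Without AVX2 the 256-bit integer ops are marked Custom so lowering can
+ // split them into two 128-bit halves.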
+ for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
+ setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
+ }
+
+ setOperationAction(ISD::MUL, MVT::v4i64, Custom);
+ setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::MUL, MVT::v32i8, Custom);
+
+ setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
+ setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
+
+ setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
+ setOperationAction(ISD::MULHS, MVT::v32i8, Custom);
+
+ for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
+ setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
+ }
+
+ if (HasInt256) {
+ setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i64, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i32, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v16i16, Custom);
// The custom lowering for UINT_TO_FP for v8i32 becomes interesting
// when we have a 256-bit-wide blend with immediate.
@@ -1223,62 +1076,32 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
- } else {
- setOperationAction(ISD::ADD, MVT::v4i64, Custom);
- setOperationAction(ISD::ADD, MVT::v8i32, Custom);
- setOperationAction(ISD::ADD, MVT::v16i16, Custom);
- setOperationAction(ISD::ADD, MVT::v32i8, Custom);
-
- setOperationAction(ISD::SUB, MVT::v4i64, Custom);
- setOperationAction(ISD::SUB, MVT::v8i32, Custom);
- setOperationAction(ISD::SUB, MVT::v16i16, Custom);
- setOperationAction(ISD::SUB, MVT::v32i8, Custom);
-
- setOperationAction(ISD::MUL, MVT::v4i64, Custom);
- setOperationAction(ISD::MUL, MVT::v8i32, Custom);
- setOperationAction(ISD::MUL, MVT::v16i16, Custom);
- setOperationAction(ISD::MUL, MVT::v32i8, Custom);
-
- setOperationAction(ISD::SMAX, MVT::v32i8, Custom);
- setOperationAction(ISD::SMAX, MVT::v16i16, Custom);
- setOperationAction(ISD::SMAX, MVT::v8i32, Custom);
- setOperationAction(ISD::UMAX, MVT::v32i8, Custom);
- setOperationAction(ISD::UMAX, MVT::v16i16, Custom);
- setOperationAction(ISD::UMAX, MVT::v8i32, Custom);
- setOperationAction(ISD::SMIN, MVT::v32i8, Custom);
- setOperationAction(ISD::SMIN, MVT::v16i16, Custom);
- setOperationAction(ISD::SMIN, MVT::v8i32, Custom);
- setOperationAction(ISD::UMIN, MVT::v32i8, Custom);
- setOperationAction(ISD::UMIN, MVT::v16i16, Custom);
- setOperationAction(ISD::UMIN, MVT::v8i32, Custom);
}
// In the customized shift lowering, the legal cases in AVX2 will be
// recognized.
- setOperationAction(ISD::SRL, MVT::v4i64, Custom);
- setOperationAction(ISD::SRL, MVT::v8i32, Custom);
+ for (auto VT : { MVT::v8i32, MVT::v4i64 }) {
+ setOperationAction(ISD::SRL, VT, Custom);
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
+ }
- setOperationAction(ISD::SHL, MVT::v4i64, Custom);
- setOperationAction(ISD::SHL, MVT::v8i32, Custom);
+ for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
+ MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
+ setOperationAction(ISD::MLOAD, VT, Legal);
+ setOperationAction(ISD::MSTORE, VT, Legal);
+ }
- setOperationAction(ISD::SRA, MVT::v4i64, Custom);
- setOperationAction(ISD::SRA, MVT::v8i32, Custom);
+ // Extract subvector is special because the value type
+ // (result) is 128-bit but the source is 256-bit wide.
+ for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
+ MVT::v4f32, MVT::v2f64 }) {
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
+ }
// Custom lower several nodes for 256-bit types.
- for (MVT VT : MVT::vector_valuetypes()) {
- if (VT.getScalarSizeInBits() >= 32) {
- setOperationAction(ISD::MLOAD, VT, Legal);
- setOperationAction(ISD::MSTORE, VT, Legal);
- }
- // Extract subvector is special because the value type
- // (result) is 128-bit but the source is 256-bit wide.
- if (VT.is128BitVector()) {
- setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
- }
- // Do not attempt to custom lower other non-256-bit vectors
- if (!VT.is256BitVector())
- continue;
-
+ for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
+ MVT::v8f32, MVT::v4f64 }) {
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
@@ -1289,25 +1112,20 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
}
- if (Subtarget->hasInt256())
+ if (HasInt256)
setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
// Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
- setOperationAction(ISD::AND, VT, Promote);
- AddPromotedToType (ISD::AND, VT, MVT::v4i64);
- setOperationAction(ISD::OR, VT, Promote);
- AddPromotedToType (ISD::OR, VT, MVT::v4i64);
- setOperationAction(ISD::XOR, VT, Promote);
- AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
- setOperationAction(ISD::LOAD, VT, Promote);
- AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
- setOperationAction(ISD::SELECT, VT, Promote);
- AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
+ setOperationPromotedToType(ISD::AND, VT, MVT::v4i64);
+ setOperationPromotedToType(ISD::OR, VT, MVT::v4i64);
+ setOperationPromotedToType(ISD::XOR, VT, MVT::v4i64);
+ setOperationPromotedToType(ISD::LOAD, VT, MVT::v4i64);
+ setOperationPromotedToType(ISD::SELECT, VT, MVT::v4i64);
}
}
- if (!Subtarget->useSoftFloat() && Subtarget->hasAVX512()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
@@ -1320,19 +1138,14 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
for (MVT VT : MVT::fp_vector_valuetypes())
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i32, MVT::v16i8, Legal);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v16i32, MVT::v16i8, Legal);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i32, MVT::v16i16, Legal);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v16i32, MVT::v16i16, Legal);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v32i16, MVT::v32i8, Legal);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v32i16, MVT::v32i8, Legal);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i64, MVT::v8i8, Legal);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i8, Legal);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i64, MVT::v8i16, Legal);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i16, Legal);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i64, MVT::v8i32, Legal);
- setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i32, Legal);
-
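+ // AVX-512 selects every extension flavor of these wide loads directly
+ // (e.g. VPMOVZXBD/VPMOVSXBD for v16i8 -> v16i32), so mark all three Legal.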
+ for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) {
+ setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
+ setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
+ setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
+ setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
+ setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
+ setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
+ }
setOperationAction(ISD::BR_CC, MVT::i1, Expand);
setOperationAction(ISD::SETCC, MVT::i1, Custom);
setOperationAction(ISD::SETCCE, MVT::i1, Custom);
@@ -1343,29 +1156,22 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SUB, MVT::i1, Custom);
setOperationAction(ISD::ADD, MVT::i1, Custom);
setOperationAction(ISD::MUL, MVT::i1, Custom);
- setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
- setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
- setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
- setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
- setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
-
- setOperationAction(ISD::FADD, MVT::v16f32, Legal);
- setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
- setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
- setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
- setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
- setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
- setOperationAction(ISD::FABS, MVT::v16f32, Custom);
-
- setOperationAction(ISD::FADD, MVT::v8f64, Legal);
- setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
- setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
- setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
- setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
- setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
- setOperationAction(ISD::FABS, MVT::v8f64, Custom);
- setOperationAction(ISD::FMA, MVT::v8f64, Legal);
- setOperationAction(ISD::FMA, MVT::v16f32, Legal);
+
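+ // Pair each legal integer vector type with the i1 mask type of the same
+ // lane count (e.g. v8i64 -> v8i1) and custom-lower extending loads from,
+ // and truncating stores to, that mask type.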
+ for (MVT VT : {MVT::v2i64, MVT::v4i32, MVT::v8i32, MVT::v4i64, MVT::v8i16,
+ MVT::v16i8, MVT::v16i16, MVT::v32i8, MVT::v16i32,
+ MVT::v8i64, MVT::v32i16, MVT::v64i8}) {
+ MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
+ setLoadExtAction(ISD::SEXTLOAD, VT, MaskVT, Custom);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MaskVT, Custom);
+ setLoadExtAction(ISD::EXTLOAD, VT, MaskVT, Custom);
+ setTruncStoreAction(VT, MaskVT, Custom);
+ }
+
+ for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
+ setOperationAction(ISD::FNEG, VT, Custom);
+ setOperationAction(ISD::FABS, VT, Custom);
+ setOperationAction(ISD::FMA, VT, Legal);
+ }
setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
@@ -1389,7 +1195,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
- if (Subtarget->hasVLX()){
+ if (Subtarget.hasVLX()) {
setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
@@ -1412,15 +1218,14 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i1, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i1, Custom);
- if (Subtarget->hasDQI()) {
- setOperationAction(ISD::TRUNCATE, MVT::v2i1, Custom);
- setOperationAction(ISD::TRUNCATE, MVT::v4i1, Custom);
-
+ setOperationAction(ISD::VSELECT, MVT::v8i1, Expand);
+ setOperationAction(ISD::VSELECT, MVT::v16i1, Expand);
+ if (Subtarget.hasDQI()) {
setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);
- if (Subtarget->hasVLX()) {
+ if (Subtarget.hasVLX()) {
setOperationAction(ISD::SINT_TO_FP, MVT::v4i64, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v4i64, Legal);
@@ -1431,7 +1236,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
}
}
- if (Subtarget->hasVLX()) {
+ if (Subtarget.hasVLX()) {
setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
@@ -1440,7 +1245,22 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
+ setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Custom);
+ setOperationAction(ISD::ZERO_EXTEND, MVT::v2i64, Custom);
+
+      // FIXME: These instructions are available on SSE/AVX2; add relevant patterns.
+ setLoadExtAction(ISD::EXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
}
+
setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
@@ -1453,20 +1273,17 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
- if (Subtarget->hasDQI()) {
+ if (Subtarget.hasDQI()) {
setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v2i64, Custom);
}
- setOperationAction(ISD::FFLOOR, MVT::v16f32, Legal);
- setOperationAction(ISD::FFLOOR, MVT::v8f64, Legal);
- setOperationAction(ISD::FCEIL, MVT::v16f32, Legal);
- setOperationAction(ISD::FCEIL, MVT::v8f64, Legal);
- setOperationAction(ISD::FTRUNC, MVT::v16f32, Legal);
- setOperationAction(ISD::FTRUNC, MVT::v8f64, Legal);
- setOperationAction(ISD::FRINT, MVT::v16f32, Legal);
- setOperationAction(ISD::FRINT, MVT::v8f64, Legal);
- setOperationAction(ISD::FNEARBYINT, MVT::v16f32, Legal);
- setOperationAction(ISD::FNEARBYINT, MVT::v8f64, Legal);
+ for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
+ setOperationAction(ISD::FFLOOR, VT, Legal);
+ setOperationAction(ISD::FCEIL, VT, Legal);
+ setOperationAction(ISD::FTRUNC, VT, Legal);
+ setOperationAction(ISD::FRINT, VT, Legal);
+ setOperationAction(ISD::FNEARBYINT, VT, Legal);
+ }
setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
@@ -1501,139 +1318,115 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UMIN, MVT::v16i32, Legal);
setOperationAction(ISD::UMIN, MVT::v8i64, Legal);
- setOperationAction(ISD::ADD, MVT::v8i64, Legal);
- setOperationAction(ISD::ADD, MVT::v16i32, Legal);
-
- setOperationAction(ISD::SUB, MVT::v8i64, Legal);
- setOperationAction(ISD::SUB, MVT::v16i32, Legal);
+ setOperationAction(ISD::ADD, MVT::v8i1, Expand);
+ setOperationAction(ISD::ADD, MVT::v16i1, Expand);
+ setOperationAction(ISD::SUB, MVT::v8i1, Expand);
+ setOperationAction(ISD::SUB, MVT::v16i1, Expand);
+ setOperationAction(ISD::MUL, MVT::v8i1, Expand);
+ setOperationAction(ISD::MUL, MVT::v16i1, Expand);
setOperationAction(ISD::MUL, MVT::v16i32, Legal);
- setOperationAction(ISD::SRL, MVT::v8i64, Custom);
- setOperationAction(ISD::SRL, MVT::v16i32, Custom);
-
- setOperationAction(ISD::SHL, MVT::v8i64, Custom);
- setOperationAction(ISD::SHL, MVT::v16i32, Custom);
-
- setOperationAction(ISD::SRA, MVT::v8i64, Custom);
- setOperationAction(ISD::SRA, MVT::v16i32, Custom);
-
- setOperationAction(ISD::AND, MVT::v8i64, Legal);
- setOperationAction(ISD::OR, MVT::v8i64, Legal);
- setOperationAction(ISD::XOR, MVT::v8i64, Legal);
- setOperationAction(ISD::AND, MVT::v16i32, Legal);
- setOperationAction(ISD::OR, MVT::v16i32, Legal);
- setOperationAction(ISD::XOR, MVT::v16i32, Legal);
+ for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
+ setOperationAction(ISD::SRL, VT, Custom);
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
+ setOperationAction(ISD::AND, VT, Legal);
+ setOperationAction(ISD::OR, VT, Legal);
+ setOperationAction(ISD::XOR, VT, Legal);
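+      // AVX-512F has no vector population-count instruction, so CTPOP and
+      // CTTZ fall back to custom expansions.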
+ setOperationAction(ISD::CTPOP, VT, Custom);
+ setOperationAction(ISD::CTTZ, VT, Custom);
+ }
- if (Subtarget->hasCDI()) {
+ if (Subtarget.hasCDI()) {
setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i64, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v16i32, Expand);
setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
setOperationAction(ISD::CTLZ, MVT::v16i16, Custom);
setOperationAction(ISD::CTLZ, MVT::v32i8, Custom);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i16, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v16i8, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v16i16, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v32i8, Expand);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i64, Custom);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i32, Custom);
- if (Subtarget->hasVLX()) {
+ if (Subtarget.hasVLX()) {
setOperationAction(ISD::CTLZ, MVT::v4i64, Legal);
setOperationAction(ISD::CTLZ, MVT::v8i32, Legal);
setOperationAction(ISD::CTLZ, MVT::v2i64, Legal);
setOperationAction(ISD::CTLZ, MVT::v4i32, Legal);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i64, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i32, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v2i64, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i32, Expand);
-
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i32, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
} else {
setOperationAction(ISD::CTLZ, MVT::v4i64, Custom);
setOperationAction(ISD::CTLZ, MVT::v8i32, Custom);
setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i64, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v8i32, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v2i64, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v4i32, Expand);
}
- } // Subtarget->hasCDI()
- if (Subtarget->hasDQI()) {
- setOperationAction(ISD::MUL, MVT::v2i64, Legal);
- setOperationAction(ISD::MUL, MVT::v4i64, Legal);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i64, Custom);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i32, Custom);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
+ } // Subtarget.hasCDI()
+
+ if (Subtarget.hasDQI()) {
+ if (Subtarget.hasVLX()) {
+ setOperationAction(ISD::MUL, MVT::v2i64, Legal);
+ setOperationAction(ISD::MUL, MVT::v4i64, Legal);
+ }
setOperationAction(ISD::MUL, MVT::v8i64, Legal);
}
// Custom lower several nodes.
- for (MVT VT : MVT::vector_valuetypes()) {
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
- if (EltSize == 1) {
- setOperationAction(ISD::AND, VT, Legal);
- setOperationAction(ISD::OR, VT, Legal);
- setOperationAction(ISD::XOR, VT, Legal);
- }
- if ((VT.is128BitVector() || VT.is256BitVector()) && EltSize >= 32) {
- setOperationAction(ISD::MGATHER, VT, Custom);
- setOperationAction(ISD::MSCATTER, VT, Custom);
- }
- // Extract subvector is special because the value type
- // (result) is 256/128-bit but the source is 512-bit wide.
- if (VT.is128BitVector() || VT.is256BitVector()) {
- setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
- }
- if (VT.getVectorElementType() == MVT::i1)
- setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
-
- // Do not attempt to custom lower other non-512-bit vectors
- if (!VT.is512BitVector())
- continue;
-
- if (EltSize >= 32) {
- setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
- setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
- setOperationAction(ISD::VSELECT, VT, Legal);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
- setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
- setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
- setOperationAction(ISD::MLOAD, VT, Legal);
- setOperationAction(ISD::MSTORE, VT, Legal);
- setOperationAction(ISD::MGATHER, VT, Legal);
- setOperationAction(ISD::MSCATTER, VT, Custom);
- }
+ for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
+ MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
+ setOperationAction(ISD::MGATHER, VT, Custom);
+ setOperationAction(ISD::MSCATTER, VT, Custom);
+ }
+ // Extract subvector is special because the value type
+ // (result) is 256-bit but the source is 512-bit wide.
+ // 128-bit was made Custom under AVX1.
+ for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
+ MVT::v8f32, MVT::v4f64 })
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
+ for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1,
+ MVT::v16i1, MVT::v32i1, MVT::v64i1 })
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
+
+ for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::VSELECT, VT, Legal);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
+ setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
+ setOperationAction(ISD::MLOAD, VT, Legal);
+ setOperationAction(ISD::MSTORE, VT, Legal);
+ setOperationAction(ISD::MGATHER, VT, Legal);
+ setOperationAction(ISD::MSCATTER, VT, Custom);
}
for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32 }) {
- setOperationAction(ISD::SELECT, VT, Promote);
- AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
+ setOperationPromotedToType(ISD::SELECT, VT, MVT::v8i64);
}
}// has AVX-512
- if (!Subtarget->useSoftFloat() && Subtarget->hasBWI()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
- setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
- setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
+ setOperationAction(ISD::ADD, MVT::v32i1, Expand);
+ setOperationAction(ISD::ADD, MVT::v64i1, Expand);
+ setOperationAction(ISD::SUB, MVT::v32i1, Expand);
+ setOperationAction(ISD::SUB, MVT::v64i1, Expand);
+ setOperationAction(ISD::MUL, MVT::v32i1, Expand);
+ setOperationAction(ISD::MUL, MVT::v64i1, Expand);
+
setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
- setOperationAction(ISD::ADD, MVT::v32i16, Legal);
- setOperationAction(ISD::ADD, MVT::v64i8, Legal);
- setOperationAction(ISD::SUB, MVT::v32i16, Legal);
- setOperationAction(ISD::SUB, MVT::v64i8, Legal);
setOperationAction(ISD::MUL, MVT::v32i16, Legal);
+ setOperationAction(ISD::MUL, MVT::v64i8, Custom);
setOperationAction(ISD::MULHS, MVT::v32i16, Legal);
setOperationAction(ISD::MULHU, MVT::v32i16, Legal);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom);
@@ -1646,12 +1439,15 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i8, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i16, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v64i8, Custom);
setOperationAction(ISD::SELECT, MVT::v32i1, Custom);
setOperationAction(ISD::SELECT, MVT::v64i1, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
+ setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i8, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
@@ -1667,6 +1463,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::TRUNCATE, MVT::v32i8, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i1, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i1, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v32i1, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v64i1, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v32i1, Expand);
+ setOperationAction(ISD::VSELECT, MVT::v64i1, Expand);
+ setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
setOperationAction(ISD::SMAX, MVT::v64i8, Legal);
setOperationAction(ISD::SMAX, MVT::v32i16, Legal);
@@ -1679,36 +1480,59 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
- if (Subtarget->hasVLX())
+ if (Subtarget.hasVLX())
setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
- if (Subtarget->hasCDI()) {
+ LegalizeAction Action = Subtarget.hasVLX() ? Legal : Custom;
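+    // The 128/256-bit masked load/store forms require VLX; without it they
+    // are custom-lowered rather than selected directly.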
+ for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
+ setOperationAction(ISD::MLOAD, VT, Action);
+ setOperationAction(ISD::MSTORE, VT, Action);
+ }
+
+ if (Subtarget.hasCDI()) {
setOperationAction(ISD::CTLZ, MVT::v32i16, Custom);
setOperationAction(ISD::CTLZ, MVT::v64i8, Custom);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v32i16, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::v64i8, Expand);
}
for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
- setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
- setOperationAction(ISD::VSELECT, VT, Legal);
- setOperationAction(ISD::SRL, VT, Custom);
- setOperationAction(ISD::SHL, VT, Custom);
- setOperationAction(ISD::SRA, VT, Custom);
-
- setOperationAction(ISD::AND, VT, Promote);
- AddPromotedToType (ISD::AND, VT, MVT::v8i64);
- setOperationAction(ISD::OR, VT, Promote);
- AddPromotedToType (ISD::OR, VT, MVT::v8i64);
- setOperationAction(ISD::XOR, VT, Promote);
- AddPromotedToType (ISD::XOR, VT, MVT::v8i64);
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::VSELECT, VT, Legal);
+ setOperationAction(ISD::SRL, VT, Custom);
+ setOperationAction(ISD::SHL, VT, Custom);
+ setOperationAction(ISD::SRA, VT, Custom);
+ setOperationAction(ISD::MLOAD, VT, Legal);
+ setOperationAction(ISD::MSTORE, VT, Legal);
+ setOperationAction(ISD::CTPOP, VT, Custom);
+ setOperationAction(ISD::CTTZ, VT, Custom);
+
+ setOperationPromotedToType(ISD::AND, VT, MVT::v8i64);
+ setOperationPromotedToType(ISD::OR, VT, MVT::v8i64);
+ setOperationPromotedToType(ISD::XOR, VT, MVT::v8i64);
+ }
+
+ for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) {
+ setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
+ if (Subtarget.hasVLX()) {
+        // FIXME: These instructions are available on SSE/AVX2; add relevant patterns.
+ setLoadExtAction(ExtType, MVT::v16i16, MVT::v16i8, Legal);
+ setLoadExtAction(ExtType, MVT::v8i16, MVT::v8i8, Legal);
+ }
}
}
- if (!Subtarget->useSoftFloat() && Subtarget->hasVLX()) {
+ if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
+ setOperationAction(ISD::ADD, MVT::v2i1, Expand);
+ setOperationAction(ISD::ADD, MVT::v4i1, Expand);
+ setOperationAction(ISD::SUB, MVT::v2i1, Expand);
+ setOperationAction(ISD::SUB, MVT::v4i1, Expand);
+ setOperationAction(ISD::MUL, MVT::v2i1, Expand);
+ setOperationAction(ISD::MUL, MVT::v4i1, Expand);
+
+ setOperationAction(ISD::TRUNCATE, MVT::v2i1, Custom);
+ setOperationAction(ISD::TRUNCATE, MVT::v4i1, Custom);
setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Custom);
@@ -1721,31 +1545,28 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i1, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i1, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
+ setOperationAction(ISD::VSELECT, MVT::v2i1, Expand);
+ setOperationAction(ISD::VSELECT, MVT::v4i1, Expand);
+
+ for (auto VT : { MVT::v4i32, MVT::v8i32 }) {
+ setOperationAction(ISD::AND, VT, Legal);
+ setOperationAction(ISD::OR, VT, Legal);
+ setOperationAction(ISD::XOR, VT, Legal);
+ }
- setOperationAction(ISD::AND, MVT::v8i32, Legal);
- setOperationAction(ISD::OR, MVT::v8i32, Legal);
- setOperationAction(ISD::XOR, MVT::v8i32, Legal);
- setOperationAction(ISD::AND, MVT::v4i32, Legal);
- setOperationAction(ISD::OR, MVT::v4i32, Legal);
- setOperationAction(ISD::XOR, MVT::v4i32, Legal);
- setOperationAction(ISD::SRA, MVT::v2i64, Custom);
- setOperationAction(ISD::SRA, MVT::v4i64, Custom);
-
- setOperationAction(ISD::SMAX, MVT::v2i64, Legal);
- setOperationAction(ISD::SMAX, MVT::v4i64, Legal);
- setOperationAction(ISD::UMAX, MVT::v2i64, Legal);
- setOperationAction(ISD::UMAX, MVT::v4i64, Legal);
- setOperationAction(ISD::SMIN, MVT::v2i64, Legal);
- setOperationAction(ISD::SMIN, MVT::v4i64, Legal);
- setOperationAction(ISD::UMIN, MVT::v2i64, Legal);
- setOperationAction(ISD::UMIN, MVT::v4i64, Legal);
+ for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
+ setOperationAction(ISD::SMAX, VT, Legal);
+ setOperationAction(ISD::UMAX, VT, Legal);
+ setOperationAction(ISD::SMIN, VT, Legal);
+ setOperationAction(ISD::UMIN, VT, Legal);
+ }
}
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
- if (!Subtarget->is64Bit()) {
+ if (!Subtarget.is64Bit()) {
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
}
@@ -1757,7 +1578,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// subtraction on x86-32 once PR3203 is fixed. We really can't do much better
// than generic legalization for 64-bit multiplication-with-overflow, though.
for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
- if (VT == MVT::i64 && !Subtarget->is64Bit())
+ if (VT == MVT::i64 && !Subtarget.is64Bit())
continue;
// Add/Sub/Mul with overflow operations are custom lowered.
setOperationAction(ISD::SADDO, VT, Custom);
@@ -1768,7 +1589,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UMULO, VT, Custom);
}
- if (!Subtarget->is64Bit()) {
+ if (!Subtarget.is64Bit()) {
// These libcalls are not available in 32-bit.
setLibcallName(RTLIB::SHL_I128, nullptr);
setLibcallName(RTLIB::SRL_I128, nullptr);
@@ -1776,10 +1597,10 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
// Combine sin / cos into one node or libcall if possible.
- if (Subtarget->hasSinCos()) {
+ if (Subtarget.hasSinCos()) {
setLibcallName(RTLIB::SINCOS_F32, "sincosf");
setLibcallName(RTLIB::SINCOS_F64, "sincos");
- if (Subtarget->isTargetDarwin()) {
+ if (Subtarget.isTargetDarwin()) {
// For MacOSX, we don't want the normal expansion of a libcall to sincos.
// We want to issue a libcall to __sincos_stret to avoid memory traffic.
setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
@@ -1787,7 +1608,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
}
- if (Subtarget->isTargetWin64()) {
+ if (Subtarget.isTargetWin64()) {
setOperationAction(ISD::SDIV, MVT::i128, Custom);
setOperationAction(ISD::UDIV, MVT::i128, Custom);
setOperationAction(ISD::SREM, MVT::i128, Custom);
@@ -1796,6 +1617,17 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
}
+  // On 32-bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
+  // is. We should promote the value to 64 bits to solve this.
+  // This is what the CRT headers do - `fmodf` is an inline header
+  // function casting to f64 and calling `fmod`.
+ if (Subtarget.is32Bit() && Subtarget.isTargetKnownWindowsMSVC())
+ for (ISD::NodeType Op :
+ {ISD::FCEIL, ISD::FCOS, ISD::FEXP, ISD::FFLOOR, ISD::FREM, ISD::FLOG,
+ ISD::FLOG10, ISD::FPOW, ISD::FSIN})
+ if (isOperationExpand(Op, MVT::f32))
+ setOperationAction(Op, MVT::f32, Promote);
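+      // e.g. fmodf(x, y) then lowers to (float)fmod((double)x, (double)y),
+      // just like the CRT's inline fmodf wrapper.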
+
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
@@ -1827,13 +1659,12 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setTargetDAGCombine(ISD::SINT_TO_FP);
setTargetDAGCombine(ISD::UINT_TO_FP);
setTargetDAGCombine(ISD::SETCC);
- setTargetDAGCombine(ISD::BUILD_VECTOR);
setTargetDAGCombine(ISD::MUL);
setTargetDAGCombine(ISD::XOR);
setTargetDAGCombine(ISD::MSCATTER);
setTargetDAGCombine(ISD::MGATHER);
- computeRegisterProperties(Subtarget->getRegisterInfo());
+ computeRegisterProperties(Subtarget.getRegisterInfo());
MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
MaxStoresPerMemsetOptSize = 8;
@@ -1843,9 +1674,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
MaxStoresPerMemmoveOptSize = 4;
setPrefLoopAlignment(4); // 2^4 bytes.
- // A predictable cmov does not hurt on an in-order CPU.
- // FIXME: Use a CPU attribute to trigger this, not a CPU model.
- PredictableSelectIsExpensive = !Subtarget->isAtom();
+ // An out-of-order CPU can speculatively execute past a predictable branch,
+ // but a conditional move could be stalled by an expensive earlier operation.
+ PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
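+  // e.g. on an out-of-order core a well-predicted branch is nearly free,
+  // while a cmov must wait for both of its inputs; on an in-order core the
+  // cmov usually wins.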
EnableExtLdPromotion = true;
setPrefFunctionAlignment(4); // 2^4 bytes.
@@ -1854,7 +1685,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// This has so far only been implemented for 64-bit MachO.
bool X86TargetLowering::useLoadStackGuardNode() const {
- return Subtarget->isTargetMachO() && Subtarget->is64Bit();
+ return Subtarget.isTargetMachO() && Subtarget.is64Bit();
}
TargetLoweringBase::LegalizeTypeAction
@@ -1867,24 +1698,25 @@ X86TargetLowering::getPreferredVectorAction(EVT VT) const {
return TargetLoweringBase::getPreferredVectorAction(VT);
}
-EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
+EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
+ LLVMContext& Context,
EVT VT) const {
if (!VT.isVector())
- return Subtarget->hasAVX512() ? MVT::i1: MVT::i8;
+ return Subtarget.hasAVX512() ? MVT::i1: MVT::i8;
if (VT.isSimple()) {
MVT VVT = VT.getSimpleVT();
const unsigned NumElts = VVT.getVectorNumElements();
- const MVT EltVT = VVT.getVectorElementType();
+ MVT EltVT = VVT.getVectorElementType();
if (VVT.is512BitVector()) {
- if (Subtarget->hasAVX512())
+ if (Subtarget.hasAVX512())
if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
EltVT == MVT::f32 || EltVT == MVT::f64)
switch(NumElts) {
case 8: return MVT::v8i1;
case 16: return MVT::v16i1;
}
- if (Subtarget->hasBWI())
+ if (Subtarget.hasBWI())
if (EltVT == MVT::i8 || EltVT == MVT::i16)
switch(NumElts) {
case 32: return MVT::v32i1;
@@ -1892,23 +1724,20 @@ EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
}
}
- if (VVT.is256BitVector() || VVT.is128BitVector()) {
- if (Subtarget->hasVLX())
- if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
- EltVT == MVT::f32 || EltVT == MVT::f64)
- switch(NumElts) {
- case 2: return MVT::v2i1;
- case 4: return MVT::v4i1;
- case 8: return MVT::v8i1;
- }
- if (Subtarget->hasBWI() && Subtarget->hasVLX())
- if (EltVT == MVT::i8 || EltVT == MVT::i16)
- switch(NumElts) {
- case 8: return MVT::v8i1;
- case 16: return MVT::v16i1;
- case 32: return MVT::v32i1;
- }
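+      // With BWI and VLX, every vector comparison can produce a k-register
+      // mask of the same element count, so use an i1 vector directly.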
+ if (Subtarget.hasBWI() && Subtarget.hasVLX())
+ return MVT::getVectorVT(MVT::i1, NumElts);
+
+ if (!isTypeLegal(VT) && getTypeAction(Context, VT) == TypePromoteInteger) {
+ EVT LegalVT = getTypeToTransformTo(Context, VT);
+ EltVT = LegalVT.getVectorElementType().getSimpleVT();
}
+
+ if (Subtarget.hasVLX() && EltVT.getSizeInBits() >= 32)
+ switch(NumElts) {
+ case 2: return MVT::v2i1;
+ case 4: return MVT::v4i1;
+ case 8: return MVT::v8i1;
+ }
}
return VT.changeVectorElementTypeToInteger();
@@ -1945,7 +1774,7 @@ static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
const DataLayout &DL) const {
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
// Max of 8 and alignment of type.
unsigned TyAlign = DL.getABITypeAlignment(Ty);
if (TyAlign > 8)
@@ -1954,7 +1783,7 @@ unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
}
unsigned Align = 4;
- if (Subtarget->hasSSE1())
+ if (Subtarget.hasSSE1())
getMaxByValAlign(Ty, Align);
return Align;
}
@@ -1977,35 +1806,40 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size,
bool MemcpyStrSrc,
MachineFunction &MF) const {
const Function *F = MF.getFunction();
- if ((!IsMemset || ZeroMemset) &&
- !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
+ if (!F->hasFnAttribute(Attribute::NoImplicitFloat)) {
if (Size >= 16 &&
- (!Subtarget->isUnalignedMem16Slow() ||
+ (!Subtarget.isUnalignedMem16Slow() ||
((DstAlign == 0 || DstAlign >= 16) &&
(SrcAlign == 0 || SrcAlign >= 16)))) {
- if (Size >= 32) {
- // FIXME: Check if unaligned 32-byte accesses are slow.
- if (Subtarget->hasInt256())
- return MVT::v8i32;
- if (Subtarget->hasFp256())
- return MVT::v8f32;
+ // FIXME: Check if unaligned 32-byte accesses are slow.
+ if (Size >= 32 && Subtarget.hasAVX()) {
+ // Although this isn't a well-supported type for AVX1, we'll let
+ // legalization and shuffle lowering produce the optimal codegen. If we
+ // choose an optimal type with a vector element larger than a byte,
+ // getMemsetStores() may create an intermediate splat (using an integer
+ // multiply) before we splat as a vector.
+ return MVT::v32i8;
}
- if (Subtarget->hasSSE2())
- return MVT::v4i32;
- if (Subtarget->hasSSE1())
+ if (Subtarget.hasSSE2())
+ return MVT::v16i8;
+ // TODO: Can SSE1 handle a byte vector?
+ if (Subtarget.hasSSE1())
return MVT::v4f32;
- } else if (!MemcpyStrSrc && Size >= 8 &&
- !Subtarget->is64Bit() &&
- Subtarget->hasSSE2()) {
+ } else if ((!IsMemset || ZeroMemset) && !MemcpyStrSrc && Size >= 8 &&
+ !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
// Do not use f64 to lower memcpy if source is string constant. It's
// better to use i32 to avoid the loads.
+ // Also, do not use f64 to lower memset unless this is a memset of zeros.
+ // The gymnastics of splatting a byte value into an XMM register and then
+ // only using 8-byte stores (because this is a CPU with slow unaligned
+ // 16-byte accesses) makes that a loser.
return MVT::f64;
}
}
// This is a compromise. If we reach here, unaligned accesses may be slow on
// this target. However, creating smaller, aligned accesses could be even
// slower and would certainly be a lot more code.
- if (Subtarget->is64Bit() && Size >= 8)
+ if (Subtarget.is64Bit() && Size >= 8)
return MVT::i64;
return MVT::i32;
}
@@ -2030,10 +1864,10 @@ X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
*Fast = true;
break;
case 128:
- *Fast = !Subtarget->isUnalignedMem16Slow();
+ *Fast = !Subtarget.isUnalignedMem16Slow();
break;
case 256:
- *Fast = !Subtarget->isUnalignedMem32Slow();
+ *Fast = !Subtarget.isUnalignedMem32Slow();
break;
// TODO: What about AVX-512 (512-bit) accesses?
}
@@ -2048,8 +1882,7 @@ X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
unsigned X86TargetLowering::getJumpTableEncoding() const {
// In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
// symbol.
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
- Subtarget->isPICStyleGOT())
+ if (isPositionIndependent() && Subtarget.isPICStyleGOT())
return MachineJumpTableInfo::EK_Custom32;
// Otherwise, use the normal jump table encoding heuristics.
@@ -2057,15 +1890,14 @@ unsigned X86TargetLowering::getJumpTableEncoding() const {
}
bool X86TargetLowering::useSoftFloat() const {
- return Subtarget->useSoftFloat();
+ return Subtarget.useSoftFloat();
}
const MCExpr *
X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB,
unsigned uid,MCContext &Ctx) const{
- assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
- Subtarget->isPICStyleGOT());
+ assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
// In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
// entries.
return MCSymbolRefExpr::create(MBB->getSymbol(),
@@ -2075,7 +1907,7 @@ X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
/// Returns relocation base for the given PIC jumptable.
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
SelectionDAG &DAG) const {
- if (!Subtarget->is64Bit())
+ if (!Subtarget.is64Bit())
// This doesn't have SDLoc associated with it, but is not really the
// same as a Register.
return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
@@ -2089,7 +1921,7 @@ const MCExpr *X86TargetLowering::
getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
MCContext &Ctx) const {
// X86-64 uses RIP relative addressing based on the jump table label.
- if (Subtarget->isPICStyleRIPRel())
+ if (Subtarget.isPICStyleRIPRel())
return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
// Otherwise, the reference is relative to the PIC base.
@@ -2105,7 +1937,7 @@ X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
default:
return TargetLowering::findRepresentativeClass(TRI, VT);
case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
- RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
+ RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
break;
case MVT::x86mmx:
RRC = &X86::VR64RegClass;
@@ -2121,47 +1953,76 @@ X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
return std::make_pair(RRC, Cost);
}
-bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
- unsigned &Offset) const {
- if (!Subtarget->isTargetLinux())
- return false;
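+// In X86 address-space numbering, 256 denotes the %gs segment and 257 the
+// %fs segment.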
+unsigned X86TargetLowering::getAddressSpace() const {
+ if (Subtarget.is64Bit())
+ return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
+ return 256;
+}
- if (Subtarget->is64Bit()) {
- // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
- Offset = 0x28;
- if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
- AddressSpace = 256;
- else
- AddressSpace = 257;
- } else {
- // %gs:0x14 on i386
- Offset = 0x14;
- AddressSpace = 256;
+Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
+  // glibc has a special slot for the stack guard in tcbhead_t; use it instead
+ // of the usual global variable (see sysdeps/{i386,x86_64}/nptl/tls.h)
+ if (!Subtarget.isTargetGlibc())
+ return TargetLowering::getIRStackGuard(IRB);
+
+  // %fs:0x28 on x86-64 (or %gs:0x28 with the Kernel code model);
+  // %gs:0x14 on i386.
+ unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
+ unsigned AddressSpace = getAddressSpace();
+ return ConstantExpr::getIntToPtr(
+ ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
+ Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
+}
+
+void X86TargetLowering::insertSSPDeclarations(Module &M) const {
+  // MSVC CRT provides functionality for stack protection.
+ if (Subtarget.getTargetTriple().isOSMSVCRT()) {
+    // MSVC CRT has a global variable holding the security cookie.
+ M.getOrInsertGlobal("__security_cookie",
+ Type::getInt8PtrTy(M.getContext()));
+
+    // MSVC CRT has a function to validate the security cookie.
+ auto *SecurityCheckCookie = cast<Function>(
+ M.getOrInsertFunction("__security_check_cookie",
+ Type::getVoidTy(M.getContext()),
+ Type::getInt8PtrTy(M.getContext()), nullptr));
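+    // The MSVC CRT expects the cookie argument in a register, hence the
+    // fastcall convention with inreg (ECX on x86).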
+ SecurityCheckCookie->setCallingConv(CallingConv::X86_FastCall);
+ SecurityCheckCookie->addAttribute(1, Attribute::AttrKind::InReg);
+ return;
}
- return true;
+ // glibc has a special slot for the stack guard.
+ if (Subtarget.isTargetGlibc())
+ return;
+ TargetLowering::insertSSPDeclarations(M);
+}
+
+Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
+  // MSVC CRT has a global variable holding the security cookie.
+ if (Subtarget.getTargetTriple().isOSMSVCRT())
+ return M.getGlobalVariable("__security_cookie");
+ return TargetLowering::getSDagStackGuard(M);
+}
+
+Value *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
+  // MSVC CRT has a function to validate the security cookie.
+ if (Subtarget.getTargetTriple().isOSMSVCRT())
+ return M.getFunction("__security_check_cookie");
+ return TargetLowering::getSSPStackGuardCheck(M);
}
Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
- if (!Subtarget->isTargetAndroid())
+ if (!Subtarget.isTargetAndroid())
return TargetLowering::getSafeStackPointerLocation(IRB);
// Android provides a fixed TLS slot for the SafeStack pointer. See the
// definition of TLS_SLOT_SAFESTACK in
// https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
unsigned AddressSpace, Offset;
- if (Subtarget->is64Bit()) {
- // %fs:0x48, unless we're using a Kernel code model, in which case it's %gs:
- Offset = 0x48;
- if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
- AddressSpace = 256;
- else
- AddressSpace = 257;
- } else {
- // %gs:0x24 on i386
- Offset = 0x24;
- AddressSpace = 256;
- }
+  // %fs:0x48 on x86-64 (or %gs:0x48 with the Kernel code model);
+  // %gs:0x24 on i386.
+ Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
+ AddressSpace = getAddressSpace();
return ConstantExpr::getIntToPtr(
ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
@@ -2194,11 +2055,11 @@ const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
}
SDValue
-X86TargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
+X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const {
+ const SDLoc &dl, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
@@ -2214,10 +2075,10 @@ X86TargetLowering::LowerReturn(SDValue Chain,
RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
// Operand #1 = Bytes To Pop
RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
- MVT::i16));
+ MVT::i32));
// Copy the result values into the output registers.
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
SDValue ValToCopy = OutVals[i];
@@ -2244,14 +2105,14 @@ X86TargetLowering::LowerReturn(SDValue Chain,
// or SSE or MMX vectors.
if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
- (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
+ (Subtarget.is64Bit() && !Subtarget.hasSSE1())) {
report_fatal_error("SSE register return with SSE disabled");
}
// Likewise we can't return F64 values with SSE1 only. gcc does so, but
// llvm-gcc has never done it right and no one has noticed, so this
// should be OK for now.
if (ValVT == MVT::f64 &&
- (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
+ (Subtarget.is64Bit() && !Subtarget.hasSSE2()))
report_fatal_error("SSE2 register return with SSE2 disabled");
// Returns in ST0/ST1 are handled specially: these are pushed as operands to
@@ -2269,7 +2130,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
// 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
// which is returned in RAX / RDX.
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
if (ValVT == MVT::x86mmx) {
if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
@@ -2277,7 +2138,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
ValToCopy);
// If we don't have SSE2 available, convert to v4f32 so the generated
// register is legal.
- if (!Subtarget->hasSSE2())
+ if (!Subtarget.hasSSE2())
ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
}
}
@@ -2288,6 +2149,9 @@ X86TargetLowering::LowerReturn(SDValue Chain,
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
}
+  // The Swift calling convention does not require us to copy the sret
+  // argument into %rax/%eax for the return, and SRetReturnReg is not set
+  // for Swift.
+
// All x86 ABIs require that for returning structs by value we copy
// the sret argument into %rax/%eax (depending on ABI) for the return.
// We saved the argument into a virtual register in the entry block,
@@ -2298,11 +2162,30 @@ X86TargetLowering::LowerReturn(SDValue Chain,
// false, then an sret argument may be implicitly inserted in the SelDAG. In
// either case FuncInfo->setSRetReturnReg() will have been called.
if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
- SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg,
+ // When we have both sret and another return value, we should use the
+ // original Chain stored in RetOps[0], instead of the current Chain updated
+    // in the above loop. If we only have sret, RetOps[0] equals Chain.
+
+ // For the case of sret and another return value, we have
+ // Chain_0 at the function entry
+ // Chain_1 = getCopyToReg(Chain_0) in the above loop
+ // If we use Chain_1 in getCopyFromReg, we will have
+ // Val = getCopyFromReg(Chain_1)
+ // Chain_2 = getCopyToReg(Chain_1, Val) from below
+
+ // getCopyToReg(Chain_0) will be glued together with
+ // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
+    // in Unit B, and we will have a cyclic dependency between Unit A and
+    // Unit B:
+ // Data dependency from Unit B to Unit A due to usage of Val in
+ // getCopyToReg(Chain_1, Val)
+ // Chain dependency from Unit A to Unit B
+
+    // So here, we use RetOps[0] (i.e. Chain_0) for getCopyFromReg.
+ SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
getPointerTy(MF.getDataLayout()));
unsigned RetValReg
- = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
+ = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
X86::RAX : X86::EAX;
Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
Flag = Chain.getValue(1);
@@ -2312,7 +2195,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
}
- const X86RegisterInfo *TRI = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
const MCPhysReg *I =
TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
if (I) {
@@ -2337,9 +2220,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
}
bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
- if (N->getNumValues() != 1)
- return false;
- if (!N->hasNUsesOfValue(1, 0))
+ if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
return false;
SDValue TCChain = Chain;
@@ -2375,15 +2256,19 @@ bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
return true;
}
-EVT
-X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
- ISD::NodeType ExtendKind) const {
- MVT ReturnMVT;
- // TODO: Is this also valid on 32-bit?
- if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
+EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
+ ISD::NodeType ExtendKind) const {
+ MVT ReturnMVT = MVT::i32;
+
+ bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
+ if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
+ // The ABI does not require i1, i8 or i16 to be extended.
+ //
+ // On Darwin, there is code in the wild relying on Clang's old behaviour of
+ // always extending i8/i16 return values, so keep doing that for now.
+ // (PR26665).
ReturnMVT = MVT::i8;
- else
- ReturnMVT = MVT::i32;
+ }
EVT MinVT = getRegisterType(Context, ReturnMVT);
return VT.bitsLT(MinVT) ? MinVT : VT;
@@ -2392,16 +2277,14 @@ X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
/// Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
-SDValue
-X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue X86TargetLowering::LowerCallResult(
+ SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
- bool Is64Bit = Subtarget->is64Bit();
+ bool Is64Bit = Subtarget.is64Bit();
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
*DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
@@ -2413,7 +2296,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// If this is x86-64, and we disabled SSE, we can't return FP values
if ((CopyVT == MVT::f32 || CopyVT == MVT::f64 || CopyVT == MVT::f128) &&
- ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
+ ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget.hasSSE1())) {
report_fatal_error("SSE register return with SSE disabled");
}
@@ -2422,6 +2305,8 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
bool RoundAfterCopy = false;
if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
isScalarFPTypeInSSEReg(VA.getValVT())) {
+ if (!Subtarget.hasX87())
+ report_fatal_error("X87 register return with X87 disabled");
CopyVT = MVT::f80;
RoundAfterCopy = (CopyVT != VA.getLocVT());
}
@@ -2492,10 +2377,9 @@ argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins, bool IsMCU) {
/// Make a copy of an aggregate at address specified by "Src" to address
/// "Dst" with size and alignment information specified by the specific
/// parameter attribute. The copy will be passed as a byval function parameter.
-static SDValue
-CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
- ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
- SDLoc dl) {
+static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
+ SDValue Chain, ISD::ArgFlagsTy Flags,
+ SelectionDAG &DAG, const SDLoc &dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
@@ -2549,13 +2433,11 @@ bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
}
SDValue
-X86TargetLowering::LowerMemArgument(SDValue Chain,
- CallingConv::ID CallConv,
+X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
const CCValAssign &VA,
- MachineFrameInfo *MFI,
- unsigned i) const {
+ MachineFrameInfo *MFI, unsigned i) const {
// Create the nodes corresponding to a load from this parameter slot.
ISD::ArgFlagsTy Flags = Ins[i].Flags;
bool AlwaysUseMutable = shouldGuaranteeTCO(
@@ -2602,6 +2484,14 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
} else {
int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
VA.getLocMemOffset(), isImmutable);
+
+ // Set SExt or ZExt flag.
+ if (VA.getLocInfo() == CCValAssign::ZExt) {
+ MFI->setObjectZExt(FI, true);
+ } else if (VA.getLocInfo() == CCValAssign::SExt) {
+ MFI->setObjectSExt(FI, true);
+ }
+
// Adjust SP offset of interrupt parameter.
if (CallConv == CallingConv::X86_INTR) {
MFI->setObjectOffset(FI, Offset);
@@ -2610,8 +2500,7 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
SDValue Val = DAG.getLoad(
ValVT, dl, Chain, FIN,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false,
- false, false, 0);
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
return ExtendedInMem ?
DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val) : Val;
}
@@ -2619,10 +2508,10 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
- const X86Subtarget *Subtarget) {
- assert(Subtarget->is64Bit());
+ const X86Subtarget &Subtarget) {
+ assert(Subtarget.is64Bit());
- if (Subtarget->isCallingConvWin64(CallConv)) {
+ if (Subtarget.isCallingConvWin64(CallConv)) {
static const MCPhysReg GPR64ArgRegsWin64[] = {
X86::RCX, X86::RDX, X86::R8, X86::R9
};
@@ -2638,9 +2527,9 @@ static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
CallingConv::ID CallConv,
- const X86Subtarget *Subtarget) {
- assert(Subtarget->is64Bit());
- if (Subtarget->isCallingConvWin64(CallConv)) {
+ const X86Subtarget &Subtarget) {
+ assert(Subtarget.is64Bit());
+ if (Subtarget.isCallingConvWin64(CallConv)) {
// The XMM registers which might contain var arg parameters are shadowed
// in their paired GPR. So we only need to save the GPR to their home
// slots.
@@ -2650,10 +2539,10 @@ static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
const Function *Fn = MF.getFunction();
bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
- bool isSoftFloat = Subtarget->useSoftFloat();
+ bool isSoftFloat = Subtarget.useSoftFloat();
assert(!(isSoftFloat && NoImplicitFloatOps) &&
"SSE register cannot be used when SSE is disabled!");
- if (isSoftFloat || NoImplicitFloatOps || !Subtarget->hasSSE1())
+ if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
// Kernel mode asks for SSE to be disabled, so there are no XMM argument
// registers.
return None;
@@ -2667,21 +2556,21 @@ static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
SDValue X86TargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
- const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
+ const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
- const Function* Fn = MF.getFunction();
+ const Function *Fn = MF.getFunction();
if (Fn->hasExternalLinkage() &&
- Subtarget->isTargetCygMing() &&
+ Subtarget.isTargetCygMing() &&
Fn->getName() == "main")
FuncInfo->setForceFramePointer(true);
MachineFrameInfo *MFI = MF.getFrameInfo();
- bool Is64Bit = Subtarget->is64Bit();
- bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
+ bool Is64Bit = Subtarget.is64Bit();
+ bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
"Var args not supported with calling convention fastcc, ghc or hipe");
@@ -2778,13 +2667,18 @@ SDValue X86TargetLowering::LowerFormalArguments(
// If value is passed via pointer - do a load.
if (VA.getLocInfo() == CCValAssign::Indirect)
- ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
- MachinePointerInfo(), false, false, false, 0);
+ ArgValue =
+ DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
InVals.push_back(ArgValue);
}
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+    // The Swift calling convention does not require us to copy the sret
+    // argument into %rax/%eax for the return, so SRetReturnReg is not set
+    // for Swift.
+ if (CallConv == CallingConv::Swift)
+ continue;
+
// All x86 ABIs require that for returning structs by value we copy the
// sret argument into %rax/%eax (depending on ABI) for the return. Save
// the argument into a virtual register so that we can access it from the
@@ -2819,7 +2713,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
}
// Figure out if XMM registers are in use.
- assert(!(Subtarget->useSoftFloat() &&
+ assert(!(Subtarget.useSoftFloat() &&
Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
"SSE register cannot be used when SSE is disabled!");
@@ -2831,7 +2725,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
- assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
+ assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
"SSE register cannot be used when SSE is disabled!");
// Gather all the live in physical registers.
@@ -2865,7 +2759,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
} else {
// For X86-64, if there are vararg parameters that are passed via
// registers, then we must store them to their spots on the stack so
- // they may be loaded by deferencing the result of va_next.
+ // they may be loaded by dereferencing the result of va_next.
FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
@@ -2884,8 +2778,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
DAG.getStore(Val.getValue(1), dl, Val, FIN,
MachinePointerInfo::getFixedStack(
DAG.getMachineFunction(),
- FuncInfo->getRegSaveFrameIndex(), Offset),
- false, false, 0);
+ FuncInfo->getRegSaveFrameIndex(), Offset));
MemOps.push_back(Store);
Offset += 8;
}
@@ -2913,13 +2806,13 @@ SDValue X86TargetLowering::LowerFormalArguments(
// Find the largest legal vector type.
MVT VecVT = MVT::Other;
// FIXME: Only some x86_32 calling conventions support AVX512.
- if (Subtarget->hasAVX512() &&
+ if (Subtarget.hasAVX512() &&
(Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
CallConv == CallingConv::Intel_OCL_BI)))
VecVT = MVT::v16f32;
- else if (Subtarget->hasAVX())
+ else if (Subtarget.hasAVX())
VecVT = MVT::v8f32;
- else if (Subtarget->hasSSE2())
+ else if (Subtarget.hasSSE2())
VecVT = MVT::v4f32;
// We forward some GPRs and some vector types.
@@ -2960,8 +2853,8 @@ SDValue X86TargetLowering::LowerFormalArguments(
FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
// If this is an sret function, the return should pop the hidden pointer.
if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
- !Subtarget->getTargetTriple().isOSMSVCRT() &&
- argsAreStructReturn(Ins, Subtarget->isTargetMCU()) == StackStructReturn)
+ !Subtarget.getTargetTriple().isOSMSVCRT() &&
+ argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
FuncInfo->setBytesToPopOnReturn(4);
}
@@ -2987,7 +2880,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
// offset from the bottom of this and each funclet's frame must be the
// same, so the size of funclets' (mostly empty) frames is dictated by
// how far this slot is from the bottom (since they allocate just enough
- // space to accomodate holding this slot at the correct offset).
+ // space to accommodate holding this slot at the correct offset).
int PSPSymFI = MFI->CreateStackObject(8, 8, /*isSS=*/false);
EHInfo->PSPSymFrameIdx = PSPSymFI;
}
@@ -2996,12 +2889,11 @@ SDValue X86TargetLowering::LowerFormalArguments(
return Chain;
}
-SDValue
-X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
- SDValue StackPtr, SDValue Arg,
- SDLoc dl, SelectionDAG &DAG,
- const CCValAssign &VA,
- ISD::ArgFlagsTy Flags) const {
+SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
+ SDValue Arg, const SDLoc &dl,
+ SelectionDAG &DAG,
+ const CCValAssign &VA,
+ ISD::ArgFlagsTy Flags) const {
unsigned LocMemOffset = VA.getLocMemOffset();
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
@@ -3011,24 +2903,20 @@ X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
return DAG.getStore(
Chain, dl, Arg, PtrOff,
- MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset),
- false, false, 0);
+ MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
}
/// Emit a load of return address if tail call
/// optimization is performed and it is required.
-SDValue
-X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
- SDValue &OutRetAddr, SDValue Chain,
- bool IsTailCall, bool Is64Bit,
- int FPDiff, SDLoc dl) const {
+SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
+ SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
+ bool Is64Bit, int FPDiff, const SDLoc &dl) const {
// Adjust the Return address stack slot.
EVT VT = getPointerTy(DAG.getDataLayout());
OutRetAddr = getReturnAddressFrameIndex(DAG);
// Load the "old" Return address.
- OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
- false, false, false, 0);
+ OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
return SDValue(OutRetAddr.getNode(), 1);
}
@@ -3037,7 +2925,7 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
SDValue Chain, SDValue RetAddrFrIdx,
EVT PtrVT, unsigned SlotSize,
- int FPDiff, SDLoc dl) {
+ int FPDiff, const SDLoc &dl) {
// Store the return address to the appropriate stack slot.
if (!FPDiff) return Chain;
// Calculate the new stack slot for the return address.
@@ -3047,21 +2935,20 @@ static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
MachinePointerInfo::getFixedStack(
- DAG.getMachineFunction(), NewReturnAddrFI),
- false, false, 0);
+ DAG.getMachineFunction(), NewReturnAddrFI));
return Chain;
}
/// Returns a vector_shuffle mask for an movs{s|d}, movd
/// operation of specified width.
-static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
+static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
SDValue V2) {
unsigned NumElems = VT.getVectorNumElements();
SmallVector<int, 8> Mask;
Mask.push_back(NumElems);
for (unsigned i = 1; i != NumElems; ++i)
Mask.push_back(i);
- return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
+ return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
SDValue
@@ -3079,9 +2966,9 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool isVarArg = CLI.IsVarArg;
MachineFunction &MF = DAG.getMachineFunction();
- bool Is64Bit = Subtarget->is64Bit();
- bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
- StructReturnType SR = callIsStructReturn(Outs, Subtarget->isTargetMCU());
+ bool Is64Bit = Subtarget.is64Bit();
+ bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
+ StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
bool IsSibcall = false;
X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
@@ -3092,7 +2979,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (Attr.getValueAsString() == "true")
isTailCall = false;
- if (Subtarget->isPICStyleGOT() &&
+ if (Subtarget.isPICStyleGOT() &&
!MF.getTarget().Options.GuaranteedTailCallOpt) {
// If we are using a GOT, disable tail calls to external symbols with
// default visibility. Tail calling such a symbol requires using a GOT
@@ -3195,7 +3082,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Walk the register/memloc assignments, inserting copies/loads. In the case
// of tail call optimization arguments are handle later.
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
// Skip inalloca arguments, they have already been written.
ISD::ArgFlagsTy Flags = Outs[i].Flags;
@@ -3238,8 +3125,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
Chain = DAG.getStore(
Chain, dl, Arg, SpillSlot,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
- false, false, 0);
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
Arg = SpillSlot;
break;
}
@@ -3273,7 +3159,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
- if (Subtarget->isPICStyleGOT()) {
+ if (Subtarget.isPICStyleGOT()) {
// ELF / PIC requires the GOT pointer to be live in the EBX register before
// function calls made via the PLT.
if (!isTailCall) {
@@ -3314,7 +3200,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
- assert((Subtarget->hasSSE1() || !NumXMMRegs)
+ assert((Subtarget.hasSSE1() || !NumXMMRegs)
&& "SSE registers cannot be used when SSE is disabled");
RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
@@ -3377,8 +3263,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Store relative to framepointer.
MemOpChains2.push_back(DAG.getStore(
ArgChain, dl, Arg, FIN,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
- false, false, 0));
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
}
}
@@ -3416,70 +3301,29 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// non-JIT mode.
const GlobalValue *GV = G->getGlobal();
if (!GV->hasDLLImportStorageClass()) {
- unsigned char OpFlags = 0;
- bool ExtraLoad = false;
- unsigned WrapperKind = ISD::DELETED_NODE;
-
- // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
- external symbols must go through the PLT in PIC mode. If the symbol
- // has hidden or protected visibility, or if it is static or local, then
- // we don't need to use the PLT - we can directly call it.
- if (Subtarget->isTargetELF() &&
- DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
- GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
- OpFlags = X86II::MO_PLT;
- } else if (Subtarget->isPICStyleStubAny() &&
- !GV->isStrongDefinitionForLinker() &&
- (!Subtarget->getTargetTriple().isMacOSX() ||
- Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
- // PC-relative references to external symbols should go through $stub,
- // unless we're building with the leopard linker or later, which
- // automatically synthesizes these stubs.
- OpFlags = X86II::MO_DARWIN_STUB;
- } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
- cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
- // If the function is marked as non-lazy, generate an indirect call
- // which loads from the GOT directly. This avoids runtime overhead
- // at the cost of eager binding (and one extra byte of encoding).
- OpFlags = X86II::MO_GOTPCREL;
- WrapperKind = X86ISD::WrapperRIP;
- ExtraLoad = true;
- }
+ unsigned char OpFlags = Subtarget.classifyGlobalFunctionReference(GV);
Callee = DAG.getTargetGlobalAddress(
GV, dl, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags);
- // Add a wrapper if needed.
- if (WrapperKind != ISD::DELETED_NODE)
+ if (OpFlags == X86II::MO_GOTPCREL) {
+ // Add a wrapper.
Callee = DAG.getNode(X86ISD::WrapperRIP, dl,
- getPointerTy(DAG.getDataLayout()), Callee);
- // Add extra indirection if needed.
- if (ExtraLoad)
+ getPointerTy(DAG.getDataLayout()), Callee);
+ // Add an extra indirection: load the callee address from the GOT.
Callee = DAG.getLoad(
getPointerTy(DAG.getDataLayout()), dl, DAG.getEntryNode(), Callee,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()), false, false,
- false, 0);
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
+ }
}
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- unsigned char OpFlags = 0;
-
- // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
- // external symbols should go through the PLT.
- if (Subtarget->isTargetELF() &&
- DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
- OpFlags = X86II::MO_PLT;
- } else if (Subtarget->isPICStyleStubAny() &&
- (!Subtarget->getTargetTriple().isMacOSX() ||
- Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
- // PC-relative references to external symbols should go through $stub,
- // unless we're building with the leopard linker or later, which
- // automatically synthesizes these stubs.
- OpFlags = X86II::MO_DARWIN_STUB;
- }
+ const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
+ unsigned char OpFlags =
+ Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);
Callee = DAG.getTargetExternalSymbol(
S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags);
- } else if (Subtarget->isTarget64BitILP32() &&
+ } else if (Subtarget.isTarget64BitILP32() &&
Callee->getValueType(0) == MVT::i32) {
// Zero-extend the 32-bit Callee address to 64 bits, as the x32 ABI requires.
Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
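The two callee branches above replace long open-coded decision trees with X86Subtarget::classifyGlobalFunctionReference, which folds the PLT / Darwin $stub / GOTPCREL choices into one X86II::MO_* answer. Here is a reduced standalone model of that classification; the flag names mirror X86II, but the context struct and predicate are simplifications rather than the LLVM API:

#include <cassert>

// Reduced model of classifying how to reference an external function.
enum MOFlag { MO_NO_FLAG, MO_PLT, MO_DARWIN_STUB, MO_GOTPCREL };

struct Ctx {
  bool IsELF, IsPIC, DefaultVisibility, LocalLinkage;
  bool DarwinStubs, RIPRel, NonLazyBind;
};

static MOFlag classifyFunctionRef(const Ctx &C) {
  // ELF + PIC: calls to preemptible symbols must go through the PLT.
  if (C.IsELF && C.IsPIC && C.DefaultVisibility && !C.LocalLinkage)
    return MO_PLT;
  // Old Darwin linkers need a synthesized $stub.
  if (C.DarwinStubs)
    return MO_DARWIN_STUB;
  // nonlazybind: load the address from the GOT (extra indirection).
  if (C.RIPRel && C.NonLazyBind)
    return MO_GOTPCREL;
  return MO_NO_FLAG;
}

int main() {
  Ctx ElfPic{true, true, true, false, false, false, false};
  assert(classifyFunctionRef(ElfPic) == MO_PLT);
  Ctx NonLazy{false, false, true, false, false, true, true};
  assert(classifyFunctionRef(NonLazy) == MO_GOTPCREL);
  return 0;
}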
@@ -3552,7 +3396,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
DAG.getTarget().Options.GuaranteedTailCallOpt))
NumBytesForCalleeToPop = NumBytes; // Callee pops everything
else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
- !Subtarget->getTargetTriple().isOSMSVCRT() &&
+ !Subtarget.getTargetTriple().isOSMSVCRT() &&
SR == StackStructReturn)
// If this is a call to a struct-return function, the callee
// pops the hidden struct pointer, so we have to push it back.
@@ -3562,6 +3406,12 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
else
NumBytesForCalleeToPop = 0; // Callee pops nothing.
+ if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
+ // No need to reset the stack after the call if the call doesn't return. To
+ // keep the MachineInstr verifier happy, we'll pretend the callee does it
+ // for us.
+ NumBytesForCalleeToPop = NumBytes;
+ }
+
// Returns a flag for retval copy to use.
if (!IsSibcall) {
Chain = DAG.getCALLSEQ_END(Chain,
@@ -3614,8 +3464,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
unsigned
X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
SelectionDAG& DAG) const {
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
- const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
+ const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
unsigned StackAlignment = TFI.getStackAlignment();
uint64_t AlignMask = StackAlignment - 1;
int64_t Offset = StackSize;
@@ -3636,8 +3486,28 @@ X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
- const X86InstrInfo *TII) {
+ const X86InstrInfo *TII, const CCValAssign &VA) {
unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
+
+ for (;;) {
+ // Look through nodes that don't alter the bits of the incoming value.
+ unsigned Op = Arg.getOpcode();
+ if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
+ Arg = Arg.getOperand(0);
+ continue;
+ }
+ if (Op == ISD::TRUNCATE) {
+ const SDValue &TruncInput = Arg.getOperand(0);
+ if (TruncInput.getOpcode() == ISD::AssertZext &&
+ cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
+ Arg.getValueType()) {
+ Arg = TruncInput.getOperand(0);
+ continue;
+ }
+ }
+ break;
+ }
+
int FI = INT_MAX;
if (Arg.getOpcode() == ISD::CopyFromReg) {
unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
@@ -3647,7 +3517,7 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
if (!Def)
return false;
if (!Flags.isByVal()) {
- if (!TII->isLoadFromStackSlot(Def, FI))
+ if (!TII->isLoadFromStackSlot(*Def, FI))
return false;
} else {
unsigned Opcode = Def->getOpcode();
@@ -3682,7 +3552,20 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
assert(FI != INT_MAX);
if (!MFI->isFixedObjectIndex(FI))
return false;
- return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
+
+ if (Offset != MFI->getObjectOffset(FI))
+ return false;
+
+ if (VA.getLocVT().getSizeInBits() > Arg.getValueType().getSizeInBits()) {
+ // If the argument location is wider than the argument type, check that any
+ // extension flags match.
+ if (Flags.isZExt() != MFI->isObjectZExt(FI) ||
+ Flags.isSExt() != MFI->isObjectSExt(FI)) {
+ return false;
+ }
+ }
+
+ return Bytes == MFI->getObjectSize(FI);
}
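The new loop at the top of MatchingStackOffset peels wrappers that leave the underlying bits intact (zext, anyext, bitcast, and a truncate that merely undoes an AssertZext of the same width) so the frame-index load behind them can still be matched. A standalone miniature of that peeling, with a toy node type standing in for SDValue:

#include <cassert>

// Toy node: just enough structure to mimic the look-through loop.
enum Op { Leaf, ZExt, AnyExt, Bitcast, Trunc, AssertZext };
struct Node {
  Op Opcode;
  const Node *Operand = nullptr; // single data operand
  int Bits = 0; // result width (Trunc) / asserted width (AssertZext)
};

static const Node *lookThrough(const Node *N) {
  for (;;) {
    if (N->Opcode == ZExt || N->Opcode == AnyExt || N->Opcode == Bitcast) {
      N = N->Operand;
      continue;
    }
    // trunc(assertzext X, iN) to iN leaves the bits unchanged.
    if (N->Opcode == Trunc && N->Operand->Opcode == AssertZext &&
        N->Operand->Bits == N->Bits) {
      N = N->Operand->Operand;
      continue;
    }
    return N;
  }
}

int main() {
  Node FI{Leaf, nullptr, 32};      // stands in for the stack-slot load
  Node AZ{AssertZext, &FI, 8};     // value is known to fit in 8 bits
  Node Tr{Trunc, &AZ, 8};          // truncate to i8
  Node Ze{ZExt, &Tr, 32};          // widen again
  assert(lookThrough(&Ze) == &FI); // every wrapper is peeled off
  return 0;
}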
/// Check whether the call is eligible for tail call optimization. Targets
@@ -3708,8 +3591,8 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
CallingConv::ID CallerCC = CallerF->getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
- bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
- bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
+ bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
+ bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
// Win64 functions have extra shadow space for argument homing. Don't do the
// sibcall if the caller and callee have mismatched expectations for this
@@ -3728,7 +3611,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
// Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
// emit a special epilogue.
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
if (RegInfo->needsStackRealignment(MF))
return false;
@@ -3739,6 +3622,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
// Do not sibcall optimize vararg calls unless all arguments are passed via
// registers.
+ LLVMContext &C = *DAG.getContext();
if (isVarArg && !Outs.empty()) {
// Optimizing for varargs on Win64 is unlikely to be safe without
// additional testing.
@@ -3746,8 +3630,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
return false;
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
- *DAG.getContext());
+ CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
CCInfo.AnalyzeCallOperands(Outs, CC_X86);
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
@@ -3767,8 +3650,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
}
if (Unused) {
SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
- *DAG.getContext());
+ CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
@@ -3777,34 +3659,17 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
}
}
- // If the calling conventions do not match, then we'd better make sure the
- // results are returned in the same way as what the caller expects.
+ // Check that the call results are passed in the same way.
+ if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
+ RetCC_X86, RetCC_X86))
+ return false;
+ // The callee has to preserve all registers the caller needs to preserve.
+ const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
+ const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
if (!CCMatch) {
- SmallVector<CCValAssign, 16> RVLocs1;
- CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
- *DAG.getContext());
- CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
-
- SmallVector<CCValAssign, 16> RVLocs2;
- CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
- *DAG.getContext());
- CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
-
- if (RVLocs1.size() != RVLocs2.size())
+ const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
+ if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
return false;
- for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
- if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
- return false;
- if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
- return false;
- if (RVLocs1[i].isRegLoc()) {
- if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
- return false;
- } else {
- if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
- return false;
- }
- }
}
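regmaskSubsetEqual asks whether the callee preserves at least every register the caller's convention preserves, a necessary condition for tail-calling across calling conventions. In regmask encoding a set bit means the register is preserved, so this is a per-word subset test; a standalone model (not the TargetRegisterInfo API):

#include <cassert>
#include <cstdint>

// Regmask model: bit i set => register i is preserved across the call.
// The caller's mask must be a subset of the callee's for a safe tail call.
static bool regmaskSubsetEqual(const uint32_t *Caller, const uint32_t *Callee,
                               unsigned NumWords) {
  for (unsigned i = 0; i != NumWords; ++i)
    if (Caller[i] & ~Callee[i]) // caller preserves something callee clobbers
      return false;
  return true;
}

int main() {
  uint32_t Caller[] = {0x0000F0F0};
  uint32_t CalleeOK[] = {0x0000FFF0};  // preserves a superset
  uint32_t CalleeBad[] = {0x0000F000}; // clobbers regs 4..7
  assert(regmaskSubsetEqual(Caller, CalleeOK, 1));
  assert(!regmaskSubsetEqual(Caller, CalleeBad, 1));
  return 0;
}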
unsigned StackArgsSize = 0;
@@ -3815,8 +3680,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
// Check if stack adjustment is needed. For now, do not do this if any
// argument is passed on the stack.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
- *DAG.getContext());
+ CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
// Allocate shadow area for Win64
if (IsCalleeWin64)
@@ -3830,7 +3694,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
// the caller's fixed stack objects.
MachineFrameInfo *MFI = MF.getFrameInfo();
const MachineRegisterInfo *MRI = &MF.getRegInfo();
- const X86InstrInfo *TII = Subtarget->getInstrInfo();
+ const X86InstrInfo *TII = Subtarget.getInstrInfo();
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
SDValue Arg = OutVals[i];
@@ -3839,26 +3703,25 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
return false;
if (!VA.isRegLoc()) {
if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
- MFI, MRI, TII))
+ MFI, MRI, TII, VA))
return false;
}
}
}
+ bool PositionIndependent = isPositionIndependent();
// If the tailcall address may be in a register, then make sure it's
// possible to register allocate for it. In 32-bit, the call address can
// only target EAX, EDX, or ECX since the tail call must be scheduled after
// callee-saved registers are restored. These happen to be the same
// registers used to pass 'inreg' arguments so watch out for those.
- if (!Subtarget->is64Bit() &&
- ((!isa<GlobalAddressSDNode>(Callee) &&
- !isa<ExternalSymbolSDNode>(Callee)) ||
- DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
+ if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
+ !isa<ExternalSymbolSDNode>(Callee)) ||
+ PositionIndependent)) {
unsigned NumInRegs = 0;
// In PIC we need an extra register to formulate the address computation
// for the callee.
- unsigned MaxInRegs =
- (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
+ unsigned MaxInRegs = PositionIndependent ? 2 : 3;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
@@ -3874,10 +3737,14 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
}
}
}
+
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
+ return false;
}
bool CalleeWillPop =
- X86::isCalleePop(CalleeCC, Subtarget->is64Bit(), isVarArg,
+ X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
MF.getTarget().Options.GuaranteedTailCallOpt);
if (unsigned BytesToPop =
@@ -3923,6 +3790,8 @@ static bool isTargetShuffle(unsigned Opcode) {
case X86ISD::SHUFP:
case X86ISD::INSERTPS:
case X86ISD::PALIGNR:
+ case X86ISD::VSHLDQ:
+ case X86ISD::VSRLDQ:
case X86ISD::MOVLHPS:
case X86ISD::MOVLHPD:
case X86ISD::MOVHLPS:
@@ -3935,16 +3804,30 @@ static bool isTargetShuffle(unsigned Opcode) {
case X86ISD::MOVSD:
case X86ISD::UNPCKL:
case X86ISD::UNPCKH:
+ case X86ISD::VBROADCAST:
case X86ISD::VPERMILPI:
+ case X86ISD::VPERMILPV:
case X86ISD::VPERM2X128:
+ case X86ISD::VPERMIL2:
case X86ISD::VPERMI:
+ case X86ISD::VPPERM:
case X86ISD::VPERMV:
case X86ISD::VPERMV3:
+ case X86ISD::VZEXT_MOVL:
return true;
}
}
-static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, MVT VT,
+static bool isTargetShuffleVariableMask(unsigned Opcode) {
+ switch (Opcode) {
+ default: return false;
+ case X86ISD::PSHUFB:
+ case X86ISD::VPERMILPV:
+ return true;
+ }
+}
+
+static SDValue getTargetShuffleNode(unsigned Opc, const SDLoc &dl, MVT VT,
SDValue V1, unsigned TargetMask,
SelectionDAG &DAG) {
switch(Opc) {
@@ -3959,7 +3842,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, MVT VT,
}
}
-static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, MVT VT,
+static SDValue getTargetShuffleNode(unsigned Opc, const SDLoc &dl, MVT VT,
SDValue V1, SDValue V2, SelectionDAG &DAG) {
switch(Opc) {
default: llvm_unreachable("Unknown x86 shuffle node");
@@ -3978,7 +3861,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, MVT VT,
SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
int ReturnAddrIndex = FuncInfo->getRAIndex();
@@ -4047,17 +3930,20 @@ bool X86::isCalleePop(CallingConv::ID CallingConv,
/// \brief Return true if the condition is an unsigned comparison operation.
static bool isX86CCUnsigned(unsigned X86CC) {
switch (X86CC) {
- default: llvm_unreachable("Invalid integer condition!");
- case X86::COND_E: return true;
- case X86::COND_G: return false;
- case X86::COND_GE: return false;
- case X86::COND_L: return false;
- case X86::COND_LE: return false;
- case X86::COND_NE: return true;
- case X86::COND_B: return true;
- case X86::COND_A: return true;
- case X86::COND_BE: return true;
- case X86::COND_AE: return true;
+ default:
+ llvm_unreachable("Invalid integer condition!");
+ case X86::COND_E:
+ case X86::COND_NE:
+ case X86::COND_B:
+ case X86::COND_A:
+ case X86::COND_BE:
+ case X86::COND_AE:
+ return true;
+ case X86::COND_G:
+ case X86::COND_GE:
+ case X86::COND_L:
+ case X86::COND_LE:
+ return false;
}
}
@@ -4080,8 +3966,9 @@ static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
/// Do a one-to-one translation of a ISD::CondCode to the X86-specific
/// condition code, returning the condition code and the LHS/RHS of the
/// comparison to make.
-static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, SDLoc DL, bool isFP,
- SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
+static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
+ bool isFP, SDValue &LHS, SDValue &RHS,
+ SelectionDAG &DAG) {
if (!isFP) {
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
@@ -4181,24 +4068,50 @@ bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
if (!IntrData)
return false;
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.readMem = false;
+ Info.writeMem = false;
+ Info.vol = false;
+ Info.offset = 0;
+
switch (IntrData->Type) {
- case LOADA:
- case LOADU: {
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(I.getType());
+ case EXPAND_FROM_MEM: {
Info.ptrVal = I.getArgOperand(0);
- Info.offset = 0;
- Info.align = (IntrData->Type == LOADA ? Info.memVT.getSizeInBits()/8 : 1);
- Info.vol = false;
+ Info.memVT = MVT::getVT(I.getType());
+ Info.align = 1;
Info.readMem = true;
- Info.writeMem = false;
- return true;
+ break;
}
- default:
+ case COMPRESS_TO_MEM: {
+ Info.ptrVal = I.getArgOperand(0);
+ Info.memVT = MVT::getVT(I.getArgOperand(1)->getType());
+ Info.align = 1;
+ Info.writeMem = true;
break;
}
+ case TRUNCATE_TO_MEM_VI8:
+ case TRUNCATE_TO_MEM_VI16:
+ case TRUNCATE_TO_MEM_VI32: {
+ Info.ptrVal = I.getArgOperand(0);
+ MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
+ MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
+ if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
+ ScalarVT = MVT::i8;
+ else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
+ ScalarVT = MVT::i16;
+ else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
+ ScalarVT = MVT::i32;
+
+ Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
+ Info.align = 1;
+ Info.writeMem = true;
+ break;
+ }
+ default:
+ return false;
+ }
- return false;
+ return true;
}
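For the truncating-store intrinsics the in-memory type is narrower than the SSA-level vector: TRUNCATE_TO_MEM_VI8 of a v8i32 argument touches only a v8i8 in memory. A standalone sketch of that memVT computation, with plain structs standing in for MVT:

#include <cassert>

struct VecTy { unsigned ScalarBits, NumElts; };

// Mirror of the TRUNCATE_TO_MEM_VI8/16/32 logic: keep the element count,
// swap in the truncated scalar width.
static VecTy truncStoreMemTy(VecTy Src, unsigned TruncBits) {
  return {TruncBits, Src.NumElts};
}

int main() {
  VecTy Src{32, 8};                    // v8i32 source
  VecTy Mem = truncStoreMemTy(Src, 8); // TRUNCATE_TO_MEM_VI8
  assert(Mem.ScalarBits == 8 && Mem.NumElts == 8); // memVT = v8i8
  return 0;
}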
/// Returns true if the target can instruction select the
@@ -4246,12 +4159,24 @@ bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
bool X86TargetLowering::isCheapToSpeculateCttz() const {
// Speculate cttz only if we can directly use TZCNT.
- return Subtarget->hasBMI();
+ return Subtarget.hasBMI();
}
bool X86TargetLowering::isCheapToSpeculateCtlz() const {
// Speculate ctlz only if we can directly use LZCNT.
- return Subtarget->hasLZCNT();
+ return Subtarget.hasLZCNT();
+}
+
+bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
+ if (!Subtarget.hasBMI())
+ return false;
+
+ // There are only 32-bit and 64-bit forms for 'andn'.
+ EVT VT = Y.getValueType();
+ if (VT != MVT::i32 && VT != MVT::i64)
+ return false;
+
+ return true;
}
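hasAndNotCompare is the TargetLowering hook asking whether compares of the form and(not(x), y) are worth forming; on x86 they map to BMI's ANDN, which exists only for 32- and 64-bit operands, hence the type check. A standalone illustration of the equivalence the combine relies on (the helper name is illustrative):

#include <cassert>
#include <cstdint>

// Model of the rewrite enabled when hasAndNotCompare() returns true:
// "(x & m) != m" (is any selected bit clear?) can be tested as
// "(~x & m) != 0", which BMI lowers to a single flag-setting ANDN.
static bool anyMaskedBitClear(uint64_t X, uint64_t M) {
  return (~X & M) != 0; // one ANDN on BMI targets
}

int main() {
  const uint64_t Xs[] = {0x0, 0xFF, 0xF0};
  const uint64_t Ms[] = {0x0F, 0xFF};
  for (uint64_t X : Xs)
    for (uint64_t M : Ms)
      assert(anyMaskedBitClear(X, M) == ((X & M) != M));
  return 0;
}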
/// Return true if every element in Mask, beginning
@@ -4269,11 +4194,26 @@ static bool isUndefOrInRange(int Val, int Low, int Hi) {
return (Val < 0) || (Val >= Low && Val < Hi);
}
+/// Return true if every element in Mask is undef or if its value
+/// falls within the specified range [Low, Hi).
+static bool isUndefOrInRange(ArrayRef<int> Mask,
+ int Low, int Hi) {
+ for (int M : Mask)
+ if (!isUndefOrInRange(M, Low, Hi))
+ return false;
+ return true;
+}
+
/// Val is either less than zero (undef) or equal to the specified value.
static bool isUndefOrEqual(int Val, int CmpVal) {
return (Val < 0 || Val == CmpVal);
}
+/// Val is either the undef or zero sentinel value.
+static bool isUndefOrZero(int Val) {
+ return (Val == SM_SentinelUndef || Val == SM_SentinelZero);
+}
+
/// Return true if every element in Mask, beginning
/// from position Pos and ending in Pos+Size, falls within the specified
/// sequential range [Low, Low+Size), or is undef.
@@ -4285,6 +4225,17 @@ static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
return true;
}
+/// Return true if every element in Mask, beginning
+/// from position Pos and ending in Pos+Size, falls within the specified
+/// sequential range [Low, Low+Size), or is undef or zero.
+static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
+ unsigned Size, int Low) {
+ for (unsigned i = Pos, e = Pos + Size; i != e; ++i, ++Low)
+ if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
+ return false;
+ return true;
+}
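This predicate lets lowering recognize, for example, a byte-shift shaped shuffle whose vacated lanes are known zero. A standalone copy of it, using the same sentinel convention as the x86 shuffle-decode helpers:

#include <cassert>
#include <vector>

// Same sentinels the x86 shuffle decoding uses.
enum { SM_SentinelUndef = -1, SM_SentinelZero = -2 };

static bool isUndefOrZero(int Val) {
  return Val == SM_SentinelUndef || Val == SM_SentinelZero;
}

// Elements [Pos, Pos+Size) must be Low, Low+1, ... or undef/zero sentinels.
static bool isSequentialOrUndefOrZeroInRange(const std::vector<int> &Mask,
                                             unsigned Pos, unsigned Size,
                                             int Low) {
  for (unsigned i = Pos, e = Pos + Size; i != e; ++i, ++Low)
    if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
      return false;
  return true;
}

int main() {
  // A PSRLDQ-by-4-bytes style v8i16 mask: elements shift down, zeroes fill in.
  std::vector<int> Mask = {2, 3, 4, SM_SentinelUndef, 6, 7,
                           SM_SentinelZero, SM_SentinelZero};
  assert(isSequentialOrUndefOrZeroInRange(Mask, 0, 8, 2));
  assert(!isSequentialOrUndefOrZeroInRange(Mask, 0, 8, 0));
  return 0;
}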
+
/// Return true if the specified EXTRACT_SUBVECTOR operand specifies a vector
/// extract that is suitable for instruction that extract 128 or 256 bit vectors
static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
@@ -4399,9 +4350,8 @@ bool X86::isZeroNode(SDValue Elt) {
// Build a vector of constants.
// Use an UNDEF node if MaskElt == -1.
// Split 64-bit constants in 32-bit mode.
-static SDValue getConstVector(ArrayRef<int> Values, MVT VT,
- SelectionDAG &DAG,
- SDLoc dl, bool IsMask = false) {
+static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
+ const SDLoc &dl, bool IsMask = false) {
SmallVector<SDValue, 32> Ops;
bool Split = false;
@@ -4424,63 +4374,40 @@ static SDValue getConstVector(ArrayRef<int> Values, MVT VT,
Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
DAG.getConstant(0, dl, EltVT));
}
- SDValue ConstsNode = DAG.getNode(ISD::BUILD_VECTOR, dl, ConstVecVT, Ops);
+ SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
if (Split)
ConstsNode = DAG.getBitcast(VT, ConstsNode);
return ConstsNode;
}
/// Returns a vector of specified type with all zero elements.
-static SDValue getZeroVector(MVT VT, const X86Subtarget *Subtarget,
- SelectionDAG &DAG, SDLoc dl) {
- assert(VT.isVector() && "Expected a vector type");
-
- // Always build SSE zero vectors as <4 x i32> bitcasted
- // to their dest type. This ensures they get CSE'd.
+static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
+ SelectionDAG &DAG, const SDLoc &dl) {
+ assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
+ VT.getVectorElementType() == MVT::i1) &&
+ "Unexpected vector type");
+
+ // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
+ // type. This ensures they get CSE'd. But if the integer type is not
+ // available, use a floating-point +0.0 instead.
SDValue Vec;
- if (VT.is128BitVector()) { // SSE
- if (Subtarget->hasSSE2()) { // SSE2
- SDValue Cst = DAG.getConstant(0, dl, MVT::i32);
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
- } else { // SSE1
- SDValue Cst = DAG.getConstantFP(+0.0, dl, MVT::f32);
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
- }
- } else if (VT.is256BitVector()) { // AVX
- if (Subtarget->hasInt256()) { // AVX2
- SDValue Cst = DAG.getConstant(0, dl, MVT::i32);
- SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
- } else {
- // 256-bit logic and arithmetic instructions in AVX are all
- // floating-point, no support for integer ops. Emit fp zeroed vectors.
- SDValue Cst = DAG.getConstantFP(+0.0, dl, MVT::f32);
- SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
- }
- } else if (VT.is512BitVector()) { // AVX-512
- SDValue Cst = DAG.getConstant(0, dl, MVT::i32);
- SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
- Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
+ if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
+ Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
} else if (VT.getVectorElementType() == MVT::i1) {
-
- assert((Subtarget->hasBWI() || VT.getVectorNumElements() <= 16)
- && "Unexpected vector type");
- assert((Subtarget->hasVLX() || VT.getVectorNumElements() >= 8)
- && "Unexpected vector type");
- SDValue Cst = DAG.getConstant(0, dl, MVT::i1);
- SmallVector<SDValue, 64> Ops(VT.getVectorNumElements(), Cst);
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
- } else
- llvm_unreachable("Unexpected vector type");
-
+ assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
+ "Unexpected vector type");
+ assert((Subtarget.hasVLX() || VT.getVectorNumElements() >= 8) &&
+ "Unexpected vector type");
+ Vec = DAG.getConstant(0, dl, VT);
+ } else {
+ unsigned Num32BitElts = VT.getSizeInBits() / 32;
+ Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
+ }
return DAG.getBitcast(VT, Vec);
}
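The rewrite collapses the old per-width cases into one rule: build the zero as an <N x i32> constant with N = SizeInBits/32, so all zero vectors CSE to the same node, keeping only the pre-SSE2 float fallback and the i1 mask case special. A worked trace for one call, written as a hedged fragment rather than a compilable unit:

// getZeroVector(MVT::v4f64, Subtarget, DAG, dl) on an AVX target:
//   Num32BitElts = 256 / 32 = 8
SDValue Vec = DAG.getConstant(0, dl, MVT::v8i32); // canonical, CSE-friendly zero
return DAG.getBitcast(MVT::v4f64, Vec);           // free reinterpretation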
-static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
- SelectionDAG &DAG, SDLoc dl,
- unsigned vectorWidth) {
+static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
+ const SDLoc &dl, unsigned vectorWidth) {
assert((vectorWidth == 128 || vectorWidth == 256) &&
"Unsupported vector width");
EVT VT = Vec.getValueType();
@@ -4490,7 +4417,7 @@ static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
VT.getVectorNumElements()/Factor);
// Extract from UNDEF is UNDEF.
- if (Vec.getOpcode() == ISD::UNDEF)
+ if (Vec.isUndef())
return DAG.getUNDEF(ResultVT);
// Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
@@ -4503,8 +4430,8 @@ static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
// If the input is a buildvector just emit a smaller one.
if (Vec.getOpcode() == ISD::BUILD_VECTOR)
- return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
- makeArrayRef(Vec->op_begin() + IdxVal, ElemsPerChunk));
+ return DAG.getNode(ISD::BUILD_VECTOR,
+ dl, ResultVT, makeArrayRef(Vec->op_begin() + IdxVal, ElemsPerChunk));
SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
@@ -4516,27 +4443,27 @@ static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
-static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
- SelectionDAG &DAG, SDLoc dl) {
+static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, const SDLoc &dl) {
assert((Vec.getValueType().is256BitVector() ||
Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
- return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
+ return extractSubVector(Vec, IdxVal, DAG, dl, 128);
}
/// Generate a DAG to grab 256-bits from a 512-bit vector.
-static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
- SelectionDAG &DAG, SDLoc dl) {
+static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, const SDLoc &dl) {
assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
- return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
+ return extractSubVector(Vec, IdxVal, DAG, dl, 256);
}
-static SDValue InsertSubVector(SDValue Result, SDValue Vec,
- unsigned IdxVal, SelectionDAG &DAG,
- SDLoc dl, unsigned vectorWidth) {
+static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, const SDLoc &dl,
+ unsigned vectorWidth) {
assert((vectorWidth == 128 || vectorWidth == 256) &&
"Unsupported vector width");
// Inserting UNDEF just returns Result.
- if (Vec.getOpcode() == ISD::UNDEF)
+ if (Vec.isUndef())
return Result;
EVT VT = Vec.getValueType();
EVT ElVT = VT.getVectorElementType();
@@ -4560,8 +4487,8 @@ static SDValue InsertSubVector(SDValue Result, SDValue Vec,
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
-static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
- SelectionDAG &DAG, SDLoc dl) {
+static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, const SDLoc &dl) {
assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
// For insertion into the zero index (low half) of a 256-bit vector, it is
@@ -4570,7 +4497,7 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
// extend the subvector to the size of the result vector. Make sure that
// we are not recursing on that node by checking for undef here.
if (IdxVal == 0 && Result.getValueType().is256BitVector() &&
- Result.getOpcode() != ISD::UNDEF) {
+ !Result.isUndef()) {
EVT ResultVT = Result.getValueType();
SDValue ZeroIndex = DAG.getIntPtrConstant(0, dl);
SDValue Undef = DAG.getUNDEF(ResultVT);
@@ -4607,17 +4534,18 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
return DAG.getBitcast(ResultVT, Vec256);
}
- return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
+ return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}
-static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
- SelectionDAG &DAG, SDLoc dl) {
+static SDValue insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
+ SelectionDAG &DAG, const SDLoc &dl) {
assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
- return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
+ return insertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}
/// Insert i1-subvector to i1-vector.
-static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG) {
+static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
SDLoc dl(Op);
SDValue Vec = Op.getOperand(0);
@@ -4647,43 +4575,71 @@ static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG) {
// 3. Subvector should be inserted in the middle (for example v2i1
// to v16i1, index 2)
+ // Extend to a natively supported kshift width.
+ MVT MinVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
+ MVT WideOpVT = OpVT;
+ if (OpVT.getSizeInBits() < MinVT.getStoreSizeInBits())
+ WideOpVT = MinVT;
+
SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
- SDValue Undef = DAG.getUNDEF(OpVT);
- SDValue WideSubVec =
- DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Undef, SubVec, ZeroIdx);
- if (Vec.isUndef())
- return DAG.getNode(X86ISD::VSHLI, dl, OpVT, WideSubVec,
- DAG.getConstant(IdxVal, dl, MVT::i8));
+ SDValue Undef = DAG.getUNDEF(WideOpVT);
+ SDValue WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
+ Undef, SubVec, ZeroIdx);
+
+ // Extract the sub-vector if required.
+ auto ExtractSubVec = [&](SDValue V) {
+ return (WideOpVT == OpVT) ? V : DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
+ OpVT, V, ZeroIdx);
+ };
+
+ if (Vec.isUndef()) {
+ if (IdxVal != 0) {
+ SDValue ShiftBits = DAG.getConstant(IdxVal, dl, MVT::i8);
+ WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec, ShiftBits);
+ }
+ return ExtractSubVec(WideSubVec);
+ }
if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
+ NumElems = WideOpVT.getVectorNumElements();
unsigned ShiftLeft = NumElems - SubVecNumElems;
unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
- WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, WideSubVec,
- DAG.getConstant(ShiftLeft, dl, MVT::i8));
- return ShiftRight ? DAG.getNode(X86ISD::VSRLI, dl, OpVT, WideSubVec,
- DAG.getConstant(ShiftRight, dl, MVT::i8)) : WideSubVec;
+ Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec,
+ DAG.getConstant(ShiftLeft, dl, MVT::i8));
+ Vec = ShiftRight ? DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec,
+ DAG.getConstant(ShiftRight, dl, MVT::i8)) : Vec;
+ return ExtractSubVec(Vec);
}
if (IdxVal == 0) {
// Zero lower bits of the Vec
SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
- Vec = DAG.getNode(X86ISD::VSRLI, dl, OpVT, Vec, ShiftBits);
- Vec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec, ShiftBits);
- // Merge them together
- return DAG.getNode(ISD::OR, dl, OpVT, Vec, WideSubVec);
+ Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
+ Vec = DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec, ShiftBits);
+ Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, Vec, ShiftBits);
+ // Merge them together; SubVec should be zero-extended.
+ WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
+ getZeroVector(WideOpVT, Subtarget, DAG, dl),
+ SubVec, ZeroIdx);
+ Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
+ return ExtractSubVec(Vec);
}
// Simple case when we put subvector in the upper part
if (IdxVal + SubVecNumElems == NumElems) {
// Zero upper bits of the Vec
- WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec,
- DAG.getConstant(IdxVal, dl, MVT::i8));
+ WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec,
+ DAG.getConstant(IdxVal, dl, MVT::i8));
SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
- Vec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec, ShiftBits);
- Vec = DAG.getNode(X86ISD::VSRLI, dl, OpVT, Vec, ShiftBits);
- return DAG.getNode(ISD::OR, dl, OpVT, Vec, WideSubVec);
+ Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
+ Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, Vec, ShiftBits);
+ Vec = DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec, ShiftBits);
+ Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
+ return ExtractSubVec(Vec);
}
// Subvector should be inserted in the middle - use shuffle
+ WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Undef,
+ SubVec, ZeroIdx);
SmallVector<int, 64> Mask;
for (unsigned i = 0; i < NumElems; ++i)
Mask.push_back(i >= IdxVal && i < IdxVal + SubVecNumElems ?
@@ -4695,103 +4651,206 @@ static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG) {
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
-static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
+static SDValue concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
unsigned NumElems, SelectionDAG &DAG,
- SDLoc dl) {
- SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
- return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
+ const SDLoc &dl) {
+ SDValue V = insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
+ return insert128BitVector(V, V2, NumElems / 2, DAG, dl);
}
-static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
+static SDValue concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
unsigned NumElems, SelectionDAG &DAG,
- SDLoc dl) {
- SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
- return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
+ const SDLoc &dl) {
+ SDValue V = insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
+ return insert256BitVector(V, V2, NumElems / 2, DAG, dl);
}
/// Returns a vector of specified type with all bits set.
/// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
-/// no AVX2 supprt, use two <4 x i32> inserted in a <8 x i32> appropriately.
+/// no AVX2 support, use two <4 x i32> inserted in a <8 x i32> appropriately.
/// Then bitcast to their original type, ensuring they get CSE'd.
-static SDValue getOnesVector(EVT VT, const X86Subtarget *Subtarget,
- SelectionDAG &DAG, SDLoc dl) {
- assert(VT.isVector() && "Expected a vector type");
+static SDValue getOnesVector(EVT VT, const X86Subtarget &Subtarget,
+ SelectionDAG &DAG, const SDLoc &dl) {
+ assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
+ "Expected a 128/256/512-bit vector type");
- SDValue Cst = DAG.getConstant(~0U, dl, MVT::i32);
+ APInt Ones = APInt::getAllOnesValue(32);
+ unsigned NumElts = VT.getSizeInBits() / 32;
SDValue Vec;
- if (VT.is512BitVector()) {
- SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
- Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
- } else if (VT.is256BitVector()) {
- if (Subtarget->hasInt256()) { // AVX2
- SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
- } else { // AVX
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
- Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
- }
- } else if (VT.is128BitVector()) {
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
- } else
- llvm_unreachable("Unexpected vector type");
-
+ if (!Subtarget.hasInt256() && NumElts == 8) {
+ Vec = DAG.getConstant(Ones, dl, MVT::v4i32);
+ Vec = concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
+ } else {
+ Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
+ }
return DAG.getBitcast(VT, Vec);
}
/// Returns a vector_shuffle node for an unpackl operation.
-static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
- SDValue V2) {
+static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
+ SDValue V1, SDValue V2) {
+ assert(VT.is128BitVector() && "Expected a 128-bit vector type");
unsigned NumElems = VT.getVectorNumElements();
- SmallVector<int, 8> Mask;
+ SmallVector<int, 8> Mask(NumElems);
for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
- Mask.push_back(i);
- Mask.push_back(i + NumElems);
+ Mask[i * 2] = i;
+ Mask[i * 2 + 1] = i + NumElems;
}
- return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
+ return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
/// Returns a vector_shuffle node for an unpackh operation.
-static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
- SDValue V2) {
+static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
+ SDValue V1, SDValue V2) {
+ assert(VT.is128BitVector() && "Expected a 128-bit vector type");
unsigned NumElems = VT.getVectorNumElements();
- SmallVector<int, 8> Mask;
+ SmallVector<int, 8> Mask(NumElems);
for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
- Mask.push_back(i + Half);
- Mask.push_back(i + NumElems + Half);
+ Mask[i * 2] = i + Half;
+ Mask[i * 2 + 1] = i + NumElems + Half;
}
- return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
+ return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
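The unpack helpers now pre-size the mask and write the interleaved indices directly instead of growing the vector with push_back; for v4i32 the low-half mask is {0, 4, 1, 5} and the high-half mask is {2, 6, 3, 7}, matching unpcklps/unpckhps. A standalone check:

#include <cassert>
#include <vector>

// Low-half interleave: V1[0],V2[0],V1[1],V2[1],...
static std::vector<int> unpacklMask(unsigned NumElems) {
  std::vector<int> Mask(NumElems);
  for (unsigned i = 0, e = NumElems / 2; i != e; ++i) {
    Mask[i * 2] = i;
    Mask[i * 2 + 1] = i + NumElems;
  }
  return Mask;
}

// High-half interleave: V1[H],V2[H],V1[H+1],V2[H+1],... with H = NumElems/2.
static std::vector<int> unpackhMask(unsigned NumElems) {
  std::vector<int> Mask(NumElems);
  for (unsigned i = 0, Half = NumElems / 2; i != Half; ++i) {
    Mask[i * 2] = i + Half;
    Mask[i * 2 + 1] = i + NumElems + Half;
  }
  return Mask;
}

int main() {
  assert(unpacklMask(4) == (std::vector<int>{0, 4, 1, 5}));
  assert(unpackhMask(4) == (std::vector<int>{2, 6, 3, 7}));
  return 0;
}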
/// Return a vector_shuffle of the specified vector of zero or undef vector.
/// This produces a shuffle where the low element of V2 is swizzled into the
/// zero/undef vector, landing at element Idx.
/// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
-static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
+static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
bool IsZero,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = V2.getSimpleValueType();
SDValue V1 = IsZero
? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
- unsigned NumElems = VT.getVectorNumElements();
- SmallVector<int, 16> MaskVec;
- for (unsigned i = 0; i != NumElems; ++i)
+ int NumElems = VT.getVectorNumElements();
+ SmallVector<int, 16> MaskVec(NumElems);
+ for (int i = 0; i != NumElems; ++i)
// If this is the insertion idx, put the low elt of V2 here.
- MaskVec.push_back(i == Idx ? NumElems : i);
- return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
+ MaskVec[i] = (i == Idx) ? NumElems : i;
+ return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
+}
+
+static SDValue peekThroughBitcasts(SDValue V) {
+ while (V.getNode() && V.getOpcode() == ISD::BITCAST)
+ V = V.getOperand(0);
+ return V;
+}
+
+static bool getTargetShuffleMaskIndices(SDValue MaskNode,
+ unsigned MaskEltSizeInBits,
+ SmallVectorImpl<uint64_t> &RawMask) {
+ MaskNode = peekThroughBitcasts(MaskNode);
+
+ MVT VT = MaskNode.getSimpleValueType();
+ assert(VT.isVector() && "Can't produce a non-vector with a build_vector!");
+
+ // Split an APInt element into MaskEltSizeInBits sized pieces and
+ // insert into the shuffle mask.
+ auto SplitElementToMask = [&](APInt Element) {
+ // Note that this is x86 and so always little endian: the low byte is
+ // the first byte of the mask.
+ int Split = VT.getScalarSizeInBits() / MaskEltSizeInBits;
+ for (int i = 0; i < Split; ++i) {
+ APInt RawElt = Element.getLoBits(MaskEltSizeInBits);
+ Element = Element.lshr(MaskEltSizeInBits);
+ RawMask.push_back(RawElt.getZExtValue());
+ }
+ };
+
+ if (MaskNode.getOpcode() == X86ISD::VBROADCAST) {
+ // TODO: Handle (MaskEltSizeInBits % VT.getScalarSizeInBits()) == 0
+ // TODO: Handle (VT.getScalarSizeInBits() % MaskEltSizeInBits) == 0
+ if (VT.getScalarSizeInBits() != MaskEltSizeInBits)
+ return false;
+ if (auto *CN = dyn_cast<ConstantSDNode>(MaskNode.getOperand(0))) {
+ const APInt &MaskElement = CN->getAPIntValue();
+ for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
+ APInt RawElt = MaskElement.getLoBits(MaskEltSizeInBits);
+ RawMask.push_back(RawElt.getZExtValue());
+ }
+ return true;
+ }
+ return false;
+ }
+
+ if (MaskNode.getOpcode() == X86ISD::VZEXT_MOVL &&
+ MaskNode.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR) {
+
+ // TODO: Handle (MaskEltSizeInBits % VT.getScalarSizeInBits()) == 0
+ if ((VT.getScalarSizeInBits() % MaskEltSizeInBits) != 0)
+ return false;
+ unsigned ElementSplit = VT.getScalarSizeInBits() / MaskEltSizeInBits;
+
+ SDValue MaskOp = MaskNode.getOperand(0).getOperand(0);
+ if (auto *CN = dyn_cast<ConstantSDNode>(MaskOp)) {
+ SplitElementToMask(CN->getAPIntValue());
+ RawMask.append((VT.getVectorNumElements() - 1) * ElementSplit, 0);
+ return true;
+ }
+ return false;
+ }
+
+ if (MaskNode.getOpcode() != ISD::BUILD_VECTOR)
+ return false;
+
+ // We can always decode if the buildvector is all zero constants,
+ // but can't use isBuildVectorAllZeros as it might contain UNDEFs.
+ if (llvm::all_of(MaskNode->ops(), X86::isZeroNode)) {
+ RawMask.append(VT.getSizeInBits() / MaskEltSizeInBits, 0);
+ return true;
+ }
+
+ // TODO: Handle (MaskEltSizeInBits % VT.getScalarSizeInBits()) == 0
+ if ((VT.getScalarSizeInBits() % MaskEltSizeInBits) != 0)
+ return false;
+
+ for (SDValue Op : MaskNode->ops()) {
+ if (auto *CN = dyn_cast<ConstantSDNode>(Op.getNode()))
+ SplitElementToMask(CN->getAPIntValue());
+ else if (auto *CFN = dyn_cast<ConstantFPSDNode>(Op.getNode()))
+ SplitElementToMask(CFN->getValueAPF().bitcastToAPInt());
+ else
+ return false;
+ }
+
+ return true;
+}
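SplitElementToMask is the heart of the new helper: a wide constant lane is cut into MaskEltSizeInBits pieces, low bits first, since x86 is little endian and the low byte is mask element 0. A standalone uint64_t model of the split:

#include <cassert>
#include <cstdint>
#include <vector>

// Split one Width-bit constant lane into (Width / EltBits) mask indices,
// emitting the low bits first, exactly as a little-endian byte view would.
static void splitElementToMask(uint64_t Element, unsigned Width,
                               unsigned EltBits,
                               std::vector<uint64_t> &RawMask) {
  for (unsigned i = 0, e = Width / EltBits; i != e; ++i) {
    RawMask.push_back(Element & ((1ULL << EltBits) - 1));
    Element >>= EltBits;
  }
}

int main() {
  std::vector<uint64_t> RawMask;
  // One v2i64 lane holding PSHUFB-style byte indices 0x0706050403020100.
  splitElementToMask(0x0706050403020100ULL, 64, 8, RawMask);
  assert(RawMask.size() == 8);
  for (uint64_t i = 0; i != 8; ++i)
    assert(RawMask[i] == i); // the low byte became element 0
  return 0;
}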
+
+static const Constant *getTargetShuffleMaskConstant(SDValue MaskNode) {
+ MaskNode = peekThroughBitcasts(MaskNode);
+
+ auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
+ if (!MaskLoad)
+ return nullptr;
+
+ SDValue Ptr = MaskLoad->getBasePtr();
+ if (Ptr->getOpcode() == X86ISD::Wrapper ||
+ Ptr->getOpcode() == X86ISD::WrapperRIP)
+ Ptr = Ptr->getOperand(0);
+
+ auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
+ if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
+ return nullptr;
+
+ return dyn_cast<Constant>(MaskCP->getConstVal());
}
/// Calculates the shuffle mask corresponding to the target-specific opcode.
-/// Returns true if the Mask could be calculated. Sets IsUnary to true if only
-/// uses one source. Note that this will set IsUnary for shuffles which use a
-/// single input multiple times, and in those cases it will
-/// adjust the mask to only have indices within that single input.
+/// If the mask could be calculated, returns it in \p Mask, returns the shuffle
+/// operands in \p Ops, and returns true.
+/// Sets \p IsUnary to true if only one source is used. Note that this will set
+/// IsUnary for shuffles which use a single input multiple times, and in those
+/// cases it will adjust the mask to only have indices within that single input.
+/// It is an error to call this with non-empty Mask/Ops vectors.
static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
+ SmallVectorImpl<SDValue> &Ops,
SmallVectorImpl<int> &Mask, bool &IsUnary) {
unsigned NumElems = VT.getVectorNumElements();
SDValue ImmN;
+ assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
+ assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
+
IsUnary = false;
bool IsFakeUnary = false;
switch(N->getOpcode()) {
@@ -4826,9 +4885,22 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::PALIGNR:
+ assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
ImmN = N->getOperand(N->getNumOperands()-1);
DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
break;
+ case X86ISD::VSHLDQ:
+ assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
+ ImmN = N->getOperand(N->getNumOperands() - 1);
+ DecodePSLLDQMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
+ IsUnary = true;
+ break;
+ case X86ISD::VSRLDQ:
+ assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
+ ImmN = N->getOperand(N->getNumOperands() - 1);
+ DecodePSRLDQMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
+ IsUnary = true;
+ break;
case X86ISD::PSHUFD:
case X86ISD::VPERMILPI:
ImmN = N->getOperand(N->getNumOperands()-1);
@@ -4845,70 +4917,51 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = true;
break;
+ case X86ISD::VZEXT_MOVL:
+ DecodeZeroMoveLowMask(VT, Mask);
+ IsUnary = true;
+ break;
+ case X86ISD::VBROADCAST: {
+ // We only decode broadcasts of same-sized vectors at the moment.
+ if (N->getOperand(0).getValueType() == VT) {
+ DecodeVectorBroadcast(VT, Mask);
+ IsUnary = true;
+ break;
+ }
+ return false;
+ }
+ case X86ISD::VPERMILPV: {
+ IsUnary = true;
+ SDValue MaskNode = N->getOperand(1);
+ unsigned MaskEltSize = VT.getScalarSizeInBits();
+ SmallVector<uint64_t, 32> RawMask;
+ if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask)) {
+ DecodeVPERMILPMask(VT, RawMask, Mask);
+ break;
+ }
+ if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
+ DecodeVPERMILPMask(C, MaskEltSize, Mask);
+ break;
+ }
+ return false;
+ }
case X86ISD::PSHUFB: {
IsUnary = true;
SDValue MaskNode = N->getOperand(1);
- while (MaskNode->getOpcode() == ISD::BITCAST)
- MaskNode = MaskNode->getOperand(0);
-
- if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
- // If we have a build-vector, then things are easy.
- MVT VT = MaskNode.getSimpleValueType();
- assert(VT.isVector() &&
- "Can't produce a non-vector with a build_vector!");
- if (!VT.isInteger())
- return false;
-
- int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
-
- SmallVector<uint64_t, 32> RawMask;
- for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
- SDValue Op = MaskNode->getOperand(i);
- if (Op->getOpcode() == ISD::UNDEF) {
- RawMask.push_back((uint64_t)SM_SentinelUndef);
- continue;
- }
- auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
- if (!CN)
- return false;
- APInt MaskElement = CN->getAPIntValue();
-
- // We now have to decode the element which could be any integer size and
- // extract each byte of it.
- for (int j = 0; j < NumBytesPerElement; ++j) {
- // Note that this is x86 and so always little endian: the low byte is
- // the first byte of the mask.
- RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
- MaskElement = MaskElement.lshr(8);
- }
- }
+ SmallVector<uint64_t, 32> RawMask;
+ if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask)) {
DecodePSHUFBMask(RawMask, Mask);
break;
}
-
- auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
- if (!MaskLoad)
- return false;
-
- SDValue Ptr = MaskLoad->getBasePtr();
- if (Ptr->getOpcode() == X86ISD::Wrapper ||
- Ptr->getOpcode() == X86ISD::WrapperRIP)
- Ptr = Ptr->getOperand(0);
-
- auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
- if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
- return false;
-
- if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
+ if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
DecodePSHUFBMask(C, Mask);
break;
}
-
return false;
}
case X86ISD::VPERMI:
ImmN = N->getOperand(N->getNumOperands()-1);
- DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
+ DecodeVPERMMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = true;
break;
case X86ISD::MOVSS:
@@ -4937,110 +4990,63 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
case X86ISD::MOVLPS:
// Not yet implemented
return false;
+ case X86ISD::VPERMIL2: {
+ IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
+ unsigned MaskEltSize = VT.getScalarSizeInBits();
+ SDValue MaskNode = N->getOperand(2);
+ SDValue CtrlNode = N->getOperand(3);
+ if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
+ unsigned CtrlImm = CtrlOp->getZExtValue();
+ SmallVector<uint64_t, 32> RawMask;
+ if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask)) {
+ DecodeVPERMIL2PMask(VT, CtrlImm, RawMask, Mask);
+ break;
+ }
+ if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
+ DecodeVPERMIL2PMask(C, CtrlImm, MaskEltSize, Mask);
+ break;
+ }
+ }
+ return false;
+ }
+ case X86ISD::VPPERM: {
+ IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
+ SDValue MaskNode = N->getOperand(2);
+ SmallVector<uint64_t, 32> RawMask;
+ if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask)) {
+ DecodeVPPERMMask(RawMask, Mask);
+ break;
+ }
+ if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
+ DecodeVPPERMMask(C, Mask);
+ break;
+ }
+ return false;
+ }
case X86ISD::VPERMV: {
IsUnary = true;
+ // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
+ Ops.push_back(N->getOperand(1));
SDValue MaskNode = N->getOperand(0);
- while (MaskNode->getOpcode() == ISD::BITCAST)
- MaskNode = MaskNode->getOperand(0);
-
- unsigned MaskLoBits = Log2_64(VT.getVectorNumElements());
SmallVector<uint64_t, 32> RawMask;
- if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
- // If we have a build-vector, then things are easy.
- assert(MaskNode.getSimpleValueType().isInteger() &&
- MaskNode.getSimpleValueType().getVectorNumElements() ==
- VT.getVectorNumElements());
-
- for (unsigned i = 0; i < MaskNode->getNumOperands(); ++i) {
- SDValue Op = MaskNode->getOperand(i);
- if (Op->getOpcode() == ISD::UNDEF)
- RawMask.push_back((uint64_t)SM_SentinelUndef);
- else if (isa<ConstantSDNode>(Op)) {
- APInt MaskElement = cast<ConstantSDNode>(Op)->getAPIntValue();
- RawMask.push_back(MaskElement.getLoBits(MaskLoBits).getZExtValue());
- } else
- return false;
- }
+ unsigned MaskEltSize = VT.getScalarSizeInBits();
+ if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask)) {
DecodeVPERMVMask(RawMask, Mask);
break;
}
- if (MaskNode->getOpcode() == X86ISD::VBROADCAST) {
- unsigned NumEltsInMask = MaskNode->getNumOperands();
- MaskNode = MaskNode->getOperand(0);
- if (auto *CN = dyn_cast<ConstantSDNode>(MaskNode)) {
- APInt MaskEltValue = CN->getAPIntValue();
- for (unsigned i = 0; i < NumEltsInMask; ++i)
- RawMask.push_back(MaskEltValue.getLoBits(MaskLoBits).getZExtValue());
- DecodeVPERMVMask(RawMask, Mask);
- break;
- }
- // It may be a scalar load
- }
-
- auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
- if (!MaskLoad)
- return false;
-
- SDValue Ptr = MaskLoad->getBasePtr();
- if (Ptr->getOpcode() == X86ISD::Wrapper ||
- Ptr->getOpcode() == X86ISD::WrapperRIP)
- Ptr = Ptr->getOperand(0);
-
- auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
- if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
- return false;
-
- if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
+ if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
DecodeVPERMVMask(C, VT, Mask);
break;
}
return false;
}
case X86ISD::VPERMV3: {
- IsUnary = false;
+ IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
+ // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
+ Ops.push_back(N->getOperand(0));
+ Ops.push_back(N->getOperand(2));
SDValue MaskNode = N->getOperand(1);
- while (MaskNode->getOpcode() == ISD::BITCAST)
- MaskNode = MaskNode->getOperand(1);
-
- if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
- // If we have a build-vector, then things are easy.
- assert(MaskNode.getSimpleValueType().isInteger() &&
- MaskNode.getSimpleValueType().getVectorNumElements() ==
- VT.getVectorNumElements());
-
- SmallVector<uint64_t, 32> RawMask;
- unsigned MaskLoBits = Log2_64(VT.getVectorNumElements()*2);
-
- for (unsigned i = 0; i < MaskNode->getNumOperands(); ++i) {
- SDValue Op = MaskNode->getOperand(i);
- if (Op->getOpcode() == ISD::UNDEF)
- RawMask.push_back((uint64_t)SM_SentinelUndef);
- else {
- auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
- if (!CN)
- return false;
- APInt MaskElement = CN->getAPIntValue();
- RawMask.push_back(MaskElement.getLoBits(MaskLoBits).getZExtValue());
- }
- }
- DecodeVPERMV3Mask(RawMask, Mask);
- break;
- }
-
- auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
- if (!MaskLoad)
- return false;
-
- SDValue Ptr = MaskLoad->getBasePtr();
- if (Ptr->getOpcode() == X86ISD::Wrapper ||
- Ptr->getOpcode() == X86ISD::WrapperRIP)
- Ptr = Ptr->getOperand(0);
-
- auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
- if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
- return false;
-
- if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
+ if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
DecodeVPERMV3Mask(C, VT, Mask);
break;
}
@@ -5055,8 +5061,7 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
// Check if we're getting a shuffle mask with zero'd elements.
if (!AllowSentinelZero)
- if (std::any_of(Mask.begin(), Mask.end(),
- [](int M){ return M == SM_SentinelZero; }))
+ if (llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
return false;
// If we have a fake unary shuffle, the shuffle mask is spread across two
@@ -5067,6 +5072,123 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
if (M >= (int)Mask.size())
M -= Mask.size();
+ // If we didn't already add operands in the opcode-specific code, default to
+ // adding 1 or 2 operands starting at 0.
+ if (Ops.empty()) {
+ Ops.push_back(N->getOperand(0));
+ if (!IsUnary || IsFakeUnary)
+ Ops.push_back(N->getOperand(1));
+ }
+
+ return true;
+}
+
+/// Check a target shuffle mask's inputs to see if we can set any values to
+/// SM_SentinelZero - this is for elements that are known to be zero
+/// (not just zeroable) from their inputs.
+/// Returns true if the target shuffle mask was decoded.
+static bool setTargetShuffleZeroElements(SDValue N,
+ SmallVectorImpl<int> &Mask,
+ SmallVectorImpl<SDValue> &Ops) {
+ bool IsUnary;
+ if (!isTargetShuffle(N.getOpcode()))
+ return false;
+ if (!getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), true, Ops,
+ Mask, IsUnary))
+ return false;
+
+ SDValue V1 = Ops[0];
+ SDValue V2 = IsUnary ? V1 : Ops[1];
+
+ V1 = peekThroughBitcasts(V1);
+ V2 = peekThroughBitcasts(V2);
+
+ for (int i = 0, Size = Mask.size(); i < Size; ++i) {
+ int M = Mask[i];
+
+ // Already decoded as SM_SentinelZero / SM_SentinelUndef.
+ if (M < 0)
+ continue;
+
+ // Determine shuffle input and normalize the mask.
+ SDValue V = M < Size ? V1 : V2;
+ M %= Size;
+
+ // We are referencing an UNDEF input.
+ if (V.isUndef()) {
+ Mask[i] = SM_SentinelUndef;
+ continue;
+ }
+
+ // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
+ if (V.getOpcode() != ISD::BUILD_VECTOR)
+ continue;
+
+ // If the BUILD_VECTOR has fewer elements, then each (larger) source
+ // element must be UNDEF/ZERO.
+ // TODO: Is it worth testing the individual bits of a constant?
+ if ((Size % V.getNumOperands()) == 0) {
+ int Scale = Size / V->getNumOperands();
+ SDValue Op = V.getOperand(M / Scale);
+ if (Op.isUndef())
+ Mask[i] = SM_SentinelUndef;
+ else if (X86::isZeroNode(Op))
+ Mask[i] = SM_SentinelZero;
+ continue;
+ }
+
+ // If the BUILD_VECTOR has more elements, then the (smaller) source
+ // elements covering it must be all UNDEF or all ZERO.
+ if ((V.getNumOperands() % Size) == 0) {
+ int Scale = V->getNumOperands() / Size;
+ bool AllUndef = true;
+ bool AllZero = true;
+ for (int j = 0; j < Scale; ++j) {
+ SDValue Op = V.getOperand((M * Scale) + j);
+ AllUndef &= Op.isUndef();
+ AllZero &= X86::isZeroNode(Op);
+ }
+ if (AllUndef)
+ Mask[i] = SM_SentinelUndef;
+ else if (AllZero)
+ Mask[i] = SM_SentinelZero;
+ continue;
+ }
+ }
+
+ return true;
+}
+
+/// Calls setTargetShuffleZeroElements to resolve a target shuffle mask's
+/// inputs and set the SM_SentinelUndef and SM_SentinelZero values. Then
+/// checks the remaining input indices in case we now have a unary shuffle
+/// and adjusts the Op0/Op1 inputs accordingly.
+/// Returns true if the target shuffle mask was decoded.
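+/// e.g. a v4 mask <4, 5, 6, 7> references only Op1, so it is commuted to
+/// <0, 1, 2, 3> with Op0 taking Op1's value and Op1 cleared.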
+static bool resolveTargetShuffleInputs(SDValue Op, SDValue &Op0, SDValue &Op1,
+ SmallVectorImpl<int> &Mask) {
+ SmallVector<SDValue, 2> Ops;
+ if (!setTargetShuffleZeroElements(Op, Mask, Ops))
+ return false;
+
+ int NumElts = Mask.size();
+ bool Op0InUse = std::any_of(Mask.begin(), Mask.end(), [NumElts](int Idx) {
+ return 0 <= Idx && Idx < NumElts;
+ });
+ bool Op1InUse = std::any_of(Mask.begin(), Mask.end(),
+ [NumElts](int Idx) { return NumElts <= Idx; });
+
+ Op0 = Op0InUse ? Ops[0] : SDValue();
+ Op1 = Op1InUse ? Ops[1] : SDValue();
+
+ // We're only using Op1 - commute the mask and inputs.
+ if (!Op0InUse && Op1InUse) {
+ for (int &M : Mask)
+ if (NumElts <= M)
+ M -= NumElts;
+ Op0 = Op1;
+ Op1 = SDValue();
+ }
+
return true;
}
@@ -5097,19 +5219,24 @@ static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
// Recurse into target specific vector shuffles to find scalars.
if (isTargetShuffle(Opcode)) {
MVT ShufVT = V.getSimpleValueType();
+ MVT ShufSVT = ShufVT.getVectorElementType();
int NumElems = (int)ShufVT.getVectorNumElements();
SmallVector<int, 16> ShuffleMask;
+ SmallVector<SDValue, 16> ShuffleOps;
bool IsUnary;
- if (!getTargetShuffleMask(N, ShufVT, false, ShuffleMask, IsUnary))
+ if (!getTargetShuffleMask(N, ShufVT, true, ShuffleOps, ShuffleMask, IsUnary))
return SDValue();
int Elt = ShuffleMask[Index];
+ if (Elt == SM_SentinelZero)
+ return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(N), ShufSVT)
+ : DAG.getConstantFP(+0.0, SDLoc(N), ShufSVT);
if (Elt == SM_SentinelUndef)
- return DAG.getUNDEF(ShufVT.getVectorElementType());
+ return DAG.getUNDEF(ShufSVT);
assert(0 <= Elt && Elt < (2*NumElems) && "Shuffle index out of range");
- SDValue NewV = (Elt < NumElems) ? N->getOperand(0) : N->getOperand(1);
+ SDValue NewV = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
Depth+1);
}
@@ -5138,7 +5265,7 @@ static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
- const X86Subtarget* Subtarget,
+ const X86Subtarget &Subtarget,
const TargetLowering &TLI) {
if (NumNonZero > 8)
return SDValue();
@@ -5148,7 +5275,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
bool First = true;
// SSE4.1 - use PINSRB to insert each byte directly.
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
for (unsigned i = 0; i < 16; ++i) {
bool isNonZero = (NonZeros & (1 << i)) != 0;
if (isNonZero) {
@@ -5208,7 +5335,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
- const X86Subtarget* Subtarget,
+ const X86Subtarget &Subtarget,
const TargetLowering &TLI) {
if (NumNonZero > 4)
return SDValue();
@@ -5237,13 +5364,13 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
/// Custom lower build_vector of v4i32 or v4f32.
static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
const TargetLowering &TLI) {
// Find all zeroable elements.
std::bitset<4> Zeroable;
for (int i=0; i < 4; ++i) {
SDValue Elt = Op->getOperand(i);
- Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
+ Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
}
assert(Zeroable.size() - Zeroable.count() > 1 &&
"We expect at least two non-zero elements!");
@@ -5296,12 +5423,12 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
// Let the shuffle legalizer deal with blend operations.
SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
if (V1.getSimpleValueType() != VT)
- V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
- return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
+ V1 = DAG.getBitcast(VT, V1);
+ return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, Mask);
}
// See if we can lower this build_vector to a INSERTPS.
- if (!Subtarget->hasSSE41())
+ if (!Subtarget.hasSSE41())
return SDValue();
SDValue V2 = Elt.getOperand(0);
@@ -5326,9 +5453,9 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
assert(V1.getNode() && "Expected at least two non-zero elements!");
if (V1.getSimpleValueType() != MVT::v4f32)
- V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
+ V1 = DAG.getBitcast(MVT::v4f32, V1);
if (V2.getSimpleValueType() != MVT::v4f32)
- V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
+ V2 = DAG.getBitcast(MVT::v4f32, V2);
// Ok, we can emit an INSERTPS instruction.
unsigned ZMask = Zeroable.to_ulong();
@@ -5342,11 +5469,11 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
}
/// Return a vector logical shift node.
-static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
- unsigned NumBits, SelectionDAG &DAG,
- const TargetLowering &TLI, SDLoc dl) {
+static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
+ SelectionDAG &DAG, const TargetLowering &TLI,
+ const SDLoc &dl) {
assert(VT.is128BitVector() && "Unknown type for VShift");
- MVT ShVT = MVT::v2i64;
+ MVT ShVT = MVT::v16i8;
unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
SrcOp = DAG.getBitcast(ShVT, SrcOp);
MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(DAG.getDataLayout(), VT);
@@ -5355,8 +5482,8 @@ static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
}
-static SDValue
-LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
+static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
+ SelectionDAG &DAG) {
// Check if the scalar load can be widened into a vector load. And if
// the address is "base + cst" see if the cst can be "absorbed" into
@@ -5418,12 +5545,11 @@ LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
- LD->getPointerInfo().getWithOffset(StartOffset),
- false, false, false, 0);
+ LD->getPointerInfo().getWithOffset(StartOffset));
SmallVector<int, 8> Mask(NumElems, EltNo);
- return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
+ return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
}
return SDValue();
@@ -5433,55 +5559,103 @@ LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
/// elements can be replaced by a single large load which has the same value as
/// a build_vector or insert_subvector whose loaded operands are 'Elts'.
///
-/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
-///
-/// FIXME: we'd also like to handle the case where the last elements are zero
-/// rather than undef via VZEXT_LOAD, but we do not detect that case today.
-/// There's even a handy isZeroNode for that purpose.
+/// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
SDLoc &DL, SelectionDAG &DAG,
bool isAfterLegalize) {
unsigned NumElems = Elts.size();
- LoadSDNode *LDBase = nullptr;
- unsigned LastLoadedElt = -1U;
+ int LastLoadedElt = -1;
+ SmallBitVector LoadMask(NumElems, false);
+ SmallBitVector ZeroMask(NumElems, false);
+ SmallBitVector UndefMask(NumElems, false);
- // For each element in the initializer, see if we've found a load or an undef.
- // If we don't find an initial load element, or later load elements are
- // non-consecutive, bail out.
+ // For each element in the initializer, see if we've found a load, zero or an
+ // undef.
for (unsigned i = 0; i < NumElems; ++i) {
- SDValue Elt = Elts[i];
- // Look through a bitcast.
- if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
- Elt = Elt.getOperand(0);
- if (!Elt.getNode() ||
- (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
+ SDValue Elt = peekThroughBitcasts(Elts[i]);
+ if (!Elt.getNode())
return SDValue();
- if (!LDBase) {
- if (Elt.getNode()->getOpcode() == ISD::UNDEF)
- return SDValue();
- LDBase = cast<LoadSDNode>(Elt.getNode());
- LastLoadedElt = i;
- continue;
- }
- if (Elt.getOpcode() == ISD::UNDEF)
- continue;
- LoadSDNode *LD = cast<LoadSDNode>(Elt);
- EVT LdVT = Elt.getValueType();
- // Each loaded element must be the correct fractional portion of the
- // requested vector load.
- if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
- return SDValue();
- if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
+ if (Elt.isUndef())
+ UndefMask[i] = true;
+ else if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode()))
+ ZeroMask[i] = true;
+ else if (ISD::isNON_EXTLoad(Elt.getNode())) {
+ LoadMask[i] = true;
+ LastLoadedElt = i;
+ // Each loaded element must be the correct fractional portion of the
+ // requested vector load.
+ if ((NumElems * Elt.getValueSizeInBits()) != VT.getSizeInBits())
+ return SDValue();
+ } else
return SDValue();
- LastLoadedElt = i;
}
+ assert((ZeroMask | UndefMask | LoadMask).count() == NumElems &&
+ "Incomplete element masks");
+
+ // Handle Special Cases - all undef or undef/zero.
+ if (UndefMask.count() == NumElems)
+ return DAG.getUNDEF(VT);
+
+ // FIXME: Should we return this as a BUILD_VECTOR instead?
+ if ((ZeroMask | UndefMask).count() == NumElems)
+ return VT.isInteger() ? DAG.getConstant(0, DL, VT)
+ : DAG.getConstantFP(0.0, DL, VT);
+
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ int FirstLoadedElt = LoadMask.find_first();
+ SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
+ LoadSDNode *LDBase = cast<LoadSDNode>(EltBase);
+ EVT LDBaseVT = EltBase.getValueType();
+
+ // Consecutive loads can contain UNDEFs but not ZERO elements.
+ // Consecutive loads with UNDEF and ZERO elements require an additional
+ // shuffle stage to clear the ZERO elements.
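+ // e.g. <ld a[0], ld a[1], undef, ld a[3]> can merge into one wide load,
+ // while <ld a[0], ld a[1], zero, ld a[3]> needs a shuffle to re-zero
+ // element 2 afterwards.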
+ bool IsConsecutiveLoad = true;
+ bool IsConsecutiveLoadWithZeros = true;
+ for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
+ if (LoadMask[i]) {
+ SDValue Elt = peekThroughBitcasts(Elts[i]);
+ LoadSDNode *LD = cast<LoadSDNode>(Elt);
+ if (!DAG.areNonVolatileConsecutiveLoads(
+ LD, LDBase, Elt.getValueType().getStoreSizeInBits() / 8,
+ i - FirstLoadedElt)) {
+ IsConsecutiveLoad = false;
+ IsConsecutiveLoadWithZeros = false;
+ break;
+ }
+ } else if (ZeroMask[i]) {
+ IsConsecutiveLoad = false;
+ }
+ }
+
+ auto CreateLoad = [&DAG, &DL](EVT VT, LoadSDNode *LDBase) {
+ auto MMOFlags = LDBase->getMemOperand()->getFlags();
+ assert(!(MMOFlags & MachineMemOperand::MOVolatile) &&
+ "Cannot merge volatile loads.");
+ SDValue NewLd =
+ DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
+ LDBase->getPointerInfo(), LDBase->getAlignment(), MMOFlags);
+
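+ // If the original load's chain has other users, merge the old and new
+ // chains with a TokenFactor so existing chain dependencies are preserved.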
+ if (LDBase->hasAnyUseOfValue(1)) {
+ SDValue NewChain =
+ DAG.getNode(ISD::TokenFactor, DL, MVT::Other, SDValue(LDBase, 1),
+ SDValue(NewLd.getNode(), 1));
+ DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
+ DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
+ SDValue(NewLd.getNode(), 1));
+ }
+
+ return NewLd;
+ };
+ // LOAD - all consecutive load/undefs (must start/end with a load).
// If we have found an entire vector of loads and undefs, then return a large
- // load of the entire vector width starting at the base pointer. If we found
- // consecutive loads for the low half, generate a vzext_load node.
- if (LastLoadedElt == NumElems - 1) {
+ // load of the entire vector width starting at the base pointer.
+ // If the vector contains zeros, then attempt to shuffle those elements.
+ if (FirstLoadedElt == 0 && LastLoadedElt == (int)(NumElems - 1) &&
+ (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
assert(LDBase && "Did not find base load for merging consecutive loads");
EVT EltVT = LDBase->getValueType(0);
// Ensure that the input vector size for the merged loads matches the
@@ -5489,72 +5663,93 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
return SDValue();
- if (isAfterLegalize &&
- !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
+ if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
return SDValue();
- SDValue NewLd = SDValue();
+ if (IsConsecutiveLoad)
+ return CreateLoad(VT, LDBase);
+
+ // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
+ // vector and a zero vector to clear out the zero elements.
+ if (!isAfterLegalize && NumElems == VT.getVectorNumElements()) {
+ SmallVector<int, 4> ClearMask(NumElems, -1);
+ for (unsigned i = 0; i < NumElems; ++i) {
+ if (ZeroMask[i])
+ ClearMask[i] = i + NumElems;
+ else if (LoadMask[i])
+ ClearMask[i] = i;
+ }
+ SDValue V = CreateLoad(VT, LDBase);
+ SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
+ : DAG.getConstantFP(0.0, DL, VT);
+ return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
+ }
+ }
+
+ int LoadSize =
+ (1 + LastLoadedElt - FirstLoadedElt) * LDBaseVT.getStoreSizeInBits();
+
+ // VZEXT_LOAD - consecutive load/undefs followed by zeros/undefs.
+ if (IsConsecutiveLoad && FirstLoadedElt == 0 && LoadSize == 64 &&
+ ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
+ MVT VecSVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
+ MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / 64);
+ if (TLI.isTypeLegal(VecVT)) {
+ SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
+ SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
+ SDValue ResNode =
+ DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT,
+ LDBase->getPointerInfo(),
+ LDBase->getAlignment(),
+ false/*isVolatile*/, true/*ReadMem*/,
+ false/*WriteMem*/);
- NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
- LDBase->getPointerInfo(), LDBase->isVolatile(),
- LDBase->isNonTemporal(), LDBase->isInvariant(),
- LDBase->getAlignment());
+ // Make sure the newly-created LOAD is in the same position as LDBase in
+ // terms of dependency. We create a TokenFactor for LDBase and ResNode,
+ // and update uses of LDBase's output chain to use the TokenFactor.
+ if (LDBase->hasAnyUseOfValue(1)) {
+ SDValue NewChain =
+ DAG.getNode(ISD::TokenFactor, DL, MVT::Other, SDValue(LDBase, 1),
+ SDValue(ResNode.getNode(), 1));
+ DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
+ DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
+ SDValue(ResNode.getNode(), 1));
+ }
- if (LDBase->hasAnyUseOfValue(1)) {
- SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- SDValue(LDBase, 1),
- SDValue(NewLd.getNode(), 1));
- DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
- DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
- SDValue(NewLd.getNode(), 1));
+ return DAG.getBitcast(VT, ResNode);
}
-
- return NewLd;
}
- //TODO: The code below fires only for for loading the low v2i32 / v2f32
- //of a v4i32 / v4f32. It's probably worth generalizing.
- EVT EltVT = VT.getVectorElementType();
- if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
- DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
- SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
- SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
- SDValue ResNode =
- DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
- LDBase->getPointerInfo(),
- LDBase->getAlignment(),
- false/*isVolatile*/, true/*ReadMem*/,
- false/*WriteMem*/);
-
- // Make sure the newly-created LOAD is in the same position as LDBase in
- // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
- // update uses of LDBase's output chain to use the TokenFactor.
- if (LDBase->hasAnyUseOfValue(1)) {
- SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
- DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
- DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
- SDValue(ResNode.getNode(), 1));
+ // VZEXT_MOVL - consecutive 32-bit load/undefs followed by zeros/undefs.
+ if (IsConsecutiveLoad && FirstLoadedElt == 0 && LoadSize == 32 &&
+ ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
+ MVT VecSVT = VT.isFloatingPoint() ? MVT::f32 : MVT::i32;
+ MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / 32);
+ if (TLI.isTypeLegal(VecVT)) {
+ SDValue V = LastLoadedElt != 0 ? CreateLoad(VecSVT, LDBase)
+ : DAG.getBitcast(VecSVT, EltBase);
+ V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, V);
+ V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, VecVT, V);
+ return DAG.getBitcast(VT, V);
}
-
- return DAG.getBitcast(VT, ResNode);
}
+
return SDValue();
}
-/// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
-/// to generate a splat value for the following cases:
+/// Attempt to use the vbroadcast instruction to generate a splat value for the
+/// following cases:
/// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
/// 2. A splat shuffle which uses a scalar_to_vector node which comes from
/// a scalar load, or a constant.
/// The VBROADCAST node is returned when a pattern is found,
/// or SDValue() otherwise.
-static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
+static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// VBROADCAST requires AVX.
// TODO: Splats could be generated for non-AVX CPUs using SSE
// instructions, but there's less potential gain for only 128-bit vectors.
- if (!Subtarget->hasAVX())
+ if (!Subtarget.hasAVX())
return SDValue();
MVT VT = Op.getSimpleValueType();
@@ -5604,12 +5799,12 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
Sc.getOpcode() != ISD::BUILD_VECTOR) {
- if (!Subtarget->hasInt256())
+ if (!Subtarget.hasInt256())
return SDValue();
// Use the register form of the broadcast instruction available on AVX2.
if (VT.getSizeInBits() >= 256)
- Sc = Extract128BitVector(Sc, 0, DAG, dl);
+ Sc = extract128BitVector(Sc, 0, DAG, dl);
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
}
@@ -5622,7 +5817,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
// Constants may have multiple users.
// AVX-512 has register version of the broadcast
- bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
+ bool hasRegVer = Subtarget.hasAVX512() && VT.is512BitVector() &&
Ld.getValueType().getSizeInBits() >= 32;
if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
!hasRegVer))
@@ -5647,7 +5842,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
// from the constant pool and not to broadcast it from a scalar.
// But override that restriction when optimizing for size.
// TODO: Check if splatting is recommended for other AVX-capable CPUs.
- if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
+ if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
EVT CVT = Ld.getValueType();
assert(!CVT.isVector() && "Must not broadcast a vector type");
@@ -5656,7 +5851,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
// with AVX2, also splat i8 and i16.
// With pattern matching, the VBROADCAST node may become a VMOVDDUP.
if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
- (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
+ (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
const Constant *C = nullptr;
if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
C = CI->getConstantIntValue();
@@ -5671,8 +5866,8 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
Ld = DAG.getLoad(
CVT, dl, DAG.getEntryNode(), CP,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
- false, false, Alignment);
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
+ Alignment);
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
}
@@ -5681,7 +5876,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
bool IsLoad = ISD::isNormalLoad(Ld.getNode());
// Handle AVX2 in-register broadcasts.
- if (!IsLoad && Subtarget->hasInt256() &&
+ if (!IsLoad && Subtarget.hasInt256() &&
(ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
@@ -5690,12 +5885,12 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
return SDValue();
if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
- (Subtarget->hasVLX() && ScalarSize == 64))
+ (Subtarget.hasVLX() && ScalarSize == 64))
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
// The integer check is needed for the 64-bit into 128-bit so it doesn't match
// double since there is no vbroadcastsd xmm
- if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
+ if (Subtarget.hasInt256() && Ld.getValueType().isInteger()) {
if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
}
@@ -5801,7 +5996,7 @@ static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
return SDValue();
VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
- SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
+ SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
unsigned Idx = InsertIndices[i];
NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
@@ -5818,7 +6013,7 @@ static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
uint64_t Immediate = 0;
for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
SDValue In = Op.getOperand(idx);
- if (In.getOpcode() != ISD::UNDEF)
+ if (!In.isUndef())
Immediate |= cast<ConstantSDNode>(In)->getZExtValue() << idx;
}
SDLoc dl(Op);
@@ -5835,17 +6030,11 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
"Unexpected type in LowerBUILD_VECTORvXi1!");
SDLoc dl(Op);
- if (ISD::isBuildVectorAllZeros(Op.getNode())) {
- SDValue Cst = DAG.getTargetConstant(0, dl, MVT::i1);
- SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
- }
+ if (ISD::isBuildVectorAllZeros(Op.getNode()))
+ return DAG.getTargetConstant(0, dl, VT);
- if (ISD::isBuildVectorAllOnes(Op.getNode())) {
- SDValue Cst = DAG.getTargetConstant(1, dl, MVT::i1);
- SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
- }
+ if (ISD::isBuildVectorAllOnes(Op.getNode()))
+ return DAG.getTargetConstant(1, dl, VT);
if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
SDValue Imm = ConvertI1VectorToInteger(Op, DAG);
@@ -5864,7 +6053,7 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
int SplatIdx = -1;
for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
SDValue In = Op.getOperand(idx);
- if (In.getOpcode() == ISD::UNDEF)
+ if (In.isUndef())
continue;
if (!isa<ConstantSDNode>(In))
NonConstIdx.push_back(idx);
@@ -5872,7 +6061,7 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
Immediate |= cast<ConstantSDNode>(In)->getZExtValue() << idx;
HasConstElts = true;
}
- if (SplatIdx == -1)
+ if (SplatIdx < 0)
SplatIdx = idx;
else if (In != Op.getOperand(SplatIdx))
IsSplat = false;
@@ -5903,7 +6092,7 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
DAG.getIntPtrConstant(0, dl));
}
- for (unsigned i = 0; i < NonConstIdx.size(); ++i) {
+ for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
unsigned InsertIdx = NonConstIdx[i];
DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
Op.getOperand(InsertIdx),
@@ -5948,7 +6137,7 @@ static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
SDValue Op = N->getOperand(i + BaseIdx);
// Skip UNDEFs.
- if (Op->getOpcode() == ISD::UNDEF) {
+ if (Op->isUndef()) {
// Update the expected vector extract index.
if (i * 2 == NumElts)
ExpectedVExtractIdx = BaseIdx;
@@ -5978,13 +6167,13 @@ static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
if (i * 2 < NumElts) {
- if (V0.getOpcode() == ISD::UNDEF) {
+ if (V0.isUndef()) {
V0 = Op0.getOperand(0);
if (V0.getValueType() != VT)
return false;
}
} else {
- if (V1.getOpcode() == ISD::UNDEF) {
+ if (V1.isUndef()) {
V1 = Op0.getOperand(0);
if (V1.getValueType() != VT)
return false;
@@ -6041,37 +6230,35 @@ static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
/// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
/// the upper 128-bits of the result.
static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
- SDLoc DL, SelectionDAG &DAG,
+ const SDLoc &DL, SelectionDAG &DAG,
unsigned X86Opcode, bool Mode,
bool isUndefLO, bool isUndefHI) {
- EVT VT = V0.getValueType();
- assert(VT.is256BitVector() && VT == V1.getValueType() &&
+ MVT VT = V0.getSimpleValueType();
+ assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
"Invalid nodes in input!");
unsigned NumElts = VT.getVectorNumElements();
- SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
- SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
- SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
- SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
- EVT NewVT = V0_LO.getValueType();
+ SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
+ SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
+ SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
+ SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
+ MVT NewVT = V0_LO.getSimpleValueType();
SDValue LO = DAG.getUNDEF(NewVT);
SDValue HI = DAG.getUNDEF(NewVT);
if (Mode) {
// Don't emit a horizontal binop if the result is expected to be UNDEF.
- if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
+ if (!isUndefLO && !V0->isUndef())
LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
- if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
+ if (!isUndefHI && !V1->isUndef())
HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
} else {
// Don't emit a horizontal binop if the result is expected to be UNDEF.
- if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
- V1_LO->getOpcode() != ISD::UNDEF))
+ if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
- if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
- V1_HI->getOpcode() != ISD::UNDEF))
+ if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
}
@@ -6081,10 +6268,10 @@ static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
/// Try to fold a build_vector that performs an 'addsub' to an X86ISD::ADDSUB
/// node.
static SDValue LowerToAddSub(const BuildVectorSDNode *BV,
- const X86Subtarget *Subtarget, SelectionDAG &DAG) {
+ const X86Subtarget &Subtarget, SelectionDAG &DAG) {
MVT VT = BV->getSimpleValueType(0);
- if ((!Subtarget->hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) &&
- (!Subtarget->hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64)))
+ if ((!Subtarget.hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) &&
+ (!Subtarget.hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64)))
return SDValue();
SDLoc DL(BV);
@@ -6142,12 +6329,12 @@ static SDValue LowerToAddSub(const BuildVectorSDNode *BV,
SubFound = true;
// Update InVec0 and InVec1.
- if (InVec0.getOpcode() == ISD::UNDEF) {
+ if (InVec0.isUndef()) {
InVec0 = Op0.getOperand(0);
if (InVec0.getSimpleValueType() != VT)
return SDValue();
}
- if (InVec1.getOpcode() == ISD::UNDEF) {
+ if (InVec1.isUndef()) {
InVec1 = Op1.getOperand(0);
if (InVec1.getSimpleValueType() != VT)
return SDValue();
@@ -6174,8 +6361,7 @@ static SDValue LowerToAddSub(const BuildVectorSDNode *BV,
}
// Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
- if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
- InVec1.getOpcode() != ISD::UNDEF)
+ if (AddFound && SubFound && !InVec0.isUndef() && !InVec1.isUndef())
return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
return SDValue();
@@ -6183,7 +6369,7 @@ static SDValue LowerToAddSub(const BuildVectorSDNode *BV,
/// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = BV->getSimpleValueType(0);
unsigned NumElts = VT.getVectorNumElements();
@@ -6193,11 +6379,11 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
// Count the number of UNDEF operands in the build_vector in input.
for (unsigned i = 0, e = Half; i != e; ++i)
- if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
+ if (BV->getOperand(i)->isUndef())
NumUndefsLO++;
for (unsigned i = Half, e = NumElts; i != e; ++i)
- if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
+ if (BV->getOperand(i)->isUndef())
NumUndefsHI++;
// Early exit if this is either a build_vector of all UNDEFs or all the
@@ -6207,14 +6393,14 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
SDLoc DL(BV);
SDValue InVec0, InVec1;
- if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
+ if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) {
// Try to match an SSE3 float HADD/HSUB.
if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
- } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
+ } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget.hasSSSE3()) {
// Try to match an SSSE3 integer HADD/HSUB.
if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
@@ -6223,7 +6409,7 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
}
- if (!Subtarget->hasAVX())
+ if (!Subtarget.hasAVX())
return SDValue();
if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
@@ -6232,18 +6418,14 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
SDValue InVec2, InVec3;
if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
- ((InVec0.getOpcode() == ISD::UNDEF ||
- InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
- ((InVec1.getOpcode() == ISD::UNDEF ||
- InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
+ ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
+ ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
- ((InVec0.getOpcode() == ISD::UNDEF ||
- InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
- ((InVec1.getOpcode() == ISD::UNDEF ||
- InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
+ ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
+ ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
} else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
// Try to match an AVX2 horizontal add/sub of signed integers.
@@ -6253,17 +6435,13 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
- ((InVec0.getOpcode() == ISD::UNDEF ||
- InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
- ((InVec1.getOpcode() == ISD::UNDEF ||
- InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
+ ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
+ ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
X86Opcode = X86ISD::HADD;
else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
- ((InVec0.getOpcode() == ISD::UNDEF ||
- InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
- ((InVec1.getOpcode() == ISD::UNDEF ||
- InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
+ ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
+ ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
X86Opcode = X86ISD::HSUB;
else
CanFold = false;
@@ -6271,7 +6449,7 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
if (CanFold) {
// Fold this build_vector into a single horizontal add/sub.
// Do this only if the target has AVX2.
- if (Subtarget->hasAVX2())
+ if (Subtarget.hasAVX2())
return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
// Do not try to expand this build_vector into a pair of horizontal
@@ -6289,7 +6467,7 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
}
if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
- VT == MVT::v16i16) && Subtarget->hasAVX()) {
+ VT == MVT::v16i16) && Subtarget.hasAVX()) {
unsigned X86Opcode;
if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
X86Opcode = X86ISD::HADD;
@@ -6318,39 +6496,101 @@ static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
return SDValue();
}
-SDValue
-X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
- SDLoc dl(Op);
-
+/// If a BUILD_VECTOR's source elements all apply the same bit operation and
+/// one of their operands is constant, lower to a pair of BUILD_VECTOR and
+/// just apply the bit to the vectors.
+/// NOTE: It's not in our interest to turn this into a general purpose
+/// vectorizer, but enough scalar bit operations are created by the later
+/// legalization + scalarization stages to need basic support.
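+/// e.g. (build_vector (and x, 1), (and y, 2), (and z, 4), (and w, 8))
+/// --> (and (build_vector x, y, z, w), (build_vector 1, 2, 4, 8))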
+static SDValue lowerBuildVectorToBitOp(SDValue Op, SelectionDAG &DAG) {
+ SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
- MVT ExtVT = VT.getVectorElementType();
- unsigned NumElems = Op.getNumOperands();
+ unsigned NumElems = VT.getVectorNumElements();
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- // Generate vectors for predicate vectors.
- if (VT.getVectorElementType() == MVT::i1 && Subtarget->hasAVX512())
- return LowerBUILD_VECTORvXi1(Op, DAG);
+ // Check that all elements have the same opcode.
+ // TODO: Should we allow UNDEFS and if so how many?
+ unsigned Opcode = Op.getOperand(0).getOpcode();
+ for (unsigned i = 1; i < NumElems; ++i)
+ if (Opcode != Op.getOperand(i).getOpcode())
+ return SDValue();
- // Vectors containing all zeros can be matched by pxor and xorps later
+ // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
+ switch (Opcode) {
+ default:
+ return SDValue();
+ case ISD::AND:
+ case ISD::XOR:
+ case ISD::OR:
+ if (!TLI.isOperationLegalOrPromote(Opcode, VT))
+ return SDValue();
+ break;
+ }
+
+ SmallVector<SDValue, 4> LHSElts, RHSElts;
+ for (SDValue Elt : Op->ops()) {
+ SDValue LHS = Elt.getOperand(0);
+ SDValue RHS = Elt.getOperand(1);
+
+ // We expect the canonicalized RHS operand to be the constant.
+ if (!isa<ConstantSDNode>(RHS))
+ return SDValue();
+ LHSElts.push_back(LHS);
+ RHSElts.push_back(RHS);
+ }
+
+ SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
+ SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
+ return DAG.getNode(Opcode, DL, VT, LHS, RHS);
+}
+
+/// Create a vector constant without a load. SSE/AVX provide the bare minimum
+/// functionality to do this, so it's all zeros, all ones, or some derivation
+/// that is cheap to calculate.
+static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ SDLoc DL(Op);
+ MVT VT = Op.getSimpleValueType();
+
+ // Vectors containing all zeros can be matched by pxor and xorps.
if (ISD::isBuildVectorAllZeros(Op.getNode())) {
// Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
// and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
return Op;
- return getZeroVector(VT, Subtarget, DAG, dl);
+ return getZeroVector(VT, Subtarget, DAG, DL);
}
// Vectors containing all ones can be matched by pcmpeqd on 128-bit width
// vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
// vpcmpeqd on 256-bit vectors.
- if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
- if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
+ if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
+ if (VT == MVT::v4i32 || VT == MVT::v16i32 ||
+ (VT == MVT::v8i32 && Subtarget.hasInt256()))
return Op;
- if (!VT.is512BitVector())
- return getOnesVector(VT, Subtarget, DAG, dl);
+ return getOnesVector(VT, Subtarget, DAG, DL);
}
+ return SDValue();
+}
+
+SDValue
+X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc dl(Op);
+
+ MVT VT = Op.getSimpleValueType();
+ MVT ExtVT = VT.getVectorElementType();
+ unsigned NumElems = Op.getNumOperands();
+
+ // Generate vectors for predicate vectors.
+ if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
+ return LowerBUILD_VECTORvXi1(Op, DAG);
+
+ if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
+ return VectorConstant;
+
BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
if (SDValue AddSub = LowerToAddSub(BV, Subtarget, DAG))
return AddSub;
@@ -6358,6 +6598,8 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
return HorizontalOp;
if (SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG))
return Broadcast;
+ if (SDValue BitOp = lowerBuildVectorToBitOp(Op, DAG))
+ return BitOp;
unsigned EVTBits = ExtVT.getSizeInBits();
@@ -6368,7 +6610,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
SmallSet<SDValue, 8> Values;
for (unsigned i = 0; i < NumElems; ++i) {
SDValue Elt = Op.getOperand(i);
- if (Elt.getOpcode() == ISD::UNDEF)
+ if (Elt.isUndef())
continue;
Values.insert(Elt);
if (Elt.getOpcode() != ISD::Constant &&
@@ -6397,7 +6639,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// insertion that way. Only do this if the value is non-constant or if the
// value is a constant being inserted into element 0. It is cheaper to do
// a constant pool load than it is to do a movd + shuffle.
- if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
+ if (ExtVT == MVT::i64 && !Subtarget.is64Bit() &&
(!IsAllConstants || Idx == 0)) {
if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
// Handle SSE only.
@@ -6422,7 +6664,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
- (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
+ (ExtVT == MVT::i64 && Subtarget.is64Bit())) {
if (VT.is512BitVector()) {
SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
@@ -6439,16 +6681,17 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// it to i32 first.
if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
- if (VT.is256BitVector()) {
- if (Subtarget->hasAVX()) {
- Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v8i32, Item);
+ if (VT.getSizeInBits() >= 256) {
+ MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
+ if (Subtarget.hasAVX()) {
+ Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
} else {
// Without AVX, we need to extend to a 128-bit vector and then
// insert into the 256-bit vector.
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
- SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
- Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
+ SDValue ZeroVec = getZeroVector(ShufVT, Subtarget, DAG, dl);
+ Item = insert128BitVector(ZeroVec, Item, 0, DAG, dl);
}
} else {
assert(VT.is128BitVector() && "Expected an SSE value type!");
@@ -6504,28 +6747,30 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (IsAllConstants)
return SDValue();
- // For AVX-length vectors, see if we can use a vector load to get all of the
- // elements, otherwise build the individual 128-bit pieces and use
+ // See if we can use a vector load to get all of the elements.
+ if (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) {
+ SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
+ if (SDValue LD = EltsFromConsecutiveLoads(VT, Ops, dl, DAG, false))
+ return LD;
+ }
+
+ // For AVX-length vectors, build the individual 128-bit pieces and use
// shuffles to put them in place.
if (VT.is256BitVector() || VT.is512BitVector()) {
- SmallVector<SDValue, 64> V(Op->op_begin(), Op->op_begin() + NumElems);
-
- // Check for a build vector of consecutive loads.
- if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
- return LD;
+ SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
// Build both the lower and upper subvector.
- SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
- makeArrayRef(&V[0], NumElems/2));
- SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
- makeArrayRef(&V[NumElems / 2], NumElems/2));
+ SDValue Lower =
+ DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElems / 2));
+ SDValue Upper = DAG.getBuildVector(
+ HVT, dl, makeArrayRef(&Ops[NumElems / 2], NumElems / 2));
// Recreate the wider vector with the lower and upper part.
if (VT.is256BitVector())
- return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
- return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
+ return concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
+ return concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
}
// Let legalizer expand 2-wide build_vectors.
@@ -6557,30 +6802,30 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
return V;
// If element VT is == 32 bits, turn it into a number of shuffles.
- SmallVector<SDValue, 8> V(NumElems);
if (NumElems == 4 && NumZero > 0) {
+ SmallVector<SDValue, 8> Ops(NumElems);
for (unsigned i = 0; i < 4; ++i) {
bool isZero = !(NonZeros & (1ULL << i));
if (isZero)
- V[i] = getZeroVector(VT, Subtarget, DAG, dl);
+ Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
else
- V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
+ Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
}
for (unsigned i = 0; i < 2; ++i) {
switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
default: break;
case 0:
- V[i] = V[i*2]; // Must be a zero vector.
+ Ops[i] = Ops[i*2]; // Must be a zero vector.
break;
case 1:
- V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
+ Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
break;
case 2:
- V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
+ Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
break;
case 3:
- V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
+ Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
break;
}
}
@@ -6593,32 +6838,24 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
static_cast<int>(Reverse2 ? NumElems : NumElems+1)
};
- return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
+ return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
}
if (Values.size() > 1 && VT.is128BitVector()) {
- // Check for a build vector of consecutive loads.
- for (unsigned i = 0; i < NumElems; ++i)
- V[i] = Op.getOperand(i);
-
- // Check for elements which are consecutive loads.
- if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
- return LD;
-
// Check for a build vector from mostly shuffle plus few inserting.
if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
return Sh;
// For SSE 4.1, use insertps to put the high elements into the low element.
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
SDValue Result;
- if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
+ if (!Op.getOperand(0).isUndef())
Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
else
Result = DAG.getUNDEF(VT);
for (unsigned i = 1; i < NumElems; ++i) {
- if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
+ if (Op.getOperand(i).isUndef()) continue;
Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
}
@@ -6628,11 +6865,12 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// Otherwise, expand into a number of unpckl*, start by extending each of
// our (non-undef) elements to the full vector width with the element in the
// bottom slot of the vector (which generates no code for SSE).
+ SmallVector<SDValue, 8> Ops(NumElems);
for (unsigned i = 0; i < NumElems; ++i) {
- if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
- V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
+ if (!Op.getOperand(i).isUndef())
+ Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
else
- V[i] = DAG.getUNDEF(VT);
+ Ops[i] = DAG.getUNDEF(VT);
}
// Next, we iteratively mix elements, e.g. for v4f32:
@@ -6642,20 +6880,20 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
unsigned EltStride = NumElems >> 1;
while (EltStride != 0) {
for (unsigned i = 0; i < EltStride; ++i) {
- // If V[i+EltStride] is undef and this is the first round of mixing,
+ // If Ops[i+EltStride] is undef and this is the first round of mixing,
// then it is safe to just drop this shuffle: V[i] is already in the
// right place, the one element (since it's the first round) being
// inserted as undef can be dropped. This isn't safe for successive
// rounds because they will permute elements within both vectors.
- if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
+ if (Ops[i+EltStride].isUndef() &&
EltStride == NumElems/2)
continue;
- V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
+ Ops[i] = getUnpackl(DAG, dl, VT, Ops[i], Ops[i + EltStride]);
}
EltStride >>= 1;
}
- return V[0];
+ return Ops[0];
}
return SDValue();
}
@@ -6673,21 +6911,23 @@ static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
SDValue V2 = Op.getOperand(1);
unsigned NumElems = ResVT.getVectorNumElements();
if (ResVT.is256BitVector())
- return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
+ return concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
if (Op.getNumOperands() == 4) {
MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
ResVT.getVectorNumElements()/2);
SDValue V3 = Op.getOperand(2);
SDValue V4 = Op.getOperand(3);
- return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
- Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
+ return concat256BitVectors(
+ concat128BitVectors(V1, V2, HalfVT, NumElems / 2, DAG, dl),
+ concat128BitVectors(V3, V4, HalfVT, NumElems / 2, DAG, dl), ResVT,
+ NumElems, DAG, dl);
}
- return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
+ return concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
}
static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG & DAG) {
SDLoc dl(Op);
MVT ResVT = Op.getSimpleValueType();
@@ -6764,7 +7004,7 @@ static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
}
static SDValue LowerCONCAT_VECTORS(SDValue Op,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
if (VT.getVectorElementType() == MVT::i1)
@@ -6800,24 +7040,11 @@ static SDValue LowerCONCAT_VECTORS(SDValue Op,
/// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
/// in-place shuffle are 'no-op's.
static bool isNoopShuffleMask(ArrayRef<int> Mask) {
- for (int i = 0, Size = Mask.size(); i < Size; ++i)
- if (Mask[i] != -1 && Mask[i] != i)
- return false;
- return true;
-}
-
-/// \brief Helper function to classify a mask as a single-input mask.
-///
-/// This isn't a generic single-input test because in the vector shuffle
-/// lowering we canonicalize single inputs to be the first input operand. This
-/// means we can more quickly test for a single input by only checking whether
-/// an input from the second operand exists. We also assume that the size of
-/// mask corresponds to the size of the input vectors which isn't true in the
-/// fully general case.
-static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
- for (int M : Mask)
- if (M >= (int)Mask.size())
+ for (int i = 0, Size = Mask.size(); i < Size; ++i) {
+ assert(Mask[i] >= -1 && "Out of bound mask element!");
+ if (Mask[i] >= 0 && Mask[i] != i)
return false;
+ }
return true;
}
@@ -6835,22 +7062,22 @@ static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
return false;
}
-/// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
+/// \brief Test whether a shuffle mask is equivalent within each sub-lane.
///
/// This checks a shuffle mask to see if it is performing the same
-/// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
+/// lane-relative shuffle in each sub-lane. This trivially implies
/// that it is also not lane-crossing. It may however involve a blend from the
/// same lane of a second vector.
///
/// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
/// non-trivial to compute in the face of undef lanes. The representation is
-/// *not* suitable for use with existing 128-bit shuffles as it will contain
-/// entries from both V1 and V2 inputs to the wider mask.
-static bool
-is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
- SmallVectorImpl<int> &RepeatedMask) {
- int LaneSize = 128 / VT.getScalarSizeInBits();
- RepeatedMask.resize(LaneSize, -1);
+/// suitable for use with existing 128-bit shuffles as entries from the second
+/// vector have been remapped to [LaneSize, 2*LaneSize).
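+/// e.g. with 128-bit lanes, the v8i32 mask <0, 9, 2, 11, 4, 13, 6, 15>
+/// repeats in both lanes and yields RepeatedMask <0, 5, 2, 7>.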
+static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
+ ArrayRef<int> Mask,
+ SmallVectorImpl<int> &RepeatedMask) {
+ int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
+ RepeatedMask.assign(LaneSize, -1);
int Size = Mask.size();
for (int i = 0; i < Size; ++i) {
if (Mask[i] < 0)
@@ -6860,17 +7087,55 @@ is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
return false;
// Ok, handle the in-lane shuffles by detecting if and when they repeat.
- if (RepeatedMask[i % LaneSize] == -1)
+ // Adjust second vector indices to start at LaneSize instead of Size.
+ int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
+ : Mask[i] % LaneSize + LaneSize;
+ if (RepeatedMask[i % LaneSize] < 0)
// This is the first non-undef entry in this slot of a 128-bit lane.
- RepeatedMask[i % LaneSize] =
- Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
- else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
+ RepeatedMask[i % LaneSize] = LocalM;
+ else if (RepeatedMask[i % LaneSize] != LocalM)
// Found a mismatch with the repeated mask.
return false;
}
return true;
}
+/// Test whether a shuffle mask is equivalent within each 128-bit lane.
+static bool
+is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
+ SmallVectorImpl<int> &RepeatedMask) {
+ return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
+}
+
+/// Test whether a shuffle mask is equivalent within each 256-bit lane.
+static bool
+is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
+ SmallVectorImpl<int> &RepeatedMask) {
+ return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
+}
+
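+/// Scale a shuffle mask by widening each element into Scale consecutive
+/// entries, e.g. scaleShuffleMask(2, <1, 2>) produces <2, 3, 4, 5>;
+/// sentinel values (< 0) are simply repeated Scale times.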
+static void scaleShuffleMask(int Scale, ArrayRef<int> Mask,
+ SmallVectorImpl<int> &ScaledMask) {
+ assert(0 < Scale && "Unexpected scaling factor");
+ int NumElts = Mask.size();
+ ScaledMask.assign(NumElts * Scale, -1);
+
+ for (int i = 0; i != NumElts; ++i) {
+ int M = Mask[i];
+
+ // Repeat sentinel values across each widened slot.
+ if (M < 0) {
+ for (int s = 0; s != Scale; ++s)
+ ScaledMask[(Scale * i) + s] = M;
+ continue;
+ }
+
+ // Scale the mask element, incrementing the index across the widened slots.
+ for (int s = 0; s != Scale; ++s)
+ ScaledMask[(Scale * i) + s] = (Scale * M) + s;
+ }
+}
+
/// \brief Checks whether a shuffle mask is equivalent to an explicit list of
/// arguments.
///
@@ -6893,8 +7158,9 @@ static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
- for (int i = 0; i < Size; ++i)
- if (Mask[i] != -1 && Mask[i] != ExpectedMask[i]) {
+ for (int i = 0; i < Size; ++i) {
+ assert(Mask[i] >= -1 && "Out of bound mask element!");
+ if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
if (!MaskBV || !ExpectedBV ||
@@ -6902,6 +7168,32 @@ static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
ExpectedBV->getOperand(ExpectedMask[i] % Size))
return false;
}
+}
+
+ return true;
+}
+
+/// Checks whether a target shuffle mask is equivalent to an explicit pattern.
+///
+/// The masks must be exactly the same width.
+///
+/// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
+/// value in ExpectedMask is always accepted. Otherwise the indices must match.
+///
+/// SM_SentinelZero is accepted as a valid negative index but must match in both.
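+/// e.g. Mask <0, SM_SentinelUndef, SM_SentinelZero> matches ExpectedMask
+/// <0, 3, SM_SentinelZero> but not ExpectedMask <0, 3, 1>.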
+static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
+ ArrayRef<int> ExpectedMask) {
+ int Size = Mask.size();
+ if (Size != (int)ExpectedMask.size())
+ return false;
+
+ for (int i = 0; i < Size; ++i)
+ if (Mask[i] == SM_SentinelUndef)
+ continue;
+ else if (Mask[i] < 0 && Mask[i] != SM_SentinelZero)
+ return false;
+ else if (Mask[i] != ExpectedMask[i])
+ return false;
return true;
}
@@ -6914,8 +7206,7 @@ static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
/// example.
///
/// NB: We rely heavily on "undef" masks preserving the input lane.
-static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL,
- SelectionDAG &DAG) {
+static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
@@ -6923,11 +7214,16 @@ static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL,
assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
unsigned Imm = 0;
- Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
- Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
- Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
- Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
- return DAG.getConstant(Imm, DL, MVT::i8);
+ Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
+ Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
+ Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
+ Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
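+ // e.g. Mask {3, 1, 2, 0} gives Imm == 0x27 (0b00100111), with any
+ // undef (-1) element defaulting to its identity index.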
+ return Imm;
+}
+
+static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL,
+ SelectionDAG &DAG) {
+ return DAG.getConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
}
/// \brief Compute whether each element of a shuffle is zeroable.
@@ -6941,15 +7237,16 @@ static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL,
static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
SDValue V1, SDValue V2) {
SmallBitVector Zeroable(Mask.size(), false);
-
- while (V1.getOpcode() == ISD::BITCAST)
- V1 = V1->getOperand(0);
- while (V2.getOpcode() == ISD::BITCAST)
- V2 = V2->getOperand(0);
+ V1 = peekThroughBitcasts(V1);
+ V2 = peekThroughBitcasts(V2);
bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
+ int VectorSizeInBits = V1.getValueType().getSizeInBits();
+ int ScalarSizeInBits = VectorSizeInBits / Mask.size();
+ assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
+
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
int M = Mask[i];
// Handle the easy cases.
@@ -6958,38 +7255,119 @@ static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
continue;
}
- // If this is an index into a build_vector node (which has the same number
- // of elements), dig out the input value and use it.
+ // Determine shuffle input and normalize the mask.
SDValue V = M < Size ? V1 : V2;
- if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
+ M %= Size;
+
+ // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
+ if (V.getOpcode() != ISD::BUILD_VECTOR)
continue;
- SDValue Input = V.getOperand(M % Size);
- // The UNDEF opcode check really should be dead code here, but not quite
- // worth asserting on (it isn't invalid, just unexpected).
- if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
- Zeroable[i] = true;
+ // If the BUILD_VECTOR has fewer elements than the mask, then the bitcasted
+ // portion of the (larger) source element must be UNDEF/ZERO.
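+ // e.g. for a v8i16 mask over a v2i64 BUILD_VECTOR, Scale = 4 and mask
+ // element M tests the 16 bits at offset (M % 4) * 16 of operand M / 4.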
+ if ((Size % V.getNumOperands()) == 0) {
+ int Scale = Size / V->getNumOperands();
+ SDValue Op = V.getOperand(M / Scale);
+ if (Op.isUndef() || X86::isZeroNode(Op))
+ Zeroable[i] = true;
+ else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
+ APInt Val = Cst->getAPIntValue();
+ Val = Val.lshr((M % Scale) * ScalarSizeInBits);
+ Val = Val.getLoBits(ScalarSizeInBits);
+ Zeroable[i] = (Val == 0);
+ } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
+ APInt Val = Cst->getValueAPF().bitcastToAPInt();
+ Val = Val.lshr((M % Scale) * ScalarSizeInBits);
+ Val = Val.getLoBits(ScalarSizeInBits);
+ Zeroable[i] = (Val == 0);
+ }
+ continue;
+ }
+
+ // If the BUILD_VECTOR has more elements than the mask, then all of the
+ // (smaller) source elements must be UNDEF or ZERO.
+ if ((V.getNumOperands() % Size) == 0) {
+ int Scale = V->getNumOperands() / Size;
+ bool AllZeroable = true;
+ for (int j = 0; j < Scale; ++j) {
+ SDValue Op = V.getOperand((M * Scale) + j);
+ AllZeroable &= (Op.isUndef() || X86::isZeroNode(Op));
+ }
+ Zeroable[i] = AllZeroable;
+ continue;
+ }
}
return Zeroable;
}
+/// Try to lower a shuffle with a single PSHUFB of V1.
+/// This is only possible if V2 is unused (at all, or only for zero elements).
+static SDValue lowerVectorShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
+ ArrayRef<int> Mask, SDValue V1,
+ SDValue V2,
+ const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
+ int Size = Mask.size();
+ int LaneSize = 128 / VT.getScalarSizeInBits();
+ const int NumBytes = VT.getSizeInBits() / 8;
+ const int NumEltBytes = VT.getScalarSizeInBits() / 8;
+
+ assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
+ (Subtarget.hasAVX2() && VT.is256BitVector()) ||
+ (Subtarget.hasBWI() && VT.is512BitVector()));
+
+ SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
+
+ SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
+ // Sign bit set in i8 mask means zero element.
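+ // e.g. a mask byte of 0x80 writes 0x00 to the destination; bytes 0x00-0x0F
+ // select a source byte within the 128-bit lane.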
+ SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
+
+ for (int i = 0; i < NumBytes; ++i) {
+ int M = Mask[i / NumEltBytes];
+ if (M < 0) {
+ PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
+ continue;
+ }
+ if (Zeroable[i / NumEltBytes]) {
+ PSHUFBMask[i] = ZeroMask;
+ continue;
+ }
+ // Only allow V1.
+ if (M >= Size)
+ return SDValue();
+
+ // PSHUFB can't cross lanes, ensure this doesn't happen.
+ if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
+ return SDValue();
+
+ M = M % LaneSize;
+ M = M * NumEltBytes + (i % NumEltBytes);
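+ // e.g. for v8i16, mask element M = 3 expands to byte indices {6, 7}.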
+ PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
+ }
+
+ MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
+ return DAG.getBitcast(
+ VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V1),
+ DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
+}
+
// X86 has dedicated unpack instructions that can handle specific blend
// operations: UNPCKH and UNPCKL.
-static SDValue lowerVectorShuffleWithUNPCK(SDLoc DL, MVT VT, ArrayRef<int> Mask,
- SDValue V1, SDValue V2,
- SelectionDAG &DAG) {
+static SDValue lowerVectorShuffleWithUNPCK(const SDLoc &DL, MVT VT,
+ ArrayRef<int> Mask, SDValue V1,
+ SDValue V2, SelectionDAG &DAG) {
int NumElts = VT.getVectorNumElements();
int NumEltsInLane = 128 / VT.getScalarSizeInBits();
- SmallVector<int, 8> Unpckl;
- SmallVector<int, 8> Unpckh;
+ SmallVector<int, 8> Unpckl(NumElts);
+ SmallVector<int, 8> Unpckh(NumElts);
for (int i = 0; i < NumElts; ++i) {
unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
int LoPos = (i % NumEltsInLane) / 2 + LaneStart + NumElts * (i % 2);
int HiPos = LoPos + NumEltsInLane / 2;
- Unpckl.push_back(LoPos);
- Unpckh.push_back(HiPos);
+ Unpckl[i] = LoPos;
+ Unpckh[i] = HiPos;
}
if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
@@ -7013,7 +7391,7 @@ static SDValue lowerVectorShuffleWithUNPCK(SDLoc DL, MVT VT, ArrayRef<int> Mask,
///
/// This handles cases where we can model a blend exactly as a bitmask due to
/// one of the inputs being zeroable.
-static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
+static SDValue lowerVectorShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
SelectionDAG &DAG) {
MVT EltVT = VT.getVectorElementType();
@@ -7044,7 +7422,7 @@ static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
if (!V)
return SDValue(); // No non-zeroable elements!
- SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
+ SDValue VMask = DAG.getBuildVector(VT, DL, VMaskOps);
V = DAG.getNode(VT.isFloatingPoint()
? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
DL, VT, V, VMask);
@@ -7056,7 +7434,7 @@ static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
/// This is used as a fallback approach when first class blend instructions are
/// unavailable. Currently it is only suitable for integer vectors, but could
/// be generalized for floating point vectors if desirable.
-static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1,
+static SDValue lowerVectorShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(VT.isInteger() && "Only supports integer vector types!");
@@ -7067,12 +7445,12 @@ static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1,
EltVT);
SmallVector<SDValue, 16> MaskOps;
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
- if (Mask[i] != -1 && Mask[i] != i && Mask[i] != i + Size)
+ if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
return SDValue(); // Shuffled input!
MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
}
- SDValue V1Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, MaskOps);
+ SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
// We have to cast V2 around.
MVT MaskVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
@@ -7088,9 +7466,9 @@ static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1,
/// these values. It relies on the availability of the X86ISD::BLENDI pattern to
/// be matched in the backend with the type given. What it does check for is
/// that the shuffle mask is a blend, or convertible into a blend with zero.
-static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
+static SDValue lowerVectorShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Original,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
@@ -7153,13 +7531,13 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
case MVT::v4i64:
case MVT::v8i32:
- assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
+ assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
// FALLTHROUGH
case MVT::v2i64:
case MVT::v4i32:
// If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
// that instruction.
- if (Subtarget->hasAVX2()) {
+ if (Subtarget.hasAVX2()) {
// Scale the blend by the number of 32-bit dwords per element.
int Scale = VT.getScalarSizeInBits() / 32;
BlendMask = ScaleBlendMask(BlendMask, Mask.size(), Scale);
@@ -7184,14 +7562,14 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
}
case MVT::v16i16: {
- assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
+ assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
SmallVector<int, 8> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
// We can lower these with PBLENDW which is mirrored across 128-bit lanes.
assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
BlendMask = 0;
for (int i = 0; i < 8; ++i)
- if (RepeatedMask[i] >= 16)
+ if (RepeatedMask[i] >= 8)
BlendMask |= 1u << i;
return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
DAG.getConstant(BlendMask, DL, MVT::i8));
@@ -7200,7 +7578,7 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
// FALLTHROUGH
case MVT::v16i8:
case MVT::v32i8: {
- assert((VT.is128BitVector() || Subtarget->hasAVX2()) &&
+ assert((VT.is128BitVector() || Subtarget.hasAVX2()) &&
"256-bit byte-blends require AVX2 support!");
// Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
@@ -7235,10 +7613,9 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
V1 = DAG.getBitcast(BlendVT, V1);
V2 = DAG.getBitcast(BlendVT, V2);
- return DAG.getBitcast(VT, DAG.getNode(ISD::VSELECT, DL, BlendVT,
- DAG.getNode(ISD::BUILD_VECTOR, DL,
- BlendVT, VSELECTMask),
- V1, V2));
+ return DAG.getBitcast(
+ VT, DAG.getNode(ISD::VSELECT, DL, BlendVT,
+ DAG.getBuildVector(BlendVT, DL, VSELECTMask), V1, V2));
}
default:
@@ -7251,8 +7628,8 @@ static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
///
/// This matches the pattern where we can blend elements from two inputs and
/// then reduce the shuffle to a single-input permutation.
-static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
- SDValue V2,
+static SDValue lowerVectorShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
+ SDValue V1, SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG) {
// We build up the blend mask while checking whether a blend is a viable way
@@ -7266,7 +7643,7 @@ static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
- if (BlendMask[Mask[i] % Size] == -1)
+ if (BlendMask[Mask[i] % Size] < 0)
BlendMask[Mask[i] % Size] = Mask[i];
else if (BlendMask[Mask[i] % Size] != Mask[i])
return SDValue(); // Can't blend in the needed input!
@@ -7285,8 +7662,8 @@ static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
/// operations. It will try to pick the best arrangement of shuffles and
/// blends.
-static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
- SDValue V1,
+static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(const SDLoc &DL,
+ MVT VT, SDValue V1,
SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG) {
@@ -7335,10 +7712,10 @@ static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
-static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
- SDValue V2,
+static SDValue lowerVectorShuffleAsByteRotate(const SDLoc &DL, MVT VT,
+ SDValue V1, SDValue V2,
ArrayRef<int> Mask,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
@@ -7357,9 +7734,8 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
SDValue Lo, Hi;
for (int l = 0; l < NumElts; l += NumLaneElts) {
for (int i = 0; i < NumLaneElts; ++i) {
- if (Mask[l + i] == -1)
+ if (Mask[l + i] < 0)
continue;
- assert(Mask[l + i] >= 0 && "Only -1 is a valid negative mask element!");
// Get the mod-Size index and lane correct it.
int LaneIdx = (Mask[l + i] % NumElts) - l;
@@ -7411,19 +7787,22 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
else if (!Hi)
Hi = Lo;
+ // Cast the inputs to an i8 vector of the correct length to match PALIGNR or
+ // PSLLDQ/PSRLDQ.
+ MVT ByteVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
+ Lo = DAG.getBitcast(ByteVT, Lo);
+ Hi = DAG.getBitcast(ByteVT, Hi);
+
// The actual rotate instruction rotates bytes, so we need to scale the
// rotation based on how many bytes are in the vector lane.
int Scale = 16 / NumLaneElts;
// SSSE3 targets can use the palignr instruction.
- if (Subtarget->hasSSSE3()) {
- // Cast the inputs to i8 vector of correct length to match PALIGNR.
- MVT AlignVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
- Lo = DAG.getBitcast(AlignVT, Lo);
- Hi = DAG.getBitcast(AlignVT, Hi);
-
+ if (Subtarget.hasSSSE3()) {
+ assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
+ "512-bit PALIGNR requires BWI instructions");
return DAG.getBitcast(
- VT, DAG.getNode(X86ISD::PALIGNR, DL, AlignVT, Lo, Hi,
+ VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
DAG.getConstant(Rotation * Scale, DL, MVT::i8)));
}
@@ -7431,21 +7810,19 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
"Rotate-based lowering only supports 128-bit lowering!");
assert(Mask.size() <= 16 &&
"Can shuffle at most 16 bytes in a 128-bit vector!");
+ assert(ByteVT == MVT::v16i8 &&
+ "SSE2 rotate lowering only needed for v16i8!");
// Default SSE2 implementation
int LoByteShift = 16 - Rotation * Scale;
int HiByteShift = Rotation * Scale;
- // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
- Lo = DAG.getBitcast(MVT::v2i64, Lo);
- Hi = DAG.getBitcast(MVT::v2i64, Hi);
-
- SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
+ SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
DAG.getConstant(LoByteShift, DL, MVT::i8));
- SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
+ SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
DAG.getConstant(HiByteShift, DL, MVT::i8));
return DAG.getBitcast(VT,
- DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
+ DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
}
/// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
@@ -7471,8 +7848,9 @@ static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
/// [ 5, 6, 7, zz, zz, zz, zz, zz]
/// [ -1, 5, 6, 7, zz, zz, zz, zz]
/// [ 1, 2, -1, -1, -1, -1, zz, zz]
-static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
+static SDValue lowerVectorShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
@@ -7510,7 +7888,8 @@ static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
// We need to round trip through the appropriate type for the shift.
MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
- MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
+ MVT ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8)
+ : MVT::getVectorVT(ShiftSVT, Size / Scale);
assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
"Illegal integer vector type");
V = DAG.getBitcast(ShiftVT, V);
@@ -7526,7 +7905,8 @@ static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
// their width within the elements of the larger integer vector. Test each
// multiple to see if we can find a match with the moved element indices
// and that the shifted in elements are all zeroable.
- for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 128; Scale *= 2)
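+ // Without BWI, AVX-512 only has 32/64-bit element shifts, so cap the
+ // widened element width at 64 bits for 512-bit vectors.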
+ unsigned MaxWidth = (VT.is512BitVector() && !Subtarget.hasBWI() ? 64 : 128);
+ for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= MaxWidth; Scale *= 2)
for (int Shift = 1; Shift != Scale; ++Shift)
for (bool Left : {true, false})
if (CheckZeros(Shift, Scale, Left))
@@ -7539,7 +7919,7 @@ static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
}
/// \brief Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
-static SDValue lowerVectorShuffleWithSSE4A(SDLoc DL, MVT VT, SDValue V1,
+static SDValue lowerVectorShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
SelectionDAG &DAG) {
SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
@@ -7679,8 +8059,8 @@ static SDValue lowerVectorShuffleWithSSE4A(SDLoc DL, MVT VT, SDValue V1,
/// or at the start of a higher lane. All extended elements must be from
/// the same lane.
static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
- SDLoc DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
- ArrayRef<int> Mask, const X86Subtarget *Subtarget, SelectionDAG &DAG) {
+ const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
+ ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
assert(Scale > 1 && "Need a scale to extend.");
int EltBits = VT.getScalarSizeInBits();
int NumElements = VT.getVectorNumElements();
@@ -7713,14 +8093,20 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
// Found a valid zext mask! Try various lowering strategies based on the
// input type and available ISA extensions.
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
// Not worth offsetting 128-bit vectors if scale == 2; a pattern using
// PUNPCK will catch this in a later shuffle match.
if (Offset && Scale == 2 && VT.is128BitVector())
return SDValue();
MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
NumElements / Scale);
- InputV = DAG.getNode(X86ISD::VZEXT, DL, ExtVT, ShuffleOffset(InputV));
+ InputV = ShuffleOffset(InputV);
+
+ // For 256-bit vectors, we only need the lower (128-bit) input half.
+ if (VT.is256BitVector())
+ InputV = extract128BitVector(InputV, 0, DAG, DL);
+
+ InputV = DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV);
return DAG.getBitcast(VT, InputV);
}
@@ -7752,33 +8138,33 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
// The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
// to 64-bits.
- if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget->hasSSE4A()) {
+ if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
assert(VT.is128BitVector() && "Unexpected vector width!");
int LoIdx = Offset * EltBits;
- SDValue Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
- DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
- DAG.getConstant(EltBits, DL, MVT::i8),
- DAG.getConstant(LoIdx, DL, MVT::i8)));
+ SDValue Lo = DAG.getBitcast(
+ MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
+ DAG.getConstant(EltBits, DL, MVT::i8),
+ DAG.getConstant(LoIdx, DL, MVT::i8)));
if (isUndefInRange(Mask, NumElements / 2, NumElements / 2) ||
!SafeOffset(Offset + 1))
- return DAG.getNode(ISD::BITCAST, DL, VT, Lo);
+ return DAG.getBitcast(VT, Lo);
int HiIdx = (Offset + 1) * EltBits;
- SDValue Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
- DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
- DAG.getConstant(EltBits, DL, MVT::i8),
- DAG.getConstant(HiIdx, DL, MVT::i8)));
- return DAG.getNode(ISD::BITCAST, DL, VT,
- DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
+ SDValue Hi = DAG.getBitcast(
+ MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
+ DAG.getConstant(EltBits, DL, MVT::i8),
+ DAG.getConstant(HiIdx, DL, MVT::i8)));
+ return DAG.getBitcast(VT,
+ DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
}
// If this would require more than 2 unpack instructions to expand, use
// pshufb when available. We can only use more than 2 unpack instructions
// when zero extending i8 elements which also makes it easier to use pshufb.
- if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
+ if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
assert(NumElements == 16 && "Unexpected byte vector width!");
SDValue PSHUFBMask[16];
for (int i = 0; i < 16; ++i) {
@@ -7787,10 +8173,9 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
(i % Scale == 0 && SafeOffset(Idx)) ? Idx : 0x80, DL, MVT::i8);
}
InputV = DAG.getBitcast(MVT::v16i8, InputV);
- return DAG.getBitcast(VT,
- DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
- DAG.getNode(ISD::BUILD_VECTOR, DL,
- MVT::v16i8, PSHUFBMask)));
+ return DAG.getBitcast(
+ VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
+ DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
}
// If we are extending from an offset, ensure we start on a boundary that
@@ -7837,8 +8222,8 @@ static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
/// The reason we have dedicated lowering for zext-style shuffles is that they
/// are both incredibly common and often quite performance sensitive.
static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
- SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
- const X86Subtarget *Subtarget, SelectionDAG &DAG) {
+ const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget, SelectionDAG &DAG) {
SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
int Bits = VT.getSizeInBits();
@@ -7858,7 +8243,7 @@ static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
int Matches = 0;
for (int i = 0; i < NumElements; ++i) {
int M = Mask[i];
- if (M == -1)
+ if (M < 0)
continue; // Valid anywhere but doesn't tell us anything.
if (i % Scale != 0) {
// Each of the extended elements needs to be zeroable.
@@ -7960,8 +8345,8 @@ static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
SelectionDAG &DAG) {
MVT VT = V.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
- while (V.getOpcode() == ISD::BITCAST)
- V = V.getOperand(0);
+ V = peekThroughBitcasts(V);
+
// If the bitcasts shift the element size, we can't extract an equivalent
// element from it.
MVT NewVT = V.getSimpleValueType();
@@ -7974,7 +8359,7 @@ static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
// FIXME: Add support for scalar truncation where possible.
SDValue S = V.getOperand(Idx);
if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
- return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, S);
+ return DAG.getBitcast(EltVT, S);
}
return SDValue();
@@ -7985,9 +8370,7 @@ static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
/// This is particularly important because the set of instructions varies
/// significantly based on whether the operand is a load or not.
static bool isShuffleFoldableLoad(SDValue V) {
- while (V.getOpcode() == ISD::BITCAST)
- V = V.getOperand(0);
-
+ V = peekThroughBitcasts(V);
return ISD::isNON_EXTLoad(V.getNode());
}
@@ -7996,8 +8379,8 @@ static bool isShuffleFoldableLoad(SDValue V) {
/// This is a common pattern that we have especially efficient patterns to lower
/// across all subtarget feature sets.
static SDValue lowerVectorShuffleAsElementInsertion(
- SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
- const X86Subtarget *Subtarget, SelectionDAG &DAG) {
+ const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget, SelectionDAG &DAG) {
SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
MVT ExtVT = VT;
MVT EltVT = VT.getVectorElementType();
@@ -8054,7 +8437,7 @@ static SDValue lowerVectorShuffleAsElementInsertion(
// This is essentially a special case blend operation, but if we have
// general purpose blend operations, they are always faster. Bail and let
// the rest of the lowering handle these as blends.
- if (Subtarget->hasSSE41())
+ if (Subtarget.hasSSE41())
return SDValue();
// Otherwise, use MOVSD or MOVSS.
@@ -8082,9 +8465,9 @@ static SDValue lowerVectorShuffleAsElementInsertion(
V2Shuffle[V2Index] = 0;
V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
} else {
- V2 = DAG.getBitcast(MVT::v2i64, V2);
+ V2 = DAG.getBitcast(MVT::v16i8, V2);
V2 = DAG.getNode(
- X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
+ X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
DAG.getConstant(V2Index * EltVT.getSizeInBits() / 8, DL,
DAG.getTargetLoweringInfo().getScalarShiftAmountTy(
DAG.getDataLayout(), VT)));
@@ -8094,15 +8477,15 @@ static SDValue lowerVectorShuffleAsElementInsertion(
return V2;
}
-/// \brief Try to lower broadcast of a single - truncated - integer element,
+/// Try to lower broadcast of a single - truncated - integer element,
/// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
///
/// This assumes we have AVX2.
-static SDValue lowerVectorShuffleAsTruncBroadcast(SDLoc DL, MVT VT, SDValue V0,
- int BroadcastIdx,
- const X86Subtarget *Subtarget,
+static SDValue lowerVectorShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT,
+ SDValue V0, int BroadcastIdx,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- assert(Subtarget->hasAVX2() &&
+ assert(Subtarget.hasAVX2() &&
"We can only lower integer broadcasts with AVX2!");
EVT EltVT = VT.getVectorElementType();
@@ -8153,38 +8536,57 @@ static SDValue lowerVectorShuffleAsTruncBroadcast(SDLoc DL, MVT VT, SDValue V0,
/// filtering. While a little annoying to re-dispatch on type here, there isn't
/// a convenient way to factor it out.
/// FIXME: This is very similar to LowerVectorBroadcast - can we merge them?
-static SDValue lowerVectorShuffleAsBroadcast(SDLoc DL, MVT VT, SDValue V,
+static SDValue lowerVectorShuffleAsBroadcast(const SDLoc &DL, MVT VT,
+ SDValue V1, SDValue V2,
ArrayRef<int> Mask,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- if (!Subtarget->hasAVX())
- return SDValue();
- if (VT.isInteger() && !Subtarget->hasAVX2())
+ if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
+ (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
+ (Subtarget.hasAVX2() && VT.isInteger())))
return SDValue();
+ // With MOVDDUP (v2f64) we can broadcast from a register or a load; otherwise,
+ // broadcasting from a register requires AVX2.
+ unsigned NumElts = Mask.size();
+ unsigned Opcode = VT == MVT::v2f64 ? X86ISD::MOVDDUP : X86ISD::VBROADCAST;
+ bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
+
// Check that the mask is a broadcast.
int BroadcastIdx = -1;
- for (int M : Mask)
- if (M >= 0 && BroadcastIdx == -1)
- BroadcastIdx = M;
- else if (M >= 0 && M != BroadcastIdx)
- return SDValue();
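+ // Probe each element in turn as the broadcast index; undef elements in
+ // Mask match any broadcast index.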
+ for (int i = 0; i != (int)NumElts; ++i) {
+ SmallVector<int, 8> BroadcastMask(NumElts, i);
+ if (isShuffleEquivalent(V1, V2, Mask, BroadcastMask)) {
+ BroadcastIdx = i;
+ break;
+ }
+ }
+ if (BroadcastIdx < 0)
+ return SDValue();
assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
"a sorted mask where the broadcast "
"comes from V1.");
// Go up the chain of (vector) values to find a scalar load that we can
// combine with the broadcast.
+ SDValue V = V1;
for (;;) {
switch (V.getOpcode()) {
+ case ISD::BITCAST: {
+ SDValue VSrc = V.getOperand(0);
+ MVT SrcVT = VSrc.getSimpleValueType();
+ if (VT.getScalarSizeInBits() != SrcVT.getScalarSizeInBits())
+ break;
+ V = VSrc;
+ continue;
+ }
case ISD::CONCAT_VECTORS: {
int OperandSize = Mask.size() / V.getNumOperands();
V = V.getOperand(BroadcastIdx / OperandSize);
BroadcastIdx %= OperandSize;
continue;
}
-
case ISD::INSERT_SUBVECTOR: {
SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
@@ -8219,45 +8621,76 @@ static SDValue lowerVectorShuffleAsBroadcast(SDLoc DL, MVT VT, SDValue V,
MVT BroadcastVT = VT;
// Peek through any bitcast (only useful for loads).
- SDValue BC = V;
- while (BC.getOpcode() == ISD::BITCAST)
- BC = BC.getOperand(0);
+ SDValue BC = peekThroughBitcasts(V);
// Also check the simpler case, where we can directly reuse the scalar.
if (V.getOpcode() == ISD::BUILD_VECTOR ||
(V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
V = V.getOperand(BroadcastIdx);
- // If the scalar isn't a load, we can't broadcast from it in AVX1.
- // Only AVX2 has register broadcasts.
- if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
+ // If we can't broadcast from a register, check that the input is a load.
+ if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
return SDValue();
} else if (MayFoldLoad(BC) && !cast<LoadSDNode>(BC)->isVolatile()) {
// 32-bit targets need to load i64 as a f64 and then bitcast the result.
- if (!Subtarget->is64Bit() && VT.getScalarType() == MVT::i64)
+ if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
+ Opcode = (BroadcastVT.is128BitVector() ? X86ISD::MOVDDUP : Opcode);
+ }
// If we are broadcasting a load that is only used by the shuffle,
// then we can reduce the vector load to the broadcasted scalar load.
LoadSDNode *Ld = cast<LoadSDNode>(BC);
SDValue BaseAddr = Ld->getOperand(1);
- EVT AddrVT = BaseAddr.getValueType();
EVT SVT = BroadcastVT.getScalarType();
unsigned Offset = BroadcastIdx * SVT.getStoreSize();
- SDValue NewAddr = DAG.getNode(
- ISD::ADD, DL, AddrVT, BaseAddr,
- DAG.getConstant(Offset, DL, AddrVT));
+ SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
DAG.getMachineFunction().getMachineMemOperand(
Ld->getMemOperand(), Offset, SVT.getStoreSize()));
- } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
- // We can't broadcast from a vector register without AVX2, and we can only
- // broadcast from the zero-element of a vector register.
+ } else if (!BroadcastFromReg) {
+ // We can't broadcast from a vector register.
return SDValue();
+ } else if (BroadcastIdx != 0) {
+ // We can only broadcast from the zero-element of a vector register,
+ // but it can be advantageous to broadcast from the zero-element of a
+ // subvector.
+ if (!VT.is256BitVector() && !VT.is512BitVector())
+ return SDValue();
+
+ // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
+ if (VT == MVT::v4f64 || VT == MVT::v4i64)
+ return SDValue();
+
+ // Only broadcast the zero-element of a 128-bit subvector.
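+ // e.g. for v8f32, element 4 is the zero-element of the upper 128-bit half.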
+ unsigned EltSize = VT.getScalarSizeInBits();
+ if (((BroadcastIdx * EltSize) % 128) != 0)
+ return SDValue();
+
+ MVT ExtVT = MVT::getVectorVT(VT.getScalarType(), 128 / EltSize);
+ V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtVT, V,
+ DAG.getIntPtrConstant(BroadcastIdx, DL));
}
- V = DAG.getNode(X86ISD::VBROADCAST, DL, BroadcastVT, V);
- return DAG.getBitcast(VT, V);
+ if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
+ V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
+ DAG.getBitcast(MVT::f64, V));
+
+ // Bitcast back to the same scalar type as BroadcastVT.
+ MVT SrcVT = V.getSimpleValueType();
+ if (SrcVT.getScalarType() != BroadcastVT.getScalarType()) {
+ assert(SrcVT.getScalarSizeInBits() == BroadcastVT.getScalarSizeInBits() &&
+ "Unexpected vector element size");
+ if (SrcVT.isVector()) {
+ unsigned NumSrcElts = SrcVT.getVectorNumElements();
+ SrcVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
+ } else {
+ SrcVT = BroadcastVT.getScalarType();
+ }
+ V = DAG.getBitcast(SrcVT, V);
+ }
+
+ return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
}
// Check for whether we can use INSERTPS to perform the shuffle. We only use
@@ -8266,16 +8699,14 @@ static SDValue lowerVectorShuffleAsBroadcast(SDLoc DL, MVT VT, SDValue V,
// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
// perform INSERTPS if a single V1 element is out of place and all V2
// elements are zeroable.
-static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
- ArrayRef<int> Mask,
- SelectionDAG &DAG) {
- assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
- assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
- assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
+static bool matchVectorShuffleAsInsertPS(SDValue &V1, SDValue &V2,
+ unsigned &InsertPSMask,
+ const SmallBitVector &Zeroable,
+ ArrayRef<int> Mask,
+ SelectionDAG &DAG) {
+ assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
+ assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
-
- SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
-
unsigned ZMask = 0;
int V1DstIndex = -1;
int V2DstIndex = -1;
@@ -8295,8 +8726,8 @@ static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
}
// We can only insert a single non-zeroable element.
- if (V1DstIndex != -1 || V2DstIndex != -1)
- return SDValue();
+ if (V1DstIndex >= 0 || V2DstIndex >= 0)
+ return false;
if (Mask[i] < 4) {
// V1 input out of place for insertion.
@@ -8308,13 +8739,13 @@ static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
}
// Don't bother if we have no (non-zeroable) element for insertion.
- if (V1DstIndex == -1 && V2DstIndex == -1)
- return SDValue();
+ if (V1DstIndex < 0 && V2DstIndex < 0)
+ return false;
// Determine element insertion src/dst indices. The src index is from the
// start of the inserted vector, not the start of the concatenated vector.
unsigned V2SrcIndex = 0;
- if (V1DstIndex != -1) {
+ if (V1DstIndex >= 0) {
// If we have a V1 input out of place, we use V1 as the V2 element insertion
// and don't use the original V2 at all.
V2SrcIndex = Mask[V1DstIndex];
@@ -8329,11 +8760,25 @@ static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
if (!V1UsedInPlace)
V1 = DAG.getUNDEF(MVT::v4f32);
- unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
+ // Insert the V2 element into the desired position.
+ InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
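+ // e.g. V2SrcIndex = 1, V2DstIndex = 2 and ZMask = 0b1000 encode as
+ // 0b01101000.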
assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
+ return true;
+}
+
+static SDValue lowerVectorShuffleAsInsertPS(const SDLoc &DL, SDValue V1,
+ SDValue V2, ArrayRef<int> Mask,
+ SelectionDAG &DAG) {
+ assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
+ assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
+ SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
+
+ // Attempt to match the insertps pattern.
+ unsigned InsertPSMask;
+ if (!matchVectorShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
+ return SDValue();
// Insert the V2 element into the desired position.
- SDLoc DL(Op);
return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
DAG.getConstant(InsertPSMask, DL, MVT::i8));
}
@@ -8347,29 +8792,30 @@ static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
/// because for floating point vectors we have a generalized SHUFPS lowering
/// strategy that handles everything that doesn't *exactly* match an unpack,
/// making this clever lowering unnecessary.
-static SDValue lowerVectorShuffleAsPermuteAndUnpack(SDLoc DL, MVT VT,
+static SDValue lowerVectorShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(!VT.isFloatingPoint() &&
"This routine only supports integer vectors.");
- assert(!isSingleInputShuffleMask(Mask) &&
+ assert(VT.is128BitVector() &&
+ "This routine only works on 128-bit vectors.");
+ assert(!V2.isUndef() &&
"This routine should only be used when blending two inputs.");
assert(Mask.size() >= 2 && "Single element masks are invalid.");
int Size = Mask.size();
- int NumLoInputs = std::count_if(Mask.begin(), Mask.end(), [Size](int M) {
- return M >= 0 && M % Size < Size / 2;
- });
- int NumHiInputs = std::count_if(
- Mask.begin(), Mask.end(), [Size](int M) { return M % Size >= Size / 2; });
+ int NumLoInputs =
+ count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
+ int NumHiInputs =
+ count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
bool UnpackLo = NumLoInputs >= NumHiInputs;
- auto TryUnpack = [&](MVT UnpackVT, int Scale) {
- SmallVector<int, 32> V1Mask(Mask.size(), -1);
- SmallVector<int, 32> V2Mask(Mask.size(), -1);
+ auto TryUnpack = [&](int ScalarSize, int Scale) {
+ SmallVector<int, 16> V1Mask((unsigned)Size, -1);
+ SmallVector<int, 16> V2Mask((unsigned)Size, -1);
for (int i = 0; i < Size; ++i) {
if (Mask[i] < 0)
@@ -8401,6 +8847,7 @@ static SDValue lowerVectorShuffleAsPermuteAndUnpack(SDLoc DL, MVT VT,
V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
// Cast the inputs to the type we will use to unpack them.
+ MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
V1 = DAG.getBitcast(UnpackVT, V1);
V2 = DAG.getBitcast(UnpackVT, V2);
@@ -8412,15 +8859,10 @@ static SDValue lowerVectorShuffleAsPermuteAndUnpack(SDLoc DL, MVT VT,
// We try each unpack from the largest to the smallest to try and find one
// that fits this mask.
- int OrigNumElements = VT.getVectorNumElements();
int OrigScalarSize = VT.getScalarSizeInBits();
- for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2) {
- int Scale = ScalarSize / OrigScalarSize;
- int NumElements = OrigNumElements / Scale;
- MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), NumElements);
- if (SDValue Unpack = TryUnpack(UnpackVT, Scale))
+ for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
+ if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
return Unpack;
- }
// If none of the unpack-rooted lowerings worked (or were profitable) try an
// initial unpack.
@@ -8434,8 +8876,7 @@ static SDValue lowerVectorShuffleAsPermuteAndUnpack(SDLoc DL, MVT VT,
// half-crossings are created.
// FIXME: We could consider commuting the unpacks.
- SmallVector<int, 32> PermMask;
- PermMask.assign(Size, -1);
+ SmallVector<int, 32> PermMask((unsigned)Size, -1);
for (int i = 0; i < Size; ++i) {
if (Mask[i] < 0)
continue;
@@ -8461,28 +8902,25 @@ static SDValue lowerVectorShuffleAsPermuteAndUnpack(SDLoc DL, MVT VT,
/// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
-static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV2F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
- if (isSingleInputShuffleMask(Mask)) {
- // Use low duplicate instructions for masks that match their pattern.
- if (Subtarget->hasSSE3())
- if (isShuffleEquivalent(V1, V2, Mask, {0, 0}))
- return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
+ if (V2.isUndef()) {
+ // Check for being able to broadcast a single element.
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
+ DL, MVT::v2f64, V1, V2, Mask, Subtarget, DAG))
+ return Broadcast;
// Straight shuffle of a single input vector. Simulate this by using the
// single input as both of the "inputs" to this instruction.
unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
- if (Subtarget->hasAVX()) {
+ if (Subtarget.hasAVX()) {
// If we have AVX, we can use VPERMILPS which will allow folding a load
// into the shuffle.
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
@@ -8521,7 +8959,7 @@ static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DL, MVT::v2f64, V2,
DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
- if (Subtarget->hasSSE41())
+ if (Subtarget.hasSSE41())
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
Subtarget, DAG))
return Blend;
@@ -8542,21 +8980,18 @@ static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
/// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
-static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV2I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
- if (isSingleInputShuffleMask(Mask)) {
+ if (V2.isUndef()) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v2i64, V1,
- Mask, Subtarget, DAG))
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
+ DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Straight shuffle of a single input vector. For everything from SSE2
@@ -8576,28 +9011,29 @@ static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
assert(Mask[0] < 2 && "We sort V1 to be the first input.");
assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
- // If we have a blend of two PACKUS operations an the blend aligns with the
- // low and half halves, we can just merge the PACKUS operations. This is
- // particularly important as it lets us merge shuffles that this routine itself
- // creates.
+ // If we have a blend of two same-type PACKUS operations and the blend aligns
+ // with the low and high halves, we can just merge the PACKUS operations.
+ // This is particularly important as it lets us merge shuffles that this
+ // routine itself creates.
auto GetPackNode = [](SDValue V) {
- while (V.getOpcode() == ISD::BITCAST)
- V = V.getOperand(0);
-
+ V = peekThroughBitcasts(V);
return V.getOpcode() == X86ISD::PACKUS ? V : SDValue();
};
if (SDValue V1Pack = GetPackNode(V1))
- if (SDValue V2Pack = GetPackNode(V2))
- return DAG.getBitcast(MVT::v2i64,
- DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8,
- Mask[0] == 0 ? V1Pack.getOperand(0)
- : V1Pack.getOperand(1),
- Mask[1] == 2 ? V2Pack.getOperand(0)
- : V2Pack.getOperand(1)));
+ if (SDValue V2Pack = GetPackNode(V2)) {
+ EVT PackVT = V1Pack.getValueType();
+ if (PackVT == V2Pack.getValueType())
+ return DAG.getBitcast(MVT::v2i64,
+ DAG.getNode(X86ISD::PACKUS, DL, PackVT,
+ Mask[0] == 0 ? V1Pack.getOperand(0)
+ : V1Pack.getOperand(1),
+ Mask[1] == 2 ? V2Pack.getOperand(0)
+ : V2Pack.getOperand(1)));
+ }
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
// When loading a scalar and then shuffling it into a vector we can often do
@@ -8614,7 +9050,7 @@ static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// We have different paths for blend lowering, but they all must use the
// *exact* same predicate.
- bool IsBlendSupported = Subtarget->hasSSE41();
+ bool IsBlendSupported = Subtarget.hasSSE41();
if (IsBlendSupported)
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
Subtarget, DAG))
@@ -8627,7 +9063,7 @@ static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// Try to use byte rotation instructions.
// It's more profitable for pre-SSSE3 to use shuffles/unpacks.
- if (Subtarget->hasSSSE3())
+ if (Subtarget.hasSSSE3())
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
return Rotate;
@@ -8655,12 +9091,16 @@ static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
// This routine only handles 128-bit shufps.
assert(Mask.size() == 4 && "Unsupported mask size!");
+ assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
+ assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
+ assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
+ assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
// To lower with a single SHUFPS we need to have the low half and high half
// each requiring a single input.
- if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
+ if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
return false;
- if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
+ if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
return false;
return true;
@@ -8671,14 +9111,13 @@ static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
/// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
/// It makes no assumptions about whether this is the *best* lowering, it simply
/// uses it.
-static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
+static SDValue lowerVectorShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
SDValue LowV = V1, HighV = V2;
int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
- int NumV2Elements =
- std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
+ int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
if (NumV2Elements == 1) {
int V2Index =
@@ -8689,7 +9128,7 @@ static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
// the low bit.
int V2AdjIndex = V2Index ^ 1;
- if (Mask[V2AdjIndex] == -1) {
+ if (Mask[V2AdjIndex] < 0) {
// Handles all the cases where we have a single V2 element and an undef.
// This will only ever happen in the high lanes because we commute the
// vector otherwise.
@@ -8761,35 +9200,31 @@ static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
/// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
-static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV4F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
- int NumV2Elements =
- std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
+ int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
if (NumV2Elements == 0) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4f32, V1,
- Mask, Subtarget, DAG))
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
+ DL, MVT::v4f32, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Use even/odd duplicate instructions for masks that match their pattern.
- if (Subtarget->hasSSE3()) {
+ if (Subtarget.hasSSE3()) {
if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
}
- if (Subtarget->hasAVX()) {
+ if (Subtarget.hasAVX()) {
// If we have AVX, we can use VPERMILPS which will allow folding a load
// into the shuffle.
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
@@ -8812,13 +9247,13 @@ static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
Mask, Subtarget, DAG))
return V;
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
Subtarget, DAG))
return Blend;
// Use INSERTPS if we can complete the shuffle efficiently.
- if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
+ if (SDValue V = lowerVectorShuffleAsInsertPS(DL, V1, V2, Mask, DAG))
return V;
if (!isSingleSHUFPSMask(Mask))
@@ -8827,6 +9262,12 @@ static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return BlendPerm;
}
+ // Use low/high mov instructions.
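+ // MOVLHPS concatenates the low halves of V1 and V2; MOVHLPS (with the
+ // operands swapped) concatenates the high halves.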
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
+ return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
+ if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
+ return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
+
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
@@ -8840,15 +9281,12 @@ static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// We try to handle these with integer-domain shuffles where we can, but for
/// blends we use the floating point domain blend instructions.
-static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV4I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
// Whenever we can lower this as a zext, that instruction is strictly faster
@@ -8858,13 +9296,12 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
Mask, Subtarget, DAG))
return ZExt;
- int NumV2Elements =
- std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
+ int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
if (NumV2Elements == 0) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4i32, V1,
- Mask, Subtarget, DAG))
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
+ DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Straight shuffle of a single input vector. For everything from SSE2
@@ -8884,8 +9321,8 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
}
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
// There are special ways we can lower some single-element blends.
@@ -8896,7 +9333,7 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// We have different paths for blend lowering, but they all must use the
// *exact* same predicate.
- bool IsBlendSupported = Subtarget->hasSSE41();
+ bool IsBlendSupported = Subtarget.hasSSE41();
if (IsBlendSupported)
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
Subtarget, DAG))
@@ -8913,7 +9350,7 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// Try to use byte rotation instructions.
// It's more profitable for pre-SSSE3 to use shuffles/unpacks.
- if (Subtarget->hasSSSE3())
+ if (Subtarget.hasSSSE3())
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
return Rotate;
@@ -8957,8 +9394,8 @@ static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
/// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
/// vector, form the analogous 128-bit 8-element Mask.
static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
- SDLoc DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
- const X86Subtarget *Subtarget, SelectionDAG &DAG) {
+ const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
+ const X86Subtarget &Subtarget, SelectionDAG &DAG) {
assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
@@ -8987,6 +9424,26 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
+ // If we are splatting two values from one half - one to each half, then
+ // we can shuffle that half so each is splatted to a dword, then splat those
+ // to their respective halves.
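+ // e.g. for mask <0, 0, 0, 0, 3, 3, 3, 3>, PSHUFLW <0, 0, 3, 3> splats words
+ // 0 and 3 into dwords 0 and 1, then PSHUFD <0, 0, 1, 1> spreads those dwords
+ // across the two halves.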
+ auto SplatHalfs = [&](int LoInput, int HiInput, unsigned ShufWOp,
+ int DOffset) {
+ int PSHUFHalfMask[] = {LoInput % 4, LoInput % 4, HiInput % 4, HiInput % 4};
+ int PSHUFDMask[] = {DOffset + 0, DOffset + 0, DOffset + 1, DOffset + 1};
+ V = DAG.getNode(ShufWOp, DL, VT, V,
+ getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
+ V = DAG.getBitcast(PSHUFDVT, V);
+ V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
+ getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
+ return DAG.getBitcast(VT, V);
+ };
+
+ if (NumLToL == 1 && NumLToH == 1 && (NumHToL + NumHToH) == 0)
+ return SplatHalfs(LToLInputs[0], LToHInputs[0], X86ISD::PSHUFLW, 0);
+ if (NumHToL == 1 && NumHToH == 1 && (NumLToL + NumLToH) == 0)
+ return SplatHalfs(HToLInputs[0], HToHInputs[0], X86ISD::PSHUFHW, 2);
+
// Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
// such inputs we can swap two of the dwords across the half mark and end up
// with <=2 inputs to each half in each half. Once there, we can fall through
@@ -9096,9 +9553,9 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
for (int &M : Mask)
- if (M != -1 && M == FixIdx)
+ if (M >= 0 && M == FixIdx)
M = FixFreeIdx;
- else if (M != -1 && M == FixFreeIdx)
+ else if (M >= 0 && M == FixFreeIdx)
M = FixIdx;
};
if (NumFlippedBToBInputs != 0) {
@@ -9123,9 +9580,9 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
// Adjust the mask to match the new locations of A and B.
for (int &M : Mask)
- if (M != -1 && M/2 == ADWord)
+ if (M >= 0 && M/2 == ADWord)
M = 2 * BDWord + M % 2;
- else if (M != -1 && M/2 == BDWord)
+ else if (M >= 0 && M/2 == BDWord)
M = 2 * ADWord + M % 2;
// Recurse back into this routine to re-compute state now that this isn't
@@ -9194,7 +9651,7 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
int DestOffset) {
auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
- return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
+ return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
};
auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
int Word) {
@@ -9213,7 +9670,7 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
// If the source half mask maps over the inputs, turn those into
// swaps and use the swapped lane.
if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
- if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
+ if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
Input - SourceOffset;
// We have to swap the uses in our half mask in one sweep.
@@ -9234,7 +9691,7 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
}
// Map the input's dword into the correct half.
- if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
+ if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
else
assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
@@ -9280,17 +9737,17 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
// the inputs, place the other input in it. We use (Index XOR 1) to
// compute an adjacent index.
if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
- SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
+ SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
InputsFixed[1] = InputsFixed[0] ^ 1;
} else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
- SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
+ SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
InputsFixed[0] = InputsFixed[1] ^ 1;
- } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
- SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
+ } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
+ SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
// The two inputs are in the same DWord but it is clobbered and the
// adjacent DWord isn't used at all. Move both inputs to the free
// slot.
@@ -9304,7 +9761,7 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
// free slot adjacent to one of the inputs. In this case, we have to
// swap an input with a non-input.
for (int i = 0; i < 4; ++i)
- assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
+ assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
"We can't handle any clobbers here!");
assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
"Cannot have adjacent inputs here!");
@@ -9338,8 +9795,8 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
}
// Now hoist the DWord down to the right half.
- int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
- assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
+ int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
+ assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
for (int &M : HalfMask)
for (int Input : IncomingInputs)
@@ -9367,11 +9824,9 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
// At this point, each half should contain all its inputs, and we can then
// just shuffle them into their final position.
- assert(std::count_if(LoMask.begin(), LoMask.end(),
- [](int M) { return M >= 4; }) == 0 &&
+ assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
"Failed to lift all the high half inputs to the low mask!");
- assert(std::count_if(HiMask.begin(), HiMask.end(),
- [](int M) { return M >= 0 && M < 4; }) == 0 &&
+ assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
"Failed to lift all the low half inputs to the high mask!");
// Do a half shuffle for the low mask.
@@ -9390,11 +9845,11 @@ static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
return V;
}
-/// \brief Helper to form a PSHUFB-based shuffle+blend.
-static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
- SDValue V2, ArrayRef<int> Mask,
- SelectionDAG &DAG, bool &V1InUse,
- bool &V2InUse) {
+/// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
+/// blend if only one input is used.
+static SDValue lowerVectorShuffleAsBlendOfPSHUFBs(
+ const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
+ SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
SDValue V1Mask[16];
SDValue V2Mask[16];
@@ -9404,7 +9859,7 @@ static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
int Size = Mask.size();
int Scale = 16 / Size;
for (int i = 0; i < 16; ++i) {
- if (Mask[i / Scale] == -1) {
+ if (Mask[i / Scale] < 0) {
V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
} else {
const int ZeroMask = 0x80;
@@ -9425,11 +9880,11 @@ static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
if (V1InUse)
V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
DAG.getBitcast(MVT::v16i8, V1),
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
+ DAG.getBuildVector(MVT::v16i8, DL, V1Mask));
if (V2InUse)
V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
DAG.getBitcast(MVT::v16i8, V2),
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
+ DAG.getBuildVector(MVT::v16i8, DL, V2Mask));
// If we need shuffled inputs from both, blend the two.
SDValue V;
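A minimal standalone model of how the two per-byte masks are built (assumptions: v8i16 element masks, the PSHUFB convention that a byte with its high bit set writes zero; the interleave mask is hypothetical):

#include <cstdio>

int main() {
  const int ZeroMask = 0x80;                 // high bit set => PSHUFB writes 0
  int Mask[8] = {0, 8, 1, 9, 2, 10, 3, 11};  // hypothetical V1/V2 interleave
  int Size = 8, Scale = 16 / Size;           // 2 bytes per i16 element
  unsigned char V1Mask[16], V2Mask[16];
  for (int i = 0; i < 16; ++i) {
    int M = Mask[i / Scale];
    int Byte = (M % Size) * Scale + i % Scale; // byte offset inside its source
    V1Mask[i] = M < Size ? Byte : ZeroMask;
    V2Mask[i] = M < Size ? ZeroMask : Byte;
  }
  // OR-ing PSHUFB(V1, V1Mask) with PSHUFB(V2, V2Mask) realizes the blend; if
  // only one mask ever selects real bytes, the OR (and one PSHUFB) is dropped.
  for (int i = 0; i < 16; ++i)
    std::printf("%3d %3d\n", V1Mask[i], V2Mask[i]);
  return 0;
}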
@@ -9454,42 +9909,31 @@ static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
/// the two inputs, try to interleave them. Otherwise, blend the low and high
/// halves of the inputs separately (making them have relatively few inputs)
/// and then concatenate them.
-static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV8I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> OrigMask = SVOp->getMask();
- int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
- OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
- MutableArrayRef<int> Mask(MaskStorage);
-
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
- DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
+ DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
return ZExt;
- auto isV1 = [](int M) { return M >= 0 && M < 8; };
- (void)isV1;
- auto isV2 = [](int M) { return M >= 8; };
-
- int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
+ int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
if (NumV2Inputs == 0) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8i16, V1,
- Mask, Subtarget, DAG))
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
+ DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
+ Subtarget, DAG))
return Shift;
// Use dedicated unpack instructions for masks that match their pattern.
@@ -9502,21 +9946,24 @@ static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
Mask, Subtarget, DAG))
return Rotate;
- return lowerV8I16GeneralSingleInputVectorShuffle(DL, MVT::v8i16, V1, Mask,
- Subtarget, DAG);
+ // Make a copy of the mask so it can be modified.
+ SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
+ return lowerV8I16GeneralSingleInputVectorShuffle(DL, MVT::v8i16, V1,
+ MutableMask, Subtarget,
+ DAG);
}
- assert(std::any_of(Mask.begin(), Mask.end(), isV1) &&
+ assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
"All single-input shuffles should be canonicalized to be V1-input "
"shuffles.");
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
// See if we can use SSE4A Extraction / Insertion.
- if (Subtarget->hasSSE4A())
+ if (Subtarget.hasSSE4A())
if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask, DAG))
return V;
@@ -9528,7 +9975,7 @@ static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// We have different paths for blend lowering, but they all must use the
// *exact* same predicate.
- bool IsBlendSupported = Subtarget->hasSSE41();
+ bool IsBlendSupported = Subtarget.hasSSE41();
if (IsBlendSupported)
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
Subtarget, DAG))
@@ -9552,16 +9999,17 @@ static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
lowerVectorShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
return BitBlend;
+ // Try to lower by permuting the inputs into an unpack instruction.
if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1,
V2, Mask, DAG))
return Unpack;
// If we can't directly blend but can use PSHUFB, that will be better as it
// can both shuffle and set up the inefficient blend.
- if (!IsBlendSupported && Subtarget->hasSSSE3()) {
+ if (!IsBlendSupported && Subtarget.hasSSSE3()) {
bool V1InUse, V2InUse;
- return lowerVectorShuffleAsPSHUFB(DL, MVT::v8i16, V1, V2, Mask, DAG,
- V1InUse, V2InUse);
+ return lowerVectorShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask, DAG,
+ V1InUse, V2InUse);
}
// We can always bit-blend if we have to so the fallback strategy is to
@@ -9591,10 +10039,8 @@ static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// \returns N above, or the number of times even elements must be dropped if
/// there is such a number. Otherwise returns zero.
-static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
- // Figure out whether we're looping over two inputs or just one.
- bool IsSingleInput = isSingleInputShuffleMask(Mask);
-
+static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
+ bool IsSingleInput) {
// The modulus for the shuffle vector entries is based on whether this is
// a single input or not.
int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
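Roughly, the check asks whether every defined mask element sits on a power-of-two stride modulo this value. A simplified single-input model (8-element mask; the helper name is invented for illustration and does not mirror the routine's internal bookkeeping exactly):

#include <array>
#include <cstdio>

// Return N >= 1 with Mask[i] % Modulus == (i << N) % Modulus for all defined
// elements, or 0 if no power-of-two stride works (simplified sketch).
static int droppingStride(const std::array<int, 8> &Mask, int Modulus) {
  for (int N = 1; (1 << N) <= Modulus; ++N) {
    bool Viable = true;
    for (int i = 0; i < 8 && Viable; ++i)
      if (Mask[i] >= 0 && Mask[i] % Modulus != (i << N) % Modulus)
        Viable = false;
    if (Viable)
      return N;
  }
  return 0;
}

int main() {
  std::printf("%d\n", droppingStride({0, 2, 4, 6, 0, 2, 4, 6}, 8)); // 1
  std::printf("%d\n", droppingStride({0, 1, 2, 3, 4, 5, 6, 7}, 8)); // 0
  return 0;
}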
@@ -9611,7 +10057,7 @@ static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
for (int i = 0, e = Mask.size(); i < e; ++i) {
// Ignore undef lanes, we'll optimistically collapse them to the pattern we
// want.
- if (Mask[i] == -1)
+ if (Mask[i] < 0)
continue;
bool IsAnyViable = false;
@@ -9645,20 +10091,17 @@ static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
/// UNPCK to spread the i8 elements across two i16-element vectors, and uses
/// the existing lowering for v8i16 blends on each half, finally PACK-ing them
/// back together.
-static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV16I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
@@ -9672,18 +10115,17 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return ZExt;
// See if we can use SSE4A Extraction / Insertion.
- if (Subtarget->hasSSE4A())
+ if (Subtarget.hasSSE4A())
if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask, DAG))
return V;
- int NumV2Elements =
- std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
+ int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
// For single-input shuffles, there are some nicer lowering tricks we can use.
if (NumV2Elements == 0) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v16i8, V1,
- Mask, Subtarget, DAG))
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
+ DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Check whether we can widen this to an i16 shuffle by duplicating bytes.
@@ -9696,7 +10138,7 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// i16 shuffle as well.
auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
for (int i = 0; i < 16; i += 2)
- if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
+ if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
return false;
return true;
@@ -9734,7 +10176,7 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
// If we haven't yet mapped the input, search for a slot into which
// we can map it.
- while (j < je && PreDupI16Shuffle[j] != -1)
+ while (j < je && PreDupI16Shuffle[j] >= 0)
++j;
if (j == je)
@@ -9759,10 +10201,10 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
for (int i = 0; i < 16; ++i)
- if (Mask[i] != -1) {
+ if (Mask[i] >= 0) {
int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
- if (PostDupI16Shuffle[i / 2] == -1)
+ if (PostDupI16Shuffle[i / 2] < 0)
PostDupI16Shuffle[i / 2] = MappedMask;
else
assert(PostDupI16Shuffle[i / 2] == MappedMask &&
@@ -9799,18 +10241,18 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// FIXME: The only exceptions to the above are blends which are exact
// interleavings with direct instructions supporting them. We currently don't
// handle those well here.
- if (Subtarget->hasSSSE3()) {
+ if (Subtarget.hasSSSE3()) {
bool V1InUse = false;
bool V2InUse = false;
- SDValue PSHUFB = lowerVectorShuffleAsPSHUFB(DL, MVT::v16i8, V1, V2, Mask,
- DAG, V1InUse, V2InUse);
+ SDValue PSHUFB = lowerVectorShuffleAsBlendOfPSHUFBs(
+ DL, MVT::v16i8, V1, V2, Mask, DAG, V1InUse, V2InUse);
// If both V1 and V2 are in use and we can use a direct blend or an unpack,
// do so. This avoids using them to handle blends-with-zero which is
// important as a single pshufb is significantly faster for that.
if (V1InUse && V2InUse) {
- if (Subtarget->hasSSE41())
+ if (Subtarget.hasSSE41())
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i8, V1, V2,
Mask, Subtarget, DAG))
return Blend;
@@ -9848,11 +10290,11 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// We special case these as they can be particularly efficiently handled with
// the PACKUSWB instruction on x86 and they show up in common patterns of
// rearranging bytes to truncate wide elements.
- if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
+ bool IsSingleInput = V2.isUndef();
+ if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput)) {
// NumEvenDrops is the power of two stride of the elements. Another way of
// thinking about it is that we need to drop the even elements this many
// times to get the original input.
- bool IsSingleInput = isSingleInputShuffleMask(Mask);
// First we need to zero all the dropped bytes.
assert(NumEvenDrops <= 3 &&
@@ -9907,7 +10349,7 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// Use a mask to drop the high bytes.
VLoHalf = DAG.getBitcast(MVT::v8i16, V);
VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
- DAG.getConstant(0x00FF, DL, MVT::v8i16));
+ DAG.getConstant(0x00FF, DL, MVT::v8i16));
// This will be a single vector shuffle instead of a blend so nuke VHiHalf.
VHiHalf = DAG.getUNDEF(MVT::v8i16);
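The AND with 0x00FF keeps only the low byte of each i16 lane, so the PACKUS step that follows cannot saturate and simply compacts those bytes. A scalar sketch of one half (PACKUSWB unsigned-saturation semantics assumed; the lane values are illustrative):

#include <array>
#include <cassert>
#include <cstdint>

int main() {
  std::array<uint16_t, 8> VLoHalf = {0x1100, 0x3322, 0x5544, 0x7766,
                                     0x9988, 0xBBAA, 0xDDCC, 0xFFEE};
  std::array<uint8_t, 8> Packed;
  for (int i = 0; i < 8; ++i) {
    uint16_t Masked = VLoHalf[i] & 0x00FF; // low byte survives, never > 255
    Packed[i] = (uint8_t)Masked;           // so unsigned saturation is a no-op
  }
  // Packed == {0x00,0x22,0x44,...}: the even-indexed bytes of the original
  // v16i8 value (little-endian lanes), i.e. one "drop" step.
  assert(Packed[0] == 0x00 && Packed[1] == 0x22 && Packed[7] == 0xEE);
  return 0;
}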
@@ -9938,22 +10380,23 @@ static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// This routine breaks down the specific type of 128-bit shuffle and
/// dispatches to the lowering routines accordingly.
-static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- MVT VT, const X86Subtarget *Subtarget,
+static SDValue lower128BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ MVT VT, SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
switch (VT.SimpleTy) {
case MVT::v2i64:
- return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV2I64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v2f64:
- return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV2F64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v4i32:
- return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV4I32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v4f32:
- return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV4F32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v8i16:
- return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV8I16VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v16i8:
- return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV16I8VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
default:
llvm_unreachable("Unimplemented!");
@@ -9971,21 +10414,22 @@ static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
/// a zero-ed lane of a vector.
static bool canWidenShuffleElements(ArrayRef<int> Mask,
SmallVectorImpl<int> &WidenedMask) {
+ WidenedMask.assign(Mask.size() / 2, 0);
for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
// If both elements are undef, its trivial.
if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
- WidenedMask.push_back(SM_SentinelUndef);
+ WidenedMask[i/2] = SM_SentinelUndef;
continue;
}
// Check for an undef mask and a mask value properly aligned to fit with
// a pair of values. If we find such a case, use the non-undef mask's value.
if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
- WidenedMask.push_back(Mask[i + 1] / 2);
+ WidenedMask[i/2] = Mask[i + 1] / 2;
continue;
}
if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
- WidenedMask.push_back(Mask[i] / 2);
+ WidenedMask[i/2] = Mask[i] / 2;
continue;
}
@@ -9993,7 +10437,7 @@ static bool canWidenShuffleElements(ArrayRef<int> Mask,
if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
(Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
- WidenedMask.push_back(SM_SentinelZero);
+ WidenedMask[i/2] = SM_SentinelZero;
continue;
}
return false;
@@ -10002,7 +10446,7 @@ static bool canWidenShuffleElements(ArrayRef<int> Mask,
// Finally check if the two mask values are adjacent and aligned with
// a pair.
if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
- WidenedMask.push_back(Mask[i] / 2);
+ WidenedMask[i/2] = Mask[i] / 2;
continue;
}
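For illustration, a standalone version of the pair rules (the sentinel values are assumed stand-ins for SM_SentinelUndef/SM_SentinelZero; the function name is invented): <0,1,6,7> widens to <0,3>, while <0,2,4,6> has no aligned pairs and fails.

#include <optional>
#include <vector>

constexpr int Undef = -1, Zero = -2; // assumed stand-ins for the sentinels

static std::optional<std::vector<int>> widenPairs(const std::vector<int> &M) {
  std::vector<int> W(M.size() / 2, Undef);
  for (size_t i = 0; i < M.size(); i += 2) {
    int A = M[i], B = M[i + 1];
    if (A == Undef && B == Undef)
      continue; // stays undef
    if (A == Undef && B >= 0 && B % 2 == 1) { W[i / 2] = B / 2; continue; }
    if (B == Undef && A >= 0 && A % 2 == 0) { W[i / 2] = A / 2; continue; }
    if ((A == Zero || A == Undef) && (B == Zero || B == Undef)) {
      W[i / 2] = Zero; // zeroable pair
      continue;
    }
    if (A >= 0 && A % 2 == 0 && B == A + 1) { W[i / 2] = A / 2; continue; }
    return std::nullopt; // not pair-aligned
  }
  return W;
}

int main() {
  auto W = widenPairs({0, 1, 6, 7}); // -> {0, 3}
  auto F = widenPairs({0, 2, 4, 6}); // -> nullopt (no aligned pairs)
  return (W && (*W)[0] == 0 && (*W)[1] == 3 && !F) ? 0 : 1;
}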
@@ -10020,7 +10464,7 @@ static bool canWidenShuffleElements(ArrayRef<int> Mask,
/// This routine just extracts two subvectors, shuffles them independently, and
/// then concatenates them back together. This should work effectively with all
/// AVX vector shuffle types.
-static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
+static SDValue splitAndLowerVectorShuffle(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(VT.getSizeInBits() >= 256 &&
@@ -10039,8 +10483,7 @@ static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
// Rather than splitting build-vectors, just build two narrower build
// vectors. This helps shuffling with splats and zeros.
auto SplitVector = [&](SDValue V) {
- while (V.getOpcode() == ISD::BITCAST)
- V = V->getOperand(0);
+ V = peekThroughBitcasts(V);
MVT OrigVT = V.getSimpleValueType();
int OrigNumElements = OrigVT.getVectorNumElements();
@@ -10063,8 +10506,8 @@ static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
LoOps.push_back(BV->getOperand(i));
HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
}
- LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
- HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
+ LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps);
+ HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps);
}
return std::make_pair(DAG.getBitcast(SplitVT, LoV),
DAG.getBitcast(SplitVT, HiV));
@@ -10077,7 +10520,9 @@ static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
// Now create two 4-way blends of these half-width vectors.
auto HalfBlend = [&](ArrayRef<int> HalfMask) {
bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
- SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
+ SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
+ SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
+ SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
for (int i = 0; i < SplitNumElements; ++i) {
int M = HalfMask[i];
if (M >= NumElements) {
@@ -10085,21 +10530,15 @@ static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
UseHiV2 = true;
else
UseLoV2 = true;
- V2BlendMask.push_back(M - NumElements);
- V1BlendMask.push_back(-1);
- BlendMask.push_back(SplitNumElements + i);
+ V2BlendMask[i] = M - NumElements;
+ BlendMask[i] = SplitNumElements + i;
} else if (M >= 0) {
if (M >= SplitNumElements)
UseHiV1 = true;
else
UseLoV1 = true;
- V2BlendMask.push_back(-1);
- V1BlendMask.push_back(M);
- BlendMask.push_back(i);
- } else {
- V2BlendMask.push_back(-1);
- V1BlendMask.push_back(-1);
- BlendMask.push_back(-1);
+ V1BlendMask[i] = M;
+ BlendMask[i] = i;
}
}
@@ -10151,12 +10590,12 @@ static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
/// between splitting the shuffle into 128-bit components and stitching those
/// back together vs. extracting the single-input shuffles and blending those
/// results.
-static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
- SDValue V2, ArrayRef<int> Mask,
+static SDValue lowerVectorShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT,
+ SDValue V1, SDValue V2,
+ ArrayRef<int> Mask,
SelectionDAG &DAG) {
- assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
- "lower single-input shuffles as it "
- "could then recurse on itself.");
+ assert(!V2.isUndef() && "This routine must not be used to lower single-input "
+ "shuffles as it could then recurse on itself.");
int Size = Mask.size();
// If this can be modeled as a broadcast of two elements followed by a blend,
@@ -10166,12 +10605,12 @@ static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
for (int M : Mask)
if (M >= Size) {
- if (V2BroadcastIdx == -1)
+ if (V2BroadcastIdx < 0)
V2BroadcastIdx = M - Size;
else if (M - Size != V2BroadcastIdx)
return false;
} else if (M >= 0) {
- if (V1BroadcastIdx == -1)
+ if (V1BroadcastIdx < 0)
V1BroadcastIdx = M;
else if (M != V1BroadcastIdx)
return false;
@@ -10210,54 +10649,51 @@ static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
/// is lower than any other fully general cross-lane shuffle strategy I'm aware
/// of. Special cases for each particular shuffle pattern should be handled
/// prior to trying this lowering.
-static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
+static SDValue lowerVectorShuffleAsLanePermuteAndBlend(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG) {
// FIXME: This should probably be generalized for 512-bit vectors as well.
assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
- int LaneSize = Mask.size() / 2;
+ int Size = Mask.size();
+ int LaneSize = Size / 2;
// If there are only inputs from one 128-bit lane, splitting will in fact be
// less expensive. The flags track whether the given lane contains an element
// that crosses to another lane.
bool LaneCrossing[2] = {false, false};
- for (int i = 0, Size = Mask.size(); i < Size; ++i)
+ for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
if (!LaneCrossing[0] || !LaneCrossing[1])
return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
- if (isSingleInputShuffleMask(Mask)) {
- SmallVector<int, 32> FlippedBlendMask;
- for (int i = 0, Size = Mask.size(); i < Size; ++i)
- FlippedBlendMask.push_back(
- Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
- ? Mask[i]
- : Mask[i] % LaneSize +
- (i / LaneSize) * LaneSize + Size));
-
- // Flip the vector, and blend the results which should now be in-lane. The
- // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
- // 5 for the high source. The value 3 selects the high half of source 2 and
- // the value 2 selects the low half of source 2. We only use source 2 to
- // allow folding it into a memory operand.
- unsigned PERMMask = 3 | 2 << 4;
- SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
- V1, DAG.getConstant(PERMMask, DL, MVT::i8));
- return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
- }
-
- // This now reduces to two single-input shuffles of V1 and V2 which at worst
- // will be handled by the above logic and a blend of the results, much like
- // other patterns in AVX.
- return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
+ assert(V2.isUndef() &&
+ "This last part of this routine only works on single input shuffles");
+
+ SmallVector<int, 32> FlippedBlendMask(Size);
+ for (int i = 0; i < Size; ++i)
+ FlippedBlendMask[i] =
+ Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
+ ? Mask[i]
+ : Mask[i] % LaneSize +
+ (i / LaneSize) * LaneSize + Size);
+
+ // Flip the vector, and blend the results which should now be in-lane. The
+ // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
+ // 5 for the high source. The value 3 selects the high half of source 2 and
+ // the value 2 selects the low half of source 2. We only use source 2 to
+ // allow folding it into a memory operand.
+ unsigned PERMMask = 3 | 2 << 4;
+ SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
+ V1, DAG.getConstant(PERMMask, DL, MVT::i8));
+ return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
}
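To see why the flip makes everything in-lane, here is a plain-C++ trace of the mask arithmetic on a hypothetical v4f64 single-input mask (undef handling omitted for brevity; since V2 is undef, every mask element is already < Size):

#include <array>
#include <cassert>

int main() {
  std::array<int, 4> Mask = {2, 1, 0, 3}; // elements 0 and 2 cross lanes
  int Size = 4, LaneSize = 2;
  std::array<int, 4> Blend;               // indices >= Size refer to Flipped
  for (int i = 0; i < Size; ++i)
    Blend[i] = (Mask[i] / LaneSize == i / LaneSize)
                   ? Mask[i]              // already in-lane: take V1 directly
                   : Mask[i] % LaneSize + (i / LaneSize) * LaneSize + Size;
  // VPERM2X128 with imm 0x23 swaps the two halves: Flipped = <2,3,0,1> of V1.
  std::array<int, 4> Flipped = {2, 3, 0, 1};
  for (int i = 0; i < Size; ++i) {
    int Got = Blend[i] < Size ? Blend[i] : Flipped[Blend[i] - Size];
    assert(Got == Mask[i]);               // in-lane blend reproduces the mask
  }
  return 0;
}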
/// \brief Handle lowering 2-lane 128-bit shuffles.
-static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
+static SDValue lowerV2X128VectorShuffle(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// TODO: If minimizing size and one of the inputs is a zero vector and the
// zero vector has only one use, we could use a VPERM2X128 to save the
@@ -10278,6 +10714,10 @@ static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
// subvector.
bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
+ // With AVX2 we should use VPERMQ/VPERMPD to allow memory folding.
+ if (Subtarget.hasAVX2() && V2.isUndef())
+ return SDValue();
+
MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
VT.getVectorNumElements() / 2);
SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
@@ -10349,10 +10789,9 @@ static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
/// in x86 only floating point has interesting non-repeating shuffles, and even
/// those are still *marginally* more expensive.
static SDValue lowerVectorShuffleByMerging128BitLanes(
- SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
- const X86Subtarget *Subtarget, SelectionDAG &DAG) {
- assert(!isSingleInputShuffleMask(Mask) &&
- "This is only useful with multiple inputs.");
+ const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget, SelectionDAG &DAG) {
+ assert(!V2.isUndef() && "This is only useful with multiple inputs.");
int Size = Mask.size();
int LaneSize = 128 / VT.getScalarSizeInBits();
@@ -10361,10 +10800,8 @@ static SDValue lowerVectorShuffleByMerging128BitLanes(
// See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
// check whether the in-128-bit lane shuffles share a repeating pattern.
- SmallVector<int, 4> Lanes;
- Lanes.resize(NumLanes, -1);
- SmallVector<int, 4> InLaneMask;
- InLaneMask.resize(LaneSize, -1);
+ SmallVector<int, 4> Lanes((unsigned)NumLanes, -1);
+ SmallVector<int, 4> InLaneMask((unsigned)LaneSize, -1);
for (int i = 0; i < Size; ++i) {
if (Mask[i] < 0)
continue;
@@ -10392,8 +10829,7 @@ static SDValue lowerVectorShuffleByMerging128BitLanes(
// First shuffle the lanes into place.
MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
VT.getSizeInBits() / 64);
- SmallVector<int, 8> LaneMask;
- LaneMask.resize(NumLanes * 2, -1);
+ SmallVector<int, 8> LaneMask((unsigned)NumLanes * 2, -1);
for (int i = 0; i < NumLanes; ++i)
if (Lanes[i] >= 0) {
LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
@@ -10408,8 +10844,7 @@ static SDValue lowerVectorShuffleByMerging128BitLanes(
LaneShuffle = DAG.getBitcast(VT, LaneShuffle);
// Now do a simple shuffle that isn't lane crossing.
- SmallVector<int, 8> NewMask;
- NewMask.resize(Size, -1);
+ SmallVector<int, 8> NewMask((unsigned)Size, -1);
for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0)
NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
@@ -10422,11 +10857,12 @@ static SDValue lowerVectorShuffleByMerging128BitLanes(
/// Lower shuffles where an entire half of a 256-bit vector is UNDEF.
/// This allows for fast cases such as subvector extraction/insertion
/// or shuffling smaller vector types which can lower more efficiently.
-static SDValue lowerVectorShuffleWithUndefHalf(SDLoc DL, MVT VT, SDValue V1,
- SDValue V2, ArrayRef<int> Mask,
- const X86Subtarget *Subtarget,
+static SDValue lowerVectorShuffleWithUndefHalf(const SDLoc &DL, MVT VT,
+ SDValue V1, SDValue V2,
+ ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- assert(VT.getSizeInBits() == 256 && "Expected 256-bit vector");
+ assert(VT.is256BitVector() && "Expected 256-bit vector");
unsigned NumElts = VT.getVectorNumElements();
unsigned HalfNumElts = NumElts / 2;
@@ -10457,21 +10893,16 @@ static SDValue lowerVectorShuffleWithUndefHalf(SDLoc DL, MVT VT, SDValue V1,
DAG.getIntPtrConstant(HalfNumElts, DL));
}
- // AVX2 supports efficient immediate 64-bit element cross-lane shuffles.
- if (UndefLower && Subtarget->hasAVX2() &&
- (VT == MVT::v4f64 || VT == MVT::v4i64))
- return SDValue();
-
- // If the shuffle only uses the lower halves of the input operands,
+ // If the shuffle only uses two of the four halves of the input operands,
// then extract them and perform the 'half' shuffle at half width.
// e.g. vector_shuffle <X, X, X, X, u, u, u, u> or <X, X, u, u>
int HalfIdx1 = -1, HalfIdx2 = -1;
- SmallVector<int, 8> HalfMask;
+ SmallVector<int, 8> HalfMask(HalfNumElts);
unsigned Offset = UndefLower ? HalfNumElts : 0;
for (unsigned i = 0; i != HalfNumElts; ++i) {
int M = Mask[i + Offset];
if (M < 0) {
- HalfMask.push_back(M);
+ HalfMask[i] = M;
continue;
}
@@ -10479,23 +10910,18 @@ static SDValue lowerVectorShuffleWithUndefHalf(SDLoc DL, MVT VT, SDValue V1,
// i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
int HalfIdx = M / HalfNumElts;
- // Only shuffle using the lower halves of the inputs.
- // TODO: Investigate usefulness of shuffling with upper halves.
- if (HalfIdx != 0 && HalfIdx != 2)
- return SDValue();
-
// Determine the element index into its half vector source.
int HalfElt = M % HalfNumElts;
// We can shuffle with up to 2 half vectors, set the new 'half'
// shuffle mask accordingly.
- if (-1 == HalfIdx1 || HalfIdx1 == HalfIdx) {
- HalfMask.push_back(HalfElt);
+ if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
+ HalfMask[i] = HalfElt;
HalfIdx1 = HalfIdx;
continue;
}
- if (-1 == HalfIdx2 || HalfIdx2 == HalfIdx) {
- HalfMask.push_back(HalfElt + HalfNumElts);
+ if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
+ HalfMask[i] = HalfElt + HalfNumElts;
HalfIdx2 = HalfIdx;
continue;
}
@@ -10505,6 +10931,33 @@ static SDValue lowerVectorShuffleWithUndefHalf(SDLoc DL, MVT VT, SDValue V1,
}
assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
+ // Only shuffle the halves of the inputs when useful.
+ int NumLowerHalves =
+ (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
+ int NumUpperHalves =
+ (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
+
+ // uuuuXXXX - don't extract uppers just to insert again.
+ if (UndefLower && NumUpperHalves != 0)
+ return SDValue();
+
+ // XXXXuuuu - don't extract both uppers, instead shuffle and then extract.
+ if (UndefUpper && NumUpperHalves == 2)
+ return SDValue();
+
+ // AVX2 - XXXXuuuu - always extract lowers.
+ if (Subtarget.hasAVX2() && !(UndefUpper && NumUpperHalves == 0)) {
+ // AVX2 supports efficient immediate 64-bit element cross-lane shuffles.
+ if (VT == MVT::v4f64 || VT == MVT::v4i64)
+ return SDValue();
+ // AVX2 supports variable 32-bit element cross-lane shuffles.
+ if (VT == MVT::v8f32 || VT == MVT::v8i32) {
+ // XXXXuuuu - don't extract lowers and uppers.
+ if (UndefUpper && NumLowerHalves != 0 && NumUpperHalves != 0)
+ return SDValue();
+ }
+ }
+
auto GetHalfVector = [&](int HalfIdx) {
if (HalfIdx < 0)
return DAG.getUNDEF(HalfVT);
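The HalfIdx encoding above (0 = lower V1, 1 = upper V1, 2 = lower V2, 3 = upper V2) drives the profitability checks. A minimal sketch of the classification, assuming a hypothetical v8f32 mask whose upper result half is undef:

#include <array>

int main() {
  // <8,9,0,1,u,u,u,u> reads only V2.lo and V1.lo.
  std::array<int, 8> Mask = {8, 9, 0, 1, -1, -1, -1, -1};
  int HalfNumElts = 4, HalfIdx1 = -1, HalfIdx2 = -1;
  for (int i = 0; i != HalfNumElts; ++i) {      // UndefLower==false: Offset=0
    int M = Mask[i];
    if (M < 0)
      continue;
    int HalfIdx = M / HalfNumElts;              // which quarter of (V1,V2)
    if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx)
      HalfIdx1 = HalfIdx;
    else if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx)
      HalfIdx2 = HalfIdx;
  }
  // HalfIdx1 == 2, HalfIdx2 == 0: NumUpperHalves == 0, so extracting the two
  // lower halves and doing a v4f32 half-width shuffle is profitable.
  return (HalfIdx1 == 2 && HalfIdx2 == 0) ? 0 : 1;
}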
@@ -10536,7 +10989,177 @@ static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
return true;
}
-static SDValue lowerVectorShuffleWithSHUFPD(SDLoc DL, MVT VT,
+/// Handle case where shuffle sources are coming from the same 128-bit lane and
+/// every lane can be represented as the same repeating mask - allowing us to
+/// shuffle the sources with the repeating shuffle and then permute the result
+/// to the destination lanes.
+static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
+ const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget, SelectionDAG &DAG) {
+ int NumElts = VT.getVectorNumElements();
+ int NumLanes = VT.getSizeInBits() / 128;
+ int NumLaneElts = NumElts / NumLanes;
+
+ // On AVX2 we may be able to just shuffle the lowest elements and then
+ // broadcast the result.
+ if (Subtarget.hasAVX2()) {
+ for (unsigned BroadcastSize : {16, 32, 64}) {
+ if (BroadcastSize <= VT.getScalarSizeInBits())
+ continue;
+ int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
+
+ // Attempt to match a repeating pattern every NumBroadcastElts,
+ // accounting for UNDEFs, but only referencing the lowest 128-bit
+ // lane of the inputs.
+ auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
+ for (int i = 0; i != NumElts; i += NumBroadcastElts)
+ for (int j = 0; j != NumBroadcastElts; ++j) {
+ int M = Mask[i + j];
+ if (M < 0)
+ continue;
+ int &R = RepeatMask[j];
+ if (0 != ((M % NumElts) / NumLaneElts))
+ return false;
+ if (0 <= R && R != M)
+ return false;
+ R = M;
+ }
+ return true;
+ };
+
+ SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
+ if (!FindRepeatingBroadcastMask(RepeatMask))
+ continue;
+
+ // Shuffle the (lowest) repeated elements in place for broadcast.
+ SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
+
+ // Shuffle the actual broadcast.
+ SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
+ for (int i = 0; i != NumElts; i += NumBroadcastElts)
+ for (int j = 0; j != NumBroadcastElts; ++j)
+ BroadcastMask[i + j] = j;
+ return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
+ BroadcastMask);
+ }
+ }
+
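A small worked instance of the broadcast matcher, under the stated constraint that the repeating pattern may only reference the lowest 128-bit lane (mask and sizes are hypothetical):

#include <array>
#include <cassert>

int main() {
  // v8f32 mask repeating <1,0>: one in-lane shuffle plus a 64-bit broadcast.
  std::array<int, 8> Mask = {1, 0, 1, 0, 1, 0, 1, 0};
  const int NumBroadcastElts = 2;            // BroadcastSize 64 / 32-bit elts
  std::array<int, NumBroadcastElts> Repeat = {-1, -1};
  for (int i = 0; i < 8; i += NumBroadcastElts)
    for (int j = 0; j < NumBroadcastElts; ++j) {
      int M = Mask[i + j];
      if (M < 0)
        continue;
      assert(M / 4 == 0 && "must stay in the lowest 128-bit lane");
      assert((Repeat[j] < 0 || Repeat[j] == M) && "conflicting repeat");
      Repeat[j] = M;
    }
  // RepeatMask = <1,0,u,u,u,u,u,u>; BroadcastMask = <0,1,0,1,0,1,0,1>.
  assert(Repeat[0] == 1 && Repeat[1] == 0);
  return 0;
}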
+ // Bail if the shuffle mask doesn't cross 128-bit lanes.
+ if (!is128BitLaneCrossingShuffleMask(VT, Mask))
+ return SDValue();
+
+ // Bail if we already have a repeated lane shuffle mask.
+ SmallVector<int, 8> RepeatedShuffleMask;
+ if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
+ return SDValue();
+
+ // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
+ // (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
+ int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
+ int NumSubLanes = NumLanes * SubLaneScale;
+ int NumSubLaneElts = NumLaneElts / SubLaneScale;
+
+ // Check that all the sources are coming from the same lane and see if we can
+ // form a repeating shuffle mask (local to each sub-lane). At the same time,
+ // determine the source sub-lane for each destination sub-lane.
+ int TopSrcSubLane = -1;
+ SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
+ SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
+ SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
+ SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};
+
+ for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
+ // Extract the sub-lane mask, check that it all comes from the same lane
+ // and normalize the mask entries to come from the first lane.
+ int SrcLane = -1;
+ SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
+ for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
+ int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
+ if (M < 0)
+ continue;
+ int Lane = (M % NumElts) / NumLaneElts;
+ if ((0 <= SrcLane) && (SrcLane != Lane))
+ return SDValue();
+ SrcLane = Lane;
+ int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
+ SubLaneMask[Elt] = LocalM;
+ }
+
+ // Whole sub-lane is UNDEF.
+ if (SrcLane < 0)
+ continue;
+
+ // Attempt to match against the candidate repeated sub-lane masks.
+ for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
+ auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
+ for (int i = 0; i != NumSubLaneElts; ++i) {
+ if (M1[i] < 0 || M2[i] < 0)
+ continue;
+ if (M1[i] != M2[i])
+ return false;
+ }
+ return true;
+ };
+
+ auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
+ if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
+ continue;
+
+ // Merge the sub-lane mask into the matching repeated sub-lane mask.
+ for (int i = 0; i != NumSubLaneElts; ++i) {
+ int M = SubLaneMask[i];
+ if (M < 0)
+ continue;
+ assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
+ "Unexpected mask element");
+ RepeatedSubLaneMask[i] = M;
+ }
+
+ // Track the topmost source sub-lane - by setting the remaining to UNDEF
+ // we can greatly simplify shuffle matching.
+ int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
+ TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
+ Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
+ break;
+ }
+
+ // Bail if we failed to find a matching repeated sub-lane mask.
+ if (Dst2SrcSubLanes[DstSubLane] < 0)
+ return SDValue();
+ }
+ assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
+ "Unexpected source lane");
+
+ // Create a repeating shuffle mask for the entire vector.
+ SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
+ for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
+ int Lane = SubLane / SubLaneScale;
+ auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
+ for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
+ int M = RepeatedSubLaneMask[Elt];
+ if (M < 0)
+ continue;
+ int Idx = (SubLane * NumSubLaneElts) + Elt;
+ RepeatedMask[Idx] = M + (Lane * NumLaneElts);
+ }
+ }
+ SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
+
+ // Shuffle each source sub-lane to its destination.
+ SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
+ for (int i = 0; i != NumElts; i += NumSubLaneElts) {
+ int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
+ if (SrcSubLane < 0)
+ continue;
+ for (int j = 0; j != NumSubLaneElts; ++j)
+ SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
+ }
+
+ return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
+ SubLaneMask);
+}
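To illustrate the decomposition the routine produces, a plain-C++ check on a hypothetical v8f32 mask that crosses lanes but applies the same in-lane pattern everywhere (AVX1 case, so sub-lanes are whole 128-bit lanes):

#include <array>
#include <cassert>

int main() {
  std::array<int, 8> Mask = {5, 4, 7, 6, 1, 0, 3, 2};     // lane-crossing
  std::array<int, 8> Repeated = {1, 0, 3, 2, 5, 4, 7, 6}; // in-lane swap
  std::array<int, 2> Dst2SrcLane = {1, 0};                // then lane permute
  for (int i = 0; i < 8; ++i) {
    int Lane = Dst2SrcLane[i / 4];
    assert(Repeated[Lane * 4 + i % 4] == Mask[i]); // permute(repeat) == Mask
  }
  return 0;
}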
+
+static SDValue lowerVectorShuffleWithSHUFPD(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
@@ -10571,25 +11194,24 @@ static SDValue lowerVectorShuffleWithSHUFPD(SDLoc DL, MVT VT,
///
/// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
/// isn't available.
-static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV4F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
SmallVector<int, 4> WidenedMask;
if (canWidenShuffleElements(Mask, WidenedMask))
- return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
- DAG);
+ if (SDValue V = lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask,
+ Subtarget, DAG))
+ return V;
- if (isSingleInputShuffleMask(Mask)) {
+ if (V2.isUndef()) {
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4f64, V1,
- Mask, Subtarget, DAG))
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
+ DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Use low duplicate instructions for masks that match their pattern.
@@ -10597,7 +11219,7 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
- // Non-half-crossing single input shuffles can be lowerid with an
+ // Non-half-crossing single input shuffles can be lowered with an
// interleaved permutation.
unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
@@ -10606,10 +11228,16 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
}
// With AVX2 we have direct support for this permutation.
- if (Subtarget->hasAVX2())
+ if (Subtarget.hasAVX2())
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
+ // Try to create an in-lane repeating shuffle mask and then shuffle the
+ // results into the target lanes.
+ if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
+ DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
+ return V;
+
// Otherwise, fall back.
return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
DAG);
@@ -10629,19 +11257,25 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
lowerVectorShuffleWithSHUFPD(DL, MVT::v4f64, Mask, V1, V2, DAG))
return Op;
+ // Try to create an in-lane repeating shuffle mask and then shuffle the
+ // results into the target lanes.
+ if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
+ DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
+ return V;
+
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle. However, if we have AVX2 and either inputs are already in place,
// we will be able to shuffle even across lanes the other input in a single
// instruction so skip this pattern.
- if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
- isShuffleMaskInputInPlace(1, Mask))))
+ if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
+ isShuffleMaskInputInPlace(1, Mask))))
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
return Result;
// If we have AVX2 then we always want to lower with a blend because at v4 we
// can fully permute the elements.
- if (Subtarget->hasAVX2())
+ if (Subtarget.hasAVX2())
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
Mask, DAG);
@@ -10653,59 +11287,53 @@ static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v4i64 shuffling.
-static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV4I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
- assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
+ assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
SmallVector<int, 4> WidenedMask;
if (canWidenShuffleElements(Mask, WidenedMask))
- return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
- DAG);
+ if (SDValue V = lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask,
+ Subtarget, DAG))
+ return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
Subtarget, DAG))
return Blend;
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4i64, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4i64, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
- // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
- // use lower latency instructions that will operate on both 128-bit lanes.
- SmallVector<int, 2> RepeatedMask;
- if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
- if (isSingleInputShuffleMask(Mask)) {
- int PSHUFDMask[] = {-1, -1, -1, -1};
- for (int i = 0; i < 2; ++i)
- if (RepeatedMask[i] >= 0) {
- PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
- PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
- }
+ if (V2.isUndef()) {
+ // When the shuffle is mirrored between the 128-bit lanes of the unit, we
+ // can use lower latency instructions that will operate on both lanes.
+ SmallVector<int, 2> RepeatedMask;
+ if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
+ SmallVector<int, 4> PSHUFDMask;
+ scaleShuffleMask(2, RepeatedMask, PSHUFDMask);
return DAG.getBitcast(
MVT::v4i64,
DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
DAG.getBitcast(MVT::v8i32, V1),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
}
- }
- // AVX2 provides a direct instruction for permuting a single input across
- // lanes.
- if (isSingleInputShuffleMask(Mask))
+ // AVX2 provides a direct instruction for permuting a single input across
+ // lanes.
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
+ }
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
// Use dedicated unpack instructions for masks that match their pattern.
@@ -10717,7 +11345,7 @@ static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// shuffle. However, if we have AVX2 and either inputs are already in place,
// we will be able to shuffle even across lanes the other input in a single
// instruction so skip this pattern.
- if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
+ if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
isShuffleMaskInputInPlace(1, Mask))))
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
@@ -10732,14 +11360,12 @@ static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
/// isn't available.
-static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV8F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
@@ -10747,7 +11373,7 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return Blend;
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8f32, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8f32, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
@@ -10759,12 +11385,12 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
"Repeated masks must be half the mask width!");
// Use even/odd duplicate instructions for masks that match their pattern.
- if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
+ if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
- if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3, 5, 5, 7, 7}))
+ if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
- if (isSingleInputShuffleMask(Mask))
+ if (V2.isUndef())
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
@@ -10774,30 +11400,30 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return V;
// Otherwise, fall back to a SHUFPS sequence. Here it is important that we
- // have already handled any direct blends. We also need to squash the
- // repeated mask into a simulated v4f32 mask.
- for (int i = 0; i < 4; ++i)
- if (RepeatedMask[i] >= 8)
- RepeatedMask[i] -= 4;
+ // have already handled any direct blends.
return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
}
+ // Try to create an in-lane repeating shuffle mask and then shuffle the
+ // results into the target lanes.
+ if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
+ DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
+ return V;
+
// If we have a single input shuffle with different shuffle patterns in the
// two 128-bit lanes use the variable mask to VPERMILPS.
- if (isSingleInputShuffleMask(Mask)) {
+ if (V2.isUndef()) {
SDValue VPermMask[8];
for (int i = 0; i < 8; ++i)
VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
: DAG.getConstant(Mask[i], DL, MVT::i32);
if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
- return DAG.getNode(
- X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
+ return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
+ DAG.getBuildVector(MVT::v8i32, DL, VPermMask));
- if (Subtarget->hasAVX2())
- return DAG.getNode(
- X86ISD::VPERMV, DL, MVT::v8f32,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
+ if (Subtarget.hasAVX2())
+ return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
+ DAG.getBuildVector(MVT::v8i32, DL, VPermMask), V1);
// Otherwise, fall back.
return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
@@ -10812,7 +11438,7 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// If we have AVX2 then we always want to lower with a blend because at v8 we
// can fully permute the elements.
- if (Subtarget->hasAVX2())
+ if (Subtarget.hasAVX2())
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
Mask, DAG);
@@ -10824,16 +11450,14 @@ static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v8i32 shuffling.
-static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV8I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
- assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
+ assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
@@ -10847,7 +11471,7 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return Blend;
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8i32, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8i32, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
@@ -10857,7 +11481,7 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
SmallVector<int, 4> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
- if (isSingleInputShuffleMask(Mask))
+ if (V2.isUndef())
return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
@@ -10868,24 +11492,30 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
}
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
+ // Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
return Rotate;
+ // Try to create an in-lane repeating shuffle mask and then shuffle the
+ // results into the target lanes.
+ if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
+ DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
+ return V;
+
// If the shuffle patterns aren't repeated but it is a single input, directly
// generate a cross-lane VPERMD instruction.
- if (isSingleInputShuffleMask(Mask)) {
+ if (V2.isUndef()) {
SDValue VPermMask[8];
for (int i = 0; i < 8; ++i)
VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
: DAG.getConstant(Mask[i], DL, MVT::i32);
- return DAG.getNode(
- X86ISD::VPERMV, DL, MVT::v8i32,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
+ return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32,
+ DAG.getBuildVector(MVT::v8i32, DL, VPermMask), V1);
}
// Try to simplify this by merging 128-bit lanes to enable a lane-based
@@ -10903,16 +11533,14 @@ static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v16i16 shuffling.
-static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV16I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
- assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
+ assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
@@ -10922,7 +11550,7 @@ static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return ZExt;
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v16i16, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v16i16, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
@@ -10936,8 +11564,8 @@ static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return V;
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
@@ -10945,7 +11573,13 @@ static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
return Rotate;
- if (isSingleInputShuffleMask(Mask)) {
+ // Try to create an in-lane repeating shuffle mask and then shuffle
+ // the results into the target lanes.
+ if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
+ DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
+ return V;
+
+ if (V2.isUndef()) {
// There are no generalized cross-lane shuffle operations available on i16
// element types.
if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
@@ -10960,26 +11594,12 @@ static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return lowerV8I16GeneralSingleInputVectorShuffle(
DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
}
-
- SDValue PSHUFBMask[32];
- for (int i = 0; i < 16; ++i) {
- if (Mask[i] == -1) {
- PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
- continue;
- }
-
- int M = i < 8 ? Mask[i] : Mask[i] - 8;
- assert(M >= 0 && M < 8 && "Invalid single-input mask!");
- PSHUFBMask[2 * i] = DAG.getConstant(2 * M, DL, MVT::i8);
- PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, DL, MVT::i8);
- }
- return DAG.getBitcast(MVT::v16i16,
- DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8,
- DAG.getBitcast(MVT::v32i8, V1),
- DAG.getNode(ISD::BUILD_VECTOR, DL,
- MVT::v32i8, PSHUFBMask)));
}
+ if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1,
+ V2, Subtarget, DAG))
+ return PSHUFB;
+
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
@@ -10994,16 +11614,14 @@ static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v32i8 shuffling.
-static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV32I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
- assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
+ assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
@@ -11013,7 +11631,7 @@ static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return ZExt;
// Check for being able to broadcast a single element.
- if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v32i8, V1,
+ if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v32i8, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
@@ -11027,8 +11645,8 @@ static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
return V;
// Try to use shift instructions.
- if (SDValue Shift =
- lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask, DAG))
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
+ Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
@@ -11036,25 +11654,21 @@ static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
return Rotate;
- if (isSingleInputShuffleMask(Mask)) {
- // There are no generalized cross-lane shuffle operations available on i8
- // element types.
- if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
- return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
- Mask, DAG);
+ // Try to create an in-lane repeating shuffle mask and then shuffle
+ // the results into the target lanes.
+ if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
+ DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
+ return V;
- SDValue PSHUFBMask[32];
- for (int i = 0; i < 32; ++i)
- PSHUFBMask[i] =
- Mask[i] < 0
- ? DAG.getUNDEF(MVT::i8)
- : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, DL,
- MVT::i8);
+ // There are no generalized cross-lane shuffle operations available on i8
+ // element types.
+ if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
+ return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2, Mask,
+ DAG);
- return DAG.getNode(
- X86ISD::PSHUFB, DL, MVT::v32i8, V1,
- DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
- }
+ if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1,
+ V2, Subtarget, DAG))
+ return PSHUFB;
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
@@ -11071,19 +11685,14 @@ static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
/// This routine either breaks down the specific type of a 256-bit x86 vector
/// shuffle or splits it into two 128-bit shuffles and fuses the results back
/// together based on the available instructions.
-static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- MVT VT, const X86Subtarget *Subtarget,
+static SDValue lower256BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ MVT VT, SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
-
// If we have a single input to the zero element, insert that into V1 if we
// can do so cheaply.
int NumElts = VT.getVectorNumElements();
- int NumV2Elements = std::count_if(Mask.begin(), Mask.end(), [NumElts](int M) {
- return M >= NumElts;
- });
+ int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
if (NumV2Elements == 1 && Mask[0] >= NumElts)
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
@@ -11101,11 +11710,17 @@ static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
// essentially *zero* ability to manipulate a 256-bit vector with integer
// types. Since we'll use floating point types there eventually, just
// immediately cast everything to a float and operate entirely in that domain.
- if (VT.isInteger() && !Subtarget->hasAVX2()) {
+ if (VT.isInteger() && !Subtarget.hasAVX2()) {
int ElementBits = VT.getScalarSizeInBits();
- if (ElementBits < 32)
- // No floating point type available, decompose into 128-bit vectors.
+ if (ElementBits < 32) {
+ // No floating point type available; if we can't use the bit operations
+ // for masking/blending, then decompose into 128-bit vectors.
+ if (SDValue V = lowerVectorShuffleAsBitMask(DL, VT, V1, V2, Mask, DAG))
+ return V;
+ if (SDValue V = lowerVectorShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
+ return V;
return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
+ }
MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
VT.getVectorNumElements());
@@ -11116,17 +11731,17 @@ static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
switch (VT.SimpleTy) {
case MVT::v4f64:
- return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV4F64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v4i64:
- return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV4I64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v8f32:
- return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV8F32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v8i32:
- return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV8I32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v16i16:
- return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV16I16VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v32i8:
- return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV32I8VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
default:
llvm_unreachable("Not a valid 256-bit x86 vector type!");
@@ -11134,21 +11749,37 @@ static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
}
/// \brief Try to lower a vector shuffle as 128-bit shuffles.
-static SDValue lowerV4X128VectorShuffle(SDLoc DL, MVT VT,
- ArrayRef<int> Mask,
- SDValue V1, SDValue V2,
- SelectionDAG &DAG) {
+static SDValue lowerV4X128VectorShuffle(const SDLoc &DL, MVT VT,
+ ArrayRef<int> Mask, SDValue V1,
+ SDValue V2, SelectionDAG &DAG) {
assert(VT.getScalarSizeInBits() == 64 &&
"Unexpected element type size for 128bit shuffle.");
// Handling a 256-bit vector requires VLX, and most probably
// lowerV2X128VectorShuffle() is a better solution for that case.
- assert(VT.is512BitVector() && "Unexpected vector size for 128bit shuffle.");
+ assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
SmallVector<int, 4> WidenedMask;
if (!canWidenShuffleElements(Mask, WidenedMask))
return SDValue();
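+ // e.g. the v8i64 mask {0, 1, 4, 5, 2, 3, 6, 7} widens to the 128-bit
+ // element mask {0, 2, 1, 3}.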
+ SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
+ // Ensure that the elements of each half all come from the same operand.
+ int MaxOp1Index = VT.getVectorNumElements() / 2 - 1;
+ for (int i = 0, Size = WidenedMask.size(); i < Size; ++i) {
+ if (WidenedMask[i] == SM_SentinelZero)
+ return SDValue();
+ if (WidenedMask[i] == SM_SentinelUndef)
+ continue;
+
+ SDValue Op = WidenedMask[i] > MaxOp1Index ? V2 : V1;
+ unsigned OpIndex = (i < Size/2) ? 0 : 1;
+ if (Ops[OpIndex].isUndef())
+ Ops[OpIndex] = Op;
+ else if (Ops[OpIndex] != Op)
+ return SDValue();
+ }
+
// Form a 128-bit permutation.
// Convert the 64-bit shuffle mask selection values into 128-bit selection
// bits defined by a vshuf64x2 instruction's immediate control byte.
@@ -11156,19 +11787,16 @@ static SDValue lowerV4X128VectorShuffle(SDLoc DL, MVT VT,
unsigned ControlBitsNum = WidenedMask.size() / 2;
for (int i = 0, Size = WidenedMask.size(); i < Size; ++i) {
- if (WidenedMask[i] == SM_SentinelZero)
- return SDValue();
-
// Use first element in place of undef mask.
Imm = (WidenedMask[i] == SM_SentinelUndef) ? 0 : WidenedMask[i];
PermMask |= (Imm % WidenedMask.size()) << (i * ControlBitsNum);
}
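+ // For example, WidenedMask {0, 2, 5, 7} selects Ops = {V1, V2} above and
+ // encodes PermMask = 0 | (2 << 2) | (1 << 4) | (3 << 6) = 0xD8.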
- return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
+ return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
DAG.getConstant(PermMask, DL, MVT::i8));
}
-static SDValue lowerVectorShuffleWithPERMV(SDLoc DL, MVT VT,
+static SDValue lowerVectorShuffleWithPERMV(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
@@ -11178,23 +11806,43 @@ static SDValue lowerVectorShuffleWithPERMV(SDLoc DL, MVT VT,
MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
- if (isSingleInputShuffleMask(Mask))
+ if (V2.isUndef())
return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
}
/// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
-static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV8F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
+ if (V2.isUndef()) {
+ // Use low duplicate instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
+ return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
+
+ if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
+ // Non-half-crossing single input shuffles can be lowered with an
+ // interleaved permutation.
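+ // For instance, the in-lane swap mask {1, 0, 3, 2, 5, 4, 7, 6} sets
+ // bits 0, 2, 4 and 6 below, giving a VPERMILPMask immediate of 0x55.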
+ unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
+ ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
+ ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
+ ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
+ return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
+ DAG.getConstant(VPERMILPMask, DL, MVT::i8));
+ }
+
+ SmallVector<int, 4> RepeatedMask;
+ if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
+ return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
+ getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
+ }
+
if (SDValue Shuf128 =
lowerV4X128VectorShuffle(DL, MVT::v8f64, Mask, V1, V2, DAG))
return Shuf128;
@@ -11203,42 +11851,90 @@ static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
lowerVectorShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
return Unpck;
+ // Check if the blend happens to exactly fit the SHUFPD pattern.
+ if (SDValue Op =
+ lowerVectorShuffleWithSHUFPD(DL, MVT::v8f64, Mask, V1, V2, DAG))
+ return Op;
+
return lowerVectorShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
}
/// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
-static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV16F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
- if (SDValue Unpck =
- lowerVectorShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
- return Unpck;
+ // If the shuffle mask is repeated in each 128-bit lane, we have many more
+ // options to efficiently lower the shuffle.
+ SmallVector<int, 4> RepeatedMask;
+ if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
+ assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
+
+ // Use even/odd duplicate instructions for masks that match their pattern.
+ if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
+ return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
+ if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
+ return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
+
+ if (V2.isUndef())
+ return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
+ getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
+
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (SDValue Unpck =
+ lowerVectorShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
+ return Unpck;
+
+ // Otherwise, fall back to a SHUFPS sequence.
+ return lowerVectorShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2,
+ DAG);
+ }
return lowerVectorShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
}
/// \brief Handle lowering of 8-lane 64-bit integer shuffles.
-static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV8I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
if (SDValue Shuf128 =
lowerV4X128VectorShuffle(DL, MVT::v8i64, Mask, V1, V2, DAG))
return Shuf128;
+ if (V2.isUndef()) {
+ // When the shuffle is mirrored between the 128-bit lanes of the unit, we
+ // can use lower-latency instructions that will operate on all four
+ // 128-bit lanes.
+ SmallVector<int, 2> Repeated128Mask;
+ if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
+ SmallVector<int, 4> PSHUFDMask;
+ scaleShuffleMask(2, Repeated128Mask, PSHUFDMask);
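+ // e.g. a repeated v2i64 mask of {1, 0} scales to the v4i32 PSHUFD mask
+ // {2, 3, 0, 1}, swapping the two i64 halves within each 128-bit lane.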
+ return DAG.getBitcast(
+ MVT::v8i64,
+ DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
+ DAG.getBitcast(MVT::v16i32, V1),
+ getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
+ }
+
+ SmallVector<int, 4> Repeated256Mask;
+ if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
+ return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
+ getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
+ }
+
+ // Try to use shift instructions.
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
+ Subtarget, DAG))
+ return Shift;
+
if (SDValue Unpck =
lowerVectorShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
return Unpck;
@@ -11247,49 +11943,111 @@ static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
}
/// \brief Handle lowering of 16-lane 32-bit integer shuffles.
-static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV16I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
- if (SDValue Unpck =
- lowerVectorShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
- return Unpck;
+ // If the shuffle mask is repeated in each 128-bit lane, we can use more
+ // efficient instructions that mirror the shuffles across the four 128-bit
+ // lanes.
+ SmallVector<int, 4> RepeatedMask;
+ if (is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask)) {
+ assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
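+ // e.g. for a single input, the repeated mask {3, 1, 2, 0} becomes the
+ // PSHUFD immediate 0x27 (0b00100111), applied in every 128-bit lane.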
+ if (V2.isUndef())
+ return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
+ getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
+
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (SDValue V =
+ lowerVectorShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
+ return V;
+ }
+
+ // Try to use shift instructions.
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
+ Subtarget, DAG))
+ return Shift;
+
+ // Try to use byte rotation instructions.
+ if (Subtarget.hasBWI())
+ if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
+ DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG))
+ return Rotate;
return lowerVectorShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
}
/// \brief Handle lowering of 32-lane 16-bit integer shuffles.
-static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV32I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
- assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
+ assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
+
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (SDValue V =
+ lowerVectorShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
+ return V;
+
+ // Try to use shift instructions.
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
+ Subtarget, DAG))
+ return Shift;
+
+ // Try to use byte rotation instructions.
+ if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
+ DL, MVT::v32i16, V1, V2, Mask, Subtarget, DAG))
+ return Rotate;
+
+ if (V2.isUndef()) {
+ SmallVector<int, 8> RepeatedMask;
+ if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
+ // As this is a single-input shuffle, the repeated mask should be
+ // a strictly valid v8i16 mask that we can pass through to the v8i16
+ // lowering to handle even the v32 case.
+ return lowerV8I16GeneralSingleInputVectorShuffle(
+ DL, MVT::v32i16, V1, RepeatedMask, Subtarget, DAG);
+ }
+ }
return lowerVectorShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
}
/// \brief Handle lowering of 64-lane 8-bit integer shuffles.
-static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- const X86Subtarget *Subtarget,
+static SDValue lowerV64I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
- assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
+ assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
+
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (SDValue V =
+ lowerVectorShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
+ return V;
+
+ // Try to use shift instructions.
+ if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
+ Subtarget, DAG))
+ return Shift;
+
+ // Try to use byte rotation instructions.
+ if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
+ DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
+ return Rotate;
+
+ if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1,
+ V2, Subtarget, DAG))
+ return PSHUFB;
// FIXME: Implement direct support for this type!
return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
@@ -11300,61 +12058,50 @@ static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
/// This routine either breaks down the specific type of a 512-bit x86 vector
/// shuffle or splits it into two 256-bit shuffles and fuses the results back
/// together based on the available instructions.
-static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- MVT VT, const X86Subtarget *Subtarget,
+static SDValue lower512BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ MVT VT, SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
- assert(Subtarget->hasAVX512() &&
+ assert(Subtarget.hasAVX512() &&
"Cannot lower 512-bit vectors w/ basic ISA!");
// Check for being able to broadcast a single element.
if (SDValue Broadcast =
- lowerVectorShuffleAsBroadcast(DL, VT, V1, Mask, Subtarget, DAG))
+ lowerVectorShuffleAsBroadcast(DL, VT, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
- // Dispatch to each element type for lowering. If we don't have supprot for
+ // Dispatch to each element type for lowering. If we don't have support for
// specific element type shuffles at 512 bits, immediately split them and
// lower them. Each lowering routine of a given type is allowed to assume that
// the requisite ISA extensions for that element type are available.
switch (VT.SimpleTy) {
case MVT::v8f64:
- return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV8F64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v16f32:
- return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV16F32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v8i64:
- return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV8I64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v16i32:
- return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+ return lowerV16I32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v32i16:
- if (Subtarget->hasBWI())
- return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
- break;
+ return lowerV32I16VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
case MVT::v64i8:
- if (Subtarget->hasBWI())
- return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
- break;
+ return lowerV64I8VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
default:
llvm_unreachable("Not a valid 512-bit x86 vector type!");
}
-
- // Otherwise fall back on splitting.
- return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
}
// Lower vXi1 vector shuffles.
// There is no dedicated instruction on AVX-512 that shuffles the masks.
// The only way to shuffle bits is to sign-extend the mask vector to a SIMD
// vector, shuffle, and then truncate it back.
-static SDValue lower1BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
- MVT VT, const X86Subtarget *Subtarget,
+static SDValue lower1BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
+ MVT VT, SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- SDLoc DL(Op);
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
- ArrayRef<int> Mask = SVOp->getMask();
- assert(Subtarget->hasAVX512() &&
+ assert(Subtarget.hasAVX512() &&
"Cannot lower 512-bit vectors w/o basic ISA!");
MVT ExtVT;
switch (VT.SimpleTy) {
@@ -11405,7 +12152,7 @@ static SDValue lower1BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
/// above in helper routines. The canonicalization attempts to widen shuffles
/// to involve fewer lanes of wider elements, consolidate symmetric patterns
/// s.t. only one of the two inputs needs to be tested, etc.
-static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
ArrayRef<int> Mask = SVOp->getMask();
@@ -11413,14 +12160,14 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
SDValue V2 = Op.getOperand(1);
MVT VT = Op.getSimpleValueType();
int NumElements = VT.getVectorNumElements();
- SDLoc dl(Op);
+ SDLoc DL(Op);
bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
"Can't lower MMX shuffles");
- bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
- bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
+ bool V1IsUndef = V1.isUndef();
+ bool V2IsUndef = V2.isUndef();
if (V1IsUndef && V2IsUndef)
return DAG.getUNDEF(VT);
@@ -11440,7 +12187,7 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
for (int &M : NewMask)
if (M >= NumElements)
M = -1;
- return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
+ return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
}
// We actually see shuffles that are entirely re-arrangements of a set of
@@ -11448,7 +12195,7 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
// simple ones. Directly lower these as a buildvector of zeros.
SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
if (Zeroable.all())
- return getZeroVector(VT, Subtarget, DAG, dl);
+ return getZeroVector(VT, Subtarget, DAG, DL);
// Try to collapse shuffles into using a vector type with fewer elements but
// wider element types. We cap this to not form integers or floating point
@@ -11467,12 +12214,12 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
V1 = DAG.getBitcast(NewVT, V1);
V2 = DAG.getBitcast(NewVT, V2);
return DAG.getBitcast(
- VT, DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
+ VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
}
}
int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
- for (int M : SVOp->getMask())
+ for (int M : Mask)
if (M < 0)
++NumUndefElements;
else if (M < NumElements)
@@ -11486,6 +12233,9 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
if (NumV2Elements > NumV1Elements)
return DAG.getCommutedVectorShuffle(*SVOp);
+ assert(NumV1Elements > 0 && "No V1 indices");
+ assert((NumV2Elements > 0 || V2IsUndef) && "V2 not undef, but not used");
+
// When the number of V1 and V2 elements is the same, try to minimize the
// number of uses of V2 in the low half of the vector. When that is tied,
// ensure that the sum of indices for V1 is equal to or lower than the sum
@@ -11493,28 +12243,28 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
// indices for V1 is lower than the number of odd indices for V2.
if (NumV1Elements == NumV2Elements) {
int LowV1Elements = 0, LowV2Elements = 0;
- for (int M : SVOp->getMask().slice(0, NumElements / 2))
+ for (int M : Mask.slice(0, NumElements / 2))
if (M >= NumElements)
++LowV2Elements;
else if (M >= 0)
++LowV1Elements;
- if (LowV2Elements > LowV1Elements) {
+ if (LowV2Elements > LowV1Elements)
return DAG.getCommutedVectorShuffle(*SVOp);
- } else if (LowV2Elements == LowV1Elements) {
+ if (LowV2Elements == LowV1Elements) {
int SumV1Indices = 0, SumV2Indices = 0;
- for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
- if (SVOp->getMask()[i] >= NumElements)
+ for (int i = 0, Size = Mask.size(); i < Size; ++i)
+ if (Mask[i] >= NumElements)
SumV2Indices += i;
- else if (SVOp->getMask()[i] >= 0)
+ else if (Mask[i] >= 0)
SumV1Indices += i;
- if (SumV2Indices < SumV1Indices) {
+ if (SumV2Indices < SumV1Indices)
return DAG.getCommutedVectorShuffle(*SVOp);
- } else if (SumV2Indices == SumV1Indices) {
+ if (SumV2Indices == SumV1Indices) {
int NumV1OddIndices = 0, NumV2OddIndices = 0;
- for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
- if (SVOp->getMask()[i] >= NumElements)
+ for (int i = 0, Size = Mask.size(); i < Size; ++i)
+ if (Mask[i] >= NumElements)
NumV2OddIndices += i % 2;
- else if (SVOp->getMask()[i] >= 0)
+ else if (Mask[i] >= 0)
NumV1OddIndices += i % 2;
if (NumV2OddIndices < NumV1OddIndices)
return DAG.getCommutedVectorShuffle(*SVOp);
@@ -11524,69 +12274,23 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
// For each vector width, delegate to a specialized lowering routine.
if (VT.is128BitVector())
- return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
+ return lower128BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG);
if (VT.is256BitVector())
- return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
+ return lower256BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG);
if (VT.is512BitVector())
- return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
+ return lower512BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG);
if (Is1BitVector)
- return lower1BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
- llvm_unreachable("Unimplemented!");
-}
+ return lower1BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG);
-// This function assumes its argument is a BUILD_VECTOR of constants or
-// undef SDNodes. i.e: ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
-// true.
-static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
- unsigned &MaskValue) {
- MaskValue = 0;
- unsigned NumElems = BuildVector->getNumOperands();
-
- // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
- // We don't handle the >2 lanes case right now.
- unsigned NumLanes = (NumElems - 1) / 8 + 1;
- if (NumLanes > 2)
- return false;
-
- unsigned NumElemsInLane = NumElems / NumLanes;
-
- // Blend for v16i16 should be symmetric for the both lanes.
- for (unsigned i = 0; i < NumElemsInLane; ++i) {
- SDValue EltCond = BuildVector->getOperand(i);
- SDValue SndLaneEltCond =
- (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
-
- int Lane1Cond = -1, Lane2Cond = -1;
- if (isa<ConstantSDNode>(EltCond))
- Lane1Cond = !isNullConstant(EltCond);
- if (isa<ConstantSDNode>(SndLaneEltCond))
- Lane2Cond = !isNullConstant(SndLaneEltCond);
-
- unsigned LaneMask = 0;
- if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
- // Lane1Cond != 0, means we want the first argument.
- // Lane1Cond == 0, means we want the second argument.
- // The encoding of this argument is 0 for the first argument, 1
- // for the second. Therefore, invert the condition.
- LaneMask = !Lane1Cond << i;
- else if (Lane1Cond < 0)
- LaneMask = !Lane2Cond << i;
- else
- return false;
-
- MaskValue |= LaneMask;
- if (NumLanes == 2)
- MaskValue |= LaneMask << NumElemsInLane;
- }
- return true;
+ llvm_unreachable("Unimplemented!");
}
/// \brief Try to lower a VSELECT instruction to a vector shuffle.
static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue Cond = Op.getOperand(0);
SDValue LHS = Op.getOperand(1);
@@ -11624,7 +12328,7 @@ SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
return BlendOp;
// Variable blends are only legal from SSE4.1 onward.
- if (!Subtarget->hasSSE41())
+ if (!Subtarget.hasSSE41())
return SDValue();
// Only some types will be legal on some subtargets. If we can emit a legal
@@ -11637,7 +12341,7 @@ SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
case MVT::v32i8:
// The byte blends for AVX vectors were introduced only in AVX2.
- if (Subtarget->hasAVX2())
+ if (Subtarget.hasAVX2())
return Op;
return SDValue();
@@ -11645,7 +12349,7 @@ SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
case MVT::v8i16:
case MVT::v16i16:
// AVX-512 BWI and VLX features support VSELECT with i16 elements.
- if (Subtarget->hasBWI() && Subtarget->hasVLX())
+ if (Subtarget.hasBWI() && Subtarget.hasVLX())
return Op;
// FIXME: We should custom lower this by fixing the condition and using i8
@@ -11723,7 +12427,7 @@ X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const
MVT EltVT = Op.getSimpleValueType();
assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
- assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
+ assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
"Unexpected vector type in ExtractBitFromMaskVector");
// variable index can't be handled in mask registers,
@@ -11737,10 +12441,15 @@ X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const
}
unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
- const TargetRegisterClass* rc = getRegClassFor(VecVT);
- if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
- rc = getRegClassFor(MVT::v16i1);
- unsigned MaxSift = rc->getSize()*8 - 1;
+ if (!Subtarget.hasDQI() && (VecVT.getVectorNumElements() <= 8)) {
+ // Use the kshiftlw/kshiftrw instructions.
+ VecVT = MVT::v16i1;
+ Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT,
+ DAG.getUNDEF(VecVT),
+ Vec,
+ DAG.getIntPtrConstant(0, dl));
+ }
+ unsigned MaxSift = VecVT.getVectorNumElements() - 1;
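+ // Shifting left by (MaxSift - IdxVal) and then right by MaxSift leaves
+ // bit IdxVal in bit 0 and clears the rest, e.g. kshiftlw $12 followed by
+ // kshiftrw $15 extracts bit 3 of a v16i1.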
Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
DAG.getConstant(MaxSift - IdxVal, dl, MVT::i8));
Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
@@ -11762,7 +12471,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
if (!isa<ConstantSDNode>(Idx)) {
if (VecVT.is512BitVector() ||
- (VecVT.is256BitVector() && Subtarget->hasInt256() &&
+ (VecVT.is256BitVector() && Subtarget.hasInt256() &&
VecVT.getVectorElementType().getSizeInBits() == 32)) {
MVT MaskEltVT =
@@ -11782,13 +12491,13 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
return SDValue();
}
+ unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+
// If this is a 256-bit vector result, first extract the 128-bit vector and
// then extract the element from the 128-bit vector.
if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
-
- unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
// Get the 128-bit vector.
- Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
+ Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
MVT EltVT = VecVT.getVectorElementType();
unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
@@ -11803,38 +12512,33 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
assert(VecVT.is128BitVector() && "Unexpected vector length");
- if (Subtarget->hasSSE41())
+ if (Subtarget.hasSSE41())
if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
return Res;
MVT VT = Op.getSimpleValueType();
// TODO: handle v16i8.
if (VT.getSizeInBits() == 16) {
- SDValue Vec = Op.getOperand(0);
- if (isNullConstant(Op.getOperand(1)))
+ if (IdxVal == 0)
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
- DAG.getBitcast(MVT::v4i32, Vec),
- Op.getOperand(1)));
+ DAG.getBitcast(MVT::v4i32, Vec), Idx));
+
+ // Transform it so it matches pextrw, which produces a 32-bit result.
MVT EltVT = MVT::i32;
- SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
- Op.getOperand(0), Op.getOperand(1));
+ SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT, Vec, Idx);
SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
DAG.getValueType(VT));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
}
if (VT.getSizeInBits() == 32) {
- unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
- if (Idx == 0)
+ if (IdxVal == 0)
return Op;
// SHUFPS the element to the lowest double word, then movss.
- int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
- MVT VVT = Op.getOperand(0).getSimpleValueType();
- SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
- DAG.getUNDEF(VVT), Mask);
+ int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
+ Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
DAG.getIntPtrConstant(0, dl));
}
@@ -11843,16 +12547,14 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
// FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
// FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
// to match extract_elt for f64.
- if (isNullConstant(Op.getOperand(1)))
+ if (IdxVal == 0)
return Op;
// UNPCKHPD the element to the lowest double word, then movsd.
// Note if the lower 64 bits of the result of the UNPCKHPD is then stored
// to a f64mem, the whole operation is folded into a single MOVHPDmr.
int Mask[2] = { 1, -1 };
- MVT VVT = Op.getOperand(0).getSimpleValueType();
- SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
- DAG.getUNDEF(VVT), Mask);
+ Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
DAG.getIntPtrConstant(0, dl));
}
@@ -11886,7 +12588,7 @@ X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
if (IdxVal)
EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
DAG.getConstant(IdxVal, dl, MVT::i8));
- if (Vec.getOpcode() == ISD::UNDEF)
+ if (Vec.isUndef())
return EltInVec;
return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
}
@@ -11895,6 +12597,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const {
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
+ unsigned NumElts = VT.getVectorNumElements();
if (EltVT == MVT::i1)
return InsertBitToMaskVector(Op, DAG);
@@ -11908,6 +12611,19 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
auto *N2C = cast<ConstantSDNode>(N2);
unsigned IdxVal = N2C->getZExtValue();
+ // If we are clearing out an element, we do this more efficiently with a
+ // blend shuffle than a costly integer insertion.
+ // TODO: would other rematerializable values (e.g. allbits) benefit as well?
+ // TODO: pre-SSE41 targets will tend to use bit masking - this could still
+ // be beneficial if we are inserting several zeros and can combine the masks.
+ if (X86::isZeroNode(N1) && Subtarget.hasSSE41() && NumElts <= 8) {
+ SmallVector<int, 8> ClearMask;
+ for (unsigned i = 0; i != NumElts; ++i)
+ ClearMask.push_back(i == IdxVal ? i + NumElts : i);
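+ // e.g. clearing element 2 of a v4i32 builds ClearMask = {0, 1, 6, 3},
+ // which takes lane 2 from the zero vector and can lower to a single blend.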
+ SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, dl);
+ return DAG.getVectorShuffle(VT, dl, N0, ZeroVector, ClearMask);
+ }
+
// If the vector is wider than 128 bits, extract the 128-bit subvector, insert
// into that, and then insert the subvector back into the result.
if (VT.is256BitVector() || VT.is512BitVector()) {
@@ -11917,8 +12633,8 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
// TODO: It is worthwhile to cast integer to floating point and back
// and incur a domain crossing penalty if that's what we'll end up
// doing anyway after extracting to a 128-bit vector.
- if ((Subtarget->hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
- (Subtarget->hasAVX2() && EltVT == MVT::i32)) {
+ if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
+ (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
N2 = DAG.getIntPtrConstant(1, dl);
return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2);
@@ -11926,7 +12642,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
}
// Get the desired 128-bit vector chunk.
- SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
+ SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
// Insert the element into the desired chunk.
unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
@@ -11938,11 +12654,11 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
DAG.getConstant(IdxIn128, dl, MVT::i32));
// Insert the changed part back into the bigger vector
- return Insert128BitVector(N0, V, IdxVal, DAG, dl);
+ return insert128BitVector(N0, V, IdxVal, DAG, dl);
}
assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
unsigned Opc;
if (VT == MVT::v8i16) {
@@ -12026,7 +12742,7 @@ static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
// Insert the 128-bit vector.
- return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
+ return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
}
if (OpVT == MVT::v1i64 &&
@@ -12042,7 +12758,7 @@ static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
// a simple subregister reference or explicit instructions to grab
// upper bits of a vector.
-static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
SDValue In = Op.getOperand(0);
@@ -12051,15 +12767,15 @@ static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
MVT ResVT = Op.getSimpleValueType();
MVT InVT = In.getSimpleValueType();
- if (Subtarget->hasFp256()) {
+ if (Subtarget.hasFp256()) {
if (ResVT.is128BitVector() &&
(InVT.is256BitVector() || InVT.is512BitVector()) &&
isa<ConstantSDNode>(Idx)) {
- return Extract128BitVector(In, IdxVal, DAG, dl);
+ return extract128BitVector(In, IdxVal, DAG, dl);
}
if (ResVT.is256BitVector() && InVT.is512BitVector() &&
isa<ConstantSDNode>(Idx)) {
- return Extract256BitVector(In, IdxVal, DAG, dl);
+ return extract256BitVector(In, IdxVal, DAG, dl);
}
}
return SDValue();
@@ -12068,9 +12784,9 @@ static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
// simple superregister reference or explicit instructions to insert
// the upper bits of a vector.
-static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- if (!Subtarget->hasAVX())
+ if (!Subtarget.hasAVX())
return SDValue();
SDLoc dl(Op);
@@ -12094,16 +12810,13 @@ static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
OpVT.is256BitVector() && SubVecVT.is128BitVector()) {
auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2));
if (Idx2 && Idx2->getZExtValue() == 0) {
- SDValue SubVec2 = Vec.getOperand(1);
- // If needed, look through a bitcast to get to the load.
- if (SubVec2.getNode() && SubVec2.getOpcode() == ISD::BITCAST)
- SubVec2 = SubVec2.getOperand(0);
-
+ // If needed, look through bitcasts to get to the load.
+ SDValue SubVec2 = peekThroughBitcasts(Vec.getOperand(1));
if (auto *FirstLd = dyn_cast<LoadSDNode>(SubVec2)) {
bool Fast;
unsigned Alignment = FirstLd->getAlignment();
unsigned AS = FirstLd->getAddressSpace();
- const X86TargetLowering *TLI = Subtarget->getTargetLowering();
+ const X86TargetLowering *TLI = Subtarget.getTargetLowering();
if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
OpVT, AS, Alignment, &Fast) && Fast) {
SDValue Ops[] = { SubVec2, SubVec };
@@ -12116,13 +12829,13 @@ static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
SubVecVT.is128BitVector())
- return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
+ return insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
- return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
+ return insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
if (OpVT.getVectorElementType() == MVT::i1)
- return Insert1BitVector(Op, DAG);
+ return insert1BitVector(Op, DAG, Subtarget);
return SDValue();
}
@@ -12139,17 +12852,13 @@ X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
- unsigned char OpFlag = 0;
+ unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
unsigned WrapperKind = X86ISD::Wrapper;
CodeModel::Model M = DAG.getTarget().getCodeModel();
- if (Subtarget->isPICStyleRIPRel() &&
+ if (Subtarget.isPICStyleRIPRel() &&
(M == CodeModel::Small || M == CodeModel::Kernel))
WrapperKind = X86ISD::WrapperRIP;
- else if (Subtarget->isPICStyleGOT())
- OpFlag = X86II::MO_GOTOFF;
- else if (Subtarget->isPICStyleStubPIC())
- OpFlag = X86II::MO_PIC_BASE_OFFSET;
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetConstantPool(
@@ -12171,17 +12880,13 @@ SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
- unsigned char OpFlag = 0;
+ unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
unsigned WrapperKind = X86ISD::Wrapper;
CodeModel::Model M = DAG.getTarget().getCodeModel();
- if (Subtarget->isPICStyleRIPRel() &&
+ if (Subtarget.isPICStyleRIPRel() &&
(M == CodeModel::Small || M == CodeModel::Kernel))
WrapperKind = X86ISD::WrapperRIP;
- else if (Subtarget->isPICStyleGOT())
- OpFlag = X86II::MO_GOTOFF;
- else if (Subtarget->isPICStyleStubPIC())
- OpFlag = X86II::MO_PIC_BASE_OFFSET;
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
@@ -12203,22 +12908,14 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
- unsigned char OpFlag = 0;
+ const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
+ unsigned char OpFlag = Subtarget.classifyGlobalReference(nullptr, *Mod);
unsigned WrapperKind = X86ISD::Wrapper;
CodeModel::Model M = DAG.getTarget().getCodeModel();
- if (Subtarget->isPICStyleRIPRel() &&
- (M == CodeModel::Small || M == CodeModel::Kernel)) {
- if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
- OpFlag = X86II::MO_GOTPCREL;
+ if (Subtarget.isPICStyleRIPRel() &&
+ (M == CodeModel::Small || M == CodeModel::Kernel))
WrapperKind = X86ISD::WrapperRIP;
- } else if (Subtarget->isPICStyleGOT()) {
- OpFlag = X86II::MO_GOT;
- } else if (Subtarget->isPICStyleStubPIC()) {
- OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
- } else if (Subtarget->isPICStyleStubNoDynamic()) {
- OpFlag = X86II::MO_DARWIN_NONLAZY;
- }
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag);
@@ -12227,8 +12924,7 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);
// With PIC, the address is actually $g + Offset.
- if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
- !Subtarget->is64Bit()) {
+ if (isPositionIndependent() && !Subtarget.is64Bit()) {
Result =
DAG.getNode(ISD::ADD, DL, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
@@ -12238,8 +12934,7 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
// load.
if (isGlobalStubReference(OpFlag))
Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, false, false, 0);
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
return Result;
}
@@ -12248,7 +12943,7 @@ SDValue
X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
// Create the TargetBlockAddressAddress node.
unsigned char OpFlags =
- Subtarget->ClassifyBlockAddressReference();
+ Subtarget.classifyBlockAddressReference();
CodeModel::Model M = DAG.getTarget().getCodeModel();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
@@ -12256,7 +12951,7 @@ X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
- if (Subtarget->isPICStyleRIPRel() &&
+ if (Subtarget.isPICStyleRIPRel() &&
(M == CodeModel::Small || M == CodeModel::Kernel))
Result = DAG.getNode(X86ISD::WrapperRIP, dl, PtrVT, Result);
else
@@ -12271,13 +12966,12 @@ X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
return Result;
}
-SDValue
-X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
- int64_t Offset, SelectionDAG &DAG) const {
+SDValue X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV,
+ const SDLoc &dl, int64_t Offset,
+ SelectionDAG &DAG) const {
// Create the TargetGlobalAddress node, folding in the constant
// offset if it is legal.
- unsigned char OpFlags =
- Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
+ unsigned char OpFlags = Subtarget.classifyGlobalReference(GV);
CodeModel::Model M = DAG.getTarget().getCodeModel();
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result;
@@ -12290,7 +12984,7 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, OpFlags);
}
- if (Subtarget->isPICStyleRIPRel() &&
+ if (Subtarget.isPICStyleRIPRel() &&
(M == CodeModel::Small || M == CodeModel::Kernel))
Result = DAG.getNode(X86ISD::WrapperRIP, dl, PtrVT, Result);
else
@@ -12306,8 +13000,7 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
// load.
if (isGlobalStubReference(OpFlags))
Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, false, false, 0);
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
// If there was a non-zero offset that we didn't fold, create an explicit
// addition for it.
@@ -12429,7 +13122,7 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
SDValue ThreadPointer =
DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
- MachinePointerInfo(Ptr), false, false, false, 0);
+ MachinePointerInfo(Ptr));
unsigned char OperandFlags = 0;
// Most TLS accesses are not RIP relative, even on x86-64. One exception is
@@ -12464,8 +13157,7 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
}
Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
- MachinePointerInfo::getGOT(DAG.getMachineFunction()),
- false, false, false, 0);
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}
// The address of the thread local variable is the add of the thread
@@ -12478,45 +13170,40 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
- // Cygwin uses emutls.
- // FIXME: It may be EmulatedTLS-generic also for X86-Android.
- if (Subtarget->isTargetWindowsCygwin())
+ if (DAG.getTarget().Options.EmulatedTLS)
return LowerToTLSEmulatedModel(GA, DAG);
const GlobalValue *GV = GA->getGlobal();
auto PtrVT = getPointerTy(DAG.getDataLayout());
+ bool PositionIndependent = isPositionIndependent();
- if (Subtarget->isTargetELF()) {
- if (DAG.getTarget().Options.EmulatedTLS)
- return LowerToTLSEmulatedModel(GA, DAG);
+ if (Subtarget.isTargetELF()) {
TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
switch (model) {
case TLSModel::GeneralDynamic:
- if (Subtarget->is64Bit())
+ if (Subtarget.is64Bit())
return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
case TLSModel::LocalDynamic:
return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
- Subtarget->is64Bit());
+ Subtarget.is64Bit());
case TLSModel::InitialExec:
case TLSModel::LocalExec:
- return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget->is64Bit(),
- DAG.getTarget().getRelocationModel() ==
- Reloc::PIC_);
+ return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
+ PositionIndependent);
}
llvm_unreachable("Unknown TLS model.");
}
- if (Subtarget->isTargetDarwin()) {
+ if (Subtarget.isTargetDarwin()) {
// Darwin only has one model of TLS. Lower to that.
unsigned char OpFlag = 0;
- unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
+ unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
X86ISD::WrapperRIP : X86ISD::Wrapper;
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
- bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
- !Subtarget->is64Bit();
+ bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
if (PIC32)
OpFlag = X86II::MO_TLVP_PIC_BASE;
else
@@ -12540,9 +13227,9 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, DL, true), DL);
SDValue Args[] = { Chain, Offset };
Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
- Chain =
- DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
- DAG.getIntPtrConstant(0, DL, true), SDValue(), DL);
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
+ DAG.getIntPtrConstant(0, DL, true),
+ Chain.getValue(1), DL);
// TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
@@ -12550,12 +13237,13 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
// And our return value (tls address) is in the standard call return value
// location.
- unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
+ unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
}
- if (Subtarget->isTargetKnownWindowsMSVC() ||
- Subtarget->isTargetWindowsGNU()) {
+ if (Subtarget.isTargetKnownWindowsMSVC() ||
+ Subtarget.isTargetWindowsItanium() ||
+ Subtarget.isTargetWindowsGNU()) {
// Just use the implicit TLS architecture
// Need to generate something similar to:
// mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
@@ -12573,21 +13261,20 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
// Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
// %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
// use its literal value of 0x2C.
- Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
+ Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
? Type::getInt8PtrTy(*DAG.getContext(),
256)
: Type::getInt32PtrTy(*DAG.getContext(),
257));
- SDValue TlsArray = Subtarget->is64Bit()
+ SDValue TlsArray = Subtarget.is64Bit()
? DAG.getIntPtrConstant(0x58, dl)
- : (Subtarget->isTargetWindowsGNU()
+ : (Subtarget.isTargetWindowsGNU()
? DAG.getIntPtrConstant(0x2C, dl)
: DAG.getExternalSymbol("_tls_array", PtrVT));
SDValue ThreadPointer =
- DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr), false,
- false, false, 0);
+ DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
SDValue res;
if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
@@ -12595,13 +13282,11 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
} else {
// Load the _tls_index variable
SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
- if (Subtarget->is64Bit())
+ if (Subtarget.is64Bit())
IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
- MachinePointerInfo(), MVT::i32, false, false,
- false, 0);
+ MachinePointerInfo(), MVT::i32);
else
- IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo(), false,
- false, false, 0);
+ IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
auto &DL = DAG.getDataLayout();
SDValue Scale =
@@ -12611,8 +13296,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
}
- res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo(), false, false,
- false, 0);
+ res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
// Get the offset of start of .tls section
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
@@ -12628,7 +13312,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
llvm_unreachable("TLS not implemented for this target.");
}
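
For readers less familiar with the implicit TLS scheme the Windows branch above implements, here is a rough scalar model of the address computation. This is an illustrative sketch, not LLVM code: read_teb_slot is a hypothetical stand-in for the segment-relative load (on MSVC/x64 this would be __readgsqword(0x58), on 32-bit __readfsdword(0x2C)), and the offsets mirror the comments above.

  #include <cstdint>

  extern "C" unsigned int _tls_index;          // provided by the CRT

  // Hypothetical wrapper for the gs/fs-relative load.
  std::uintptr_t read_teb_slot(std::uintptr_t Offset);

  static char *implicit_tls_address(std::uintptr_t SecOffset) {
    // mov rdx, qword [gs:58h]    ; ThreadLocalStoragePointer in the TEB
    char **ThreadLocalStorage =
        reinterpret_cast<char **>(read_teb_slot(0x58));
    // mov rcx, qword [rdx+rcx*8] ; this module's TLS block
    char *TlsBlock = ThreadLocalStorage[_tls_index];
    // add rax, <offset of the variable within the .tls section>
    return TlsBlock + SecOffset;
  }
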
-/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
+/// Lower SRA_PARTS and friends, which return two i32 values
/// and take a 2 x i32 value to shift plus a shift amount.
static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
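
The body of LowerShiftParts is elided by the surrounding hunks, so as a reminder of the operation being lowered: SRA_PARTS takes a value split into two i32 halves plus a shift amount and produces both result halves. A scalar sketch, illustrative only; the real lowering selects between the two cases with SHRD/SAR and a CMOV or branch rather than C control flow:

  #include <cstdint>

  static void sra_parts(int32_t Hi, uint32_t Lo, unsigned Amt,
                        int32_t &OutHi, uint32_t &OutLo) {
    if (Amt == 0) { OutHi = Hi; OutLo = Lo; return; }
    if (Amt < 32) {
      OutLo = (Lo >> Amt) | (uint32_t(Hi) << (32 - Amt)); // SHRD
      OutHi = Hi >> Amt;                                  // SAR
    } else {                        // 32 <= Amt < 64
      OutLo = uint32_t(Hi >> (Amt - 32));
      OutHi = Hi >> 31;             // fill with the sign bit
    }
  }
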
@@ -12711,13 +13395,13 @@ SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
return Op;
if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
- Subtarget->is64Bit()) {
+ Subtarget.is64Bit()) {
return Op;
}
SDValue ValueToStore = Op.getOperand(0);
if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
- !Subtarget->is64Bit())
+ !Subtarget.is64Bit())
// Bitcasting to f64 here allows us to do a single 64-bit store from
// an SSE register, avoiding the store forwarding penalty that would come
// with two 32-bit stores.
@@ -12730,8 +13414,7 @@ SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
SDValue Chain = DAG.getStore(
DAG.getEntryNode(), dl, ValueToStore, StackSlot,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI), false,
- false, 0);
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
}
@@ -12789,14 +13472,13 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
Ops, Op.getValueType(), MMO);
Result = DAG.getLoad(
Op.getValueType(), DL, Chain, StackSlot,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
- false, false, false, 0);
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
}
return Result;
}
-// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
+/// 64-bit unsigned integer to double expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
SelectionDAG &DAG) const {
// This algorithm is not obvious. Here it is what we're trying to output:
@@ -12837,20 +13519,20 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
SDValue CLod0 =
DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
- false, false, false, 16);
+ /* Alignment = */ 16);
SDValue Unpck1 =
getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
SDValue CLod1 =
DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
- false, false, false, 16);
+ /* Alignment = */ 16);
SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
// TODO: Are there any fast-math-flags to propagate here?
SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
SDValue Result;
- if (Subtarget->hasSSE3()) {
+ if (Subtarget.hasSSE3()) {
// FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
} else {
@@ -12865,7 +13547,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
DAG.getIntPtrConstant(0, dl));
}
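
The magic constants loaded from the constant pool above implement a well-known bit-splicing trick. A scalar analogue for reference (a sketch under the usual IEEE-754 assumptions, not the exact vector sequence, which subtracts a combined constant and then horizontally adds):

  #include <cstdint>
  #include <cstring>

  static double u64_to_f64(uint64_t X) {
    uint64_t LoBits = (X & 0xffffffffULL) | 0x4330000000000000ULL; // 2^52 + lo
    uint64_t HiBits = (X >> 32)           | 0x4530000000000000ULL; // 2^84 + hi*2^32
    double Lo, Hi;
    std::memcpy(&Lo, &LoBits, sizeof(Lo));
    std::memcpy(&Hi, &HiBits, sizeof(Hi));
    // Both subtractions are exact; only the final add rounds.
    return (Hi - 0x1.0p84) + (Lo - 0x1.0p52);
  }
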
-// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
+/// 32-bit unsigned integer to float expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
@@ -12945,10 +13627,8 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
if (VecFloatVT != Op->getSimpleValueType(0))
return SDValue();
- unsigned NumElts = VecIntVT.getVectorNumElements();
assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
"Unsupported custom type");
- assert(NumElts <= 8 && "The size of the constant array must be fixed");
  // In the #ifdef/#else code, we have in common:
  // - The vector of constants:
  // -- 0x4b000000
  // -- 0x53000000
  // - A shift:
  // -- v >> 16
// Create the splat vector for 0x4b000000.
- SDValue CstLow = DAG.getConstant(0x4b000000, DL, MVT::i32);
- SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
- CstLow, CstLow, CstLow, CstLow};
- SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
- makeArrayRef(&CstLowArray[0], NumElts));
+ SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
// Create the splat vector for 0x53000000.
- SDValue CstHigh = DAG.getConstant(0x53000000, DL, MVT::i32);
- SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
- CstHigh, CstHigh, CstHigh, CstHigh};
- SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
- makeArrayRef(&CstHighArray[0], NumElts));
+ SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
// Create the right shift.
- SDValue CstShift = DAG.getConstant(16, DL, MVT::i32);
- SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
- CstShift, CstShift, CstShift, CstShift};
- SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
- makeArrayRef(&CstShiftArray[0], NumElts));
+ SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
SDValue Low, High;
@@ -12997,9 +13665,7 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
VecCstHighBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
} else {
- SDValue CstMask = DAG.getConstant(0xffff, DL, MVT::i32);
- SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
- CstMask, CstMask, CstMask);
+ SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
// uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
@@ -13009,12 +13675,8 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
}
// Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
- SDValue CstFAdd = DAG.getConstantFP(
- APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), DL, MVT::f32);
- SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
- CstFAdd, CstFAdd, CstFAdd, CstFAdd};
- SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
- makeArrayRef(&CstFAddArray[0], NumElts));
+ SDValue VecCstFAdd = DAG.getConstantFP(
+ APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), DL, VecFloatVT);
// float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
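
A scalar analogue of the same idea for the 32-bit element case, matching the uint4 pseudocode in the comments above (a sketch; the vector code blends or masks lanes instead of using scalar ops):

  #include <cstdint>
  #include <cstring>

  static float u32_to_f32(uint32_t V) {
    uint32_t LoBits = (V & 0xffff) | 0x4b000000u; // 2^23 + lo16
    uint32_t HiBits = (V >> 16)    | 0x53000000u; // 2^39 + hi16 * 2^16
    float Lo, Hi;
    std::memcpy(&Lo, &LoBits, sizeof(Lo));
    std::memcpy(&Hi, &HiBits, sizeof(Hi));
    float FHi = Hi - (0x1.0p39f + 0x1.0p23f);     // == hi16*2^16 - 2^23, exact
    return FHi + Lo;                              // == hi16*2^16 + lo16
  }
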
@@ -13045,10 +13707,10 @@ SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
}
case MVT::v4i32:
case MVT::v8i32:
- return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
+ return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
case MVT::v16i8:
case MVT::v16i16:
- assert(Subtarget->hasAVX512());
+ assert(Subtarget.hasAVX512());
return DAG.getNode(ISD::UINT_TO_FP, dl, Op.getValueType(),
DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v16i32, N0));
}
@@ -13072,8 +13734,8 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
MVT SrcVT = N0.getSimpleValueType();
MVT DstVT = Op.getSimpleValueType();
- if (Subtarget->hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
- (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget->is64Bit()))) {
+ if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
+ (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
// Conversions from unsigned i32 to f32/f64 are legal,
// using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
return Op;
@@ -13083,34 +13745,30 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
return LowerUINT_TO_FP_i64(Op, DAG);
if (SrcVT == MVT::i32 && X86ScalarSSEf64)
return LowerUINT_TO_FP_i32(Op, DAG);
- if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
+ if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
return SDValue();
// Make a 64-bit buffer, and use it to build an FILD.
SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
if (SrcVT == MVT::i32) {
- SDValue WordOff = DAG.getConstant(4, dl, PtrVT);
- SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, WordOff);
+ SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
- StackSlot, MachinePointerInfo(),
- false, false, 0);
+ StackSlot, MachinePointerInfo());
SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
- OffsetSlot, MachinePointerInfo(),
- false, false, 0);
+ OffsetSlot, MachinePointerInfo());
SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
return Fild;
}
assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
SDValue ValueToStore = Op.getOperand(0);
- if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget->is64Bit())
+ if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit())
// Bitcasting to f64 here allows us to do a single 64-bit store from
// an SSE register, avoiding the store forwarding penalty that would come
// with two 32-bit stores.
ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
- SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, ValueToStore,
- StackSlot, MachinePointerInfo(),
- false, false, 0);
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, ValueToStore, StackSlot,
+ MachinePointerInfo());
// For i64 source, we need to add the appropriate power of 2 if the input
// was negative. This is the same as the optimization in
  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
@@ -13149,7 +13807,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
SDValue Fudge = DAG.getExtLoad(
ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), FudgePtr,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
- false, false, false, 4);
+ /* Alignment = */ 4);
// Extend everything to 80 bits to force it to be done on x87.
// TODO: Are there any fast-math-flags to propagate here?
SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
@@ -13186,10 +13844,10 @@ X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
// used for the 32-bit subtarget, but also for f80 on a 64-bit target.
bool UnsignedFixup = !IsSigned &&
DstTy == MVT::i64 &&
- (!Subtarget->is64Bit() ||
+ (!Subtarget.is64Bit() ||
!isScalarFPTypeInSSEReg(TheVT));
- if (!IsSigned && DstTy != MVT::i64 && !Subtarget->hasAVX512()) {
+ if (!IsSigned && DstTy != MVT::i64 && !Subtarget.hasAVX512()) {
// Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
// The low 32 bits of the fist result will have the correct uint32 result.
assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
@@ -13204,7 +13862,7 @@ X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
if (DstTy == MVT::i32 &&
isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
return std::make_pair(SDValue(), SDValue());
- if (Subtarget->is64Bit() &&
+ if (Subtarget.is64Bit() &&
DstTy == MVT::i64 &&
isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
return std::make_pair(SDValue(), SDValue());
@@ -13280,8 +13938,7 @@ X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
if (isScalarFPTypeInSSEReg(TheVT)) {
assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
Chain = DAG.getStore(Chain, DL, Value, StackSlot,
- MachinePointerInfo::getFixedStack(MF, SSFI), false,
- false, 0);
+ MachinePointerInfo::getFixedStack(MF, SSFI));
SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
SDValue Ops[] = {
Chain, StackSlot, DAG.getValueType(TheVT)
@@ -13309,18 +13966,15 @@ X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
FistOps, DstTy, MMO);
- SDValue Low32 = DAG.getLoad(MVT::i32, DL, FIST, StackSlot,
- MachinePointerInfo(),
- false, false, false, 0);
- SDValue HighAddr = DAG.getNode(ISD::ADD, DL, PtrVT, StackSlot,
- DAG.getConstant(4, DL, PtrVT));
+ SDValue Low32 =
+ DAG.getLoad(MVT::i32, DL, FIST, StackSlot, MachinePointerInfo());
+ SDValue HighAddr = DAG.getMemBasePlusOffset(StackSlot, 4, DL);
- SDValue High32 = DAG.getLoad(MVT::i32, DL, FIST, HighAddr,
- MachinePointerInfo(),
- false, false, false, 0);
+ SDValue High32 =
+ DAG.getLoad(MVT::i32, DL, FIST, HighAddr, MachinePointerInfo());
High32 = DAG.getNode(ISD::XOR, DL, MVT::i32, High32, Adjust);
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
// Join High32 and Low32 into a 64-bit result.
// (High32 << 32) | Low32
Low32 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Low32);
@@ -13347,7 +14001,7 @@ X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
}
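
The fp-to-uint32 fixup described at the top of this function reduces to a simple identity, sketched here in scalar form (valid for inputs that fit in uint32, which is all the optimization needs):

  #include <cstdint>

  static uint32_t f64_to_u32(double D) {
    // A signed 64-bit FIST conversion is exact for every value in
    // [0, 2^32), and its low 32 bits are the desired unsigned result.
    int64_t Wide = static_cast<int64_t>(D); // fistp qword
    return static_cast<uint32_t>(Wide);     // keep the low half
  }
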
static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
@@ -13374,7 +14028,7 @@ static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
return SDValue();
- if (Subtarget->hasInt256())
+ if (Subtarget.hasInt256())
return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
@@ -13393,41 +14047,46 @@ static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
}
static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
- const X86Subtarget *Subtarget, SelectionDAG &DAG) {
+ const X86Subtarget &Subtarget, SelectionDAG &DAG) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
SDLoc DL(Op);
unsigned int NumElts = VT.getVectorNumElements();
- if (NumElts != 8 && NumElts != 16 && !Subtarget->hasBWI())
+ if (NumElts != 8 && NumElts != 16 && !Subtarget.hasBWI())
return SDValue();
if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
assert(InVT.getVectorElementType() == MVT::i1);
- MVT ExtVT = NumElts == 8 ? MVT::v8i64 : MVT::v16i32;
+
+  // Extend VT if the vector is 128- or 256-bit and VLX is not supported.
+ MVT ExtVT = VT;
+ if (!VT.is512BitVector() && !Subtarget.hasVLX())
+ ExtVT = MVT::getVectorVT(MVT::getIntegerVT(512/NumElts), NumElts);
+
SDValue One =
DAG.getConstant(APInt(ExtVT.getScalarSizeInBits(), 1), DL, ExtVT);
SDValue Zero =
DAG.getConstant(APInt::getNullValue(ExtVT.getScalarSizeInBits()), DL, ExtVT);
- SDValue V = DAG.getNode(ISD::VSELECT, DL, ExtVT, In, One, Zero);
- if (VT.is512BitVector())
- return V;
- return DAG.getNode(X86ISD::VTRUNC, DL, VT, V);
+ SDValue SelectedVal = DAG.getNode(ISD::VSELECT, DL, ExtVT, In, One, Zero);
+ if (VT == ExtVT)
+ return SelectedVal;
+ return DAG.getNode(X86ISD::VTRUNC, DL, VT, SelectedVal);
}
-static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- if (Subtarget->hasFp256())
+ if (Subtarget.hasFp256())
if (SDValue Res = LowerAVXExtend(Op, DAG, Subtarget))
return Res;
return SDValue();
}
-static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
@@ -13437,7 +14096,7 @@ static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
return LowerZERO_EXTEND_AVX512(Op, Subtarget, DAG);
- if (Subtarget->hasFp256())
+ if (Subtarget.hasFp256())
if (SDValue Res = LowerAVXExtend(Op, DAG, Subtarget))
return Res;
@@ -13447,50 +14106,32 @@ static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
}
static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
SDValue In = Op.getOperand(0);
MVT InVT = In.getSimpleValueType();
- assert(VT.getVectorElementType() == MVT::i1 && "Unexected vector type.");
+ assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
- // Shift LSB to MSB and use VPMOVB2M - SKX.
+ // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
- if ((InVT.is512BitVector() && InVT.getScalarSizeInBits() <= 16 &&
- Subtarget->hasBWI()) || // legal, will go to VPMOVB2M, VPMOVW2M
- ((InVT.is256BitVector() || InVT.is128BitVector()) &&
- InVT.getScalarSizeInBits() <= 16 && Subtarget->hasBWI() &&
- Subtarget->hasVLX())) { // legal, will go to VPMOVB2M, VPMOVW2M
- // Shift packed bytes not supported natively, bitcast to dword
- MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
- SDValue ShiftNode = DAG.getNode(ISD::SHL, DL, ExtVT,
- DAG.getBitcast(ExtVT, In),
- DAG.getConstant(ShiftInx, DL, ExtVT));
- ShiftNode = DAG.getBitcast(InVT, ShiftNode);
- return DAG.getNode(X86ISD::CVT2MASK, DL, VT, ShiftNode);
- }
- if ((InVT.is512BitVector() && InVT.getScalarSizeInBits() >= 32 &&
- Subtarget->hasDQI()) || // legal, will go to VPMOVD2M, VPMOVQ2M
- ((InVT.is256BitVector() || InVT.is128BitVector()) &&
- InVT.getScalarSizeInBits() >= 32 && Subtarget->hasDQI() &&
- Subtarget->hasVLX())) { // legal, will go to VPMOVD2M, VPMOVQ2M
-
- SDValue ShiftNode = DAG.getNode(ISD::SHL, DL, InVT, In,
- DAG.getConstant(ShiftInx, DL, InVT));
- return DAG.getNode(X86ISD::CVT2MASK, DL, VT, ShiftNode);
- }
-
- // Shift LSB to MSB, extend if necessary and use TESTM.
- unsigned NumElts = InVT.getVectorNumElements();
- if (InVT.getSizeInBits() < 512 &&
- (InVT.getScalarType() == MVT::i8 || InVT.getScalarType() == MVT::i16 ||
- !Subtarget->hasVLX())) {
- assert((NumElts == 8 || NumElts == 16) && "Unexected vector type.");
-
- // TESTD/Q should be used (if BW supported we use CVT2MASK above),
- // so vector should be extended to packed dword/qword.
+ if (InVT.getScalarSizeInBits() <= 16) {
+ if (Subtarget.hasBWI()) {
+      // Legal; will be selected as VPMOVB2M or VPMOVW2M.
+      // Shifting packed bytes is not supported natively, so bitcast to words.
+ MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
+ SDValue ShiftNode = DAG.getNode(ISD::SHL, DL, ExtVT,
+ DAG.getBitcast(ExtVT, In),
+ DAG.getConstant(ShiftInx, DL, ExtVT));
+ ShiftNode = DAG.getBitcast(InVT, ShiftNode);
+ return DAG.getNode(X86ISD::CVT2MASK, DL, VT, ShiftNode);
+ }
+    // Use TESTD/Q; extend the vector to packed dwords/qwords.
+ assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
+ "Unexpected vector type.");
+ unsigned NumElts = InVT.getVectorNumElements();
MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(512/NumElts), NumElts);
In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
InVT = ExtVT;
@@ -13523,16 +14164,16 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
return LowerTruncateVecI1(Op, DAG, Subtarget);
// vpmovqb/w/d, vpmovdb/w, vpmovwb
- if (Subtarget->hasAVX512()) {
+ if (Subtarget.hasAVX512()) {
// word to byte only under BWI
- if (InVT == MVT::v16i16 && !Subtarget->hasBWI()) // v16i16 -> v16i8
+ if (InVT == MVT::v16i16 && !Subtarget.hasBWI()) // v16i16 -> v16i8
return DAG.getNode(X86ISD::VTRUNC, DL, VT,
DAG.getNode(X86ISD::VSEXT, DL, MVT::v16i32, In));
return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
}
if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
// On AVX2, v4i64 -> v4i32 becomes VPERMD.
- if (Subtarget->hasInt256()) {
+ if (Subtarget.hasInt256()) {
static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
In = DAG.getBitcast(MVT::v8i32, In);
In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
@@ -13553,7 +14194,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
    // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
- if (Subtarget->hasInt256()) {
+ if (Subtarget.hasInt256()) {
In = DAG.getBitcast(MVT::v32i8, In);
SmallVector<SDValue,32> pshufbMask;
@@ -13569,13 +14210,13 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
for (unsigned j = 0; j < 8; ++j)
pshufbMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
}
- SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
+ SDValue BV = DAG.getBuildVector(MVT::v32i8, DL, pshufbMask);
In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
In = DAG.getBitcast(MVT::v4i64, In);
static const int ShufMask[] = {0, 2, -1, -1};
In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
- &ShufMask[0]);
+ ShufMask);
In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
DAG.getIntPtrConstant(0, DL));
return DAG.getBitcast(VT, In);
@@ -13611,7 +14252,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
if (!VT.is128BitVector() || !InVT.is256BitVector())
return SDValue();
- assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
+ assert(Subtarget.hasFp256() && "256-bit vector without AVX!");
unsigned NumElems = VT.getVectorNumElements();
MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
@@ -13621,7 +14262,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
for (unsigned i = 0; i != NumElems; ++i)
MaskVec[i] = i * 2;
SDValue V = DAG.getVectorShuffle(NVT, DL, DAG.getBitcast(NVT, In),
- DAG.getUNDEF(NVT), &MaskVec[0]);
+ DAG.getUNDEF(NVT), MaskVec);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
DAG.getIntPtrConstant(0, DL));
}
@@ -13639,9 +14280,8 @@ SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
if (StackSlot.getNode())
// Load the result.
- return DAG.getLoad(Op.getValueType(), SDLoc(Op),
- FIST, StackSlot, MachinePointerInfo(),
- false, false, false, 0);
+ return DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot,
+ MachinePointerInfo());
// The node is the result.
return FIST;
@@ -13658,9 +14298,8 @@ SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
if (StackSlot.getNode())
// Load the result.
- return DAG.getLoad(Op.getValueType(), SDLoc(Op),
- FIST, StackSlot, MachinePointerInfo(),
- false, false, false, 0);
+ return DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot,
+ MachinePointerInfo());
// The node is the result.
return FIST;
@@ -13736,10 +14375,9 @@ static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
- SDValue Mask =
- DAG.getLoad(LogicVT, dl, DAG.getEntryNode(), CPIdx,
- MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
- false, false, false, Alignment);
+ SDValue Mask = DAG.getLoad(
+ LogicVT, dl, DAG.getEntryNode(), CPIdx,
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Alignment);
SDValue Op0 = Op.getOperand(0);
bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
@@ -13807,7 +14445,7 @@ static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
SDValue Mask1 =
DAG.getLoad(LogicVT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
- false, false, false, 16);
+ /* Alignment = */ 16);
if (!IsF128)
Op1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Op1);
SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Op1, Mask1);
@@ -13833,7 +14471,7 @@ static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
SDValue Val =
DAG.getLoad(LogicVT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
- false, false, false, 16);
+ /* Alignment = */ 16);
// If the magnitude operand wasn't a constant, we need to AND out the sign.
if (!isa<ConstantFPSDNode>(Op0)) {
if (!IsF128)
@@ -13852,18 +14490,25 @@ static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
- // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
- SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
- DAG.getConstant(1, dl, VT));
- return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, dl, VT));
+ MVT OpVT = N0.getSimpleValueType();
+ assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
+ "Unexpected type for FGETSIGN");
+
+ // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
+ MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
+ SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
+ Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
+ Res = DAG.getZExtOrTrunc(Res, dl, VT);
+ Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
+ return Res;
}
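
In intrinsics form, the new MOVMSK-based lowering corresponds to something like the following (f32 case only; a sketch of the concept, not the generated code):

  #include <xmmintrin.h>

  static int fgetsign(float X) {
    __m128 V = _mm_set_ss(X);        // SCALAR_TO_VECTOR
    int Mask = _mm_movemask_ps(V);   // one sign bit per lane -> bits 0..3
    return Mask & 1;                 // keep lane 0's sign bit
  }
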
// Check whether an OR'd tree is PTEST-able.
-static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
- if (!Subtarget->hasSSE41())
+ if (!Subtarget.hasSSE41())
return SDValue();
if (!Op->hasOneUse())
@@ -13969,9 +14614,27 @@ static bool hasNonFlagsUse(SDValue Op) {
return false;
}
+// Emit a KTEST instruction for bit vectors on AVX-512.
+static SDValue EmitKTEST(SDValue Op, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ if (Op.getOpcode() == ISD::BITCAST) {
+ auto hasKTEST = [&](MVT VT) {
+ unsigned SizeInBits = VT.getSizeInBits();
+ return (Subtarget.hasDQI() && (SizeInBits == 8 || SizeInBits == 16)) ||
+ (Subtarget.hasBWI() && (SizeInBits == 32 || SizeInBits == 64));
+ };
+ SDValue Op0 = Op.getOperand(0);
+ MVT Op0VT = Op0.getValueType().getSimpleVT();
+ if (Op0VT.isVector() && Op0VT.getVectorElementType() == MVT::i1 &&
+ hasKTEST(Op0VT))
+ return DAG.getNode(X86ISD::KTEST, SDLoc(Op), Op0VT, Op0, Op0);
+ }
+ return SDValue();
+}
+
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
-SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
+SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
SelectionDAG &DAG) const {
if (Op.getValueType() == MVT::i1) {
SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
@@ -14014,10 +14677,10 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
// doing a separate TEST. TEST always sets OF and CF to 0, so unless
// we prove that the arithmetic won't overflow, we can't use OF or CF.
if (Op.getResNo() != 0 || NeedOF || NeedCF) {
+ // Emit KTEST for bit vectors
+ if (auto Node = EmitKTEST(Op, DAG, Subtarget))
+ return Node;
// Emit a CMP with 0, which is the TEST pattern.
- //if (Op.getValueType() == MVT::i1)
- // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
- // DAG.getConstant(0, MVT::i1));
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
DAG.getConstant(0, dl, Op.getValueType()));
}
@@ -14071,14 +14734,14 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
if (ConstantSDNode *C =
dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
// An add of one will be selected as an INC.
- if (C->isOne() && !Subtarget->slowIncDec()) {
+ if (C->isOne() && !Subtarget.slowIncDec()) {
Opcode = X86ISD::INC;
NumOperands = 1;
break;
}
// An add of negative one (subtract of one) will be selected as a DEC.
- if (C->isAllOnesValue() && !Subtarget->slowIncDec()) {
+ if (C->isAllOnesValue() && !Subtarget.slowIncDec()) {
Opcode = X86ISD::DEC;
NumOperands = 1;
break;
@@ -14106,18 +14769,26 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
: APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
if (!Mask.isSignedIntN(32)) // Avoid large immediates.
break;
- SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
- DAG.getConstant(Mask, dl, VT));
- DAG.ReplaceAllUsesWith(Op, New);
- Op = New;
+ Op = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
+ DAG.getConstant(Mask, dl, VT));
}
break;
case ISD::AND:
- // If the primary and result isn't used, don't bother using X86ISD::AND,
+ // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
// because a TEST instruction will be better.
- if (!hasNonFlagsUse(Op))
- break;
+ if (!hasNonFlagsUse(Op)) {
+ SDValue Op0 = ArithOp->getOperand(0);
+ SDValue Op1 = ArithOp->getOperand(1);
+ EVT VT = ArithOp.getValueType();
+ bool isAndn = isBitwiseNot(Op0) || isBitwiseNot(Op1);
+ bool isLegalAndnType = VT == MVT::i32 || VT == MVT::i64;
+
+ // But if we can combine this into an ANDN operation, then create an AND
+ // now and allow it to be pattern matched into an ANDN.
+ if (!Subtarget.hasBMI() || !isAndn || !isLegalAndnType)
+ break;
+ }
// FALL THROUGH
case ISD::SUB:
case ISD::OR:
@@ -14137,8 +14808,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
case ISD::AND: Opcode = X86ISD::AND; break;
case ISD::OR: {
if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
- SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
- if (EFLAGS.getNode())
+ if (SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG))
return EFLAGS;
}
Opcode = X86ISD::OR;
@@ -14190,11 +14860,15 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
}
}
- if (Opcode == 0)
+ if (Opcode == 0) {
+ // Emit KTEST for bit vectors
+ if (auto Node = EmitKTEST(Op, DAG, Subtarget))
+ return Node;
+
// Emit a CMP with 0, which is the TEST pattern.
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
DAG.getConstant(0, dl, Op.getValueType()));
-
+ }
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
@@ -14206,7 +14880,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
- SDLoc dl, SelectionDAG &DAG) const {
+ const SDLoc &dl, SelectionDAG &DAG) const {
if (isNullConstant(Op1))
return EmitTest(Op0, X86CC, dl, DAG);
@@ -14215,13 +14889,12 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
- // Do the comparison at i32 if it's smaller, besides the Atom case.
- // This avoids subregister aliasing issues. Keep the smaller reference
- // if we're optimizing for size, however, as that'll allow better folding
- // of memory operations.
- if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
+    // Only promote the compare up to i32 if it is a 16-bit operation
+    // with an immediate; 16-bit immediates are to be avoided.
+ if ((Op0.getValueType() == MVT::i16 &&
+ (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
!DAG.getMachineFunction().getFunction()->optForMinSize() &&
- !Subtarget->isAtom()) {
+ !Subtarget.isAtom()) {
unsigned ExtendOp =
isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
@@ -14241,7 +14914,7 @@ SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
SelectionDAG &DAG) const {
// If the subtarget does not support the FUCOMI instruction, floating-point
// comparisons have to be converted.
- if (Subtarget->hasCMov() ||
+ if (Subtarget.hasCMov() ||
Cmp.getOpcode() != X86ISD::CMP ||
!Cmp.getOperand(0).getValueType().isFloatingPoint() ||
!Cmp.getOperand(1).getValueType().isFloatingPoint())
@@ -14259,7 +14932,7 @@ SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
// Some 64-bit targets lack SAHF support, but they do support FCOMI.
- assert(Subtarget->hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
+ assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
}
@@ -14279,10 +14952,10 @@ SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
// instructions: convert to single, rsqrtss, convert back to double, refine
// (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
// along with FMA, this could be a throughput win.
- if (VT == MVT::f32 && Subtarget->hasSSE1())
+ if (VT == MVT::f32 && Subtarget.hasSSE1())
RecipOp = "sqrtf";
- else if ((VT == MVT::v4f32 && Subtarget->hasSSE1()) ||
- (VT == MVT::v8f32 && Subtarget->hasAVX()))
+ else if ((VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
+ (VT == MVT::v8f32 && Subtarget.hasAVX()))
RecipOp = "vec-sqrtf";
else
return SDValue();
@@ -14311,10 +14984,10 @@ SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
// 15 instructions: convert to single, rcpss, convert back to double, refine
// (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
// along with FMA, this could be a throughput win.
- if (VT == MVT::f32 && Subtarget->hasSSE1())
+ if (VT == MVT::f32 && Subtarget.hasSSE1())
RecipOp = "divf";
- else if ((VT == MVT::v4f32 && Subtarget->hasSSE1()) ||
- (VT == MVT::v8f32 && Subtarget->hasAVX()))
+ else if ((VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
+ (VT == MVT::v8f32 && Subtarget.hasAVX()))
RecipOp = "vec-divf";
else
return SDValue();
@@ -14337,10 +15010,9 @@ unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
return 2;
}
-/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
-/// if it's possible.
+/// Result of 'and' is compared against zero. Change to a BT node if possible.
SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
- SDLoc dl, SelectionDAG &DAG) const {
+ const SDLoc &dl, SelectionDAG &DAG) const {
SDValue Op0 = And.getOperand(0);
SDValue Op1 = And.getOperand(1);
if (Op0.getOpcode() == ISD::TRUNCATE)
@@ -14353,19 +15025,19 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
std::swap(Op0, Op1);
if (Op0.getOpcode() == ISD::SHL) {
if (isOneConstant(Op0.getOperand(0))) {
- // If we looked past a truncate, check that it's only truncating away
- // known zeros.
- unsigned BitWidth = Op0.getValueSizeInBits();
- unsigned AndBitWidth = And.getValueSizeInBits();
- if (BitWidth > AndBitWidth) {
- APInt Zeros, Ones;
- DAG.computeKnownBits(Op0, Zeros, Ones);
- if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
- return SDValue();
- }
- LHS = Op1;
- RHS = Op0.getOperand(1);
+ // If we looked past a truncate, check that it's only truncating away
+ // known zeros.
+ unsigned BitWidth = Op0.getValueSizeInBits();
+ unsigned AndBitWidth = And.getValueSizeInBits();
+ if (BitWidth > AndBitWidth) {
+ APInt Zeros, Ones;
+ DAG.computeKnownBits(Op0, Zeros, Ones);
+ if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
+ return SDValue();
}
+ LHS = Op1;
+ RHS = Op0.getOperand(1);
+ }
} else if (Op1.getOpcode() == ISD::Constant) {
ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
uint64_t AndRHSVal = AndRHS->getZExtValue();
@@ -14407,8 +15079,8 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
return SDValue();
}
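
Concretely, the pattern this helper rewrites is a single-bit test. A sketch at the C level (assuming the transform fires, the generated code is a BT plus a flag-consuming SETcc or Jcc):

  #include <cstdint>

  static bool bit_is_set(uint32_t Flags, unsigned N) {
    // (Flags & (1u << N)) != 0 can be selected as: bt Flags, N ; setc
    return (Flags & (1u << N)) != 0;
  }
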
-/// \brief - Turns an ISD::CondCode into a value suitable for SSE floating point
-/// mask CMPs.
+/// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
+/// CMPs.
static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
SDValue &Op1) {
unsigned SSECC;
@@ -14452,8 +15124,8 @@ static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
return SSECC;
}
-// Lower256IntVSETCC - Break a VSETCC 256-bit integer VSETCC into two new 128
-// ones, and then concatenate the result back.
+/// Break a 256-bit integer VSETCC into two new 128-bit ones and then
+/// concatenate the result back.
static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
@@ -14466,13 +15138,13 @@ static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
// Extract the LHS vectors
SDValue LHS = Op.getOperand(0);
- SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
- SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
+ SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
+ SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
// Extract the RHS vectors
SDValue RHS = Op.getOperand(1);
- SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
- SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
+ SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
+ SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
// Issue the operation on the smaller types and concatenate the result back
MVT EltVT = VT.getVectorElementType();
@@ -14525,16 +15197,15 @@ static SDValue LowerBoolVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
}
}
-static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
+
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue CC = Op.getOperand(2);
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
- assert(Op0.getSimpleValueType().getVectorElementType().getSizeInBits() >= 8 &&
- Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
+ assert(VT.getVectorElementType() == MVT::i1 &&
"Cannot set masked compare for this operation");
ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
@@ -14568,8 +15239,8 @@ static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
/// \brief Try to turn a VSETULT into a VSETULE by modifying its second
/// operand \p Op1. If non-trivial (for example because it's not constant)
/// return an empty value.
-static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
-{
+static SDValue ChangeVSETULTtoVSETULE(const SDLoc &dl, SDValue Op1,
+ SelectionDAG &DAG) {
BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
if (!BV)
return SDValue();
@@ -14592,10 +15263,10 @@ static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
ULTOp1.push_back(DAG.getConstant(Val - 1, dl, EVT));
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
+ return DAG.getBuildVector(VT, dl, ULTOp1);
}
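
The rewrite is the usual strength reduction on unsigned compares, legal here because the loop above rejects any zero lane:

  #include <cstdint>

  static bool ult_as_ule(uint32_t X, uint32_t C) {
    // Requires C != 0; then X <u C is equivalent to X <=u C-1.
    return X <= C - 1;
  }
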
-static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
@@ -14611,32 +15282,59 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
assert(EltVT == MVT::f32 || EltVT == MVT::f64);
#endif
- unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
- unsigned Opc = X86ISD::CMPP;
- if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
+ unsigned Opc;
+ if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) {
assert(VT.getVectorNumElements() <= 16);
Opc = X86ISD::CMPM;
- }
- // In the two special cases we can't handle, emit two comparisons.
+ } else {
+ Opc = X86ISD::CMPP;
+ // The SSE/AVX packed FP comparison nodes are defined with a
+ // floating-point vector result that matches the operand type. This allows
+ // them to work with an SSE1 target (integer vector types are not legal).
+ VT = Op0.getSimpleValueType();
+ }
+
+ // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
+ // emit two comparisons and a logic op to tie them together.
+ // TODO: This can be avoided if Intel (and only Intel as of 2016) AVX is
+ // available.
+ SDValue Cmp;
+ unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
if (SSECC == 8) {
+ // LLVM predicate is SETUEQ or SETONE.
unsigned CC0, CC1;
unsigned CombineOpc;
if (SetCCOpcode == ISD::SETUEQ) {
- CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
+ CC0 = 3; // UNORD
+ CC1 = 0; // EQ
+ CombineOpc = Opc == X86ISD::CMPP ? static_cast<unsigned>(X86ISD::FOR) :
+ static_cast<unsigned>(ISD::OR);
} else {
assert(SetCCOpcode == ISD::SETONE);
- CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
+ CC0 = 7; // ORD
+ CC1 = 4; // NEQ
+ CombineOpc = Opc == X86ISD::CMPP ? static_cast<unsigned>(X86ISD::FAND) :
+ static_cast<unsigned>(ISD::AND);
}
SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(CC0, dl, MVT::i8));
SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(CC1, dl, MVT::i8));
- return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
+ Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
+ } else {
+ // Handle all other FP comparisons here.
+ Cmp = DAG.getNode(Opc, dl, VT, Op0, Op1,
+ DAG.getConstant(SSECC, dl, MVT::i8));
}
- // Handle all other FP comparisons here.
- return DAG.getNode(Opc, dl, VT, Op0, Op1,
- DAG.getConstant(SSECC, dl, MVT::i8));
+
+ // If this is SSE/AVX CMPP, bitcast the result back to integer to match the
+ // result type of SETCC. The bitcast is expected to be optimized away
+ // during combining/isel.
+ if (Opc == X86ISD::CMPP)
+ Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
+
+ return Cmp;
}
MVT VTOp0 = Op0.getSimpleValueType();
@@ -14665,38 +15363,38 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
// The non-AVX512 code below works under the assumption that source and
// destination types are the same.
- assert((Subtarget->hasAVX512() || (VT == VTOp0)) &&
+ assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
"Value types for source and destination must be the same!");
// Break 256-bit integer vector compare into smaller ones.
- if (VT.is256BitVector() && !Subtarget->hasInt256())
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
return Lower256IntVSETCC(Op, DAG);
+ // Operands are boolean (vectors of i1)
MVT OpVT = Op1.getSimpleValueType();
if (OpVT.getVectorElementType() == MVT::i1)
return LowerBoolVSETCC_AVX512(Op, DAG);
- bool MaskResult = (VT.getVectorElementType() == MVT::i1);
- if (Subtarget->hasAVX512()) {
- if (Op1.getSimpleValueType().is512BitVector() ||
- (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
- (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
- return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
-
+ // The result is boolean, but operands are int/float
+ if (VT.getVectorElementType() == MVT::i1) {
    // In the AVX-512 architecture, setcc returns a mask with i1 elements,
    // but there is no compare instruction for i8 and i16 elements in KNL.
- // We are not talking about 512-bit operands in this case, these
- // types are illegal.
- if (MaskResult &&
- (OpVT.getVectorElementType().getSizeInBits() < 32 &&
- OpVT.getVectorElementType().getSizeInBits() >= 8))
- return DAG.getNode(ISD::TRUNCATE, dl, VT,
- DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
+    // In this case use an SSE/AVX compare and truncate the result.
+ bool UseAVX512Inst =
+ (OpVT.is512BitVector() ||
+ OpVT.getVectorElementType().getSizeInBits() >= 32 ||
+ (Subtarget.hasBWI() && Subtarget.hasVLX()));
+
+ if (UseAVX512Inst)
+ return LowerIntVSETCC_AVX512(Op, DAG);
+
+ return DAG.getNode(ISD::TRUNCATE, dl, VT,
+ DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
}
// Lower using XOP integer comparisons.
if ((VT == MVT::v16i8 || VT == MVT::v8i16 ||
- VT == MVT::v4i32 || VT == MVT::v2i64) && Subtarget->hasXOP()) {
+ VT == MVT::v4i32 || VT == MVT::v2i64) && Subtarget.hasXOP()) {
// Translate compare code to XOP PCOM compare mode.
unsigned CmpMode = 0;
switch (SetCCOpcode) {
@@ -14748,8 +15446,8 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
// Special case: Use min/max operations for SETULE/SETUGE
MVT VET = VT.getVectorElementType();
bool hasMinMax =
- (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
- || (Subtarget->hasSSE2() && (VET == MVT::i8));
+ (Subtarget.hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
+ || (Subtarget.hasSSE2() && (VET == MVT::i8));
if (hasMinMax) {
switch (SetCCOpcode) {
@@ -14761,7 +15459,7 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
}
- bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
+ bool hasSubus = Subtarget.hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
if (!MinMax && hasSubus) {
// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
// Op0 u<= Op1:
@@ -14775,10 +15473,9 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
      // beneficial because the constant in the register is no longer
      // clobbered as the destination, so it can be hoisted out of a loop.
// Only do this pre-AVX since vpcmp* is no longer destructive.
- if (Subtarget->hasAVX())
+ if (Subtarget.hasAVX())
break;
- SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
- if (ULEOp1.getNode()) {
+ if (SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG)) {
Op1 = ULEOp1;
Subus = true; Invert = false; Swap = false;
}
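
The PSUBUS trick relies on a small identity: unsigned saturating subtraction yields zero exactly when the first operand does not exceed the second. Per-lane sketch:

  #include <cstdint>

  static bool ule_via_subus(uint8_t X, uint8_t Y) {
    uint8_t Sat = X > Y ? uint8_t(X - Y) : 0; // PSUBUSB, one lane
    return Sat == 0;                          // then PCMPEQB against zero
  }
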
@@ -14801,8 +15498,8 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
// Check that the operation in question is available (most are plain SSE2,
// but PCMPGTQ and PCMPEQQ have different requirements).
if (VT == MVT::v2i64) {
- if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
- assert(Subtarget->hasSSE2() && "Don't know how to lower!");
+ if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
+ assert(Subtarget.hasSSE2() && "Don't know how to lower!");
// First cast everything to the right type.
Op0 = DAG.getBitcast(MVT::v4i32, Op0);
@@ -14817,8 +15514,7 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
} else {
SDValue Sign = DAG.getConstant(0x80000000U, dl, MVT::i32);
SDValue Zero = DAG.getConstant(0x00000000U, dl, MVT::i32);
- SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
- Sign, Zero, Sign, Zero);
+ SB = DAG.getBuildVector(MVT::v4i32, dl, {Sign, Zero, Sign, Zero});
}
Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
@@ -14843,10 +15539,10 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getBitcast(VT, Result);
}
- if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
+ if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
// If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
// pcmpeqd + pshufd + pand.
- assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
+ assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
// First cast everything to the right type.
Op0 = DAG.getBitcast(MVT::v4i32, Op0);
@@ -14899,7 +15595,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
- assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
+ assert(((!Subtarget.hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
&& "SetCC type must be 8-bit or 1-bit integer");
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
@@ -14914,8 +15610,11 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
isNullConstant(Op1) &&
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
if (SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG)) {
- if (VT == MVT::i1)
+ if (VT == MVT::i1) {
+ NewSetCC = DAG.getNode(ISD::AssertZext, dl, MVT::i8, NewSetCC,
+ DAG.getValueType(MVT::i1));
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
+ }
return NewSetCC;
}
}
@@ -14937,16 +15636,23 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
DAG.getConstant(CCode, dl, MVT::i8),
Op0.getOperand(1));
- if (VT == MVT::i1)
+ if (VT == MVT::i1) {
+ SetCC = DAG.getNode(ISD::AssertZext, dl, MVT::i8, SetCC,
+ DAG.getValueType(MVT::i1));
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
+ }
return SetCC;
}
}
- if ((Op0.getValueType() == MVT::i1) && isOneConstant(Op1) &&
- (CC == ISD::SETEQ || CC == ISD::SETNE)) {
-
- ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
- return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, dl, MVT::i1), NewCC);
+ if (Op0.getValueType() == MVT::i1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
+ if (isOneConstant(Op1)) {
+ ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
+ return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, dl, MVT::i1), NewCC);
+ }
+ if (!isNullConstant(Op1)) {
+ SDValue Xor = DAG.getNode(ISD::XOR, dl, MVT::i1, Op0, Op1);
+ return DAG.getSetCC(dl, VT, Xor, DAG.getConstant(0, dl, MVT::i1), CC);
+ }
}
bool isFP = Op1.getSimpleValueType().isFloatingPoint();
@@ -14958,8 +15664,11 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
DAG.getConstant(X86CC, dl, MVT::i8), EFLAGS);
- if (VT == MVT::i1)
+ if (VT == MVT::i1) {
+ SetCC = DAG.getNode(ISD::AssertZext, dl, MVT::i8, SetCC,
+ DAG.getValueType(MVT::i1));
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
+ }
return SetCC;
}
@@ -14978,12 +15687,15 @@ SDValue X86TargetLowering::LowerSETCCE(SDValue Op, SelectionDAG &DAG) const {
SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry);
SDValue SetCC = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
DAG.getConstant(CC, DL, MVT::i8), Cmp.getValue(1));
- if (Op.getSimpleValueType() == MVT::i1)
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
+ if (Op.getSimpleValueType() == MVT::i1) {
+ SetCC = DAG.getNode(ISD::AssertZext, DL, MVT::i8, SetCC,
+ DAG.getValueType(MVT::i1));
+ return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
+ }
return SetCC;
}
-// isX86LogicalCmp - Return true if opcode is a X86 logical comparison.
+/// Return true if the opcode is an X86 logical comparison.
static bool isX86LogicalCmp(SDValue Op) {
unsigned Opc = Op.getNode()->getOpcode();
if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
@@ -15009,14 +15721,23 @@ static bool isX86LogicalCmp(SDValue Op) {
return false;
}
-static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
+/// Returns the "condition" node, which may be wrapped in a "truncate".
+/// Like this: (i1 (trunc (i8 X86ISD::SETCC))).
+static SDValue getCondAfterTruncWithZeroHighBitsInput(SDValue V,
+                                                      SelectionDAG &DAG) {
if (V.getOpcode() != ISD::TRUNCATE)
- return false;
+ return V;
SDValue VOp0 = V.getOperand(0);
+ if (VOp0.getOpcode() == ISD::AssertZext &&
+ V.getValueSizeInBits() ==
+ cast<VTSDNode>(VOp0.getOperand(1))->getVT().getSizeInBits())
+ return VOp0.getOperand(0);
+
unsigned InBits = VOp0.getValueSizeInBits();
unsigned Bits = V.getValueSizeInBits();
- return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
+ if (DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits)))
+ return V.getOperand(0);
+ return V;
}
SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
@@ -15032,15 +15753,15 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// are available or VBLENDV if AVX is available.
// Otherwise FP cmovs get lowered into a less efficient branch sequence later.
if (Cond.getOpcode() == ISD::SETCC &&
- ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
- (Subtarget->hasSSE1() && VT == MVT::f32)) &&
+ ((Subtarget.hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
+ (Subtarget.hasSSE1() && VT == MVT::f32)) &&
VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
int SSECC = translateX86FSETCC(
cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
if (SSECC != 8) {
- if (Subtarget->hasAVX512()) {
+ if (Subtarget.hasAVX512()) {
SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
DAG.getConstant(SSECC, DL, MVT::i8));
return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
@@ -15062,7 +15783,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// instructions as the AND/ANDN/OR sequence due to register moves, so
// don't bother.
- if (Subtarget->hasAVX() &&
+ if (Subtarget.hasAVX() &&
!isa<ConstantFPSDNode>(Op1) && !isa<ConstantFPSDNode>(Op2)) {
// Convert to vectors, do a VSELECT, and convert back to scalar.
@@ -15122,8 +15843,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
}
if (Cond.getOpcode() == ISD::SETCC) {
- SDValue NewCond = LowerSETCC(Cond, DAG);
- if (NewCond.getNode())
+ if (SDValue NewCond = LowerSETCC(Cond, DAG))
Cond = NewCond;
}
@@ -15240,8 +15960,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
if (addTest) {
// Look past the truncate if the high bits are known zero.
- if (isTruncWithZeroHighBitsInput(Cond, DAG))
- Cond = Cond.getOperand(0);
+ Cond = getCondAfterTruncWithZeroHighBitsInput(Cond, DAG);
// We know the result of AND is compared against zero. Try to match
// it to BT.
@@ -15302,7 +16021,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
}
static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
@@ -15313,22 +16032,22 @@ static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op,
// SKX processor
if ((InVTElt == MVT::i1) &&
- (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
+ (((Subtarget.hasBWI() && Subtarget.hasVLX() &&
VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
- ((Subtarget->hasBWI() && VT.is512BitVector() &&
+ ((Subtarget.hasBWI() && VT.is512BitVector() &&
VTElt.getSizeInBits() <= 16)) ||
- ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
+ ((Subtarget.hasDQI() && Subtarget.hasVLX() &&
VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
- ((Subtarget->hasDQI() && VT.is512BitVector() &&
+ ((Subtarget.hasDQI() && VT.is512BitVector() &&
VTElt.getSizeInBits() >= 32))))
return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
unsigned int NumElts = VT.getVectorNumElements();
- if (NumElts != 8 && NumElts != 16 && !Subtarget->hasBWI())
+ if (NumElts != 8 && NumElts != 16 && !Subtarget.hasBWI())
return SDValue();
if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
@@ -15352,25 +16071,35 @@ static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op,
}
static SDValue LowerSIGN_EXTEND_VECTOR_INREG(SDValue Op,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue In = Op->getOperand(0);
MVT VT = Op->getSimpleValueType(0);
MVT InVT = In.getSimpleValueType();
assert(VT.getSizeInBits() == InVT.getSizeInBits());
+ MVT SVT = VT.getVectorElementType();
MVT InSVT = InVT.getVectorElementType();
- assert(VT.getVectorElementType().getSizeInBits() > InSVT.getSizeInBits());
+ assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
- if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16)
+ if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
return SDValue();
if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
return SDValue();
+ if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
+ !(VT.is256BitVector() && Subtarget.hasInt256()))
+ return SDValue();
SDLoc dl(Op);
+ // For 256-bit vectors, we only need the lower (128-bit) half of the input.
+ if (VT.is256BitVector())
+ In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
+ MVT::getVectorVT(InSVT, InVT.getVectorNumElements() / 2),
+ In, DAG.getIntPtrConstant(0, dl));
+
// SSE41 targets can use the pmovsx* instructions directly.
- if (Subtarget->hasSSE41())
+ if (Subtarget.hasSSE41())
return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
// pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
@@ -15407,7 +16136,7 @@ static SDValue LowerSIGN_EXTEND_VECTOR_INREG(SDValue Op,
return SDValue();
}
-static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
@@ -15422,7 +16151,7 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
(VT != MVT::v16i16 || InVT != MVT::v16i8))
return SDValue();
- if (Subtarget->hasInt256())
+ if (Subtarget.hasInt256())
return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
// Optimize vectors in AVX mode
@@ -15441,13 +16170,13 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
for (unsigned i = 0; i != NumElems/2; ++i)
ShufMask1[i] = i;
- SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
+ SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, ShufMask1);
SmallVector<int,8> ShufMask2(NumElems, -1);
for (unsigned i = 0; i != NumElems/2; ++i)
ShufMask2[i] = i + NumElems/2;
- SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
+ SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, ShufMask2);
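+ // OpLo and OpHi now hold the low and high halves of the input vector.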
MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
VT.getVectorNumElements()/2);
@@ -15458,6 +16187,157 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
+// Lower a truncating store. We need special lowering for vXi1 vectors.
+static SDValue LowerTruncatingStore(SDValue StOp, const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
+ StoreSDNode *St = cast<StoreSDNode>(StOp.getNode());
+ SDLoc dl(St);
+ EVT MemVT = St->getMemoryVT();
+ assert(St->isTruncatingStore() && "We only custom lower truncating stores.");
+ assert(MemVT.isVector() && MemVT.getVectorElementType() == MVT::i1 &&
+ "Expected truncstore of i1 vector");
+
+ SDValue Op = St->getValue();
+ MVT OpVT = Op.getValueType().getSimpleVT();
+ unsigned NumElts = OpVT.getVectorNumElements();
+ if ((Subtarget.hasVLX() && Subtarget.hasBWI() && Subtarget.hasDQI()) ||
+ NumElts == 16) {
+ // Truncate and store - everything is legal
+ Op = DAG.getNode(ISD::TRUNCATE, dl, MemVT, Op);
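+ // An i1 vector narrower than 8 elements still occupies a whole byte in
+ // memory, so widen it to v8i1 before storing.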
+ if (MemVT.getSizeInBits() < 8)
+ Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
+ DAG.getUNDEF(MVT::v8i1), Op,
+ DAG.getIntPtrConstant(0, dl));
+ return DAG.getStore(St->getChain(), dl, Op, St->getBasePtr(),
+ St->getMemOperand());
+ }
+
+ // A narrower subset; assume only AVX-512F is available.
+ if (NumElts <= 8) {
+ if (NumElts < 8) {
+ // Extend to an 8-element vector.
+ MVT ExtVT = MVT::getVectorVT(OpVT.getScalarType(), 8);
+ Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ExtVT,
+ DAG.getUNDEF(ExtVT), Op, DAG.getIntPtrConstant(0, dl));
+ }
+ Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i1, Op);
+ return DAG.getStore(St->getChain(), dl, Op, St->getBasePtr(),
+ St->getMemOperand());
+ }
+ // v32i8
+ assert(OpVT == MVT::v32i8 && "Unexpected operand type");
+ // Divide the vector into 2 parts and store each part separately
+ SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, Op,
+ DAG.getIntPtrConstant(0, dl));
+ Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::v16i1, Lo);
+ SDValue BasePtr = St->getBasePtr();
+ SDValue StLo = DAG.getStore(St->getChain(), dl, Lo, BasePtr,
+ St->getMemOperand());
+ SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, Op,
+ DAG.getIntPtrConstant(16, dl));
+ Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::v16i1, Hi);
+
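+ // The high half is stored 2 bytes further on: each v16i1 half occupies
+ // 16 bits of memory.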
+ SDValue BasePtrHi =
+ DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
+ DAG.getConstant(2, dl, BasePtr.getValueType()));
+
+ SDValue StHi = DAG.getStore(St->getChain(), dl, Hi,
+ BasePtrHi, St->getMemOperand());
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StLo, StHi);
+}
+
+static SDValue LowerExtended1BitVectorLoad(SDValue Op,
+ const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
+
+ LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
+ SDLoc dl(Ld);
+ EVT MemVT = Ld->getMemoryVT();
+ assert(MemVT.isVector() && MemVT.getScalarType() == MVT::i1 &&
+ "Expected i1 vector load");
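+ // Extend with the signedness the load asked for: zero-extend for ZEXTLOAD,
+ // sign-extend otherwise.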
+ unsigned ExtOpcode = Ld->getExtensionType() == ISD::ZEXTLOAD ?
+ ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
+ MVT VT = Op.getValueType().getSimpleVT();
+ unsigned NumElts = VT.getVectorNumElements();
+
+ if ((Subtarget.hasVLX() && Subtarget.hasBWI() && Subtarget.hasDQI()) ||
+ NumElts == 16) {
+ // Load and extend - everything is legal
+ if (NumElts < 8) {
+ SDValue Load = DAG.getLoad(MVT::v8i1, dl, Ld->getChain(),
+ Ld->getBasePtr(),
+ Ld->getMemOperand());
+ // Replace chain users with the new chain.
+ assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
+ MVT ExtVT = MVT::getVectorVT(VT.getScalarType(), 8);
+ SDValue ExtVec = DAG.getNode(ExtOpcode, dl, ExtVT, Load);
+
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
+ DAG.getIntPtrConstant(0, dl));
+ }
+ SDValue Load = DAG.getLoad(MemVT, dl, Ld->getChain(),
+ Ld->getBasePtr(),
+ Ld->getMemOperand());
+ // Replace chain users with the new chain.
+ assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
+
+ // Finally, do a normal sign-extend to the desired register.
+ return DAG.getNode(ExtOpcode, dl, Op.getValueType(), Load);
+ }
+
+ if (NumElts <= 8) {
+ // A narrower subset; assume only AVX-512F is available.
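+ // Load the mask bits as one scalar integer (at least a byte wide) and
+ // bitcast them to a vector of i1.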
+ unsigned NumBitsToLoad = NumElts < 8 ? 8 : NumElts;
+ MVT TypeToLoad = MVT::getIntegerVT(NumBitsToLoad);
+ SDValue Load = DAG.getLoad(TypeToLoad, dl, Ld->getChain(),
+ Ld->getBasePtr(),
+ Ld->getMemOperand());
+ // Replace chain users with the new chain.
+ assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
+
+ MVT MaskVT = MVT::getVectorVT(MVT::i1, NumBitsToLoad);
+ SDValue BitVec = DAG.getBitcast(MaskVT, Load);
+
+ if (NumElts == 8)
+ return DAG.getNode(ExtOpcode, dl, VT, BitVec);
+
+ // Take care of v4i1 and v2i1: extend to 8 elements and extract the low part.
+
+ MVT ExtVT = MVT::getVectorVT(VT.getScalarType(), 8);
+ SDValue ExtVec = DAG.getNode(ExtOpcode, dl, ExtVT, BitVec);
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
+ DAG.getIntPtrConstant(0, dl));
+ }
+
+ assert(VT == MVT::v32i8 && "Unexpected extload type");
+
+ SmallVector<SDValue, 2> Chains;
+
+ SDValue BasePtr = Ld->getBasePtr();
+ SDValue LoadLo = DAG.getLoad(MVT::v16i1, dl, Ld->getChain(),
+ Ld->getBasePtr(),
+ Ld->getMemOperand());
+ Chains.push_back(LoadLo.getValue(1));
+
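+ // The second v16i1 half lives 2 bytes (16 mask bits) past the base pointer.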
+ SDValue BasePtrHi =
+ DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
+ DAG.getConstant(2, dl, BasePtr.getValueType()));
+
+ SDValue LoadHi = DAG.getLoad(MVT::v16i1, dl, Ld->getChain(),
+ BasePtrHi,
+ Ld->getMemOperand());
+ Chains.push_back(LoadHi.getValue(1));
+ SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
+
+ SDValue Lo = DAG.getNode(ExtOpcode, dl, MVT::v16i8, LoadLo);
+ SDValue Hi = DAG.getNode(ExtOpcode, dl, MVT::v16i8, LoadHi);
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v32i8, Lo, Hi);
+}
+
// Lower vector extended loads using a shuffle. If SSSE3 is not available we
// may emit an illegal shuffle but the expansion is still better than scalar
// code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
@@ -15465,7 +16345,7 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
// FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
// TODO: It is possible to support ZExt by zeroing the undef values during
// the shuffle phase or after the shuffle.
-static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT RegVT = Op.getSimpleValueType();
assert(RegVT.isVector() && "We only custom lower vector sext loads.");
@@ -15473,11 +16353,14 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
"We only custom lower integer vector sext loads.");
// Nothing useful we can do without SSE2 shuffles.
- assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
+ assert(Subtarget.hasSSE2() && "We only custom lower sext loads with SSE2.");
LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
SDLoc dl(Ld);
EVT MemVT = Ld->getMemoryVT();
+ if (MemVT.getScalarType() == MVT::i1)
+ return LowerExtended1BitVectorLoad(Op, Subtarget, DAG);
+
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned RegSz = RegVT.getSizeInBits();
@@ -15492,7 +16375,7 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
unsigned MemSz = MemVT.getSizeInBits();
assert(RegSz > MemSz && "Register size must be greater than the mem size");
- if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
+ if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget.hasInt256()) {
// The only way in which we have a legal 256-bit vector result but not the
// integer 256-bit operations needed to directly lower a sextload is if we
// have AVX1 but not AVX2. In that case, we can always emit a sextload to
@@ -15508,8 +16391,8 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
"it must be a legal 128-bit vector "
"type!");
Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
- Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
- Ld->isInvariant(), Ld->getAlignment());
+ Ld->getPointerInfo(), Ld->getAlignment(),
+ Ld->getMemOperand()->getFlags());
} else {
assert(MemSz < 128 &&
"Can't extend a type wider than 128 bits to a 256 bit vector!");
@@ -15522,9 +16405,8 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
Load =
DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
- Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
- Ld->isNonTemporal(), Ld->isInvariant(),
- Ld->getAlignment());
+ Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
+ Ld->getMemOperand()->getFlags());
}
// Replace chain users with the new chain.
@@ -15592,8 +16474,7 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
// Perform a single load.
SDValue ScalarLoad =
DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
- Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
- Ld->getAlignment());
+ Ld->getAlignment(), Ld->getMemOperand()->getFlags());
Chains.push_back(ScalarLoad.getValue(1));
// Create the first element type using SCALAR_TO_VECTOR in order to avoid
// another round of DAGCombining.
@@ -15615,7 +16496,7 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
if (Ext == ISD::SEXTLOAD) {
// If we have SSE4.1, we can directly emit a VSEXT node.
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
return Sext;
@@ -15637,7 +16518,7 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
ShuffleVec[i * SizeRatio] = i;
SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
- DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
+ DAG.getUNDEF(WideVecVT), ShuffleVec);
// Bitcast to the requested type.
Shuff = DAG.getBitcast(RegVT, Shuff);
@@ -15645,9 +16526,8 @@ static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
return Shuff;
}
-// isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or
-// ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
-// from the AND / OR.
+/// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
+/// each of which has no other use apart from the AND / OR.
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
Opc = Op.getOpcode();
if (Opc != ISD::OR && Opc != ISD::AND)
@@ -15658,8 +16538,8 @@ static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
Op.getOperand(1).hasOneUse());
}
-// isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and
-// 1 and that the SETCC node has a single use.
+/// Return true if node is an ISD::XOR of a X86ISD::SETCC and 1 and that the
+/// SETCC node has a single use.
static bool isXor1OfSetCC(SDValue Op) {
if (Op.getOpcode() != ISD::XOR)
return false;
@@ -15692,8 +16572,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
Inverted = true;
Cond = Cond.getOperand(0);
} else {
- SDValue NewCond = LowerSETCC(Cond, DAG);
- if (NewCond.getNode())
+ if (SDValue NewCond = LowerSETCC(Cond, DAG))
Cond = NewCond;
}
}
@@ -15917,8 +16796,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
if (addTest) {
// Look past the truncate if the high bits are known zero.
- if (isTruncWithZeroHighBitsInput(Cond, DAG))
- Cond = Cond.getOperand(0);
+ Cond = getCondAfterTruncWithZeroHighBitsInput(Cond, DAG);
// We know the result of AND is compared against zero. Try to match
// it to BT.
@@ -15951,7 +16829,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
bool SplitStack = MF.shouldSplitStack();
- bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
+ bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
SplitStack;
SDLoc dl(Op);
@@ -15966,7 +16844,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
// pointer when other instructions are using the stack.
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, dl, true), dl);
- bool Is64Bit = Subtarget->is64Bit();
+ bool Is64Bit = Subtarget.is64Bit();
MVT SPTy = getPointerTy(DAG.getDataLayout());
SDValue Result;
@@ -15975,13 +16853,10 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
" not tell us which reg is the stack pointer!");
- EVT VT = Node->getValueType(0);
- SDValue Tmp3 = Node->getOperand(2);
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
Chain = SP.getValue(1);
- unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
- const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
+ const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
unsigned StackAlign = TFI.getStackAlignment();
Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
if (Align > StackAlign)
@@ -15995,12 +16870,11 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
// The 64 bit implementation of segmented stacks needs to clobber both r10
// and r11. This makes it impossible to use it along with nested parameters.
const Function *F = MF.getFunction();
-
- for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
- I != E; ++I)
- if (I->hasNestAttr())
+ for (const auto &A : F->args()) {
+ if (A.hasNestAttr())
report_fatal_error("Cannot use segmented stacks with functions that "
"have nested arguments.");
+ }
}
const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
@@ -16009,16 +16883,11 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
DAG.getRegister(Vreg, SPTy));
} else {
- SDValue Flag;
- const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
-
- Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
- Flag = Chain.getValue(1);
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
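+ // Record the WIN_ALLOCA in the function info so later passes can expand it.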
+ MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
- Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
-
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned SPReg = RegInfo->getStackRegister();
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
Chain = SP.getValue(1);
@@ -16047,13 +16916,13 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
SDLoc DL(Op);
- if (!Subtarget->is64Bit() ||
- Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv())) {
+ if (!Subtarget.is64Bit() ||
+ Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv())) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
- MachinePointerInfo(SV), false, false, 0);
+ MachinePointerInfo(SV));
}
// __va_list_tag:
@@ -16064,45 +16933,45 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
SmallVector<SDValue, 8> MemOps;
SDValue FIN = Op.getOperand(1);
// Store gp_offset
- SDValue Store = DAG.getStore(Op.getOperand(0), DL,
- DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
- DL, MVT::i32),
- FIN, MachinePointerInfo(SV), false, false, 0);
+ SDValue Store = DAG.getStore(
+ Op.getOperand(0), DL,
+ DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
+ MachinePointerInfo(SV));
MemOps.push_back(Store);
// Store fp_offset
- FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
- Store = DAG.getStore(Op.getOperand(0), DL,
- DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL,
- MVT::i32),
- FIN, MachinePointerInfo(SV, 4), false, false, 0);
+ FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
+ Store = DAG.getStore(
+ Op.getOperand(0), DL,
+ DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
+ MachinePointerInfo(SV, 4));
MemOps.push_back(Store);
// Store ptr to overflow_arg_area
FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
- Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
- MachinePointerInfo(SV, 8),
- false, false, 0);
+ Store =
+ DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
MemOps.push_back(Store);
// Store ptr to reg_save_area.
FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
- Subtarget->isTarget64BitLP64() ? 8 : 4, DL));
+ Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
- Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN, MachinePointerInfo(
- SV, Subtarget->isTarget64BitLP64() ? 16 : 12), false, false, 0);
+ Store = DAG.getStore(
+ Op.getOperand(0), DL, RSFIN, FIN,
+ MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
MemOps.push_back(Store);
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
- assert(Subtarget->is64Bit() &&
+ assert(Subtarget.is64Bit() &&
"LowerVAARG only handles 64-bit va_arg!");
assert(Op.getNode()->getNumOperands() == 4);
MachineFunction &MF = DAG.getMachineFunction();
- if (Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv()))
+ if (Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv()))
// The Win64 ABI uses char* instead of a structure.
return DAG.expandVAArg(Op.getNode());
@@ -16132,9 +17001,9 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
if (ArgMode == 2) {
// Sanity Check: Make sure using fp_offset makes sense.
- assert(!Subtarget->useSoftFloat() &&
+ assert(!Subtarget.useSoftFloat() &&
!(MF.getFunction()->hasFnAttribute(Attribute::NoImplicitFloat)) &&
- Subtarget->hasSSE1());
+ Subtarget.hasSSE1());
}
// Insert VAARG_64 node into the DAG
@@ -16153,19 +17022,15 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
Chain = VAARG.getValue(1);
// Load the next argument and return it
- return DAG.getLoad(ArgVT, dl,
- Chain,
- VAARG,
- MachinePointerInfo(),
- false, false, false, 0);
+ return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
}
-static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
// where a va_list is still an i8*.
- assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
- if (Subtarget->isCallingConvWin64(
+ assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
+ if (Subtarget.isCallingConvWin64(
DAG.getMachineFunction().getFunction()->getCallingConv()))
// Probably a Win64 va_copy.
return DAG.expandVACopy(Op.getNode());
@@ -16183,9 +17048,9 @@ static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
-// getTargetVShiftByConstNode - Handle vector element shifts where the shift
-// amount is a constant. Takes immediate version of shift as input.
-static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
+/// Handle vector element shifts where the shift amount is a constant.
+/// Takes immediate version of shift as input.
+static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
SDValue SrcOp, uint64_t ShiftAmt,
SelectionDAG &DAG) {
MVT ElementType = VT.getVectorElementType();
@@ -16214,11 +17079,11 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
ConstantSDNode *ND;
switch(Opc) {
- default: llvm_unreachable(nullptr);
+ default: llvm_unreachable("Unknown opcode!");
case X86ISD::VSHLI:
for (unsigned i=0; i!=NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i);
- if (CurrentOp->getOpcode() == ISD::UNDEF) {
+ if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp);
continue;
}
@@ -16230,7 +17095,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
case X86ISD::VSRLI:
for (unsigned i=0; i!=NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i);
- if (CurrentOp->getOpcode() == ISD::UNDEF) {
+ if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp);
continue;
}
@@ -16242,7 +17107,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
case X86ISD::VSRAI:
for (unsigned i=0; i!=NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i);
- if (CurrentOp->getOpcode() == ISD::UNDEF) {
+ if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp);
continue;
}
@@ -16253,16 +17118,16 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
break;
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
+ return DAG.getBuildVector(VT, dl, Elts);
}
return DAG.getNode(Opc, dl, VT, SrcOp,
DAG.getConstant(ShiftAmt, dl, MVT::i8));
}
-// getTargetVShiftNode - Handle vector element shifts where the shift amount
-// may or may not be a constant. Takes immediate version of shift as input.
-static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
+/// Handle vector element shifts where the shift amount may or may not be a
+/// constant. Takes immediate version of shift as input.
+static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
SDValue SrcOp, SDValue ShAmt,
SelectionDAG &DAG) {
MVT SVT = ShAmt.getSimpleValueType();
@@ -16288,7 +17153,7 @@ static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
// Let the shuffle legalizer expand this shift amount node.
SDValue Op0 = ShAmt.getOperand(0);
Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
- ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
+ ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, Subtarget, DAG);
} else {
// Need to build a vector containing the shift amount.
// SSE/AVX packed shifts only use the lower 64 bits of the shift count.
@@ -16301,7 +17166,7 @@ static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
ShOps.push_back(DAG.getUNDEF(SVT));
MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
- ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
+ ShAmt = DAG.getBuildVector(BVT, dl, ShOps);
}
// The return type has to be a 128-bit type with the same element
@@ -16316,8 +17181,8 @@ static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
/// \brief Return Mask with the necessary casting or extending
/// for \p Mask according to \p MaskVT when lowering masking intrinsics
static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
- const X86Subtarget *Subtarget,
- SelectionDAG &DAG, SDLoc dl) {
+ const X86Subtarget &Subtarget, SelectionDAG &DAG,
+ const SDLoc &dl) {
if (isAllOnesConstant(Mask))
return DAG.getTargetConstant(1, dl, MaskVT);
@@ -16330,9 +17195,9 @@ static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
MVT::getIntegerVT(MaskVT.getSizeInBits()), Mask);
}
- if (Mask.getSimpleValueType() == MVT::i64 && Subtarget->is32Bit()) {
+ if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
if (MaskVT == MVT::v64i1) {
- assert(Subtarget->hasBWI() && "Expected AVX512BW target!");
+ assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
// In 32-bit mode a bitcast of i64 is illegal; extend/split it instead.
SDValue Lo, Hi;
Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
@@ -16368,7 +17233,7 @@ static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
/// necessary casting or extending for \p Mask when lowering masking intrinsics
static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
SDValue PreservedSrc,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
@@ -16393,13 +17258,14 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
case X86ISD::VTRUNC:
case X86ISD::VTRUNCS:
case X86ISD::VTRUNCUS:
+ case ISD::FP_TO_FP16:
// We can't use ISD::VSELECT here because it is not always "Legal"
// for the destination type. For example, vpmovqb requires only AVX512,
// while a vselect operating on byte elements requires BWI.
OpcodeSelect = X86ISD::SELECT;
break;
}
- if (PreservedSrc.getOpcode() == ISD::UNDEF)
+ if (PreservedSrc.isUndef())
PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
}
@@ -16413,7 +17279,7 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
/// for a scalar instruction.
static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
SDValue PreservedSrc,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
if (isAllOnesConstant(Mask))
return Op;
@@ -16429,7 +17295,7 @@ static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
Op.getOpcode() == X86ISD::VFPCLASSS)
return DAG.getNode(ISD::OR, dl, VT, Op, IMask);
- if (PreservedSrc.getOpcode() == ISD::UNDEF)
+ if (PreservedSrc.isUndef())
PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
}
@@ -16495,7 +17361,7 @@ static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
}
-static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
@@ -16706,6 +17572,16 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
Src1, Src2, Src3),
Mask, PassThru, Subtarget, DAG);
}
+ case VPERM_2OP_MASK: {
+ SDValue Src1 = Op.getOperand(1);
+ SDValue Src2 = Op.getOperand(2);
+ SDValue PassThru = Op.getOperand(3);
+ SDValue Mask = Op.getOperand(4);
+
+ // Swap Src1 and Src2 when creating the node.
+ return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1),
+ Mask, PassThru, Subtarget, DAG);
+ }
case VPERM_3OP_MASKZ:
case VPERM_3OP_MASK:{
// Src2 is the PassThru
@@ -16764,6 +17640,30 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
Src1, Src2, Src3),
Mask, PassThru, Subtarget, DAG);
}
+ case FMA_OP_SCALAR_MASK:
+ case FMA_OP_SCALAR_MASK3:
+ case FMA_OP_SCALAR_MASKZ: {
+ SDValue Src1 = Op.getOperand(1);
+ SDValue Src2 = Op.getOperand(2);
+ SDValue Src3 = Op.getOperand(3);
+ SDValue Mask = Op.getOperand(4);
+ MVT VT = Op.getSimpleValueType();
+ SDValue PassThru = SDValue();
+
+ // Select the PassThru operand according to the masking variant.
+ if (IntrData->Type == FMA_OP_SCALAR_MASKZ)
+ PassThru = getZeroVector(VT, Subtarget, DAG, dl);
+ else if (IntrData->Type == FMA_OP_SCALAR_MASK3)
+ PassThru = Src3;
+ else
+ PassThru = Src1;
+
+ SDValue Rnd = Op.getOperand(5);
+ return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl,
+ Op.getValueType(), Src1, Src2,
+ Src3, Rnd),
+ Mask, PassThru, Subtarget, DAG);
+ }
case TERLOG_OP_MASK:
case TERLOG_OP_MASKZ: {
SDValue Src1 = Op.getOperand(1);
@@ -16879,49 +17779,76 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
MVT::i1),
Subtarget, DAG);
- return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i8,
- DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, CmpMask),
- DAG.getValueType(MVT::i1));
+ return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, CmpMask);
}
case COMI: { // Comparison intrinsics
ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
- unsigned X86CC = TranslateX86CC(CC, dl, true, LHS, RHS, DAG);
- assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
- SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
- SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(X86CC, dl, MVT::i8), Cond);
+ SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
+ SDValue InvComi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, RHS, LHS);
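+ // Also compute the comparison with swapped operands; the LT/LE cases below
+ // reuse the GT/GE flag tests on InvComi.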
+ SDValue SetCC;
+ switch (CC) {
+ case ISD::SETEQ: { // (ZF = 0 and PF = 0)
+ SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_E, dl, MVT::i8), Comi);
+ SDValue SetNP = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_NP, dl, MVT::i8),
+ Comi);
+ SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
+ break;
+ }
+ case ISD::SETNE: { // (ZF = 1 or PF = 1)
+ SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_NE, dl, MVT::i8), Comi);
+ SDValue SetP = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_P, dl, MVT::i8),
+ Comi);
+ SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
+ break;
+ }
+ case ISD::SETGT: // (CF = 0 and ZF = 0)
+ SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_A, dl, MVT::i8), Comi);
+ break;
+ case ISD::SETLT: { // The condition is opposite to GT. Swap the operands.
+ SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_A, dl, MVT::i8), InvComi);
+ break;
+ }
+ case ISD::SETGE: // CF = 0
+ SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_AE, dl, MVT::i8), Comi);
+ break;
+ case ISD::SETLE: // The condition is opposite to GE. Swap the operands.
+ SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
+ DAG.getConstant(X86::COND_AE, dl, MVT::i8), InvComi);
+ break;
+ default:
+ llvm_unreachable("Unexpected illegal condition!");
+ }
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
case COMI_RM: { // Comparison intrinsics with Sae
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
- SDValue CC = Op.getOperand(3);
+ unsigned CondVal = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
SDValue Sae = Op.getOperand(4);
- auto ComiType = TranslateX86ConstCondToX86CC(CC);
- // choose between ordered and unordered (comi/ucomi)
- unsigned comiOp = std::get<0>(ComiType) ? IntrData->Opc0 : IntrData->Opc1;
- SDValue Cond;
- if (cast<ConstantSDNode>(Sae)->getZExtValue() !=
- X86::STATIC_ROUNDING::CUR_DIRECTION)
- Cond = DAG.getNode(comiOp, dl, MVT::i32, LHS, RHS, Sae);
+
+ SDValue FCmp;
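+ // Only thread the SAE operand through when it requests something other than
+ // the current rounding direction.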
+ if (cast<ConstantSDNode>(Sae)->getZExtValue() ==
+ X86::STATIC_ROUNDING::CUR_DIRECTION)
+ FCmp = DAG.getNode(X86ISD::FSETCC, dl, MVT::i1, LHS, RHS,
+ DAG.getConstant(CondVal, dl, MVT::i8));
else
- Cond = DAG.getNode(comiOp, dl, MVT::i32, LHS, RHS);
- SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
- DAG.getConstant(std::get<1>(ComiType), dl, MVT::i8), Cond);
- return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
+ FCmp = DAG.getNode(X86ISD::FSETCC, dl, MVT::i1, LHS, RHS,
+ DAG.getConstant(CondVal, dl, MVT::i8), Sae);
+ // AnyExt just uses KMOVW %kreg, %r32; ZeroExt emits "and $1, %reg"
+ return DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, FCmp);
}
case VSHIFT:
return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
Op.getOperand(1), Op.getOperand(2), DAG);
- case VSHIFT_MASK:
- return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
- Op.getSimpleValueType(),
- Op.getOperand(1),
- Op.getOperand(2), DAG),
- Op.getOperand(4), Op.getOperand(3), Subtarget,
- DAG);
case COMPRESS_EXPAND_IN_REG: {
SDValue Mask = Op.getOperand(3);
SDValue DataToCompress = Op.getOperand(1);
@@ -16940,14 +17867,6 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
Mask = DAG.getBitcast(MaskVT, Mask);
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Mask);
}
- case BLEND: {
- SDValue Mask = Op.getOperand(3);
- MVT VT = Op.getSimpleValueType();
- MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
- SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
- return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
- Op.getOperand(2));
- }
case KUNPCK: {
MVT VT = Op.getSimpleValueType();
MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getSizeInBits()/2);
@@ -16960,6 +17879,35 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
Src2, Src1);
return DAG.getBitcast(VT, Res);
}
+ case FIXUPIMMS:
+ case FIXUPIMMS_MASKZ:
+ case FIXUPIMM:
+ case FIXUPIMM_MASKZ: {
+ SDValue Src1 = Op.getOperand(1);
+ SDValue Src2 = Op.getOperand(2);
+ SDValue Src3 = Op.getOperand(3);
+ SDValue Imm = Op.getOperand(4);
+ SDValue Mask = Op.getOperand(5);
+ SDValue Passthru = (IntrData->Type == FIXUPIMM || IntrData->Type == FIXUPIMMS) ?
+ Src1 : getZeroVector(VT, Subtarget, DAG, dl);
+ // These intrinsics come in two forms, with and without a rounding mode.
+ // If the intrinsic carries a rounding mode (7 operands), use it;
+ // otherwise fall back to the "current" rounding direction.
+ SDValue Rnd;
+ if (Op.getNumOperands() == 7)
+ Rnd = Op.getOperand(6);
+ else
+ Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
+ if (IntrData->Type == FIXUPIMM || IntrData->Type == FIXUPIMM_MASKZ)
+ return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
+ Src1, Src2, Src3, Imm, Rnd),
+ Mask, Passthru, Subtarget, DAG);
+ else // Scalar - FIXUPIMMS, FIXUPIMMS_MASKZ
+ return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
+ Src1, Src2, Src3, Imm, Rnd),
+ Mask, Passthru, Subtarget, DAG);
+ }
case CONVERT_TO_MASK: {
MVT SrcVT = Op.getOperand(1).getSimpleValueType();
MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
@@ -16995,6 +17943,21 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
subVec, subVec, immVal),
Mask, Passthru, Subtarget, DAG);
}
+ case BRCST32x2_TO_VEC: {
+ SDValue Src = Op.getOperand(1);
+ SDValue PassThru = Op.getOperand(2);
+ SDValue Mask = Op.getOperand(3);
+
+ assert((VT.getScalarType() == MVT::i32 ||
+ VT.getScalarType() == MVT::f32) && "Unexpected type!");
+ // Bitcast Src to a vector of packed 64-bit elements.
+ MVT ScalarVT = VT.getScalarType() == MVT::i32 ? MVT::i64 : MVT::f64;
+ MVT BitcastVT = MVT::getVectorVT(ScalarVT, Src.getValueSizeInBits()/64);
+ Src = DAG.getBitcast(BitcastVT, Src);
+
+ return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src),
+ Mask, PassThru, Subtarget, DAG);
+ }
default:
break;
}
@@ -17082,7 +18045,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
SDValue RHS = DAG.getBitcast(MVT::v16i1, Op.getOperand(2));
SDValue CC = DAG.getConstant(X86CC, dl, MVT::i8);
SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
- SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
+ SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
@@ -17163,6 +18126,16 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
return DAG.getNode(Opcode, dl, VTs, NewOps);
}
+ case Intrinsic::eh_sjlj_lsda: {
+ MachineFunction &MF = DAG.getMachineFunction();
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
+ auto &Context = MF.getMMI().getContext();
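+ // The SjLj LSDA is emitted under the label "GCC_except_table<N>", where N is
+ // the function number; recreate that symbol so it can be referenced here.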
+ MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
+ Twine(MF.getFunctionNumber()));
+ return DAG.getNode(X86ISD::Wrapper, dl, VT, DAG.getMCSymbol(S, PtrVT));
+ }
+
case Intrinsic::x86_seh_lsda: {
// Compute the symbol for the LSDA. We know it'll get emitted later.
MachineFunction &MF = DAG.getMachineFunction();
@@ -17192,7 +18165,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
// Returns one of the stack, base, or frame pointer registers, depending on
// which is used to reference local variables.
MachineFunction &MF = DAG.getMachineFunction();
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned Reg;
if (RegInfo->hasBasePointer(MF))
Reg = RegInfo->getBaseRegister();
@@ -17206,7 +18179,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Src, SDValue Mask, SDValue Base,
SDValue Index, SDValue ScaleOp, SDValue Chain,
- const X86Subtarget * Subtarget) {
+ const X86Subtarget &Subtarget) {
SDLoc dl(Op);
auto *C = cast<ConstantSDNode>(ScaleOp);
SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
@@ -17217,7 +18190,7 @@ static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
- if (Src.getOpcode() == ISD::UNDEF)
+ if (Src.isUndef())
Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
SDValue Ops[] = {Src, VMask, Base, Scale, Index, Disp, Segment, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
@@ -17237,7 +18210,7 @@ static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
MVT MaskVT = MVT::getVectorVT(MVT::i1,
Index.getSimpleValueType().getVectorNumElements());
- SDValue VMask = getMaskNode(Mask, MaskVT, &Subtarget, DAG, dl);
+ SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
SDValue Ops[] = {Base, Scale, Index, Disp, Segment, VMask, Src, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
@@ -17255,18 +18228,19 @@ static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Segment = DAG.getRegister(0, MVT::i32);
MVT MaskVT =
MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
- SDValue VMask = getMaskNode(Mask, MaskVT, &Subtarget, DAG, dl);
+ SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
//SDVTList VTs = DAG.getVTList(MVT::Other);
SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
return SDValue(Res, 0);
}
-// getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
-// read performance monitor counters (x86_rdpmc).
-static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
- SelectionDAG &DAG, const X86Subtarget *Subtarget,
- SmallVectorImpl<SDValue> &Results) {
+/// Handles the lowering of builtin intrinsics that read performance monitor
+/// counters (x86_rdpmc).
+static void getReadPerformanceCounter(SDNode *N, const SDLoc &DL,
+ SelectionDAG &DAG,
+ const X86Subtarget &Subtarget,
+ SmallVectorImpl<SDValue> &Results) {
assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue LO, HI;
@@ -17279,7 +18253,7 @@ static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
// Reads the content of a 64-bit performance counter and returns it in the
// registers EDX:EAX.
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
LO.getValue(2));
@@ -17290,7 +18264,7 @@ static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
}
Chain = HI.getValue(1);
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
// The EAX register is loaded with the low-order 32 bits. The EDX register
// is loaded with the supported high-order bits of the counter.
SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
@@ -17307,12 +18281,13 @@ static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
Results.push_back(Chain);
}
-// getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
-// read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
-// also used to custom lower READCYCLECOUNTER nodes.
-static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
- SelectionDAG &DAG, const X86Subtarget *Subtarget,
- SmallVectorImpl<SDValue> &Results) {
+/// Handles the lowering of builtin intrinsics that read the time stamp counter
+/// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
+/// READCYCLECOUNTER nodes.
+static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
+ SelectionDAG &DAG,
+ const X86Subtarget &Subtarget,
+ SmallVectorImpl<SDValue> &Results) {
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
SDValue LO, HI;
@@ -17320,7 +18295,7 @@ static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
// The processor's time-stamp counter (a 64-bit MSR) is stored into the
// EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
// and the EAX register is loaded with the low-order 32 bits.
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
LO.getValue(2));
@@ -17341,10 +18316,10 @@ static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
// Explicitly store the content of ECX at the location passed as input
// to the 'rdtscp' intrinsic.
Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo());
}
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
// The EDX register is loaded with the high-order 32 bits of the MSR, and
// the EAX register is loaded with the low-order 32 bits.
SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
@@ -17361,7 +18336,7 @@ static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
Results.push_back(Chain);
}
-static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SmallVector<SDValue, 2> Results;
SDLoc DL(Op);
@@ -17388,44 +18363,25 @@ static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
return Chain;
}
-/// \brief Lower intrinsics for TRUNCATE_TO_MEM case
-/// return truncate Store/MaskedStore Node
-static SDValue LowerINTRINSIC_TRUNCATE_TO_MEM(const SDValue & Op,
- SelectionDAG &DAG,
- MVT ElementType) {
- SDLoc dl(Op);
- SDValue Mask = Op.getOperand(4);
- SDValue DataToTruncate = Op.getOperand(3);
- SDValue Addr = Op.getOperand(2);
+static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
+ MachineFunction &MF = DAG.getMachineFunction();
SDValue Chain = Op.getOperand(0);
+ SDValue EHGuard = Op.getOperand(2);
+ WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
+ if (!EHInfo)
+ report_fatal_error("EHGuard is only live in functions using WinEH");
- MVT VT = DataToTruncate.getSimpleValueType();
- MVT SVT = MVT::getVectorVT(ElementType, VT.getVectorNumElements());
-
- if (isAllOnesConstant(Mask)) // return just a truncate store
- return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr,
- MachinePointerInfo(), SVT, false, false,
- SVT.getScalarSizeInBits()/8);
-
- MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
- MVT BitcastVT = MVT::getVectorVT(MVT::i1,
- Mask.getSimpleValueType().getSizeInBits());
- // In case when MaskVT equals v2i1 or v4i1, low 2 or 4 elements
- // are extracted by EXTRACT_SUBVECTOR.
- SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
- DAG.getBitcast(BitcastVT, Mask),
- DAG.getIntPtrConstant(0, dl));
-
- MachineMemOperand *MMO = DAG.getMachineFunction().
- getMachineMemOperand(MachinePointerInfo(),
- MachineMemOperand::MOStore, SVT.getStoreSize(),
- SVT.getScalarSizeInBits()/8);
+ // Cast the operand to an alloca, and remember the frame index.
+ auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
+ if (!FINode)
+ report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
+ EHInfo->EHGuardFrameIndex = FINode->getIndex();
- return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr,
- VMask, SVT, MMO, true);
+ // Return the chain operand without making any DAG nodes.
+ return Chain;
}
-static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
@@ -17433,6 +18389,8 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
if (!IntrData) {
if (IntNo == llvm::Intrinsic::x86_seh_ehregnode)
return MarkEHRegistrationNode(Op, DAG);
+ if (IntNo == llvm::Intrinsic::x86_seh_ehguard)
+ return MarkEHGuard(Op, DAG);
if (IntNo == llvm::Intrinsic::x86_flags_read_u32 ||
IntNo == llvm::Intrinsic::x86_flags_read_u64 ||
IntNo == llvm::Intrinsic::x86_flags_write_u32 ||
@@ -17491,7 +18449,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
SDValue Src = Op.getOperand(5);
SDValue Scale = Op.getOperand(6);
return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
- Scale, Chain, *Subtarget);
+ Scale, Chain, Subtarget);
}
case PREFETCH: {
SDValue Hint = Op.getOperand(6);
@@ -17504,7 +18462,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
SDValue Base = Op.getOperand(4);
SDValue Scale = Op.getOperand(5);
return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
- *Subtarget);
+ Subtarget);
}
// Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
case RDTSC: {
@@ -17532,7 +18490,6 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
}
// ADC/ADCX/SBB
case ADX: {
- SmallVector<SDValue, 2> Results;
SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
@@ -17540,13 +18497,11 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
Op.getOperand(4), GenCF.getValue(1));
SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
- Op.getOperand(5), MachinePointerInfo(),
- false, false, 0);
+ Op.getOperand(5), MachinePointerInfo());
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
DAG.getConstant(X86::COND_B, dl, MVT::i8),
Res.getValue(1));
- Results.push_back(SetCC);
- Results.push_back(Store);
+ SDValue Results[] = { SetCC, Store };
return DAG.getMergeValues(Results, dl);
}
case COMPRESS_TO_MEM: {
@@ -17554,48 +18509,45 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
SDValue DataToCompress = Op.getOperand(3);
SDValue Addr = Op.getOperand(2);
SDValue Chain = Op.getOperand(0);
-
MVT VT = DataToCompress.getSimpleValueType();
+
+ MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
+ assert(MemIntr && "Expected MemIntrinsicSDNode!");
+
if (isAllOnesConstant(Mask)) // return just a store
return DAG.getStore(Chain, dl, DataToCompress, Addr,
- MachinePointerInfo(), false, false,
- VT.getScalarSizeInBits()/8);
+ MemIntr->getMemOperand());
SDValue Compressed =
getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress),
Mask, DAG.getUNDEF(VT), Subtarget, DAG);
return DAG.getStore(Chain, dl, Compressed, Addr,
- MachinePointerInfo(), false, false,
- VT.getScalarSizeInBits()/8);
+ MemIntr->getMemOperand());
}
case TRUNCATE_TO_MEM_VI8:
- return LowerINTRINSIC_TRUNCATE_TO_MEM(Op, DAG, MVT::i8);
case TRUNCATE_TO_MEM_VI16:
- return LowerINTRINSIC_TRUNCATE_TO_MEM(Op, DAG, MVT::i16);
- case TRUNCATE_TO_MEM_VI32:
- return LowerINTRINSIC_TRUNCATE_TO_MEM(Op, DAG, MVT::i32);
- case EXPAND_FROM_MEM: {
+ case TRUNCATE_TO_MEM_VI32: {
SDValue Mask = Op.getOperand(4);
- SDValue PassThru = Op.getOperand(3);
+ SDValue DataToTruncate = Op.getOperand(3);
SDValue Addr = Op.getOperand(2);
SDValue Chain = Op.getOperand(0);
- MVT VT = Op.getSimpleValueType();
- if (isAllOnesConstant(Mask)) // return just a load
- return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
- false, VT.getScalarSizeInBits()/8);
+ MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
+ assert(MemIntr && "Expected MemIntrinsicSDNode!");
- SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
- false, false, false,
- VT.getScalarSizeInBits()/8);
+ EVT VT = MemIntr->getMemoryVT();
- SDValue Results[] = {
- getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, DataToExpand),
- Mask, PassThru, Subtarget, DAG), Chain};
- return DAG.getMergeValues(Results, dl);
+ if (isAllOnesConstant(Mask)) // return just a truncate store
+ return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, VT,
+ MemIntr->getMemOperand());
+
+ MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
+ SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
+
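+ // The trailing 'true' marks this as a truncating masked store.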
+ return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, VMask, VT,
+ MemIntr->getMemOperand(), true);
}
- case LOADU:
- case LOADA: {
+ case EXPAND_FROM_MEM: {
SDValue Mask = Op.getOperand(4);
SDValue PassThru = Op.getOperand(3);
SDValue Addr = Op.getOperand(2);
@@ -17605,13 +18557,16 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
assert(MemIntr && "Expected MemIntrinsicSDNode!");
+ SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr,
+ MemIntr->getMemOperand());
+
if (isAllOnesConstant(Mask)) // return just a load
- return DAG.getLoad(VT, dl, Chain, Addr, MemIntr->getMemOperand());
+ return DataToExpand;
- MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
- SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
- return DAG.getMaskedLoad(VT, dl, Chain, Addr, VMask, PassThru, VT,
- MemIntr->getMemOperand(), ISD::NON_EXTLOAD);
+ SDValue Results[] = {
+ getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, DataToExpand),
+ Mask, PassThru, Subtarget, DAG), Chain};
+ return DAG.getMergeValues(Results, dl);
}
}
}
@@ -17630,25 +18585,24 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
- DAG.getNode(ISD::ADD, dl, PtrVT,
- FrameAddr, Offset),
- MachinePointerInfo(), false, false, false, 0);
+ DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
+ MachinePointerInfo());
}
// Just load the return address.
SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
- return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
- RetAddrFI, MachinePointerInfo(), false, false, false, 0);
+ return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
+ MachinePointerInfo());
}
SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
EVT VT = Op.getValueType();
MFI->setFrameAddressIsTaken(true);
@@ -17678,8 +18632,7 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
while (Depth--)
FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
- MachinePointerInfo(),
- false, false, false, 0);
+ MachinePointerInfo());
return FrameAddr;
}
@@ -17687,7 +18640,7 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
// this table could be generated automatically from RegInfo.
unsigned X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
SelectionDAG &DAG) const {
- const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
+ const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
const MachineFunction &MF = DAG.getMachineFunction();
unsigned Reg = StringSwitch<unsigned>(RegName)
@@ -17703,7 +18656,7 @@ unsigned X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
" is allocatable: function has no frame pointer");
#ifndef NDEBUG
else {
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned FrameReg =
RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
@@ -17720,23 +18673,27 @@ unsigned X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
SelectionDAG &DAG) const {
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
}
unsigned X86TargetLowering::getExceptionPointerRegister(
const Constant *PersonalityFn) const {
if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
- return Subtarget->isTarget64BitLP64() ? X86::RDX : X86::EDX;
+ return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
- return Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX;
+ return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
}
unsigned X86TargetLowering::getExceptionSelectorRegister(
const Constant *PersonalityFn) const {
// Funclet personalities don't use selectors (the runtime does the selection).
assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
- return Subtarget->isTarget64BitLP64() ? X86::RDX : X86::EDX;
+ return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
+}
+
+bool X86TargetLowering::needsFixedCatchObjects() const {
+ return Subtarget.isTargetWin64();
}
SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
@@ -17746,7 +18703,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl (Op);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
(FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
@@ -17758,8 +18715,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
DAG.getIntPtrConstant(RegInfo->getSlotSize(),
dl));
StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
- Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
- false, false, 0);
+ Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
@@ -17769,6 +18725,16 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
+ // If the subtarget is not 64-bit, we may need the global base register
+ // after the pseudo is expanded post-isel, i.e., after the CGBR pass has run.
+ // Ask for the GlobalBaseReg now so that the pass inserts the code for us
+ // in case we need it; otherwise we would end up referencing a virtual
+ // register that is never defined.
+ if (!Subtarget.is64Bit()) {
+ const X86InstrInfo *TII = Subtarget.getInstrInfo();
+ (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
+ }
return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
DAG.getVTList(MVT::i32, MVT::Other),
Op.getOperand(0), Op.getOperand(1));
@@ -17781,6 +18747,13 @@ SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
Op.getOperand(0), Op.getOperand(1));
}
+SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
+ Op.getOperand(0));
+}
+
static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
return Op.getOperand(0);
}
@@ -17794,9 +18767,9 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
SDLoc dl (Op);
const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
- const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
+ const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
SDValue OutChains[6];
// Large code-model.
@@ -17812,14 +18785,13 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
SDValue Addr = Trmp;
OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
- Addr, MachinePointerInfo(TrmpAddr),
- false, false, 0);
+ Addr, MachinePointerInfo(TrmpAddr));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(2, dl, MVT::i64));
- OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
- MachinePointerInfo(TrmpAddr, 2),
- false, false, 2);
+ OutChains[1] =
+ DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
+ /* Alignment = */ 2);
// Load the 'nest' parameter value into R10.
// R10 is specified in X86CallingConv.td
@@ -17827,29 +18799,26 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(10, dl, MVT::i64));
OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
- Addr, MachinePointerInfo(TrmpAddr, 10),
- false, false, 0);
+ Addr, MachinePointerInfo(TrmpAddr, 10));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(12, dl, MVT::i64));
- OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
- MachinePointerInfo(TrmpAddr, 12),
- false, false, 2);
+ OutChains[3] =
+ DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
+ /* Alignment = */ 2);
// Jump to the nested function.
OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(20, dl, MVT::i64));
OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
- Addr, MachinePointerInfo(TrmpAddr, 20),
- false, false, 0);
+ Addr, MachinePointerInfo(TrmpAddr, 20));
unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(22, dl, MVT::i64));
OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
- Addr, MachinePointerInfo(TrmpAddr, 22),
- false, false, 0);
+ Addr, MachinePointerInfo(TrmpAddr, 22));
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
} else {
@@ -17909,29 +18878,28 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
// This is storing the opcode for MOV32ri.
const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
- OutChains[0] = DAG.getStore(Root, dl,
- DAG.getConstant(MOV32ri|N86Reg, dl, MVT::i8),
- Trmp, MachinePointerInfo(TrmpAddr),
- false, false, 0);
+ OutChains[0] =
+ DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
+ Trmp, MachinePointerInfo(TrmpAddr));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(1, dl, MVT::i32));
- OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
- MachinePointerInfo(TrmpAddr, 1),
- false, false, 1);
+ OutChains[1] =
+ DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
+ /* Alignment = */ 1);
const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(5, dl, MVT::i32));
OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
Addr, MachinePointerInfo(TrmpAddr, 5),
- false, false, 1);
+ /* Alignment = */ 1);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(6, dl, MVT::i32));
- OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
- MachinePointerInfo(TrmpAddr, 6),
- false, false, 1);
+ OutChains[3] =
+ DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
+ /* Alignment = */ 1);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
@@ -17959,7 +18927,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
*/
MachineFunction &MF = DAG.getMachineFunction();
- const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
+ const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
unsigned StackAlignment = TFI.getStackAlignment();
MVT VT = Op.getSimpleValueType();
SDLoc DL(Op);
@@ -17979,8 +18947,8 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
Ops, MVT::i16, MMO);
// Load FP Control Word from stack slot
- SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
- MachinePointerInfo(), false, false, false, 0);
+ SDValue CWD =
+ DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MachinePointerInfo());
// Transform as necessary
SDValue CWD1 =
@@ -18014,6 +18982,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
// split the vector, perform the operation on its Lo and Hi parts and
// concatenate the results.
static SDValue LowerVectorCTLZ_AVX512(SDValue Op, SelectionDAG &DAG) {
+ assert(Op.getOpcode() == ISD::CTLZ);
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
@@ -18044,8 +19013,8 @@ static SDValue LowerVectorCTLZ_AVX512(SDValue Op, SelectionDAG &DAG) {
std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
MVT OutVT = MVT::getVectorVT(EltVT, NumElems/2);
- Lo = DAG.getNode(Op.getOpcode(), dl, OutVT, Lo);
- Hi = DAG.getNode(Op.getOpcode(), dl, OutVT, Hi);
+ Lo = DAG.getNode(ISD::CTLZ, dl, OutVT, Lo);
+ Hi = DAG.getNode(ISD::CTLZ, dl, OutVT, Hi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
}
@@ -18064,51 +19033,112 @@ static SDValue LowerVectorCTLZ_AVX512(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
}
-static SDValue LowerCTLZ(SDValue Op, const X86Subtarget *Subtarget,
- SelectionDAG &DAG) {
+// Lower CTLZ using a PSHUFB lookup table implementation.
+static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
+ const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
- MVT OpVT = VT;
- unsigned NumBits = VT.getSizeInBits();
- SDLoc dl(Op);
+ int NumElts = VT.getVectorNumElements();
+ int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
+ MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
- if (VT.isVector() && Subtarget->hasAVX512())
- return LowerVectorCTLZ_AVX512(Op, DAG);
+ // Per-nibble leading zero PSHUFB lookup table.
+ const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
+ /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
+ /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
+ /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
- Op = Op.getOperand(0);
- if (VT == MVT::i8) {
- // Zero extend to i32 since there is not an i8 bsr.
- OpVT = MVT::i32;
- Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
+ SmallVector<SDValue, 64> LUTVec;
+ for (int i = 0; i < NumBytes; ++i)
+ LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
+ SDValue InRegLUT = DAG.getNode(ISD::BUILD_VECTOR, DL, CurrVT, LUTVec);
+
+ // Begin by bitcasting the input to a byte vector, then split those bytes
+ // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
+ // If the hi input nibble is zero then we add both results together, otherwise
+ // we just take the hi result (by masking the lo result to zero before the
+ // add).
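+ // Illustrative scalar sketch of the per-byte step (an exposition aid, not
+ // part of this patch), assuming the LUT defined above:
+ //   unsigned ctlz8(uint8_t X) {
+ //     unsigned Hi = X >> 4, Lo = X & 0xF;
+ //     return (Hi != 0) ? LUT[Hi] : 4 + LUT[Lo]; // add lo count only when
+ //   }                                           // the hi nibble is zero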
+ SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
+ SDValue Zero = getZeroVector(CurrVT, Subtarget, DAG, DL);
+
+ SDValue NibbleMask = DAG.getConstant(0xF, DL, CurrVT);
+ SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
+ SDValue Lo = DAG.getNode(ISD::AND, DL, CurrVT, Op0, NibbleMask);
+ SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
+ SDValue HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
+
+ Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
+ Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
+ Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
+ SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
+
+ // Merge the result from vXi8 back to VT, working on the lo/hi halves
+ // of the current vector width in the same way we did for the nibbles.
+ // If the upper half of the input element is zero then add the halves'
+ // leading zero counts together, otherwise just use the upper half's.
+ // Double the width of the result until we are at target width.
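+ // Illustrative scalar sketch of one doubling step (exposition only),
+ // reusing the hypothetical ctlz8 above to form a per-i16 count:
+ //   unsigned ctlz16(uint16_t X) {
+ //     return (X >> 8) ? ctlz8(X >> 8) : 8 + ctlz8(X & 0xFF);
+ //   }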
+ while (CurrVT != VT) {
+ int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
+ int CurrNumElts = CurrVT.getVectorNumElements();
+ MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
+ MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
+ SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
+
+ // Check if the upper half of the input element is zero.
+ SDValue HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
+ DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
+ HiZ = DAG.getBitcast(NextVT, HiZ);
+
+ // Move the upper/lower halves to the lower bits as we'll be extending to
+ // NextVT. Mask the lower result to zero if HiZ is true and add the results
+ // together.
+ SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
+ SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
+ SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
+ R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
+ Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
+ CurrVT = NextVT;
}
- // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
- SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
- Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
+ return Res;
+}
- // If src is zero (i.e. bsr sets ZF), returns NumBits.
- SDValue Ops[] = {
- Op,
- DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
- DAG.getConstant(X86::COND_E, dl, MVT::i8),
- Op.getValue(1)
- };
- Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
+static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
+ const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
+ MVT VT = Op.getSimpleValueType();
+ SDValue Op0 = Op.getOperand(0);
- // Finally xor with NumBits-1.
- Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
- DAG.getConstant(NumBits - 1, dl, OpVT));
+ if (Subtarget.hasAVX512())
+ return LowerVectorCTLZ_AVX512(Op, DAG);
- if (VT == MVT::i8)
- Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
- return Op;
+ // Decompose 256-bit ops into smaller 128-bit ops.
+ if (VT.is256BitVector() && !Subtarget.hasInt256()) {
+ unsigned NumElems = VT.getVectorNumElements();
+
+ // Extract each 128-bit vector, perform ctlz and concat the result.
+ SDValue LHS = extract128BitVector(Op0, 0, DAG, DL);
+ SDValue RHS = extract128BitVector(Op0, NumElems / 2, DAG, DL);
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
+ DAG.getNode(ISD::CTLZ, DL, LHS.getValueType(), LHS),
+ DAG.getNode(ISD::CTLZ, DL, RHS.getValueType(), RHS));
+ }
+
+ assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
+ return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
}
-static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, const X86Subtarget *Subtarget,
- SelectionDAG &DAG) {
+static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
- EVT OpVT = VT;
+ MVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
SDLoc dl(Op);
+ unsigned Opc = Op.getOpcode();
+
+ if (VT.isVector())
+ return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
Op = Op.getOperand(0);
if (VT == MVT::i8) {
@@ -18117,11 +19147,22 @@ static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, const X86Subtarget *Subtarget,
Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
}
- // Issue a bsr (scan bits in reverse).
+ // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
- // And xor with NumBits-1.
+ if (Opc == ISD::CTLZ) {
+ // If src is zero (i.e. bsr sets ZF), returns NumBits.
+ SDValue Ops[] = {
+ Op,
+ DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
+ DAG.getConstant(X86::COND_E, dl, MVT::i8),
+ Op.getValue(1)
+ };
+ Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
+ }
+
+ // Finally xor with NumBits-1.
Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
DAG.getConstant(NumBits - 1, dl, OpVT));
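// Worked example for i32: bsr(x) == 31 - ctlz(x) when x != 0, so
// ctlz(x) == bsr(x) ^ 31. For x == 0 (ISD::CTLZ only) the CMOV above
// substitutes 2*NumBits-1 == 63, and 63 ^ 31 == 32 == NumBits, as required.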
@@ -18136,8 +19177,6 @@ static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
SDLoc dl(Op);
if (VT.isVector()) {
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-
SDValue N0 = Op.getOperand(0);
SDValue Zero = DAG.getConstant(0, dl, VT);
@@ -18146,8 +19185,7 @@ static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(ISD::SUB, dl, VT, Zero, N0));
// cttz_undef(x) = (width - 1) - ctlz(lsb)
- if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF &&
- TLI.isOperationLegal(ISD::CTLZ, VT)) {
+ if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
SDValue WidthMinusOne = DAG.getConstant(NumBits - 1, dl, VT);
return DAG.getNode(ISD::SUB, dl, VT, WidthMinusOne,
DAG.getNode(ISD::CTLZ, dl, VT, LSB));
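// Worked example (illustrative): for x = 0b01100 in an i8 lane,
// lsb = x & -x = 0b00100, ctlz(lsb) = 5, and (8 - 1) - 5 = 2 == cttz(x).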
@@ -18176,8 +19214,8 @@ static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
}
-// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
-// ones, and then concatenate the result back.
+/// Break a 256-bit integer operation into two new 128-bit ones and then
+/// concatenate the result back.
static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
@@ -18189,13 +19227,42 @@ static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
// Extract the LHS vectors
SDValue LHS = Op.getOperand(0);
- SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
- SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
+ SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
+ SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
// Extract the RHS vectors
SDValue RHS = Op.getOperand(1);
- SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
- SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
+ SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
+ SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
+
+ MVT EltVT = VT.getVectorElementType();
+ MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
+ DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
+ DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
+}
+
+/// Break a 512-bit integer operation into two new 256-bit ones and then
+/// concatenate the result back.
+static SDValue Lower512IntArith(SDValue Op, SelectionDAG &DAG) {
+ MVT VT = Op.getSimpleValueType();
+
+ assert(VT.is512BitVector() && VT.isInteger() &&
+ "Unsupported value type for operation");
+
+ unsigned NumElems = VT.getVectorNumElements();
+ SDLoc dl(Op);
+
+ // Extract the LHS vectors
+ SDValue LHS = Op.getOperand(0);
+ SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
+ SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);
+
+ // Extract the RHS vectors
+ SDValue RHS = Op.getOperand(1);
+ SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
+ SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);
MVT EltVT = VT.getVectorElementType();
MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
@@ -18232,7 +19299,7 @@ static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
return Lower256IntArith(Op, DAG);
}
-static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
@@ -18241,28 +19308,26 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
// Decompose 256-bit ops into smaller 128-bit ops.
- if (VT.is256BitVector() && !Subtarget->hasInt256())
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
return Lower256IntArith(Op, DAG);
SDValue A = Op.getOperand(0);
SDValue B = Op.getOperand(1);
- // Lower v16i8/v32i8 mul as promotion to v8i16/v16i16 vector
- // pairs, multiply and truncate.
- if (VT == MVT::v16i8 || VT == MVT::v32i8) {
- if (Subtarget->hasInt256()) {
- if (VT == MVT::v32i8) {
- MVT SubVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() / 2);
- SDValue Lo = DAG.getIntPtrConstant(0, dl);
- SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2, dl);
- SDValue ALo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, A, Lo);
- SDValue BLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, B, Lo);
- SDValue AHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, A, Hi);
- SDValue BHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, B, Hi);
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
- DAG.getNode(ISD::MUL, dl, SubVT, ALo, BLo),
- DAG.getNode(ISD::MUL, dl, SubVT, AHi, BHi));
- }
+ // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
+ // vector pairs, multiply and truncate.
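+ // Illustrative scalar model of the per-element operation (exposition only):
+ // the i8 result is the low 8 bits of the 16-bit product, so sign- and
+ // zero-extension give the same answer.
+ //   uint8_t mul8(uint8_t A, uint8_t B) {
+ //     return (uint8_t)((int16_t)(int8_t)A * (int8_t)B);
+ //   }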
+ if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
+ if (Subtarget.hasInt256()) {
+ // For 512-bit vectors, split into 256-bit vectors to allow the
+ // sign-extension to occur.
+ if (VT == MVT::v64i8)
+ return Lower512IntArith(Op, DAG);
+
+ // For 256-bit vectors, split into 128-bit vectors to allow the
+ // sign-extension to occur. We don't need this on AVX512BW as we can
+ // safely sign-extend to v32i16.
+ if (VT == MVT::v32i8 && !Subtarget.hasBWI())
+ return Lower256IntArith(Op, DAG);
MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
return DAG.getNode(
@@ -18278,7 +19343,7 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
// Extract the lo parts and sign extend to i16
SDValue ALo, BLo;
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
ALo = DAG.getNode(X86ISD::VSEXT, dl, ExVT, A);
BLo = DAG.getNode(X86ISD::VSEXT, dl, ExVT, B);
} else {
@@ -18294,7 +19359,7 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
// Extract the hi parts and sign extend to i16
SDValue AHi, BHi;
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
const int ShufMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
-1, -1, -1, -1, -1, -1, -1, -1};
AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
@@ -18322,7 +19387,7 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
// Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
if (VT == MVT::v4i32) {
- assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
+ assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
"Should not custom lower when pmuldq is available!");
// Extract the odd parts.
@@ -18386,8 +19451,122 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
}
+static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
+ SDLoc dl(Op);
+ MVT VT = Op.getSimpleValueType();
+
+ // Decompose 256-bit ops into smaller 128-bit ops.
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
+ return Lower256IntArith(Op, DAG);
+
+ // Only i8 vectors should need custom lowering after this.
+ assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256())) &&
+ "Unsupported vector type");
+
+ // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
+ // logical shift down the upper half and pack back to i8.
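+ // Illustrative scalar models of the per-element operation (exposition
+ // only, not part of this patch):
+ //   uint8_t mulhu8(uint8_t A, uint8_t B) { return ((uint16_t)A * B) >> 8; }
+ //   int8_t  mulhs8(int8_t A, int8_t B)   { return ((int16_t)A * B) >> 8; }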
+ SDValue A = Op.getOperand(0);
+ SDValue B = Op.getOperand(1);
+
+ // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
+ // and then ashr/lshr the upper bits down to the lower bits before the multiply.
+ unsigned Opcode = Op.getOpcode();
+ unsigned ExShift = (ISD::MULHU == Opcode ? ISD::SRL : ISD::SRA);
+ unsigned ExSSE41 = (ISD::MULHU == Opcode ? X86ISD::VZEXT : X86ISD::VSEXT);
+
+ // AVX2 implementations - extend xmm subvectors to ymm.
+ if (Subtarget.hasInt256()) {
+ SDValue Lo = DAG.getIntPtrConstant(0, dl);
+ SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2, dl);
+
+ if (VT == MVT::v32i8) {
+ SDValue ALo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, A, Lo);
+ SDValue BLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, B, Lo);
+ SDValue AHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, A, Hi);
+ SDValue BHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, B, Hi);
+ ALo = DAG.getNode(ExSSE41, dl, MVT::v16i16, ALo);
+ BLo = DAG.getNode(ExSSE41, dl, MVT::v16i16, BLo);
+ AHi = DAG.getNode(ExSSE41, dl, MVT::v16i16, AHi);
+ BHi = DAG.getNode(ExSSE41, dl, MVT::v16i16, BHi);
+ Lo = DAG.getNode(ISD::SRL, dl, MVT::v16i16,
+ DAG.getNode(ISD::MUL, dl, MVT::v16i16, ALo, BLo),
+ DAG.getConstant(8, dl, MVT::v16i16));
+ Hi = DAG.getNode(ISD::SRL, dl, MVT::v16i16,
+ DAG.getNode(ISD::MUL, dl, MVT::v16i16, AHi, BHi),
+ DAG.getConstant(8, dl, MVT::v16i16));
+ // The ymm variant of PACKUS treats the 128-bit lanes separately, so before
+ // using PACKUS we need to permute the inputs to the correct lo/hi xmm lane.
+ const int LoMask[] = {0, 1, 2, 3, 4, 5, 6, 7,
+ 16, 17, 18, 19, 20, 21, 22, 23};
+ const int HiMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
+ 24, 25, 26, 27, 28, 29, 30, 31};
+ return DAG.getNode(X86ISD::PACKUS, dl, VT,
+ DAG.getVectorShuffle(MVT::v16i16, dl, Lo, Hi, LoMask),
+ DAG.getVectorShuffle(MVT::v16i16, dl, Lo, Hi, HiMask));
+ }
+
+ SDValue ExA = DAG.getNode(ExSSE41, dl, MVT::v16i16, A);
+ SDValue ExB = DAG.getNode(ExSSE41, dl, MVT::v16i16, B);
+ SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v16i16, ExA, ExB);
+ SDValue MulH = DAG.getNode(ISD::SRL, dl, MVT::v16i16, Mul,
+ DAG.getConstant(8, dl, MVT::v16i16));
+ Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, MulH, Lo);
+ Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, MulH, Hi);
+ return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
+ }
+
+ assert(VT == MVT::v16i8 &&
+ "Pre-AVX2 support only supports v16i8 multiplication");
+ MVT ExVT = MVT::v8i16;
+
+ // Extract the lo parts and zero/sign extend to i16.
+ SDValue ALo, BLo;
+ if (Subtarget.hasSSE41()) {
+ ALo = DAG.getNode(ExSSE41, dl, ExVT, A);
+ BLo = DAG.getNode(ExSSE41, dl, ExVT, B);
+ } else {
+ const int ShufMask[] = {-1, 0, -1, 1, -1, 2, -1, 3,
+ -1, 4, -1, 5, -1, 6, -1, 7};
+ ALo = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
+ BLo = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
+ ALo = DAG.getBitcast(ExVT, ALo);
+ BLo = DAG.getBitcast(ExVT, BLo);
+ ALo = DAG.getNode(ExShift, dl, ExVT, ALo, DAG.getConstant(8, dl, ExVT));
+ BLo = DAG.getNode(ExShift, dl, ExVT, BLo, DAG.getConstant(8, dl, ExVT));
+ }
+
+ // Extract the hi parts and zero/sign extend to i16.
+ SDValue AHi, BHi;
+ if (Subtarget.hasSSE41()) {
+ const int ShufMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
+ -1, -1, -1, -1, -1, -1, -1, -1};
+ AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
+ BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
+ AHi = DAG.getNode(ExSSE41, dl, ExVT, AHi);
+ BHi = DAG.getNode(ExSSE41, dl, ExVT, BHi);
+ } else {
+ const int ShufMask[] = {-1, 8, -1, 9, -1, 10, -1, 11,
+ -1, 12, -1, 13, -1, 14, -1, 15};
+ AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
+ BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
+ AHi = DAG.getBitcast(ExVT, AHi);
+ BHi = DAG.getBitcast(ExVT, BHi);
+ AHi = DAG.getNode(ExShift, dl, ExVT, AHi, DAG.getConstant(8, dl, ExVT));
+ BHi = DAG.getNode(ExShift, dl, ExVT, BHi, DAG.getConstant(8, dl, ExVT));
+ }
+
+ // Multiply, lshr the upper 8 bits to the lower 8 bits of the lo/hi results and
+ // pack back to v16i8.
+ SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
+ SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
+ RLo = DAG.getNode(ISD::SRL, dl, ExVT, RLo, DAG.getConstant(8, dl, ExVT));
+ RHi = DAG.getNode(ISD::SRL, dl, ExVT, RHi, DAG.getConstant(8, dl, ExVT));
+ return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
+}
+
SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
- assert(Subtarget->isTargetWin64() && "Unexpected target");
+ assert(Subtarget.isTargetWin64() && "Unexpected target");
EVT VT = Op.getValueType();
assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
"Unexpected return type for lowering");
@@ -18415,8 +19594,8 @@ SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) cons
"Unexpected argument type for lowering");
SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
Entry.Node = StackPtr;
- InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(),
- false, false, 16);
+ InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
+ MachinePointerInfo(), /* Alignment = */ 16);
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Ty = PointerType::get(ArgTy,0);
Entry.isSExt = false;
@@ -18431,21 +19610,39 @@ SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) cons
CLI.setDebugLoc(dl).setChain(InChain)
.setCallee(getLibcallCallingConv(LC),
static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
- Callee, std::move(Args), 0)
+ Callee, std::move(Args))
.setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
return DAG.getBitcast(VT, CallInfo.first);
}
-static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
MVT VT = Op0.getSimpleValueType();
SDLoc dl(Op);
- assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
- (VT == MVT::v8i32 && Subtarget->hasInt256()));
+ // Decompose 256-bit ops into smaller 128-bit ops.
+ if (VT.is256BitVector() && !Subtarget.hasInt256()) {
+ unsigned Opcode = Op.getOpcode();
+ unsigned NumElems = VT.getVectorNumElements();
+ MVT HalfVT = MVT::getVectorVT(VT.getScalarType(), NumElems / 2);
+ SDValue Lo0 = extract128BitVector(Op0, 0, DAG, dl);
+ SDValue Lo1 = extract128BitVector(Op1, 0, DAG, dl);
+ SDValue Hi0 = extract128BitVector(Op0, NumElems / 2, DAG, dl);
+ SDValue Hi1 = extract128BitVector(Op1, NumElems / 2, DAG, dl);
+ SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(HalfVT, HalfVT), Lo0, Lo1);
+ SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(HalfVT, HalfVT), Hi0, Hi1);
+ SDValue Ops[] = {
+ DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(0), Hi.getValue(0)),
+ DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(1), Hi.getValue(1))
+ };
+ return DAG.getMergeValues(Ops, dl);
+ }
+
+ assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
+ (VT == MVT::v8i32 && Subtarget.hasInt256()));
// PMULxD operations multiply each even value (starting at 0) of LHS with
// the related value of RHS and produce a widened result.
@@ -18461,16 +19658,18 @@ static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
// step to the left):
const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
// <a|b|c|d> => <b|undef|d|undef>
- SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
+ SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0,
+ makeArrayRef(&Mask[0], VT.getVectorNumElements()));
// <e|f|g|h> => <f|undef|h|undef>
- SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
+ SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1,
+ makeArrayRef(&Mask[0], VT.getVectorNumElements()));
// Emit two multiplies, one for the lower 2 ints and one for the higher 2
// ints.
MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
unsigned Opcode =
- (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
+ (!IsSigned || !Subtarget.hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
// PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
// => <2 x i64> <ae|cg>
SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
@@ -18494,7 +19693,7 @@ static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
// If we have a signed multiply but no PMULDQ fix up the high parts of a
// unsigned multiply.
- if (IsSigned && !Subtarget->hasSSE41()) {
+ if (IsSigned && !Subtarget.hasSSE41()) {
SDValue ShAmt = DAG.getConstant(
31, dl,
DAG.getTargetLoweringInfo().getShiftAmountTy(VT, DAG.getDataLayout()));
@@ -18515,19 +19714,19 @@ static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
// Return true if the required (according to Opcode) shift-imm form is natively
// supported by the Subtarget
-static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget *Subtarget,
+static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
unsigned Opcode) {
if (VT.getScalarSizeInBits() < 16)
return false;
if (VT.is512BitVector() &&
- (VT.getScalarSizeInBits() > 16 || Subtarget->hasBWI()))
+ (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
return true;
bool LShift = VT.is128BitVector() ||
- (VT.is256BitVector() && Subtarget->hasInt256());
+ (VT.is256BitVector() && Subtarget.hasInt256());
- bool AShift = LShift && (Subtarget->hasVLX() ||
+ bool AShift = LShift && (Subtarget.hasVLX() ||
(VT != MVT::v2i64 && VT != MVT::v4i64));
return (Opcode == ISD::SRA) ? AShift : LShift;
}
@@ -18535,24 +19734,24 @@ static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget *Subtarget,
// The shift amount is a variable, but it is the same for all vector lanes.
// These instructions are defined together with shift-immediate.
static
-bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget *Subtarget,
+bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
unsigned Opcode) {
return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
}
// Return true if the required (according to Opcode) variable-shift form is
// natively supported by the Subtarget
-static bool SupportedVectorVarShift(MVT VT, const X86Subtarget *Subtarget,
+static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
unsigned Opcode) {
- if (!Subtarget->hasInt256() || VT.getScalarSizeInBits() < 16)
+ if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
return false;
// vXi16 supported only on AVX-512, BWI
- if (VT.getScalarSizeInBits() == 16 && !Subtarget->hasBWI())
+ if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
return false;
- if (VT.is512BitVector() || Subtarget->hasVLX())
+ if (VT.is512BitVector() || Subtarget.hasVLX())
return true;
bool LShift = VT.is128BitVector() || VT.is256BitVector();
@@ -18561,7 +19760,7 @@ static bool SupportedVectorVarShift(MVT VT, const X86Subtarget *Subtarget,
}
static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
@@ -18611,12 +19810,12 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
// i64 SRA needs to be performed as partial shifts.
- if ((VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
- Op.getOpcode() == ISD::SRA && !Subtarget->hasXOP())
+ if ((VT == MVT::v2i64 || (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
+ Op.getOpcode() == ISD::SRA && !Subtarget.hasXOP())
return ArithmeticShiftRight64(ShiftAmt);
if (VT == MVT::v16i8 ||
- (Subtarget->hasInt256() && VT == MVT::v32i8) ||
+ (Subtarget.hasInt256() && VT == MVT::v32i8) ||
VT == MVT::v64i8) {
unsigned NumElts = VT.getVectorNumElements();
MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
@@ -18628,11 +19827,16 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
// ashr(R, 7) === cmp_slt(R, 0)
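// (An i8 arithmetic shift by 7 broadcasts the sign bit, yielding 0 or -1
// per lane, which is exactly the mask PCMPGT(0, R) produces.)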
if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
+ if (VT.is512BitVector()) {
+ assert(VT == MVT::v64i8 && "Unexpected element type!");
+ SDValue CMP = DAG.getNode(X86ISD::PCMPGTM, dl, MVT::v64i1, Zeros, R);
+ return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
+ }
return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
}
// XOP can shift v16i8 directly instead of as shift v8i16 + mask.
- if (VT == MVT::v16i8 && Subtarget->hasXOP())
+ if (VT == MVT::v16i8 && Subtarget.hasXOP())
return SDValue();
if (Op.getOpcode() == ISD::SHL) {
@@ -18668,8 +19872,8 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
}
// Special case in 32-bit mode, where i64 is expanded into high and low parts.
- if (!Subtarget->is64Bit() && !Subtarget->hasXOP() &&
- (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64))) {
+ if (!Subtarget.is64Bit() && !Subtarget.hasXOP() &&
+ (VT == MVT::v2i64 || (Subtarget.hasInt256() && VT == MVT::v4i64))) {
// Peek through any splat that was introduced for i64 shift vectorization.
int SplatIndex = -1;
@@ -18726,7 +19930,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
}
static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
- const X86Subtarget* Subtarget) {
+ const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
@@ -18746,7 +19950,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
// Check if this build_vector node is doing a splat.
// If so, then set BaseShAmt equal to the splat value.
BaseShAmt = BV->getSplatValue();
- if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
+ if (BaseShAmt && BaseShAmt.isUndef())
BaseShAmt = SDValue();
} else {
if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
@@ -18787,7 +19991,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
}
// Special case in 32-bit mode, where i64 is expanded into high and low parts.
- if (!Subtarget->is64Bit() && VT == MVT::v2i64 &&
+ if (!Subtarget.is64Bit() && VT == MVT::v2i64 &&
Amt.getOpcode() == ISD::BITCAST &&
Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
Amt = Amt.getOperand(0);
@@ -18808,15 +20012,16 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
return SDValue();
}
-static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
+static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
+ bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
assert(VT.isVector() && "Custom lowering only for vector shifts!");
- assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
+ assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
return V;
@@ -18829,7 +20034,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
// XOP has 128-bit variable logical/arithmetic shifts.
// +ve/-ve Amt = shift left/right.
- if (Subtarget->hasXOP() &&
+ if (Subtarget.hasXOP() &&
(VT == MVT::v2i64 || VT == MVT::v4i32 ||
VT == MVT::v8i16 || VT == MVT::v16i8)) {
if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SRA) {
@@ -18856,7 +20061,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
// i64 vector arithmetic shift can be emulated with the transform:
// M = lshr(SIGN_BIT, Amt)
// ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
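// Illustrative scalar check of the transform (exposition only):
//   int64_t sra64(int64_t R, unsigned Amt) {
//     uint64_t M = (1ULL << 63) >> Amt;      // sign bit, logically shifted
//     uint64_t X = ((uint64_t)R >> Amt) ^ M; // lshr, flip the shifted sign bit
//     return (int64_t)(X - M);               // subtract to sign-extend
//   }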
- if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget->hasInt256())) &&
+ if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
Op.getOpcode() == ISD::SRA) {
SDValue S = DAG.getConstant(APInt::getSignBit(64), dl, VT);
SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
@@ -18869,10 +20074,9 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
// If possible, lower this packed shift into a vector multiply instead of
// expanding it into a sequence of scalar shifts.
// Do this only if the vector shift count is a constant build_vector.
- if (Op.getOpcode() == ISD::SHL &&
+ if (ConstantAmt && Op.getOpcode() == ISD::SHL &&
(VT == MVT::v8i16 || VT == MVT::v4i32 ||
- (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
- ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
+ (Subtarget.hasInt256() && VT == MVT::v16i16))) {
SmallVector<SDValue, 8> Elts;
MVT SVT = VT.getVectorElementType();
unsigned SVTBits = SVT.getSizeInBits();
@@ -18881,7 +20085,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
for (unsigned i=0; i !=NumElems; ++i) {
SDValue Op = Amt->getOperand(i);
- if (Op->getOpcode() == ISD::UNDEF) {
+ if (Op->isUndef()) {
Elts.push_back(Op);
continue;
}
@@ -18895,7 +20099,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
}
Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
}
- SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
+ SDValue BV = DAG.getBuildVector(VT, dl, Elts);
return DAG.getNode(ISD::MUL, dl, VT, R, BV);
}
@@ -18922,15 +20126,13 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
// lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
// the vector shift into four scalar shifts plus four pairs of vector
// insert/extract.
- if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
- ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
+ if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32)) {
unsigned TargetOpcode = X86ISD::MOVSS;
bool CanBeSimplified;
// The splat value for the first packed shift (the 'X' from the example).
SDValue Amt1 = Amt->getOperand(0);
// The splat value for the second packed shift (the 'Y' from the example).
- SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
- Amt->getOperand(2);
+ SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) : Amt->getOperand(2);
// See if it is possible to replace this node with a sequence of
// two shifts followed by a MOVSS/MOVSD
@@ -18991,7 +20193,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
if (VT == MVT::v4i32) {
unsigned Opc = Op.getOpcode();
SDValue Amt0, Amt1, Amt2, Amt3;
- if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
+ if (ConstantAmt) {
Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
@@ -19031,14 +20233,14 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
}
if (VT == MVT::v16i8 ||
- (VT == MVT::v32i8 && Subtarget->hasInt256() && !Subtarget->hasXOP())) {
+ (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP())) {
MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
unsigned ShiftOpcode = Op->getOpcode();
auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
// On SSE41 targets we make use of the fact that VSELECT lowers
// to PBLENDVB which selects bytes based just on the sign bit.
- if (Subtarget->hasSSE41()) {
+ if (Subtarget.hasSSE41()) {
V0 = DAG.getBitcast(VT, V0);
V1 = DAG.getBitcast(VT, V1);
Sel = DAG.getBitcast(VT, Sel);
@@ -19141,7 +20343,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
// It's worth extending once and using the v8i32 shifts for 16-bit types, but
// the extra overheads to get from v16i8 to v8i32 make the existing SSE
// solution better.
- if (Subtarget->hasInt256() && VT == MVT::v8i16) {
+ if (Subtarget.hasInt256() && VT == MVT::v8i16) {
MVT ExtVT = MVT::v8i32;
unsigned ExtOpc =
Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
@@ -19151,13 +20353,13 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
DAG.getNode(Op.getOpcode(), dl, ExtVT, R, Amt));
}
- if (Subtarget->hasInt256() && !Subtarget->hasXOP() && VT == MVT::v16i16) {
+ if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
MVT ExtVT = MVT::v8i32;
SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Amt, Z);
SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Amt, Z);
- SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, R, R);
- SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, R, R);
+ SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Z, R);
+ SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Z, R);
ALo = DAG.getBitcast(ExtVT, ALo);
AHi = DAG.getBitcast(ExtVT, AHi);
RLo = DAG.getBitcast(ExtVT, RLo);
@@ -19172,10 +20374,15 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
if (VT == MVT::v8i16) {
unsigned ShiftOpcode = Op->getOpcode();
+ // If we have a constant shift amount, the non-SSE41 path is best as
+ // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
+ bool UseSSE41 = Subtarget.hasSSE41() &&
+ !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
+
auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
// On SSE41 targets we make use of the fact that VSELECT lowers
// to PBLENDVB which selects bytes based just on the sign bit.
- if (Subtarget->hasSSE41()) {
+ if (UseSSE41) {
MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
V0 = DAG.getBitcast(ExtVT, V0);
V1 = DAG.getBitcast(ExtVT, V1);
@@ -19192,7 +20399,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
};
// Turn 'a' into a mask suitable for VSELECT: a = a << 12;
- if (Subtarget->hasSSE41()) {
+ if (UseSSE41) {
// On SSE41 targets we need to replicate the shift mask in both
// bytes for PBLENDVB.
Amt = DAG.getNode(
@@ -19231,43 +20438,13 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
}
// Decompose 256-bit shifts into smaller 128-bit shifts.
- if (VT.is256BitVector()) {
- unsigned NumElems = VT.getVectorNumElements();
- MVT EltVT = VT.getVectorElementType();
- MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
-
- // Extract the two vectors
- SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
- SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
-
- // Recreate the shift amount vectors
- SDValue Amt1, Amt2;
- if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
- // Constant shift amount
- SmallVector<SDValue, 8> Ops(Amt->op_begin(), Amt->op_begin() + NumElems);
- ArrayRef<SDValue> Amt1Csts = makeArrayRef(Ops).slice(0, NumElems / 2);
- ArrayRef<SDValue> Amt2Csts = makeArrayRef(Ops).slice(NumElems / 2);
-
- Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
- Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
- } else {
- // Variable shift amount
- Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
- Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
- }
-
- // Issue new vector shifts for the smaller types
- V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
- V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
-
- // Concatenate the result back
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
- }
+ if (VT.is256BitVector())
+ return Lower256IntArith(Op, DAG);
return SDValue();
}
-static SDValue LowerRotate(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
SDLoc DL(Op);
@@ -19275,7 +20452,7 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget *Subtarget,
SDValue Amt = Op.getOperand(1);
assert(VT.isVector() && "Custom lowering only for vector rotates!");
- assert(Subtarget->hasXOP() && "XOP support required for vector rotates!");
+ assert(Subtarget.hasXOP() && "XOP support required for vector rotates!");
assert((Op.getOpcode() == ISD::ROTL) && "Only ROTL supported");
// XOP has 128-bit vector variable + immediate rotates.
@@ -19363,6 +20540,11 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
DAG.getConstant(X86::COND_O, DL, MVT::i32),
SDValue(Sum.getNode(), 2));
+ if (N->getValueType(1) == MVT::i1) {
+ SetCC = DAG.getNode(ISD::AssertZext, DL, MVT::i8, SetCC,
+ DAG.getValueType(MVT::i1));
+ SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
+ }
return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}
}
@@ -19372,10 +20554,15 @@ static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
SDValue SetCC =
- DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
+ DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
DAG.getConstant(Cond, DL, MVT::i32),
SDValue(Sum.getNode(), 1));
+ if (N->getValueType(1) == MVT::i1) {
+ SetCC = DAG.getNode(ISD::AssertZext, DL, MVT::i8, SetCC,
+ DAG.getValueType(MVT::i1));
+ SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
+ }
return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}
@@ -19387,9 +20574,9 @@ bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
unsigned OpWidth = MemType->getPrimitiveSizeInBits();
if (OpWidth == 64)
- return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
+ return !Subtarget.is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
else if (OpWidth == 128)
- return Subtarget->hasCmpxchg16b();
+ return Subtarget.hasCmpxchg16b();
else
return false;
}
@@ -19409,7 +20596,7 @@ X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
TargetLowering::AtomicExpansionKind
X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
- unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
+ unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
Type *MemType = AI->getType();
// If the operand is too big, we must see if cmpxchg8/16b is available
@@ -19446,16 +20633,9 @@ X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
}
}
-static bool hasMFENCE(const X86Subtarget& Subtarget) {
- // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
- // no-sse2). There isn't any reason to disable it if the target processor
- // supports it.
- return Subtarget.hasSSE2() || Subtarget.is64Bit();
-}
-
LoadInst *
X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
- unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
+ unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
Type *MemType = AI->getType();
// Accesses larger than the native width are turned into cmpxchg/libcalls, so
// there is no benefit in turning such RMWs into loads, and it is actually
@@ -19483,7 +20663,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
// r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
// lowered to just a load without a fence. An mfence flushes the store buffer,
// making the optimization clearly correct.
- // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
+ // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
// otherwise, we might be able to be more aggressive on relaxed idempotent
// rmw. In practice, they do not look useful, so we don't try to be
// especially clever.
@@ -19492,7 +20672,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
// the IR level, so we must wrap it in an intrinsic.
return nullptr;
- if (!hasMFENCE(*Subtarget))
+ if (!Subtarget.hasMFence())
// FIXME: it might make sense to use a locked operation here but on a
// different cache-line to prevent cache-line bouncing. In practice it
// is probably a small win, and x86 processors without mfence are rare
@@ -19512,7 +20692,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
return Loaded;
}
-static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
@@ -19522,8 +20702,9 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
- if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
- if (hasMFENCE(*Subtarget))
+ if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
+ FenceScope == CrossThread) {
+ if (Subtarget.hasMFence())
return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
SDValue Chain = Op.getOperand(0);
@@ -19545,7 +20726,7 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
}
-static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT T = Op.getSimpleValueType();
SDLoc DL(Op);
@@ -19557,7 +20738,7 @@ static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
case MVT::i16: Reg = X86::AX; size = 2; break;
case MVT::i32: Reg = X86::EAX; size = 4; break;
case MVT::i64:
- assert(Subtarget->is64Bit() && "Node not type legal!");
+ assert(Subtarget.is64Bit() && "Node not type legal!");
Reg = X86::RAX; size = 8;
break;
}
@@ -19587,14 +20768,14 @@ static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
return SDValue();
}
-static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT SrcVT = Op.getOperand(0).getSimpleValueType();
MVT DstVT = Op.getSimpleValueType();
if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
SrcVT == MVT::i64) {
- assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+ assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
if (DstVT != MVT::f64)
// This conversion needs to be expanded.
return SDValue();
@@ -19614,7 +20795,7 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, Op0,
DAG.getIntPtrConstant(i, dl)));
} else {
- assert(SrcVT == MVT::i64 && !Subtarget->is64Bit() &&
+ assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
"Unexpected source type in LowerBITCAST");
Elts.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op0,
DAG.getIntPtrConstant(0, dl)));
@@ -19627,14 +20808,14 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
Elts.append(NumElts, DAG.getUNDEF(SVT));
EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
- SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
+ SDValue BV = DAG.getBuildVector(NewVT, dl, Elts);
SDValue ToV2F64 = DAG.getBitcast(MVT::v2f64, BV);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
DAG.getIntPtrConstant(0, dl));
}
- assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
- Subtarget->hasMMX() && "Unexpected custom BITCAST");
+ assert(Subtarget.is64Bit() && !Subtarget.hasSSE2() &&
+ Subtarget.hasMMX() && "Unexpected custom BITCAST");
assert((DstVT == MVT::i64 ||
(DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
"Unexpected custom BITCAST");
@@ -19657,12 +20838,11 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
/// how many bytes of V are summed horizontally to produce each element of the
/// result.
static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
- const X86Subtarget *Subtarget,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc DL(V);
MVT ByteVecVT = V.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
- int NumElts = VT.getVectorNumElements();
assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
"Expected value to have byte element type.");
assert(EltVT != MVT::i8 &&
@@ -19713,16 +20893,15 @@ static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
// i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
// right by 8. It is important to shift as i16s as i8 vector shift isn't
// directly supported.
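// Illustrative scalar model of this step (exposition only), summing the two
// bytes held in one i16 lane; the low byte of (V << 8) is zero, so no carry
// crosses between the byte sums:
//   uint8_t pairsum(uint16_t V) { return (uint16_t)((V << 8) + V) >> 8; }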
- SmallVector<SDValue, 16> Shifters(NumElts, DAG.getConstant(8, DL, EltVT));
- SDValue Shifter = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Shifters);
- SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), Shifter);
+ SDValue ShifterV = DAG.getConstant(8, DL, VT);
+ SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
DAG.getBitcast(ByteVecVT, V));
- return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), Shifter);
+ return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
}
-static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, SDLoc DL,
- const X86Subtarget *Subtarget,
+static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
@@ -19750,17 +20929,14 @@ static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, SDLoc DL,
int NumByteElts = VecSize / 8;
MVT ByteVecVT = MVT::getVectorVT(MVT::i8, NumByteElts);
SDValue In = DAG.getBitcast(ByteVecVT, Op);
- SmallVector<SDValue, 16> LUTVec;
+ SmallVector<SDValue, 64> LUTVec;
for (int i = 0; i < NumByteElts; ++i)
LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
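// Illustrative scalar equivalent of the two nibble lookups below (exposition
// only, assuming the per-nibble popcount LUT built above):
//   int popcnt8(uint8_t X) { return LUT[X & 0xF] + LUT[X >> 4]; }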
- SDValue InRegLUT = DAG.getNode(ISD::BUILD_VECTOR, DL, ByteVecVT, LUTVec);
- SmallVector<SDValue, 16> Mask0F(NumByteElts,
- DAG.getConstant(0x0F, DL, MVT::i8));
- SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, DL, ByteVecVT, Mask0F);
+ SDValue InRegLUT = DAG.getBuildVector(ByteVecVT, DL, LUTVec);
+ SDValue M0F = DAG.getConstant(0x0F, DL, ByteVecVT);
// High nibbles
- SmallVector<SDValue, 16> Four(NumByteElts, DAG.getConstant(4, DL, MVT::i8));
- SDValue FourV = DAG.getNode(ISD::BUILD_VECTOR, DL, ByteVecVT, Four);
+ SDValue FourV = DAG.getConstant(4, DL, ByteVecVT);
SDValue HighNibbles = DAG.getNode(ISD::SRL, DL, ByteVecVT, In, FourV);
// Low nibbles
@@ -19781,8 +20957,8 @@ static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, SDLoc DL,
return LowerHorizontalByteSum(PopCnt, VT, Subtarget, DAG);
}
-static SDValue LowerVectorCTPOPBitmath(SDValue Op, SDLoc DL,
- const X86Subtarget *Subtarget,
+static SDValue LowerVectorCTPOPBitmath(SDValue Op, const SDLoc &DL,
+ const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
assert(VT.is128BitVector() &&
@@ -19801,19 +20977,13 @@ static SDValue LowerVectorCTPOPBitmath(SDValue Op, SDLoc DL,
auto GetShift = [&](unsigned OpCode, SDValue V, int Shifter) {
MVT VT = V.getSimpleValueType();
- SmallVector<SDValue, 32> Shifters(
- VT.getVectorNumElements(),
- DAG.getConstant(Shifter, DL, VT.getVectorElementType()));
- return DAG.getNode(OpCode, DL, VT, V,
- DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Shifters));
+ SDValue ShifterV = DAG.getConstant(Shifter, DL, VT);
+ return DAG.getNode(OpCode, DL, VT, V, ShifterV);
};
auto GetMask = [&](SDValue V, APInt Mask) {
MVT VT = V.getSimpleValueType();
- SmallVector<SDValue, 32> Masks(
- VT.getVectorNumElements(),
- DAG.getConstant(Mask, DL, VT.getVectorElementType()));
- return DAG.getNode(ISD::AND, DL, VT, V,
- DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Masks));
+ SDValue MaskV = DAG.getConstant(Mask, DL, VT);
+ return DAG.getNode(ISD::AND, DL, VT, V, MaskV);
};
// We don't want to incur the implicit masks required to SRL vNi8 vectors on
@@ -19852,27 +21022,38 @@ static SDValue LowerVectorCTPOPBitmath(SDValue Op, SDLoc DL,
DAG);
}
-static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
- // FIXME: Need to add AVX-512 support here!
- assert((VT.is256BitVector() || VT.is128BitVector()) &&
+ assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
"Unknown CTPOP type to handle");
SDLoc DL(Op.getNode());
SDValue Op0 = Op.getOperand(0);
- if (!Subtarget->hasSSSE3()) {
+ if (!Subtarget.hasSSSE3()) {
// We can't use the fast LUT approach, so fall back on vectorized bitmath.
assert(VT.is128BitVector() && "Only 128-bit vectors supported in SSE!");
return LowerVectorCTPOPBitmath(Op0, DL, Subtarget, DAG);
}
- if (VT.is256BitVector() && !Subtarget->hasInt256()) {
+ if (VT.is256BitVector() && !Subtarget.hasInt256()) {
unsigned NumElems = VT.getVectorNumElements();
// Extract each 128-bit vector, compute pop count and concat the result.
- SDValue LHS = Extract128BitVector(Op0, 0, DAG, DL);
- SDValue RHS = Extract128BitVector(Op0, NumElems/2, DAG, DL);
+ SDValue LHS = extract128BitVector(Op0, 0, DAG, DL);
+ SDValue RHS = extract128BitVector(Op0, NumElems / 2, DAG, DL);
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
+ LowerVectorCTPOPInRegLUT(LHS, DL, Subtarget, DAG),
+ LowerVectorCTPOPInRegLUT(RHS, DL, Subtarget, DAG));
+ }
+
+ if (VT.is512BitVector() && !Subtarget.hasBWI()) {
+ unsigned NumElems = VT.getVectorNumElements();
+
+ // Extract each 256-bit vector, compute pop count and concat the result.
+ SDValue LHS = extract256BitVector(Op0, 0, DAG, DL);
+ SDValue RHS = extract256BitVector(Op0, NumElems / 2, DAG, DL);
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
LowerVectorCTPOPInRegLUT(LHS, DL, Subtarget, DAG),
@@ -19882,26 +21063,184 @@ static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget *Subtarget,
return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
}
-static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Op.getSimpleValueType().isVector() &&
"We only do custom lowering for vector population count.");
return LowerVectorCTPOP(Op, Subtarget, DAG);
}
-static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
- SDNode *Node = Op.getNode();
- SDLoc dl(Node);
- EVT T = Node->getValueType(0);
- SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
- DAG.getConstant(0, dl, T), Node->getOperand(2));
- return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
- cast<AtomicSDNode>(Node)->getMemoryVT(),
- Node->getOperand(0),
- Node->getOperand(1), negOp,
- cast<AtomicSDNode>(Node)->getMemOperand(),
- cast<AtomicSDNode>(Node)->getOrdering(),
- cast<AtomicSDNode>(Node)->getSynchScope());
+static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
+ MVT VT = Op.getSimpleValueType();
+ SDValue In = Op.getOperand(0);
+ SDLoc DL(Op);
+
+ // For scalars, it's still beneficial to transfer to/from the SIMD unit to
+ // perform the BITREVERSE.
+ if (!VT.isVector()) {
+ MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
+ SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
+ Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
+ DAG.getIntPtrConstant(0, DL));
+ }
+
+ MVT SVT = VT.getVectorElementType();
+ int NumElts = VT.getVectorNumElements();
+ int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
+
+ // Decompose 256-bit ops into smaller 128-bit ops.
+ if (VT.is256BitVector()) {
+ SDValue Lo = extract128BitVector(In, 0, DAG, DL);
+ SDValue Hi = extract128BitVector(In, NumElts / 2, DAG, DL);
+
+ MVT HalfVT = MVT::getVectorVT(SVT, NumElts / 2);
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
+ DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Lo),
+ DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Hi));
+ }
+
+ assert(VT.is128BitVector() &&
+ "Only 128-bit vector bitreverse lowering supported.");
+
+ // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
+ // perform the BSWAP in the shuffle.
+ // It's best to shuffle using the second operand, as this implicitly allows
+ // memory folding for multiple vectors.
+ SmallVector<SDValue, 16> MaskElts;
+ for (int i = 0; i != NumElts; ++i) {
+ for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
+ int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
+ int PermuteByte = SourceByte | (2 << 5);
+ MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
+ }
+ }
+
+ SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
+ SDValue Res = DAG.getBitcast(MVT::v16i8, In);
+ Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
+ Res, Mask);
+ return DAG.getBitcast(VT, Res);
+}
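// How the VPPERM control bytes above are built, in plain C++: bits [4:0]
// select a byte from the 32-byte {Op1,Op2} concatenation (16+ picks Op2),
// and operation field 2 in bits [7:5] requests a bit-reversed copy of that
// byte; a sketch under that encoding, not the DAG construction itself.
#include <cstdint>
static void BuildVPPERMBitReverseMask(uint8_t Mask[16], int ScalarSizeInBytes) {
  int Idx = 0;
  for (int Elt = 0; Elt != 16 / ScalarSizeInBytes; ++Elt)
    for (int Byte = ScalarSizeInBytes - 1; Byte >= 0; --Byte) // BSWAP order
      Mask[Idx++] = uint8_t((16 + Elt * ScalarSizeInBytes + Byte) | (2 << 5));
}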
+
+static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
+ if (Subtarget.hasXOP())
+ return LowerBITREVERSE_XOP(Op, DAG);
+
+ assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
+
+ MVT VT = Op.getSimpleValueType();
+ SDValue In = Op.getOperand(0);
+ SDLoc DL(Op);
+
+ unsigned NumElts = VT.getVectorNumElements();
+ assert(VT.getScalarType() == MVT::i8 &&
+ "Only byte vector BITREVERSE supported");
+
+ // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
+ if (VT.is256BitVector() && !Subtarget.hasInt256()) {
+ MVT HalfVT = MVT::getVectorVT(MVT::i8, NumElts / 2);
+ SDValue Lo = extract128BitVector(In, 0, DAG, DL);
+ SDValue Hi = extract128BitVector(In, NumElts / 2, DAG, DL);
+ Lo = DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Lo);
+ Hi = DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Hi);
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
+ }
+
+ // Perform BITREVERSE using PSHUFB lookups. Each byte is split into
+ // two nibbles, and a PSHUFB lookup finds the bitreverse of each 0-15
+ // nibble value, placed into the opposite nibble position.
+ SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
+ SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
+ SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
+
+ const int LoLUT[16] = {
+ /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
+ /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
+ /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
+ /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
+ const int HiLUT[16] = {
+ /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
+ /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
+ /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
+ /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
+
+ SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
+ HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
+ }
+
+ SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
+ SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
+ Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
+ Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
+ return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
+}
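// Scalar analogue of the PSHUFB nibble-LUT bitreverse above: LoLUT maps a
// low nibble to its reversed bits placed high, HiLUT maps a high nibble to
// its reversed bits placed low, and OR recombines them; illustration only.
#include <cstdint>
static inline uint8_t BitReverse8LUT(uint8_t B) {
  static const uint8_t LoLUT[16] = {0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0,
                                    0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0,
                                    0x30, 0xB0, 0x70, 0xF0};
  static const uint8_t HiLUT[16] = {0x00, 0x08, 0x04, 0x0C, 0x02, 0x0A,
                                    0x06, 0x0E, 0x01, 0x09, 0x05, 0x0D,
                                    0x03, 0x0B, 0x07, 0x0F};
  return LoLUT[B & 0xF] | HiLUT[B >> 4]; // e.g. 0x01 -> 0x80
}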
+
+static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG) {
+ unsigned NewOpc = 0;
+ switch (N->getOpcode()) {
+ case ISD::ATOMIC_LOAD_ADD:
+ NewOpc = X86ISD::LADD;
+ break;
+ case ISD::ATOMIC_LOAD_SUB:
+ NewOpc = X86ISD::LSUB;
+ break;
+ case ISD::ATOMIC_LOAD_OR:
+ NewOpc = X86ISD::LOR;
+ break;
+ case ISD::ATOMIC_LOAD_XOR:
+ NewOpc = X86ISD::LXOR;
+ break;
+ case ISD::ATOMIC_LOAD_AND:
+ NewOpc = X86ISD::LAND;
+ break;
+ default:
+ llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
+ }
+
+ MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
+ return DAG.getMemIntrinsicNode(
+ NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
+ {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
+ /*MemVT=*/N->getSimpleValueType(0), MMO);
+}
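// What the LADD/LSUB/LOR/LXOR/LAND nodes model at the source level: an
// atomic RMW whose fetched value is unused, so it can stay one LOCK-prefixed
// memory instruction (e.g. `lock addl`) rather than a cmpxchg loop; a
// behavioural sketch, not a claim about any particular compiler's output.
#include <atomic>
static inline void IncrementDiscardingResult(std::atomic<int> &A) {
  A.fetch_add(1); // result discarded -> eligible for a plain `lock add`
}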
+
+/// Lower atomic_load_ops into LOCK-prefixed operations.
+static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ SDValue Chain = N->getOperand(0);
+ SDValue LHS = N->getOperand(1);
+ SDValue RHS = N->getOperand(2);
+ unsigned Opc = N->getOpcode();
+ MVT VT = N->getSimpleValueType(0);
+ SDLoc DL(N);
+
+ // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
+ // can only be lowered when the result is unused. They should have already
+ // been transformed into a cmpxchg loop in AtomicExpand.
+ if (N->hasAnyUseOfValue(0)) {
+ // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
+ // select LXADD if LOCK_SUB can't be selected.
+ if (Opc == ISD::ATOMIC_LOAD_SUB) {
+ AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
+ RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
+ return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
+ RHS, AN->getMemOperand(), AN->getOrdering(),
+ AN->getSynchScope());
+ }
+ assert(Opc == ISD::ATOMIC_LOAD_ADD &&
+ "Used AtomicRMW ops other than Add should have been expanded!");
+ return N;
+ }
+
+ SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG);
+ // RAUW the chain, but don't worry about the result, as it's unused.
+ assert(!N->hasAnyUseOfValue(0));
+ DAG.ReplaceAllUsesOfValueWith(N.getValue(1), LockOp.getValue(1));
+ return SDValue();
}
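// The sub-as-negated-add rewrite above, shown on std::atomic: x86 has an
// exchange-add (LXADD) but no fetch-and-sub that returns the old value, so
// subtraction becomes addition of 0 - v; a sketch of the equivalence.
#include <atomic>
#include <cstdint>
static inline uint32_t FetchSubViaXAdd(std::atomic<uint32_t> &A, uint32_t V) {
  return A.fetch_add(0u - V); // same old value as A.fetch_sub(V)
}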
static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
@@ -19914,7 +21253,8 @@ static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
// FIXME: On 32-bit, store -> fist or movq would be more efficient
// (The only way to get a 16-byte store is cmpxchg16b)
// FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
- if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
+ if (cast<AtomicSDNode>(Node)->getOrdering() ==
+ AtomicOrdering::SequentiallyConsistent ||
!DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
cast<AtomicSDNode>(Node)->getMemoryVT(),
@@ -19955,9 +21295,9 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
Op.getOperand(1), Op.getOperand(2));
}
-static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
+ assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
// For MacOSX, we want to call an alternative entry point: __sincos_stret,
// which returns the values as { float, float } (in XMM0) or
@@ -19991,7 +21331,7 @@ static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
- .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
+ .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
@@ -20051,7 +21391,7 @@ static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
DAG.getUNDEF(EltVT);
for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
Ops.push_back(FillVal);
- return DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Ops);
+ return DAG.getBuildVector(NVT, dl, Ops);
}
SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
DAG.getUNDEF(NVT);
@@ -20059,9 +21399,9 @@ static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
InOp, DAG.getIntPtrConstant(0, dl));
}
-static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- assert(Subtarget->hasAVX512() &&
+ assert(Subtarget.hasAVX512() &&
"MGATHER/MSCATTER are supported on AVX-512 arch only");
// X86 scatter kills mask register, so its type should be added to
@@ -20110,7 +21450,7 @@ static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget *Subtarget,
}
unsigned NumElts = VT.getVectorNumElements();
- if (!Subtarget->hasVLX() && !VT.is512BitVector() &&
+ if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
!Index.getSimpleValueType().is512BitVector()) {
// AVX512F supports only 512-bit vectors. Either the data or the index
// should be 512 bits wide. If both the index and data are 256-bit, but
@@ -20150,68 +21490,78 @@ static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget *Subtarget,
NewScatter = DAG.getMaskedScatter(VTs, N->getMemoryVT(), dl, Ops,
N->getMemOperand());
DAG.ReplaceAllUsesWith(Op, SDValue(NewScatter.getNode(), 1));
- return SDValue(NewScatter.getNode(), 0);
+ return SDValue(NewScatter.getNode(), 1);
}
-static SDValue LowerMLOAD(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
MVT VT = Op.getSimpleValueType();
+ MVT ScalarVT = VT.getScalarType();
SDValue Mask = N->getMask();
SDLoc dl(Op);
- if (Subtarget->hasAVX512() && !Subtarget->hasVLX() &&
- !VT.is512BitVector() && Mask.getValueType() == MVT::v8i1) {
- // This operation is legal for targets with VLX, but without
- // VLX the vector should be widened to 512 bit
- unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
- MVT WideDataVT = MVT::getVectorVT(VT.getScalarType(), NumEltsInWideVec);
- MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
- SDValue Src0 = N->getSrc0();
- Src0 = ExtendToType(Src0, WideDataVT, DAG);
- Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
- SDValue NewLoad = DAG.getMaskedLoad(WideDataVT, dl, N->getChain(),
- N->getBasePtr(), Mask, Src0,
- N->getMemoryVT(), N->getMemOperand(),
- N->getExtensionType());
-
- SDValue Exract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
- NewLoad.getValue(0),
- DAG.getIntPtrConstant(0, dl));
- SDValue RetOps[] = {Exract, NewLoad.getValue(1)};
- return DAG.getMergeValues(RetOps, dl);
- }
- return Op;
+ assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
+ "Cannot lower masked load op.");
+
+ assert(((ScalarVT == MVT::i32 || ScalarVT == MVT::f32) ||
+ (Subtarget.hasBWI() &&
+ (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
+ "Unsupported masked load op.");
+
+ // This operation is legal for targets with VLX, but without
+ // VLX the vector should be widened to 512 bits.
+ unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
+ MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
+ MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
+ SDValue Src0 = N->getSrc0();
+ Src0 = ExtendToType(Src0, WideDataVT, DAG);
+ Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
+ SDValue NewLoad = DAG.getMaskedLoad(WideDataVT, dl, N->getChain(),
+ N->getBasePtr(), Mask, Src0,
+ N->getMemoryVT(), N->getMemOperand(),
+ N->getExtensionType());
+
+ SDValue Exract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
+ NewLoad.getValue(0),
+ DAG.getIntPtrConstant(0, dl));
+ SDValue RetOps[] = {Exract, NewLoad.getValue(1)};
+ return DAG.getMergeValues(RetOps, dl);
}
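// Scalar model of the widening used by LowerMLOAD (and LowerMSTORE below)
// when VLX is unavailable: pad the mask with false and the pass-through
// with zero up to the 512-bit element count, run the legal wide masked op,
// then keep only the low lanes; a sketch, not the SelectionDAG code.
#include <cstddef>
#include <cstdint>
static void MaskedLoadWidened(const int32_t *Ptr, const bool *Mask,
                              const int32_t *Src0, int32_t *Out,
                              std::size_t N, std::size_t WideN) {
  for (std::size_t I = 0; I != WideN; ++I) {
    bool M = I < N && Mask[I];              // widened mask: extra lanes off
    int32_t Passthru = I < N ? Src0[I] : 0; // widened src0: extra lanes zero
    int32_t V = M ? Ptr[I] : Passthru;      // the "legal" wide masked load
    if (I < N)                              // extract the low subvector
      Out[I] = V;
  }
}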
-static SDValue LowerMSTORE(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
SDValue DataToStore = N->getValue();
MVT VT = DataToStore.getSimpleValueType();
+ MVT ScalarVT = VT.getScalarType();
SDValue Mask = N->getMask();
SDLoc dl(Op);
- if (Subtarget->hasAVX512() && !Subtarget->hasVLX() &&
- !VT.is512BitVector() && Mask.getValueType() == MVT::v8i1) {
- // This operation is legal for targets with VLX, but without
- // VLX the vector should be widened to 512 bit
- unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
- MVT WideDataVT = MVT::getVectorVT(VT.getScalarType(), NumEltsInWideVec);
- MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
- DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
- Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
- return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
- Mask, N->getMemoryVT(), N->getMemOperand(),
- N->isTruncatingStore());
- }
- return Op;
+ assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
+ "Cannot lower masked store op.");
+
+ assert(((ScalarVT == MVT::i32 || ScalarVT == MVT::f32) ||
+ (Subtarget.hasBWI() &&
+ (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
+ "Unsupported masked store op.");
+
+ // This operation is legal for targets with VLX, but without
+ // VLX the vector should be widened to 512 bits.
+ unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
+ MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
+ MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
+ DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
+ Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
+ return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
+ Mask, N->getMemoryVT(), N->getMemOperand(),
+ N->isTruncatingStore());
}
-static SDValue LowerMGATHER(SDValue Op, const X86Subtarget *Subtarget,
+static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
- assert(Subtarget->hasAVX512() &&
+ assert(Subtarget.hasAVX512() &&
"MGATHER/MSCATTER are supported on AVX-512 arch only");
MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
@@ -20226,7 +21576,7 @@ static SDValue LowerMGATHER(SDValue Op, const X86Subtarget *Subtarget,
unsigned NumElts = VT.getVectorNumElements();
assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
- if (!Subtarget->hasVLX() && !VT.is512BitVector() &&
+ if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
!Index.getSimpleValueType().is512BitVector()) {
// AVX512F supports only 512-bit vectors. Either the data or the index
// should be 512 bits wide. If both the index and data are 256-bit, but
@@ -20314,8 +21664,7 @@ SDValue X86TargetLowering::LowerGC_TRANSITION_END(SDValue Op,
return NOOP;
}
-/// LowerOperation - Provide custom lowering hooks for some operations.
-///
+/// Provide custom lowering hooks for some operations.
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: llvm_unreachable("Should not custom lower this!");
@@ -20323,8 +21672,13 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
return LowerCMP_SWAP(Op, Subtarget, DAG);
case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
- case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
- case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
+ case ISD::ATOMIC_LOAD_ADD:
+ case ISD::ATOMIC_LOAD_SUB:
+ case ISD::ATOMIC_LOAD_OR:
+ case ISD::ATOMIC_LOAD_XOR:
+ case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
+ case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
+ case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
@@ -20377,14 +21731,18 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
+ case ISD::EH_SJLJ_SETUP_DISPATCH:
+ return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
- case ISD::CTLZ: return LowerCTLZ(Op, Subtarget, DAG);
- case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, Subtarget, DAG);
+ case ISD::CTLZ:
+ case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
case ISD::CTTZ:
case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, DAG);
case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
+ case ISD::MULHS:
+ case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
case ISD::UMUL_LOHI:
case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
case ISD::ROTL: return LowerRotate(Op, Subtarget, DAG);
@@ -20417,11 +21775,34 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::GC_TRANSITION_START:
return LowerGC_TRANSITION_START(Op, DAG);
case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION_END(Op, DAG);
+ case ISD::STORE: return LowerTruncatingStore(Op, Subtarget, DAG);
}
}
-/// ReplaceNodeResults - Replace a node with an illegal result type
-/// with a new node built out of custom code.
+/// Places new result values for the node in Results (their number
+/// and types must exactly match those of the original return values of
+/// the node), or leaves Results empty, which indicates that the node is not
+/// to be custom lowered after all.
+void X86TargetLowering::LowerOperationWrapper(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const {
+ SDValue Res = LowerOperation(SDValue(N, 0), DAG);
+
+ if (!Res.getNode())
+ return;
+
+ assert((N->getNumValues() <= Res->getNumValues()) &&
+ "Lowering returned the wrong number of results!");
+
+ // Place new result values based on the result number of N.
+ // In some cases (LowerSINT_TO_FP for example) Res has more result values
+ // than the original node; the chain (the last value) should be dropped.
+ for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
+ Results.push_back(Res.getValue(I));
+}
+
+/// Replace a node with an illegal result type with a new node built out of
+/// custom code.
void X86TargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const {
@@ -20432,15 +21813,15 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
llvm_unreachable("Do not know how to custom type legalize this operation!");
case X86ISD::AVG: {
// Legalize types for X86ISD::AVG by expanding vectors.
- assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+ assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
auto InVT = N->getValueType(0);
auto InVTSize = InVT.getSizeInBits();
const unsigned RegSize =
(InVTSize > 128) ? ((InVTSize > 256) ? 512 : 256) : 128;
- assert((!Subtarget->hasAVX512() || RegSize < 512) &&
+ assert((!Subtarget.hasAVX512() || RegSize < 512) &&
"512-bit vector requires AVX512");
- assert((!Subtarget->hasAVX2() || RegSize < 256) &&
+ assert((!Subtarget.hasAVX2() || RegSize < 256) &&
"256-bit vector requires AVX2");
auto ElemVT = InVT.getVectorElementType();
@@ -20503,24 +21884,22 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
EVT VT = N->getValueType(0);
// Return a load from the stack slot.
if (StackSlot.getNode())
- Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
- MachinePointerInfo(),
- false, false, false, 0));
+ Results.push_back(
+ DAG.getLoad(VT, dl, FIST, StackSlot, MachinePointerInfo()));
else
Results.push_back(FIST);
}
return;
}
case ISD::UINT_TO_FP: {
- assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+ assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
if (N->getOperand(0).getValueType() != MVT::v2i32 ||
N->getValueType(0) != MVT::v2f32)
return;
SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
N->getOperand(0));
- SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
- MVT::f64);
- SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
+ SDValue VBias =
+ DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
DAG.getBitcast(MVT::v2i64, VBias));
Or = DAG.getBitcast(MVT::v2f64, Or);
@@ -20588,20 +21967,49 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
DAG.getConstant(0, dl, HalfT));
swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
DAG.getConstant(1, dl, HalfT));
- swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
- Regs64bit ? X86::RBX : X86::EBX,
- swapInL, cpInH.getValue(1));
- swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
- Regs64bit ? X86::RCX : X86::ECX,
- swapInH, swapInL.getValue(1));
- SDValue Ops[] = { swapInH.getValue(0),
- N->getOperand(1),
- swapInH.getValue(1) };
+ swapInH =
+ DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
+ swapInH, cpInH.getValue(1));
+ // If the current function needs the base pointer, RBX,
+ // we shouldn't use cmpxchg directly. The lowering of that
+ // instruction clobbers that register, and since RBX is then
+ // a reserved register the register allocator will not make
+ // sure its value is properly saved and restored around this
+ // live-range.
+ const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
+ SDValue Result;
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
+ unsigned BasePtr = TRI->getBaseRegister();
MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
- unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
- X86ISD::LCMPXCHG8_DAG;
- SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
+ if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
+ (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
+ // ISel prefers the LCMPXCHG64 variant.
+ // If that assert breaks, that is no longer the case, and we need
+ // to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX, not just EBX.
+ // This is a matter of accepting i64 input for that pseudo, and
+ // restoring into the register of the right width in the expand
+ // pseudo. Everything else should just work.
+ assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
+ "Saving only half of the RBX");
+ unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
+ : X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
+ SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
+ Regs64bit ? X86::RBX : X86::EBX,
+ HalfT, swapInH.getValue(1));
+ SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
+ RBXSave,
+ /*Glue*/ RBXSave.getValue(2)};
+ Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
+ } else {
+ unsigned Opcode =
+ Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
+ swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
+ Regs64bit ? X86::RBX : X86::EBX, swapInL,
+ swapInH.getValue(1));
+ SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
+ swapInL.getValue(1)};
+ Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
+ }
SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
Regs64bit ? X86::RAX : X86::EAX,
HalfT, Result.getValue(1));
@@ -20639,7 +22047,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
break;
}
case ISD::BITCAST: {
- assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+ assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
EVT DstVT = N->getValueType(0);
EVT SrcVT = N->getOperand(0)->getValueType(0);
@@ -20666,7 +22074,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
ToVecInt, DAG.getIntPtrConstant(i, dl)));
- Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
+ Results.push_back(DAG.getBuildVector(DstVT, dl, Elts));
}
}
}
@@ -20703,7 +22111,6 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::SETCC: return "X86ISD::SETCC";
case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
case X86ISD::FSETCC: return "X86ISD::FSETCC";
- case X86ISD::FGETSIGNx86: return "X86ISD::FGETSIGNx86";
case X86ISD::CMOV: return "X86ISD::CMOV";
case X86ISD::BRCOND: return "X86ISD::BRCOND";
case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
@@ -20724,7 +22131,6 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::MMX_PINSRW: return "X86ISD::MMX_PINSRW";
case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
case X86ISD::ANDNP: return "X86ISD::ANDNP";
- case X86ISD::PSIGN: return "X86ISD::PSIGN";
case X86ISD::BLENDI: return "X86ISD::BLENDI";
case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
case X86ISD::ADDUS: return "X86ISD::ADDUS";
@@ -20742,7 +22148,9 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FMAXC: return "X86ISD::FMAXC";
case X86ISD::FMINC: return "X86ISD::FMINC";
case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
+ case X86ISD::FRSQRTS: return "X86ISD::FRSQRTS";
case X86ISD::FRCP: return "X86ISD::FRCP";
+ case X86ISD::FRCPS: return "X86ISD::FRCPS";
case X86ISD::EXTRQI: return "X86ISD::EXTRQI";
case X86ISD::INSERTQI: return "X86ISD::INSERTQI";
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
@@ -20750,6 +22158,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
+ case X86ISD::EH_SJLJ_SETUP_DISPATCH:
+ return "X86ISD::EH_SJLJ_SETUP_DISPATCH";
case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
@@ -20757,6 +22167,15 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
+ case X86ISD::LCMPXCHG8_SAVE_EBX_DAG:
+ return "X86ISD::LCMPXCHG8_SAVE_EBX_DAG";
+ case X86ISD::LCMPXCHG16_SAVE_RBX_DAG:
+ return "X86ISD::LCMPXCHG16_SAVE_RBX_DAG";
+ case X86ISD::LADD: return "X86ISD::LADD";
+ case X86ISD::LSUB: return "X86ISD::LSUB";
+ case X86ISD::LOR: return "X86ISD::LOR";
+ case X86ISD::LXOR: return "X86ISD::LXOR";
+ case X86ISD::LAND: return "X86ISD::LAND";
case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
case X86ISD::VZEXT: return "X86ISD::VZEXT";
@@ -20778,8 +22197,10 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::VSHLI: return "X86ISD::VSHLI";
case X86ISD::VSRLI: return "X86ISD::VSRLI";
case X86ISD::VSRAI: return "X86ISD::VSRAI";
+ case X86ISD::VSRAV: return "X86ISD::VSRAV";
case X86ISD::VROTLI: return "X86ISD::VROTLI";
case X86ISD::VROTRI: return "X86ISD::VROTRI";
+ case X86ISD::VPPERM: return "X86ISD::VPPERM";
case X86ISD::CMPP: return "X86ISD::CMPP";
case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
@@ -20802,6 +22223,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::AND: return "X86ISD::AND";
case X86ISD::BEXTR: return "X86ISD::BEXTR";
case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
+ case X86ISD::MOVMSK: return "X86ISD::MOVMSK";
case X86ISD::PTEST: return "X86ISD::PTEST";
case X86ISD::TESTP: return "X86ISD::TESTP";
case X86ISD::TESTM: return "X86ISD::TESTM";
@@ -20842,6 +22264,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::VPERMI: return "X86ISD::VPERMI";
case X86ISD::VPTERNLOG: return "X86ISD::VPTERNLOG";
case X86ISD::VFIXUPIMM: return "X86ISD::VFIXUPIMM";
+ case X86ISD::VFIXUPIMMS: return "X86ISD::VFIXUPIMMS";
case X86ISD::VRANGE: return "X86ISD::VRANGE";
case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
@@ -20852,8 +22275,6 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
case X86ISD::MFENCE: return "X86ISD::MFENCE";
- case X86ISD::SFENCE: return "X86ISD::SFENCE";
- case X86ISD::LFENCE: return "X86ISD::LFENCE";
case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
case X86ISD::SAHF: return "X86ISD::SAHF";
case X86ISD::RDRAND: return "X86ISD::RDRAND";
@@ -20866,6 +22287,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::VPSHL: return "X86ISD::VPSHL";
case X86ISD::VPCOM: return "X86ISD::VPCOM";
case X86ISD::VPCOMU: return "X86ISD::VPCOMU";
+ case X86ISD::VPERMIL2: return "X86ISD::VPERMIL2";
case X86ISD::FMADD: return "X86ISD::FMADD";
case X86ISD::FMSUB: return "X86ISD::FMSUB";
case X86ISD::FNMADD: return "X86ISD::FNMADD";
@@ -20878,6 +22300,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FNMSUB_RND: return "X86ISD::FNMSUB_RND";
case X86ISD::FMADDSUB_RND: return "X86ISD::FMADDSUB_RND";
case X86ISD::FMSUBADD_RND: return "X86ISD::FMSUBADD_RND";
+ case X86ISD::VPMADD52H: return "X86ISD::VPMADD52H";
+ case X86ISD::VPMADD52L: return "X86ISD::VPMADD52L";
case X86ISD::VRNDSCALE: return "X86ISD::VRNDSCALE";
case X86ISD::VREDUCE: return "X86ISD::VREDUCE";
case X86ISD::VGETMANT: return "X86ISD::VGETMANT";
@@ -20898,6 +22322,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FSQRT_RND: return "X86ISD::FSQRT_RND";
case X86ISD::FGETEXP_RND: return "X86ISD::FGETEXP_RND";
case X86ISD::SCALEF: return "X86ISD::SCALEF";
+ case X86ISD::SCALEFS: return "X86ISD::SCALEFS";
case X86ISD::ADDS: return "X86ISD::ADDS";
case X86ISD::SUBS: return "X86ISD::SUBS";
case X86ISD::AVG: return "X86ISD::AVG";
@@ -20908,26 +22333,27 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FP_TO_UINT_RND: return "X86ISD::FP_TO_UINT_RND";
case X86ISD::VFPCLASS: return "X86ISD::VFPCLASS";
case X86ISD::VFPCLASSS: return "X86ISD::VFPCLASSS";
+ case X86ISD::MULTISHIFT: return "X86ISD::MULTISHIFT";
+ case X86ISD::SCALAR_FP_TO_SINT_RND: return "X86ISD::SCALAR_FP_TO_SINT_RND";
+ case X86ISD::SCALAR_FP_TO_UINT_RND: return "X86ISD::SCALAR_FP_TO_UINT_RND";
}
return nullptr;
}
-// isLegalAddressingMode - Return true if the addressing mode represented
-// by AM is legal for this target, for a load/store of the specified type.
+/// Return true if the addressing mode represented by AM is legal for this
+/// target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
const AddrMode &AM, Type *Ty,
unsigned AS) const {
// X86 supports extremely general addressing modes.
CodeModel::Model M = getTargetMachine().getCodeModel();
- Reloc::Model R = getTargetMachine().getRelocationModel();
// X86 allows a sign-extended 32-bit immediate field as a displacement.
if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
return false;
if (AM.BaseGV) {
- unsigned GVFlags =
- Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
+ unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
// If a reference to this global requires an extra load, we can't fold it.
if (isGlobalStubReference(GVFlags))
@@ -20939,8 +22365,8 @@ bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
return false;
// If lower 4G is not available, then we must use rip-relative addressing.
- if ((M != CodeModel::Small || R != Reloc::Static) &&
- Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
+ if ((M != CodeModel::Small || isPositionIndependent()) &&
+ Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
return false;
}
@@ -20977,7 +22403,7 @@ bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
// On AVX2 there are new vpsllv[dq] instructions (and other shifts) that make
// variable shifts just as cheap as scalar ones.
- if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
+ if (Subtarget.hasInt256() && (Bits == 32 || Bits == 64))
return false;
// Otherwise, it's significantly cheaper to shift by a scalar amount than by a
@@ -21026,12 +22452,12 @@ bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
// x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
- return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
+ return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
}
bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
// x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
- return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
+ return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
}
bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
@@ -21062,7 +22488,7 @@ bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
bool
X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
- if (!Subtarget->hasAnyFMA())
+ if (!Subtarget.hasAnyFMA())
return false;
VT = VT.getScalarType();
@@ -21086,8 +22512,8 @@ bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
return !(VT1 == MVT::i32 && VT2 == MVT::i16);
}
-/// isShuffleMaskLegal - Targets can use this to indicate that they only
-/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
+/// Targets can use this to indicate that they only support *some*
+/// VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
@@ -21121,9 +22547,9 @@ X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
//===----------------------------------------------------------------------===//
/// Utility function to emit xbegin specifying the start of an RTM region.
-static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
+static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
const TargetInstrInfo *TII) {
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
const BasicBlock *BB = MBB->getBasicBlock();
MachineFunction::iterator I = ++MBB->getIterator();
@@ -21167,21 +22593,21 @@ static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
// sinkMBB:
// EAX is live into the sinkMBB
sinkMBB->addLiveIn(X86::EAX);
- BuildMI(*sinkMBB, sinkMBB->begin(), DL,
- TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
- .addReg(X86::EAX);
+ BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(TargetOpcode::COPY),
+ MI.getOperand(0).getReg())
+ .addReg(X86::EAX);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return sinkMBB;
}
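// Source-level shape of the control flow emitted above: _xbegin returns
// _XBEGIN_STARTED when the transaction starts and an abort status (in EAX)
// otherwise; a usage sketch assuming an RTM-capable target built with
// -mrtm, not the MachineInstr emission itself.
#include <immintrin.h>
static inline bool TryTransactionalIncrement(int &X) {
  unsigned Status = _xbegin();
  if (Status == _XBEGIN_STARTED) {
    ++X;     // transactional region
    _xend(); // commit
    return true;
  }
  return false; // aborted; Status holds the abort code
}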
// FIXME: When we get size-specific XMM0 registers, i.e. XMM0_V16I8
// or XMM0_V32I8 in AVX, all of this code can be replaced with that
// in the .td file.
-static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
+static MachineBasicBlock *emitPCMPSTRM(MachineInstr &MI, MachineBasicBlock *BB,
const TargetInstrInfo *TII) {
unsigned Opc;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("illegal opcode!");
case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
@@ -21193,32 +22619,31 @@ static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
}
- DebugLoc dl = MI->getDebugLoc();
+ DebugLoc dl = MI.getDebugLoc();
MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
- unsigned NumArgs = MI->getNumOperands();
+ unsigned NumArgs = MI.getNumOperands();
for (unsigned i = 1; i < NumArgs; ++i) {
- MachineOperand &Op = MI->getOperand(i);
+ MachineOperand &Op = MI.getOperand(i);
if (!(Op.isReg() && Op.isImplicit()))
MIB.addOperand(Op);
}
- if (MI->hasOneMemOperand())
- MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ if (MI.hasOneMemOperand())
+ MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- BuildMI(*BB, MI, dl,
- TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
- .addReg(X86::XMM0);
+ BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
+ .addReg(X86::XMM0);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return BB;
}
// FIXME: Custom handling because TableGen doesn't support multiple implicit
// defs in an instruction pattern
-static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
+static MachineBasicBlock *emitPCMPSTRI(MachineInstr &MI, MachineBasicBlock *BB,
const TargetInstrInfo *TII) {
unsigned Opc;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("illegal opcode!");
case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
@@ -21230,93 +22655,90 @@ static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
}
- DebugLoc dl = MI->getDebugLoc();
+ DebugLoc dl = MI.getDebugLoc();
MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
- unsigned NumArgs = MI->getNumOperands(); // remove the results
+ unsigned NumArgs = MI.getNumOperands(); // remove the results
for (unsigned i = 1; i < NumArgs; ++i) {
- MachineOperand &Op = MI->getOperand(i);
+ MachineOperand &Op = MI.getOperand(i);
if (!(Op.isReg() && Op.isImplicit()))
MIB.addOperand(Op);
}
- if (MI->hasOneMemOperand())
- MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ if (MI.hasOneMemOperand())
+ MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- BuildMI(*BB, MI, dl,
- TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
- .addReg(X86::ECX);
+ BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
+ .addReg(X86::ECX);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return BB;
}
-static MachineBasicBlock *EmitWRPKRU(MachineInstr *MI, MachineBasicBlock *BB,
- const X86Subtarget *Subtarget) {
- DebugLoc dl = MI->getDebugLoc();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+static MachineBasicBlock *emitWRPKRU(MachineInstr &MI, MachineBasicBlock *BB,
+ const X86Subtarget &Subtarget) {
+ DebugLoc dl = MI.getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// insert input VAL into EAX
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX)
- .addReg(MI->getOperand(0).getReg());
+ .addReg(MI.getOperand(0).getReg());
// insert zero to ECX
- BuildMI(*BB, MI, dl, TII->get(X86::XOR32rr), X86::ECX)
- .addReg(X86::ECX)
- .addReg(X86::ECX);
+ BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::ECX);
+
// insert zero to EDX
- BuildMI(*BB, MI, dl, TII->get(X86::XOR32rr), X86::EDX)
- .addReg(X86::EDX)
- .addReg(X86::EDX);
+ BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::EDX);
+
// insert WRPKRU instruction
BuildMI(*BB, MI, dl, TII->get(X86::WRPKRUr));
- MI->eraseFromParent(); // The pseudo is gone now.
+ MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
-static MachineBasicBlock *EmitRDPKRU(MachineInstr *MI, MachineBasicBlock *BB,
- const X86Subtarget *Subtarget) {
- DebugLoc dl = MI->getDebugLoc();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+static MachineBasicBlock *emitRDPKRU(MachineInstr &MI, MachineBasicBlock *BB,
+ const X86Subtarget &Subtarget) {
+ DebugLoc dl = MI.getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// insert zero to ECX
- BuildMI(*BB, MI, dl, TII->get(X86::XOR32rr), X86::ECX)
- .addReg(X86::ECX)
- .addReg(X86::ECX);
+ BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::ECX);
+
// insert RDPKRU instruction
BuildMI(*BB, MI, dl, TII->get(X86::RDPKRUr));
- BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
- .addReg(X86::EAX);
+ BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
+ .addReg(X86::EAX);
- MI->eraseFromParent(); // The pseudo is gone now.
+ MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
-static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
- const X86Subtarget *Subtarget) {
- DebugLoc dl = MI->getDebugLoc();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+static MachineBasicBlock *emitMonitor(MachineInstr &MI, MachineBasicBlock *BB,
+ const X86Subtarget &Subtarget,
+ unsigned Opc) {
+ DebugLoc dl = MI.getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// Address into RAX/EAX, other two args into ECX, EDX.
- unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
- unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
+ unsigned MemOpc = Subtarget.is64Bit() ? X86::LEA64r : X86::LEA32r;
+ unsigned MemReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
for (int i = 0; i < X86::AddrNumOperands; ++i)
- MIB.addOperand(MI->getOperand(i));
+ MIB.addOperand(MI.getOperand(i));
unsigned ValOps = X86::AddrNumOperands;
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
- .addReg(MI->getOperand(ValOps).getReg());
+ .addReg(MI.getOperand(ValOps).getReg());
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
- .addReg(MI->getOperand(ValOps+1).getReg());
+ .addReg(MI.getOperand(ValOps + 1).getReg());
// The instruction doesn't actually take any operands though.
- BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
+ BuildMI(*BB, MI, dl, TII->get(Opc));
- MI->eraseFromParent(); // The pseudo is gone now.
+ MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
MachineBasicBlock *
-X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
+X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const {
// Emit va_arg instruction on X86-64.
@@ -21328,31 +22750,31 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
// 8 ) Align : Alignment of type
// 9 ) EFLAGS (implicit-def)
- assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
+ assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
static_assert(X86::AddrNumOperands == 5,
"VAARG_64 assumes 5 address operands");
- unsigned DestReg = MI->getOperand(0).getReg();
- MachineOperand &Base = MI->getOperand(1);
- MachineOperand &Scale = MI->getOperand(2);
- MachineOperand &Index = MI->getOperand(3);
- MachineOperand &Disp = MI->getOperand(4);
- MachineOperand &Segment = MI->getOperand(5);
- unsigned ArgSize = MI->getOperand(6).getImm();
- unsigned ArgMode = MI->getOperand(7).getImm();
- unsigned Align = MI->getOperand(8).getImm();
+ unsigned DestReg = MI.getOperand(0).getReg();
+ MachineOperand &Base = MI.getOperand(1);
+ MachineOperand &Scale = MI.getOperand(2);
+ MachineOperand &Index = MI.getOperand(3);
+ MachineOperand &Disp = MI.getOperand(4);
+ MachineOperand &Segment = MI.getOperand(5);
+ unsigned ArgSize = MI.getOperand(6).getImm();
+ unsigned ArgMode = MI.getOperand(7).getImm();
+ unsigned Align = MI.getOperand(8).getImm();
// Memory Reference
- assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
- MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
+ assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
+ MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
+ MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
// Machine Information
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
// struct va_list {
// i32 gp_offset
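// The remainder of this record, per the x86-64 System V psABI (the layout
// EmitVAARG64WithCustomInserter walks below):
//   struct va_list {
//     i32 gp_offset;          // byte offset of next GPR in reg_save_area
//     i32 fp_offset;          // byte offset of next XMM in reg_save_area
//     i8 *overflow_arg_area;  // next stack-passed argument
//     i8 *reg_save_area;      // registers saved by the prologue
//   };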
@@ -21521,7 +22943,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
// to OverflowDestReg.
if (NeedsAlign) {
// Align the overflow address
- assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
+ assert(isPowerOf2_32(Align) && "Alignment must be a power of 2");
unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
// aligned_addr = (addr + (align-1)) & ~(align-1)
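// The same align-up identity as standalone C++: for a power-of-two Align,
// (Addr + Align - 1) & ~(Align - 1) rounds Addr up to the next multiple of
// Align; a sketch of the arithmetic the emitted instructions perform.
#include <cstdint>
static inline uint64_t AlignUp(uint64_t Addr, uint64_t Align) {
  return (Addr + Align - 1) & ~(Align - 1); // Align must be a power of two
}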
@@ -21563,15 +22985,13 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
}
// Erase the pseudo instruction
- MI->eraseFromParent();
+ MI.eraseFromParent();
return endMBB;
}
-MachineBasicBlock *
-X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
- MachineInstr *MI,
- MachineBasicBlock *MBB) const {
+MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
+ MachineInstr &MI, MachineBasicBlock *MBB) const {
// Emit code to save XMM registers to the stack. The ABI says that the
// number of registers to save is given in %al, so it's theoretically
// possible to do an indirect jump trick to avoid saving all of them,
@@ -21602,14 +23022,14 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
XMMSaveMBB->addSuccessor(EndMBB);
// Now add the instructions.
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
- unsigned CountReg = MI->getOperand(0).getReg();
- int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
- int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
+ unsigned CountReg = MI.getOperand(0).getReg();
+ int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
+ int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
- if (!Subtarget->isCallingConvWin64(F->getFunction()->getCallingConv())) {
+ if (!Subtarget.isCallingConvWin64(F->getFunction()->getCallingConv())) {
// If %al is 0, branch around the XMM save block.
BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
@@ -21618,29 +23038,29 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
// Make sure the last operand is EFLAGS, which gets clobbered by the branch
// that was just emitted, but clearly shouldn't be "saved".
- assert((MI->getNumOperands() <= 3 ||
- !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
- MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
- && "Expected last argument to be EFLAGS");
- unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
+ assert((MI.getNumOperands() <= 3 ||
+ !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
+ MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
+ "Expected last argument to be EFLAGS");
+ unsigned MOVOpc = Subtarget.hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
// In the XMM save block, save all the XMM argument registers.
- for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
+ for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
MachineMemOperand *MMO = F->getMachineMemOperand(
MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
MachineMemOperand::MOStore,
/*Size=*/16, /*Align=*/16);
BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
- .addFrameIndex(RegSaveFrameIndex)
- .addImm(/*Scale=*/1)
- .addReg(/*IndexReg=*/0)
- .addImm(/*Disp=*/Offset)
- .addReg(/*Segment=*/0)
- .addReg(MI->getOperand(i).getReg())
- .addMemOperand(MMO);
+ .addFrameIndex(RegSaveFrameIndex)
+ .addImm(/*Scale=*/1)
+ .addReg(/*IndexReg=*/0)
+ .addImm(/*Disp=*/Offset)
+ .addReg(/*Segment=*/0)
+ .addReg(MI.getOperand(i).getReg())
+ .addMemOperand(MMO);
}
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return EndMBB;
}
@@ -21684,8 +23104,8 @@ static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
// together with other CMOV pseudo-opcodes into a single basic-block with
// conditional jump around it.
-static bool isCMOVPseudo(MachineInstr *MI) {
- switch (MI->getOpcode()) {
+static bool isCMOVPseudo(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
case X86::CMOV_FR32:
case X86::CMOV_FR64:
case X86::CMOV_GR8:
@@ -21715,10 +23135,10 @@ static bool isCMOVPseudo(MachineInstr *MI) {
}
MachineBasicBlock *
-X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
+X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
// To "insert" a SELECT_CC instruction, we actually have to insert the
// diamond control-flow pattern. The incoming instruction knows the
@@ -21837,8 +23257,8 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
// retq
//
MachineInstr *CascadedCMOV = nullptr;
- MachineInstr *LastCMOV = MI;
- X86::CondCode CC = X86::CondCode(MI->getOperand(3).getImm());
+ MachineInstr *LastCMOV = &MI;
+ X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
MachineBasicBlock::iterator NextMIIt =
std::next(MachineBasicBlock::iterator(MI));
@@ -21849,8 +23269,7 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
if (isCMOVPseudo(MI)) {
// See if we have a string of CMOVS with the same condition.
- while (NextMIIt != BB->end() &&
- isCMOVPseudo(NextMIIt) &&
+ while (NextMIIt != BB->end() && isCMOVPseudo(*NextMIIt) &&
(NextMIIt->getOperand(3).getImm() == CC ||
NextMIIt->getOperand(3).getImm() == OppCC)) {
LastCMOV = &*NextMIIt;
@@ -21860,10 +23279,10 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
// This checks for case 2, but only does so if we didn't already find
// case 1, as indicated by LastCMOV == MI.
- if (LastCMOV == MI &&
- NextMIIt != BB->end() && NextMIIt->getOpcode() == MI->getOpcode() &&
- NextMIIt->getOperand(2).getReg() == MI->getOperand(2).getReg() &&
- NextMIIt->getOperand(1).getReg() == MI->getOperand(0).getReg() &&
+ if (LastCMOV == &MI && NextMIIt != BB->end() &&
+ NextMIIt->getOpcode() == MI.getOpcode() &&
+ NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
+ NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
NextMIIt->getOperand(1).isKill()) {
CascadedCMOV = &*NextMIIt;
}
@@ -21885,7 +23304,7 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
// If the EFLAGS register isn't dead in the terminator, then claim that it's
// live into the sink and copy blocks.
- const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
+ const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
MachineInstr *LastEFLAGSUser = CascadedCMOV ? CascadedCMOV : LastCMOV;
if (!LastEFLAGSUser->killsRegister(X86::EFLAGS) &&
@@ -21976,12 +23395,12 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
// If we have a cascaded CMOV, the second Jcc provides the same incoming
// value as the first Jcc (the True operand of the SELECT_CC/CMOV nodes).
if (CascadedCMOV) {
- MIB.addReg(MI->getOperand(2).getReg()).addMBB(jcc1MBB);
+ MIB.addReg(MI.getOperand(2).getReg()).addMBB(jcc1MBB);
// Copy the PHI result to the register defined by the second CMOV.
BuildMI(*sinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())),
DL, TII->get(TargetOpcode::COPY),
CascadedCMOV->getOperand(0).getReg())
- .addReg(MI->getOperand(0).getReg());
+ .addReg(MI.getOperand(0).getReg());
CascadedCMOV->eraseFromParent();
}
@@ -21993,7 +23412,7 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
}
MachineBasicBlock *
-X86TargetLowering::EmitLoweredAtomicFP(MachineInstr *MI,
+X86TargetLowering::EmitLoweredAtomicFP(MachineInstr &MI,
MachineBasicBlock *BB) const {
// Combine the following atomic floating-point modification pattern:
// a.store(reg OP a.load(acquire), release)
@@ -22002,52 +23421,55 @@ X86TargetLowering::EmitLoweredAtomicFP(MachineInstr *MI,
// movss %xmm, (%gpr)
// Or sd equivalent for 64-bit operations.
unsigned MOp, FOp;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("unexpected instr type for EmitLoweredAtomicFP");
- case X86::RELEASE_FADD32mr: MOp = X86::MOVSSmr; FOp = X86::ADDSSrm; break;
- case X86::RELEASE_FADD64mr: MOp = X86::MOVSDmr; FOp = X86::ADDSDrm; break;
+ case X86::RELEASE_FADD32mr:
+ FOp = X86::ADDSSrm;
+ MOp = X86::MOVSSmr;
+ break;
+ case X86::RELEASE_FADD64mr:
+ FOp = X86::ADDSDrm;
+ MOp = X86::MOVSDmr;
+ break;
}
- const X86InstrInfo *TII = Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const X86InstrInfo *TII = Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
- MachineOperand MSrc = MI->getOperand(0);
- unsigned VSrc = MI->getOperand(5).getReg();
- const MachineOperand &Disp = MI->getOperand(3);
- MachineOperand ZeroDisp = MachineOperand::CreateImm(0);
- bool hasDisp = Disp.isGlobal() || Disp.isImm();
- if (hasDisp && MSrc.isReg())
- MSrc.setIsKill(false);
- MachineInstrBuilder MIM = BuildMI(*BB, MI, DL, TII->get(MOp))
- .addOperand(/*Base=*/MSrc)
- .addImm(/*Scale=*/1)
- .addReg(/*Index=*/0)
- .addDisp(hasDisp ? Disp : ZeroDisp, /*off=*/0)
- .addReg(0);
- MachineInstr *MIO = BuildMI(*BB, (MachineInstr *)MIM, DL, TII->get(FOp),
- MRI.createVirtualRegister(MRI.getRegClass(VSrc)))
- .addReg(VSrc)
- .addOperand(/*Base=*/MSrc)
- .addImm(/*Scale=*/1)
- .addReg(/*Index=*/0)
- .addDisp(hasDisp ? Disp : ZeroDisp, /*off=*/0)
- .addReg(/*Segment=*/0);
- MIM.addReg(MIO->getOperand(0).getReg(), RegState::Kill);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ unsigned ValOpIdx = X86::AddrNumOperands;
+ unsigned VSrc = MI.getOperand(ValOpIdx).getReg();
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, MI, DL, TII->get(FOp),
+ MRI.createVirtualRegister(MRI.getRegClass(VSrc)))
+ .addReg(VSrc);
+ for (int i = 0; i < X86::AddrNumOperands; ++i) {
+ MachineOperand &Operand = MI.getOperand(i);
+ // Clear any kill flags on register operands as we'll create a second
+ // instruction using the same address operands.
+ if (Operand.isReg())
+ Operand.setIsKill(false);
+ MIB.addOperand(Operand);
+ }
+ MachineInstr *FOpMI = MIB;
+ MIB = BuildMI(*BB, MI, DL, TII->get(MOp));
+ for (int i = 0; i < X86::AddrNumOperands; ++i)
+ MIB.addOperand(MI.getOperand(i));
+ MIB.addReg(FOpMI->getOperand(0).getReg(), RegState::Kill);
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
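// The source pattern EmitLoweredAtomicFP recognizes, at the C++ level: an
// atomic float updated as store(load(acquire) OP v, release), which on x86
// can become movss; addss; movss because plain aligned loads and stores
// already carry acquire/release semantics there; a sketch of the pattern.
#include <atomic>
static inline void AtomicFAddReleaseStore(std::atomic<float> &A, float V) {
  A.store(A.load(std::memory_order_acquire) + V, std::memory_order_release);
}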
MachineBasicBlock *
-X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
+X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
assert(MF->shouldSplitStack());
- const bool Is64Bit = Subtarget->is64Bit();
- const bool IsLP64 = Subtarget->isTarget64BitLP64();
+ const bool Is64Bit = Subtarget.is64Bit();
+ const bool IsLP64 = Subtarget.isTarget64BitLP64();
const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
@@ -22077,11 +23499,12 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
getRegClassFor(getPointerTy(MF->getDataLayout()));
unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
- bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
- tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
- SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
- sizeVReg = MI->getOperand(1).getReg(),
- physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
+ bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
+ tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
+ SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
+ sizeVReg = MI.getOperand(1).getReg(),
+ physSPReg =
+ IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
MachineFunction::iterator MBBIter = ++BB->getIterator();
@@ -22113,7 +23536,7 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
// Calls into a routine in libgcc to allocate more space from the heap.
const uint32_t *RegMask =
- Subtarget->getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
+ Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
if (IsLP64) {
BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
.addReg(sizeVReg);
@@ -22156,43 +23579,33 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
// Take care of the PHI nodes.
BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
- MI->getOperand(0).getReg())
- .addReg(mallocPtrVReg).addMBB(mallocMBB)
- .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
+ MI.getOperand(0).getReg())
+ .addReg(mallocPtrVReg)
+ .addMBB(mallocMBB)
+ .addReg(bumpSPPtrVReg)
+ .addMBB(bumpMBB);
// Delete the original pseudo instruction.
- MI->eraseFromParent();
+ MI.eraseFromParent();
// And we're done.
return continueMBB;
}
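
// In outline (a sketch): the lowering computes a tentative new SP
// (SP - size), compares it against the stack limit kept in the TLS slot
// (FS:0x70 for LP64, FS:0x40 for x32, GS:0x30 for 32-bit), and either bumps
// SP directly in bumpMBB or calls into the libgcc allocation routine in
// mallocMBB; the PHI above merges the two resulting pointers.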
MachineBasicBlock *
-X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
- MachineBasicBlock *BB) const {
- assert(!Subtarget->isTargetMachO());
- DebugLoc DL = MI->getDebugLoc();
- MachineInstr *ResumeMI = Subtarget->getFrameLowering()->emitStackProbe(
- *BB->getParent(), *BB, MI, DL, false);
- MachineBasicBlock *ResumeBB = ResumeMI->getParent();
- MI->eraseFromParent(); // The pseudo instruction is gone now.
- return ResumeBB;
-}
-
-MachineBasicBlock *
-X86TargetLowering::EmitLoweredCatchRet(MachineInstr *MI,
+X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
- const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
- MachineBasicBlock *TargetMBB = MI->getOperand(0).getMBB();
- DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
+ MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
+ DebugLoc DL = MI.getDebugLoc();
assert(!isAsynchronousEHPersonality(
classifyEHPersonality(MF->getFunction()->getPersonalityFn())) &&
"SEH does not use catchret!");
// Only 32-bit EH needs to worry about manually restoring stack pointers.
- if (!Subtarget->is32Bit())
+ if (!Subtarget.is32Bit())
return BB;
// C++ EH creates a new target block to hold the restore code, and wires up
@@ -22203,7 +23616,7 @@ X86TargetLowering::EmitLoweredCatchRet(MachineInstr *MI,
MF->insert(std::next(BB->getIterator()), RestoreMBB);
RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(RestoreMBB);
- MI->getOperand(0).setMBB(RestoreMBB);
+ MI.getOperand(0).setMBB(RestoreMBB);
auto RestoreMBBI = RestoreMBB->begin();
BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
@@ -22212,37 +23625,37 @@ X86TargetLowering::EmitLoweredCatchRet(MachineInstr *MI,
}
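
// A sketch of the effect: CATCHRET to %TargetMBB is redirected through a
// fresh RestoreMBB that runs EH_RESTORE (re-establishing the stack pointer
// from the frame) before control reaches the original target; 64-bit
// targets return early above because their funclets keep SP valid.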
MachineBasicBlock *
-X86TargetLowering::EmitLoweredCatchPad(MachineInstr *MI,
+X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
const Constant *PerFn = MF->getFunction()->getPersonalityFn();
bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
// Only 32-bit SEH requires special handling for catchpad.
- if (IsSEH && Subtarget->is32Bit()) {
- const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ if (IsSEH && Subtarget.is32Bit()) {
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
}
- MI->eraseFromParent();
+ MI.eraseFromParent();
return BB;
}
MachineBasicBlock *
-X86TargetLowering::EmitLoweredTLSAddr(MachineInstr *MI,
+X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
MachineBasicBlock *BB) const {
// So, here we replace TLSADDR with the sequence:
// adjust_stackdown -> TLSADDR -> adjust_stackup.
  // We need this because TLSADDR is lowered into calls
  // inside MC; therefore, without the two markers, shrink-wrapping
  // may push the prologue/epilogue past them.
- const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
MachineFunction &MF = *BB->getParent();
// Emit CALLSEQ_START right before the instruction.
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
MachineInstrBuilder CallseqStart =
- BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0);
+ BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0);
BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
// Emit CALLSEQ_END right after the instruction.
@@ -22257,86 +23670,89 @@ X86TargetLowering::EmitLoweredTLSAddr(MachineInstr *MI,
}
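
// The resulting sequence is, schematically (using the pseudo names from the
// comment above):
//   adjust_stackdown 0, 0
//   TLSADDR ...            // becomes a real call during MC lowering
//   adjust_stackup
// so shrink-wrapping sees proper call-frame markers around the hidden call.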
MachineBasicBlock *
-X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
+X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
MachineBasicBlock *BB) const {
// This is pretty easy. We're taking the value that we received from
// our load from the relocation, sticking it in either RDI (x86-64)
// or EAX and doing an indirect call. The return value will then
// be in the normal return register.
MachineFunction *F = BB->getParent();
- const X86InstrInfo *TII = Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const X86InstrInfo *TII = Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
- assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
- assert(MI->getOperand(3).isGlobal() && "This should be a global");
+ assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
+ assert(MI.getOperand(3).isGlobal() && "This should be a global");
// Get a register mask for the lowered call.
// FIXME: The 32-bit calls have non-standard calling conventions. Use a
// proper register mask.
const uint32_t *RegMask =
- Subtarget->is64Bit() ?
- Subtarget->getRegisterInfo()->getDarwinTLSCallPreservedMask() :
- Subtarget->getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
- if (Subtarget->is64Bit()) {
- MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
- TII->get(X86::MOV64rm), X86::RDI)
- .addReg(X86::RIP)
- .addImm(0).addReg(0)
- .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
- MI->getOperand(3).getTargetFlags())
- .addReg(0);
+ Subtarget.is64Bit() ?
+ Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
+ Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
+ if (Subtarget.is64Bit()) {
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
+ .addReg(X86::RIP)
+ .addImm(0)
+ .addReg(0)
+ .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
+ MI.getOperand(3).getTargetFlags())
+ .addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
addDirectMem(MIB, X86::RDI);
MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
- } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
- MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
- TII->get(X86::MOV32rm), X86::EAX)
- .addReg(0)
- .addImm(0).addReg(0)
- .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
- MI->getOperand(3).getTargetFlags())
- .addReg(0);
+ } else if (!isPositionIndependent()) {
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
+ .addReg(0)
+ .addImm(0)
+ .addReg(0)
+ .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
+ MI.getOperand(3).getTargetFlags())
+ .addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
addDirectMem(MIB, X86::EAX);
MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
} else {
- MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
- TII->get(X86::MOV32rm), X86::EAX)
- .addReg(TII->getGlobalBaseReg(F))
- .addImm(0).addReg(0)
- .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
- MI->getOperand(3).getTargetFlags())
- .addReg(0);
+ MachineInstrBuilder MIB =
+ BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
+ .addReg(TII->getGlobalBaseReg(F))
+ .addImm(0)
+ .addReg(0)
+ .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
+ MI.getOperand(3).getTargetFlags())
+ .addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
addDirectMem(MIB, X86::EAX);
MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
}
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
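
// On x86-64 Darwin the lowering above is, roughly (a sketch in assembly):
//   movq _var@TLVP(%rip), %rdi
//   callq *(%rdi)            ; thread-local address returned in %rax
// The two 32-bit paths differ only in how the TLVP address is materialized
// (absolute vs. picbase-relative, per the non-PIC/PIC branches above).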
MachineBasicBlock *
-X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
+X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const {
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
const BasicBlock *BB = MBB->getBasicBlock();
MachineFunction::iterator I = ++MBB->getIterator();
// Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
+ MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
+ MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
unsigned DstReg;
unsigned MemOpndSlot = 0;
unsigned CurOp = 0;
- DstReg = MI->getOperand(CurOp++).getReg();
+ DstReg = MI.getOperand(CurOp++).getReg();
const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
assert(RC->hasType(MVT::i32) && "Invalid destination!");
unsigned mainDstReg = MRI.createVirtualRegister(RC);
@@ -22384,16 +23800,15 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
unsigned PtrStoreOpc = 0;
unsigned LabelReg = 0;
const int64_t LabelOffset = 1 * PVT.getStoreSize();
- Reloc::Model RM = MF->getTarget().getRelocationModel();
bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
- (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
+ !isPositionIndependent();
// Prepare IP either in reg or imm.
if (!UseImmLabel) {
PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
LabelReg = MRI.createVirtualRegister(PtrRC);
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
.addReg(X86::RIP)
.addImm(0)
@@ -22406,7 +23821,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
.addReg(XII->getGlobalBaseReg(MF))
.addImm(0)
.addReg(0)
- .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
+ .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
.addReg(0);
}
} else
@@ -22415,9 +23830,9 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
if (i == X86::AddrDisp)
- MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
+ MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
else
- MIB.addOperand(MI->getOperand(MemOpndSlot + i));
+ MIB.addOperand(MI.getOperand(MemOpndSlot + i));
}
if (!UseImmLabel)
MIB.addReg(LabelReg);
@@ -22428,7 +23843,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
.addMBB(restoreMBB);
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
MIB.addRegMask(RegInfo->getNoPreservedMask());
thisMBB->addSuccessor(mainMBB);
thisMBB->addSuccessor(restoreMBB);
@@ -22447,7 +23862,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
// restoreMBB:
if (RegInfo->hasBasePointer(*MF)) {
const bool Uses64BitFramePtr =
- Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
+ Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
X86FI->setRestoreBasePointer(MF);
unsigned FramePtr = RegInfo->getFrameRegister(*MF);
@@ -22461,21 +23876,21 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
restoreMBB->addSuccessor(sinkMBB);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return sinkMBB;
}
MachineBasicBlock *
-X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
+X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const {
- DebugLoc DL = MI->getDebugLoc();
+ DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
// Memory Reference
- MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
- MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
+ MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
+ MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) &&
@@ -22485,7 +23900,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
(PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
unsigned Tmp = MRI.createVirtualRegister(RC);
// Since FP is only updated here but NOT referenced, it's treated as GPR.
- const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
unsigned SP = RegInfo->getStackRegister();
@@ -22500,41 +23915,275 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
// Reload FP
MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
- MIB.addOperand(MI->getOperand(i));
+ MIB.addOperand(MI.getOperand(i));
MIB.setMemRefs(MMOBegin, MMOEnd);
// Reload IP
MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
if (i == X86::AddrDisp)
- MIB.addDisp(MI->getOperand(i), LabelOffset);
+ MIB.addDisp(MI.getOperand(i), LabelOffset);
else
- MIB.addOperand(MI->getOperand(i));
+ MIB.addOperand(MI.getOperand(i));
}
MIB.setMemRefs(MMOBegin, MMOEnd);
// Reload SP
MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
if (i == X86::AddrDisp)
- MIB.addDisp(MI->getOperand(i), SPOffset);
+ MIB.addDisp(MI.getOperand(i), SPOffset);
else
- MIB.addOperand(MI->getOperand(i));
+ MIB.addOperand(MI.getOperand(i));
}
MIB.setMemRefs(MMOBegin, MMOEnd);
// Jump
BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
- MI->eraseFromParent();
+ MI.eraseFromParent();
return MBB;
}
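
// Net effect of the longjmp expansion (a sketch):
//   FP  <- [buf]               ; reload frame pointer
//   tmp <- [buf + LabelOffset] ; reload saved IP
//   SP  <- [buf + SPOffset]    ; reload stack pointer
//   jmp *tmp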
+void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
+ MachineBasicBlock *MBB,
+ MachineBasicBlock *DispatchBB,
+ int FI) const {
+ DebugLoc DL = MI.getDebugLoc();
+ MachineFunction *MF = MBB->getParent();
+ MachineRegisterInfo *MRI = &MF->getRegInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+
+ MVT PVT = getPointerTy(MF->getDataLayout());
+ assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
+
+ unsigned Op = 0;
+ unsigned VR = 0;
+
+ bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
+ !isPositionIndependent();
+
+ if (UseImmLabel) {
+ Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
+ } else {
+ const TargetRegisterClass *TRC =
+ (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
+ VR = MRI->createVirtualRegister(TRC);
+ Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
+
+ /* const X86InstrInfo *XII = static_cast<const X86InstrInfo *>(TII); */
+
+ if (Subtarget.is64Bit())
+ BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
+ .addReg(X86::RIP)
+ .addImm(1)
+ .addReg(0)
+ .addMBB(DispatchBB)
+ .addReg(0);
+ else
+ BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
+ .addReg(0) /* XII->getGlobalBaseReg(MF) */
+ .addImm(1)
+ .addReg(0)
+ .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
+ .addReg(0);
+ }
+
+ MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
+ addFrameReference(MIB, FI, 36);
+ if (UseImmLabel)
+ MIB.addMBB(DispatchBB);
+ else
+ MIB.addReg(VR);
+}
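+
+// A sketch of the store this helper emits (immediate-label case):
+//   MOV32mi/MOV64mi32 [function-context FI + 36], @DispatchBB
+// i.e. the dispatch block's address is written into the return slot of the
+// SjLj function context, so the runtime's longjmp lands in DispatchBB.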
+
+MachineBasicBlock *
+X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
+ MachineBasicBlock *BB) const {
+ DebugLoc DL = MI.getDebugLoc();
+ MachineFunction *MF = BB->getParent();
+ MachineModuleInfo *MMI = &MF->getMMI();
+ MachineFrameInfo *MFI = MF->getFrameInfo();
+ MachineRegisterInfo *MRI = &MF->getRegInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+ int FI = MFI->getFunctionContextIndex();
+
+ // Get a mapping of the call site numbers to all of the landing pads they're
+ // associated with.
+ DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
+ unsigned MaxCSNum = 0;
+ for (auto &MBB : *MF) {
+ if (!MBB.isEHPad())
+ continue;
+
+ MCSymbol *Sym = nullptr;
+ for (const auto &MI : MBB) {
+ if (MI.isDebugValue())
+ continue;
+
+ assert(MI.isEHLabel() && "expected EH_LABEL");
+ Sym = MI.getOperand(0).getMCSymbol();
+ break;
+ }
+
+ if (!MMI->hasCallSiteLandingPad(Sym))
+ continue;
+
+ for (unsigned CSI : MMI->getCallSiteLandingPad(Sym)) {
+ CallSiteNumToLPad[CSI].push_back(&MBB);
+ MaxCSNum = std::max(MaxCSNum, CSI);
+ }
+ }
+
+ // Get an ordered list of the machine basic blocks for the jump table.
+ std::vector<MachineBasicBlock *> LPadList;
+ SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
+ LPadList.reserve(CallSiteNumToLPad.size());
+
+ for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
+ for (auto &LP : CallSiteNumToLPad[CSI]) {
+ LPadList.push_back(LP);
+ InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
+ }
+ }
+
+ assert(!LPadList.empty() &&
+ "No landing pad destinations for the dispatch jump table!");
+
+ // Create the MBBs for the dispatch code.
+
+ // Shove the dispatch's address into the return slot in the function context.
+ MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
+ DispatchBB->setIsEHPad(true);
+
+ MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
+ BuildMI(TrapBB, DL, TII->get(X86::TRAP));
+ DispatchBB->addSuccessor(TrapBB);
+
+ MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
+ DispatchBB->addSuccessor(DispContBB);
+
+ // Insert MBBs.
+ MF->push_back(DispatchBB);
+ MF->push_back(DispContBB);
+ MF->push_back(TrapBB);
+
+ // Insert code into the entry block that creates and registers the function
+ // context.
+ SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
+
+ // Create the jump table and associated information
+ MachineJumpTableInfo *JTI =
+ MF->getOrCreateJumpTableInfo(getJumpTableEncoding());
+ unsigned MJTI = JTI->createJumpTableIndex(LPadList);
+
+ const X86InstrInfo *XII = static_cast<const X86InstrInfo *>(TII);
+ const X86RegisterInfo &RI = XII->getRegisterInfo();
+
+ // Add a register mask with no preserved registers. This results in all
+ // registers being marked as clobbered.
+ if (RI.hasBasePointer(*MF)) {
+ const bool FPIs64Bit =
+ Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
+ X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
+ MFI->setRestoreBasePointer(MF);
+
+ unsigned FP = RI.getFrameRegister(*MF);
+ unsigned BP = RI.getBaseRegister();
+ unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
+ addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
+ MFI->getRestoreBasePointerOffset())
+ .addRegMask(RI.getNoPreservedMask());
+ } else {
+ BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
+ .addRegMask(RI.getNoPreservedMask());
+ }
+
+ unsigned IReg = MRI->createVirtualRegister(&X86::GR32RegClass);
+ addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
+ 4);
+ BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
+ .addReg(IReg)
+ .addImm(LPadList.size());
+ BuildMI(DispatchBB, DL, TII->get(X86::JA_1)).addMBB(TrapBB);
+
+ unsigned JReg = MRI->createVirtualRegister(&X86::GR32RegClass);
+ BuildMI(DispContBB, DL, TII->get(X86::SUB32ri), JReg)
+ .addReg(IReg)
+ .addImm(1);
+ BuildMI(DispContBB, DL,
+ TII->get(Subtarget.is64Bit() ? X86::JMP64m : X86::JMP32m))
+ .addReg(0)
+ .addImm(Subtarget.is64Bit() ? 8 : 4)
+ .addReg(JReg)
+ .addJumpTableIndex(MJTI)
+ .addReg(0);
+
+ // Add the jump table entries as successors to the MBB.
+ SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
+ for (auto &LP : LPadList)
+ if (SeenMBBs.insert(LP).second)
+ DispContBB->addSuccessor(LP);
+
+  // N.B.: the order in which the invoke BBs are processed doesn't matter
+  // here.
+ SmallVector<MachineBasicBlock *, 64> MBBLPads;
+ const MCPhysReg *SavedRegs =
+ Subtarget.getRegisterInfo()->getCalleeSavedRegs(MF);
+ for (MachineBasicBlock *MBB : InvokeBBs) {
+ // Remove the landing pad successor from the invoke block and replace it
+ // with the new dispatch block.
+ // Keep a copy of Successors since it's modified inside the loop.
+ SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
+ MBB->succ_rend());
+ // FIXME: Avoid quadratic complexity.
+ for (auto MBBS : Successors) {
+ if (MBBS->isEHPad()) {
+ MBB->removeSuccessor(MBBS);
+ MBBLPads.push_back(MBBS);
+ }
+ }
+
+ MBB->addSuccessor(DispatchBB);
+
+    // Find the invoke call and mark all of the callee-saved registers as
+    // 'implicit defined' so that they're spilled. This prevents later
+    // passes from moving instructions above the EH block, where they would
+    // never be executed.
+ for (auto &II : reverse(*MBB)) {
+ if (!II.isCall())
+ continue;
+
+ DenseMap<unsigned, bool> DefRegs;
+ for (auto &MOp : II.operands())
+ if (MOp.isReg())
+ DefRegs[MOp.getReg()] = true;
+
+ MachineInstrBuilder MIB(*MF, &II);
+ for (unsigned RI = 0; SavedRegs[RI]; ++RI) {
+ unsigned Reg = SavedRegs[RI];
+ if (!DefRegs[Reg])
+ MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
+ }
+
+ break;
+ }
+ }
+
+ // Mark all former landing pads as non-landing pads. The dispatch is the only
+ // landing pad now.
+ for (auto &LP : MBBLPads)
+ LP->setIsEHPad(false);
+
+ // The instruction is gone now.
+ MI.eraseFromParent();
+ return BB;
+}
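+
+// The dispatch code built above is roughly equivalent to (a sketch):
+//   unsigned CS = *(unsigned *)(FunctionContext + 4); // call-site index
+//   if (CS > NumLPads) trap();
+//   goto *JumpTable[CS - 1];
+// with every invoke block rewired to target DispatchBB instead of its old
+// landing pad.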
+
// Replace 213-type (isel default) FMA3 instructions with 231-type for
// accumulator loops. Writing back to the accumulator allows the coalescer
// to remove extra copies in the loop.
// FIXME: Do this on AVX512. We don't support 231 variants yet (PR23937).
MachineBasicBlock *
-X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
+X86TargetLowering::emitFMA3Instr(MachineInstr &MI,
MachineBasicBlock *MBB) const {
- MachineOperand &AddendOp = MI->getOperand(3);
+ MachineOperand &AddendOp = MI.getOperand(3);
// Bail out early if the addend isn't a register - we can't switch these.
if (!AddendOp.isReg())
@@ -22565,55 +24214,120 @@ X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
assert(AddendDef.getOperand(i).isReg());
MachineOperand PHISrcOp = AddendDef.getOperand(i);
MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
- if (&PHISrcInst == MI) {
+ if (&PHISrcInst == &MI) {
// Found a matching instruction.
unsigned NewFMAOpc = 0;
- switch (MI->getOpcode()) {
- case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
- case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
- case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
- case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
- case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
- case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
- case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
- case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
- case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
- case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
- case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
- case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
- case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
- case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
- case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
- case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
- case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
- case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
- case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
- case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
-
- case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
- case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
- case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
- case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
- case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
- case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
- case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
- case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
- case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
- case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
- case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
- case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
- default: llvm_unreachable("Unrecognized FMA variant.");
+ switch (MI.getOpcode()) {
+ case X86::VFMADDPDr213r:
+ NewFMAOpc = X86::VFMADDPDr231r;
+ break;
+ case X86::VFMADDPSr213r:
+ NewFMAOpc = X86::VFMADDPSr231r;
+ break;
+ case X86::VFMADDSDr213r:
+ NewFMAOpc = X86::VFMADDSDr231r;
+ break;
+ case X86::VFMADDSSr213r:
+ NewFMAOpc = X86::VFMADDSSr231r;
+ break;
+ case X86::VFMSUBPDr213r:
+ NewFMAOpc = X86::VFMSUBPDr231r;
+ break;
+ case X86::VFMSUBPSr213r:
+ NewFMAOpc = X86::VFMSUBPSr231r;
+ break;
+ case X86::VFMSUBSDr213r:
+ NewFMAOpc = X86::VFMSUBSDr231r;
+ break;
+ case X86::VFMSUBSSr213r:
+ NewFMAOpc = X86::VFMSUBSSr231r;
+ break;
+ case X86::VFNMADDPDr213r:
+ NewFMAOpc = X86::VFNMADDPDr231r;
+ break;
+ case X86::VFNMADDPSr213r:
+ NewFMAOpc = X86::VFNMADDPSr231r;
+ break;
+ case X86::VFNMADDSDr213r:
+ NewFMAOpc = X86::VFNMADDSDr231r;
+ break;
+ case X86::VFNMADDSSr213r:
+ NewFMAOpc = X86::VFNMADDSSr231r;
+ break;
+ case X86::VFNMSUBPDr213r:
+ NewFMAOpc = X86::VFNMSUBPDr231r;
+ break;
+ case X86::VFNMSUBPSr213r:
+ NewFMAOpc = X86::VFNMSUBPSr231r;
+ break;
+ case X86::VFNMSUBSDr213r:
+ NewFMAOpc = X86::VFNMSUBSDr231r;
+ break;
+ case X86::VFNMSUBSSr213r:
+ NewFMAOpc = X86::VFNMSUBSSr231r;
+ break;
+ case X86::VFMADDSUBPDr213r:
+ NewFMAOpc = X86::VFMADDSUBPDr231r;
+ break;
+ case X86::VFMADDSUBPSr213r:
+ NewFMAOpc = X86::VFMADDSUBPSr231r;
+ break;
+ case X86::VFMSUBADDPDr213r:
+ NewFMAOpc = X86::VFMSUBADDPDr231r;
+ break;
+ case X86::VFMSUBADDPSr213r:
+ NewFMAOpc = X86::VFMSUBADDPSr231r;
+ break;
+
+ case X86::VFMADDPDr213rY:
+ NewFMAOpc = X86::VFMADDPDr231rY;
+ break;
+ case X86::VFMADDPSr213rY:
+ NewFMAOpc = X86::VFMADDPSr231rY;
+ break;
+ case X86::VFMSUBPDr213rY:
+ NewFMAOpc = X86::VFMSUBPDr231rY;
+ break;
+ case X86::VFMSUBPSr213rY:
+ NewFMAOpc = X86::VFMSUBPSr231rY;
+ break;
+ case X86::VFNMADDPDr213rY:
+ NewFMAOpc = X86::VFNMADDPDr231rY;
+ break;
+ case X86::VFNMADDPSr213rY:
+ NewFMAOpc = X86::VFNMADDPSr231rY;
+ break;
+ case X86::VFNMSUBPDr213rY:
+ NewFMAOpc = X86::VFNMSUBPDr231rY;
+ break;
+ case X86::VFNMSUBPSr213rY:
+ NewFMAOpc = X86::VFNMSUBPSr231rY;
+ break;
+ case X86::VFMADDSUBPDr213rY:
+ NewFMAOpc = X86::VFMADDSUBPDr231rY;
+ break;
+ case X86::VFMADDSUBPSr213rY:
+ NewFMAOpc = X86::VFMADDSUBPSr231rY;
+ break;
+ case X86::VFMSUBADDPDr213rY:
+ NewFMAOpc = X86::VFMSUBADDPDr231rY;
+ break;
+ case X86::VFMSUBADDPSr213rY:
+ NewFMAOpc = X86::VFMSUBADDPSr231rY;
+ break;
+ default:
+ llvm_unreachable("Unrecognized FMA variant.");
}
- const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
MachineInstrBuilder MIB =
- BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
- .addOperand(MI->getOperand(0))
- .addOperand(MI->getOperand(3))
- .addOperand(MI->getOperand(2))
- .addOperand(MI->getOperand(1));
+ BuildMI(MF, MI.getDebugLoc(), TII.get(NewFMAOpc))
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(3))
+ .addOperand(MI.getOperand(2))
+ .addOperand(MI.getOperand(1));
MBB->insert(MachineBasicBlock::iterator(MI), MIB);
- MI->eraseFromParent();
+ MI.eraseFromParent();
}
}
@@ -22621,9 +24335,9 @@ X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
}
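
// For illustration: in an accumulator loop computing acc += x * y, the 213
// form ties its destination to a multiplicand, forcing a loop-carried copy
// of acc; the 231 form ties the destination to the addend, so the coalescer
// can keep acc in one register and the copy disappears (a sketch of the
// rationale, not of the exact operand constraints).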
MachineBasicBlock *
-X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("Unexpected instr type to insert");
case X86::TAILJMPd64:
case X86::TAILJMPr64:
@@ -22641,8 +24355,6 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::TLS_base_addr32:
case X86::TLS_base_addr64:
return EmitLoweredTLSAddr(MI, BB);
- case X86::WIN_ALLOCA:
- return EmitLoweredWinAlloca(MI, BB);
case X86::CATCHRET:
return EmitLoweredCatchRet(MI, BB);
case X86::CATCHPAD:
@@ -22679,31 +24391,35 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::RDFLAGS32:
case X86::RDFLAGS64: {
- DebugLoc DL = MI->getDebugLoc();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
unsigned PushF =
- MI->getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
- unsigned Pop =
- MI->getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
- BuildMI(*BB, MI, DL, TII->get(PushF));
- BuildMI(*BB, MI, DL, TII->get(Pop), MI->getOperand(0).getReg());
-
- MI->eraseFromParent(); // The pseudo is gone now.
+ MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
+ unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
+ MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
+ // Permit reads of the FLAGS register without it being defined.
+ // This intrinsic exists to read external processor state in flags, such as
+ // the trap flag, interrupt flag, and direction flag, none of which are
+ // modeled by the backend.
+ Push->getOperand(2).setIsUndef();
+ BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
+
+ MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
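  // For reference, RDFLAGS64 therefore expands to (a sketch):
  //   pushfq      ; push RFLAGS; the read is marked undef, see above
  //   popq %dst
  // and WRFLAGS below is the mirror image: push %src, then popf.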
case X86::WRFLAGS32:
case X86::WRFLAGS64: {
- DebugLoc DL = MI->getDebugLoc();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
unsigned Push =
- MI->getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
+ MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
unsigned PopF =
- MI->getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
- BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI->getOperand(0).getReg());
+ MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
+ BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
BuildMI(*BB, MI, DL, TII->get(PopF));
- MI->eraseFromParent(); // The pseudo is gone now.
+ MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
@@ -22721,8 +24437,8 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::FP80_TO_INT32_IN_MEM:
case X86::FP80_TO_INT64_IN_MEM: {
MachineFunction *F = BB->getParent();
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
- DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
// Change the floating point control register to use "round towards zero"
// mode when truncating to an integer value.
@@ -22750,7 +24466,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Get the X86 opcode to use.
unsigned Opc;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("illegal opcode!");
case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
@@ -22763,35 +24479,15 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
}
- X86AddressMode AM;
- MachineOperand &Op = MI->getOperand(0);
- if (Op.isReg()) {
- AM.BaseType = X86AddressMode::RegBase;
- AM.Base.Reg = Op.getReg();
- } else {
- AM.BaseType = X86AddressMode::FrameIndexBase;
- AM.Base.FrameIndex = Op.getIndex();
- }
- Op = MI->getOperand(1);
- if (Op.isImm())
- AM.Scale = Op.getImm();
- Op = MI->getOperand(2);
- if (Op.isImm())
- AM.IndexReg = Op.getImm();
- Op = MI->getOperand(3);
- if (Op.isGlobal()) {
- AM.GV = Op.getGlobal();
- } else {
- AM.Disp = Op.getImm();
- }
+ X86AddressMode AM = getAddressFromInstr(&MI, 0);
addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
- .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
+ .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
// Reload the original control word now.
addFrameReference(BuildMI(*BB, MI, DL,
TII->get(X86::FLDCW16m)), CWFrameIdx);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
// String/text processing lowering.
@@ -22803,9 +24499,9 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::VPCMPESTRM128REG:
case X86::PCMPESTRM128MEM:
case X86::VPCMPESTRM128MEM:
- assert(Subtarget->hasSSE42() &&
+ assert(Subtarget.hasSSE42() &&
"Target must have SSE4.2 or AVX features enabled");
- return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
+ return emitPCMPSTRM(MI, BB, Subtarget.getInstrInfo());
// String/text processing lowering.
case X86::PCMPISTRIREG:
@@ -22816,21 +24512,23 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::VPCMPESTRIREG:
case X86::PCMPESTRIMEM:
case X86::VPCMPESTRIMEM:
- assert(Subtarget->hasSSE42() &&
+ assert(Subtarget.hasSSE42() &&
"Target must have SSE4.2 or AVX features enabled");
- return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
+ return emitPCMPSTRI(MI, BB, Subtarget.getInstrInfo());
// Thread synchronization.
case X86::MONITOR:
- return EmitMonitor(MI, BB, Subtarget);
+ return emitMonitor(MI, BB, Subtarget, X86::MONITORrrr);
+ case X86::MONITORX:
+ return emitMonitor(MI, BB, Subtarget, X86::MONITORXrrr);
// PKU feature
case X86::WRPKRU:
- return EmitWRPKRU(MI, BB, Subtarget);
+ return emitWRPKRU(MI, BB, Subtarget);
case X86::RDPKRU:
- return EmitRDPKRU(MI, BB, Subtarget);
+ return emitRDPKRU(MI, BB, Subtarget);
// xbegin
case X86::XBEGIN:
- return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
+ return emitXBegin(MI, BB, Subtarget.getInstrInfo());
case X86::VASTART_SAVE_XMM_REGS:
return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
@@ -22846,6 +24544,9 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::EH_SjLj_LongJmp64:
return emitEHSjLjLongJmp(MI, BB);
+ case X86::Int_eh_sjlj_setup_dispatch:
+ return EmitSjLjDispatchBlock(MI, BB);
+
case TargetOpcode::STATEPOINT:
// As an implementation detail, STATEPOINT shares the STACKMAP format at
// this point in the process. We diverge later.
@@ -22888,6 +24589,14 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::VFMSUBADDPDr213rY:
case X86::VFMSUBADDPSr213rY:
return emitFMA3Instr(MI, BB);
+ case X86::LCMPXCHG8B_SAVE_EBX:
+ case X86::LCMPXCHG16B_SAVE_RBX: {
+ unsigned BasePtr =
+ MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
+ if (!BB->isLiveIn(BasePtr))
+ BB->addLiveIn(BasePtr);
+ return BB;
+ }
}
}
@@ -22930,33 +24639,9 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
case X86ISD::SETCC:
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
break;
- case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- unsigned NumLoBits = 0;
- switch (IntId) {
- default: break;
- case Intrinsic::x86_sse_movmsk_ps:
- case Intrinsic::x86_avx_movmsk_ps_256:
- case Intrinsic::x86_sse2_movmsk_pd:
- case Intrinsic::x86_avx_movmsk_pd_256:
- case Intrinsic::x86_mmx_pmovmskb:
- case Intrinsic::x86_sse2_pmovmskb_128:
- case Intrinsic::x86_avx2_pmovmskb: {
- // High bits of movmskp{s|d}, pmovmskb are known zero.
- switch (IntId) {
- default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
- case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
- case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
- case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
- case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
- case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
- case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
- case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
- }
- KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
- break;
- }
- }
+ case X86ISD::MOVMSK: {
+ unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
+ KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
break;
}
}
@@ -22974,8 +24659,8 @@ unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
return 1;
}
-/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
-/// node is a GlobalAddress + offset.
+/// Returns true (and the GlobalValue and the offset) if the node is a
+/// GlobalAddress + offset.
bool X86TargetLowering::isGAPlusOffset(SDNode *N,
const GlobalValue* &GA,
int64_t &Offset) const {
@@ -22989,11 +24674,11 @@ bool X86TargetLowering::isGAPlusOffset(SDNode *N,
return TargetLowering::isGAPlusOffset(N, GA, Offset);
}
-/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
+/// Performs shuffle combines for 256-bit vectors.
/// FIXME: This could be expanded to support 512 bit vectors as well.
-static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget* Subtarget) {
+static SDValue combineShuffle256(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc dl(N);
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
SDValue V1 = SVOp->getOperand(0);
@@ -23014,8 +24699,7 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
// RESULT: V + zero extended
//
if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
- V2.getOperand(1).getOpcode() != ISD::UNDEF ||
- V1.getOperand(1).getOpcode() != ISD::UNDEF)
+ !V2.getOperand(1).isUndef() || !V1.getOperand(1).isUndef())
return SDValue();
if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
@@ -23060,195 +24744,556 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
// Emit a zeroed vector and insert the desired subvector on its
// first half.
SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
- SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
+ SDValue InsV = insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
return DCI.CombineTo(N, InsV);
}
return SDValue();
}
+// Attempt to match a combined shuffle mask against supported unary shuffle
+// instructions.
+// TODO: Investigate sharing more of this with shuffle lowering.
+static bool matchUnaryVectorShuffle(MVT SrcVT, ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget,
+ unsigned &Shuffle, MVT &ShuffleVT) {
+ bool FloatDomain = SrcVT.isFloatingPoint() ||
+ (!Subtarget.hasAVX2() && SrcVT.is256BitVector());
+
+ // Match a 128-bit integer vector against a VZEXT_MOVL (MOVQ) instruction.
+ if (!FloatDomain && SrcVT.is128BitVector() &&
+ isTargetShuffleEquivalent(Mask, {0, SM_SentinelZero})) {
+ Shuffle = X86ISD::VZEXT_MOVL;
+ ShuffleVT = MVT::v2i64;
+ return true;
+ }
+
+  // Check if we have SSE3, which lets us use MOVDDUP etc. These
+  // instructions are no slower than UNPCKLPD but have the option to
+  // fold the input operand into even an unaligned memory load.
+ if (SrcVT.is128BitVector() && Subtarget.hasSSE3() && FloatDomain) {
+ if (isTargetShuffleEquivalent(Mask, {0, 0})) {
+ Shuffle = X86ISD::MOVDDUP;
+ ShuffleVT = MVT::v2f64;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
+ Shuffle = X86ISD::MOVSLDUP;
+ ShuffleVT = MVT::v4f32;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
+ Shuffle = X86ISD::MOVSHDUP;
+ ShuffleVT = MVT::v4f32;
+ return true;
+ }
+ }
+
+ if (SrcVT.is256BitVector() && FloatDomain) {
+ assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
+ if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
+ Shuffle = X86ISD::MOVDDUP;
+ ShuffleVT = MVT::v4f64;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
+ Shuffle = X86ISD::MOVSLDUP;
+ ShuffleVT = MVT::v8f32;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
+ Shuffle = X86ISD::MOVSHDUP;
+ ShuffleVT = MVT::v8f32;
+ return true;
+ }
+ }
+
+ if (SrcVT.is512BitVector() && FloatDomain) {
+ assert(Subtarget.hasAVX512() &&
+ "AVX512 required for 512-bit vector shuffles");
+ if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
+ Shuffle = X86ISD::MOVDDUP;
+ ShuffleVT = MVT::v8f64;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(
+ Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
+ Shuffle = X86ISD::MOVSLDUP;
+ ShuffleVT = MVT::v16f32;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(
+ Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
+ Shuffle = X86ISD::MOVSHDUP;
+ ShuffleVT = MVT::v16f32;
+ return true;
+ }
+ }
+
+ // Attempt to match against broadcast-from-vector.
+ if (Subtarget.hasAVX2()) {
+ unsigned NumElts = Mask.size();
+ SmallVector<int, 64> BroadcastMask(NumElts, 0);
+ if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
+ unsigned EltSize = SrcVT.getSizeInBits() / NumElts;
+ ShuffleVT = FloatDomain ? MVT::getFloatingPointVT(EltSize)
+ : MVT::getIntegerVT(EltSize);
+ ShuffleVT = MVT::getVectorVT(ShuffleVT, NumElts);
+ Shuffle = X86ISD::VBROADCAST;
+ return true;
+ }
+ }
+
+ return false;
+}
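+
+// Example of the broadcast match above (a sketch): a v8f32 source with the
+// combined mask {0,0,0,0,0,0,0,0} gives EltSize = 256 / 8 = 32, so Shuffle
+// becomes X86ISD::VBROADCAST with ShuffleVT = v8f32 (typically a single
+// VBROADCASTSS).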
+
+// Attempt to match a combined shuffle mask against supported unary immediate
+// permute instructions.
+// TODO: Investigate sharing more of this with shuffle lowering.
+static bool matchPermuteVectorShuffle(MVT SrcVT, ArrayRef<int> Mask,
+ const X86Subtarget &Subtarget,
+ unsigned &Shuffle, MVT &ShuffleVT,
+ unsigned &PermuteImm) {
+ // Ensure we don't contain any zero elements.
+ for (int M : Mask) {
+ if (M == SM_SentinelZero)
+ return false;
+ assert(SM_SentinelUndef <= M && M < (int)Mask.size() &&
+ "Expected unary shuffle");
+ }
+
+ unsigned MaskScalarSizeInBits = SrcVT.getSizeInBits() / Mask.size();
+ MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
+
+ // Handle PSHUFLW/PSHUFHW repeated patterns.
+ if (MaskScalarSizeInBits == 16) {
+ SmallVector<int, 4> RepeatedMask;
+ if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
+ ArrayRef<int> LoMask(Mask.data() + 0, 4);
+ ArrayRef<int> HiMask(Mask.data() + 4, 4);
+
+ // PSHUFLW: permute lower 4 elements only.
+ if (isUndefOrInRange(LoMask, 0, 4) &&
+ isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
+ Shuffle = X86ISD::PSHUFLW;
+ ShuffleVT = MVT::getVectorVT(MVT::i16, SrcVT.getSizeInBits() / 16);
+ PermuteImm = getV4X86ShuffleImm(LoMask);
+ return true;
+ }
+
+ // PSHUFHW: permute upper 4 elements only.
+ if (isUndefOrInRange(HiMask, 4, 8) &&
+ isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
+ // Offset the HiMask so that we can create the shuffle immediate.
+ int OffsetHiMask[4];
+ for (int i = 0; i != 4; ++i)
+ OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
+
+ Shuffle = X86ISD::PSHUFHW;
+ ShuffleVT = MVT::getVectorVT(MVT::i16, SrcVT.getSizeInBits() / 16);
+ PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
+ return true;
+ }
+
+ return false;
+ }
+ return false;
+ }
+
+ // We only support permutation of 32/64 bit elements after this.
+ if (MaskScalarSizeInBits != 32 && MaskScalarSizeInBits != 64)
+ return false;
+
+  // AVX introduced the VPERMILPD/VPERMILPS float permutes; before then we
+  // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
+ bool FloatDomain = SrcVT.isFloatingPoint();
+ if (FloatDomain && !Subtarget.hasAVX())
+ return false;
+
+ // Pre-AVX2 we must use float shuffles on 256-bit vectors.
+ if (SrcVT.is256BitVector() && !Subtarget.hasAVX2())
+ FloatDomain = true;
+
+ // Check for lane crossing permutes.
+ if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
+ // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
+ if (Subtarget.hasAVX2() && SrcVT.is256BitVector() && Mask.size() == 4) {
+ Shuffle = X86ISD::VPERMI;
+ ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);
+ PermuteImm = getV4X86ShuffleImm(Mask);
+ return true;
+ }
+ if (Subtarget.hasAVX512() && SrcVT.is512BitVector() && Mask.size() == 8) {
+ SmallVector<int, 4> RepeatedMask;
+ if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
+ Shuffle = X86ISD::VPERMI;
+ ShuffleVT = (FloatDomain ? MVT::v8f64 : MVT::v8i64);
+ PermuteImm = getV4X86ShuffleImm(RepeatedMask);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // VPERMILPD can permute with a non-repeating shuffle.
+ if (FloatDomain && MaskScalarSizeInBits == 64) {
+ Shuffle = X86ISD::VPERMILPI;
+ ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
+ PermuteImm = 0;
+ for (int i = 0, e = Mask.size(); i != e; ++i) {
+ int M = Mask[i];
+ if (M == SM_SentinelUndef)
+ continue;
+ assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
+ PermuteImm |= (M & 1) << i;
+ }
+ return true;
+ }
+
+ // We need a repeating shuffle mask for VPERMILPS/PSHUFD.
+ SmallVector<int, 4> RepeatedMask;
+ if (!is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask))
+ return false;
+
+ // Narrow the repeated mask for 32-bit element permutes.
+ SmallVector<int, 4> WordMask = RepeatedMask;
+ if (MaskScalarSizeInBits == 64)
+ scaleShuffleMask(2, RepeatedMask, WordMask);
+
+ Shuffle = (FloatDomain ? X86ISD::VPERMILPI : X86ISD::PSHUFD);
+ ShuffleVT = (FloatDomain ? MVT::f32 : MVT::i32);
+ ShuffleVT = MVT::getVectorVT(ShuffleVT, SrcVT.getSizeInBits() / 32);
+ PermuteImm = getV4X86ShuffleImm(WordMask);
+ return true;
+}
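+
+// Worked example (a sketch): the mask {2,3,0,1} on v4i32 repeats per
+// 128-bit lane and is not lane-crossing, so it matches X86ISD::PSHUFD with
+// PermuteImm = 2 | (3 << 2) | (0 << 4) | (1 << 6) = 0x4E.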
+
+// Attempt to match a combined unary shuffle mask against supported binary
+// shuffle instructions.
+// TODO: Investigate sharing more of this with shuffle lowering.
+static bool matchBinaryVectorShuffle(MVT SrcVT, ArrayRef<int> Mask,
+ unsigned &Shuffle, MVT &ShuffleVT) {
+ bool FloatDomain = SrcVT.isFloatingPoint();
+
+ if (SrcVT.is128BitVector()) {
+ if (isTargetShuffleEquivalent(Mask, {0, 0}) && FloatDomain) {
+ Shuffle = X86ISD::MOVLHPS;
+ ShuffleVT = MVT::v4f32;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {1, 1}) && FloatDomain) {
+ Shuffle = X86ISD::MOVHLPS;
+ ShuffleVT = MVT::v4f32;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {0, 0, 1, 1}) && FloatDomain) {
+ Shuffle = X86ISD::UNPCKL;
+ ShuffleVT = MVT::v4f32;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {2, 2, 3, 3}) && FloatDomain) {
+ Shuffle = X86ISD::UNPCKH;
+ ShuffleVT = MVT::v4f32;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {0, 0, 1, 1, 2, 2, 3, 3}) ||
+ isTargetShuffleEquivalent(
+ Mask, {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7})) {
+ Shuffle = X86ISD::UNPCKL;
+ ShuffleVT = Mask.size() == 8 ? MVT::v8i16 : MVT::v16i8;
+ return true;
+ }
+ if (isTargetShuffleEquivalent(Mask, {4, 4, 5, 5, 6, 6, 7, 7}) ||
+ isTargetShuffleEquivalent(Mask, {8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13,
+ 13, 14, 14, 15, 15})) {
+ Shuffle = X86ISD::UNPCKH;
+ ShuffleVT = Mask.size() == 8 ? MVT::v8i16 : MVT::v16i8;
+ return true;
+ }
+ }
+
+ return false;
+}
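+
+// Example (a sketch): the float-domain mask {0,0,1,1} matches
+// X86ISD::UNPCKL on v4f32, i.e. an UNPCKLPS of the input with itself; the
+// caller feeds the single input as both operands of the "binary" shuffle.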
+
/// \brief Combine an arbitrary chain of shuffles into a single instruction if
/// possible.
///
-/// This is the leaf of the recursive combinine below. When we have found some
+/// This is the leaf of the recursive combine below. When we have found some
/// chain of single-use x86 shuffle instructions and accumulated the combined
/// shuffle mask represented by them, this will try to pattern match that mask
/// into either a single instruction if there is a special purpose instruction
/// for this operation, or into a PSHUFB instruction which is a fully general
/// instruction but should only be used to replace chains over a certain depth.
-static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
- int Depth, bool HasPSHUFB, SelectionDAG &DAG,
+static bool combineX86ShuffleChain(SDValue Input, SDValue Root,
+ ArrayRef<int> BaseMask, int Depth,
+ bool HasVariableMask, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
- assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
+ const X86Subtarget &Subtarget) {
+ assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
  // Find the operand that enters the chain. Note that multiple uses are OK
  // here; we're not going to remove the operand we find.
- SDValue Input = Op.getOperand(0);
- while (Input.getOpcode() == ISD::BITCAST)
- Input = Input.getOperand(0);
+ Input = peekThroughBitcasts(Input);
MVT VT = Input.getSimpleValueType();
MVT RootVT = Root.getSimpleValueType();
SDLoc DL(Root);
- if (Mask.size() == 1) {
- int Index = Mask[0];
- assert((Index >= 0 || Index == SM_SentinelUndef ||
- Index == SM_SentinelZero) &&
- "Invalid shuffle index found!");
-
- // We may end up with an accumulated mask of size 1 as a result of
- // widening of shuffle operands (see function canWidenShuffleElements).
- // If the only shuffle index is equal to SM_SentinelZero then propagate
- // a zero vector. Otherwise, the combine shuffle mask is a no-op shuffle
- // mask, and therefore the entire chain of shuffles can be folded away.
- if (Index == SM_SentinelZero)
- DCI.CombineTo(Root.getNode(), getZeroVector(RootVT, Subtarget, DAG, DL));
- else
- DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Input),
- /*AddTo*/ true);
+ SDValue Res;
+
+ unsigned NumBaseMaskElts = BaseMask.size();
+ if (NumBaseMaskElts == 1) {
+ assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Input),
+ /*AddTo*/ true);
return true;
}
- // Use the float domain if the operand type is a floating point type.
- bool FloatDomain = VT.isFloatingPoint();
+ unsigned RootSizeInBits = RootVT.getSizeInBits();
+ unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
- // For floating point shuffles, we don't have free copies in the shuffle
- // instructions or the ability to load as part of the instruction, so
- // canonicalize their shuffles to UNPCK or MOV variants.
- //
- // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
- // vectors because it can have a load folded into it that UNPCK cannot. This
- // doesn't preclude something switching to the shorter encoding post-RA.
- //
- // FIXME: Should teach these routines about AVX vector widths.
- if (FloatDomain && VT.is128BitVector()) {
- if (Mask.equals({0, 0}) || Mask.equals({1, 1})) {
- bool Lo = Mask.equals({0, 0});
- unsigned Shuffle;
- MVT ShuffleVT;
- // Check if we have SSE3 which will let us use MOVDDUP. That instruction
- // is no slower than UNPCKLPD but has the option to fold the input operand
- // into even an unaligned memory load.
- if (Lo && Subtarget->hasSSE3()) {
- Shuffle = X86ISD::MOVDDUP;
- ShuffleVT = MVT::v2f64;
- } else {
- // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
- // than the UNPCK variants.
- Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
- ShuffleVT = MVT::v4f32;
- }
- if (Depth == 1 && Root->getOpcode() == Shuffle)
- return false; // Nothing to do!
- Op = DAG.getBitcast(ShuffleVT, Input);
- DCI.AddToWorklist(Op.getNode());
- if (Shuffle == X86ISD::MOVDDUP)
- Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
- else
- Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
- DCI.AddToWorklist(Op.getNode());
- DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Op),
- /*AddTo*/ true);
- return true;
- }
- if (Subtarget->hasSSE3() &&
- (Mask.equals({0, 0, 2, 2}) || Mask.equals({1, 1, 3, 3}))) {
- bool Lo = Mask.equals({0, 0, 2, 2});
- unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
- MVT ShuffleVT = MVT::v4f32;
- if (Depth == 1 && Root->getOpcode() == Shuffle)
- return false; // Nothing to do!
- Op = DAG.getBitcast(ShuffleVT, Input);
- DCI.AddToWorklist(Op.getNode());
- Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
- DCI.AddToWorklist(Op.getNode());
- DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Op),
+  // Don't combine if we are an AVX512/EVEX target and the mask element size
+ // is different from the root element size - this would prevent writemasks
+ // from being reused.
+ // TODO - this currently prevents all lane shuffles from occurring.
+ // TODO - check for writemasks usage instead of always preventing combining.
+ // TODO - attempt to narrow Mask back to writemask size.
+ if (RootVT.getScalarSizeInBits() != BaseMaskEltSizeInBits &&
+ (RootSizeInBits == 512 ||
+ (Subtarget.hasVLX() && RootSizeInBits >= 128))) {
+ return false;
+ }
+
+ // TODO - handle 128/256-bit lane shuffles of 512-bit vectors.
+
+ // Handle 128-bit lane shuffles of 256-bit vectors.
+ if (VT.is256BitVector() && NumBaseMaskElts == 2 &&
+ !isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0)) {
+ if (Depth == 1 && Root.getOpcode() == X86ISD::VPERM2X128)
+ return false; // Nothing to do!
+ MVT ShuffleVT = (VT.isFloatingPoint() || !Subtarget.hasAVX2() ? MVT::v4f64
+ : MVT::v4i64);
+ unsigned PermMask = 0;
+ PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
+ PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);
+
+ Res = DAG.getBitcast(ShuffleVT, Input);
+ DCI.AddToWorklist(Res.getNode());
+ Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
+ DAG.getUNDEF(ShuffleVT),
+ DAG.getConstant(PermMask, DL, MVT::i8));
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+ /*AddTo*/ true);
+ return true;
+ }
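+
+  // Example (a sketch): BaseMask {1, 0} swaps the two 128-bit lanes and
+  // encodes PermMask = (1 << 0) | (0 << 4) = 0x01; an undef/zero lane is
+  // encoded as 0x8 in its nibble, which VPERM2X128 treats as "zero this
+  // half".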
+
+ // For masks that have been widened to 128-bit elements or more,
+ // narrow back down to 64-bit elements.
+ SmallVector<int, 64> Mask;
+ if (BaseMaskEltSizeInBits > 64) {
+ assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
+ int MaskScale = BaseMaskEltSizeInBits / 64;
+ scaleShuffleMask(MaskScale, BaseMask, Mask);
+ } else {
+ Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
+ }
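+  // e.g. (a sketch) a widened 128-bit-element mask {0, 1} over 256 bits
+  // scales by MaskScale = 2 into the 64-bit-element mask {0, 1, 2, 3}.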
+
+ unsigned NumMaskElts = Mask.size();
+ unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
+
+ // Determine the effective mask value type.
+ bool FloatDomain =
+ (VT.isFloatingPoint() || (VT.is256BitVector() && !Subtarget.hasAVX2())) &&
+ (32 <= MaskEltSizeInBits);
+ MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
+ : MVT::getIntegerVT(MaskEltSizeInBits);
+ MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
+
+ // Attempt to match the mask against known shuffle patterns.
+ MVT ShuffleVT;
+ unsigned Shuffle, PermuteImm;
+
+ if (matchUnaryVectorShuffle(VT, Mask, Subtarget, Shuffle, ShuffleVT)) {
+ if (Depth == 1 && Root.getOpcode() == Shuffle)
+ return false; // Nothing to do!
+ Res = DAG.getBitcast(ShuffleVT, Input);
+ DCI.AddToWorklist(Res.getNode());
+ Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+ /*AddTo*/ true);
+ return true;
+ }
+
+ if (matchPermuteVectorShuffle(VT, Mask, Subtarget, Shuffle, ShuffleVT,
+ PermuteImm)) {
+ if (Depth == 1 && Root.getOpcode() == Shuffle)
+ return false; // Nothing to do!
+ Res = DAG.getBitcast(ShuffleVT, Input);
+ DCI.AddToWorklist(Res.getNode());
+ Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
+ DAG.getConstant(PermuteImm, DL, MVT::i8));
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+ /*AddTo*/ true);
+ return true;
+ }
+
+ if (matchBinaryVectorShuffle(VT, Mask, Shuffle, ShuffleVT)) {
+ if (Depth == 1 && Root.getOpcode() == Shuffle)
+ return false; // Nothing to do!
+ Res = DAG.getBitcast(ShuffleVT, Input);
+ DCI.AddToWorklist(Res.getNode());
+ Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res, Res);
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+ /*AddTo*/ true);
+ return true;
+ }
+
+ // Attempt to blend with zero.
+ if (NumMaskElts <= 8 &&
+ ((Subtarget.hasSSE41() && VT.is128BitVector()) ||
+ (Subtarget.hasAVX() && VT.is256BitVector()))) {
+ // Convert VT to a type compatible with X86ISD::BLENDI.
+ // TODO - add 16i16 support (requires lane duplication).
+ MVT ShuffleVT = MaskVT;
+ if (Subtarget.hasAVX2()) {
+ if (ShuffleVT == MVT::v4i64)
+ ShuffleVT = MVT::v8i32;
+ else if (ShuffleVT == MVT::v2i64)
+ ShuffleVT = MVT::v4i32;
+ } else {
+ if (ShuffleVT == MVT::v2i64 || ShuffleVT == MVT::v4i32)
+ ShuffleVT = MVT::v8i16;
+ else if (ShuffleVT == MVT::v4i64)
+ ShuffleVT = MVT::v4f64;
+ else if (ShuffleVT == MVT::v8i32)
+ ShuffleVT = MVT::v8f32;
+ }
+
+ if (isSequentialOrUndefOrZeroInRange(Mask, /*Pos*/ 0, /*Size*/ NumMaskElts,
+ /*Low*/ 0) &&
+ NumMaskElts <= ShuffleVT.getVectorNumElements()) {
+ unsigned BlendMask = 0;
+ unsigned ShuffleSize = ShuffleVT.getVectorNumElements();
+ unsigned MaskRatio = ShuffleSize / NumMaskElts;
+
+ if (Depth == 1 && Root.getOpcode() == X86ISD::BLENDI)
+ return false;
+
+ for (unsigned i = 0; i != ShuffleSize; ++i)
+ if (Mask[i / MaskRatio] < 0)
+ BlendMask |= 1u << i;
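+
+ // Illustrative example: Mask = {0, Z, 2, Z} (Z = SM_SentinelZero) with a
+ // v8i16 ShuffleVT (MaskRatio = 2) produces BlendMask = 0xCC, selecting
+ // zero for words 2-3 and 6-7.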
+
+ SDValue Zero = getZeroVector(ShuffleVT, Subtarget, DAG, DL);
+ Res = DAG.getBitcast(ShuffleVT, Input);
+ DCI.AddToWorklist(Res.getNode());
+ Res = DAG.getNode(X86ISD::BLENDI, DL, ShuffleVT, Res, Zero,
+ DAG.getConstant(BlendMask, DL, MVT::i8));
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
/*AddTo*/ true);
return true;
}
- if (Mask.equals({0, 0, 1, 1}) || Mask.equals({2, 2, 3, 3})) {
- bool Lo = Mask.equals({0, 0, 1, 1});
- unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
- MVT ShuffleVT = MVT::v4f32;
- if (Depth == 1 && Root->getOpcode() == Shuffle)
+ }
+
+ // Attempt to combine to INSERTPS.
+ if (Subtarget.hasSSE41() && NumMaskElts == 4 &&
+ (VT == MVT::v2f64 || VT == MVT::v4f32)) {
+ SmallBitVector Zeroable(4, false);
+ for (unsigned i = 0; i != NumMaskElts; ++i)
+ if (Mask[i] < 0)
+ Zeroable[i] = true;
+
+ unsigned InsertPSMask;
+ SDValue V1 = Input, V2 = Input;
+ if (Zeroable.any() && matchVectorShuffleAsInsertPS(V1, V2, InsertPSMask,
+ Zeroable, Mask, DAG)) {
+ if (Depth == 1 && Root.getOpcode() == X86ISD::INSERTPS)
return false; // Nothing to do!
- Op = DAG.getBitcast(ShuffleVT, Input);
- DCI.AddToWorklist(Op.getNode());
- Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
- DCI.AddToWorklist(Op.getNode());
- DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Op),
+ V1 = DAG.getBitcast(MVT::v4f32, V1);
+ DCI.AddToWorklist(V1.getNode());
+ V2 = DAG.getBitcast(MVT::v4f32, V2);
+ DCI.AddToWorklist(V2.getNode());
+ Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
+ DAG.getConstant(InsertPSMask, DL, MVT::i8));
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
/*AddTo*/ true);
return true;
}
}
- // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
- // variants as none of these have single-instruction variants that are
- // superior to the UNPCK formulation.
- if (!FloatDomain && VT.is128BitVector() &&
- (Mask.equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
- Mask.equals({4, 4, 5, 5, 6, 6, 7, 7}) ||
- Mask.equals({0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7}) ||
- Mask.equals(
- {8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15}))) {
- bool Lo = Mask[0] == 0;
- unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
- if (Depth == 1 && Root->getOpcode() == Shuffle)
- return false; // Nothing to do!
- MVT ShuffleVT;
- switch (Mask.size()) {
- case 8:
- ShuffleVT = MVT::v8i16;
- break;
- case 16:
- ShuffleVT = MVT::v16i8;
- break;
- default:
- llvm_unreachable("Impossible mask size!");
- };
- Op = DAG.getBitcast(ShuffleVT, Input);
- DCI.AddToWorklist(Op.getNode());
- Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
- DCI.AddToWorklist(Op.getNode());
- DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Op),
- /*AddTo*/ true);
- return true;
- }
-
// Don't try to re-form single instruction chains under any circumstances now
// that we've done encoding canonicalization for them.
if (Depth < 2)
return false;
- // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
- // can replace them with a single PSHUFB instruction profitably. Intel's
- // manuals suggest only using PSHUFB if doing so replacing 5 instructions, but
- // in practice PSHUFB tends to be *very* fast so we're more aggressive.
- if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
+ if (is128BitLaneCrossingShuffleMask(MaskVT, Mask))
+ return false;
+
+ bool MaskContainsZeros =
+ llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
+
+ // If we have a single-input shuffle with different shuffle patterns in the
+ // 128-bit lanes, use the variable mask to lower to VPERMILPS.
+ // TODO: Combine other mask types at higher depths.
+ if (HasVariableMask && !MaskContainsZeros &&
+ ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
+ (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
+ SmallVector<SDValue, 16> VPermIdx;
+ for (int M : Mask) {
+ SDValue Idx =
+ M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
+ VPermIdx.push_back(Idx);
+ }
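+ // VPERMILPS can only permute within each 128-bit lane, hence M % 4 (lane-
+ // crossing masks were rejected above); e.g. a v8f32 mask
+ // {3, 2, 1, 0, 7, 6, 5, 4} yields per-lane indices {3, 2, 1, 0, 3, 2, 1, 0}.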
+ MVT VPermMaskVT = MVT::getVectorVT(MVT::i32, NumMaskElts);
+ SDValue VPermMask = DAG.getBuildVector(VPermMaskVT, DL, VPermIdx);
+ DCI.AddToWorklist(VPermMask.getNode());
+ Res = DAG.getBitcast(MaskVT, Input);
+ DCI.AddToWorklist(Res.getNode());
+ Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+ /*AddTo*/ true);
+ return true;
+ }
+
+ // If we have 3 or more shuffle instructions or a chain involving a variable
+ // mask, we can replace them with a single PSHUFB instruction profitably.
+ // Intel's manuals suggest only using PSHUFB when it replaces five or more
+ // instructions, but in practice PSHUFB tends to be *very* fast so we're
+ // more aggressive.
+ if ((Depth >= 3 || HasVariableMask) &&
+ ((VT.is128BitVector() && Subtarget.hasSSSE3()) ||
+ (VT.is256BitVector() && Subtarget.hasAVX2()) ||
+ (VT.is512BitVector() && Subtarget.hasBWI()))) {
SmallVector<SDValue, 16> PSHUFBMask;
int NumBytes = VT.getSizeInBits() / 8;
- int Ratio = NumBytes / Mask.size();
+ int Ratio = NumBytes / NumMaskElts;
for (int i = 0; i < NumBytes; ++i) {
- if (Mask[i / Ratio] == SM_SentinelUndef) {
+ int M = Mask[i / Ratio];
+ if (M == SM_SentinelUndef) {
PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
continue;
}
- int M = Mask[i / Ratio] != SM_SentinelZero
- ? Ratio * Mask[i / Ratio] + i % Ratio
- : 255;
+ if (M == SM_SentinelZero) {
+ PSHUFBMask.push_back(DAG.getConstant(255, DL, MVT::i8));
+ continue;
+ }
+ M = Ratio * M + i % Ratio;
+ assert((M / 16) == (i / 16) && "Lane crossing detected");
PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
}
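// Illustrative example: Mask = {1, Z} on a 128-bit root (Ratio = 8) expands
// to the byte mask {8..15, 255 x 8}: bytes 8-15 of the input, followed by
// eight zeroing entries.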
MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
- Op = DAG.getBitcast(ByteVT, Input);
- DCI.AddToWorklist(Op.getNode());
- SDValue PSHUFBMaskOp =
- DAG.getNode(ISD::BUILD_VECTOR, DL, ByteVT, PSHUFBMask);
+ Res = DAG.getBitcast(ByteVT, Input);
+ DCI.AddToWorklist(Res.getNode());
+ SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
DCI.AddToWorklist(PSHUFBMaskOp.getNode());
- Op = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Op, PSHUFBMaskOp);
- DCI.AddToWorklist(Op.getNode());
- DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Op),
+ Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
/*AddTo*/ true);
return true;
}
@@ -23288,10 +25333,10 @@ static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
/// combining in this recursive walk.
static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
ArrayRef<int> RootMask,
- int Depth, bool HasPSHUFB,
+ int Depth, bool HasVariableMask,
SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
// Bound the depth of our recursive combine because this is ultimately
// quadratic in nature.
if (Depth > 8)
@@ -23310,13 +25355,10 @@ static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
"Can only combine shuffles of the same vector register size.");
- if (!isTargetShuffle(Op.getOpcode()))
- return false;
+ // Extract target shuffle mask and resolve sentinels and inputs.
+ SDValue Input0, Input1;
SmallVector<int, 16> OpMask;
- bool IsUnary;
- bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, true, OpMask, IsUnary);
- // We only can combine unary shuffles which we can decode the mask for.
- if (!HaveMask || !IsUnary)
+ if (!resolveTargetShuffleInputs(Op, Input0, Input1, OpMask))
return false;
assert(VT.getVectorNumElements() == OpMask.size() &&
@@ -23327,6 +25369,7 @@ static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
OpMask.size() % RootMask.size() == 0) ||
OpMask.size() == RootMask.size()) &&
"The smaller number of elements must divide the larger.");
+ int MaskWidth = std::max<int>(OpMask.size(), RootMask.size());
int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
assert(((RootRatio == 1 && OpRatio == 1) ||
@@ -23334,13 +25377,13 @@ static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
"Must not have a ratio for both incoming and op masks!");
SmallVector<int, 16> Mask;
- Mask.reserve(std::max(OpMask.size(), RootMask.size()));
+ Mask.reserve(MaskWidth);
// Merge this shuffle operation's mask into our accumulated mask. Note that
// this shuffle's mask will be the first applied to the input, followed by the
// root mask to get us all the way to the root value arrangement. The reason
// for this order is that we are recursing up the operation chain.
- for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
+ for (int i = 0; i < MaskWidth; ++i) {
int RootIdx = i / RootRatio;
if (RootMask[RootIdx] < 0) {
// This is a zero or undef lane, we're done.
@@ -23362,45 +25405,56 @@ static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
RootMaskedIdx % OpRatio);
}
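// Illustrative composition: RootMask = {1, 0} (RootRatio = 2) applied over
// OpMask = {0, 0, 2, 2} produces the merged mask {2, 2, 0, 0}.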
- // See if we can recurse into the operand to combine more things.
- switch (Op.getOpcode()) {
- case X86ISD::PSHUFB:
- HasPSHUFB = true;
- case X86ISD::PSHUFD:
- case X86ISD::PSHUFHW:
- case X86ISD::PSHUFLW:
- if (Op.getOperand(0).hasOneUse() &&
- combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
- HasPSHUFB, DAG, DCI, Subtarget))
- return true;
- break;
+ // Handle the all undef/zero cases early.
+ if (llvm::all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; })) {
+ DCI.CombineTo(Root.getNode(), DAG.getUNDEF(Root.getValueType()));
+ return true;
+ }
+ if (llvm::all_of(Mask, [](int Idx) { return Idx < 0; })) {
+ // TODO - should we handle the mixed zero/undef case as well? Just returning
+ // a zero mask will lose information on undef elements, possibly reducing
+ // future combine possibilities.
+ DCI.CombineTo(Root.getNode(), getZeroVector(Root.getSimpleValueType(),
+ Subtarget, DAG, SDLoc(Root)));
+ return true;
+ }
- case X86ISD::UNPCKL:
- case X86ISD::UNPCKH:
- assert(Op.getOperand(0) == Op.getOperand(1) &&
- "We only combine unary shuffles!");
- // We can't check for single use, we have to check that this shuffle is the
- // only user.
- if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
- combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
- HasPSHUFB, DAG, DCI, Subtarget))
- return true;
- break;
+ int MaskSize = Mask.size();
+ bool UseInput0 = std::any_of(Mask.begin(), Mask.end(),
+ [MaskSize](int Idx) { return 0 <= Idx && Idx < MaskSize; });
+ bool UseInput1 = std::any_of(Mask.begin(), Mask.end(),
+ [MaskSize](int Idx) { return MaskSize <= Idx; });
+
+ // At the moment we can only combine unary shuffle mask cases.
+ if (UseInput0 && UseInput1)
+ return false;
+ else if (UseInput1) {
+ std::swap(Input0, Input1);
+ ShuffleVectorSDNode::commuteMask(Mask);
}
+ assert(Input0 && "Shuffle with no inputs detected");
+
+ HasVariableMask |= isTargetShuffleVariableMask(Op.getOpcode());
+
+ // See if we can recurse into Input0 (if it's a target shuffle).
+ if (Op->isOnlyUserOf(Input0.getNode()) &&
+ combineX86ShufflesRecursively(Input0, Root, Mask, Depth + 1,
+ HasVariableMask, DAG, DCI, Subtarget))
+ return true;
+
// Minor canonicalization of the accumulated shuffle mask to make it easier
- // to match below. All this does is detect masks with squential pairs of
+ // to match below. All this does is detect masks with sequential pairs of
// elements, and shrink them to the half-width mask. It does this in a loop
// so it will reduce the size of the mask to the minimal width mask which
// performs an equivalent shuffle.
SmallVector<int, 16> WidenedMask;
while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
Mask = std::move(WidenedMask);
- WidenedMask.clear();
}
- return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
- Subtarget);
+ return combineX86ShuffleChain(Input0, Root, Mask, Depth, HasVariableMask, DAG,
+ DCI, Subtarget);
}
/// \brief Get the PSHUF-style mask from PSHUF node.
@@ -23410,8 +25464,10 @@ static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
MVT VT = N.getSimpleValueType();
SmallVector<int, 4> Mask;
+ SmallVector<SDValue, 2> Ops;
bool IsUnary;
- bool HaveMask = getTargetShuffleMask(N.getNode(), VT, false, Mask, IsUnary);
+ bool HaveMask =
+ getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
(void)HaveMask;
assert(HaveMask);
@@ -23647,9 +25703,9 @@ static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
}
/// \brief Try to combine x86 target specific shuffles.
-static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc DL(N);
MVT VT = N.getSimpleValueType();
SmallVector<int, 4> Mask;
@@ -23681,8 +25737,7 @@ static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
auto Op0 = N.getOperand(0);
auto Op1 = N.getOperand(1);
- if (Op0.getOpcode() == ISD::UNDEF &&
- Op1.getNode()->getOpcode() == ISD::VECTOR_SHUFFLE) {
+ if (Op0.isUndef() && Op1.getNode()->getOpcode() == ISD::VECTOR_SHUFFLE) {
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op1.getNode())->getMask();
unsigned NumElts = VT.getVectorNumElements();
@@ -23719,6 +25774,129 @@ static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V0, NewMask);
}
+ // Attempt to merge blend(insertps(x,y),zero).
+ if (V0.getOpcode() == X86ISD::INSERTPS ||
+ V1.getOpcode() == X86ISD::INSERTPS) {
+ assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
+
+ // Determine which elements are known to be zero.
+ SmallVector<int, 8> TargetMask;
+ SmallVector<SDValue, 2> BlendOps;
+ if (!setTargetShuffleZeroElements(N, TargetMask, BlendOps))
+ return SDValue();
+
+ // Helper function to take the inner insertps node and attempt to
+ // merge the blend-with-zero into its zero mask.
+ auto MergeInsertPSAndBlend = [&](SDValue V, int Offset) {
+ if (V.getOpcode() != X86ISD::INSERTPS)
+ return SDValue();
+ SDValue Op0 = V.getOperand(0);
+ SDValue Op1 = V.getOperand(1);
+ SDValue Op2 = V.getOperand(2);
+ unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
+
+ // Check each element of the blend node's target mask - must either
+ // be zeroable (and update the zero mask) or selects the element from
+ // the inner insertps node.
+ for (int i = 0; i != 4; ++i)
+ if (TargetMask[i] < 0)
+ InsertPSMask |= (1u << i);
+ else if (TargetMask[i] != (i + Offset))
+ return SDValue();
+ return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, Op0, Op1,
+ DAG.getConstant(InsertPSMask, DL, MVT::i8));
+ };
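+
+ // For instance, if the blend keeps insertps elements 0-2 but selects zero
+ // for element 3, the fold sets bit 3 of the inner InsertPSMask instead and
+ // drops the blend.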
+
+ if (SDValue V = MergeInsertPSAndBlend(V0, 0))
+ return V;
+ if (SDValue V = MergeInsertPSAndBlend(V1, 4))
+ return V;
+ }
+ return SDValue();
+ }
+ case X86ISD::INSERTPS: {
+ assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
+ SDValue Op0 = N.getOperand(0);
+ SDValue Op1 = N.getOperand(1);
+ SDValue Op2 = N.getOperand(2);
+ unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
+ unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
+ unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
+ unsigned ZeroMask = InsertPSMask & 0xF;
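+
+ // Illustrative decode: InsertPSMask = 0x4C (0b01001100) copies element 1
+ // of Op1 into element 0 of the result and zeroes result elements 2 and 3.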
+
+ // If we zero out all elements from Op0 then we don't need to reference it.
+ if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
+ return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
+ DAG.getConstant(InsertPSMask, DL, MVT::i8));
+
+ // If we zero out the element from Op1 then we don't need to reference it.
+ if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
+ return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
+ DAG.getConstant(InsertPSMask, DL, MVT::i8));
+
+ // Attempt to merge insertps Op1 with an inner target shuffle node.
+ SmallVector<int, 8> TargetMask1;
+ SmallVector<SDValue, 2> Ops1;
+ if (setTargetShuffleZeroElements(Op1, TargetMask1, Ops1)) {
+ int M = TargetMask1[SrcIdx];
+ if (isUndefOrZero(M)) {
+ // Zero/UNDEF insertion - zero out element and remove dependency.
+ InsertPSMask |= (1u << DstIdx);
+ return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
+ DAG.getConstant(InsertPSMask, DL, MVT::i8));
+ }
+ // Update insertps mask srcidx and reference the source input directly.
+ assert(0 <= M && M < 8 && "Shuffle index out of range");
+ InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
+ Op1 = Ops1[M < 4 ? 0 : 1];
+ return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
+ DAG.getConstant(InsertPSMask, DL, MVT::i8));
+ }
+
+ // Attempt to merge insertps Op0 with an inner target shuffle node.
+ SmallVector<int, 8> TargetMask0;
+ SmallVector<SDValue, 2> Ops0;
+ if (!setTargetShuffleZeroElements(Op0, TargetMask0, Ops0))
+ return SDValue();
+
+ bool Updated = false;
+ bool UseInput00 = false;
+ bool UseInput01 = false;
+ for (int i = 0; i != 4; ++i) {
+ int M = TargetMask0[i];
+ if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
+ // No change if element is already zero or the inserted element.
+ continue;
+ } else if (isUndefOrZero(M)) {
+ // If the target mask is undef/zero then we must zero the element.
+ InsertPSMask |= (1u << i);
+ Updated = true;
+ continue;
+ }
+
+ // The input vector element must be inline.
+ if (M != i && M != (i + 4))
+ return SDValue();
+
+ // Determine which inputs of the target shuffle we're using.
+ UseInput00 |= (0 <= M && M < 4);
+ UseInput01 |= (4 <= M);
+ }
+
+ // If we're not using both inputs of the target shuffle then use the
+ // referenced input directly.
+ if (UseInput00 && !UseInput01) {
+ Updated = true;
+ Op0 = Ops0[0];
+ } else if (!UseInput00 && UseInput01) {
+ Updated = true;
+ Op0 = Ops0[1];
+ }
+
+ if (Updated)
+ return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
+ DAG.getConstant(InsertPSMask, DL, MVT::i8));
+
return SDValue();
}
default:
@@ -23814,12 +25992,12 @@ static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
/// the operands which explicitly discard the lanes which are unused by this
/// operation to try to flow through the rest of the combiner the fact that
/// they're unused.
-static SDValue combineShuffleToAddSub(SDNode *N, const X86Subtarget *Subtarget,
+static SDValue combineShuffleToAddSub(SDNode *N, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc DL(N);
EVT VT = N->getValueType(0);
- if ((!Subtarget->hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) &&
- (!Subtarget->hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64)))
+ if ((!Subtarget.hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) &&
+ (!Subtarget.hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64)))
return SDValue();
// We only handle target-independent shuffles.
@@ -23865,13 +26043,10 @@ static SDValue combineShuffleToAddSub(SDNode *N, const X86Subtarget *Subtarget,
return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
}
-/// PerformShuffleCombine - Performs several different shuffle combines.
-static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc dl(N);
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
// Don't create instructions with illegal types after legalize types has run.
@@ -23886,9 +26061,9 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
return AddSub;
// Combine 256-bit vector shuffles. This is only profitable when in AVX mode
- if (TLI.isTypeLegal(VT) && Subtarget->hasFp256() && VT.is256BitVector() &&
+ if (TLI.isTypeLegal(VT) && Subtarget.hasFp256() && VT.is256BitVector() &&
N->getOpcode() == ISD::VECTOR_SHUFFLE)
- return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
+ return combineShuffle256(N, DAG, DCI, Subtarget);
// During Type Legalization, when promoting illegal vector types,
// the backend might introduce new shuffle dag nodes and bitcasts.
@@ -23903,8 +26078,12 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
// potentially need to be further expanded (or custom lowered) into a
// less optimal sequence of dag nodes.
if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
- N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
- N0.getOpcode() == ISD::BITCAST) {
+ N->getOpcode() == ISD::VECTOR_SHUFFLE &&
+ N->getOperand(0).getOpcode() == ISD::BITCAST &&
+ N->getOperand(1).isUndef() && N->getOperand(0).hasOneUse()) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+
SDValue BC0 = N0.getOperand(0);
EVT SVT = BC0.getValueType();
unsigned Opcode = BC0.getOpcode();
@@ -23936,7 +26115,7 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
SDValue BC00 = DAG.getBitcast(VT, BC0.getOperand(0));
SDValue BC01 = DAG.getBitcast(VT, BC0.getOperand(1));
SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
- return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
+ return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, SVOp->getMask());
}
}
}
@@ -23952,9 +26131,8 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
return LD;
if (isTargetShuffle(N->getOpcode())) {
- SDValue Shuffle =
- PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
- if (Shuffle.getNode())
+ if (SDValue Shuffle =
+ combineTargetShuffle(SDValue(N, 0), DAG, DCI, Subtarget))
return Shuffle;
// Try recursively combining arbitrary sequences of x86 shuffle
@@ -23973,8 +26151,8 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-/// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
-/// specific shuffle of a load can be folded into a single element load.
+/// Check if a vector extract from a target-specific shuffle of a load can be
+/// folded into a single element load.
/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
/// shuffles have been custom lowered so we need to handle those here.
static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
@@ -24012,9 +26190,10 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
return SDValue();
SmallVector<int, 16> ShuffleMask;
+ SmallVector<SDValue, 2> ShuffleOps;
bool UnaryShuffle;
if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(), true,
- ShuffleMask, UnaryShuffle))
+ ShuffleOps, ShuffleMask, UnaryShuffle))
return SDValue();
// Select the input vector, guarding against out of range extract vector.
@@ -24029,12 +26208,12 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
return DAG.getUNDEF(EltVT);
assert(0 <= Idx && Idx < (int)(2 * NumElems) && "Shuffle index out of range");
- SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
- : InVec.getOperand(1);
+ SDValue LdNode = (Idx < (int)NumElems) ? ShuffleOps[0]
+ : ShuffleOps[1];
// If inputs to shuffle are the same for both ops, then allow 2 uses
- unsigned AllowedUses = InVec.getNumOperands() > 1 &&
- InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
+ unsigned AllowedUses =
+ (ShuffleOps.size() > 1 && ShuffleOps[0] == ShuffleOps[1]) ? 2 : 1;
if (LdNode.getOpcode() == ISD::BITCAST) {
// Don't duplicate a load with other uses.
@@ -24068,18 +26247,16 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
SDLoc dl(N);
// Create shuffle node taking into account the case that it's a unary shuffle
- SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
- : InVec.getOperand(1);
- Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
- InVec.getOperand(0), Shuffle,
- &ShuffleMask[0]);
+ SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT) : ShuffleOps[1];
+ Shuffle = DAG.getVectorShuffle(CurrentVT, dl, ShuffleOps[0], Shuffle,
+ ShuffleMask);
Shuffle = DAG.getBitcast(OriginalVT, Shuffle);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
EltNo);
}
-static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
@@ -24108,8 +26285,8 @@ static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG,
case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
default: return SDValue();
}
- if (((Subtarget->hasSSE1() && VT == MVT::f32) ||
- (Subtarget->hasSSE2() && VT == MVT::f64)) &&
+ if (((Subtarget.hasSSE1() && VT == MVT::f32) ||
+ (Subtarget.hasSSE2() && VT == MVT::f64)) &&
isa<ConstantSDNode>(N0.getOperand(1)) &&
N0.getOperand(0).getOpcode() == ISD::BITCAST &&
N0.getOperand(0).getOperand(0).getValueType() == VT) {
@@ -24121,13 +26298,12 @@ static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
-/// generation and convert it from being a bunch of shuffles and extracts
-/// into a somewhat faster sequence. For i686, the best sequence is apparently
-/// storing the value and loading scalars back, while for x64 we should
-/// use 64-bit extracts and shifts.
-static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI) {
+/// Detect vector gather/scatter index generation and convert it from being a
+/// bunch of shuffles and extracts into a somewhat faster sequence.
+/// For i686, the best sequence is apparently storing the value and loading
+/// scalars back, while for x64 we should use 64-bit extracts and shifts.
+static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI) {
if (SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI))
return NewOp;
@@ -24136,25 +26312,14 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
// Detect mmx to i32 conversion through a v2i32 elt extract.
if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
N->getValueType(0) == MVT::i32 &&
- InputVector.getValueType() == MVT::v2i32) {
+ InputVector.getValueType() == MVT::v2i32 &&
+ isa<ConstantSDNode>(N->getOperand(1)) &&
+ N->getConstantOperandVal(1) == 0) {
+ SDValue MMXSrc = InputVector.getNode()->getOperand(0);
// The bitcast source is a direct mmx result.
- SDValue MMXSrc = InputVector.getNode()->getOperand(0);
if (MMXSrc.getValueType() == MVT::x86mmx)
- return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
- N->getValueType(0),
- InputVector.getNode()->getOperand(0));
-
- // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
- if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
- MMXSrc.getValueType() == MVT::i64) {
- SDValue MMXSrcOp = MMXSrc.getOperand(0);
- if (MMXSrcOp.hasOneUse() && MMXSrcOp.getOpcode() == ISD::BITCAST &&
- MMXSrcOp.getValueType() == MVT::v1i64 &&
- MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
- return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
- N->getValueType(0), MMXSrcOp.getOperand(0));
- }
+ return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
}
EVT VT = N->getValueType(0);
@@ -24236,7 +26401,7 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
// Store the value to a temporary stack slot.
SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo());
EVT ElementType = InputVector.getValueType().getVectorElementType();
unsigned EltSize = ElementType.getSizeInBits() / 8;
@@ -24251,10 +26416,8 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, OffsetVal);
// Load the scalar.
- Vals[i] = DAG.getLoad(ElementType, dl, Ch,
- ScalarAddr, MachinePointerInfo(),
- false, false, false, 0);
-
+ Vals[i] =
+ DAG.getLoad(ElementType, dl, Ch, ScalarAddr, MachinePointerInfo());
}
}
@@ -24272,55 +26435,10 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-static SDValue
-transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
- SDLoc dl(N);
- SDValue Cond = N->getOperand(0);
- SDValue LHS = N->getOperand(1);
- SDValue RHS = N->getOperand(2);
-
- if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
- SDValue CondSrc = Cond->getOperand(0);
- if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
- Cond = CondSrc->getOperand(0);
- }
-
- if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
- return SDValue();
-
- // A vselect where all conditions and data are constants can be optimized into
- // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
- if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
- ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
- return SDValue();
-
- unsigned MaskValue = 0;
- if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
- return SDValue();
-
- MVT VT = N->getSimpleValueType(0);
- unsigned NumElems = VT.getVectorNumElements();
- SmallVector<int, 8> ShuffleMask(NumElems, -1);
- for (unsigned i = 0; i < NumElems; ++i) {
- // Be sure we emit undef where we can.
- if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
- ShuffleMask[i] = -1;
- else
- ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
- }
-
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
- return SDValue();
- return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
-}
-
-/// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
-/// nodes.
-static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+/// Do target-specific dag combines on SELECT and VSELECT nodes.
+static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc DL(N);
SDValue Cond = N->getOperand(0);
// Get the LHS/RHS of the select.
@@ -24337,8 +26455,8 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
VT != MVT::f80 && VT != MVT::f128 &&
(TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
- (Subtarget->hasSSE2() ||
- (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
+ (Subtarget.hasSSE2() ||
+ (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
unsigned Opcode = 0;
@@ -24476,7 +26594,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
}
EVT CondVT = Cond.getValueType();
- if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
+ if (Subtarget.hasAVX512() && VT.isVector() && CondVT.isVector() &&
CondVT.getVectorElementType() == MVT::i1) {
// v16i8 (select v16i1, v16i8, v16i8) does not have a proper
// lowering on KNL. In this case we convert it to
@@ -24487,7 +26605,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
(OpVT.getVectorElementType() == MVT::i8 ||
OpVT.getVectorElementType() == MVT::i16) &&
- !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
+ !(Subtarget.hasBWI() && Subtarget.hasVLX())) {
Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
DCI.AddToWorklist(Cond.getNode());
return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
@@ -24625,8 +26743,8 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// Match VSELECTs into subs with unsigned saturation.
if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
// psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
- ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
- (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
+ ((Subtarget.hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
+ (Subtarget.hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
// Check if one of the arms of the VSELECT is a zero vector. If it's on the
@@ -24730,25 +26848,6 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
}
}
- // We should generate an X86ISD::BLENDI from a vselect if its argument
- // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
- // constants. This specific pattern gets generated when we split a
- // selector for a 512 bit vector in a machine without AVX512 (but with
- // 256-bit vectors), during legalization:
- //
- // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
- //
- // Iff we find this pattern and the build_vectors are built from
- // constants, we translate the vselect into a shuffle_vector that we
- // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
- if ((N->getOpcode() == ISD::VSELECT ||
- N->getOpcode() == X86ISD::SHRUNKBLEND) &&
- !DCI.isBeforeLegalize() && !VT.is512BitVector()) {
- SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
- if (Shuffle.getNode())
- return Shuffle;
- }
-
// If this is a *dynamic* select (non-constant condition) and we can match
// this node with one of the variable blend instructions, restructure the
// condition so that the blends can use the high bit of each element and use
@@ -24780,10 +26879,10 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
if (VT.getVectorElementType() == MVT::i16)
return SDValue();
// Dynamic blending was only available from SSE4.1 onward.
- if (VT.is128BitVector() && !Subtarget->hasSSE41())
+ if (VT.is128BitVector() && !Subtarget.hasSSE41())
return SDValue();
// Byte blends are only available in AVX2
- if (VT == MVT::v32i8 && !Subtarget->hasAVX2())
+ if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
return SDValue();
assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
@@ -24837,6 +26936,73 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+/// Combine:
+/// (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
+/// to:
+/// (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
+/// i.e., reusing the EFLAGS produced by the LOCKed instruction.
+/// Note that this is only legal for some op/cc combinations.
+static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
+ SelectionDAG &DAG) {
+ // This combine only operates on CMP-like nodes.
+ if (!(Cmp.getOpcode() == X86ISD::CMP ||
+ (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
+ return SDValue();
+
+ // This only applies to variations of the common case:
+ // (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
+ // (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
+ // (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
+ // (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
+ // Using the proper condcodes (see below), overflow is checked for.
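+ // For example, with Addend == 1, COND_S (old < 0) becomes COND_LE on the
+ // flags of old + 1, since old < 0 <=> old + 1 <= 0. Even at old == INT_MAX,
+ // where the add overflows, COND_LE reads ZF | (SF != OF) and still evaluates
+ // to false, matching the original compare.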
+
+ // FIXME: We can generalize both constraints:
+ // - XOR/OR/AND (if they were made to survive AtomicExpand)
+ // - LHS != 1
+ // if the result is compared.
+
+ SDValue CmpLHS = Cmp.getOperand(0);
+ SDValue CmpRHS = Cmp.getOperand(1);
+
+ if (!CmpLHS.hasOneUse())
+ return SDValue();
+
+ auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
+ if (!CmpRHSC || CmpRHSC->getZExtValue() != 0)
+ return SDValue();
+
+ const unsigned Opc = CmpLHS.getOpcode();
+
+ if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
+ return SDValue();
+
+ SDValue OpRHS = CmpLHS.getOperand(2);
+ auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
+ if (!OpRHSC)
+ return SDValue();
+
+ APInt Addend = OpRHSC->getAPIntValue();
+ if (Opc == ISD::ATOMIC_LOAD_SUB)
+ Addend = -Addend;
+
+ if (CC == X86::COND_S && Addend == 1)
+ CC = X86::COND_LE;
+ else if (CC == X86::COND_NS && Addend == 1)
+ CC = X86::COND_G;
+ else if (CC == X86::COND_G && Addend == -1)
+ CC = X86::COND_GE;
+ else if (CC == X86::COND_LE && Addend == -1)
+ CC = X86::COND_L;
+ else
+ return SDValue();
+
+ SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG);
+ DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
+ DAG.getUNDEF(CmpLHS.getValueType()));
+ DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
+ return LockOp;
+}
+
// Check whether a boolean test is testing a boolean value generated by
// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
// code.
@@ -24853,10 +27019,10 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// where Op could be BRCOND or CMOV.
//
static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
- // Quit if not CMP and SUB with its value result used.
- if (Cmp.getOpcode() != X86ISD::CMP &&
- (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
- return SDValue();
+ // This combine only operates on CMP-like nodes.
+ if (!(Cmp.getOpcode() == X86ISD::CMP ||
+ (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
+ return SDValue();
// Quit if not used as a boolean value.
if (CC != X86::COND_E && CC != X86::COND_NE)
@@ -24890,6 +27056,7 @@ static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
// Skip (zext $x), (trunc $x), (assertzext $x), or (and $x, 1) node.
while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
SetCC.getOpcode() == ISD::TRUNCATE ||
+ SetCC.getOpcode() == ISD::AssertZext ||
SetCC.getOpcode() == ISD::AND) {
if (SetCC.getOpcode() == ISD::AND) {
int OpIdx = -1;
@@ -24897,7 +27064,7 @@ static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
OpIdx = 1;
if (isOneConstant(SetCC.getOperand(1)))
OpIdx = 0;
- if (OpIdx == -1)
+ if (OpIdx < 0)
break;
SetCC = SetCC.getOperand(OpIdx);
truncatedToBoolWithAnd = true;
@@ -25008,10 +27175,20 @@ static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
return true;
}
+/// Optimize an EFLAGS definition used according to the condition code \p CC
+/// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
+/// uses of chain values.
+static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
+ SelectionDAG &DAG) {
+ if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
+ return R;
+ return combineSetCCAtomicArith(EFLAGS, CC, DAG);
+}
+
/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
-static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc DL(N);
// If the flag operand isn't dead, don't touch this CMOV.
@@ -25034,15 +27211,14 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
}
}
- SDValue Flags;
-
- Flags = checkBoolTestSetCCCombine(Cond, CC);
- if (Flags.getNode() &&
- // Extra check as FCMOV only supports a subset of X86 cond.
- (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
- SDValue Ops[] = { FalseOp, TrueOp,
- DAG.getConstant(CC, DL, MVT::i8), Flags };
- return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
+ // Try to simplify the EFLAGS and condition code operands.
+ // We can't always do this as FCMOV only supports a subset of X86 cond.
+ if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG)) {
+ if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) {
+ SDValue Ops[] = {FalseOp, TrueOp, DAG.getConstant(CC, DL, MVT::i8),
+ Flags};
+ return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
+ }
}
// If this is a select between two integer constants, try to do some
@@ -25218,11 +27394,216 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-/// PerformMulCombine - Optimize a single multiply with constant into two
-/// in order to implement it with two cheaper instructions, e.g.
-/// LEA + SHL, LEA + LEA.
-static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI) {
+/// Different mul shrinking modes.
+enum ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
+
+static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
+ EVT VT = N->getOperand(0).getValueType();
+ if (VT.getScalarSizeInBits() != 32)
+ return false;
+
+ assert(N->getNumOperands() == 2 && "Mul must have exactly 2 operands");
+ unsigned SignBits[2] = {1, 1};
+ bool IsPositive[2] = {false, false};
+ for (unsigned i = 0; i < 2; i++) {
+ SDValue Opd = N->getOperand(i);
+
+ // DAG.ComputeNumSignBits returns 1 for ISD::ANY_EXTEND, so we need to
+ // compute the sign bits for it separately.
+ if (Opd.getOpcode() == ISD::ANY_EXTEND) {
+ // For anyextend, it is safe to assume an appropriate number of leading
+ // sign/zero bits.
+ if (Opd.getOperand(0).getValueType().getVectorElementType() == MVT::i8)
+ SignBits[i] = 25;
+ else if (Opd.getOperand(0).getValueType().getVectorElementType() ==
+ MVT::i16)
+ SignBits[i] = 17;
+ else
+ return false;
+ IsPositive[i] = true;
+ } else if (Opd.getOpcode() == ISD::BUILD_VECTOR) {
+ // All the operands of BUILD_VECTOR need to be integer constants.
+ // Find the smallest value range to which all the operands belong.
+ SignBits[i] = 32;
+ IsPositive[i] = true;
+ for (const SDValue &SubOp : Opd.getNode()->op_values()) {
+ if (SubOp.isUndef())
+ continue;
+ auto *CN = dyn_cast<ConstantSDNode>(SubOp);
+ if (!CN)
+ return false;
+ APInt IntVal = CN->getAPIntValue();
+ if (IntVal.isNegative())
+ IsPositive[i] = false;
+ SignBits[i] = std::min(SignBits[i], IntVal.getNumSignBits());
+ }
+ } else {
+ SignBits[i] = DAG.ComputeNumSignBits(Opd);
+ if (Opd.getOpcode() == ISD::ZERO_EXTEND)
+ IsPositive[i] = true;
+ }
+ }
+
+ bool AllPositive = IsPositive[0] && IsPositive[1];
+ unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
+ // When both ranges are within [-128, 127], use MULS8 mode.
+ if (MinSignBits >= 25)
+ Mode = MULS8;
+ // When both ranges are within [0, 255], use MULU8 mode.
+ else if (AllPositive && MinSignBits >= 24)
+ Mode = MULU8;
+ // When both ranges are within [-32768, 32767], use MULS16 mode.
+ else if (MinSignBits >= 17)
+ Mode = MULS16;
+ // When both ranges are within [0, 65535], use MULU16 mode.
+ else if (AllPositive && MinSignBits >= 16)
+ Mode = MULU16;
+ else
+ return false;
+ return true;
+}
+
+/// When the operands of a vector mul are extended from smaller-sized values,
+/// like i8 and i16, the type of the mul may be shrunk to generate more
+/// efficient code. Two typical patterns are handled:
+/// Pattern1:
+/// %2 = sext/zext <N x i8> %1 to <N x i32>
+/// %4 = sext/zext <N x i8> %3 to <N x i32>
+/// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
+/// %5 = mul <N x i32> %2, %4
+///
+/// Pattern2:
+/// %2 = zext/sext <N x i16> %1 to <N x i32>
+/// %4 = zext/sext <N x i16> %3 to <N x i32>
+/// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
+/// %5 = mul <N x i32> %2, %4
+///
+/// There are four mul shrinking modes:
+/// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
+/// -128 to 127, and the scalar value range of %4 is also -128 to 127,
+/// generate pmullw+sext32 for it (MULS8 mode).
+/// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
+/// 0 to 255, and the scalar value range of %4 is also 0 to 255,
+/// generate pmullw+zext32 for it (MULU8 mode).
+/// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
+/// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
+/// generate pmullw+pmulhw for it (MULS16 mode).
+/// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
+/// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
+/// generate pmullw+pmulhuw for it (MULU16 mode).
+static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ // pmulld is supported since SSE4.1. It is better to use pmulld
+ // instead of pmullw+pmulhw.
+ if (Subtarget.hasSSE41())
+ return SDValue();
+
+ ShrinkMode Mode;
+ if (!canReduceVMulWidth(N, DAG, Mode))
+ return SDValue();
+
+ SDLoc DL(N);
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ EVT VT = N->getOperand(0).getValueType();
+ unsigned RegSize = 128;
+ MVT OpsVT = MVT::getVectorVT(MVT::i16, RegSize / 16);
+ EVT ReducedVT =
+ EVT::getVectorVT(*DAG.getContext(), MVT::i16, VT.getVectorNumElements());
+ // Shrink the operands of mul.
+ SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
+ SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
+
+ if (VT.getVectorNumElements() >= OpsVT.getVectorNumElements()) {
+ // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
+ // lower part is needed.
+ SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
+ if (Mode == MULU8 || Mode == MULS8) {
+ return DAG.getNode((Mode == MULU8) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND,
+ DL, VT, MulLo);
+ } else {
+ MVT ResVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
+ // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
+ // the higher part is also needed.
+ SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
+ ReducedVT, NewN0, NewN1);
+
+ // Repack the lower part and higher part result of mul into a wider
+ // result.
+ // Generate shuffle functioning as punpcklwd.
+ SmallVector<int, 16> ShuffleMask(VT.getVectorNumElements());
+ for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) {
+ ShuffleMask[2 * i] = i;
+ ShuffleMask[2 * i + 1] = i + VT.getVectorNumElements();
+ }
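+ // e.g. for an 8-element mul this builds the interleave mask
+ // {0, 8, 1, 9, 2, 10, 3, 11}, zipping the low i16 products (MulLo) with
+ // their high halves (MulHi) to reassemble the low four i32 results.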
+ SDValue ResLo =
+ DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
+ ResLo = DAG.getNode(ISD::BITCAST, DL, ResVT, ResLo);
+ // Generate shuffle functioning as punpckhwd.
+ for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) {
+ ShuffleMask[2 * i] = i + VT.getVectorNumElements() / 2;
+ ShuffleMask[2 * i + 1] = i + VT.getVectorNumElements() * 3 / 2;
+ }
+ SDValue ResHi =
+ DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
+ ResHi = DAG.getNode(ISD::BITCAST, DL, ResVT, ResHi);
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
+ }
+ } else {
+ // When VT.getVectorNumElements() < OpsVT.getVectorNumElements(), we want
+ // to legalize the mul explicitly because implicit legalization for type
+ // <4 x i16> to <4 x i32> sometimes involves unnecessary unpack
+ // instructions which will not exist when we explicitly legalize it by
+ // extending <4 x i16> to <8 x i16> (concatenating the <4 x i16> val with
+ // <4 x i16> undef).
+ //
+ // Legalize the operands of mul.
+ SmallVector<SDValue, 16> Ops(RegSize / ReducedVT.getSizeInBits(),
+ DAG.getUNDEF(ReducedVT));
+ Ops[0] = NewN0;
+ NewN0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
+ Ops[0] = NewN1;
+ NewN1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
+
+ if (Mode == MULU8 || Mode == MULS8) {
+ // Generate lower part of mul: pmullw. For MULU8/MULS8, only the lower
+ // part is needed.
+ SDValue Mul = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
+
+ // Convert the type of the mul result to VT.
+ MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
+ SDValue Res = DAG.getNode(Mode == MULU8 ? ISD::ZERO_EXTEND_VECTOR_INREG
+ : ISD::SIGN_EXTEND_VECTOR_INREG,
+ DL, ResVT, Mul);
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
+ DAG.getIntPtrConstant(0, DL));
+ } else {
+ // Generate the lower and higher part of mul: pmulhw/pmulhuw. For
+ // MULU16/MULS16, both parts are needed.
+ SDValue MulLo = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
+ SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
+ OpsVT, NewN0, NewN1);
+
+ // Repack the lower part and higher part result of mul into a wider
+ // result. Make sure the type of mul result is VT.
+ MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
+ SDValue Res = DAG.getNode(X86ISD::UNPCKL, DL, OpsVT, MulLo, MulHi);
+ Res = DAG.getNode(ISD::BITCAST, DL, ResVT, Res);
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
+ DAG.getIntPtrConstant(0, DL));
+ }
+ }
+}
+
+/// Optimize a single multiply with constant into two operations in order to
+/// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
+static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
+ EVT VT = N->getValueType(0);
+ if (DCI.isBeforeLegalize() && VT.isVector())
+ return reduceVMULWidth(N, DAG, Subtarget);
+
// An imul is usually smaller than the alternative sequence.
if (DAG.getMachineFunction().getFunction()->optForMinSize())
return SDValue();
@@ -25230,7 +27611,6 @@ static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
return SDValue();
- EVT VT = N->getValueType(0);
if (VT != MVT::i64 && VT != MVT::i32)
return SDValue();
@@ -25307,7 +27687,7 @@ static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
@@ -25320,7 +27700,7 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
N0.getOperand(1).getOpcode() == ISD::Constant) {
SDValue N00 = N0.getOperand(0);
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
- APInt ShAmt = N1C->getAPIntValue();
+ const APInt &ShAmt = N1C->getAPIntValue();
Mask = Mask.shl(ShAmt);
bool MaskOK = false;
// We can handle cases concerning bit-widening nodes containing setcc_c if
@@ -25367,7 +27747,7 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
-static SDValue PerformSRACombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue combineShiftRightAlgebraic(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
@@ -25424,11 +27804,11 @@ static SDValue PerformSRACombine(SDNode *N, SelectionDAG &DAG) {
/// shift by a constant amount which is known to be bigger than or equal
/// to the vector element size in bits.
static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
- (!Subtarget->hasInt256() ||
+ (!Subtarget.hasInt256() ||
(VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
return SDValue();
@@ -25436,7 +27816,7 @@ static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
SDLoc DL(N);
if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
- APInt ShiftAmt = AmtSplat->getAPIntValue();
+ const APInt &ShiftAmt = AmtSplat->getAPIntValue();
unsigned MaxAmount =
VT.getSimpleVT().getVectorElementType().getSizeInBits();
@@ -25451,16 +27831,15 @@ static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-/// PerformShiftCombine - Combine shifts.
-static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineShift(SDNode* N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
if (N->getOpcode() == ISD::SHL)
- if (SDValue V = PerformSHLCombine(N, DAG))
+ if (SDValue V = combineShiftLeft(N, DAG))
return V;
if (N->getOpcode() == ISD::SRA)
- if (SDValue V = PerformSRACombine(N, DAG))
+ if (SDValue V = combineShiftRightAlgebraic(N, DAG))
return V;
// Try to fold this logical shift into a zero vector.
@@ -25471,17 +27850,17 @@ static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
return SDValue();
}
-// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
-// where both setccs reference the same FP CMP, and rewrite for CMPEQSS
-// and friends. Likewise for OR -> CMPNEQSS.
-static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+/// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
+/// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
+/// OR -> CMPNEQSS.
+static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
unsigned opcode;
// SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
// we're requiring SSE2 for both.
- if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
+ if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue CMP0 = N0->getOperand(1);
@@ -25530,7 +27909,7 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
// FIXME: need symbolic constants for these magic numbers.
// See X86ATTInstPrinter.cpp:printSSECC().
unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
- if (Subtarget->hasAVX512()) {
+ if (Subtarget.hasAVX512()) {
SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
CMP01,
DAG.getConstant(x86cc, DL, MVT::i8));
@@ -25547,7 +27926,7 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
bool is64BitFP = (CMP00.getValueType() == MVT::f64);
MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
- if (is64BitFP && !Subtarget->is64Bit()) {
+ if (is64BitFP && !Subtarget.is64Bit()) {
// On a 32-bit target, we cannot bitcast the 64-bit float to a
// 64-bit integer, since that's not a legal type. Since
// OnesOrZeroesF is all ones of all zeroes, we don't need all the
@@ -25574,34 +27953,47 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-/// CanFoldXORWithAllOnes - Test whether the XOR operand is a AllOnes vector
-/// so it can be folded inside ANDNP.
-static bool CanFoldXORWithAllOnes(const SDNode *N) {
+/// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
+static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
+ assert(N->getOpcode() == ISD::AND);
+
EVT VT = N->getValueType(0);
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDLoc DL(N);
- // Match direct AllOnes for 128 and 256-bit vectors
- if (ISD::isBuildVectorAllOnes(N))
- return true;
+ if (VT != MVT::v2i64 && VT != MVT::v4i64 &&
+ VT != MVT::v8i64 && VT != MVT::v16i32 &&
+ VT != MVT::v4i32 && VT != MVT::v8i32) // Legal with VLX
+ return SDValue();
- // Look through a bit convert.
- if (N->getOpcode() == ISD::BITCAST)
- N = N->getOperand(0).getNode();
-
- // Sometimes the operand may come from a insert_subvector building a 256-bit
- // allones vector
- if (VT.is256BitVector() &&
- N->getOpcode() == ISD::INSERT_SUBVECTOR) {
- SDValue V1 = N->getOperand(0);
- SDValue V2 = N->getOperand(1);
-
- if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
- V1.getOperand(0).getOpcode() == ISD::UNDEF &&
- ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
- ISD::isBuildVectorAllOnes(V2.getNode()))
- return true;
- }
+ // Canonicalize XOR to the left.
+ if (N1.getOpcode() == ISD::XOR)
+ std::swap(N0, N1);
- return false;
+ if (N0.getOpcode() != ISD::XOR)
+ return SDValue();
+
+ SDValue N00 = N0->getOperand(0);
+ SDValue N01 = N0->getOperand(1);
+
+ N01 = peekThroughBitcasts(N01);
+
+ // Either match a direct AllOnes for 128, 256, and 512-bit vectors, or an
+ // insert_subvector building a 256-bit AllOnes vector.
+ if (!ISD::isBuildVectorAllOnes(N01.getNode())) {
+ if (!VT.is256BitVector() || N01->getOpcode() != ISD::INSERT_SUBVECTOR)
+ return SDValue();
+
+ SDValue V1 = N01->getOperand(0);
+ SDValue V2 = N01->getOperand(1);
+ if (V1.getOpcode() != ISD::INSERT_SUBVECTOR ||
+ !V1.getOperand(0).isUndef() ||
+ !ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) ||
+ !ISD::isBuildVectorAllOnes(V2.getNode()))
+ return SDValue();
+ }
+ return DAG.getNode(X86ISD::ANDNP, DL, VT, N00, N1);
}
// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
@@ -25610,7 +28002,7 @@ static bool CanFoldXORWithAllOnes(const SDNode *N) {
// some of the transition sequences.
static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
if (!VT.is256BitVector())
return SDValue();
@@ -25660,8 +28052,7 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
if (RHSConstSplat) {
N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getVectorElementType(),
SDValue(RHSConstSplat, 0));
- SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
- N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
+ N1 = DAG.getSplatBuildVector(WideVT, DL, N1);
} else if (RHSTrunc) {
N1 = N1->getOperand(0);
}
@@ -25687,9 +28078,9 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
}
}
-static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
+static SDValue combineVectorZext(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDLoc DL(N);
@@ -25705,8 +28096,7 @@ static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
// The other side of the AND should be a splat of 2^C, where C
// is the number of bits in the source type.
- if (N1.getOpcode() == ISD::BITCAST)
- N1 = N1.getOperand(0);
+ N1 = peekThroughBitcasts(N1);
if (N1.getOpcode() != ISD::BUILD_VECTOR)
return SDValue();
BuildVectorSDNode *Vector = cast<BuildVectorSDNode>(N1);
@@ -25715,10 +28105,11 @@ static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
EVT SrcType = Shuffle->getValueType(0);
// We expect a single-source shuffle
- if (Shuffle->getOperand(1)->getOpcode() != ISD::UNDEF)
+ if (!Shuffle->getOperand(1)->isUndef())
return SDValue();
unsigned SrcSize = SrcType.getScalarSizeInBits();
+ unsigned NumElems = SrcType.getVectorNumElements();
APInt SplatValue, SplatUndef;
unsigned SplatBitSize;
@@ -25742,7 +28133,7 @@ static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
// the source and dest type.
unsigned ZextRatio = ResSize / SrcSize;
bool IsZext = true;
- for (unsigned i = 0; i < SrcType.getVectorNumElements(); ++i) {
+ for (unsigned i = 0; i != NumElems; ++i) {
if (i % ZextRatio) {
if (Shuffle->getMaskElt(i) > 0) {
// Expected undef
@@ -25765,8 +28156,7 @@ static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
// a shuffle of the form <0, k, k, k, 1, k, k, k> with zero
// (instead of undef) where the k elements come from the zero vector.
SmallVector<int, 8> Mask;
- unsigned NumElems = SrcType.getVectorNumElements();
- for (unsigned i = 0; i < NumElems; ++i)
+ for (unsigned i = 0; i != NumElems; ++i)
if (i % ZextRatio)
Mask.push_back(NumElems);
else
@@ -25781,7 +28171,7 @@ static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
/// types, try to convert this into a floating point logic node to avoid
/// unnecessary moves from SSE to integer registers.
static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
unsigned FPOpcode = ISD::DELETED_NODE;
if (N->getOpcode() == ISD::AND)
FPOpcode = X86ISD::FAND;
@@ -25798,8 +28188,8 @@ static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
SDValue N1 = N->getOperand(1);
SDLoc DL(N);
if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
- ((Subtarget->hasSSE1() && VT == MVT::i32) ||
- (Subtarget->hasSSE2() && VT == MVT::i64))) {
+ ((Subtarget.hasSSE1() && VT == MVT::i32) ||
+ (Subtarget.hasSSE2() && VT == MVT::i64))) {
SDValue N00 = N0.getOperand(0);
SDValue N10 = N1.getOperand(0);
EVT N00Type = N00.getValueType();
@@ -25812,21 +28202,63 @@ static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+/// If this is a PCMPEQ or PCMPGT result that is bitwise-anded with 1 (this is
+/// the x86 lowering of a SETCC + ZEXT), replace the 'and' with a shift-right to
+/// eliminate loading the vector constant mask value. This relies on the fact
+/// that a PCMP always creates an all-ones or all-zeros bitmask per element.
+static SDValue combinePCMPAnd1(SDNode *N, SelectionDAG &DAG) {
+ SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
+ SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
+
+ // TODO: Use AssertSext to mark any nodes that have the property of producing
+ // all-ones or all-zeros. Then check for that node rather than particular
+ // opcodes.
+ if (Op0.getOpcode() != X86ISD::PCMPEQ && Op0.getOpcode() != X86ISD::PCMPGT)
+ return SDValue();
+
+ // The existence of the PCMP node guarantees that we have the required SSE2 or
+ // AVX2 for a shift of this vector type, but there is no vector shift by
+ // immediate for a vector with byte elements (PSRLB). 512-bit vectors use the
+ // masked compare nodes, so they should not make it here.
+ EVT VT0 = Op0.getValueType();
+ EVT VT1 = Op1.getValueType();
+ unsigned EltBitWidth = VT0.getScalarType().getSizeInBits();
+ if (VT0 != VT1 || EltBitWidth == 8)
+ return SDValue();
+
+ assert(VT0.getSizeInBits() == 128 || VT0.getSizeInBits() == 256);
+
+ APInt SplatVal;
+ if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) || SplatVal != 1)
+ return SDValue();
+
+ SDLoc DL(N);
+ SDValue ShAmt = DAG.getConstant(EltBitWidth - 1, DL, MVT::i8);
+ SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
+ return DAG.getBitcast(N->getValueType(0), Shift);
+}
+
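The shift-for-AND trick above works because each PCMP lane is either all ones or all zeros, so its low bit equals its sign bit. A small C++ sketch of the per-lane equivalence (assuming 32-bit elements, hence the shift by 31):

#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  // A PCMPEQ/PCMPGT lane can only be all-ones or all-zeros.
  for (uint32_t Lane : {0xFFFFFFFFu, 0x00000000u}) {
    // (and Lane, 1) == (lshr Lane, 31) for such lanes, so the combine can
    // replace the constant-mask AND with a single VSRLI by EltBitWidth - 1.
    assert((Lane & 1u) == (Lane >> 31));
  }
  return 0;
}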
+static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
- if (SDValue Zext = VectorZextCombine(N, DAG, DCI, Subtarget))
+ if (SDValue Zext = combineVectorZext(N, DAG, DCI, Subtarget))
return Zext;
- if (SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget))
+ if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
return R;
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
return FPLogic;
+ if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
+ return R;
+
+ if (SDValue ShiftRight = combinePCMPAnd1(N, DAG))
+ return ShiftRight;
+
EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -25834,143 +28266,176 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
// Create BEXTR instructions
// BEXTR is ((X >> imm) & (2**size-1))
- if (VT == MVT::i32 || VT == MVT::i64) {
- // Check for BEXTR.
- if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
- (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
- ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
- ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
- if (MaskNode && ShiftNode) {
- uint64_t Mask = MaskNode->getZExtValue();
- uint64_t Shift = ShiftNode->getZExtValue();
- if (isMask_64(Mask)) {
- uint64_t MaskSize = countPopulation(Mask);
- if (Shift + MaskSize <= VT.getSizeInBits())
- return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
- DAG.getConstant(Shift | (MaskSize << 8), DL,
- VT));
- }
- }
- } // BEXTR
+ if (VT != MVT::i32 && VT != MVT::i64)
+ return SDValue();
+ if (!Subtarget.hasBMI() && !Subtarget.hasTBM())
return SDValue();
+ if (N0.getOpcode() != ISD::SRA && N0.getOpcode() != ISD::SRL)
+ return SDValue();
+
+ ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
+ ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+ if (MaskNode && ShiftNode) {
+ uint64_t Mask = MaskNode->getZExtValue();
+ uint64_t Shift = ShiftNode->getZExtValue();
+ if (isMask_64(Mask)) {
+ uint64_t MaskSize = countPopulation(Mask);
+ if (Shift + MaskSize <= VT.getSizeInBits())
+ return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
+ DAG.getConstant(Shift | (MaskSize << 8), DL,
+ VT));
+ }
}
+ return SDValue();
+}
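BEXTR's control operand packs the start bit in bits 7:0 and the field length in bits 15:8, which is why the combine builds the constant Shift | (MaskSize << 8). A sketch of those semantics (bextr below is an illustrative software model, not the intrinsic):

#include <cassert>
#include <cstdint>

// Software model of BEXTR: extract Len bits of X starting at bit Start.
static uint64_t bextr(uint64_t X, uint64_t Control) {
  uint64_t Start = Control & 0xFF;
  uint64_t Len = (Control >> 8) & 0xFF;
  if (Len >= 64)
    return X >> Start;
  return (X >> Start) & ((1ULL << Len) - 1);
}

int main() {
  uint64_t X = 0xABCD1234;
  // ((X >> 8) & 0xFFF) matches with Shift = 8 and MaskSize = 12.
  assert(((X >> 8) & 0xFFF) == bextr(X, 8 | (12 << 8)));
  return 0;
}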
- // Want to form ANDNP nodes:
- // 1) In the hopes of then easily combining them with OR and AND nodes
- // to form PBLEND/PSIGN.
- // 2) To match ANDN packed intrinsics
- if (VT != MVT::v2i64 && VT != MVT::v4i64)
+// Try to fold:
+// (or (and (m, y), (pandn m, x)))
+// into:
+// (vselect m, x, y)
+// As a special case, try to fold:
+// (or (and (m, (sub 0, x)), (pandn m, x)))
+// into:
+// (sub (xor X, M), M)
+static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ assert(N->getOpcode() == ISD::OR);
+
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ EVT VT = N->getValueType(0);
+
+ if (!((VT == MVT::v2i64) || (VT == MVT::v4i64 && Subtarget.hasInt256())))
+ return SDValue();
+ assert(Subtarget.hasSSE2() && "Unexpected i64 vector without SSE2!");
+
+ // Canonicalize pandn to RHS
+ if (N0.getOpcode() == X86ISD::ANDNP)
+ std::swap(N0, N1);
+
+ if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
return SDValue();
- // Check LHS for vnot
- if (N0.getOpcode() == ISD::XOR &&
- //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
- CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
- return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
+ SDValue Mask = N1.getOperand(0);
+ SDValue X = N1.getOperand(1);
+ SDValue Y;
+ if (N0.getOperand(0) == Mask)
+ Y = N0.getOperand(1);
+ if (N0.getOperand(1) == Mask)
+ Y = N0.getOperand(0);
- // Check RHS for vnot
- if (N1.getOpcode() == ISD::XOR &&
- //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
- CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
- return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
+ // Check to see if the mask appeared in both the AND and ANDNP.
+ if (!Y.getNode())
+ return SDValue();
- return SDValue();
+ // Validate that X, Y, and Mask are bitcasts, and see through them.
+ Mask = peekThroughBitcasts(Mask);
+ X = peekThroughBitcasts(X);
+ Y = peekThroughBitcasts(Y);
+
+ EVT MaskVT = Mask.getValueType();
+
+ // Validate that the Mask operand is a vector sra node.
+ // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
+ // there is no psrai.b
+ unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
+ unsigned SraAmt = ~0;
+ if (Mask.getOpcode() == ISD::SRA) {
+ if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
+ if (auto *AmtConst = AmtBV->getConstantSplatNode())
+ SraAmt = AmtConst->getZExtValue();
+ } else if (Mask.getOpcode() == X86ISD::VSRAI) {
+ SDValue SraC = Mask.getOperand(1);
+ SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
+ }
+ if ((SraAmt + 1) != EltBits)
+ return SDValue();
+
+ SDLoc DL(N);
+
+ // Try to match:
+ // (or (and (M, (sub 0, X)), (pandn M, X)))
+ // which is a special case of vselect:
+ // (vselect M, (sub 0, X), X)
+ // Per:
+ // http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
+ // We know that, if fNegate is 0 or 1:
+ // (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
+ //
+ // Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
+ // ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
+ // ( M ? -X : X) == ((X ^ M ) + (M & 1))
+ // This lets us transform our vselect to:
+ // (add (xor X, M), (and M, 1))
+ // And further to:
+ // (sub (xor X, M), M)
+ if (X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
+ auto IsNegV = [](SDNode *N, SDValue V) {
+ return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
+ ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
+ };
+ SDValue V;
+ if (IsNegV(Y.getNode(), X))
+ V = X;
+ else if (IsNegV(X.getNode(), Y))
+ V = Y;
+
+ if (V) {
+ assert(EltBits == 8 || EltBits == 16 || EltBits == 32);
+ SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
+ SDValue SubOp2 = Mask;
+
+ // If the negate was on the false side of the select, then
+ // the operands of the SUB need to be swapped. PR 27251.
+ // This is because the pattern being matched above is
+ // (vselect M, (sub (0, X), X) -> (sub (xor X, M), M)
+ // but if the pattern matched was
+ // (vselect M, X, (sub (0, X))), that is really negation of the pattern
+ // above, -(vselect M, (sub 0, X), X), and therefore the replacement
+ // pattern also needs to be a negation of the replacement pattern above.
+ // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
+ // sub accomplishes the negation of the replacement pattern.
+ if (V == Y)
+ std::swap(SubOp1, SubOp2);
+
+ return DAG.getBitcast(VT,
+ DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2));
+ }
+ }
+
+ // PBLENDVB is only available on SSE 4.1.
+ if (!Subtarget.hasSSE41())
+ return SDValue();
+
+ MVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
+
+ X = DAG.getBitcast(BlendVT, X);
+ Y = DAG.getBitcast(BlendVT, Y);
+ Mask = DAG.getBitcast(BlendVT, Mask);
+ Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
+ return DAG.getBitcast(VT, Mask);
}
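The conditional-negate rewrite above is easy to check in scalar form: with a mask M that is all zeros or all ones, (M ? -X : X) equals (X ^ M) - M. A minimal C++ sketch of that identity:

#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  int32_t X = 42;
  for (int32_t M : {0, -1}) { // M behaves like one PCMP mask lane.
    int32_t Expected = (M == -1) ? -X : X; // (vselect M, (sub 0, X), X)
    // The replacement emitted above: (sub (xor X, M), M).
    assert(((X ^ M) - M) == Expected);
  }
  return 0;
}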
-static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
- if (SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget))
+ if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
return R;
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
return FPLogic;
+ if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
+ return R;
+
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
- // look for psign/blend
- if (VT == MVT::v2i64 || VT == MVT::v4i64) {
- if (!Subtarget->hasSSSE3() ||
- (VT == MVT::v4i64 && !Subtarget->hasInt256()))
- return SDValue();
-
- // Canonicalize pandn to RHS
- if (N0.getOpcode() == X86ISD::ANDNP)
- std::swap(N0, N1);
- // or (and (m, y), (pandn m, x))
- if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
- SDValue Mask = N1.getOperand(0);
- SDValue X = N1.getOperand(1);
- SDValue Y;
- if (N0.getOperand(0) == Mask)
- Y = N0.getOperand(1);
- if (N0.getOperand(1) == Mask)
- Y = N0.getOperand(0);
-
- // Check to see if the mask appeared in both the AND and ANDNP and
- if (!Y.getNode())
- return SDValue();
-
- // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
- // Look through mask bitcast.
- if (Mask.getOpcode() == ISD::BITCAST)
- Mask = Mask.getOperand(0);
- if (X.getOpcode() == ISD::BITCAST)
- X = X.getOperand(0);
- if (Y.getOpcode() == ISD::BITCAST)
- Y = Y.getOperand(0);
-
- EVT MaskVT = Mask.getValueType();
-
- // Validate that the Mask operand is a vector sra node.
- // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
- // there is no psrai.b
- unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
- unsigned SraAmt = ~0;
- if (Mask.getOpcode() == ISD::SRA) {
- if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
- if (auto *AmtConst = AmtBV->getConstantSplatNode())
- SraAmt = AmtConst->getZExtValue();
- } else if (Mask.getOpcode() == X86ISD::VSRAI) {
- SDValue SraC = Mask.getOperand(1);
- SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
- }
- if ((SraAmt + 1) != EltBits)
- return SDValue();
-
- SDLoc DL(N);
-
- // Now we know we at least have a plendvb with the mask val. See if
- // we can form a psignb/w/d.
- // psign = x.type == y.type == mask.type && y = sub(0, x);
- if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
- ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
- X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
- assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
- "Unsupported VT for PSIGN");
- Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
- return DAG.getBitcast(VT, Mask);
- }
- // PBLENDVB only available on SSE 4.1
- if (!Subtarget->hasSSE41())
- return SDValue();
-
- MVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
-
- X = DAG.getBitcast(BlendVT, X);
- Y = DAG.getBitcast(BlendVT, Y);
- Mask = DAG.getBitcast(BlendVT, Mask);
- Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
- return DAG.getBitcast(VT, Mask);
- }
- }
-
if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
return SDValue();
@@ -25982,7 +28447,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
// series of shifts/or that would otherwise be generated.
// Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
// have higher latencies and we are not optimizing for size.
- if (!OptForSize && Subtarget->isSHLDSlow())
+ if (!OptForSize && Subtarget.isSHLDSlow())
return SDValue();
if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
@@ -26040,7 +28505,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
}
// Generate NEG and CMOV for integer abs.
-static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue combineIntegerAbs(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
// Since X86 does not have CMOV for 8-bit integer, we don't convert
@@ -26073,13 +28538,14 @@ static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
-// Try to turn tests against the signbit in the form of:
-// XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
-// into:
-// SETGT(X, -1)
+/// Try to turn tests against the signbit in the form of:
+/// XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
+/// into:
+/// SETGT(X, -1)
static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
- // This is only worth doing if the output type is i8.
- if (N->getValueType(0) != MVT::i8)
+ // This is only worth doing if the output type is i8 or i1.
+ EVT ResultType = N->getValueType(0);
+ if (ResultType != MVT::i8 && ResultType != MVT::i1)
return SDValue();
SDValue N0 = N->getOperand(0);
@@ -26114,22 +28580,78 @@ static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
SDLoc DL(N);
SDValue ShiftOp = Shift.getOperand(0);
EVT ShiftOpTy = ShiftOp.getValueType();
- SDValue Cond = DAG.getSetCC(DL, MVT::i8, ShiftOp,
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
+ *DAG.getContext(), ResultType);
+ SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
+ if (SetCCResultType != ResultType)
+ Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
return Cond;
}
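The scalar fold is straightforward to sanity-check: shifting the sign bit down and XORing with 1 asks "is the sign bit clear?", which is the same question as X > -1. A small C++ sketch (assuming a 32-bit X, hence the shift by 31):

#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  for (int32_t X : {-5, 0, 7}) {
    // xor (trunc (srl X, 31)), 1 -- the pattern matched above.
    uint8_t Folded = (uint8_t)(((uint32_t)X >> 31) ^ 1);
    // setgt X, -1 -- the replacement.
    assert(Folded == (uint8_t)(X > -1));
  }
  return 0;
}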
-static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
+/// Turn vector tests of the signbit in the form of:
+/// xor (sra X, elt_size(X)-1), -1
+/// into:
+/// pcmpgt X, -1
+///
+/// This should be called before type legalization because the pattern may not
+/// persist after that.
+static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ EVT VT = N->getValueType(0);
+ if (!VT.isSimple())
+ return SDValue();
+
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: return SDValue();
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32: if (!Subtarget.hasSSE2()) return SDValue(); break;
+ case MVT::v2i64: if (!Subtarget.hasSSE42()) return SDValue(); break;
+ case MVT::v32i8:
+ case MVT::v16i16:
+ case MVT::v8i32:
+ case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
+ }
+
+ // There must be a shift right algebraic before the xor, and the xor must be a
+ // 'not' operation.
+ SDValue Shift = N->getOperand(0);
+ SDValue Ones = N->getOperand(1);
+ if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
+ !ISD::isBuildVectorAllOnes(Ones.getNode()))
+ return SDValue();
+
+ // The shift should be smearing the sign bit across each vector element.
+ auto *ShiftBV = dyn_cast<BuildVectorSDNode>(Shift.getOperand(1));
+ if (!ShiftBV)
+ return SDValue();
+
+ EVT ShiftEltTy = Shift.getValueType().getVectorElementType();
+ auto *ShiftAmt = ShiftBV->getConstantSplatNode();
+ if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1)
+ return SDValue();
+
+ // Create a greater-than comparison against -1. We don't use the more obvious
+ // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
+ return DAG.getNode(X86ISD::PCMPGT, SDLoc(N), VT, Shift.getOperand(0), Ones);
+}
+
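Per lane, the vector version is the same trick with inverted polarity: smearing the sign bit with an arithmetic shift and then NOTing it yields all-ones exactly when the element is non-negative, i.e. PCMPGT X, -1. A C++ sketch (assuming arithmetic >> on signed values, as x86 provides):

#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  for (int32_t X : {-9, 0, 123}) {
    int32_t NotSmeared = (X >> 31) ^ -1; // xor (sra X, 31), -1
    int32_t Pcmpgt = (X > -1) ? -1 : 0;  // one PCMPGT lane against -1
    assert(NotSmeared == Pcmpgt);
  }
  return 0;
}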
+static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
+ if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
+ return Cmp;
+
if (DCI.isBeforeLegalizeOps())
return SDValue();
if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
return RV;
- if (Subtarget->hasCMov())
- if (SDValue RV = performIntegerAbsCombine(N, DAG))
+ if (Subtarget.hasCMov())
+ if (SDValue RV = combineIntegerAbs(N, DAG))
return RV;
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
@@ -26142,7 +28664,8 @@ static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
/// which is c = (a + b + 1) / 2, and replace this operation with the efficient
/// X86ISD::AVG instruction.
static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
- const X86Subtarget *Subtarget, SDLoc DL) {
+ const X86Subtarget &Subtarget,
+ const SDLoc &DL) {
if (!VT.isVector() || !VT.isSimple())
return SDValue();
EVT InVT = In.getValueType();
@@ -26159,10 +28682,12 @@ static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
return SDValue();
- if (Subtarget->hasAVX512()) {
+ if (!Subtarget.hasSSE2())
+ return SDValue();
+ if (Subtarget.hasAVX512()) {
if (VT.getSizeInBits() > 512)
return SDValue();
- } else if (Subtarget->hasAVX2()) {
+ } else if (Subtarget.hasAVX2()) {
if (VT.getSizeInBits() > 256)
return SDValue();
} else {
@@ -26221,10 +28746,8 @@ static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
Operands[0].getOperand(0).getValueType() == VT) {
// The pattern is detected. Subtract one from the constant vector, then
// demote it and emit X86ISD::AVG instruction.
- SDValue One = DAG.getConstant(1, DL, InScalarVT);
- SDValue Ones = DAG.getNode(ISD::BUILD_VECTOR, DL, InVT,
- SmallVector<SDValue, 8>(NumElems, One));
- Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], Ones);
+ SDValue VecOnes = DAG.getConstant(1, DL, InVT);
+ Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
return DAG.getNode(X86ISD::AVG, DL, VT, Operands[0].getOperand(0),
Operands[1]);
@@ -26258,10 +28781,9 @@ static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
return SDValue();
}
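The pattern being detected is the overflow-free rounding average that PAVGB/PAVGW compute in hardware. A per-lane C++ model (pavgb below is an illustrative helper, not the intrinsic):

#include <cassert>
#include <cstdint>

// Rounding average computed in a wider type, i.e. trunc((a + b + 1) / 2).
static uint8_t pavgb(uint8_t A, uint8_t B) {
  return (uint8_t)(((uint16_t)A + (uint16_t)B + 1) >> 1);
}

int main() {
  assert(pavgb(255, 255) == 255); // widening avoids the u8 overflow
  assert(pavgb(10, 13) == 12);    // the +1 rounds halves upward
  return 0;
}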
-/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
-static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
LoadSDNode *Ld = cast<LoadSDNode>(N);
EVT RegVT = Ld->getValueType(0);
EVT MemVT = Ld->getMemoryVT();
@@ -26283,41 +28805,180 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
SDValue Ptr = Ld->getBasePtr();
- SDValue Increment =
- DAG.getConstant(16, dl, TLI.getPointerTy(DAG.getDataLayout()));
EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
NumElems/2);
- SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
- Ld->getPointerInfo(), Ld->isVolatile(),
- Ld->isNonTemporal(), Ld->isInvariant(),
- Alignment);
- Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
- SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
- Ld->getPointerInfo(), Ld->isVolatile(),
- Ld->isNonTemporal(), Ld->isInvariant(),
- std::min(16U, Alignment));
+ SDValue Load1 =
+ DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
+ Alignment, Ld->getMemOperand()->getFlags());
+
+ Ptr = DAG.getMemBasePlusOffset(Ptr, 16, dl);
+ SDValue Load2 =
+ DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
+ std::min(16U, Alignment), Ld->getMemOperand()->getFlags());
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Load1.getValue(1),
Load2.getValue(1));
SDValue NewVec = DAG.getUNDEF(RegVT);
- NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
- NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
+ NewVec = insert128BitVector(NewVec, Load1, 0, DAG, dl);
+ NewVec = insert128BitVector(NewVec, Load2, NumElems / 2, DAG, dl);
return DCI.CombineTo(N, NewVec, TF, true);
}
return SDValue();
}
-/// PerformMLOADCombine - Resolve extending loads
-static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+/// If V is a build vector of boolean constants and exactly one of those
+/// constants is true, return the operand index of that true element.
+/// Otherwise, return -1.
+static int getOneTrueElt(SDValue V) {
+ // This needs to be a build vector of booleans.
+ // TODO: Checking for the i1 type matches the IR definition for the mask,
+ // but the mask check could be loosened to i8 or other types. That might
+  // also require checking more than 'allOnesValue'; e.g., the x86 HW
+ // instructions only require that the MSB is set for each mask element.
+ // The ISD::MSTORE comments/definition do not specify how the mask operand
+ // is formatted.
+ auto *BV = dyn_cast<BuildVectorSDNode>(V);
+ if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
+ return -1;
+
+ int TrueIndex = -1;
+ unsigned NumElts = BV->getValueType(0).getVectorNumElements();
+ for (unsigned i = 0; i < NumElts; ++i) {
+ const SDValue &Op = BV->getOperand(i);
+ if (Op.isUndef())
+ continue;
+ auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
+ if (!ConstNode)
+ return -1;
+ if (ConstNode->getAPIntValue().isAllOnesValue()) {
+ // If we already found a one, this is too many.
+ if (TrueIndex >= 0)
+ return -1;
+ TrueIndex = i;
+ }
+ }
+ return TrueIndex;
+}
+
+/// Given a masked memory load/store operation, return true if it has one mask
+/// bit set. If it has one mask bit set, then also return the memory address of
+/// the scalar element to load/store, the vector index to insert/extract that
+/// scalar element, and the alignment for the scalar memory access.
+static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
+ SelectionDAG &DAG, SDValue &Addr,
+ SDValue &Index, unsigned &Alignment) {
+ int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
+ if (TrueMaskElt < 0)
+ return false;
+
+ // Get the address of the one scalar element that is specified by the mask
+ // using the appropriate offset from the base pointer.
+ EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
+ Addr = MaskedOp->getBasePtr();
+ if (TrueMaskElt != 0) {
+ unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
+ Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
+ }
+
+ Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
+ Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
+ return true;
+}
+
+/// If exactly one element of the mask is set for a non-extending masked load,
+/// it is a scalar load and vector insert.
+/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
+/// mask have already been optimized in IR, so we don't bother with those here.
+static SDValue
+reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
+ // However, some target hooks may need to be added to know when the transform
+ // is profitable. Endianness would also have to be considered.
+
+ SDValue Addr, VecIndex;
+ unsigned Alignment;
+ if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
+ return SDValue();
+
+ // Load the one scalar element that is specified by the mask using the
+ // appropriate offset from the base pointer.
+ SDLoc DL(ML);
+ EVT VT = ML->getValueType(0);
+ EVT EltVT = VT.getVectorElementType();
+ SDValue Load =
+ DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
+ Alignment, ML->getMemOperand()->getFlags());
+
+ // Insert the loaded element into the appropriate place in the vector.
+ SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, ML->getSrc0(),
+ Load, VecIndex);
+ return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
+}
+
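In scalar terms, a masked load whose mask has a single true lane only ever reads one element, so a plain scalar load plus a vector insert is an exact replacement. A small C++ sketch of the equivalence (illustrative semantics only):

#include <array>
#include <cassert>

int main() {
  std::array<int, 4> Mem = {10, 20, 30, 40};
  std::array<int, 4> Src0 = {-1, -1, -1, -1};
  bool Mask[4] = {false, false, true, false}; // exactly one lane enabled

  // Reference masked-load semantics: enabled lanes read memory, the rest
  // keep the pass-through value.
  std::array<int, 4> Masked = Src0;
  for (int i = 0; i < 4; ++i)
    if (Mask[i]) Masked[i] = Mem[i];

  // The replacement built above: one scalar load + one vector insert.
  std::array<int, 4> Scalarized = Src0;
  Scalarized[2] = Mem[2];

  assert(Masked == Scalarized);
  return 0;
}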
+static SDValue
+combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
+ return SDValue();
+
+ SDLoc DL(ML);
+ EVT VT = ML->getValueType(0);
+
+ // If we are loading the first and last elements of a vector, it is safe and
+ // always faster to load the whole vector. Replace the masked load with a
+ // vector load and select.
+ unsigned NumElts = VT.getVectorNumElements();
+ BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
+ bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
+ bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
+ if (LoadFirstElt && LoadLastElt) {
+ SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
+ ML->getMemOperand());
+ SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd, ML->getSrc0());
+ return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
+ }
+
+ // Convert a masked load with a constant mask into a masked load and a select.
+ // This allows the select operation to use a faster kind of select instruction
+ // (for example, vblendvps -> vblendps).
+
+ // Don't try this if the pass-through operand is already undefined. That would
+ // cause an infinite loop because that's what we're about to create.
+ if (ML->getSrc0().isUndef())
+ return SDValue();
+
+ // The new masked load has an undef pass-through operand. The select uses the
+ // original pass-through operand.
+ SDValue NewML = DAG.getMaskedLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
+ ML->getMask(), DAG.getUNDEF(VT),
+ ML->getMemoryVT(), ML->getMemOperand(),
+ ML->getExtensionType());
+ SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML, ML->getSrc0());
+
+ return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
+}
+
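The first-and-last-element condition is what makes the whole-vector load safe: if lane 0 and lane NumElts-1 are dereferenced anyway, every byte in between lies inside the same accessed region. A C++ sketch of the load-then-select replacement (illustrative semantics only):

#include <array>
#include <cassert>

int main() {
  std::array<int, 4> Mem = {1, 2, 3, 4};
  std::array<int, 4> Src0 = {9, 9, 9, 9};
  bool Mask[4] = {true, false, false, true}; // first and last lanes set

  // Reference masked-load semantics.
  std::array<int, 4> Masked = Src0;
  for (int i = 0; i < 4; ++i)
    if (Mask[i]) Masked[i] = Mem[i];

  // Replacement: load the whole vector, then blend with the original mask.
  std::array<int, 4> Blend;
  for (int i = 0; i < 4; ++i)
    Blend[i] = Mask[i] ? Mem[i] : Src0[i];

  assert(Blend == Masked);
  return 0;
}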
+static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
+ if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
+ if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
+ return ScalarLoad;
+ // TODO: Do some AVX512 subsets benefit from this transform?
+ if (!Subtarget.hasAVX512())
+ if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
+ return Blend;
+ }
+
if (Mld->getExtensionType() != ISD::SEXTLOAD)
return SDValue();
+ // Resolve extending loads.
EVT VT = Mld->getValueType(0);
unsigned NumElems = VT.getVectorNumElements();
EVT LdVT = Mld->getMemoryVT();
@@ -26326,21 +28987,21 @@ static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
assert(LdVT != VT && "Cannot extend to the same type");
unsigned ToSz = VT.getVectorElementType().getSizeInBits();
unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
- // From, To sizes and ElemCount must be pow of two
+ // From/To sizes and ElemCount must be pow of two.
assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
"Unexpected size for extending masked load");
unsigned SizeRatio = ToSz / FromSz;
assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
- // Create a type on which we perform the shuffle
+ // Create a type on which we perform the shuffle.
EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
LdVT.getScalarType(), NumElems*SizeRatio);
assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
- // Convert Src0 value
+ // Convert Src0 value.
SDValue WideSrc0 = DAG.getBitcast(WideVecVT, Mld->getSrc0());
- if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
+ if (!Mld->getSrc0().isUndef()) {
SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i] = i * SizeRatio;
@@ -26349,13 +29010,13 @@ static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
"WideVecVT should be legal");
WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
- DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
+ DAG.getUNDEF(WideVecVT), ShuffleVec);
}
- // Prepare the new mask
+ // Prepare the new mask.
SDValue NewMask;
SDValue Mask = Mld->getMask();
if (Mask.getValueType() == VT) {
- // Mask and original value have the same type
+ // Mask and original value have the same type.
NewMask = DAG.getBitcast(WideVecVT, Mask);
SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
for (unsigned i = 0; i != NumElems; ++i)
@@ -26364,9 +29025,8 @@ static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
ShuffleVec[i] = NumElems * SizeRatio;
NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
DAG.getConstant(0, dl, WideVecVT),
- &ShuffleVec[0]);
- }
- else {
+ ShuffleVec);
+ } else {
assert(Mask.getValueType().getVectorElementType() == MVT::i1);
unsigned WidenNumElts = NumElems*SizeRatio;
unsigned MaskNumElts = VT.getVectorNumElements();
@@ -26390,13 +29050,41 @@ static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
}
-/// PerformMSTORECombine - Resolve truncating stores
-static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+
+/// If exactly one element of the mask is set for a non-truncating masked store,
+/// it is a vector extract and scalar store.
+/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
+/// mask have already been optimized in IR, so we don't bother with those here.
+static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
+ SelectionDAG &DAG) {
+ // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
+ // However, some target hooks may need to be added to know when the transform
+ // is profitable. Endianness would also have to be considered.
+
+ SDValue Addr, VecIndex;
+ unsigned Alignment;
+ if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
+ return SDValue();
+
+ // Extract the one scalar element that is actually being stored.
+ SDLoc DL(MS);
+ EVT VT = MS->getValue().getValueType();
+ EVT EltVT = VT.getVectorElementType();
+ SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
+ MS->getValue(), VecIndex);
+
+ // Store that element at the appropriate offset from the base pointer.
+ return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
+ Alignment, MS->getMemOperand()->getFlags());
+}
+
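This is the store-side mirror of the scalar-load combine: with one true mask lane, only one element's worth of memory may be written, so an extract plus scalar store is exact. A C++ sketch (illustrative semantics only):

#include <array>
#include <cassert>

int main() {
  std::array<int, 4> Mem = {0, 0, 0, 0};
  std::array<int, 4> Val = {5, 6, 7, 8};
  bool Mask[4] = {false, true, false, false}; // exactly one lane enabled

  // Reference masked-store semantics: only enabled lanes touch memory.
  std::array<int, 4> Expected = Mem;
  for (int i = 0; i < 4; ++i)
    if (Mask[i]) Expected[i] = Val[i];

  // The replacement built above: extract lane 1, store it as a scalar.
  Mem[1] = Val[1];

  assert(Mem == Expected);
  return 0;
}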
+static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
if (!Mst->isTruncatingStore())
- return SDValue();
+ return reduceMaskedStoreToScalarStore(Mst, DAG);
+ // Resolve truncating stores.
EVT VT = Mst->getValue().getValueType();
unsigned NumElems = VT.getVectorNumElements();
EVT StVT = Mst->getMemoryVT();
@@ -26415,7 +29103,7 @@ static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
if (TLI.isTruncStoreLegal(VT, StVT))
return SDValue();
- // From, To sizes and ElemCount must be pow of two
+ // From/To sizes and ElemCount must be pow of two.
assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
"Unexpected size for truncating masked store");
// We are going to use the original vector elt for storing.
@@ -26426,7 +29114,7 @@ static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
unsigned SizeRatio = FromSz / ToSz;
assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
- // Create a type on which we perform the shuffle
+ // Create a type on which we perform the shuffle.
EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
StVT.getScalarType(), NumElems*SizeRatio);
@@ -26443,12 +29131,12 @@ static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
DAG.getUNDEF(WideVecVT),
- &ShuffleVec[0]);
+ ShuffleVec);
SDValue NewMask;
SDValue Mask = Mst->getMask();
if (Mask.getValueType() == VT) {
- // Mask and original value have the same type
+ // Mask and original value have the same type.
NewMask = DAG.getBitcast(WideVecVT, Mask);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i] = i * SizeRatio;
@@ -26456,9 +29144,8 @@ static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
ShuffleVec[i] = NumElems*SizeRatio;
NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
DAG.getConstant(0, dl, WideVecVT),
- &ShuffleVec[0]);
- }
- else {
+ ShuffleVec);
+ } else {
assert(Mask.getValueType().getVectorElementType() == MVT::i1);
unsigned WidenNumElts = NumElems*SizeRatio;
unsigned MaskNumElts = VT.getVectorNumElements();
@@ -26479,9 +29166,9 @@ static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
Mst->getBasePtr(), NewMask, StVT,
Mst->getMemOperand(), false);
}
-/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
-static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+
+static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
StoreSDNode *St = cast<StoreSDNode>(N);
EVT VT = St->getValue().getValueType();
EVT StVT = St->getMemoryVT();
@@ -26496,26 +29183,24 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
unsigned Alignment = St->getAlignment();
if (VT.is256BitVector() && StVT == VT &&
TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
- AddressSpace, Alignment, &Fast) && !Fast) {
+ AddressSpace, Alignment, &Fast) &&
+ !Fast) {
unsigned NumElems = VT.getVectorNumElements();
if (NumElems < 2)
return SDValue();
- SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
- SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
+ SDValue Value0 = extract128BitVector(StoredVal, 0, DAG, dl);
+ SDValue Value1 = extract128BitVector(StoredVal, NumElems / 2, DAG, dl);
- SDValue Stride =
- DAG.getConstant(16, dl, TLI.getPointerTy(DAG.getDataLayout()));
SDValue Ptr0 = St->getBasePtr();
- SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
-
- SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
- St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(), Alignment);
- SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
- St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(),
- std::min(16U, Alignment));
+ SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 16, dl);
+
+ SDValue Ch0 =
+ DAG.getStore(St->getChain(), dl, Value0, Ptr0, St->getPointerInfo(),
+ Alignment, St->getMemOperand()->getFlags());
+ SDValue Ch1 =
+ DAG.getStore(St->getChain(), dl, Value1, Ptr1, St->getPointerInfo(),
+ std::min(16U, Alignment), St->getMemOperand()->getFlags());
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
}
@@ -26526,12 +29211,11 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
// Check if we can detect an AVG pattern from the truncation. If yes,
// replace the trunc store by a normal store with the result of X86ISD::AVG
// instruction.
- SDValue Avg =
- detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG, Subtarget, dl);
- if (Avg.getNode())
+ if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
+ Subtarget, dl))
return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
- St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(), St->getAlignment());
+ St->getPointerInfo(), St->getAlignment(),
+ St->getMemOperand()->getFlags());
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned NumElems = VT.getVectorNumElements();
@@ -26543,7 +29227,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
// vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
// are designated for truncate store.
// In this case we don't need any further transformations.
- if (TLI.isTruncStoreLegal(VT, StVT))
+ if (TLI.isTruncStoreLegalOrCustom(VT, StVT))
return SDValue();
// From, To sizes and ElemCount must be pow of two
@@ -26573,7 +29257,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
DAG.getUNDEF(WideVecVT),
- &ShuffleVec[0]);
+ ShuffleVec);
// At this point all of the data is stored at the bottom of the
// register. We now need to save it to mem.
@@ -26595,8 +29279,6 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
SDValue ShuffWide = DAG.getBitcast(StoreVecVT, Shuff);
SmallVector<SDValue, 8> Chains;
- SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, dl,
- TLI.getPointerTy(DAG.getDataLayout()));
SDValue Ptr = St->getBasePtr();
// Perform one or more big stores into memory.
@@ -26604,10 +29286,10 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
StoreType, ShuffWide,
DAG.getIntPtrConstant(i, dl));
- SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
- St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(), St->getAlignment());
- Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
+ SDValue Ch =
+ DAG.getStore(St->getChain(), dl, SubVec, Ptr, St->getPointerInfo(),
+ St->getAlignment(), St->getMemOperand()->getFlags());
+ Ptr = DAG.getMemBasePlusOffset(Ptr, StoreType.getStoreSize(), dl);
Chains.push_back(Ch);
}
@@ -26626,9 +29308,9 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
const Function *F = DAG.getMachineFunction().getFunction();
bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
bool F64IsLegal =
- !Subtarget->useSoftFloat() && !NoImplicitFloatOps && Subtarget->hasSSE2();
+ !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
if ((VT.isVector() ||
- (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
+ (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit())) &&
isa<LoadSDNode>(St->getValue()) &&
!cast<LoadSDNode>(St->getValue())->isVolatile() &&
St->getChain().hasOneUse() && !St->isVolatile()) {
@@ -26667,58 +29349,49 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
// If we are a 64-bit capable x86, lower to a single movq load/store pair.
// Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
// pair instead.
- if (Subtarget->is64Bit() || F64IsLegal) {
- MVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
+ if (Subtarget.is64Bit() || F64IsLegal) {
+ MVT LdVT = Subtarget.is64Bit() ? MVT::i64 : MVT::f64;
SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
- Ld->getPointerInfo(), Ld->isVolatile(),
- Ld->isNonTemporal(), Ld->isInvariant(),
- Ld->getAlignment());
+ Ld->getPointerInfo(), Ld->getAlignment(),
+ Ld->getMemOperand()->getFlags());
SDValue NewChain = NewLd.getValue(1);
- if (TokenFactorIndex != -1) {
+ if (TokenFactorIndex >= 0) {
Ops.push_back(NewChain);
NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
}
return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
- St->getPointerInfo(),
- St->isVolatile(), St->isNonTemporal(),
- St->getAlignment());
+ St->getPointerInfo(), St->getAlignment(),
+ St->getMemOperand()->getFlags());
}
// Otherwise, lower to two pairs of 32-bit loads / stores.
SDValue LoAddr = Ld->getBasePtr();
- SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
- DAG.getConstant(4, LdDL, MVT::i32));
+ SDValue HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, LdDL);
SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
- Ld->getPointerInfo(),
- Ld->isVolatile(), Ld->isNonTemporal(),
- Ld->isInvariant(), Ld->getAlignment());
+ Ld->getPointerInfo(), Ld->getAlignment(),
+ Ld->getMemOperand()->getFlags());
SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
Ld->getPointerInfo().getWithOffset(4),
- Ld->isVolatile(), Ld->isNonTemporal(),
- Ld->isInvariant(),
- MinAlign(Ld->getAlignment(), 4));
+ MinAlign(Ld->getAlignment(), 4),
+ Ld->getMemOperand()->getFlags());
SDValue NewChain = LoLd.getValue(1);
- if (TokenFactorIndex != -1) {
+ if (TokenFactorIndex >= 0) {
Ops.push_back(LoLd);
Ops.push_back(HiLd);
NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
}
LoAddr = St->getBasePtr();
- HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
- DAG.getConstant(4, StDL, MVT::i32));
-
- SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
- St->getPointerInfo(),
- St->isVolatile(), St->isNonTemporal(),
- St->getAlignment());
- SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
- St->getPointerInfo().getWithOffset(4),
- St->isVolatile(),
- St->isNonTemporal(),
- MinAlign(St->getAlignment(), 4));
+ HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, StDL);
+
+ SDValue LoSt =
+ DAG.getStore(NewChain, StDL, LoLd, LoAddr, St->getPointerInfo(),
+ St->getAlignment(), St->getMemOperand()->getFlags());
+ SDValue HiSt = DAG.getStore(
+ NewChain, StDL, HiLd, HiAddr, St->getPointerInfo().getWithOffset(4),
+ MinAlign(St->getAlignment(), 4), St->getMemOperand()->getFlags());
return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
}
@@ -26728,7 +29401,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
// to get past legalization. The execution dependencies fixup pass will
// choose the optimal machine instruction for the store if this really is
// an integer or v2f32 rather than an f64.
- if (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit() &&
+ if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
SDValue OldExtract = St->getOperand(1);
SDValue ExtOp0 = OldExtract.getOperand(0);
@@ -26738,8 +29411,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
BitCast, OldExtract.getOperand(1));
return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
- St->getPointerInfo(), St->isVolatile(),
- St->isNonTemporal(), St->getAlignment());
+ St->getPointerInfo(), St->getAlignment(),
+ St->getMemOperand()->getFlags());
}
return SDValue();
@@ -26798,14 +29471,14 @@ static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
SDValue A, B;
SmallVector<int, 16> LMask(NumElts);
if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
- if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
+ if (!LHS.getOperand(0).isUndef())
A = LHS.getOperand(0);
- if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
+ if (!LHS.getOperand(1).isUndef())
B = LHS.getOperand(1);
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
std::copy(Mask.begin(), Mask.end(), LMask.begin());
} else {
- if (LHS.getOpcode() != ISD::UNDEF)
+ if (!LHS.isUndef())
A = LHS;
for (unsigned i = 0; i != NumElts; ++i)
LMask[i] = i;
@@ -26816,14 +29489,14 @@ static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
SDValue C, D;
SmallVector<int, 16> RMask(NumElts);
if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
- if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
+ if (!RHS.getOperand(0).isUndef())
C = RHS.getOperand(0);
- if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
+ if (!RHS.getOperand(1).isUndef())
D = RHS.getOperand(1);
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
std::copy(Mask.begin(), Mask.end(), RMask.begin());
} else {
- if (RHS.getOpcode() != ISD::UNDEF)
+ if (!RHS.isUndef())
C = RHS;
for (unsigned i = 0; i != NumElts; ++i)
RMask[i] = i;
@@ -26871,33 +29544,22 @@ static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
return true;
}
-/// Do target-specific dag combines on floating point adds.
-static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
- EVT VT = N->getValueType(0);
- SDValue LHS = N->getOperand(0);
- SDValue RHS = N->getOperand(1);
-
- // Try to synthesize horizontal adds from adds of shuffles.
- if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
- (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
- isHorizontalBinOp(LHS, RHS, true))
- return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
- return SDValue();
-}
-
-/// Do target-specific dag combines on floating point subs.
-static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+/// Do target-specific dag combines on floating-point adds/subs.
+static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
+ bool IsFadd = N->getOpcode() == ISD::FADD;
+ assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");
- // Try to synthesize horizontal subs from subs of shuffles.
- if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
- (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
- isHorizontalBinOp(LHS, RHS, false))
- return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
+ // Try to synthesize horizontal add/sub from adds/subs of shuffles.
+ if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
+ (Subtarget.hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
+ isHorizontalBinOp(LHS, RHS, IsFadd)) {
+ auto NewOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
+ return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
+ }
return SDValue();
}
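The shuffle pattern recognized by isHorizontalBinOp corresponds to HADDPS-style semantics: the low half of the result adds adjacent lanes of the first source, the high half adds adjacent lanes of the second. A C++ model of one v4f32 horizontal add (haddps below is an illustrative helper, not the intrinsic):

#include <array>
#include <cassert>

// Model of HADDPS on v4f32: pairwise sums of A fill the low half, pairwise
// sums of B fill the high half.
static std::array<float, 4> haddps(const std::array<float, 4> &A,
                                   const std::array<float, 4> &B) {
  return {{A[0] + A[1], A[2] + A[3], B[0] + B[1], B[2] + B[3]}};
}

int main() {
  std::array<float, 4> A = {1, 2, 3, 4}, B = {10, 20, 30, 40};
  std::array<float, 4> R = haddps(A, B);
  assert(R[0] == 3 && R[1] == 7 && R[2] == 30 && R[3] == 70);
  return 0;
}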
@@ -26916,13 +29578,11 @@ combineVectorTruncationWithPACKUS(SDNode *N, SelectionDAG &DAG,
// First, use mask to unset all bits that won't appear in the result.
assert((OutSVT == MVT::i8 || OutSVT == MVT::i16) &&
"OutSVT can only be either i8 or i16.");
- SDValue MaskVal =
- DAG.getConstant(OutSVT == MVT::i8 ? 0xFF : 0xFFFF, DL, InSVT);
- SDValue MaskVec = DAG.getNode(
- ISD::BUILD_VECTOR, DL, InVT,
- SmallVector<SDValue, 8>(InVT.getVectorNumElements(), MaskVal));
+ APInt Mask =
+ APInt::getLowBitsSet(InSVT.getSizeInBits(), OutSVT.getSizeInBits());
+ SDValue MaskVal = DAG.getConstant(Mask, DL, InVT);
for (auto &Reg : Regs)
- Reg = DAG.getNode(ISD::AND, DL, InVT, MaskVec, Reg);
+ Reg = DAG.getNode(ISD::AND, DL, InVT, MaskVal, Reg);
MVT UnpackedVT, PackedVT;
if (OutSVT == MVT::i8) {
@@ -26938,7 +29598,7 @@ combineVectorTruncationWithPACKUS(SDNode *N, SelectionDAG &DAG,
for (unsigned j = 1, e = InSVT.getSizeInBits() / OutSVT.getSizeInBits();
j < e; j *= 2, RegNum /= 2) {
for (unsigned i = 0; i < RegNum; i++)
- Regs[i] = DAG.getNode(ISD::BITCAST, DL, UnpackedVT, Regs[i]);
+ Regs[i] = DAG.getBitcast(UnpackedVT, Regs[i]);
for (unsigned i = 0; i < RegNum / 2; i++)
Regs[i] = DAG.getNode(X86ISD::PACKUS, DL, PackedVT, Regs[i * 2],
Regs[i * 2 + 1]);
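The up-front AND is what lets an unsigned-saturating pack behave as a plain truncate: once the bits that cannot survive the truncation are cleared, no lane exceeds the output range and saturation never fires. A per-lane C++ sketch for u32 -> u16 (packus_lane is an illustrative helper, not the intrinsic):

#include <cassert>
#include <cstdint>
#include <initializer_list>

// One PACKUSDW-style lane: narrow with unsigned saturation.
static uint16_t packus_lane(uint32_t X) {
  return (uint16_t)(X > 0xFFFF ? 0xFFFF : X);
}

int main() {
  for (uint32_t In : {0x1234ABCDu, 0x7u, 0xFFFF0000u}) {
    uint32_t Masked = In & 0xFFFF;               // the mask applied above
    assert(packus_lane(Masked) == (uint16_t)In); // pack == pure truncate
  }
  return 0;
}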
@@ -26990,7 +29650,7 @@ combineVectorTruncationWithPACKSS(SDNode *N, SelectionDAG &DAG,
/// element that is extracted from a vector and then truncated, and it is
/// difficult to do this optimization based on them.
static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
EVT OutVT = N->getValueType(0);
if (!OutVT.isVector())
return SDValue();
@@ -27005,7 +29665,7 @@ static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
// TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
// SSE2, and we need to take care of it specially.
// AVX512 provides vpmovdb.
- if (!Subtarget->hasSSE2() || Subtarget->hasAVX2())
+ if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
return SDValue();
EVT OutSVT = OutVT.getVectorElementType();
@@ -27016,7 +29676,7 @@ static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
return SDValue();
// SSSE3's pshufb results in fewer instructions in the cases below.
- if (Subtarget->hasSSSE3() && NumElems == 8 &&
+ if (Subtarget.hasSSSE3() && NumElems == 8 &&
((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
(InSVT == MVT::i32 && OutSVT == MVT::i16)))
return SDValue();
@@ -27026,20 +29686,17 @@ static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
// Split a long vector into vectors of legal type.
unsigned RegNum = InVT.getSizeInBits() / 128;
SmallVector<SDValue, 8> SubVec(RegNum);
- if (InSVT == MVT::i32) {
- for (unsigned i = 0; i < RegNum; i++)
- SubVec[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
- DAG.getIntPtrConstant(i * 4, DL));
- } else {
- for (unsigned i = 0; i < RegNum; i++)
- SubVec[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
- DAG.getIntPtrConstant(i * 2, DL));
- }
+ unsigned NumSubRegElts = 128 / InSVT.getSizeInBits();
+ EVT SubRegVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubRegElts);
- // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PAKCUS
+ for (unsigned i = 0; i < RegNum; i++)
+ SubVec[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubRegVT, In,
+ DAG.getIntPtrConstant(i * NumSubRegElts, DL));
+
+ // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
// for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
// truncate 2 x v4i32 to v8i16.
- if (Subtarget->hasSSE41() || OutSVT == MVT::i8)
+ if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
return combineVectorTruncationWithPACKUS(N, DAG, SubVec);
else if (InSVT == MVT::i32)
return combineVectorTruncationWithPACKSS(N, DAG, SubVec);
@@ -27047,20 +29704,30 @@ static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-static SDValue PerformTRUNCATECombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ EVT VT = N->getValueType(0);
+ SDValue Src = N->getOperand(0);
+ SDLoc DL(N);
+
// Try to detect AVG pattern first.
- SDValue Avg = detectAVGPattern(N->getOperand(0), N->getValueType(0), DAG,
- Subtarget, SDLoc(N));
- if (Avg.getNode())
+ if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
return Avg;
+ // The bitcast source is a direct mmx result.
+ // Detect bitcasts between i32 to x86mmx
+ if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
+ SDValue BCSrc = Src.getOperand(0);
+ if (BCSrc.getValueType() == MVT::x86mmx)
+ return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
+ }
+
return combineVectorTruncation(N, DAG, Subtarget);
}
/// Do target-specific dag combines on floating point negations.
-static SDValue PerformFNEGCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
EVT SVT = VT.getScalarType();
SDValue Arg = N->getOperand(0);
@@ -27074,7 +29741,7 @@ static SDValue PerformFNEGCombine(SDNode *N, SelectionDAG &DAG,
// use of a constant by performing (-0 - A*B) instead.
// FIXME: Check rounding control flags as well once it becomes available.
if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
- Arg->getFlags()->hasNoSignedZeros() && Subtarget->hasAnyFMA()) {
+ Arg->getFlags()->hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
return DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
Arg.getOperand(1), Zero);
@@ -27102,17 +29769,17 @@ static SDValue PerformFNEGCombine(SDNode *N, SelectionDAG &DAG,
}
static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
- if (VT.is512BitVector() && !Subtarget->hasDQI()) {
+ if (VT.is512BitVector() && !Subtarget.hasDQI()) {
// VXORPS, VORPS, VANDPS, VANDNPS are supported only under DQ extension.
// These logic operations may be executed in the integer domain.
SDLoc dl(N);
MVT IntScalar = MVT::getIntegerVT(VT.getScalarSizeInBits());
MVT IntVT = MVT::getVectorVT(IntScalar, VT.getVectorNumElements());
- SDValue Op0 = DAG.getNode(ISD::BITCAST, dl, IntVT, N->getOperand(0));
- SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, IntVT, N->getOperand(1));
+ SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
+ SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
unsigned IntOpcode = 0;
switch (N->getOpcode()) {
default: llvm_unreachable("Unexpected FP logic op");
@@ -27122,13 +29789,13 @@ static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
}
SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
- return DAG.getNode(ISD::BITCAST, dl, VT, IntOp);
+ return DAG.getBitcast(VT, IntOp);
}
return SDValue();
}
/// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
-static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
// F[X]OR(0.0, x) -> x
@@ -27145,7 +29812,7 @@ static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG,
}
/// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
-static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
// Only perform optimizations if UnsafeMath is used.
@@ -27165,9 +29832,9 @@ static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
N->getOperand(0), N->getOperand(1));
}
-static SDValue performFMinNumFMaxNumCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
- if (Subtarget->useSoftFloat())
+static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ if (Subtarget.useSoftFloat())
return SDValue();
// TODO: Check for global or instruction-level "nnan". In that case, we
@@ -27176,9 +29843,9 @@ static SDValue performFMinNumFMaxNumCombine(SDNode *N, SelectionDAG &DAG,
// should be an optional swap and FMAX/FMIN.
EVT VT = N->getValueType(0);
- if (!((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
- (Subtarget->hasSSE2() && (VT == MVT::f64 || VT == MVT::v2f64)) ||
- (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))))
+ if (!((Subtarget.hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
+ (Subtarget.hasSSE2() && (VT == MVT::f64 || VT == MVT::v2f64)) ||
+ (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))))
return SDValue();
// This takes at least 3 instructions, so favor a library call when operating
@@ -27222,8 +29889,8 @@ static SDValue performFMinNumFMaxNumCombine(SDNode *N, SelectionDAG &DAG,
}
/// Do target-specific dag combines on X86ISD::FAND nodes.
-static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
// FAND(0.0, x) -> 0.0
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
if (C->getValueAPF().isPosZero())
@@ -27238,8 +29905,8 @@ static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG,
}
/// Do target-specific dag combines on X86ISD::FANDN nodes.
-static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
// FANDN(0.0, x) -> x
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
if (C->getValueAPF().isPosZero())
@@ -27253,9 +29920,8 @@ static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG,
return lowerX86FPLogicOp(N, DAG, Subtarget);
}
-static SDValue PerformBTCombine(SDNode *N,
- SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI) {
+static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI) {
// BT ignores high bits in the bit index operand.
SDValue Op1 = N->getOperand(1);
if (Op1.hasOneUse()) {
@@ -27272,21 +29938,19 @@ static SDValue PerformBTCombine(SDNode *N,
return SDValue();
}
-static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
- SDValue Op = N->getOperand(0);
- if (Op.getOpcode() == ISD::BITCAST)
- Op = Op.getOperand(0);
+static SDValue combineVZextMovl(SDNode *N, SelectionDAG &DAG) {
+ SDValue Op = peekThroughBitcasts(N->getOperand(0));
EVT VT = N->getValueType(0), OpVT = Op.getValueType();
if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
VT.getVectorElementType().getSizeInBits() ==
OpVT.getVectorElementType().getSizeInBits()) {
- return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
+ return DAG.getBitcast(VT, Op);
}
return SDValue();
}
-static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
if (!VT.isVector())
return SDValue();
@@ -27307,7 +29971,7 @@ static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
// EXTLOAD has a better solution on AVX2:
// it may be replaced with an X86ISD::VSEXT node.
- if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
+ if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
if (!ISD::isNormalLoad(N00.getNode()))
return SDValue();
@@ -27325,7 +29989,7 @@ static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
/// to combine math ops, use an LEA, or use a complex addressing mode. This can
/// eliminate extend, add, and shift instructions.
static SDValue promoteSextBeforeAddNSW(SDNode *Sext, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget &Subtarget) {
// TODO: This should be valid for other integer types.
EVT VT = Sext->getValueType(0);
if (VT != MVT::i64)
@@ -27397,14 +30061,106 @@ static SDValue getDivRem8(SDNode *N, SelectionDAG &DAG) {
return R.getValue(1);
}
-static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+/// Convert a SEXT or ZEXT of a vector to a SIGN_EXTEND_VECTOR_INREG or
+/// ZERO_EXTEND_VECTOR_INREG. This requires splitting the input (or
+/// concatenating it with UNDEFs) into vectors of the same size as the
+/// target type, which then extend their lowest elements.
+static SDValue combineToExtendVectorInReg(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
+ unsigned Opcode = N->getOpcode();
+ if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND)
+ return SDValue();
+ if (!DCI.isBeforeLegalizeOps())
+ return SDValue();
+ if (!Subtarget.hasSSE2())
+ return SDValue();
+
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT SVT = VT.getScalarType();
EVT InVT = N0.getValueType();
EVT InSVT = InVT.getScalarType();
+
+ // Input type must be a vector and we must be extending legal integer types.
+ if (!VT.isVector())
+ return SDValue();
+ if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
+ return SDValue();
+ if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
+ return SDValue();
+
+ // On AVX2+ targets, if the input/output types are both legal then we will be
+ // able to use SIGN_EXTEND/ZERO_EXTEND directly.
+ if (Subtarget.hasInt256() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
+ DAG.getTargetLoweringInfo().isTypeLegal(InVT))
+ return SDValue();
+
+ SDLoc DL(N);
+
+ auto ExtendVecSize = [&DAG](const SDLoc &DL, SDValue N, unsigned Size) {
+ EVT InVT = N.getValueType();
+ EVT OutVT = EVT::getVectorVT(*DAG.getContext(), InVT.getScalarType(),
+ Size / InVT.getScalarSizeInBits());
+ SmallVector<SDValue, 8> Opnds(Size / InVT.getSizeInBits(),
+ DAG.getUNDEF(InVT));
+ Opnds[0] = N;
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Opnds);
+ };
+
+ // If the target size is less than 128 bits, extend to a type that would
+ // extend to 128 bits, extend that, and extract the original target vector.
+ if (VT.getSizeInBits() < 128 && !(128 % VT.getSizeInBits())) {
+ unsigned Scale = 128 / VT.getSizeInBits();
+ EVT ExVT =
+ EVT::getVectorVT(*DAG.getContext(), SVT, 128 / SVT.getSizeInBits());
+ SDValue Ex = ExtendVecSize(DL, N0, Scale * InVT.getSizeInBits());
+ SDValue SExt = DAG.getNode(Opcode, DL, ExVT, Ex);
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SExt,
+ DAG.getIntPtrConstant(0, DL));
+ }
+
+ // If the target size is 128 bits (or 256 bits on an AVX2 target), convert
+ // to ISD::*_EXTEND_VECTOR_INREG, which ensures lowering to X86ISD::V*EXT.
+ // Also use this if we don't have SSE41, to let the legalizer do its job.
+ if (!Subtarget.hasSSE41() || VT.is128BitVector() ||
+ (VT.is256BitVector() && Subtarget.hasInt256())) {
+ SDValue ExOp = ExtendVecSize(DL, N0, VT.getSizeInBits());
+ return Opcode == ISD::SIGN_EXTEND
+ ? DAG.getSignExtendVectorInReg(ExOp, DL, VT)
+ : DAG.getZeroExtendVectorInReg(ExOp, DL, VT);
+ }
+
+ // On pre-AVX2 targets, split into 128-bit nodes of
+ // ISD::*_EXTEND_VECTOR_INREG.
+ if (!Subtarget.hasInt256() && !(VT.getSizeInBits() % 128)) {
+ unsigned NumVecs = VT.getSizeInBits() / 128;
+ unsigned NumSubElts = 128 / SVT.getSizeInBits();
+ EVT SubVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumSubElts);
+ EVT InSubVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubElts);
+
+ SmallVector<SDValue, 8> Opnds;
+ for (unsigned i = 0, Offset = 0; i != NumVecs; ++i, Offset += NumSubElts) {
+ SDValue SrcVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InSubVT, N0,
+ DAG.getIntPtrConstant(Offset, DL));
+ SrcVec = ExtendVecSize(DL, SrcVec, 128);
+ SrcVec = Opcode == ISD::SIGN_EXTEND
+ ? DAG.getSignExtendVectorInReg(SrcVec, DL, SubVT)
+ : DAG.getZeroExtendVectorInReg(SrcVec, DL, SubVT);
+ Opnds.push_back(SrcVec);
+ }
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Opnds);
+ }
+
+ return SDValue();
+}
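
For readers unfamiliar with the *_EXTEND_VECTOR_INREG nodes built above: they consume only the low result-count elements of the (possibly UNDEF-padded) input and widen each one, which is what PMOVSX*/PMOVZX* do. A standalone scalar model of the sign-extending case:

#include <array>
#include <cassert>
#include <cstdint>

int main() {
  std::array<int8_t, 16> In{}; // a full 128-bit v16i8 input
  In[0] = -1;
  In[1] = 7;                   // lanes 4..15 are ignored below
  std::array<int32_t, 4> Out;  // a v4i32 result reads In[0..3] only
  for (int I = 0; I != 4; ++I)
    Out[I] = int32_t(In[I]);   // per-lane sign extension
  assert(Out[0] == -1 && Out[1] == 7 && Out[2] == 0);
  return 0;
}
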
+
+static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+ EVT InVT = N0.getValueType();
SDLoc DL(N);
if (SDValue DivRem8 = getDivRem8(N, DAG))
@@ -27414,70 +30170,16 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
if (InVT == MVT::i1) {
SDValue Zero = DAG.getConstant(0, DL, VT);
SDValue AllOnes =
- DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
+ DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
return DAG.getNode(ISD::SELECT, DL, VT, N0, AllOnes, Zero);
}
return SDValue();
}
- if (VT.isVector() && Subtarget->hasSSE2()) {
- auto ExtendVecSize = [&DAG](SDLoc DL, SDValue N, unsigned Size) {
- EVT InVT = N.getValueType();
- EVT OutVT = EVT::getVectorVT(*DAG.getContext(), InVT.getScalarType(),
- Size / InVT.getScalarSizeInBits());
- SmallVector<SDValue, 8> Opnds(Size / InVT.getSizeInBits(),
- DAG.getUNDEF(InVT));
- Opnds[0] = N;
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Opnds);
- };
-
- // If target-size is less than 128-bits, extend to a type that would extend
- // to 128 bits, extend that and extract the original target vector.
- if (VT.getSizeInBits() < 128 && !(128 % VT.getSizeInBits()) &&
- (SVT == MVT::i64 || SVT == MVT::i32 || SVT == MVT::i16) &&
- (InSVT == MVT::i32 || InSVT == MVT::i16 || InSVT == MVT::i8)) {
- unsigned Scale = 128 / VT.getSizeInBits();
- EVT ExVT =
- EVT::getVectorVT(*DAG.getContext(), SVT, 128 / SVT.getSizeInBits());
- SDValue Ex = ExtendVecSize(DL, N0, Scale * InVT.getSizeInBits());
- SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, ExVT, Ex);
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SExt,
- DAG.getIntPtrConstant(0, DL));
- }
-
- // If target-size is 128-bits, then convert to ISD::SIGN_EXTEND_VECTOR_INREG
- // which ensures lowering to X86ISD::VSEXT (pmovsx*).
- if (VT.getSizeInBits() == 128 &&
- (SVT == MVT::i64 || SVT == MVT::i32 || SVT == MVT::i16) &&
- (InSVT == MVT::i32 || InSVT == MVT::i16 || InSVT == MVT::i8)) {
- SDValue ExOp = ExtendVecSize(DL, N0, 128);
- return DAG.getSignExtendVectorInReg(ExOp, DL, VT);
- }
-
- // On pre-AVX2 targets, split into 128-bit nodes of
- // ISD::SIGN_EXTEND_VECTOR_INREG.
- if (!Subtarget->hasInt256() && !(VT.getSizeInBits() % 128) &&
- (SVT == MVT::i64 || SVT == MVT::i32 || SVT == MVT::i16) &&
- (InSVT == MVT::i32 || InSVT == MVT::i16 || InSVT == MVT::i8)) {
- unsigned NumVecs = VT.getSizeInBits() / 128;
- unsigned NumSubElts = 128 / SVT.getSizeInBits();
- EVT SubVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumSubElts);
- EVT InSubVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubElts);
-
- SmallVector<SDValue, 8> Opnds;
- for (unsigned i = 0, Offset = 0; i != NumVecs;
- ++i, Offset += NumSubElts) {
- SDValue SrcVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InSubVT, N0,
- DAG.getIntPtrConstant(Offset, DL));
- SrcVec = ExtendVecSize(DL, SrcVec, 128);
- SrcVec = DAG.getSignExtendVectorInReg(SrcVec, DL, SubVT);
- Opnds.push_back(SrcVec);
- }
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Opnds);
- }
- }
+ if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
+ return V;
- if (Subtarget->hasAVX() && VT.is256BitVector())
+ if (Subtarget.hasAVX() && VT.is256BitVector())
if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
return R;
@@ -27487,8 +30189,8 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget* Subtarget) {
+static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
@@ -27497,7 +30199,7 @@ static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
EVT ScalarVT = VT.getScalarType();
- if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget->hasAnyFMA())
+ if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
return SDValue();
SDValue A = N->getOperand(0);
@@ -27526,9 +30228,9 @@ static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(Opcode, dl, VT, A, B, C);
}
-static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
// (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
// (and (i32 x86isd::setcc_carry), 1)
// This eliminates the zext. This transformation is necessary because
@@ -27563,6 +30265,9 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
}
}
+ if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
+ return V;
+
if (VT.is256BitVector())
if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
return R;
@@ -27573,10 +30278,10 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-// Optimize x == -y --> x+y == 0
-// x != -y --> x+y != 0
-static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget* Subtarget) {
+/// Optimize x == -y --> x+y == 0
+/// x != -y --> x+y != 0
+static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
@@ -27631,10 +30336,15 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
}
}
+ // For an SSE1-only target, lower to X86ISD::CMPP early to avoid scalarization
+ // via legalization because v4i32 is not a legal type.
+ if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32)
+ return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
+
return SDValue();
}
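
The x == -y fold above rests on a two's-complement identity that holds even under wraparound: x equals -y exactly when x + y is zero. A standalone check in C++ (unsigned arithmetic, so the wraparound is well defined):

#include <cassert>
#include <cstdint>

int main() {
  // INT_MIN-style corner: -y wraps back to y, and the identity still holds.
  uint32_t X = 0x80000000u, Y = 0x80000000u;
  assert((X == 0u - Y) == (X + Y == 0u));
  for (uint32_t A : {0u, 1u, 42u}) {
    for (uint32_t B : {0u, 1u, 0xffffffffu})
      assert((A == 0u - B) == (A + B == 0u));
  }
  return 0;
}
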
-static SDValue PerformGatherScatterCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG) {
SDLoc DL(N);
// Gather and Scatter instructions use k-registers for masks. The type of
// the masks is v*i1. So the mask will be truncated anyway.
@@ -27648,11 +30358,11 @@ static SDValue PerformGatherScatterCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
-// Helper function of PerformSETCCCombine. It is to materialize "setb reg"
+// Helper function of combineX86SetCC. It materializes "setb reg"
// as "sbb reg,reg", since it can be extended without zext and produces
// an all-ones bit which is more useful than 0/1 in some cases.
-static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
- MVT VT) {
+static SDValue MaterializeSETB(const SDLoc &DL, SDValue EFLAGS,
+ SelectionDAG &DAG, MVT VT) {
if (VT == MVT::i8)
return DAG.getNode(ISD::AND, DL, VT,
DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
@@ -27667,9 +30377,9 @@ static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
}
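
A scalar picture of why SETB -> SBB is profitable, as the comment above says: SETB materializes the carry as 0/1 in a byte, while SBB of a register with itself materializes it as 0/-1 across the full width, so it composes with masking ops without a zero extension. Standalone C++:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t A = 3, B = 5;
  uint32_t Setb = (A < B) ? 1u : 0u;  // setb: carry flag as 0/1
  uint32_t Sbb = (A < B) ? ~0u : 0u;  // sbb r,r: carry flag as 0/-1
  assert((Sbb & 1u) == Setb);         // SETCC_CARRY & 1 recovers the 0/1 form
  return 0;
}
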
// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
-static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc DL(N);
X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
SDValue EFLAGS = N->getOperand(1);
@@ -27698,7 +30408,8 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
if (CC == X86::COND_B)
return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
- if (SDValue Flags = checkBoolTestSetCCCombine(EFLAGS, CC)) {
+ // Try to simplify the EFLAGS and condition code operands.
+ if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG)) {
SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
}
@@ -27706,28 +30417,28 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-// Optimize branch condition evaluation.
-//
-static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+/// Optimize branch condition evaluation.
+static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc DL(N);
- SDValue Chain = N->getOperand(0);
- SDValue Dest = N->getOperand(1);
SDValue EFLAGS = N->getOperand(3);
X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
- if (SDValue Flags = checkBoolTestSetCCCombine(EFLAGS, CC)) {
+ // Try to simplify the EFLAGS and condition code operands.
+ // Make sure to not keep references to operands, as combineSetCCEFLAGS can
+ // RAUW them under us.
+ if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG)) {
SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
- return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
- Flags);
+ return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
+ N->getOperand(1), Cond, Flags);
}
return SDValue();
}
-static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
- SelectionDAG &DAG) {
+static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
+ SelectionDAG &DAG) {
// Take advantage of vector comparisons producing 0 or -1 in each lane to
// optimize away operation when it's from a constant.
//
@@ -27772,8 +30483,8 @@ static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
return SDValue();
}
-static SDValue PerformUINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
SDValue Op0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT InVT = Op0.getValueType();
@@ -27797,11 +30508,11 @@ static SDValue PerformUINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
// First try to optimize away the conversion entirely when it's
// conditionally from a constant. Vectors only.
- if (SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG))
+ if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
return Res;
// Now move on to more general possibilities.
@@ -27822,18 +30533,18 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
// Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
// a 32-bit target where SSE doesn't support i64->FP operations.
- if (!Subtarget->useSoftFloat() && Op0.getOpcode() == ISD::LOAD) {
+ if (!Subtarget.useSoftFloat() && Op0.getOpcode() == ISD::LOAD) {
LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
EVT LdVT = Ld->getValueType(0);
- // This transformation is not supported if the result type is f16
- if (VT == MVT::f16)
+ // This transformation is not supported if the result type is f16 or f128.
+ if (VT == MVT::f16 || VT == MVT::f128)
return SDValue();
if (!Ld->isVolatile() && !VT.isVector() &&
ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
- !Subtarget->is64Bit() && LdVT == MVT::i64) {
- SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
+ !Subtarget.is64Bit() && LdVT == MVT::i64) {
+ SDValue FILDChain = Subtarget.getTargetLowering()->BuildFILD(
SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
return FILDChain;
@@ -27843,8 +30554,8 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
}
// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
-static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
- X86TargetLowering::DAGCombinerInfo &DCI) {
+static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
+ X86TargetLowering::DAGCombinerInfo &DCI) {
// If the LHS and RHS of the ADC node are zero, then it can't overflow and
// the result is either zero or one (depending on the input carry bit).
// Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
@@ -27868,10 +30579,10 @@ static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-// fold (add Y, (sete X, 0)) -> adc 0, Y
-// (add Y, (setne X, 0)) -> sbb -1, Y
-// (sub (sete X, 0), Y) -> sbb 0, Y
-// (sub (setne X, 0), Y) -> adc -1, Y
+/// fold (add Y, (sete X, 0)) -> adc 0, Y
+/// (add Y, (setne X, 0)) -> sbb -1, Y
+/// (sub (sete X, 0), Y) -> sbb 0, Y
+/// (sub (setne X, 0), Y) -> adc -1, Y
static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
SDLoc DL(N);
@@ -27909,24 +30620,163 @@ static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
DAG.getConstant(0, DL, OtherVal.getValueType()), NewCmp);
}
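
The four folds documented above all rely on one flag trick: CMP X, 1 sets the carry flag exactly when X == 0 (the unsigned comparison X < 1), so a sete/setne of X against zero can be consumed as a carry by ADC/SBB. A standalone scalar check of the two add forms:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t X : {0u, 1u, 7u, 0xffffffffu}) {
    uint32_t Y = 100;
    uint32_t CF = (X < 1u) ? 1u : 0u;          // carry flag of "cmp X, 1"
    uint32_t Sete = (X == 0) ? 1u : 0u;
    assert(Y + Sete == Y + 0u + CF);           // adc 0, Y
    uint32_t Setne = (X != 0) ? 1u : 0u;
    assert(Y + Setne == Y - 0xffffffffu - CF); // sbb -1, Y (i.e. Y + 1 - CF)
  }
  return 0;
}
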
-/// PerformADDCombine - Do target-specific dag combines on integer adds.
-static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue detectSADPattern(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+
+ if (!VT.isVector() || !VT.isSimple() ||
+ !(VT.getVectorElementType() == MVT::i32))
+ return SDValue();
+
+ unsigned RegSize = 128;
+ if (Subtarget.hasBWI())
+ RegSize = 512;
+ else if (Subtarget.hasAVX2())
+ RegSize = 256;
+
+ // We only handle v16i32 for SSE2 / v32i32 for AVX2 / v64i32 for AVX512.
+ if (VT.getSizeInBits() / 4 > RegSize)
+ return SDValue();
+
+ // Detect the following pattern:
+ //
+ // 1: %2 = zext <N x i8> %0 to <N x i32>
+ // 2: %3 = zext <N x i8> %1 to <N x i32>
+ // 3: %4 = sub nsw <N x i32> %2, %3
+ // 4: %5 = icmp sgt <N x i32> %4, [0 x N] or [-1 x N]
+ // 5: %6 = sub nsw <N x i32> zeroinitializer, %4
+ // 6: %7 = select <N x i1> %5, <N x i32> %4, <N x i32> %6
+ // 7: %8 = add nsw <N x i32> %7, %vec.phi
+ //
+ // The last instruction must be a reduction add. The instructions 3-6 form an
+ // ABSDIFF pattern.
+
+ // The two operands of reduction add are from PHI and a select-op as in line 7
+ // above.
+ SDValue SelectOp, Phi;
+ if (Op0.getOpcode() == ISD::VSELECT) {
+ SelectOp = Op0;
+ Phi = Op1;
+ } else if (Op1.getOpcode() == ISD::VSELECT) {
+ SelectOp = Op1;
+ Phi = Op0;
+ } else
+ return SDValue();
+
+ // Check that the condition of the select instruction is greater-than.
+ SDValue SetCC = SelectOp->getOperand(0);
+ if (SetCC.getOpcode() != ISD::SETCC)
+ return SDValue();
+ ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
+ if (CC != ISD::SETGT)
+ return SDValue();
+
+ Op0 = SelectOp->getOperand(1);
+ Op1 = SelectOp->getOperand(2);
+
+ // The second operand of SelectOp, Op1, is the negation of the first
+ // operand, Op0, which is implemented as 0 - Op0.
+ if (!(Op1.getOpcode() == ISD::SUB &&
+ ISD::isBuildVectorAllZeros(Op1.getOperand(0).getNode()) &&
+ Op1.getOperand(1) == Op0))
+ return SDValue();
+
+ // The first operand of SetCC is the first operand of SelectOp, which is the
+ // difference between two input vectors.
+ if (SetCC.getOperand(0) != Op0)
+ return SDValue();
+
+ // The second operand of the > comparison can be either -1 or 0.
+ if (!(ISD::isBuildVectorAllZeros(SetCC.getOperand(1).getNode()) ||
+ ISD::isBuildVectorAllOnes(SetCC.getOperand(1).getNode())))
+ return SDValue();
+
+ // The first operand of SelectOp is the difference between two input vectors.
+ if (Op0.getOpcode() != ISD::SUB)
+ return SDValue();
+
+ Op1 = Op0.getOperand(1);
+ Op0 = Op0.getOperand(0);
+
+ // Check if the operands of the diff are zero-extended from vectors of i8.
+ if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
+ Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
+ Op1.getOpcode() != ISD::ZERO_EXTEND ||
+ Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
+ return SDValue();
+
+ // SAD pattern detected. Now build a SAD instruction and an addition for
+ // reduction. Note that the number of elements of the result of SAD is less
+ // than the number of elements of its input. Therefore, we can only update
+ // part of the elements in the reduction vector.
+
+ // Legalize the type of the inputs of PSADBW.
+ EVT InVT = Op0.getOperand(0).getValueType();
+ if (InVT.getSizeInBits() <= 128)
+ RegSize = 128;
+ else if (InVT.getSizeInBits() <= 256)
+ RegSize = 256;
+
+ unsigned NumConcat = RegSize / InVT.getSizeInBits();
+ SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
+ Ops[0] = Op0.getOperand(0);
+ MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
+ Op0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
+ Ops[0] = Op1.getOperand(0);
+ Op1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
+
+ // The output of PSADBW is a vector of i64.
+ MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
+ SDValue Sad = DAG.getNode(X86ISD::PSADBW, DL, SadVT, Op0, Op1);
+
+ // We need to turn the vector of i64 into a vector of i32.
+ // If the reduction vector is at least as wide as the psadbw result, just
+ // bitcast. If it's narrower, truncate - the high i32 of each i64 is zero
+ // anyway.
+ MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
+ if (VT.getSizeInBits() >= ResVT.getSizeInBits())
+ Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);
+ else
+ Sad = DAG.getNode(ISD::TRUNCATE, DL, VT, Sad);
+
+ if (VT.getSizeInBits() > ResVT.getSizeInBits()) {
+ // Update part of the elements of the reduction vector. This is done by first
+ // extracting a sub-vector from it, updating this sub-vector, and inserting
+ // it back.
+ SDValue SubPhi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResVT, Phi,
+ DAG.getIntPtrConstant(0, DL));
+ SDValue Res = DAG.getNode(ISD::ADD, DL, ResVT, Sad, SubPhi);
+ return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Phi, Res,
+ DAG.getIntPtrConstant(0, DL));
+ } else
+ return DAG.getNode(ISD::ADD, DL, VT, Sad, Phi);
+}
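
A standalone scalar reference for what this combine ultimately computes: the zext/sub/abs/add chain it matched is a sum of absolute byte differences, which PSADBW evaluates eight bytes at a time into one i64 lane. Illustrative C++ only; sad8 is an invented name, not an LLVM helper.

#include <cassert>
#include <cstdint>

static uint64_t sad8(const uint8_t *A, const uint8_t *B) {
  uint64_t Sum = 0;
  for (int I = 0; I != 8; ++I) {
    int32_t D = int32_t(A[I]) - int32_t(B[I]); // sub of zext'd i8 inputs
    Sum += uint32_t(D > 0 ? D : -D);           // the select-based abs
  }
  return Sum;                                   // one PSADBW i64 lane
}

int main() {
  uint8_t A[8] = {0, 10, 255, 3, 4, 5, 6, 7};
  uint8_t B[8] = {5, 10, 0, 3, 4, 5, 6, 7};
  assert(sad8(A, B) == 5 + 0 + 255);
  return 0;
}
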
+
+static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags;
+ if (Flags->hasVectorReduction()) {
+ if (SDValue Sad = detectSADPattern(N, DAG, Subtarget))
+ return Sad;
+ }
EVT VT = N->getValueType(0);
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
// Try to synthesize horizontal adds from adds of shuffles.
- if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
- (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
+ if (((Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
+ (Subtarget.hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
isHorizontalBinOp(Op0, Op1, true))
return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
return OptimizeConditionalInDecrement(N, DAG);
}
-static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
@@ -27950,30 +30800,44 @@ static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
// Try to synthesize horizontal adds from adds of shuffles.
EVT VT = N->getValueType(0);
- if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
- (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
+ if (((Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
+ (Subtarget.hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
isHorizontalBinOp(Op0, Op1, true))
return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
return OptimizeConditionalInDecrement(N, DAG);
}
-/// performVZEXTCombine - Performs build vector combines
-static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget *Subtarget) {
+static SDValue combineVZext(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDLoc DL(N);
MVT VT = N->getSimpleValueType(0);
+ MVT SVT = VT.getVectorElementType();
SDValue Op = N->getOperand(0);
MVT OpVT = Op.getSimpleValueType();
MVT OpEltVT = OpVT.getVectorElementType();
unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
- // (vzext (bitcast (vzext (x)) -> (vzext x)
- SDValue V = Op;
- while (V.getOpcode() == ISD::BITCAST)
- V = V.getOperand(0);
+ // Perform any constant folding.
+ if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
+ SmallVector<SDValue, 4> Vals;
+ for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
+ SDValue OpElt = Op.getOperand(i);
+ if (OpElt.getOpcode() == ISD::UNDEF) {
+ Vals.push_back(DAG.getUNDEF(SVT));
+ continue;
+ }
+ APInt Cst = cast<ConstantSDNode>(OpElt.getNode())->getAPIntValue();
+ assert(Cst.getBitWidth() == OpEltVT.getSizeInBits());
+ Cst = Cst.zextOrTrunc(SVT.getSizeInBits());
+ Vals.push_back(DAG.getConstant(Cst, DL, SVT));
+ }
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Vals);
+ }
+ // (vzext (bitcast (vzext x))) -> (vzext x)
+ SDValue V = peekThroughBitcasts(Op);
if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
MVT InnerVT = V.getSimpleValueType();
MVT InnerEltVT = InnerVT.getVectorElementType();
@@ -28022,61 +30886,111 @@ static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+/// Canonicalize (LSUB p, 1) -> (LADD p, -1).
+static SDValue combineLockSub(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ SDValue Chain = N->getOperand(0);
+ SDValue LHS = N->getOperand(1);
+ SDValue RHS = N->getOperand(2);
+ MVT VT = RHS.getSimpleValueType();
+ SDLoc DL(N);
+
+ auto *C = dyn_cast<ConstantSDNode>(RHS);
+ if (!C || C->getZExtValue() != 1)
+ return SDValue();
+
+ RHS = DAG.getConstant(-1, DL, VT);
+ MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
+ return DAG.getMemIntrinsicNode(X86ISD::LADD, DL,
+ DAG.getVTList(MVT::i32, MVT::Other),
+ {Chain, LHS, RHS}, VT, MMO);
+}
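
The canonicalization is the usual two's-complement identity (subtracting 1 is adding -1), so a single LADD lowering can serve both LOCK-prefixed forms. The same identity at the source level, sketched with std::atomic:

#include <atomic>
#include <cassert>
#include <cstdint>

int main() {
  std::atomic<uint32_t> P{10}, Q{10};
  P.fetch_sub(1);            // conceptually "lock sub $1, (p)"
  Q.fetch_add(uint32_t(-1)); // conceptually "lock add $-1, (q)"
  assert(P.load() == Q.load());
  return 0;
}
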
+
+// TEST (AND a, b), (AND a, b) -> TEST a, b
+static SDValue combineTestM(SDNode *N, SelectionDAG &DAG) {
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+
+ if (Op0 != Op1 || Op1->getOpcode() != ISD::AND)
+ return SDValue();
+
+ EVT VT = N->getValueType(0);
+ SDLoc DL(N);
+
+ return DAG.getNode(X86ISD::TESTM, DL, VT,
+ Op0->getOperand(0), Op0->getOperand(1));
+}
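
In scalar terms the fold is immediate: TESTM derives the mask from the AND of its operands, and ANDing a value with itself is the identity, so testing (a & b) against itself equals testing a against b. A standalone check:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t A : {0u, 0xf0u, 0xffu}) {
    for (uint32_t B : {0u, 0x0fu, 0xffu}) {
      uint32_t M = A & B;
      assert(((M & M) == 0) == ((A & B) == 0)); // same test result either way
    }
  }
  return 0;
}
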
+
+static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ MVT VT = N->getSimpleValueType(0);
+ SDLoc DL(N);
+
+ if (N->getOperand(0) == N->getOperand(1)) {
+ if (N->getOpcode() == X86ISD::PCMPEQ)
+ return getOnesVector(VT, Subtarget, DAG, DL);
+ if (N->getOpcode() == X86ISD::PCMPGT)
+ return getZeroVector(VT, Subtarget, DAG, DL);
+ }
+
+ return SDValue();
+}
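
Per lane, both folds above are tautologies: x == x always holds (an all-ones lane) and x > x never does (a zero lane), so the result is a constant vector and the operand need not be read. A standalone scalar check:

#include <cassert>
#include <cstdint>

int main() {
  for (int32_t X : {-5, 0, 7}) {
    int32_t Eq = (X == X) ? -1 : 0; // a PCMPEQ lane
    int32_t Gt = (X > X) ? -1 : 0;  // a PCMPGT lane
    assert(Eq == -1 && Gt == 0);
  }
  return 0;
}
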
+
SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
switch (N->getOpcode()) {
default: break;
- case ISD::EXTRACT_VECTOR_ELT:
- return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
+ case ISD::EXTRACT_VECTOR_ELT: return combineExtractVectorElt(N, DAG, DCI);
case ISD::VSELECT:
case ISD::SELECT:
- case X86ISD::SHRUNKBLEND:
- return PerformSELECTCombine(N, DAG, DCI, Subtarget);
- case ISD::BITCAST: return PerformBITCASTCombine(N, DAG, Subtarget);
- case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
- case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
- case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
- case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
- case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
+ case X86ISD::SHRUNKBLEND: return combineSelect(N, DAG, DCI, Subtarget);
+ case ISD::BITCAST: return combineBitcast(N, DAG, Subtarget);
+ case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
+ case ISD::ADD: return combineAdd(N, DAG, Subtarget);
+ case ISD::SUB: return combineSub(N, DAG, Subtarget);
+ case X86ISD::ADC: return combineADC(N, DAG, DCI);
+ case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
case ISD::SHL:
case ISD::SRA:
- case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
- case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
- case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
- case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
- case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
- case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
- case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
- case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
- case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
- case ISD::UINT_TO_FP: return PerformUINT_TO_FPCombine(N, DAG, Subtarget);
- case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
- case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
- case ISD::FNEG: return PerformFNEGCombine(N, DAG, Subtarget);
- case ISD::TRUNCATE: return PerformTRUNCATECombine(N, DAG, Subtarget);
+ case ISD::SRL: return combineShift(N, DAG, DCI, Subtarget);
+ case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
+ case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
+ case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
+ case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
+ case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
+ case ISD::STORE: return combineStore(N, DAG, Subtarget);
+ case ISD::MSTORE: return combineMaskedStore(N, DAG, Subtarget);
+ case ISD::SINT_TO_FP: return combineSIntToFP(N, DAG, Subtarget);
+ case ISD::UINT_TO_FP: return combineUIntToFP(N, DAG, Subtarget);
+ case ISD::FADD:
+ case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
+ case ISD::FNEG: return combineFneg(N, DAG, Subtarget);
+ case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
case X86ISD::FXOR:
- case X86ISD::FOR: return PerformFORCombine(N, DAG, Subtarget);
+ case X86ISD::FOR: return combineFOr(N, DAG, Subtarget);
case X86ISD::FMIN:
- case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
+ case X86ISD::FMAX: return combineFMinFMax(N, DAG);
case ISD::FMINNUM:
- case ISD::FMAXNUM: return performFMinNumFMaxNumCombine(N, DAG,
- Subtarget);
- case X86ISD::FAND: return PerformFANDCombine(N, DAG, Subtarget);
- case X86ISD::FANDN: return PerformFANDNCombine(N, DAG, Subtarget);
- case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
- case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
+ case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
+ case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
+ case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
+ case X86ISD::BT: return combineBT(N, DAG, DCI);
+ case X86ISD::VZEXT_MOVL: return combineVZextMovl(N, DAG);
case ISD::ANY_EXTEND:
- case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
- case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
- case ISD::SIGN_EXTEND_INREG:
- return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
- case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
- case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
- case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
- case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
+ case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
+ case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
+ case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
+ case ISD::SETCC: return combineSetCC(N, DAG, Subtarget);
+ case X86ISD::SETCC: return combineX86SetCC(N, DAG, DCI, Subtarget);
+ case X86ISD::BRCOND: return combineBrCond(N, DAG, DCI, Subtarget);
+ case X86ISD::VZEXT: return combineVZext(N, DAG, DCI, Subtarget);
case X86ISD::SHUFP: // Handle all target specific shuffles
+ case X86ISD::INSERTPS:
case X86ISD::PALIGNR:
+ case X86ISD::VSHLDQ:
+ case X86ISD::VSRLDQ:
case X86ISD::BLENDI:
case X86ISD::UNPCKH:
case X86ISD::UNPCKL:
@@ -28086,23 +31000,36 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::PSHUFD:
case X86ISD::PSHUFHW:
case X86ISD::PSHUFLW:
+ case X86ISD::MOVSHDUP:
+ case X86ISD::MOVSLDUP:
+ case X86ISD::MOVDDUP:
case X86ISD::MOVSS:
case X86ISD::MOVSD:
+ case X86ISD::VPPERM:
+ case X86ISD::VPERMI:
+ case X86ISD::VPERMV:
+ case X86ISD::VPERMV3:
+ case X86ISD::VPERMIL2:
case X86ISD::VPERMILPI:
+ case X86ISD::VPERMILPV:
case X86ISD::VPERM2X128:
- case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
- case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
+ case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI, Subtarget);
+ case ISD::FMA: return combineFMA(N, DAG, Subtarget);
case ISD::MGATHER:
- case ISD::MSCATTER: return PerformGatherScatterCombine(N, DAG);
+ case ISD::MSCATTER: return combineGatherScatter(N, DAG);
+ case X86ISD::LSUB: return combineLockSub(N, DAG, Subtarget);
+ case X86ISD::TESTM: return combineTestM(N, DAG);
+ case X86ISD::PCMPEQ:
+ case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
}
return SDValue();
}
-/// isTypeDesirableForOp - Return true if the target has native support for
-/// the specified value type and it is 'desirable' to use the type for the
-/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
-/// instruction encodings are longer and some i16 instructions are slow.
+/// Return true if the target has native support for the specified value type
+/// and it is 'desirable' to use the type for the given node type. e.g. On x86
+/// i16 is legal, but undesirable since i16 instruction encodings are longer and
+/// some i16 instructions are slow.
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
if (!isTypeLegal(VT))
return false;
@@ -28140,9 +31067,9 @@ bool X86TargetLowering::hasCopyImplyingStackAdjustment(
[](const MachineInstr &RI) { return RI.isCopy(); });
}
-/// IsDesirableToPromoteOp - This method query the target whether it is
-/// beneficial for dag combiner to promote the specified node. If true, it
-/// should return the desired promotion type by reference.
+/// This method queries the target whether it is beneficial for the dag
+/// combiner to promote the specified node. If true, it should return the
+/// desired promotion type by reference.
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
EVT VT = Op.getValueType();
if (VT != MVT::i16)
@@ -28152,23 +31079,6 @@ bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
bool Commute = false;
switch (Op.getOpcode()) {
default: break;
- case ISD::LOAD: {
- LoadSDNode *LD = cast<LoadSDNode>(Op);
- // If the non-extending load has a single use and it's not live out, then it
- // might be folded.
- if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
- Op.hasOneUse()*/) {
- for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
- UE = Op.getNode()->use_end(); UI != UE; ++UI) {
- // The only case where we'd want to promote LOAD (rather then it being
- // promoted as an operand is when it's only use is liveout.
- if (UI->getOpcode() != ISD::CopyToReg)
- return false;
- }
- }
- Promote = true;
- break;
- }
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
@@ -28250,7 +31160,7 @@ static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
- std::string AsmStr = IA->getAsmString();
+ const std::string &AsmStr = IA->getAsmString();
IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
if (!Ty || Ty->getBitWidth() % 16 != 0)
@@ -28323,8 +31233,7 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
return false;
}
-/// getConstraintType - Given a constraint letter, return the type of
-/// constraint it is for this target.
+/// Given a constraint letter, return the type of constraint for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(StringRef Constraint) const {
if (Constraint.size() == 1) {
@@ -28403,13 +31312,13 @@ TargetLowering::ConstraintWeight
weight = CW_SpecificReg;
break;
case 'y':
- if (type->isX86_MMXTy() && Subtarget->hasMMX())
+ if (type->isX86_MMXTy() && Subtarget.hasMMX())
weight = CW_SpecificReg;
break;
case 'x':
case 'Y':
- if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
- ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
+ if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
+ ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasFp256()))
weight = CW_Register;
break;
case 'I':
@@ -28471,25 +31380,25 @@ TargetLowering::ConstraintWeight
return weight;
}
-/// LowerXConstraint - try to replace an X constraint, which matches anything,
-/// with another that has more specific requirements based on the type of the
-/// corresponding operand.
+/// Try to replace an X constraint, which matches anything, with another that
+/// has more specific requirements based on the type of the corresponding
+/// operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
// FP X constraints get lowered to SSE1/2 registers if available, otherwise
// 'f' like normal targets.
if (ConstraintVT.isFloatingPoint()) {
- if (Subtarget->hasSSE2())
+ if (Subtarget.hasSSE2())
return "Y";
- if (Subtarget->hasSSE1())
+ if (Subtarget.hasSSE1())
return "x";
}
return TargetLowering::LowerXConstraint(ConstraintVT);
}
-/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
-/// vector. If it is invalid, don't add anything to Ops.
+/// Lower the specified operand into the Ops vector.
+/// If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue>&Ops,
@@ -28532,7 +31441,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'L':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
- (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
+ (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
Op.getValueType());
break;
@@ -28605,7 +31514,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
// In any sort of PIC mode addresses need to be computed at runtime by
// adding in a register or some sort of table lookup. These can't
// be used as immediates.
- if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
+ if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
return;
// If we are in non-pic codegen mode, we allow the address of a global (with
@@ -28639,8 +31548,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
const GlobalValue *GV = GA->getGlobal();
// If we require an extra load to get this address, as in PIC mode, we
// can't accept it.
- if (isGlobalStubReference(
- Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
+ if (isGlobalStubReference(Subtarget.classifyGlobalReference(GV)))
return;
Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
@@ -28656,6 +31564,65 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
+/// Check if \p RC is a general purpose register class.
+/// I.e., GR* or one of their variants.
+static bool isGRClass(const TargetRegisterClass &RC) {
+ switch (RC.getID()) {
+ case X86::GR8RegClassID:
+ case X86::GR8_ABCD_LRegClassID:
+ case X86::GR8_ABCD_HRegClassID:
+ case X86::GR8_NOREXRegClassID:
+ case X86::GR16RegClassID:
+ case X86::GR16_ABCDRegClassID:
+ case X86::GR16_NOREXRegClassID:
+ case X86::GR32RegClassID:
+ case X86::GR32_ABCDRegClassID:
+ case X86::GR32_TCRegClassID:
+ case X86::GR32_NOREXRegClassID:
+ case X86::GR32_NOAXRegClassID:
+ case X86::GR32_NOSPRegClassID:
+ case X86::GR32_NOREX_NOSPRegClassID:
+ case X86::GR32_ADRegClassID:
+ case X86::GR64RegClassID:
+ case X86::GR64_ABCDRegClassID:
+ case X86::GR64_TCRegClassID:
+ case X86::GR64_TCW64RegClassID:
+ case X86::GR64_NOREXRegClassID:
+ case X86::GR64_NOSPRegClassID:
+ case X86::GR64_NOREX_NOSPRegClassID:
+ case X86::LOW32_ADDR_ACCESSRegClassID:
+ case X86::LOW32_ADDR_ACCESS_RBPRegClassID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/// Check if \p RC is a vector register class.
+/// I.e., FR* / VR* or one of their variants.
+static bool isFRClass(const TargetRegisterClass &RC) {
+ switch (RC.getID()) {
+ case X86::FR32RegClassID:
+ case X86::FR32XRegClassID:
+ case X86::FR64RegClassID:
+ case X86::FR64XRegClassID:
+ case X86::FR128RegClassID:
+ case X86::VR64RegClassID:
+ case X86::VR128RegClassID:
+ case X86::VR128LRegClassID:
+ case X86::VR128HRegClassID:
+ case X86::VR128XRegClassID:
+ case X86::VR256RegClassID:
+ case X86::VR256LRegClassID:
+ case X86::VR256HRegClassID:
+ case X86::VR256XRegClassID:
+ case X86::VR512RegClassID:
+ return true;
+ default:
+ return false;
+ }
+}
+
std::pair<unsigned, const TargetRegisterClass *>
X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint,
@@ -28670,7 +31637,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
// RIP in the class. Do they matter any more here than they do
// in the normal allocation?
case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
- if (Subtarget->is64Bit()) {
+ if (Subtarget.is64Bit()) {
if (VT == MVT::i32 || VT == MVT::f32)
return std::make_pair(0U, &X86::GR32RegClass);
if (VT == MVT::i16)
@@ -28698,7 +31665,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
return std::make_pair(0U, &X86::GR8RegClass);
if (VT == MVT::i16)
return std::make_pair(0U, &X86::GR16RegClass);
- if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
+ if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
return std::make_pair(0U, &X86::GR32RegClass);
return std::make_pair(0U, &X86::GR64RegClass);
case 'R': // LEGACY_REGS
@@ -28706,7 +31673,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
return std::make_pair(0U, &X86::GR8_NOREXRegClass);
if (VT == MVT::i16)
return std::make_pair(0U, &X86::GR16_NOREXRegClass);
- if (VT == MVT::i32 || !Subtarget->is64Bit())
+ if (VT == MVT::i32 || !Subtarget.is64Bit())
return std::make_pair(0U, &X86::GR32_NOREXRegClass);
return std::make_pair(0U, &X86::GR64_NOREXRegClass);
case 'f': // FP Stack registers.
@@ -28718,13 +31685,13 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
return std::make_pair(0U, &X86::RFP64RegClass);
return std::make_pair(0U, &X86::RFP80RegClass);
case 'y': // MMX_REGS if MMX allowed.
- if (!Subtarget->hasMMX()) break;
+ if (!Subtarget.hasMMX()) break;
return std::make_pair(0U, &X86::VR64RegClass);
case 'Y': // SSE_REGS if SSE2 allowed
- if (!Subtarget->hasSSE2()) break;
+ if (!Subtarget.hasSSE2()) break;
// FALL THROUGH.
case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
- if (!Subtarget->hasSSE1()) break;
+ if (!Subtarget.hasSSE1()) break;
switch (VT.SimpleTy) {
default: break;
@@ -28817,8 +31784,11 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
// return "eax". This should even work for things like getting 64bit integer
// registers when given an f64 type.
const TargetRegisterClass *Class = Res.second;
- if (Class == &X86::GR8RegClass || Class == &X86::GR16RegClass ||
- Class == &X86::GR32RegClass || Class == &X86::GR64RegClass) {
+ // The generic code will match the first register class that contains the
+ // given register. Thus, based on the ordering of the tablegened file,
+ // the "plain" GR classes might not come first.
+ // Therefore, use a helper method.
+ if (isGRClass(*Class)) {
unsigned Size = VT.getSizeInBits();
if (Size == 1) Size = 8;
unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
@@ -28834,11 +31804,7 @@ X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
Res.first = 0;
Res.second = nullptr;
}
- } else if (Class == &X86::FR32RegClass || Class == &X86::FR64RegClass ||
- Class == &X86::VR128RegClass || Class == &X86::VR256RegClass ||
- Class == &X86::FR32XRegClass || Class == &X86::FR64XRegClass ||
- Class == &X86::VR128XRegClass || Class == &X86::VR256XRegClass ||
- Class == &X86::VR512RegClass) {
+ } else if (isFRClass(*Class)) {
// Handle references to XMM physical registers that got mapped into the
// wrong class. This can happen with constraints like {xmm0} where the
// target independent register mapper will just pick the first match it can
@@ -28907,7 +31873,7 @@ bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const {
}
void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
- if (!Subtarget->is64Bit())
+ if (!Subtarget.is64Bit())
return;
// Update IsSplitCSR in X86MachineFunctionInfo.
@@ -28919,12 +31885,12 @@ void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
void X86TargetLowering::insertCopiesSplitCSR(
MachineBasicBlock *Entry,
const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
- const X86RegisterInfo *TRI = Subtarget->getRegisterInfo();
+ const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
if (!IStart)
return;
- const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
MachineBasicBlock::iterator MBBI = Entry->begin();
for (const MCPhysReg *I = IStart; *I; ++I) {
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index b67958a9c498..d826f1ec3e05 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -75,7 +75,7 @@ namespace llvm {
///
CALL,
- /// This operation implements the lowering for readcyclecounter
+ /// This operation implements the lowering for readcyclecounter.
RDTSC_DAG,
/// X86 Read Time-Stamp Counter and Processor ID.
@@ -106,10 +106,6 @@ namespace llvm {
/// 0s or 1s. Generally DTRT for C/C++ with NaNs.
FSETCC,
- /// X86 MOVMSK{pd|ps}, extracts sign bits of two or four FP values,
- /// result in an integer GPR. Needs masking for scalar result.
- FGETSIGNx86,
-
/// X86 conditional moves. Operand 0 and operand 1 are the two values
/// to select from. Operand 2 is the condition code, and operand 3 is the
/// flag operand produced by a CMP or TEST instruction. It also writes a
@@ -191,9 +187,6 @@ namespace llvm {
/// Bitwise Logical AND NOT of Packed FP values.
ANDNP,
- /// Copy integer sign.
- PSIGN,
-
/// Blend where the selector is an immediate.
BLENDI,
@@ -214,30 +207,31 @@ namespace llvm {
FMIN_RND,
FSQRT_RND,
- // FP vector get exponent
+ // FP vector get exponent.
FGETEXP_RND,
- // Extract Normalized Mantissas
+ // Extract Normalized Mantissas.
VGETMANT,
- // FP Scale
+ // FP Scale.
SCALEF,
+ SCALEFS,
+
// Integer add/sub with unsigned saturation.
ADDUS,
SUBUS,
+
// Integer add/sub with signed saturation.
ADDS,
SUBS,
- // Unsigned Integer average
+
+ // Unsigned Integer average.
AVG,
- /// Integer horizontal add.
- HADD,
- /// Integer horizontal sub.
+ /// Integer horizontal add/sub.
+ HADD,
HSUB,
- /// Floating point horizontal add.
+ /// Floating point horizontal add/sub.
FHADD,
-
- /// Floating point horizontal sub.
FHSUB,
// Integer absolute value
@@ -256,7 +250,8 @@ namespace llvm {
/// Note that these typically require refinement
/// in order to obtain suitable precision.
FRSQRT, FRCP,
-
+ FRSQRTS, FRCPS,
+
// Thread Local Storage.
TLSADDR,
@@ -277,6 +272,9 @@ namespace llvm {
// SjLj exception handling longjmp.
EH_SJLJ_LONGJMP,
+ // SjLj exception handling dispatch.
+ EH_SJLJ_SETUP_DISPATCH,
+
/// Tail call return. See X86TargetLowering::LowerCall for
/// the list of operands.
TC_RETURN,
@@ -286,7 +284,6 @@ namespace llvm {
// Vector integer zero-extend.
VZEXT,
-
// Vector integer signed-extend.
VSEXT,
@@ -313,6 +310,11 @@ namespace llvm {
// Vector shift elements
VSHL, VSRL, VSRA,
+ // Vector variable shift right arithmetic.
+ // Unlike ISD::SRA, when the shift count is greater than the element size,
+ // the sign bit is used to fill the destination data element.
+ VSRAV,
+
// Vector shift elements by immediate
VSHLI, VSRLI, VSRAI,
@@ -327,6 +329,8 @@ namespace llvm {
// Vector integer comparisons, the result is in a mask vector.
PCMPEQM, PCMPGTM,
+ MULTISHIFT,
+
/// Vector comparison generating mask bits for fp and
/// integer signed and unsigned data types.
CMPM,
@@ -338,11 +342,13 @@ namespace llvm {
ADD, SUB, ADC, SBB, SMUL,
INC, DEC, OR, XOR, AND,
- BEXTR, // Bit field extract
+ // Bit field extract.
+ BEXTR,
- UMUL, // LOW, HI, FLAGS = umul LHS, RHS
+ // LOW, HI, FLAGS = umul LHS, RHS.
+ UMUL,
- // 8-bit SMUL/UMUL - AX, FLAGS = smul8/umul8 AL, RHS
+ // 8-bit SMUL/UMUL - AX, FLAGS = smul8/umul8 AL, RHS.
SMUL8, UMUL8,
// 8-bit divrem that zero-extend the high result (AH).
@@ -352,6 +358,9 @@ namespace llvm {
// X86-specific multiply by immediate.
MUL_IMM,
+ // Vector sign bit extraction.
+ MOVMSK,
+
// Vector bitwise comparisons.
PTEST,
@@ -362,22 +371,23 @@ namespace llvm {
TESTM,
TESTNM,
- // OR/AND test for masks
+ // OR/AND test for masks.
KORTEST,
KTEST,
// Several flavors of instructions with vector shuffle behaviors.
+ // Saturated signed/unsigned packing.
PACKSS,
PACKUS,
- // Intra-lane alignr
+ // Intra-lane alignr.
PALIGNR,
- // AVX512 inter-lane alignr
+ // AVX512 inter-lane alignr.
VALIGN,
PSHUFD,
PSHUFHW,
PSHUFLW,
SHUFP,
- //Shuffle Packed Values at 128-bit granularity
+ // Shuffle Packed Values at 128-bit granularity.
SHUF128,
MOVDDUP,
MOVSHDUP,
@@ -393,61 +403,82 @@ namespace llvm {
UNPCKH,
VPERMILPV,
VPERMILPI,
+ VPERMI,
+ VPERM2X128,
+
+ // Variable Permute (VPERM).
+ // Res = VPERMV MaskV, V0
VPERMV,
+
+ // 3-op Variable Permute (VPERMT2).
+ // Res = VPERMV3 V0, MaskV, V1
VPERMV3,
+
+ // 3-op Variable Permute overwriting the index (VPERMI2).
+ // Res = VPERMIV3 V0, MaskV, V1
VPERMIV3,
- VPERMI,
- VPERM2X128,
- // Bitwise ternary logic
+
+ // Bitwise ternary logic.
VPTERNLOG,
- // Fix Up Special Packed Float32/64 values
+ // Fix Up Special Packed Float32/64 values.
VFIXUPIMM,
- // Range Restriction Calculation For Packed Pairs of Float32/64 values
+ VFIXUPIMMS,
+ // Range Restriction Calculation For Packed Pairs of Float32/64 values.
VRANGE,
- // Reduce - Perform Reduction Transformation on scalar\packed FP
+ // Reduce - Perform Reduction Transformation on scalar/packed FP.
VREDUCE,
- // RndScale - Round FP Values To Include A Given Number Of Fraction Bits
+ // RndScale - Round FP Values To Include A Given Number Of Fraction Bits.
VRNDSCALE,
- // VFPCLASS - Tests Types Of a FP Values for packed types.
- VFPCLASS,
- // VFPCLASSS - Tests Types Of a FP Values for scalar types.
- VFPCLASSS,
- // Broadcast scalar to vector
+ // Tests Types Of a FP Values for packed types.
+ VFPCLASS,
+ // Tests Types Of a FP Values for scalar types.
+ VFPCLASSS,
+
+ // Broadcast scalar to vector.
VBROADCAST,
- // Broadcast mask to vector
+ // Broadcast mask to vector.
VBROADCASTM,
- // Broadcast subvector to vector
+ // Broadcast subvector to vector.
SUBV_BROADCAST,
- // Insert/Extract vector element
+
+ // Insert/Extract vector element.
VINSERT,
VEXTRACT,
/// SSE4A Extraction and Insertion.
EXTRQI, INSERTQI,
- // XOP variable/immediate rotations
+ // XOP variable/immediate rotations.
VPROT, VPROTI,
- // XOP arithmetic/logical shifts
+ // XOP arithmetic/logical shifts.
VPSHA, VPSHL,
- // XOP signed/unsigned integer comparisons
+ // XOP signed/unsigned integer comparisons.
VPCOM, VPCOMU,
+ // XOP packed permute bytes.
+ VPPERM,
+ // XOP two source permutation.
+ VPERMIL2,
- // Vector multiply packed unsigned doubleword integers
+ // Vector multiply packed unsigned doubleword integers.
PMULUDQ,
- // Vector multiply packed signed doubleword integers
+ // Vector multiply packed signed doubleword integers.
PMULDQ,
- // Vector Multiply Packed UnsignedIntegers with Round and Scale
+    // Vector multiply packed signed integers with round and scale (PMULHRSW).
MULHRS,
- // Multiply and Add Packed Integers
+
+ // Multiply and Add Packed Integers.
VPMADDUBSW, VPMADDWD,
- // FMA nodes
+ VPMADD52L, VPMADD52H,
+
+ // FMA nodes.
FMADD,
FNMADD,
FMSUB,
FNMSUB,
FMADDSUB,
FMSUBADD,
- // FMA with rounding mode
+
+ // FMA with rounding mode.
FMADD_RND,
FNMADD_RND,
FMSUB_RND,
@@ -455,17 +486,20 @@ namespace llvm {
FMADDSUB_RND,
FMSUBADD_RND,
- // Compress and expand
+ // Compress and expand.
COMPRESS,
EXPAND,
- //Convert Unsigned/Integer to Scalar Floating-Point Value
- //with rounding mode
+    // Convert signed/unsigned integer to scalar floating-point value
+    // with rounding mode.
SINT_TO_FP_RND,
UINT_TO_FP_RND,
// Vector float/double to signed/unsigned integer.
FP_TO_SINT_RND, FP_TO_UINT_RND,
+ // Scalar float/double to signed/unsigned integer.
+ SCALAR_FP_TO_SINT_RND, SCALAR_FP_TO_UINT_RND,
+
// Save xmm argument registers to the stack, according to %al. An operator
// is needed so that this can be expanded with control flow.
VASTART_SAVE_XMM_REGS,
@@ -478,11 +512,9 @@ namespace llvm {
// falls back to heap allocation if not.
SEG_ALLOCA,
- // Memory barrier
+ // Memory barriers.
MEMBARRIER,
MFENCE,
- SFENCE,
- LFENCE,
// Store FP status word into i16 register.
FNSTSW16r,
@@ -497,19 +529,26 @@ namespace llvm {
// indicate whether it is valid in CF.
RDSEED,
+ // SSE42 string comparisons.
PCMPISTRI,
PCMPESTRI,
// Test if in transactional execution.
XTEST,
- // ERI instructions
+ // ERI instructions.
RSQRT28, RCP28, EXP2,
// Compare and swap.
LCMPXCHG_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
LCMPXCHG8_DAG,
LCMPXCHG16_DAG,
+ LCMPXCHG8_SAVE_EBX_DAG,
+ LCMPXCHG16_SAVE_RBX_DAG,
+
+ /// LOCK-prefixed arithmetic read-modify-write instructions.
+ /// EFLAGS, OUTCHAIN = LADD(INCHAIN, PTR, RHS)
+ LADD, LSUB, LOR, LXOR, LAND,
// Load, scalar_to_vector, and zero extend.
VZEXT_LOAD,
@@ -551,10 +590,10 @@ namespace llvm {
VAARG_64
// WARNING: Do not add anything in the end unless you want the node to
- // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
- // thought as target memory ops!
+ // have memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all
+    // opcodes will be treated as target memory ops!
};
- }
+ } // end namespace X86ISD
/// Define some predicates that are used for node matching.
namespace X86 {
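
The warning a few lines up is load-bearing: generic SelectionDAG code classifies any target opcode numbered at or above ISD::FIRST_TARGET_MEMORY_OPCODE as a memory operation, which is why LCMPXCHG_DAG anchors the memory block and nothing may be appended after VAARG_64. A minimal sketch of the classification, paraphrasing the generic SDNode helper (illustrative context, not part of this patch):

    // Opcodes at or beyond FIRST_TARGET_MEMORY_OPCODE are assumed by generic
    // DAG code to be memory operations carrying memory operands.
    bool SDNode::isTargetMemoryOpcode() const {
      return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
    }
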
@@ -606,13 +645,12 @@ namespace llvm {
bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
bool hasSymbolicDisplacement = true);
-
/// Determines whether the callee is required to pop its
/// own arguments. Callee pop is necessary to support tail calls.
bool isCalleePop(CallingConv::ID CallingConv,
- bool is64Bit, bool IsVarArg, bool TailCallOpt);
+ bool is64Bit, bool IsVarArg, bool GuaranteeTCO);
- }
+ } // end namespace X86
//===--------------------------------------------------------------------===//
// X86 Implementation of the TargetLowering interface
@@ -679,13 +717,20 @@ namespace llvm {
///
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+ /// Places new result values for the node in Results (their number
+ /// and types must exactly match those of the original return values of
+ /// the node), or leaves Results empty, which indicates that the node is not
+ /// to be custom lowered after all.
+ void LowerOperationWrapper(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const override;
+
/// Replace the results of node with an illegal result
/// type with new values built out of custom code.
///
void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const override;
-
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
/// Return true if the target has native support for
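
For context on how the new LowerOperationWrapper override interacts with LowerOperation: the generic default (paraphrased from TargetLowering, shown here as a sketch) forwards to the single-result hook and records whatever comes back, so leaving Results empty is the signal that the node is not custom lowered after all:

    // Default behavior, roughly: try the ordinary custom-lowering hook; if it
    // produced a replacement node, report its first value as the sole result.
    void TargetLowering::LowerOperationWrapper(SDNode *N,
                                               SmallVectorImpl<SDValue> &Results,
                                               SelectionDAG &DAG) const {
      SDValue Res = LowerOperation(SDValue(N, 0), DAG);
      if (Res.getNode())
        Results.push_back(Res);
    }
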
@@ -705,9 +750,8 @@ namespace llvm {
bool hasCopyImplyingStackAdjustment(MachineFunction *MF) const override;
MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const override;
-
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *MBB) const override;
/// This method returns the name of a target specific DAG node.
const char *getTargetNodeName(unsigned Opcode) const override;
@@ -716,6 +760,12 @@ namespace llvm {
bool isCheapToSpeculateCtlz() const override;
+ bool hasBitPreservingFPLogic(EVT VT) const override {
+ return VT == MVT::f32 || VT == MVT::f64 || VT.isVector();
+ }
+
+ bool hasAndNotCompare(SDValue Y) const override;
+
/// Return the value type to use for ISD::SETCC.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
EVT VT) const override;
@@ -914,16 +964,21 @@ namespace llvm {
unsigned
getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
+    bool needsFixedCatchObjects() const override;
+
/// This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo) const override;
- /// Return true if the target stores stack protector cookies at a fixed
- /// offset in some non-standard address space, and populates the address
- /// space and offset as appropriate.
- bool getStackCookieLocation(unsigned &AddressSpace,
- unsigned &Offset) const override;
+ /// If the target has a standard location for the stack protector cookie,
+ /// returns the address of that location. Otherwise, returns nullptr.
+ Value *getIRStackGuard(IRBuilder<> &IRB) const override;
+
+ bool useLoadStackGuardNode() const override;
+ void insertSSPDeclarations(Module &M) const override;
+ Value *getSDagStackGuard(const Module &M) const override;
+ Value *getSSPStackGuardCheck(const Module &M) const override;
/// Return true if the target stores SafeStack pointer at a fixed offset in
/// some non-standard address space, and populates the address space and
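
As background for the stack-protector hooks introduced above: on glibc and Android targets the x86 backend can point IR directly at the cookie slot in the thread control block instead of loading __stack_chk_guard. A hedged sketch of that idea, assuming the conventional %fs:0x28 slot on x86-64 and LLVM's x86 convention that address space 256 is GS and 257 is FS (the numbers are backend conventions, not guaranteed by this header):

    // Illustrative only: materialize an IR pointer to %fs:0x28, where glibc
    // keeps the 64-bit stack guard; 32-bit targets use %gs:0x14 instead.
    static Value *segmentStackGuard(IRBuilder<> &IRB) {
      unsigned AS = 257;   // x86 FS segment address space
      int Offset = 0x28;   // glibc TCB stack_guard slot, 64-bit layout
      return ConstantExpr::getIntToPtr(
          ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
          IRB.getInt8PtrTy(AS));
    }
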
@@ -935,21 +990,24 @@ namespace llvm {
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
- bool useLoadStackGuardNode() const override;
/// \brief Customize the preferred legalization strategy for certain types.
LegalizeTypeAction getPreferredVectorAction(EVT VT) const override;
bool isIntDivCheap(EVT VT, AttributeSet Attr) const override;
+ bool supportSwiftError() const override {
+ return true;
+ }
+
protected:
std::pair<const TargetRegisterClass *, uint8_t>
findRepresentativeClass(const TargetRegisterInfo *TRI,
MVT VT) const override;
private:
- /// Keep a pointer to the X86Subtarget around so that we can
+ /// Keep a reference to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
- const X86Subtarget *Subtarget;
+ const X86Subtarget &Subtarget;
/// Select between SSE or x87 floating point ops.
/// When SSE is available, use it for f32 operations.
@@ -969,16 +1027,15 @@ namespace llvm {
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
- SDValue LowerMemArgument(SDValue Chain,
- CallingConv::ID CallConv,
+ SDValue LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
const SmallVectorImpl<ISD::InputArg> &ArgInfo,
- SDLoc dl, SelectionDAG &DAG,
- const CCValAssign &VA, MachineFrameInfo *MFI,
- unsigned i) const;
+ const SDLoc &dl, SelectionDAG &DAG,
+ const CCValAssign &VA, MachineFrameInfo *MFI,
+ unsigned i) const;
SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const;
@@ -997,12 +1054,15 @@ namespace llvm {
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const;
SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
- SDValue Chain, bool IsTailCall, bool Is64Bit,
- int FPDiff, SDLoc dl) const;
+ SDValue Chain, bool IsTailCall,
+ bool Is64Bit, int FPDiff,
+ const SDLoc &dl) const;
unsigned GetAlignedArgumentStackSize(unsigned StackSize,
SelectionDAG &DAG) const;
+    unsigned getAddressSpace() const;
+
std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
bool isSigned,
bool isReplace) const;
@@ -1017,7 +1077,7 @@ namespace llvm {
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
+ SDValue LowerGlobalAddress(const GlobalValue *GV, const SDLoc &dl,
int64_t Offset, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
@@ -1030,8 +1090,8 @@ namespace llvm {
SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerToBT(SDValue And, ISD::CondCode CC,
- SDLoc dl, SelectionDAG &DAG) const;
+ SDValue LowerToBT(SDValue And, ISD::CondCode CC, const SDLoc &dl,
+ SelectionDAG &DAG) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSETCCE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
@@ -1046,6 +1106,7 @@ namespace llvm {
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
@@ -1053,19 +1114,17 @@ namespace llvm {
SDValue LowerGC_TRANSITION_END(SDValue Op, SelectionDAG &DAG) const;
SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const override;
+ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
- SDValue LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const override;
+ const SDLoc &dl, SelectionDAG &DAG) const override;
bool supportSplitCSR(MachineFunction *MF) const override {
return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
@@ -1080,8 +1139,8 @@ namespace llvm {
bool mayBeEmittedAsTailCall(CallInst *CI) const override;
- EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
- ISD::NodeType ExtendKind) const override;
+ EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
+ ISD::NodeType ExtendKind) const override;
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
bool isVarArg,
@@ -1101,57 +1160,60 @@ namespace llvm {
bool needsCmpXchgNb(Type *MemType) const;
+ void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
+ MachineBasicBlock *DispatchBB, int FI) const;
+
// Utility function to emit the low-level va_arg code for X86-64.
- MachineBasicBlock *EmitVAARG64WithCustomInserter(
- MachineInstr *MI,
- MachineBasicBlock *MBB) const;
+ MachineBasicBlock *
+ EmitVAARG64WithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *MBB) const;
/// Utility function to emit the xmm reg save portion of va_start.
- MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
- MachineInstr *BInstr,
- MachineBasicBlock *BB) const;
+ MachineBasicBlock *
+ EmitVAStartSaveXMMRegsWithCustomInserter(MachineInstr &BInstr,
+ MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
+ MachineBasicBlock *EmitLoweredSelect(MachineInstr &I,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredAtomicFP(MachineInstr *I,
+ MachineBasicBlock *EmitLoweredAtomicFP(MachineInstr &I,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
- MachineBasicBlock *BB) const;
-
- MachineBasicBlock *EmitLoweredCatchRet(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredCatchPad(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredTLSAddr(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredTLSAddr(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredTLSCall(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr *MI,
+ MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const;
- MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
+ MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const;
- MachineBasicBlock *emitFMA3Instr(MachineInstr *MI,
+ MachineBasicBlock *emitFMA3Instr(MachineInstr &MI,
MachineBasicBlock *MBB) const;
+ MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr &MI,
+ MachineBasicBlock *MBB) const;
+
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent, for use with the given x86 condition code.
- SDValue EmitTest(SDValue Op0, unsigned X86CC, SDLoc dl,
+ SDValue EmitTest(SDValue Op0, unsigned X86CC, const SDLoc &dl,
SelectionDAG &DAG) const;
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent, for use with the given x86 condition code.
- SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, SDLoc dl,
+ SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, const SDLoc &dl,
SelectionDAG &DAG) const;
/// Convert a comparison if required by the subtarget.
@@ -1173,7 +1235,7 @@ namespace llvm {
namespace X86 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo);
- }
-}
+ } // end namespace X86
+} // end namespace llvm
-#endif // X86ISELLOWERING_H
+#endif // LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
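
One mechanical consequence of the MachineInstr pointer-to-reference migration visible throughout the header above: every custom-inserter override now dispatches on the instruction directly with '.' access. A sketch of the post-change shape (dispatch table abbreviated; the full patch covers many more opcodes):

    MachineBasicBlock *
    X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                   MachineBasicBlock *MBB) const {
      switch (MI.getOpcode()) {   // was MI->getOpcode()
      default:
        llvm_unreachable("Unexpected instr type to insert");
      case X86::TLS_addr32:
      case X86::TLS_addr64:
        return EmitLoweredTLSAddr(MI, MBB);   // helpers take MachineInstr& too
      // ... remaining cases elided ...
      }
    }
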
diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index 6f0199b015cd..de4129f86541 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -30,6 +30,10 @@ class X86VectorVTInfo<int numelts, ValueType eltvt, RegisterClass rc,
// Corresponding write-mask register class.
RegisterClass KRCWM = !cast<RegisterClass>("VK" # NumElts # "WM");
+ // The mask VT.
+ ValueType KVT = !cast<ValueType>(!if (!eq (NumElts, 1), "i1",
+ "v" # NumElts # "i1"));
+
// The GPR register class that can hold the write mask. Use GR8 for fewer
// than 8 elements. Use shift-right and equal to work around the lack of
// !lt in tablegen.
@@ -95,6 +99,12 @@ class X86VectorVTInfo<int numelts, ValueType eltvt, RegisterClass rc,
"v" # NumElts # "f" # EltSize,
VTName)));
+ ValueType IntVT = !cast<ValueType>(
+ !if (!eq (!srl(EltSize,5),0),
+ VTName,
+ !if (!eq(TypeVariantName, "f"),
+ "v" # NumElts # "i" # EltSize,
+ VTName)));
// The string to specify embedded broadcast in assembly.
string BroadcastStr = "{1to" # NumElts # "}";
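
A rough C++ analogue may make the IntVT computation above easier to read: the !srl(EltSize,5) test is a stand-in for "element at least 32 bits wide" (TableGen has no !lt), so only f32/f64 element types get rewritten to the integer vector of identical shape. A sketch under that reading, not backend code:

    // Mirrors the TableGen selection: 32/64-bit FP element types map to the
    // same-shape integer vector type; everything else maps to itself.
    static MVT getIntVT(MVT VT) {
      if (VT.isVector() && VT.isFloatingPoint() &&
          VT.getScalarSizeInBits() >= 32)
        return MVT::getVectorVT(MVT::getIntegerVT(VT.getScalarSizeInBits()),
                                VT.getVectorNumElements());
      return VT;
    }
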
@@ -238,12 +248,12 @@ multiclass AVX512_maskable<bits<8> O, Format F, X86VectorVTInfo _,
string AttSrcAsm, string IntelSrcAsm,
dag RHS,
InstrItinClass itin = NoItinerary,
- bit IsCommutable = 0> :
+ bit IsCommutable = 0, SDNode Select = vselect> :
AVX512_maskable_common<O, F, _, Outs, Ins,
!con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
!con((ins _.KRCWM:$mask), Ins),
OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
- (vselect _.KRCWM:$mask, RHS, _.RC:$src0), vselect,
+ (Select _.KRCWM:$mask, RHS, _.RC:$src0), Select,
"$src0 = $dst", itin, IsCommutable>;
// This multiclass generates the unconditional/non-masking, the masking and
@@ -258,8 +268,8 @@ multiclass AVX512_maskable_scalar<bits<8> O, Format F, X86VectorVTInfo _,
!con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
!con((ins _.KRCWM:$mask), Ins),
OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
- (X86select _.KRCWM:$mask, RHS, _.RC:$src0), X86select,
- "$src0 = $dst", itin, IsCommutable>;
+ (X86selects _.KRCWM:$mask, RHS, _.RC:$src0),
+ X86selects, "$src0 = $dst", itin, IsCommutable>;
// Similar to AVX512_maskable but in this case one of the source operands
// ($src1) is already tied to $dst so we just use that for the preserved
@@ -301,7 +311,8 @@ multiclass AVX512_maskable_3src_scalar<bits<8> O, Format F, X86VectorVTInfo _,
!con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
!con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
- (X86select _.KRCWM:$mask, RHS, _.RC:$src1)>;
+ (X86selects _.KRCWM:$mask, RHS, _.RC:$src1),
+ X86selects>;
multiclass AVX512_maskable_in_asm<bits<8> O, Format F, X86VectorVTInfo _,
dag Outs, dag Ins,
@@ -363,119 +374,58 @@ multiclass AVX512_maskable_cmp_alt<bits<8> O, Format F, X86VectorVTInfo _,
AttSrcAsm, IntelSrcAsm, [],[]>;
// Bitcasts between 512-bit vector types. Return the original type since
-// no instruction is needed for the conversion
-let Predicates = [HasAVX512] in {
- def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
- def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
- def : Pat<(v8f64 (bitconvert (v32i16 VR512:$src))), (v8f64 VR512:$src)>;
- def : Pat<(v8f64 (bitconvert (v64i8 VR512:$src))), (v8f64 VR512:$src)>;
- def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
- def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>;
- def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
- def : Pat<(v16f32 (bitconvert (v32i16 VR512:$src))), (v16f32 VR512:$src)>;
- def : Pat<(v16f32 (bitconvert (v64i8 VR512:$src))), (v16f32 VR512:$src)>;
- def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
- def : Pat<(v8i64 (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
- def : Pat<(v8i64 (bitconvert (v32i16 VR512:$src))), (v8i64 VR512:$src)>;
- def : Pat<(v8i64 (bitconvert (v64i8 VR512:$src))), (v8i64 VR512:$src)>;
- def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
- def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
- def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>;
- def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
- def : Pat<(v16i32 (bitconvert (v32i16 VR512:$src))), (v16i32 VR512:$src)>;
- def : Pat<(v16i32 (bitconvert (v64i8 VR512:$src))), (v16i32 VR512:$src)>;
- def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>;
- def : Pat<(v32i16 (bitconvert (v8i64 VR512:$src))), (v32i16 VR512:$src)>;
- def : Pat<(v32i16 (bitconvert (v16i32 VR512:$src))), (v32i16 VR512:$src)>;
- def : Pat<(v32i16 (bitconvert (v64i8 VR512:$src))), (v32i16 VR512:$src)>;
- def : Pat<(v32i16 (bitconvert (v8f64 VR512:$src))), (v32i16 VR512:$src)>;
- def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
- def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
- def : Pat<(v64i8 (bitconvert (v8i64 VR512:$src))), (v64i8 VR512:$src)>;
- def : Pat<(v64i8 (bitconvert (v16i32 VR512:$src))), (v64i8 VR512:$src)>;
- def : Pat<(v64i8 (bitconvert (v32i16 VR512:$src))), (v64i8 VR512:$src)>;
- def : Pat<(v64i8 (bitconvert (v8f64 VR512:$src))), (v64i8 VR512:$src)>;
- def : Pat<(v64i8 (bitconvert (v16f32 VR512:$src))), (v64i8 VR512:$src)>;
-
- def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>;
- def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>;
- def : Pat<(v2i64 (bitconvert (v16i8 VR128X:$src))), (v2i64 VR128X:$src)>;
- def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>;
- def : Pat<(v2i64 (bitconvert (v4f32 VR128X:$src))), (v2i64 VR128X:$src)>;
- def : Pat<(v4i32 (bitconvert (v2i64 VR128X:$src))), (v4i32 VR128X:$src)>;
- def : Pat<(v4i32 (bitconvert (v8i16 VR128X:$src))), (v4i32 VR128X:$src)>;
- def : Pat<(v4i32 (bitconvert (v16i8 VR128X:$src))), (v4i32 VR128X:$src)>;
- def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>;
- def : Pat<(v4i32 (bitconvert (v4f32 VR128X:$src))), (v4i32 VR128X:$src)>;
- def : Pat<(v8i16 (bitconvert (v2i64 VR128X:$src))), (v8i16 VR128X:$src)>;
- def : Pat<(v8i16 (bitconvert (v4i32 VR128X:$src))), (v8i16 VR128X:$src)>;
- def : Pat<(v8i16 (bitconvert (v16i8 VR128X:$src))), (v8i16 VR128X:$src)>;
- def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>;
- def : Pat<(v8i16 (bitconvert (v4f32 VR128X:$src))), (v8i16 VR128X:$src)>;
- def : Pat<(v16i8 (bitconvert (v2i64 VR128X:$src))), (v16i8 VR128X:$src)>;
- def : Pat<(v16i8 (bitconvert (v4i32 VR128X:$src))), (v16i8 VR128X:$src)>;
- def : Pat<(v16i8 (bitconvert (v8i16 VR128X:$src))), (v16i8 VR128X:$src)>;
- def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>;
- def : Pat<(v16i8 (bitconvert (v4f32 VR128X:$src))), (v16i8 VR128X:$src)>;
- def : Pat<(v4f32 (bitconvert (v2i64 VR128X:$src))), (v4f32 VR128X:$src)>;
- def : Pat<(v4f32 (bitconvert (v4i32 VR128X:$src))), (v4f32 VR128X:$src)>;
- def : Pat<(v4f32 (bitconvert (v8i16 VR128X:$src))), (v4f32 VR128X:$src)>;
- def : Pat<(v4f32 (bitconvert (v16i8 VR128X:$src))), (v4f32 VR128X:$src)>;
- def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>;
- def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>;
- def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>;
- def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>;
- def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>;
- def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>;
-
-// Bitcasts between 256-bit vector types. Return the original type since
-// no instruction is needed for the conversion
- def : Pat<(v4f64 (bitconvert (v8f32 VR256X:$src))), (v4f64 VR256X:$src)>;
- def : Pat<(v4f64 (bitconvert (v8i32 VR256X:$src))), (v4f64 VR256X:$src)>;
- def : Pat<(v4f64 (bitconvert (v4i64 VR256X:$src))), (v4f64 VR256X:$src)>;
- def : Pat<(v4f64 (bitconvert (v16i16 VR256X:$src))), (v4f64 VR256X:$src)>;
- def : Pat<(v4f64 (bitconvert (v32i8 VR256X:$src))), (v4f64 VR256X:$src)>;
- def : Pat<(v8f32 (bitconvert (v8i32 VR256X:$src))), (v8f32 VR256X:$src)>;
- def : Pat<(v8f32 (bitconvert (v4i64 VR256X:$src))), (v8f32 VR256X:$src)>;
- def : Pat<(v8f32 (bitconvert (v4f64 VR256X:$src))), (v8f32 VR256X:$src)>;
- def : Pat<(v8f32 (bitconvert (v32i8 VR256X:$src))), (v8f32 VR256X:$src)>;
- def : Pat<(v8f32 (bitconvert (v16i16 VR256X:$src))), (v8f32 VR256X:$src)>;
- def : Pat<(v4i64 (bitconvert (v8f32 VR256X:$src))), (v4i64 VR256X:$src)>;
- def : Pat<(v4i64 (bitconvert (v8i32 VR256X:$src))), (v4i64 VR256X:$src)>;
- def : Pat<(v4i64 (bitconvert (v4f64 VR256X:$src))), (v4i64 VR256X:$src)>;
- def : Pat<(v4i64 (bitconvert (v32i8 VR256X:$src))), (v4i64 VR256X:$src)>;
- def : Pat<(v4i64 (bitconvert (v16i16 VR256X:$src))), (v4i64 VR256X:$src)>;
- def : Pat<(v32i8 (bitconvert (v4f64 VR256X:$src))), (v32i8 VR256X:$src)>;
- def : Pat<(v32i8 (bitconvert (v4i64 VR256X:$src))), (v32i8 VR256X:$src)>;
- def : Pat<(v32i8 (bitconvert (v8f32 VR256X:$src))), (v32i8 VR256X:$src)>;
- def : Pat<(v32i8 (bitconvert (v8i32 VR256X:$src))), (v32i8 VR256X:$src)>;
- def : Pat<(v32i8 (bitconvert (v16i16 VR256X:$src))), (v32i8 VR256X:$src)>;
- def : Pat<(v8i32 (bitconvert (v32i8 VR256X:$src))), (v8i32 VR256X:$src)>;
- def : Pat<(v8i32 (bitconvert (v16i16 VR256X:$src))), (v8i32 VR256X:$src)>;
- def : Pat<(v8i32 (bitconvert (v8f32 VR256X:$src))), (v8i32 VR256X:$src)>;
- def : Pat<(v8i32 (bitconvert (v4i64 VR256X:$src))), (v8i32 VR256X:$src)>;
- def : Pat<(v8i32 (bitconvert (v4f64 VR256X:$src))), (v8i32 VR256X:$src)>;
- def : Pat<(v16i16 (bitconvert (v8f32 VR256X:$src))), (v16i16 VR256X:$src)>;
- def : Pat<(v16i16 (bitconvert (v8i32 VR256X:$src))), (v16i16 VR256X:$src)>;
- def : Pat<(v16i16 (bitconvert (v4i64 VR256X:$src))), (v16i16 VR256X:$src)>;
- def : Pat<(v16i16 (bitconvert (v4f64 VR256X:$src))), (v16i16 VR256X:$src)>;
- def : Pat<(v16i16 (bitconvert (v32i8 VR256X:$src))), (v16i16 VR256X:$src)>;
-}
-
-//
-// AVX-512: VPXOR instruction writes zero to its upper part, it's safe build zeros.
-//
-
+// no instruction is needed for the conversion.
+def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
+def : Pat<(v8f64 (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
+def : Pat<(v8f64 (bitconvert (v32i16 VR512:$src))), (v8f64 VR512:$src)>;
+def : Pat<(v8f64 (bitconvert (v64i8 VR512:$src))), (v8f64 VR512:$src)>;
+def : Pat<(v8f64 (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
+def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>;
+def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
+def : Pat<(v16f32 (bitconvert (v32i16 VR512:$src))), (v16f32 VR512:$src)>;
+def : Pat<(v16f32 (bitconvert (v64i8 VR512:$src))), (v16f32 VR512:$src)>;
+def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))), (v16f32 VR512:$src)>;
+def : Pat<(v8i64 (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
+def : Pat<(v8i64 (bitconvert (v32i16 VR512:$src))), (v8i64 VR512:$src)>;
+def : Pat<(v8i64 (bitconvert (v64i8 VR512:$src))), (v8i64 VR512:$src)>;
+def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
+def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
+def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>;
+def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
+def : Pat<(v16i32 (bitconvert (v32i16 VR512:$src))), (v16i32 VR512:$src)>;
+def : Pat<(v16i32 (bitconvert (v64i8 VR512:$src))), (v16i32 VR512:$src)>;
+def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))), (v16i32 VR512:$src)>;
+def : Pat<(v32i16 (bitconvert (v8i64 VR512:$src))), (v32i16 VR512:$src)>;
+def : Pat<(v32i16 (bitconvert (v16i32 VR512:$src))), (v32i16 VR512:$src)>;
+def : Pat<(v32i16 (bitconvert (v64i8 VR512:$src))), (v32i16 VR512:$src)>;
+def : Pat<(v32i16 (bitconvert (v8f64 VR512:$src))), (v32i16 VR512:$src)>;
+def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
+def : Pat<(v64i8 (bitconvert (v8i64 VR512:$src))), (v64i8 VR512:$src)>;
+def : Pat<(v64i8 (bitconvert (v16i32 VR512:$src))), (v64i8 VR512:$src)>;
+def : Pat<(v64i8 (bitconvert (v32i16 VR512:$src))), (v64i8 VR512:$src)>;
+def : Pat<(v64i8 (bitconvert (v8f64 VR512:$src))), (v64i8 VR512:$src)>;
+def : Pat<(v64i8 (bitconvert (v16f32 VR512:$src))), (v64i8 VR512:$src)>;
+
+// Alias instruction that maps zero vector to pxor / xorp* for AVX-512.
+// This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
+// swizzled by ExecutionDepsFix to pxor.
+// We set canFoldAsLoad because this can be converted to a constant-pool
+// load of an all-zeros value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isPseudo = 1, Predicates = [HasAVX512] in {
+ isPseudo = 1, Predicates = [HasAVX512], SchedRW = [WriteZero] in {
def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
- [(set VR512:$dst, (v16f32 immAllZerosV))]>;
+ [(set VR512:$dst, (v16i32 immAllZerosV))]>;
+def AVX512_512_SETALLONES : I<0, Pseudo, (outs VR512:$dst), (ins), "",
+ [(set VR512:$dst, (v16i32 immAllOnesV))]>;
}
-let Predicates = [HasAVX512] in {
-def : Pat<(v8i64 immAllZerosV), (AVX512_512_SET0)>;
-def : Pat<(v16i32 immAllZerosV), (AVX512_512_SET0)>;
-def : Pat<(v8f64 immAllZerosV), (AVX512_512_SET0)>;
+let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
+ isPseudo = 1, Predicates = [HasVLX], SchedRW = [WriteZero] in {
+def AVX512_128_SET0 : I<0, Pseudo, (outs VR128X:$dst), (ins), "",
+ [(set VR128X:$dst, (v4i32 immAllZerosV))]>;
+def AVX512_256_SET0 : I<0, Pseudo, (outs VR256X:$dst), (ins), "",
+ [(set VR256X:$dst, (v8i32 immAllZerosV))]>;
}
//===----------------------------------------------------------------------===//
@@ -483,7 +433,7 @@ def : Pat<(v8f64 immAllZerosV), (AVX512_512_SET0)>;
//
multiclass vinsert_for_size<int Opcode, X86VectorVTInfo From, X86VectorVTInfo To,
PatFrag vinsert_insert> {
- let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
+ let ExeDomain = To.ExeDomain in {
defm rr : AVX512_maskable<Opcode, MRMSrcReg, To, (outs To.RC:$dst),
(ins To.RC:$src1, From.RC:$src2, i32u8imm:$src3),
"vinsert" # From.EltTypeName # "x" # From.NumElts,
@@ -492,7 +442,6 @@ multiclass vinsert_for_size<int Opcode, X86VectorVTInfo From, X86VectorVTInfo To
(From.VT From.RC:$src2),
(iPTR imm))>, AVX512AIi8Base, EVEX_4V;
- let mayLoad = 1 in
defm rm : AVX512_maskable<Opcode, MRMSrcMem, To, (outs To.RC:$dst),
(ins To.RC:$src1, From.MemOp:$src2, i32u8imm:$src3),
"vinsert" # From.EltTypeName # "x" # From.NumElts,
@@ -615,19 +564,9 @@ def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
// AVX-512 VECTOR EXTRACT
//---
-multiclass vextract_for_size_first_position_lowering<X86VectorVTInfo From,
- X86VectorVTInfo To> {
- // A subvector extract from the first vector position is
- // a subregister copy that needs no instruction.
- def NAME # To.NumElts:
- Pat<(To.VT (extract_subvector (From.VT From.RC:$src),(iPTR 0))),
- (To.VT (EXTRACT_SUBREG (From.VT From.RC:$src), To.SubRegIdx))>;
-}
-
multiclass vextract_for_size<int Opcode,
X86VectorVTInfo From, X86VectorVTInfo To,
- PatFrag vextract_extract> :
- vextract_for_size_first_position_lowering<From, To> {
+ PatFrag vextract_extract> {
let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
// use AVX512_maskable_in_asm (AVX512_maskable can't be used due to
@@ -640,21 +579,22 @@ multiclass vextract_for_size<int Opcode,
[(set To.RC:$dst, (vextract_extract:$idx (From.VT From.RC:$src1),
(iPTR imm)))]>,
AVX512AIi8Base, EVEX;
- let mayStore = 1 in {
- def rm : AVX512AIi8<Opcode, MRMDestMem, (outs),
- (ins To.MemOp:$dst, From.RC:$src1, i32u8imm:$src2),
- "vextract" # To.EltTypeName # "x" # To.NumElts #
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, EVEX;
-
- def rmk : AVX512AIi8<Opcode, MRMDestMem, (outs),
- (ins To.MemOp:$dst, To.KRCWM:$mask,
- From.RC:$src1, i32u8imm:$src2),
- "vextract" # To.EltTypeName # "x" # To.NumElts #
- "\t{$src2, $src1, $dst {${mask}}|"
- "$dst {${mask}}, $src1, $src2}",
- []>, EVEX_K, EVEX;
- }//mayStore = 1
+ def mr : AVX512AIi8<Opcode, MRMDestMem, (outs),
+ (ins To.MemOp:$dst, From.RC:$src1, i32u8imm:$idx),
+ "vextract" # To.EltTypeName # "x" # To.NumElts #
+ "\t{$idx, $src1, $dst|$dst, $src1, $idx}",
+ [(store (To.VT (vextract_extract:$idx
+ (From.VT From.RC:$src1), (iPTR imm))),
+ addr:$dst)]>, EVEX;
+
+ let mayStore = 1, hasSideEffects = 0 in
+ def mrk : AVX512AIi8<Opcode, MRMDestMem, (outs),
+ (ins To.MemOp:$dst, To.KRCWM:$mask,
+ From.RC:$src1, i32u8imm:$idx),
+ "vextract" # To.EltTypeName # "x" # To.NumElts #
+ "\t{$idx, $src1, $dst {${mask}}|"
+ "$dst {${mask}}, $src1, $idx}",
+ []>, EVEX_K, EVEX;
}
// Intrinsic call with masking.
@@ -688,14 +628,17 @@ multiclass vextract_for_size<int Opcode,
// Codegen pattern for the alternative types
multiclass vextract_for_size_lowering<string InstrStr, X86VectorVTInfo From,
X86VectorVTInfo To, PatFrag vextract_extract,
- SDNodeXForm EXTRACT_get_vextract_imm, list<Predicate> p> :
- vextract_for_size_first_position_lowering<From, To> {
-
- let Predicates = p in
+ SDNodeXForm EXTRACT_get_vextract_imm, list<Predicate> p> {
+ let Predicates = p in {
def : Pat<(vextract_extract:$ext (From.VT From.RC:$src1), (iPTR imm)),
(To.VT (!cast<Instruction>(InstrStr#"rr")
From.RC:$src1,
(EXTRACT_get_vextract_imm To.RC:$ext)))>;
+ def : Pat<(store (To.VT (vextract_extract:$ext (From.VT From.RC:$src1),
+ (iPTR imm))), addr:$dst),
+ (!cast<Instruction>(InstrStr#"mr") addr:$dst, From.RC:$src1,
+ (EXTRACT_get_vextract_imm To.RC:$ext))>;
+ }
}
multiclass vextract_for_type<ValueType EltVT32, int Opcode128,
@@ -756,6 +699,12 @@ defm : vextract_for_size_lowering<"VEXTRACTF32x4Z256", v4f64x_info, v2f64x_info,
defm : vextract_for_size_lowering<"VEXTRACTI32x4Z256", v4i64x_info, v2i64x_info,
vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX, NoDQI]>;
+// Codegen pattern with the alternative types extract VEC128 from VEC256
+defm : vextract_for_size_lowering<"VEXTRACTI32x4Z256", v16i16x_info, v8i16x_info,
+ vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX]>;
+defm : vextract_for_size_lowering<"VEXTRACTI32x4Z256", v32i8x_info, v16i8x_info,
+ vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX]>;
+
// Codegen pattern with the alternative types extract VEC128 from VEC512
defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v32i16_info, v8i16x_info,
vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512]>;
@@ -767,46 +716,76 @@ defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v32i16_info, v16i16x_info,
defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v64i8_info, v32i8x_info,
vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512]>;
+// A 128-bit subvector extract from the first 512-bit vector position
+// is a subregister copy that needs no instruction.
+def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
+ (v2i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>;
+def : Pat<(v2f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
+ (v2f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;
+def : Pat<(v4i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
+ (v4i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>;
+def : Pat<(v4f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
+ (v4f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
+def : Pat<(v8i16 (extract_subvector (v32i16 VR512:$src), (iPTR 0))),
+ (v8i16 (EXTRACT_SUBREG (v32i16 VR512:$src), sub_xmm))>;
+def : Pat<(v16i8 (extract_subvector (v64i8 VR512:$src), (iPTR 0))),
+ (v16i8 (EXTRACT_SUBREG (v64i8 VR512:$src), sub_xmm))>;
+
+// A 256-bit subvector extract from the first 512-bit vector position
+// is a subregister copy that needs no instruction.
+def : Pat<(v4i64 (extract_subvector (v8i64 VR512:$src), (iPTR 0))),
+ (v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm))>;
+def : Pat<(v4f64 (extract_subvector (v8f64 VR512:$src), (iPTR 0))),
+ (v4f64 (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm))>;
+def : Pat<(v8i32 (extract_subvector (v16i32 VR512:$src), (iPTR 0))),
+ (v8i32 (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm))>;
+def : Pat<(v8f32 (extract_subvector (v16f32 VR512:$src), (iPTR 0))),
+ (v8f32 (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm))>;
+def : Pat<(v16i16 (extract_subvector (v32i16 VR512:$src), (iPTR 0))),
+ (v16i16 (EXTRACT_SUBREG (v32i16 VR512:$src), sub_ymm))>;
+def : Pat<(v32i8 (extract_subvector (v64i8 VR512:$src), (iPTR 0))),
+ (v32i8 (EXTRACT_SUBREG (v64i8 VR512:$src), sub_ymm))>;
+
+let AddedComplexity = 25 in { // to give priority over vinsertf128rm
// A 128-bit subvector insert to the first 512-bit vector position
// is a subregister copy that needs no instruction.
-def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
- (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
- (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
- sub_ymm)>;
-def : Pat<(insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0)),
- (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
- (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
- sub_ymm)>;
-def : Pat<(insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0)),
- (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
- (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
- sub_ymm)>;
-def : Pat<(insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0)),
- (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
- (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
- sub_ymm)>;
-
-def : Pat<(insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0)),
+def : Pat<(v8i64 (insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0))),
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
+def : Pat<(v8f64 (insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0))),
+ (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
+def : Pat<(v16i32 (insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0))),
+ (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
+def : Pat<(v16f32 (insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0))),
+ (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
+def : Pat<(v32i16 (insert_subvector undef, (v8i16 VR128X:$src), (iPTR 0))),
+ (INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
+def : Pat<(v64i8 (insert_subvector undef, (v16i8 VR128X:$src), (iPTR 0))),
+ (INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)>;
+
+// A 256-bit subvector insert to the first 512-bit vector position
+// is a subregister copy that needs no instruction.
+def : Pat<(v8i64 (insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0))),
(INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
-def : Pat<(insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0)),
+def : Pat<(v8f64 (insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0))),
(INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
-def : Pat<(insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0)),
+def : Pat<(v16i32 (insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0))),
(INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
-def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
+def : Pat<(v16f32 (insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0))),
(INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
-def : Pat<(insert_subvector undef, (v16i16 VR256X:$src), (iPTR 0)),
+def : Pat<(v32i16 (insert_subvector undef, (v16i16 VR256X:$src), (iPTR 0))),
(INSERT_SUBREG (v32i16 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
-def : Pat<(insert_subvector undef, (v32i8 VR256X:$src), (iPTR 0)),
+def : Pat<(v64i8 (insert_subvector undef, (v32i8 VR256X:$src), (iPTR 0))),
(INSERT_SUBREG (v64i8 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
+}
// vextractps - extract 32 bits from XMM
-def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
+def VEXTRACTPSZrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
(ins VR128X:$src1, u8imm:$src2),
"vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
EVEX;
-def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
+def VEXTRACTPSZmr : AVX512AIi8<0x17, MRMDestMem, (outs),
(ins f32mem:$dst, VR128X:$src1, u8imm:$src2),
"vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
@@ -815,90 +794,107 @@ def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
//===---------------------------------------------------------------------===//
// AVX-512 BROADCAST
//---
+// Broadcast with a scalar argument.
+multiclass avx512_broadcast_scalar<bits<8> opc, string OpcodeStr,
+ X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo> {
+
+ let isCodeGenOnly = 1 in {
+ def r_s : I< opc, MRMSrcReg, (outs DestInfo.RC:$dst),
+ (ins SrcInfo.FRC:$src), OpcodeStr#"\t{$src, $dst|$dst, $src}",
+ [(set DestInfo.RC:$dst, (DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)))]>,
+ Requires<[HasAVX512]>, T8PD, EVEX;
+
+ let Constraints = "$src0 = $dst" in
+ def rk_s : I< opc, MRMSrcReg, (outs DestInfo.RC:$dst),
+ (ins DestInfo.RC:$src0, DestInfo.KRCWM:$mask, SrcInfo.FRC:$src),
+ OpcodeStr#"\t{$src, $dst {${mask}} |$dst {${mask}}, $src}",
+ [(set DestInfo.RC:$dst,
+ (vselect DestInfo.KRCWM:$mask,
+ (DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)),
+ DestInfo.RC:$src0))]>,
+ Requires<[HasAVX512]>, T8PD, EVEX, EVEX_K;
+
+ def rkz_s : I< opc, MRMSrcReg, (outs DestInfo.RC:$dst),
+ (ins DestInfo.KRCWM:$mask, SrcInfo.FRC:$src),
+ OpcodeStr#"\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}",
+ [(set DestInfo.RC:$dst,
+ (vselect DestInfo.KRCWM:$mask,
+ (DestInfo.VT (X86VBroadcast SrcInfo.FRC:$src)),
+ DestInfo.ImmAllZerosV))]>,
+ Requires<[HasAVX512]>, T8PD, EVEX, EVEX_KZ;
+ } // let isCodeGenOnly = 1 in
+}
multiclass avx512_broadcast_rm<bits<8> opc, string OpcodeStr,
X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo> {
-
+ let ExeDomain = DestInfo.ExeDomain in {
defm r : AVX512_maskable<opc, MRMSrcReg, DestInfo, (outs DestInfo.RC:$dst),
(ins SrcInfo.RC:$src), OpcodeStr, "$src", "$src",
(DestInfo.VT (X86VBroadcast (SrcInfo.VT SrcInfo.RC:$src)))>,
T8PD, EVEX;
- let mayLoad = 1 in
- defm m : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
- (ins SrcInfo.ScalarMemOp:$src), OpcodeStr, "$src", "$src",
- (DestInfo.VT (X86VBroadcast
- (SrcInfo.ScalarLdFrag addr:$src)))>,
- T8PD, EVEX, EVEX_CD8<SrcInfo.EltSize, CD8VT1>;
-}
+ defm m : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
+ (ins SrcInfo.ScalarMemOp:$src), OpcodeStr, "$src", "$src",
+ (DestInfo.VT (X86VBroadcast
+ (SrcInfo.ScalarLdFrag addr:$src)))>,
+ T8PD, EVEX, EVEX_CD8<SrcInfo.EltSize, CD8VT1>;
+ }
-multiclass avx512_fp_broadcast_vl<bits<8> opc, string OpcodeStr,
+ def : Pat<(DestInfo.VT (X86VBroadcast
+ (SrcInfo.VT (scalar_to_vector
+ (SrcInfo.ScalarLdFrag addr:$src))))),
+ (!cast<Instruction>(NAME#DestInfo.ZSuffix#m) addr:$src)>;
+ let AddedComplexity = 20 in
+ def : Pat<(DestInfo.VT (vselect DestInfo.KRCWM:$mask,
+ (X86VBroadcast
+ (SrcInfo.VT (scalar_to_vector
+ (SrcInfo.ScalarLdFrag addr:$src)))),
+ DestInfo.RC:$src0)),
+ (!cast<Instruction>(NAME#DestInfo.ZSuffix#mk)
+ DestInfo.RC:$src0, DestInfo.KRCWM:$mask, addr:$src)>;
+ let AddedComplexity = 30 in
+ def : Pat<(DestInfo.VT (vselect DestInfo.KRCWM:$mask,
+ (X86VBroadcast
+ (SrcInfo.VT (scalar_to_vector
+ (SrcInfo.ScalarLdFrag addr:$src)))),
+ DestInfo.ImmAllZerosV)),
+ (!cast<Instruction>(NAME#DestInfo.ZSuffix#mkz)
+ DestInfo.KRCWM:$mask, addr:$src)>;
+}
+
+multiclass avx512_fp_broadcast_sd<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo _> {
- defm Z : avx512_broadcast_rm<opc, OpcodeStr, _.info512, _.info128>,
- EVEX_V512;
+ let Predicates = [HasAVX512] in
+ defm Z : avx512_broadcast_rm<opc, OpcodeStr, _.info512, _.info128>,
+ avx512_broadcast_scalar<opc, OpcodeStr, _.info512, _.info128>,
+ EVEX_V512;
let Predicates = [HasVLX] in {
defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _.info256, _.info128>,
+ avx512_broadcast_scalar<opc, OpcodeStr, _.info256, _.info128>,
EVEX_V256;
}
}
-let ExeDomain = SSEPackedSingle in {
- defm VBROADCASTSS : avx512_fp_broadcast_vl<0x18, "vbroadcastss",
- avx512vl_f32_info>;
- let Predicates = [HasVLX] in {
- defm VBROADCASTSSZ128 : avx512_broadcast_rm<0x18, "vbroadcastss",
- v4f32x_info, v4f32x_info>, EVEX_V128;
- }
-}
-
-let ExeDomain = SSEPackedDouble in {
- defm VBROADCASTSD : avx512_fp_broadcast_vl<0x19, "vbroadcastsd",
- avx512vl_f64_info>, VEX_W;
-}
-
-// avx512_broadcast_pat introduces patterns for broadcast with a scalar argument.
-// Later, we can canonize broadcast instructions before ISel phase and
-// eliminate additional patterns on ISel.
-// SrcRC_v and SrcRC_s are RegisterClasses for vector and scalar
-// representations of source
-multiclass avx512_broadcast_pat<string InstName, SDNode OpNode,
- X86VectorVTInfo _, RegisterClass SrcRC_v,
- RegisterClass SrcRC_s> {
- def : Pat<(_.VT (OpNode (_.EltVT SrcRC_s:$src))),
- (!cast<Instruction>(InstName##"r")
- (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
-
- let AddedComplexity = 30 in {
- def : Pat<(_.VT (vselect _.KRCWM:$mask,
- (OpNode (_.EltVT SrcRC_s:$src)), _.RC:$src0)),
- (!cast<Instruction>(InstName##"rk") _.RC:$src0, _.KRCWM:$mask,
- (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
+multiclass avx512_fp_broadcast_ss<bits<8> opc, string OpcodeStr,
+ AVX512VLVectorVTInfo _> {
+ let Predicates = [HasAVX512] in
+ defm Z : avx512_broadcast_rm<opc, OpcodeStr, _.info512, _.info128>,
+ avx512_broadcast_scalar<opc, OpcodeStr, _.info512, _.info128>,
+ EVEX_V512;
- def : Pat<(_.VT(vselect _.KRCWM:$mask,
- (OpNode (_.EltVT SrcRC_s:$src)), _.ImmAllZerosV)),
- (!cast<Instruction>(InstName##"rkz") _.KRCWM:$mask,
- (COPY_TO_REGCLASS SrcRC_s:$src, SrcRC_v))>;
+ let Predicates = [HasVLX] in {
+ defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _.info256, _.info128>,
+ avx512_broadcast_scalar<opc, OpcodeStr, _.info256, _.info128>,
+ EVEX_V256;
+ defm Z128 : avx512_broadcast_rm<opc, OpcodeStr, _.info128, _.info128>,
+ avx512_broadcast_scalar<opc, OpcodeStr, _.info128, _.info128>,
+ EVEX_V128;
}
}
-
-defm : avx512_broadcast_pat<"VBROADCASTSSZ", X86VBroadcast, v16f32_info,
- VR128X, FR32X>;
-defm : avx512_broadcast_pat<"VBROADCASTSDZ", X86VBroadcast, v8f64_info,
- VR128X, FR64X>;
-
-let Predicates = [HasVLX] in {
- defm : avx512_broadcast_pat<"VBROADCASTSSZ256", X86VBroadcast,
- v8f32x_info, VR128X, FR32X>;
- defm : avx512_broadcast_pat<"VBROADCASTSSZ128", X86VBroadcast,
- v4f32x_info, VR128X, FR32X>;
- defm : avx512_broadcast_pat<"VBROADCASTSDZ256", X86VBroadcast,
- v4f64x_info, VR128X, FR64X>;
-}
-
-def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
- (VBROADCASTSSZm addr:$src)>;
-def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
- (VBROADCASTSDZm addr:$src)>;
+defm VBROADCASTSS : avx512_fp_broadcast_ss<0x18, "vbroadcastss",
+ avx512vl_f32_info>;
+defm VBROADCASTSD : avx512_fp_broadcast_sd<0x19, "vbroadcastsd",
+ avx512vl_f64_info>, VEX_W;
def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
(VBROADCASTSSZm addr:$src)>;
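
For readers new to these defs, the node being matched throughout, X86VBroadcast, simply replicates one scalar into every destination lane; a self-contained scalar model in plain C++ (the template and lane count are illustrative, not backend types):

    #include <array>
    #include <cstddef>

    // Per-lane model of vbroadcastss/vbroadcastsd: each destination lane
    // receives a copy of the single scalar source.
    template <typename T, std::size_t N>
    std::array<T, N> vbroadcast(T Src) {
      std::array<T, N> R;
      R.fill(Src);
      return R;
    }
    // e.g. vbroadcast<float, 16>(X) models the ZMM form of vbroadcastss.
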
@@ -907,9 +903,10 @@ def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
multiclass avx512_int_broadcast_reg<bits<8> opc, X86VectorVTInfo _,
RegisterClass SrcRC> {
- defm r : AVX512_maskable_in_asm<opc, MRMSrcReg, _, (outs _.RC:$dst),
- (ins SrcRC:$src), "vpbroadcast"##_.Suffix,
- "$src", "$src", []>, T8PD, EVEX;
+ defm r : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins SrcRC:$src),
+ "vpbroadcast"##_.Suffix, "$src", "$src",
+ (_.VT (X86VBroadcast SrcRC:$src))>, T8PD, EVEX;
}
multiclass avx512_int_broadcast_reg_vl<bits<8> opc, AVX512VLVectorVTInfo _,
@@ -922,10 +919,18 @@ multiclass avx512_int_broadcast_reg_vl<bits<8> opc, AVX512VLVectorVTInfo _,
}
}
-defm VPBROADCASTBr : avx512_int_broadcast_reg_vl<0x7A, avx512vl_i8_info, GR32,
+let isCodeGenOnly = 1 in {
+defm VPBROADCASTBr : avx512_int_broadcast_reg_vl<0x7A, avx512vl_i8_info, GR8,
HasBWI>;
-defm VPBROADCASTWr : avx512_int_broadcast_reg_vl<0x7B, avx512vl_i16_info, GR32,
+defm VPBROADCASTWr : avx512_int_broadcast_reg_vl<0x7B, avx512vl_i16_info, GR16,
HasBWI>;
+}
+let isAsmParserOnly = 1 in {
+ defm VPBROADCASTBr_Alt : avx512_int_broadcast_reg_vl<0x7A, avx512vl_i8_info,
+ GR32, HasBWI>;
+ defm VPBROADCASTWr_Alt : avx512_int_broadcast_reg_vl<0x7B, avx512vl_i16_info,
+ GR32, HasBWI>;
+}
defm VPBROADCASTDr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i32_info, GR32,
HasAVX512>;
defm VPBROADCASTQr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i64_info, GR64,
@@ -933,27 +938,9 @@ defm VPBROADCASTQr : avx512_int_broadcast_reg_vl<0x7C, avx512vl_i64_info, GR64,
def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
(VPBROADCASTDrZrkz VK16WM:$mask, (i32 (MOV32ri 0x1)))>;
-
def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
(VPBROADCASTQrZrkz VK8WM:$mask, (i64 (MOV64ri 0x1)))>;
-def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
- (VPBROADCASTDrZr GR32:$src)>;
-def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
- (VPBROADCASTQrZr GR64:$src)>;
-
-def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_i32_512 (i32 GR32:$src))),
- (VPBROADCASTDrZr GR32:$src)>;
-def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_i64_512 (i64 GR64:$src))),
- (VPBROADCASTQrZr GR64:$src)>;
-
-def : Pat<(v16i32 (int_x86_avx512_mask_pbroadcast_d_gpr_512 (i32 GR32:$src),
- (v16i32 immAllZerosV), (i16 GR16:$mask))),
- (VPBROADCASTDrZrkz (COPY_TO_REGCLASS GR16:$mask, VK16WM), GR32:$src)>;
-def : Pat<(v8i64 (int_x86_avx512_mask_pbroadcast_q_gpr_512 (i64 GR64:$src),
- (bc_v8i64 (v16i32 immAllZerosV)), (i8 GR8:$mask))),
- (VPBROADCASTQrZrkz (COPY_TO_REGCLASS GR8:$mask, VK8WM), GR64:$src)>;
-
// Provide aliases for broadcast from the same register class that
// automatically does the extract.
multiclass avx512_int_broadcast_rm_lowering<X86VectorVTInfo DestInfo,
@@ -992,12 +979,11 @@ defm VPBROADCASTQ : avx512_int_broadcast_rm_vl<0x59, "vpbroadcastq",
multiclass avx512_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
X86VectorVTInfo _Dst, X86VectorVTInfo _Src> {
- let mayLoad = 1 in
- defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
- (ins _Src.MemOp:$src), OpcodeStr, "$src", "$src",
- (_Dst.VT (X86SubVBroadcast
- (_Src.VT (bitconvert (_Src.LdFrag addr:$src)))))>,
- AVX5128IBase, EVEX;
+ defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
+ (ins _Src.MemOp:$src), OpcodeStr, "$src", "$src",
+ (_Dst.VT (X86SubVBroadcast
+ (_Src.VT (bitconvert (_Src.LdFrag addr:$src)))))>,
+ AVX5128IBase, EVEX;
}
defm VBROADCASTI32X4 : avx512_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
@@ -1044,45 +1030,29 @@ defm VBROADCASTF32X8 : avx512_subvec_broadcast_rm<0x1b, "vbroadcastf32x8",
EVEX_V512, EVEX_CD8<32, CD8VT8>;
}
-multiclass avx512_broadcast_32x2<bits<8> opc, string OpcodeStr,
- X86VectorVTInfo _Dst, X86VectorVTInfo _Src,
- SDNode OpNode = X86SubVBroadcast> {
-
- defm r : AVX512_maskable<opc, MRMSrcReg, _Dst, (outs _Dst.RC:$dst),
- (ins _Src.RC:$src), OpcodeStr, "$src", "$src",
- (_Dst.VT (OpNode (_Src.VT _Src.RC:$src)))>,
- T8PD, EVEX;
- let mayLoad = 1 in
- defm m : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
- (ins _Src.ScalarMemOp:$src), OpcodeStr, "$src", "$src",
- (_Dst.VT (OpNode
- (_Src.VT (scalar_to_vector(loadi64 addr:$src)))))>,
- T8PD, EVEX, EVEX_CD8<_Src.EltSize, CD8VT2>;
-}
-
multiclass avx512_common_broadcast_32x2<bits<8> opc, string OpcodeStr,
- AVX512VLVectorVTInfo _> {
+ AVX512VLVectorVTInfo _Dst, AVX512VLVectorVTInfo _Src> {
let Predicates = [HasDQI] in
- defm Z : avx512_broadcast_32x2<opc, OpcodeStr, _.info512, _.info128>,
+ defm Z : avx512_broadcast_rm<opc, OpcodeStr, _Dst.info512, _Src.info128>,
EVEX_V512;
let Predicates = [HasDQI, HasVLX] in
- defm Z256 : avx512_broadcast_32x2<opc, OpcodeStr, _.info256, _.info128>,
+ defm Z256 : avx512_broadcast_rm<opc, OpcodeStr, _Dst.info256, _Src.info128>,
EVEX_V256;
}
multiclass avx512_common_broadcast_i32x2<bits<8> opc, string OpcodeStr,
- AVX512VLVectorVTInfo _> :
- avx512_common_broadcast_32x2<opc, OpcodeStr, _> {
+ AVX512VLVectorVTInfo _Dst, AVX512VLVectorVTInfo _Src> :
+ avx512_common_broadcast_32x2<opc, OpcodeStr, _Dst, _Src> {
let Predicates = [HasDQI, HasVLX] in
- defm Z128 : avx512_broadcast_32x2<opc, OpcodeStr, _.info128, _.info128,
- X86SubV32x2Broadcast>, EVEX_V128;
+ defm Z128 : avx512_broadcast_rm<opc, OpcodeStr, _Dst.info128, _Src.info128>,
+ EVEX_V128;
}
defm VPBROADCASTI32X2 : avx512_common_broadcast_i32x2<0x59, "vbroadcasti32x2",
- avx512vl_i32_info>;
+ avx512vl_i32_info, avx512vl_i64_info>;
defm VPBROADCASTF32X2 : avx512_common_broadcast_32x2<0x19, "vbroadcastf32x2",
- avx512vl_f32_info>;
+ avx512vl_f32_info, avx512vl_f64_info>;
def : Pat<(v16f32 (X86VBroadcast (v16f32 VR512:$src))),
(VBROADCASTSSZr (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
@@ -1094,14 +1064,6 @@ def : Pat<(v8f64 (X86VBroadcast (v8f64 VR512:$src))),
def : Pat<(v8f64 (X86VBroadcast (v4f64 VR256X:$src))),
(VBROADCASTSDZr (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm))>;
-// Provide fallback in case the load node that is used in the patterns above
-// is used by additional users, which prevents the pattern selection.
-def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
- (VBROADCASTSSZr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
-def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
- (VBROADCASTSDZr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;
-
-
//===----------------------------------------------------------------------===//
// AVX-512 BROADCAST MASK TO VECTOR REGISTER
//---
@@ -1112,7 +1074,7 @@ multiclass avx512_mask_broadcastm<bits<8> opc, string OpcodeStr,
[(set _.RC:$dst, (_.VT (X86VBroadcastm KRC:$src)))]>, EVEX;
}
-multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
+multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo VTInfo, RegisterClass KRC> {
let Predicates = [HasCDI] in
defm Z : avx512_mask_broadcastm<opc, OpcodeStr, VTInfo.info512, KRC>, EVEX_V512;
@@ -1138,7 +1100,6 @@ let Constraints = "$src1 = $dst" in {
(_.VT (X86VPermi2X IdxVT.RC:$src1, _.RC:$src2, _.RC:$src3))>, EVEX_4V,
AVX5128IBase;
- let mayLoad = 1 in
defm rm: AVX512_maskable_3src_cast<opc, MRMSrcMem, _, IdxVT, (outs _.RC:$dst),
(ins _.RC:$src2, _.MemOp:$src3),
OpcodeStr, "$src3, $src2", "$src2, $src3",
@@ -1149,7 +1110,7 @@ let Constraints = "$src1 = $dst" in {
}
multiclass avx512_perm_i_mb<bits<8> opc, string OpcodeStr,
X86VectorVTInfo _, X86VectorVTInfo IdxVT> {
- let mayLoad = 1, Constraints = "$src1 = $dst" in
+ let Constraints = "$src1 = $dst" in
defm rmb: AVX512_maskable_3src_cast<opc, MRMSrcMem, _, IdxVT, (outs _.RC:$dst),
(ins _.RC:$src2, _.ScalarMemOp:$src3),
OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
@@ -1178,13 +1139,14 @@ multiclass avx512_perm_i_sizes<bits<8> opc, string OpcodeStr,
}
}
-multiclass avx512_perm_i_sizes_w<bits<8> opc, string OpcodeStr,
+multiclass avx512_perm_i_sizes_bw<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo VTInfo,
- AVX512VLVectorVTInfo Idx> {
- let Predicates = [HasBWI] in
+ AVX512VLVectorVTInfo Idx,
+ Predicate Prd> {
+ let Predicates = [Prd] in
defm NAME: avx512_perm_i<opc, OpcodeStr, VTInfo.info512,
Idx.info512>, EVEX_V512;
- let Predicates = [HasBWI, HasVLX] in {
+ let Predicates = [Prd, HasVLX] in {
defm NAME#128: avx512_perm_i<opc, OpcodeStr, VTInfo.info128,
Idx.info128>, EVEX_V128;
defm NAME#256: avx512_perm_i<opc, OpcodeStr, VTInfo.info256,
@@ -1196,8 +1158,12 @@ defm VPERMI2D : avx512_perm_i_sizes<0x76, "vpermi2d",
avx512vl_i32_info, avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
defm VPERMI2Q : avx512_perm_i_sizes<0x76, "vpermi2q",
avx512vl_i64_info, avx512vl_i64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VPERMI2W : avx512_perm_i_sizes_w<0x75, "vpermi2w",
- avx512vl_i16_info, avx512vl_i16_info>, VEX_W, EVEX_CD8<16, CD8VF>;
+defm VPERMI2W : avx512_perm_i_sizes_bw<0x75, "vpermi2w",
+ avx512vl_i16_info, avx512vl_i16_info, HasBWI>,
+ VEX_W, EVEX_CD8<16, CD8VF>;
+defm VPERMI2B : avx512_perm_i_sizes_bw<0x75, "vpermi2b",
+ avx512vl_i8_info, avx512vl_i8_info, HasVBMI>,
+ EVEX_CD8<8, CD8VF>;
defm VPERMI2PS : avx512_perm_i_sizes<0x77, "vpermi2ps",
avx512vl_f32_info, avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
defm VPERMI2PD : avx512_perm_i_sizes<0x77, "vpermi2pd",
@@ -1213,7 +1179,6 @@ let Constraints = "$src1 = $dst" in {
(_.VT (X86VPermt2 _.RC:$src1, IdxVT.RC:$src2, _.RC:$src3))>, EVEX_4V,
AVX5128IBase;
- let mayLoad = 1 in
defm rm: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins IdxVT.RC:$src2, _.MemOp:$src3),
OpcodeStr, "$src3, $src2", "$src2, $src3",
@@ -1224,7 +1189,7 @@ let Constraints = "$src1 = $dst" in {
}
multiclass avx512_perm_t_mb<bits<8> opc, string OpcodeStr,
X86VectorVTInfo _, X86VectorVTInfo IdxVT> {
- let mayLoad = 1, Constraints = "$src1 = $dst" in
+ let Constraints = "$src1 = $dst" in
defm rmb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins IdxVT.RC:$src2, _.ScalarMemOp:$src3),
OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
@@ -1253,13 +1218,14 @@ multiclass avx512_perm_t_sizes<bits<8> opc, string OpcodeStr,
}
}
-multiclass avx512_perm_t_sizes_w<bits<8> opc, string OpcodeStr,
+multiclass avx512_perm_t_sizes_bw<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo VTInfo,
- AVX512VLVectorVTInfo Idx> {
- let Predicates = [HasBWI] in
+ AVX512VLVectorVTInfo Idx,
+ Predicate Prd> {
+ let Predicates = [Prd] in
defm NAME: avx512_perm_t<opc, OpcodeStr, VTInfo.info512,
Idx.info512>, EVEX_V512;
- let Predicates = [HasBWI, HasVLX] in {
+ let Predicates = [Prd, HasVLX] in {
defm NAME#128: avx512_perm_t<opc, OpcodeStr, VTInfo.info128,
Idx.info128>, EVEX_V128;
defm NAME#256: avx512_perm_t<opc, OpcodeStr, VTInfo.info256,
@@ -1271,8 +1237,12 @@ defm VPERMT2D : avx512_perm_t_sizes<0x7E, "vpermt2d",
avx512vl_i32_info, avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
defm VPERMT2Q : avx512_perm_t_sizes<0x7E, "vpermt2q",
avx512vl_i64_info, avx512vl_i64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VPERMT2W : avx512_perm_t_sizes_w<0x7D, "vpermt2w",
- avx512vl_i16_info, avx512vl_i16_info>, VEX_W, EVEX_CD8<16, CD8VF>;
+defm VPERMT2W : avx512_perm_t_sizes_bw<0x7D, "vpermt2w",
+ avx512vl_i16_info, avx512vl_i16_info, HasBWI>,
+ VEX_W, EVEX_CD8<16, CD8VF>;
+defm VPERMT2B : avx512_perm_t_sizes_bw<0x7D, "vpermt2b",
+ avx512vl_i8_info, avx512vl_i8_info, HasVBMI>,
+ EVEX_CD8<8, CD8VF>;
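The defm lines above wire the two-source permutes up to their feature predicates: the word form needs AVX512BW, the new byte form AVX512VBMI. For reference, both are reachable from C through the permutex2var intrinsics; a minimal sketch with illustrative function names (built with e.g. -mavx512bw -mavx512vbmi):

#include <immintrin.h>

// Word-granularity two-source shuffle; selects vpermi2w/vpermt2w (AVX512BW).
__m512i shuffle_words(__m512i a, __m512i idx, __m512i b) {
  return _mm512_permutex2var_epi16(a, idx, b);
}

// Byte-granularity variant; selects vpermi2b/vpermt2b (AVX512VBMI).
__m512i shuffle_bytes(__m512i a, __m512i idx, __m512i b) {
  return _mm512_permutex2var_epi8(a, idx, b);
}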
defm VPERMT2PS : avx512_perm_t_sizes<0x7F, "vpermt2ps",
avx512vl_f32_info, avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
defm VPERMT2PD : avx512_perm_t_sizes<0x7F, "vpermt2pd",
@@ -1283,6 +1253,7 @@ defm VPERMT2PD : avx512_perm_t_sizes<0x7F, "vpermt2pd",
//
multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
let ExeDomain = _.ExeDomain in {
+ let hasSideEffects = 0 in
def rr : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src2),
!strconcat(OpcodeStr,
@@ -1292,14 +1263,16 @@ multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
(ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
- [(set _.RC:$dst, (X86select _.KRCWM:$mask, (_.VT _.RC:$src1),
- (_.VT _.RC:$src2)))]>, EVEX_4V, EVEX_K;
+ [(set _.RC:$dst, (vselect _.KRCWM:$mask,
+ (_.VT _.RC:$src2),
+ (_.VT _.RC:$src1)))]>, EVEX_4V, EVEX_K;
+ let hasSideEffects = 0 in
def rrkz : AVX5128I<opc, MRMSrcReg, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
[]>, EVEX_4V, EVEX_KZ;
- let mayLoad = 1 in {
+ let mayLoad = 1, hasSideEffects = 0 in
def rm : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
(ins _.RC:$src1, _.MemOp:$src2),
!strconcat(OpcodeStr,
@@ -1309,16 +1282,17 @@ multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
(ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
- [(set _.RC:$dst, (X86select _.KRCWM:$mask, (_.VT _.RC:$src1),
- (_.VT (bitconvert (_.LdFrag addr:$src2)))))]>,
+ [(set _.RC:$dst, (vselect _.KRCWM:$mask,
+ (_.VT (bitconvert (_.LdFrag addr:$src2))),
+ (_.VT _.RC:$src1)))]>,
EVEX_4V, EVEX_K, EVEX_CD8<_.EltSize, CD8VF>;
+ let mayLoad = 1, hasSideEffects = 0 in
def rmkz : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src1, $src2}"),
[]>, EVEX_4V, EVEX_KZ, EVEX_CD8<_.EltSize, CD8VF>;
}
- }
}
multiclass avx512_blendmask_rmb<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
@@ -1327,10 +1301,12 @@ multiclass avx512_blendmask_rmb<bits<8> opc, string OpcodeStr, X86VectorVTInfo _
!strconcat(OpcodeStr,
"\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
"$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
- [(set _.RC:$dst,(X86select _.KRCWM:$mask, (_.VT _.RC:$src1),
- (X86VBroadcast (_.ScalarLdFrag addr:$src2))))]>,
+ [(set _.RC:$dst,(vselect _.KRCWM:$mask,
+ (X86VBroadcast (_.ScalarLdFrag addr:$src2)),
+ (_.VT _.RC:$src1)))]>,
EVEX_4V, EVEX_K, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
+ let mayLoad = 1, hasSideEffects = 0 in
def rmb : AVX5128I<opc, MRMSrcMem, (outs _.RC:$dst),
(ins _.RC:$src1, _.ScalarMemOp:$src2),
!strconcat(OpcodeStr,
@@ -1373,7 +1349,7 @@ defm VPBLENDMB : blendmask_bw <0x66, "vpblendmb", avx512vl_i8_info>;
defm VPBLENDMW : blendmask_bw <0x66, "vpblendmw", avx512vl_i16_info>, VEX_W;
-let Predicates = [HasAVX512] in {
+let Predicates = [HasAVX512, NoVLX] in {
def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
(v8f32 VR256X:$src2))),
(EXTRACT_SUBREG
@@ -1404,15 +1380,14 @@ multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeRnd>
(OpNode (_.VT _.RC:$src1),
(_.VT _.RC:$src2),
imm:$cc)>, EVEX_4V;
- let mayLoad = 1 in
- defm rm_Int : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
- (outs _.KRC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2, AVXCC:$cc),
- "vcmp${cc}"#_.Suffix,
- "$src2, $src1", "$src1, $src2",
- (OpNode (_.VT _.RC:$src1),
- (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
- imm:$cc)>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
+ defm rm_Int : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
+ (outs _.KRC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
+ "vcmp${cc}"#_.Suffix,
+ "$src2, $src1", "$src1, $src2",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
+ imm:$cc)>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
defm rrb_Int : AVX512_maskable_cmp<0xC2, MRMSrcReg, _,
(outs _.KRC:$dst),
@@ -1432,7 +1407,7 @@ multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeRnd>
"$cc, $src2, $src1", "$src1, $src2, $cc">, EVEX_4V;
defm rmi_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcMem, _,
(outs _.KRC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2, u8imm:$cc),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$cc),
"vcmp"#_.Suffix,
"$cc, $src2, $src1", "$src1, $src2, $cc">,
EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
@@ -1454,16 +1429,15 @@ multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeRnd>
_.FRC:$src2,
imm:$cc))],
IIC_SSE_ALU_F32S_RR>, EVEX_4V;
- let mayLoad = 1 in
- def rm : AVX512Ii8<0xC2, MRMSrcMem,
- (outs _.KRC:$dst),
- (ins _.FRC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
- !strconcat("vcmp${cc}", _.Suffix,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set _.KRC:$dst, (OpNode _.FRC:$src1,
- (_.ScalarLdFrag addr:$src2),
- imm:$cc))],
- IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
+ def rm : AVX512Ii8<0xC2, MRMSrcMem,
+ (outs _.KRC:$dst),
+ (ins _.FRC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
+ !strconcat("vcmp${cc}", _.Suffix,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set _.KRC:$dst, (OpNode _.FRC:$src1,
+ (_.ScalarLdFrag addr:$src2),
+ imm:$cc))],
+ IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_CD8<_.EltSize, CD8VT1>;
}
}
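The rewrite above also narrows the memory operand to _.ScalarMemOp, since vcmpss/vcmpsd only read a single element from memory, which makes the old mayLoad flag redundant. These are the instructions behind the scalar compare-into-mask intrinsics; a small sketch, assuming an AVX512F target:

#include <immintrin.h>

// vcmpss k, xmm, xmm/m32: compares the low elements, writes a one-bit mask.
__mmask8 lt_scalar(__m128 a, __m128 b) {
  return _mm_cmp_ss_mask(a, b, _CMP_LT_OQ);
}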
@@ -1481,7 +1455,6 @@ multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2)))],
IIC_SSE_ALU_F32P_RR>, EVEX_4V;
- let mayLoad = 1 in
def rm : AVX512BI<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -1495,7 +1468,6 @@ multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
[(set _.KRC:$dst, (and _.KRCWM:$mask,
(OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))))],
IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
- let mayLoad = 1 in
def rmk : AVX512BI<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
@@ -1510,7 +1482,6 @@ multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass avx512_icmp_packed_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> :
avx512_icmp_packed<opc, OpcodeStr, OpNode, _> {
- let mayLoad = 1 in {
def rmb : AVX512BI<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2),
!strconcat(OpcodeStr, "\t{${src2}", _.BroadcastStr, ", $src1, $dst",
@@ -1529,7 +1500,6 @@ multiclass avx512_icmp_packed_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
(X86VBroadcast
(_.ScalarLdFrag addr:$src2)))))],
IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
- }
}
multiclass avx512_icmp_packed_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -1612,7 +1582,6 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
[(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
imm:$cc))],
IIC_SSE_ALU_F32P_RR>, EVEX_4V;
- let mayLoad = 1 in
def rmi : AVX512AIi8<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, AVX512ICC:$cc),
!strconcat("vpcmp${cc}", Suffix,
@@ -1631,7 +1600,6 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
(OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
imm:$cc)))],
IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
- let mayLoad = 1 in
def rmik : AVX512AIi8<opc, MRMSrcMem,
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
AVX512ICC:$cc),
@@ -1774,25 +1742,23 @@ multiclass avx512_vcmp_common<X86VectorVTInfo _> {
(_.VT _.RC:$src2),
imm:$cc)>;
- let mayLoad = 1 in {
- defm rmi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
- (outs _.KRC:$dst),(ins _.RC:$src1, _.MemOp:$src2, AVXCC:$cc),
- "vcmp${cc}"#_.Suffix,
- "$src2, $src1", "$src1, $src2",
- (X86cmpm (_.VT _.RC:$src1),
- (_.VT (bitconvert (_.LdFrag addr:$src2))),
- imm:$cc)>;
-
- defm rmbi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
- (outs _.KRC:$dst),
- (ins _.RC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
- "vcmp${cc}"#_.Suffix,
- "${src2}"##_.BroadcastStr##", $src1",
- "$src1, ${src2}"##_.BroadcastStr,
- (X86cmpm (_.VT _.RC:$src1),
- (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
- imm:$cc)>,EVEX_B;
- }
+ defm rmi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
+ (outs _.KRC:$dst),(ins _.RC:$src1, _.MemOp:$src2, AVXCC:$cc),
+ "vcmp${cc}"#_.Suffix,
+ "$src2, $src1", "$src1, $src2",
+ (X86cmpm (_.VT _.RC:$src1),
+ (_.VT (bitconvert (_.LdFrag addr:$src2))),
+ imm:$cc)>;
+
+ defm rmbi : AVX512_maskable_cmp<0xC2, MRMSrcMem, _,
+ (outs _.KRC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2, AVXCC:$cc),
+ "vcmp${cc}"#_.Suffix,
+ "${src2}"##_.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_.BroadcastStr,
+ (X86cmpm (_.VT _.RC:$src1),
+ (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
+ imm:$cc)>,EVEX_B;
// Accept explicit immediate argument form instead of comparison code.
let isAsmParserOnly = 1, hasSideEffects = 0 in {
defm rri_alt : AVX512_maskable_cmp_alt<0xC2, MRMSrcReg, _,
@@ -1888,10 +1854,10 @@ multiclass avx512_scalar_fpclass<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins _.KRCWM:$mask, _.RC:$src1, i32u8imm:$src2),
OpcodeStr##_.Suffix#
"\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
- [(set _.KRC:$dst,(or _.KRCWM:$mask,
+ [(set _.KRC:$dst,(or _.KRCWM:$mask,
(OpNode (_.VT _.RC:$src1),
(i32 imm:$src2))))], NoItinerary>, EVEX_K;
- let mayLoad = 1, AddedComplexity = 20 in {
+ let AddedComplexity = 20 in {
def rm : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
(ins _.MemOp:$src1, i32u8imm:$src2),
OpcodeStr##_.Suffix##
@@ -1903,7 +1869,7 @@ multiclass avx512_scalar_fpclass<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins _.KRCWM:$mask, _.MemOp:$src1, i32u8imm:$src2),
OpcodeStr##_.Suffix##
"\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
- [(set _.KRC:$dst,(or _.KRCWM:$mask,
+ [(set _.KRC:$dst,(or _.KRCWM:$mask,
(OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
(i32 imm:$src2))))], NoItinerary>, EVEX_K;
}
@@ -1924,51 +1890,49 @@ multiclass avx512_vector_fpclass<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins _.KRCWM:$mask, _.RC:$src1, i32u8imm:$src2),
OpcodeStr##_.Suffix#
"\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
- [(set _.KRC:$dst,(or _.KRCWM:$mask,
+ [(set _.KRC:$dst,(or _.KRCWM:$mask,
(OpNode (_.VT _.RC:$src1),
(i32 imm:$src2))))], NoItinerary>, EVEX_K;
- let mayLoad = 1 in {
- def rm : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
- (ins _.MemOp:$src1, i32u8imm:$src2),
- OpcodeStr##_.Suffix##mem#
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set _.KRC:$dst,(OpNode
- (_.VT (bitconvert (_.LdFrag addr:$src1))),
- (i32 imm:$src2)))], NoItinerary>;
- def rmk : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
- (ins _.KRCWM:$mask, _.MemOp:$src1, i32u8imm:$src2),
- OpcodeStr##_.Suffix##mem#
- "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
- [(set _.KRC:$dst, (or _.KRCWM:$mask, (OpNode
- (_.VT (bitconvert (_.LdFrag addr:$src1))),
- (i32 imm:$src2))))], NoItinerary>, EVEX_K;
- def rmb : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
- (ins _.ScalarMemOp:$src1, i32u8imm:$src2),
- OpcodeStr##_.Suffix##broadcast##"\t{$src2, ${src1}"##
- _.BroadcastStr##", $dst|$dst, ${src1}"
- ##_.BroadcastStr##", $src2}",
- [(set _.KRC:$dst,(OpNode
- (_.VT (X86VBroadcast
- (_.ScalarLdFrag addr:$src1))),
- (i32 imm:$src2)))], NoItinerary>,EVEX_B;
- def rmbk : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
- (ins _.KRCWM:$mask, _.ScalarMemOp:$src1, i32u8imm:$src2),
- OpcodeStr##_.Suffix##broadcast##"\t{$src2, ${src1}"##
- _.BroadcastStr##", $dst {${mask}}|$dst {${mask}}, ${src1}"##
- _.BroadcastStr##", $src2}",
- [(set _.KRC:$dst,(or _.KRCWM:$mask, (OpNode
- (_.VT (X86VBroadcast
- (_.ScalarLdFrag addr:$src1))),
- (i32 imm:$src2))))], NoItinerary>,
- EVEX_B, EVEX_K;
- }
+ def rm : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
+ (ins _.MemOp:$src1, i32u8imm:$src2),
+ OpcodeStr##_.Suffix##mem#
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set _.KRC:$dst,(OpNode
+ (_.VT (bitconvert (_.LdFrag addr:$src1))),
+ (i32 imm:$src2)))], NoItinerary>;
+ def rmk : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
+ (ins _.KRCWM:$mask, _.MemOp:$src1, i32u8imm:$src2),
+ OpcodeStr##_.Suffix##mem#
+ "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
+ [(set _.KRC:$dst, (or _.KRCWM:$mask, (OpNode
+ (_.VT (bitconvert (_.LdFrag addr:$src1))),
+ (i32 imm:$src2))))], NoItinerary>, EVEX_K;
+ def rmb : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
+ (ins _.ScalarMemOp:$src1, i32u8imm:$src2),
+ OpcodeStr##_.Suffix##broadcast##"\t{$src2, ${src1}"##
+ _.BroadcastStr##", $dst|$dst, ${src1}"
+ ##_.BroadcastStr##", $src2}",
+ [(set _.KRC:$dst,(OpNode
+ (_.VT (X86VBroadcast
+ (_.ScalarLdFrag addr:$src1))),
+ (i32 imm:$src2)))], NoItinerary>,EVEX_B;
+ def rmbk : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
+ (ins _.KRCWM:$mask, _.ScalarMemOp:$src1, i32u8imm:$src2),
+ OpcodeStr##_.Suffix##broadcast##"\t{$src2, ${src1}"##
+ _.BroadcastStr##", $dst {${mask}}|$dst {${mask}}, ${src1}"##
+ _.BroadcastStr##", $src2}",
+ [(set _.KRC:$dst,(or _.KRCWM:$mask, (OpNode
+ (_.VT (X86VBroadcast
+ (_.ScalarLdFrag addr:$src1))),
+ (i32 imm:$src2))))], NoItinerary>,
+ EVEX_B, EVEX_K;
}
multiclass avx512_vector_fpclass_all<string OpcodeStr,
- AVX512VLVectorVTInfo _, bits<8> opc, SDNode OpNode, Predicate prd,
+ AVX512VLVectorVTInfo _, bits<8> opc, SDNode OpNode, Predicate prd,
string broadcast>{
let Predicates = [prd] in {
- defm Z : avx512_vector_fpclass<opc, OpcodeStr, OpNode, _.info512, "{z}",
+ defm Z : avx512_vector_fpclass<opc, OpcodeStr, OpNode, _.info512, "{z}",
broadcast>, EVEX_V512;
}
let Predicates = [prd, HasVLX] in {
@@ -1981,9 +1945,9 @@ multiclass avx512_vector_fpclass_all<string OpcodeStr,
multiclass avx512_fp_fpclass_all<string OpcodeStr, bits<8> opcVec,
bits<8> opcScalar, SDNode VecOpNode, SDNode ScalarOpNode, Predicate prd>{
- defm PS : avx512_vector_fpclass_all<OpcodeStr, avx512vl_f32_info, opcVec,
+ defm PS : avx512_vector_fpclass_all<OpcodeStr, avx512vl_f32_info, opcVec,
VecOpNode, prd, "{l}">, EVEX_CD8<32, CD8VF>;
- defm PD : avx512_vector_fpclass_all<OpcodeStr, avx512vl_f64_info, opcVec,
+ defm PD : avx512_vector_fpclass_all<OpcodeStr, avx512vl_f64_info, opcVec,
VecOpNode, prd, "{q}">,EVEX_CD8<64, CD8VF> , VEX_W;
defm SS : avx512_scalar_fpclass<opcScalar, OpcodeStr, ScalarOpNode,
f32x_info, prd>, EVEX_CD8<32, CD8VT1>;
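vfpclassss/vfpclassps classify elements against a category bitmask (NaN, +/-0, +/-Inf, denormal, negative) into a k-register. A minimal illustration via the DQ intrinsics; the 0x81 immediate (QNaN|SNaN) is just one example category set:

#include <immintrin.h>

// vfpclassps k, zmm, imm8: per-element bit, set when the element matches any
// category selected by the immediate; 0x81 = quiet NaN | signaling NaN.
__mmask16 is_nan(__m512 v) {
  return _mm512_fpclass_ps_mask(v, 0x81);   // AVX512DQ
}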
@@ -2003,18 +1967,15 @@ defm VFPCLASS : avx512_fp_fpclass_all<"vfpclass", 0x66, 0x67, X86Vfpclass,
multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
string OpcodeStr, RegisterClass KRC,
ValueType vvt, X86MemOperand x86memop> {
- let hasSideEffects = 0 in {
- def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
- let mayLoad = 1 in
- def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set KRC:$dst, (vvt (load addr:$src)))]>;
- let mayStore = 1 in
- def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(store KRC:$src, addr:$dst)]>;
- }
+ let hasSideEffects = 0 in
+ def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
+ def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set KRC:$dst, (vvt (load addr:$src)))]>;
+ def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(store KRC:$src, addr:$dst)]>;
}
multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
@@ -2043,9 +2004,6 @@ let Predicates = [HasBWI] in {
VEX, PD, VEX_W;
defm KMOVD : avx512_mask_mov_gpr<0x92, 0x93, "kmovd", VK32, GR32>,
VEX, XD;
-}
-
-let Predicates = [HasBWI] in {
defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64mem>,
VEX, PS, VEX_W;
defm KMOVQ : avx512_mask_mov_gpr<0x92, 0x93, "kmovq", VK64, GR64>,
@@ -2058,12 +2016,20 @@ let Predicates = [HasDQI] in {
(KMOVBkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit))>;
def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
(EXTRACT_SUBREG (KMOVBrk VK8:$src), sub_8bit)>;
+ def : Pat<(i32 (zext (i8 (bitconvert (v8i1 VK8:$src))))),
+ (KMOVBrk VK8:$src)>;
+ def : Pat<(i32 (anyext (i8 (bitconvert (v8i1 VK8:$src))))),
+ (KMOVBrk VK8:$src)>;
}
let Predicates = [HasAVX512] in {
def : Pat<(v16i1 (bitconvert (i16 GR16:$src))),
(KMOVWkr (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit))>;
def : Pat<(i16 (bitconvert (v16i1 VK16:$src))),
(EXTRACT_SUBREG (KMOVWrk VK16:$src), sub_16bit)>;
+ def : Pat<(i32 (zext (i16 (bitconvert (v16i1 VK16:$src))))),
+ (KMOVWrk VK16:$src)>;
+ def : Pat<(i32 (anyext (i16 (bitconvert (v16i1 VK16:$src))))),
+ (KMOVWrk VK16:$src)>;
}
let Predicates = [HasBWI] in {
def : Pat<(v32i1 (bitconvert (i32 GR32:$src))), (KMOVDkr GR32:$src)>;
@@ -2085,20 +2051,45 @@ let Predicates = [HasDQI] in {
(KMOVBmk addr:$dst, (COPY_TO_REGCLASS VK4:$src, VK8))>;
def : Pat<(store VK2:$src, addr:$dst),
(KMOVBmk addr:$dst, (COPY_TO_REGCLASS VK2:$src, VK8))>;
+ def : Pat<(store VK1:$src, addr:$dst),
+ (KMOVBmk addr:$dst, (COPY_TO_REGCLASS VK1:$src, VK8))>;
+
+ def : Pat<(v2i1 (load addr:$src)),
+ (COPY_TO_REGCLASS (KMOVBkm addr:$src), VK2)>;
+ def : Pat<(v4i1 (load addr:$src)),
+ (COPY_TO_REGCLASS (KMOVBkm addr:$src), VK4)>;
}
let Predicates = [HasAVX512, NoDQI] in {
- def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
- (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK8:$src, VK16))>;
- def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))),
- (COPY_TO_REGCLASS (KMOVWkm addr:$src), VK8)>;
+ def : Pat<(store VK1:$src, addr:$dst),
+ (MOV8mr addr:$dst,
+ (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)),
+ sub_8bit))>;
+ def : Pat<(store VK2:$src, addr:$dst),
+ (MOV8mr addr:$dst,
+ (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK2:$src, VK16)),
+ sub_8bit))>;
+ def : Pat<(store VK4:$src, addr:$dst),
+ (MOV8mr addr:$dst,
+ (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK4:$src, VK16)),
+ sub_8bit))>;
+ def : Pat<(store VK8:$src, addr:$dst),
+ (MOV8mr addr:$dst,
+ (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
+ sub_8bit))>;
+
+ def : Pat<(v8i1 (load addr:$src)),
+ (COPY_TO_REGCLASS (MOVZX32rm8 addr:$src), VK8)>;
+ def : Pat<(v2i1 (load addr:$src)),
+ (COPY_TO_REGCLASS (MOVZX32rm8 addr:$src), VK2)>;
+ def : Pat<(v4i1 (load addr:$src)),
+ (COPY_TO_REGCLASS (MOVZX32rm8 addr:$src), VK4)>;
}
+
let Predicates = [HasAVX512] in {
def : Pat<(store (i16 (bitconvert (v16i1 VK16:$src))), addr:$dst),
(KMOVWmk addr:$dst, VK16:$src)>;
def : Pat<(i1 (load addr:$src)),
- (COPY_TO_REGCLASS (AND16ri (i16 (SUBREG_TO_REG (i32 0),
- (MOV8rm addr:$src), sub_8bit)),
- (i16 1)), VK1)>;
+ (COPY_TO_REGCLASS (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1)), VK1)>;
def : Pat<(v16i1 (bitconvert (i16 (load addr:$src)))),
(KMOVWkm addr:$src)>;
}
@@ -2107,51 +2098,71 @@ let Predicates = [HasBWI] in {
(KMOVDmk addr:$dst, VK32:$src)>;
def : Pat<(v32i1 (bitconvert (i32 (load addr:$src)))),
(KMOVDkm addr:$src)>;
-}
-let Predicates = [HasBWI] in {
def : Pat<(store (i64 (bitconvert (v64i1 VK64:$src))), addr:$dst),
(KMOVQmk addr:$dst, VK64:$src)>;
def : Pat<(v64i1 (bitconvert (i64 (load addr:$src)))),
(KMOVQkm addr:$src)>;
}
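With the mayLoad/mayStore flags now inferred from the load/store patterns, the kmov forms above still handle spilling mask registers through memory. Since the __mmask types are plain integers, the round trip is expressible directly in C; a sketch with illustrative names:

#include <immintrin.h>
#include <stdint.h>

// Mask registers round-trip through memory as ordinary integers; the compiler
// may use kmovw stores/loads (or a GPR bounce) for these accesses.
void spill_mask(__mmask16 k, uint16_t *p) { *p = (uint16_t)k; }
__mmask16 reload_mask(const uint16_t *p)  { return (__mmask16)*p; }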
+def assertzext_i1 : PatFrag<(ops node:$src), (assertzext node:$src), [{
+ return cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i1;
+}]>;
+
let Predicates = [HasAVX512] in {
def : Pat<(i1 (trunc (i64 GR64:$src))),
- (COPY_TO_REGCLASS (KMOVWkr (AND32ri (EXTRACT_SUBREG $src, sub_32bit),
- (i32 1))), VK1)>;
+ (COPY_TO_REGCLASS (i16 (EXTRACT_SUBREG (AND64ri8 $src, (i64 1)),
+ sub_16bit)), VK1)>;
+
+ def : Pat<(i1 (trunc (i64 (assertzext_i1 GR64:$src)))),
+ (COPY_TO_REGCLASS (i16 (EXTRACT_SUBREG $src, sub_16bit)), VK1)>;
def : Pat<(i1 (trunc (i32 GR32:$src))),
- (COPY_TO_REGCLASS (KMOVWkr (AND32ri $src, (i32 1))), VK1)>;
+ (COPY_TO_REGCLASS (i16 (EXTRACT_SUBREG (AND32ri8 $src, (i32 1)),
+ sub_16bit)), VK1)>;
+
+ def : Pat<(i1 (trunc (i32 (assertzext_i1 GR32:$src)))),
+ (COPY_TO_REGCLASS (i16 (EXTRACT_SUBREG $src, sub_16bit)), VK1)>;
def : Pat<(i1 (trunc (i8 GR8:$src))),
- (COPY_TO_REGCLASS
- (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit), (i32 1))),
- VK1)>;
+ (COPY_TO_REGCLASS (i16 (SUBREG_TO_REG (i64 0), (AND8ri8 $src, (i8 1)),
+ sub_8bit)), VK1)>;
+
+ def : Pat<(i1 (trunc (i8 (assertzext_i1 GR8:$src)))),
+ (COPY_TO_REGCLASS (i16 (SUBREG_TO_REG (i64 0), $src, sub_8bit)), VK1)>;
+
def : Pat<(i1 (trunc (i16 GR16:$src))),
- (COPY_TO_REGCLASS
- (KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), $src, sub_16bit), (i32 1))),
- VK1)>;
+ (COPY_TO_REGCLASS (AND16ri GR16:$src, (i16 1)), VK1)>;
+
+ def : Pat<(i1 (trunc (i16 (assertzext_i1 GR16:$src)))),
+ (COPY_TO_REGCLASS $src, VK1)>;
def : Pat<(i32 (zext VK1:$src)),
- (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1))>;
+ (i32 (SUBREG_TO_REG (i64 0), (i16 (COPY_TO_REGCLASS $src, GR16)),
+ sub_16bit))>;
+
def : Pat<(i32 (anyext VK1:$src)),
- (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16))>;
+ (i32 (SUBREG_TO_REG (i64 0), (i16 (COPY_TO_REGCLASS $src, GR16)),
+ sub_16bit))>;
def : Pat<(i8 (zext VK1:$src)),
- (EXTRACT_SUBREG
- (AND32ri (KMOVWrk
- (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)), sub_8bit)>;
+ (i8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS VK1:$src, GR16)), sub_8bit))>;
+
def : Pat<(i8 (anyext VK1:$src)),
- (EXTRACT_SUBREG
- (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), sub_8bit)>;
+ (i8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS $src, GR16)), sub_8bit))>;
def : Pat<(i64 (zext VK1:$src)),
- (AND64ri8 (SUBREG_TO_REG (i64 0),
- (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), sub_32bit), (i64 1))>;
+ (i64 (SUBREG_TO_REG (i64 0), (i16 (COPY_TO_REGCLASS $src, GR16)),
+ sub_16bit))>;
+
+ def : Pat<(i64 (anyext VK1:$src)),
+ (i64 (SUBREG_TO_REG (i64 0), (i16 (COPY_TO_REGCLASS $src, GR16)),
+ sub_16bit))>;
+
def : Pat<(i16 (zext VK1:$src)),
- (EXTRACT_SUBREG
- (AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)),
- sub_16bit)>;
+ (COPY_TO_REGCLASS $src, GR16)>;
+
+ def : Pat<(i16 (anyext VK1:$src)),
+ (i16 (COPY_TO_REGCLASS $src, GR16))>;
}
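The new trunc/zext patterns rely on a VK1 value already having zero upper bits (the assertzext_i1 forms), so widening becomes a subregister insertion instead of the old kmovw-plus-and sequence. In C terms, moving one bit between an integer and a mask is plain integer arithmetic; a sketch:

#include <immintrin.h>

// Zero-extend bit i of a mask to an int; shifts and masks a GPR copy.
int mask_bit(__mmask16 k, int i) { return (k >> i) & 1; }

// Truncate an int to a one-bit mask; models the AND-with-1 patterns above.
__mmask16 bit_to_mask(int b) { return (__mmask16)(b & 1); }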
def : Pat<(v16i1 (scalar_to_vector VK1:$src)),
(COPY_TO_REGCLASS VK1:$src, VK16)>;
@@ -2166,17 +2177,24 @@ def : Pat<(v32i1 (scalar_to_vector VK1:$src)),
def : Pat<(v64i1 (scalar_to_vector VK1:$src)),
(COPY_TO_REGCLASS VK1:$src, VK64)>;
+def : Pat<(store (i1 -1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
+def : Pat<(store (i1 1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
+def : Pat<(store (i1 0), addr:$dst), (MOV8mi addr:$dst, (i8 0))>;
// With AVX-512 only, an 8-bit mask is promoted to a 16-bit mask.
let Predicates = [HasAVX512, NoDQI] in {
// GR from/to 8-bit mask without native support
def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
(COPY_TO_REGCLASS
- (KMOVWkr (MOVZX32rr8 GR8 :$src)), VK8)>;
+ (KMOVWkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)), VK8)>;
def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
(EXTRACT_SUBREG
(KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
sub_8bit)>;
+ def : Pat<(i32 (zext (i8 (bitconvert (v8i1 VK8:$src))))),
+ (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16))>;
+ def : Pat<(i32 (anyext (i8 (bitconvert (v8i1 VK8:$src))))),
+ (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16))>;
}
let Predicates = [HasAVX512] in {
@@ -2419,7 +2437,6 @@ multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
let Predicates = [HasBWI] in {
defm Q : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "q"), VK64, OpNode>,
VEX, TAPD, VEX_W;
- let Predicates = [HasDQI] in
defm D : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "d"), VK32, OpNode>,
VEX, TAPD;
}
@@ -2456,82 +2473,61 @@ let Predicates = [HasAVX512] in {
def : Pat<(i1 1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
}
-def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
- (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;
-def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
- (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;
+// Patterns for kmask insert_subvector/extract_subvector to/from index=0
+multiclass operation_subvector_mask_lowering<RegisterClass subRC, ValueType subVT,
+ RegisterClass RC, ValueType VT> {
+ def : Pat<(subVT (extract_subvector (VT RC:$src), (iPTR 0))),
+ (subVT (COPY_TO_REGCLASS RC:$src, subRC))>;
-def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
- (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
+ def : Pat<(VT (insert_subvector undef, subRC:$src, (iPTR 0))),
+ (VT (COPY_TO_REGCLASS subRC:$src, RC))>;
+}
-def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 0))),
- (v16i1 (COPY_TO_REGCLASS VK32:$src, VK16))>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK4, v4i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK8, v8i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK2, v2i1, VK64, v64i1>;
-def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 16))),
- (v16i1 (COPY_TO_REGCLASS (KSHIFTRDri VK32:$src, (i8 16)), VK16))>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK8, v8i1>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK4, v4i1, VK64, v64i1>;
-def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 0))),
- (v32i1 (COPY_TO_REGCLASS VK64:$src, VK32))>;
+defm : operation_subvector_mask_lowering<VK8, v8i1, VK16, v16i1>;
+defm : operation_subvector_mask_lowering<VK8, v8i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK8, v8i1, VK64, v64i1>;
-def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 32))),
- (v32i1 (COPY_TO_REGCLASS (KSHIFTRQri VK64:$src, (i8 32)), VK32))>;
+defm : operation_subvector_mask_lowering<VK16, v16i1, VK32, v32i1>;
+defm : operation_subvector_mask_lowering<VK16, v16i1, VK64, v64i1>;
-def : Pat<(v4i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
- (v4i1 (COPY_TO_REGCLASS VK8:$src, VK4))>;
-
-def : Pat<(v2i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
- (v2i1 (COPY_TO_REGCLASS VK8:$src, VK2))>;
-
-def : Pat<(v4i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
- (v4i1 (COPY_TO_REGCLASS VK2:$src, VK4))>;
-
-def : Pat<(v8i1 (insert_subvector undef, (v4i1 VK4:$src), (iPTR 0))),
- (v8i1 (COPY_TO_REGCLASS VK4:$src, VK8))>;
-def : Pat<(v8i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
- (v8i1 (COPY_TO_REGCLASS VK2:$src, VK8))>;
-
-def : Pat<(v32i1 (insert_subvector undef, VK2:$src, (iPTR 0))),
- (v32i1 (COPY_TO_REGCLASS VK2:$src, VK32))>;
-def : Pat<(v32i1 (insert_subvector undef, VK4:$src, (iPTR 0))),
- (v32i1 (COPY_TO_REGCLASS VK4:$src, VK32))>;
-def : Pat<(v32i1 (insert_subvector undef, VK8:$src, (iPTR 0))),
- (v32i1 (COPY_TO_REGCLASS VK8:$src, VK32))>;
-def : Pat<(v32i1 (insert_subvector undef, VK16:$src, (iPTR 0))),
- (v32i1 (COPY_TO_REGCLASS VK16:$src, VK32))>;
-
-def : Pat<(v64i1 (insert_subvector undef, VK2:$src, (iPTR 0))),
- (v64i1 (COPY_TO_REGCLASS VK2:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK4:$src, (iPTR 0))),
- (v64i1 (COPY_TO_REGCLASS VK4:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK8:$src, (iPTR 0))),
- (v64i1 (COPY_TO_REGCLASS VK8:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK16:$src, (iPTR 0))),
- (v64i1 (COPY_TO_REGCLASS VK16:$src, VK64))>;
-def : Pat<(v64i1 (insert_subvector undef, VK32:$src, (iPTR 0))),
- (v64i1 (COPY_TO_REGCLASS VK32:$src, VK64))>;
+defm : operation_subvector_mask_lowering<VK32, v32i1, VK64, v64i1>;
+def : Pat<(v2i1 (extract_subvector (v4i1 VK4:$src), (iPTR 2))),
+ (v2i1 (COPY_TO_REGCLASS
+ (KSHIFTRWri (COPY_TO_REGCLASS VK4:$src, VK16), (i8 2)),
+ VK2))>;
+def : Pat<(v4i1 (extract_subvector (v8i1 VK8:$src), (iPTR 4))),
+ (v4i1 (COPY_TO_REGCLASS
+ (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16), (i8 4)),
+ VK4))>;
+def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
+ (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
+def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 16))),
+ (v16i1 (COPY_TO_REGCLASS (KSHIFTRDri VK32:$src, (i8 16)), VK16))>;
+def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 32))),
+ (v32i1 (COPY_TO_REGCLASS (KSHIFTRQri VK64:$src, (i8 32)), VK32))>;
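The multiclass above collapses the old per-type index-0 patterns: extracting or inserting the low portion of a mask is only a register-class copy, while the upper half still needs a kshift, as the remaining KSHIFTR patterns show. The same split viewed from C, treating masks as integers (illustrative names):

#include <immintrin.h>

// Low half of a 16-bit mask: a narrowing copy (the index-0 extract case).
__mmask8 low_half(__mmask16 k)  { return (__mmask8)k; }

// High half: shift first, mirroring the KSHIFTRWri-based patterns.
__mmask8 high_half(__mmask16 k) { return (__mmask8)(k >> 8); }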
def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
(v8i1 (COPY_TO_REGCLASS
(KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16),
(I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
-def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
- (v8i1 (COPY_TO_REGCLASS
- (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16),
- (I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
-
def : Pat<(v4i1 (X86vshli VK4:$src, (i8 imm:$imm))),
(v4i1 (COPY_TO_REGCLASS
(KSHIFTLWri (COPY_TO_REGCLASS VK4:$src, VK16),
(I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
-
-def : Pat<(v4i1 (X86vsrli VK4:$src, (i8 imm:$imm))),
- (v4i1 (COPY_TO_REGCLASS
- (KSHIFTRWri (COPY_TO_REGCLASS VK4:$src, VK16),
- (I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
-
//===----------------------------------------------------------------------===//
// AVX-512 - Aligned and unaligned load and store
//
@@ -2539,7 +2535,8 @@ def : Pat<(v4i1 (X86vsrli VK4:$src, (i8 imm:$imm))),
multiclass avx512_load<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
PatFrag ld_frag, PatFrag mload,
- bit IsReMaterializable = 1> {
+ bit IsReMaterializable = 1,
+ SDPatternOperator SelectOprr = vselect> {
let hasSideEffects = 0 in {
def rr : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst), (ins _.RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [],
@@ -2547,7 +2544,10 @@ multiclass avx512_load<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
def rrkz : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.RC:$src),
!strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|",
- "${dst} {${mask}} {z}, $src}"), [], _.ExeDomain>,
+ "${dst} {${mask}} {z}, $src}"),
+ [(set _.RC:$dst, (_.VT (vselect _.KRCWM:$mask,
+ (_.VT _.RC:$src),
+ _.ImmAllZerosV)))], _.ExeDomain>,
EVEX, EVEX_KZ;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable,
@@ -2562,11 +2562,11 @@ multiclass avx512_load<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
(ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src1),
!strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
"${dst} {${mask}}, $src1}"),
- [(set _.RC:$dst, (_.VT (vselect _.KRCWM:$mask,
+ [(set _.RC:$dst, (_.VT (SelectOprr _.KRCWM:$mask,
(_.VT _.RC:$src1),
(_.VT _.RC:$src0))))], _.ExeDomain>,
EVEX, EVEX_K;
- let mayLoad = 1, SchedRW = [WriteLoad] in
+ let SchedRW = [WriteLoad] in
def rmk : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst),
(ins _.RC:$src0, _.KRCWM:$mask, _.MemOp:$src1),
!strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
@@ -2576,7 +2576,7 @@ multiclass avx512_load<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
(_.VT (bitconvert (ld_frag addr:$src1))),
(_.VT _.RC:$src0))))], _.ExeDomain>, EVEX, EVEX_K;
}
- let mayLoad = 1, SchedRW = [WriteLoad] in
+ let SchedRW = [WriteLoad] in
def rmkz : AVX512PI<opc, MRMSrcMem, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.MemOp:$src),
OpcodeStr #"\t{$src, ${dst} {${mask}} {z}|"#
@@ -2615,22 +2615,27 @@ multiclass avx512_alignedload_vl<bits<8> opc, string OpcodeStr,
multiclass avx512_load_vl<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo _,
Predicate prd,
- bit IsReMaterializable = 1> {
+ bit IsReMaterializable = 1,
+ SDPatternOperator SelectOprr = vselect> {
let Predicates = [prd] in
defm Z : avx512_load<opc, OpcodeStr, _.info512, _.info512.LdFrag,
- masked_load_unaligned, IsReMaterializable>, EVEX_V512;
+ masked_load_unaligned, IsReMaterializable,
+ SelectOprr>, EVEX_V512;
let Predicates = [prd, HasVLX] in {
defm Z256 : avx512_load<opc, OpcodeStr, _.info256, _.info256.LdFrag,
- masked_load_unaligned, IsReMaterializable>, EVEX_V256;
+ masked_load_unaligned, IsReMaterializable,
+ SelectOprr>, EVEX_V256;
defm Z128 : avx512_load<opc, OpcodeStr, _.info128, _.info128.LdFrag,
- masked_load_unaligned, IsReMaterializable>, EVEX_V128;
+ masked_load_unaligned, IsReMaterializable,
+ SelectOprr>, EVEX_V128;
}
}
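The new SelectOprr hook lets the aligned loads keep their vselect merge patterns while the unaligned forms opt out via null_frag, and rrkz now matches zero-masking directly. The corresponding user-level operations, as a sketch assuming AVX512F:

#include <immintrin.h>

// vmovups zmm {k}, [mem]: masked unaligned load merging into src.
__m512 masked_load(__m512 src, __mmask16 k, const float *p) {
  return _mm512_mask_loadu_ps(src, k, p);
}

// vmovaps zmm {k}{z}, zmm: the register zero-masking move that the new
// rrkz vselect-with-zero pattern selects.
__m512 zero_unselected(__mmask16 k, __m512 v) {
  return _mm512_maskz_mov_ps(k, v);
}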
multiclass avx512_store<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
PatFrag st_frag, PatFrag mstore> {
+ let hasSideEffects = 0 in {
def rr_REV : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst), (ins _.RC:$src),
OpcodeStr # ".s\t{$src, $dst|$dst, $src}",
[], _.ExeDomain>, EVEX;
@@ -2644,8 +2649,8 @@ multiclass avx512_store<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
OpcodeStr # ".s\t{$src, ${dst} {${mask}} {z}|" #
"${dst} {${mask}} {z}, $src}",
[], _.ExeDomain>, EVEX, EVEX_KZ;
+ }
- let mayStore = 1 in {
def mr : AVX512PI<opc, MRMDestMem, (outs), (ins _.MemOp:$dst, _.RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(st_frag (_.VT _.RC:$src), addr:$dst)], _.ExeDomain>, EVEX;
@@ -2653,7 +2658,6 @@ multiclass avx512_store<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
(ins _.MemOp:$dst, _.KRCWM:$mask, _.RC:$src),
OpcodeStr # "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}",
[], _.ExeDomain>, EVEX, EVEX_K;
- }
def: Pat<(mstore addr:$ptr, _.KRCWM:$mask, (_.VT _.RC:$src)),
(!cast<Instruction>(NAME#_.ZSuffix##mrk) addr:$ptr,
@@ -2699,32 +2703,16 @@ defm VMOVAPD : avx512_alignedload_vl<0x28, "vmovapd", avx512vl_f64_info,
avx512_alignedstore_vl<0x29, "vmovapd", avx512vl_f64_info,
HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VMOVUPS : avx512_load_vl<0x10, "vmovups", avx512vl_f32_info, HasAVX512>,
+defm VMOVUPS : avx512_load_vl<0x10, "vmovups", avx512vl_f32_info, HasAVX512,
+ 1, null_frag>,
avx512_store_vl<0x11, "vmovups", avx512vl_f32_info, HasAVX512>,
PS, EVEX_CD8<32, CD8VF>;
-defm VMOVUPD : avx512_load_vl<0x10, "vmovupd", avx512vl_f64_info, HasAVX512, 0>,
+defm VMOVUPD : avx512_load_vl<0x10, "vmovupd", avx512vl_f64_info, HasAVX512, 0,
+ null_frag>,
avx512_store_vl<0x11, "vmovupd", avx512vl_f64_info, HasAVX512>,
PD, VEX_W, EVEX_CD8<64, CD8VF>;
-def: Pat<(int_x86_avx512_mask_storeu_ps_512 addr:$ptr, (v16f32 VR512:$src),
- GR16:$mask),
- (VMOVUPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
- VR512:$src)>;
-def: Pat<(int_x86_avx512_mask_storeu_pd_512 addr:$ptr, (v8f64 VR512:$src),
- GR8:$mask),
- (VMOVUPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
- VR512:$src)>;
-
-def: Pat<(int_x86_avx512_mask_store_ps_512 addr:$ptr, (v16f32 VR512:$src),
- GR16:$mask),
- (VMOVAPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
- VR512:$src)>;
-def: Pat<(int_x86_avx512_mask_store_pd_512 addr:$ptr, (v8f64 VR512:$src),
- GR8:$mask),
- (VMOVAPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
- VR512:$src)>;
-
defm VMOVDQA32 : avx512_alignedload_vl<0x6F, "vmovdqa32", avx512vl_i32_info,
HasAVX512>,
avx512_alignedstore_vl<0x7F, "vmovdqa32", avx512vl_i32_info,
@@ -2743,50 +2731,159 @@ defm VMOVDQU16 : avx512_load_vl<0x6F, "vmovdqu16", avx512vl_i16_info, HasBWI>,
avx512_store_vl<0x7F, "vmovdqu16", avx512vl_i16_info,
HasBWI>, XD, VEX_W, EVEX_CD8<16, CD8VF>;
-defm VMOVDQU32 : avx512_load_vl<0x6F, "vmovdqu32", avx512vl_i32_info, HasAVX512>,
+defm VMOVDQU32 : avx512_load_vl<0x6F, "vmovdqu32", avx512vl_i32_info, HasAVX512,
+ 1, null_frag>,
avx512_store_vl<0x7F, "vmovdqu32", avx512vl_i32_info,
HasAVX512>, XS, EVEX_CD8<32, CD8VF>;
-defm VMOVDQU64 : avx512_load_vl<0x6F, "vmovdqu64", avx512vl_i64_info, HasAVX512>,
+defm VMOVDQU64 : avx512_load_vl<0x6F, "vmovdqu64", avx512vl_i64_info, HasAVX512,
+ 1, null_frag>,
avx512_store_vl<0x7F, "vmovdqu64", avx512vl_i64_info,
HasAVX512>, XS, VEX_W, EVEX_CD8<64, CD8VF>;
-def: Pat<(v16i32 (int_x86_avx512_mask_loadu_d_512 addr:$ptr,
- (v16i32 immAllZerosV), GR16:$mask)),
- (VMOVDQU32Zrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;
-
-def: Pat<(v8i64 (int_x86_avx512_mask_loadu_q_512 addr:$ptr,
- (bc_v8i64 (v16i32 immAllZerosV)), GR8:$mask)),
- (VMOVDQU64Zrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;
-
-def: Pat<(int_x86_avx512_mask_storeu_d_512 addr:$ptr, (v16i32 VR512:$src),
- GR16:$mask),
- (VMOVDQU32Zmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
- VR512:$src)>;
-def: Pat<(int_x86_avx512_mask_storeu_q_512 addr:$ptr, (v8i64 VR512:$src),
- GR8:$mask),
- (VMOVDQU64Zmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
- VR512:$src)>;
-
-let AddedComplexity = 20 in {
-def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src),
- (bc_v8i64 (v16i32 immAllZerosV)))),
- (VMOVDQU64Zrrkz VK8WM:$mask, VR512:$src)>;
-
def : Pat<(v8i64 (vselect VK8WM:$mask, (bc_v8i64 (v16i32 immAllZerosV)),
(v8i64 VR512:$src))),
- (VMOVDQU64Zrrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
+ (VMOVDQA64Zrrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
VK8), VR512:$src)>;
-def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src),
- (v16i32 immAllZerosV))),
- (VMOVDQU32Zrrkz VK16WM:$mask, VR512:$src)>;
-
def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 immAllZerosV),
(v16i32 VR512:$src))),
- (VMOVDQU32Zrrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
+ (VMOVDQA32Zrrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
+
+// These patterns exist to prevent the above patterns from introducing a second
+// mask inversion when one already exists.
+def : Pat<(v8i64 (vselect (xor VK8:$mask, (v8i1 immAllOnesV)),
+ (bc_v8i64 (v16i32 immAllZerosV)),
+ (v8i64 VR512:$src))),
+ (VMOVDQA64Zrrkz VK8:$mask, VR512:$src)>;
+def : Pat<(v16i32 (vselect (xor VK16:$mask, (v16i1 immAllOnesV)),
+ (v16i32 immAllZerosV),
+ (v16i32 VR512:$src))),
+ (VMOVDQA32Zrrkz VK16WM:$mask, VR512:$src)>;
+
+let Predicates = [HasVLX] in {
+ // Special patterns for storing subvector extracts of lower 128-bits of 256.
+  // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
+ def : Pat<(alignedstore (v2f64 (extract_subvector
+ (v4f64 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v4f32 (extract_subvector
+ (v8f32 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v2i64 (extract_subvector
+ (v4i64 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v4i32 (extract_subvector
+ (v8i32 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v8i16 (extract_subvector
+ (v16i16 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v16i8 (extract_subvector
+ (v32i8 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+
+ def : Pat<(store (v2f64 (extract_subvector
+ (v4f64 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(store (v4f32 (extract_subvector
+ (v8f32 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(store (v2i64 (extract_subvector
+ (v4i64 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(store (v4i32 (extract_subvector
+ (v8i32 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(store (v8i16 (extract_subvector
+ (v16i16 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+ def : Pat<(store (v16i8 (extract_subvector
+ (v32i8 VR256X:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256X:$src,sub_xmm)))>;
+
+ // Special patterns for storing subvector extracts of lower 128-bits of 512.
+  // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
+ def : Pat<(alignedstore (v2f64 (extract_subvector
+ (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v4f32 (extract_subvector
+ (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v2i64 (extract_subvector
+ (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v4i32 (extract_subvector
+ (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v8i16 (extract_subvector
+ (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(alignedstore (v16i8 (extract_subvector
+ (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+
+ def : Pat<(store (v2f64 (extract_subvector
+ (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPDZ128mr addr:$dst, (v2f64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(store (v4f32 (extract_subvector
+ (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPSZ128mr addr:$dst, (v4f32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(store (v2i64 (extract_subvector
+ (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU64Z128mr addr:$dst, (v2i64 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(store (v4i32 (extract_subvector
+ (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z128mr addr:$dst, (v4i32 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(store (v8i16 (extract_subvector
+ (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z128mr addr:$dst, (v8i16 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+ def : Pat<(store (v16i8 (extract_subvector
+ (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z128mr addr:$dst, (v16i8 (EXTRACT_SUBREG VR512:$src,sub_xmm)))>;
+
+ // Special patterns for storing subvector extracts of lower 256-bits of 512.
+  // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
+ def : Pat<(alignedstore (v4f64 (extract_subvector
+ (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPDZ256mr addr:$dst, (v4f64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(alignedstore (v8f32 (extract_subvector
+ (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVAPSZ256mr addr:$dst, (v8f32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(alignedstore (v4i64 (extract_subvector
+ (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA64Z256mr addr:$dst, (v4i64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(alignedstore (v8i32 (extract_subvector
+ (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z256mr addr:$dst, (v8i32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(alignedstore (v16i16 (extract_subvector
+ (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z256mr addr:$dst, (v16i16 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(alignedstore (v32i8 (extract_subvector
+ (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQA32Z256mr addr:$dst, (v32i8 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+
+ def : Pat<(store (v4f64 (extract_subvector
+ (v8f64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPDZ256mr addr:$dst, (v4f64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(store (v8f32 (extract_subvector
+ (v16f32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVUPSZ256mr addr:$dst, (v8f32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(store (v4i64 (extract_subvector
+ (v8i64 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU64Z256mr addr:$dst, (v4i64 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(store (v8i32 (extract_subvector
+ (v16i32 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z256mr addr:$dst, (v8i32 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(store (v16i16 (extract_subvector
+ (v32i16 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z256mr addr:$dst, (v16i16 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
+ def : Pat<(store (v32i8 (extract_subvector
+ (v64i8 VR512:$src), (iPTR 0))), addr:$dst),
+ (VMOVDQU32Z256mr addr:$dst, (v32i8 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
}
+
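These patterns implement the comments above: when only the low lanes of a wider register are stored, a narrow vmovaps/vmovups of the subregister is cheaper than an explicit extract instruction. The shape that triggers them in C, as a sketch:

#include <immintrin.h>

// Store the low 128 bits of a 512-bit value; with these patterns this is a
// single xmm vmovups store rather than a vextract.
void store_low128(float *p, __m512 v) {
  _mm_storeu_ps(p, _mm512_castps512_ps128(v));
}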
// Move Int Doubleword to Packed Double Int
//
def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
@@ -2910,45 +3007,43 @@ def VMOVQI2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
// AVX-512 MOVSS, MOVSD
//===----------------------------------------------------------------------===//
-multiclass avx512_move_scalar <string asm, SDNode OpNode,
+multiclass avx512_move_scalar <string asm, SDNode OpNode,
X86VectorVTInfo _> {
- defm rr_Int : AVX512_maskable_scalar<0x10, MRMSrcReg, _, (outs _.RC:$dst),
+ defm rr_Int : AVX512_maskable_scalar<0x10, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src2),
- asm, "$src2, $src1","$src1, $src2",
+ asm, "$src2, $src1","$src1, $src2",
(_.VT (OpNode (_.VT _.RC:$src1),
(_.VT _.RC:$src2))),
IIC_SSE_MOV_S_RR>, EVEX_4V;
- let Constraints = "$src1 = $dst" , mayLoad = 1 in
+ let Constraints = "$src1 = $dst" in
defm rm_Int : AVX512_maskable_3src_scalar<0x10, MRMSrcMem, _,
- (outs _.RC:$dst),
+ (outs _.RC:$dst),
(ins _.ScalarMemOp:$src),
asm,"$src","$src",
- (_.VT (OpNode (_.VT _.RC:$src1),
- (_.VT (scalar_to_vector
+ (_.VT (OpNode (_.VT _.RC:$src1),
+ (_.VT (scalar_to_vector
(_.ScalarLdFrag addr:$src)))))>, EVEX;
let isCodeGenOnly = 1 in {
- def rr : AVX512PI<0x10, MRMSrcReg, (outs _.RC:$dst),
+ def rr : AVX512PI<0x10, MRMSrcReg, (outs _.RC:$dst),
(ins _.RC:$src1, _.FRC:$src2),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.RC:$dst, (_.VT (OpNode _.RC:$src1,
(scalar_to_vector _.FRC:$src2))))],
_.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V;
- let mayLoad = 1 in
def rm : AVX512PI<0x10, MRMSrcMem, (outs _.FRC:$dst), (ins _.ScalarMemOp:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
[(set _.FRC:$dst, (_.ScalarLdFrag addr:$src))],
_.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX;
}
- let mayStore = 1 in {
- def mr: AVX512PI<0x11, MRMDestMem, (outs), (ins _.ScalarMemOp:$dst, _.FRC:$src),
- !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
- [(store _.FRC:$src, addr:$dst)], _.ExeDomain, IIC_SSE_MOV_S_MR>,
- EVEX;
- def mrk: AVX512PI<0x11, MRMDestMem, (outs),
- (ins _.ScalarMemOp:$dst, VK1WM:$mask, _.FRC:$src),
- !strconcat(asm, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
- [], _.ExeDomain, IIC_SSE_MOV_S_MR>, EVEX, EVEX_K;
- } // mayStore
+ def mr: AVX512PI<0x11, MRMDestMem, (outs), (ins _.ScalarMemOp:$dst, _.FRC:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ [(store _.FRC:$src, addr:$dst)], _.ExeDomain, IIC_SSE_MOV_S_MR>,
+ EVEX;
+ let mayStore = 1 in
+ def mrk: AVX512PI<0x11, MRMDestMem, (outs),
+ (ins _.ScalarMemOp:$dst, VK1WM:$mask, _.FRC:$src),
+ !strconcat(asm, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
+ [], _.ExeDomain, IIC_SSE_MOV_S_MR>, EVEX, EVEX_K;
}
defm VMOVSSZ : avx512_move_scalar<"vmovss", X86Movss, f32x_info>,
@@ -2957,11 +3052,11 @@ defm VMOVSSZ : avx512_move_scalar<"vmovss", X86Movss, f32x_info>,
defm VMOVSDZ : avx512_move_scalar<"vmovsd", X86Movsd, f64x_info>,
VEX_LIG, XD, VEX_W, EVEX_CD8<64, CD8VT1>;
-def : Pat<(f32 (X86select VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
+def : Pat<(f32 (X86selects VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
(COPY_TO_REGCLASS (VMOVSSZrr_Intk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
VK1WM:$mask, (v4f32 (IMPLICIT_DEF)),(COPY_TO_REGCLASS FR32X:$src1, VR128X)), FR32X)>;
-def : Pat<(f64 (X86select VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
+def : Pat<(f64 (X86selects VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
(COPY_TO_REGCLASS (VMOVSDZrr_Intk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
VK1WM:$mask, (v2f64 (IMPLICIT_DEF)), (COPY_TO_REGCLASS FR64X:$src1, VR128X)), FR64X)>;
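The rename from X86select to X86selects separates the scalar-predicate select matched here from the packed vselect. Its user-visible counterpart is the masked scalar move; a sketch, assuming AVX512F:

#include <immintrin.h>

// vmovss xmm {k}, xmm, xmm: lane 0 takes b's low element when bit 0 of k is
// set, else src's low element; the upper lanes come from a.
__m128 select_scalar(__m128 src, __mmask8 k, __m128 a, __m128 b) {
  return _mm_mask_move_ss(src, k, a, b);
}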
@@ -2969,11 +3064,13 @@ def : Pat<(int_x86_avx512_mask_store_ss addr:$dst, VR128X:$src, GR8:$mask),
(VMOVSSZmrk addr:$dst, (i1 (COPY_TO_REGCLASS GR8:$mask, VK1WM)),
(COPY_TO_REGCLASS VR128X:$src, FR32X))>;
+let hasSideEffects = 0 in
defm VMOVSSZrr_REV : AVX512_maskable_in_asm<0x11, MRMDestReg, f32x_info,
(outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2),
"vmovss.s", "$src2, $src1", "$src1, $src2", []>,
XS, EVEX_4V, VEX_LIG;
+let hasSideEffects = 0 in
defm VMOVSSDrr_REV : AVX512_maskable_in_asm<0x11, MRMDestReg, f64x_info,
(outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2),
"vmovsd.s", "$src2, $src1", "$src1, $src2", []>,
@@ -3037,6 +3134,22 @@ let Predicates = [HasAVX512] in {
def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
(v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
+ def : Pat<(v4f64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
+
+ // Represent the same patterns above but in the form they appear for
+ // 512-bit types
+ def : Pat<(v16i32 (X86vzmovl (insert_subvector undef,
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
+ def : Pat<(v16f32 (X86vzmovl (insert_subvector undef,
+ (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
+ def : Pat<(v8f64 (X86vzmovl (insert_subvector undef,
+ (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
+ def : Pat<(v8f64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
}
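The added X86vzload/X86vzmovl patterns extend the existing 128/256-bit scalar-load-and-zero forms to 512-bit types, so a scalar load whose result lands in lane 0 of a zeroed zmm stays a single vmovsd/vmovss. A sketch, assuming a toolchain that provides the zext cast intrinsic:

#include <immintrin.h>

// Load one double into lane 0 and zero the remaining seven; matches the
// v8f64 X86vzload pattern (one vmovsd load, no extra zeroing).
__m512d load_scalar_zeroed(const double *p) {
  return _mm512_zextpd128_pd512(_mm_load_sd(p));   // _mm_load_sd zeros lane 1
}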
def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
(v4f32 (scalar_to_vector FR32X:$src)), (iPTR 0)))),
@@ -3064,9 +3177,6 @@ let Predicates = [HasAVX512] in {
def : Pat<(store (f32 (extractelt (v4f32 VR128X:$src), (iPTR 0))),
addr:$dst),
(VMOVSSZmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X))>;
- def : Pat<(store (f64 (extractelt (v2f64 VR128X:$src), (iPTR 0))),
- addr:$dst),
- (VMOVSDZmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128X:$src), FR64X))>;
// Shuffle with VMOVSS
def : Pat<(v4i32 (X86Movss VR128X:$src1, VR128X:$src2)),
@@ -3138,14 +3248,21 @@ def VMOVZPQILo2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
EVEX_CD8<8, CD8VT8>;
let Predicates = [HasAVX512] in {
+ let AddedComplexity = 15 in {
+ def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
+ (VMOVDI2PDIZrr GR32:$src)>;
+
+ def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
+ (VMOV64toPQIZrr GR64:$src)>;
+
+ def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
+ (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
+ (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
+ }
  // AVX 128-bit movd/movq instructions write zeros in the high 128-bit part.
let AddedComplexity = 20 in {
def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
(VMOVDI2PDIZrm addr:$src)>;
- def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
- (VMOV64toPQIZrr GR64:$src)>;
- def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
- (VMOVDI2PDIZrr GR32:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
(VMOVDI2PDIZrm addr:$src)>;
@@ -3157,15 +3274,18 @@ let Predicates = [HasAVX512] in {
(VMOVZPQILo2PQIZrr VR128X:$src)>;
def : Pat<(v2i64 (X86vzload addr:$src)),
(VMOVZPQILo2PQIZrm addr:$src)>;
+ def : Pat<(v4i64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i64 0), (VMOVZPQILo2PQIZrm addr:$src), sub_xmm)>;
}
// Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
(v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
- def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
- (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
- (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
+
+ // Use regular 128-bit instructions to match 512-bit scalar_to_vec+zext.
+ def : Pat<(v8i64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i64 0), (VMOVZPQILo2PQIZrm addr:$src), sub_xmm)>;
}
def : Pat<(v16i32 (X86Vinsert (v16i32 immAllZerosV), GR32:$src2, (iPTR 0))),
@@ -3190,66 +3310,112 @@ let SchedRW = [WriteLoad] in {
SSEPackedInt>, EVEX, T8PD, EVEX_V512,
EVEX_CD8<64, CD8VF>;
- let Predicates = [HasAVX512, HasVLX] in {
+ let Predicates = [HasVLX] in {
def VMOVNTDQAZ256rm : AVX512PI<0x2A, MRMSrcMem, (outs VR256X:$dst),
- (ins i256mem:$src),
- "vmovntdqa\t{$src, $dst|$dst, $src}", [],
- SSEPackedInt>, EVEX, T8PD, EVEX_V256,
- EVEX_CD8<64, CD8VF>;
+ (ins i256mem:$src),
+ "vmovntdqa\t{$src, $dst|$dst, $src}",
+ [(set VR256X:$dst, (int_x86_avx2_movntdqa addr:$src))],
+ SSEPackedInt>, EVEX, T8PD, EVEX_V256,
+ EVEX_CD8<64, CD8VF>;
def VMOVNTDQAZ128rm : AVX512PI<0x2A, MRMSrcMem, (outs VR128X:$dst),
- (ins i128mem:$src),
- "vmovntdqa\t{$src, $dst|$dst, $src}", [],
- SSEPackedInt>, EVEX, T8PD, EVEX_V128,
- EVEX_CD8<64, CD8VF>;
+ (ins i128mem:$src),
+ "vmovntdqa\t{$src, $dst|$dst, $src}",
+ [(set VR128X:$dst, (int_x86_sse41_movntdqa addr:$src))],
+ SSEPackedInt>, EVEX, T8PD, EVEX_V128,
+ EVEX_CD8<64, CD8VF>;
}
}
-multiclass avx512_movnt<bits<8> opc, string OpcodeStr, PatFrag st_frag,
- ValueType OpVT, RegisterClass RC, X86MemOperand memop,
- Domain d, InstrItinClass itin = IIC_SSE_MOVNT> {
- let SchedRW = [WriteStore], mayStore = 1,
- AddedComplexity = 400 in
- def mr : AVX512PI<opc, MRMDestMem, (outs), (ins memop:$dst, RC:$src),
+multiclass avx512_movnt<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
+ PatFrag st_frag = alignednontemporalstore,
+ InstrItinClass itin = IIC_SSE_MOVNT> {
+ let SchedRW = [WriteStore], AddedComplexity = 400 in
+ def mr : AVX512PI<opc, MRMDestMem, (outs), (ins _.MemOp:$dst, _.RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(st_frag (OpVT RC:$src), addr:$dst)], d, itin>, EVEX;
+ [(st_frag (_.VT _.RC:$src), addr:$dst)],
+ _.ExeDomain, itin>, EVEX, EVEX_CD8<_.EltSize, CD8VF>;
}
-multiclass avx512_movnt_vl<bits<8> opc, string OpcodeStr, PatFrag st_frag,
- string elty, string elsz, string vsz512,
- string vsz256, string vsz128, Domain d,
- Predicate prd, InstrItinClass itin = IIC_SSE_MOVNT> {
- let Predicates = [prd] in
- defm Z : avx512_movnt<opc, OpcodeStr, st_frag,
- !cast<ValueType>("v"##vsz512##elty##elsz), VR512,
- !cast<X86MemOperand>(elty##"512mem"), d, itin>,
- EVEX_V512;
-
- let Predicates = [prd, HasVLX] in {
- defm Z256 : avx512_movnt<opc, OpcodeStr, st_frag,
- !cast<ValueType>("v"##vsz256##elty##elsz), VR256X,
- !cast<X86MemOperand>(elty##"256mem"), d, itin>,
- EVEX_V256;
+multiclass avx512_movnt_vl<bits<8> opc, string OpcodeStr,
+ AVX512VLVectorVTInfo VTInfo> {
+ let Predicates = [HasAVX512] in
+ defm Z : avx512_movnt<opc, OpcodeStr, VTInfo.info512>, EVEX_V512;
- defm Z128 : avx512_movnt<opc, OpcodeStr, st_frag,
- !cast<ValueType>("v"##vsz128##elty##elsz), VR128X,
- !cast<X86MemOperand>(elty##"128mem"), d, itin>,
- EVEX_V128;
- }
+ let Predicates = [HasAVX512, HasVLX] in {
+ defm Z256 : avx512_movnt<opc, OpcodeStr, VTInfo.info256>, EVEX_V256;
+ defm Z128 : avx512_movnt<opc, OpcodeStr, VTInfo.info128>, EVEX_V128;
+ }
+}
+
+defm VMOVNTDQ : avx512_movnt_vl<0xE7, "vmovntdq", avx512vl_i64_info>, PD;
+defm VMOVNTPD : avx512_movnt_vl<0x2B, "vmovntpd", avx512vl_f64_info>, PD, VEX_W;
+defm VMOVNTPS : avx512_movnt_vl<0x2B, "vmovntps", avx512vl_f32_info>, PS;
+
+let Predicates = [HasAVX512], AddedComplexity = 400 in {
+ def : Pat<(alignednontemporalstore (v16i32 VR512:$src), addr:$dst),
+ (VMOVNTDQZmr addr:$dst, VR512:$src)>;
+ def : Pat<(alignednontemporalstore (v32i16 VR512:$src), addr:$dst),
+ (VMOVNTDQZmr addr:$dst, VR512:$src)>;
+ def : Pat<(alignednontemporalstore (v64i8 VR512:$src), addr:$dst),
+ (VMOVNTDQZmr addr:$dst, VR512:$src)>;
+
+ def : Pat<(v8f64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZrm addr:$src)>;
+ def : Pat<(v16f32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZrm addr:$src)>;
+ def : Pat<(v8i64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZrm addr:$src)>;
+ def : Pat<(v16i32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZrm addr:$src)>;
+ def : Pat<(v32i16 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZrm addr:$src)>;
+ def : Pat<(v64i8 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZrm addr:$src)>;
+}
+
+let Predicates = [HasVLX], AddedComplexity = 400 in {
+ def : Pat<(alignednontemporalstore (v8i32 VR256X:$src), addr:$dst),
+ (VMOVNTDQZ256mr addr:$dst, VR256X:$src)>;
+ def : Pat<(alignednontemporalstore (v16i16 VR256X:$src), addr:$dst),
+ (VMOVNTDQZ256mr addr:$dst, VR256X:$src)>;
+ def : Pat<(alignednontemporalstore (v32i8 VR256X:$src), addr:$dst),
+ (VMOVNTDQZ256mr addr:$dst, VR256X:$src)>;
+
+ def : Pat<(v4f64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ256rm addr:$src)>;
+ def : Pat<(v8f32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ256rm addr:$src)>;
+ def : Pat<(v4i64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ256rm addr:$src)>;
+ def : Pat<(v8i32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ256rm addr:$src)>;
+ def : Pat<(v16i16 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ256rm addr:$src)>;
+ def : Pat<(v32i8 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ256rm addr:$src)>;
+
+ def : Pat<(alignednontemporalstore (v4i32 VR128X:$src), addr:$dst),
+ (VMOVNTDQZ128mr addr:$dst, VR128X:$src)>;
+ def : Pat<(alignednontemporalstore (v8i16 VR128X:$src), addr:$dst),
+ (VMOVNTDQZ128mr addr:$dst, VR128X:$src)>;
+ def : Pat<(alignednontemporalstore (v16i8 VR128X:$src), addr:$dst),
+ (VMOVNTDQZ128mr addr:$dst, VR128X:$src)>;
+
+ def : Pat<(v2f64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ128rm addr:$src)>;
+ def : Pat<(v4f32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ128rm addr:$src)>;
+ def : Pat<(v2i64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ128rm addr:$src)>;
+ def : Pat<(v4i32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ128rm addr:$src)>;
+ def : Pat<(v8i16 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ128rm addr:$src)>;
+ def : Pat<(v16i8 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAZ128rm addr:$src)>;
}
-defm VMOVNTDQ : avx512_movnt_vl<0xE7, "vmovntdq", alignednontemporalstore,
- "i", "64", "8", "4", "2", SSEPackedInt,
- HasAVX512>, PD, EVEX_CD8<64, CD8VF>;
-
-defm VMOVNTPD : avx512_movnt_vl<0x2B, "vmovntpd", alignednontemporalstore,
- "f", "64", "8", "4", "2", SSEPackedDouble,
- HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>;
-
-defm VMOVNTPS : avx512_movnt_vl<0x2B, "vmovntps", alignednontemporalstore,
- "f", "32", "16", "8", "4", SSEPackedSingle,
- HasAVX512>, PS, EVEX_CD8<32, CD8VF>;
-
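[Editor's note, illustrative only — not part of the patch.] The rewritten patterns above select the EVEX-encoded non-temporal moves (VMOVNTDQ/VMOVNTPS/VMOVNTPD for stores, VMOVNTDQA for loads) for every legal vector type. A minimal sketch of what they are used for, assuming a compiler exposing the standard <immintrin.h> AVX-512 intrinsics:

#include <immintrin.h>

// Copy one 64-byte block while bypassing the cache in both directions.
// Both pointers must be 64-byte aligned: the patterns above only match
// *aligned* non-temporal loads and stores.
void copy_block_nt(void *src, void *dst) {
  __m512i v = _mm512_stream_load_si512(src); // VMOVNTDQA (load)
  _mm512_stream_si512(dst, v);               // VMOVNTDQ (store)
  _mm_sfence();                              // order the streaming store
}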
//===----------------------------------------------------------------------===//
// AVX-512 - Integer arithmetic
//
@@ -3263,30 +3429,28 @@ multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
itins.rr, IsCommutable>,
AVX512BIBase, EVEX_4V;
- let mayLoad = 1 in
- defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
- "$src2, $src1", "$src1, $src2",
- (_.VT (OpNode _.RC:$src1,
- (bitconvert (_.LdFrag addr:$src2)))),
- itins.rm>,
- AVX512BIBase, EVEX_4V;
+ defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (_.VT (OpNode _.RC:$src1,
+ (bitconvert (_.LdFrag addr:$src2)))),
+ itins.rm>,
+ AVX512BIBase, EVEX_4V;
}
multiclass avx512_binop_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _, OpndItins itins,
bit IsCommutable = 0> :
avx512_binop_rm<opc, OpcodeStr, OpNode, _, itins, IsCommutable> {
- let mayLoad = 1 in
- defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
- "${src2}"##_.BroadcastStr##", $src1",
- "$src1, ${src2}"##_.BroadcastStr,
- (_.VT (OpNode _.RC:$src1,
- (X86VBroadcast
- (_.ScalarLdFrag addr:$src2)))),
- itins.rm>,
- AVX512BIBase, EVEX_4V, EVEX_B;
+ defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
+ "${src2}"##_.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_.BroadcastStr,
+ (_.VT (OpNode _.RC:$src1,
+ (X86VBroadcast
+ (_.ScalarLdFrag addr:$src2)))),
+ itins.rm>,
+ AVX512BIBase, EVEX_4V, EVEX_B;
}
multiclass avx512_binop_rm_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -3380,7 +3544,8 @@ multiclass avx512_binop_rm_vl_all<bits<8> opc_b, bits<8> opc_w,
multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, OpndItins itins,
SDNode OpNode,X86VectorVTInfo _Src,
- X86VectorVTInfo _Dst, bit IsCommutable = 0> {
+ X86VectorVTInfo _Dst, X86VectorVTInfo _Brdct,
+ bit IsCommutable = 0> {
defm rr : AVX512_maskable<opc, MRMSrcReg, _Dst, (outs _Dst.RC:$dst),
(ins _Src.RC:$src1, _Src.RC:$src2), OpcodeStr,
"$src2, $src1","$src1, $src2",
@@ -3389,26 +3554,24 @@ multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, OpndItins itins,
(_Src.VT _Src.RC:$src2))),
itins.rr, IsCommutable>,
AVX512BIBase, EVEX_4V;
- let mayLoad = 1 in {
- defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
- (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
- "$src2, $src1", "$src1, $src2",
- (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
- (bitconvert (_Src.LdFrag addr:$src2)))),
- itins.rm>,
- AVX512BIBase, EVEX_4V;
-
- defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
- (ins _Src.RC:$src1, _Dst.ScalarMemOp:$src2),
- OpcodeStr,
- "${src2}"##_Dst.BroadcastStr##", $src1",
- "$src1, ${src2}"##_Dst.BroadcastStr,
- (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bitconvert
- (_Dst.VT (X86VBroadcast
- (_Dst.ScalarLdFrag addr:$src2)))))),
+ defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
+ (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
+ (bitconvert (_Src.LdFrag addr:$src2)))),
itins.rm>,
- AVX512BIBase, EVEX_4V, EVEX_B;
- }
+ AVX512BIBase, EVEX_4V;
+
+ defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
+ (ins _Src.RC:$src1, _Dst.ScalarMemOp:$src2),
+ OpcodeStr,
+ "${src2}"##_Brdct.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_Dst.BroadcastStr,
+ (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bitconvert
+ (_Brdct.VT (X86VBroadcast
+ (_Brdct.ScalarLdFrag addr:$src2)))))),
+ itins.rm>,
+ AVX512BIBase, EVEX_4V, EVEX_B;
}
defm VPADD : avx512_binop_rm_vl_all<0xFC, 0xFD, 0xFE, 0xD4, "vpadd", add,
@@ -3439,39 +3602,46 @@ defm VPAVG : avx512_binop_rm_vl_bw<0xE0, 0xE3, "vpavg", X86avg,
SSE_INTALU_ITINS_P, HasBWI, 1>;
multiclass avx512_binop_all<bits<8> opc, string OpcodeStr, OpndItins itins,
- SDNode OpNode, bit IsCommutable = 0> {
-
- defm NAME#Z : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
- v16i32_info, v8i64_info, IsCommutable>,
- EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
- let Predicates = [HasVLX] in {
+ AVX512VLVectorVTInfo _SrcVTInfo, AVX512VLVectorVTInfo _DstVTInfo,
+ SDNode OpNode, Predicate prd, bit IsCommutable = 0> {
+ let Predicates = [prd] in
+ defm NAME#Z : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
+ _SrcVTInfo.info512, _DstVTInfo.info512,
+ v8i64_info, IsCommutable>,
+ EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
+ let Predicates = [HasVLX, prd] in {
defm NAME#Z256 : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
- v8i32x_info, v4i64x_info, IsCommutable>,
- EVEX_V256, EVEX_CD8<64, CD8VF>, VEX_W;
+ _SrcVTInfo.info256, _DstVTInfo.info256,
+ v4i64x_info, IsCommutable>,
+ EVEX_V256, EVEX_CD8<64, CD8VF>, VEX_W;
defm NAME#Z128 : avx512_binop_rm2<opc, OpcodeStr, itins, OpNode,
- v4i32x_info, v2i64x_info, IsCommutable>,
+ _SrcVTInfo.info128, _DstVTInfo.info128,
+ v2i64x_info, IsCommutable>,
EVEX_V128, EVEX_CD8<64, CD8VF>, VEX_W;
}
}
defm VPMULDQ : avx512_binop_all<0x28, "vpmuldq", SSE_INTALU_ITINS_P,
- X86pmuldq, 1>,T8PD;
+ avx512vl_i32_info, avx512vl_i64_info,
+ X86pmuldq, HasAVX512, 1>,T8PD;
defm VPMULUDQ : avx512_binop_all<0xF4, "vpmuludq", SSE_INTMUL_ITINS_P,
- X86pmuludq, 1>;
+ avx512vl_i32_info, avx512vl_i64_info,
+ X86pmuludq, HasAVX512, 1>;
+defm VPMULTISHIFTQB : avx512_binop_all<0x83, "vpmultishiftqb", SSE_INTALU_ITINS_P,
+ avx512vl_i8_info, avx512vl_i8_info,
+ X86multishift, HasVBMI, 0>, T8PD;
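[Editor's note, illustrative only.] VPMULTISHIFTQB (AVX512VBMI) lets each destination byte pick an arbitrary, unaligned 8-bit field out of the corresponding source qword. A hedged sketch, assuming the _mm512_multishift_epi64_epi8 intrinsic with control operand first:

#include <immintrin.h>

// Each byte of 'ctrl' gives a bit offset (mod 64) into the corresponding
// 64-bit lane of 'data'; the result byte is the 8-bit field starting there.
// Handy for in-register bitfield extraction (e.g. base64-style decoding).
__m512i extract_bitfields(__m512i ctrl, __m512i data) {
  return _mm512_multishift_epi64_epi8(ctrl, data); // VPMULTISHIFTQB
}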
multiclass avx512_packs_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _Src, X86VectorVTInfo _Dst> {
- let mayLoad = 1 in {
- defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
- (ins _Src.RC:$src1, _Src.ScalarMemOp:$src2),
- OpcodeStr,
- "${src2}"##_Src.BroadcastStr##", $src1",
- "$src1, ${src2}"##_Src.BroadcastStr,
- (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bitconvert
- (_Src.VT (X86VBroadcast
- (_Src.ScalarLdFrag addr:$src2))))))>,
- EVEX_4V, EVEX_B, EVEX_CD8<_Src.EltSize, CD8VF>;
- }
+ defm rmb : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
+ (ins _Src.RC:$src1, _Src.ScalarMemOp:$src2),
+ OpcodeStr,
+ "${src2}"##_Src.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_Src.BroadcastStr,
+ (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1), (bitconvert
+ (_Src.VT (X86VBroadcast
+ (_Src.ScalarLdFrag addr:$src2))))))>,
+ EVEX_4V, EVEX_B, EVEX_CD8<_Src.EltSize, CD8VF>;
}
multiclass avx512_packs_rm<bits<8> opc, string OpcodeStr,
@@ -3484,23 +3654,22 @@ multiclass avx512_packs_rm<bits<8> opc, string OpcodeStr,
(_Src.VT _Src.RC:$src1),
(_Src.VT _Src.RC:$src2)))>,
EVEX_CD8<_Src.EltSize, CD8VF>, EVEX_4V;
- let mayLoad = 1 in {
- defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
- (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
- "$src2, $src1", "$src1, $src2",
- (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
- (bitconvert (_Src.LdFrag addr:$src2))))>,
- EVEX_4V, EVEX_CD8<_Src.EltSize, CD8VF>;
- }
+ defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
+ (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
+ (bitconvert (_Src.LdFrag addr:$src2))))>,
+ EVEX_4V, EVEX_CD8<_Src.EltSize, CD8VF>;
}
multiclass avx512_packs_all_i32_i16<bits<8> opc, string OpcodeStr,
SDNode OpNode> {
+ let Predicates = [HasBWI] in
defm NAME#Z : avx512_packs_rm<opc, OpcodeStr, OpNode, v16i32_info,
v32i16_info>,
avx512_packs_rmb<opc, OpcodeStr, OpNode, v16i32_info,
v32i16_info>, EVEX_V512;
- let Predicates = [HasVLX] in {
+ let Predicates = [HasBWI, HasVLX] in {
defm NAME#Z256 : avx512_packs_rm<opc, OpcodeStr, OpNode, v8i32x_info,
v16i16x_info>,
avx512_packs_rmb<opc, OpcodeStr, OpNode, v8i32x_info,
@@ -3513,9 +3682,10 @@ multiclass avx512_packs_all_i32_i16<bits<8> opc, string OpcodeStr,
}
multiclass avx512_packs_all_i16_i8<bits<8> opc, string OpcodeStr,
SDNode OpNode> {
+ let Predicates = [HasBWI] in
defm NAME#Z : avx512_packs_rm<opc, OpcodeStr, OpNode, v32i16_info,
v64i8_info>, EVEX_V512;
- let Predicates = [HasVLX] in {
+ let Predicates = [HasBWI, HasVLX] in {
defm NAME#Z256 : avx512_packs_rm<opc, OpcodeStr, OpNode, v16i16x_info,
v32i8x_info>, EVEX_V256;
defm NAME#Z128 : avx512_packs_rm<opc, OpcodeStr, OpNode, v8i16x_info,
@@ -3526,9 +3696,10 @@ multiclass avx512_packs_all_i16_i8<bits<8> opc, string OpcodeStr,
multiclass avx512_vpmadd<bits<8> opc, string OpcodeStr,
SDNode OpNode, AVX512VLVectorVTInfo _Src,
AVX512VLVectorVTInfo _Dst> {
+ let Predicates = [HasBWI] in
defm NAME#Z : avx512_packs_rm<opc, OpcodeStr, OpNode, _Src.info512,
_Dst.info512>, EVEX_V512;
- let Predicates = [HasVLX] in {
+ let Predicates = [HasBWI, HasVLX] in {
defm NAME#Z256 : avx512_packs_rm<opc, OpcodeStr, OpNode, _Src.info256,
_Dst.info256>, EVEX_V256;
defm NAME#Z128 : avx512_packs_rm<opc, OpcodeStr, OpNode, _Src.info128,
@@ -3536,17 +3707,15 @@ multiclass avx512_vpmadd<bits<8> opc, string OpcodeStr,
}
}
-let Predicates = [HasBWI] in {
- defm VPACKSSDW : avx512_packs_all_i32_i16<0x6B, "vpackssdw", X86Packss>, PD;
- defm VPACKUSDW : avx512_packs_all_i32_i16<0x2b, "vpackusdw", X86Packus>, T8PD;
- defm VPACKSSWB : avx512_packs_all_i16_i8 <0x63, "vpacksswb", X86Packss>, AVX512BIBase, VEX_W;
- defm VPACKUSWB : avx512_packs_all_i16_i8 <0x67, "vpackuswb", X86Packus>, AVX512BIBase, VEX_W;
+defm VPACKSSDW : avx512_packs_all_i32_i16<0x6B, "vpackssdw", X86Packss>, AVX512BIBase;
+defm VPACKUSDW : avx512_packs_all_i32_i16<0x2b, "vpackusdw", X86Packus>, AVX5128IBase;
+defm VPACKSSWB : avx512_packs_all_i16_i8 <0x63, "vpacksswb", X86Packss>, AVX512BIBase;
+defm VPACKUSWB : avx512_packs_all_i16_i8 <0x67, "vpackuswb", X86Packus>, AVX512BIBase;
- defm VPMADDUBSW : avx512_vpmadd<0x04, "vpmaddubsw", X86vpmaddubsw,
- avx512vl_i8_info, avx512vl_i16_info>, AVX512BIBase, T8PD;
- defm VPMADDWD : avx512_vpmadd<0xF5, "vpmaddwd", X86vpmaddwd,
- avx512vl_i16_info, avx512vl_i32_info>, AVX512BIBase;
-}
+defm VPMADDUBSW : avx512_vpmadd<0x04, "vpmaddubsw", X86vpmaddubsw,
+ avx512vl_i8_info, avx512vl_i16_info>, AVX512BIBase, T8PD;
+defm VPMADDWD : avx512_vpmadd<0xF5, "vpmaddwd", X86vpmaddwd,
+ avx512vl_i16_info, avx512vl_i32_info>, AVX512BIBase;
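[Editor's note, illustrative only.] Moving the predicates into the per-size multiclasses guards each width individually: HasBWI for the 512-bit forms, HasBWI+HasVLX for 128/256. Functionally these remain the usual saturating packs and multiply-add; a short sketch with the standard intrinsics:

#include <immintrin.h>

// VPACKSSDW: narrow two i32 vectors to one i16 vector with signed
// saturation (interleaved per 128-bit lane).
__m512i pack_i32_to_i16(__m512i a, __m512i b) {
  return _mm512_packs_epi32(a, b);
}
// VPMADDWD: multiply i16 pairs and horizontally add adjacent products
// into i32 lanes.
__m512i dot_pairs(__m512i a, __m512i b) {
  return _mm512_madd_epi16(a, b);
}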
defm VPMAXSB : avx512_binop_rm_vl_b<0x3C, "vpmaxsb", smax,
SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
@@ -3603,7 +3772,7 @@ multiclass avx512_fp_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
itins.rr, IsCommutable>;
defm rm_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
(VecNode (_.VT _.RC:$src1),
(_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
@@ -3620,7 +3789,7 @@ multiclass avx512_fp_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
(ins _.FRC:$src1, _.ScalarMemOp:$src2),
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set _.FRC:$dst, (OpNode _.FRC:$src1,
- (_.ScalarLdFrag addr:$src2)))], itins.rr>;
+ (_.ScalarLdFrag addr:$src2)))], itins.rm>;
}
}
@@ -3677,8 +3846,41 @@ defm VADD : avx512_binop_s_round<0x58, "vadd", fadd, X86faddRnd, SSE_ALU_ITINS_S
defm VMUL : avx512_binop_s_round<0x59, "vmul", fmul, X86fmulRnd, SSE_ALU_ITINS_S, 1>;
defm VSUB : avx512_binop_s_round<0x5C, "vsub", fsub, X86fsubRnd, SSE_ALU_ITINS_S, 0>;
defm VDIV : avx512_binop_s_round<0x5E, "vdiv", fdiv, X86fdivRnd, SSE_ALU_ITINS_S, 0>;
-defm VMIN : avx512_binop_s_sae <0x5D, "vmin", X86fmin, X86fminRnd, SSE_ALU_ITINS_S, 1>;
-defm VMAX : avx512_binop_s_sae <0x5F, "vmax", X86fmax, X86fmaxRnd, SSE_ALU_ITINS_S, 1>;
+defm VMIN : avx512_binop_s_sae <0x5D, "vmin", X86fmin, X86fminRnd, SSE_ALU_ITINS_S, 0>;
+defm VMAX : avx512_binop_s_sae <0x5F, "vmax", X86fmax, X86fmaxRnd, SSE_ALU_ITINS_S, 0>;
+
+// MIN/MAX nodes are commutable under "unsafe-fp-math"; in that case we use
+// X86fminc and X86fmaxc instead of X86fmin and X86fmax.
+multiclass avx512_comutable_binop_s<bits<8> opc, string OpcodeStr,
+ X86VectorVTInfo _, SDNode OpNode, OpndItins itins> {
+  let isCodeGenOnly = 1, isCommutable = 1, Predicates = [HasAVX512] in {
+ def rr : I< opc, MRMSrcReg, (outs _.FRC:$dst),
+ (ins _.FRC:$src1, _.FRC:$src2),
+ OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set _.FRC:$dst, (OpNode _.FRC:$src1, _.FRC:$src2))],
+ itins.rr>;
+ def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst),
+ (ins _.FRC:$src1, _.ScalarMemOp:$src2),
+ OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set _.FRC:$dst, (OpNode _.FRC:$src1,
+ (_.ScalarLdFrag addr:$src2)))], itins.rm>;
+ }
+}
+defm VMINCSSZ : avx512_comutable_binop_s<0x5D, "vminss", f32x_info, X86fminc,
+ SSE_ALU_ITINS_S.s>, XS, EVEX_4V, VEX_LIG,
+ EVEX_CD8<32, CD8VT1>;
+
+defm VMINCSDZ : avx512_comutable_binop_s<0x5D, "vminsd", f64x_info, X86fminc,
+ SSE_ALU_ITINS_S.d>, XD, VEX_W, EVEX_4V, VEX_LIG,
+ EVEX_CD8<64, CD8VT1>;
+
+defm VMAXCSSZ : avx512_comutable_binop_s<0x5F, "vmaxss", f32x_info, X86fmaxc,
+ SSE_ALU_ITINS_S.s>, XS, EVEX_4V, VEX_LIG,
+ EVEX_CD8<32, CD8VT1>;
+
+defm VMAXCSDZ : avx512_comutable_binop_s<0x5F, "vmaxsd", f64x_info, X86fmaxc,
+ SSE_ALU_ITINS_S.d>, XD, VEX_W, EVEX_4V, VEX_LIG,
+ EVEX_CD8<64, CD8VT1>;
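[Editor's note, illustrative only.] The distinction matters because x86 MINSS/MAXSS are not commutative: when either operand is NaN the instruction returns the *second* source, so operands may only be swapped (the X86fminc/X86fmaxc nodes above) under unsafe-fp-math. A small demonstration using only plain SSE intrinsics:

#include <immintrin.h>

// MINSS returns the second source when either operand is NaN, so
// minss(NAN, 1.0f) == 1.0f while minss(1.0f, NAN) is NaN.
float minss(float a, float b) {
  return _mm_cvtss_f32(_mm_min_ss(_mm_set_ss(a), _mm_set_ss(b)));
}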
multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _, bit IsCommutable> {
@@ -3686,19 +3888,17 @@ multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, _.RC:$src2))>, EVEX_4V;
- let mayLoad = 1 in {
- defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
- "$src2, $src1", "$src1, $src2",
- (OpNode _.RC:$src1, (_.LdFrag addr:$src2))>, EVEX_4V;
- defm rmb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
- "${src2}"##_.BroadcastStr##", $src1",
- "$src1, ${src2}"##_.BroadcastStr,
- (OpNode _.RC:$src1, (_.VT (X86VBroadcast
- (_.ScalarLdFrag addr:$src2))))>,
- EVEX_4V, EVEX_B;
- }//let mayLoad = 1
+ defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
+ "$src2, $src1", "$src1, $src2",
+ (OpNode _.RC:$src1, (_.LdFrag addr:$src2))>, EVEX_4V;
+ defm rmb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
+ "${src2}"##_.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_.BroadcastStr,
+ (OpNode _.RC:$src1, (_.VT (X86VBroadcast
+ (_.ScalarLdFrag addr:$src2))))>,
+ EVEX_4V, EVEX_B;
}
multiclass avx512_fp_round_packed<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd,
@@ -3721,16 +3921,18 @@ multiclass avx512_fp_sae_packed<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd,
}
multiclass avx512_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
- bit IsCommutable = 0> {
+ Predicate prd, bit IsCommutable = 0> {
+ let Predicates = [prd] in {
defm PSZ : avx512_fp_packed<opc, OpcodeStr, OpNode, v16f32_info,
IsCommutable>, EVEX_V512, PS,
EVEX_CD8<32, CD8VF>;
defm PDZ : avx512_fp_packed<opc, OpcodeStr, OpNode, v8f64_info,
IsCommutable>, EVEX_V512, PD, VEX_W,
EVEX_CD8<64, CD8VF>;
+ }
// Define only if AVX512VL feature is present.
- let Predicates = [HasVLX] in {
+ let Predicates = [prd, HasVLX] in {
defm PSZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, v4f32x_info,
IsCommutable>, EVEX_V128, PS,
EVEX_CD8<32, CD8VF>;
@@ -3760,24 +3962,26 @@ multiclass avx512_fp_binop_p_sae<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd
EVEX_V512, PD, VEX_W,EVEX_CD8<64, CD8VF>;
}
-defm VADD : avx512_fp_binop_p<0x58, "vadd", fadd, 1>,
+defm VADD : avx512_fp_binop_p<0x58, "vadd", fadd, HasAVX512, 1>,
avx512_fp_binop_p_round<0x58, "vadd", X86faddRnd>;
-defm VMUL : avx512_fp_binop_p<0x59, "vmul", fmul, 1>,
+defm VMUL : avx512_fp_binop_p<0x59, "vmul", fmul, HasAVX512, 1>,
avx512_fp_binop_p_round<0x59, "vmul", X86fmulRnd>;
-defm VSUB : avx512_fp_binop_p<0x5C, "vsub", fsub>,
+defm VSUB : avx512_fp_binop_p<0x5C, "vsub", fsub, HasAVX512>,
avx512_fp_binop_p_round<0x5C, "vsub", X86fsubRnd>;
-defm VDIV : avx512_fp_binop_p<0x5E, "vdiv", fdiv>,
+defm VDIV : avx512_fp_binop_p<0x5E, "vdiv", fdiv, HasAVX512>,
avx512_fp_binop_p_round<0x5E, "vdiv", X86fdivRnd>;
-defm VMIN : avx512_fp_binop_p<0x5D, "vmin", X86fmin, 1>,
+defm VMIN : avx512_fp_binop_p<0x5D, "vmin", X86fmin, HasAVX512, 0>,
avx512_fp_binop_p_sae<0x5D, "vmin", X86fminRnd>;
-defm VMAX : avx512_fp_binop_p<0x5F, "vmax", X86fmax, 1>,
+defm VMAX : avx512_fp_binop_p<0x5F, "vmax", X86fmax, HasAVX512, 0>,
avx512_fp_binop_p_sae<0x5F, "vmax", X86fmaxRnd>;
-let Predicates = [HasDQI] in {
- defm VAND : avx512_fp_binop_p<0x54, "vand", X86fand, 1>;
- defm VANDN : avx512_fp_binop_p<0x55, "vandn", X86fandn, 0>;
- defm VOR : avx512_fp_binop_p<0x56, "vor", X86for, 1>;
- defm VXOR : avx512_fp_binop_p<0x57, "vxor", X86fxor, 1>;
+let isCodeGenOnly = 1 in {
+ defm VMINC : avx512_fp_binop_p<0x5D, "vmin", X86fminc, HasAVX512, 1>;
+ defm VMAXC : avx512_fp_binop_p<0x5F, "vmax", X86fmaxc, HasAVX512, 1>;
}
+defm VAND : avx512_fp_binop_p<0x54, "vand", X86fand, HasDQI, 1>;
+defm VANDN : avx512_fp_binop_p<0x55, "vandn", X86fandn, HasDQI, 0>;
+defm VOR : avx512_fp_binop_p<0x56, "vor", X86for, HasDQI, 1>;
+defm VXOR : avx512_fp_binop_p<0x57, "vxor", X86fxor, HasDQI, 1>;
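[Editor's note, illustrative only.] Hoisting the predicate into avx512_fp_binop_p lets each instantiation carry its own feature guard: the commutable min/max pair needs only AVX512F, while the FP logic forms need AVX512DQ. In intrinsic terms this corresponds to e.g. _mm512_and_ps, which to my understanding requires AVX512DQ; a sketch:

#include <immintrin.h>

// VANDPS on zmm requires AVX512DQ; without DQ a compiler typically
// falls back to the integer VPANDQ encoding instead.
__m512 clear_sign_bits(__m512 x) {
  const __m512 mask = _mm512_castsi512_ps(_mm512_set1_epi32(0x7fffffff));
  return _mm512_and_ps(x, mask); // VANDPS (AVX512DQ)
}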
multiclass avx512_fp_scalef_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
@@ -3785,19 +3989,17 @@ multiclass avx512_fp_scalef_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, _.RC:$src2, (i32 FROUND_CURRENT)))>, EVEX_4V;
- let mayLoad = 1 in {
- defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
- "$src2, $src1", "$src1, $src2",
- (OpNode _.RC:$src1, (_.LdFrag addr:$src2), (i32 FROUND_CURRENT))>, EVEX_4V;
- defm rmb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
- "${src2}"##_.BroadcastStr##", $src1",
- "$src1, ${src2}"##_.BroadcastStr,
- (OpNode _.RC:$src1, (_.VT (X86VBroadcast
- (_.ScalarLdFrag addr:$src2))), (i32 FROUND_CURRENT))>,
- EVEX_4V, EVEX_B;
- }//let mayLoad = 1
+ defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
+ "$src2, $src1", "$src1, $src2",
+ (OpNode _.RC:$src1, (_.LdFrag addr:$src2), (i32 FROUND_CURRENT))>, EVEX_4V;
+ defm rmb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
+ "${src2}"##_.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_.BroadcastStr,
+ (OpNode _.RC:$src1, (_.VT (X86VBroadcast
+ (_.ScalarLdFrag addr:$src2))), (i32 FROUND_CURRENT))>,
+ EVEX_4V, EVEX_B;
}
multiclass avx512_fp_scalef_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -3806,26 +4008,26 @@ multiclass avx512_fp_scalef_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, _.RC:$src2, (i32 FROUND_CURRENT)))>;
- let mayLoad = 1 in {
- defm rm: AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
- "$src2, $src1", "$src1, $src2",
- (OpNode _.RC:$src1, (_.LdFrag addr:$src2), (i32 FROUND_CURRENT))>;
- }//let mayLoad = 1
+ defm rm: AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
+ "$src2, $src1", "$src1, $src2",
+ (OpNode _.RC:$src1,
+ (_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
+ (i32 FROUND_CURRENT))>;
}
-multiclass avx512_fp_scalef_all<bits<8> opc, bits<8> opcScaler, string OpcodeStr, SDNode OpNode> {
+multiclass avx512_fp_scalef_all<bits<8> opc, bits<8> opcScaler, string OpcodeStr, SDNode OpNode, SDNode OpNodeScal> {
defm PSZ : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v16f32_info>,
avx512_fp_round_packed<opc, OpcodeStr, OpNode, v16f32_info>,
EVEX_V512, EVEX_CD8<32, CD8VF>;
defm PDZ : avx512_fp_scalef_p<opc, OpcodeStr, OpNode, v8f64_info>,
avx512_fp_round_packed<opc, OpcodeStr, OpNode, v8f64_info>,
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
- defm SSZ128 : avx512_fp_scalef_scalar<opcScaler, OpcodeStr, OpNode, f32x_info>,
- avx512_fp_scalar_round<opcScaler, OpcodeStr##"ss", f32x_info, OpNode, SSE_ALU_ITINS_S.s>,
+ defm SSZ128 : avx512_fp_scalef_scalar<opcScaler, OpcodeStr, OpNodeScal, f32x_info>,
+ avx512_fp_scalar_round<opcScaler, OpcodeStr##"ss", f32x_info, OpNodeScal, SSE_ALU_ITINS_S.s>,
EVEX_4V,EVEX_CD8<32, CD8VT1>;
- defm SDZ128 : avx512_fp_scalef_scalar<opcScaler, OpcodeStr, OpNode, f64x_info>,
- avx512_fp_scalar_round<opcScaler, OpcodeStr##"sd", f64x_info, OpNode, SSE_ALU_ITINS_S.d>,
+ defm SDZ128 : avx512_fp_scalef_scalar<opcScaler, OpcodeStr, OpNodeScal, f64x_info>,
+ avx512_fp_scalar_round<opcScaler, OpcodeStr##"sd", f64x_info, OpNodeScal, SSE_ALU_ITINS_S.d>,
EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
// Define only if AVX512VL feature is present.
@@ -3840,7 +4042,7 @@ multiclass avx512_fp_scalef_all<bits<8> opc, bits<8> opcScaler, string OpcodeStr
EVEX_V256, VEX_W, EVEX_CD8<64, CD8VF>;
}
}
-defm VSCALEF : avx512_fp_scalef_all<0x2C, 0x2D, "vscalef", X86scalef>, T8PD;
+defm VSCALEF : avx512_fp_scalef_all<0x2C, 0x2D, "vscalef", X86scalef, X86scalefs>, T8PD;
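[Editor's note, illustrative only.] VSCALEF computes x * 2^floor(y) per element; the change threads a separate scalar node (X86scalefs) into the SS/SD forms. A hedged sketch with the packed intrinsic, assuming _mm512_scalef_ps from <immintrin.h>:

#include <immintrin.h>

// ldexp-like scaling: each lane of x is multiplied by 2^floor(e).
__m512 scale_by_pow2(__m512 x, __m512 e) {
  return _mm512_scalef_ps(x, e); // VSCALEFPS
}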
//===----------------------------------------------------------------------===//
// AVX-512 VPTESTM instructions
@@ -3848,12 +4050,12 @@ defm VSCALEF : avx512_fp_scalef_all<0x2C, 0x2D, "vscalef", X86scalef>, T8PD;
multiclass avx512_vptest<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
+ let isCommutable = 1 in
defm rr : AVX512_maskable_cmp<opc, MRMSrcReg, _, (outs _.KRC:$dst),
(ins _.RC:$src1, _.RC:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
(OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))>,
EVEX_4V;
- let mayLoad = 1 in
defm rm : AVX512_maskable_cmp<opc, MRMSrcMem, _, (outs _.KRC:$dst),
(ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -3865,7 +4067,6 @@ multiclass avx512_vptest<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass avx512_vptest_mb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
- let mayLoad = 1 in
defm rmb : AVX512_maskable_cmp<opc, MRMSrcMem, _, (outs _.KRC:$dst),
(ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
"${src2}"##_.BroadcastStr##", $src1",
@@ -3874,8 +4075,22 @@ multiclass avx512_vptest_mb<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.ScalarLdFrag addr:$src2))))>,
EVEX_B, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
}
+
+// Use the 512-bit version to implement the 128/256-bit forms on targets
+// without VLX (NoVLX).
+multiclass avx512_vptest_lowering<SDNode OpNode, X86VectorVTInfo ExtendInfo,
+ X86VectorVTInfo _, string Suffix> {
+ def : Pat<(_.KVT (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))),
+ (_.KVT (COPY_TO_REGCLASS
+ (!cast<Instruction>(NAME # Suffix # "Zrr")
+ (INSERT_SUBREG (ExtendInfo.VT (IMPLICIT_DEF)),
+ _.RC:$src1, _.SubRegIdx),
+ (INSERT_SUBREG (ExtendInfo.VT (IMPLICIT_DEF)),
+ _.RC:$src2, _.SubRegIdx)),
+ _.KRC))>;
+}
+
multiclass avx512_vptest_dq_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
- AVX512VLVectorVTInfo _> {
+ AVX512VLVectorVTInfo _, string Suffix> {
let Predicates = [HasAVX512] in
defm Z : avx512_vptest<opc, OpcodeStr, OpNode, _.info512>,
avx512_vptest_mb<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
@@ -3886,13 +4101,17 @@ multiclass avx512_vptest_dq_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
defm Z128 : avx512_vptest<opc, OpcodeStr, OpNode, _.info128>,
avx512_vptest_mb<opc, OpcodeStr, OpNode, _.info128>, EVEX_V128;
}
+ let Predicates = [HasAVX512, NoVLX] in {
+ defm Z256_Alt : avx512_vptest_lowering< OpNode, _.info512, _.info256, Suffix>;
+ defm Z128_Alt : avx512_vptest_lowering< OpNode, _.info512, _.info128, Suffix>;
+ }
}
multiclass avx512_vptest_dq<bits<8> opc, string OpcodeStr, SDNode OpNode> {
defm D : avx512_vptest_dq_sizes<opc, OpcodeStr#"d", OpNode,
- avx512vl_i32_info>;
+ avx512vl_i32_info, "D">;
defm Q : avx512_vptest_dq_sizes<opc, OpcodeStr#"q", OpNode,
- avx512vl_i64_info>, VEX_W;
+ avx512vl_i64_info, "Q">, VEX_W;
}
multiclass avx512_vptest_wb<bits<8> opc, string OpcodeStr,
@@ -3914,6 +4133,14 @@ multiclass avx512_vptest_wb<bits<8> opc, string OpcodeStr,
defm BZ128: avx512_vptest<opc, OpcodeStr#"b", OpNode, v16i8x_info>,
EVEX_V128;
}
+
+ let Predicates = [HasAVX512, NoVLX] in {
+ defm BZ256_Alt : avx512_vptest_lowering< OpNode, v64i8_info, v32i8x_info, "B">;
+ defm BZ128_Alt : avx512_vptest_lowering< OpNode, v64i8_info, v16i8x_info, "B">;
+ defm WZ256_Alt : avx512_vptest_lowering< OpNode, v32i16_info, v16i16x_info, "W">;
+ defm WZ128_Alt : avx512_vptest_lowering< OpNode, v32i16_info, v8i16x_info, "W">;
+ }
+
}
multiclass avx512_vptest_all_forms<bits<8> opc_wb, bits<8> opc_dq, string OpcodeStr,
@@ -3924,13 +4151,6 @@ multiclass avx512_vptest_all_forms<bits<8> opc_wb, bits<8> opc_dq, string Opcode
defm VPTESTM : avx512_vptest_all_forms<0x26, 0x27, "vptestm", X86testm>, T8PD;
defm VPTESTNM : avx512_vptest_all_forms<0x26, 0x27, "vptestnm", X86testnm>, T8XS;
-def : Pat <(i16 (int_x86_avx512_mask_ptestm_d_512 (v16i32 VR512:$src1),
- (v16i32 VR512:$src2), (i16 -1))),
- (COPY_TO_REGCLASS (VPTESTMDZrr VR512:$src1, VR512:$src2), GR16)>;
-
-def : Pat <(i8 (int_x86_avx512_mask_ptestm_q_512 (v8i64 VR512:$src1),
- (v8i64 VR512:$src2), (i8 -1))),
- (COPY_TO_REGCLASS (VPTESTMQZrr VR512:$src1, VR512:$src2), GR8)>;
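[Editor's note, illustrative only.] VPTESTM/VPTESTNM AND the two sources and produce one mask bit per element for a nonzero (respectively zero) result; marking the rr form commutable and dropping the hand-written intrinsic patterns leaves selection to the generic nodes. In terms of the standard mask intrinsics:

#include <immintrin.h>

// One mask bit per dword: set when (a & b) != 0 (VPTESTMD), or when
// (a & b) == 0 for the testn variant (VPTESTNMD).
__mmask16 any_bits(__m512i a, __m512i b) { return _mm512_test_epi32_mask(a, b); }
__mmask16 no_bits(__m512i a, __m512i b)  { return _mm512_testn_epi32_mask(a, b); }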
//===----------------------------------------------------------------------===//
// AVX-512 Shift instructions
@@ -3942,7 +4162,6 @@ multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, (i8 imm:$src2))),
SSE_INTSHIFT_ITINS_P.rr>;
- let mayLoad = 1 in
defm mi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
(ins _.MemOp:$src1, u8imm:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -3953,7 +4172,6 @@ multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
multiclass avx512_shift_rmbi<bits<8> opc, Format ImmFormM,
string OpcodeStr, SDNode OpNode, X86VectorVTInfo _> {
- let mayLoad = 1 in
defm mbi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
(ins _.ScalarMemOp:$src1, u8imm:$src2), OpcodeStr,
"$src2, ${src1}"##_.BroadcastStr, "${src1}"##_.BroadcastStr##", $src2",
@@ -4073,7 +4291,6 @@ multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1, (_.VT _.RC:$src2))),
SSE_INTSHIFT_ITINS_P.rr>, AVX5128IBase, EVEX_4V;
- let mayLoad = 1 in
defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
@@ -4085,7 +4302,6 @@ multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass avx512_var_shift_mb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
- let mayLoad = 1 in
defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
"${src2}"##_.BroadcastStr##", $src1",
@@ -4117,20 +4333,20 @@ multiclass avx512_var_shift_types<bits<8> opc, string OpcodeStr,
avx512vl_i64_info>, VEX_W;
}
-// Use 512bit version to implement 128/256 bit in case NoVLX.
+// Use the 512-bit version to implement the 128/256-bit forms on targets
+// without VLX (NoVLX).
multiclass avx512_var_shift_w_lowering<AVX512VLVectorVTInfo _, SDNode OpNode> {
let Predicates = [HasBWI, NoVLX] in {
- def : Pat<(_.info256.VT (OpNode (_.info256.VT _.info256.RC:$src1),
+ def : Pat<(_.info256.VT (OpNode (_.info256.VT _.info256.RC:$src1),
(_.info256.VT _.info256.RC:$src2))),
- (EXTRACT_SUBREG
+ (EXTRACT_SUBREG
(!cast<Instruction>(NAME#"WZrr")
(INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR256X:$src1, sub_ymm),
(INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR256X:$src2, sub_ymm)),
sub_ymm)>;
- def : Pat<(_.info128.VT (OpNode (_.info128.VT _.info128.RC:$src1),
+ def : Pat<(_.info128.VT (OpNode (_.info128.VT _.info128.RC:$src1),
(_.info128.VT _.info128.RC:$src2))),
- (EXTRACT_SUBREG
+ (EXTRACT_SUBREG
(!cast<Instruction>(NAME#"WZrr")
(INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR128X:$src1, sub_xmm),
(INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR128X:$src2, sub_xmm)),
@@ -4155,9 +4371,14 @@ multiclass avx512_var_shift_w<bits<8> opc, string OpcodeStr,
defm VPSLLV : avx512_var_shift_types<0x47, "vpsllv", shl>,
avx512_var_shift_w<0x12, "vpsllvw", shl>,
avx512_var_shift_w_lowering<avx512vl_i16_info, shl>;
+
defm VPSRAV : avx512_var_shift_types<0x46, "vpsrav", sra>,
avx512_var_shift_w<0x11, "vpsravw", sra>,
avx512_var_shift_w_lowering<avx512vl_i16_info, sra>;
+let isCodeGenOnly = 1 in
+ defm VPSRAV_Int : avx512_var_shift_types<0x46, "vpsrav", X86vsrav>,
+ avx512_var_shift_w<0x11, "vpsravw", X86vsrav>;
+
defm VPSRLV : avx512_var_shift_types<0x45, "vpsrlv", srl>,
avx512_var_shift_w<0x10, "vpsrlvw", srl>,
avx512_var_shift_w_lowering<avx512vl_i16_info, srl>;
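[Editor's note, illustrative only.] The *_w_lowering multiclasses implement the 128/256-bit word-element variable shifts on targets without VLX by widening: INSERT_SUBREG the sources into an IMPLICIT_DEF 512-bit register, run the Z-form instruction, then EXTRACT_SUBREG the result back. From the user's side these are the per-element shifts; a sketch with the AVX512BW intrinsic:

#include <immintrin.h>

// Per-element word shift: lane i of 'v' is shifted left by lane i of 'n'
// (counts >= 16 yield zero). Needs AVX512BW; the 256-bit form additionally
// needs VLX, which is why the lowering above widens to 512 bits without it.
__m512i shl_per_lane(__m512i v, __m512i n) {
  return _mm512_sllv_epi16(v, n); // VPSLLVW
}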
@@ -4193,8 +4414,24 @@ multiclass avx512_vpermi_dq_sizes<bits<8> opc, Format ImmFormR, Format ImmFormM,
VTInfo.info256>, EVEX_V256;
}
+multiclass avx512_vperm_bw<bits<8> opc, string OpcodeStr,
+ Predicate prd, SDNode OpNode,
+ AVX512VLVectorVTInfo _> {
+ let Predicates = [prd] in
+ defm Z: avx512_var_shift<opc, OpcodeStr, OpNode, _.info512>,
+ EVEX_V512 ;
+ let Predicates = [HasVLX, prd] in {
+ defm Z256: avx512_var_shift<opc, OpcodeStr, OpNode, _.info256>,
+ EVEX_V256 ;
+ defm Z128: avx512_var_shift<opc, OpcodeStr, OpNode, _.info128>,
+ EVEX_V128 ;
+ }
+}
-defm VPERM : avx512_var_shift_w<0x8D, "vpermw", X86VPermv>;
+defm VPERMW : avx512_vperm_bw<0x8D, "vpermw", HasBWI, X86VPermv,
+ avx512vl_i16_info>, VEX_W;
+defm VPERMB : avx512_vperm_bw<0x8D, "vpermb", HasVBMI, X86VPermv,
+ avx512vl_i8_info>;
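[Editor's note, illustrative only.] VPERMW and VPERMB are full cross-lane permutes at word and byte granularity (AVX512BW and AVX512VBMI respectively); in intrinsic terms, assuming the standard permutexvar names:

#include <immintrin.h>

// Full 64-byte table lookup: result byte i = table[idx[i] & 63].
__m512i shuffle_bytes(__m512i idx, __m512i table) {
  return _mm512_permutexvar_epi8(idx, table);  // VPERMB (AVX512VBMI)
}
__m512i shuffle_words(__m512i idx, __m512i table) {
  return _mm512_permutexvar_epi16(idx, table); // VPERMW (AVX512BW)
}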
defm VPERMD : avx512_vperm_dq_sizes<0x36, "vpermd", X86VPermv,
avx512vl_i32_info>;
@@ -4212,7 +4449,7 @@ defm VPERMPD : avx512_vpermi_dq_sizes<0x01, MRMSrcReg, MRMSrcMem, "vpermpd",
X86VPermi, avx512vl_f64_info>,
EVEX, AVX512AIi8Base, EVEX_CD8<64, CD8VF>, VEX_W;
//===----------------------------------------------------------------------===//
-// AVX-512 - VPERMIL
+// AVX-512 - VPERMIL
//===----------------------------------------------------------------------===//
multiclass avx512_permil_vec<bits<8> OpcVar, string OpcodeStr, SDNode OpNode,
@@ -4223,24 +4460,22 @@ multiclass avx512_permil_vec<bits<8> OpcVar, string OpcodeStr, SDNode OpNode,
(_.VT (OpNode _.RC:$src1,
(Ctrl.VT Ctrl.RC:$src2)))>,
T8PD, EVEX_4V;
- let mayLoad = 1 in {
- defm rm: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, Ctrl.MemOp:$src2), OpcodeStr,
- "$src2, $src1", "$src1, $src2",
- (_.VT (OpNode
- _.RC:$src1,
- (Ctrl.VT (bitconvert(Ctrl.LdFrag addr:$src2)))))>,
- T8PD, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
- defm rmb: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
- "${src2}"##_.BroadcastStr##", $src1",
- "$src1, ${src2}"##_.BroadcastStr,
- (_.VT (OpNode
- _.RC:$src1,
- (Ctrl.VT (X86VBroadcast
- (Ctrl.ScalarLdFrag addr:$src2)))))>,
- T8PD, EVEX_4V, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
- }//let mayLoad = 1
+ defm rm: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, Ctrl.MemOp:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (_.VT (OpNode
+ _.RC:$src1,
+ (Ctrl.VT (bitconvert(Ctrl.LdFrag addr:$src2)))))>,
+ T8PD, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
+ defm rmb: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
+ "${src2}"##_.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_.BroadcastStr,
+ (_.VT (OpNode
+ _.RC:$src1,
+ (Ctrl.VT (X86VBroadcast
+ (Ctrl.ScalarLdFrag addr:$src2)))))>,
+ T8PD, EVEX_4V, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
}
multiclass avx512_permil_vec_common<string OpcodeStr, bits<8> OpcVar,
@@ -4326,16 +4561,15 @@ let Predicates = [HasAVX512] in {
//===----------------------------------------------------------------------===//
multiclass avx512_mov_hilo_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
- let mayLoad = 1 in
- def rm : AVX512<opc, MRMSrcMem, (outs _.RC:$dst),
- (ins _.RC:$src1, f64mem:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set _.RC:$dst,
- (OpNode _.RC:$src1,
- (_.VT (bitconvert
- (v2f64 (scalar_to_vector (loadf64 addr:$src2)))))))],
- IIC_SSE_MOV_LH>, EVEX_4V;
+ def rm : AVX512<opc, MRMSrcMem, (outs _.RC:$dst),
+ (ins _.RC:$src1, f64mem:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set _.RC:$dst,
+ (OpNode _.RC:$src1,
+ (_.VT (bitconvert
+ (v2f64 (scalar_to_vector (loadf64 addr:$src2)))))))],
+ IIC_SSE_MOV_LH>, EVEX_4V;
}
defm VMOVHPSZ128 : avx512_mov_hilo_packed<0x16, "vmovhps", X86Movlhps,
@@ -4377,11 +4611,10 @@ let Predicates = [HasAVX512] in {
(VMOVLPDZ128rm VR128X:$src1, addr:$src2)>;
}
-let mayStore = 1 in {
def VMOVHPSZ128mr : AVX512PSI<0x17, MRMDestMem, (outs),
(ins f64mem:$dst, VR128X:$src),
"vmovhps\t{$src, $dst|$dst, $src}",
- [(store (f64 (vector_extract
+ [(store (f64 (extractelt
(X86Unpckh (bc_v2f64 (v4f32 VR128X:$src)),
(bc_v2f64 (v4f32 VR128X:$src))),
(iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>,
@@ -4389,28 +4622,28 @@ def VMOVHPSZ128mr : AVX512PSI<0x17, MRMDestMem, (outs),
def VMOVHPDZ128mr : AVX512PDI<0x17, MRMDestMem, (outs),
(ins f64mem:$dst, VR128X:$src),
"vmovhpd\t{$src, $dst|$dst, $src}",
- [(store (f64 (vector_extract
+ [(store (f64 (extractelt
(v2f64 (X86Unpckh VR128X:$src, VR128X:$src)),
(iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>,
EVEX, EVEX_CD8<64, CD8VT1>, VEX_W;
def VMOVLPSZ128mr : AVX512PSI<0x13, MRMDestMem, (outs),
(ins f64mem:$dst, VR128X:$src),
"vmovlps\t{$src, $dst|$dst, $src}",
- [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128X:$src)),
+ [(store (f64 (extractelt (bc_v2f64 (v4f32 VR128X:$src)),
(iPTR 0))), addr:$dst)],
IIC_SSE_MOV_LH>,
EVEX, EVEX_CD8<32, CD8VT2>;
def VMOVLPDZ128mr : AVX512PDI<0x13, MRMDestMem, (outs),
(ins f64mem:$dst, VR128X:$src),
"vmovlpd\t{$src, $dst|$dst, $src}",
- [(store (f64 (vector_extract (v2f64 VR128X:$src),
+ [(store (f64 (extractelt (v2f64 VR128X:$src),
(iPTR 0))), addr:$dst)],
IIC_SSE_MOV_LH>,
EVEX, EVEX_CD8<64, CD8VT1>, VEX_W;
-}
+
let Predicates = [HasAVX512] in {
// VMOVHPD patterns
- def : Pat<(store (f64 (vector_extract
+ def : Pat<(store (f64 (extractelt
(v2f64 (X86VPermilpi VR128X:$src, (i8 1))),
(iPTR 0))), addr:$dst),
(VMOVHPDZ128mr addr:$dst, VR128X:$src)>;
@@ -4442,21 +4675,19 @@ multiclass avx512_fma3p_213_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3))>,
AVX512FMA3Base;
- let mayLoad = 1 in {
- defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src2, _.MemOp:$src3),
- OpcodeStr, "$src3, $src2", "$src2, $src3",
- (_.VT (OpNode _.RC:$src1, _.RC:$src2, (_.LdFrag addr:$src3)))>,
- AVX512FMA3Base;
+ defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.MemOp:$src3),
+ OpcodeStr, "$src3, $src2", "$src2, $src3",
+ (_.VT (OpNode _.RC:$src1, _.RC:$src2, (_.LdFrag addr:$src3)))>,
+ AVX512FMA3Base;
- defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src2, _.ScalarMemOp:$src3),
- OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
- !strconcat("$src2, ${src3}", _.BroadcastStr ),
- (OpNode _.RC:$src1,
- _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))>,
- AVX512FMA3Base, EVEX_B;
- }
+ defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3),
+ OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
+ !strconcat("$src2, ${src3}", _.BroadcastStr ),
+ (OpNode _.RC:$src1,
+ _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))>,
+ AVX512FMA3Base, EVEX_B;
}
multiclass avx512_fma3_213_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -4509,21 +4740,19 @@ multiclass avx512_fma3p_231_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.VT (OpNode _.RC:$src2, _.RC:$src3, _.RC:$src1))>,
AVX512FMA3Base;
- let mayLoad = 1 in {
- defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src2, _.MemOp:$src3),
- OpcodeStr, "$src3, $src2", "$src2, $src3",
- (_.VT (OpNode _.RC:$src2, (_.LdFrag addr:$src3), _.RC:$src1))>,
- AVX512FMA3Base;
+ defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.MemOp:$src3),
+ OpcodeStr, "$src3, $src2", "$src2, $src3",
+ (_.VT (OpNode _.RC:$src2, (_.LdFrag addr:$src3), _.RC:$src1))>,
+ AVX512FMA3Base;
- defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src2, _.ScalarMemOp:$src3),
- OpcodeStr, "${src3}"##_.BroadcastStr##", $src2",
- "$src2, ${src3}"##_.BroadcastStr,
- (_.VT (OpNode _.RC:$src2,
- (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
- _.RC:$src1))>, AVX512FMA3Base, EVEX_B;
- }
+ defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3),
+ OpcodeStr, "${src3}"##_.BroadcastStr##", $src2",
+ "$src2, ${src3}"##_.BroadcastStr,
+ (_.VT (OpNode _.RC:$src2,
+ (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
+ _.RC:$src1))>, AVX512FMA3Base, EVEX_B;
}
multiclass avx512_fma3_231_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -4575,21 +4804,19 @@ multiclass avx512_fma3p_132_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3))>,
AVX512FMA3Base;
- let mayLoad = 1 in {
- defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src3, _.MemOp:$src2),
- OpcodeStr, "$src2, $src3", "$src3, $src2",
- (_.VT (OpNode _.RC:$src1, (_.LdFrag addr:$src2), _.RC:$src3))>,
- AVX512FMA3Base;
-
- defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src3, _.ScalarMemOp:$src2),
- OpcodeStr, "${src2}"##_.BroadcastStr##", $src3",
- "$src3, ${src2}"##_.BroadcastStr,
- (_.VT (OpNode _.RC:$src1,
- (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
- _.RC:$src3))>, AVX512FMA3Base, EVEX_B;
- }
+ defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src3, _.MemOp:$src2),
+ OpcodeStr, "$src2, $src3", "$src3, $src2",
+ (_.VT (OpNode _.RC:$src1, (_.LdFrag addr:$src2), _.RC:$src3))>,
+ AVX512FMA3Base;
+
+ defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src3, _.ScalarMemOp:$src2),
+ OpcodeStr, "${src2}"##_.BroadcastStr##", $src3",
+ "$src3, ${src2}"##_.BroadcastStr,
+ (_.VT (OpNode _.RC:$src1,
+ (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
+ _.RC:$src3))>, AVX512FMA3Base, EVEX_B;
}
multiclass avx512_fma3_132_round<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -4641,10 +4868,9 @@ multiclass avx512_fma3s_common<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
(ins _.RC:$src2, _.RC:$src3), OpcodeStr,
"$src3, $src2", "$src2, $src3", RHS_VEC_r>, AVX512FMA3Base;
- let mayLoad = 1 in
- defm m_Int: AVX512_maskable_3src_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src2, _.MemOp:$src3), OpcodeStr,
- "$src3, $src2", "$src2, $src3", RHS_VEC_m>, AVX512FMA3Base;
+ defm m_Int: AVX512_maskable_3src_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3), OpcodeStr,
+ "$src3, $src2", "$src2, $src3", RHS_VEC_m>, AVX512FMA3Base;
defm rb_Int: AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc),
@@ -4657,12 +4883,11 @@ multiclass avx512_fma3s_common<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
!strconcat(OpcodeStr,
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[RHS_r]>;
- let mayLoad = 1 in
- def m : AVX512FMA3<opc, MRMSrcMem, (outs _.FRC:$dst),
- (ins _.FRC:$src1, _.FRC:$src2, _.ScalarMemOp:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [RHS_m]>;
+ def m : AVX512FMA3<opc, MRMSrcMem, (outs _.FRC:$dst),
+ (ins _.FRC:$src1, _.FRC:$src2, _.ScalarMemOp:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ [RHS_m]>;
}// isCodeGenOnly = 1
}
}// Constraints = "$src1 = $dst"
@@ -4672,9 +4897,9 @@ multiclass avx512_fma3s_all<bits<8> opc213, bits<8> opc231, bits<8> opc132,
string SUFF> {
defm NAME#213#SUFF: avx512_fma3s_common<opc213, OpcodeStr#"213"#_.Suffix , _ ,
- (_.VT (OpNode _.RC:$src2, _.RC:$src1, _.RC:$src3)),
- (_.VT (OpNode _.RC:$src2, _.RC:$src1,
- (_.VT (scalar_to_vector(_.ScalarLdFrag addr:$src3))))),
+ (_.VT (OpNodeRnd _.RC:$src2, _.RC:$src1, _.RC:$src3, (i32 FROUND_CURRENT))),
+ (_.VT (OpNodeRnd _.RC:$src2, _.RC:$src1,
+ (_.VT (scalar_to_vector(_.ScalarLdFrag addr:$src3))), (i32 FROUND_CURRENT))),
(_.VT ( OpNodeRnd _.RC:$src2, _.RC:$src1, _.RC:$src3,
(i32 imm:$rc))),
(set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src2, _.FRC:$src1,
@@ -4683,10 +4908,10 @@ multiclass avx512_fma3s_all<bits<8> opc213, bits<8> opc231, bits<8> opc132,
(_.ScalarLdFrag addr:$src3))))>;
defm NAME#231#SUFF: avx512_fma3s_common<opc231, OpcodeStr#"231"#_.Suffix , _ ,
- (_.VT (OpNode _.RC:$src2, _.RC:$src3, _.RC:$src1)),
- (_.VT (OpNode _.RC:$src2,
+ (_.VT (OpNodeRnd _.RC:$src2, _.RC:$src3, _.RC:$src1, (i32 FROUND_CURRENT))),
+ (_.VT (OpNodeRnd _.RC:$src2,
(_.VT (scalar_to_vector(_.ScalarLdFrag addr:$src3))),
- _.RC:$src1)),
+ _.RC:$src1, (i32 FROUND_CURRENT))),
(_.VT ( OpNodeRnd _.RC:$src2, _.RC:$src3, _.RC:$src1,
(i32 imm:$rc))),
(set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src2, _.FRC:$src3,
@@ -4695,10 +4920,10 @@ multiclass avx512_fma3s_all<bits<8> opc213, bits<8> opc231, bits<8> opc132,
(_.ScalarLdFrag addr:$src3), _.FRC:$src1)))>;
defm NAME#132#SUFF: avx512_fma3s_common<opc132, OpcodeStr#"132"#_.Suffix , _ ,
- (_.VT (OpNode _.RC:$src1, _.RC:$src3, _.RC:$src2)),
- (_.VT (OpNode _.RC:$src1,
+ (_.VT (OpNodeRnd _.RC:$src1, _.RC:$src3, _.RC:$src2, (i32 FROUND_CURRENT))),
+ (_.VT (OpNodeRnd _.RC:$src1,
(_.VT (scalar_to_vector(_.ScalarLdFrag addr:$src3))),
- _.RC:$src2)),
+ _.RC:$src2, (i32 FROUND_CURRENT))),
(_.VT ( OpNodeRnd _.RC:$src1, _.RC:$src3, _.RC:$src2,
(i32 imm:$rc))),
(set _.FRC:$dst, (_.EltVT (OpNode _.FRC:$src1, _.FRC:$src3,
@@ -4725,6 +4950,53 @@ defm VFNMADD : avx512_fma3s<0xAD, 0xBD, 0x9D, "vfnmadd", X86Fnmadd, X86FnmaddRnd
defm VFNMSUB : avx512_fma3s<0xAF, 0xBF, 0x9F, "vfnmsub", X86Fnmsub, X86FnmsubRnd>;
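[Editor's note, illustrative only.] The 132/213/231 suffixes encode which source is overwritten and which may come from memory (e.g. 213: dst = src2 * src1 + src3); all three compute the same fused a*b+c with a single rounding, so instruction selection is free to pick whichever form best fits register reuse and the memory operand. Sketch:

#include <immintrin.h>

// One fused multiply-add; selection chooses the 132/213/231 form
// depending on which operand is in memory or reused.
__m512 fmadd(__m512 a, __m512 b, __m512 c) {
  return _mm512_fmadd_ps(a, b, c); // a*b + c, single rounding
}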
//===----------------------------------------------------------------------===//
+// AVX-512 IFMA - Packed Multiply of Unsigned 52-bit Integers and Add the Low/High 52 Bits
+//===----------------------------------------------------------------------===//
+let Constraints = "$src1 = $dst" in {
+multiclass avx512_pmadd52_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ X86VectorVTInfo _> {
+ defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.RC:$src3),
+ OpcodeStr, "$src3, $src2", "$src2, $src3",
+ (_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3))>,
+ AVX512FMA3Base;
+
+ defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.MemOp:$src3),
+ OpcodeStr, "$src3, $src2", "$src2, $src3",
+ (_.VT (OpNode _.RC:$src1, _.RC:$src2, (_.LdFrag addr:$src3)))>,
+ AVX512FMA3Base;
+
+ defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3),
+ OpcodeStr, !strconcat("${src3}", _.BroadcastStr,", $src2"),
+ !strconcat("$src2, ${src3}", _.BroadcastStr ),
+ (OpNode _.RC:$src1,
+ _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3))))>,
+ AVX512FMA3Base, EVEX_B;
+}
+} // Constraints = "$src1 = $dst"
+
+multiclass avx512_pmadd52_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ AVX512VLVectorVTInfo _> {
+ let Predicates = [HasIFMA] in {
+ defm Z : avx512_pmadd52_rm<opc, OpcodeStr, OpNode, _.info512>,
+ EVEX_V512, EVEX_CD8<_.info512.EltSize, CD8VF>;
+ }
+ let Predicates = [HasVLX, HasIFMA] in {
+ defm Z256 : avx512_pmadd52_rm<opc, OpcodeStr, OpNode, _.info256>,
+ EVEX_V256, EVEX_CD8<_.info256.EltSize, CD8VF>;
+ defm Z128 : avx512_pmadd52_rm<opc, OpcodeStr, OpNode, _.info128>,
+ EVEX_V128, EVEX_CD8<_.info128.EltSize, CD8VF>;
+ }
+}
+
+defm VPMADD52LUQ : avx512_pmadd52_common<0xb4, "vpmadd52luq", x86vpmadd52l,
+ avx512vl_i64_info>, VEX_W;
+defm VPMADD52HUQ : avx512_pmadd52_common<0xb5, "vpmadd52huq", x86vpmadd52h,
+ avx512vl_i64_info>, VEX_W;
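[Editor's note, illustrative only.] VPMADD52LUQ/VPMADD52HUQ multiply the low 52 bits of two qword sources into a 104-bit product and add its low (respectively high) 52 bits to the accumulator, which is what makes them useful for 52-bit-limb multiprecision arithmetic. A hedged sketch assuming the AVX512IFMA intrinsics _mm512_madd52lo_epu64/_mm512_madd52hi_epu64:

#include <immintrin.h>

// acc += low/high 52 bits of (b[51:0] * c[51:0]) per 64-bit lane.
__m512i madd52(__m512i acc_lo, __m512i acc_hi, __m512i b, __m512i c,
               __m512i *hi_out) {
  *hi_out = _mm512_madd52hi_epu64(acc_hi, b, c); // VPMADD52HUQ
  return _mm512_madd52lo_epu64(acc_lo, b, c);    // VPMADD52LUQ
}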
+
+//===----------------------------------------------------------------------===//
// AVX-512 Scalar convert from sign integer to float/double
//===----------------------------------------------------------------------===//
@@ -4848,54 +5120,65 @@ def : Pat<(f64 (uint_to_fp GR64:$src)),
//===----------------------------------------------------------------------===//
// AVX-512 Scalar convert from float/double to integer
//===----------------------------------------------------------------------===//
-multiclass avx512_cvt_s_int_round<bits<8> opc, RegisterClass SrcRC,
- RegisterClass DstRC, Intrinsic Int,
- Operand memop, ComplexPattern mem_cpat, string asm> {
- let hasSideEffects = 0, Predicates = [HasAVX512] in {
- def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
+multiclass avx512_cvt_s_int_round<bits<8> opc, X86VectorVTInfo SrcVT ,
+ X86VectorVTInfo DstVT, SDNode OpNode, string asm> {
+ let Predicates = [HasAVX512] in {
+ def rr : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst), (ins SrcVT.RC:$src),
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
- [(set DstRC:$dst, (Int SrcRC:$src))]>, EVEX, VEX_LIG;
- def rb : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
- !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"), []>,
+ [(set DstVT.RC:$dst, (OpNode (SrcVT.VT SrcVT.RC:$src),(i32 FROUND_CURRENT)))]>,
+ EVEX, VEX_LIG;
+ def rb : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst), (ins SrcVT.RC:$src, AVX512RC:$rc),
+ !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"),
+ [(set DstVT.RC:$dst, (OpNode (SrcVT.VT SrcVT.RC:$src),(i32 imm:$rc)))]>,
EVEX, VEX_LIG, EVEX_B, EVEX_RC;
- let mayLoad = 1 in
- def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
- !strconcat(asm,"\t{$src, $dst|$dst, $src}"), []>, EVEX, VEX_LIG;
- } // hasSideEffects = 0, Predicates = [HasAVX512]
+ def rm : SI<opc, MRMSrcMem, (outs DstVT.RC:$dst), (ins SrcVT.ScalarMemOp:$src),
+ !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
+ [(set DstVT.RC:$dst, (OpNode
+ (SrcVT.VT (scalar_to_vector (SrcVT.ScalarLdFrag addr:$src))),
+ (i32 FROUND_CURRENT)))]>,
+ EVEX, VEX_LIG;
+ } // Predicates = [HasAVX512]
}
// Convert float/double to signed/unsigned int 32/64
-defm VCVTSS2SIZ: avx512_cvt_s_int_round<0x2D, VR128X, GR32, int_x86_sse_cvtss2si,
- ssmem, sse_load_f32, "cvtss2si">,
+defm VCVTSS2SIZ: avx512_cvt_s_int_round<0x2D, f32x_info, i32x_info,
+ X86cvts2si, "cvtss2si">,
XS, EVEX_CD8<32, CD8VT1>;
-defm VCVTSS2SI64Z: avx512_cvt_s_int_round<0x2D, VR128X, GR64,
- int_x86_sse_cvtss2si64,
- ssmem, sse_load_f32, "cvtss2si">,
+defm VCVTSS2SI64Z: avx512_cvt_s_int_round<0x2D, f32x_info, i64x_info,
+ X86cvts2si, "cvtss2si">,
XS, VEX_W, EVEX_CD8<32, CD8VT1>;
-defm VCVTSS2USIZ: avx512_cvt_s_int_round<0x79, VR128X, GR32,
- int_x86_avx512_cvtss2usi,
- ssmem, sse_load_f32, "cvtss2usi">,
+defm VCVTSS2USIZ: avx512_cvt_s_int_round<0x79, f32x_info, i32x_info,
+ X86cvts2usi, "cvtss2usi">,
XS, EVEX_CD8<32, CD8VT1>;
-defm VCVTSS2USI64Z: avx512_cvt_s_int_round<0x79, VR128X, GR64,
- int_x86_avx512_cvtss2usi64, ssmem,
- sse_load_f32, "cvtss2usi">, XS, VEX_W,
+defm VCVTSS2USI64Z: avx512_cvt_s_int_round<0x79, f32x_info, i64x_info,
+ X86cvts2usi, "cvtss2usi">, XS, VEX_W,
EVEX_CD8<32, CD8VT1>;
-defm VCVTSD2SIZ: avx512_cvt_s_int_round<0x2D, VR128X, GR32, int_x86_sse2_cvtsd2si,
- sdmem, sse_load_f64, "cvtsd2si">,
+defm VCVTSD2SIZ: avx512_cvt_s_int_round<0x2D, f64x_info, i32x_info,
+ X86cvts2si, "cvtsd2si">,
XD, EVEX_CD8<64, CD8VT1>;
-defm VCVTSD2SI64Z: avx512_cvt_s_int_round<0x2D, VR128X, GR64,
- int_x86_sse2_cvtsd2si64,
- sdmem, sse_load_f64, "cvtsd2si">,
+defm VCVTSD2SI64Z: avx512_cvt_s_int_round<0x2D, f64x_info, i64x_info,
+ X86cvts2si, "cvtsd2si">,
XD, VEX_W, EVEX_CD8<64, CD8VT1>;
-defm VCVTSD2USIZ: avx512_cvt_s_int_round<0x79, VR128X, GR32,
- int_x86_avx512_cvtsd2usi,
- sdmem, sse_load_f64, "cvtsd2usi">,
+defm VCVTSD2USIZ: avx512_cvt_s_int_round<0x79, f64x_info, i32x_info,
+ X86cvts2usi, "cvtsd2usi">,
XD, EVEX_CD8<64, CD8VT1>;
-defm VCVTSD2USI64Z: avx512_cvt_s_int_round<0x79, VR128X, GR64,
- int_x86_avx512_cvtsd2usi64, sdmem,
- sse_load_f64, "cvtsd2usi">, XD, VEX_W,
+defm VCVTSD2USI64Z: avx512_cvt_s_int_round<0x79, f64x_info, i64x_info,
+ X86cvts2usi, "cvtsd2usi">, XD, VEX_W,
EVEX_CD8<64, CD8VT1>;
+// The SSE versions of these instructions are disabled when AVX512 is enabled.
+// Therefore, the SSE intrinsics are mapped to the AVX512 instructions.
+let Predicates = [HasAVX512] in {
+ def : Pat<(i32 (int_x86_sse_cvtss2si (v4f32 VR128X:$src))),
+ (VCVTSS2SIZrr (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
+ def : Pat<(i64 (int_x86_sse_cvtss2si64 (v4f32 VR128X:$src))),
+ (VCVTSS2SI64Zrr (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
+ def : Pat<(i32 (int_x86_sse2_cvtsd2si (v2f64 VR128X:$src))),
+ (VCVTSD2SIZrr (COPY_TO_REGCLASS VR128X:$src, FR64X))>;
+ def : Pat<(i64 (int_x86_sse2_cvtsd2si64 (v2f64 VR128X:$src))),
+ (VCVTSD2SI64Zrr (COPY_TO_REGCLASS VR128X:$src, FR64X))>;
+} // HasAVX512
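[Editor's note, illustrative only.] With AVX512 enabled the legacy CVTSS2SI/CVTSD2SI encodings are not emitted, so the SSE intrinsics are re-mapped onto the EVEX forms above; the new rb variants additionally expose an explicit static rounding mode. A sketch assuming the _mm_cvt_roundss_si32 and _mm_cvtss_u32 intrinsics:

#include <immintrin.h>

// VCVTSS2SI with embedded rounding ({rn-sae}), and the unsigned
// VCVTSS2USI, which has no SSE counterpart (current rounding mode).
int cvt_rn(__m128 x) {
  return _mm_cvt_roundss_si32(x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
unsigned cvt_u(__m128 x) {
  return _mm_cvtss_u32(x);
}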
+
let isCodeGenOnly = 1 , Predicates = [HasAVX512] in {
defm Int_VCVTSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
@@ -4910,14 +5193,14 @@ let isCodeGenOnly = 1 , Predicates = [HasAVX512] in {
int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
- defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
+ defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x7B, GR32, VR128X,
int_x86_avx512_cvtusi2sd, i32mem, loadi32, "cvtusi2sd{l}",
SSE_CVT_Scalar, 0>, XD, EVEX_4V;
} // isCodeGenOnly = 1, Predicates = [HasAVX512]
// Convert float/double to signed/unsigned int 32/64 with truncation
-multiclass avx512_cvt_s_all<bits<8> opc, string asm, X86VectorVTInfo _SrcRC,
- X86VectorVTInfo _DstRC, SDNode OpNode,
+multiclass avx512_cvt_s_all<bits<8> opc, string asm, X86VectorVTInfo _SrcRC,
+ X86VectorVTInfo _DstRC, SDNode OpNode,
SDNode OpNodeRnd>{
let Predicates = [HasAVX512] in {
def rr : SI<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.FRC:$src),
@@ -4926,56 +5209,56 @@ let Predicates = [HasAVX512] in {
def rb : SI<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.FRC:$src),
!strconcat(asm,"\t{{sae}, $src, $dst|$dst, $src, {sae}}"),
[]>, EVEX, EVEX_B;
- def rm : SI<opc, MRMSrcMem, (outs _DstRC.RC:$dst), (ins _SrcRC.MemOp:$src),
+ def rm : SI<opc, MRMSrcMem, (outs _DstRC.RC:$dst), (ins _SrcRC.ScalarMemOp:$src),
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
- [(set _DstRC.RC:$dst, (OpNode (_SrcRC.ScalarLdFrag addr:$src)))]>,
+ [(set _DstRC.RC:$dst, (OpNode (_SrcRC.ScalarLdFrag addr:$src)))]>,
EVEX;
- let isCodeGenOnly = 1,hasSideEffects = 0 in {
+ let isCodeGenOnly = 1 in {
def rr_Int : SI<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.RC:$src),
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
- [(set _DstRC.RC:$dst, (OpNodeRnd _SrcRC.RC:$src,
+ [(set _DstRC.RC:$dst, (OpNodeRnd (_SrcRC.VT _SrcRC.RC:$src),
(i32 FROUND_CURRENT)))]>, EVEX, VEX_LIG;
def rb_Int : SI<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.RC:$src),
!strconcat(asm,"\t{{sae}, $src, $dst|$dst, $src, {sae}}"),
- [(set _DstRC.RC:$dst, (OpNodeRnd _SrcRC.RC:$src,
- (i32 FROUND_NO_EXC)))]>,
+ [(set _DstRC.RC:$dst, (OpNodeRnd (_SrcRC.VT _SrcRC.RC:$src),
+ (i32 FROUND_NO_EXC)))]>,
EVEX,VEX_LIG , EVEX_B;
- let mayLoad = 1 in
- def rm_Int : SI<opc, MRMSrcMem, (outs _DstRC.RC:$dst),
+ let mayLoad = 1, hasSideEffects = 0 in
+ def rm_Int : SI<opc, MRMSrcMem, (outs _DstRC.RC:$dst),
(ins _SrcRC.MemOp:$src),
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
[]>, EVEX, VEX_LIG;
- } // isCodeGenOnly = 1, hasSideEffects = 0
+ } // isCodeGenOnly = 1
} //HasAVX512
}
-defm VCVTTSS2SIZ: avx512_cvt_s_all<0x2C, "cvttss2si", f32x_info, i32x_info,
- fp_to_sint,X86cvttss2IntRnd>,
+defm VCVTTSS2SIZ: avx512_cvt_s_all<0x2C, "cvttss2si", f32x_info, i32x_info,
+ fp_to_sint,X86cvtts2IntRnd>,
XS, EVEX_CD8<32, CD8VT1>;
-defm VCVTTSS2SI64Z: avx512_cvt_s_all<0x2C, "cvttss2si", f32x_info, i64x_info,
- fp_to_sint,X86cvttss2IntRnd>,
+defm VCVTTSS2SI64Z: avx512_cvt_s_all<0x2C, "cvttss2si", f32x_info, i64x_info,
+ fp_to_sint,X86cvtts2IntRnd>,
VEX_W, XS, EVEX_CD8<32, CD8VT1>;
-defm VCVTTSD2SIZ: avx512_cvt_s_all<0x2C, "cvttsd2si", f64x_info, i32x_info,
- fp_to_sint,X86cvttsd2IntRnd>,
+defm VCVTTSD2SIZ: avx512_cvt_s_all<0x2C, "cvttsd2si", f64x_info, i32x_info,
+ fp_to_sint,X86cvtts2IntRnd>,
XD, EVEX_CD8<64, CD8VT1>;
-defm VCVTTSD2SI64Z: avx512_cvt_s_all<0x2C, "cvttsd2si", f64x_info, i64x_info,
- fp_to_sint,X86cvttsd2IntRnd>,
+defm VCVTTSD2SI64Z: avx512_cvt_s_all<0x2C, "cvttsd2si", f64x_info, i64x_info,
+ fp_to_sint,X86cvtts2IntRnd>,
VEX_W, XD, EVEX_CD8<64, CD8VT1>;
-defm VCVTTSS2USIZ: avx512_cvt_s_all<0x78, "cvttss2usi", f32x_info, i32x_info,
- fp_to_uint,X86cvttss2UIntRnd>,
+defm VCVTTSS2USIZ: avx512_cvt_s_all<0x78, "cvttss2usi", f32x_info, i32x_info,
+ fp_to_uint,X86cvtts2UIntRnd>,
XS, EVEX_CD8<32, CD8VT1>;
-defm VCVTTSS2USI64Z: avx512_cvt_s_all<0x78, "cvttss2usi", f32x_info, i64x_info,
- fp_to_uint,X86cvttss2UIntRnd>,
+defm VCVTTSS2USI64Z: avx512_cvt_s_all<0x78, "cvttss2usi", f32x_info, i64x_info,
+ fp_to_uint,X86cvtts2UIntRnd>,
XS,VEX_W, EVEX_CD8<32, CD8VT1>;
-defm VCVTTSD2USIZ: avx512_cvt_s_all<0x78, "cvttsd2usi", f64x_info, i32x_info,
- fp_to_uint,X86cvttsd2UIntRnd>,
+defm VCVTTSD2USIZ: avx512_cvt_s_all<0x78, "cvttsd2usi", f64x_info, i32x_info,
+ fp_to_uint,X86cvtts2UIntRnd>,
XD, EVEX_CD8<64, CD8VT1>;
-defm VCVTTSD2USI64Z: avx512_cvt_s_all<0x78, "cvttsd2usi", f64x_info, i64x_info,
- fp_to_uint,X86cvttsd2UIntRnd>,
+defm VCVTTSD2USI64Z: avx512_cvt_s_all<0x78, "cvttsd2usi", f64x_info, i64x_info,
+ fp_to_uint,X86cvtts2UIntRnd>,
XD, VEX_W, EVEX_CD8<64, CD8VT1>;
let Predicates = [HasAVX512] in {
def : Pat<(i32 (int_x86_sse_cvttss2si (v4f32 VR128X:$src))),
@@ -4994,17 +5277,17 @@ let Predicates = [HasAVX512] in {
multiclass avx512_cvt_fp_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
X86VectorVTInfo _Src, SDNode OpNode> {
defm rr : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
- (ins _Src.RC:$src1, _Src.RC:$src2), OpcodeStr,
+ (ins _.RC:$src1, _Src.RC:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
- (_.VT (OpNode (_Src.VT _Src.RC:$src1),
- (_Src.VT _Src.RC:$src2)))>,
+ (_.VT (OpNode (_.VT _.RC:$src1),
+ (_Src.VT _Src.RC:$src2)))>,
EVEX_4V, VEX_LIG, Sched<[WriteCvtF2F]>;
defm rm : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
+ (ins _Src.RC:$src1, _Src.ScalarMemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
- (_.VT (OpNode (_Src.VT _Src.RC:$src1),
- (_Src.VT (scalar_to_vector
- (_Src.ScalarLdFrag addr:$src2)))))>,
+ (_.VT (OpNode (_.VT _.RC:$src1),
+ (_Src.VT (scalar_to_vector
+ (_Src.ScalarLdFrag addr:$src2)))))>,
EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>;
}
@@ -5012,9 +5295,9 @@ multiclass avx512_cvt_fp_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _
multiclass avx512_cvt_fp_sae_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
X86VectorVTInfo _Src, SDNode OpNodeRnd> {
defm rrb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
- (ins _Src.RC:$src1, _Src.RC:$src2), OpcodeStr,
+ (ins _.RC:$src1, _Src.RC:$src2), OpcodeStr,
"{sae}, $src2, $src1", "$src1, $src2, {sae}",
- (_.VT (OpNodeRnd (_Src.VT _Src.RC:$src1),
+ (_.VT (OpNodeRnd (_.VT _.RC:$src1),
(_Src.VT _Src.RC:$src2),
(i32 FROUND_NO_EXC)))>,
EVEX_4V, VEX_LIG, EVEX_B;
@@ -5024,15 +5307,15 @@ multiclass avx512_cvt_fp_sae_scalar<bits<8> opc, string OpcodeStr, X86VectorVTIn
multiclass avx512_cvt_fp_rc_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
X86VectorVTInfo _Src, SDNode OpNodeRnd> {
defm rrb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
- (ins _Src.RC:$src1, _Src.RC:$src2, AVX512RC:$rc), OpcodeStr,
+ (ins _.RC:$src1, _Src.RC:$src2, AVX512RC:$rc), OpcodeStr,
"$rc, $src2, $src1", "$src1, $src2, $rc",
- (_.VT (OpNodeRnd (_Src.VT _Src.RC:$src1),
+ (_.VT (OpNodeRnd (_.VT _.RC:$src1),
(_Src.VT _Src.RC:$src2), (i32 imm:$rc)))>,
EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>,
EVEX_B, EVEX_RC;
}
-multiclass avx512_cvt_fp_scalar_sd2ss<bits<8> opc, string OpcodeStr, SDNode OpNode,
- SDNode OpNodeRnd, X86VectorVTInfo _src,
+multiclass avx512_cvt_fp_scalar_sd2ss<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ SDNode OpNodeRnd, X86VectorVTInfo _src,
X86VectorVTInfo _dst> {
let Predicates = [HasAVX512] in {
defm Z : avx512_cvt_fp_scalar<opc, OpcodeStr, _dst, _src, OpNode>,
@@ -5042,22 +5325,22 @@ multiclass avx512_cvt_fp_scalar_sd2ss<bits<8> opc, string OpcodeStr, SDNode OpNo
}
}
-multiclass avx512_cvt_fp_scalar_ss2sd<bits<8> opc, string OpcodeStr, SDNode OpNode,
- SDNode OpNodeRnd, X86VectorVTInfo _src,
+multiclass avx512_cvt_fp_scalar_ss2sd<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ SDNode OpNodeRnd, X86VectorVTInfo _src,
X86VectorVTInfo _dst> {
let Predicates = [HasAVX512] in {
defm Z : avx512_cvt_fp_scalar<opc, OpcodeStr, _dst, _src, OpNode>,
- avx512_cvt_fp_sae_scalar<opc, OpcodeStr, _dst, _src, OpNodeRnd>,
+ avx512_cvt_fp_sae_scalar<opc, OpcodeStr, _dst, _src, OpNodeRnd>,
EVEX_CD8<32, CD8VT1>, XS, EVEX_V512;
}
}
defm VCVTSD2SS : avx512_cvt_fp_scalar_sd2ss<0x5A, "vcvtsd2ss", X86fround,
X86froundRnd, f64x_info, f32x_info>;
-defm VCVTSS2SD : avx512_cvt_fp_scalar_ss2sd<0x5A, "vcvtss2sd", X86fpext,
+defm VCVTSS2SD : avx512_cvt_fp_scalar_ss2sd<0x5A, "vcvtss2sd", X86fpext,
X86fpextRnd,f32x_info, f64x_info >;
-def : Pat<(f64 (fextend FR32X:$src)),
- (COPY_TO_REGCLASS (VCVTSS2SDZrr (COPY_TO_REGCLASS FR32X:$src, VR128X),
+def : Pat<(f64 (fextend FR32X:$src)),
+ (COPY_TO_REGCLASS (VCVTSS2SDZrr (COPY_TO_REGCLASS FR32X:$src, VR128X),
(COPY_TO_REGCLASS FR32X:$src, VR128X)), VR128X)>,
Requires<[HasAVX512]>;
def : Pat<(f64 (fextend (loadf32 addr:$src))),
@@ -5069,12 +5352,12 @@ def : Pat<(f64 (extloadf32 addr:$src)),
Requires<[HasAVX512, OptForSize]>;
def : Pat<(f64 (extloadf32 addr:$src)),
- (COPY_TO_REGCLASS (VCVTSS2SDZrr (v4f32 (IMPLICIT_DEF)),
+ (COPY_TO_REGCLASS (VCVTSS2SDZrr (v4f32 (IMPLICIT_DEF)),
(COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)), VR128X)>,
Requires<[HasAVX512, OptForSpeed]>;
-def : Pat<(f32 (fround FR64X:$src)),
- (COPY_TO_REGCLASS (VCVTSD2SSZrr (COPY_TO_REGCLASS FR64X:$src, VR128X),
+def : Pat<(f32 (fround FR64X:$src)),
+ (COPY_TO_REGCLASS (VCVTSD2SSZrr (COPY_TO_REGCLASS FR64X:$src, VR128X),
(COPY_TO_REGCLASS FR64X:$src, VR128X)), VR128X)>,
Requires<[HasAVX512]>;
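
A minimal sketch of the scalar conversions the fextend/fround patterns above match; on an AVX-512 target these lower to the VCVTSS2SDZrr/VCVTSD2SSZrr instructions referenced in the patterns:

double widen(float x)   { return (double)x; }   /* fextend -> vcvtss2sd */
float  narrow(double x) { return (float)x;  }   /* fround  -> vcvtsd2ss */
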
//===----------------------------------------------------------------------===//
@@ -5097,7 +5380,7 @@ multiclass avx512_vcvt_fp<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
(bitconvert (_Src.LdFrag addr:$src)))))>, EVEX;
defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _Src.MemOp:$src), OpcodeStr,
+ (ins _Src.ScalarMemOp:$src), OpcodeStr,
"${src}"##Broadcast, "${src}"##Broadcast,
(_.VT (OpNode (_Src.VT
(X86VBroadcast (_Src.ScalarLdFrag addr:$src)))
@@ -5405,59 +5688,59 @@ defm VCVTUDQ2PS : avx512_cvtdq2ps<0x7A, "vcvtudq2ps", uint_to_fp,
X86VUintToFpRnd>, XD,
EVEX_CD8<32, CD8VF>;
-defm VCVTPS2DQ : avx512_cvtps2dq<0x5B, "vcvtps2dq", X86cvtps2Int,
- X86cvtps2IntRnd>, PD, EVEX_CD8<32, CD8VF>;
+defm VCVTPS2DQ : avx512_cvtps2dq<0x5B, "vcvtps2dq", X86cvtp2Int,
+ X86cvtp2IntRnd>, PD, EVEX_CD8<32, CD8VF>;
-defm VCVTPD2DQ : avx512_cvtpd2dq<0xE6, "vcvtpd2dq", X86cvtpd2Int,
- X86cvtpd2IntRnd>, XD, VEX_W,
+defm VCVTPD2DQ : avx512_cvtpd2dq<0xE6, "vcvtpd2dq", X86cvtp2Int,
+ X86cvtp2IntRnd>, XD, VEX_W,
EVEX_CD8<64, CD8VF>;
-defm VCVTPS2UDQ : avx512_cvtps2dq<0x79, "vcvtps2udq", X86cvtps2UInt,
- X86cvtps2UIntRnd>,
+defm VCVTPS2UDQ : avx512_cvtps2dq<0x79, "vcvtps2udq", X86cvtp2UInt,
+ X86cvtp2UIntRnd>,
PS, EVEX_CD8<32, CD8VF>;
-defm VCVTPD2UDQ : avx512_cvtpd2dq<0x79, "vcvtpd2udq", X86cvtpd2UInt,
- X86cvtpd2UIntRnd>, VEX_W,
+defm VCVTPD2UDQ : avx512_cvtpd2dq<0x79, "vcvtpd2udq", X86cvtp2UInt,
+ X86cvtp2UIntRnd>, VEX_W,
PS, EVEX_CD8<64, CD8VF>;
-defm VCVTPD2QQ : avx512_cvtpd2qq<0x7B, "vcvtpd2qq", X86cvtpd2Int,
- X86cvtpd2IntRnd>, VEX_W,
+defm VCVTPD2QQ : avx512_cvtpd2qq<0x7B, "vcvtpd2qq", X86cvtp2Int,
+ X86cvtp2IntRnd>, VEX_W,
PD, EVEX_CD8<64, CD8VF>;
-defm VCVTPS2QQ : avx512_cvtps2qq<0x7B, "vcvtps2qq", X86cvtps2Int,
- X86cvtps2IntRnd>, PD, EVEX_CD8<32, CD8VH>;
+defm VCVTPS2QQ : avx512_cvtps2qq<0x7B, "vcvtps2qq", X86cvtp2Int,
+ X86cvtp2IntRnd>, PD, EVEX_CD8<32, CD8VH>;
-defm VCVTPD2UQQ : avx512_cvtpd2qq<0x79, "vcvtpd2uqq", X86cvtpd2UInt,
- X86cvtpd2UIntRnd>, VEX_W,
+defm VCVTPD2UQQ : avx512_cvtpd2qq<0x79, "vcvtpd2uqq", X86cvtp2UInt,
+ X86cvtp2UIntRnd>, VEX_W,
PD, EVEX_CD8<64, CD8VF>;
-defm VCVTPS2UQQ : avx512_cvtps2qq<0x79, "vcvtps2uqq", X86cvtps2UInt,
- X86cvtps2UIntRnd>, PD, EVEX_CD8<32, CD8VH>;
+defm VCVTPS2UQQ : avx512_cvtps2qq<0x79, "vcvtps2uqq", X86cvtp2UInt,
+ X86cvtp2UIntRnd>, PD, EVEX_CD8<32, CD8VH>;
defm VCVTTPD2QQ : avx512_cvttpd2qq<0x7A, "vcvttpd2qq", fp_to_sint,
- X86VFpToSlongRnd>, VEX_W,
+ X86VFpToSintRnd>, VEX_W,
PD, EVEX_CD8<64, CD8VF>;
defm VCVTTPS2QQ : avx512_cvttps2qq<0x7A, "vcvttps2qq", fp_to_sint,
- X86VFpToSlongRnd>, PD, EVEX_CD8<32, CD8VH>;
+ X86VFpToSintRnd>, PD, EVEX_CD8<32, CD8VH>;
defm VCVTTPD2UQQ : avx512_cvttpd2qq<0x78, "vcvttpd2uqq", fp_to_uint,
- X86VFpToUlongRnd>, VEX_W,
+ X86VFpToUintRnd>, VEX_W,
PD, EVEX_CD8<64, CD8VF>;
defm VCVTTPS2UQQ : avx512_cvttps2qq<0x78, "vcvttps2uqq", fp_to_uint,
- X86VFpToUlongRnd>, PD, EVEX_CD8<32, CD8VH>;
+ X86VFpToUintRnd>, PD, EVEX_CD8<32, CD8VH>;
defm VCVTQQ2PD : avx512_cvtqq2pd<0xE6, "vcvtqq2pd", sint_to_fp,
- X86VSlongToFpRnd>, VEX_W, XS, EVEX_CD8<64, CD8VF>;
+ X86VSintToFpRnd>, VEX_W, XS, EVEX_CD8<64, CD8VF>;
defm VCVTUQQ2PD : avx512_cvtqq2pd<0x7A, "vcvtuqq2pd", uint_to_fp,
- X86VUlongToFpRnd>, VEX_W, XS, EVEX_CD8<64, CD8VF>;
+ X86VUintToFpRnd>, VEX_W, XS, EVEX_CD8<64, CD8VF>;
defm VCVTQQ2PS : avx512_cvtqq2ps<0x5B, "vcvtqq2ps", sint_to_fp,
- X86VSlongToFpRnd>, VEX_W, PS, EVEX_CD8<64, CD8VF>;
+ X86VSintToFpRnd>, VEX_W, PS, EVEX_CD8<64, CD8VF>;
defm VCVTUQQ2PS : avx512_cvtqq2ps<0x7A, "vcvtuqq2ps", uint_to_fp,
- X86VUlongToFpRnd>, VEX_W, XD, EVEX_CD8<64, CD8VF>;
+ X86VUintToFpRnd>, VEX_W, XD, EVEX_CD8<64, CD8VF>;
let Predicates = [HasAVX512, NoVLX] in {
def : Pat<(v8i32 (fp_to_uint (v8f32 VR256X:$src1))),
@@ -5468,6 +5751,10 @@ def : Pat<(v4i32 (fp_to_uint (v4f32 VR128X:$src1))),
(EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
(v16f32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
+def : Pat<(v4i32 (fp_to_uint (v4f64 VR256X:$src1))),
+ (EXTRACT_SUBREG (v8i32 (VCVTTPD2UDQZrr
+ (v8f64 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_xmm)>;
+
def : Pat<(v8f32 (uint_to_fp (v8i32 VR256X:$src1))),
(EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
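
A sketch of the case the newly added pattern covers, assuming the GCC/Clang vector_size extension: built for AVX-512F without VLX, a v4f64 -> v4i32 fp_to_uint is handled by widening to the 512-bit vcvttpd2udq and taking the low xmm, rather than being scalarized, which is what the SUBREG_TO_REG/EXTRACT_SUBREG pattern expresses.

typedef double   v4f64 __attribute__((vector_size(32)));
typedef unsigned v4u32 __attribute__((vector_size(16)));

v4u32 trunc_to_u32(v4f64 x) {
  /* round-toward-zero double -> unsigned conversion, four lanes */
  return (v4u32){ (unsigned)x[0], (unsigned)x[1],
                  (unsigned)x[2], (unsigned)x[3] };
}
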
@@ -5491,18 +5778,16 @@ let Predicates = [HasAVX512] in {
//===----------------------------------------------------------------------===//
// Half precision conversion instructions
//===----------------------------------------------------------------------===//
-multiclass avx512_cvtph2ps<X86VectorVTInfo _dest, X86VectorVTInfo _src,
+multiclass avx512_cvtph2ps<X86VectorVTInfo _dest, X86VectorVTInfo _src,
X86MemOperand x86memop, PatFrag ld_frag> {
defm rr : AVX512_maskable<0x13, MRMSrcReg, _dest ,(outs _dest.RC:$dst), (ins _src.RC:$src),
"vcvtph2ps", "$src", "$src",
(X86cvtph2ps (_src.VT _src.RC:$src),
(i32 FROUND_CURRENT))>, T8PD;
- let hasSideEffects = 0, mayLoad = 1 in {
- defm rm : AVX512_maskable<0x13, MRMSrcMem, _dest, (outs _dest.RC:$dst), (ins x86memop:$src),
- "vcvtph2ps", "$src", "$src",
- (X86cvtph2ps (_src.VT (bitconvert (ld_frag addr:$src))),
- (i32 FROUND_CURRENT))>, T8PD;
- }
+ defm rm : AVX512_maskable<0x13, MRMSrcMem, _dest, (outs _dest.RC:$dst), (ins x86memop:$src),
+ "vcvtph2ps", "$src", "$src",
+ (X86cvtph2ps (_src.VT (bitconvert (ld_frag addr:$src))),
+ (i32 FROUND_CURRENT))>, T8PD;
}
multiclass avx512_cvtph2ps_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src> {
@@ -5515,44 +5800,45 @@ multiclass avx512_cvtph2ps_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src> {
let Predicates = [HasAVX512] in {
defm VCVTPH2PSZ : avx512_cvtph2ps<v16f32_info, v16i16x_info, f256mem, loadv4i64>,
- avx512_cvtph2ps_sae<v16f32_info, v16i16x_info>,
+ avx512_cvtph2ps_sae<v16f32_info, v16i16x_info>,
EVEX, EVEX_V512, EVEX_CD8<32, CD8VH>;
let Predicates = [HasVLX] in {
- defm VCVTPH2PSZ256 : avx512_cvtph2ps<v8f32x_info, v8i16x_info, f128mem,
+ defm VCVTPH2PSZ256 : avx512_cvtph2ps<v8f32x_info, v8i16x_info, f128mem,
loadv2i64>,EVEX, EVEX_V256, EVEX_CD8<32, CD8VH>;
defm VCVTPH2PSZ128 : avx512_cvtph2ps<v4f32x_info, v8i16x_info, f64mem,
loadv2i64>, EVEX, EVEX_V128, EVEX_CD8<32, CD8VH>;
}
}
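
Usage sketch for the half-precision conversions in this section (the cvtps2ph side is defined just below), via the AVX-512F intrinsics:

#include <immintrin.h>

__m512  half_to_float(__m256i h) { return _mm512_cvtph_ps(h); }   /* vcvtph2ps */
__m256i float_to_half(__m512 f) {                                 /* vcvtps2ph */
  return _mm512_cvtps_ph(f, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
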
-multiclass avx512_cvtps2ph<X86VectorVTInfo _dest, X86VectorVTInfo _src,
+multiclass avx512_cvtps2ph<X86VectorVTInfo _dest, X86VectorVTInfo _src,
X86MemOperand x86memop> {
defm rr : AVX512_maskable<0x1D, MRMDestReg, _dest ,(outs _dest.RC:$dst),
- (ins _src.RC:$src1, i32u8imm:$src2),
- "vcvtps2ph", "$src2, $src1", "$src1, $src2",
+ (ins _src.RC:$src1, i32u8imm:$src2),
+ "vcvtps2ph", "$src2, $src1", "$src1, $src2",
(X86cvtps2ph (_src.VT _src.RC:$src1),
- (i32 imm:$src2),
- (i32 FROUND_CURRENT))>, AVX512AIi8Base;
- let hasSideEffects = 0, mayStore = 1 in {
- def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
- (ins x86memop:$dst, _src.RC:$src1, i32u8imm:$src2),
- "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(store (_dest.VT (X86cvtps2ph (_src.VT _src.RC:$src1),
- (i32 imm:$src2), (i32 FROUND_CURRENT) )),
- addr:$dst)]>;
- def mrk : AVX512AIi8<0x1D, MRMDestMem, (outs),
- (ins x86memop:$dst, _dest.KRCWM:$mask, _src.RC:$src1, i32u8imm:$src2),
- "vcvtps2ph\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
- []>, EVEX_K;
- }
+ (i32 imm:$src2),
+ (i32 FROUND_CURRENT)),
+ NoItinerary, 0, X86select>, AVX512AIi8Base;
+ def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
+ (ins x86memop:$dst, _src.RC:$src1, i32u8imm:$src2),
+ "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(store (_dest.VT (X86cvtps2ph (_src.VT _src.RC:$src1),
+ (i32 imm:$src2), (i32 FROUND_CURRENT) )),
+ addr:$dst)]>;
+ let hasSideEffects = 0, mayStore = 1 in
+ def mrk : AVX512AIi8<0x1D, MRMDestMem, (outs),
+ (ins x86memop:$dst, _dest.KRCWM:$mask, _src.RC:$src1, i32u8imm:$src2),
+ "vcvtps2ph\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
+ []>, EVEX_K;
}
multiclass avx512_cvtps2ph_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src> {
defm rb : AVX512_maskable<0x1D, MRMDestReg, _dest ,(outs _dest.RC:$dst),
- (ins _src.RC:$src1, i32u8imm:$src2),
- "vcvtps2ph", "$src2, {sae}, $src1", "$src1, $src2, {sae}",
+ (ins _src.RC:$src1, i32u8imm:$src2),
+ "vcvtps2ph", "$src2, {sae}, $src1", "$src1, {sae}, $src2",
(X86cvtps2ph (_src.VT _src.RC:$src1),
- (i32 imm:$src2),
- (i32 FROUND_NO_EXC))>, EVEX_B, AVX512AIi8Base;
+ (i32 imm:$src2),
+ (i32 FROUND_NO_EXC)),
+ NoItinerary, 0, X86select>, EVEX_B, AVX512AIi8Base;
}
let Predicates = [HasAVX512] in {
defm VCVTPS2PHZ : avx512_cvtps2ph<v16i16x_info, v16f32_info, f256mem>,
@@ -5571,7 +5857,7 @@ multiclass avx512_ord_cmp_sae<bits<8> opc, X86VectorVTInfo _, SDNode OpNode,
string OpcodeStr> {
def rb: AVX512<opc, MRMSrcReg, (outs), (ins _.RC:$src1, _.RC:$src2),
!strconcat(OpcodeStr, "\t{{sae}, $src2, $src1|$src1, $src2, {sae}}"),
- [(set EFLAGS, (OpNode (_.VT _.RC:$src1), _.RC:$src2,
+ [(set EFLAGS, (OpNode (_.VT _.RC:$src1), _.RC:$src2,
(i32 FROUND_NO_EXC)))],
IIC_SSE_COMIS_RR>, EVEX, EVEX_B, VEX_LIG, EVEX_V128,
Sched<[WriteFAdd]>;
@@ -5623,18 +5909,16 @@ let Defs = [EFLAGS], Predicates = [HasAVX512] in {
/// avx512_fp14_s rcp14ss, rcp14sd, rsqrt14ss, rsqrt14sd
multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
- let hasSideEffects = 0, AddedComplexity = 20 , Predicates = [HasAVX512] in {
+ let AddedComplexity = 20 , Predicates = [HasAVX512] in {
defm rr : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
(OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))>, EVEX_4V;
- let mayLoad = 1 in {
defm rm : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
(OpNode (_.VT _.RC:$src1),
(_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))))>, EVEX_4V;
- }
}
}
@@ -5653,18 +5937,16 @@ multiclass avx512_fp14_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src), OpcodeStr, "$src", "$src",
(_.FloatVT (OpNode _.RC:$src))>, EVEX, T8PD;
- let mayLoad = 1 in {
- defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
- (OpNode (_.FloatVT
- (bitconvert (_.LdFrag addr:$src))))>, EVEX, T8PD;
- defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.ScalarMemOp:$src), OpcodeStr,
- "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
- (OpNode (_.FloatVT
- (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
- EVEX, T8PD, EVEX_B;
- }
+ defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
+ (OpNode (_.FloatVT
+ (bitconvert (_.LdFrag addr:$src))))>, EVEX, T8PD;
+ defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.ScalarMemOp:$src), OpcodeStr,
+ "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
+ (OpNode (_.FloatVT
+ (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
+ EVEX, T8PD, EVEX_B;
}
multiclass avx512_fp14_p_vl_all<bits<8> opc, string OpcodeStr, SDNode OpNode> {
@@ -5710,7 +5992,7 @@ multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
(i32 FROUND_NO_EXC))>, EVEX_B;
defm m : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
(OpNode (_.VT _.RC:$src1),
(_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
@@ -5724,7 +6006,7 @@ multiclass avx512_eri_s<bits<8> opc, string OpcodeStr, SDNode OpNode> {
EVEX_CD8<64, CD8VT1>, VEX_W;
}
-let hasSideEffects = 0, Predicates = [HasERI] in {
+let Predicates = [HasERI] in {
defm VRCP28 : avx512_eri_s<0xCB, "vrcp28", X86rcp28s>, T8PD, EVEX_4V;
defm VRSQRT28 : avx512_eri_s<0xCD, "vrsqrt28", X86rsqrt28s>, T8PD, EVEX_4V;
}
@@ -5746,7 +6028,7 @@ multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
(i32 FROUND_CURRENT))>;
defm mb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.MemOp:$src), OpcodeStr,
+ (ins _.ScalarMemOp:$src), OpcodeStr,
"${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
(OpNode (_.FloatVT
(X86VBroadcast (_.ScalarLdFrag addr:$src))),
@@ -5783,7 +6065,7 @@ multiclass avx512_fp_unaryop_packed<bits<8> opc, string OpcodeStr,
EVEX_V256, VEX_W, T8PD, EVEX_CD8<64, CD8VF>;
}
}
-let Predicates = [HasERI], hasSideEffects = 0 in {
+let Predicates = [HasERI] in {
defm VRSQRT28 : avx512_eri<0xCC, "vrsqrt28", X86rsqrt28>, EVEX;
defm VRCP28 : avx512_eri<0xCA, "vrcp28", X86rcp28>, EVEX;
@@ -5805,19 +6087,17 @@ multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr,
defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src), OpcodeStr, "$src", "$src",
(_.FloatVT (OpNode _.RC:$src))>, EVEX;
- let mayLoad = 1 in {
- defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
- (OpNode (_.FloatVT
- (bitconvert (_.LdFrag addr:$src))))>, EVEX;
+ defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.MemOp:$src), OpcodeStr, "$src", "$src",
+ (OpNode (_.FloatVT
+ (bitconvert (_.LdFrag addr:$src))))>, EVEX;
- defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.ScalarMemOp:$src), OpcodeStr,
- "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
- (OpNode (_.FloatVT
- (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
- EVEX, EVEX_B;
- }
+ defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.ScalarMemOp:$src), OpcodeStr,
+ "${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
+ (OpNode (_.FloatVT
+ (X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
+ EVEX, EVEX_B;
}
multiclass avx512_sqrt_packed_all<bits<8> opc, string OpcodeStr,
@@ -5862,14 +6142,13 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
(OpNodeRnd (_.VT _.RC:$src1),
(_.VT _.RC:$src2),
(i32 FROUND_CURRENT))>;
- let mayLoad = 1 in
- defm m_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
- "$src2, $src1", "$src1, $src2",
- (OpNodeRnd (_.VT _.RC:$src1),
- (_.VT (scalar_to_vector
- (_.ScalarLdFrag addr:$src2))),
- (i32 FROUND_CURRENT))>;
+ defm m_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
+ "$src2, $src1", "$src1, $src2",
+ (OpNodeRnd (_.VT _.RC:$src1),
+ (_.VT (scalar_to_vector
+ (_.ScalarLdFrag addr:$src2))),
+ (i32 FROUND_CURRENT))>;
defm rb_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr,
@@ -5879,7 +6158,7 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
(i32 imm:$rc))>,
EVEX_B, EVEX_RC;
- let isCodeGenOnly = 1 in {
+ let isCodeGenOnly = 1, hasSideEffects = 0 in {
def r : I<opc, MRMSrcReg, (outs _.FRC:$dst),
(ins _.FRC:$src1, _.FRC:$src2),
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>;
@@ -5940,9 +6219,9 @@ avx512_rndscale_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _> {
(_.VT (X86RndScales (_.VT _.RC:$src1), (_.VT _.RC:$src2),
(i32 imm:$src3), (i32 FROUND_NO_EXC)))>, EVEX_B;
- let mayLoad = 1 in
defm m : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3), OpcodeStr,
+ (ins _.RC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3),
+ OpcodeStr,
"$src3, $src2, $src1", "$src1, $src2, $src3",
(_.VT (X86RndScales (_.VT _.RC:$src1),
(_.VT (scalar_to_vector (_.ScalarLdFrag addr:$src2))),
@@ -6022,7 +6301,7 @@ multiclass avx512_trunc_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
DestInfo.KRCWM:$mask ,
SrcInfo.RC:$src1)>;
- let mayStore = 1 in {
+ let mayStore = 1, mayLoad = 1, hasSideEffects = 0 in {
def mr : AVX512XS8I<opc, MRMDestMem, (outs),
(ins x86memop:$dst, SrcInfo.RC:$src),
OpcodeStr # "\t{$src, $dst|$dst, $src}",
@@ -6032,7 +6311,7 @@ multiclass avx512_trunc_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins x86memop:$dst, SrcInfo.KRCWM:$mask, SrcInfo.RC:$src),
OpcodeStr # "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}",
[]>, EVEX, EVEX_K;
- }//mayStore = 1
+ }//mayStore = 1, mayLoad = 1, hasSideEffects = 0
}
multiclass avx512_trunc_mr_lowering<X86VectorVTInfo SrcInfo,
@@ -6223,23 +6502,21 @@ def: Pat<(v16i8 (X86vtrunc (v16i16 VR256X:$src))),
}
multiclass avx512_extend_common<bits<8> opc, string OpcodeStr,
- X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo,
- X86MemOperand x86memop, PatFrag LdFrag, SDNode OpNode>{
-
+ X86VectorVTInfo DestInfo, X86VectorVTInfo SrcInfo,
+ X86MemOperand x86memop, PatFrag LdFrag, SDPatternOperator OpNode>{
defm rr : AVX512_maskable<opc, MRMSrcReg, DestInfo, (outs DestInfo.RC:$dst),
(ins SrcInfo.RC:$src), OpcodeStr ,"$src", "$src",
(DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src)))>,
EVEX;
- let mayLoad = 1 in {
- defm rm : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
- (ins x86memop:$src), OpcodeStr ,"$src", "$src",
- (DestInfo.VT (LdFrag addr:$src))>,
- EVEX;
- }
+ defm rm : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
+ (ins x86memop:$src), OpcodeStr ,"$src", "$src",
+ (DestInfo.VT (LdFrag addr:$src))>,
+ EVEX;
}
-multiclass avx512_extend_BW<bits<8> opc, string OpcodeStr, SDNode OpNode,
+multiclass avx512_extend_BW<bits<8> opc, string OpcodeStr,
+ SDPatternOperator OpNode,
string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi8")> {
let Predicates = [HasVLX, HasBWI] in {
defm Z128: avx512_extend_common<opc, OpcodeStr, v8i16x_info,
@@ -6257,7 +6534,8 @@ multiclass avx512_extend_BW<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
}
-multiclass avx512_extend_BD<bits<8> opc, string OpcodeStr, SDNode OpNode,
+multiclass avx512_extend_BD<bits<8> opc, string OpcodeStr,
+ SDPatternOperator OpNode,
string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi8")> {
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_extend_common<opc, OpcodeStr, v4i32x_info,
@@ -6275,7 +6553,8 @@ multiclass avx512_extend_BD<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
}
-multiclass avx512_extend_BQ<bits<8> opc, string OpcodeStr, SDNode OpNode,
+multiclass avx512_extend_BQ<bits<8> opc, string OpcodeStr,
+ SDPatternOperator OpNode,
string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi8")> {
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_extend_common<opc, OpcodeStr, v2i64x_info,
@@ -6293,7 +6572,8 @@ multiclass avx512_extend_BQ<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
}
-multiclass avx512_extend_WD<bits<8> opc, string OpcodeStr, SDNode OpNode,
+multiclass avx512_extend_WD<bits<8> opc, string OpcodeStr,
+ SDPatternOperator OpNode,
string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi16")> {
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_extend_common<opc, OpcodeStr, v4i32x_info,
@@ -6311,7 +6591,8 @@ multiclass avx512_extend_WD<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
}
-multiclass avx512_extend_WQ<bits<8> opc, string OpcodeStr, SDNode OpNode,
+multiclass avx512_extend_WQ<bits<8> opc, string OpcodeStr,
+ SDPatternOperator OpNode,
string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi16")> {
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_extend_common<opc, OpcodeStr, v2i64x_info,
@@ -6329,7 +6610,8 @@ multiclass avx512_extend_WQ<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
}
-multiclass avx512_extend_DQ<bits<8> opc, string OpcodeStr, SDNode OpNode,
+multiclass avx512_extend_DQ<bits<8> opc, string OpcodeStr,
+ SDPatternOperator OpNode,
string ExtTy,PatFrag LdFrag = !cast<PatFrag>(ExtTy#"extloadvi32")> {
let Predicates = [HasVLX, HasAVX512] in {
@@ -6355,7 +6637,6 @@ defm VPMOVZXWD : avx512_extend_WD<0x33, "vpmovzxwd", X86vzext, "z">;
defm VPMOVZXWQ : avx512_extend_WQ<0x34, "vpmovzxwq", X86vzext, "z">;
defm VPMOVZXDQ : avx512_extend_DQ<0x35, "vpmovzxdq", X86vzext, "z">;
-
defm VPMOVSXBW: avx512_extend_BW<0x20, "vpmovsxbw", X86vsext, "s">;
defm VPMOVSXBD: avx512_extend_BD<0x21, "vpmovsxbd", X86vsext, "s">;
defm VPMOVSXBQ: avx512_extend_BQ<0x22, "vpmovsxbq", X86vsext, "s">;
@@ -6363,6 +6644,47 @@ defm VPMOVSXWD: avx512_extend_WD<0x23, "vpmovsxwd", X86vsext, "s">;
defm VPMOVSXWQ: avx512_extend_WQ<0x24, "vpmovsxwq", X86vsext, "s">;
defm VPMOVSXDQ: avx512_extend_DQ<0x25, "vpmovsxdq", X86vsext, "s">;
+// EXTLOAD patterns, implemented using vpmovzx
+multiclass avx512_ext_lowering<string InstrStr, X86VectorVTInfo To,
+ X86VectorVTInfo From, PatFrag LdFrag> {
+ def : Pat<(To.VT (LdFrag addr:$src)),
+ (!cast<Instruction>("VPMOVZX"#InstrStr#"rm") addr:$src)>;
+ def : Pat<(To.VT (vselect To.KRCWM:$mask, (LdFrag addr:$src), To.RC:$src0)),
+ (!cast<Instruction>("VPMOVZX"#InstrStr#"rmk") To.RC:$src0,
+ To.KRC:$mask, addr:$src)>;
+ def : Pat<(To.VT (vselect To.KRCWM:$mask, (LdFrag addr:$src),
+ To.ImmAllZerosV)),
+ (!cast<Instruction>("VPMOVZX"#InstrStr#"rmkz") To.KRC:$mask,
+ addr:$src)>;
+}
+
+let Predicates = [HasVLX, HasBWI] in {
+ defm : avx512_ext_lowering<"BWZ128", v8i16x_info, v16i8x_info, extloadvi8>;
+ defm : avx512_ext_lowering<"BWZ256", v16i16x_info, v16i8x_info, extloadvi8>;
+}
+let Predicates = [HasBWI] in {
+ defm : avx512_ext_lowering<"BWZ", v32i16_info, v32i8x_info, extloadvi8>;
+}
+let Predicates = [HasVLX, HasAVX512] in {
+ defm : avx512_ext_lowering<"BDZ128", v4i32x_info, v16i8x_info, extloadvi8>;
+ defm : avx512_ext_lowering<"BDZ256", v8i32x_info, v16i8x_info, extloadvi8>;
+ defm : avx512_ext_lowering<"BQZ128", v2i64x_info, v16i8x_info, extloadvi8>;
+ defm : avx512_ext_lowering<"BQZ256", v4i64x_info, v16i8x_info, extloadvi8>;
+ defm : avx512_ext_lowering<"WDZ128", v4i32x_info, v8i16x_info, extloadvi16>;
+ defm : avx512_ext_lowering<"WDZ256", v8i32x_info, v8i16x_info, extloadvi16>;
+ defm : avx512_ext_lowering<"WQZ128", v2i64x_info, v8i16x_info, extloadvi16>;
+ defm : avx512_ext_lowering<"WQZ256", v4i64x_info, v8i16x_info, extloadvi16>;
+ defm : avx512_ext_lowering<"DQZ128", v2i64x_info, v4i32x_info, extloadvi32>;
+ defm : avx512_ext_lowering<"DQZ256", v4i64x_info, v4i32x_info, extloadvi32>;
+}
+let Predicates = [HasAVX512] in {
+ defm : avx512_ext_lowering<"BDZ", v16i32_info, v16i8x_info, extloadvi8>;
+ defm : avx512_ext_lowering<"BQZ", v8i64_info, v16i8x_info, extloadvi8>;
+ defm : avx512_ext_lowering<"WDZ", v16i32_info, v16i16x_info, extloadvi16>;
+ defm : avx512_ext_lowering<"WQZ", v8i64_info, v8i16x_info, extloadvi16>;
+ defm : avx512_ext_lowering<"DQZ", v8i64_info, v8i32x_info, extloadvi32>;
+}
+
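
A sketch of what the avx512_ext_lowering patterns buy, assuming AVX-512F intrinsics: an extending vector load folds into a single vpmovzx with a memory operand instead of a separate load plus extend (the function name is illustrative).

#include <immintrin.h>

__m512i load_u8_as_u32(const void *p) {
  /* 16 x u8 -> 16 x u32; selectable as a single vpmovzxbd (%rdi), %zmm0 */
  return _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i *)p));
}
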
//===----------------------------------------------------------------------===//
// GATHER - SCATTER Operations
@@ -6383,34 +6705,34 @@ multiclass avx512_gather<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
multiclass avx512_gather_q_pd<bits<8> dopc, bits<8> qopc,
AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
defm NAME##D##SUFF##Z: avx512_gather<dopc, OpcodeStr##"d", _.info512,
- vy32xmem, mgatherv8i32>, EVEX_V512, VEX_W;
+ vy512mem, mgatherv8i32>, EVEX_V512, VEX_W;
defm NAME##Q##SUFF##Z: avx512_gather<qopc, OpcodeStr##"q", _.info512,
- vz64mem, mgatherv8i64>, EVEX_V512, VEX_W;
+ vz512mem, mgatherv8i64>, EVEX_V512, VEX_W;
let Predicates = [HasVLX] in {
defm NAME##D##SUFF##Z256: avx512_gather<dopc, OpcodeStr##"d", _.info256,
- vx32xmem, mgatherv4i32>, EVEX_V256, VEX_W;
+ vx256xmem, mgatherv4i32>, EVEX_V256, VEX_W;
defm NAME##Q##SUFF##Z256: avx512_gather<qopc, OpcodeStr##"q", _.info256,
- vy64xmem, mgatherv4i64>, EVEX_V256, VEX_W;
+ vy256xmem, mgatherv4i64>, EVEX_V256, VEX_W;
defm NAME##D##SUFF##Z128: avx512_gather<dopc, OpcodeStr##"d", _.info128,
- vx32xmem, mgatherv4i32>, EVEX_V128, VEX_W;
+ vx128xmem, mgatherv4i32>, EVEX_V128, VEX_W;
defm NAME##Q##SUFF##Z128: avx512_gather<qopc, OpcodeStr##"q", _.info128,
- vx64xmem, mgatherv2i64>, EVEX_V128, VEX_W;
+ vx128xmem, mgatherv2i64>, EVEX_V128, VEX_W;
}
}
multiclass avx512_gather_d_ps<bits<8> dopc, bits<8> qopc,
AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
- defm NAME##D##SUFF##Z: avx512_gather<dopc, OpcodeStr##"d", _.info512, vz32mem,
+ defm NAME##D##SUFF##Z: avx512_gather<dopc, OpcodeStr##"d", _.info512, vz512mem,
mgatherv16i32>, EVEX_V512;
- defm NAME##Q##SUFF##Z: avx512_gather<qopc, OpcodeStr##"q", _.info256, vz64mem,
+ defm NAME##Q##SUFF##Z: avx512_gather<qopc, OpcodeStr##"q", _.info256, vz512mem,
mgatherv8i64>, EVEX_V512;
let Predicates = [HasVLX] in {
defm NAME##D##SUFF##Z256: avx512_gather<dopc, OpcodeStr##"d", _.info256,
- vy32xmem, mgatherv8i32>, EVEX_V256;
+ vy256xmem, mgatherv8i32>, EVEX_V256;
defm NAME##Q##SUFF##Z256: avx512_gather<qopc, OpcodeStr##"q", _.info128,
- vy64xmem, mgatherv4i64>, EVEX_V256;
+ vy128xmem, mgatherv4i64>, EVEX_V256;
defm NAME##D##SUFF##Z128: avx512_gather<dopc, OpcodeStr##"d", _.info128,
- vx32xmem, mgatherv4i32>, EVEX_V128;
+ vx128xmem, mgatherv4i32>, EVEX_V128;
defm NAME##Q##SUFF##Z128: avx512_gather<qopc, OpcodeStr##"q", _.info128,
vx64xmem, mgatherv2i64>, EVEX_V128;
}
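
Usage sketch for one of the gathers instantiated here (vgatherdps with a 512-bit index vector), via the standard AVX-512F intrinsic:

#include <immintrin.h>

__m512 gather_f32(const float *base, __m512i idx) {
  return _mm512_i32gather_ps(idx, base, /*scale=*/4);
}
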
@@ -6440,34 +6762,34 @@ let mayStore = 1, Constraints = "$mask = $mask_wb", ExeDomain = _.ExeDomain in
multiclass avx512_scatter_q_pd<bits<8> dopc, bits<8> qopc,
AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
defm NAME##D##SUFF##Z: avx512_scatter<dopc, OpcodeStr##"d", _.info512,
- vy32xmem, mscatterv8i32>, EVEX_V512, VEX_W;
+ vy512mem, mscatterv8i32>, EVEX_V512, VEX_W;
defm NAME##Q##SUFF##Z: avx512_scatter<qopc, OpcodeStr##"q", _.info512,
- vz64mem, mscatterv8i64>, EVEX_V512, VEX_W;
+ vz512mem, mscatterv8i64>, EVEX_V512, VEX_W;
let Predicates = [HasVLX] in {
defm NAME##D##SUFF##Z256: avx512_scatter<dopc, OpcodeStr##"d", _.info256,
- vx32xmem, mscatterv4i32>, EVEX_V256, VEX_W;
+ vx256xmem, mscatterv4i32>, EVEX_V256, VEX_W;
defm NAME##Q##SUFF##Z256: avx512_scatter<qopc, OpcodeStr##"q", _.info256,
- vy64xmem, mscatterv4i64>, EVEX_V256, VEX_W;
+ vy256xmem, mscatterv4i64>, EVEX_V256, VEX_W;
defm NAME##D##SUFF##Z128: avx512_scatter<dopc, OpcodeStr##"d", _.info128,
- vx32xmem, mscatterv4i32>, EVEX_V128, VEX_W;
+ vx128xmem, mscatterv4i32>, EVEX_V128, VEX_W;
defm NAME##Q##SUFF##Z128: avx512_scatter<qopc, OpcodeStr##"q", _.info128,
- vx64xmem, mscatterv2i64>, EVEX_V128, VEX_W;
+ vx128xmem, mscatterv2i64>, EVEX_V128, VEX_W;
}
}
multiclass avx512_scatter_d_ps<bits<8> dopc, bits<8> qopc,
AVX512VLVectorVTInfo _, string OpcodeStr, string SUFF> {
- defm NAME##D##SUFF##Z: avx512_scatter<dopc, OpcodeStr##"d", _.info512, vz32mem,
+ defm NAME##D##SUFF##Z: avx512_scatter<dopc, OpcodeStr##"d", _.info512, vz512mem,
mscatterv16i32>, EVEX_V512;
- defm NAME##Q##SUFF##Z: avx512_scatter<qopc, OpcodeStr##"q", _.info256, vz64mem,
+ defm NAME##Q##SUFF##Z: avx512_scatter<qopc, OpcodeStr##"q", _.info256, vz512mem,
mscatterv8i64>, EVEX_V512;
let Predicates = [HasVLX] in {
defm NAME##D##SUFF##Z256: avx512_scatter<dopc, OpcodeStr##"d", _.info256,
- vy32xmem, mscatterv8i32>, EVEX_V256;
+ vy256xmem, mscatterv8i32>, EVEX_V256;
defm NAME##Q##SUFF##Z256: avx512_scatter<qopc, OpcodeStr##"q", _.info128,
- vy64xmem, mscatterv4i64>, EVEX_V256;
+ vy128xmem, mscatterv4i64>, EVEX_V256;
defm NAME##D##SUFF##Z128: avx512_scatter<dopc, OpcodeStr##"d", _.info128,
- vx32xmem, mscatterv4i32>, EVEX_V128;
+ vx128xmem, mscatterv4i32>, EVEX_V128;
defm NAME##Q##SUFF##Z128: avx512_scatter<qopc, OpcodeStr##"q", _.info128,
vx64xmem, mscatterv2i64>, EVEX_V128;
}
@@ -6489,79 +6811,57 @@ multiclass avx512_gather_scatter_prefetch<bits<8> opc, Format F, string OpcodeSt
}
defm VGATHERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dps",
- VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
+ VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VGATHERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qps",
- VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
defm VGATHERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dpd",
- VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
+ VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
defm VGATHERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qpd",
- VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VGATHERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dps",
- VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
+ VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VGATHERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qps",
- VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
defm VGATHERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dpd",
- VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
+ VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
defm VGATHERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qpd",
- VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VSCATTERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dps",
- VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
+ VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VSCATTERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qps",
- VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
defm VSCATTERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dpd",
- VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
+ VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
defm VSCATTERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qpd",
- VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VSCATTERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dps",
- VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
+ VK16WM, vz512mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VSCATTERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qps",
- VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;
defm VSCATTERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dpd",
- VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
+ VK8WM, vy512mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;
defm VSCATTERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qpd",
- VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
+ VK8WM, vz512mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
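
Usage sketch for the prefetch variants, assuming the AVX-512PF intrinsic and hint constant from the Intel intrinsics guide (requires -mavx512pf):

#include <immintrin.h>

void prefetch_gather(const float *base, __m512i idx) {
  /* vgatherpf0dps: T0-hint prefetch of base[idx[i]] for each lane */
  _mm512_prefetch_i32gather_ps(idx, base, 4, _MM_HINT_T0);
}
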
// Helper fragments to match sext vXi1 to vXiY.
def v16i1sextv16i32 : PatLeaf<(v16i32 (X86vsrai VR512:$src, (i8 31)))>;
def v8i1sextv8i64 : PatLeaf<(v8i64 (X86vsrai VR512:$src, (i8 63)))>;
-def : Pat<(store (i1 -1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
-def : Pat<(store (i1 1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
-def : Pat<(store (i1 0), addr:$dst), (MOV8mi addr:$dst, (i8 0))>;
-
-def : Pat<(store VK1:$src, addr:$dst),
- (MOV8mr addr:$dst,
- (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)),
- sub_8bit))>, Requires<[HasAVX512, NoDQI]>;
-
-def : Pat<(store VK8:$src, addr:$dst),
- (MOV8mr addr:$dst,
- (EXTRACT_SUBREG (KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
- sub_8bit))>, Requires<[HasAVX512, NoDQI]>;
-
-def truncstorei1 : PatFrag<(ops node:$val, node:$ptr),
- (truncstore node:$val, node:$ptr), [{
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
-}]>;
-
-def : Pat<(truncstorei1 GR8:$src, addr:$dst),
- (MOV8mr addr:$dst, GR8:$src)>;
-
multiclass cvt_by_vec_width<bits<8> opc, X86VectorVTInfo Vec, string OpcodeStr > {
def rr : AVX512XS8I<opc, MRMSrcReg, (outs Vec.RC:$dst), (ins Vec.KRC:$src),
!strconcat(OpcodeStr##Vec.Suffix, "\t{$src, $dst|$dst, $src}"),
@@ -6593,22 +6893,38 @@ multiclass avx512_convert_mask_to_vector<string OpcodeStr> {
defm VPMOVM2 : avx512_convert_mask_to_vector<"vpmovm2">;
multiclass convert_vector_to_mask_common<bits<8> opc, X86VectorVTInfo _, string OpcodeStr > {
-def rr : AVX512XS8I<opc, MRMSrcReg, (outs _.KRC:$dst), (ins _.RC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set _.KRC:$dst, (X86cvt2mask (_.VT _.RC:$src)))]>, EVEX;
+ def rr : AVX512XS8I<opc, MRMSrcReg, (outs _.KRC:$dst), (ins _.RC:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set _.KRC:$dst, (X86cvt2mask (_.VT _.RC:$src)))]>, EVEX;
+}
+
+// Use the 512-bit version to implement the 128/256-bit forms when VLX is unavailable.
+multiclass convert_vector_to_mask_lowering<X86VectorVTInfo ExtendInfo,
+ X86VectorVTInfo _> {
+
+ def : Pat<(_.KVT (X86cvt2mask (_.VT _.RC:$src))),
+ (_.KVT (COPY_TO_REGCLASS
+ (!cast<Instruction>(NAME#"Zrr")
+ (INSERT_SUBREG (ExtendInfo.VT (IMPLICIT_DEF)),
+ _.RC:$src, _.SubRegIdx)),
+ _.KRC))>;
}
multiclass avx512_convert_vector_to_mask<bits<8> opc, string OpcodeStr,
- AVX512VLVectorVTInfo VTInfo, Predicate prd> {
-let Predicates = [prd] in
- defm Z : convert_vector_to_mask_common <opc, VTInfo.info512, OpcodeStr>,
- EVEX_V512;
+ AVX512VLVectorVTInfo VTInfo, Predicate prd> {
+ let Predicates = [prd] in
+ defm Z : convert_vector_to_mask_common <opc, VTInfo.info512, OpcodeStr>,
+ EVEX_V512;
let Predicates = [prd, HasVLX] in {
defm Z256 : convert_vector_to_mask_common<opc, VTInfo.info256, OpcodeStr>,
- EVEX_V256;
+ EVEX_V256;
defm Z128 : convert_vector_to_mask_common<opc, VTInfo.info128, OpcodeStr>,
- EVEX_V128;
+ EVEX_V128;
+ }
+ let Predicates = [prd, NoVLX] in {
+ defm Z256_Alt : convert_vector_to_mask_lowering<VTInfo.info512, VTInfo.info256>;
+ defm Z128_Alt : convert_vector_to_mask_lowering<VTInfo.info512, VTInfo.info128>;
}
}
@@ -6631,7 +6947,7 @@ multiclass compress_by_vec_width<bits<8> opc, X86VectorVTInfo _,
(ins _.RC:$src1), OpcodeStr, "$src1", "$src1",
(_.VT (X86compress _.RC:$src1))>, AVX5128IBase;
- let mayStore = 1 in {
+ let mayStore = 1, hasSideEffects = 0 in
def mr : AVX5128I<opc, MRMDestMem, (outs),
(ins _.MemOp:$dst, _.RC:$src),
OpcodeStr # "\t{$src, $dst|$dst, $src}",
@@ -6644,7 +6960,6 @@ multiclass compress_by_vec_width<bits<8> opc, X86VectorVTInfo _,
(_.VT (X86compress _.RC:$src)), _.ImmAllZerosV)),
addr:$dst)]>,
EVEX_K, EVEX_CD8<_.EltSize, CD8VT1>;
- }
}
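
Usage sketch for the compressing store form defined above (AVX-512F): the lanes selected by the mask are packed together and stored contiguously.

#include <immintrin.h>

void compress_store(int *dst, __mmask16 k, __m512i v) {
  _mm512_mask_compressstoreu_epi32(dst, k, v);  /* vpcompressd to memory */
}
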
multiclass compress_by_elt_width<bits<8> opc, string OpcodeStr,
@@ -6673,7 +6988,6 @@ multiclass expand_by_vec_width<bits<8> opc, X86VectorVTInfo _,
(ins _.RC:$src1), OpcodeStr, "$src1", "$src1",
(_.VT (X86expand _.RC:$src1))>, AVX5128IBase;
- let mayLoad = 1 in
defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.MemOp:$src1), OpcodeStr, "$src1", "$src1",
(_.VT (X86expand (_.VT (bitconvert
@@ -6708,25 +7022,23 @@ multiclass avx512_unary_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNo
X86VectorVTInfo _>{
defm rri : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src1, i32u8imm:$src2),
- OpcodeStr##_.Suffix, "$src2, $src1", "$src2, $src2",
- (OpNode (_.VT _.RC:$src1),
- (i32 imm:$src2),
- (i32 FROUND_CURRENT))>;
- let mayLoad = 1 in {
- defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.MemOp:$src1, i32u8imm:$src2),
OpcodeStr##_.Suffix, "$src2, $src1", "$src1, $src2",
- (OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
+ (OpNode (_.VT _.RC:$src1),
(i32 imm:$src2),
(i32 FROUND_CURRENT))>;
- defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.ScalarMemOp:$src1, i32u8imm:$src2),
- OpcodeStr##_.Suffix, "$src2, ${src1}"##_.BroadcastStr,
- "${src1}"##_.BroadcastStr##", $src2",
- (OpNode (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src1))),
- (i32 imm:$src2),
- (i32 FROUND_CURRENT))>, EVEX_B;
- }
+ defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.MemOp:$src1, i32u8imm:$src2),
+ OpcodeStr##_.Suffix, "$src2, $src1", "$src1, $src2",
+ (OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
+ (i32 imm:$src2),
+ (i32 FROUND_CURRENT))>;
+ defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.ScalarMemOp:$src1, i32u8imm:$src2),
+ OpcodeStr##_.Suffix, "$src2, ${src1}"##_.BroadcastStr,
+ "${src1}"##_.BroadcastStr##", $src2",
+ (OpNode (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src1))),
+ (i32 imm:$src2),
+ (i32 FROUND_CURRENT))>, EVEX_B;
}
// Handle instruction reg_vec1 = op(reg_vec2,reg_vec3,imm),{sae}
@@ -6769,23 +7081,21 @@ multiclass avx512_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.VT _.RC:$src2),
(i32 imm:$src3),
(i32 FROUND_CURRENT))>;
- let mayLoad = 1 in {
- defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3),
- OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
- (OpNode (_.VT _.RC:$src1),
- (_.VT (bitconvert (_.LdFrag addr:$src2))),
- (i32 imm:$src3),
- (i32 FROUND_CURRENT))>;
- defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3),
- OpcodeStr, "$src3, ${src2}"##_.BroadcastStr##", $src1",
- "$src1, ${src2}"##_.BroadcastStr##", $src3",
- (OpNode (_.VT _.RC:$src1),
- (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
- (i32 imm:$src3),
- (i32 FROUND_CURRENT))>, EVEX_B;
- }
+ defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3),
+ OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT (bitconvert (_.LdFrag addr:$src2))),
+ (i32 imm:$src3),
+ (i32 FROUND_CURRENT))>;
+ defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3),
+ OpcodeStr, "$src3, ${src2}"##_.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_.BroadcastStr##", $src3",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
+ (i32 imm:$src3),
+ (i32 FROUND_CURRENT))>, EVEX_B;
}
// Handle instruction reg_vec1 = op(reg_vec2,reg_vec3,imm)
@@ -6799,14 +7109,13 @@ multiclass avx512_3Op_rm_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode,
(DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1),
(SrcInfo.VT SrcInfo.RC:$src2),
(i8 imm:$src3)))>;
- let mayLoad = 1 in
- defm rmi : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
- (ins SrcInfo.RC:$src1, SrcInfo.MemOp:$src2, u8imm:$src3),
- OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
- (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1),
- (SrcInfo.VT (bitconvert
- (SrcInfo.LdFrag addr:$src2))),
- (i8 imm:$src3)))>;
+ defm rmi : AVX512_maskable<opc, MRMSrcMem, DestInfo, (outs DestInfo.RC:$dst),
+ (ins SrcInfo.RC:$src1, SrcInfo.MemOp:$src2, u8imm:$src3),
+ OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
+ (DestInfo.VT (OpNode (SrcInfo.VT SrcInfo.RC:$src1),
+ (SrcInfo.VT (bitconvert
+ (SrcInfo.LdFrag addr:$src2))),
+ (i8 imm:$src3)))>;
}
// Handle instruction reg_vec1 = op(reg_vec2,reg_vec3,imm)
@@ -6816,14 +7125,13 @@ multiclass avx512_3Op_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _>:
avx512_3Op_rm_imm8<opc, OpcodeStr, OpNode, _, _>{
- let mayLoad = 1 in
- defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
- OpcodeStr, "$src3, ${src2}"##_.BroadcastStr##", $src1",
- "$src1, ${src2}"##_.BroadcastStr##", $src3",
- (OpNode (_.VT _.RC:$src1),
- (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
- (i8 imm:$src3))>, EVEX_B;
+ defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
+ OpcodeStr, "$src3, ${src2}"##_.BroadcastStr##", $src1",
+ "$src1, ${src2}"##_.BroadcastStr##", $src3",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src2))),
+ (i8 imm:$src3))>, EVEX_B;
}
// Handle scalar instruction reg_vec1 = op(reg_vec2,reg_vec3,imm)
@@ -6839,22 +7147,20 @@ multiclass avx512_fp_scalar_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(_.VT _.RC:$src2),
(i32 imm:$src3),
(i32 FROUND_CURRENT))>;
- let mayLoad = 1 in {
- defm rmi : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3),
- OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
- (OpNode (_.VT _.RC:$src1),
- (_.VT (scalar_to_vector
- (_.ScalarLdFrag addr:$src2))),
- (i32 imm:$src3),
- (i32 FROUND_CURRENT))>;
+ defm rmi : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src1, _.MemOp:$src2, i32u8imm:$src3),
+ OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT (scalar_to_vector
+ (_.ScalarLdFrag addr:$src2))),
+ (i32 imm:$src3),
+ (i32 FROUND_CURRENT))>;
- let isAsmParserOnly = 1 in {
- defm rmi_alt :AVX512_maskable_in_asm<opc, MRMSrcMem, _, (outs _.FRC:$dst),
- (ins _.FRC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
- OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
- []>;
- }
+ let isAsmParserOnly = 1, mayLoad = 1, hasSideEffects = 0 in {
+ defm rmi_alt :AVX512_maskable_in_asm<opc, MRMSrcMem, _, (outs _.FRC:$dst),
+ (ins _.FRC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
+ OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
+ []>;
}
}
@@ -6940,19 +7246,6 @@ multiclass avx512_common_unary_fp_sae_packed_imm_all<string OpcodeStr,
opcPd, OpNode, prd>, EVEX_CD8<64, CD8VF>, VEX_W;
}
-defm VFIXUPIMMPD : avx512_common_fp_sae_packed_imm<"vfixupimmpd",
- avx512vl_f64_info, 0x54, X86VFixupimm, HasAVX512>,
- AVX512AIi8Base, EVEX_4V, EVEX_CD8<64, CD8VF>, VEX_W;
-defm VFIXUPIMMPS : avx512_common_fp_sae_packed_imm<"vfixupimmps",
- avx512vl_f32_info, 0x54, X86VFixupimm, HasAVX512>,
- AVX512AIi8Base, EVEX_4V, EVEX_CD8<32, CD8VF>;
-
-defm VFIXUPIMMSD: avx512_common_fp_sae_scalar_imm<"vfixupimmsd", f64x_info,
- 0x55, X86VFixupimm, HasAVX512>,
- AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
-defm VFIXUPIMMSS: avx512_common_fp_sae_scalar_imm<"vfixupimmss", f32x_info,
- 0x55, X86VFixupimm, HasAVX512>,
- AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<32, CD8VT1>;
defm VREDUCE : avx512_common_unary_fp_sae_packed_imm_all<"vreduce", 0x56, 0x56,
X86VReduce, HasDQI>, AVX512AIi8Base, EVEX;
@@ -7043,7 +7336,7 @@ defm VALIGND: avx512_valign<"valignd", avx512vl_i32_info>,
defm VALIGNQ: avx512_valign<"valignq", avx512vl_i64_info>,
EVEX_CD8<64, CD8VF>, VEX_W;
-multiclass avx512_vpalign_lowering<X86VectorVTInfo _ , list<Predicate> p>{
+multiclass avx512_vpalignr_lowering<X86VectorVTInfo _ , list<Predicate> p>{
let Predicates = p in
def NAME#_.VTName#rri:
Pat<(_.VT (X86PAlignr _.RC:$src1, _.RC:$src2, (i8 imm:$imm))),
@@ -7051,18 +7344,18 @@ multiclass avx512_vpalign_lowering<X86VectorVTInfo _ , list<Predicate> p>{
_.RC:$src1, _.RC:$src2, imm:$imm)>;
}
-multiclass avx512_vpalign_lowering_common<AVX512VLVectorVTInfo _>:
- avx512_vpalign_lowering<_.info512, [HasBWI]>,
- avx512_vpalign_lowering<_.info128, [HasBWI, HasVLX]>,
- avx512_vpalign_lowering<_.info256, [HasBWI, HasVLX]>;
+multiclass avx512_vpalignr_lowering_common<AVX512VLVectorVTInfo _>:
+ avx512_vpalignr_lowering<_.info512, [HasBWI]>,
+ avx512_vpalignr_lowering<_.info128, [HasBWI, HasVLX]>,
+ avx512_vpalignr_lowering<_.info256, [HasBWI, HasVLX]>;
-defm VPALIGN: avx512_common_3Op_rm_imm8<0x0F, X86PAlignr, "vpalignr" ,
+defm VPALIGNR: avx512_common_3Op_rm_imm8<0x0F, X86PAlignr, "vpalignr" ,
avx512vl_i8_info, avx512vl_i8_info>,
- avx512_vpalign_lowering_common<avx512vl_i16_info>,
- avx512_vpalign_lowering_common<avx512vl_i32_info>,
- avx512_vpalign_lowering_common<avx512vl_f32_info>,
- avx512_vpalign_lowering_common<avx512vl_i64_info>,
- avx512_vpalign_lowering_common<avx512vl_f64_info>,
+ avx512_vpalignr_lowering_common<avx512vl_i16_info>,
+ avx512_vpalignr_lowering_common<avx512vl_i32_info>,
+ avx512_vpalignr_lowering_common<avx512vl_f32_info>,
+ avx512_vpalignr_lowering_common<avx512vl_i64_info>,
+ avx512_vpalignr_lowering_common<avx512vl_f64_info>,
EVEX_CD8<8, CD8VF>;
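
Usage sketch for the 512-bit vpalignr instantiated above (AVX-512BW); it operates per 128-bit lane, right-shifting the concatenation of a (high) and b (low) by the byte immediate, which is why the i16/i32/i64/f32/f64 variants are handled by the bitcast lowering patterns onto the byte form:

#include <immintrin.h>

__m512i align_by_5(__m512i a, __m512i b) {
  /* per 128-bit lane: ((a:b) >> 5 bytes), low 16 bytes of the result */
  return _mm512_alignr_epi8(a, b, 5);
}
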
defm VDBPSADBW: avx512_common_3Op_rm_imm8<0x42, X86dbpsadbw, "vdbpsadbw" ,
@@ -7075,25 +7368,23 @@ multiclass avx512_unary_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
"$src1", "$src1",
(_.VT (OpNode _.RC:$src1))>, EVEX, AVX5128IBase;
- let mayLoad = 1 in
- defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.MemOp:$src1), OpcodeStr,
- "$src1", "$src1",
- (_.VT (OpNode (bitconvert (_.LdFrag addr:$src1))))>,
- EVEX, AVX5128IBase, EVEX_CD8<_.EltSize, CD8VF>;
+ defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.MemOp:$src1), OpcodeStr,
+ "$src1", "$src1",
+ (_.VT (OpNode (bitconvert (_.LdFrag addr:$src1))))>,
+ EVEX, AVX5128IBase, EVEX_CD8<_.EltSize, CD8VF>;
}
multiclass avx512_unary_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> :
avx512_unary_rm<opc, OpcodeStr, OpNode, _> {
- let mayLoad = 1 in
- defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.ScalarMemOp:$src1), OpcodeStr,
- "${src1}"##_.BroadcastStr,
- "${src1}"##_.BroadcastStr,
- (_.VT (OpNode (X86VBroadcast
- (_.ScalarLdFrag addr:$src1))))>,
- EVEX, AVX5128IBase, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
+ defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.ScalarMemOp:$src1), OpcodeStr,
+ "${src1}"##_.BroadcastStr,
+ "${src1}"##_.BroadcastStr,
+ (_.VT (OpNode (X86VBroadcast
+ (_.ScalarLdFrag addr:$src1))))>,
+ EVEX, AVX5128IBase, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>;
}
multiclass avx512_unary_rm_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -7185,12 +7476,11 @@ multiclass avx512_movddup_128<bits<8> opc, string OpcodeStr, SDNode OpNode,
defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src), OpcodeStr, "$src", "$src",
(_.VT (OpNode (_.VT _.RC:$src)))>, EVEX;
- let mayLoad = 1 in
- defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.ScalarMemOp:$src), OpcodeStr, "$src", "$src",
- (_.VT (OpNode (_.VT (scalar_to_vector
- (_.ScalarLdFrag addr:$src)))))>,
- EVEX, EVEX_CD8<_.EltSize, CD8VH>;
+ defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.ScalarMemOp:$src), OpcodeStr, "$src", "$src",
+ (_.VT (OpNode (_.VT (scalar_to_vector
+ (_.ScalarLdFrag addr:$src)))))>,
+ EVEX, EVEX_CD8<_.EltSize, CD8VH>;
}
multiclass avx512_movddup_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -7221,8 +7511,8 @@ def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
//===----------------------------------------------------------------------===//
// AVX-512 - Unpack Instructions
//===----------------------------------------------------------------------===//
-defm VUNPCKH : avx512_fp_binop_p<0x15, "vunpckh", X86Unpckh>;
-defm VUNPCKL : avx512_fp_binop_p<0x14, "vunpckl", X86Unpckl>;
+defm VUNPCKH : avx512_fp_binop_p<0x15, "vunpckh", X86Unpckh, HasAVX512>;
+defm VUNPCKL : avx512_fp_binop_p<0x14, "vunpckl", X86Unpckl, HasAVX512>;
defm VPUNPCKLBW : avx512_binop_rm_vl_b<0x60, "vpunpcklbw", X86Unpckl,
SSE_INTALU_ITINS_P, HasBWI>;
@@ -7248,14 +7538,13 @@ defm VPUNPCKHQDQ : avx512_binop_rm_vl_q<0x6D, "vpunpckhqdq", X86Unpckh,
multiclass avx512_extract_elt_bw_m<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _> {
- let mayStore = 1 in
- def mr : AVX512Ii8<opc, MRMDestMem, (outs),
- (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
- OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(store (_.EltVT (trunc (assertzext (OpNode (_.VT _.RC:$src1),
- imm:$src2)))),
- addr:$dst)]>,
- EVEX, EVEX_CD8<_.EltSize, CD8VT1>;
+ def mr : AVX512Ii8<opc, MRMDestMem, (outs),
+ (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
+ OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(store (_.EltVT (trunc (assertzext (OpNode (_.VT _.RC:$src1),
+ imm:$src2)))),
+ addr:$dst)]>,
+ EVEX, EVEX_CD8<_.EltSize, CD8VT1>;
}
multiclass avx512_extract_elt_b<string OpcodeStr, X86VectorVTInfo _> {
@@ -7280,6 +7569,7 @@ multiclass avx512_extract_elt_w<string OpcodeStr, X86VectorVTInfo _> {
(X86pextrw (_.VT _.RC:$src1), imm:$src2))]>,
EVEX, PD;
+ let hasSideEffects = 0 in
def rr_REV : AVX512Ii8<0x15, MRMDestReg, (outs GR32orGR64:$dst),
(ins _.RC:$src1, u8imm:$src2),
OpcodeStr#".s\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
@@ -7299,13 +7589,12 @@ multiclass avx512_extract_elt_dq<string OpcodeStr, X86VectorVTInfo _,
(extractelt (_.VT _.RC:$src1), imm:$src2))]>,
EVEX, TAPD;
- let mayStore = 1 in
- def mr : AVX512Ii8<0x16, MRMDestMem, (outs),
- (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
- OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(store (extractelt (_.VT _.RC:$src1),
- imm:$src2),addr:$dst)]>,
- EVEX, EVEX_CD8<_.EltSize, CD8VT1>, TAPD;
+ def mr : AVX512Ii8<0x16, MRMDestMem, (outs),
+ (ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
+ OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(store (extractelt (_.VT _.RC:$src1),
+ imm:$src2),addr:$dst)]>,
+ EVEX, EVEX_CD8<_.EltSize, CD8VT1>, TAPD;
}
}
@@ -7380,33 +7669,33 @@ multiclass avx512_shift_packed<bits<8> opc, SDNode OpNode, Format MRMr,
(outs _.RC:$dst), (ins _.RC:$src1, u8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.RC:$dst,(_.VT (OpNode _.RC:$src1, (i8 imm:$src2))))]>;
- let mayLoad = 1 in
- def rm : AVX512<opc, MRMm,
- (outs _.RC:$dst), (ins _.MemOp:$src1, u8imm:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set _.RC:$dst,(_.VT (OpNode
- (_.LdFrag addr:$src1), (i8 imm:$src2))))]>;
+ def rm : AVX512<opc, MRMm,
+ (outs _.RC:$dst), (ins _.MemOp:$src1, u8imm:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set _.RC:$dst,(_.VT (OpNode
+ (_.VT (bitconvert (_.LdFrag addr:$src1))),
+ (i8 imm:$src2))))]>;
}
-multiclass avx512_shift_packed_all<bits<8> opc, SDNode OpNode, Format MRMr,
+multiclass avx512_shift_packed_all<bits<8> opc, SDNode OpNode, Format MRMr,
Format MRMm, string OpcodeStr, Predicate prd>{
let Predicates = [prd] in
- defm Z512 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
- OpcodeStr, v8i64_info>, EVEX_V512;
+ defm Z512 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
+ OpcodeStr, v64i8_info>, EVEX_V512;
let Predicates = [prd, HasVLX] in {
- defm Z256 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
- OpcodeStr, v4i64x_info>, EVEX_V256;
- defm Z128 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
- OpcodeStr, v2i64x_info>, EVEX_V128;
+ defm Z256 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
+ OpcodeStr, v32i8x_info>, EVEX_V256;
+ defm Z128 : avx512_shift_packed<opc, OpNode, MRMr, MRMm,
+ OpcodeStr, v16i8x_info>, EVEX_V128;
}
}
-defm VPSLLDQ : avx512_shift_packed_all<0x73, X86vshldq, MRM7r, MRM7m, "vpslldq",
+defm VPSLLDQ : avx512_shift_packed_all<0x73, X86vshldq, MRM7r, MRM7m, "vpslldq",
HasBWI>, AVX512PDIi8Base, EVEX_4V;
-defm VPSRLDQ : avx512_shift_packed_all<0x73, X86vshrdq, MRM3r, MRM3m, "vpsrldq",
+defm VPSRLDQ : avx512_shift_packed_all<0x73, X86vshrdq, MRM3r, MRM3m, "vpsrldq",
HasBWI>, AVX512PDIi8Base, EVEX_4V;
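
Usage sketch for the whole-xmm-lane byte shifts instantiated above (AVX-512BW); the immediate counts bytes, which is what the switch of the pattern type from v8i64 to v64i8 reflects:

#include <immintrin.h>

__m512i shl_bytes(__m512i a) { return _mm512_bslli_epi128(a, 3); }  /* vpslldq */
__m512i shr_bytes(__m512i a) { return _mm512_bsrli_epi128(a, 3); }  /* vpsrldq */
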
-multiclass avx512_psadbw_packed<bits<8> opc, SDNode OpNode,
+multiclass avx512_psadbw_packed<bits<8> opc, SDNode OpNode,
string OpcodeStr, X86VectorVTInfo _dst,
X86VectorVTInfo _src>{
def rr : AVX512BI<opc, MRMSrcReg,
@@ -7415,17 +7704,16 @@ multiclass avx512_psadbw_packed<bits<8> opc, SDNode OpNode,
[(set _dst.RC:$dst,(_dst.VT
(OpNode (_src.VT _src.RC:$src1),
(_src.VT _src.RC:$src2))))]>;
- let mayLoad = 1 in
- def rm : AVX512BI<opc, MRMSrcMem,
- (outs _dst.RC:$dst), (ins _src.RC:$src1, _src.MemOp:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set _dst.RC:$dst,(_dst.VT
- (OpNode (_src.VT _src.RC:$src1),
- (_src.VT (bitconvert
- (_src.LdFrag addr:$src2))))))]>;
+ def rm : AVX512BI<opc, MRMSrcMem,
+ (outs _dst.RC:$dst), (ins _src.RC:$src1, _src.MemOp:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set _dst.RC:$dst,(_dst.VT
+ (OpNode (_src.VT _src.RC:$src1),
+ (_src.VT (bitconvert
+ (_src.LdFrag addr:$src2))))))]>;
}
-multiclass avx512_psadbw_packed_all<bits<8> opc, SDNode OpNode,
+multiclass avx512_psadbw_packed_all<bits<8> opc, SDNode OpNode,
string OpcodeStr, Predicate prd> {
let Predicates = [prd] in
defm Z512 : avx512_psadbw_packed<opc, OpNode, OpcodeStr, v8i64_info,
@@ -7438,7 +7726,7 @@ multiclass avx512_psadbw_packed_all<bits<8> opc, SDNode OpNode,
}
}
-defm VPSADBW : avx512_psadbw_packed_all<0xf6, X86psadbw, "vpsadbw",
+defm VPSADBW : avx512_psadbw_packed_all<0xf6, X86psadbw, "vpsadbw",
HasBWI>, EVEX_4V;
multiclass avx512_ternlog<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -7446,30 +7734,28 @@ multiclass avx512_ternlog<bits<8> opc, string OpcodeStr, SDNode OpNode,
let Constraints = "$src1 = $dst" in {
defm rri : AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src2, _.RC:$src3, u8imm:$src4),
- OpcodeStr, "$src4, $src3, $src2", "$src2, $src3, $src3",
+ OpcodeStr, "$src4, $src3, $src2", "$src2, $src3, $src4",
(OpNode (_.VT _.RC:$src1),
(_.VT _.RC:$src2),
(_.VT _.RC:$src3),
(i8 imm:$src4))>, AVX512AIi8Base, EVEX_4V;
- let mayLoad = 1 in {
- defm rmi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src2, _.MemOp:$src3, u8imm:$src4),
- OpcodeStr, "$src4, $src3, $src2", "$src2, $src3, $src3",
- (OpNode (_.VT _.RC:$src1),
- (_.VT _.RC:$src2),
- (_.VT (bitconvert (_.LdFrag addr:$src3))),
- (i8 imm:$src4))>,
- AVX512AIi8Base, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
- defm rmbi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
- (ins _.RC:$src2, _.ScalarMemOp:$src3, u8imm:$src4),
- OpcodeStr, "$src4, ${src3}"##_.BroadcastStr##", $src2",
- "$src2, ${src3}"##_.BroadcastStr##", $src4",
- (OpNode (_.VT _.RC:$src1),
- (_.VT _.RC:$src2),
- (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
- (i8 imm:$src4))>, EVEX_B,
- AVX512AIi8Base, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
- }
+ defm rmi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.MemOp:$src3, u8imm:$src4),
+ OpcodeStr, "$src4, $src3, $src2", "$src2, $src3, $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_.VT (bitconvert (_.LdFrag addr:$src3))),
+ (i8 imm:$src4))>,
+ AVX512AIi8Base, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
+ defm rmbi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3, u8imm:$src4),
+ OpcodeStr, "$src4, ${src3}"##_.BroadcastStr##", $src2",
+ "$src2, ${src3}"##_.BroadcastStr##", $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_.VT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
+ (i8 imm:$src4))>, EVEX_B,
+ AVX512AIi8Base, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>;
}// Constraints = "$src1 = $dst"
}
@@ -7485,3 +7771,109 @@ multiclass avx512_common_ternlog<string OpcodeStr, AVX512VLVectorVTInfo _>{
defm VPTERNLOGD : avx512_common_ternlog<"vpternlogd", avx512vl_i32_info>;
defm VPTERNLOGQ : avx512_common_ternlog<"vpternlogq", avx512vl_i64_info>, VEX_W;
+//===----------------------------------------------------------------------===//
+// AVX-512 - FixupImm
+//===----------------------------------------------------------------------===//
+
+multiclass avx512_fixupimm_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ X86VectorVTInfo _>{
+ let Constraints = "$src1 = $dst" in {
+ defm rri : AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
+ OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_.IntVT _.RC:$src3),
+ (i32 imm:$src4),
+ (i32 FROUND_CURRENT))>;
+ defm rmi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.MemOp:$src3, i32u8imm:$src4),
+ OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_.IntVT (bitconvert (_.LdFrag addr:$src3))),
+ (i32 imm:$src4),
+ (i32 FROUND_CURRENT))>;
+ defm rmbi : AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3, i32u8imm:$src4),
+ OpcodeStr##_.Suffix, "$src4, ${src3}"##_.BroadcastStr##", $src2",
+ "$src2, ${src3}"##_.BroadcastStr##", $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_.IntVT (X86VBroadcast(_.ScalarLdFrag addr:$src3))),
+ (i32 imm:$src4),
+ (i32 FROUND_CURRENT))>, EVEX_B;
+ } // Constraints = "$src1 = $dst"
+}
+
+multiclass avx512_fixupimm_packed_sae<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, X86VectorVTInfo _>{
+let Constraints = "$src1 = $dst" in {
+ defm rrib : AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
+ OpcodeStr##_.Suffix, "$src4, {sae}, $src3, $src2",
+ "$src2, $src3, {sae}, $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_.IntVT _.RC:$src3),
+ (i32 imm:$src4),
+ (i32 FROUND_NO_EXC))>, EVEX_B;
+ }
+}
+
+multiclass avx512_fixupimm_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ X86VectorVTInfo _, X86VectorVTInfo _src3VT> {
+ let Constraints = "$src1 = $dst" , Predicates = [HasAVX512] in {
+ defm rri : AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
+ OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_src3VT.VT _src3VT.RC:$src3),
+ (i32 imm:$src4),
+ (i32 FROUND_CURRENT))>;
+
+ defm rrib : AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4),
+ OpcodeStr##_.Suffix, "$src4, {sae}, $src3, $src2",
+ "$src2, $src3, {sae}, $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_src3VT.VT _src3VT.RC:$src3),
+ (i32 imm:$src4),
+ (i32 FROUND_NO_EXC))>, EVEX_B;
+ defm rmi : AVX512_maskable_3src_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3, i32u8imm:$src4),
+ OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4",
+ (OpNode (_.VT _.RC:$src1),
+ (_.VT _.RC:$src2),
+ (_src3VT.VT (scalar_to_vector
+ (_src3VT.ScalarLdFrag addr:$src3))),
+ (i32 imm:$src4),
+ (i32 FROUND_CURRENT))>;
+ }
+}
+
+multiclass avx512_fixupimm_packed_all<AVX512VLVectorVTInfo _Vec>{
+ let Predicates = [HasAVX512] in
+ defm Z : avx512_fixupimm_packed<0x54, "vfixupimm", X86VFixupimm, _Vec.info512>,
+ avx512_fixupimm_packed_sae<0x54, "vfixupimm", X86VFixupimm, _Vec.info512>,
+ AVX512AIi8Base, EVEX_4V, EVEX_V512;
+ let Predicates = [HasAVX512, HasVLX] in {
+ defm Z128 : avx512_fixupimm_packed<0x54, "vfixupimm", X86VFixupimm, _Vec.info128>,
+ AVX512AIi8Base, EVEX_4V, EVEX_V128;
+ defm Z256 : avx512_fixupimm_packed<0x54, "vfixupimm", X86VFixupimm, _Vec.info256>,
+ AVX512AIi8Base, EVEX_4V, EVEX_V256;
+ }
+}
+
+defm VFIXUPIMMSS : avx512_fixupimm_scalar<0x55, "vfixupimm", X86VFixupimmScalar,
+ f32x_info, v4i32x_info>,
+ AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<32, CD8VT1>;
+defm VFIXUPIMMSD : avx512_fixupimm_scalar<0x55, "vfixupimm", X86VFixupimmScalar,
+ f64x_info, v2i64x_info>,
+ AVX512AIi8Base, VEX_LIG, EVEX_4V, EVEX_CD8<64, CD8VT1>, VEX_W;
+defm VFIXUPIMMPS : avx512_fixupimm_packed_all<avx512vl_f32_info>,
+ EVEX_CD8<32, CD8VF>;
+defm VFIXUPIMMPD : avx512_fixupimm_packed_all<avx512vl_f64_info>,
+ EVEX_CD8<64, CD8VF>, VEX_W;
diff --git a/lib/Target/X86/X86InstrBuilder.h b/lib/Target/X86/X86InstrBuilder.h
index 787f15bc628e..bcea6fa80350 100644
--- a/lib/Target/X86/X86InstrBuilder.h
+++ b/lib/Target/X86/X86InstrBuilder.h
@@ -83,6 +83,34 @@ struct X86AddressMode {
}
};
+/// Compute the addressing mode from a machine instruction starting with the
+/// given operand.
+static inline X86AddressMode getAddressFromInstr(MachineInstr *MI,
+ unsigned Operand) {
+ X86AddressMode AM;
+ // Bind a fresh reference per operand; re-assigning through a single
+ // MachineOperand reference would overwrite MI's first address operand.
+ const MachineOperand &Op0 = MI->getOperand(Operand);
+ if (Op0.isReg()) {
+ AM.BaseType = X86AddressMode::RegBase;
+ AM.Base.Reg = Op0.getReg();
+ } else {
+ AM.BaseType = X86AddressMode::FrameIndexBase;
+ AM.Base.FrameIndex = Op0.getIndex();
+ }
+ const MachineOperand &Op1 = MI->getOperand(Operand + 1);
+ if (Op1.isImm())
+ AM.Scale = Op1.getImm();
+ const MachineOperand &Op2 = MI->getOperand(Operand + 2);
+ if (Op2.isImm())
+ AM.IndexReg = Op2.getImm();
+ const MachineOperand &Op3 = MI->getOperand(Operand + 3);
+ if (Op3.isGlobal()) {
+ AM.GV = Op3.getGlobal();
+ } else {
+ AM.Disp = Op3.getImm();
+ }
+ return AM;
+}
+
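A minimal usage sketch for the helper above (an editorial illustration, not
part of the patch): given a memory-form instruction whose five address
operands start at index MemOpStart, the decoded X86AddressMode can be
re-emitted on a new instruction with addFullAddress, which appears further
down in this header. NewOpc and MemOpStart are placeholders chosen by the
caller.

    // Sketch: build a new instruction that reuses MI's memory reference.
    static void rebuildWithSameAddress(MachineBasicBlock &MBB, MachineInstr *MI,
                                       const TargetInstrInfo *TII,
                                       unsigned NewOpc, unsigned MemOpStart) {
      X86AddressMode AM = getAddressFromInstr(MI, MemOpStart);
      // addFullAddress appends base, scale, index, displacement and segment.
      addFullAddress(BuildMI(MBB, MachineBasicBlock::iterator(MI),
                             MI->getDebugLoc(), TII->get(NewOpc)),
                     AM);
    }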
/// addDirectMem - This function is used to add a direct memory reference to the
/// current instruction -- that is, a dereference of an address in a register,
/// with no scale, index or displacement. An example is: DWORD PTR [EAX].
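For instance (illustrative opcode and registers; MBB, MBBI, DL and TII are
assumed to be in scope), a 32-bit load through the address held in EAX:

    // mov ecx, dword ptr [eax] -- ECX is defined, EAX supplies the address.
    addDirectMem(BuildMI(MBB, MBBI, DL, TII->get(X86::MOV32rm), X86::ECX),
                 X86::EAX);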
@@ -151,7 +179,7 @@ addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0) {
MachineFunction &MF = *MI->getParent()->getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
const MCInstrDesc &MCID = MI->getDesc();
- unsigned Flags = 0;
+ auto Flags = MachineMemOperand::MONone;
if (MCID.mayLoad())
Flags |= MachineMemOperand::MOLoad;
if (MCID.mayStore())
diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td
index c709c8aca9fa..925f4efb5aa9 100644
--- a/lib/Target/X86/X86InstrCompiler.td
+++ b/lib/Target/X86/X86InstrCompiler.td
@@ -99,18 +99,6 @@ def VAARG_64 : I<0, Pseudo,
(X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
(implicit EFLAGS)]>;
-// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
-// targets. These calls are needed to probe the stack when allocating more than
-// 4k bytes in one go. Touching the stack at 4K increments is necessary to
-// ensure that the guard pages used by the OS virtual memory manager are
-// allocated in correct sequence.
-// The main point of having separate instruction are extra unmodelled effects
-// (compared to ordinary calls) like stack pointer change.
-
-let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
- def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
- "# dynamic stack allocation",
- [(X86WinAlloca)]>;
// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
@@ -132,6 +120,27 @@ def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
Requires<[In64BitMode]>;
}
+// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
+// targets. These calls are needed to probe the stack when allocating more than
+// 4k bytes in one go. Touching the stack at 4K increments is necessary to
+// ensure that the guard pages used by the OS virtual memory manager are
+// allocated in the correct sequence.
+// The main point of having a separate instruction is the extra unmodelled
+// effects (compared to ordinary calls) like the stack pointer change.
+
+let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
+def WIN_ALLOCA_32 : I<0, Pseudo, (outs), (ins GR32:$size),
+ "# dynamic stack allocation",
+ [(X86WinAlloca GR32:$size)]>,
+ Requires<[NotLP64]>;
+
+let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
+def WIN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
+ "# dynamic stack allocation",
+ [(X86WinAlloca GR64:$size)]>,
+ Requires<[In64BitMode]>;
+
+
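As a conceptual model of the guarantee described above (an editorial sketch,
not the actual _chkstk implementation): every 4 KiB page between the old and
the new stack pointer gets touched in order, so no access ever skips past
the current guard page.

    // Illustration only: touch each page of a Size-byte downward stack
    // allocation at most 4 KiB apart, in descending address order.
    void probeStack(volatile char *SP, unsigned long long Size) {
      for (unsigned long long Off = 4096; Off < Size; Off += 4096)
        SP[-(long long)Off] = 0; // fault in (commit) this page
      if (Size)
        SP[-(long long)Size] = 0; // final byte of the allocation
    }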
//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
@@ -250,7 +259,7 @@ def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
- isPseudo = 1 in
+ isPseudo = 1, AddedComplexity = 20 in
def MOV32r0 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
[(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;
@@ -263,7 +272,7 @@ def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)> {
}
let Predicates = [OptForSize, NotSlowIncDec, Not64BitMode],
- AddedComplexity = 1 in {
+ AddedComplexity = 15 in {
// Pseudo instructions for materializing 1 and -1 using XOR+INC/DEC,
// which only require 3 bytes compared to MOV32ri which requires 5.
let Defs = [EFLAGS], isReMaterializable = 1, isPseudo = 1 in {
@@ -278,6 +287,17 @@ let Predicates = [OptForSize, NotSlowIncDec, Not64BitMode],
def : Pat<(i16 -1), (EXTRACT_SUBREG (MOV32r_1), sub_16bit)>;
}
+let isReMaterializable = 1, isPseudo = 1, AddedComplexity = 10 in {
+// AddedComplexity higher than MOV64ri but lower than MOV32r0 and MOV32r1.
+// FIXME: Add itinerary class and Schedule.
+def MOV32ImmSExti8 : I<0, Pseudo, (outs GR32:$dst), (ins i32i8imm:$src), "",
+ [(set GR32:$dst, i32immSExt8:$src)]>,
+ Requires<[OptForMinSize, NotWin64WithoutFP]>;
+def MOV64ImmSExti8 : I<0, Pseudo, (outs GR64:$dst), (ins i64i8imm:$src), "",
+ [(set GR64:$dst, i64immSExt8:$src)]>,
+ Requires<[OptForMinSize, NotWin64WithoutFP]>;
+}
+
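For context on the byte counts (an editorial worked example): MOV32ri always
carries a full imm32, so "mov eax, 7" encodes as B8 07 00 00 00 (5 bytes),
while the push-immediate/pop pair these pseudos are expected to expand to,
"push 7" / "pop eax", encodes as 6A 07 58 (3 bytes). The temporary stack
pointer adjustment is also why they are gated on NotWin64WithoutFP.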
// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
@@ -479,10 +499,13 @@ def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
[(X86TLSCall addr:$sym)]>,
Requires<[Not64BitMode]>;
-// For x86_64, the address of the thunk is passed in %rdi, on return
-// the address of the variable is in %rax. All other registers are preserved.
+// For x86_64, the address of the thunk is passed in %rdi, but the
+// pseudo uses the symbol directly, so we do not add an implicit use of
+// %rdi. The lowering will do the right thing with RDI.
+// On return the address of the variable is in %rax. All other
+// registers are preserved.
let Defs = [RAX, EFLAGS],
- Uses = [RSP, RDI],
+ Uses = [RSP],
usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
"# TLSCall_64",
@@ -568,7 +591,7 @@ def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
- Format ImmMod, string mnemonic> {
+ Format ImmMod, SDPatternOperator Op, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
SchedRW = [WriteALULd, WriteRMW] in {
@@ -577,106 +600,124 @@ def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
!strconcat(mnemonic, "{b}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_NONMEM>, LOCK;
+ [(set EFLAGS, (Op addr:$dst, GR8:$src2))],
+ IIC_ALU_NONMEM>, LOCK;
+
def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
!strconcat(mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_NONMEM>, OpSize16, LOCK;
+ [(set EFLAGS, (Op addr:$dst, GR16:$src2))],
+ IIC_ALU_NONMEM>, OpSize16, LOCK;
+
def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
!strconcat(mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_NONMEM>, OpSize32, LOCK;
+ [(set EFLAGS, (Op addr:$dst, GR32:$src2))],
+ IIC_ALU_NONMEM>, OpSize32, LOCK;
+
def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
!strconcat(mnemonic, "{q}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_NONMEM>, LOCK;
+ [(set EFLAGS, (Op addr:$dst, GR64:$src2))],
+ IIC_ALU_NONMEM>, LOCK;
def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
!strconcat(mnemonic, "{b}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, LOCK;
+ [(set EFLAGS, (Op addr:$dst, (i8 imm:$src2)))],
+ IIC_ALU_MEM>, LOCK;
def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
!strconcat(mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, OpSize16, LOCK;
+ [(set EFLAGS, (Op addr:$dst, (i16 imm:$src2)))],
+ IIC_ALU_MEM>, OpSize16, LOCK;
def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
!strconcat(mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, OpSize32, LOCK;
+ [(set EFLAGS, (Op addr:$dst, (i32 imm:$src2)))],
+ IIC_ALU_MEM>, OpSize32, LOCK;
def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
!strconcat(mnemonic, "{q}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, LOCK;
+ [(set EFLAGS, (Op addr:$dst, i64immSExt32:$src2))],
+ IIC_ALU_MEM>, LOCK;
def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
!strconcat(mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, OpSize16, LOCK;
+ [(set EFLAGS, (Op addr:$dst, i16immSExt8:$src2))],
+ IIC_ALU_MEM>, OpSize16, LOCK;
+
def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
!strconcat(mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, OpSize32, LOCK;
+ [(set EFLAGS, (Op addr:$dst, i32immSExt8:$src2))],
+ IIC_ALU_MEM>, OpSize32, LOCK;
+
def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
!strconcat(mnemonic, "{q}\t",
"{$src2, $dst|$dst, $src2}"),
- [], IIC_ALU_MEM>, LOCK;
+ [(set EFLAGS, (Op addr:$dst, i64immSExt8:$src2))],
+ IIC_ALU_MEM>, LOCK;
}
}
-defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;
-defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;
-defm LOCK_OR : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;
-defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
-defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;
+defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, X86lock_add, "add">;
+defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, X86lock_sub, "sub">;
+defm LOCK_OR : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
+defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
+defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;
-// Optimized codegen when the non-memory output is not used.
multiclass LOCK_ArithUnOp<bits<8> Opc8, bits<8> Opc, Format Form,
- string mnemonic> {
+ int Increment, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
- SchedRW = [WriteALULd, WriteRMW] in {
-
+ SchedRW = [WriteALULd, WriteRMW], Predicates = [NotSlowIncDec] in {
def NAME#8m : I<Opc8, Form, (outs), (ins i8mem :$dst),
!strconcat(mnemonic, "{b}\t$dst"),
- [], IIC_UNARY_MEM>, LOCK;
+ [(set EFLAGS, (X86lock_add addr:$dst, (i8 Increment)))],
+ IIC_UNARY_MEM>, LOCK;
def NAME#16m : I<Opc, Form, (outs), (ins i16mem:$dst),
!strconcat(mnemonic, "{w}\t$dst"),
- [], IIC_UNARY_MEM>, OpSize16, LOCK;
+ [(set EFLAGS, (X86lock_add addr:$dst, (i16 Increment)))],
+ IIC_UNARY_MEM>, OpSize16, LOCK;
def NAME#32m : I<Opc, Form, (outs), (ins i32mem:$dst),
!strconcat(mnemonic, "{l}\t$dst"),
- [], IIC_UNARY_MEM>, OpSize32, LOCK;
+ [(set EFLAGS, (X86lock_add addr:$dst, (i32 Increment)))],
+ IIC_UNARY_MEM>, OpSize32, LOCK;
def NAME#64m : RI<Opc, Form, (outs), (ins i64mem:$dst),
!strconcat(mnemonic, "{q}\t$dst"),
- [], IIC_UNARY_MEM>, LOCK;
+ [(set EFLAGS, (X86lock_add addr:$dst, (i64 Increment)))],
+ IIC_UNARY_MEM>, LOCK;
}
}
-defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "inc">;
-defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "dec">;
+defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, 1, "inc">;
+defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, -1, "dec">;
// Atomic compare and swap.
multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,
@@ -719,6 +760,38 @@ defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b",
IIC_CMPX_LOCK_8B>;
}
+// This pseudo must be used when the frame uses RBX as
+// the base pointer. Indeed, in such a situation RBX is a reserved
+// register and the register allocator will ignore any use/def of
+// it. In other words, the register allocator will not fix the clobbering
+// of RBX that happens when setting the arguments for the instruction.
+//
+// Unlike the actual related instruction, we mark that this one
+// defines EBX (instead of using EBX).
+// The rationale is that we will define RBX during the expansion of
+// the pseudo. The argument feeding EBX is ebx_input.
+//
+// The additional argument, $ebx_save, is a temporary register used to
+// save the value of RBX across the actual instruction.
+//
+// To make sure the register assigned to $ebx_save does not interfere with
+// the definition of the actual instruction, we use a definition $dst which
+// is tied to $ebx_save. That way, the live-range of $ebx_save spans across
+// the instruction and we are sure we will have a valid register to restore
+// the value of RBX.
+let Defs = [EAX, EDX, EBX, EFLAGS], Uses = [EAX, ECX, EDX],
+ SchedRW = [WriteALULd, WriteRMW], isCodeGenOnly = 1, isPseudo = 1,
+ Constraints = "$ebx_save = $dst", usesCustomInserter = 1 in {
+def LCMPXCHG8B_SAVE_EBX :
+ I<0, Pseudo, (outs GR32:$dst),
+ (ins i64mem:$ptr, GR32:$ebx_input, GR32:$ebx_save),
+ !strconcat("cmpxchg8b", "\t$ptr"),
+ [(set GR32:$dst, (X86cas8save_ebx addr:$ptr, GR32:$ebx_input,
+ GR32:$ebx_save))],
+ IIC_CMPX_LOCK_8B>;
+}
+
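Concretely, the custom inserter is expected to produce a sequence of this
shape (editorial sketch, register choices illustrative): copy EBX into the
register assigned to $ebx_save, copy $ebx_input into EBX, emit the real
LCMPXCHG8B, then copy $ebx_save back into EBX so the base pointer is intact
for the instructions that follow.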
+
let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
Predicates = [HasCmpxchg16b], SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
@@ -726,6 +799,20 @@ defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
IIC_CMPX_LOCK_16B>, REX_W;
}
+// Same as LCMPXCHG8B_SAVE_EBX but for the 16-byte variant.
+let Defs = [RAX, RDX, RBX, EFLAGS], Uses = [RAX, RCX, RDX],
+ Predicates = [HasCmpxchg16b], SchedRW = [WriteALULd, WriteRMW],
+ isCodeGenOnly = 1, isPseudo = 1, Constraints = "$rbx_save = $dst",
+ usesCustomInserter = 1 in {
+def LCMPXCHG16B_SAVE_RBX :
+ I<0, Pseudo, (outs GR64:$dst),
+ (ins i128mem:$ptr, GR64:$rbx_input, GR64:$rbx_save),
+ !strconcat("cmpxchg16b", "\t$ptr"),
+ [(set GR64:$dst, (X86cas16save_rbx addr:$ptr, GR64:$rbx_input,
+ GR64:$rbx_save))],
+ IIC_CMPX_LOCK_16B>;
+}
+
defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg",
X86cas, IIC_CMPX_LOCK_8, IIC_CMPX_LOCK>;
@@ -926,6 +1013,18 @@ def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//
+// Use AND/OR to store 0/-1 in memory when optimizing for minsize. This saves
+// binary size compared to a regular MOV, but it introduces an unnecessary
+// load, so is not suitable for regular or optsize functions.
+let Predicates = [OptForMinSize] in {
+def : Pat<(store (i16 0), addr:$dst), (AND16mi8 addr:$dst, 0)>;
+def : Pat<(store (i32 0), addr:$dst), (AND32mi8 addr:$dst, 0)>;
+def : Pat<(store (i64 0), addr:$dst), (AND64mi8 addr:$dst, 0)>;
+def : Pat<(store (i16 -1), addr:$dst), (OR16mi8 addr:$dst, -1)>;
+def : Pat<(store (i32 -1), addr:$dst), (OR32mi8 addr:$dst, -1)>;
+def : Pat<(store (i64 -1), addr:$dst), (OR64mi8 addr:$dst, -1)>;
+}
+
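A worked size comparison (editorial, using a simple [rsp] address):
"mov dword ptr [rsp], 0" encodes as C7 04 24 00 00 00 00 (7 bytes, 4 of
them the imm32), while "and dword ptr [rsp], 0" uses the sign-extended imm8
form and encodes as 83 24 24 00 (4 bytes); likewise "or dword ptr [rsp], -1"
is 83 0C 24 FF. Those 3 bytes are the minsize win; the cost is that AND/OR
must first read the old memory value, which is the unnecessary load noted
above.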
// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool :$dst)), (MOV32ri tconstpool :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable :$dst)), (MOV32ri tjumptable :$dst)>;
@@ -994,22 +1093,22 @@ def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tconstpool:$src)>,
- Requires<[NearData, IsStatic]>;
+ Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tjumptable:$src)>,
- Requires<[NearData, IsStatic]>;
+ Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tglobaladdr:$src)>,
- Requires<[NearData, IsStatic]>;
+ Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
(MOV64mi32 addr:$dst, texternalsym:$src)>,
- Requires<[NearData, IsStatic]>;
+ Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
(MOV64mi32 addr:$dst, mcsym:$src)>,
- Requires<[NearData, IsStatic]>;
+ Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tblockaddress:$src)>,
- Requires<[NearData, IsStatic]>;
+ Requires<[NearData, IsNotPIC]>;
def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;
@@ -1139,12 +1238,13 @@ defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;
// zextload bool -> zextload byte
-def : Pat<(zextloadi8i1 addr:$src), (AND8ri (MOV8rm addr:$src), (i8 1))>;
-def : Pat<(zextloadi16i1 addr:$src), (AND16ri8 (MOVZX16rm8 addr:$src), (i16 1))>;
-def : Pat<(zextloadi32i1 addr:$src), (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1))>;
+// An i1 is stored in one byte in zero-extended form; the upper bits are
+// cleaned up before the store, so no masking is needed after the load.
+def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>;
+def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
+def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src),
- (SUBREG_TO_REG (i64 0),
- (AND32ri8 (MOVZX32rm8 addr:$src), (i32 1)), sub_32bit)>;
+ (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
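In practice (editorial example) the i32 case goes from
"movzbl (%rdi), %eax ; andl $1, %eax" to just "movzbl (%rdi), %eax", since
the store side already guarantees the upper seven bits are zero.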
// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
@@ -1305,7 +1405,7 @@ def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
(SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
-def : Pat<(store (add (loadi64 addr:$dst), 0x00000000800000000), addr:$dst),
+def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
(SUB64mi32 addr:$dst, 0xffffffff80000000)>;
// To avoid needing to materialize an immediate in a register, use a 32-bit and
@@ -1450,6 +1550,10 @@ def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
(EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
sub_8bit_hi)>,
Requires<[Not64BitMode]>;
+def : Pat<(i8 (trunc (srl_su (i32 (anyext GR16:$src)), (i8 8)))),
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)>,
+ Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
(EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
sub_8bit_hi)>,
diff --git a/lib/Target/X86/X86InstrControl.td b/lib/Target/X86/X86InstrControl.td
index 8c351a51c460..bb5f9117f032 100644
--- a/lib/Target/X86/X86InstrControl.td
+++ b/lib/Target/X86/X86InstrControl.td
@@ -22,21 +22,21 @@
let isTerminator = 1, isReturn = 1, isBarrier = 1,
hasCtrlDep = 1, FPForm = SpecialFP, SchedRW = [WriteJumpLd] in {
def RETL : I <0xC3, RawFrm, (outs), (ins variable_ops),
- "ret{l}", [(X86retflag 0)], IIC_RET>, OpSize32,
+ "ret{l}", [], IIC_RET>, OpSize32,
Requires<[Not64BitMode]>;
def RETQ : I <0xC3, RawFrm, (outs), (ins variable_ops),
- "ret{q}", [(X86retflag 0)], IIC_RET>, OpSize32,
+ "ret{q}", [], IIC_RET>, OpSize32,
Requires<[In64BitMode]>;
def RETW : I <0xC3, RawFrm, (outs), (ins),
"ret{w}",
[], IIC_RET>, OpSize16;
def RETIL : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
"ret{l}\t$amt",
- [(X86retflag timm:$amt)], IIC_RET_IMM>, OpSize32,
+ [], IIC_RET_IMM>, OpSize32,
Requires<[Not64BitMode]>;
def RETIQ : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
"ret{q}\t$amt",
- [(X86retflag timm:$amt)], IIC_RET_IMM>, OpSize32,
+ [], IIC_RET_IMM>, OpSize32,
Requires<[In64BitMode]>;
def RETIW : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt),
"ret{w}\t$amt",
@@ -64,8 +64,8 @@ let isTerminator = 1, isReturn = 1, isBarrier = 1,
def IRET64 : RI <0xcf, RawFrm, (outs), (ins), "iretq", [],
IIC_IRET>, Requires<[In64BitMode]>;
let isCodeGenOnly = 1 in
- def IRET : PseudoI<(outs), (ins i16imm:$adj), [(X86iret timm:$adj)]>;
-
+ def IRET : PseudoI<(outs), (ins i32imm:$adj), [(X86iret timm:$adj)]>;
+ def RET : PseudoI<(outs), (ins i32imm:$adj, variable_ops), [(X86retflag timm:$adj)]>;
}
// Unconditional branches.
diff --git a/lib/Target/X86/X86InstrFPStack.td b/lib/Target/X86/X86InstrFPStack.td
index 03ae21125b0e..078dab41502a 100644
--- a/lib/Target/X86/X86InstrFPStack.td
+++ b/lib/Target/X86/X86InstrFPStack.td
@@ -326,7 +326,7 @@ def FCOM32m : FPI<0xD8, MRM2m, (outs), (ins f32mem:$src), "fcom{s}\t$src">;
def FCOMP32m : FPI<0xD8, MRM3m, (outs), (ins f32mem:$src), "fcomp{s}\t$src">;
def FLDENVm : FPI<0xD9, MRM4m, (outs), (ins f32mem:$src), "fldenv\t$src">;
-def FSTENVm : FPI<0xD9, MRM6m, (outs f32mem:$dst), (ins), "fnstenv\t$dst">;
+def FSTENVm : FPI<0xD9, MRM6m, (outs), (ins f32mem:$dst), "fnstenv\t$dst">;
def FICOM32m : FPI<0xDA, MRM2m, (outs), (ins i32mem:$src), "ficom{l}\t$src">;
def FICOMP32m: FPI<0xDA, MRM3m, (outs), (ins i32mem:$src), "ficomp{l}\t$src">;
@@ -334,15 +334,15 @@ def FICOMP32m: FPI<0xDA, MRM3m, (outs), (ins i32mem:$src), "ficomp{l}\t$src">;
def FCOM64m : FPI<0xDC, MRM2m, (outs), (ins f64mem:$src), "fcom{l}\t$src">;
def FCOMP64m : FPI<0xDC, MRM3m, (outs), (ins f64mem:$src), "fcomp{l}\t$src">;
-def FRSTORm : FPI<0xDD, MRM4m, (outs f32mem:$dst), (ins), "frstor\t$dst">;
-def FSAVEm : FPI<0xDD, MRM6m, (outs f32mem:$dst), (ins), "fnsave\t$dst">;
-def FNSTSWm : FPI<0xDD, MRM7m, (outs i16mem:$dst), (ins), "fnstsw\t$dst">;
+def FRSTORm : FPI<0xDD, MRM4m, (outs), (ins f32mem:$dst), "frstor\t$dst">;
+def FSAVEm : FPI<0xDD, MRM6m, (outs), (ins f32mem:$dst), "fnsave\t$dst">;
+def FNSTSWm : FPI<0xDD, MRM7m, (outs), (ins i16mem:$dst), "fnstsw\t$dst">;
def FICOM16m : FPI<0xDE, MRM2m, (outs), (ins i16mem:$src), "ficom{s}\t$src">;
def FICOMP16m: FPI<0xDE, MRM3m, (outs), (ins i16mem:$src), "ficomp{s}\t$src">;
def FBLDm : FPI<0xDF, MRM4m, (outs), (ins f80mem:$src), "fbld\t$src">;
-def FBSTPm : FPI<0xDF, MRM6m, (outs f80mem:$dst), (ins), "fbstp\t$dst">;
+def FBSTPm : FPI<0xDF, MRM6m, (outs), (ins f80mem:$dst), "fbstp\t$dst">;
// Floating point cmovs.
class FpIf32CMov<dag outs, dag ins, FPFormat fp, list<dag> pattern> :
diff --git a/lib/Target/X86/X86InstrFormats.td b/lib/Target/X86/X86InstrFormats.td
index e2fa295c0230..5183adc834b1 100644
--- a/lib/Target/X86/X86InstrFormats.td
+++ b/lib/Target/X86/X86InstrFormats.td
@@ -845,7 +845,7 @@ class AVXPCLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
class FMA3<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin>, T8PD,
- VEX_4V, FMASC, Requires<[HasFMA]>;
+ VEX_4V, FMASC, Requires<[HasFMA, NoVLX]>;
// FMA4 Instruction Templates
class FMA4<bits<8> o, Format F, dag outs, dag ins, string asm,
diff --git a/lib/Target/X86/X86InstrFragmentsSIMD.td b/lib/Target/X86/X86InstrFragmentsSIMD.td
index 643286324e25..ea54f049ec7a 100644
--- a/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -35,7 +35,7 @@ def bc_mmx : PatFrag<(ops node:$in), (x86mmx (bitconvert node:$in))>;
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//
-def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
+def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisFP<0>, SDTCisSameAs<1, 2>,
SDTCisFP<1>, SDTCisVT<3, i8>,
SDTCisVec<1>]>;
def SDTX86CmpTestSae : SDTypeProfile<1, 3, [SDTCisVT<0, i32>,
@@ -60,9 +60,8 @@ def X86fandn : SDNode<"X86ISD::FANDN", SDTFPBinOp,
[SDNPCommutative, SDNPAssociative]>;
def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
-def X86frsqrt14s: SDNode<"X86ISD::FRSQRT", SDTFPBinOp>;
-def X86frcp14s : SDNode<"X86ISD::FRCP", SDTFPBinOp>;
-def X86fgetsign: SDNode<"X86ISD::FGETSIGNx86",SDTFPToIntOp>;
+def X86frsqrt14s: SDNode<"X86ISD::FRSQRTS", SDTFPBinOp>;
+def X86frcp14s : SDNode<"X86ISD::FRCPS", SDTFPBinOp>;
def X86fhadd : SDNode<"X86ISD::FHADD", SDTFPBinOp>;
def X86fhsub : SDNode<"X86ISD::FHSUB", SDTFPBinOp>;
def X86hadd : SDNode<"X86ISD::HADD", SDTIntBinOp>;
@@ -72,7 +71,6 @@ def X86comiSae : SDNode<"X86ISD::COMI", SDTX86CmpTestSae>;
def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
def X86ucomiSae: SDNode<"X86ISD::UCOMI", SDTX86CmpTestSae>;
def X86cmps : SDNode<"X86ISD::FSETCC", SDTX86Cmps>;
-//def X86cmpsd : SDNode<"X86ISD::FSETCCsd", SDTX86Cmpsd>;
def X86cvtdq2pd: SDNode<"X86ISD::CVTDQ2PD",
SDTypeProfile<1, 1, [SDTCisVT<0, v2f64>,
SDTCisVT<1, v4i32>]>>;
@@ -95,9 +93,9 @@ def X86dbpsadbw : SDNode<"X86ISD::DBPSADBW",
def X86andnp : SDNode<"X86ISD::ANDNP",
SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>]>>;
-def X86psign : SDNode<"X86ISD::PSIGN",
- SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
- SDTCisSameAs<0,2>]>>;
+def X86multishift : SDNode<"X86ISD::MULTISHIFT",
+ SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
+ SDTCisSameAs<1,2>]>>;
def X86pextrb : SDNode<"X86ISD::PEXTRB",
SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisVT<1, v16i8>,
SDTCisPtrTy<2>]>>;
@@ -137,46 +135,39 @@ def X86vtrunc : SDNode<"X86ISD::VTRUNC", SDTVtrunc>;
def X86vtruncs : SDNode<"X86ISD::VTRUNCS", SDTVtrunc>;
def X86vtruncus : SDNode<"X86ISD::VTRUNCUS", SDTVtrunc>;
-def X86trunc : SDNode<"X86ISD::TRUNC",
- SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisInt<1>,
- SDTCisOpSmallerThanOp<0, 1>]>>;
def X86vfpext : SDNode<"X86ISD::VFPEXT",
- SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<0>, SDTCisFP<1>,
- SDTCisOpSmallerThanOp<1, 0>]>>;
+ SDTypeProfile<1, 1, [SDTCVecEltisVT<0, f64>,
+ SDTCVecEltisVT<1, f32>,
+ SDTCisSameSizeAs<0, 1>]>>;
def X86vfpround: SDNode<"X86ISD::VFPROUND",
- SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<0>, SDTCisFP<1>,
- SDTCisOpSmallerThanOp<0, 1>]>>;
+ SDTypeProfile<1, 1, [SDTCVecEltisVT<0, f32>,
+ SDTCVecEltisVT<1, f64>,
+ SDTCisSameSizeAs<0, 1>]>>;
def X86fround: SDNode<"X86ISD::VFPROUND",
- SDTypeProfile<1, 2, [SDTCisFP<0>, SDTCisFP<1>,SDTCisFP<2>,
- SDTCVecEltisVT<0, f32>,
- SDTCVecEltisVT<1, f64>,
+ SDTypeProfile<1, 2, [SDTCVecEltisVT<0, f32>,
+ SDTCisSameAs<0, 1>,
SDTCVecEltisVT<2, f64>,
- SDTCisOpSmallerThanOp<0, 1>]>>;
+ SDTCisSameSizeAs<0, 2>]>>;
def X86froundRnd: SDNode<"X86ISD::VFPROUND",
- SDTypeProfile<1, 3, [SDTCisFP<0>, SDTCisFP<1>,SDTCisFP<2>,
- SDTCVecEltisVT<0, f32>,
- SDTCVecEltisVT<1, f64>,
+ SDTypeProfile<1, 3, [SDTCVecEltisVT<0, f32>,
+ SDTCisSameAs<0, 1>,
SDTCVecEltisVT<2, f64>,
- SDTCisOpSmallerThanOp<0, 1>,
- SDTCisInt<3>]>>;
+ SDTCisSameSizeAs<0, 2>,
+ SDTCisVT<3, i32>]>>;
def X86fpext : SDNode<"X86ISD::VFPEXT",
- SDTypeProfile<1, 2, [SDTCisFP<0>, SDTCisFP<1>,SDTCisFP<2>,
- SDTCVecEltisVT<0, f64>,
- SDTCVecEltisVT<1, f32>,
+ SDTypeProfile<1, 2, [SDTCVecEltisVT<0, f64>,
+ SDTCisSameAs<0, 1>,
SDTCVecEltisVT<2, f32>,
- SDTCisOpSmallerThanOp<1, 0>]>>;
+ SDTCisSameSizeAs<0, 2>]>>;
def X86fpextRnd : SDNode<"X86ISD::VFPEXT",
- SDTypeProfile<1, 3, [SDTCisFP<0>, SDTCisFP<1>,SDTCisFP<2>,
- SDTCVecEltisVT<0, f64>,
- SDTCVecEltisVT<1, f32>,
+ SDTypeProfile<1, 3, [SDTCVecEltisVT<0, f64>,
+ SDTCisSameAs<0, 1>,
SDTCVecEltisVT<2, f32>,
- SDTCisOpSmallerThanOp<1, 0>,
- SDTCisInt<3>]>>;
+ SDTCisSameSizeAs<0, 2>,
+ SDTCisVT<3, i32>]>>;
def X86vshldq : SDNode<"X86ISD::VSHLDQ", SDTIntShiftOp>;
def X86vshrdq : SDNode<"X86ISD::VSRLDQ", SDTIntShiftOp>;
@@ -221,6 +212,8 @@ def X86vsra : SDNode<"X86ISD::VSRA",
SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisVec<2>]>>;
+def X86vsrav : SDNode<"X86ISD::VSRAV" , SDTIntShiftOp>;
+
def X86vshli : SDNode<"X86ISD::VSHLI", SDTIntShiftOp>;
def X86vsrli : SDNode<"X86ISD::VSRLI", SDTIntShiftOp>;
def X86vsrai : SDNode<"X86ISD::VSRAI", SDTIntShiftOp>;
@@ -250,10 +243,24 @@ def X86vpcomu : SDNode<"X86ISD::VPCOMU",
SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>,
SDTCisVT<3, i8>]>>;
+def X86vpermil2 : SDNode<"X86ISD::VPERMIL2",
+ SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>,
+ SDTCisSameSizeAs<0,3>,
+ SDTCisSameNumEltsAs<0, 3>,
+ SDTCisVT<4, i8>]>>;
+def X86vpperm : SDNode<"X86ISD::VPPERM",
+ SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>]>>;
def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
SDTCisVec<1>,
SDTCisSameAs<2, 1>]>;
+
+def SDTX86Testm : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
+ SDTCisSameAs<2, 1>, SDTCVecEltisVT<0, i1>,
+ SDTCisSameNumEltsAs<0, 1>]>;
+
def X86addus : SDNode<"X86ISD::ADDUS", SDTIntBinOp>;
def X86subus : SDNode<"X86ISD::SUBUS", SDTIntBinOp>;
def X86adds : SDNode<"X86ISD::ADDS", SDTIntBinOp>;
@@ -264,15 +271,22 @@ def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;
def X86kortest : SDNode<"X86ISD::KORTEST", SDTX86CmpPTest>;
def X86ktest : SDNode<"X86ISD::KTEST", SDTX86CmpPTest>;
-def X86testm : SDNode<"X86ISD::TESTM", SDTypeProfile<1, 2, [SDTCisVec<0>,
- SDTCisVec<1>, SDTCisSameAs<2, 1>,
- SDTCVecEltisVT<0, i1>,
- SDTCisSameNumEltsAs<0, 1>]>>;
-def X86testnm : SDNode<"X86ISD::TESTNM", SDTypeProfile<1, 2, [SDTCisVec<0>,
- SDTCisVec<1>, SDTCisSameAs<2, 1>,
- SDTCVecEltisVT<0, i1>,
- SDTCisSameNumEltsAs<0, 1>]>>;
-def X86select : SDNode<"X86ISD::SELECT" , SDTSelect>;
+def X86testm : SDNode<"X86ISD::TESTM", SDTX86Testm, [SDNPCommutative]>;
+def X86testnm : SDNode<"X86ISD::TESTNM", SDTX86Testm, [SDNPCommutative]>;
+
+def X86movmsk : SDNode<"X86ISD::MOVMSK",
+ SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVec<1>]>>;
+
+def X86select : SDNode<"X86ISD::SELECT",
+ SDTypeProfile<1, 3, [SDTCVecEltisVT<1, i1>,
+ SDTCisSameAs<0, 2>,
+ SDTCisSameAs<2, 3>,
+ SDTCisSameNumEltsAs<0, 1>]>>;
+
+def X86selects : SDNode<"X86ISD::SELECT",
+ SDTypeProfile<1, 3, [SDTCisVT<1, i1>,
+ SDTCisSameAs<0, 2>,
+ SDTCisSameAs<2, 3>]>>;
def X86pmuludq : SDNode<"X86ISD::PMULUDQ",
SDTypeProfile<1, 2, [SDTCVecEltisVT<0, i64>,
@@ -308,9 +322,16 @@ def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>,
def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>, SDTCisVT<3, i8>]>;
def SDTFPBinOpImmRound: SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCisSameAs<0,1>,
- SDTCisSameAs<0,2>, SDTCisInt<3>, SDTCisInt<4>]>;
+ SDTCisSameAs<0,2>, SDTCisVT<3, i32>, SDTCisVT<4, i32>]>;
+def SDTFPTernaryOpImmRound: SDTypeProfile<1, 5, [SDTCisFP<0>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>,
+ SDTCisInt<3>,
+ SDTCisSameSizeAs<0, 3>,
+ SDTCisSameNumEltsAs<0, 3>,
+ SDTCisVT<4, i32>,
+ SDTCisVT<5, i32>]>;
def SDTFPUnaryOpImmRound: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
- SDTCisInt<2>, SDTCisInt<3>]>;
+ SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
def SDTVBroadcast : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDTVBroadcastm : SDTypeProfile<1, 1, [SDTCisVec<0>,
@@ -324,21 +345,16 @@ def SDTTernlog : SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisVT<4, i8>]>;
def SDTFPBinOpRound : SDTypeProfile<1, 3, [ // fadd_round, fmul_round, etc.
- SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisFP<0>, SDTCisInt<3>]>;
+ SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisFP<0>, SDTCisVT<3, i32>]>;
def SDTFPUnaryOpRound : SDTypeProfile<1, 2, [ // fsqrt_round, fgetexp_round, etc.
- SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisInt<2>]>;
+ SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisVT<2, i32>]>;
def SDTFma : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>,
SDTCisSameAs<1,2>, SDTCisSameAs<1,3>]>;
def SDTFmaRound : SDTypeProfile<1, 4, [SDTCisSameAs<0,1>,
- SDTCisSameAs<1,2>, SDTCisSameAs<1,3>, SDTCisInt<4>]>;
-def STDFp1SrcRm : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>,
- SDTCisVec<0>, SDTCisVT<2, i32>]>;
-def STDFp2SrcRm : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>,
- SDTCisVec<0>, SDTCisVT<3, i32>]>;
-def STDFp3SrcRm : SDTypeProfile<1, 4, [SDTCisSameAs<0,1>,
- SDTCisVec<0>, SDTCisVT<3, i32>, SDTCisVT<4, i32>]>;
+ SDTCisSameAs<1,2>, SDTCisSameAs<1,3>,
+ SDTCisVT<4, i32>]>;
def X86PAlignr : SDNode<"X86ISD::PALIGNR", SDTShuff3OpI>;
def X86VAlign : SDNode<"X86ISD::VALIGN", SDTShuff3OpI>;
@@ -405,7 +421,8 @@ def X86vpternlog : SDNode<"X86ISD::VPTERNLOG", SDTTernlog>;
def X86VPerm2x128 : SDNode<"X86ISD::VPERM2X128", SDTShuff3OpI>;
-def X86VFixupimm : SDNode<"X86ISD::VFIXUPIMM", SDTFPBinOpImmRound>;
+def X86VFixupimm : SDNode<"X86ISD::VFIXUPIMM", SDTFPTernaryOpImmRound>;
+def X86VFixupimmScalar : SDNode<"X86ISD::VFIXUPIMMS", SDTFPTernaryOpImmRound>;
def X86VRange : SDNode<"X86ISD::VRANGE", SDTFPBinOpImmRound>;
def X86VReduce : SDNode<"X86ISD::VREDUCE", SDTFPUnaryOpImmRound>;
def X86VRndScale : SDNode<"X86ISD::VRNDSCALE", SDTFPUnaryOpImmRound>;
@@ -422,10 +439,6 @@ def X86Vfpclasss : SDNode<"X86ISD::VFPCLASSS",
def X86SubVBroadcast : SDNode<"X86ISD::SUBV_BROADCAST",
SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
SDTCisSubVecOfVec<1, 0>]>, []>;
-// SDTCisSubVecOfVec restriction cannot be applied for 128 bit version of VBROADCASTI32x2.
-def X86SubV32x2Broadcast : SDNode<"X86ISD::SUBV_BROADCAST",
- SDTypeProfile<1, 1, [SDTCisVec<0>,
- SDTCisSameAs<0,1>]>, []>;
def X86VBroadcast : SDNode<"X86ISD::VBROADCAST", SDTVBroadcast>;
def X86VBroadcastm : SDNode<"X86ISD::VBROADCASTM", SDTVBroadcastm>;
@@ -446,11 +459,12 @@ def X86fmulRnd : SDNode<"X86ISD::FMUL_RND", SDTFPBinOpRound>;
def X86fdivRnd : SDNode<"X86ISD::FDIV_RND", SDTFPBinOpRound>;
def X86fmaxRnd : SDNode<"X86ISD::FMAX_RND", SDTFPBinOpRound>;
def X86scalef : SDNode<"X86ISD::SCALEF", SDTFPBinOpRound>;
+def X86scalefs : SDNode<"X86ISD::SCALEFS", SDTFPBinOpRound>;
def X86fminRnd : SDNode<"X86ISD::FMIN_RND", SDTFPBinOpRound>;
def X86fsqrtRnd : SDNode<"X86ISD::FSQRT_RND", SDTFPUnaryOpRound>;
-def X86fsqrtRnds : SDNode<"X86ISD::FSQRT_RND", STDFp2SrcRm>;
+def X86fsqrtRnds : SDNode<"X86ISD::FSQRT_RND", SDTFPBinOpRound>;
def X86fgetexpRnd : SDNode<"X86ISD::FGETEXP_RND", SDTFPUnaryOpRound>;
-def X86fgetexpRnds : SDNode<"X86ISD::FGETEXP_RND", STDFp2SrcRm>;
+def X86fgetexpRnds : SDNode<"X86ISD::FGETEXP_RND", SDTFPBinOpRound>;
def X86Fmadd : SDNode<"X86ISD::FMADD", SDTFma>;
def X86Fnmadd : SDNode<"X86ISD::FNMADD", SDTFma>;
@@ -466,15 +480,18 @@ def X86FnmsubRnd : SDNode<"X86ISD::FNMSUB_RND", SDTFmaRound>;
def X86FmaddsubRnd : SDNode<"X86ISD::FMADDSUB_RND", SDTFmaRound>;
def X86FmsubaddRnd : SDNode<"X86ISD::FMSUBADD_RND", SDTFmaRound>;
-def X86rsqrt28 : SDNode<"X86ISD::RSQRT28", STDFp1SrcRm>;
-def X86rcp28 : SDNode<"X86ISD::RCP28", STDFp1SrcRm>;
-def X86exp2 : SDNode<"X86ISD::EXP2", STDFp1SrcRm>;
+def x86vpmadd52l : SDNode<"X86ISD::VPMADD52L", SDTFma>;
+def x86vpmadd52h : SDNode<"X86ISD::VPMADD52H", SDTFma>;
-def X86rsqrt28s : SDNode<"X86ISD::RSQRT28", STDFp2SrcRm>;
-def X86rcp28s : SDNode<"X86ISD::RCP28", STDFp2SrcRm>;
-def X86RndScales : SDNode<"X86ISD::VRNDSCALE", STDFp3SrcRm>;
-def X86Reduces : SDNode<"X86ISD::VREDUCE", STDFp3SrcRm>;
-def X86GetMants : SDNode<"X86ISD::VGETMANT", STDFp3SrcRm>;
+def X86rsqrt28 : SDNode<"X86ISD::RSQRT28", SDTFPUnaryOpRound>;
+def X86rcp28 : SDNode<"X86ISD::RCP28", SDTFPUnaryOpRound>;
+def X86exp2 : SDNode<"X86ISD::EXP2", SDTFPUnaryOpRound>;
+
+def X86rsqrt28s : SDNode<"X86ISD::RSQRT28", SDTFPBinOpRound>;
+def X86rcp28s : SDNode<"X86ISD::RCP28", SDTFPBinOpRound>;
+def X86RndScales : SDNode<"X86ISD::VRNDSCALE", SDTFPBinOpImmRound>;
+def X86Reduces : SDNode<"X86ISD::VREDUCE", SDTFPBinOpImmRound>;
+def X86GetMants : SDNode<"X86ISD::VGETMANT", SDTFPBinOpImmRound>;
def SDT_PCMPISTRI : SDTypeProfile<2, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
SDTCisVT<2, v16i8>, SDTCisVT<3, v16i8>,
@@ -496,90 +513,62 @@ def SDTintToFPRound: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>,
SDTCisSameAs<0,1>, SDTCisInt<2>,
SDTCisVT<3, i32>]>;
-def SDTDoubleToInt: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisInt<0>, SDTCVecEltisVT<1, f64>]>;
def SDTFloatToInt: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisInt<0>, SDTCVecEltisVT<1, f32>]>;
+ SDTCisInt<0>, SDTCisFP<1>]>;
-def SDTDoubleToIntRnd: SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisInt<0>, SDTCVecEltisVT<1, f64>]>;
-def SDTSDoubleToIntRnd: SDTypeProfile<1, 2, [SDTCisInt<0>,SDTCisFP<1>,
- SDTCVecEltisVT<1, f64>, SDTCisInt<2>]>;
def SDTFloatToIntRnd: SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisInt<0>, SDTCVecEltisVT<1, f32>]>;
+ SDTCisInt<0>, SDTCisFP<1>,
+ SDTCisVT<2, i32>]>;
def SDTSFloatToIntRnd: SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisFP<1>,
- SDTCVecEltisVT<1, f32>, SDTCisInt<2>]>;
+ SDTCisVec<1>, SDTCisVT<2, i32>]>;
def SDTVintToFPRound: SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<0>, SDTCVecEltisVT<1, i32>,
- SDTCisInt<2>]>;
-def SDTVlongToFPRound: SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<0>, SDTCVecEltisVT<1, i64>,
- SDTCisInt<2>]>;
-
-def SDTVFPToIntRound: SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<1>, SDTCVecEltisVT<0, i32>,
- SDTCisInt<2>]>;
-def SDTVFPToLongRound: SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<1>, SDTCVecEltisVT<0, i64>,
- SDTCisInt<2>]>;
+ SDTCisFP<0>, SDTCisInt<1>,
+ SDTCisVT<2, i32>]>;
// Scalar
def X86SintToFpRnd : SDNode<"X86ISD::SINT_TO_FP_RND", SDTintToFPRound>;
def X86UintToFpRnd : SDNode<"X86ISD::UINT_TO_FP_RND", SDTintToFPRound>;
-def X86cvttss2IntRnd : SDNode<"X86ISD::FP_TO_SINT_RND", SDTSFloatToIntRnd>;
-def X86cvttss2UIntRnd : SDNode<"X86ISD::FP_TO_UINT_RND", SDTSFloatToIntRnd>;
-def X86cvttsd2IntRnd : SDNode<"X86ISD::FP_TO_SINT_RND", SDTSDoubleToIntRnd>;
-def X86cvttsd2UIntRnd : SDNode<"X86ISD::FP_TO_UINT_RND", SDTSDoubleToIntRnd>;
+def X86cvtts2IntRnd : SDNode<"X86ISD::FP_TO_SINT_RND", SDTSFloatToIntRnd>;
+def X86cvtts2UIntRnd : SDNode<"X86ISD::FP_TO_UINT_RND", SDTSFloatToIntRnd>;
+
+def X86cvts2si : SDNode<"X86ISD::SCALAR_FP_TO_SINT_RND", SDTSFloatToIntRnd>;
+def X86cvts2usi : SDNode<"X86ISD::SCALAR_FP_TO_UINT_RND", SDTSFloatToIntRnd>;
+
// Vector with rounding mode
// cvtt fp-to-int stuff
-def X86VFpToSintRnd : SDNode<"ISD::FP_TO_SINT", SDTVFPToIntRound>;
-def X86VFpToUintRnd : SDNode<"ISD::FP_TO_UINT", SDTVFPToIntRound>;
-def X86VFpToSlongRnd : SDNode<"ISD::FP_TO_SINT", SDTVFPToLongRound>;
-def X86VFpToUlongRnd : SDNode<"ISD::FP_TO_UINT", SDTVFPToLongRound>;
+def X86VFpToSintRnd : SDNode<"ISD::FP_TO_SINT", SDTFloatToIntRnd>;
+def X86VFpToUintRnd : SDNode<"ISD::FP_TO_UINT", SDTFloatToIntRnd>;
def X86VSintToFpRnd : SDNode<"ISD::SINT_TO_FP", SDTVintToFPRound>;
def X86VUintToFpRnd : SDNode<"ISD::UINT_TO_FP", SDTVintToFPRound>;
-def X86VSlongToFpRnd : SDNode<"ISD::SINT_TO_FP", SDTVlongToFPRound>;
-def X86VUlongToFpRnd : SDNode<"ISD::UINT_TO_FP", SDTVlongToFPRound>;
// cvt fp-to-int stuff
-def X86cvtps2IntRnd : SDNode<"X86ISD::FP_TO_SINT_RND", SDTFloatToIntRnd>;
-def X86cvtps2UIntRnd : SDNode<"X86ISD::FP_TO_UINT_RND", SDTFloatToIntRnd>;
-def X86cvtpd2IntRnd : SDNode<"X86ISD::FP_TO_SINT_RND", SDTDoubleToIntRnd>;
-def X86cvtpd2UIntRnd : SDNode<"X86ISD::FP_TO_UINT_RND", SDTDoubleToIntRnd>;
+def X86cvtp2IntRnd : SDNode<"X86ISD::FP_TO_SINT_RND", SDTFloatToIntRnd>;
+def X86cvtp2UIntRnd : SDNode<"X86ISD::FP_TO_UINT_RND", SDTFloatToIntRnd>;
// Vector without rounding mode
-def X86cvtps2Int : SDNode<"X86ISD::FP_TO_SINT_RND", SDTFloatToInt>;
-def X86cvtps2UInt : SDNode<"X86ISD::FP_TO_UINT_RND", SDTFloatToInt>;
-def X86cvtpd2Int : SDNode<"X86ISD::FP_TO_SINT_RND", SDTDoubleToInt>;
-def X86cvtpd2UInt : SDNode<"X86ISD::FP_TO_UINT_RND", SDTDoubleToInt>;
+def X86cvtp2Int : SDNode<"X86ISD::FP_TO_SINT_RND", SDTFloatToInt>;
+def X86cvtp2UInt : SDNode<"X86ISD::FP_TO_UINT_RND", SDTFloatToInt>;
def X86cvtph2ps : SDNode<"ISD::FP16_TO_FP",
- SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCVecEltisVT<0, f32>,
+ SDTypeProfile<1, 2, [SDTCVecEltisVT<0, f32>,
SDTCVecEltisVT<1, i16>,
- SDTCisFP<0>,
SDTCisVT<2, i32>]> >;
def X86cvtps2ph : SDNode<"ISD::FP_TO_FP16",
- SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCVecEltisVT<0, i16>,
+ SDTypeProfile<1, 3, [SDTCVecEltisVT<0, i16>,
SDTCVecEltisVT<1, f32>,
- SDTCisFP<1>, SDTCisVT<2, i32>,
+ SDTCisVT<2, i32>,
SDTCisVT<3, i32>]> >;
def X86vfpextRnd : SDNode<"X86ISD::VFPEXT",
- SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<0>, SDTCisFP<1>,
- SDTCVecEltisVT<0, f64>,
+ SDTypeProfile<1, 2, [SDTCVecEltisVT<0, f64>,
SDTCVecEltisVT<1, f32>,
SDTCisOpSmallerThanOp<1, 0>,
SDTCisVT<2, i32>]>>;
def X86vfproundRnd: SDNode<"X86ISD::VFPROUND",
- SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisFP<0>, SDTCisFP<1>,
- SDTCVecEltisVT<0, f32>,
+ SDTypeProfile<1, 2, [SDTCVecEltisVT<0, f32>,
SDTCVecEltisVT<1, f64>,
SDTCisOpSmallerThanOp<0, 1>,
SDTCisVT<2, i32>]>>;
@@ -602,13 +591,13 @@ def sse_load_f64 : ComplexPattern<v2f64, 5, "selectScalarSSELoad", [],
def ssmem : Operand<v4f32> {
let PrintMethod = "printf32mem";
- let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
+ let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, SEGMENT_REG);
let ParserMatchClass = X86Mem32AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
def sdmem : Operand<v2f64> {
let PrintMethod = "printf64mem";
- let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
+ let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, SEGMENT_REG);
let ParserMatchClass = X86Mem64AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
@@ -674,11 +663,6 @@ def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
-// Like 'X86vzload', but always requires 128-bit vector alignment.
-def alignedX86vzload : PatFrag<(ops node:$ptr), (X86vzload node:$ptr), [{
- return cast<MemSDNode>(N)->getAlignment() >= 16;
-}]>;
-
// Like 'load', but always requires 256-bit vector alignment.
def alignedload256 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() >= 32;
@@ -982,9 +966,9 @@ def masked_load_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
return isa<MaskedLoadSDNode>(N);
}]>;
-// masked store fragments.
+// Masked store fragments.
// X86mstore can't be implemented in core DAG files because some targets
-// doesn't support vector type ( llvm-tblgen will fail)
+// do not support vector types (llvm-tblgen will fail).
def X86mstore : PatFrag<(ops node:$src1, node:$src2, node:$src3),
(masked_store node:$src1, node:$src2, node:$src3), [{
return !cast<MaskedStoreSDNode>(N)->isTruncatingStore();
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 246804e34289..1672b3855b79 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -18,11 +18,13 @@
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DerivedTypes.h"
@@ -36,7 +38,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
-#include <limits>
using namespace llvm;
@@ -57,6 +58,17 @@ static cl::opt<bool>
ReMatPICStubLoad("remat-pic-stub-load",
cl::desc("Re-materialize load from stub in PIC mode"),
cl::init(false), cl::Hidden);
+static cl::opt<unsigned>
+PartialRegUpdateClearance("partial-reg-update-clearance",
+ cl::desc("Clearance between two register writes "
+ "for inserting XOR to avoid partial "
+ "register update"),
+ cl::init(64), cl::Hidden);
+static cl::opt<unsigned>
+UndefRegClearance("undef-reg-clearance",
+ cl::desc("How many idle instructions we would like before "
+ "certain undef register reads"),
+ cl::init(64), cl::Hidden);
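Both knobs are ordinary (hidden) cl::opt flags, so they can be tuned on the
llc command line, e.g. "llc -partial-reg-update-clearance=32
-undef-reg-clearance=128 test.ll" (values here are illustrative only).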
enum {
// Select which memory operand is being unfolded.
@@ -105,7 +117,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
: X86::ADJCALLSTACKDOWN32),
(STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
: X86::ADJCALLSTACKUP32),
- X86::CATCHRET),
+ X86::CATCHRET,
+ (STI.is64Bit() ? X86::RETQ : X86::RETL)),
Subtarget(STI), RI(STI.getTargetTriple()) {
static const X86MemoryFoldTableEntry MemoryFoldTable2Addr[] = {
@@ -804,50 +817,54 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::TZMSK64rr, X86::TZMSK64rm, 0 },
// AVX-512 foldable instructions
- { X86::VMOV64toPQIZrr, X86::VMOVQI2PQIZrm, 0 },
- { X86::VMOVDI2SSZrr, X86::VMOVDI2SSZrm, 0 },
- { X86::VMOVAPDZrr, X86::VMOVAPDZrm, TB_ALIGN_64 },
- { X86::VMOVAPSZrr, X86::VMOVAPSZrm, TB_ALIGN_64 },
- { X86::VMOVDQA32Zrr, X86::VMOVDQA32Zrm, TB_ALIGN_64 },
- { X86::VMOVDQA64Zrr, X86::VMOVDQA64Zrm, TB_ALIGN_64 },
- { X86::VMOVDQU8Zrr, X86::VMOVDQU8Zrm, 0 },
- { X86::VMOVDQU16Zrr, X86::VMOVDQU16Zrm, 0 },
- { X86::VMOVDQU32Zrr, X86::VMOVDQU32Zrm, 0 },
- { X86::VMOVDQU64Zrr, X86::VMOVDQU64Zrm, 0 },
- { X86::VMOVUPDZrr, X86::VMOVUPDZrm, 0 },
- { X86::VMOVUPSZrr, X86::VMOVUPSZrm, 0 },
- { X86::VPABSDZrr, X86::VPABSDZrm, 0 },
- { X86::VPABSQZrr, X86::VPABSQZrm, 0 },
- { X86::VBROADCASTSSZr, X86::VBROADCASTSSZm, TB_NO_REVERSE },
- { X86::VBROADCASTSDZr, X86::VBROADCASTSDZm, TB_NO_REVERSE },
+ { X86::VMOV64toPQIZrr, X86::VMOVQI2PQIZrm, 0 },
+ { X86::VMOVDI2SSZrr, X86::VMOVDI2SSZrm, 0 },
+ { X86::VMOVAPDZrr, X86::VMOVAPDZrm, TB_ALIGN_64 },
+ { X86::VMOVAPSZrr, X86::VMOVAPSZrm, TB_ALIGN_64 },
+ { X86::VMOVDQA32Zrr, X86::VMOVDQA32Zrm, TB_ALIGN_64 },
+ { X86::VMOVDQA64Zrr, X86::VMOVDQA64Zrm, TB_ALIGN_64 },
+ { X86::VMOVDQU8Zrr, X86::VMOVDQU8Zrm, 0 },
+ { X86::VMOVDQU16Zrr, X86::VMOVDQU16Zrm, 0 },
+ { X86::VMOVDQU32Zrr, X86::VMOVDQU32Zrm, 0 },
+ { X86::VMOVDQU64Zrr, X86::VMOVDQU64Zrm, 0 },
+ { X86::VMOVUPDZrr, X86::VMOVUPDZrm, 0 },
+ { X86::VMOVUPSZrr, X86::VMOVUPSZrm, 0 },
+ { X86::VPABSDZrr, X86::VPABSDZrm, 0 },
+ { X86::VPABSQZrr, X86::VPABSQZrm, 0 },
+ { X86::VBROADCASTSSZr, X86::VBROADCASTSSZm, TB_NO_REVERSE },
+ { X86::VBROADCASTSSZr_s, X86::VBROADCASTSSZm, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZr, X86::VBROADCASTSDZm, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZr_s, X86::VBROADCASTSDZm, TB_NO_REVERSE },
// AVX-512 foldable instructions (256-bit versions)
- { X86::VMOVAPDZ256rr, X86::VMOVAPDZ256rm, TB_ALIGN_32 },
- { X86::VMOVAPSZ256rr, X86::VMOVAPSZ256rm, TB_ALIGN_32 },
- { X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256rm, TB_ALIGN_32 },
- { X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256rm, TB_ALIGN_32 },
- { X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256rm, 0 },
- { X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256rm, 0 },
- { X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256rm, 0 },
- { X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256rm, 0 },
- { X86::VMOVUPDZ256rr, X86::VMOVUPDZ256rm, 0 },
- { X86::VMOVUPSZ256rr, X86::VMOVUPSZ256rm, 0 },
- { X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256m, TB_NO_REVERSE },
- { X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256m, TB_NO_REVERSE },
-
- // AVX-512 foldable instructions (256-bit versions)
- { X86::VMOVAPDZ128rr, X86::VMOVAPDZ128rm, TB_ALIGN_16 },
- { X86::VMOVAPSZ128rr, X86::VMOVAPSZ128rm, TB_ALIGN_16 },
- { X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128rm, TB_ALIGN_16 },
- { X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128rm, TB_ALIGN_16 },
- { X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128rm, 0 },
- { X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128rm, 0 },
- { X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128rm, 0 },
- { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128rm, 0 },
- { X86::VMOVUPDZ128rr, X86::VMOVUPDZ128rm, 0 },
- { X86::VMOVUPSZ128rr, X86::VMOVUPSZ128rm, 0 },
- { X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128m, TB_NO_REVERSE },
+ { X86::VMOVAPDZ256rr, X86::VMOVAPDZ256rm, TB_ALIGN_32 },
+ { X86::VMOVAPSZ256rr, X86::VMOVAPSZ256rm, TB_ALIGN_32 },
+ { X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256rm, TB_ALIGN_32 },
+ { X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256rm, TB_ALIGN_32 },
+ { X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256rm, 0 },
+ { X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256rm, 0 },
+ { X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256rm, 0 },
+ { X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256rm, 0 },
+ { X86::VMOVUPDZ256rr, X86::VMOVUPDZ256rm, 0 },
+ { X86::VMOVUPSZ256rr, X86::VMOVUPSZ256rm, 0 },
+ { X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256m, TB_NO_REVERSE },
+ { X86::VBROADCASTSSZ256r_s, X86::VBROADCASTSSZ256m, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256m, TB_NO_REVERSE },
+ { X86::VBROADCASTSDZ256r_s, X86::VBROADCASTSDZ256m, TB_NO_REVERSE },
+ // AVX-512 foldable instructions (128-bit versions)
+ { X86::VMOVAPDZ128rr, X86::VMOVAPDZ128rm, TB_ALIGN_16 },
+ { X86::VMOVAPSZ128rr, X86::VMOVAPSZ128rm, TB_ALIGN_16 },
+ { X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128rm, TB_ALIGN_16 },
+ { X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128rm, TB_ALIGN_16 },
+ { X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128rm, 0 },
+ { X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128rm, 0 },
+ { X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128rm, 0 },
+ { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128rm, 0 },
+ { X86::VMOVUPDZ128rr, X86::VMOVUPDZ128rm, 0 },
+ { X86::VMOVUPSZ128rr, X86::VMOVUPSZ128rm, 0 },
+ { X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128m, TB_NO_REVERSE },
+ { X86::VBROADCASTSSZ128r_s, X86::VBROADCASTSSZ128m, TB_NO_REVERSE },
// F16C foldable instructions
{ X86::VCVTPH2PSrr, X86::VCVTPH2PSrm, 0 },
{ X86::VCVTPH2PSYrr, X86::VCVTPH2PSYrm, 0 },
@@ -998,6 +1015,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::MINSDrr_Int, X86::MINSDrm_Int, 0 },
{ X86::MINSSrr, X86::MINSSrm, 0 },
{ X86::MINSSrr_Int, X86::MINSSrm_Int, 0 },
+ { X86::MOVLHPSrr, X86::MOVHPSrm, TB_NO_REVERSE },
{ X86::MPSADBWrri, X86::MPSADBWrmi, TB_ALIGN_16 },
{ X86::MULPDrr, X86::MULPDrm, TB_ALIGN_16 },
{ X86::MULPSrr, X86::MULPSrm, TB_ALIGN_16 },
@@ -1023,7 +1041,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::PADDUSBrr, X86::PADDUSBrm, TB_ALIGN_16 },
{ X86::PADDUSWrr, X86::PADDUSWrm, TB_ALIGN_16 },
{ X86::PADDWrr, X86::PADDWrm, TB_ALIGN_16 },
- { X86::PALIGNR128rr, X86::PALIGNR128rm, TB_ALIGN_16 },
+ { X86::PALIGNRrri, X86::PALIGNRrmi, TB_ALIGN_16 },
{ X86::PANDNrr, X86::PANDNrm, TB_ALIGN_16 },
{ X86::PANDrr, X86::PANDrm, TB_ALIGN_16 },
{ X86::PAVGBrr, X86::PAVGBrm, TB_ALIGN_16 },
@@ -1073,9 +1091,9 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::PORrr, X86::PORrm, TB_ALIGN_16 },
{ X86::PSADBWrr, X86::PSADBWrm, TB_ALIGN_16 },
{ X86::PSHUFBrr, X86::PSHUFBrm, TB_ALIGN_16 },
- { X86::PSIGNBrr, X86::PSIGNBrm, TB_ALIGN_16 },
- { X86::PSIGNWrr, X86::PSIGNWrm, TB_ALIGN_16 },
- { X86::PSIGNDrr, X86::PSIGNDrm, TB_ALIGN_16 },
+ { X86::PSIGNBrr128, X86::PSIGNBrm128, TB_ALIGN_16 },
+ { X86::PSIGNWrr128, X86::PSIGNWrm128, TB_ALIGN_16 },
+ { X86::PSIGNDrr128, X86::PSIGNDrm128, TB_ALIGN_16 },
{ X86::PSLLDrr, X86::PSLLDrm, TB_ALIGN_16 },
{ X86::PSLLQrr, X86::PSLLQrm, TB_ALIGN_16 },
{ X86::PSLLWrr, X86::PSLLWrm, TB_ALIGN_16 },
@@ -1298,6 +1316,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VMINSDrr_Int, X86::VMINSDrm_Int, 0 },
{ X86::VMINSSrr, X86::VMINSSrm, 0 },
{ X86::VMINSSrr_Int, X86::VMINSSrm_Int, 0 },
+ { X86::VMOVLHPSrr, X86::VMOVHPSrm, TB_NO_REVERSE },
{ X86::VMPSADBWrri, X86::VMPSADBWrmi, 0 },
{ X86::VMULPDrr, X86::VMULPDrm, 0 },
{ X86::VMULPSrr, X86::VMULPSrm, 0 },
@@ -1319,7 +1338,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPADDUSBrr, X86::VPADDUSBrm, 0 },
{ X86::VPADDUSWrr, X86::VPADDUSWrm, 0 },
{ X86::VPADDWrr, X86::VPADDWrm, 0 },
- { X86::VPALIGNR128rr, X86::VPALIGNR128rm, 0 },
+ { X86::VPALIGNRrri, X86::VPALIGNRrmi, 0 },
{ X86::VPANDNrr, X86::VPANDNrm, 0 },
{ X86::VPANDrr, X86::VPANDrm, 0 },
{ X86::VPAVGBrr, X86::VPAVGBrm, 0 },
@@ -1371,9 +1390,9 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPORrr, X86::VPORrm, 0 },
{ X86::VPSADBWrr, X86::VPSADBWrm, 0 },
{ X86::VPSHUFBrr, X86::VPSHUFBrm, 0 },
- { X86::VPSIGNBrr, X86::VPSIGNBrm, 0 },
- { X86::VPSIGNWrr, X86::VPSIGNWrm, 0 },
- { X86::VPSIGNDrr, X86::VPSIGNDrm, 0 },
+ { X86::VPSIGNBrr128, X86::VPSIGNBrm128, 0 },
+ { X86::VPSIGNWrr128, X86::VPSIGNWrm128, 0 },
+ { X86::VPSIGNDrr128, X86::VPSIGNDrm128, 0 },
{ X86::VPSLLDrr, X86::VPSLLDrm, 0 },
{ X86::VPSLLQrr, X86::VPSLLQrm, 0 },
{ X86::VPSLLWrr, X86::VPSLLWrm, 0 },
@@ -1475,7 +1494,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPADDUSBYrr, X86::VPADDUSBYrm, 0 },
{ X86::VPADDUSWYrr, X86::VPADDUSWYrm, 0 },
{ X86::VPADDWYrr, X86::VPADDWYrm, 0 },
- { X86::VPALIGNR256rr, X86::VPALIGNR256rm, 0 },
+ { X86::VPALIGNRYrri, X86::VPALIGNRYrmi, 0 },
{ X86::VPANDNYrr, X86::VPANDNYrm, 0 },
{ X86::VPANDYrr, X86::VPANDYrm, 0 },
{ X86::VPAVGBYrr, X86::VPAVGBYrm, 0 },
@@ -1526,9 +1545,9 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPORYrr, X86::VPORYrm, 0 },
{ X86::VPSADBWYrr, X86::VPSADBWYrm, 0 },
{ X86::VPSHUFBYrr, X86::VPSHUFBYrm, 0 },
- { X86::VPSIGNBYrr, X86::VPSIGNBYrm, 0 },
- { X86::VPSIGNWYrr, X86::VPSIGNWYrm, 0 },
- { X86::VPSIGNDYrr, X86::VPSIGNDYrm, 0 },
+ { X86::VPSIGNBYrr256, X86::VPSIGNBYrm256, 0 },
+ { X86::VPSIGNWYrr256, X86::VPSIGNWYrm256, 0 },
+ { X86::VPSIGNDYrr256, X86::VPSIGNDYrm256, 0 },
{ X86::VPSLLDYrr, X86::VPSLLDYrm, 0 },
{ X86::VPSLLQYrr, X86::VPSLLQYrm, 0 },
{ X86::VPSLLWYrr, X86::VPSLLWYrm, 0 },
@@ -1540,6 +1559,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPSRAWYrr, X86::VPSRAWYrm, 0 },
{ X86::VPSRAVDrr, X86::VPSRAVDrm, 0 },
{ X86::VPSRAVDYrr, X86::VPSRAVDYrm, 0 },
+ { X86::VPSRAVD_Intrr, X86::VPSRAVD_Intrm, 0 },
+ { X86::VPSRAVD_IntYrr, X86::VPSRAVD_IntYrm, 0 },
{ X86::VPSRLDYrr, X86::VPSRLDYrm, 0 },
{ X86::VPSRLQYrr, X86::VPSRLQYrm, 0 },
{ X86::VPSRLWYrr, X86::VPSRLWYrm, 0 },
@@ -1600,8 +1621,8 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4mrY, TB_ALIGN_NONE },
// XOP foldable instructions
- { X86::VPCMOVrr, X86::VPCMOVmr, 0 },
- { X86::VPCMOVrrY, X86::VPCMOVmrY, 0 },
+ { X86::VPCMOVrrr, X86::VPCMOVrmr, 0 },
+ { X86::VPCMOVrrrY, X86::VPCMOVrmrY, 0 },
{ X86::VPCOMBri, X86::VPCOMBmi, 0 },
{ X86::VPCOMDri, X86::VPCOMDmi, 0 },
{ X86::VPCOMQri, X86::VPCOMQmi, 0 },
@@ -1626,7 +1647,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPMACSWWrr, X86::VPMACSWWrm, 0 },
{ X86::VPMADCSSWDrr, X86::VPMADCSSWDrm, 0 },
{ X86::VPMADCSWDrr, X86::VPMADCSWDrm, 0 },
- { X86::VPPERMrr, X86::VPPERMmr, 0 },
+ { X86::VPPERMrrr, X86::VPPERMrmr, 0 },
{ X86::VPROTBrr, X86::VPROTBrm, 0 },
{ X86::VPROTDrr, X86::VPROTDrm, 0 },
{ X86::VPROTQrr, X86::VPROTQrm, 0 },
@@ -1659,12 +1680,28 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
// AVX-512 foldable instructions
{ X86::VADDPSZrr, X86::VADDPSZrm, 0 },
{ X86::VADDPDZrr, X86::VADDPDZrm, 0 },
+ { X86::VADDSSZrr, X86::VADDSSZrm, 0 },
+ { X86::VADDSSZrr_Int, X86::VADDSSZrm_Int, 0 },
+ { X86::VADDSDZrr, X86::VADDSDZrm, 0 },
+ { X86::VADDSDZrr_Int, X86::VADDSDZrm_Int, 0 },
{ X86::VSUBPSZrr, X86::VSUBPSZrm, 0 },
{ X86::VSUBPDZrr, X86::VSUBPDZrm, 0 },
+ { X86::VSUBSSZrr, X86::VSUBSSZrm, 0 },
+ { X86::VSUBSSZrr_Int, X86::VSUBSSZrm_Int, 0 },
+ { X86::VSUBSDZrr, X86::VSUBSDZrm, 0 },
+ { X86::VSUBSDZrr_Int, X86::VSUBSDZrm_Int, 0 },
{ X86::VMULPSZrr, X86::VMULPSZrm, 0 },
{ X86::VMULPDZrr, X86::VMULPDZrm, 0 },
+ { X86::VMULSSZrr, X86::VMULSSZrm, 0 },
+ { X86::VMULSSZrr_Int, X86::VMULSSZrm_Int, 0 },
+ { X86::VMULSDZrr, X86::VMULSDZrm, 0 },
+ { X86::VMULSDZrr_Int, X86::VMULSDZrm_Int, 0 },
{ X86::VDIVPSZrr, X86::VDIVPSZrm, 0 },
{ X86::VDIVPDZrr, X86::VDIVPDZrm, 0 },
+ { X86::VDIVSSZrr, X86::VDIVSSZrm, 0 },
+ { X86::VDIVSSZrr_Int, X86::VDIVSSZrm_Int, 0 },
+ { X86::VDIVSDZrr, X86::VDIVSDZrm, 0 },
+ { X86::VDIVSDZrr_Int, X86::VDIVSDZrm_Int, 0 },
{ X86::VMINPSZrr, X86::VMINPSZrm, 0 },
{ X86::VMINPDZrr, X86::VMINPDZrm, 0 },
{ X86::VMAXPSZrr, X86::VMAXPSZrm, 0 },
@@ -1902,13 +1939,13 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4rmY, TB_ALIGN_NONE },
// XOP foldable instructions
- { X86::VPCMOVrr, X86::VPCMOVrm, 0 },
- { X86::VPCMOVrrY, X86::VPCMOVrmY, 0 },
+ { X86::VPCMOVrrr, X86::VPCMOVrrm, 0 },
+ { X86::VPCMOVrrrY, X86::VPCMOVrrmY, 0 },
{ X86::VPERMIL2PDrr, X86::VPERMIL2PDrm, 0 },
{ X86::VPERMIL2PDrrY, X86::VPERMIL2PDrmY, 0 },
{ X86::VPERMIL2PSrr, X86::VPERMIL2PSrm, 0 },
{ X86::VPERMIL2PSrrY, X86::VPERMIL2PSrmY, 0 },
- { X86::VPPERMrr, X86::VPPERMrm, 0 },
+ { X86::VPPERMrrr, X86::VPPERMrrm, 0 },
// AVX-512 VPERMI instructions with 3 source operands.
{ X86::VPERMI2Drr, X86::VPERMI2Drm, 0 },
@@ -2025,7 +2062,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
void
X86InstrInfo::AddTableEntry(RegOp2MemOpTableType &R2MTable,
MemOp2RegOpTableType &M2RTable,
- unsigned RegOp, unsigned MemOp, unsigned Flags) {
+ uint16_t RegOp, uint16_t MemOp, uint16_t Flags) {
if ((Flags & TB_NO_FORWARD) == 0) {
assert(!R2MTable.count(RegOp) && "Duplicate entry!");
R2MTable[RegOp] = std::make_pair(MemOp, Flags);
@@ -2085,19 +2122,19 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
return false;
}
-int X86InstrInfo::getSPAdjust(const MachineInstr *MI) const {
- const MachineFunction *MF = MI->getParent()->getParent();
+int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const {
+ const MachineFunction *MF = MI.getParent()->getParent();
const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
- if (MI->getOpcode() == getCallFrameSetupOpcode() ||
- MI->getOpcode() == getCallFrameDestroyOpcode()) {
+ if (MI.getOpcode() == getCallFrameSetupOpcode() ||
+ MI.getOpcode() == getCallFrameDestroyOpcode()) {
unsigned StackAlign = TFI->getStackAlignment();
- int SPAdj = (MI->getOperand(0).getImm() + StackAlign - 1) / StackAlign *
- StackAlign;
+ int SPAdj =
+ (MI.getOperand(0).getImm() + StackAlign - 1) / StackAlign * StackAlign;
- SPAdj -= MI->getOperand(1).getImm();
+ SPAdj -= MI.getOperand(1).getImm();
- if (MI->getOpcode() == getCallFrameSetupOpcode())
+ if (MI.getOpcode() == getCallFrameSetupOpcode())
return SPAdj;
else
return -SPAdj;
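// A minimal sketch of the rounding used above: the call-frame-setup
// immediate is rounded up to the stack alignment before the callee-popped
// amount is subtracted (the helper name is hypothetical).
#include <cassert>

int roundUpToStackAlign(int Imm, int StackAlign) {
  return (Imm + StackAlign - 1) / StackAlign * StackAlign;
}

void roundUpExamples() {
  assert(roundUpToStackAlign(20, 16) == 32);
  assert(roundUpToStackAlign(32, 16) == 32);
  assert(roundUpToStackAlign(0, 16) == 0);
}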
@@ -2106,8 +2143,8 @@ int X86InstrInfo::getSPAdjust(const MachineInstr *MI) const {
// To know whether a call adjusts the stack, we need information
// that is bound to the following ADJCALLSTACKUP pseudo.
// Look for the next ADJCALLSTACKUP that follows the call.
- if (MI->isCall()) {
- const MachineBasicBlock* MBB = MI->getParent();
+ if (MI.isCall()) {
+ const MachineBasicBlock *MBB = MI.getParent();
auto I = ++MachineBasicBlock::const_iterator(MI);
for (auto E = MBB->end(); I != E; ++I) {
if (I->getOpcode() == getCallFrameDestroyOpcode() ||
@@ -2125,7 +2162,7 @@ int X86InstrInfo::getSPAdjust(const MachineInstr *MI) const {
// Currently we handle only PUSHes we can reasonably expect to see
// in call sequences.
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default:
return 0;
case X86::PUSH32i8:
@@ -2134,21 +2171,27 @@ int X86InstrInfo::getSPAdjust(const MachineInstr *MI) const {
case X86::PUSH32rmr:
case X86::PUSHi32:
return 4;
+ case X86::PUSH64i8:
+ case X86::PUSH64r:
+ case X86::PUSH64rmm:
+ case X86::PUSH64rmr:
+ case X86::PUSH64i32:
+ return 8;
}
}
/// Return true and the FrameIndex if the specified
/// operand and following operands form a reference to the stack frame.
-bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op,
+bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
int &FrameIndex) const {
- if (MI->getOperand(Op+X86::AddrBaseReg).isFI() &&
- MI->getOperand(Op+X86::AddrScaleAmt).isImm() &&
- MI->getOperand(Op+X86::AddrIndexReg).isReg() &&
- MI->getOperand(Op+X86::AddrDisp).isImm() &&
- MI->getOperand(Op+X86::AddrScaleAmt).getImm() == 1 &&
- MI->getOperand(Op+X86::AddrIndexReg).getReg() == 0 &&
- MI->getOperand(Op+X86::AddrDisp).getImm() == 0) {
- FrameIndex = MI->getOperand(Op+X86::AddrBaseReg).getIndex();
+ if (MI.getOperand(Op + X86::AddrBaseReg).isFI() &&
+ MI.getOperand(Op + X86::AddrScaleAmt).isImm() &&
+ MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
+ MI.getOperand(Op + X86::AddrDisp).isImm() &&
+ MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 &&
+ MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 &&
+ MI.getOperand(Op + X86::AddrDisp).getImm() == 0) {
+ FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex();
return true;
}
return false;
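// Sketch of the operand layout the check above relies on: an x86 memory
// reference is five consecutive operands (base, scale, index, displacement,
// segment), and a "simple" stack reference is FrameIndex + 1*noreg + 0.
// The struct below is a hypothetical stand-in for those operands.
struct AddrOperands {
  bool BaseIsFI;     // base operand is a frame index
  int ScaleAmt;      // must be 1
  unsigned IndexReg; // must be 0, i.e. no index register
  int Disp;          // must be 0
};

bool isSimpleFrameRef(const AddrOperands &A) {
  return A.BaseIsFI && A.ScaleAmt == 1 && A.IndexReg == 0 && A.Disp == 0;
}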
@@ -2166,13 +2209,19 @@ static bool isFrameLoadOpcode(int Opcode) {
case X86::MOVSSrm:
case X86::MOVSDrm:
case X86::MOVAPSrm:
+ case X86::MOVUPSrm:
case X86::MOVAPDrm:
+ case X86::MOVUPDrm:
case X86::MOVDQArm:
+ case X86::MOVDQUrm:
case X86::VMOVSSrm:
case X86::VMOVSDrm:
case X86::VMOVAPSrm:
+ case X86::VMOVUPSrm:
case X86::VMOVAPDrm:
+ case X86::VMOVUPDrm:
case X86::VMOVDQArm:
+ case X86::VMOVDQUrm:
case X86::VMOVUPSYrm:
case X86::VMOVAPSYrm:
case X86::VMOVUPDYrm:
@@ -2181,8 +2230,42 @@ static bool isFrameLoadOpcode(int Opcode) {
case X86::VMOVDQAYrm:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
+ case X86::VMOVSSZrm:
+ case X86::VMOVSDZrm:
case X86::VMOVAPSZrm:
+ case X86::VMOVAPSZ128rm:
+ case X86::VMOVAPSZ256rm:
case X86::VMOVUPSZrm:
+ case X86::VMOVUPSZ128rm:
+ case X86::VMOVUPSZ256rm:
+ case X86::VMOVAPDZrm:
+ case X86::VMOVAPDZ128rm:
+ case X86::VMOVAPDZ256rm:
+ case X86::VMOVUPDZrm:
+ case X86::VMOVUPDZ128rm:
+ case X86::VMOVUPDZ256rm:
+ case X86::VMOVDQA32Zrm:
+ case X86::VMOVDQA32Z128rm:
+ case X86::VMOVDQA32Z256rm:
+ case X86::VMOVDQU32Zrm:
+ case X86::VMOVDQU32Z128rm:
+ case X86::VMOVDQU32Z256rm:
+ case X86::VMOVDQA64Zrm:
+ case X86::VMOVDQA64Z128rm:
+ case X86::VMOVDQA64Z256rm:
+ case X86::VMOVDQU64Zrm:
+ case X86::VMOVDQU64Z128rm:
+ case X86::VMOVDQU64Z256rm:
+ case X86::VMOVDQU8Zrm:
+ case X86::VMOVDQU8Z128rm:
+ case X86::VMOVDQU8Z256rm:
+ case X86::VMOVDQU16Zrm:
+ case X86::VMOVDQU16Z128rm:
+ case X86::VMOVDQU16Z256rm:
+ case X86::KMOVBkm:
+ case X86::KMOVWkm:
+ case X86::KMOVDkm:
+ case X86::KMOVQkm:
return true;
}
}
@@ -2198,40 +2281,80 @@ static bool isFrameStoreOpcode(int Opcode) {
case X86::MOVSSmr:
case X86::MOVSDmr:
case X86::MOVAPSmr:
+ case X86::MOVUPSmr:
case X86::MOVAPDmr:
+ case X86::MOVUPDmr:
case X86::MOVDQAmr:
+ case X86::MOVDQUmr:
case X86::VMOVSSmr:
case X86::VMOVSDmr:
case X86::VMOVAPSmr:
+ case X86::VMOVUPSmr:
case X86::VMOVAPDmr:
+ case X86::VMOVUPDmr:
case X86::VMOVDQAmr:
+ case X86::VMOVDQUmr:
case X86::VMOVUPSYmr:
case X86::VMOVAPSYmr:
case X86::VMOVUPDYmr:
case X86::VMOVAPDYmr:
case X86::VMOVDQUYmr:
case X86::VMOVDQAYmr:
+ case X86::VMOVSSZmr:
+ case X86::VMOVSDZmr:
case X86::VMOVUPSZmr:
+ case X86::VMOVUPSZ128mr:
+ case X86::VMOVUPSZ256mr:
case X86::VMOVAPSZmr:
+ case X86::VMOVAPSZ128mr:
+ case X86::VMOVAPSZ256mr:
+ case X86::VMOVUPDZmr:
+ case X86::VMOVUPDZ128mr:
+ case X86::VMOVUPDZ256mr:
+ case X86::VMOVAPDZmr:
+ case X86::VMOVAPDZ128mr:
+ case X86::VMOVAPDZ256mr:
+ case X86::VMOVDQA32Zmr:
+ case X86::VMOVDQA32Z128mr:
+ case X86::VMOVDQA32Z256mr:
+ case X86::VMOVDQU32Zmr:
+ case X86::VMOVDQU32Z128mr:
+ case X86::VMOVDQU32Z256mr:
+ case X86::VMOVDQA64Zmr:
+ case X86::VMOVDQA64Z128mr:
+ case X86::VMOVDQA64Z256mr:
+ case X86::VMOVDQU64Zmr:
+ case X86::VMOVDQU64Z128mr:
+ case X86::VMOVDQU64Z256mr:
+ case X86::VMOVDQU8Zmr:
+ case X86::VMOVDQU8Z128mr:
+ case X86::VMOVDQU8Z256mr:
+ case X86::VMOVDQU16Zmr:
+ case X86::VMOVDQU16Z128mr:
+ case X86::VMOVDQU16Z256mr:
case X86::MMX_MOVD64mr:
case X86::MMX_MOVQ64mr:
case X86::MMX_MOVNTQmr:
+ case X86::KMOVBmk:
+ case X86::KMOVWmk:
+ case X86::KMOVDmk:
+ case X86::KMOVQmk:
return true;
}
return false;
}
-unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- if (isFrameLoadOpcode(MI->getOpcode()))
- if (MI->getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
- return MI->getOperand(0).getReg();
+ if (isFrameLoadOpcode(MI.getOpcode()))
+ if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
+ return MI.getOperand(0).getReg();
return 0;
}
-unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
+unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
- if (isFrameLoadOpcode(MI->getOpcode())) {
+ if (isFrameLoadOpcode(MI.getOpcode())) {
unsigned Reg;
if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
return Reg;
@@ -2242,18 +2365,18 @@ unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
return 0;
}
-unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- if (isFrameStoreOpcode(MI->getOpcode()))
- if (MI->getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
+ if (isFrameStoreOpcode(MI.getOpcode()))
+ if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
isFrameOperand(MI, 0, FrameIndex))
- return MI->getOperand(X86::AddrNumOperands).getReg();
+ return MI.getOperand(X86::AddrNumOperands).getReg();
return 0;
}
-unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
+unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
- if (isFrameStoreOpcode(MI->getOpcode())) {
+ if (isFrameStoreOpcode(MI.getOpcode())) {
unsigned Reg;
if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
return Reg;
@@ -2281,10 +2404,9 @@ static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
return isPICBase;
}
-bool
-X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
- AliasAnalysis *AA) const {
- switch (MI->getOpcode()) {
+bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
+ AliasAnalysis *AA) const {
+ switch (MI.getOpcode()) {
default: break;
case X86::MOV8rm:
case X86::MOV16rm:
@@ -2345,18 +2467,18 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
case X86::VMOVUPSZ256rm:
case X86::VMOVUPSZrm: {
// Loads from constant pools are trivially rematerializable.
- if (MI->getOperand(1+X86::AddrBaseReg).isReg() &&
- MI->getOperand(1+X86::AddrScaleAmt).isImm() &&
- MI->getOperand(1+X86::AddrIndexReg).isReg() &&
- MI->getOperand(1+X86::AddrIndexReg).getReg() == 0 &&
- MI->isInvariantLoad(AA)) {
- unsigned BaseReg = MI->getOperand(1+X86::AddrBaseReg).getReg();
+ if (MI.getOperand(1 + X86::AddrBaseReg).isReg() &&
+ MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
+ MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
+ MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
+ MI.isInvariantLoad(AA)) {
+ unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
if (BaseReg == 0 || BaseReg == X86::RIP)
return true;
// Allow re-materialization of PIC load.
- if (!ReMatPICStubLoad && MI->getOperand(1+X86::AddrDisp).isGlobal())
+ if (!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal())
return false;
- const MachineFunction &MF = *MI->getParent()->getParent();
+ const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
return regIsPICBase(BaseReg, MRI);
}
@@ -2365,18 +2487,18 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
case X86::LEA32r:
case X86::LEA64r: {
- if (MI->getOperand(1+X86::AddrScaleAmt).isImm() &&
- MI->getOperand(1+X86::AddrIndexReg).isReg() &&
- MI->getOperand(1+X86::AddrIndexReg).getReg() == 0 &&
- !MI->getOperand(1+X86::AddrDisp).isReg()) {
+ if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
+ MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
+ MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
+ !MI.getOperand(1 + X86::AddrDisp).isReg()) {
// lea fi#, lea GV, etc. are all rematerializable.
- if (!MI->getOperand(1+X86::AddrBaseReg).isReg())
+ if (!MI.getOperand(1 + X86::AddrBaseReg).isReg())
return true;
- unsigned BaseReg = MI->getOperand(1+X86::AddrBaseReg).getReg();
+ unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
if (BaseReg == 0)
return true;
// Allow re-materialization of lea PICBase + x.
- const MachineFunction &MF = *MI->getParent()->getParent();
+ const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
return regIsPICBase(BaseReg, MRI);
}
@@ -2469,10 +2591,10 @@ bool X86InstrInfo::isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SubIdx,
- const MachineInstr *Orig,
+ const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const {
bool ClobbersEFLAGS = false;
- for (const MachineOperand &MO : Orig->operands()) {
+ for (const MachineOperand &MO : Orig.operands()) {
if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS) {
ClobbersEFLAGS = true;
break;
@@ -2483,7 +2605,7 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
// The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
// effects.
int Value;
- switch (Orig->getOpcode()) {
+ switch (Orig.getOpcode()) {
case X86::MOV32r0: Value = 0; break;
case X86::MOV32r1: Value = 1; break;
case X86::MOV32r_1: Value = -1; break;
@@ -2491,22 +2613,23 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
llvm_unreachable("Unexpected instruction!");
}
- DebugLoc DL = Orig->getDebugLoc();
- BuildMI(MBB, I, DL, get(X86::MOV32ri)).addOperand(Orig->getOperand(0))
- .addImm(Value);
+ const DebugLoc &DL = Orig.getDebugLoc();
+ BuildMI(MBB, I, DL, get(X86::MOV32ri))
+ .addOperand(Orig.getOperand(0))
+ .addImm(Value);
} else {
- MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
+ MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
MBB.insert(I, MI);
}
- MachineInstr *NewMI = std::prev(I);
- NewMI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
+ MachineInstr &NewMI = *std::prev(I);
+ NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
}
/// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
-bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr *MI) const {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
+bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const {
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() && MO.isDef() &&
MO.getReg() == X86::EFLAGS && !MO.isDead()) {
return true;
@@ -2516,11 +2639,11 @@ bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr *MI) const {
}
/// Return the shift count of a machine operand, truncated as the hardware truncates it.
-inline static unsigned getTruncatedShiftCount(MachineInstr *MI,
+inline static unsigned getTruncatedShiftCount(MachineInstr &MI,
unsigned ShiftAmtOperandIdx) {
// The shift count is six bits with the REX.W prefix and five bits without.
- unsigned ShiftCountMask = (MI->getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
- unsigned Imm = MI->getOperand(ShiftAmtOperandIdx).getImm();
+ unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
+ unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm();
return Imm & ShiftCountMask;
}
@@ -2535,11 +2658,11 @@ inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
return ShAmt < 4 && ShAmt > 0;
}
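// Worked sketch of the two helpers above: hardware masks the shift count to
// six bits under REX.W and five bits otherwise, and only truncated counts
// 1..3 map onto the LEA scale factors 2, 4 and 8.
#include <cassert>

unsigned truncShift(unsigned Imm, bool RexW) {
  return Imm & (RexW ? 63 : 31);
}

void shiftExamples() {
  assert(truncShift(65, /*RexW=*/true) == 1);  // shl $65 acts like shl $1
  assert(truncShift(33, /*RexW=*/false) == 1); // 32-bit count wraps at 32
  assert((1u << 3) == 8);                      // largest usable LEA scale
}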
-bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
- unsigned Opc, bool AllowSP,
- unsigned &NewSrc, bool &isKill, bool &isUndef,
+bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
+ unsigned Opc, bool AllowSP, unsigned &NewSrc,
+ bool &isKill, bool &isUndef,
MachineOperand &ImplicitOp) const {
- MachineFunction &MF = *MI->getParent()->getParent();
+ MachineFunction &MF = *MI.getParent()->getParent();
const TargetRegisterClass *RC;
if (AllowSP) {
RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
@@ -2571,7 +2694,7 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
NewSrc = getX86SubSuperRegister(Src.getReg(), 64);
MachineBasicBlock::LivenessQueryResult LQR =
- MI->getParent()->computeRegisterLiveness(&getRegisterInfo(), NewSrc, MI);
+ MI.getParent()->computeRegisterLiveness(&getRegisterInfo(), NewSrc, MI);
switch (LQR) {
case MachineBasicBlock::LQR_Unknown:
@@ -2579,7 +2702,7 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
// formation.
return false;
case MachineBasicBlock::LQR_Live:
- isKill = MI->killsRegister(SrcReg);
+ isKill = MI.killsRegister(SrcReg);
isUndef = false;
break;
default:
@@ -2592,9 +2715,8 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
// Virtual register of the wrong class; we have to create a temporary 64-bit
// vreg to feed into the LEA.
NewSrc = MF.getRegInfo().createVirtualRegister(RC);
- BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
- get(TargetOpcode::COPY))
- .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
+ BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY))
+ .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
.addOperand(Src);
// Which is obviously going to be dead after we're done with it.
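// The COPY built above widens the 32-bit source into a fresh 64-bit vreg so
// it can feed a 64-bit LEA; in MIR terms the emitted pattern is roughly
//   undef %tmp.sub_32bit:gr64_nosp = COPY %src:gr32
//   %dst:gr32 = LEA64_32r %tmp, 1, %noreg, 0, %noreg
// (the register names here are illustrative, not from the patch).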
@@ -2609,16 +2731,14 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
/// Helper for convertToThreeAddress when 16-bit LEA is disabled, use 32-bit
/// LEA to form 3-address code by promoting to a 32-bit superregister and then
/// truncating back down to a 16-bit subregister.
-MachineInstr *
-X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
- MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const {
- MachineInstr *MI = MBBI;
- unsigned Dest = MI->getOperand(0).getReg();
- unsigned Src = MI->getOperand(1).getReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isKill = MI->getOperand(1).isKill();
+MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(
+ unsigned MIOpc, MachineFunction::iterator &MFI, MachineInstr &MI,
+ LiveVariables *LV) const {
+ MachineBasicBlock::iterator MBBI = MI.getIterator();
+ unsigned Dest = MI.getOperand(0).getReg();
+ unsigned Src = MI.getOperand(1).getReg();
+ bool isDead = MI.getOperand(0).isDead();
+ bool isKill = MI.getOperand(1).isKill();
MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
@@ -2638,19 +2758,19 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
// leal -65(%rdx), %esi
// But testing has shown this *does* help performance in 64-bit mode (at
// least on modern x86 machines).
- BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
+ BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
MachineInstr *InsMI =
- BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
- .addReg(leaInReg, RegState::Define, X86::sub_16bit)
- .addReg(Src, getKillRegState(isKill));
+ BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
+ .addReg(leaInReg, RegState::Define, X86::sub_16bit)
+ .addReg(Src, getKillRegState(isKill));
- MachineInstrBuilder MIB = BuildMI(*MFI, MBBI, MI->getDebugLoc(),
- get(Opc), leaOutReg);
+ MachineInstrBuilder MIB =
+ BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(Opc), leaOutReg);
switch (MIOpc) {
default: llvm_unreachable("Unreachable!");
case X86::SHL16ri: {
- unsigned ShAmt = MI->getOperand(2).getImm();
- MIB.addReg(0).addImm(1 << ShAmt)
+ unsigned ShAmt = MI.getOperand(2).getImm();
+ MIB.addReg(0).addImm(1ULL << ShAmt)
.addReg(leaInReg, RegState::Kill).addImm(0).addReg(0);
break;
}
@@ -2664,12 +2784,12 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
case X86::ADD16ri8:
case X86::ADD16ri_DB:
case X86::ADD16ri8_DB:
- addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
+ addRegOffset(MIB, leaInReg, true, MI.getOperand(2).getImm());
break;
case X86::ADD16rr:
case X86::ADD16rr_DB: {
- unsigned Src2 = MI->getOperand(2).getReg();
- bool isKill2 = MI->getOperand(2).isKill();
+ unsigned Src2 = MI.getOperand(2).getReg();
+ bool isKill2 = MI.getOperand(2).isKill();
unsigned leaInReg2 = 0;
MachineInstr *InsMI2 = nullptr;
if (Src == Src2) {
@@ -2683,33 +2803,32 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
// Build and insert a copy into an implicit UNDEF value. This is OK because
// we'll be shifting and then extracting the lower 16 bits.
- BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF),leaInReg2);
- InsMI2 =
- BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(TargetOpcode::COPY))
- .addReg(leaInReg2, RegState::Define, X86::sub_16bit)
- .addReg(Src2, getKillRegState(isKill2));
+ BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg2);
+ InsMI2 = BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
+ .addReg(leaInReg2, RegState::Define, X86::sub_16bit)
+ .addReg(Src2, getKillRegState(isKill2));
addRegReg(MIB, leaInReg, true, leaInReg2, true);
}
if (LV && isKill2 && InsMI2)
- LV->replaceKillInstruction(Src2, MI, InsMI2);
+ LV->replaceKillInstruction(Src2, MI, *InsMI2);
break;
}
}
MachineInstr *NewMI = MIB;
MachineInstr *ExtMI =
- BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
- .addReg(Dest, RegState::Define | getDeadRegState(isDead))
- .addReg(leaOutReg, RegState::Kill, X86::sub_16bit);
+ BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
+ .addReg(Dest, RegState::Define | getDeadRegState(isDead))
+ .addReg(leaOutReg, RegState::Kill, X86::sub_16bit);
if (LV) {
// Update live variables
LV->getVarInfo(leaInReg).Kills.push_back(NewMI);
LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI);
if (isKill)
- LV->replaceKillInstruction(Src, MI, InsMI);
+ LV->replaceKillInstruction(Src, MI, *InsMI);
if (isDead)
- LV->replaceKillInstruction(Dest, MI, ExtMI);
+ LV->replaceKillInstruction(Dest, MI, *ExtMI);
}
return ExtMI;
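// End-to-end sketch of the 16-bit promotion performed above, for ADD16ri
// (opcode names are real; the virtual register numbers are illustrative):
//   %dst:gr16 = ADD16ri %src:gr16, 5
// becomes
//   %in:gr32 = IMPLICIT_DEF
//   %in.sub_16bit = COPY %src
//   %out:gr32 = LEA32r %in, 1, %noreg, 5, %noreg  (LEA64_32r in 64-bit mode)
//   %dst:gr16 = COPY %out.sub_16bit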
@@ -2727,20 +2846,17 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const {
- MachineInstr *MI = MBBI;
-
+ MachineInstr &MI, LiveVariables *LV) const {
// The following opcodes also set the condition code register(s). Only
// convert them to an equivalent LEA if the condition code register defs
// are dead!
if (hasLiveCondCodeDef(MI))
return nullptr;
- MachineFunction &MF = *MI->getParent()->getParent();
+ MachineFunction &MF = *MI.getParent()->getParent();
// All input instructions are two-address instructions. Get the known operands.
- const MachineOperand &Dest = MI->getOperand(0);
- const MachineOperand &Src = MI->getOperand(1);
+ const MachineOperand &Dest = MI.getOperand(0);
+ const MachineOperand &Src = MI.getOperand(1);
MachineInstr *NewMI = nullptr;
// FIXME: 16-bit LEAs are really slow on Athlons, but not bad on P4s. When
@@ -2749,11 +2865,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
bool DisableLEA16 = true;
bool is64Bit = Subtarget.is64Bit();
- unsigned MIOpc = MI->getOpcode();
+ unsigned MIOpc = MI.getOpcode();
switch (MIOpc) {
default: return nullptr;
case X86::SHL64ri: {
- assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
+ assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
@@ -2763,13 +2879,17 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
&X86::GR64_NOSPRegClass))
return nullptr;
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
- .addOperand(Dest)
- .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0);
+ NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
+ .addOperand(Dest)
+ .addReg(0)
+ .addImm(1ULL << ShAmt)
+ .addOperand(Src)
+ .addImm(0)
+ .addReg(0);
break;
}
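// Sketch of the equivalence exploited above: a left shift by 1..3 equals an
// LEA with scale 2, 4 or 8 (a hypothetical helper, not from the patch).
#include <cassert>
#include <cstdint>

uint64_t shlViaLeaScale(uint64_t Src, unsigned ShAmt) {
  // lea (, %src, 1 << ShAmt), %dst computes Src << ShAmt; using 1ULL keeps
  // the scale computation in 64 bits, matching the code above.
  return Src * (1ULL << ShAmt);
}

void shlLeaExample() {
  assert(shlViaLeaScale(7, 3) == (7ULL << 3)); // shl $3 == lea (,%r,8)
}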
case X86::SHL32ri: {
- assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
+ assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
@@ -2783,11 +2903,14 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
SrcReg, isKill, isUndef, ImplicitOp))
return nullptr;
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest)
- .addReg(0).addImm(1 << ShAmt)
- .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
- .addImm(0).addReg(0);
+ MachineInstrBuilder MIB =
+ BuildMI(MF, MI.getDebugLoc(), get(Opc))
+ .addOperand(Dest)
+ .addReg(0)
+ .addImm(1ULL << ShAmt)
+ .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
+ .addImm(0)
+ .addReg(0);
if (ImplicitOp.getReg() != 0)
MIB.addOperand(ImplicitOp);
NewMI = MIB;
@@ -2795,20 +2918,25 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
break;
}
case X86::SHL16ri: {
- assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
+ assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : nullptr;
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addOperand(Dest)
- .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0);
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
+ : nullptr;
+ NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
+ .addOperand(Dest)
+ .addReg(0)
+ .addImm(1ULL << ShAmt)
+ .addOperand(Src)
+ .addImm(0)
+ .addReg(0);
break;
}
case X86::INC64r:
case X86::INC32r: {
- assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
+ assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
bool isKill, isUndef;
@@ -2818,9 +2946,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
SrcReg, isKill, isUndef, ImplicitOp))
return nullptr;
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest)
- .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef));
+ MachineInstrBuilder MIB =
+ BuildMI(MF, MI.getDebugLoc(), get(Opc))
+ .addOperand(Dest)
+ .addReg(SrcReg,
+ getKillRegState(isKill) | getUndefRegState(isUndef));
if (ImplicitOp.getReg() != 0)
MIB.addOperand(ImplicitOp);
@@ -2829,15 +2959,17 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
}
case X86::INC16r:
if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
: nullptr;
- assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
- NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addOperand(Dest).addOperand(Src), 1);
+ assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
+ NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
+ .addOperand(Dest)
+ .addOperand(Src),
+ 1);
break;
case X86::DEC64r:
case X86::DEC32r: {
- assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
+ assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
@@ -2848,9 +2980,10 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
SrcReg, isKill, isUndef, ImplicitOp))
return nullptr;
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest)
- .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill));
+ MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
+ .addOperand(Dest)
+ .addReg(SrcReg, getUndefRegState(isUndef) |
+ getKillRegState(isKill));
if (ImplicitOp.getReg() != 0)
MIB.addOperand(ImplicitOp);
@@ -2860,17 +2993,19 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
}
case X86::DEC16r:
if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
: nullptr;
- assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
- NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addOperand(Dest).addOperand(Src), -1);
+ assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
+ NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
+ .addOperand(Dest)
+ .addOperand(Src),
+ -1);
break;
case X86::ADD64rr:
case X86::ADD64rr_DB:
case X86::ADD32rr:
case X86::ADD32rr_DB: {
- assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
+ assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc;
if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
Opc = X86::LEA64r;
@@ -2884,7 +3019,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
SrcReg, isKill, isUndef, ImplicitOp))
return nullptr;
- const MachineOperand &Src2 = MI->getOperand(2);
+ const MachineOperand &Src2 = MI.getOperand(2);
bool isKill2, isUndef2;
unsigned SrcReg2;
MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
@@ -2892,8 +3027,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
SrcReg2, isKill2, isUndef2, ImplicitOp2))
return nullptr;
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest);
+ MachineInstrBuilder MIB =
+ BuildMI(MF, MI.getDebugLoc(), get(Opc)).addOperand(Dest);
if (ImplicitOp.getReg() != 0)
MIB.addOperand(ImplicitOp);
if (ImplicitOp2.getReg() != 0)
@@ -2906,45 +3041,46 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
NewMI->getOperand(3).setIsUndef(isUndef2);
if (LV && Src2.isKill())
- LV->replaceKillInstruction(SrcReg2, MI, NewMI);
+ LV->replaceKillInstruction(SrcReg2, MI, *NewMI);
break;
}
case X86::ADD16rr:
case X86::ADD16rr_DB: {
if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
: nullptr;
- assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- unsigned Src2 = MI->getOperand(2).getReg();
- bool isKill2 = MI->getOperand(2).isKill();
- NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addOperand(Dest),
- Src.getReg(), Src.isKill(), Src2, isKill2);
+ assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
+ unsigned Src2 = MI.getOperand(2).getReg();
+ bool isKill2 = MI.getOperand(2).isKill();
+ NewMI = addRegReg(
+ BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).addOperand(Dest),
+ Src.getReg(), Src.isKill(), Src2, isKill2);
// Preserve undefness of the operands.
- bool isUndef = MI->getOperand(1).isUndef();
- bool isUndef2 = MI->getOperand(2).isUndef();
+ bool isUndef = MI.getOperand(1).isUndef();
+ bool isUndef2 = MI.getOperand(2).isUndef();
NewMI->getOperand(1).setIsUndef(isUndef);
NewMI->getOperand(3).setIsUndef(isUndef2);
if (LV && isKill2)
- LV->replaceKillInstruction(Src2, MI, NewMI);
+ LV->replaceKillInstruction(Src2, MI, *NewMI);
break;
}
case X86::ADD64ri32:
case X86::ADD64ri8:
case X86::ADD64ri32_DB:
case X86::ADD64ri8_DB:
- assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
- .addOperand(Dest).addOperand(Src),
- MI->getOperand(2).getImm());
+ assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
+ NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
+ .addOperand(Dest)
+ .addOperand(Src),
+ MI.getOperand(2).getImm());
break;
case X86::ADD32ri:
case X86::ADD32ri8:
case X86::ADD32ri_DB:
case X86::ADD32ri8_DB: {
- assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
+ assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
bool isKill, isUndef;
@@ -2954,13 +3090,14 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
SrcReg, isKill, isUndef, ImplicitOp))
return nullptr;
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addOperand(Dest)
- .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill));
+ MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
+ .addOperand(Dest)
+ .addReg(SrcReg, getUndefRegState(isUndef) |
+ getKillRegState(isKill));
if (ImplicitOp.getReg() != 0)
MIB.addOperand(ImplicitOp);
- NewMI = addOffset(MIB, MI->getOperand(2).getImm());
+ NewMI = addOffset(MIB, MI.getOperand(2).getImm());
break;
}
case X86::ADD16ri:
@@ -2968,12 +3105,13 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD16ri_DB:
case X86::ADD16ri8_DB:
if (DisableLEA16)
- return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV)
+ return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
: nullptr;
- assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
- .addOperand(Dest).addOperand(Src),
- MI->getOperand(2).getImm());
+ assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
+ NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
+ .addOperand(Dest)
+ .addOperand(Src),
+ MI.getOperand(2).getImm());
break;
}
@@ -2981,12 +3119,12 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (LV) { // Update live variables
if (Src.isKill())
- LV->replaceKillInstruction(Src.getReg(), MI, NewMI);
+ LV->replaceKillInstruction(Src.getReg(), MI, *NewMI);
if (Dest.isDead())
- LV->replaceKillInstruction(Dest.getReg(), MI, NewMI);
+ LV->replaceKillInstruction(Dest.getReg(), MI, *NewMI);
}
- MFI->insert(MBBI, NewMI); // Insert the new inst
+ MFI->insert(MI.getIterator(), NewMI); // Insert the new inst
return NewMI;
}
@@ -3142,11 +3280,16 @@ static bool isFMA3(unsigned Opcode, bool *IsIntrinsic = nullptr) {
llvm_unreachable("Opcode not handled by the switch");
}
-MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
- bool NewMI,
+MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const {
- switch (MI->getOpcode()) {
+ auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
+ if (NewMI)
+ return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
+ return MI;
+ };
+
+ switch (MI.getOpcode()) {
case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
@@ -3155,7 +3298,7 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
unsigned Opc;
unsigned Size;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
@@ -3164,15 +3307,12 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
}
- unsigned Amt = MI->getOperand(3).getImm();
- if (NewMI) {
- MachineFunction &MF = *MI->getParent()->getParent();
- MI = MF.CloneMachineInstr(MI);
- NewMI = false;
- }
- MI->setDesc(get(Opc));
- MI->getOperand(3).setImm(Size-Amt);
- return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
+ unsigned Amt = MI.getOperand(3).getImm();
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.setDesc(get(Opc));
+ WorkingMI.getOperand(3).setImm(Size - Amt);
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
}
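// Sketch verifying the commute identity used above:
// SHRD(B, C, I) == SHLD(C, B, Size - I) for 0 < I < Size.
#include <cassert>
#include <cstdint>

uint16_t shrd16(uint16_t B, uint16_t C, unsigned I) {
  return uint16_t((B >> I) | (C << (16 - I))); // low half of (C:B) >> I
}
uint16_t shld16(uint16_t B, uint16_t C, unsigned I) {
  return uint16_t((B << I) | (C >> (16 - I))); // high half of (B:C) << I
}

void shrdShldExample() {
  uint16_t B = 0x1234, C = 0xABCD;
  for (unsigned I = 1; I < 16; ++I)
    assert(shrd16(B, C, I) == shld16(C, B, 16 - I));
}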
case X86::BLENDPDrri:
case X86::BLENDPSrri:
@@ -3186,7 +3326,7 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::VPBLENDDYrri:
case X86::VPBLENDWYrri:{
unsigned Mask;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::BLENDPDrri: Mask = 0x03; break;
case X86::BLENDPSrri: Mask = 0x0F; break;
@@ -3201,29 +3341,23 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::VPBLENDWYrri: Mask = 0xFF; break;
}
// Only the least significant bits of Imm are used.
- unsigned Imm = MI->getOperand(3).getImm() & Mask;
- if (NewMI) {
- MachineFunction &MF = *MI->getParent()->getParent();
- MI = MF.CloneMachineInstr(MI);
- NewMI = false;
- }
- MI->getOperand(3).setImm(Mask ^ Imm);
- return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
+ unsigned Imm = MI.getOperand(3).getImm() & Mask;
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.getOperand(3).setImm(Mask ^ Imm);
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
}
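// Sketch of why the blend immediate is XORed with the mask when commuting:
// each immediate bit picks element i from Src2 (set) or Src1 (clear), so
// swapping the sources means flipping every bit the instruction uses.
#include <array>
#include <cassert>

using Vec4 = std::array<int, 4>;

Vec4 blend4(const Vec4 &Src1, const Vec4 &Src2, unsigned Imm) {
  Vec4 R{};
  for (int i = 0; i < 4; ++i)
    R[i] = ((Imm >> i) & 1) ? Src2[i] : Src1[i];
  return R;
}

void blendCommuteExample() {
  Vec4 A{1, 2, 3, 4}, B{5, 6, 7, 8};
  unsigned Imm = 0x5, Mask = 0xF; // a 4-element blend uses 4 mask bits
  assert(blend4(A, B, Imm) == blend4(B, A, Mask ^ Imm));
}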
case X86::PCLMULQDQrr:
case X86::VPCLMULQDQrr:{
// SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0]
// SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0]
- unsigned Imm = MI->getOperand(3).getImm();
+ unsigned Imm = MI.getOperand(3).getImm();
unsigned Src1Hi = Imm & 0x01;
unsigned Src2Hi = Imm & 0x10;
- if (NewMI) {
- MachineFunction &MF = *MI->getParent()->getParent();
- MI = MF.CloneMachineInstr(MI);
- NewMI = false;
- }
- MI->getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
- return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
}
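// Worked sketch of the immediate swap above: bit 0 selects the half of the
// first source and bit 4 the half of the second, so commuting the sources
// just exchanges those two bits.
#include <cassert>

unsigned swapClmulImm(unsigned Imm) {
  unsigned Src1Hi = Imm & 0x01;
  unsigned Src2Hi = Imm & 0x10;
  return (Src1Hi << 4) | (Src2Hi >> 4);
}

void clmulExamples() {
  assert(swapClmulImm(0x00) == 0x00);
  assert(swapClmulImm(0x01) == 0x10); // "hi half of Src1" becomes Src2's bit
  assert(swapClmulImm(0x10) == 0x01);
  assert(swapClmulImm(0x11) == 0x11);
}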
case X86::CMPPDrri:
case X86::CMPPSrri:
@@ -3233,17 +3367,12 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::VCMPPSYrri: {
// Float comparison can be safely commuted for
// Ordered/Unordered/Equal/NotEqual tests
- unsigned Imm = MI->getOperand(3).getImm() & 0x7;
+ unsigned Imm = MI.getOperand(3).getImm() & 0x7;
switch (Imm) {
case 0x00: // EQUAL
case 0x03: // UNORDERED
case 0x04: // NOT EQUAL
case 0x07: // ORDERED
- if (NewMI) {
- MachineFunction &MF = *MI->getParent()->getParent();
- MI = MF.CloneMachineInstr(MI);
- NewMI = false;
- }
return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
default:
return nullptr;
@@ -3254,7 +3383,7 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::VPCOMQri: case X86::VPCOMUQri:
case X86::VPCOMWri: case X86::VPCOMUWri: {
// Flip comparison mode immediate (if necessary).
- unsigned Imm = MI->getOperand(3).getImm() & 0x7;
+ unsigned Imm = MI.getOperand(3).getImm() & 0x7;
switch (Imm) {
case 0x00: Imm = 0x02; break; // LT -> GT
case 0x01: Imm = 0x03; break; // LE -> GE
@@ -3267,13 +3396,21 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
default:
break;
}
- if (NewMI) {
- MachineFunction &MF = *MI->getParent()->getParent();
- MI = MF.CloneMachineInstr(MI);
- NewMI = false;
- }
- MI->getOperand(3).setImm(Imm);
- return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.getOperand(3).setImm(Imm);
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
+ }
+ case X86::VPERM2F128rr:
+ case X86::VPERM2I128rr: {
+ // Flip the permute source immediate.
+ // Imm & 0x02: the low 128-bit lane selects Op1.lo/hi if set, Op0.lo/hi if clear.
+ // Imm & 0x20: the high 128-bit lane selects Op1.lo/hi if set, Op0.lo/hi if clear.
+ unsigned Imm = MI.getOperand(3).getImm() & 0xFF;
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.getOperand(3).setImm(Imm ^ 0x22);
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
}
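// Sketch of the VPERM2F128/VPERM2I128 commute: bits 1 and 5 of the
// immediate pick which source each 128-bit lane reads, so swapping the
// sources is a single XOR with 0x22.
#include <cassert>

unsigned commutePerm2Imm(unsigned Imm) { return (Imm & 0xFF) ^ 0x22; }

void perm2Examples() {
  // 0x20: low lane from Op0.lo, high lane from Op1.lo; after the operand
  // swap the same selection is encoded as 0x02.
  assert(commutePerm2Imm(0x20) == 0x02);
  assert(commutePerm2Imm(0x31) == 0x13);
}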
case X86::CMOVB16rr: case X86::CMOVB32rr: case X86::CMOVB64rr:
case X86::CMOVAE16rr: case X86::CMOVAE32rr: case X86::CMOVAE64rr:
@@ -3292,7 +3429,7 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::CMOVO16rr: case X86::CMOVO32rr: case X86::CMOVO64rr:
case X86::CMOVNO16rr: case X86::CMOVNO32rr: case X86::CMOVNO64rr: {
unsigned Opc;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::CMOVB16rr: Opc = X86::CMOVAE16rr; break;
case X86::CMOVB32rr: Opc = X86::CMOVAE32rr; break;
@@ -3343,31 +3480,27 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr *MI,
case X86::CMOVNO32rr: Opc = X86::CMOVO32rr; break;
case X86::CMOVNO64rr: Opc = X86::CMOVO64rr; break;
}
- if (NewMI) {
- MachineFunction &MF = *MI->getParent()->getParent();
- MI = MF.CloneMachineInstr(MI);
- NewMI = false;
- }
- MI->setDesc(get(Opc));
- // Fallthrough intended.
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.setDesc(get(Opc));
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
}
default:
- if (isFMA3(MI->getOpcode())) {
+ if (isFMA3(MI.getOpcode())) {
unsigned Opc = getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2);
if (Opc == 0)
return nullptr;
- if (NewMI) {
- MachineFunction &MF = *MI->getParent()->getParent();
- MI = MF.CloneMachineInstr(MI);
- NewMI = false;
- }
- MI->setDesc(get(Opc));
+ auto &WorkingMI = cloneIfNew(MI);
+ WorkingMI.setDesc(get(Opc));
+ return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+ OpIdx1, OpIdx2);
}
+
return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
}
-bool X86InstrInfo::findFMA3CommutedOpIndices(MachineInstr *MI,
+bool X86InstrInfo::findFMA3CommutedOpIndices(MachineInstr &MI,
unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const {
@@ -3402,12 +3535,12 @@ bool X86InstrInfo::findFMA3CommutedOpIndices(MachineInstr *MI,
// CommutableOpIdx2 is well defined now. Let's choose another commutable
// operand and assign its index to CommutableOpIdx1.
- unsigned Op2Reg = MI->getOperand(CommutableOpIdx2).getReg();
+ unsigned Op2Reg = MI.getOperand(CommutableOpIdx2).getReg();
for (CommutableOpIdx1 = RegOpsNum; CommutableOpIdx1 > 0; CommutableOpIdx1--) {
// The commuted operands must have different registers.
// Otherwise, the commute transformation does not change anything and
// is therefore useless.
- if (Op2Reg != MI->getOperand(CommutableOpIdx1).getReg())
+ if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg())
break;
}
@@ -3427,14 +3560,13 @@ bool X86InstrInfo::findFMA3CommutedOpIndices(MachineInstr *MI,
return getFMA3OpcodeToCommuteOperands(MI, SrcOpIdx1, SrcOpIdx2) != 0;
}
-unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(MachineInstr *MI,
- unsigned SrcOpIdx1,
- unsigned SrcOpIdx2) const {
- unsigned Opc = MI->getOpcode();
+unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
+ MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2) const {
+ unsigned Opc = MI.getOpcode();
// Define the array that holds FMA opcodes in groups
// of 3 opcodes (132, 213, 231) each.
- static const unsigned RegularOpcodeGroups[][3] = {
+ static const uint16_t RegularOpcodeGroups[][3] = {
{ X86::VFMADDSSr132r, X86::VFMADDSSr213r, X86::VFMADDSSr231r },
{ X86::VFMADDSDr132r, X86::VFMADDSDr213r, X86::VFMADDSDr231r },
{ X86::VFMADDPSr132r, X86::VFMADDPSr213r, X86::VFMADDPSr231r },
@@ -3508,7 +3640,7 @@ unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(MachineInstr *MI,
// Define the array that holds FMA*_Int opcodes in groups
// of 3 opcodes (132, 213, 231) each.
- static const unsigned IntrinOpcodeGroups[][3] = {
+ static const uint16_t IntrinOpcodeGroups[][3] = {
{ X86::VFMADDSSr132r_Int, X86::VFMADDSSr213r_Int, X86::VFMADDSSr231r_Int },
{ X86::VFMADDSDr132r_Int, X86::VFMADDSDr213r_Int, X86::VFMADDSDr231r_Int },
{ X86::VFMADDSSr132m_Int, X86::VFMADDSSr213m_Int, X86::VFMADDSSr231m_Int },
@@ -3539,7 +3671,7 @@ unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(MachineInstr *MI,
isFMA3(Opc, &IsIntrinOpcode);
size_t GroupsNum;
- const unsigned (*OpcodeGroups)[3];
+ const uint16_t (*OpcodeGroups)[3];
if (IsIntrinOpcode) {
GroupsNum = array_lengthof(IntrinOpcodeGroups);
OpcodeGroups = IntrinOpcodeGroups;
@@ -3548,7 +3680,7 @@ unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(MachineInstr *MI,
OpcodeGroups = RegularOpcodeGroups;
}
- const unsigned *FoundOpcodesGroup = nullptr;
+ const uint16_t *FoundOpcodesGroup = nullptr;
size_t FormIndex;
// Look for the input opcode in the corresponding opcodes table.
@@ -3616,34 +3748,33 @@ unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(MachineInstr *MI,
return FoundOpcodesGroup[FormIndex];
}
-bool X86InstrInfo::findCommutedOpIndices(MachineInstr *MI,
- unsigned &SrcOpIdx1,
+bool X86InstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const {
- switch (MI->getOpcode()) {
- case X86::CMPPDrri:
- case X86::CMPPSrri:
- case X86::VCMPPDrri:
- case X86::VCMPPSrri:
- case X86::VCMPPDYrri:
- case X86::VCMPPSYrri: {
- // Float comparison can be safely commuted for
- // Ordered/Unordered/Equal/NotEqual tests
- unsigned Imm = MI->getOperand(3).getImm() & 0x7;
- switch (Imm) {
- case 0x00: // EQUAL
- case 0x03: // UNORDERED
- case 0x04: // NOT EQUAL
- case 0x07: // ORDERED
- // The indices of the commutable operands are 1 and 2.
- // Assign them to the returned operand indices here.
- return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
- }
- return false;
+ switch (MI.getOpcode()) {
+ case X86::CMPPDrri:
+ case X86::CMPPSrri:
+ case X86::VCMPPDrri:
+ case X86::VCMPPSrri:
+ case X86::VCMPPDYrri:
+ case X86::VCMPPSYrri: {
+ // Float comparison can be safely commuted for
+ // Ordered/Unordered/Equal/NotEqual tests
+ unsigned Imm = MI.getOperand(3).getImm() & 0x7;
+ switch (Imm) {
+ case 0x00: // EQUAL
+ case 0x03: // UNORDERED
+ case 0x04: // NOT EQUAL
+ case 0x07: // ORDERED
+ // The indices of the commutable operands are 1 and 2.
+ // Assign them to the returned operand indices here.
+ return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
}
- default:
- if (isFMA3(MI->getOpcode()))
- return findFMA3CommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
- return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
+ return false;
+ }
+ default:
+ if (isFMA3(MI.getOpcode()))
+ return findFMA3CommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
+ return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
}
return false;
}
@@ -3791,6 +3922,8 @@ X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
case X86::COND_NP: return X86::COND_P;
case X86::COND_O: return X86::COND_NO;
case X86::COND_NO: return X86::COND_O;
+ case X86::COND_NE_OR_P: return X86::COND_E_AND_NP;
+ case X86::COND_E_AND_NP: return X86::COND_NE_OR_P;
}
}
@@ -3887,17 +4020,38 @@ unsigned X86::getCMovFromCond(CondCode CC, unsigned RegBytes,
}
}
-bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
- if (!MI->isTerminator()) return false;
+bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
+ if (!MI.isTerminator()) return false;
// Conditional branch is a special case.
- if (MI->isBranch() && !MI->isBarrier())
+ if (MI.isBranch() && !MI.isBarrier())
return true;
- if (!MI->isPredicable())
+ if (!MI.isPredicable())
return true;
return !isPredicated(MI);
}
+// Given an MBB and its TBB, find the FBB that was a fallthrough MBB (it may
+// not be a fallthrough MBB now due to layout changes). Return nullptr if the
+// fallthrough MBB cannot be identified.
+static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB,
+ MachineBasicBlock *TBB) {
+ // Look for non-EHPad successors other than TBB. If we find exactly one, it
+ // is the fallthrough MBB. If we find zero, then TBB is both the target MBB
+ // and fallthrough MBB. If we find more than one, we cannot identify the
+ // fallthrough MBB and should return nullptr.
+ MachineBasicBlock *FallthroughBB = nullptr;
+ for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI) {
+ if ((*SI)->isEHPad() || (*SI == TBB && FallthroughBB))
+ continue;
+ // Return nullptr if we find more than one fallthrough successor.
+ if (FallthroughBB && FallthroughBB != TBB)
+ return nullptr;
+ FallthroughBB = *SI;
+ }
+ return FallthroughBB;
+}
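// Behavior sketch (not part of the patch) for a few successor sets, where
// TBB is the analyzed branch target and EH pads are ignored throughout:
//   succs = {TBB}           -> returns TBB (target and fallthrough agree)
//   succs = {TBB, FB}       -> returns FB
//   succs = {TBB, FB1, FB2} -> returns nullptr (ambiguous)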
+
bool X86InstrInfo::AnalyzeBranchImpl(
MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
@@ -3914,7 +4068,7 @@ bool X86InstrInfo::AnalyzeBranchImpl(
// Working from the bottom, when we see a non-terminator instruction, we're
// done.
- if (!isUnpredicatedTerminator(I))
+ if (!isUnpredicatedTerminator(*I))
break;
// A terminator that isn't a branch can't easily be handled by this
@@ -4000,7 +4154,7 @@ bool X86InstrInfo::AnalyzeBranchImpl(
FBB = TBB;
TBB = I->getOperand(0).getMBB();
Cond.push_back(MachineOperand::CreateImm(BranchCode));
- CondBranches.push_back(I);
+ CondBranches.push_back(&*I);
continue;
}
@@ -4010,41 +4164,56 @@ bool X86InstrInfo::AnalyzeBranchImpl(
assert(Cond.size() == 1);
assert(TBB);
- // Only handle the case where all conditional branches branch to the same
- // destination.
- if (TBB != I->getOperand(0).getMBB())
- return true;
-
// If the conditions are the same, we can leave them alone.
X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
- if (OldBranchCode == BranchCode)
+ auto NewTBB = I->getOperand(0).getMBB();
+ if (OldBranchCode == BranchCode && TBB == NewTBB)
continue;
// If they differ, see if they fit one of the known patterns. Theoretically,
// we could handle more patterns here, but we shouldn't expect to see them
// if instruction selection has done a reasonable job.
- if ((OldBranchCode == X86::COND_NP &&
- BranchCode == X86::COND_E) ||
- (OldBranchCode == X86::COND_E &&
- BranchCode == X86::COND_NP))
- BranchCode = X86::COND_NP_OR_E;
- else if ((OldBranchCode == X86::COND_P &&
- BranchCode == X86::COND_NE) ||
- (OldBranchCode == X86::COND_NE &&
- BranchCode == X86::COND_P))
+ if (TBB == NewTBB &&
+ ((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) ||
+ (OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) {
BranchCode = X86::COND_NE_OR_P;
- else
+ } else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) ||
+ (OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) {
+ if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB)))
+ return true;
+
+ // X86::COND_E_AND_NP usually has two different branch destinations.
+ //
+ // JP B1
+ // JE B2
+ // JMP B1
+ // B1:
+ // B2:
+ //
+ // Here this condition branches to B2 only if NP && E. It has another
+ // equivalent form:
+ //
+ // JNE B1
+ // JNP B2
+ // JMP B1
+ // B1:
+ // B2:
+ //
+ // Similarly, it branches to B2 only if E && NP. That is why this condition
+ // is named COND_E_AND_NP.
+ BranchCode = X86::COND_E_AND_NP;
+ } else
return true;
// Update the MachineOperand.
Cond[0].setImm(BranchCode);
- CondBranches.push_back(I);
+ CondBranches.push_back(&*I);
}
return false;
}
-bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
@@ -4053,7 +4222,7 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify);
}
-bool X86InstrInfo::AnalyzeBranchPredicate(MachineBasicBlock &MBB,
+bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
MachineBranchPredicate &MBP,
bool AllowModify) const {
using namespace std::placeholders;
@@ -4142,10 +4311,11 @@ unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return Count;
}
-unsigned
-X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const {
+unsigned X86InstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
@@ -4158,17 +4328,13 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
return 1;
}
+ // If FBB is null, it is implied to be a fall-through block.
+ bool FallThru = FBB == nullptr;
+
// Conditional branch.
unsigned Count = 0;
X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
switch (CC) {
- case X86::COND_NP_OR_E:
- // Synthesize NP_OR_E with two branches.
- BuildMI(&MBB, DL, get(X86::JNP_1)).addMBB(TBB);
- ++Count;
- BuildMI(&MBB, DL, get(X86::JE_1)).addMBB(TBB);
- ++Count;
- break;
case X86::COND_NE_OR_P:
// Synthesize NE_OR_P with two branches.
BuildMI(&MBB, DL, get(X86::JNE_1)).addMBB(TBB);
@@ -4176,13 +4342,26 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
BuildMI(&MBB, DL, get(X86::JP_1)).addMBB(TBB);
++Count;
break;
+ case X86::COND_E_AND_NP:
+ // Use the next block of MBB as FBB if it is null.
+ if (FBB == nullptr) {
+ FBB = getFallThroughMBB(&MBB, TBB);
+ assert(FBB && "MBB cannot be the last block in the function when the "
+ "false body is a fall-through.");
+ }
+ // Synthesize COND_E_AND_NP with two branches.
+ BuildMI(&MBB, DL, get(X86::JNE_1)).addMBB(FBB);
+ ++Count;
+ BuildMI(&MBB, DL, get(X86::JNP_1)).addMBB(TBB);
+ ++Count;
+ break;
default: {
unsigned Opc = GetCondBranchFromCond(CC);
BuildMI(&MBB, DL, get(Opc)).addMBB(TBB);
++Count;
}
}
- if (FBB) {
+ if (!FallThru) {
// Two-way Conditional branch. Insert the second branch.
BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB);
++Count;
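
For reference, the net effect of the COND_E_AND_NP lowering above (a sketch; the block labels are placeholders):

//   JNE_1 FBB   ; taken when !E, i.e. straight to the false path
//   JNP_1 TBB   ; reached only when E; taken when NP, so TBB iff E && NP
//   JMP_1 FBB   ; appended below only when FBB is not the fall-through block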
@@ -4228,15 +4407,16 @@ canInsertSelect(const MachineBasicBlock &MBB,
}
void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DstReg, ArrayRef<MachineOperand> Cond,
- unsigned TrueReg, unsigned FalseReg) const {
- MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
- assert(Cond.size() == 1 && "Invalid Cond array");
- unsigned Opc = getCMovFromCond((X86::CondCode)Cond[0].getImm(),
- MRI.getRegClass(DstReg)->getSize(),
- false/*HasMemoryOperand*/);
- BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg);
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DstReg,
+ ArrayRef<MachineOperand> Cond, unsigned TrueReg,
+ unsigned FalseReg) const {
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ assert(Cond.size() == 1 && "Invalid Cond array");
+ unsigned Opc = getCMovFromCond((X86::CondCode)Cond[0].getImm(),
+ MRI.getRegClass(DstReg)->getSize(),
+ false /*HasMemoryOperand*/);
+ BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg);
}
/// Test if the given register is a physical h register.
@@ -4258,16 +4438,18 @@ static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
if (X86::GR64RegClass.contains(DestReg)) {
if (X86::VR128XRegClass.contains(SrcReg))
// Copy from a VR128 register to a GR64 register.
- return HasAVX512 ? X86::VMOVPQIto64Zrr: (HasAVX ? X86::VMOVPQIto64rr :
- X86::MOVPQIto64rr);
+ return HasAVX512 ? X86::VMOVPQIto64Zrr :
+ HasAVX ? X86::VMOVPQIto64rr :
+ X86::MOVPQIto64rr;
if (X86::VR64RegClass.contains(SrcReg))
// Copy from a VR64 register to a GR64 register.
return X86::MMX_MOVD64from64rr;
} else if (X86::GR64RegClass.contains(SrcReg)) {
// Copy from a GR64 register to a VR128 register.
if (X86::VR128XRegClass.contains(DestReg))
- return HasAVX512 ? X86::VMOV64toPQIZrr: (HasAVX ? X86::VMOV64toPQIrr :
- X86::MOV64toPQIrr);
+ return HasAVX512 ? X86::VMOV64toPQIZrr :
+ HasAVX ? X86::VMOV64toPQIrr :
+ X86::MOV64toPQIrr;
// Copy from a GR64 register to a VR64 register.
if (X86::VR64RegClass.contains(DestReg))
return X86::MMX_MOVD64to64rr;
@@ -4276,22 +4458,30 @@ static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
// SrcReg(FR32) -> DestReg(GR32)
// SrcReg(GR32) -> DestReg(FR32)
- if (X86::GR32RegClass.contains(DestReg) && X86::FR32XRegClass.contains(SrcReg))
+ if (X86::GR32RegClass.contains(DestReg) &&
+ X86::FR32XRegClass.contains(SrcReg))
// Copy from a FR32 register to a GR32 register.
- return HasAVX512 ? X86::VMOVSS2DIZrr : (HasAVX ? X86::VMOVSS2DIrr : X86::MOVSS2DIrr);
+ return HasAVX512 ? X86::VMOVSS2DIZrr :
+ HasAVX ? X86::VMOVSS2DIrr :
+ X86::MOVSS2DIrr;
- if (X86::FR32XRegClass.contains(DestReg) && X86::GR32RegClass.contains(SrcReg))
+ if (X86::FR32XRegClass.contains(DestReg) &&
+ X86::GR32RegClass.contains(SrcReg))
// Copy from a GR32 register to a FR32 register.
- return HasAVX512 ? X86::VMOVDI2SSZrr : (HasAVX ? X86::VMOVDI2SSrr : X86::MOVDI2SSrr);
+ return HasAVX512 ? X86::VMOVDI2SSZrr :
+ HasAVX ? X86::VMOVDI2SSrr :
+ X86::MOVDI2SSrr;
return 0;
}
+static bool isMaskRegClass(const TargetRegisterClass *RC) {
+ // All KMASK register classes hold the same k registers; any one can be tested.
+ return X86::VK16RegClass.hasSubClassEq(RC);
+}
+
static bool MaskRegClassContains(unsigned Reg) {
- return X86::VK8RegClass.contains(Reg) ||
- X86::VK16RegClass.contains(Reg) ||
- X86::VK32RegClass.contains(Reg) ||
- X86::VK64RegClass.contains(Reg) ||
- X86::VK1RegClass.contains(Reg);
+ // All KMASK register classes hold the same k registers; any one can be tested.
+ return X86::VK16RegClass.contains(Reg);
}
static bool GRRegClassContains(unsigned Reg) {
@@ -4338,13 +4528,22 @@ unsigned copyPhysRegOpcode_AVX512(unsigned& DestReg, unsigned& SrcReg,
if (Subtarget.hasBWI())
if (auto Opc = copyPhysRegOpcode_AVX512_BW(DestReg, SrcReg))
return Opc;
- if (X86::VR128XRegClass.contains(DestReg, SrcReg) ||
- X86::VR256XRegClass.contains(DestReg, SrcReg) ||
- X86::VR512RegClass.contains(DestReg, SrcReg)) {
- DestReg = get512BitSuperRegister(DestReg);
- SrcReg = get512BitSuperRegister(SrcReg);
+ if (X86::VR128XRegClass.contains(DestReg, SrcReg)) {
+ if (Subtarget.hasVLX())
+ return X86::VMOVAPSZ128rr;
+ DestReg = get512BitSuperRegister(DestReg);
+ SrcReg = get512BitSuperRegister(SrcReg);
+ return X86::VMOVAPSZrr;
+ }
+ if (X86::VR256XRegClass.contains(DestReg, SrcReg)) {
+ if (Subtarget.hasVLX())
+ return X86::VMOVAPSZ256rr;
+ DestReg = get512BitSuperRegister(DestReg);
+ SrcReg = get512BitSuperRegister(SrcReg);
+ return X86::VMOVAPSZrr;
+ }
+ if (X86::VR512RegClass.contains(DestReg, SrcReg))
return X86::VMOVAPSZrr;
- }
if (MaskRegClassContains(DestReg) && MaskRegClassContains(SrcReg))
return X86::KMOVWkk;
if (MaskRegClassContains(DestReg) && GRRegClassContains(SrcReg)) {
@@ -4359,9 +4558,9 @@ unsigned copyPhysRegOpcode_AVX512(unsigned& DestReg, unsigned& SrcReg,
}
void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
+ MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
// First deal with the normal symmetric copies.
bool HasAVX = Subtarget.hasAVX();
bool HasAVX512 = Subtarget.hasAVX512();
@@ -4455,22 +4654,33 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
// first frame index.
// See X86ISelLowering.cpp - X86::hasCopyImplyingStackAdjustment.
-
- bool AXDead = (Reg == AX) ||
- (MachineBasicBlock::LQR_Dead ==
- MBB.computeRegisterLiveness(&getRegisterInfo(), AX, MI));
- if (!AXDead) {
- // FIXME: If computeRegisterLiveness() reported LQR_Unknown then AX may
- // actually be dead. This is not a problem for correctness as we are just
- // (unnecessarily) saving+restoring a dead register. However the
- // MachineVerifier expects operands that read from dead registers
- // to be marked with the "undef" flag.
- // An example of this can be found in
- // test/CodeGen/X86/peephole-na-phys-copy-folding.ll and
- // test/CodeGen/X86/cmpxchg-clobber-flags.ll when using
- // -verify-machineinstrs.
- BuildMI(MBB, MI, DL, get(Push)).addReg(AX, getKillRegState(true));
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+ MachineBasicBlock::LivenessQueryResult LQR =
+ MBB.computeRegisterLiveness(TRI, AX, MI);
+ // We do not want to save and restore AX if we do not have to.
+ // Moreover, if we did so while AX is dead, we would need to set
+ // an undef flag on the use of AX; otherwise the verifier would
+ // complain that we read an undef value.
+ // We do not want to relax the machine verifier here, since reading
+ // an undef value usually is a genuine error.
+ if (MachineBasicBlock::LQR_Unknown == LQR) {
+ LivePhysRegs LPR(TRI);
+ LPR.addLiveOuts(MBB);
+ MachineBasicBlock::iterator I = MBB.end();
+ while (I != MI) {
+ --I;
+ LPR.stepBackward(*I);
+ }
+ // AX contains the topmost register in the aliasing hierarchy.
+ // It may not be live, but one of its aliases may be.
+ for (MCRegAliasIterator AI(AX, TRI, true);
+ AI.isValid() && LQR != MachineBasicBlock::LQR_Live; ++AI)
+ LQR = LPR.contains(*AI) ? MachineBasicBlock::LQR_Live
+ : MachineBasicBlock::LQR_Dead;
}
+ bool AXDead = (Reg == AX) || (MachineBasicBlock::LQR_Dead == LQR);
+ if (!AXDead)
+ BuildMI(MBB, MI, DL, get(Push)).addReg(AX, getKillRegState(true));
if (FromEFLAGS) {
BuildMI(MBB, MI, DL, get(X86::SETOr), X86::AL);
BuildMI(MBB, MI, DL, get(X86::LAHF));
@@ -4493,15 +4703,28 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
llvm_unreachable("Cannot emit physreg copy instruction");
}
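
The LQR_Unknown fallback above is the standard LivePhysRegs backward walk; a condensed sketch of the same idiom as a reusable helper (the helper itself is not part of the patch):

#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/MC/MCRegisterInfo.h"
using namespace llvm;

// Refine an LQR_Unknown liveness answer for Reg at position MI inside MBB.
static bool isPhysRegLiveAt(unsigned Reg, MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            const TargetRegisterInfo *TRI) {
  LivePhysRegs LPR(TRI);
  LPR.addLiveOuts(MBB);        // Seed with everything live out of the block.
  for (MachineBasicBlock::iterator I = MBB.end(); I != MI;)
    LPR.stepBackward(*--I);    // Walk back, un-applying defs, re-adding uses.
  // Reg itself may be absent while a sub- or super-register is live, so the
  // whole alias set must be checked, as the loop above does for AX.
  for (MCRegAliasIterator AI(Reg, TRI, /*IncludeSelf=*/true); AI.isValid();
       ++AI)
    if (LPR.contains(*AI))
      return true;
  return false;
}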
+static unsigned getLoadStoreMaskRegOpcode(const TargetRegisterClass *RC,
+ bool load) {
+ switch (RC->getSize()) {
+ default:
+ llvm_unreachable("Unknown spill size");
+ case 2:
+ return load ? X86::KMOVWkm : X86::KMOVWmk;
+ case 4:
+ return load ? X86::KMOVDkm : X86::KMOVDmk;
+ case 8:
+ return load ? X86::KMOVQkm : X86::KMOVQmk;
+ }
+}
+
static unsigned getLoadStoreRegOpcode(unsigned Reg,
const TargetRegisterClass *RC,
bool isStackAligned,
const X86Subtarget &STI,
bool load) {
if (STI.hasAVX512()) {
- if (X86::VK8RegClass.hasSubClassEq(RC) ||
- X86::VK16RegClass.hasSubClassEq(RC))
- return load ? X86::KMOVWkm : X86::KMOVWmk;
+ if (isMaskRegClass(RC))
+ return getLoadStoreMaskRegOpcode(RC, load);
if (RC->getSize() == 4 && X86::FR32XRegClass.hasSubClassEq(RC))
return load ? X86::VMOVSSZrm : X86::VMOVSSZmr;
if (RC->getSize() == 8 && X86::FR64XRegClass.hasSubClassEq(RC))
@@ -4554,25 +4777,38 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
assert((X86::VR128RegClass.hasSubClassEq(RC) ||
X86::VR128XRegClass.hasSubClassEq(RC)) && "Unknown 16-byte regclass");
// If stack is realigned we can use aligned stores.
+ if (X86::VR128RegClass.hasSubClassEq(RC)) {
+ if (isStackAligned)
+ return load ? (HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
+ : (HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
+ else
+ return load ? (HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
+ : (HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
+ }
+ assert(STI.hasVLX() && "Using extended register requires VLX");
if (isStackAligned)
- return load ?
- (HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm) :
- (HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
+ return load ? X86::VMOVAPSZ128rm : X86::VMOVAPSZ128mr;
else
- return load ?
- (HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm) :
- (HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
+ return load ? X86::VMOVUPSZ128rm : X86::VMOVUPSZ128mr;
}
case 32:
assert((X86::VR256RegClass.hasSubClassEq(RC) ||
X86::VR256XRegClass.hasSubClassEq(RC)) && "Unknown 32-byte regclass");
// If stack is realigned we can use aligned stores.
+ if (X86::VR256RegClass.hasSubClassEq(RC)) {
+ if (isStackAligned)
+ return load ? X86::VMOVAPSYrm : X86::VMOVAPSYmr;
+ else
+ return load ? X86::VMOVUPSYrm : X86::VMOVUPSYmr;
+ }
+ assert(STI.hasVLX() && "Using extended register requires VLX");
if (isStackAligned)
- return load ? X86::VMOVAPSYrm : X86::VMOVAPSYmr;
+ return load ? X86::VMOVAPSZ256rm : X86::VMOVAPSZ256mr;
else
- return load ? X86::VMOVUPSYrm : X86::VMOVUPSYmr;
+ return load ? X86::VMOVUPSZ256rm : X86::VMOVUPSZ256mr;
case 64:
assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
+ assert(STI.hasAVX512() && "Using 512-bit register requires AVX512");
if (isStackAligned)
return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
else
@@ -4580,25 +4816,29 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
}
}
-bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
- unsigned &Offset,
+bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg,
+ int64_t &Offset,
const TargetRegisterInfo *TRI) const {
- const MCInstrDesc &Desc = MemOp->getDesc();
- int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags, MemOp->getOpcode());
+ const MCInstrDesc &Desc = MemOp.getDesc();
+ int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
if (MemRefBegin < 0)
return false;
MemRefBegin += X86II::getOperandBias(Desc);
- BaseReg = MemOp->getOperand(MemRefBegin + X86::AddrBaseReg).getReg();
- if (MemOp->getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
+ MachineOperand &BaseMO = MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
+ if (!BaseMO.isReg()) // Can be an MO_FrameIndex
+ return false;
+
+ BaseReg = BaseMO.getReg();
+ if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
return false;
- if (MemOp->getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
+ if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
X86::NoRegister)
return false;
- const MachineOperand &DispMO = MemOp->getOperand(MemRefBegin + X86::AddrDisp);
+ const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp);
// Displacement can be symbolic
if (!DispMO.isImm())
@@ -4606,8 +4846,8 @@ bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
Offset = DispMO.getImm();
- return (MemOp->getOperand(MemRefBegin + X86::AddrIndexReg).getReg() ==
- X86::NoRegister);
+ return MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() ==
+ X86::NoRegister;
}
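
A hedged usage sketch of the updated hook (the caller shape is an assumption; in-tree callers include load/store clustering in the machine scheduler):

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;

// True when two memory instructions share a base register, so a clustering
// heuristic could compare their immediate offsets directly.
static bool sameBase(MachineInstr &A, MachineInstr &B,
                     const TargetInstrInfo &TII,
                     const TargetRegisterInfo *TRI) {
  unsigned BaseA, BaseB;
  int64_t OffA, OffB;
  // The hook only succeeds for the simple [BaseReg + Disp] form: scale 1,
  // no index register, and an immediate (non-symbolic) displacement.
  return TII.getMemOpBaseRegImmOfs(A, BaseA, OffA, TRI) &&
         TII.getMemOpBaseRegImmOfs(B, BaseB, OffB, TRI) && BaseA == BaseB;
}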
static unsigned getStoreRegOpcode(unsigned SrcReg,
@@ -4697,10 +4937,10 @@ void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
NewMIs.push_back(MIB);
}
-bool X86InstrInfo::
-analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
- int &CmpMask, int &CmpValue) const {
- switch (MI->getOpcode()) {
+bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &CmpMask,
+ int &CmpValue) const {
+ switch (MI.getOpcode()) {
default: break;
case X86::CMP64ri32:
case X86::CMP64ri8:
@@ -4709,17 +4949,17 @@ analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
case X86::CMP16ri:
case X86::CMP16ri8:
case X86::CMP8ri:
- SrcReg = MI->getOperand(0).getReg();
+ SrcReg = MI.getOperand(0).getReg();
SrcReg2 = 0;
CmpMask = ~0;
- CmpValue = MI->getOperand(1).getImm();
+ CmpValue = MI.getOperand(1).getImm();
return true;
// A SUB can be used to perform comparison.
case X86::SUB64rm:
case X86::SUB32rm:
case X86::SUB16rm:
case X86::SUB8rm:
- SrcReg = MI->getOperand(1).getReg();
+ SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
CmpMask = ~0;
CmpValue = 0;
@@ -4728,8 +4968,8 @@ analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
case X86::SUB32rr:
case X86::SUB16rr:
case X86::SUB8rr:
- SrcReg = MI->getOperand(1).getReg();
- SrcReg2 = MI->getOperand(2).getReg();
+ SrcReg = MI.getOperand(1).getReg();
+ SrcReg2 = MI.getOperand(2).getReg();
CmpMask = ~0;
CmpValue = 0;
return true;
@@ -4740,17 +4980,17 @@ analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
case X86::SUB16ri:
case X86::SUB16ri8:
case X86::SUB8ri:
- SrcReg = MI->getOperand(1).getReg();
+ SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
CmpMask = ~0;
- CmpValue = MI->getOperand(2).getImm();
+ CmpValue = MI.getOperand(2).getImm();
return true;
case X86::CMP64rr:
case X86::CMP32rr:
case X86::CMP16rr:
case X86::CMP8rr:
- SrcReg = MI->getOperand(0).getReg();
- SrcReg2 = MI->getOperand(1).getReg();
+ SrcReg = MI.getOperand(0).getReg();
+ SrcReg2 = MI.getOperand(1).getReg();
CmpMask = ~0;
CmpValue = 0;
return true;
@@ -4758,8 +4998,9 @@ analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
case X86::TEST16rr:
case X86::TEST32rr:
case X86::TEST64rr:
- SrcReg = MI->getOperand(0).getReg();
- if (MI->getOperand(1).getReg() != SrcReg) return false;
+ SrcReg = MI.getOperand(0).getReg();
+ if (MI.getOperand(1).getReg() != SrcReg)
+ return false;
// Compare against zero.
SrcReg2 = 0;
CmpMask = ~0;
@@ -4775,47 +5016,40 @@ analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
/// This function can be extended later on.
/// SrcReg, SrcRegs: register operands for FlagI.
/// ImmValue: immediate for FlagI if it takes an immediate.
-inline static bool isRedundantFlagInstr(MachineInstr *FlagI, unsigned SrcReg,
+inline static bool isRedundantFlagInstr(MachineInstr &FlagI, unsigned SrcReg,
unsigned SrcReg2, int ImmValue,
- MachineInstr *OI) {
- if (((FlagI->getOpcode() == X86::CMP64rr &&
- OI->getOpcode() == X86::SUB64rr) ||
- (FlagI->getOpcode() == X86::CMP32rr &&
- OI->getOpcode() == X86::SUB32rr)||
- (FlagI->getOpcode() == X86::CMP16rr &&
- OI->getOpcode() == X86::SUB16rr)||
- (FlagI->getOpcode() == X86::CMP8rr &&
- OI->getOpcode() == X86::SUB8rr)) &&
- ((OI->getOperand(1).getReg() == SrcReg &&
- OI->getOperand(2).getReg() == SrcReg2) ||
- (OI->getOperand(1).getReg() == SrcReg2 &&
- OI->getOperand(2).getReg() == SrcReg)))
+ MachineInstr &OI) {
+ if (((FlagI.getOpcode() == X86::CMP64rr && OI.getOpcode() == X86::SUB64rr) ||
+ (FlagI.getOpcode() == X86::CMP32rr && OI.getOpcode() == X86::SUB32rr) ||
+ (FlagI.getOpcode() == X86::CMP16rr && OI.getOpcode() == X86::SUB16rr) ||
+ (FlagI.getOpcode() == X86::CMP8rr && OI.getOpcode() == X86::SUB8rr)) &&
+ ((OI.getOperand(1).getReg() == SrcReg &&
+ OI.getOperand(2).getReg() == SrcReg2) ||
+ (OI.getOperand(1).getReg() == SrcReg2 &&
+ OI.getOperand(2).getReg() == SrcReg)))
return true;
- if (((FlagI->getOpcode() == X86::CMP64ri32 &&
- OI->getOpcode() == X86::SUB64ri32) ||
- (FlagI->getOpcode() == X86::CMP64ri8 &&
- OI->getOpcode() == X86::SUB64ri8) ||
- (FlagI->getOpcode() == X86::CMP32ri &&
- OI->getOpcode() == X86::SUB32ri) ||
- (FlagI->getOpcode() == X86::CMP32ri8 &&
- OI->getOpcode() == X86::SUB32ri8) ||
- (FlagI->getOpcode() == X86::CMP16ri &&
- OI->getOpcode() == X86::SUB16ri) ||
- (FlagI->getOpcode() == X86::CMP16ri8 &&
- OI->getOpcode() == X86::SUB16ri8) ||
- (FlagI->getOpcode() == X86::CMP8ri &&
- OI->getOpcode() == X86::SUB8ri)) &&
- OI->getOperand(1).getReg() == SrcReg &&
- OI->getOperand(2).getImm() == ImmValue)
+ if (((FlagI.getOpcode() == X86::CMP64ri32 &&
+ OI.getOpcode() == X86::SUB64ri32) ||
+ (FlagI.getOpcode() == X86::CMP64ri8 &&
+ OI.getOpcode() == X86::SUB64ri8) ||
+ (FlagI.getOpcode() == X86::CMP32ri && OI.getOpcode() == X86::SUB32ri) ||
+ (FlagI.getOpcode() == X86::CMP32ri8 &&
+ OI.getOpcode() == X86::SUB32ri8) ||
+ (FlagI.getOpcode() == X86::CMP16ri && OI.getOpcode() == X86::SUB16ri) ||
+ (FlagI.getOpcode() == X86::CMP16ri8 &&
+ OI.getOpcode() == X86::SUB16ri8) ||
+ (FlagI.getOpcode() == X86::CMP8ri && OI.getOpcode() == X86::SUB8ri)) &&
+ OI.getOperand(1).getReg() == SrcReg &&
+ OI.getOperand(2).getImm() == ImmValue)
return true;
return false;
}
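
The equivalence this helper relies on is that CMP is a SUB whose result is discarded, so for identical operands both set EFLAGS the same way; a host-C++ sketch of that fact (illustrative only, with just three of the six flags modeled):

#include <cstdint>

struct Flags { bool ZF, SF, CF; };

// The flags produced by either `SUB a, b` or `CMP a, b` are the same pure
// function of (a, b), so a CMP that follows an equivalent SUB adds no
// information and can be removed.
static Flags flagsOfSub(uint32_t A, uint32_t B) {
  uint32_t R = A - B;
  return {R == 0, static_cast<int32_t>(R) < 0, A < B};
}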
/// Check whether the definition can be converted
/// to remove a comparison against zero.
-inline static bool isDefConvertible(MachineInstr *MI) {
- switch (MI->getOpcode()) {
+inline static bool isDefConvertible(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
default: return false;
// The shift instructions only modify ZF if their shift count is non-zero.
@@ -4899,8 +5133,8 @@ inline static bool isDefConvertible(MachineInstr *MI) {
}
/// Check whether the use can be converted to remove a comparison against zero.
-static X86::CondCode isUseDefConvertible(MachineInstr *MI) {
- switch (MI->getOpcode()) {
+static X86::CondCode isUseDefConvertible(MachineInstr &MI) {
+ switch (MI.getOpcode()) {
default: return X86::COND_INVALID;
case X86::LZCNT16rr: case X86::LZCNT16rm:
case X86::LZCNT32rr: case X86::LZCNT32rm:
@@ -4920,13 +5154,13 @@ static X86::CondCode isUseDefConvertible(MachineInstr *MI) {
/// Check if there exists an earlier instruction that
/// operates on the same source operands and sets flags in the same way as
/// Compare; remove Compare if possible.
-bool X86InstrInfo::
-optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
- int CmpMask, int CmpValue,
- const MachineRegisterInfo *MRI) const {
+bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
+ unsigned SrcReg2, int CmpMask,
+ int CmpValue,
+ const MachineRegisterInfo *MRI) const {
// Check whether we can replace SUB with CMP.
unsigned NewOpcode = 0;
- switch (CmpInstr->getOpcode()) {
+ switch (CmpInstr.getOpcode()) {
default: break;
case X86::SUB64ri32:
case X86::SUB64ri8:
@@ -4943,10 +5177,10 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
case X86::SUB32rr:
case X86::SUB16rr:
case X86::SUB8rr: {
- if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
+ if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
return false;
// There is no use of the destination register; we can replace SUB with CMP.
- switch (CmpInstr->getOpcode()) {
+ switch (CmpInstr.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::SUB64rm: NewOpcode = X86::CMP64rm; break;
case X86::SUB32rm: NewOpcode = X86::CMP32rm; break;
@@ -4964,8 +5198,8 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
case X86::SUB16ri8: NewOpcode = X86::CMP16ri8; break;
case X86::SUB8ri: NewOpcode = X86::CMP8ri; break;
}
- CmpInstr->setDesc(get(NewOpcode));
- CmpInstr->RemoveOperand(0);
+ CmpInstr.setDesc(get(NewOpcode));
+ CmpInstr.RemoveOperand(0);
// Fall through to optimize Cmp if Cmp is CMPrr or CMPri.
if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
@@ -4983,7 +5217,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// If we are comparing against zero, check whether we can use MI to update
// EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize.
bool IsCmpZero = (SrcReg2 == 0 && CmpValue == 0);
- if (IsCmpZero && MI->getParent() != CmpInstr->getParent())
+ if (IsCmpZero && MI->getParent() != CmpInstr.getParent())
return false;
// If we have a use of the source register between the def and our compare
@@ -4991,19 +5225,20 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// right way.
bool ShouldUpdateCC = false;
X86::CondCode NewCC = X86::COND_INVALID;
- if (IsCmpZero && !isDefConvertible(MI)) {
+ if (IsCmpZero && !isDefConvertible(*MI)) {
// Scan forward from the use until we hit the use we're looking for or the
// compare instruction.
for (MachineBasicBlock::iterator J = MI;; ++J) {
// Do we have a convertible instruction?
- NewCC = isUseDefConvertible(J);
+ NewCC = isUseDefConvertible(*J);
if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() &&
J->getOperand(1).getReg() == SrcReg) {
assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!");
ShouldUpdateCC = true; // Update CC later on.
// This is not a def of SrcReg, but still a def of EFLAGS. Keep going
// with the new def.
- MI = Def = J;
+ Def = J;
+ MI = &*Def;
break;
}
@@ -5024,29 +5259,29 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// otherwise, RE is the rend of the basic block.
MachineBasicBlock::reverse_iterator
RI = MachineBasicBlock::reverse_iterator(I),
- RE = CmpInstr->getParent() == MI->getParent() ?
- MachineBasicBlock::reverse_iterator(++Def) /* points to MI */ :
- CmpInstr->getParent()->rend();
+ RE = CmpInstr.getParent() == MI->getParent()
+ ? MachineBasicBlock::reverse_iterator(++Def) /* points to MI */
+ : CmpInstr.getParent()->rend();
MachineInstr *Movr0Inst = nullptr;
for (; RI != RE; ++RI) {
- MachineInstr *Instr = &*RI;
+ MachineInstr &Instr = *RI;
// Check whether CmpInstr can be made redundant by the current instruction.
if (!IsCmpZero &&
isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpValue, Instr)) {
- Sub = Instr;
+ Sub = &Instr;
break;
}
- if (Instr->modifiesRegister(X86::EFLAGS, TRI) ||
- Instr->readsRegister(X86::EFLAGS, TRI)) {
+ if (Instr.modifiesRegister(X86::EFLAGS, TRI) ||
+ Instr.readsRegister(X86::EFLAGS, TRI)) {
// This instruction modifies or uses EFLAGS.
// MOV32r0 etc. are implemented with xor which clobbers condition code.
// They are safe to move up, if the definition to EFLAGS is dead and
// earlier instructions do not read or write EFLAGS.
- if (!Movr0Inst && Instr->getOpcode() == X86::MOV32r0 &&
- Instr->registerDefIsDead(X86::EFLAGS, TRI)) {
- Movr0Inst = Instr;
+ if (!Movr0Inst && Instr.getOpcode() == X86::MOV32r0 &&
+ Instr.registerDefIsDead(X86::EFLAGS, TRI)) {
+ Movr0Inst = &Instr;
continue;
}
@@ -5068,7 +5303,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// live-out.
bool IsSafe = false;
SmallVector<std::pair<MachineInstr*, unsigned /*NewOpc*/>, 4> OpsToUpdate;
- MachineBasicBlock::iterator E = CmpInstr->getParent()->end();
+ MachineBasicBlock::iterator E = CmpInstr.getParent()->end();
for (++I; I != E; ++I) {
const MachineInstr &Instr = *I;
bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI);
@@ -5159,7 +5394,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
// If EFLAGS is not killed nor re-defined, we should check whether it is
// live-out. If it is live-out, do not optimize.
if ((IsCmpZero || IsSwapped) && !IsSafe) {
- MachineBasicBlock *MBB = CmpInstr->getParent();
+ MachineBasicBlock *MBB = CmpInstr.getParent();
for (MachineBasicBlock *Successor : MBB->successors())
if (Successor->isLiveIn(X86::EFLAGS))
return false;
@@ -5199,7 +5434,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
}
assert(i != e && "Unable to locate a def EFLAGS operand");
- CmpInstr->eraseFromParent();
+ CmpInstr.eraseFromParent();
// Modify the condition code of instructions in OpsToUpdate.
for (auto &Op : OpsToUpdate)
@@ -5211,14 +5446,14 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
/// operand at the use. We fold the load instructions if load defines a virtual
/// register, the virtual register is used once in the same BB, and the
/// instructions in-between do not load or store, and have no side effects.
-MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr *MI,
+MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
MachineInstr *&DefMI) const {
if (FoldAsLoadDefReg == 0)
return nullptr;
// To be conservative, if there exists another load, clear the load candidate.
- if (MI->mayLoad()) {
+ if (MI.mayLoad()) {
FoldAsLoadDefReg = 0;
return nullptr;
}
@@ -5233,8 +5468,8 @@ MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr *MI,
// Collect information about virtual register operands of MI.
unsigned SrcOperandId = 0;
bool FoundSrcOperand = false;
- for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
@@ -5251,7 +5486,7 @@ MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr *MI,
return nullptr;
// Check whether we can fold the def into SrcOperandId.
- if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandId, DefMI)) {
+ if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandId, *DefMI)) {
FoldAsLoadDefReg = 0;
return FoldMI;
}
@@ -5313,6 +5548,60 @@ static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII,
return true;
}
+bool X86InstrInfo::ExpandMOVImmSExti8(MachineInstrBuilder &MIB) const {
+ MachineBasicBlock &MBB = *MIB->getParent();
+ DebugLoc DL = MIB->getDebugLoc();
+ int64_t Imm = MIB->getOperand(1).getImm();
+ assert(Imm != 0 && "Using push/pop for 0 is not efficient.");
+ MachineBasicBlock::iterator I = MIB.getInstr();
+
+ int StackAdjustment;
+
+ if (Subtarget.is64Bit()) {
+ assert(MIB->getOpcode() == X86::MOV64ImmSExti8 ||
+ MIB->getOpcode() == X86::MOV32ImmSExti8);
+
+ // Can't use push/pop lowering if the function might write to the red zone.
+ X86MachineFunctionInfo *X86FI =
+ MBB.getParent()->getInfo<X86MachineFunctionInfo>();
+ if (X86FI->getUsesRedZone()) {
+ MIB->setDesc(get(MIB->getOpcode() == X86::MOV32ImmSExti8 ? X86::MOV32ri
+ : X86::MOV64ri));
+ return true;
+ }
+
+ // 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and
+ // widen the register if necessary.
+ StackAdjustment = 8;
+ BuildMI(MBB, I, DL, get(X86::PUSH64i8)).addImm(Imm);
+ MIB->setDesc(get(X86::POP64r));
+ MIB->getOperand(0)
+ .setReg(getX86SubSuperRegister(MIB->getOperand(0).getReg(), 64));
+ } else {
+ assert(MIB->getOpcode() == X86::MOV32ImmSExti8);
+ StackAdjustment = 4;
+ BuildMI(MBB, I, DL, get(X86::PUSH32i8)).addImm(Imm);
+ MIB->setDesc(get(X86::POP32r));
+ }
+
+ // Build CFI if necessary.
+ MachineFunction &MF = *MBB.getParent();
+ const X86FrameLowering *TFL = Subtarget.getFrameLowering();
+ bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
+ bool NeedsDwarfCFI =
+ !IsWin64Prologue &&
+ (MF.getMMI().hasDebugInfo() || MF.getFunction()->needsUnwindTableEntry());
+ bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
+ if (EmitCFI) {
+ TFL->BuildCFI(MBB, I, DL,
+ MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment));
+ TFL->BuildCFI(MBB, std::next(I), DL,
+ MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment));
+ }
+
+ return true;
+}
+
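
For reference, the shape of the expansion performed above for the 64-bit pseudo (a sketch; the instruction sizes quoted in the comments are an assumption, not taken from this patch):

//   %rax = MOV64ImmSExti8 -1      ; pseudo selected for small immediates
// becomes
//   PUSH64i8 -1                   ; 2-byte push of the sign-extended imm8
//   POP64r %rax                   ; 1-byte pop into the widened destination
// instead of a 7-byte MOV64ri32, with paired createAdjustCfaOffset(+8) and
// createAdjustCfaOffset(-8) CFI notes bracketing the pair when DWARF unwind
// info is required and no frame pointer is available.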
// LoadStackGuard has so far only been implemented for 64-bit MachO. Different
// code sequence is needed for other targets.
static void expandLoadStackGuard(MachineInstrBuilder &MIB,
@@ -5322,9 +5611,9 @@ static void expandLoadStackGuard(MachineInstrBuilder &MIB,
unsigned Reg = MIB->getOperand(0).getReg();
const GlobalValue *GV =
cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
- unsigned Flag = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
+ auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
- MachinePointerInfo::getGOT(*MBB.getParent()), Flag, 8, 8);
+ MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, 8);
MachineBasicBlock::iterator I = MIB.getInstr();
BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1)
@@ -5335,16 +5624,19 @@ static void expandLoadStackGuard(MachineInstrBuilder &MIB,
MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
}
-bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
+bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
bool HasAVX = Subtarget.hasAVX();
- MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
- switch (MI->getOpcode()) {
+ MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
+ switch (MI.getOpcode()) {
case X86::MOV32r0:
return Expand2AddrUndef(MIB, get(X86::XOR32rr));
case X86::MOV32r1:
return expandMOV32r1(MIB, *this, /*MinusOne=*/ false);
case X86::MOV32r_1:
return expandMOV32r1(MIB, *this, /*MinusOne=*/ true);
+ case X86::MOV32ImmSExti8:
+ case X86::MOV64ImmSExti8:
+ return ExpandMOVImmSExti8(MIB);
case X86::SETB_C8r:
return Expand2AddrUndef(MIB, get(X86::SBB8rr));
case X86::SETB_C16r:
@@ -5360,17 +5652,30 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
case X86::AVX_SET0:
assert(HasAVX && "AVX not supported");
return Expand2AddrUndef(MIB, get(X86::VXORPSYrr));
+ case X86::AVX512_128_SET0:
+ return Expand2AddrUndef(MIB, get(X86::VPXORDZ128rr));
+ case X86::AVX512_256_SET0:
+ return Expand2AddrUndef(MIB, get(X86::VPXORDZ256rr));
case X86::AVX512_512_SET0:
return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
case X86::V_SETALLONES:
return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
case X86::AVX2_SETALLONES:
return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr));
+ case X86::AVX512_512_SETALLONES: {
+ unsigned Reg = MIB->getOperand(0).getReg();
+ MIB->setDesc(get(X86::VPTERNLOGDZrri));
+ // VPTERNLOGD needs 3 register inputs and an immediate.
+ // 0xff will return 1s for any input.
+ MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef)
+ .addReg(Reg, RegState::Undef).addImm(0xff);
+ return true;
+ }
case X86::TEST8ri_NOREX:
- MI->setDesc(get(X86::TEST8ri));
+ MI.setDesc(get(X86::TEST8ri));
return true;
case X86::MOV32ri64:
- MI->setDesc(get(X86::MOV32ri));
+ MI.setDesc(get(X86::MOV32ri));
return true;
// KNL does not recognize dependency-breaking idioms for mask registers,
@@ -5422,23 +5727,23 @@ static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
- MachineInstr *MI,
+ MachineInstr &MI,
const TargetInstrInfo &TII) {
// Create the base instruction with the memory operand as the first part.
// Omit the implicit operands, something BuildMI can't do.
- MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
- MI->getDebugLoc(), true);
+ MachineInstr *NewMI =
+ MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
MachineInstrBuilder MIB(MF, NewMI);
addOperands(MIB, MOs);
// Loop over the rest of the ri operands, converting them over.
- unsigned NumOps = MI->getDesc().getNumOperands()-2;
+ unsigned NumOps = MI.getDesc().getNumOperands() - 2;
for (unsigned i = 0; i != NumOps; ++i) {
- MachineOperand &MO = MI->getOperand(i+2);
+ MachineOperand &MO = MI.getOperand(i + 2);
MIB.addOperand(MO);
}
- for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
MIB.addOperand(MO);
}
@@ -5451,15 +5756,15 @@ static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
unsigned OpNo, ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
- MachineInstr *MI, const TargetInstrInfo &TII,
+ MachineInstr &MI, const TargetInstrInfo &TII,
int PtrOffset = 0) {
// Omit the implicit operands, something BuildMI can't do.
- MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
- MI->getDebugLoc(), true);
+ MachineInstr *NewMI =
+ MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
MachineInstrBuilder MIB(MF, NewMI);
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
if (i == OpNo) {
assert(MO.isReg() && "Expected to fold into reg operand!");
addOperands(MIB, MOs, PtrOffset);
@@ -5477,35 +5782,35 @@ static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
- MachineInstr *MI) {
+ MachineInstr &MI) {
MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
- MI->getDebugLoc(), TII.get(Opcode));
+ MI.getDebugLoc(), TII.get(Opcode));
addOperands(MIB, MOs);
return MIB.addImm(0);
}
MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
- MachineFunction &MF, MachineInstr *MI, unsigned OpNum,
+ MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
unsigned Size, unsigned Align) const {
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
case X86::INSERTPSrr:
case X86::VINSERTPSrr:
// Attempt to convert the load of inserted vector into a fold load
// of a single float.
if (OpNum == 2) {
- unsigned Imm = MI->getOperand(MI->getNumOperands() - 1).getImm();
+ unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
unsigned ZMask = Imm & 15;
unsigned DstIdx = (Imm >> 4) & 3;
unsigned SrcIdx = (Imm >> 6) & 3;
- unsigned RCSize = getRegClass(MI->getDesc(), OpNum, &RI, MF)->getSize();
+ unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize();
if (Size <= RCSize && 4 <= Align) {
int PtrOffset = SrcIdx * 4;
unsigned NewImm = (DstIdx << 4) | ZMask;
unsigned NewOpCode =
- (MI->getOpcode() == X86::VINSERTPSrr ? X86::VINSERTPSrm
- : X86::INSERTPSrm);
+ (MI.getOpcode() == X86::VINSERTPSrr ? X86::VINSERTPSrm
+ : X86::INSERTPSrm);
MachineInstr *NewMI =
FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset);
NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm);
@@ -5513,17 +5818,34 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
}
}
break;
+ case X86::MOVHLPSrr:
+ case X86::VMOVHLPSrr:
+ // Move the upper 64 bits of the second operand to the lower 64 bits.
+ // To fold the load, adjust the pointer to the upper half and use (V)MOVLPS.
+ // TODO: In most cases AVX doesn't have an 8-byte alignment requirement.
+ if (OpNum == 2) {
+ unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize();
+ if (Size <= RCSize && 8 <= Align) {
+ unsigned NewOpCode =
+ (MI.getOpcode() == X86::VMOVHLPSrr ? X86::VMOVLPSrm
+ : X86::MOVLPSrm);
+ MachineInstr *NewMI =
+ FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8);
+ return NewMI;
+ }
+ }
+ break;
};
return nullptr;
}
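
A before/after sketch of the new MOVHLPS fold (virtual register names are placeholders):

// Before folding:
//   %v = MOVUPSrm %ptr            ; 16-byte load feeding operand 2
//   %d = MOVHLPSrr %d, %v         ; lower 64 bits of %d := upper 64 of %v
// After folding (PtrOffset = 8 selects the high quadword):
//   %d = MOVLPSrm %d, %ptr + 8    ; load only the 8 bytes actually used
// which is why the code above insists on 8-byte alignment before rewriting.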
MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
- MachineFunction &MF, MachineInstr *MI, unsigned OpNum,
+ MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
unsigned Size, unsigned Align, bool AllowCommute) const {
const DenseMap<unsigned,
- std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr;
+ std::pair<uint16_t, uint16_t> > *OpcodeTablePtr = nullptr;
bool isCallRegIndirect = Subtarget.callRegIndirect();
bool isTwoAddrFold = false;
@@ -5531,19 +5853,19 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// do not fold loads into calls or pushes, unless optimizing for size
// aggressively.
if (isCallRegIndirect && !MF.getFunction()->optForMinSize() &&
- (MI->getOpcode() == X86::CALL32r || MI->getOpcode() == X86::CALL64r ||
- MI->getOpcode() == X86::PUSH16r || MI->getOpcode() == X86::PUSH32r ||
- MI->getOpcode() == X86::PUSH64r))
+ (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
+ MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
+ MI.getOpcode() == X86::PUSH64r))
return nullptr;
- unsigned NumOps = MI->getDesc().getNumOperands();
- bool isTwoAddr = NumOps > 1 &&
- MI->getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
+ unsigned NumOps = MI.getDesc().getNumOperands();
+ bool isTwoAddr =
+ NumOps > 1 && MI.getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
// FIXME: AsmPrinter doesn't know how to handle
// X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
- if (MI->getOpcode() == X86::ADD32ri &&
- MI->getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
+ if (MI.getOpcode() == X86::ADD32ri &&
+ MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
return nullptr;
MachineInstr *NewMI = nullptr;
@@ -5556,14 +5878,13 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// Folding a memory location into the two-address part of a two-address
// instruction is different from folding it in other places. It requires
// replacing the *two* registers with the memory location.
- if (isTwoAddr && NumOps >= 2 && OpNum < 2 &&
- MI->getOperand(0).isReg() &&
- MI->getOperand(1).isReg() &&
- MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
+ if (isTwoAddr && NumOps >= 2 && OpNum < 2 && MI.getOperand(0).isReg() &&
+ MI.getOperand(1).isReg() &&
+ MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
OpcodeTablePtr = &RegOp2MemOpTable2Addr;
isTwoAddrFold = true;
} else if (OpNum == 0) {
- if (MI->getOpcode() == X86::MOV32r0) {
+ if (MI.getOpcode() == X86::MOV32r0) {
NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI);
if (NewMI)
return NewMI;
@@ -5583,8 +5904,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// If table selected...
if (OpcodeTablePtr) {
// Find the Opcode to fuse
- DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
- OpcodeTablePtr->find(MI->getOpcode());
+ auto I = OpcodeTablePtr->find(MI.getOpcode());
if (I != OpcodeTablePtr->end()) {
unsigned Opcode = I->second.first;
unsigned MinAlign = (I->second.second & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT;
@@ -5592,7 +5912,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
return nullptr;
bool NarrowToMOV32rm = false;
if (Size) {
- unsigned RCSize = getRegClass(MI->getDesc(), OpNum, &RI, MF)->getSize();
+ unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize();
if (Size < RCSize) {
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
@@ -5601,7 +5921,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// If this is a 64-bit load, but the spill slot is 32, then we can do
// a 32-bit load which is implicitly zero-extended. This likely is
// due to live interval analysis remat'ing a load from stack slot.
- if (MI->getOperand(0).getSubReg() || MI->getOperand(1).getSubReg())
+ if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
return nullptr;
Opcode = X86::MOV32rm;
NarrowToMOV32rm = true;
@@ -5632,14 +5952,14 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
if (AllowCommute) {
unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex;
if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) {
- bool HasDef = MI->getDesc().getNumDefs();
- unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
- unsigned Reg1 = MI->getOperand(CommuteOpIdx1).getReg();
- unsigned Reg2 = MI->getOperand(CommuteOpIdx2).getReg();
+ bool HasDef = MI.getDesc().getNumDefs();
+ unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
+ unsigned Reg1 = MI.getOperand(CommuteOpIdx1).getReg();
+ unsigned Reg2 = MI.getOperand(CommuteOpIdx2).getReg();
bool Tied1 =
- 0 == MI->getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
+ 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
bool Tied2 =
- 0 == MI->getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO);
+ 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO);
// If either of the commutable operands are tied to the destination
// then we can not commute + fold.
@@ -5653,7 +5973,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// Unable to commute.
return nullptr;
}
- if (CommutedMI != MI) {
+ if (CommutedMI != &MI) {
// New instruction. We can't fold from this.
CommutedMI->eraseFromParent();
return nullptr;
@@ -5672,7 +5992,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// Unable to commute.
return nullptr;
}
- if (UncommutedMI != MI) {
+ if (UncommutedMI != &MI) {
// New instruction. It doesn't need to be kept.
UncommutedMI->eraseFromParent();
return nullptr;
@@ -5684,8 +6004,8 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
}
// No fusion
- if (PrintFailedFusing && !MI->isCopy())
- dbgs() << "We failed to fuse operand " << OpNum << " in " << *MI;
+ if (PrintFailedFusing && !MI.isCopy())
+ dbgs() << "We failed to fuse operand " << OpNum << " in " << MI;
return nullptr;
}
@@ -5723,6 +6043,10 @@ static bool hasPartialRegUpdate(unsigned Opcode) {
case X86::CVTSS2SDrm:
case X86::Int_CVTSS2SDrr:
case X86::Int_CVTSS2SDrm:
+ case X86::MOVHPDrm:
+ case X86::MOVHPSrm:
+ case X86::MOVLPDrm:
+ case X86::MOVLPSrm:
case X86::RCPSSr:
case X86::RCPSSm:
case X86::RCPSSr_Int:
@@ -5753,27 +6077,27 @@ static bool hasPartialRegUpdate(unsigned Opcode) {
/// Inform the ExeDepsFix pass how many idle
/// instructions we would like before a partial register update.
-unsigned X86InstrInfo::
-getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
- const TargetRegisterInfo *TRI) const {
- if (OpNum != 0 || !hasPartialRegUpdate(MI->getOpcode()))
+unsigned X86InstrInfo::getPartialRegUpdateClearance(
+ const MachineInstr &MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {
+ if (OpNum != 0 || !hasPartialRegUpdate(MI.getOpcode()))
return 0;
// If MI is marked as reading Reg, the partial register update is wanted.
- const MachineOperand &MO = MI->getOperand(0);
+ const MachineOperand &MO = MI.getOperand(0);
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
- if (MO.readsReg() || MI->readsVirtualRegister(Reg))
+ if (MO.readsReg() || MI.readsVirtualRegister(Reg))
return 0;
} else {
- if (MI->readsRegister(Reg, TRI))
+ if (MI.readsRegister(Reg, TRI))
return 0;
}
- // If any of the preceding 16 instructions are reading Reg, insert a
- // dependency breaking instruction. The magic number is based on a few
- // Nehalem experiments.
- return 16;
+ // If any instructions in the clearance range are reading Reg, insert a
+ // dependency-breaking instruction, which is inexpensive and is likely to
+ // be hidden in other instructions' cycles.
+ return PartialRegUpdateClearance;
}
// Return true for any instruction that copies the high bits of the first source
@@ -5847,59 +6171,61 @@ static bool hasUndefRegUpdate(unsigned Opcode) {
///
/// Like getPartialRegUpdateClearance, this makes a strong assumption that the
/// high bits that are passed-through are not live.
-unsigned X86InstrInfo::
-getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
- const TargetRegisterInfo *TRI) const {
- if (!hasUndefRegUpdate(MI->getOpcode()))
+unsigned
+X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
+ const TargetRegisterInfo *TRI) const {
+ if (!hasUndefRegUpdate(MI.getOpcode()))
return 0;
// Set the OpNum parameter to the first source operand.
OpNum = 1;
- const MachineOperand &MO = MI->getOperand(OpNum);
+ const MachineOperand &MO = MI.getOperand(OpNum);
if (MO.isUndef() && TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
- // Use the same magic number as getPartialRegUpdateClearance.
- return 16;
+ return UndefRegClearance;
}
return 0;
}
-void X86InstrInfo::
-breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
- const TargetRegisterInfo *TRI) const {
- unsigned Reg = MI->getOperand(OpNum).getReg();
+void X86InstrInfo::breakPartialRegDependency(
+ MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
+ unsigned Reg = MI.getOperand(OpNum).getReg();
// If MI kills this register, the false dependence is already broken.
- if (MI->killsRegister(Reg, TRI))
+ if (MI.killsRegister(Reg, TRI))
return;
if (X86::VR128RegClass.contains(Reg)) {
// These instructions are all floating point domain, so xorps is the best
// choice.
unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
- BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(Opc), Reg)
- .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
- MI->addRegisterKilled(Reg, TRI, true);
+ BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg)
+ .addReg(Reg, RegState::Undef)
+ .addReg(Reg, RegState::Undef);
+ MI.addRegisterKilled(Reg, TRI, true);
} else if (X86::VR256RegClass.contains(Reg)) {
// Use vxorps to clear the full ymm register.
// It wants to read and write the xmm sub-register.
unsigned XReg = TRI->getSubReg(Reg, X86::sub_xmm);
- BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(X86::VXORPSrr), XReg)
- .addReg(XReg, RegState::Undef).addReg(XReg, RegState::Undef)
- .addReg(Reg, RegState::ImplicitDefine);
- MI->addRegisterKilled(Reg, TRI, true);
+ BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg)
+ .addReg(XReg, RegState::Undef)
+ .addReg(XReg, RegState::Undef)
+ .addReg(Reg, RegState::ImplicitDefine);
+ MI.addRegisterKilled(Reg, TRI, true);
}
}
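
The self-XOR emitted above works because x ^ x is zero for every x, so the instruction carries no true dependence on its input and hardware treats it as a zero idiom; a one-line host-C++ analogue (illustrative only):

#include <cstdint>

// Always returns 0, independent of X: the register form of this idiom
// (XORPS/VXORPS reg, reg) is what breakPartialRegDependency inserts so the
// following partial write does not wait on a stale value.
static uint64_t zeroIdiom(uint64_t X) { return X ^ X; }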
-MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
- MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+MachineInstr *
+X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
+ ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt,
+ int FrameIndex, LiveIntervals *LIS) const {
// Check switch flag
if (NoFusing)
return nullptr;
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
- if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI->getOpcode()))
+ if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
return nullptr;
const MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -5913,7 +6239,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
unsigned NewOpc = 0;
unsigned RCSize = 0;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: return nullptr;
case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break;
case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
@@ -5925,8 +6251,8 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
if (Size < RCSize)
return nullptr;
// Change to CMPXXri r, 0 first.
- MI->setDesc(get(NewOpc));
- MI->getOperand(1).ChangeToImmediate(0);
+ MI.setDesc(get(NewOpc));
+ MI.getOperand(1).ChangeToImmediate(0);
} else if (Ops.size() != 1)
return nullptr;
@@ -5957,15 +6283,16 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
unsigned RegSize =
MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg())->getSize();
- if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm) && RegSize > 4) {
+ if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm) &&
+ RegSize > 4) {
// These instructions only load 32 bits; we can't fold them if the
// destination register is wider than 32 bits (4 bytes), and its user
// instruction isn't scalar (SS).
switch (UserOpc) {
- case X86::ADDSSrr_Int: case X86::VADDSSrr_Int:
- case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int:
- case X86::MULSSrr_Int: case X86::VMULSSrr_Int:
- case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int:
+ case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: case X86::VADDSSZrr_Int:
+ case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: case X86::VDIVSSZrr_Int:
+ case X86::MULSSrr_Int: case X86::VMULSSrr_Int: case X86::VMULSSZrr_Int:
+ case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int: case X86::VSUBSSZrr_Int:
case X86::VFMADDSSr132r_Int: case X86::VFNMADDSSr132r_Int:
case X86::VFMADDSSr213r_Int: case X86::VFNMADDSSr213r_Int:
case X86::VFMADDSSr231r_Int: case X86::VFNMADDSSr231r_Int:
@@ -5978,15 +6305,16 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
}
}
- if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm) && RegSize > 8) {
+ if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm) &&
+ RegSize > 8) {
// These instructions only load 64 bits; we can't fold them if the
// destination register is wider than 64 bits (8 bytes), and its user
// instruction isn't scalar (SD).
switch (UserOpc) {
- case X86::ADDSDrr_Int: case X86::VADDSDrr_Int:
- case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int:
- case X86::MULSDrr_Int: case X86::VMULSDrr_Int:
- case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int:
+ case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: case X86::VADDSDZrr_Int:
+ case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: case X86::VDIVSDZrr_Int:
+ case X86::MULSDrr_Int: case X86::VMULSDrr_Int: case X86::VMULSDZrr_Int:
+ case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int: case X86::VSUBSDZrr_Int:
case X86::VFMADDSDr132r_Int: case X86::VFNMADDSDr132r_Int:
case X86::VFMADDSDr213r_Int: case X86::VFNMADDSDr213r_Int:
case X86::VFMADDSDr231r_Int: case X86::VFNMADDSDr231r_Int:
@@ -6003,36 +6331,43 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
}
MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
- MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
+ MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
+ LiveIntervals *LIS) const {
// If loading from a FrameIndex, fold directly from the FrameIndex.
- unsigned NumOps = LoadMI->getDesc().getNumOperands();
+ unsigned NumOps = LoadMI.getDesc().getNumOperands();
int FrameIndex;
if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
- if (isNonFoldablePartialRegisterLoad(*LoadMI, *MI, MF))
+ if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
return nullptr;
- return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex);
+ return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS);
}
// Check switch flag
if (NoFusing) return nullptr;
// Avoid partial register update stalls unless optimizing for size.
- if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI->getOpcode()))
+ if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
return nullptr;
// Determine the alignment of the load.
unsigned Alignment = 0;
- if (LoadMI->hasOneMemOperand())
- Alignment = (*LoadMI->memoperands_begin())->getAlignment();
+ if (LoadMI.hasOneMemOperand())
+ Alignment = (*LoadMI.memoperands_begin())->getAlignment();
else
- switch (LoadMI->getOpcode()) {
+ switch (LoadMI.getOpcode()) {
+ case X86::AVX512_512_SET0:
+ case X86::AVX512_512_SETALLONES:
+ Alignment = 64;
+ break;
case X86::AVX2_SETALLONES:
case X86::AVX_SET0:
+ case X86::AVX512_256_SET0:
Alignment = 32;
break;
case X86::V_SET0:
case X86::V_SETALLONES:
+ case X86::AVX512_128_SET0:
Alignment = 16;
break;
case X86::FsFLD0SD:
@@ -6046,7 +6381,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
}
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
unsigned NewOpc = 0;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default: return nullptr;
case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
@@ -6054,22 +6389,26 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
}
// Change to CMPXXri r, 0 first.
- MI->setDesc(get(NewOpc));
- MI->getOperand(1).ChangeToImmediate(0);
+ MI.setDesc(get(NewOpc));
+ MI.getOperand(1).ChangeToImmediate(0);
} else if (Ops.size() != 1)
return nullptr;
// Make sure the subregisters match.
// Otherwise we risk changing the size of the load.
- if (LoadMI->getOperand(0).getSubReg() != MI->getOperand(Ops[0]).getSubReg())
+ if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg())
return nullptr;
SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
- switch (LoadMI->getOpcode()) {
+ switch (LoadMI.getOpcode()) {
case X86::V_SET0:
case X86::V_SETALLONES:
case X86::AVX2_SETALLONES:
case X86::AVX_SET0:
+ case X86::AVX512_128_SET0:
+ case X86::AVX512_256_SET0:
+ case X86::AVX512_512_SET0:
+ case X86::AVX512_512_SETALLONES:
case X86::FsFLD0SD:
case X86::FsFLD0SS: {
// Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
@@ -6082,7 +6421,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// x86-32 PIC requires a PIC base register for constant pools.
unsigned PICBase = 0;
- if (MF.getTarget().getRelocationModel() == Reloc::PIC_) {
+ if (MF.getTarget().isPositionIndependent()) {
if (Subtarget.is64Bit())
PICBase = X86::RIP;
else
@@ -6096,17 +6435,21 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// Create a constant-pool entry.
MachineConstantPool &MCP = *MF.getConstantPool();
Type *Ty;
- unsigned Opc = LoadMI->getOpcode();
+ unsigned Opc = LoadMI.getOpcode();
if (Opc == X86::FsFLD0SS)
Ty = Type::getFloatTy(MF.getFunction()->getContext());
else if (Opc == X86::FsFLD0SD)
Ty = Type::getDoubleTy(MF.getFunction()->getContext());
- else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0)
+ else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES)
+ Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 16);
+ else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 ||
+ Opc == X86::AVX512_256_SET0)
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 8);
else
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4);
- bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES);
+ bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES ||
+ Opc == X86::AVX512_512_SETALLONES);
const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) :
Constant::getNullValue(Ty);
unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
@@ -6120,12 +6463,12 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
break;
}
default: {
- if (isNonFoldablePartialRegisterLoad(*LoadMI, *MI, MF))
+ if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
return nullptr;
// Folding a normal load. Just copy the load's address operands.
- MOs.append(LoadMI->operands_begin() + NumOps - X86::AddrNumOperands,
- LoadMI->operands_begin() + NumOps);
+ MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands,
+ LoadMI.operands_begin() + NumOps);
break;
}
}
@@ -6133,11 +6476,10 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
/*Size=*/0, Alignment, /*AllowCommute=*/true);
}
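
Condensed view of what the constant-pool branch above produces for the new AVX-512 pseudos (opcode and label names in this sketch are placeholders, not from the patch):

// Before folding:
//   %zmm0 = AVX512_512_SETALLONES          ; rematerializable pseudo
//   %zmm1 = VADDPSZrr %zmm1, %zmm0
// After folding, the pseudo becomes a 64-byte-aligned constant-pool load:
//   %zmm1 = VADDPSZrm %zmm1, %rip, 1, %noreg, %const.0, %noreg
// where %const.0 holds Constant::getAllOnesValue(<16 x i32>) and the base
// register is RIP in 64-bit mode or the PIC base register for x86-32 PIC.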
-bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
- unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
- DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
- MemOp2RegOpTable.find(MI->getOpcode());
+bool X86InstrInfo::unfoldMemoryOperand(
+ MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad,
+ bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const {
+ auto I = MemOp2RegOpTable.find(MI.getOpcode());
if (I == MemOp2RegOpTable.end())
return false;
unsigned Opc = I->second.first;
@@ -6154,8 +6496,7 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
const MCInstrDesc &MCID = get(Opc);
const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
// TODO: Check if 32-byte or greater accesses are slow too?
- if (!MI->hasOneMemOperand() &&
- RC == &X86::VR128RegClass &&
+ if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
Subtarget.isUnalignedMem16Slow())
// Without memoperands, loadRegFromAddr and storeRegToStackSlot will
// conservatively assume the address is unaligned. That's bad for
@@ -6165,8 +6506,8 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
SmallVector<MachineOperand,2> BeforeOps;
SmallVector<MachineOperand,2> AfterOps;
SmallVector<MachineOperand,4> ImpOps;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &Op = MI->getOperand(i);
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &Op = MI.getOperand(i);
if (i >= Index && i < Index + X86::AddrNumOperands)
AddrOps.push_back(Op);
else if (Op.isReg() && Op.isImplicit())
@@ -6179,10 +6520,8 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
// Emit the load instruction.
if (UnfoldLoad) {
- std::pair<MachineInstr::mmo_iterator,
- MachineInstr::mmo_iterator> MMOs =
- MF.extractLoadMemRefs(MI->memoperands_begin(),
- MI->memoperands_end());
+ std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs =
+ MF.extractLoadMemRefs(MI.memoperands_begin(), MI.memoperands_end());
loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs);
if (UnfoldStore) {
// Address operands cannot be marked isKill.
@@ -6195,7 +6534,7 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
}
// Emit the data processing instruction.
- MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI->getDebugLoc(), true);
+ MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true);
MachineInstrBuilder MIB(MF, DataMI);
if (FoldedStore)
@@ -6248,10 +6587,8 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
// Emit the store instruction.
if (UnfoldStore) {
const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
- std::pair<MachineInstr::mmo_iterator,
- MachineInstr::mmo_iterator> MMOs =
- MF.extractStoreMemRefs(MI->memoperands_begin(),
- MI->memoperands_end());
+ std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs =
+ MF.extractStoreMemRefs(MI.memoperands_begin(), MI.memoperands_end());
storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs.first, MMOs.second, NewMIs);
}
@@ -6264,8 +6601,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
if (!N->isMachineOpcode())
return false;
- DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
- MemOp2RegOpTable.find(N->getMachineOpcode());
+ auto I = MemOp2RegOpTable.find(N->getMachineOpcode());
if (I == MemOp2RegOpTable.end())
return false;
unsigned Opc = I->second.first;
@@ -6371,8 +6707,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
bool UnfoldLoad, bool UnfoldStore,
unsigned *LoadRegIndex) const {
- DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
- MemOp2RegOpTable.find(Opc);
+ auto I = MemOp2RegOpTable.find(Opc);
if (I == MemOp2RegOpTable.end())
return 0;
bool FoldedLoad = I->second.second & TB_FOLDED_LOAD;
@@ -6411,6 +6746,7 @@ X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
case X86::MOVAPSrm:
case X86::MOVUPSrm:
case X86::MOVAPDrm:
+ case X86::MOVUPDrm:
case X86::MOVDQArm:
case X86::MOVDQUrm:
// AVX load instructions
@@ -6421,13 +6757,52 @@ X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
case X86::VMOVAPSrm:
case X86::VMOVUPSrm:
case X86::VMOVAPDrm:
+ case X86::VMOVUPDrm:
case X86::VMOVDQArm:
case X86::VMOVDQUrm:
case X86::VMOVAPSYrm:
case X86::VMOVUPSYrm:
case X86::VMOVAPDYrm:
+ case X86::VMOVUPDYrm:
case X86::VMOVDQAYrm:
case X86::VMOVDQUYrm:
+ // AVX512 load instructions
+ case X86::VMOVSSZrm:
+ case X86::VMOVSDZrm:
+ case X86::VMOVAPSZ128rm:
+ case X86::VMOVUPSZ128rm:
+ case X86::VMOVAPDZ128rm:
+ case X86::VMOVUPDZ128rm:
+ case X86::VMOVDQU8Z128rm:
+ case X86::VMOVDQU16Z128rm:
+ case X86::VMOVDQA32Z128rm:
+ case X86::VMOVDQU32Z128rm:
+ case X86::VMOVDQA64Z128rm:
+ case X86::VMOVDQU64Z128rm:
+ case X86::VMOVAPSZ256rm:
+ case X86::VMOVUPSZ256rm:
+ case X86::VMOVAPDZ256rm:
+ case X86::VMOVUPDZ256rm:
+ case X86::VMOVDQU8Z256rm:
+ case X86::VMOVDQU16Z256rm:
+ case X86::VMOVDQA32Z256rm:
+ case X86::VMOVDQU32Z256rm:
+ case X86::VMOVDQA64Z256rm:
+ case X86::VMOVDQU64Z256rm:
+ case X86::VMOVAPSZrm:
+ case X86::VMOVUPSZrm:
+ case X86::VMOVAPDZrm:
+ case X86::VMOVUPDZrm:
+ case X86::VMOVDQU8Zrm:
+ case X86::VMOVDQU16Zrm:
+ case X86::VMOVDQA32Zrm:
+ case X86::VMOVDQU32Zrm:
+ case X86::VMOVDQA64Zrm:
+ case X86::VMOVDQU64Zrm:
+ case X86::KMOVBkm:
+ case X86::KMOVWkm:
+ case X86::KMOVDkm:
+ case X86::KMOVQkm:
break;
}
switch (Opc2) {
@@ -6448,6 +6823,7 @@ X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
case X86::MOVAPSrm:
case X86::MOVUPSrm:
case X86::MOVAPDrm:
+ case X86::MOVUPDrm:
case X86::MOVDQArm:
case X86::MOVDQUrm:
// AVX load instructions
@@ -6458,13 +6834,52 @@ X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
case X86::VMOVAPSrm:
case X86::VMOVUPSrm:
case X86::VMOVAPDrm:
+ case X86::VMOVUPDrm:
case X86::VMOVDQArm:
case X86::VMOVDQUrm:
case X86::VMOVAPSYrm:
case X86::VMOVUPSYrm:
case X86::VMOVAPDYrm:
+ case X86::VMOVUPDYrm:
case X86::VMOVDQAYrm:
case X86::VMOVDQUYrm:
+ // AVX512 load instructions
+ case X86::VMOVSSZrm:
+ case X86::VMOVSDZrm:
+ case X86::VMOVAPSZ128rm:
+ case X86::VMOVUPSZ128rm:
+ case X86::VMOVAPDZ128rm:
+ case X86::VMOVUPDZ128rm:
+ case X86::VMOVDQU8Z128rm:
+ case X86::VMOVDQU16Z128rm:
+ case X86::VMOVDQA32Z128rm:
+ case X86::VMOVDQU32Z128rm:
+ case X86::VMOVDQA64Z128rm:
+ case X86::VMOVDQU64Z128rm:
+ case X86::VMOVAPSZ256rm:
+ case X86::VMOVUPSZ256rm:
+ case X86::VMOVAPDZ256rm:
+ case X86::VMOVUPDZ256rm:
+ case X86::VMOVDQU8Z256rm:
+ case X86::VMOVDQU16Z256rm:
+ case X86::VMOVDQA32Z256rm:
+ case X86::VMOVDQU32Z256rm:
+ case X86::VMOVDQA64Z256rm:
+ case X86::VMOVDQU64Z256rm:
+ case X86::VMOVAPSZrm:
+ case X86::VMOVUPSZrm:
+ case X86::VMOVAPDZrm:
+ case X86::VMOVUPDZrm:
+ case X86::VMOVDQU8Zrm:
+ case X86::VMOVDQU16Zrm:
+ case X86::VMOVDQA32Zrm:
+ case X86::VMOVDQU32Zrm:
+ case X86::VMOVDQA64Zrm:
+ case X86::VMOVDQU64Zrm:
+ case X86::KMOVBkm:
+ case X86::KMOVWkm:
+ case X86::KMOVDkm:
+ case X86::KMOVQkm:
break;
}
@@ -6540,8 +6955,8 @@ bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
return true;
}
-bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr* First,
- MachineInstr *Second) const {
+bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr &First,
+ MachineInstr &Second) const {
// Check if this processor supports macro-fusion. Since this is a minor
// heuristic, we haven't specifically reserved a feature. hasAVX is a decent
// proxy for SandyBridge+.
@@ -6554,7 +6969,7 @@ bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr* First,
FuseInc
} FuseKind;
- switch(Second->getOpcode()) {
+ switch (Second.getOpcode()) {
default:
return false;
case X86::JE_1:
@@ -6580,7 +6995,7 @@ bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr* First,
FuseKind = FuseTest;
break;
}
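
shouldScheduleAdjacent models macro-fusion: on recent Intel cores (hasAVX serving as the SandyBridge+ proxy, per the comment above) a flag-producing instruction immediately followed by a conditional branch decodes into a single micro-op. The switch above classifies the branch into a fusion class; the switch that follows checks the flag producer against it. A simplified stand-alone sketch of that two-step shape, with placeholder enums rather than the real opcodes:

// Illustrative only: the branch picks a fusion class, then the
// flag producer must be acceptable for that class.
enum class FuseKind { Test, Cmp, Inc };
enum class Br { JE, JNE, JA, JB, JS, JO };

static FuseKind classifyBranch(Br B) {
  switch (B) {
  case Br::JS: case Br::JO: return FuseKind::Test; // flag-only tests
  case Br::JA: case Br::JB: return FuseKind::Cmp;  // carry-flag users
  default:                  return FuseKind::Inc;  // widest class
  }
}

static bool producerFuses(FuseKind K, bool IsTest, bool IsCmp, bool IsIncDec) {
  switch (K) {
  case FuseKind::Test: return IsTest;                      // e.g. TEST/AND
  case FuseKind::Cmp:  return IsTest || IsCmp;             // also CMP
  case FuseKind::Inc:  return IsTest || IsCmp || IsIncDec; // also INC/DEC
  }
  return false;
}
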
- switch (First->getOpcode()) {
+ switch (First.getOpcode()) {
default:
return false;
case X86::TEST8rr:
@@ -6703,8 +7118,6 @@ bool X86InstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
assert(Cond.size() == 1 && "Invalid X86 branch condition!");
X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
- if (CC == X86::COND_NE_OR_P || CC == X86::COND_NP_OR_E)
- return true;
Cond[0].setImm(GetOppositeBranchCondition(CC));
return false;
}
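
The removed early-out is enabled by the header change further down: the second artificial code is now COND_E_AND_NP, the exact De Morgan inverse of COND_NE_OR_P, so reversing a compound condition can no longer fail. A stand-alone check of that inversion (NE means ZF==0, P means PF==1, E means ZF==1, NP means PF==0):

#include <cassert>

int main() {
  for (int zf = 0; zf <= 1; ++zf)
    for (int pf = 0; pf <= 1; ++pf) {
      bool ne_or_p  = (zf == 0) || (pf == 1); // COND_NE_OR_P
      bool e_and_np = (zf == 1) && (pf == 0); // COND_E_AND_NP
      assert(ne_or_p == !e_and_np);           // exact inverses
    }
}
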
@@ -6827,29 +7240,29 @@ static const uint16_t *lookupAVX2(unsigned opcode, unsigned domain) {
}
std::pair<uint16_t, uint16_t>
-X86InstrInfo::getExecutionDomain(const MachineInstr *MI) const {
- uint16_t domain = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
+X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const {
+ uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
bool hasAVX2 = Subtarget.hasAVX2();
uint16_t validDomains = 0;
- if (domain && lookup(MI->getOpcode(), domain))
+ if (domain && lookup(MI.getOpcode(), domain))
validDomains = 0xe;
- else if (domain && lookupAVX2(MI->getOpcode(), domain))
+ else if (domain && lookupAVX2(MI.getOpcode(), domain))
validDomains = hasAVX2 ? 0xe : 0x6;
return std::make_pair(domain, validDomains);
}
-void X86InstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
+void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const {
assert(Domain>0 && Domain<4 && "Invalid execution domain");
- uint16_t dom = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
+ uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
assert(dom && "Not an SSE instruction");
- const uint16_t *table = lookup(MI->getOpcode(), dom);
+ const uint16_t *table = lookup(MI.getOpcode(), dom);
if (!table) { // try the other table
assert((Subtarget.hasAVX2() || Domain < 3) &&
"256-bit vector operations only available in AVX2");
- table = lookupAVX2(MI->getOpcode(), dom);
+ table = lookupAVX2(MI.getOpcode(), dom);
}
assert(table && "Cannot change domain");
- MI->setDesc(get(table[Domain-1]));
+ MI.setDesc(get(table[Domain - 1]));
}
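
Both functions consult tables whose rows hold equivalent opcodes across the three SSE execution domains, with Domain (1..3) selecting a column; validDomains is a bitmask, 0xe (0b1110) allowing all three domains and 0x6 dropping the packed-integer column on non-AVX2 targets. A minimal sketch of that lookup shape (the real rows live in the tables behind lookup/lookupAVX2; values here are placeholders):

#include <cstdint>

// One row per remappable instruction:
// { PackedSingle, PackedDouble, PackedInt } equivalents.
struct DomainRow { uint16_t Ops[3]; };

static uint16_t remap(const DomainRow &Row, unsigned Domain /*1..3*/) {
  return Row.Ops[Domain - 1]; // mirrors MI.setDesc(get(table[Domain - 1]))
}
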
/// Return the noop instruction to use for a noop.
@@ -6886,6 +7299,10 @@ unsigned X86InstrInfo::getJumpInstrTableEntryBound() const {
bool X86InstrInfo::isHighLatencyDef(int opc) const {
switch (opc) {
default: return false;
+ case X86::DIVPDrm:
+ case X86::DIVPDrr:
+ case X86::DIVPSrm:
+ case X86::DIVPSrr:
case X86::DIVSDrm:
case X86::DIVSDrm_Int:
case X86::DIVSDrr:
@@ -6907,6 +7324,14 @@ bool X86InstrInfo::isHighLatencyDef(int opc) const {
case X86::SQRTSSr:
case X86::SQRTSSr_Int:
// AVX instructions with high latency
+ case X86::VDIVPDrm:
+ case X86::VDIVPDrr:
+ case X86::VDIVPDYrm:
+ case X86::VDIVPDYrr:
+ case X86::VDIVPSrm:
+ case X86::VDIVPSrr:
+ case X86::VDIVPSYrm:
+ case X86::VDIVPSYrr:
case X86::VDIVSDrm:
case X86::VDIVSDrm_Int:
case X86::VDIVSDrr:
@@ -6917,55 +7342,277 @@ bool X86InstrInfo::isHighLatencyDef(int opc) const {
case X86::VDIVSSrr_Int:
case X86::VSQRTPDm:
case X86::VSQRTPDr:
+ case X86::VSQRTPDYm:
+ case X86::VSQRTPDYr:
case X86::VSQRTPSm:
case X86::VSQRTPSr:
+ case X86::VSQRTPSYm:
+ case X86::VSQRTPSYr:
case X86::VSQRTSDm:
case X86::VSQRTSDm_Int:
case X86::VSQRTSDr:
+ case X86::VSQRTSDr_Int:
case X86::VSQRTSSm:
case X86::VSQRTSSm_Int:
case X86::VSQRTSSr:
+ case X86::VSQRTSSr_Int:
+ // AVX512 instructions with high latency
+ case X86::VDIVPDZ128rm:
+ case X86::VDIVPDZ128rmb:
+ case X86::VDIVPDZ128rmbk:
+ case X86::VDIVPDZ128rmbkz:
+ case X86::VDIVPDZ128rmk:
+ case X86::VDIVPDZ128rmkz:
+ case X86::VDIVPDZ128rr:
+ case X86::VDIVPDZ128rrk:
+ case X86::VDIVPDZ128rrkz:
+ case X86::VDIVPDZ256rm:
+ case X86::VDIVPDZ256rmb:
+ case X86::VDIVPDZ256rmbk:
+ case X86::VDIVPDZ256rmbkz:
+ case X86::VDIVPDZ256rmk:
+ case X86::VDIVPDZ256rmkz:
+ case X86::VDIVPDZ256rr:
+ case X86::VDIVPDZ256rrk:
+ case X86::VDIVPDZ256rrkz:
+ case X86::VDIVPDZrb:
+ case X86::VDIVPDZrbk:
+ case X86::VDIVPDZrbkz:
+ case X86::VDIVPDZrm:
+ case X86::VDIVPDZrmb:
+ case X86::VDIVPDZrmbk:
+ case X86::VDIVPDZrmbkz:
+ case X86::VDIVPDZrmk:
+ case X86::VDIVPDZrmkz:
+ case X86::VDIVPDZrr:
+ case X86::VDIVPDZrrk:
+ case X86::VDIVPDZrrkz:
+ case X86::VDIVPSZ128rm:
+ case X86::VDIVPSZ128rmb:
+ case X86::VDIVPSZ128rmbk:
+ case X86::VDIVPSZ128rmbkz:
+ case X86::VDIVPSZ128rmk:
+ case X86::VDIVPSZ128rmkz:
+ case X86::VDIVPSZ128rr:
+ case X86::VDIVPSZ128rrk:
+ case X86::VDIVPSZ128rrkz:
+ case X86::VDIVPSZ256rm:
+ case X86::VDIVPSZ256rmb:
+ case X86::VDIVPSZ256rmbk:
+ case X86::VDIVPSZ256rmbkz:
+ case X86::VDIVPSZ256rmk:
+ case X86::VDIVPSZ256rmkz:
+ case X86::VDIVPSZ256rr:
+ case X86::VDIVPSZ256rrk:
+ case X86::VDIVPSZ256rrkz:
+ case X86::VDIVPSZrb:
+ case X86::VDIVPSZrbk:
+ case X86::VDIVPSZrbkz:
+ case X86::VDIVPSZrm:
+ case X86::VDIVPSZrmb:
+ case X86::VDIVPSZrmbk:
+ case X86::VDIVPSZrmbkz:
+ case X86::VDIVPSZrmk:
+ case X86::VDIVPSZrmkz:
+ case X86::VDIVPSZrr:
+ case X86::VDIVPSZrrk:
+ case X86::VDIVPSZrrkz:
+ case X86::VDIVSDZrm:
+ case X86::VDIVSDZrr:
+ case X86::VDIVSDZrm_Int:
+ case X86::VDIVSDZrm_Intk:
+ case X86::VDIVSDZrm_Intkz:
+ case X86::VDIVSDZrr_Int:
+ case X86::VDIVSDZrr_Intk:
+ case X86::VDIVSDZrr_Intkz:
+ case X86::VDIVSDZrrb:
+ case X86::VDIVSDZrrbk:
+ case X86::VDIVSDZrrbkz:
+ case X86::VDIVSSZrm:
+ case X86::VDIVSSZrr:
+ case X86::VDIVSSZrm_Int:
+ case X86::VDIVSSZrm_Intk:
+ case X86::VDIVSSZrm_Intkz:
+ case X86::VDIVSSZrr_Int:
+ case X86::VDIVSSZrr_Intk:
+ case X86::VDIVSSZrr_Intkz:
+ case X86::VDIVSSZrrb:
+ case X86::VDIVSSZrrbk:
+ case X86::VDIVSSZrrbkz:
+ case X86::VSQRTPDZ128m:
+ case X86::VSQRTPDZ128mb:
+ case X86::VSQRTPDZ128mbk:
+ case X86::VSQRTPDZ128mbkz:
+ case X86::VSQRTPDZ128mk:
+ case X86::VSQRTPDZ128mkz:
+ case X86::VSQRTPDZ128r:
+ case X86::VSQRTPDZ128rk:
+ case X86::VSQRTPDZ128rkz:
+ case X86::VSQRTPDZ256m:
+ case X86::VSQRTPDZ256mb:
+ case X86::VSQRTPDZ256mbk:
+ case X86::VSQRTPDZ256mbkz:
+ case X86::VSQRTPDZ256mk:
+ case X86::VSQRTPDZ256mkz:
+ case X86::VSQRTPDZ256r:
+ case X86::VSQRTPDZ256rk:
+ case X86::VSQRTPDZ256rkz:
case X86::VSQRTPDZm:
+ case X86::VSQRTPDZmb:
+ case X86::VSQRTPDZmbk:
+ case X86::VSQRTPDZmbkz:
+ case X86::VSQRTPDZmk:
+ case X86::VSQRTPDZmkz:
case X86::VSQRTPDZr:
+ case X86::VSQRTPDZrb:
+ case X86::VSQRTPDZrbk:
+ case X86::VSQRTPDZrbkz:
+ case X86::VSQRTPDZrk:
+ case X86::VSQRTPDZrkz:
+ case X86::VSQRTPSZ128m:
+ case X86::VSQRTPSZ128mb:
+ case X86::VSQRTPSZ128mbk:
+ case X86::VSQRTPSZ128mbkz:
+ case X86::VSQRTPSZ128mk:
+ case X86::VSQRTPSZ128mkz:
+ case X86::VSQRTPSZ128r:
+ case X86::VSQRTPSZ128rk:
+ case X86::VSQRTPSZ128rkz:
+ case X86::VSQRTPSZ256m:
+ case X86::VSQRTPSZ256mb:
+ case X86::VSQRTPSZ256mbk:
+ case X86::VSQRTPSZ256mbkz:
+ case X86::VSQRTPSZ256mk:
+ case X86::VSQRTPSZ256mkz:
+ case X86::VSQRTPSZ256r:
+ case X86::VSQRTPSZ256rk:
+ case X86::VSQRTPSZ256rkz:
case X86::VSQRTPSZm:
+ case X86::VSQRTPSZmb:
+ case X86::VSQRTPSZmbk:
+ case X86::VSQRTPSZmbkz:
+ case X86::VSQRTPSZmk:
+ case X86::VSQRTPSZmkz:
case X86::VSQRTPSZr:
+ case X86::VSQRTPSZrb:
+ case X86::VSQRTPSZrbk:
+ case X86::VSQRTPSZrbkz:
+ case X86::VSQRTPSZrk:
+ case X86::VSQRTPSZrkz:
case X86::VSQRTSDZm:
case X86::VSQRTSDZm_Int:
+ case X86::VSQRTSDZm_Intk:
+ case X86::VSQRTSDZm_Intkz:
case X86::VSQRTSDZr:
+ case X86::VSQRTSDZr_Int:
+ case X86::VSQRTSDZr_Intk:
+ case X86::VSQRTSDZr_Intkz:
+ case X86::VSQRTSDZrb_Int:
+ case X86::VSQRTSDZrb_Intk:
+ case X86::VSQRTSDZrb_Intkz:
+ case X86::VSQRTSSZm:
case X86::VSQRTSSZm_Int:
+ case X86::VSQRTSSZm_Intk:
+ case X86::VSQRTSSZm_Intkz:
case X86::VSQRTSSZr:
- case X86::VSQRTSSZm:
- case X86::VDIVSDZrm:
- case X86::VDIVSDZrr:
- case X86::VDIVSSZrm:
- case X86::VDIVSSZrr:
-
- case X86::VGATHERQPSZrm:
- case X86::VGATHERQPDZrm:
+ case X86::VSQRTSSZr_Int:
+ case X86::VSQRTSSZr_Intk:
+ case X86::VSQRTSSZr_Intkz:
+ case X86::VSQRTSSZrb_Int:
+ case X86::VSQRTSSZrb_Intk:
+ case X86::VSQRTSSZrb_Intkz:
+
+ case X86::VGATHERDPDYrm:
+ case X86::VGATHERDPDZ128rm:
+ case X86::VGATHERDPDZ256rm:
case X86::VGATHERDPDZrm:
+ case X86::VGATHERDPDrm:
+ case X86::VGATHERDPSYrm:
+ case X86::VGATHERDPSZ128rm:
+ case X86::VGATHERDPSZ256rm:
case X86::VGATHERDPSZrm:
- case X86::VPGATHERQDZrm:
- case X86::VPGATHERQQZrm:
+ case X86::VGATHERDPSrm:
+ case X86::VGATHERPF0DPDm:
+ case X86::VGATHERPF0DPSm:
+ case X86::VGATHERPF0QPDm:
+ case X86::VGATHERPF0QPSm:
+ case X86::VGATHERPF1DPDm:
+ case X86::VGATHERPF1DPSm:
+ case X86::VGATHERPF1QPDm:
+ case X86::VGATHERPF1QPSm:
+ case X86::VGATHERQPDYrm:
+ case X86::VGATHERQPDZ128rm:
+ case X86::VGATHERQPDZ256rm:
+ case X86::VGATHERQPDZrm:
+ case X86::VGATHERQPDrm:
+ case X86::VGATHERQPSYrm:
+ case X86::VGATHERQPSZ128rm:
+ case X86::VGATHERQPSZ256rm:
+ case X86::VGATHERQPSZrm:
+ case X86::VGATHERQPSrm:
+ case X86::VPGATHERDDYrm:
+ case X86::VPGATHERDDZ128rm:
+ case X86::VPGATHERDDZ256rm:
case X86::VPGATHERDDZrm:
+ case X86::VPGATHERDDrm:
+ case X86::VPGATHERDQYrm:
+ case X86::VPGATHERDQZ128rm:
+ case X86::VPGATHERDQZ256rm:
case X86::VPGATHERDQZrm:
- case X86::VSCATTERQPDZmr:
- case X86::VSCATTERQPSZmr:
+ case X86::VPGATHERDQrm:
+ case X86::VPGATHERQDYrm:
+ case X86::VPGATHERQDZ128rm:
+ case X86::VPGATHERQDZ256rm:
+ case X86::VPGATHERQDZrm:
+ case X86::VPGATHERQDrm:
+ case X86::VPGATHERQQYrm:
+ case X86::VPGATHERQQZ128rm:
+ case X86::VPGATHERQQZ256rm:
+ case X86::VPGATHERQQZrm:
+ case X86::VPGATHERQQrm:
+ case X86::VSCATTERDPDZ128mr:
+ case X86::VSCATTERDPDZ256mr:
case X86::VSCATTERDPDZmr:
+ case X86::VSCATTERDPSZ128mr:
+ case X86::VSCATTERDPSZ256mr:
case X86::VSCATTERDPSZmr:
- case X86::VPSCATTERQDZmr:
- case X86::VPSCATTERQQZmr:
+ case X86::VSCATTERPF0DPDm:
+ case X86::VSCATTERPF0DPSm:
+ case X86::VSCATTERPF0QPDm:
+ case X86::VSCATTERPF0QPSm:
+ case X86::VSCATTERPF1DPDm:
+ case X86::VSCATTERPF1DPSm:
+ case X86::VSCATTERPF1QPDm:
+ case X86::VSCATTERPF1QPSm:
+ case X86::VSCATTERQPDZ128mr:
+ case X86::VSCATTERQPDZ256mr:
+ case X86::VSCATTERQPDZmr:
+ case X86::VSCATTERQPSZ128mr:
+ case X86::VSCATTERQPSZ256mr:
+ case X86::VSCATTERQPSZmr:
+ case X86::VPSCATTERDDZ128mr:
+ case X86::VPSCATTERDDZ256mr:
case X86::VPSCATTERDDZmr:
+ case X86::VPSCATTERDQZ128mr:
+ case X86::VPSCATTERDQZ256mr:
case X86::VPSCATTERDQZmr:
+ case X86::VPSCATTERQDZ128mr:
+ case X86::VPSCATTERQDZ256mr:
+ case X86::VPSCATTERQDZmr:
+ case X86::VPSCATTERQQZ128mr:
+ case X86::VPSCATTERQQZ256mr:
+ case X86::VPSCATTERQQZmr:
return true;
}
}
-bool X86InstrInfo::
-hasHighOperandLatency(const TargetSchedModel &SchedModel,
- const MachineRegisterInfo *MRI,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI, unsigned UseIdx) const {
- return isHighLatencyDef(DefMI->getOpcode());
+bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
+ const MachineRegisterInfo *MRI,
+ const MachineInstr &DefMI,
+ unsigned DefIdx,
+ const MachineInstr &UseMI,
+ unsigned UseIdx) const {
+ return isHighLatencyDef(DefMI.getOpcode());
}
bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst,
@@ -7014,12 +7661,119 @@ bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
case X86::PANDrr:
case X86::PORrr:
case X86::PXORrr:
+ case X86::ANDPDrr:
+ case X86::ANDPSrr:
+ case X86::ORPDrr:
+ case X86::ORPSrr:
+ case X86::XORPDrr:
+ case X86::XORPSrr:
+ case X86::PADDBrr:
+ case X86::PADDWrr:
+ case X86::PADDDrr:
+ case X86::PADDQrr:
case X86::VPANDrr:
case X86::VPANDYrr:
+ case X86::VPANDDZ128rr:
+ case X86::VPANDDZ256rr:
+ case X86::VPANDDZrr:
+ case X86::VPANDQZ128rr:
+ case X86::VPANDQZ256rr:
+ case X86::VPANDQZrr:
case X86::VPORrr:
case X86::VPORYrr:
+ case X86::VPORDZ128rr:
+ case X86::VPORDZ256rr:
+ case X86::VPORDZrr:
+ case X86::VPORQZ128rr:
+ case X86::VPORQZ256rr:
+ case X86::VPORQZrr:
case X86::VPXORrr:
case X86::VPXORYrr:
+ case X86::VPXORDZ128rr:
+ case X86::VPXORDZ256rr:
+ case X86::VPXORDZrr:
+ case X86::VPXORQZ128rr:
+ case X86::VPXORQZ256rr:
+ case X86::VPXORQZrr:
+ case X86::VANDPDrr:
+ case X86::VANDPSrr:
+ case X86::VANDPDYrr:
+ case X86::VANDPSYrr:
+ case X86::VANDPDZ128rr:
+ case X86::VANDPSZ128rr:
+ case X86::VANDPDZ256rr:
+ case X86::VANDPSZ256rr:
+ case X86::VANDPDZrr:
+ case X86::VANDPSZrr:
+ case X86::VORPDrr:
+ case X86::VORPSrr:
+ case X86::VORPDYrr:
+ case X86::VORPSYrr:
+ case X86::VORPDZ128rr:
+ case X86::VORPSZ128rr:
+ case X86::VORPDZ256rr:
+ case X86::VORPSZ256rr:
+ case X86::VORPDZrr:
+ case X86::VORPSZrr:
+ case X86::VXORPDrr:
+ case X86::VXORPSrr:
+ case X86::VXORPDYrr:
+ case X86::VXORPSYrr:
+ case X86::VXORPDZ128rr:
+ case X86::VXORPSZ128rr:
+ case X86::VXORPDZ256rr:
+ case X86::VXORPSZ256rr:
+ case X86::VXORPDZrr:
+ case X86::VXORPSZrr:
+ case X86::KADDBrr:
+ case X86::KADDWrr:
+ case X86::KADDDrr:
+ case X86::KADDQrr:
+ case X86::KANDBrr:
+ case X86::KANDWrr:
+ case X86::KANDDrr:
+ case X86::KANDQrr:
+ case X86::KORBrr:
+ case X86::KORWrr:
+ case X86::KORDrr:
+ case X86::KORQrr:
+ case X86::KXORBrr:
+ case X86::KXORWrr:
+ case X86::KXORDrr:
+ case X86::KXORQrr:
+ case X86::VPADDBrr:
+ case X86::VPADDWrr:
+ case X86::VPADDDrr:
+ case X86::VPADDQrr:
+ case X86::VPADDBYrr:
+ case X86::VPADDWYrr:
+ case X86::VPADDDYrr:
+ case X86::VPADDQYrr:
+ case X86::VPADDBZ128rr:
+ case X86::VPADDWZ128rr:
+ case X86::VPADDDZ128rr:
+ case X86::VPADDQZ128rr:
+ case X86::VPADDBZ256rr:
+ case X86::VPADDWZ256rr:
+ case X86::VPADDDZ256rr:
+ case X86::VPADDQZ256rr:
+ case X86::VPADDBZrr:
+ case X86::VPADDWZrr:
+ case X86::VPADDDZrr:
+ case X86::VPADDQZrr:
+ case X86::VPMULLWrr:
+ case X86::VPMULLWYrr:
+ case X86::VPMULLWZ128rr:
+ case X86::VPMULLWZ256rr:
+ case X86::VPMULLWZrr:
+ case X86::VPMULLDrr:
+ case X86::VPMULLDYrr:
+ case X86::VPMULLDZ128rr:
+ case X86::VPMULLDZ256rr:
+ case X86::VPMULLDZrr:
+ case X86::VPMULLQZ128rr:
+ case X86::VPMULLQZ256rr:
+ case X86::VPMULLQZrr:
// Normal min/max instructions are not commutative because of NaN and signed
// zero semantics, but these are. Thus, there's no need to check for global
// relaxed math; the instructions themselves have the properties we need.
@@ -7035,14 +7789,30 @@ bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
case X86::VMAXCPSrr:
case X86::VMAXCPDYrr:
case X86::VMAXCPSYrr:
+ case X86::VMAXCPDZ128rr:
+ case X86::VMAXCPSZ128rr:
+ case X86::VMAXCPDZ256rr:
+ case X86::VMAXCPSZ256rr:
+ case X86::VMAXCPDZrr:
+ case X86::VMAXCPSZrr:
case X86::VMAXCSDrr:
case X86::VMAXCSSrr:
+ case X86::VMAXCSDZrr:
+ case X86::VMAXCSSZrr:
case X86::VMINCPDrr:
case X86::VMINCPSrr:
case X86::VMINCPDYrr:
case X86::VMINCPSYrr:
+ case X86::VMINCPDZ128rr:
+ case X86::VMINCPSZ128rr:
+ case X86::VMINCPDZ256rr:
+ case X86::VMINCPSZ256rr:
+ case X86::VMINCPDZrr:
+ case X86::VMINCPSZrr:
case X86::VMINCSDrr:
case X86::VMINCSSrr:
+ case X86::VMINCSDZrr:
+ case X86::VMINCSSZrr:
return true;
case X86::ADDPDrr:
case X86::ADDPSrr:
@@ -7056,14 +7826,30 @@ bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
case X86::VADDPSrr:
case X86::VADDPDYrr:
case X86::VADDPSYrr:
+ case X86::VADDPDZ128rr:
+ case X86::VADDPSZ128rr:
+ case X86::VADDPDZ256rr:
+ case X86::VADDPSZ256rr:
+ case X86::VADDPDZrr:
+ case X86::VADDPSZrr:
case X86::VADDSDrr:
case X86::VADDSSrr:
+ case X86::VADDSDZrr:
+ case X86::VADDSSZrr:
case X86::VMULPDrr:
case X86::VMULPSrr:
case X86::VMULPDYrr:
case X86::VMULPSYrr:
+ case X86::VMULPDZ128rr:
+ case X86::VMULPSZ128rr:
+ case X86::VMULPDZ256rr:
+ case X86::VMULPSZ256rr:
+ case X86::VMULPDZrr:
+ case X86::VMULPSZrr:
case X86::VMULSDrr:
case X86::VMULSSrr:
+ case X86::VMULSDZrr:
+ case X86::VMULSSZrr:
return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
default:
return false;
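
The FP cases gate on UnsafeFPMath because the machine combiner may reassociate operands to shorten the critical path, and floating-point addition and multiplication are commutative but not associative. A two-line demonstration of why the gate exists:

#include <cstdio>

int main() {
  double a = 1e20, b = -1e20, c = 1.0;
  // Reassociating changes the answer: prints "1 vs 0".
  std::printf("%g vs %g\n", (a + b) + c, a + (b + c));
}
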
@@ -7135,10 +7921,8 @@ X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
{MO_NTPOFF, "x86-ntpoff"},
{MO_GOTNTPOFF, "x86-gotntpoff"},
{MO_DLLIMPORT, "x86-dllimport"},
- {MO_DARWIN_STUB, "x86-darwin-stub"},
{MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"},
{MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"},
- {MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE, "x86-darwin-hidden-nonlazy-pic-base"},
{MO_TLVP, "x86-tlvp"},
{MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"},
{MO_SECREL, "x86-secrel"}};
@@ -7163,7 +7947,7 @@ namespace {
return false;
// Only emit a global base reg in PIC mode.
- if (TM->getRelocationModel() != Reloc::PIC_)
+ if (!TM->isPositionIndependent())
return false;
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
@@ -7223,7 +8007,10 @@ namespace {
LDTLSCleanup() : MachineFunctionPass(ID) {}
bool runOnMachineFunction(MachineFunction &MF) override {
- X86MachineFunctionInfo* MFI = MF.getInfo<X86MachineFunctionInfo>();
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
+ X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
      // No point folding accesses if there aren't at least two.
return false;
@@ -7249,9 +8036,9 @@ namespace {
case X86::TLS_base_addr32:
case X86::TLS_base_addr64:
if (TLSBaseAddrReg)
- I = ReplaceTLSBaseAddrCall(I, TLSBaseAddrReg);
+ I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
else
- I = SetRegister(I, &TLSBaseAddrReg);
+ I = SetRegister(*I, &TLSBaseAddrReg);
Changed = true;
break;
default:
@@ -7270,29 +8057,29 @@ namespace {
// Replace the TLS_base_addr instruction I with a copy from
// TLSBaseAddrReg, returning the new instruction.
- MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr *I,
+ MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I,
unsigned TLSBaseAddrReg) {
- MachineFunction *MF = I->getParent()->getParent();
+ MachineFunction *MF = I.getParent()->getParent();
const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
const bool is64Bit = STI.is64Bit();
const X86InstrInfo *TII = STI.getInstrInfo();
// Insert a Copy from TLSBaseAddrReg to RAX/EAX.
- MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
- TII->get(TargetOpcode::COPY),
- is64Bit ? X86::RAX : X86::EAX)
- .addReg(TLSBaseAddrReg);
+ MachineInstr *Copy =
+ BuildMI(*I.getParent(), I, I.getDebugLoc(),
+ TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX)
+ .addReg(TLSBaseAddrReg);
// Erase the TLS_base_addr instruction.
- I->eraseFromParent();
+ I.eraseFromParent();
return Copy;
}
  // Create a virtual register in *TLSBaseAddrReg, and populate it by
// inserting a copy instruction after I. Returns the new instruction.
- MachineInstr *SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
- MachineFunction *MF = I->getParent()->getParent();
+ MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
+ MachineFunction *MF = I.getParent()->getParent();
const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
const bool is64Bit = STI.is64Bit();
const X86InstrInfo *TII = STI.getInstrInfo();
@@ -7304,11 +8091,11 @@ namespace {
: &X86::GR32RegClass);
// Insert a copy from RAX/EAX to TLSBaseAddrReg.
- MachineInstr *Next = I->getNextNode();
- MachineInstr *Copy = BuildMI(*I->getParent(), Next, I->getDebugLoc(),
- TII->get(TargetOpcode::COPY),
- *TLSBaseAddrReg)
- .addReg(is64Bit ? X86::RAX : X86::EAX);
+ MachineInstr *Next = I.getNextNode();
+ MachineInstr *Copy =
+ BuildMI(*I.getParent(), Next, I.getDebugLoc(),
+ TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
+ .addReg(is64Bit ? X86::RAX : X86::EAX);
return Copy;
}
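
Taken together, SetRegister and ReplaceTLSBaseAddrCall perform a simple CSE over TLS base-address computations: the first TLS_base_addr result is stashed in a fresh virtual register, and each later TLS_base_addr in the function becomes a plain COPY from it. At the source level the effect is roughly this (illustrative; x and y are hypothetical thread-local variables):

// With local-dynamic TLS each access may otherwise call __tls_get_addr;
// after the pass, one call computes the base and the rest reuse it.
extern thread_local int x, y;
int sum() { return x + y; } // one base-address call, two offsets
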
diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h
index edd09d617595..858f35d1cbf0 100644
--- a/lib/Target/X86/X86InstrInfo.h
+++ b/lib/Target/X86/X86InstrInfo.h
@@ -23,60 +23,61 @@
#include "X86GenInstrInfo.inc"
namespace llvm {
+ class MachineInstrBuilder;
class X86RegisterInfo;
class X86Subtarget;
namespace X86 {
  // X86-specific condition codes. These correspond to X86_*_COND in
  // X86InstrInfo.td and must be kept in sync.
- enum CondCode {
- COND_A = 0,
- COND_AE = 1,
- COND_B = 2,
- COND_BE = 3,
- COND_E = 4,
- COND_G = 5,
- COND_GE = 6,
- COND_L = 7,
- COND_LE = 8,
- COND_NE = 9,
- COND_NO = 10,
- COND_NP = 11,
- COND_NS = 12,
- COND_O = 13,
- COND_P = 14,
- COND_S = 15,
- LAST_VALID_COND = COND_S,
-
- // Artificial condition codes. These are used by AnalyzeBranch
- // to indicate a block terminated with two conditional branches to
- // the same location. This occurs in code using FCMP_OEQ or FCMP_UNE,
- // which can't be represented on x86 with a single condition. These
- // are never used in MachineInstrs.
- COND_NE_OR_P,
- COND_NP_OR_E,
-
- COND_INVALID
- };
-
- // Turn condition code into conditional branch opcode.
- unsigned GetCondBranchFromCond(CondCode CC);
-
- /// \brief Return a set opcode for the given condition and whether it has
- /// a memory operand.
- unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand = false);
-
- /// \brief Return a cmov opcode for the given condition, register size in
- /// bytes, and operand type.
- unsigned getCMovFromCond(CondCode CC, unsigned RegBytes,
- bool HasMemoryOperand = false);
-
- // Turn CMov opcode into condition code.
- CondCode getCondFromCMovOpc(unsigned Opc);
-
- /// GetOppositeBranchCondition - Return the inverse of the specified cond,
- /// e.g. turning COND_E to COND_NE.
- CondCode GetOppositeBranchCondition(CondCode CC);
+enum CondCode {
+ COND_A = 0,
+ COND_AE = 1,
+ COND_B = 2,
+ COND_BE = 3,
+ COND_E = 4,
+ COND_G = 5,
+ COND_GE = 6,
+ COND_L = 7,
+ COND_LE = 8,
+ COND_NE = 9,
+ COND_NO = 10,
+ COND_NP = 11,
+ COND_NS = 12,
+ COND_O = 13,
+ COND_P = 14,
+ COND_S = 15,
+ LAST_VALID_COND = COND_S,
+
+ // Artificial condition codes. These are used by AnalyzeBranch
+ // to indicate a block terminated with two conditional branches that together
+ // form a compound condition. They occur in code using FCMP_OEQ or FCMP_UNE,
+ // which can't be represented on x86 with a single condition. These
+ // are never used in MachineInstrs and are inverses of one another.
+ COND_NE_OR_P,
+ COND_E_AND_NP,
+
+ COND_INVALID
+};
+
+// Turn condition code into conditional branch opcode.
+unsigned GetCondBranchFromCond(CondCode CC);
+
+/// \brief Return a set opcode for the given condition and whether it has
+/// a memory operand.
+unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand = false);
+
+/// \brief Return a cmov opcode for the given condition, register size in
+/// bytes, and operand type.
+unsigned getCMovFromCond(CondCode CC, unsigned RegBytes,
+ bool HasMemoryOperand = false);
+
+// Turn CMov opcode into condition code.
+CondCode getCondFromCMovOpc(unsigned Opc);
+
+/// GetOppositeBranchCondition - Return the inverse of the specified cond,
+/// e.g. turning COND_E to COND_NE.
+CondCode GetOppositeBranchCondition(CondCode CC);
} // end namespace X86;
@@ -89,7 +90,6 @@ inline static bool isGlobalStubReference(unsigned char TargetFlag) {
case X86II::MO_GOT: // normal GOT reference.
case X86II::MO_DARWIN_NONLAZY_PIC_BASE: // Normal $non_lazy_ptr ref.
case X86II::MO_DARWIN_NONLAZY: // Normal $non_lazy_ptr ref.
- case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Hidden $non_lazy_ptr ref.
return true;
default:
return false;
@@ -105,7 +105,6 @@ inline static bool isGlobalRelativeToPICBase(unsigned char TargetFlag) {
case X86II::MO_GOT: // isPICStyleGOT: other global.
case X86II::MO_PIC_BASE_OFFSET: // Darwin local global.
case X86II::MO_DARWIN_NONLAZY_PIC_BASE: // Darwin/32 external global.
- case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Darwin/32 hidden global.
case X86II::MO_TLVP: // ??? Pretty sure..
return true;
default:
@@ -119,23 +118,24 @@ inline static bool isScale(const MachineOperand &MO) {
MO.getImm() == 4 || MO.getImm() == 8);
}
-inline static bool isLeaMem(const MachineInstr *MI, unsigned Op) {
- if (MI->getOperand(Op).isFI()) return true;
- return Op+X86::AddrSegmentReg <= MI->getNumOperands() &&
- MI->getOperand(Op+X86::AddrBaseReg).isReg() &&
- isScale(MI->getOperand(Op+X86::AddrScaleAmt)) &&
- MI->getOperand(Op+X86::AddrIndexReg).isReg() &&
- (MI->getOperand(Op+X86::AddrDisp).isImm() ||
- MI->getOperand(Op+X86::AddrDisp).isGlobal() ||
- MI->getOperand(Op+X86::AddrDisp).isCPI() ||
- MI->getOperand(Op+X86::AddrDisp).isJTI());
+inline static bool isLeaMem(const MachineInstr &MI, unsigned Op) {
+ if (MI.getOperand(Op).isFI())
+ return true;
+ return Op + X86::AddrSegmentReg <= MI.getNumOperands() &&
+ MI.getOperand(Op + X86::AddrBaseReg).isReg() &&
+ isScale(MI.getOperand(Op + X86::AddrScaleAmt)) &&
+ MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
+ (MI.getOperand(Op + X86::AddrDisp).isImm() ||
+ MI.getOperand(Op + X86::AddrDisp).isGlobal() ||
+ MI.getOperand(Op + X86::AddrDisp).isCPI() ||
+ MI.getOperand(Op + X86::AddrDisp).isJTI());
}
-inline static bool isMem(const MachineInstr *MI, unsigned Op) {
- if (MI->getOperand(Op).isFI()) return true;
- return Op+X86::AddrNumOperands <= MI->getNumOperands() &&
- MI->getOperand(Op+X86::AddrSegmentReg).isReg() &&
- isLeaMem(MI, Op);
+inline static bool isMem(const MachineInstr &MI, unsigned Op) {
+ if (MI.getOperand(Op).isFI())
+ return true;
+ return Op + X86::AddrNumOperands <= MI.getNumOperands() &&
+ MI.getOperand(Op + X86::AddrSegmentReg).isReg() && isLeaMem(MI, Op);
}
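
Both helpers walk the fixed five-operand encoding of an x86 memory reference, using the X86::Addr* constants as offsets from the first operand (isLeaMem checks the first four, isMem additionally requires the segment operand). The layout they assume, as a sketch with descriptive field names (not LLVM types):

struct X86AddressOperands {  // X86::AddrNumOperands == 5
  unsigned BaseReg;          // [Op + X86::AddrBaseReg]
  int      ScaleAmt;         // [Op + X86::AddrScaleAmt]: 1, 2, 4, or 8
  unsigned IndexReg;         // [Op + X86::AddrIndexReg]
  int64_t  Disp;             // [Op + X86::AddrDisp]: imm, global, CPI, or JTI
  unsigned SegmentReg;       // [Op + X86::AddrSegmentReg]
};
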
class X86InstrInfo final : public X86GenInstrInfo {
@@ -146,7 +146,7 @@ class X86InstrInfo final : public X86GenInstrInfo {
/// RegOp2MemOpTable2, RegOp2MemOpTable3 - Load / store folding opcode maps.
///
typedef DenseMap<unsigned,
- std::pair<unsigned, unsigned> > RegOp2MemOpTableType;
+ std::pair<uint16_t, uint16_t> > RegOp2MemOpTableType;
RegOp2MemOpTableType RegOp2MemOpTable2Addr;
RegOp2MemOpTableType RegOp2MemOpTable0;
RegOp2MemOpTableType RegOp2MemOpTable1;
@@ -157,12 +157,12 @@ class X86InstrInfo final : public X86GenInstrInfo {
/// MemOp2RegOpTable - Load / store unfolding opcode map.
///
typedef DenseMap<unsigned,
- std::pair<unsigned, unsigned> > MemOp2RegOpTableType;
+ std::pair<uint16_t, uint16_t> > MemOp2RegOpTableType;
MemOp2RegOpTableType MemOp2RegOpTable;
static void AddTableEntry(RegOp2MemOpTableType &R2MTable,
MemOp2RegOpTableType &M2RTable,
- unsigned RegOp, unsigned MemOp, unsigned Flags);
+ uint16_t RegOp, uint16_t MemOp, uint16_t Flags);
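
Narrowing the table payload from unsigned to uint16_t halves each entry's value size; X86 machine opcode numbers and the TB_* flag bits both fit in sixteen bits (this same diff narrows the Flags parameter accordingly). A quick size check, assuming a typical ABI:

#include <cstdint>
#include <utility>

static_assert(sizeof(std::pair<uint16_t, uint16_t>) == 4,
              "folding-table value is 4 bytes instead of 8");
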
virtual void anchor();
@@ -184,7 +184,7 @@ public:
/// getSPAdjust - This returns the stack pointer adjustment made by
/// this instruction. For x86, we need to handle more complex call
/// sequences involving PUSHes.
- int getSPAdjust(const MachineInstr *MI) const override;
+ int getSPAdjust(const MachineInstr &MI) const override;
/// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
/// extension instruction. That is, it's like a copy where it's legal for the
@@ -196,27 +196,27 @@ public:
unsigned &SrcReg, unsigned &DstReg,
unsigned &SubIdx) const override;
- unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic, so it isn't
/// reliable for correctness.
- unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
+ unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const override;
- unsigned isStoreToStackSlot(const MachineInstr *MI,
+ unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic, so it isn't
/// reliable for correctness.
- unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
+ unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const override;
- bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
+ bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
AliasAnalysis *AA) const override;
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
- const MachineInstr *Orig,
+ const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const override;
/// Given an operand within a MachineInstr, insert preceding code to put it
@@ -227,10 +227,10 @@ public:
///
/// Reference parameters are set to indicate how caller should add this
/// operand to the LEA instruction.
- bool classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
- unsigned LEAOpcode, bool AllowSP,
- unsigned &NewSrc, bool &isKill,
- bool &isUndef, MachineOperand &ImplicitOp) const;
+ bool classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
+ unsigned LEAOpcode, bool AllowSP, unsigned &NewSrc,
+ bool &isKill, bool &isUndef,
+ MachineOperand &ImplicitOp) const;
/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
@@ -243,7 +243,7 @@ public:
/// performed, otherwise it returns the new instruction.
///
MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
+ MachineInstr &MI,
LiveVariables *LV) const override;
/// Returns true iff the routine could find two commutable operands in the
@@ -261,7 +261,7 @@ public:
/// findCommutedOpIndices(MI, Op1, Op2);
/// can be interpreted as a query asking to find an operand that would be
/// commutable with the operand#1.
- bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
+ bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const override;
/// Returns true if the routine could find two commutable operands
@@ -286,8 +286,7 @@ public:
/// FMA213 #1, #2, #3
  /// results in an instruction with the adjusted opcode:
/// FMA231 #3, #2, #1
- bool findFMA3CommutedOpIndices(MachineInstr *MI,
- unsigned &SrcOpIdx1,
+ bool findFMA3CommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const;
/// Returns an adjusted FMA opcode that must be used in FMA instruction that
@@ -300,37 +299,35 @@ public:
/// FMA213 #1, #2, #3
  /// results in an instruction with the adjusted opcode:
/// FMA231 #3, #2, #1
- unsigned getFMA3OpcodeToCommuteOperands(MachineInstr *MI,
- unsigned SrcOpIdx1,
+ unsigned getFMA3OpcodeToCommuteOperands(MachineInstr &MI, unsigned SrcOpIdx1,
unsigned SrcOpIdx2) const;
// Branch analysis.
- bool isUnpredicatedTerminator(const MachineInstr* MI) const override;
- bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ bool isUnpredicatedTerminator(const MachineInstr &MI) const override;
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
- bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
- unsigned &Offset,
+ bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
+ int64_t &Offset,
const TargetRegisterInfo *TRI) const override;
- bool AnalyzeBranchPredicate(MachineBasicBlock &MBB,
+ bool analyzeBranchPredicate(MachineBasicBlock &MBB,
TargetInstrInfo::MachineBranchPredicate &MBP,
bool AllowModify = false) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const override;
+ const DebugLoc &DL) const override;
bool canInsertSelect(const MachineBasicBlock&, ArrayRef<MachineOperand> Cond,
unsigned, unsigned, int&, int&, int&) const override;
- void insertSelect(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DstReg, ArrayRef<MachineOperand> Cond,
- unsigned TrueReg, unsigned FalseReg) const override;
- void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
+ void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DstReg,
+ ArrayRef<MachineOperand> Cond, unsigned TrueReg,
+ unsigned FalseReg) const override;
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
@@ -358,7 +355,7 @@ public:
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
- bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
+ bool expandPostRAPseudo(MachineInstr &MI) const override;
/// foldMemoryOperand - If this target supports it, fold a load or store of
/// the specified stack slot into the specified machine instruction for the
@@ -366,25 +363,27 @@ public:
/// folding and return true, otherwise it should return false. If it folds
/// the instruction, it is likely that the MachineInstruction the iterator
/// references has been changed.
- MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt,
- int FrameIndex) const override;
+ MachineInstr *
+ foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
+ ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, int FrameIndex,
+ LiveIntervals *LIS = nullptr) const override;
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
- MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt,
- MachineInstr *LoadMI) const override;
+ MachineInstr *foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
+ LiveIntervals *LIS = nullptr) const override;
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instruction. If this is
/// possible, returns true as well as the new instructions by reference.
- bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
- unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
- SmallVectorImpl<MachineInstr*> &NewMIs) const override;
+ bool
+ unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg,
+ bool UnfoldLoad, bool UnfoldStore,
+ SmallVectorImpl<MachineInstr *> &NewMIs) const override;
bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
SmallVectorImpl<SDNode*> &NewNodes) const override;
@@ -419,8 +418,8 @@ public:
int64_t Offset1, int64_t Offset2,
unsigned NumLoads) const override;
- bool shouldScheduleAdjacent(MachineInstr* First,
- MachineInstr *Second) const override;
+ bool shouldScheduleAdjacent(MachineInstr &First,
+ MachineInstr &Second) const override;
void getNoopForMachoTarget(MCInst &NopInst) const override;
@@ -440,7 +439,7 @@ public:
/// True if MI has a condition code def, e.g. EFLAGS, that is
/// not marked dead.
- bool hasLiveCondCodeDef(MachineInstr *MI) const;
+ bool hasLiveCondCodeDef(MachineInstr &MI) const;
/// getGlobalBaseReg - Return a virtual register initialized with the
/// the global base register value. Output instructions required to
@@ -449,19 +448,19 @@ public:
unsigned getGlobalBaseReg(MachineFunction *MF) const;
std::pair<uint16_t, uint16_t>
- getExecutionDomain(const MachineInstr *MI) const override;
+ getExecutionDomain(const MachineInstr &MI) const override;
- void setExecutionDomain(MachineInstr *MI, unsigned Domain) const override;
+ void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override;
unsigned
- getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
- const TargetRegisterInfo *TRI) const override;
- unsigned getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
+ getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const override;
+ unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
const TargetRegisterInfo *TRI) const override;
- void breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
+ void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const override;
- MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+ MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
unsigned OpNum,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
@@ -480,10 +479,10 @@ public:
bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
const MachineRegisterInfo *MRI,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI,
+ const MachineInstr &DefMI, unsigned DefIdx,
+ const MachineInstr &UseMI,
unsigned UseIdx) const override;
-
+
bool useMachineCombiner() const override {
return true;
}
@@ -501,14 +500,14 @@ public:
/// in SrcReg and SrcReg2 if having two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
- bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+ bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const override;
/// optimizeCompareInstr - Check if there exists an earlier instruction that
/// operates on the same source operands and sets flags in the same way as
/// Compare; remove Compare if possible.
- bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
+ bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int CmpMask, int CmpValue,
const MachineRegisterInfo *MRI) const override;
@@ -519,7 +518,7 @@ public:
/// defined by the load we are trying to fold. DefMI returns the machine
/// instruction that defines FoldAsLoadDefReg, and the function returns
/// the machine instruction generated due to folding.
- MachineInstr* optimizeLoadInstr(MachineInstr *MI,
+ MachineInstr *optimizeLoadInstr(MachineInstr &MI,
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
MachineInstr *&DefMI) const override;
@@ -542,19 +541,19 @@ protected:
/// non-commutable operands.
/// Even though the instruction is commutable, the method may still
  /// fail to commute the operands; a null pointer is returned in such cases.
- MachineInstr *commuteInstructionImpl(MachineInstr *MI, bool NewMI,
+ MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned CommuteOpIdx1,
unsigned CommuteOpIdx2) const override;
private:
- MachineInstr * convertToThreeAddressWithLEA(unsigned MIOpc,
- MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const;
+ MachineInstr *convertToThreeAddressWithLEA(unsigned MIOpc,
+ MachineFunction::iterator &MFI,
+ MachineInstr &MI,
+ LiveVariables *LV) const;
/// Handles memory folding for special case instructions, for instance those
/// requiring custom manipulation of the address.
- MachineInstr *foldMemoryOperandCustom(MachineFunction &MF, MachineInstr *MI,
+ MachineInstr *foldMemoryOperandCustom(MachineFunction &MF, MachineInstr &MI,
unsigned OpNum,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
@@ -562,8 +561,11 @@ private:
/// isFrameOperand - Return true and the FrameIndex if the specified
  /// operand and the operands that follow it form a reference to the stack frame.
- bool isFrameOperand(const MachineInstr *MI, unsigned int Op,
+ bool isFrameOperand(const MachineInstr &MI, unsigned int Op,
int &FrameIndex) const;
+
+ /// Expand the MOVImmSExti8 pseudo-instructions.
+ bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB) const;
};
} // End llvm namespace
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 9c8339a841c9..b19a8f3306aa 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -71,10 +71,18 @@ def SDTX86rdrand : SDTypeProfile<2, 0, [SDTCisInt<0>, SDTCisVT<1, i32>]>;
def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
SDTCisVT<2, i8>]>;
def SDTX86caspair : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
+def SDTX86caspairSaveEbx8 : SDTypeProfile<1, 3,
+ [SDTCisVT<0, i32>, SDTCisPtrTy<1>,
+ SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
+def SDTX86caspairSaveRbx16 : SDTypeProfile<1, 3,
+ [SDTCisVT<0, i64>, SDTCisPtrTy<1>,
+ SDTCisVT<2, i64>, SDTCisVT<3, i64>]>;
-def SDTX86atomicBinary : SDTypeProfile<2, 3, [SDTCisInt<0>, SDTCisInt<1>,
- SDTCisPtrTy<2>, SDTCisInt<3>,SDTCisInt<4>]>;
-def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i16>]>;
+def SDTLockBinaryArithWithFlags : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
+ SDTCisPtrTy<1>,
+ SDTCisInt<2>]>;
+
+def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;
def SDT_X86CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
def SDT_X86CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
@@ -104,6 +112,8 @@ def SDT_X86TLSBASEADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDT_X86WIN_ALLOCA : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
+
def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
@@ -116,10 +126,6 @@ def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER,
[SDNPHasChain,SDNPSideEffect]>;
def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER,
[SDNPHasChain]>;
-def X86SFence : SDNode<"X86ISD::SFENCE", SDT_X86MEMBARRIER,
- [SDNPHasChain]>;
-def X86LFence : SDNode<"X86ISD::LFENCE", SDT_X86MEMBARRIER,
- [SDNPHasChain]>;
def X86bsf : SDNode<"X86ISD::BSF", SDTUnaryArithWithFlags>;
@@ -153,6 +159,14 @@ def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86caspair,
def X86cas16 : SDNode<"X86ISD::LCMPXCHG16_DAG", SDTX86caspair,
[SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
SDNPMayLoad, SDNPMemOperand]>;
+def X86cas8save_ebx : SDNode<"X86ISD::LCMPXCHG8_SAVE_EBX_DAG",
+ SDTX86caspairSaveEbx8,
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
+ SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def X86cas16save_rbx : SDNode<"X86ISD::LCMPXCHG16_SAVE_RBX_DAG",
+ SDTX86caspairSaveRbx16,
+ [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
+ SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
@@ -214,6 +228,9 @@ def X86eh_sjlj_setjmp : SDNode<"X86ISD::EH_SJLJ_SETJMP",
def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP",
SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
[SDNPHasChain, SDNPSideEffect]>;
+def X86eh_sjlj_setup_dispatch : SDNode<"X86ISD::EH_SJLJ_SETUP_DISPATCH",
+ SDTypeProfile<0, 0, []>,
+ [SDNPHasChain, SDNPSideEffect]>;
def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
@@ -237,12 +254,28 @@ def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
+def X86lock_add : SDNode<"X86ISD::LADD", SDTLockBinaryArithWithFlags,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
+ SDNPMemOperand]>;
+def X86lock_sub : SDNode<"X86ISD::LSUB", SDTLockBinaryArithWithFlags,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
+ SDNPMemOperand]>;
+def X86lock_or : SDNode<"X86ISD::LOR", SDTLockBinaryArithWithFlags,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
+ SDNPMemOperand]>;
+def X86lock_xor : SDNode<"X86ISD::LXOR", SDTLockBinaryArithWithFlags,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
+ SDNPMemOperand]>;
+def X86lock_and : SDNode<"X86ISD::LAND", SDTLockBinaryArithWithFlags,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
+ SDNPMemOperand]>;
+
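
These new nodes select locked read-modify-write instructions whose only live output is EFLAGS, i.e. atomic operations whose fetched value goes unused. A source-level form that can lower this way (illustrative; the actual choice depends on the DAG):

#include <atomic>

std::atomic<int> Counter;

// The fetched value is discarded, so this can become a plain
// "lock add" instead of an xadd or cmpxchg loop.
void bump() { Counter.fetch_add(1, std::memory_order_seq_cst); }
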
def X86bextr : SDNode<"X86ISD::BEXTR", SDTIntBinOp>;
def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
-def X86WinAlloca : SDNode<"X86ISD::WIN_ALLOCA", SDTX86Void,
- [SDNPHasChain, SDNPInGlue, SDNPOutGlue]>;
+def X86WinAlloca : SDNode<"X86ISD::WIN_ALLOCA", SDT_X86WIN_ALLOCA,
+ [SDNPHasChain, SDNPOutGlue]>;
def X86SegAlloca : SDNode<"X86ISD::SEG_ALLOCA", SDT_X86SEG_ALLOCA,
[SDNPHasChain]>;
@@ -263,7 +296,7 @@ def ptr_rc_nosp : PointerLikeRegClass<1>;
def X86MemAsmOperand : AsmOperandClass {
let Name = "Mem";
}
-let RenderMethod = "addMemOperands" in {
+let RenderMethod = "addMemOperands", SuperClasses = [X86MemAsmOperand] in {
def X86Mem8AsmOperand : AsmOperandClass { let Name = "Mem8"; }
def X86Mem16AsmOperand : AsmOperandClass { let Name = "Mem16"; }
def X86Mem32AsmOperand : AsmOperandClass { let Name = "Mem32"; }
@@ -273,16 +306,19 @@ let RenderMethod = "addMemOperands" in {
def X86Mem256AsmOperand : AsmOperandClass { let Name = "Mem256"; }
def X86Mem512AsmOperand : AsmOperandClass { let Name = "Mem512"; }
// Gather mem operands
- def X86MemVX32Operand : AsmOperandClass { let Name = "MemVX32"; }
- def X86MemVY32Operand : AsmOperandClass { let Name = "MemVY32"; }
- def X86MemVZ32Operand : AsmOperandClass { let Name = "MemVZ32"; }
- def X86MemVX64Operand : AsmOperandClass { let Name = "MemVX64"; }
- def X86MemVY64Operand : AsmOperandClass { let Name = "MemVY64"; }
- def X86MemVZ64Operand : AsmOperandClass { let Name = "MemVZ64"; }
- def X86MemVX32XOperand : AsmOperandClass { let Name = "MemVX32X"; }
- def X86MemVY32XOperand : AsmOperandClass { let Name = "MemVY32X"; }
- def X86MemVX64XOperand : AsmOperandClass { let Name = "MemVX64X"; }
- def X86MemVY64XOperand : AsmOperandClass { let Name = "MemVY64X"; }
+ def X86Mem64_RC128Operand : AsmOperandClass { let Name = "Mem64_RC128"; }
+ def X86Mem128_RC128Operand : AsmOperandClass { let Name = "Mem128_RC128"; }
+ def X86Mem256_RC128Operand : AsmOperandClass { let Name = "Mem256_RC128"; }
+ def X86Mem128_RC256Operand : AsmOperandClass { let Name = "Mem128_RC256"; }
+ def X86Mem256_RC256Operand : AsmOperandClass { let Name = "Mem256_RC256"; }
+
+ def X86Mem64_RC128XOperand : AsmOperandClass { let Name = "Mem64_RC128X"; }
+ def X86Mem128_RC128XOperand : AsmOperandClass { let Name = "Mem128_RC128X"; }
+ def X86Mem256_RC128XOperand : AsmOperandClass { let Name = "Mem256_RC128X"; }
+ def X86Mem128_RC256XOperand : AsmOperandClass { let Name = "Mem128_RC256X"; }
+ def X86Mem256_RC256XOperand : AsmOperandClass { let Name = "Mem256_RC256X"; }
+ def X86Mem512_RC256XOperand : AsmOperandClass { let Name = "Mem512_RC256X"; }
+ def X86Mem512_RC512Operand : AsmOperandClass { let Name = "Mem512_RC512"; }
}
def X86AbsMemAsmOperand : AsmOperandClass {
@@ -293,7 +329,7 @@ def X86AbsMemAsmOperand : AsmOperandClass {
class X86MemOperand<string printMethod,
AsmOperandClass parserMatchClass = X86MemAsmOperand> : Operand<iPTR> {
let PrintMethod = printMethod;
- let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
+ let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, SEGMENT_REG);
let ParserMatchClass = parserMatchClass;
let OperandType = "OPERAND_MEMORY";
}
@@ -302,7 +338,7 @@ class X86MemOperand<string printMethod,
class X86VMemOperand<RegisterClass RC, string printMethod,
AsmOperandClass parserMatchClass>
: X86MemOperand<printMethod, parserMatchClass> {
- let MIOperandInfo = (ops ptr_rc, i8imm, RC, i32imm, i8imm);
+ let MIOperandInfo = (ops ptr_rc, i8imm, RC, i32imm, SEGMENT_REG);
}
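
X86VMemOperand swaps the scalar index register of the usual memory tuple for a vector register class, which is how per-lane gather/scatter addressing (base + vindex[i] * scale + disp) is represented. A source-level counterpart using a real AVX2 intrinsic (compile with -mavx2):

#include <immintrin.h>

// Each lane i loads base[vindex[i]]; scale must be 1, 2, 4, or 8.
__m256 gather8(const float *base, __m256i vindex) {
  return _mm256_i32gather_ps(base, vindex, 4);
}
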
def anymem : X86MemOperand<"printanymem">;
@@ -329,17 +365,19 @@ def f512mem : X86MemOperand<"printf512mem", X86Mem512AsmOperand>;
def v512mem : X86VMemOperand<VR512, "printf512mem", X86Mem512AsmOperand>;
// Gather mem operands
-def vx32mem : X86VMemOperand<VR128, "printi32mem", X86MemVX32Operand>;
-def vy32mem : X86VMemOperand<VR256, "printi32mem", X86MemVY32Operand>;
-def vx64mem : X86VMemOperand<VR128, "printi64mem", X86MemVX64Operand>;
-def vy64mem : X86VMemOperand<VR256, "printi64mem", X86MemVY64Operand>;
-
-def vx32xmem : X86VMemOperand<VR128X, "printi32mem", X86MemVX32XOperand>;
-def vx64xmem : X86VMemOperand<VR128X, "printi32mem", X86MemVX64XOperand>;
-def vy32xmem : X86VMemOperand<VR256X, "printi32mem", X86MemVY32XOperand>;
-def vy64xmem : X86VMemOperand<VR256X, "printi64mem", X86MemVY64XOperand>;
-def vz32mem : X86VMemOperand<VR512, "printi32mem", X86MemVZ32Operand>;
-def vz64mem : X86VMemOperand<VR512, "printi64mem", X86MemVZ64Operand>;
+def vx64mem : X86VMemOperand<VR128, "printi64mem", X86Mem64_RC128Operand>;
+def vx128mem : X86VMemOperand<VR128, "printi128mem", X86Mem128_RC128Operand>;
+def vx256mem : X86VMemOperand<VR128, "printi256mem", X86Mem256_RC128Operand>;
+def vy128mem : X86VMemOperand<VR256, "printi128mem", X86Mem128_RC256Operand>;
+def vy256mem : X86VMemOperand<VR256, "printi256mem", X86Mem256_RC256Operand>;
+
+def vx64xmem : X86VMemOperand<VR128X, "printi64mem", X86Mem64_RC128XOperand>;
+def vx128xmem : X86VMemOperand<VR128X, "printi128mem", X86Mem128_RC128XOperand>;
+def vx256xmem : X86VMemOperand<VR128X, "printi256mem", X86Mem256_RC128XOperand>;
+def vy128xmem : X86VMemOperand<VR256, "printi128mem", X86Mem128_RC256XOperand>;
+def vy256xmem : X86VMemOperand<VR256X, "printi256mem", X86Mem256_RC256XOperand>;
+def vy512mem : X86VMemOperand<VR256X, "printi512mem", X86Mem512_RC256XOperand>;
+def vz512mem : X86VMemOperand<VR512, "printi512mem", X86Mem512_RC512Operand>;
// A version of i8mem for use on x86-64 and x32 that uses a NOREX GPR instead
// of a plain GPR, so that it doesn't potentially require a REX prefix.
@@ -348,7 +386,8 @@ def ptr_rc_norex_nosp : PointerLikeRegClass<3>;
def i8mem_NOREX : Operand<iPTR> {
let PrintMethod = "printi8mem";
- let MIOperandInfo = (ops ptr_rc_norex, i8imm, ptr_rc_norex_nosp, i32imm, i8imm);
+ let MIOperandInfo = (ops ptr_rc_norex, i8imm, ptr_rc_norex_nosp, i32imm,
+ SEGMENT_REG);
let ParserMatchClass = X86Mem8AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
@@ -363,7 +402,7 @@ def ptr_rc_tailcall : PointerLikeRegClass<4>;
def i32mem_TC : Operand<i32> {
let PrintMethod = "printi32mem";
let MIOperandInfo = (ops ptr_rc_tailcall, i8imm, ptr_rc_tailcall,
- i32imm, i8imm);
+ i32imm, SEGMENT_REG);
let ParserMatchClass = X86Mem32AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
@@ -374,7 +413,7 @@ def i32mem_TC : Operand<i32> {
def i64mem_TC : Operand<i64> {
let PrintMethod = "printi64mem";
let MIOperandInfo = (ops ptr_rc_tailcall, i8imm,
- ptr_rc_tailcall, i32imm, i8imm);
+ ptr_rc_tailcall, i32imm, SEGMENT_REG);
let ParserMatchClass = X86Mem64AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
@@ -494,7 +533,7 @@ let RenderMethod = "addMemOffsOperands" in {
class X86SrcIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
: X86MemOperand<printMethod, parserMatchClass> {
- let MIOperandInfo = (ops ptr_rc, i8imm);
+ let MIOperandInfo = (ops ptr_rc, SEGMENT_REG);
}
class X86DstIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
@@ -514,7 +553,7 @@ def dstidx64 : X86DstIdxOperand<"printDstIdx64", X86DstIdx64Operand>;
class X86MemOffsOperand<Operand immOperand, string printMethod,
AsmOperandClass parserMatchClass>
: X86MemOperand<printMethod, parserMatchClass> {
- let MIOperandInfo = (ops immOperand, i8imm);
+ let MIOperandInfo = (ops immOperand, SEGMENT_REG);
}
def offset16_8 : X86MemOffsOperand<i16imm, "printMemOffs8",
@@ -681,14 +720,14 @@ def i64i32imm_pcrel : Operand<i64> {
def lea64_32mem : Operand<i32> {
let PrintMethod = "printanymem";
- let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, i8imm);
+ let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
let ParserMatchClass = X86MemAsmOperand;
}
// Memory operands that use 64-bit pointers in both ILP32 and LP64.
def lea64mem : Operand<i64> {
let PrintMethod = "printanymem";
- let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, i8imm);
+ let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
let ParserMatchClass = X86MemAsmOperand;
}
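// A sketch of the intended consumer (hypothetical name, modeled on the real
// LEA64_32r): a 32-bit result computed from a full 64-bit address, which is
// why the operand keeps GR64 components while the value type stays i32:
//
//   def LEA64_32r_sketch : I<0x8D, MRMSrcMem, (outs GR32:$dst),
//                            (ins lea64_32mem:$src),
//                            "lea{l}\t{$src|$dst}, {$dst|$src}", []>;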
@@ -728,6 +767,8 @@ def vectoraddr : ComplexPattern<iPTR, 5, "selectVectorAddr", [],[SDNPWantParent]
//===----------------------------------------------------------------------===//
// X86 Instruction Predicate Definitions.
+def TruePredicate : Predicate<"true">;
+
def HasCMov : Predicate<"Subtarget->hasCMov()">;
def NoCMov : Predicate<"!Subtarget->hasCMov()">;
@@ -773,7 +814,7 @@ def HasVLX : Predicate<"Subtarget->hasVLX()">,
def NoVLX : Predicate<"!Subtarget->hasVLX()">;
def NoVLX_Or_NoBWI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasBWI()">;
def NoVLX_Or_NoDQI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasDQI()">;
-def PKU : Predicate<"!Subtarget->hasPKU()">;
+def PKU : Predicate<"Subtarget->hasPKU()">;
def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
def HasAES : Predicate<"Subtarget->hasAES()">;
@@ -795,6 +836,10 @@ def HasFSGSBase : Predicate<"Subtarget->hasFSGSBase()">;
def HasLZCNT : Predicate<"Subtarget->hasLZCNT()">;
def HasBMI : Predicate<"Subtarget->hasBMI()">;
def HasBMI2 : Predicate<"Subtarget->hasBMI2()">;
+def HasVBMI : Predicate<"Subtarget->hasVBMI()">,
+ AssemblerPredicate<"FeatureVBMI", "AVX-512 VBMI ISA">;
+def HasIFMA : Predicate<"Subtarget->hasIFMA()">,
+ AssemblerPredicate<"FeatureIFMA", "AVX-512 IFMA ISA">;
def HasRTM : Predicate<"Subtarget->hasRTM()">;
def HasHLE : Predicate<"Subtarget->hasHLE()">;
def HasTSX : Predicate<"Subtarget->hasRTM() || Subtarget->hasHLE()">;
@@ -804,6 +849,7 @@ def HasPRFCHW : Predicate<"Subtarget->hasPRFCHW()">;
def HasRDSEED : Predicate<"Subtarget->hasRDSEED()">;
def HasPrefetchW : Predicate<"Subtarget->hasPRFCHW()">;
def HasLAHFSAHF : Predicate<"Subtarget->hasLAHFSAHF()">;
+def HasMWAITX : Predicate<"Subtarget->hasMWAITX()">;
def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
def HasMPX : Predicate<"Subtarget->hasMPX()">;
@@ -822,6 +868,8 @@ def In32BitMode : Predicate<"Subtarget->is32Bit()">,
AssemblerPredicate<"Mode32Bit", "32-bit mode">;
def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
+def NotWin64WithoutFP : Predicate<"!Subtarget->isTargetWin64() ||"
+ "Subtarget->getFrameLowering()->hasFP(*MF)">;
def IsPS4 : Predicate<"Subtarget->isTargetPS4()">;
def NotPS4 : Predicate<"!Subtarget->isTargetPS4()">;
def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
@@ -832,15 +880,16 @@ def FarData : Predicate<"TM.getCodeModel() != CodeModel::Small &&"
"TM.getCodeModel() != CodeModel::Kernel">;
def NearData : Predicate<"TM.getCodeModel() == CodeModel::Small ||"
"TM.getCodeModel() == CodeModel::Kernel">;
-def IsStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;
-def IsNotPIC : Predicate<"TM.getRelocationModel() != Reloc::PIC_">;
+def IsNotPIC : Predicate<"!TM.isPositionIndependent()">;
def OptForSize : Predicate<"OptForSize">;
+def OptForMinSize : Predicate<"OptForMinSize">;
def OptForSpeed : Predicate<"!OptForSize">;
def FastBTMem : Predicate<"!Subtarget->isBTMemSlow()">;
-def CallImmAddr : Predicate<"Subtarget->IsLegalToCallImmediateAddr(TM)">;
+def CallImmAddr : Predicate<"Subtarget->isLegalToCallImmediateAddr()">;
def FavorMemIndirectCall : Predicate<"!Subtarget->callRegIndirect()">;
def NotSlowIncDec : Predicate<"!Subtarget->slowIncDec()">;
def HasFastMem32 : Predicate<"!Subtarget->isUnalignedMem32Slow()">;
+def HasMFence : Predicate<"Subtarget->hasMFence()">;
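+
+// Each predicate becomes a guard that instruction definitions and patterns
+// attach through Requires<[...]>; a minimal sketch (hypothetical name,
+// modeled on the real MFENCE definition) of HasMFence gating both selection
+// and assembler matching:
+//
+//   def MFENCE_sketch : I<0xAE, MRM_F0, (outs), (ins), "mfence", []>, TB,
+//                       Requires<[HasMFence]>;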
//===----------------------------------------------------------------------===//
// X86 Instruction Format Definitions.
@@ -871,12 +920,6 @@ def X86_COND_O : PatLeaf<(i8 13)>;
def X86_COND_P : PatLeaf<(i8 14)>; // alt. COND_PE
def X86_COND_S : PatLeaf<(i8 15)>;
-// Predicate used to help when pattern matching LZCNT/TZCNT.
-def X86_COND_E_OR_NE : ImmLeaf<i8, [{
- return (Imm == X86::COND_E) || (Imm == X86::COND_NE);
-}]>;
-
-
def i16immSExt8 : ImmLeaf<i16, [{ return isInt<8>(Imm); }]>;
def i32immSExt8 : ImmLeaf<i32, [{ return isInt<8>(Imm); }]>;
def i64immSExt8 : ImmLeaf<i64, [{ return isInt<8>(Imm); }]>;
@@ -1042,6 +1085,10 @@ def LEAVE64 : I<0xC9, RawFrm,
// Miscellaneous Instructions.
//
+let isBarrier = 1, hasSideEffects = 1, usesCustomInserter = 1 in
+ def Int_eh_sjlj_setup_dispatch
+ : PseudoI<(outs), (ins), [(X86eh_sjlj_setup_dispatch)]>;
+
let Defs = [ESP], Uses = [ESP], hasSideEffects=0 in {
let mayLoad = 1, SchedRW = [WriteLoad] in {
def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", [],
@@ -1092,12 +1139,12 @@ def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src",[],
let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
SchedRW = [WriteRMW], Defs = [ESP] in {
- let Uses = [ESP, EFLAGS] in
+ let Uses = [ESP] in
def RDFLAGS32 : PseudoI<(outs GR32:$dst), (ins),
[(set GR32:$dst, (int_x86_flags_read_u32))]>,
Requires<[Not64BitMode]>;
- let Uses = [RSP, EFLAGS] in
+ let Uses = [RSP] in
def RDFLAGS64 : PseudoI<(outs GR64:$dst), (ins),
[(set GR64:$dst, (int_x86_flags_read_u64))]>,
Requires<[In64BitMode]>;
@@ -1253,28 +1300,28 @@ def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
let SchedRW = [WriteMicrocoded] in {
// These use the DF flag in the EFLAGS register to inc or dec EDI and ESI
let Defs = [EDI,ESI], Uses = [EDI,ESI,EFLAGS] in {
-def MOVSB : I<0xA4, RawFrmDstSrc, (outs dstidx8:$dst), (ins srcidx8:$src),
+def MOVSB : I<0xA4, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
"movsb\t{$src, $dst|$dst, $src}", [], IIC_MOVS>;
-def MOVSW : I<0xA5, RawFrmDstSrc, (outs dstidx16:$dst), (ins srcidx16:$src),
+def MOVSW : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
"movsw\t{$src, $dst|$dst, $src}", [], IIC_MOVS>, OpSize16;
-def MOVSL : I<0xA5, RawFrmDstSrc, (outs dstidx32:$dst), (ins srcidx32:$src),
+def MOVSL : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
"movs{l|d}\t{$src, $dst|$dst, $src}", [], IIC_MOVS>, OpSize32;
-def MOVSQ : RI<0xA5, RawFrmDstSrc, (outs dstidx64:$dst), (ins srcidx64:$src),
+def MOVSQ : RI<0xA5, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
"movsq\t{$src, $dst|$dst, $src}", [], IIC_MOVS>;
}
// These use the DF flag in the EFLAGS register to inc or dec EDI and ESI
let Defs = [EDI], Uses = [AL,EDI,EFLAGS] in
-def STOSB : I<0xAA, RawFrmDst, (outs dstidx8:$dst), (ins),
+def STOSB : I<0xAA, RawFrmDst, (outs), (ins dstidx8:$dst),
"stosb\t{%al, $dst|$dst, al}", [], IIC_STOS>;
let Defs = [EDI], Uses = [AX,EDI,EFLAGS] in
-def STOSW : I<0xAB, RawFrmDst, (outs dstidx16:$dst), (ins),
+def STOSW : I<0xAB, RawFrmDst, (outs), (ins dstidx16:$dst),
"stosw\t{%ax, $dst|$dst, ax}", [], IIC_STOS>, OpSize16;
let Defs = [EDI], Uses = [EAX,EDI,EFLAGS] in
-def STOSL : I<0xAB, RawFrmDst, (outs dstidx32:$dst), (ins),
+def STOSL : I<0xAB, RawFrmDst, (outs), (ins dstidx32:$dst),
"stos{l|d}\t{%eax, $dst|$dst, eax}", [], IIC_STOS>, OpSize32;
let Defs = [RDI], Uses = [RAX,RDI,EFLAGS] in
-def STOSQ : RI<0xAB, RawFrmDst, (outs dstidx64:$dst), (ins),
+def STOSQ : RI<0xAB, RawFrmDst, (outs), (ins dstidx64:$dst),
"stosq\t{%rax, $dst|$dst, rax}", [], IIC_STOS>;
// These use the DF flag in the EFLAGS register to inc or dec EDI and ESI
@@ -1402,30 +1449,30 @@ def MOV32ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_32:$src),
}
let mayStore = 1 in {
let Uses = [AL] in
-def MOV8o32a : Ii32<0xA2, RawFrmMemOffs, (outs offset32_8:$dst), (ins),
+def MOV8o32a : Ii32<0xA2, RawFrmMemOffs, (outs), (ins offset32_8:$dst),
"mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>, AdSize32;
let Uses = [AX] in
-def MOV16o32a : Ii32<0xA3, RawFrmMemOffs, (outs offset32_16:$dst), (ins),
+def MOV16o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_16:$dst),
"mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>,
OpSize16, AdSize32;
let Uses = [EAX] in
-def MOV32o32a : Ii32<0xA3, RawFrmMemOffs, (outs offset32_32:$dst), (ins),
+def MOV32o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_32:$dst),
"mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>,
OpSize32, AdSize32;
let Uses = [RAX] in
-def MOV64o32a : RIi32<0xA3, RawFrmMemOffs, (outs offset32_64:$dst), (ins),
+def MOV64o32a : RIi32<0xA3, RawFrmMemOffs, (outs), (ins offset32_64:$dst),
"mov{q}\t{%rax, $dst|$dst, rax}", [], IIC_MOV_MEM>,
AdSize32;
let Uses = [AL] in
-def MOV8o16a : Ii16<0xA2, RawFrmMemOffs, (outs offset16_8:$dst), (ins),
+def MOV8o16a : Ii16<0xA2, RawFrmMemOffs, (outs), (ins offset16_8:$dst),
"mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>, AdSize16;
let Uses = [AX] in
-def MOV16o16a : Ii16<0xA3, RawFrmMemOffs, (outs offset16_16:$dst), (ins),
+def MOV16o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_16:$dst),
"mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>,
OpSize16, AdSize16;
let Uses = [EAX] in
-def MOV32o16a : Ii16<0xA3, RawFrmMemOffs, (outs offset16_32:$dst), (ins),
+def MOV32o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_32:$dst),
"mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>,
OpSize32, AdSize16;
}
@@ -1451,17 +1498,17 @@ def MOV64ao64 : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64_64:$src),
let mayStore = 1 in {
let Uses = [AL] in
-def MOV8o64a : RIi64_NOREX<0xA2, RawFrmMemOffs, (outs offset64_8:$dst), (ins),
+def MOV8o64a : RIi64_NOREX<0xA2, RawFrmMemOffs, (outs), (ins offset64_8:$dst),
"movabs{b}\t{%al, $dst|$dst, al}", []>, AdSize64;
let Uses = [AX] in
-def MOV16o64a : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset64_16:$dst), (ins),
+def MOV16o64a : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs), (ins offset64_16:$dst),
"movabs{w}\t{%ax, $dst|$dst, ax}", []>, OpSize16, AdSize64;
let Uses = [EAX] in
-def MOV32o64a : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs offset64_32:$dst), (ins),
+def MOV32o64a : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs), (ins offset64_32:$dst),
"movabs{l}\t{%eax, $dst|$dst, eax}", []>, OpSize32,
AdSize64;
let Uses = [RAX] in
-def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs offset64_64:$dst), (ins),
+def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs), (ins offset64_64:$dst),
"movabs{q}\t{%rax, $dst|$dst, rax}", []>, AdSize64;
}
} // hasSideEffects = 0
@@ -1951,11 +1998,11 @@ def OUTSL : I<0x6F, RawFrmSrc, (outs), (ins srcidx32:$src),
// These use the DF flag in the EFLAGS register to inc or dec EDI and ESI
let Defs = [EDI], Uses = [DX,EDI,EFLAGS] in {
-def INSB : I<0x6C, RawFrmDst, (outs dstidx8:$dst), (ins),
+def INSB : I<0x6C, RawFrmDst, (outs), (ins dstidx8:$dst),
"insb\t{%dx, $dst|$dst, dx}", [], IIC_INS>;
-def INSW : I<0x6D, RawFrmDst, (outs dstidx16:$dst), (ins),
+def INSW : I<0x6D, RawFrmDst, (outs), (ins dstidx16:$dst),
"insw\t{%dx, $dst|$dst, dx}", [], IIC_INS>, OpSize16;
-def INSL : I<0x6D, RawFrmDst, (outs dstidx32:$dst), (ins),
+def INSL : I<0x6D, RawFrmDst, (outs), (ins dstidx32:$dst),
"ins{l|d}\t{%dx, $dst|$dst, dx}", [], IIC_INS>, OpSize32;
}
}
@@ -2124,46 +2171,6 @@ let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
(implicit EFLAGS)]>, XS;
}
-let Predicates = [HasLZCNT] in {
- def : Pat<(X86cmov (ctlz GR16:$src), (i16 16), (X86_COND_E_OR_NE),
- (X86cmp GR16:$src, (i16 0))),
- (LZCNT16rr GR16:$src)>;
- def : Pat<(X86cmov (ctlz GR32:$src), (i32 32), (X86_COND_E_OR_NE),
- (X86cmp GR32:$src, (i32 0))),
- (LZCNT32rr GR32:$src)>;
- def : Pat<(X86cmov (ctlz GR64:$src), (i64 64), (X86_COND_E_OR_NE),
- (X86cmp GR64:$src, (i64 0))),
- (LZCNT64rr GR64:$src)>;
- def : Pat<(X86cmov (i16 16), (ctlz GR16:$src), (X86_COND_E_OR_NE),
- (X86cmp GR16:$src, (i16 0))),
- (LZCNT16rr GR16:$src)>;
- def : Pat<(X86cmov (i32 32), (ctlz GR32:$src), (X86_COND_E_OR_NE),
- (X86cmp GR32:$src, (i32 0))),
- (LZCNT32rr GR32:$src)>;
- def : Pat<(X86cmov (i64 64), (ctlz GR64:$src), (X86_COND_E_OR_NE),
- (X86cmp GR64:$src, (i64 0))),
- (LZCNT64rr GR64:$src)>;
-
- def : Pat<(X86cmov (ctlz (loadi16 addr:$src)), (i16 16), (X86_COND_E_OR_NE),
- (X86cmp (loadi16 addr:$src), (i16 0))),
- (LZCNT16rm addr:$src)>;
- def : Pat<(X86cmov (ctlz (loadi32 addr:$src)), (i32 32), (X86_COND_E_OR_NE),
- (X86cmp (loadi32 addr:$src), (i32 0))),
- (LZCNT32rm addr:$src)>;
- def : Pat<(X86cmov (ctlz (loadi64 addr:$src)), (i64 64), (X86_COND_E_OR_NE),
- (X86cmp (loadi64 addr:$src), (i64 0))),
- (LZCNT64rm addr:$src)>;
- def : Pat<(X86cmov (i16 16), (ctlz (loadi16 addr:$src)), (X86_COND_E_OR_NE),
- (X86cmp (loadi16 addr:$src), (i16 0))),
- (LZCNT16rm addr:$src)>;
- def : Pat<(X86cmov (i32 32), (ctlz (loadi32 addr:$src)), (X86_COND_E_OR_NE),
- (X86cmp (loadi32 addr:$src), (i32 0))),
- (LZCNT32rm addr:$src)>;
- def : Pat<(X86cmov (i64 64), (ctlz (loadi64 addr:$src)), (X86_COND_E_OR_NE),
- (X86cmp (loadi64 addr:$src), (i64 0))),
- (LZCNT64rm addr:$src)>;
-}
-
//===----------------------------------------------------------------------===//
// BMI Instructions
//
@@ -2240,46 +2247,6 @@ let Predicates = [HasBMI] in {
(BLSI64rr GR64:$src)>;
}
-let Predicates = [HasBMI] in {
- def : Pat<(X86cmov (cttz GR16:$src), (i16 16), (X86_COND_E_OR_NE),
- (X86cmp GR16:$src, (i16 0))),
- (TZCNT16rr GR16:$src)>;
- def : Pat<(X86cmov (cttz GR32:$src), (i32 32), (X86_COND_E_OR_NE),
- (X86cmp GR32:$src, (i32 0))),
- (TZCNT32rr GR32:$src)>;
- def : Pat<(X86cmov (cttz GR64:$src), (i64 64), (X86_COND_E_OR_NE),
- (X86cmp GR64:$src, (i64 0))),
- (TZCNT64rr GR64:$src)>;
- def : Pat<(X86cmov (i16 16), (cttz GR16:$src), (X86_COND_E_OR_NE),
- (X86cmp GR16:$src, (i16 0))),
- (TZCNT16rr GR16:$src)>;
- def : Pat<(X86cmov (i32 32), (cttz GR32:$src), (X86_COND_E_OR_NE),
- (X86cmp GR32:$src, (i32 0))),
- (TZCNT32rr GR32:$src)>;
- def : Pat<(X86cmov (i64 64), (cttz GR64:$src), (X86_COND_E_OR_NE),
- (X86cmp GR64:$src, (i64 0))),
- (TZCNT64rr GR64:$src)>;
-
- def : Pat<(X86cmov (cttz (loadi16 addr:$src)), (i16 16), (X86_COND_E_OR_NE),
- (X86cmp (loadi16 addr:$src), (i16 0))),
- (TZCNT16rm addr:$src)>;
- def : Pat<(X86cmov (cttz (loadi32 addr:$src)), (i32 32), (X86_COND_E_OR_NE),
- (X86cmp (loadi32 addr:$src), (i32 0))),
- (TZCNT32rm addr:$src)>;
- def : Pat<(X86cmov (cttz (loadi64 addr:$src)), (i64 64), (X86_COND_E_OR_NE),
- (X86cmp (loadi64 addr:$src), (i64 0))),
- (TZCNT64rm addr:$src)>;
- def : Pat<(X86cmov (i16 16), (cttz (loadi16 addr:$src)), (X86_COND_E_OR_NE),
- (X86cmp (loadi16 addr:$src), (i16 0))),
- (TZCNT16rm addr:$src)>;
- def : Pat<(X86cmov (i32 32), (cttz (loadi32 addr:$src)), (X86_COND_E_OR_NE),
- (X86cmp (loadi32 addr:$src), (i32 0))),
- (TZCNT32rm addr:$src)>;
- def : Pat<(X86cmov (i64 64), (cttz (loadi64 addr:$src)), (X86_COND_E_OR_NE),
- (X86cmp (loadi64 addr:$src), (i64 0))),
- (TZCNT64rm addr:$src)>;
-}
-
multiclass bmi_bextr_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
X86MemOperand x86memop, Intrinsic Int,
@@ -2440,22 +2407,34 @@ defm TZMSK : tbm_binary_intr<0x01, "tzmsk", MRM4r, MRM4m>;
//===----------------------------------------------------------------------===//
// MONITORX/MWAITX Instructions
//
-let SchedRW = [WriteSystem] in {
-let Uses = [EAX, ECX, EDX] in
-def MONITORXrrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", [],
- IIC_SSE_MONITOR>, TB;
-let Uses = [ECX, EAX, EBX] in
-def MWAITXrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx", [], IIC_SSE_MWAIT>,
- TB;
+let SchedRW = [ WriteSystem ] in {
+ let usesCustomInserter = 1 in {
+ def MONITORX : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
+ [(int_x86_monitorx addr:$src1, GR32:$src2, GR32:$src3)]>,
+ Requires<[ HasMWAITX ]>;
+ }
+
+ let Uses = [ EAX, ECX, EDX ] in {
+ def MONITORXrrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", [], IIC_SSE_MONITORX>,
+ TB, Requires<[ HasMWAITX ]>;
+ }
+
+ let Uses = [ ECX, EAX, EBX ] in {
+ def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
+ [(int_x86_mwaitx ECX, EAX, EBX)], IIC_SSE_MWAITX>,
+ TB, Requires<[ HasMWAITX ]>;
+ }
} // SchedRW
-def : InstAlias<"mwaitx\t{%eax, %ecx, %ebx|ebx, ecx, eax}", (MWAITXrr)>, Requires<[Not64BitMode]>;
-def : InstAlias<"mwaitx\t{%rax, %rcx, %rbx|rbx, rcx, rax}", (MWAITXrr)>, Requires<[In64BitMode]>;
+def : InstAlias<"mwaitx\t{%eax, %ecx, %ebx|ebx, ecx, eax}", (MWAITXrrr)>,
+ Requires<[ Not64BitMode ]>;
+def : InstAlias<"mwaitx\t{%rax, %rcx, %rbx|rbx, rcx, rax}", (MWAITXrrr)>,
+ Requires<[ In64BitMode ]>;
def : InstAlias<"monitorx\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORXrrr)>,
- Requires<[Not64BitMode]>;
+ Requires<[ Not64BitMode ]>;
def : InstAlias<"monitorx\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORXrrr)>,
- Requires<[In64BitMode]>;
+ Requires<[ In64BitMode ]>;
//===----------------------------------------------------------------------===//
// CLZERO Instruction
@@ -2535,7 +2514,7 @@ let Predicates = [HasTBM] in {
//
def CLFLUSHOPT : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
- "clflushopt\t$src", []>, PD;
+ "clflushopt\t$src", [(int_x86_clflushopt addr:$src)]>, PD;
def CLWB : I<0xAE, MRM6m, (outs), (ins i8mem:$src), "clwb\t$src", []>, PD;
def PCOMMIT : I<0xAE, MRM_F8, (outs), (ins), "pcommit", []>, PD;
@@ -2781,6 +2760,11 @@ def : InstAlias<"lods\t{$src, %al|al, $src}", (LODSB srcidx8:$src), 0>;
def : InstAlias<"lods\t{$src, %ax|ax, $src}", (LODSW srcidx16:$src), 0>;
def : InstAlias<"lods\t{$src, %eax|eax, $src}", (LODSL srcidx32:$src), 0>;
def : InstAlias<"lods\t{$src, %rax|rax, $src}", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"lods\t$src", (LODSB srcidx8:$src), 0>;
+def : InstAlias<"lods\t$src", (LODSW srcidx16:$src), 0>;
+def : InstAlias<"lods\t$src", (LODSL srcidx32:$src), 0>;
+def : InstAlias<"lods\t$src", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
+
// stos aliases. Accept the source being omitted because it's implicit in
// the mnemonic, or the mnemonic suffix being omitted because it's implicit
@@ -2793,6 +2777,11 @@ def : InstAlias<"stos\t{%al, $dst|$dst, al}", (STOSB dstidx8:$dst), 0>;
def : InstAlias<"stos\t{%ax, $dst|$dst, ax}", (STOSW dstidx16:$dst), 0>;
def : InstAlias<"stos\t{%eax, $dst|$dst, eax}", (STOSL dstidx32:$dst), 0>;
def : InstAlias<"stos\t{%rax, $dst|$dst, rax}", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"stos\t$dst", (STOSB dstidx8:$dst), 0>;
+def : InstAlias<"stos\t$dst", (STOSW dstidx16:$dst), 0>;
+def : InstAlias<"stos\t$dst", (STOSL dstidx32:$dst), 0>;
+def : InstAlias<"stos\t$dst", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
+
// scas aliases. Accept the destination being omitted because it's implicit
// in the mnemonic, or the mnemonic suffix being omitted because it's implicit
@@ -2805,6 +2794,24 @@ def : InstAlias<"scas\t{$dst, %al|al, $dst}", (SCASB dstidx8:$dst), 0>;
def : InstAlias<"scas\t{$dst, %ax|ax, $dst}", (SCASW dstidx16:$dst), 0>;
def : InstAlias<"scas\t{$dst, %eax|eax, $dst}", (SCASL dstidx32:$dst), 0>;
def : InstAlias<"scas\t{$dst, %rax|rax, $dst}", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
+def : InstAlias<"scas\t$dst", (SCASB dstidx8:$dst), 0>;
+def : InstAlias<"scas\t$dst", (SCASW dstidx16:$dst), 0>;
+def : InstAlias<"scas\t$dst", (SCASL dstidx32:$dst), 0>;
+def : InstAlias<"scas\t$dst", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
+
+// cmps aliases. Accept the mnemonic suffix being omitted because it's
+// implicit in the destination.
+def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSB dstidx8:$dst, srcidx8:$src), 0>;
+def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSW dstidx16:$dst, srcidx16:$src), 0>;
+def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSL dstidx32:$dst, srcidx32:$src), 0>;
+def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSQ dstidx64:$dst, srcidx64:$src), 0>, Requires<[In64BitMode]>;
+
+// movs aliases. Accept the mnemonic suffix being omitted because it's
+// implicit in the destination.
+def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSB dstidx8:$dst, srcidx8:$src), 0>;
+def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSW dstidx16:$dst, srcidx16:$src), 0>;
+def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSL dstidx32:$dst, srcidx32:$src), 0>;
+def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSQ dstidx64:$dst, srcidx64:$src), 0>, Requires<[In64BitMode]>;
// div and idiv aliases for explicit A register.
def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8r GR8 :$src)>;
@@ -2892,8 +2899,8 @@ def : InstAlias<"fnstsw" , (FNSTSW16r)>;
// lcall and ljmp aliases. This seems to be an odd mapping in 64-bit mode, but
// this is compatible with what GAS does.
-def : InstAlias<"lcall\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg), 0>, Requires<[Not16BitMode]>;
-def : InstAlias<"ljmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg), 0>, Requires<[Not16BitMode]>;
+def : InstAlias<"lcall\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
+def : InstAlias<"ljmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
def : InstAlias<"lcall\t{*}$dst", (FARCALL32m opaque48mem:$dst), 0>, Requires<[Not16BitMode]>;
def : InstAlias<"ljmp\t{*}$dst", (FARJMP32m opaque48mem:$dst), 0>, Requires<[Not16BitMode]>;
def : InstAlias<"lcall\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
@@ -2917,6 +2924,18 @@ def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri8 GR32:$r, GR32:$r, i3
def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri32 GR64:$r, GR64:$r, i64i32imm:$imm), 0>;
def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm), 0>;
+// ins aliases. Accept the mnemonic suffix being omitted because it's implicit
+// in the destination.
+def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSB dstidx8:$dst), 0>;
+def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSW dstidx16:$dst), 0>;
+def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSL dstidx32:$dst), 0>;
+
+// outs aliases. Accept the mnemonic suffix being omitted because it's implicit
+// in the source.
+def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSB srcidx8:$src), 0>;
+def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSW srcidx16:$src), 0>;
+def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSL srcidx32:$src), 0>;
+
// inb %dx -> inb %al, %dx
def : InstAlias<"inb\t{%dx|dx}", (IN8rr), 0>;
def : InstAlias<"inw\t{%dx|dx}", (IN16rr), 0>;
@@ -2929,12 +2948,12 @@ def : InstAlias<"inl\t$port", (IN32ri u8imm:$port), 0>;
// jmp and call aliases for lcall and ljmp. jmp $42,$5 -> ljmp
def : InstAlias<"call\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
def : InstAlias<"jmp\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
-def : InstAlias<"call\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[Not16BitMode]>;
-def : InstAlias<"jmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[Not16BitMode]>;
-def : InstAlias<"callw\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>;
-def : InstAlias<"jmpw\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>;
-def : InstAlias<"calll\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>;
-def : InstAlias<"jmpl\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>;
+def : InstAlias<"call\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
+def : InstAlias<"jmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
+def : InstAlias<"callw\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
+def : InstAlias<"jmpw\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
+def : InstAlias<"calll\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
+def : InstAlias<"jmpl\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
// Force mov without a suffix with a segment and mem to prefer the 'l' form of
// the move. All segment/mem forms are equivalent; this has the shortest
diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td
index 83f9b1409f61..8d70691714dd 100644
--- a/lib/Target/X86/X86InstrMMX.td
+++ b/lib/Target/X86/X86InstrMMX.td
@@ -282,7 +282,7 @@ def MMX_MOVQ64rr_REV : MMXI<0x7F, MRMDestReg, (outs VR64:$dst), (ins VR64:$src),
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
def MMX_MOVD64from64rm : MMXRI<0x7E, MRMDestMem,
- (outs i64mem:$dst), (ins VR64:$src),
+ (outs), (ins i64mem:$dst, VR64:$src),
"movd\t{$src, $dst|$dst, $src}",
[], IIC_MMX_MOV_REG_MM>, Sched<[WriteStore]>;
diff --git a/lib/Target/X86/X86InstrMPX.td b/lib/Target/X86/X86InstrMPX.td
index 71ab97374dd6..309f601d1fce 100644
--- a/lib/Target/X86/X86InstrMPX.td
+++ b/lib/Target/X86/X86InstrMPX.td
@@ -55,10 +55,10 @@ def BNDMOVRM64rm : RI<0x1A, MRMSrcMem, (outs BNDR:$dst), (ins i128mem:$src),
def BNDMOVMRrr : I<0x1B, MRMDestReg, (outs BNDR:$dst), (ins BNDR:$src),
"bndmov\t{$src, $dst|$dst, $src}", []>, PD,
Requires<[HasMPX]>;
-def BNDMOVMR32mr : I<0x1B, MRMDestMem, (outs i64mem:$dst), (ins BNDR:$src),
+def BNDMOVMR32mr : I<0x1B, MRMDestMem, (outs), (ins i64mem:$dst, BNDR:$src),
"bndmov\t{$src, $dst|$dst, $src}", []>, PD,
Requires<[HasMPX, Not64BitMode]>;
-def BNDMOVMR64mr : RI<0x1B, MRMDestMem, (outs i128mem:$dst), (ins BNDR:$src),
+def BNDMOVMR64mr : RI<0x1B, MRMDestMem, (outs), (ins i128mem:$dst, BNDR:$src),
"bndmov\t{$src, $dst|$dst, $src}", []>, PD,
Requires<[HasMPX, In64BitMode]>;
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 6a7c45665e9c..9a98f5cac2ee 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -382,75 +382,71 @@ def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion
-let Predicates = [HasSSE2] in {
- def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
- def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
- def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
- def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
- def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
- def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
- def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
- def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
- def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
- def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
- def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
- def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
- def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
- def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
- def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
- def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
- def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
- def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
- def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
- def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
- def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
- def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
- def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
- def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
- def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
- def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
- def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
- def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
- def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
- def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
- def : Pat<(f128 (bitconvert (i128 FR128:$src))), (f128 FR128:$src)>;
- def : Pat<(i128 (bitconvert (f128 FR128:$src))), (i128 FR128:$src)>;
-}
+def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
+def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
+def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
+def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
+def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
+def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
+def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
+def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
+def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
+def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
+def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
+def : Pat<(f128 (bitconvert (i128 FR128:$src))), (f128 FR128:$src)>;
+def : Pat<(i128 (bitconvert (f128 FR128:$src))), (i128 FR128:$src)>;
// Bitcasts between 256-bit vector types. Return the original type since
// no instruction is needed for the conversion
-let Predicates = [HasAVX] in {
- def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
- def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
- def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
- def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
- def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
- def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
- def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
- def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
- def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
- def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
- def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
- def : Pat<(v4i64 (bitconvert (v8i32 VR256:$src))), (v4i64 VR256:$src)>;
- def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
- def : Pat<(v4i64 (bitconvert (v32i8 VR256:$src))), (v4i64 VR256:$src)>;
- def : Pat<(v4i64 (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
- def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
- def : Pat<(v32i8 (bitconvert (v4i64 VR256:$src))), (v32i8 VR256:$src)>;
- def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
- def : Pat<(v32i8 (bitconvert (v8i32 VR256:$src))), (v32i8 VR256:$src)>;
- def : Pat<(v32i8 (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
- def : Pat<(v8i32 (bitconvert (v32i8 VR256:$src))), (v8i32 VR256:$src)>;
- def : Pat<(v8i32 (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
- def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
- def : Pat<(v8i32 (bitconvert (v4i64 VR256:$src))), (v8i32 VR256:$src)>;
- def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
- def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
- def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))), (v16i16 VR256:$src)>;
- def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))), (v16i16 VR256:$src)>;
- def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
- def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
-}
+def : Pat<(v4i64 (bitconvert (v8i32 VR256:$src))), (v4i64 VR256:$src)>;
+def : Pat<(v4i64 (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
+def : Pat<(v4i64 (bitconvert (v32i8 VR256:$src))), (v4i64 VR256:$src)>;
+def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
+def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
+def : Pat<(v8i32 (bitconvert (v4i64 VR256:$src))), (v8i32 VR256:$src)>;
+def : Pat<(v8i32 (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
+def : Pat<(v8i32 (bitconvert (v32i8 VR256:$src))), (v8i32 VR256:$src)>;
+def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
+def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
+def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))), (v16i16 VR256:$src)>;
+def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))), (v16i16 VR256:$src)>;
+def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
+def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
+def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
+def : Pat<(v32i8 (bitconvert (v4i64 VR256:$src))), (v32i8 VR256:$src)>;
+def : Pat<(v32i8 (bitconvert (v8i32 VR256:$src))), (v32i8 VR256:$src)>;
+def : Pat<(v32i8 (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
+def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
+def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
+def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
+def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
+def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
+def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
+def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
+def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
+def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
+def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
+def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
+def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
// Alias instructions that map fld0 to xorps for sse or vxorps for avx.
// This is expanded by ExpandPostRAPseudos.
@@ -472,16 +468,13 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isPseudo = 1, SchedRW = [WriteZero] in {
+ isPseudo = 1, Predicates = [NoVLX], SchedRW = [WriteZero] in {
def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
[(set VR128:$dst, (v4f32 immAllZerosV))]>;
}
-def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
+let Predicates = [NoVLX] in
def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
-def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
-def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
-def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
// The same as done above but for AVX. The 256-bit AVX1 ISA doesn't support PI,
@@ -489,39 +482,9 @@ def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
// at the rename stage without using any execution unit, so SET0PSY
// and SET0PDY can be used for vector int instructions without penalty
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isPseudo = 1, Predicates = [HasAVX], SchedRW = [WriteZero] in {
+ isPseudo = 1, Predicates = [HasAVX, NoVLX], SchedRW = [WriteZero] in {
def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
- [(set VR256:$dst, (v8f32 immAllZerosV))]>;
-}
-
-let Predicates = [HasAVX] in
- def : Pat<(v4f64 immAllZerosV), (AVX_SET0)>;
-
-let Predicates = [HasAVX2] in {
- def : Pat<(v4i64 immAllZerosV), (AVX_SET0)>;
- def : Pat<(v8i32 immAllZerosV), (AVX_SET0)>;
- def : Pat<(v16i16 immAllZerosV), (AVX_SET0)>;
- def : Pat<(v32i8 immAllZerosV), (AVX_SET0)>;
-}
-
-// AVX1 has no support for 256-bit integer instructions, but since the 128-bit
-// VPXOR instruction writes zero to its upper part, it's safe build zeros.
-let Predicates = [HasAVX1Only] in {
-def : Pat<(v32i8 immAllZerosV), (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
-def : Pat<(bc_v32i8 (v8f32 immAllZerosV)),
- (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
-
-def : Pat<(v16i16 immAllZerosV), (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
-def : Pat<(bc_v16i16 (v8f32 immAllZerosV)),
- (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
-
-def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
-def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
- (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
-
-def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
-def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
- (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
+ [(set VR256:$dst, (v8i32 immAllZerosV))]>;
}
// We set canFoldAsLoad because this can be converted to a constant-pool
@@ -649,15 +612,14 @@ let Predicates = [UseAVX] in {
def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
(v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
(SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
+ def : Pat<(v4f64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
}
// Extract and store.
def : Pat<(store (f32 (extractelt (v4f32 VR128:$src), (iPTR 0))),
addr:$dst),
(VMOVSSmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32))>;
- def : Pat<(store (f64 (extractelt (v2f64 VR128:$src), (iPTR 0))),
- addr:$dst),
- (VMOVSDmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64))>;
// Shuffle with VMOVSS
def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
@@ -771,11 +733,6 @@ let Predicates = [UseSSE2] in {
(COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
}
- // Extract and store.
- def : Pat<(store (f64 (extractelt (v2f64 VR128:$src), (iPTR 0))),
- addr:$dst),
- (MOVSDmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR64))>;
-
// Shuffle with MOVSD
def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
(MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
@@ -800,6 +757,13 @@ let Predicates = [UseSSE2] in {
(MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}
+// Aliases to help the assembler pick two byte VEX encodings by swapping the
+// operands relative to the normal instructions to use VEX.R instead of VEX.B.
+def : InstAlias<"vmovss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ (VMOVSSrr_REV VR128L:$dst, VR128:$src1, VR128H:$src2), 0>;
+def : InstAlias<"vmovsd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ (VMOVSDrr_REV VR128L:$dst, VR128:$src1, VR128H:$src2), 0>;
+
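+// Only VEX.R fits in the two-byte (C5h) VEX prefix; needing VEX.B, VEX.X, or
+// VEX.W forces the three-byte (C4h) form. VR128L covers xmm0-7 and VR128H
+// xmm8-15, so these aliases keep the high register in ModRM.reg, which VEX.R
+// can reach. A sketch of the same trick for another reversed form (modeled
+// on the vmovaps aliases added further down):
+//
+//   def : InstAlias<"vmovdqa\t{$src, $dst|$dst, $src}",
+//                   (VMOVDQArr_REV VR128L:$dst, VR128H:$src), 0>;
+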
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
//===----------------------------------------------------------------------===//
@@ -937,10 +901,24 @@ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
}
-def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
- (VMOVUPSYmr addr:$dst, VR256:$src)>;
-def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
- (VMOVUPDYmr addr:$dst, VR256:$src)>;
+// Aliases to help the assembler pick two byte VEX encodings by swapping the
+// operands relative to the normal instructions to use VEX.R instead of VEX.B.
+def : InstAlias<"vmovaps\t{$src, $dst|$dst, $src}",
+ (VMOVAPSrr_REV VR128L:$dst, VR128H:$src), 0>;
+def : InstAlias<"vmovapd\t{$src, $dst|$dst, $src}",
+ (VMOVAPDrr_REV VR128L:$dst, VR128H:$src), 0>;
+def : InstAlias<"vmovups\t{$src, $dst|$dst, $src}",
+ (VMOVUPSrr_REV VR128L:$dst, VR128H:$src), 0>;
+def : InstAlias<"vmovupd\t{$src, $dst|$dst, $src}",
+ (VMOVUPDrr_REV VR128L:$dst, VR128H:$src), 0>;
+def : InstAlias<"vmovaps\t{$src, $dst|$dst, $src}",
+ (VMOVAPSYrr_REV VR256L:$dst, VR256H:$src), 0>;
+def : InstAlias<"vmovapd\t{$src, $dst|$dst, $src}",
+ (VMOVAPDYrr_REV VR256L:$dst, VR256H:$src), 0>;
+def : InstAlias<"vmovups\t{$src, $dst|$dst, $src}",
+ (VMOVUPSYrr_REV VR256L:$dst, VR256H:$src), 0>;
+def : InstAlias<"vmovupd\t{$src, $dst|$dst, $src}",
+ (VMOVUPDYrr_REV VR256L:$dst, VR256H:$src), 0>;
let SchedRW = [WriteStore] in {
def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
@@ -978,20 +956,6 @@ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
IIC_SSE_MOVU_P_RR>;
}
-let Predicates = [HasAVX] in {
- def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
- (VMOVUPSmr addr:$dst, VR128:$src)>;
- def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
- (VMOVUPDmr addr:$dst, VR128:$src)>;
-}
-
-let Predicates = [UseSSE1] in
- def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
- (MOVUPSmr addr:$dst, VR128:$src)>;
-let Predicates = [UseSSE2] in
- def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
- (MOVUPDmr addr:$dst, VR128:$src)>;
-
// Use vmovaps/vmovups for AVX integer load/store.
let Predicates = [HasAVX, NoVLX] in {
// 128-bit load/store
@@ -1004,18 +968,10 @@ let Predicates = [HasAVX, NoVLX] in {
(VMOVAPSmr addr:$dst, VR128:$src)>;
def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
(VMOVAPSmr addr:$dst, VR128:$src)>;
- def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
- (VMOVAPSmr addr:$dst, VR128:$src)>;
- def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
- (VMOVAPSmr addr:$dst, VR128:$src)>;
def : Pat<(store (v2i64 VR128:$src), addr:$dst),
(VMOVUPSmr addr:$dst, VR128:$src)>;
def : Pat<(store (v4i32 VR128:$src), addr:$dst),
(VMOVUPSmr addr:$dst, VR128:$src)>;
- def : Pat<(store (v8i16 VR128:$src), addr:$dst),
- (VMOVUPSmr addr:$dst, VR128:$src)>;
- def : Pat<(store (v16i8 VR128:$src), addr:$dst),
- (VMOVUPSmr addr:$dst, VR128:$src)>;
// 256-bit load/store
def : Pat<(alignedloadv4i64 addr:$src),
@@ -1026,18 +982,10 @@ let Predicates = [HasAVX, NoVLX] in {
(VMOVAPSYmr addr:$dst, VR256:$src)>;
def : Pat<(alignedstore256 (v8i32 VR256:$src), addr:$dst),
(VMOVAPSYmr addr:$dst, VR256:$src)>;
- def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
- (VMOVAPSYmr addr:$dst, VR256:$src)>;
- def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
- (VMOVAPSYmr addr:$dst, VR256:$src)>;
def : Pat<(store (v4i64 VR256:$src), addr:$dst),
(VMOVUPSYmr addr:$dst, VR256:$src)>;
def : Pat<(store (v8i32 VR256:$src), addr:$dst),
(VMOVUPSYmr addr:$dst, VR256:$src)>;
- def : Pat<(store (v16i16 VR256:$src), addr:$dst),
- (VMOVUPSYmr addr:$dst, VR256:$src)>;
- def : Pat<(store (v32i8 VR256:$src), addr:$dst),
- (VMOVUPSYmr addr:$dst, VR256:$src)>;
// Special patterns for storing subvector extracts of lower 128-bits
// It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
@@ -1080,6 +1028,28 @@ let Predicates = [HasAVX, NoVLX] in {
(VMOVUPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
}
+let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
+ // 128-bit load/store
+ def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
+ (VMOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
+ (VMOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v8i16 VR128:$src), addr:$dst),
+ (VMOVUPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v16i8 VR128:$src), addr:$dst),
+ (VMOVUPSmr addr:$dst, VR128:$src)>;
+
+ // 256-bit load/store
+ def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
+ (VMOVAPSYmr addr:$dst, VR256:$src)>;
+ def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
+ (VMOVAPSYmr addr:$dst, VR256:$src)>;
+ def : Pat<(store (v16i16 VR256:$src), addr:$dst),
+ (VMOVUPSYmr addr:$dst, VR256:$src)>;
+ def : Pat<(store (v32i8 VR256:$src), addr:$dst),
+ (VMOVUPSYmr addr:$dst, VR256:$src)>;
+}
+
// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE domain pass.
@@ -2039,35 +2009,24 @@ def CVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
// SSE2 packed instructions with XS prefix
def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (int_x86_sse2_cvttps2dq VR128:$src))],
- IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
+ [], IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvttps2dq
- (loadv4f32 addr:$src)))],
- IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
+ [], IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst,
- (int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
- IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
+ [], IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
- (loadv8f32 addr:$src)))],
- IIC_SSE_CVT_PS_RM>, VEX, VEX_L,
+ [], IIC_SSE_CVT_PS_RM>, VEX, VEX_L,
Sched<[WriteCvtF2ILd]>;
def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))],
- IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
+ [], IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (int_x86_sse2_cvttps2dq (memopv4f32 addr:$src)))],
- IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
+ [], IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
let Predicates = [HasAVX] in {
def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
@@ -2137,14 +2096,10 @@ def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (int_x86_avx_cvtt_pd2dq_256 VR256:$src))],
- IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
+ [], IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (int_x86_avx_cvtt_pd2dq_256 (loadv4f64 addr:$src)))],
- IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
+ [], IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
(VCVTTPD2DQYrr VR128:$dst, VR256:$src), 0>;
@@ -2170,30 +2125,24 @@ def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
let Predicates = [HasAVX] in {
// SSE2 instructions without OpSize prefix
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vcvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
- IIC_SSE_CVT_PD_RR>, PS, VEX, Sched<[WriteCvtF2F]>;
+ "vcvtps2pd\t{$src, $dst|$dst, $src}",
+ [], IIC_SSE_CVT_PD_RR>, PS, VEX, Sched<[WriteCvtF2F]>;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
IIC_SSE_CVT_PD_RM>, PS, VEX, Sched<[WriteCvtF2FLd]>;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst,
- (int_x86_avx_cvt_ps2_pd_256 VR128:$src))],
- IIC_SSE_CVT_PD_RR>, PS, VEX, VEX_L, Sched<[WriteCvtF2F]>;
+ [], IIC_SSE_CVT_PD_RR>, PS, VEX, VEX_L, Sched<[WriteCvtF2F]>;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst,
- (int_x86_avx_cvt_ps2_pd_256 (loadv4f32 addr:$src)))],
- IIC_SSE_CVT_PD_RM>, PS, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
+ [], IIC_SSE_CVT_PD_RM>, PS, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
}
let Predicates = [UseSSE2] in {
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
- IIC_SSE_CVT_PD_RR>, PS, Sched<[WriteCvtF2F]>;
+ "cvtps2pd\t{$src, $dst|$dst, $src}",
+ [], IIC_SSE_CVT_PD_RR>, PS, Sched<[WriteCvtF2F]>;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
@@ -2204,24 +2153,17 @@ def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
let Predicates = [HasAVX] in {
let hasSideEffects = 0, mayLoad = 1 in
def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
- "vcvtdq2pd\t{$src, $dst|$dst, $src}",
- []>, VEX, Sched<[WriteCvtI2FLd]>;
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ []>, VEX, Sched<[WriteCvtI2FLd]>;
def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vcvtdq2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (int_x86_sse2_cvtdq2pd VR128:$src))]>, VEX,
- Sched<[WriteCvtI2F]>;
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ []>, VEX, Sched<[WriteCvtI2F]>;
def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
- "vcvtdq2pd\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst,
- (int_x86_avx_cvtdq2_pd_256
- (bitconvert (loadv2i64 addr:$src))))]>, VEX, VEX_L,
- Sched<[WriteCvtI2FLd]>;
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ []>, VEX, VEX_L, Sched<[WriteCvtI2FLd]>;
def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
- "vcvtdq2pd\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst,
- (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX, VEX_L,
- Sched<[WriteCvtI2F]>;
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ []>, VEX, VEX_L, Sched<[WriteCvtI2F]>;
}
let hasSideEffects = 0, mayLoad = 1 in
@@ -2229,8 +2171,7 @@ def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"cvtdq2pd\t{$src, $dst|$dst, $src}", [],
IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtI2FLd]>;
def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtdq2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
+ "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtI2F]>;
// AVX register conversion intrinsics
@@ -2239,6 +2180,8 @@ let Predicates = [HasAVX] in {
(VCVTDQ2PDrr VR128:$src)>;
def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (loadv2i64 addr:$src)))),
(VCVTDQ2PDrm addr:$src)>;
+ def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
+ (VCVTDQ2PDrm addr:$src)>;
def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
(VCVTDQ2PDYrr VR128:$src)>;
@@ -2252,6 +2195,8 @@ let Predicates = [HasSSE2] in {
(CVTDQ2PDrr VR128:$src)>;
def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (loadv2i64 addr:$src)))),
(CVTDQ2PDrm addr:$src)>;
+ def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
+ (CVTDQ2PDrm addr:$src)>;
} // Predicates = [HasSSE2]
// Convert packed double to packed single
@@ -2553,36 +2498,36 @@ let Constraints = "$src1 = $dst" in {
}
let Predicates = [HasAVX] in {
-def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
+def : Pat<(v4f32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
(VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
-def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (loadv4f32 addr:$src2), imm:$cc)),
+def : Pat<(v4f32 (X86cmpp (v4f32 VR128:$src1), (loadv4f32 addr:$src2), imm:$cc)),
(VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
-def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
+def : Pat<(v2f64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
(VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
-def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (loadv2f64 addr:$src2), imm:$cc)),
+def : Pat<(v2f64 (X86cmpp (v2f64 VR128:$src1), (loadv2f64 addr:$src2), imm:$cc)),
(VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
-def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
+def : Pat<(v8f32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
(VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
-def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), (loadv8f32 addr:$src2), imm:$cc)),
+def : Pat<(v8f32 (X86cmpp (v8f32 VR256:$src1), (loadv8f32 addr:$src2), imm:$cc)),
(VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
-def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
+def : Pat<(v4f64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
(VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
-def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), (loadv4f64 addr:$src2), imm:$cc)),
+def : Pat<(v4f64 (X86cmpp (v4f64 VR256:$src1), (loadv4f64 addr:$src2), imm:$cc)),
(VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
}
let Predicates = [UseSSE1] in {
-def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
+def : Pat<(v4f32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
(CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
-def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memopv4f32 addr:$src2), imm:$cc)),
+def : Pat<(v4f32 (X86cmpp (v4f32 VR128:$src1), (memopv4f32 addr:$src2), imm:$cc)),
(CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
}
let Predicates = [UseSSE2] in {
-def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
+def : Pat<(v2f64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
(CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
-def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memopv2f64 addr:$src2), imm:$cc)),
+def : Pat<(v2f64 (X86cmpp (v2f64 VR128:$src1), (memopv2f64 addr:$src2), imm:$cc)),
(CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
}
@@ -2763,58 +2708,30 @@ let Predicates = [HasAVX1Only] in {
//===----------------------------------------------------------------------===//
/// sse12_extr_sign_mask - sse 1 & 2 packed FP sign-mask extraction
-multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
- Domain d> {
+multiclass sse12_extr_sign_mask<RegisterClass RC, ValueType vt,
+ string asm, Domain d> {
def rr : PI<0x50, MRMSrcReg, (outs GR32orGR64:$dst), (ins RC:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
- [(set GR32orGR64:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>,
+ [(set GR32orGR64:$dst, (X86movmsk (vt RC:$src)))], IIC_SSE_MOVMSK, d>,
Sched<[WriteVecLogic]>;
}
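
// A sketch of what one defm below expands to (hypothetical record name; the
// real expansion yields MOVMSKPSrr and friends), showing how the new
// ValueType parameter reaches the X86movmsk pattern:
//
//   def MOVMSKPSrr_sketch : PI<0x50, MRMSrcReg, (outs GR32orGR64:$dst),
//                              (ins VR128:$src),
//                              "movmskps\t{$src, $dst|$dst, $src}",
//                              [(set GR32orGR64:$dst,
//                                    (X86movmsk (v4f32 VR128:$src)))],
//                              IIC_SSE_MOVMSK, SSEPackedSingle>, PS;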
let Predicates = [HasAVX] in {
- defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
- "movmskps", SSEPackedSingle>, PS, VEX;
- defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
- "movmskpd", SSEPackedDouble>, PD, VEX;
- defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
- "movmskps", SSEPackedSingle>, PS,
- VEX, VEX_L;
- defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
- "movmskpd", SSEPackedDouble>, PD,
- VEX, VEX_L;
-
- def : Pat<(i32 (X86fgetsign FR32:$src)),
- (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
- def : Pat<(i64 (X86fgetsign FR32:$src)),
- (SUBREG_TO_REG (i64 0),
- (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>;
- def : Pat<(i32 (X86fgetsign FR64:$src)),
- (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
- def : Pat<(i64 (X86fgetsign FR64:$src)),
- (SUBREG_TO_REG (i64 0),
- (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>;
+ defm VMOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
+ SSEPackedSingle>, PS, VEX;
+ defm VMOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
+ SSEPackedDouble>, PD, VEX;
+ defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, v8f32, "movmskps",
+ SSEPackedSingle>, PS, VEX, VEX_L;
+ defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, v4f64, "movmskpd",
+ SSEPackedDouble>, PD, VEX, VEX_L;
}
-defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
+defm MOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
SSEPackedSingle>, PS;
-defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
+defm MOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
SSEPackedDouble>, PD;
-def : Pat<(i32 (X86fgetsign FR32:$src)),
- (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>,
- Requires<[UseSSE1]>;
-def : Pat<(i64 (X86fgetsign FR32:$src)),
- (SUBREG_TO_REG (i64 0),
- (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>,
- Requires<[UseSSE1]>;
-def : Pat<(i32 (X86fgetsign FR64:$src)),
- (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>,
- Requires<[UseSSE2]>;
-def : Pat<(i64 (X86fgetsign FR64:$src)),
- (SUBREG_TO_REG (i64 0),
- (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>,
- Requires<[UseSSE2]>;
-
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Logical Instructions
//===---------------------------------------------------------------------===//
@@ -3695,16 +3612,14 @@ def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
PS, Requires<[HasSSE2]>;
} // SchedRW = [WriteStore]
-let Predicates = [HasAVX2, NoVLX] in {
+let Predicates = [HasAVX, NoVLX] in {
def : Pat<(alignednontemporalstore (v8i32 VR256:$src), addr:$dst),
(VMOVNTDQYmr addr:$dst, VR256:$src)>;
def : Pat<(alignednontemporalstore (v16i16 VR256:$src), addr:$dst),
(VMOVNTDQYmr addr:$dst, VR256:$src)>;
def : Pat<(alignednontemporalstore (v32i8 VR256:$src), addr:$dst),
(VMOVNTDQYmr addr:$dst, VR256:$src)>;
-}
-let Predicates = [HasAVX, NoVLX] in {
def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
(VMOVNTDQmr addr:$dst, VR128:$src)>;
def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
@@ -3713,12 +3628,14 @@ let Predicates = [HasAVX, NoVLX] in {
(VMOVNTDQmr addr:$dst, VR128:$src)>;
}
-def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
- (MOVNTDQmr addr:$dst, VR128:$src)>;
-def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
- (MOVNTDQmr addr:$dst, VR128:$src)>;
-def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
- (MOVNTDQmr addr:$dst, VR128:$src)>;
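+// The UseSSE2 guard (SSE2 without AVX) keeps these non-VEX patterns from
+// firing on AVX targets, which should select VMOVNTDQmr instead.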
+let Predicates = [UseSSE2] in {
+ def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
+ (MOVNTDQmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignednontemporalstore (v8i16 VR128:$src), addr:$dst),
+ (MOVNTDQmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignednontemporalstore (v16i8 VR128:$src), addr:$dst),
+ (MOVNTDQmr addr:$dst, VR128:$src)>;
+}
} // AddedComplexity
@@ -3760,6 +3677,8 @@ def PAUSE : I<0x90, RawFrm, (outs), (ins),
let SchedRW = [WriteFence] in {
// Load, store, and memory fence
+// TODO: As with mfence, we may want to ease the availability of sfence/lfence
+// to include any 64-bit target.
def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
"sfence", [(int_x86_sse_sfence)], IIC_SSE_SFENCE>,
PS, Requires<[HasSSE1]>;
@@ -3768,11 +3687,9 @@ def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
TB, Requires<[HasSSE2]>;
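+// mfence is gated on HasMFence rather than raw SSE2, so (as the TODO above
+// notes for the other fences) it is also available on any 64-bit target.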
def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
"mfence", [(int_x86_sse2_mfence)], IIC_SSE_MFENCE>,
- TB, Requires<[HasSSE2]>;
+ TB, Requires<[HasMFence]>;
} // SchedRW
-def : Pat<(X86SFence), (SFENCE)>;
-def : Pat<(X86LFence), (LFENCE)>;
def : Pat<(X86MFence), (MFENCE)>;
//===----------------------------------------------------------------------===//
@@ -3920,15 +3837,16 @@ def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
} // ExeDomain = SSEPackedInt
-let Predicates = [HasAVX] in {
- def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
- (VMOVDQUmr addr:$dst, VR128:$src)>;
- def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
- (VMOVDQUYmr addr:$dst, VR256:$src)>;
-}
-let Predicates = [UseSSE2] in
-def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
- (MOVDQUmr addr:$dst, VR128:$src)>;
+// Aliases to help the assembler pick two-byte VEX encodings by swapping the
+// operands relative to the normal instructions, so that VEX.R is used instead
+// of VEX.B.
+def : InstAlias<"vmovdqa\t{$src, $dst|$dst, $src}",
+ (VMOVDQArr_REV VR128L:$dst, VR128H:$src), 0>;
+def : InstAlias<"vmovdqa\t{$src, $dst|$dst, $src}",
+ (VMOVDQAYrr_REV VR256L:$dst, VR256H:$src), 0>;
+def : InstAlias<"vmovdqu\t{$src, $dst|$dst, $src}",
+ (VMOVDQUrr_REV VR128L:$dst, VR128H:$src), 0>;
+def : InstAlias<"vmovdqu\t{$src, $dst|$dst, $src}",
+ (VMOVDQUYrr_REV VR256L:$dst, VR256H:$src), 0>;
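+// For example, "vmovdqa %xmm8, %xmm0" normally places xmm8 in modrm.rm and
+// needs VEX.B, which only the 3-byte prefix provides; the _REV form moves it
+// to modrm.reg, where VEX.R from the 2-byte prefix suffices.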
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Arithmetic Instructions
@@ -3985,7 +3903,7 @@ let Predicates = [HasAVX2] in
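+// The loaded operand is bitconverted and then explicitly cast to SrcVT, so a
+// single pattern here replaces the old per-type bc_v* PatFrags.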
multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
string OpcodeStr, SDNode OpNode,
SDNode OpNode2, RegisterClass RC,
- ValueType DstVT, ValueType SrcVT, PatFrag bc_frag,
+ ValueType DstVT, ValueType SrcVT,
PatFrag ld_frag, ShiftOpndItins itins,
bit Is2Addr = 1> {
// src2 is always 128-bit
@@ -4002,7 +3920,7 @@ multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set RC:$dst, (DstVT (OpNode RC:$src1,
- (bc_frag (ld_frag addr:$src2)))))], itins.rm>,
+ (SrcVT (bitconvert (ld_frag addr:$src2))))))], itins.rm>,
Sched<[WriteVecShiftLd, ReadAfterLd]>;
def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
(ins RC:$src1, u8imm:$src2),
@@ -4046,6 +3964,14 @@ defm PADDD : PDI_binop_all<0xFE, "paddd", add, v4i32, v8i32,
SSE_INTALU_ITINS_P, 1, NoVLX>;
defm PADDQ : PDI_binop_all<0xD4, "paddq", add, v2i64, v4i64,
SSE_INTALUQ_ITINS_P, 1, NoVLX>;
+defm PADDSB : PDI_binop_all<0xEC, "paddsb", X86adds, v16i8, v32i8,
+ SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
+defm PADDSW : PDI_binop_all<0xED, "paddsw", X86adds, v8i16, v16i16,
+ SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
+defm PADDUSB : PDI_binop_all<0xDC, "paddusb", X86addus, v16i8, v32i8,
+ SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
+defm PADDUSW : PDI_binop_all<0xDD, "paddusw", X86addus, v8i16, v16i16,
+ SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
defm PMULLW : PDI_binop_all<0xD5, "pmullw", mul, v8i16, v16i16,
SSE_INTMUL_ITINS_P, 1, NoVLX_Or_NoBWI>;
defm PMULHUW : PDI_binop_all<0xE4, "pmulhuw", mulhu, v8i16, v16i16,
@@ -4060,6 +3986,10 @@ defm PSUBD : PDI_binop_all<0xFA, "psubd", sub, v4i32, v8i32,
SSE_INTALU_ITINS_P, 0, NoVLX>;
defm PSUBQ : PDI_binop_all<0xFB, "psubq", sub, v2i64, v4i64,
SSE_INTALUQ_ITINS_P, 0, NoVLX>;
+defm PSUBSB : PDI_binop_all<0xE8, "psubsb", X86subs, v16i8, v32i8,
+ SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
+defm PSUBSW : PDI_binop_all<0xE9, "psubsw", X86subs, v8i16, v16i16,
+ SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
defm PSUBUSB : PDI_binop_all<0xD8, "psubusb", X86subus, v16i8, v32i8,
SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
defm PSUBUSW : PDI_binop_all<0xD9, "psubusw", X86subus, v8i16, v16i16,
@@ -4078,26 +4008,14 @@ defm PAVGW : PDI_binop_all<0xE3, "pavgw", X86avg, v8i16, v16i16,
SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
// Intrinsic forms
-defm PSUBSB : PDI_binop_all_int<0xE8, "psubsb", int_x86_sse2_psubs_b,
- int_x86_avx2_psubs_b, SSE_INTALU_ITINS_P, 0>;
-defm PSUBSW : PDI_binop_all_int<0xE9, "psubsw" , int_x86_sse2_psubs_w,
- int_x86_avx2_psubs_w, SSE_INTALU_ITINS_P, 0>;
-defm PADDSB : PDI_binop_all_int<0xEC, "paddsb" , int_x86_sse2_padds_b,
- int_x86_avx2_padds_b, SSE_INTALU_ITINS_P, 1>;
-defm PADDSW : PDI_binop_all_int<0xED, "paddsw" , int_x86_sse2_padds_w,
- int_x86_avx2_padds_w, SSE_INTALU_ITINS_P, 1>;
-defm PADDUSB : PDI_binop_all_int<0xDC, "paddusb", int_x86_sse2_paddus_b,
- int_x86_avx2_paddus_b, SSE_INTALU_ITINS_P, 1>;
-defm PADDUSW : PDI_binop_all_int<0xDD, "paddusw", int_x86_sse2_paddus_w,
- int_x86_avx2_paddus_w, SSE_INTALU_ITINS_P, 1>;
defm PMADDWD : PDI_binop_all_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd,
int_x86_avx2_pmadd_wd, SSE_PMADD, 1>;
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoVLX_Or_NoBWI] in
defm VPSADBW : PDI_binop_rm2<0xF6, "vpsadbw", X86psadbw, v2i64, v16i8, VR128,
loadv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
VEX_4V;
-let Predicates = [HasAVX2] in
+let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in
defm VPSADBWY : PDI_binop_rm2<0xF6, "vpsadbw", X86psadbw, v4i64, v32i8, VR256,
loadv4i64, i256mem, SSE_INTMUL_ITINS_P, 1, 0>,
VEX_4V, VEX_L;
@@ -4105,11 +4023,11 @@ let Constraints = "$src1 = $dst" in
defm PSADBW : PDI_binop_rm2<0xF6, "psadbw", X86psadbw, v2i64, v16i8, VR128,
memopv2i64, i128mem, SSE_INTALU_ITINS_P, 1>;
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoVLX] in
defm VPMULUDQ : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v2i64, v4i32, VR128,
loadv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
VEX_4V;
-let Predicates = [HasAVX2] in
+let Predicates = [HasAVX2, NoVLX] in
defm VPMULUDQY : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v4i64, v8i32,
VR256, loadv4i64, i256mem,
SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
@@ -4123,33 +4041,33 @@ defm PMULUDQ : PDI_binop_rm2<0xF4, "pmuludq", X86pmuludq, v2i64, v4i32, VR128,
let Predicates = [HasAVX, NoVLX] in {
defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
- VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
+ VR128, v4i32, v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
- VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
+ VR128, v2i64, v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
- VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
+ VR128, v4i32, v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
- VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
+ VR128, v2i64, v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
- VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
+ VR128, v4i32, v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
} // Predicates = [HasAVX, NoVLX]
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
- VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
+ VR128, v8i16, v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
- VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
+ VR128, v8i16, v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
- VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
+ VR128, v8i16, v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
} // Predicates = [HasAVX, NoVLX_Or_NoBWI]
@@ -4161,46 +4079,46 @@ let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift] ,
(outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
"vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (v2i64 (X86vshldq VR128:$src1, (i8 imm:$src2))))]>,
+ (v16i8 (X86vshldq VR128:$src1, (i8 imm:$src2))))]>,
VEX_4V;
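+// X86vshldq/X86vshrdq shift the whole register by a byte count, so v16i8
+// reflects the operation's actual element granularity.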
def VPSRLDQri : PDIi8<0x73, MRM3r,
(outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
"vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (v2i64 (X86vshrdq VR128:$src1, (i8 imm:$src2))))]>,
+ (v16i8 (X86vshrdq VR128:$src1, (i8 imm:$src2))))]>,
VEX_4V;
// PSRADQri doesn't exist in SSE[1-3].
} // Predicates = [HasAVX, NoVLX_Or_NoBWI]
let Predicates = [HasAVX2, NoVLX] in {
defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
- VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
+ VR256, v8i32, v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
- VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
+ VR256, v4i64, v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
- VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
+ VR256, v8i32, v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
- VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
+ VR256, v4i64, v2i64, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
- VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
+ VR256, v8i32, v4i32, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
}// Predicates = [HasAVX2, NoVLX]
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
- VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
+ VR256, v16i16, v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
- VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
+ VR256, v16i16, v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
- VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
+ VR256, v16i16, v8i16, loadv2i64,
SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
}// Predicates = [HasAVX2, NoVLX_Or_NoBWI]
@@ -4211,43 +4129,43 @@ let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 ,
(outs VR256:$dst), (ins VR256:$src1, u8imm:$src2),
"vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR256:$dst,
- (v4i64 (X86vshldq VR256:$src1, (i8 imm:$src2))))]>,
+ (v32i8 (X86vshldq VR256:$src1, (i8 imm:$src2))))]>,
VEX_4V, VEX_L;
def VPSRLDQYri : PDIi8<0x73, MRM3r,
(outs VR256:$dst), (ins VR256:$src1, u8imm:$src2),
"vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR256:$dst,
- (v4i64 (X86vshrdq VR256:$src1, (i8 imm:$src2))))]>,
+ (v32i8 (X86vshrdq VR256:$src1, (i8 imm:$src2))))]>,
VEX_4V, VEX_L;
// PSRADQYri doesn't exist in SSE[1-3].
} // Predicates = [HasAVX2, NoVLX_Or_NoBWI]
let Constraints = "$src1 = $dst" in {
defm PSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "psllw", X86vshl, X86vshli,
- VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
+ VR128, v8i16, v8i16, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "pslld", X86vshl, X86vshli,
- VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
+ VR128, v4i32, v4i32, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "psllq", X86vshl, X86vshli,
- VR128, v2i64, v2i64, bc_v2i64, memopv2i64,
+ VR128, v2i64, v2i64, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "psrlw", X86vsrl, X86vsrli,
- VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
+ VR128, v8i16, v8i16, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "psrld", X86vsrl, X86vsrli,
- VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
+ VR128, v4i32, v4i32, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "psrlq", X86vsrl, X86vsrli,
- VR128, v2i64, v2i64, bc_v2i64, memopv2i64,
+ VR128, v2i64, v2i64, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "psraw", X86vsra, X86vsrai,
- VR128, v8i16, v8i16, bc_v8i16, memopv2i64,
+ VR128, v8i16, v8i16, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
defm PSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "psrad", X86vsra, X86vsrai,
- VR128, v4i32, v4i32, bc_v4i32, memopv2i64,
+ VR128, v4i32, v4i32, memopv2i64,
SSE_INTSHIFT_ITINS_P>;
let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 in {
@@ -4256,13 +4174,13 @@ let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 in {
(outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
"pslldq\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
- (v2i64 (X86vshldq VR128:$src1, (i8 imm:$src2))))],
+ (v16i8 (X86vshldq VR128:$src1, (i8 imm:$src2))))],
IIC_SSE_INTSHDQ_P_RI>;
def PSRLDQri : PDIi8<0x73, MRM3r,
(outs VR128:$dst), (ins VR128:$src1, u8imm:$src2),
"psrldq\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
- (v2i64 (X86vshrdq VR128:$src1, (i8 imm:$src2))))],
+ (v16i8 (X86vshrdq VR128:$src1, (i8 imm:$src2))))],
IIC_SSE_INTSHDQ_P_RI>;
// PSRADQri doesn't exist in SSE[1-3].
}
@@ -4273,17 +4191,17 @@ let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 in {
//===---------------------------------------------------------------------===//
defm PCMPEQB : PDI_binop_all<0x74, "pcmpeqb", X86pcmpeq, v16i8, v32i8,
- SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
+ SSE_INTALU_ITINS_P, 1, TruePredicate>;
defm PCMPEQW : PDI_binop_all<0x75, "pcmpeqw", X86pcmpeq, v8i16, v16i16,
- SSE_INTALU_ITINS_P, 1, NoVLX_Or_NoBWI>;
+ SSE_INTALU_ITINS_P, 1, TruePredicate>;
defm PCMPEQD : PDI_binop_all<0x76, "pcmpeqd", X86pcmpeq, v4i32, v8i32,
- SSE_INTALU_ITINS_P, 1, NoVLX>;
+ SSE_INTALU_ITINS_P, 1, TruePredicate>;
defm PCMPGTB : PDI_binop_all<0x64, "pcmpgtb", X86pcmpgt, v16i8, v32i8,
- SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
+ SSE_INTALU_ITINS_P, 0, TruePredicate>;
defm PCMPGTW : PDI_binop_all<0x65, "pcmpgtw", X86pcmpgt, v8i16, v16i16,
- SSE_INTALU_ITINS_P, 0, NoVLX_Or_NoBWI>;
+ SSE_INTALU_ITINS_P, 0, TruePredicate>;
defm PCMPGTD : PDI_binop_all<0x66, "pcmpgtd", X86pcmpgt, v4i32, v8i32,
- SSE_INTALU_ITINS_P, 0, NoVLX>;
+ SSE_INTALU_ITINS_P, 0, TruePredicate>;
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Shuffle Instructions
@@ -4291,8 +4209,8 @@ defm PCMPGTD : PDI_binop_all<0x66, "pcmpgtd", X86pcmpgt, v4i32, v8i32,
let ExeDomain = SSEPackedInt in {
multiclass sse2_pshuffle<string OpcodeStr, ValueType vt128, ValueType vt256,
- SDNode OpNode> {
-let Predicates = [HasAVX] in {
+ SDNode OpNode, Predicate prd> {
+let Predicates = [HasAVX, prd] in {
def V#NAME#ri : Ii8<0x70, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, u8imm:$src2),
!strconcat("v", OpcodeStr,
@@ -4310,7 +4228,7 @@ let Predicates = [HasAVX] in {
Sched<[WriteShuffleLd]>;
}
-let Predicates = [HasAVX2] in {
+let Predicates = [HasAVX2, prd] in {
def V#NAME#Yri : Ii8<0x70, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, u8imm:$src2),
!strconcat("v", OpcodeStr,
@@ -4348,9 +4266,11 @@ let Predicates = [UseSSE2] in {
}
} // ExeDomain = SSEPackedInt
-defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, v8i32, X86PShufd>, PD;
-defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, v16i16, X86PShufhw>, XS;
-defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, v16i16, X86PShuflw>, XD;
+defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, v8i32, X86PShufd, NoVLX>, PD;
+defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, v16i16, X86PShufhw,
+ NoVLX_Or_NoBWI>, XS;
+defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, v16i16, X86PShuflw,
+ NoVLX_Or_NoBWI>, XD;
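+// pshufd needs only AVX512VL for an EVEX form, while the word shuffles also
+// need BWI; the prd parameter threads the right requirement through.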
let Predicates = [HasAVX] in {
def : Pat<(v4f32 (X86PShufd (loadv4f32 addr:$src1), (i8 imm:$imm))),
@@ -4372,8 +4292,8 @@ let Predicates = [UseSSE2] in {
let ExeDomain = SSEPackedInt in {
multiclass sse2_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
- ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
- PatFrag ld_frag, bit Is2Addr = 1> {
+ ValueType ArgVT, SDNode OpNode, PatFrag ld_frag,
+ bit Is2Addr = 1> {
def rr : PDI<opc, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
@@ -4390,13 +4310,13 @@ multiclass sse2_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst,
- (OutVT (OpNode VR128:$src1,
- (bc_frag (ld_frag addr:$src2)))))]>,
+ (OutVT (OpNode (ArgVT VR128:$src1),
+ (bitconvert (ld_frag addr:$src2)))))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
multiclass sse2_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
- ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
+ ValueType ArgVT, SDNode OpNode> {
def Yrr : PDI<opc, MRMSrcReg,
(outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
!strconcat(OpcodeStr,
@@ -4409,14 +4329,14 @@ multiclass sse2_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
- (OutVT (OpNode VR256:$src1,
- (bc_frag (loadv4i64 addr:$src2)))))]>,
+ (OutVT (OpNode (ArgVT VR256:$src1),
+ (bitconvert (loadv4i64 addr:$src2)))))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
- ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
- PatFrag ld_frag, bit Is2Addr = 1> {
+ ValueType ArgVT, SDNode OpNode, PatFrag ld_frag,
+ bit Is2Addr = 1> {
def rr : SS48I<opc, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
@@ -4433,13 +4353,13 @@ multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst,
- (OutVT (OpNode VR128:$src1,
- (bc_frag (ld_frag addr:$src2)))))]>,
+ (OutVT (OpNode (ArgVT VR128:$src1),
+ (bitconvert (ld_frag addr:$src2)))))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
multiclass sse4_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
- ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
+ ValueType ArgVT, SDNode OpNode> {
def Yrr : SS48I<opc, MRMSrcReg,
(outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
!strconcat(OpcodeStr,
@@ -4452,47 +4372,46 @@ multiclass sse4_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
- (OutVT (OpNode VR256:$src1,
- (bc_frag (loadv4i64 addr:$src2)))))]>,
+ (OutVT (OpNode (ArgVT VR256:$src1),
+ (bitconvert (loadv4i64 addr:$src2)))))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
-let Predicates = [HasAVX] in {
+let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
defm VPACKSSWB : sse2_pack<0x63, "vpacksswb", v16i8, v8i16, X86Packss,
- bc_v8i16, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPACKSSDW : sse2_pack<0x6B, "vpackssdw", v8i16, v4i32, X86Packss,
- bc_v4i32, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPACKUSWB : sse2_pack<0x67, "vpackuswb", v16i8, v8i16, X86Packus,
- bc_v8i16, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPACKUSDW : sse4_pack<0x2B, "vpackusdw", v8i16, v4i32, X86Packus,
- bc_v4i32, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
}
-let Predicates = [HasAVX2] in {
- defm VPACKSSWB : sse2_pack_y<0x63, "vpacksswb", v32i8, v16i16, X86Packss,
- bc_v16i16>, VEX_4V, VEX_L;
- defm VPACKSSDW : sse2_pack_y<0x6B, "vpackssdw", v16i16, v8i32, X86Packss,
- bc_v8i32>, VEX_4V, VEX_L;
+let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
+ defm VPACKSSWB : sse2_pack_y<0x63, "vpacksswb", v32i8, v16i16, X86Packss>,
+ VEX_4V, VEX_L;
+ defm VPACKSSDW : sse2_pack_y<0x6B, "vpackssdw", v16i16, v8i32, X86Packss>,
+ VEX_4V, VEX_L;
- defm VPACKUSWB : sse2_pack_y<0x67, "vpackuswb", v32i8, v16i16, X86Packus,
- bc_v16i16>, VEX_4V, VEX_L;
- defm VPACKUSDW : sse4_pack_y<0x2B, "vpackusdw", v16i16, v8i32, X86Packus,
- bc_v8i32>, VEX_4V, VEX_L;
+ defm VPACKUSWB : sse2_pack_y<0x67, "vpackuswb", v32i8, v16i16, X86Packus>,
+ VEX_4V, VEX_L;
+ defm VPACKUSDW : sse4_pack_y<0x2B, "vpackusdw", v16i16, v8i32, X86Packus>,
+ VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
defm PACKSSWB : sse2_pack<0x63, "packsswb", v16i8, v8i16, X86Packss,
- bc_v8i16, memopv2i64>;
+ memopv2i64>;
defm PACKSSDW : sse2_pack<0x6B, "packssdw", v8i16, v4i32, X86Packss,
- bc_v4i32, memopv2i64>;
+ memopv2i64>;
defm PACKUSWB : sse2_pack<0x67, "packuswb", v16i8, v8i16, X86Packus,
- bc_v8i16, memopv2i64>;
+ memopv2i64>;
- let Predicates = [HasSSE41] in
defm PACKUSDW : sse4_pack<0x2B, "packusdw", v8i16, v4i32, X86Packus,
- bc_v4i32, memopv2i64>;
+ memopv2i64>;
}
} // ExeDomain = SSEPackedInt
@@ -4502,8 +4421,7 @@ let Constraints = "$src1 = $dst" in {
let ExeDomain = SSEPackedInt in {
multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
- SDNode OpNode, PatFrag bc_frag, PatFrag ld_frag,
- bit Is2Addr = 1> {
+ SDNode OpNode, PatFrag ld_frag, bit Is2Addr = 1> {
def rr : PDI<opc, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
!if(Is2Addr,
@@ -4516,14 +4434,14 @@ multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
!if(Is2Addr,
!strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (OpNode VR128:$src1,
- (bc_frag (ld_frag addr:$src2))))],
+ [(set VR128:$dst, (vt (OpNode VR128:$src1,
+ (bitconvert (ld_frag addr:$src2)))))],
IIC_SSE_UNPCK>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
multiclass sse2_unpack_y<bits<8> opc, string OpcodeStr, ValueType vt,
- SDNode OpNode, PatFrag bc_frag> {
+ SDNode OpNode> {
def Yrr : PDI<opc, MRMSrcReg,
(outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
!strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -4532,72 +4450,72 @@ multiclass sse2_unpack_y<bits<8> opc, string OpcodeStr, ValueType vt,
def Yrm : PDI<opc, MRMSrcMem,
(outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
!strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst, (OpNode VR256:$src1,
- (bc_frag (loadv4i64 addr:$src2))))]>,
+ [(set VR256:$dst, (vt (OpNode VR256:$src1,
+ (bitconvert (loadv4i64 addr:$src2)))))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl,
- bc_v16i8, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl,
- bc_v8i16, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh,
- bc_v16i8, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh,
- bc_v8i16, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
}
let Predicates = [HasAVX, NoVLX] in {
defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl,
- bc_v4i32, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl,
- bc_v2i64, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh,
- bc_v4i32, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh,
- bc_v2i64, loadv2i64, 0>, VEX_4V;
+ loadv2i64, 0>, VEX_4V;
}
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
- defm VPUNPCKLBW : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl,
- bc_v32i8>, VEX_4V, VEX_L;
- defm VPUNPCKLWD : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl,
- bc_v16i16>, VEX_4V, VEX_L;
- defm VPUNPCKHBW : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh,
- bc_v32i8>, VEX_4V, VEX_L;
- defm VPUNPCKHWD : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh,
- bc_v16i16>, VEX_4V, VEX_L;
+ defm VPUNPCKLBW : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl>,
+ VEX_4V, VEX_L;
+ defm VPUNPCKLWD : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl>,
+ VEX_4V, VEX_L;
+ defm VPUNPCKHBW : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh>,
+ VEX_4V, VEX_L;
+ defm VPUNPCKHWD : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh>,
+ VEX_4V, VEX_L;
}
let Predicates = [HasAVX2, NoVLX] in {
- defm VPUNPCKLDQ : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl,
- bc_v8i32>, VEX_4V, VEX_L;
- defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl,
- bc_v4i64>, VEX_4V, VEX_L;
- defm VPUNPCKHDQ : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh,
- bc_v8i32>, VEX_4V, VEX_L;
- defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh,
- bc_v4i64>, VEX_4V, VEX_L;
+ defm VPUNPCKLDQ : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl>,
+ VEX_4V, VEX_L;
+ defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl>,
+ VEX_4V, VEX_L;
+ defm VPUNPCKHDQ : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh>,
+ VEX_4V, VEX_L;
+ defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh>,
+ VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Unpckl,
- bc_v16i8, memopv2i64>;
+ memopv2i64>;
defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Unpckl,
- bc_v8i16, memopv2i64>;
+ memopv2i64>;
defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Unpckl,
- bc_v4i32, memopv2i64>;
+ memopv2i64>;
defm PUNPCKLQDQ : sse2_unpack<0x6C, "punpcklqdq", v2i64, X86Unpckl,
- bc_v2i64, memopv2i64>;
+ memopv2i64>;
defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Unpckh,
- bc_v16i8, memopv2i64>;
+ memopv2i64>;
defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Unpckh,
- bc_v8i16, memopv2i64>;
+ memopv2i64>;
defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Unpckh,
- bc_v4i32, memopv2i64>;
+ memopv2i64>;
defm PUNPCKHQDQ : sse2_unpack<0x6D, "punpckhqdq", v2i64, X86Unpckh,
- bc_v2i64, memopv2i64>;
+ memopv2i64>;
}
} // ExeDomain = SSEPackedInt
@@ -4661,20 +4579,20 @@ let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLogic] in {
def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
(ins VR128:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
+ [(set GR32orGR64:$dst, (X86movmsk (v16i8 VR128:$src)))],
IIC_SSE_MOVMSK>, VEX;
let Predicates = [HasAVX2] in {
def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
(ins VR256:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32orGR64:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>,
+ [(set GR32orGR64:$dst, (X86movmsk (v32i8 VR256:$src)))]>,
VEX, VEX_L;
}
def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32orGR64:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
+ [(set GR32orGR64:$dst, (X86movmsk (v16i8 VR128:$src)))],
IIC_SSE_MOVMSK>;
} // ExeDomain = SSEPackedInt
@@ -5022,16 +4940,14 @@ def MOVPQI2QIrr : S2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>;
}
+// Aliases to help the assembler pick two-byte VEX encodings by swapping the
+// operands relative to the normal instructions, so that VEX.R is used instead
+// of VEX.B.
+def : InstAlias<"vmovq\t{$src, $dst|$dst, $src}",
+ (VMOVPQI2QIrr VR128L:$dst, VR128H:$src), 0>;
+
//===---------------------------------------------------------------------===//
// Store / copy lower 64-bits of a XMM register.
//
-let Predicates = [HasAVX] in
-def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
- (VMOVPQI2QImr addr:$dst, VR128:$src)>;
-let Predicates = [UseSSE2] in
-def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
- (MOVPQI2QImr addr:$dst, VR128:$src)>;
-
let ExeDomain = SSEPackedInt, isCodeGenOnly = 1, AddedComplexity = 20 in {
def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
@@ -5058,6 +4974,8 @@ let Predicates = [UseAVX], AddedComplexity = 20 in {
def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
(v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
(SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrm addr:$src), sub_xmm)>;
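+  // VMOVZQI2PQIrm zero-extends into the full XMM register, and VEX 128-bit
+  // ops zero the upper YMM lanes, so SUBREG_TO_REG just retags the value as
+  // 256-bit without emitting any code.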
+ def : Pat<(v4i64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrm addr:$src), sub_xmm)>;
}
let Predicates = [UseSSE2], AddedComplexity = 20 in {
@@ -5066,13 +4984,6 @@ let Predicates = [UseSSE2], AddedComplexity = 20 in {
def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
}
-let Predicates = [HasAVX] in {
-def : Pat<(v4i64 (alignedX86vzload addr:$src)),
- (SUBREG_TO_REG (i32 0), (VMOVAPSrm addr:$src), sub_xmm)>;
-def : Pat<(v4i64 (X86vzload addr:$src)),
- (SUBREG_TO_REG (i32 0), (VMOVUPSrm addr:$src), sub_xmm)>;
-}
-
//===---------------------------------------------------------------------===//
// Moving from XMM to XMM and clearing the upper 64 bits. Note: there is a bug
// in the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
@@ -5442,38 +5353,36 @@ let Constraints = "$src1 = $dst" in {
/// SS3I_unop_rm - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
-multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
- PatFrag ld_frag> {
+multiclass SS3I_unop_rm<bits<8> opc, string OpcodeStr, ValueType vt,
+ SDNode OpNode, PatFrag ld_frag> {
def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId128 VR128:$src))], IIC_SSE_PABS_RR>,
- Sched<[WriteVecALU]>;
+ [(set VR128:$dst, (vt (OpNode VR128:$src)))],
+ IIC_SSE_PABS_RR>, Sched<[WriteVecALU]>;
def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
- (IntId128
- (bitconvert (ld_frag addr:$src))))], IIC_SSE_PABS_RM>,
- Sched<[WriteVecALULd]>;
+ (vt (OpNode (bitconvert (ld_frag addr:$src)))))],
+ IIC_SSE_PABS_RM>, Sched<[WriteVecALULd]>;
}
/// SS3I_unop_rm_y - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
-multiclass SS3I_unop_rm_int_y<bits<8> opc, string OpcodeStr,
- Intrinsic IntId256> {
+multiclass SS3I_unop_rm_y<bits<8> opc, string OpcodeStr, ValueType vt,
+ SDNode OpNode> {
def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (IntId256 VR256:$src))]>,
+ [(set VR256:$dst, (vt (OpNode VR256:$src)))]>,
Sched<[WriteVecALU]>;
def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
(ins i256mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst,
- (IntId256
- (bitconvert (loadv4i64 addr:$src))))]>,
+ (vt (OpNode (bitconvert (loadv4i64 addr:$src)))))]>,
Sched<[WriteVecALULd]>;
}
@@ -5487,14 +5396,15 @@ def v32i1sextv32i8 : PatLeaf<(v32i8 (X86pcmpgt (bc_v32i8 (v8i32 immAllZerosV)),
def v16i1sextv16i16: PatLeaf<(v16i16 (X86vsrai VR256:$src, (i8 15)))>;
def v8i1sextv8i32 : PatLeaf<(v8i32 (X86vsrai VR256:$src, (i8 31)))>;
-let Predicates = [HasAVX] in {
- defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", int_x86_ssse3_pabs_b_128,
- loadv2i64>, VEX;
- defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", int_x86_ssse3_pabs_w_128,
- loadv2i64>, VEX;
- defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", int_x86_ssse3_pabs_d_128,
- loadv2i64>, VEX;
+let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
+ defm VPABSB : SS3I_unop_rm<0x1C, "vpabsb", v16i8, X86Abs, loadv2i64>, VEX;
+ defm VPABSW : SS3I_unop_rm<0x1D, "vpabsw", v8i16, X86Abs, loadv2i64>, VEX;
+}
+let Predicates = [HasAVX, NoVLX] in {
+ defm VPABSD : SS3I_unop_rm<0x1E, "vpabsd", v4i32, X86Abs, loadv2i64>, VEX;
+}
+let Predicates = [HasAVX] in {
def : Pat<(xor
(bc_v2i64 (v16i1sextv16i8)),
(bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
@@ -5509,14 +5419,15 @@ let Predicates = [HasAVX] in {
(VPABSDrr128 VR128:$src)>;
}
-let Predicates = [HasAVX2] in {
- defm VPABSB : SS3I_unop_rm_int_y<0x1C, "vpabsb",
- int_x86_avx2_pabs_b>, VEX, VEX_L;
- defm VPABSW : SS3I_unop_rm_int_y<0x1D, "vpabsw",
- int_x86_avx2_pabs_w>, VEX, VEX_L;
- defm VPABSD : SS3I_unop_rm_int_y<0x1E, "vpabsd",
- int_x86_avx2_pabs_d>, VEX, VEX_L;
+let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
+ defm VPABSB : SS3I_unop_rm_y<0x1C, "vpabsb", v32i8, X86Abs>, VEX, VEX_L;
+ defm VPABSW : SS3I_unop_rm_y<0x1D, "vpabsw", v16i16, X86Abs>, VEX, VEX_L;
+}
+let Predicates = [HasAVX2, NoVLX] in {
+ defm VPABSD : SS3I_unop_rm_y<0x1E, "vpabsd", v8i32, X86Abs>, VEX, VEX_L;
+}
+let Predicates = [HasAVX2] in {
def : Pat<(xor
(bc_v4i64 (v32i1sextv32i8)),
(bc_v4i64 (add (v32i8 VR256:$src), (v32i1sextv32i8)))),
@@ -5531,14 +5442,11 @@ let Predicates = [HasAVX2] in {
(VPABSDrr256 VR256:$src)>;
}
-defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", int_x86_ssse3_pabs_b_128,
- memopv2i64>;
-defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", int_x86_ssse3_pabs_w_128,
- memopv2i64>;
-defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", int_x86_ssse3_pabs_d_128,
- memopv2i64>;
+defm PABSB : SS3I_unop_rm<0x1C, "pabsb", v16i8, X86Abs, memopv2i64>;
+defm PABSW : SS3I_unop_rm<0x1D, "pabsw", v8i16, X86Abs, memopv2i64>;
+defm PABSD : SS3I_unop_rm<0x1E, "pabsd", v4i32, X86Abs, memopv2i64>;
-let Predicates = [HasSSSE3] in {
+let Predicates = [UseSSSE3] in {
def : Pat<(xor
(bc_v2i64 (v16i1sextv16i8)),
(bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
@@ -5659,15 +5567,15 @@ let isCommutable = 0 in {
defm VPHSUBD : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, VR128,
loadv2i64, i128mem,
SSE_PHADDSUBD, 0>, VEX_4V;
- defm VPSIGNB : SS3I_binop_rm<0x08, "vpsignb", X86psign, v16i8, VR128,
- loadv2i64, i128mem,
- SSE_PSIGN, 0>, VEX_4V;
- defm VPSIGNW : SS3I_binop_rm<0x09, "vpsignw", X86psign, v8i16, VR128,
- loadv2i64, i128mem,
- SSE_PSIGN, 0>, VEX_4V;
- defm VPSIGND : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v4i32, VR128,
- loadv2i64, i128mem,
- SSE_PSIGN, 0>, VEX_4V;
+ defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb",
+ int_x86_ssse3_psign_b_128,
+ SSE_PSIGN, loadv2i64, 0>, VEX_4V;
+ defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw",
+ int_x86_ssse3_psign_w_128,
+ SSE_PSIGN, loadv2i64, 0>, VEX_4V;
+ defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd",
+ int_x86_ssse3_psign_d_128,
+ SSE_PSIGN, loadv2i64, 0>, VEX_4V;
defm VPSHUFB : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, VR128,
loadv2i64, i128mem,
SSE_PSHUFB, 0>, VEX_4V;
@@ -5700,15 +5608,12 @@ let isCommutable = 0 in {
defm VPHSUBDY : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, VR256,
loadv4i64, i256mem,
SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
- defm VPSIGNBY : SS3I_binop_rm<0x08, "vpsignb", X86psign, v32i8, VR256,
- loadv4i64, i256mem,
- SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
- defm VPSIGNWY : SS3I_binop_rm<0x09, "vpsignw", X86psign, v16i16, VR256,
- loadv4i64, i256mem,
- SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
- defm VPSIGNDY : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v8i32, VR256,
- loadv4i64, i256mem,
- SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
+ defm VPSIGNBY : SS3I_binop_rm_int_y<0x08, "vpsignb", int_x86_avx2_psign_b,
+ WriteVecALU>, VEX_4V, VEX_L;
+ defm VPSIGNWY : SS3I_binop_rm_int_y<0x09, "vpsignw", int_x86_avx2_psign_w,
+ WriteVecALU>, VEX_4V, VEX_L;
+ defm VPSIGNDY : SS3I_binop_rm_int_y<0x0A, "vpsignd", int_x86_avx2_psign_d,
+ WriteVecALU>, VEX_4V, VEX_L;
defm VPSHUFBY : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256,
loadv4i64, i256mem,
SSE_PSHUFB, 0>, VEX_4V, VEX_L;
@@ -5738,12 +5643,12 @@ let isCommutable = 0 in {
memopv2i64, i128mem, SSE_PHADDSUBW>;
defm PHSUBD : SS3I_binop_rm<0x06, "phsubd", X86hsub, v4i32, VR128,
memopv2i64, i128mem, SSE_PHADDSUBD>;
- defm PSIGNB : SS3I_binop_rm<0x08, "psignb", X86psign, v16i8, VR128,
- memopv2i64, i128mem, SSE_PSIGN>;
- defm PSIGNW : SS3I_binop_rm<0x09, "psignw", X86psign, v8i16, VR128,
- memopv2i64, i128mem, SSE_PSIGN>;
- defm PSIGND : SS3I_binop_rm<0x0A, "psignd", X86psign, v4i32, VR128,
- memopv2i64, i128mem, SSE_PSIGN>;
+ defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", int_x86_ssse3_psign_b_128,
+ SSE_PSIGN, memopv2i64>;
+ defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", int_x86_ssse3_psign_w_128,
+ SSE_PSIGN, memopv2i64>;
+ defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", int_x86_ssse3_psign_d_128,
+ SSE_PSIGN, memopv2i64>;
defm PSHUFB : SS3I_binop_rm<0x00, "pshufb", X86pshufb, v16i8, VR128,
memopv2i64, i128mem, SSE_PSHUFB>;
defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw",
@@ -5767,7 +5672,7 @@ defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw",
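+// The palignr records use the common rri/rmi (and Yrri/Yrmi) suffixes so the
+// legacy, VEX, and VEX.256 forms can be referenced uniformly from patterns.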
multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
let hasSideEffects = 0 in {
- def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
+ def rri : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5775,7 +5680,7 @@ multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
[], IIC_SSE_PALIGNRR>, Sched<[WriteShuffle]>;
let mayLoad = 1 in
- def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
+ def rmi : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, u8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
@@ -5787,13 +5692,13 @@ multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
multiclass ssse3_palignr_y<string asm, bit Is2Addr = 1> {
let hasSideEffects = 0 in {
- def R256rr : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
+ def Yrri : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, u8imm:$src3),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[]>, Sched<[WriteShuffle]>;
let mayLoad = 1 in
- def R256rm : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
+ def Yrmi : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i256mem:$src2, u8imm:$src3),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
@@ -5802,43 +5707,43 @@ multiclass ssse3_palignr_y<string asm, bit Is2Addr = 1> {
}
let Predicates = [HasAVX] in
- defm VPALIGN : ssse3_palignr<"vpalignr", 0>, VEX_4V;
+ defm VPALIGNR : ssse3_palignr<"vpalignr", 0>, VEX_4V;
let Predicates = [HasAVX2] in
- defm VPALIGN : ssse3_palignr_y<"vpalignr", 0>, VEX_4V, VEX_L;
+ defm VPALIGNR : ssse3_palignr_y<"vpalignr", 0>, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst", Predicates = [UseSSSE3] in
- defm PALIGN : ssse3_palignr<"palignr">;
+ defm PALIGNR : ssse3_palignr<"palignr">;
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
def : Pat<(v8i32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
- (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
+ (VPALIGNRYrri VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v8f32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
- (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
+ (VPALIGNRYrri VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
- (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
+ (VPALIGNRYrri VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
- (VPALIGNR256rr VR256:$src1, VR256:$src2, imm:$imm)>;
+ (VPALIGNRYrri VR256:$src1, VR256:$src2, imm:$imm)>;
}
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (VPALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (VPALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (VPALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (VPALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (VPALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
}
let Predicates = [UseSSSE3] in {
def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (PALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (PALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (PALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (PALIGNR128rr VR128:$src1, VR128:$src2, imm:$imm)>;
+ (PALIGNRrri VR128:$src1, VR128:$src2, imm:$imm)>;
}
//===---------------------------------------------------------------------===//
@@ -5855,6 +5760,7 @@ def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
let Uses = [EAX, ECX, EDX] in
def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", [], IIC_SSE_MONITOR>,
TB, Requires<[HasSSE3]>;
+
let Uses = [ECX, EAX] in
def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait",
[(int_x86_sse3_mwait ECX, EAX)], IIC_SSE_MWAIT>,
@@ -5890,45 +5796,48 @@ multiclass SS41I_pmovx_rrrm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
multiclass SS41I_pmovx_rm_all<bits<8> opc, string OpcodeStr,
X86MemOperand MemOp, X86MemOperand MemYOp,
OpndItins SSEItins, OpndItins AVXItins,
- OpndItins AVX2Itins> {
+ OpndItins AVX2Itins, Predicate prd> {
defm NAME : SS41I_pmovx_rrrm<opc, OpcodeStr, MemOp, VR128, VR128, SSEItins>;
- let Predicates = [HasAVX, NoVLX] in
+ let Predicates = [HasAVX, prd] in
defm V#NAME : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemOp,
VR128, VR128, AVXItins>, VEX;
- let Predicates = [HasAVX2, NoVLX] in
+ let Predicates = [HasAVX2, prd] in
defm V#NAME#Y : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemYOp,
VR256, VR128, AVX2Itins>, VEX, VEX_L;
}
-multiclass SS41I_pmovx_rm<bits<8> opc, string OpcodeStr,
- X86MemOperand MemOp, X86MemOperand MemYOp> {
+multiclass SS41I_pmovx_rm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
+ X86MemOperand MemYOp, Predicate prd> {
defm PMOVSX#NAME : SS41I_pmovx_rm_all<opc, !strconcat("pmovsx", OpcodeStr),
MemOp, MemYOp,
SSE_INTALU_ITINS_SHUFF_P,
DEFAULT_ITINS_SHUFFLESCHED,
- DEFAULT_ITINS_SHUFFLESCHED>;
+ DEFAULT_ITINS_SHUFFLESCHED, prd>;
defm PMOVZX#NAME : SS41I_pmovx_rm_all<!add(opc, 0x10),
!strconcat("pmovzx", OpcodeStr),
MemOp, MemYOp,
SSE_INTALU_ITINS_SHUFF_P,
DEFAULT_ITINS_SHUFFLESCHED,
- DEFAULT_ITINS_SHUFFLESCHED>;
+ DEFAULT_ITINS_SHUFFLESCHED, prd>;
}
-defm BW : SS41I_pmovx_rm<0x20, "bw", i64mem, i128mem>;
-defm WD : SS41I_pmovx_rm<0x23, "wd", i64mem, i128mem>;
-defm DQ : SS41I_pmovx_rm<0x25, "dq", i64mem, i128mem>;
+defm BW : SS41I_pmovx_rm<0x20, "bw", i64mem, i128mem, NoVLX_Or_NoBWI>;
+defm WD : SS41I_pmovx_rm<0x23, "wd", i64mem, i128mem, NoVLX>;
+defm DQ : SS41I_pmovx_rm<0x25, "dq", i64mem, i128mem, NoVLX>;
-defm BD : SS41I_pmovx_rm<0x21, "bd", i32mem, i64mem>;
-defm WQ : SS41I_pmovx_rm<0x24, "wq", i32mem, i64mem>;
+defm BD : SS41I_pmovx_rm<0x21, "bd", i32mem, i64mem, NoVLX>;
+defm WQ : SS41I_pmovx_rm<0x24, "wq", i32mem, i64mem, NoVLX>;
-defm BQ : SS41I_pmovx_rm<0x22, "bq", i16mem, i32mem>;
+defm BQ : SS41I_pmovx_rm<0x22, "bq", i16mem, i32mem, NoVLX>;
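+// Only the byte-to-word extensions have EVEX forms gated on AVX512BW, hence
+// NoVLX_Or_NoBWI for "bw"; the wider-element extensions need just NoVLX.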
// AVX2 Patterns
multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtOp> {
// Register-Register patterns
+ let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v16i16 (ExtOp (v16i8 VR128:$src))),
(!cast<I>(OpcPrefix#BWYrr) VR128:$src)>;
+ }
+ let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v8i32 (ExtOp (v16i8 VR128:$src))),
(!cast<I>(OpcPrefix#BDYrr) VR128:$src)>;
def : Pat<(v4i64 (ExtOp (v16i8 VR128:$src))),
@@ -5941,26 +5850,14 @@ multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtO
def : Pat<(v4i64 (ExtOp (v4i32 VR128:$src))),
(!cast<I>(OpcPrefix#DQYrr) VR128:$src)>;
-
- // On AVX2, we also support 256bit inputs.
- def : Pat<(v16i16 (ExtOp (v32i8 VR256:$src))),
- (!cast<I>(OpcPrefix#BWYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
- def : Pat<(v8i32 (ExtOp (v32i8 VR256:$src))),
- (!cast<I>(OpcPrefix#BDYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
- def : Pat<(v4i64 (ExtOp (v32i8 VR256:$src))),
- (!cast<I>(OpcPrefix#BQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
-
- def : Pat<(v8i32 (ExtOp (v16i16 VR256:$src))),
- (!cast<I>(OpcPrefix#WDYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
- def : Pat<(v4i64 (ExtOp (v16i16 VR256:$src))),
- (!cast<I>(OpcPrefix#WQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
-
- def : Pat<(v4i64 (ExtOp (v8i32 VR256:$src))),
- (!cast<I>(OpcPrefix#DQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
+ }
// Simple Register-Memory patterns
+ let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v16i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
(!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
+ }
+ let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
(!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
@@ -5973,8 +5870,10 @@ multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtO
def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
(!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
+ }
// AVX2 Register-Memory patterns
+ let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
(!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
def : Pat<(v16i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
@@ -5983,7 +5882,8 @@ multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtO
(!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
(!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
-
+ }
+ let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v8i32 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
(!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
def : Pat<(v8i32 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
@@ -6028,18 +5928,20 @@ multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtO
(!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
(!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
+ }
}
-let Predicates = [HasAVX2, NoVLX] in {
- defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", "s", X86vsext>;
- defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", "z", X86vzext>;
-}
+defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", "s", X86vsext>;
+defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", "z", X86vzext>;
// SSE4.1/AVX patterns.
multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
SDNode ExtOp, PatFrag ExtLoad16> {
+ let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v8i16 (ExtOp (v16i8 VR128:$src))),
(!cast<I>(OpcPrefix#BWrr) VR128:$src)>;
+ }
+ let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v4i32 (ExtOp (v16i8 VR128:$src))),
(!cast<I>(OpcPrefix#BDrr) VR128:$src)>;
def : Pat<(v2i64 (ExtOp (v16i8 VR128:$src))),
@@ -6052,9 +5954,12 @@ multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
def : Pat<(v2i64 (ExtOp (v4i32 VR128:$src))),
(!cast<I>(OpcPrefix#DQrr) VR128:$src)>;
-
+ }
+ let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v8i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
(!cast<I>(OpcPrefix#BWrm) addr:$src)>;
+ }
+ let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
(!cast<I>(OpcPrefix#BDrm) addr:$src)>;
def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
@@ -6067,7 +5972,8 @@ multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
(!cast<I>(OpcPrefix#DQrm) addr:$src)>;
-
+ }
+ let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
(!cast<I>(OpcPrefix#BWrm) addr:$src)>;
def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
@@ -6078,7 +5984,8 @@ multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
(!cast<I>(OpcPrefix#BWrm) addr:$src)>;
def : Pat<(v8i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
(!cast<I>(OpcPrefix#BWrm) addr:$src)>;
-
+ }
+ let Predicates = [HasAVX, NoVLX] in {
def : Pat<(v4i32 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
(!cast<I>(OpcPrefix#BDrm) addr:$src)>;
def : Pat<(v4i32 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
@@ -6127,12 +6034,11 @@ multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
(!cast<I>(OpcPrefix#DQrm) addr:$src)>;
def : Pat<(v2i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
(!cast<I>(OpcPrefix#DQrm) addr:$src)>;
+ }
}
-let Predicates = [HasAVX, NoVLX] in {
- defm : SS41I_pmovx_patterns<"VPMOVSX", "s", X86vsext, extloadi32i16>;
- defm : SS41I_pmovx_patterns<"VPMOVZX", "z", X86vzext, loadi16_anyext>;
-}
+defm : SS41I_pmovx_patterns<"VPMOVSX", "s", X86vsext, extloadi32i16>;
+defm : SS41I_pmovx_patterns<"VPMOVZX", "z", X86vzext, loadi16_anyext>;
let Predicates = [UseSSE41] in {
defm : SS41I_pmovx_patterns<"PMOVSX", "s", X86vsext, extloadi32i16>;
@@ -6859,63 +6765,67 @@ multiclass SS48I_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
let Predicates = [HasAVX, NoVLX] in {
- defm VPMINSB : SS48I_binop_rm<0x38, "vpminsb", smin, v16i8, VR128,
- loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
- VEX_4V;
defm VPMINSD : SS48I_binop_rm<0x39, "vpminsd", smin, v4i32, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V;
defm VPMINUD : SS48I_binop_rm<0x3B, "vpminud", umin, v4i32, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V;
- defm VPMINUW : SS48I_binop_rm<0x3A, "vpminuw", umin, v8i16, VR128,
+ defm VPMAXSD : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v4i32, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V;
- defm VPMAXSB : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v16i8, VR128,
+ defm VPMAXUD : SS48I_binop_rm<0x3F, "vpmaxud", umax, v4i32, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V;
- defm VPMAXSD : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v4i32, VR128,
+ defm VPMULDQ : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v2i64, v4i32,
+ VR128, loadv2i64, i128mem,
+ SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
+}
+let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
+ defm VPMINSB : SS48I_binop_rm<0x38, "vpminsb", smin, v16i8, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V;
- defm VPMAXUD : SS48I_binop_rm<0x3F, "vpmaxud", umax, v4i32, VR128,
+ defm VPMINUW : SS48I_binop_rm<0x3A, "vpminuw", umin, v8i16, VR128,
+ loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V;
+ defm VPMAXSB : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v16i8, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V;
defm VPMAXUW : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v8i16, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V;
- defm VPMULDQ : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v2i64, v4i32,
- VR128, loadv2i64, i128mem,
- SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
}
let Predicates = [HasAVX2, NoVLX] in {
- defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", smin, v32i8, VR256,
- loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
- VEX_4V, VEX_L;
defm VPMINSDY : SS48I_binop_rm<0x39, "vpminsd", smin, v8i32, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L;
defm VPMINUDY : SS48I_binop_rm<0x3B, "vpminud", umin, v8i32, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L;
- defm VPMINUWY : SS48I_binop_rm<0x3A, "vpminuw", umin, v16i16, VR256,
+ defm VPMAXSDY : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v8i32, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L;
- defm VPMAXSBY : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v32i8, VR256,
+ defm VPMAXUDY : SS48I_binop_rm<0x3F, "vpmaxud", umax, v8i32, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L;
- defm VPMAXSDY : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v8i32, VR256,
+ defm VPMULDQY : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v4i64, v8i32,
+ VR256, loadv4i64, i256mem,
+ SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
+}
+let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
+ defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", smin, v32i8, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L;
- defm VPMAXUDY : SS48I_binop_rm<0x3F, "vpmaxud", umax, v8i32, VR256,
+ defm VPMINUWY : SS48I_binop_rm<0x3A, "vpminuw", umin, v16i16, VR256,
+ loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
+ VEX_4V, VEX_L;
+ defm VPMAXSBY : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v32i8, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L;
defm VPMAXUWY : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v16i16, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L;
- defm VPMULDQY : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v4i64, v8i32,
- VR256, loadv4i64, i256mem,
- SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
}
let Constraints = "$src1 = $dst" in {
@@ -7238,14 +7148,12 @@ let Predicates = [UseAVX] in {
// on targets where they have equal performance. These were changed to use
// blends because blends have better throughput on SandyBridge and Haswell, but
// movs[s/d] are 1-2 byte shorter instructions.
-let Predicates = [UseSSE41] in {
+let Predicates = [UseSSE41], AddedComplexity = 15 in {
// With SSE41 we can use blends for these patterns.
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
(BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
(PBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
- def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
- (BLENDPDrri (v2f64 (V_SET0)), VR128:$src, (i8 1))>;
}
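Both instruction forms compute the same "keep lane 0, zero the upper lanes" operation, which is why the patterns can prefer the blend encoding purely on throughput grounds. A minimal user-level sketch of that equivalence (standard SSE/SSE4.1 intrinsics; the helper names are illustrative, not part of this patch):

  #include <smmintrin.h>  // SSE4.1

  // Keep lane 0 of x, zero lanes 1-3. The blend form is what the patterns
  // above now select; movss is the shorter legacy encoding.
  __m128 vzmovl_blend(__m128 x) {
    return _mm_blend_ps(_mm_setzero_ps(), x, 1);  // imm 1: lane 0 from x
  }
  __m128 vzmovl_movss(__m128 x) {
    return _mm_move_ss(_mm_setzero_ps(), x);
  }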
@@ -7316,13 +7224,14 @@ let Predicates = [UseSSE41] in {
(BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
}
+let AddedComplexity = 400 in { // Prefer non-temporal versions
let SchedRW = [WriteLoad] in {
-let Predicates = [HasAVX] in
+let Predicates = [HasAVX, NoVLX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"vmovntdqa\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
VEX;
-let Predicates = [HasAVX2] in
+let Predicates = [HasAVX2, NoVLX] in
def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
"vmovntdqa\t{$src, $dst|$dst, $src}",
[(set VR256:$dst, (int_x86_avx2_movntdqa addr:$src))]>,
@@ -7332,6 +7241,35 @@ def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
[(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>;
} // SchedRW
+let Predicates = [HasAVX2, NoVLX] in {
+ def : Pat<(v8f32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAYrm addr:$src)>;
+ def : Pat<(v4f64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAYrm addr:$src)>;
+ def : Pat<(v4i64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQAYrm addr:$src)>;
+}
+
+let Predicates = [HasAVX, NoVLX] in {
+ def : Pat<(v4f32 (alignednontemporalload addr:$src)),
+ (VMOVNTDQArm addr:$src)>;
+ def : Pat<(v2f64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQArm addr:$src)>;
+ def : Pat<(v2i64 (alignednontemporalload addr:$src)),
+ (VMOVNTDQArm addr:$src)>;
+}
+
+let Predicates = [UseSSE41] in {
+ def : Pat<(v4f32 (alignednontemporalload addr:$src)),
+ (MOVNTDQArm addr:$src)>;
+ def : Pat<(v2f64 (alignednontemporalload addr:$src)),
+ (MOVNTDQArm addr:$src)>;
+ def : Pat<(v2i64 (alignednontemporalload addr:$src)),
+ (MOVNTDQArm addr:$src)>;
+}
+
+} // AddedComplexity
+
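The patterns above extend (V)MOVNTDQA selection from the integer intrinsic to any aligned non-temporal 128/256-bit vector load, float or integer. At the source level this corresponds to the SSE4.1 streaming-load intrinsic; a small sketch (standard intrinsics; not part of this patch):

  #include <smmintrin.h>  // SSE4.1

  // Non-temporal 16-byte load; 'src' must be 16-byte aligned. The cast is
  // for older headers whose prototype takes a non-const pointer.
  __m128i stream_load(const __m128i *src) {
    return _mm_stream_load_si128((__m128i *)src);
  }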
//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//
@@ -7815,14 +7753,24 @@ def INSERTQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
VR128:$mask))]>, XD;
}
+// Non-temporal (unaligned) scalar stores.
+let AddedComplexity = 400 in { // Prefer non-temporal versions
+let mayStore = 1, SchedRW = [WriteStore] in {
def MOVNTSS : I<0x2B, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
- "movntss\t{$src, $dst|$dst, $src}",
- [(int_x86_sse4a_movnt_ss addr:$dst, VR128:$src)]>, XS;
+ "movntss\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVNT>, XS;
def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
- "movntsd\t{$src, $dst|$dst, $src}",
- [(int_x86_sse4a_movnt_sd addr:$dst, VR128:$src)]>, XD;
-}
+ "movntsd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVNT>, XD;
+} // SchedRW
+
+def : Pat<(nontemporalstore FR32:$src, addr:$dst),
+ (MOVNTSS addr:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
+
+def : Pat<(nontemporalstore FR64:$src, addr:$dst),
+ (MOVNTSD addr:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
+
+} // AddedComplexity
+} // HasSSE4A
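MOVNTSS/MOVNTSD are SSE4A's non-temporal scalar stores; the new patterns let a generic nontemporalstore of an FR32/FR64 value select them instead of requiring the target intrinsic. The user-facing operation, for reference (SSE4A intrinsics, AMD-only, typically compiled with -msse4a; not part of this patch):

  #include <x86intrin.h>  // exposes ammintrin.h with -msse4a

  // Store the low float/double of a vector, bypassing the cache.
  void stream_scalars(float *pf, double *pd, __m128 s, __m128d d) {
    _mm_stream_ss(pf, s);  // movntss
    _mm_stream_sd(pd, d);  // movntsd
  }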
//===----------------------------------------------------------------------===//
// AVX Instructions
@@ -7848,24 +7796,24 @@ class avx2_broadcast_rr<bits<8> opc, string OpcodeStr, RegisterClass RC,
[(set RC:$dst, (ResVT (X86VBroadcast (OpVT VR128:$src))))]>,
Sched<[Sched]>, VEX;
-let ExeDomain = SSEPackedSingle in {
+let ExeDomain = SSEPackedSingle, Predicates = [HasAVX, NoVLX] in {
def VBROADCASTSSrm : avx_broadcast_rm<0x18, "vbroadcastss", VR128,
f32mem, v4f32, loadf32, WriteLoad>;
def VBROADCASTSSYrm : avx_broadcast_rm<0x18, "vbroadcastss", VR256,
f32mem, v8f32, loadf32,
WriteFShuffleLd>, VEX_L;
}
-let ExeDomain = SSEPackedDouble in
+let ExeDomain = SSEPackedDouble, Predicates = [HasAVX, NoVLX] in
def VBROADCASTSDYrm : avx_broadcast_rm<0x19, "vbroadcastsd", VR256, f64mem,
v4f64, loadf64, WriteFShuffleLd>, VEX_L;
-let ExeDomain = SSEPackedSingle in {
+let ExeDomain = SSEPackedSingle, Predicates = [HasAVX2, NoVLX] in {
def VBROADCASTSSrr : avx2_broadcast_rr<0x18, "vbroadcastss", VR128,
v4f32, v4f32, WriteFShuffle>;
def VBROADCASTSSYrr : avx2_broadcast_rr<0x18, "vbroadcastss", VR256,
v8f32, v4f32, WriteFShuffle256>, VEX_L;
}
-let ExeDomain = SSEPackedDouble in
+let ExeDomain = SSEPackedDouble, Predicates = [HasAVX2, NoVLX] in
def VBROADCASTSDYrr : avx2_broadcast_rr<0x19, "vbroadcastsd", VR256,
v4f64, v2f64, WriteFShuffle256>, VEX_L;
@@ -7977,7 +7925,7 @@ def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
}
// AVX1 patterns
-let Predicates = [HasAVX] in {
+let Predicates = [HasAVX, NoVLX] in {
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v4f32 (VEXTRACTF128rr
(v8f32 VR256:$src1),
@@ -8015,20 +7963,20 @@ def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v32i8 VR256:$src1),
(EXTRACT_get_vextract128_imm VR128:$ext)))>;
-def : Pat<(alignedstore (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
- (iPTR imm))), addr:$dst),
+def : Pat<(store (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
+ (iPTR imm))), addr:$dst),
(VEXTRACTF128mr addr:$dst, VR256:$src1,
(EXTRACT_get_vextract128_imm VR128:$ext))>;
-def : Pat<(alignedstore (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
- (iPTR imm))), addr:$dst),
+def : Pat<(store (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
+ (iPTR imm))), addr:$dst),
(VEXTRACTF128mr addr:$dst, VR256:$src1,
(EXTRACT_get_vextract128_imm VR128:$ext))>;
-def : Pat<(alignedstore (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
- (iPTR imm))), addr:$dst),
+def : Pat<(store (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
+ (iPTR imm))), addr:$dst),
(VEXTRACTF128mr addr:$dst, VR256:$src1,
(EXTRACT_get_vextract128_imm VR128:$ext))>;
-def : Pat<(alignedstore (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
- (iPTR imm))), addr:$dst),
+def : Pat<(store (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
+ (iPTR imm))), addr:$dst),
(VEXTRACTF128mr addr:$dst, VR256:$src1,
(EXTRACT_get_vextract128_imm VR128:$ext))>;
}
@@ -8078,45 +8026,45 @@ defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
RegisterClass RC, X86MemOperand x86memop_f,
X86MemOperand x86memop_i, PatFrag i_frag,
- Intrinsic IntVar, ValueType vt> {
- def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V,
- Sched<[WriteFShuffle]>;
- def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86memop_i:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (IntVar RC:$src1,
- (bitconvert (i_frag addr:$src2))))]>, VEX_4V,
- Sched<[WriteFShuffleLd, ReadAfterLd]>;
-
+ ValueType f_vt, ValueType i_vt> {
let Predicates = [HasAVX, NoVLX] in {
+ def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (f_vt (X86VPermilpv RC:$src1, (i_vt RC:$src2))))]>, VEX_4V,
+ Sched<[WriteFShuffle]>;
+ def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop_i:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (f_vt (X86VPermilpv RC:$src1,
+ (i_vt (bitconvert (i_frag addr:$src2))))))]>, VEX_4V,
+ Sched<[WriteFShuffleLd, ReadAfterLd]>;
+
def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, u8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (vt (X86VPermilpi RC:$src1, (i8 imm:$src2))))]>, VEX,
+ [(set RC:$dst, (f_vt (X86VPermilpi RC:$src1, (i8 imm:$src2))))]>, VEX,
Sched<[WriteFShuffle]>;
def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
(ins x86memop_f:$src1, u8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
- (vt (X86VPermilpi (load addr:$src1), (i8 imm:$src2))))]>, VEX,
+ (f_vt (X86VPermilpi (load addr:$src1), (i8 imm:$src2))))]>, VEX,
Sched<[WriteFShuffleLd]>;
}// Predicates = [HasAVX, NoVLX]
}
let ExeDomain = SSEPackedSingle in {
defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
- loadv2i64, int_x86_avx_vpermilvar_ps, v4f32>;
+ loadv2i64, v4f32, v4i32>;
defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
- loadv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>, VEX_L;
+ loadv4i64, v8f32, v8i32>, VEX_L;
}
let ExeDomain = SSEPackedDouble in {
defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
- loadv2i64, int_x86_avx_vpermilvar_pd, v2f64>;
+ loadv2i64, v2f64, v2i64>;
defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
- loadv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>, VEX_L;
+ loadv4i64, v4f64, v4i64>, VEX_L;
}
let Predicates = [HasAVX, NoVLX] in {
@@ -8158,6 +8106,7 @@ def : Pat<(v2i64 (X86VPermilpi (loadv2i64 addr:$src1), (i8 imm:$imm))),
// VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
//
let ExeDomain = SSEPackedSingle in {
+let isCommutable = 1 in
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, u8imm:$src3),
"vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
@@ -8276,9 +8225,14 @@ let Predicates = [HasF16C] in {
// Patterns for matching conversions from float to half-float and vice versa.
let Predicates = [HasF16C] in {
+ // Use MXCSR.RC for rounding instead of explicitly specifying the default
+ // rounding mode (Nearest-Even, encoded as 0). Both are equivalent in the
+ // configurations we support (the default). However, falling back to MXCSR is
+ // more consistent with other instructions, which are always controlled by it.
+ // It's encoded as 0b100.
def : Pat<(fp_to_f16 FR32:$src),
(i16 (EXTRACT_SUBREG (VMOVPDI2DIrr (VCVTPS2PHrr
- (COPY_TO_REGCLASS FR32:$src, VR128), 0)), sub_16bit))>;
+ (COPY_TO_REGCLASS FR32:$src, VR128), 4)), sub_16bit))>;
def : Pat<(f16_to_fp GR16:$src),
(f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
@@ -8286,7 +8240,7 @@ let Predicates = [HasF16C] in {
def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32:$src))),
(f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
- (VCVTPS2PHrr (COPY_TO_REGCLASS FR32:$src, VR128), 0)), FR32)) >;
+ (VCVTPS2PHrr (COPY_TO_REGCLASS FR32:$src, VR128), 4)), FR32)) >;
}
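The immediate 4 sets the "use MXCSR.RC" bit (0b100) in VCVTPS2PH's rounding-control field; the intrinsic headers expose the same value as _MM_FROUND_CUR_DIRECTION (0x04). A short F16C sketch (standard intrinsics; not part of this patch):

  #include <immintrin.h>  // F16C

  // Convert using the current MXCSR rounding mode, matching the patterns
  // above, instead of hard-coding round-to-nearest-even (0).
  unsigned short to_half(float x) {
    return _cvtss_sh(x, _MM_FROUND_CUR_DIRECTION);
  }
  float from_half(unsigned short h) {
    return _cvtsh_ss(h);  // vcvtph2ps takes no rounding immediate
  }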
//===----------------------------------------------------------------------===//
@@ -8387,49 +8341,54 @@ let Predicates = [HasAVX2] in {
def : Pat<(v4f64 (X86VBroadcast (v4f64 VR256:$src))),
(VBROADCASTSDYrr (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src),
sub_xmm)))>;
+}
+let Predicates = [HasAVX2, NoVLX] in {
// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
- let AddedComplexity = 20 in {
+ let AddedComplexity = 20 in {
def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
(VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
(VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
(VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
+ }
+}
+
+let Predicates = [HasAVX2, NoVLX_Or_NoBWI], AddedComplexity = 20 in {
+ def : Pat<(v16i8 (X86VBroadcast GR8:$src)),
+ (VPBROADCASTBrr (COPY_TO_REGCLASS
+ (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
+ VR128))>;
+ def : Pat<(v32i8 (X86VBroadcast GR8:$src)),
+ (VPBROADCASTBYrr (COPY_TO_REGCLASS
+ (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
+ VR128))>;
+
+ def : Pat<(v8i16 (X86VBroadcast GR16:$src)),
+ (VPBROADCASTWrr (COPY_TO_REGCLASS
+ (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
+ VR128))>;
+ def : Pat<(v16i16 (X86VBroadcast GR16:$src)),
+ (VPBROADCASTWYrr (COPY_TO_REGCLASS
+ (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
+ VR128))>;
+}
+let Predicates = [HasAVX2, NoVLX], AddedComplexity = 20 in {
+ def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
+ (VBROADCASTSSrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
+ def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
+ (VBROADCASTSSYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
+ def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
+ (VBROADCASTSDYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
- def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
- (VBROADCASTSSrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
- def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
- (VBROADCASTSSYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
- def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
- (VBROADCASTSDYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
-
- def : Pat<(v16i8 (X86VBroadcast GR8:$src)),
- (VPBROADCASTBrr (COPY_TO_REGCLASS
- (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
- VR128))>;
- def : Pat<(v32i8 (X86VBroadcast GR8:$src)),
- (VPBROADCASTBYrr (COPY_TO_REGCLASS
- (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
- VR128))>;
-
- def : Pat<(v8i16 (X86VBroadcast GR16:$src)),
- (VPBROADCASTWrr (COPY_TO_REGCLASS
- (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
- VR128))>;
- def : Pat<(v16i16 (X86VBroadcast GR16:$src)),
- (VPBROADCASTWYrr (COPY_TO_REGCLASS
- (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
- VR128))>;
-
- // The patterns for VPBROADCASTD are not needed because they would match
- // the exact same thing as VBROADCASTSS patterns.
-
- def : Pat<(v2i64 (X86VBroadcast GR64:$src)),
- (VPBROADCASTQrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
- // The v4i64 pattern is not needed because VBROADCASTSDYrr already match.
- }
+ // The patterns for VPBROADCASTD are not needed because they would match
+ // the exact same thing as VBROADCASTSS patterns.
+
+ def : Pat<(v2i64 (X86VBroadcast GR64:$src)),
+ (VPBROADCASTQrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
+  // The v4i64 pattern is not needed because VBROADCASTSDYrr already matches.
}
// AVX1 broadcast patterns
@@ -8442,11 +8401,15 @@ def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
(VBROADCASTSSrm addr:$src)>;
}
-let Predicates = [HasAVX] in {
// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
- let AddedComplexity = 20 in {
+let Predicates = [HasAVX], AddedComplexity = 20 in {
// 128bit broadcasts:
+ def : Pat<(v2f64 (X86VBroadcast f64:$src)),
+ (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
+}
+
+let Predicates = [HasAVX, NoVLX], AddedComplexity = 20 in {
def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
(VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0)>;
def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
@@ -8468,12 +8431,9 @@ let Predicates = [HasAVX] in {
(VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
(VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), sub_xmm),
(VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), 1)>;
- }
- def : Pat<(v2f64 (X86VBroadcast f64:$src)),
- (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
def : Pat<(v2i64 (X86VBroadcast i64:$src)),
- (VMOVDDUPrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
+ (VMOVDDUPrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
}
//===----------------------------------------------------------------------===//
@@ -8482,21 +8442,23 @@ let Predicates = [HasAVX] in {
multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
ValueType OpVT, X86FoldableSchedWrite Sched> {
- def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst,
- (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
- Sched<[Sched]>, VEX_4V, VEX_L;
- def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, i256mem:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst,
- (OpVT (X86VPermv VR256:$src1,
- (bitconvert (mem_frag addr:$src2)))))]>,
- Sched<[Sched.Folded, ReadAfterLd]>, VEX_4V, VEX_L;
+ let Predicates = [HasAVX2, NoVLX] in {
+ def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
+ Sched<[Sched]>, VEX_4V, VEX_L;
+ def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, i256mem:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (OpVT (X86VPermv VR256:$src1,
+ (bitconvert (mem_frag addr:$src2)))))]>,
+ Sched<[Sched.Folded, ReadAfterLd]>, VEX_4V, VEX_L;
+ }
}
defm VPERMD : avx2_perm<0x36, "vpermd", loadv4i64, v8i32, WriteShuffle256>;
@@ -8505,21 +8467,23 @@ defm VPERMPS : avx2_perm<0x16, "vpermps", loadv8f32, v8f32, WriteFShuffle256>;
multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
ValueType OpVT, X86FoldableSchedWrite Sched> {
- def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, u8imm:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst,
- (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
- Sched<[Sched]>, VEX, VEX_L;
- def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
- (ins i256mem:$src1, u8imm:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst,
- (OpVT (X86VPermi (mem_frag addr:$src1),
- (i8 imm:$src2))))]>,
- Sched<[Sched.Folded, ReadAfterLd]>, VEX, VEX_L;
+ let Predicates = [HasAVX2, NoVLX] in {
+ def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, u8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
+ Sched<[Sched]>, VEX, VEX_L;
+ def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins i256mem:$src1, u8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (OpVT (X86VPermi (mem_frag addr:$src1),
+ (i8 imm:$src2))))]>,
+ Sched<[Sched.Folded, ReadAfterLd]>, VEX, VEX_L;
+ }
}
defm VPERMQ : avx2_perm_imm<0x00, "vpermq", loadv4i64, v4i64,
@@ -8531,6 +8495,7 @@ defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", loadv4f64, v4f64,
//===----------------------------------------------------------------------===//
// VPERM2I128 - Permute Floating-Point Values in 128-bit chunks
//
+let isCommutable = 1 in
def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, u8imm:$src3),
"vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
@@ -8631,7 +8596,7 @@ def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
"vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
Sched<[WriteStore]>, VEX, VEX_L;
-let Predicates = [HasAVX2] in {
+let Predicates = [HasAVX2, NoVLX] in {
def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
(v2i64 (VEXTRACTI128rr
(v4i64 VR256:$src1),
@@ -8703,116 +8668,42 @@ defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
int_x86_avx2_maskstore_q,
int_x86_avx2_maskstore_q_256>, VEX_W;
-def: Pat<(X86mstore addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src)),
- (VMASKMOVPSYmr addr:$ptr, VR256:$mask, VR256:$src)>;
-
-def: Pat<(X86mstore addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src)),
- (VPMASKMOVDYmr addr:$ptr, VR256:$mask, VR256:$src)>;
-
-def: Pat<(X86mstore addr:$ptr, (v4i32 VR128:$mask), (v4f32 VR128:$src)),
- (VMASKMOVPSmr addr:$ptr, VR128:$mask, VR128:$src)>;
-
-def: Pat<(X86mstore addr:$ptr, (v4i32 VR128:$mask), (v4i32 VR128:$src)),
- (VPMASKMOVDmr addr:$ptr, VR128:$mask, VR128:$src)>;
-
-def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
- (VMASKMOVPSYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask),
- (bc_v8f32 (v8i32 immAllZerosV)))),
- (VMASKMOVPSYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src0))),
- (VBLENDVPSYrr VR256:$src0, (VMASKMOVPSYrm VR256:$mask, addr:$ptr),
- VR256:$mask)>;
-
-def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
- (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 immAllZerosV))),
- (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src0))),
- (VBLENDVPSYrr VR256:$src0, (VPMASKMOVDYrm VR256:$mask, addr:$ptr),
- VR256:$mask)>;
-
-def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask), undef)),
- (VMASKMOVPSrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask),
- (bc_v4f32 (v4i32 immAllZerosV)))),
- (VMASKMOVPSrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v4f32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4f32 VR128:$src0))),
- (VBLENDVPSrr VR128:$src0, (VMASKMOVPSrm VR128:$mask, addr:$ptr),
- VR128:$mask)>;
-
-def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), undef)),
- (VPMASKMOVDrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4i32 immAllZerosV))),
- (VPMASKMOVDrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v4i32 (masked_load addr:$ptr, (v4i32 VR128:$mask), (v4i32 VR128:$src0))),
- (VBLENDVPSrr VR128:$src0, (VPMASKMOVDrm VR128:$mask, addr:$ptr),
- VR128:$mask)>;
-
-def: Pat<(X86mstore addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src)),
- (VMASKMOVPDYmr addr:$ptr, VR256:$mask, VR256:$src)>;
-
-def: Pat<(X86mstore addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src)),
- (VPMASKMOVQYmr addr:$ptr, VR256:$mask, VR256:$src)>;
-
-def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
- (VMASKMOVPDYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
- (v4f64 immAllZerosV))),
- (VMASKMOVPDYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src0))),
- (VBLENDVPDYrr VR256:$src0, (VMASKMOVPDYrm VR256:$mask, addr:$ptr),
- VR256:$mask)>;
-
-def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
- (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
- (bc_v4i64 (v8i32 immAllZerosV)))),
- (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;
-
-def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src0))),
- (VBLENDVPDYrr VR256:$src0, (VPMASKMOVQYrm VR256:$mask, addr:$ptr),
- VR256:$mask)>;
-
-def: Pat<(X86mstore addr:$ptr, (v2i64 VR128:$mask), (v2f64 VR128:$src)),
- (VMASKMOVPDmr addr:$ptr, VR128:$mask, VR128:$src)>;
-
-def: Pat<(X86mstore addr:$ptr, (v2i64 VR128:$mask), (v2i64 VR128:$src)),
- (VPMASKMOVQmr addr:$ptr, VR128:$mask, VR128:$src)>;
-
-def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask), undef)),
- (VMASKMOVPDrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask),
- (v2f64 immAllZerosV))),
- (VMASKMOVPDrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v2f64 (masked_load addr:$ptr, (v2i64 VR128:$mask), (v2f64 VR128:$src0))),
- (VBLENDVPDrr VR128:$src0, (VMASKMOVPDrm VR128:$mask, addr:$ptr),
- VR128:$mask)>;
-
-def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask), undef)),
- (VPMASKMOVQrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask),
- (bc_v2i64 (v4i32 immAllZerosV)))),
- (VPMASKMOVQrm VR128:$mask, addr:$ptr)>;
-
-def: Pat<(v2i64 (masked_load addr:$ptr, (v2i64 VR128:$mask), (v2i64 VR128:$src0))),
- (VBLENDVPDrr VR128:$src0, (VPMASKMOVQrm VR128:$mask, addr:$ptr),
- VR128:$mask)>;
-
+multiclass maskmov_lowering<string InstrStr, RegisterClass RC, ValueType VT,
+ ValueType MaskVT, string BlendStr, ValueType ZeroVT> {
+ // masked store
+ def: Pat<(X86mstore addr:$ptr, (MaskVT RC:$mask), (VT RC:$src)),
+ (!cast<Instruction>(InstrStr#"mr") addr:$ptr, RC:$mask, RC:$src)>;
+ // masked load
+ def: Pat<(VT (masked_load addr:$ptr, (MaskVT RC:$mask), undef)),
+ (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr)>;
+ def: Pat<(VT (masked_load addr:$ptr, (MaskVT RC:$mask),
+ (VT (bitconvert (ZeroVT immAllZerosV))))),
+ (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr)>;
+ def: Pat<(VT (masked_load addr:$ptr, (MaskVT RC:$mask), (VT RC:$src0))),
+ (!cast<Instruction>(BlendStr#"rr")
+ RC:$src0,
+ (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr),
+ RC:$mask)>;
+}
+let Predicates = [HasAVX] in {
+ defm : maskmov_lowering<"VMASKMOVPS", VR128, v4f32, v4i32, "VBLENDVPS", v4i32>;
+ defm : maskmov_lowering<"VMASKMOVPD", VR128, v2f64, v2i64, "VBLENDVPD", v4i32>;
+ defm : maskmov_lowering<"VMASKMOVPSY", VR256, v8f32, v8i32, "VBLENDVPSY", v8i32>;
+ defm : maskmov_lowering<"VMASKMOVPDY", VR256, v4f64, v4i64, "VBLENDVPDY", v8i32>;
+}
+let Predicates = [HasAVX1Only] in {
+  // i32/i64 masked load/store are not supported on AVX1; use the ps/pd versions.
+ defm : maskmov_lowering<"VMASKMOVPSY", VR256, v8i32, v8i32, "VBLENDVPSY", v8i32>;
+ defm : maskmov_lowering<"VMASKMOVPDY", VR256, v4i64, v4i64, "VBLENDVPDY", v8i32>;
+ defm : maskmov_lowering<"VMASKMOVPS", VR128, v4i32, v4i32, "VBLENDVPS", v4i32>;
+ defm : maskmov_lowering<"VMASKMOVPD", VR128, v2i64, v2i64, "VBLENDVPD", v4i32>;
+}
+let Predicates = [HasAVX2] in {
+ defm : maskmov_lowering<"VPMASKMOVDY", VR256, v8i32, v8i32, "VBLENDVPSY", v8i32>;
+ defm : maskmov_lowering<"VPMASKMOVQY", VR256, v4i64, v4i64, "VBLENDVPDY", v8i32>;
+ defm : maskmov_lowering<"VPMASKMOVD", VR128, v4i32, v4i32, "VBLENDVPS", v4i32>;
+ defm : maskmov_lowering<"VPMASKMOVQ", VR128, v2i64, v2i64, "VBLENDVPD", v4i32>;
+}
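The multiclass collapses three lowering rules: a masked store selects the maskmov store; a masked load whose pass-through is undef or zero selects the bare maskmov load; any other pass-through becomes maskmov-load plus blend. The same shape at the intrinsic level (standard AVX intrinsics; helper names are illustrative, not part of this patch):

  #include <immintrin.h>  // AVX

  // Masked load with a pass-through: load the selected lanes, then blend
  // in 'src0' where the mask's sign bit is clear - the VMASKMOV+VBLENDV
  // pairing the patterns above emit.
  __m256 masked_load_ps(const float *p, __m256i mask, __m256 src0) {
    __m256 loaded = _mm256_maskload_ps(p, mask);
    return _mm256_blendv_ps(src0, loaded, _mm256_castsi256_ps(mask));
  }

  void masked_store_ps(float *p, __m256i mask, __m256 v) {
    _mm256_maskstore_ps(p, mask, v);  // lanes with mask sign bit 0 untouched
  }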
//===----------------------------------------------------------------------===//
// Variable Bit Shifts
//
@@ -8852,6 +8743,8 @@ let Predicates = [HasAVX2, NoVLX] in {
defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
+ let isCodeGenOnly = 1 in
+ defm VPSRAVD_Int : avx2_var_shift<0x46, "vpsravd", X86vsrav, v4i32, v8i32>;
}
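The _Int variant pins the psrav intrinsics to a dedicated X86vsrav node (kept isCodeGenOnly so it never competes in the assembler tables). The distinction matters because generic ISD::SRA leaves shift counts >= 32 undefined, while the hardware fills with the sign bit. The user-visible operation (standard AVX2 intrinsic; not part of this patch):

  #include <immintrin.h>  // AVX2

  // Per-element arithmetic right shift by variable counts; a count >= 32
  // fills the element with its sign bit rather than being undefined.
  __m128i srav(__m128i v, __m128i counts) {
    return _mm_srav_epi32(v, counts);
  }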
//===----------------------------------------------------------------------===//
// VGATHER - GATHER Operations
@@ -8869,22 +8762,22 @@ multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
[]>, VEX_4VOp3, VEX_L;
}
-let mayLoad = 1, Constraints
+let mayLoad = 1, hasSideEffects = 0, Constraints
= "@earlyclobber $dst,@earlyclobber $mask_wb, $src1 = $dst, $mask = $mask_wb"
in {
- defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", VR256, vx64mem, vx64mem>, VEX_W;
- defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", VR256, vx64mem, vy64mem>, VEX_W;
- defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", VR256, vx32mem, vy32mem>;
- defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", VR128, vx32mem, vy32mem>;
+ defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", VR256, vx128mem, vx256mem>, VEX_W;
+ defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", VR256, vx128mem, vy256mem>, VEX_W;
+ defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", VR256, vx128mem, vy256mem>;
+ defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", VR128, vx64mem, vy128mem>;
let ExeDomain = SSEPackedDouble in {
- defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx64mem, vx64mem>, VEX_W;
- defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx64mem, vy64mem>, VEX_W;
+ defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx128mem, vx256mem>, VEX_W;
+ defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx128mem, vy256mem>, VEX_W;
}
let ExeDomain = SSEPackedSingle in {
- defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx32mem, vy32mem>;
- defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx32mem, vy32mem>;
+ defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx128mem, vy256mem>;
+ defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx64mem, vy128mem>;
}
}
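The operand-class renames (vx64mem -> vx128mem and so on) appear to change only the declared width of the vector-indexed memory operands, and hasSideEffects = 0 lets the scheduler treat the gathers as ordinary loads; the encodings are unchanged. For reference, the corresponding source-level operation (standard AVX2 intrinsic; not part of this patch):

  #include <immintrin.h>  // AVX2

  // Gather four doubles from base + 8 * idx[i]; scale is a literal 1/2/4/8.
  __m256d gather4(const double *base, __m128i idx) {
    return _mm256_i32gather_pd(base, idx, 8);
  }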
diff --git a/lib/Target/X86/X86InstrSystem.td b/lib/Target/X86/X86InstrSystem.td
index a97d1e5c86d0..6667bd2aec4a 100644
--- a/lib/Target/X86/X86InstrSystem.td
+++ b/lib/Target/X86/X86InstrSystem.td
@@ -174,11 +174,11 @@ def MOV32rs : I<0x8C, MRMDestReg, (outs GR32:$dst), (ins SEGMENT_REG:$src),
def MOV64rs : RI<0x8C, MRMDestReg, (outs GR64:$dst), (ins SEGMENT_REG:$src),
"mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_REG_SR>;
-def MOV16ms : I<0x8C, MRMDestMem, (outs i16mem:$dst), (ins SEGMENT_REG:$src),
+def MOV16ms : I<0x8C, MRMDestMem, (outs), (ins i16mem:$dst, SEGMENT_REG:$src),
"mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV_MEM_SR>, OpSize16;
-def MOV32ms : I<0x8C, MRMDestMem, (outs i32mem:$dst), (ins SEGMENT_REG:$src),
+def MOV32ms : I<0x8C, MRMDestMem, (outs), (ins i32mem:$dst, SEGMENT_REG:$src),
"mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV_MEM_SR>, OpSize32;
-def MOV64ms : RI<0x8C, MRMDestMem, (outs i64mem:$dst), (ins SEGMENT_REG:$src),
+def MOV64ms : RI<0x8C, MRMDestMem, (outs), (ins i64mem:$dst, SEGMENT_REG:$src),
"mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV_MEM_SR>;
def MOV16sr : I<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR16:$src),
@@ -248,7 +248,7 @@ def STR32r : I<0x00, MRM1r, (outs GR32:$dst), (ins),
"str{l}\t$dst", [], IIC_STR>, TB, OpSize32;
def STR64r : RI<0x00, MRM1r, (outs GR64:$dst), (ins),
"str{q}\t$dst", [], IIC_STR>, TB;
-def STRm : I<0x00, MRM1m, (outs i16mem:$dst), (ins),
+def STRm : I<0x00, MRM1m, (outs), (ins i16mem:$dst),
"str{w}\t$dst", [], IIC_STR>, TB;
def LTRr : I<0x00, MRM3r, (outs), (ins GR16:$src),
@@ -339,9 +339,11 @@ def POPGS64 : I<0xa9, RawFrm, (outs), (ins),
def LDS16rm : I<0xc5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "lds{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize16;
+ "lds{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize16,
+ Requires<[Not64BitMode]>;
def LDS32rm : I<0xc5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "lds{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize32;
+ "lds{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize32,
+ Requires<[Not64BitMode]>;
def LSS16rm : I<0xb2, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
"lss{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize16;
@@ -351,9 +353,11 @@ def LSS64rm : RI<0xb2, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src),
"lss{q}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB;
def LES16rm : I<0xc4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
- "les{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize16;
+ "les{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize16,
+ Requires<[Not64BitMode]>;
def LES32rm : I<0xc4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src),
- "les{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize32;
+ "les{l}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, OpSize32,
+ Requires<[Not64BitMode]>;
def LFS16rm : I<0xb4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src),
"lfs{w}\t{$src, $dst|$dst, $src}", [], IIC_LXS>, TB, OpSize16;
@@ -385,21 +389,21 @@ def VERWm : I<0x00, MRM5m, (outs), (ins i16mem:$seg),
// Descriptor-table support instructions
let SchedRW = [WriteSystem] in {
-def SGDT16m : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins),
+def SGDT16m : I<0x01, MRM0m, (outs), (ins opaque48mem:$dst),
"sgdt{w}\t$dst", [], IIC_SGDT>, TB, OpSize16, Requires<[Not64BitMode]>;
-def SGDT32m : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins),
+def SGDT32m : I<0x01, MRM0m, (outs), (ins opaque48mem:$dst),
"sgdt{l}\t$dst", [], IIC_SGDT>, OpSize32, TB, Requires <[Not64BitMode]>;
-def SGDT64m : I<0x01, MRM0m, (outs opaque80mem:$dst), (ins),
+def SGDT64m : I<0x01, MRM0m, (outs), (ins opaque80mem:$dst),
"sgdt{q}\t$dst", [], IIC_SGDT>, TB, Requires <[In64BitMode]>;
-def SIDT16m : I<0x01, MRM1m, (outs opaque48mem:$dst), (ins),
+def SIDT16m : I<0x01, MRM1m, (outs), (ins opaque48mem:$dst),
"sidt{w}\t$dst", [], IIC_SIDT>, TB, OpSize16, Requires<[Not64BitMode]>;
-def SIDT32m : I<0x01, MRM1m, (outs opaque48mem:$dst), (ins),
+def SIDT32m : I<0x01, MRM1m, (outs), (ins opaque48mem:$dst),
"sidt{l}\t$dst", []>, OpSize32, TB, Requires <[Not64BitMode]>;
-def SIDT64m : I<0x01, MRM1m, (outs opaque80mem:$dst), (ins),
+def SIDT64m : I<0x01, MRM1m, (outs), (ins opaque80mem:$dst),
"sidt{q}\t$dst", []>, TB, Requires <[In64BitMode]>;
def SLDT16r : I<0x00, MRM0r, (outs GR16:$dst), (ins),
"sldt{w}\t$dst", [], IIC_SLDT>, TB, OpSize16;
-def SLDT16m : I<0x00, MRM0m, (outs i16mem:$dst), (ins),
+def SLDT16m : I<0x00, MRM0m, (outs), (ins i16mem:$dst),
"sldt{w}\t$dst", [], IIC_SLDT>, TB;
def SLDT32r : I<0x00, MRM0r, (outs GR32:$dst), (ins),
"sldt{l}\t$dst", [], IIC_SLDT>, OpSize32, TB;
@@ -408,7 +412,7 @@ def SLDT32r : I<0x00, MRM0r, (outs GR32:$dst), (ins),
// extension.
def SLDT64r : RI<0x00, MRM0r, (outs GR64:$dst), (ins),
"sldt{q}\t$dst", [], IIC_SLDT>, TB;
-def SLDT64m : RI<0x00, MRM0m, (outs i16mem:$dst), (ins),
+def SLDT64m : RI<0x00, MRM0m, (outs), (ins i16mem:$dst),
"sldt{q}\t$dst", [], IIC_SLDT>, TB;
def LGDT16m : I<0x01, MRM2m, (outs), (ins opaque48mem:$src),
@@ -450,7 +454,7 @@ def SMSW64r : RI<0x01, MRM4r, (outs GR64:$dst), (ins),
"smsw{q}\t$dst", [], IIC_SMSW>, TB;
// For memory operands, there is only a 16-bit form
-def SMSW16m : I<0x01, MRM4m, (outs i16mem:$dst), (ins),
+def SMSW16m : I<0x01, MRM4m, (outs), (ins i16mem:$dst),
"smsw{w}\t$dst", [], IIC_SMSW>, TB;
def LMSW16r : I<0x01, MRM6r, (outs), (ins GR16:$src),
@@ -558,7 +562,7 @@ let usesCustomInserter = 1 in {
[(set GR32:$dst, (int_x86_rdpkru))]>;
}
-let Defs = [EAX, EDX], Uses = [ECX] in
+let Defs = [EAX, EDX], Uses = [ECX] in
def RDPKRUr : I<0x01, MRM_EE, (outs), (ins), "rdpkru", []>, TB;
let Uses = [EAX, ECX, EDX] in
def WRPKRUr : I<0x01, MRM_EF, (outs), (ins), "wrpkru", []>, TB;
diff --git a/lib/Target/X86/X86InstrVMX.td b/lib/Target/X86/X86InstrVMX.td
index 79afe9a65409..2ea27a934b47 100644
--- a/lib/Target/X86/X86InstrVMX.td
+++ b/lib/Target/X86/X86InstrVMX.td
@@ -41,13 +41,13 @@ def VMLAUNCH : I<0x01, MRM_C2, (outs), (ins), "vmlaunch", []>, TB;
def VMRESUME : I<0x01, MRM_C3, (outs), (ins), "vmresume", []>, TB;
def VMPTRLDm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
"vmptrld\t$vmcs", []>, PS;
-def VMPTRSTm : I<0xC7, MRM7m, (outs i64mem:$vmcs), (ins),
+def VMPTRSTm : I<0xC7, MRM7m, (outs), (ins i64mem:$vmcs),
"vmptrst\t$vmcs", []>, TB;
-def VMREAD64rm : I<0x78, MRMDestMem, (outs i64mem:$dst), (ins GR64:$src),
+def VMREAD64rm : I<0x78, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"vmread{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
def VMREAD64rr : I<0x78, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
"vmread{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
-def VMREAD32rm : I<0x78, MRMDestMem, (outs i32mem:$dst), (ins GR32:$src),
+def VMREAD32rm : I<0x78, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"vmread{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
def VMREAD32rr : I<0x78, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
"vmread{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
diff --git a/lib/Target/X86/X86InstrXOP.td b/lib/Target/X86/X86InstrXOP.td
index 4cb2304e464d..f49917b80f36 100644
--- a/lib/Target/X86/X86InstrXOP.td
+++ b/lib/Target/X86/X86InstrXOP.td
@@ -222,123 +222,199 @@ let ExeDomain = SSEPackedInt in { // SSE integer instructions
defm VPCOMUQ : xopvpcom<0xEF, "uq", X86vpcomu, v2i64>;
}
-// Instruction where either second or third source can be memory
-multiclass xop4op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
- def rr : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, VR128:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128:$dst, (Int VR128:$src1, VR128:$src2, VR128:$src3))]>,
- XOP_4V, VEX_I8IMM;
- def rm : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i128mem:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128:$dst,
- (Int VR128:$src1, VR128:$src2,
- (bitconvert (loadv2i64 addr:$src3))))]>,
- XOP_4V, VEX_I8IMM, VEX_W, MemOp4;
- def mr : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, VR128:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR128:$dst,
- (Int VR128:$src1, (bitconvert (loadv2i64 addr:$src2)),
- VR128:$src3))]>,
- XOP_4V, VEX_I8IMM;
+multiclass xop4op<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType vt128> {
+ def rrr : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2),
+ (vt128 VR128:$src3))))]>,
+ XOP_4V, VEX_I8IMM;
+ def rrm : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i128mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2),
+ (vt128 (bitconvert (loadv2i64 addr:$src3))))))]>,
+ XOP_4V, VEX_I8IMM, VEX_W, MemOp4;
+ def rmr : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (v16i8 (OpNode (vt128 VR128:$src1), (vt128 (bitconvert (loadv2i64 addr:$src2))),
+ (vt128 VR128:$src3))))]>,
+ XOP_4V, VEX_I8IMM;
+ // For disassembler
+ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
+ def rrr_REV : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, XOP_4V, VEX_I8IMM, VEX_W, MemOp4;
}
let ExeDomain = SSEPackedInt in {
- defm VPPERM : xop4op<0xA3, "vpperm", int_x86_xop_vpperm>;
- defm VPCMOV : xop4op<0xA2, "vpcmov", int_x86_xop_vpcmov>;
+ defm VPPERM : xop4op<0xA3, "vpperm", X86vpperm, v16i8>;
}
-multiclass xop4op256<bits<8> opc, string OpcodeStr, Intrinsic Int> {
- def rrY : IXOPi8<opc, MRMSrcReg, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2, VR256:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR256:$dst, (Int VR256:$src1, VR256:$src2, VR256:$src3))]>,
- XOP_4V, VEX_I8IMM, VEX_L;
- def rmY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2, i256mem:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR256:$dst,
- (Int VR256:$src1, VR256:$src2,
- (bitconvert (loadv4i64 addr:$src3))))]>,
- XOP_4V, VEX_I8IMM, VEX_W, MemOp4, VEX_L;
- def mrY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, f256mem:$src2, VR256:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- [(set VR256:$dst,
- (Int VR256:$src1, (bitconvert (loadv4i64 addr:$src2)),
- VR256:$src3))]>,
- XOP_4V, VEX_I8IMM, VEX_L;
+// Instruction where either second or third source can be memory
+multiclass xop4op_int<bits<8> opc, string OpcodeStr,
+ Intrinsic Int128, Intrinsic Int256> {
+  // 128-bit instructions
+ def rrr : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst, (Int128 VR128:$src1, VR128:$src2, VR128:$src3))]>,
+ XOP_4V, VEX_I8IMM;
+ def rrm : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i128mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (Int128 VR128:$src1, VR128:$src2,
+ (bitconvert (loadv2i64 addr:$src3))))]>,
+ XOP_4V, VEX_I8IMM, VEX_W, MemOp4;
+ def rmr : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (Int128 VR128:$src1, (bitconvert (loadv2i64 addr:$src2)),
+ VR128:$src3))]>,
+ XOP_4V, VEX_I8IMM;
+ // For disassembler
+ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
+ def rrr_REV : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, XOP_4V, VEX_I8IMM, VEX_W, MemOp4;
+
+  // 256-bit instructions
+ def rrrY : IXOPi8<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, VR256:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR256:$dst, (Int256 VR256:$src1, VR256:$src2, VR256:$src3))]>,
+ XOP_4V, VEX_I8IMM, VEX_L;
+ def rrmY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, i256mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR256:$dst,
+ (Int256 VR256:$src1, VR256:$src2,
+ (bitconvert (loadv4i64 addr:$src3))))]>,
+ XOP_4V, VEX_I8IMM, VEX_W, MemOp4, VEX_L;
+ def rmrY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, f256mem:$src2, VR256:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR256:$dst,
+ (Int256 VR256:$src1, (bitconvert (loadv4i64 addr:$src2)),
+ VR256:$src3))]>,
+ XOP_4V, VEX_I8IMM, VEX_L;
+ // For disassembler
+ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
+ def rrrY_REV : IXOPi8<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, VR256:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, XOP_4V, VEX_I8IMM, VEX_W, MemOp4, VEX_L;
}
-let ExeDomain = SSEPackedInt in
- defm VPCMOV : xop4op256<0xA2, "vpcmov", int_x86_xop_vpcmov_256>;
+let ExeDomain = SSEPackedInt in {
+ defm VPCMOV : xop4op_int<0xA2, "vpcmov",
+ int_x86_xop_vpcmov, int_x86_xop_vpcmov_256>;
+}
let Predicates = [HasXOP] in {
def : Pat<(v2i64 (or (and VR128:$src3, VR128:$src1),
(X86andnp VR128:$src3, VR128:$src2))),
- (VPCMOVrr VR128:$src1, VR128:$src2, VR128:$src3)>;
+ (VPCMOVrrr VR128:$src1, VR128:$src2, VR128:$src3)>;
def : Pat<(v4i64 (or (and VR256:$src3, VR256:$src1),
(X86andnp VR256:$src3, VR256:$src2))),
- (VPCMOVrrY VR256:$src1, VR256:$src2, VR256:$src3)>;
+ (VPCMOVrrrY VR256:$src1, VR256:$src2, VR256:$src3)>;
}
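The two Pat entries recognize the generic and/andn/or bit-select idiom and map it onto VPCMOV. A scalar model of the matched DAG (plain C++; X86andnp(a, b) computes ~a & b):

  #include <cstdint>

  // Bitwise select: bits of src1 where sel is 1, bits of src2 where sel
  // is 0, i.e. (sel & src1) | (~sel & src2) - the expression matched above.
  uint64_t pcmov(uint64_t src1, uint64_t src2, uint64_t sel) {
    return (sel & src1) | (~sel & src2);
  }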
-multiclass xop5op<bits<8> opc, string OpcodeStr, Intrinsic Int128,
- Intrinsic Int256, PatFrag ld_128, PatFrag ld_256> {
+multiclass xop5op<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType vt128, ValueType vt256,
+ ValueType id128, ValueType id256,
+ PatFrag ld_128, PatFrag ld_256> {
def rr : IXOP5<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, VR128:$src3, u8imm:$src4),
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set VR128:$dst,
- (Int128 VR128:$src1, VR128:$src2, VR128:$src3, imm:$src4))]>;
+ (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2),
+ (id128 VR128:$src3), (i8 imm:$src4))))]>;
def rm : IXOP5<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, f128mem:$src3, u8imm:$src4),
+ (ins VR128:$src1, VR128:$src2, i128mem:$src3, u8imm:$src4),
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set VR128:$dst,
- (Int128 VR128:$src1, VR128:$src2, (ld_128 addr:$src3), imm:$src4))]>,
+ (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2),
+ (id128 (bitconvert (loadv2i64 addr:$src3))),
+ (i8 imm:$src4))))]>,
VEX_W, MemOp4;
def mr : IXOP5<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, f128mem:$src2, VR128:$src3, u8imm:$src4),
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set VR128:$dst,
- (Int128 VR128:$src1, (ld_128 addr:$src2), VR128:$src3, imm:$src4))]>;
+ (vt128 (OpNode (vt128 VR128:$src1),
+ (vt128 (bitconvert (ld_128 addr:$src2))),
+ (id128 VR128:$src3), (i8 imm:$src4))))]>;
+ // For disassembler
+ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
+ def rr_REV : IXOP5<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3, u8imm:$src4),
+ !strconcat(OpcodeStr,
+ "\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
+ []>, VEX_W, MemOp4;
+
def rrY : IXOP5<opc, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, VR256:$src3, u8imm:$src4),
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set VR256:$dst,
- (Int256 VR256:$src1, VR256:$src2, VR256:$src3, imm:$src4))]>, VEX_L;
+ (vt256 (OpNode (vt256 VR256:$src1), (vt256 VR256:$src2),
+ (id256 VR256:$src3), (i8 imm:$src4))))]>, VEX_L;
def rmY : IXOP5<opc, MRMSrcMem, (outs VR256:$dst),
- (ins VR256:$src1, VR256:$src2, f256mem:$src3, u8imm:$src4),
+ (ins VR256:$src1, VR256:$src2, i256mem:$src3, u8imm:$src4),
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set VR256:$dst,
- (Int256 VR256:$src1, VR256:$src2, (ld_256 addr:$src3), imm:$src4))]>,
- VEX_W, MemOp4, VEX_L;
+ (vt256 (OpNode (vt256 VR256:$src1), (vt256 VR256:$src2),
+ (id256 (bitconvert (loadv4i64 addr:$src3))),
+ (i8 imm:$src4))))]>, VEX_W, MemOp4, VEX_L;
def mrY : IXOP5<opc, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f256mem:$src2, VR256:$src3, u8imm:$src4),
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set VR256:$dst,
- (Int256 VR256:$src1, (ld_256 addr:$src2), VR256:$src3, imm:$src4))]>,
- VEX_L;
+ (vt256 (OpNode (vt256 VR256:$src1),
+ (vt256 (bitconvert (ld_256 addr:$src2))),
+ (id256 VR256:$src3), (i8 imm:$src4))))]>, VEX_L;
+ // For disassembler
+ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
+ def rrY_REV : IXOP5<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, VR256:$src3, u8imm:$src4),
+ !strconcat(OpcodeStr,
+ "\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
+ []>, VEX_W, MemOp4, VEX_L;
}
let ExeDomain = SSEPackedDouble in
- defm VPERMIL2PD : xop5op<0x49, "vpermil2pd", int_x86_xop_vpermil2pd,
- int_x86_xop_vpermil2pd_256, loadv2f64, loadv4f64>;
+ defm VPERMIL2PD : xop5op<0x49, "vpermil2pd", X86vpermil2, v2f64, v4f64,
+ v2i64, v4i64, loadv2f64, loadv4f64>;
let ExeDomain = SSEPackedSingle in
- defm VPERMIL2PS : xop5op<0x48, "vpermil2ps", int_x86_xop_vpermil2ps,
- int_x86_xop_vpermil2ps_256, loadv4f32, loadv8f32>;
+ defm VPERMIL2PS : xop5op<0x48, "vpermil2ps", X86vpermil2, v4f32, v8f32,
+ v4i32, v8i32, loadv4f32, loadv8f32>;
diff --git a/lib/Target/X86/X86IntrinsicsInfo.h b/lib/Target/X86/X86IntrinsicsInfo.h
index b525d5eb60a7..b647d11e3866 100644
--- a/lib/Target/X86/X86IntrinsicsInfo.h
+++ b/lib/Target/X86/X86IntrinsicsInfo.h
@@ -14,31 +14,36 @@
#ifndef LLVM_LIB_TARGET_X86_X86INTRINSICSINFO_H
#define LLVM_LIB_TARGET_X86_X86INTRINSICSINFO_H
+#include "X86ISelLowering.h"
+#include "X86InstrInfo.h"
+
namespace llvm {
-enum IntrinsicType {
+enum IntrinsicType : uint16_t {
INTR_NO_TYPE,
GATHER, SCATTER, PREFETCH, RDSEED, RDRAND, RDPMC, RDTSC, XTEST, ADX, FPCLASS, FPCLASSS,
INTR_TYPE_1OP, INTR_TYPE_2OP, INTR_TYPE_2OP_IMM8, INTR_TYPE_3OP, INTR_TYPE_4OP,
- CMP_MASK, CMP_MASK_CC,CMP_MASK_SCALAR_CC, VSHIFT, VSHIFT_MASK, COMI, COMI_RM,
+ CMP_MASK, CMP_MASK_CC,CMP_MASK_SCALAR_CC, VSHIFT, COMI, COMI_RM,
INTR_TYPE_1OP_MASK, INTR_TYPE_1OP_MASK_RM,
INTR_TYPE_2OP_MASK, INTR_TYPE_2OP_MASK_RM, INTR_TYPE_2OP_IMM8_MASK,
INTR_TYPE_3OP_MASK, INTR_TYPE_3OP_MASK_RM, INTR_TYPE_3OP_IMM8_MASK,
- FMA_OP_MASK, FMA_OP_MASKZ, FMA_OP_MASK3, VPERM_3OP_MASK,
- VPERM_3OP_MASKZ, INTR_TYPE_SCALAR_MASK,
+ FMA_OP_MASK, FMA_OP_MASKZ, FMA_OP_MASK3,
+ FMA_OP_SCALAR_MASK, FMA_OP_SCALAR_MASKZ, FMA_OP_SCALAR_MASK3,
+ VPERM_2OP_MASK, VPERM_3OP_MASK, VPERM_3OP_MASKZ, INTR_TYPE_SCALAR_MASK,
INTR_TYPE_SCALAR_MASK_RM, INTR_TYPE_3OP_SCALAR_MASK_RM,
- COMPRESS_EXPAND_IN_REG, COMPRESS_TO_MEM, BRCST_SUBVEC_TO_VEC,
+ COMPRESS_EXPAND_IN_REG, COMPRESS_TO_MEM, BRCST_SUBVEC_TO_VEC, BRCST32x2_TO_VEC,
TRUNCATE_TO_MEM_VI8, TRUNCATE_TO_MEM_VI16, TRUNCATE_TO_MEM_VI32,
- EXPAND_FROM_MEM, LOADA, LOADU, BLEND, INSERT_SUBVEC,
- TERLOG_OP_MASK, TERLOG_OP_MASKZ, BROADCASTM, KUNPCK, CONVERT_MASK_TO_VEC, CONVERT_TO_MASK
+ EXPAND_FROM_MEM, INSERT_SUBVEC,
+ TERLOG_OP_MASK, TERLOG_OP_MASKZ, BROADCASTM, KUNPCK, FIXUPIMM, FIXUPIMM_MASKZ, FIXUPIMMS,
+ FIXUPIMMS_MASKZ, CONVERT_MASK_TO_VEC, CONVERT_TO_MASK
};
struct IntrinsicData {
- unsigned Id;
+ uint16_t Id;
IntrinsicType Type;
- unsigned Opc0;
- unsigned Opc1;
+ uint16_t Opc0;
+ uint16_t Opc1;
bool operator<(const IntrinsicData &RHS) const {
return Id < RHS.Id;
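Narrowing the enum and the opcode fields to 16 bits halves every IntrinsicData entry, which adds up over a table with hundreds of rows. A minimal sketch of the layout change (plain C++; sizes assume typical ABIs):

  #include <cstdint>

  enum NarrowType : uint16_t { NONE };  // was a plain (32-bit) enum

  struct NarrowData {   // post-patch layout: 4 x 2 = 8 bytes
    uint16_t Id;
    NarrowType Type;
    uint16_t Opc0, Opc1;
  };

  static_assert(sizeof(NarrowData) == 8,
                "half of the previous 4 x 4 = 16 bytes");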
@@ -61,6 +66,14 @@ static const IntrinsicData IntrinsicsWithChain[] = {
X86_INTRINSIC_DATA(addcarryx_u32, ADX, X86ISD::ADC, 0),
X86_INTRINSIC_DATA(addcarryx_u64, ADX, X86ISD::ADC, 0),
+ X86_INTRINSIC_DATA(avx512_gather_dpd_512, GATHER, X86::VGATHERDPDZrm, 0),
+ X86_INTRINSIC_DATA(avx512_gather_dpi_512, GATHER, X86::VPGATHERDDZrm, 0),
+ X86_INTRINSIC_DATA(avx512_gather_dpq_512, GATHER, X86::VPGATHERDQZrm, 0),
+ X86_INTRINSIC_DATA(avx512_gather_dps_512, GATHER, X86::VGATHERDPSZrm, 0),
+ X86_INTRINSIC_DATA(avx512_gather_qpd_512, GATHER, X86::VGATHERQPDZrm, 0),
+ X86_INTRINSIC_DATA(avx512_gather_qpi_512, GATHER, X86::VPGATHERQDZrm, 0),
+ X86_INTRINSIC_DATA(avx512_gather_qpq_512, GATHER, X86::VPGATHERQQZrm, 0),
+ X86_INTRINSIC_DATA(avx512_gather_qps_512, GATHER, X86::VGATHERQPSZrm, 0),
X86_INTRINSIC_DATA(avx512_gather3div2_df, GATHER, X86::VGATHERQPDZ128rm, 0),
X86_INTRINSIC_DATA(avx512_gather3div2_di, GATHER, X86::VPGATHERQQZ128rm, 0),
X86_INTRINSIC_DATA(avx512_gather3div4_df, GATHER, X86::VGATHERQPDZ256rm, 0),
@@ -77,14 +90,6 @@ static const IntrinsicData IntrinsicsWithChain[] = {
X86_INTRINSIC_DATA(avx512_gather3siv4_si, GATHER, X86::VPGATHERDDZ128rm, 0),
X86_INTRINSIC_DATA(avx512_gather3siv8_sf, GATHER, X86::VGATHERDPSZ256rm, 0),
X86_INTRINSIC_DATA(avx512_gather3siv8_si, GATHER, X86::VPGATHERDDZ256rm, 0),
- X86_INTRINSIC_DATA(avx512_gather_dpd_512, GATHER, X86::VGATHERDPDZrm, 0),
- X86_INTRINSIC_DATA(avx512_gather_dpi_512, GATHER, X86::VPGATHERDDZrm, 0),
- X86_INTRINSIC_DATA(avx512_gather_dpq_512, GATHER, X86::VPGATHERDQZrm, 0),
- X86_INTRINSIC_DATA(avx512_gather_dps_512, GATHER, X86::VGATHERDPSZrm, 0),
- X86_INTRINSIC_DATA(avx512_gather_qpd_512, GATHER, X86::VGATHERQPDZrm, 0),
- X86_INTRINSIC_DATA(avx512_gather_qpi_512, GATHER, X86::VPGATHERQDZrm, 0),
- X86_INTRINSIC_DATA(avx512_gather_qpq_512, GATHER, X86::VPGATHERQQZrm, 0),
- X86_INTRINSIC_DATA(avx512_gather_qps_512, GATHER, X86::VGATHERQPSZrm, 0),
X86_INTRINSIC_DATA(avx512_gatherpf_dpd_512, PREFETCH,
X86::VGATHERPF0DPDm, X86::VGATHERPF1DPDm),
@@ -143,18 +148,6 @@ static const IntrinsicData IntrinsicsWithChain[] = {
EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_load_q_512,
EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
- X86_INTRINSIC_DATA(avx512_mask_load_pd_128, LOADA, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_load_pd_256, LOADA, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_load_pd_512, LOADA, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_load_ps_128, LOADA, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_load_ps_256, LOADA, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_load_ps_512, LOADA, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_loadu_pd_128, LOADU, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_loadu_pd_256, LOADU, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_loadu_pd_512, LOADU, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_loadu_ps_128, LOADU, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_loadu_ps_256, LOADU, ISD::DELETED_NODE, 0),
- X86_INTRINSIC_DATA(avx512_mask_loadu_ps_512, LOADU, ISD::DELETED_NODE, 0),
X86_INTRINSIC_DATA(avx512_mask_pmov_db_mem_128, TRUNCATE_TO_MEM_VI8,
X86ISD::VTRUNC, 0),
X86_INTRINSIC_DATA(avx512_mask_pmov_db_mem_256, TRUNCATE_TO_MEM_VI8,
@@ -223,7 +216,6 @@ static const IntrinsicData IntrinsicsWithChain[] = {
X86_INTRINSIC_DATA(avx512_scattersiv4_si, SCATTER, X86::VPSCATTERDDZ128mr, 0),
X86_INTRINSIC_DATA(avx512_scattersiv8_sf, SCATTER, X86::VSCATTERDPSZ256mr, 0),
X86_INTRINSIC_DATA(avx512_scattersiv8_si, SCATTER, X86::VPSCATTERDDZ256mr, 0),
-
X86_INTRINSIC_DATA(rdpmc, RDPMC, X86ISD::RDPMC_DAG, 0),
X86_INTRINSIC_DATA(rdrand_16, RDRAND, X86ISD::RDRAND, 0),
X86_INTRINSIC_DATA(rdrand_32, RDRAND, X86ISD::RDRAND, 0),
@@ -242,7 +234,7 @@ static const IntrinsicData IntrinsicsWithChain[] = {
/*
* Find Intrinsic data by intrinsic ID
*/
-static const IntrinsicData* getIntrinsicWithChain(unsigned IntNo) {
+static const IntrinsicData* getIntrinsicWithChain(uint16_t IntNo) {
IntrinsicData IntrinsicToFind = {IntNo, INTR_NO_TYPE, 0, 0 };
const IntrinsicData *Data = std::lower_bound(std::begin(IntrinsicsWithChain),
@@ -258,49 +250,51 @@ static const IntrinsicData* getIntrinsicWithChain(unsigned IntNo) {
* the alphabetical order.
*/
static const IntrinsicData IntrinsicsWithoutChain[] = {
+ X86_INTRINSIC_DATA(avx_hadd_pd_256, INTR_TYPE_2OP, X86ISD::FHADD, 0),
+ X86_INTRINSIC_DATA(avx_hadd_ps_256, INTR_TYPE_2OP, X86ISD::FHADD, 0),
+ X86_INTRINSIC_DATA(avx_hsub_pd_256, INTR_TYPE_2OP, X86ISD::FHSUB, 0),
+ X86_INTRINSIC_DATA(avx_hsub_ps_256, INTR_TYPE_2OP, X86ISD::FHSUB, 0),
+ X86_INTRINSIC_DATA(avx_max_pd_256, INTR_TYPE_2OP, X86ISD::FMAX, 0),
+ X86_INTRINSIC_DATA(avx_max_ps_256, INTR_TYPE_2OP, X86ISD::FMAX, 0),
+ X86_INTRINSIC_DATA(avx_min_pd_256, INTR_TYPE_2OP, X86ISD::FMIN, 0),
+ X86_INTRINSIC_DATA(avx_min_ps_256, INTR_TYPE_2OP, X86ISD::FMIN, 0),
+ X86_INTRINSIC_DATA(avx_movmsk_pd_256, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
+ X86_INTRINSIC_DATA(avx_movmsk_ps_256, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
+ X86_INTRINSIC_DATA(avx_rcp_ps_256, INTR_TYPE_1OP, X86ISD::FRCP, 0),
+ X86_INTRINSIC_DATA(avx_rsqrt_ps_256, INTR_TYPE_1OP, X86ISD::FRSQRT, 0),
+ X86_INTRINSIC_DATA(avx_sqrt_pd_256, INTR_TYPE_1OP, ISD::FSQRT, 0),
+ X86_INTRINSIC_DATA(avx_sqrt_ps_256, INTR_TYPE_1OP, ISD::FSQRT, 0),
+ X86_INTRINSIC_DATA(avx_vperm2f128_pd_256, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
+ X86_INTRINSIC_DATA(avx_vperm2f128_ps_256, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
+ X86_INTRINSIC_DATA(avx_vperm2f128_si_256, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
+ X86_INTRINSIC_DATA(avx_vpermilvar_pd, INTR_TYPE_2OP, X86ISD::VPERMILPV, 0),
+ X86_INTRINSIC_DATA(avx_vpermilvar_pd_256, INTR_TYPE_2OP, X86ISD::VPERMILPV, 0),
+ X86_INTRINSIC_DATA(avx_vpermilvar_ps, INTR_TYPE_2OP, X86ISD::VPERMILPV, 0),
+ X86_INTRINSIC_DATA(avx_vpermilvar_ps_256, INTR_TYPE_2OP, X86ISD::VPERMILPV, 0),
+ X86_INTRINSIC_DATA(avx2_pabs_b, INTR_TYPE_1OP, X86ISD::ABS, 0),
+ X86_INTRINSIC_DATA(avx2_pabs_d, INTR_TYPE_1OP, X86ISD::ABS, 0),
+ X86_INTRINSIC_DATA(avx2_pabs_w, INTR_TYPE_1OP, X86ISD::ABS, 0),
X86_INTRINSIC_DATA(avx2_packssdw, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
X86_INTRINSIC_DATA(avx2_packsswb, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
X86_INTRINSIC_DATA(avx2_packusdw, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
X86_INTRINSIC_DATA(avx2_packuswb, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
+ X86_INTRINSIC_DATA(avx2_padds_b, INTR_TYPE_2OP, X86ISD::ADDS, 0),
+ X86_INTRINSIC_DATA(avx2_padds_w, INTR_TYPE_2OP, X86ISD::ADDS, 0),
+ X86_INTRINSIC_DATA(avx2_paddus_b, INTR_TYPE_2OP, X86ISD::ADDUS, 0),
+ X86_INTRINSIC_DATA(avx2_paddus_w, INTR_TYPE_2OP, X86ISD::ADDUS, 0),
X86_INTRINSIC_DATA(avx2_pavg_b, INTR_TYPE_2OP, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(avx2_pavg_w, INTR_TYPE_2OP, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(avx2_phadd_d, INTR_TYPE_2OP, X86ISD::HADD, 0),
X86_INTRINSIC_DATA(avx2_phadd_w, INTR_TYPE_2OP, X86ISD::HADD, 0),
X86_INTRINSIC_DATA(avx2_phsub_d, INTR_TYPE_2OP, X86ISD::HSUB, 0),
X86_INTRINSIC_DATA(avx2_phsub_w, INTR_TYPE_2OP, X86ISD::HSUB, 0),
- X86_INTRINSIC_DATA(avx2_pmaxs_b, INTR_TYPE_2OP, ISD::SMAX, 0),
- X86_INTRINSIC_DATA(avx2_pmaxs_d, INTR_TYPE_2OP, ISD::SMAX, 0),
- X86_INTRINSIC_DATA(avx2_pmaxs_w, INTR_TYPE_2OP, ISD::SMAX, 0),
- X86_INTRINSIC_DATA(avx2_pmaxu_b, INTR_TYPE_2OP, ISD::UMAX, 0),
- X86_INTRINSIC_DATA(avx2_pmaxu_d, INTR_TYPE_2OP, ISD::UMAX, 0),
- X86_INTRINSIC_DATA(avx2_pmaxu_w, INTR_TYPE_2OP, ISD::UMAX, 0),
- X86_INTRINSIC_DATA(avx2_pmins_b, INTR_TYPE_2OP, ISD::SMIN, 0),
- X86_INTRINSIC_DATA(avx2_pmins_d, INTR_TYPE_2OP, ISD::SMIN, 0),
- X86_INTRINSIC_DATA(avx2_pmins_w, INTR_TYPE_2OP, ISD::SMIN, 0),
- X86_INTRINSIC_DATA(avx2_pminu_b, INTR_TYPE_2OP, ISD::UMIN, 0),
- X86_INTRINSIC_DATA(avx2_pminu_d, INTR_TYPE_2OP, ISD::UMIN, 0),
- X86_INTRINSIC_DATA(avx2_pminu_w, INTR_TYPE_2OP, ISD::UMIN, 0),
- X86_INTRINSIC_DATA(avx2_pmovsxbd, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovsxbq, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovsxbw, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovsxdq, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovsxwd, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovsxwq, INTR_TYPE_1OP, X86ISD::VSEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovzxbd, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovzxbq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovzxbw, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovzxdq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovzxwd, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(avx2_pmovzxwq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
+ X86_INTRINSIC_DATA(avx2_pmovmskb, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
X86_INTRINSIC_DATA(avx2_pmul_dq, INTR_TYPE_2OP, X86ISD::PMULDQ, 0),
X86_INTRINSIC_DATA(avx2_pmulh_w, INTR_TYPE_2OP, ISD::MULHS, 0),
X86_INTRINSIC_DATA(avx2_pmulhu_w, INTR_TYPE_2OP, ISD::MULHU, 0),
X86_INTRINSIC_DATA(avx2_pmulu_dq, INTR_TYPE_2OP, X86ISD::PMULUDQ, 0),
X86_INTRINSIC_DATA(avx2_psad_bw, INTR_TYPE_2OP, X86ISD::PSADBW, 0),
X86_INTRINSIC_DATA(avx2_pshuf_b, INTR_TYPE_2OP, X86ISD::PSHUFB, 0),
- X86_INTRINSIC_DATA(avx2_psign_b, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
- X86_INTRINSIC_DATA(avx2_psign_d, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
- X86_INTRINSIC_DATA(avx2_psign_w, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
X86_INTRINSIC_DATA(avx2_psll_d, INTR_TYPE_2OP, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx2_psll_q, INTR_TYPE_2OP, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx2_psll_w, INTR_TYPE_2OP, X86ISD::VSHL, 0),
@@ -315,8 +309,8 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx2_psra_w, INTR_TYPE_2OP, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx2_psrai_d, VSHIFT, X86ISD::VSRAI, 0),
X86_INTRINSIC_DATA(avx2_psrai_w, VSHIFT, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx2_psrav_d, INTR_TYPE_2OP, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx2_psrav_d_256, INTR_TYPE_2OP, ISD::SRA, 0),
+ X86_INTRINSIC_DATA(avx2_psrav_d, INTR_TYPE_2OP, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx2_psrav_d_256, INTR_TYPE_2OP, X86ISD::VSRAV, 0),
X86_INTRINSIC_DATA(avx2_psrl_d, INTR_TYPE_2OP, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx2_psrl_q, INTR_TYPE_2OP, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx2_psrl_w, INTR_TYPE_2OP, X86ISD::VSRL, 0),
@@ -327,6 +321,8 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx2_psrlv_d_256, INTR_TYPE_2OP, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx2_psrlv_q, INTR_TYPE_2OP, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx2_psrlv_q_256, INTR_TYPE_2OP, ISD::SRL, 0),
+ X86_INTRINSIC_DATA(avx2_psubs_b, INTR_TYPE_2OP, X86ISD::SUBS, 0),
+ X86_INTRINSIC_DATA(avx2_psubs_w, INTR_TYPE_2OP, X86ISD::SUBS, 0),
X86_INTRINSIC_DATA(avx2_psubus_b, INTR_TYPE_2OP, X86ISD::SUBUS, 0),
X86_INTRINSIC_DATA(avx2_psubus_w, INTR_TYPE_2OP, X86ISD::SUBUS, 0),
X86_INTRINSIC_DATA(avx2_vperm2i128, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
@@ -380,50 +376,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_kunpck_bw, KUNPCK, ISD::CONCAT_VECTORS, 0),
X86_INTRINSIC_DATA(avx512_kunpck_dq, KUNPCK, ISD::CONCAT_VECTORS, 0),
X86_INTRINSIC_DATA(avx512_kunpck_wd, KUNPCK, ISD::CONCAT_VECTORS, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmadd_pd_128, FMA_OP_MASK3, X86ISD::FMADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmadd_pd_256, FMA_OP_MASK3, X86ISD::FMADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmadd_pd_512, FMA_OP_MASK3, X86ISD::FMADD,
- X86ISD::FMADD_RND),
- X86_INTRINSIC_DATA(avx512_mask3_vfmadd_ps_128, FMA_OP_MASK3, X86ISD::FMADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmadd_ps_256, FMA_OP_MASK3, X86ISD::FMADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmadd_ps_512, FMA_OP_MASK3, X86ISD::FMADD,
- X86ISD::FMADD_RND),
-
- X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_pd_128, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_pd_256, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_pd_512, FMA_OP_MASK3, X86ISD::FMADDSUB,
- X86ISD::FMADDSUB_RND),
- X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_ps_128, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_ps_256, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_ps_512, FMA_OP_MASK3, X86ISD::FMADDSUB,
- X86ISD::FMADDSUB_RND),
-
- X86_INTRINSIC_DATA(avx512_mask3_vfmsub_pd_128, FMA_OP_MASK3, X86ISD::FMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsub_pd_256, FMA_OP_MASK3, X86ISD::FMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsub_pd_512, FMA_OP_MASK3, X86ISD::FMSUB,
- X86ISD::FMSUB_RND),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsub_ps_128, FMA_OP_MASK3, X86ISD::FMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsub_ps_256, FMA_OP_MASK3, X86ISD::FMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsub_ps_512, FMA_OP_MASK3, X86ISD::FMSUB,
- X86ISD::FMSUB_RND),
-
- X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_pd_128, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_pd_256, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_pd_512, FMA_OP_MASK3, X86ISD::FMSUBADD,
- X86ISD::FMSUBADD_RND),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_ps_128, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_ps_256, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_ps_512, FMA_OP_MASK3, X86ISD::FMSUBADD,
- X86ISD::FMSUBADD_RND),
-
- X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_pd_128, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_pd_256, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_pd_512, FMA_OP_MASK3, X86ISD::FNMSUB,
- X86ISD::FNMSUB_RND),
- X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_ps_128, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_ps_256, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
- X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_ps_512, FMA_OP_MASK3, X86ISD::FNMSUB,
- X86ISD::FNMSUB_RND),
X86_INTRINSIC_DATA(avx512_mask_add_pd_128, INTR_TYPE_2OP_MASK, ISD::FADD, 0),
X86_INTRINSIC_DATA(avx512_mask_add_pd_256, INTR_TYPE_2OP_MASK, ISD::FADD, 0),
@@ -449,38 +401,10 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_andn_ps_128, INTR_TYPE_2OP_MASK, X86ISD::FANDN, 0),
X86_INTRINSIC_DATA(avx512_mask_andn_ps_256, INTR_TYPE_2OP_MASK, X86ISD::FANDN, 0),
X86_INTRINSIC_DATA(avx512_mask_andn_ps_512, INTR_TYPE_2OP_MASK, X86ISD::FANDN, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_b_128, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_b_256, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_b_512, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_d_128, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_d_256, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_d_512, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_pd_128, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_pd_256, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_pd_512, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_ps_128, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_ps_256, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_ps_512, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_q_128, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_q_256, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_q_512, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_w_128, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_w_256, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_blend_w_512, BLEND, X86ISD::SELECT, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcast_sd_pd_256, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcast_sd_pd_512, INTR_TYPE_1OP_MASK,
+ X86_INTRINSIC_DATA(avx512_mask_broadcastf32x2_256, BRCST32x2_TO_VEC,
X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcast_ss_ps_128, INTR_TYPE_1OP_MASK,
+ X86_INTRINSIC_DATA(avx512_mask_broadcastf32x2_512, BRCST32x2_TO_VEC,
X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcast_ss_ps_256, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcast_ss_ps_512, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcastf32x2_256, INTR_TYPE_1OP_MASK,
- X86ISD::SUBV_BROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcastf32x2_512, INTR_TYPE_1OP_MASK,
- X86ISD::SUBV_BROADCAST, 0),
X86_INTRINSIC_DATA(avx512_mask_broadcastf32x4_256, BRCST_SUBVEC_TO_VEC,
X86ISD::SHUF128, 0),
X86_INTRINSIC_DATA(avx512_mask_broadcastf32x4_512, BRCST_SUBVEC_TO_VEC,
@@ -493,12 +417,12 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::SHUF128, 0),
X86_INTRINSIC_DATA(avx512_mask_broadcastf64x4_512, BRCST_SUBVEC_TO_VEC,
X86ISD::SHUF128, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcasti32x2_128, INTR_TYPE_1OP_MASK,
- X86ISD::SUBV_BROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcasti32x2_256, INTR_TYPE_1OP_MASK,
- X86ISD::SUBV_BROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_mask_broadcasti32x2_512, INTR_TYPE_1OP_MASK,
- X86ISD::SUBV_BROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_broadcasti32x2_128, BRCST32x2_TO_VEC,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_broadcasti32x2_256, BRCST32x2_TO_VEC,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_broadcasti32x2_512, BRCST32x2_TO_VEC,
+ X86ISD::VBROADCAST, 0),
X86_INTRINSIC_DATA(avx512_mask_broadcasti32x4_256, BRCST_SUBVEC_TO_VEC,
X86ISD::SHUF128, 0),
X86_INTRINSIC_DATA(avx512_mask_broadcasti32x4_512, BRCST_SUBVEC_TO_VEC,
@@ -773,6 +697,14 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_q_512, COMPRESS_EXPAND_IN_REG,
X86ISD::EXPAND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_pd_128, FIXUPIMM, X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_pd_256, FIXUPIMM, X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_pd_512, FIXUPIMM, X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_ps_128, FIXUPIMM, X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_ps_256, FIXUPIMM, X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_ps_512, FIXUPIMM, X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_sd, FIXUPIMMS, X86ISD::VFIXUPIMMS, 0),
+ X86_INTRINSIC_DATA(avx512_mask_fixupimm_ss, FIXUPIMMS, X86ISD::VFIXUPIMMS, 0),
X86_INTRINSIC_DATA(avx512_mask_fpclass_pd_128, FPCLASS, X86ISD::VFPCLASS, 0),
X86_INTRINSIC_DATA(avx512_mask_fpclass_pd_256, FPCLASS, X86ISD::VFPCLASS, 0),
X86_INTRINSIC_DATA(avx512_mask_fpclass_pd_512, FPCLASS, X86ISD::VFPCLASS, 0),
@@ -873,28 +805,10 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::FMIN, X86ISD::FMIN_RND),
X86_INTRINSIC_DATA(avx512_mask_min_ss_round, INTR_TYPE_SCALAR_MASK_RM,
X86ISD::FMIN, X86ISD::FMIN_RND),
- X86_INTRINSIC_DATA(avx512_mask_movddup_128, INTR_TYPE_1OP_MASK,
- X86ISD::MOVDDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_movddup_256, INTR_TYPE_1OP_MASK,
- X86ISD::MOVDDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_movddup_512, INTR_TYPE_1OP_MASK,
- X86ISD::MOVDDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_move_sd, INTR_TYPE_SCALAR_MASK,
+ X86_INTRINSIC_DATA(avx512_mask_move_sd, INTR_TYPE_SCALAR_MASK,
X86ISD::MOVSD, 0),
- X86_INTRINSIC_DATA(avx512_mask_move_ss, INTR_TYPE_SCALAR_MASK,
+ X86_INTRINSIC_DATA(avx512_mask_move_ss, INTR_TYPE_SCALAR_MASK,
X86ISD::MOVSS, 0),
- X86_INTRINSIC_DATA(avx512_mask_movshdup_128, INTR_TYPE_1OP_MASK,
- X86ISD::MOVSHDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_movshdup_256, INTR_TYPE_1OP_MASK,
- X86ISD::MOVSHDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_movshdup_512, INTR_TYPE_1OP_MASK,
- X86ISD::MOVSHDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_movsldup_128, INTR_TYPE_1OP_MASK,
- X86ISD::MOVSLDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_movsldup_256, INTR_TYPE_1OP_MASK,
- X86ISD::MOVSLDUP, 0),
- X86_INTRINSIC_DATA(avx512_mask_movsldup_512, INTR_TYPE_1OP_MASK,
- X86ISD::MOVSLDUP, 0),
X86_INTRINSIC_DATA(avx512_mask_mul_pd_128, INTR_TYPE_2OP_MASK, ISD::FMUL, 0),
X86_INTRINSIC_DATA(avx512_mask_mul_pd_256, INTR_TYPE_2OP_MASK, ISD::FMUL, 0),
X86_INTRINSIC_DATA(avx512_mask_mul_pd_512, INTR_TYPE_2OP_MASK, ISD::FMUL,
@@ -961,54 +875,64 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_paddus_w_128, INTR_TYPE_2OP_MASK, X86ISD::ADDUS, 0),
X86_INTRINSIC_DATA(avx512_mask_paddus_w_256, INTR_TYPE_2OP_MASK, X86ISD::ADDUS, 0),
X86_INTRINSIC_DATA(avx512_mask_paddus_w_512, INTR_TYPE_2OP_MASK, X86ISD::ADDUS, 0),
- X86_INTRINSIC_DATA(avx512_mask_palignr_128, INTR_TYPE_3OP_IMM8_MASK,
- X86ISD::PALIGNR, 0),
- X86_INTRINSIC_DATA(avx512_mask_palignr_256, INTR_TYPE_3OP_IMM8_MASK,
- X86ISD::PALIGNR, 0),
- X86_INTRINSIC_DATA(avx512_mask_palignr_512, INTR_TYPE_3OP_IMM8_MASK,
- X86ISD::PALIGNR, 0),
- X86_INTRINSIC_DATA(avx512_mask_pand_d_128, INTR_TYPE_2OP_MASK, ISD::AND, 0),
- X86_INTRINSIC_DATA(avx512_mask_pand_d_256, INTR_TYPE_2OP_MASK, ISD::AND, 0),
- X86_INTRINSIC_DATA(avx512_mask_pand_d_512, INTR_TYPE_2OP_MASK, ISD::AND, 0),
- X86_INTRINSIC_DATA(avx512_mask_pand_q_128, INTR_TYPE_2OP_MASK, ISD::AND, 0),
- X86_INTRINSIC_DATA(avx512_mask_pand_q_256, INTR_TYPE_2OP_MASK, ISD::AND, 0),
- X86_INTRINSIC_DATA(avx512_mask_pand_q_512, INTR_TYPE_2OP_MASK, ISD::AND, 0),
- X86_INTRINSIC_DATA(avx512_mask_pandn_d_128, INTR_TYPE_2OP_MASK, X86ISD::ANDNP, 0),
- X86_INTRINSIC_DATA(avx512_mask_pandn_d_256, INTR_TYPE_2OP_MASK, X86ISD::ANDNP, 0),
- X86_INTRINSIC_DATA(avx512_mask_pandn_d_512, INTR_TYPE_2OP_MASK, X86ISD::ANDNP, 0),
- X86_INTRINSIC_DATA(avx512_mask_pandn_q_128, INTR_TYPE_2OP_MASK, X86ISD::ANDNP, 0),
- X86_INTRINSIC_DATA(avx512_mask_pandn_q_256, INTR_TYPE_2OP_MASK, X86ISD::ANDNP, 0),
- X86_INTRINSIC_DATA(avx512_mask_pandn_q_512, INTR_TYPE_2OP_MASK, X86ISD::ANDNP, 0),
X86_INTRINSIC_DATA(avx512_mask_pavg_b_128, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(avx512_mask_pavg_b_256, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(avx512_mask_pavg_b_512, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(avx512_mask_pavg_w_128, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(avx512_mask_pavg_w_256, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(avx512_mask_pavg_w_512, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_b_128, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_b_256, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_b_512, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_d_128, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_d_256, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_d_512, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_q_128, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_q_256, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_q_512, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_w_128, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_w_256, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpeq_w_512, CMP_MASK, X86ISD::PCMPEQM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_b_128, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_b_256, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_b_512, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_d_128, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_d_256, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_d_512, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_q_128, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_q_256, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_q_512, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_128, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_256, CMP_MASK, X86ISD::PCMPGTM, 0),
- X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_512, CMP_MASK, X86ISD::PCMPGTM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_b_gpr_128, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_b_gpr_256, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_b_gpr_512, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_d_gpr_128, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_d_gpr_256, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_d_gpr_512, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_q_gpr_128, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_q_gpr_256, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_q_gpr_512, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_w_gpr_128, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_w_gpr_256, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pbroadcast_w_gpr_512, INTR_TYPE_1OP_MASK,
+ X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_df_256, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_df_512, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_di_256, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_di_512, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_hi_128, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_hi_256, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_hi_512, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_qi_128, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_qi_256, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_qi_512, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_sf_256, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_sf_512, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_si_256, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_permvar_si_512, VPERM_2OP_MASK,
+ X86ISD::VPERMV, 0),
X86_INTRINSIC_DATA(avx512_mask_pmaddubs_w_128, INTR_TYPE_2OP_MASK,
X86ISD::VPMADDUBSW, 0),
X86_INTRINSIC_DATA(avx512_mask_pmaddubs_w_256, INTR_TYPE_2OP_MASK,
@@ -1273,36 +1197,36 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_pmull_w_128, INTR_TYPE_2OP_MASK, ISD::MUL, 0),
X86_INTRINSIC_DATA(avx512_mask_pmull_w_256, INTR_TYPE_2OP_MASK, ISD::MUL, 0),
X86_INTRINSIC_DATA(avx512_mask_pmull_w_512, INTR_TYPE_2OP_MASK, ISD::MUL, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmultishift_qb_128, INTR_TYPE_2OP_MASK,
+ X86ISD::MULTISHIFT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmultishift_qb_256, INTR_TYPE_2OP_MASK,
+ X86ISD::MULTISHIFT, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmultishift_qb_512, INTR_TYPE_2OP_MASK,
+ X86ISD::MULTISHIFT, 0),
X86_INTRINSIC_DATA(avx512_mask_pmulu_dq_128, INTR_TYPE_2OP_MASK,
X86ISD::PMULUDQ, 0),
X86_INTRINSIC_DATA(avx512_mask_pmulu_dq_256, INTR_TYPE_2OP_MASK,
X86ISD::PMULUDQ, 0),
X86_INTRINSIC_DATA(avx512_mask_pmulu_dq_512, INTR_TYPE_2OP_MASK,
X86ISD::PMULUDQ, 0),
- X86_INTRINSIC_DATA(avx512_mask_por_d_128, INTR_TYPE_2OP_MASK, ISD::OR, 0),
- X86_INTRINSIC_DATA(avx512_mask_por_d_256, INTR_TYPE_2OP_MASK, ISD::OR, 0),
- X86_INTRINSIC_DATA(avx512_mask_por_d_512, INTR_TYPE_2OP_MASK, ISD::OR, 0),
- X86_INTRINSIC_DATA(avx512_mask_por_q_128, INTR_TYPE_2OP_MASK, ISD::OR, 0),
- X86_INTRINSIC_DATA(avx512_mask_por_q_256, INTR_TYPE_2OP_MASK, ISD::OR, 0),
- X86_INTRINSIC_DATA(avx512_mask_por_q_512, INTR_TYPE_2OP_MASK, ISD::OR, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_d_128, INTR_TYPE_2OP_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_d_256, INTR_TYPE_2OP_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_d_512, INTR_TYPE_2OP_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_q_128, INTR_TYPE_2OP_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_q_256, INTR_TYPE_2OP_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_q_512, INTR_TYPE_2OP_MASK, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_prol_d_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_prol_d_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_prol_d_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_prol_q_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_prol_q_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_prol_q_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
X86_INTRINSIC_DATA(avx512_mask_prolv_d_128, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
X86_INTRINSIC_DATA(avx512_mask_prolv_d_256, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
X86_INTRINSIC_DATA(avx512_mask_prolv_d_512, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
X86_INTRINSIC_DATA(avx512_mask_prolv_q_128, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
X86_INTRINSIC_DATA(avx512_mask_prolv_q_256, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
X86_INTRINSIC_DATA(avx512_mask_prolv_q_512, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_d_128, INTR_TYPE_2OP_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_d_256, INTR_TYPE_2OP_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_d_512, INTR_TYPE_2OP_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_q_128, INTR_TYPE_2OP_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_q_256, INTR_TYPE_2OP_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_q_512, INTR_TYPE_2OP_MASK, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pror_d_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pror_d_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pror_d_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pror_q_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pror_q_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pror_q_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
X86_INTRINSIC_DATA(avx512_mask_prorv_d_128, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
X86_INTRINSIC_DATA(avx512_mask_prorv_d_256, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
X86_INTRINSIC_DATA(avx512_mask_prorv_d_512, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
@@ -1315,44 +1239,26 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::PSHUFB, 0),
X86_INTRINSIC_DATA(avx512_mask_pshuf_b_512, INTR_TYPE_2OP_MASK,
X86ISD::PSHUFB, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshuf_d_128, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFD, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshuf_d_256, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFD, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshuf_d_512, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFD, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshufh_w_128, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFHW, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshufh_w_256, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFHW, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshufh_w_512, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFHW, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshufl_w_128, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFLW, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshufl_w_256, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFLW, 0),
- X86_INTRINSIC_DATA(avx512_mask_pshufl_w_512, INTR_TYPE_2OP_MASK,
- X86ISD::PSHUFLW, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_d, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_d_128, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_d_256, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_di_128, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_di_256, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_di_512, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_di_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_di_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_di_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_q, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_q_128, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_q_256, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_qi_128, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_qi_256, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_qi_512, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_qi_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_qi_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_qi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_w_128, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_w_256, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psll_w_512, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_wi_128, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_wi_256, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psll_wi_512, INTR_TYPE_2OP_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pslli_d, VSHIFT_MASK, X86ISD::VSHLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pslli_q, VSHIFT_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_wi_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_wi_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psll_wi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSHLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psllv_d, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psllv_q, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psllv16_hi, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psllv2_di, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psllv32hi, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
@@ -1360,57 +1266,53 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_psllv4_si, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psllv8_hi, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psllv8_si, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psllv_d, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psllv_q, INTR_TYPE_2OP_MASK, ISD::SHL, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_d, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_d_128, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_d_256, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_di_128, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_di_256, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_di_512, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_di_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_di_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_di_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_q, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_q_128, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_q_256, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_qi_128, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_qi_256, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_qi_512, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_qi_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_qi_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_qi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_w_128, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_w_256, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
X86_INTRINSIC_DATA(avx512_mask_psra_w_512, INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_wi_128, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_wi_256, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psra_wi_512, INTR_TYPE_2OP_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrai_d, VSHIFT_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrai_q, VSHIFT_MASK, X86ISD::VSRAI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav16_hi, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav32_hi, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav4_si, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav8_hi, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav8_si, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav_d, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav_q, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav_q_128, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrav_q_256, INTR_TYPE_2OP_MASK, ISD::SRA, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_wi_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_wi_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psra_wi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRAI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav_d, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav_q, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav_q_128, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav_q_256, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav16_hi, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav32_hi, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav4_si, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav8_hi, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrav8_si, INTR_TYPE_2OP_MASK, X86ISD::VSRAV, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_d, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_d_128, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_d_256, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_di_128, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_di_256, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_di_512, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_di_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_di_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_di_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_q, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_q_128, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_q_256, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_qi_128, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_qi_256, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_qi_512, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_qi_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_qi_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_qi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_w_128, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_w_256, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrl_w_512, INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_wi_128, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_wi_256, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrl_wi_512, INTR_TYPE_2OP_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrli_d, VSHIFT_MASK, X86ISD::VSRLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrli_q, VSHIFT_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_wi_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_wi_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrl_wi_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrlv_d, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
+ X86_INTRINSIC_DATA(avx512_mask_psrlv_q, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrlv16_hi, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrlv2_di, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrlv32hi, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
@@ -1418,8 +1320,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_psrlv4_si, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrlv8_hi, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psrlv8_si, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrlv_d, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
- X86_INTRINSIC_DATA(avx512_mask_psrlv_q, INTR_TYPE_2OP_MASK, ISD::SRL, 0),
X86_INTRINSIC_DATA(avx512_mask_psub_b_128, INTR_TYPE_2OP_MASK, ISD::SUB, 0),
X86_INTRINSIC_DATA(avx512_mask_psub_b_256, INTR_TYPE_2OP_MASK, ISD::SUB, 0),
X86_INTRINSIC_DATA(avx512_mask_psub_b_512, INTR_TYPE_2OP_MASK, ISD::SUB, 0),
@@ -1456,60 +1356,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::VPTERNLOG, 0),
X86_INTRINSIC_DATA(avx512_mask_pternlog_q_512, TERLOG_OP_MASK,
X86ISD::VPTERNLOG, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhb_w_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhb_w_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhb_w_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhd_q_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhd_q_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhd_q_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhqd_q_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhqd_q_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhqd_q_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhw_d_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhw_d_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckhw_d_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklb_w_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklb_w_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklb_w_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckld_q_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckld_q_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpckld_q_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklqd_q_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklqd_q_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklqd_q_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklw_d_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklw_d_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_punpcklw_d_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_pxor_d_128, INTR_TYPE_2OP_MASK, ISD::XOR, 0),
- X86_INTRINSIC_DATA(avx512_mask_pxor_d_256, INTR_TYPE_2OP_MASK, ISD::XOR, 0),
- X86_INTRINSIC_DATA(avx512_mask_pxor_d_512, INTR_TYPE_2OP_MASK, ISD::XOR, 0),
- X86_INTRINSIC_DATA(avx512_mask_pxor_q_128, INTR_TYPE_2OP_MASK, ISD::XOR, 0),
- X86_INTRINSIC_DATA(avx512_mask_pxor_q_256, INTR_TYPE_2OP_MASK, ISD::XOR, 0),
- X86_INTRINSIC_DATA(avx512_mask_pxor_q_512, INTR_TYPE_2OP_MASK, ISD::XOR, 0),
X86_INTRINSIC_DATA(avx512_mask_range_pd_128, INTR_TYPE_3OP_MASK_RM, X86ISD::VRANGE, 0),
X86_INTRINSIC_DATA(avx512_mask_range_pd_256, INTR_TYPE_3OP_MASK_RM, X86ISD::VRANGE, 0),
X86_INTRINSIC_DATA(avx512_mask_range_pd_512, INTR_TYPE_3OP_MASK_RM, X86ISD::VRANGE, 0),
@@ -1549,9 +1395,9 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_scalef_ps_512, INTR_TYPE_2OP_MASK_RM,
X86ISD::SCALEF, 0),
X86_INTRINSIC_DATA(avx512_mask_scalef_sd, INTR_TYPE_SCALAR_MASK_RM,
- X86ISD::SCALEF, 0),
+ X86ISD::SCALEFS, 0),
X86_INTRINSIC_DATA(avx512_mask_scalef_ss, INTR_TYPE_SCALAR_MASK_RM,
- X86ISD::SCALEF, 0),
+ X86ISD::SCALEFS, 0),
X86_INTRINSIC_DATA(avx512_mask_shuf_f32x4, INTR_TYPE_3OP_IMM8_MASK,
X86ISD::SHUF128, 0),
X86_INTRINSIC_DATA(avx512_mask_shuf_f32x4_256, INTR_TYPE_3OP_IMM8_MASK,
@@ -1616,30 +1462,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_ucmp_w_128, CMP_MASK_CC, X86ISD::CMPMU, 0),
X86_INTRINSIC_DATA(avx512_mask_ucmp_w_256, CMP_MASK_CC, X86ISD::CMPMU, 0),
X86_INTRINSIC_DATA(avx512_mask_ucmp_w_512, CMP_MASK_CC, X86ISD::CMPMU, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckh_pd_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckh_pd_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckh_pd_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckh_ps_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckh_ps_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckh_ps_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKH, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckl_pd_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckl_pd_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckl_pd_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckl_ps_128, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckl_ps_256, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
- X86_INTRINSIC_DATA(avx512_mask_unpckl_ps_512, INTR_TYPE_2OP_MASK,
- X86ISD::UNPCKL, 0),
X86_INTRINSIC_DATA(avx512_mask_valign_d_128, INTR_TYPE_3OP_IMM8_MASK,
X86ISD::VALIGN, 0),
X86_INTRINSIC_DATA(avx512_mask_valign_d_256, INTR_TYPE_3OP_IMM8_MASK,
@@ -1673,6 +1495,8 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_vfmadd_ps_512, FMA_OP_MASK, X86ISD::FMADD,
X86ISD::FMADD_RND),
+ X86_INTRINSIC_DATA(avx512_mask_vfmadd_sd, FMA_OP_SCALAR_MASK, X86ISD::FMADD_RND, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vfmadd_ss, FMA_OP_SCALAR_MASK, X86ISD::FMADD_RND, 0),
X86_INTRINSIC_DATA(avx512_mask_vfmaddsub_pd_128, FMA_OP_MASK, X86ISD::FMADDSUB, 0),
X86_INTRINSIC_DATA(avx512_mask_vfmaddsub_pd_256, FMA_OP_MASK, X86ISD::FMADDSUB, 0),
X86_INTRINSIC_DATA(avx512_mask_vfmaddsub_pd_512, FMA_OP_MASK, X86ISD::FMADDSUB,
@@ -1730,18 +1554,12 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::VPERMIV3, 0),
X86_INTRINSIC_DATA(avx512_mask_vpermi2var_q_512, VPERM_3OP_MASK,
X86ISD::VPERMIV3, 0),
- X86_INTRINSIC_DATA(avx512_mask_vpermil_pd_128, INTR_TYPE_2OP_IMM8_MASK,
- X86ISD::VPERMILPI, 0),
- X86_INTRINSIC_DATA(avx512_mask_vpermil_pd_256, INTR_TYPE_2OP_IMM8_MASK,
- X86ISD::VPERMILPI, 0),
- X86_INTRINSIC_DATA(avx512_mask_vpermil_pd_512, INTR_TYPE_2OP_IMM8_MASK,
- X86ISD::VPERMILPI, 0),
- X86_INTRINSIC_DATA(avx512_mask_vpermil_ps_128, INTR_TYPE_2OP_IMM8_MASK,
- X86ISD::VPERMILPI, 0),
- X86_INTRINSIC_DATA(avx512_mask_vpermil_ps_256, INTR_TYPE_2OP_IMM8_MASK,
- X86ISD::VPERMILPI, 0),
- X86_INTRINSIC_DATA(avx512_mask_vpermil_ps_512, INTR_TYPE_2OP_IMM8_MASK,
- X86ISD::VPERMILPI, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpermi2var_qi_128, VPERM_3OP_MASK,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpermi2var_qi_256, VPERM_3OP_MASK,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpermi2var_qi_512, VPERM_3OP_MASK,
+ X86ISD::VPERMV3, 0),
X86_INTRINSIC_DATA(avx512_mask_vpermilvar_pd_128, INTR_TYPE_2OP_MASK,
X86ISD::VPERMILPV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpermilvar_pd_256, INTR_TYPE_2OP_MASK,
@@ -1784,12 +1602,92 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::VPERMV3, 0),
X86_INTRINSIC_DATA(avx512_mask_vpermt2var_q_512, VPERM_3OP_MASK,
X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpermt2var_qi_128, VPERM_3OP_MASK,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpermt2var_qi_256, VPERM_3OP_MASK,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpermt2var_qi_512, VPERM_3OP_MASK,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52h_uq_128 , FMA_OP_MASK,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52h_uq_256 , FMA_OP_MASK,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52h_uq_512 , FMA_OP_MASK,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52l_uq_128 , FMA_OP_MASK,
+ X86ISD::VPMADD52L, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52l_uq_256 , FMA_OP_MASK,
+ X86ISD::VPMADD52L, 0),
+ X86_INTRINSIC_DATA(avx512_mask_vpmadd52l_uq_512 , FMA_OP_MASK,
+ X86ISD::VPMADD52L, 0),
X86_INTRINSIC_DATA(avx512_mask_xor_pd_128, INTR_TYPE_2OP_MASK, X86ISD::FXOR, 0),
X86_INTRINSIC_DATA(avx512_mask_xor_pd_256, INTR_TYPE_2OP_MASK, X86ISD::FXOR, 0),
X86_INTRINSIC_DATA(avx512_mask_xor_pd_512, INTR_TYPE_2OP_MASK, X86ISD::FXOR, 0),
X86_INTRINSIC_DATA(avx512_mask_xor_ps_128, INTR_TYPE_2OP_MASK, X86ISD::FXOR, 0),
X86_INTRINSIC_DATA(avx512_mask_xor_ps_256, INTR_TYPE_2OP_MASK, X86ISD::FXOR, 0),
X86_INTRINSIC_DATA(avx512_mask_xor_ps_512, INTR_TYPE_2OP_MASK, X86ISD::FXOR, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_pd_128, FMA_OP_MASK3, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_pd_256, FMA_OP_MASK3, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_pd_512, FMA_OP_MASK3, X86ISD::FMADD,
+ X86ISD::FMADD_RND),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_ps_128, FMA_OP_MASK3, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_ps_256, FMA_OP_MASK3, X86ISD::FMADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_ps_512, FMA_OP_MASK3, X86ISD::FMADD,
+ X86ISD::FMADD_RND),
+
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_sd, FMA_OP_SCALAR_MASK3, X86ISD::FMADD_RND, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmadd_ss, FMA_OP_SCALAR_MASK3, X86ISD::FMADD_RND, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_pd_128, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_pd_256, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_pd_512, FMA_OP_MASK3, X86ISD::FMADDSUB,
+ X86ISD::FMADDSUB_RND),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_ps_128, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_ps_256, FMA_OP_MASK3, X86ISD::FMADDSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmaddsub_ps_512, FMA_OP_MASK3, X86ISD::FMADDSUB,
+ X86ISD::FMADDSUB_RND),
+
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsub_pd_128, FMA_OP_MASK3, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsub_pd_256, FMA_OP_MASK3, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsub_pd_512, FMA_OP_MASK3, X86ISD::FMSUB,
+ X86ISD::FMSUB_RND),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsub_ps_128, FMA_OP_MASK3, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsub_ps_256, FMA_OP_MASK3, X86ISD::FMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsub_ps_512, FMA_OP_MASK3, X86ISD::FMSUB,
+ X86ISD::FMSUB_RND),
+
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_pd_128, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_pd_256, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_pd_512, FMA_OP_MASK3, X86ISD::FMSUBADD,
+ X86ISD::FMSUBADD_RND),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_ps_128, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_ps_256, FMA_OP_MASK3, X86ISD::FMSUBADD, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfmsubadd_ps_512, FMA_OP_MASK3, X86ISD::FMSUBADD,
+ X86ISD::FMSUBADD_RND),
+
+ X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_pd_128, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_pd_256, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_pd_512, FMA_OP_MASK3, X86ISD::FNMSUB,
+ X86ISD::FNMSUB_RND),
+ X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_ps_128, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_ps_256, FMA_OP_MASK3, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(avx512_mask3_vfnmsub_ps_512, FMA_OP_MASK3, X86ISD::FNMSUB,
+ X86ISD::FNMSUB_RND),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_pd_128, FIXUPIMM_MASKZ,
+ X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_pd_256, FIXUPIMM_MASKZ,
+ X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_pd_512, FIXUPIMM_MASKZ,
+ X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_ps_128, FIXUPIMM_MASKZ,
+ X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_ps_256, FIXUPIMM_MASKZ,
+ X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_ps_512, FIXUPIMM_MASKZ,
+ X86ISD::VFIXUPIMM, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_sd, FIXUPIMMS_MASKZ,
+ X86ISD::VFIXUPIMMS, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_fixupimm_ss, FIXUPIMMS_MASKZ,
+ X86ISD::VFIXUPIMMS, 0),
X86_INTRINSIC_DATA(avx512_maskz_pternlog_d_128, TERLOG_OP_MASKZ,
X86ISD::VPTERNLOG, 0),
X86_INTRINSIC_DATA(avx512_maskz_pternlog_d_256, TERLOG_OP_MASKZ,
@@ -1811,6 +1709,8 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_maskz_vfmadd_ps_512, FMA_OP_MASKZ, X86ISD::FMADD,
X86ISD::FMADD_RND),
+ X86_INTRINSIC_DATA(avx512_maskz_vfmadd_sd, FMA_OP_SCALAR_MASKZ, X86ISD::FMADD_RND, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vfmadd_ss, FMA_OP_SCALAR_MASKZ, X86ISD::FMADD_RND, 0),
X86_INTRINSIC_DATA(avx512_maskz_vfmaddsub_pd_128, FMA_OP_MASKZ, X86ISD::FMADDSUB, 0),
X86_INTRINSIC_DATA(avx512_maskz_vfmaddsub_pd_256, FMA_OP_MASKZ, X86ISD::FMADDSUB, 0),
X86_INTRINSIC_DATA(avx512_maskz_vfmaddsub_pd_512, FMA_OP_MASKZ, X86ISD::FMADDSUB,
@@ -1850,41 +1750,57 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::VPERMV3, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpermt2var_q_512, VPERM_3OP_MASKZ,
X86ISD::VPERMV3, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastb_128, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastb_256, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastb_512, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastd_128, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastd_256, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastd_512, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastq_128, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastq_256, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastq_512, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastw_128, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastw_256, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
- X86_INTRINSIC_DATA(avx512_pbroadcastw_512, INTR_TYPE_1OP_MASK,
- X86ISD::VBROADCAST, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpermt2var_qi_128, VPERM_3OP_MASKZ,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpermt2var_qi_256, VPERM_3OP_MASKZ,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpermt2var_qi_512, VPERM_3OP_MASKZ,
+ X86ISD::VPERMV3, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52h_uq_128, FMA_OP_MASKZ,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52h_uq_256, FMA_OP_MASKZ,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52h_uq_512, FMA_OP_MASKZ,
+ X86ISD::VPMADD52H, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52l_uq_128, FMA_OP_MASKZ,
+ X86ISD::VPMADD52L, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52l_uq_256, FMA_OP_MASKZ,
+ X86ISD::VPMADD52L, 0),
+ X86_INTRINSIC_DATA(avx512_maskz_vpmadd52l_uq_512, FMA_OP_MASKZ,
+ X86ISD::VPMADD52L, 0),
X86_INTRINSIC_DATA(avx512_psad_bw_512, INTR_TYPE_2OP, X86ISD::PSADBW, 0),
- X86_INTRINSIC_DATA(avx512_psll_dq_512, INTR_TYPE_2OP_IMM8, X86ISD::VSHLDQ, 0),
- X86_INTRINSIC_DATA(avx512_psrl_dq_512, INTR_TYPE_2OP_IMM8, X86ISD::VSRLDQ, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_b_128, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_b_256, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_b_512, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_d_128, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_d_256, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_d_512, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_q_128, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_q_256, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_q_512, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_w_128, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_w_256, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestm_w_512, CMP_MASK, X86ISD::TESTM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_b_128, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_b_256, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_b_512, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_d_128, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_d_256, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_d_512, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_q_128, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_q_256, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_q_512, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_w_128, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_w_256, CMP_MASK, X86ISD::TESTNM, 0),
+ X86_INTRINSIC_DATA(avx512_ptestnm_w_512, CMP_MASK, X86ISD::TESTNM, 0),
X86_INTRINSIC_DATA(avx512_rcp14_pd_128, INTR_TYPE_1OP_MASK, X86ISD::FRCP, 0),
X86_INTRINSIC_DATA(avx512_rcp14_pd_256, INTR_TYPE_1OP_MASK, X86ISD::FRCP, 0),
X86_INTRINSIC_DATA(avx512_rcp14_pd_512, INTR_TYPE_1OP_MASK, X86ISD::FRCP, 0),
X86_INTRINSIC_DATA(avx512_rcp14_ps_128, INTR_TYPE_1OP_MASK, X86ISD::FRCP, 0),
X86_INTRINSIC_DATA(avx512_rcp14_ps_256, INTR_TYPE_1OP_MASK, X86ISD::FRCP, 0),
X86_INTRINSIC_DATA(avx512_rcp14_ps_512, INTR_TYPE_1OP_MASK, X86ISD::FRCP, 0),
- X86_INTRINSIC_DATA(avx512_rcp14_sd, INTR_TYPE_SCALAR_MASK, X86ISD::FRCP, 0),
- X86_INTRINSIC_DATA(avx512_rcp14_ss, INTR_TYPE_SCALAR_MASK, X86ISD::FRCP, 0),
+ X86_INTRINSIC_DATA(avx512_rcp14_sd, INTR_TYPE_SCALAR_MASK, X86ISD::FRCPS, 0),
+ X86_INTRINSIC_DATA(avx512_rcp14_ss, INTR_TYPE_SCALAR_MASK, X86ISD::FRCPS, 0),
X86_INTRINSIC_DATA(avx512_rcp28_pd, INTR_TYPE_1OP_MASK_RM, X86ISD::RCP28, 0),
X86_INTRINSIC_DATA(avx512_rcp28_ps, INTR_TYPE_1OP_MASK_RM, X86ISD::RCP28, 0),
X86_INTRINSIC_DATA(avx512_rcp28_sd, INTR_TYPE_SCALAR_MASK_RM, X86ISD::RCP28, 0),
@@ -1895,29 +1811,30 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_rsqrt14_ps_128, INTR_TYPE_1OP_MASK, X86ISD::FRSQRT, 0),
X86_INTRINSIC_DATA(avx512_rsqrt14_ps_256, INTR_TYPE_1OP_MASK, X86ISD::FRSQRT, 0),
X86_INTRINSIC_DATA(avx512_rsqrt14_ps_512, INTR_TYPE_1OP_MASK, X86ISD::FRSQRT, 0),
- X86_INTRINSIC_DATA(avx512_rsqrt14_sd, INTR_TYPE_SCALAR_MASK, X86ISD::FRSQRT, 0),
- X86_INTRINSIC_DATA(avx512_rsqrt14_ss, INTR_TYPE_SCALAR_MASK, X86ISD::FRSQRT, 0),
+ X86_INTRINSIC_DATA(avx512_rsqrt14_sd, INTR_TYPE_SCALAR_MASK, X86ISD::FRSQRTS, 0),
+ X86_INTRINSIC_DATA(avx512_rsqrt14_ss, INTR_TYPE_SCALAR_MASK, X86ISD::FRSQRTS, 0),
X86_INTRINSIC_DATA(avx512_rsqrt28_pd, INTR_TYPE_1OP_MASK_RM,X86ISD::RSQRT28, 0),
X86_INTRINSIC_DATA(avx512_rsqrt28_ps, INTR_TYPE_1OP_MASK_RM,X86ISD::RSQRT28, 0),
X86_INTRINSIC_DATA(avx512_rsqrt28_sd, INTR_TYPE_SCALAR_MASK_RM,X86ISD::RSQRT28, 0),
X86_INTRINSIC_DATA(avx512_rsqrt28_ss, INTR_TYPE_SCALAR_MASK_RM,X86ISD::RSQRT28, 0),
X86_INTRINSIC_DATA(avx512_vcomi_sd, COMI_RM, X86ISD::COMI, X86ISD::UCOMI),
X86_INTRINSIC_DATA(avx512_vcomi_ss, COMI_RM, X86ISD::COMI, X86ISD::UCOMI),
- X86_INTRINSIC_DATA(avx_hadd_pd_256, INTR_TYPE_2OP, X86ISD::FHADD, 0),
- X86_INTRINSIC_DATA(avx_hadd_ps_256, INTR_TYPE_2OP, X86ISD::FHADD, 0),
- X86_INTRINSIC_DATA(avx_hsub_pd_256, INTR_TYPE_2OP, X86ISD::FHSUB, 0),
- X86_INTRINSIC_DATA(avx_hsub_ps_256, INTR_TYPE_2OP, X86ISD::FHSUB, 0),
- X86_INTRINSIC_DATA(avx_max_pd_256, INTR_TYPE_2OP, X86ISD::FMAX, 0),
- X86_INTRINSIC_DATA(avx_max_ps_256, INTR_TYPE_2OP, X86ISD::FMAX, 0),
- X86_INTRINSIC_DATA(avx_min_pd_256, INTR_TYPE_2OP, X86ISD::FMIN, 0),
- X86_INTRINSIC_DATA(avx_min_ps_256, INTR_TYPE_2OP, X86ISD::FMIN, 0),
- X86_INTRINSIC_DATA(avx_rcp_ps_256, INTR_TYPE_1OP, X86ISD::FRCP, 0),
- X86_INTRINSIC_DATA(avx_rsqrt_ps_256, INTR_TYPE_1OP, X86ISD::FRSQRT, 0),
- X86_INTRINSIC_DATA(avx_sqrt_pd_256, INTR_TYPE_1OP, ISD::FSQRT, 0),
- X86_INTRINSIC_DATA(avx_sqrt_ps_256, INTR_TYPE_1OP, ISD::FSQRT, 0),
- X86_INTRINSIC_DATA(avx_vperm2f128_pd_256, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
- X86_INTRINSIC_DATA(avx_vperm2f128_ps_256, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
- X86_INTRINSIC_DATA(avx_vperm2f128_si_256, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtsd2si32, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_SINT_RND, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtsd2si64, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_SINT_RND, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtsd2usi32, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_UINT_RND, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtsd2usi64, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_UINT_RND, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtss2si32, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_SINT_RND, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtss2si64, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_SINT_RND, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtss2usi32, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_UINT_RND, 0),
+ X86_INTRINSIC_DATA(avx512_vcvtss2usi64, INTR_TYPE_2OP,
+ X86ISD::SCALAR_FP_TO_UINT_RND, 0),
X86_INTRINSIC_DATA(fma_vfmadd_pd, INTR_TYPE_3OP, X86ISD::FMADD, 0),
X86_INTRINSIC_DATA(fma_vfmadd_pd_256, INTR_TYPE_3OP, X86ISD::FMADD, 0),
X86_INTRINSIC_DATA(fma_vfmadd_ps, INTR_TYPE_3OP, X86ISD::FMADD, 0),
@@ -1942,6 +1859,24 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(fma_vfnmsub_pd_256, INTR_TYPE_3OP, X86ISD::FNMSUB, 0),
X86_INTRINSIC_DATA(fma_vfnmsub_ps, INTR_TYPE_3OP, X86ISD::FNMSUB, 0),
X86_INTRINSIC_DATA(fma_vfnmsub_ps_256, INTR_TYPE_3OP, X86ISD::FNMSUB, 0),
+ X86_INTRINSIC_DATA(sse_comieq_ss, COMI, X86ISD::COMI, ISD::SETEQ),
+ X86_INTRINSIC_DATA(sse_comige_ss, COMI, X86ISD::COMI, ISD::SETGE),
+ X86_INTRINSIC_DATA(sse_comigt_ss, COMI, X86ISD::COMI, ISD::SETGT),
+ X86_INTRINSIC_DATA(sse_comile_ss, COMI, X86ISD::COMI, ISD::SETLE),
+ X86_INTRINSIC_DATA(sse_comilt_ss, COMI, X86ISD::COMI, ISD::SETLT),
+ X86_INTRINSIC_DATA(sse_comineq_ss, COMI, X86ISD::COMI, ISD::SETNE),
+ X86_INTRINSIC_DATA(sse_max_ps, INTR_TYPE_2OP, X86ISD::FMAX, 0),
+ X86_INTRINSIC_DATA(sse_min_ps, INTR_TYPE_2OP, X86ISD::FMIN, 0),
+ X86_INTRINSIC_DATA(sse_movmsk_ps, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
+ X86_INTRINSIC_DATA(sse_rcp_ps, INTR_TYPE_1OP, X86ISD::FRCP, 0),
+ X86_INTRINSIC_DATA(sse_rsqrt_ps, INTR_TYPE_1OP, X86ISD::FRSQRT, 0),
+ X86_INTRINSIC_DATA(sse_sqrt_ps, INTR_TYPE_1OP, ISD::FSQRT, 0),
+ X86_INTRINSIC_DATA(sse_ucomieq_ss, COMI, X86ISD::UCOMI, ISD::SETEQ),
+ X86_INTRINSIC_DATA(sse_ucomige_ss, COMI, X86ISD::UCOMI, ISD::SETGE),
+ X86_INTRINSIC_DATA(sse_ucomigt_ss, COMI, X86ISD::UCOMI, ISD::SETGT),
+ X86_INTRINSIC_DATA(sse_ucomile_ss, COMI, X86ISD::UCOMI, ISD::SETLE),
+ X86_INTRINSIC_DATA(sse_ucomilt_ss, COMI, X86ISD::UCOMI, ISD::SETLT),
+ X86_INTRINSIC_DATA(sse_ucomineq_ss, COMI, X86ISD::UCOMI, ISD::SETNE),
X86_INTRINSIC_DATA(sse2_comieq_sd, COMI, X86ISD::COMI, ISD::SETEQ),
X86_INTRINSIC_DATA(sse2_comige_sd, COMI, X86ISD::COMI, ISD::SETGE),
X86_INTRINSIC_DATA(sse2_comigt_sd, COMI, X86ISD::COMI, ISD::SETGT),
@@ -1950,22 +1885,21 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(sse2_comineq_sd, COMI, X86ISD::COMI, ISD::SETNE),
X86_INTRINSIC_DATA(sse2_max_pd, INTR_TYPE_2OP, X86ISD::FMAX, 0),
X86_INTRINSIC_DATA(sse2_min_pd, INTR_TYPE_2OP, X86ISD::FMIN, 0),
+ X86_INTRINSIC_DATA(sse2_movmsk_pd, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
X86_INTRINSIC_DATA(sse2_packssdw_128, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
X86_INTRINSIC_DATA(sse2_packsswb_128, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
X86_INTRINSIC_DATA(sse2_packuswb_128, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
+ X86_INTRINSIC_DATA(sse2_padds_b, INTR_TYPE_2OP, X86ISD::ADDS, 0),
+ X86_INTRINSIC_DATA(sse2_padds_w, INTR_TYPE_2OP, X86ISD::ADDS, 0),
+ X86_INTRINSIC_DATA(sse2_paddus_b, INTR_TYPE_2OP, X86ISD::ADDUS, 0),
+ X86_INTRINSIC_DATA(sse2_paddus_w, INTR_TYPE_2OP, X86ISD::ADDUS, 0),
X86_INTRINSIC_DATA(sse2_pavg_b, INTR_TYPE_2OP, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(sse2_pavg_w, INTR_TYPE_2OP, X86ISD::AVG, 0),
- X86_INTRINSIC_DATA(sse2_pmaxs_w, INTR_TYPE_2OP, ISD::SMAX, 0),
- X86_INTRINSIC_DATA(sse2_pmaxu_b, INTR_TYPE_2OP, ISD::UMAX, 0),
- X86_INTRINSIC_DATA(sse2_pmins_w, INTR_TYPE_2OP, ISD::SMIN, 0),
- X86_INTRINSIC_DATA(sse2_pminu_b, INTR_TYPE_2OP, ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(sse2_pmovmskb_128, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
X86_INTRINSIC_DATA(sse2_pmulh_w, INTR_TYPE_2OP, ISD::MULHS, 0),
X86_INTRINSIC_DATA(sse2_pmulhu_w, INTR_TYPE_2OP, ISD::MULHU, 0),
X86_INTRINSIC_DATA(sse2_pmulu_dq, INTR_TYPE_2OP, X86ISD::PMULUDQ, 0),
X86_INTRINSIC_DATA(sse2_psad_bw, INTR_TYPE_2OP, X86ISD::PSADBW, 0),
- X86_INTRINSIC_DATA(sse2_pshuf_d, INTR_TYPE_2OP, X86ISD::PSHUFD, 0),
- X86_INTRINSIC_DATA(sse2_pshufh_w, INTR_TYPE_2OP, X86ISD::PSHUFHW, 0),
- X86_INTRINSIC_DATA(sse2_pshufl_w, INTR_TYPE_2OP, X86ISD::PSHUFLW, 0),
X86_INTRINSIC_DATA(sse2_psll_d, INTR_TYPE_2OP, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(sse2_psll_q, INTR_TYPE_2OP, X86ISD::VSHL, 0),
X86_INTRINSIC_DATA(sse2_psll_w, INTR_TYPE_2OP, X86ISD::VSHL, 0),
@@ -1982,6 +1916,8 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(sse2_psrli_d, VSHIFT, X86ISD::VSRLI, 0),
X86_INTRINSIC_DATA(sse2_psrli_q, VSHIFT, X86ISD::VSRLI, 0),
X86_INTRINSIC_DATA(sse2_psrli_w, VSHIFT, X86ISD::VSRLI, 0),
+ X86_INTRINSIC_DATA(sse2_psubs_b, INTR_TYPE_2OP, X86ISD::SUBS, 0),
+ X86_INTRINSIC_DATA(sse2_psubs_w, INTR_TYPE_2OP, X86ISD::SUBS, 0),
X86_INTRINSIC_DATA(sse2_psubus_b, INTR_TYPE_2OP, X86ISD::SUBUS, 0),
X86_INTRINSIC_DATA(sse2_psubus_w, INTR_TYPE_2OP, X86ISD::SUBUS, 0),
X86_INTRINSIC_DATA(sse2_sqrt_pd, INTR_TYPE_1OP, ISD::FSQRT, 0),
@@ -1997,48 +1933,17 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(sse3_hsub_ps, INTR_TYPE_2OP, X86ISD::FHSUB, 0),
X86_INTRINSIC_DATA(sse41_insertps, INTR_TYPE_3OP, X86ISD::INSERTPS, 0),
X86_INTRINSIC_DATA(sse41_packusdw, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
- X86_INTRINSIC_DATA(sse41_pmaxsb, INTR_TYPE_2OP, ISD::SMAX, 0),
- X86_INTRINSIC_DATA(sse41_pmaxsd, INTR_TYPE_2OP, ISD::SMAX, 0),
- X86_INTRINSIC_DATA(sse41_pmaxud, INTR_TYPE_2OP, ISD::UMAX, 0),
- X86_INTRINSIC_DATA(sse41_pmaxuw, INTR_TYPE_2OP, ISD::UMAX, 0),
- X86_INTRINSIC_DATA(sse41_pminsb, INTR_TYPE_2OP, ISD::SMIN, 0),
- X86_INTRINSIC_DATA(sse41_pminsd, INTR_TYPE_2OP, ISD::SMIN, 0),
- X86_INTRINSIC_DATA(sse41_pminud, INTR_TYPE_2OP, ISD::UMIN, 0),
- X86_INTRINSIC_DATA(sse41_pminuw, INTR_TYPE_2OP, ISD::UMIN, 0),
- X86_INTRINSIC_DATA(sse41_pmovzxbd, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(sse41_pmovzxbq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(sse41_pmovzxbw, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(sse41_pmovzxdq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(sse41_pmovzxwd, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
- X86_INTRINSIC_DATA(sse41_pmovzxwq, INTR_TYPE_1OP, X86ISD::VZEXT, 0),
X86_INTRINSIC_DATA(sse41_pmuldq, INTR_TYPE_2OP, X86ISD::PMULDQ, 0),
X86_INTRINSIC_DATA(sse4a_extrqi, INTR_TYPE_3OP, X86ISD::EXTRQI, 0),
X86_INTRINSIC_DATA(sse4a_insertqi, INTR_TYPE_4OP, X86ISD::INSERTQI, 0),
- X86_INTRINSIC_DATA(sse_comieq_ss, COMI, X86ISD::COMI, ISD::SETEQ),
- X86_INTRINSIC_DATA(sse_comige_ss, COMI, X86ISD::COMI, ISD::SETGE),
- X86_INTRINSIC_DATA(sse_comigt_ss, COMI, X86ISD::COMI, ISD::SETGT),
- X86_INTRINSIC_DATA(sse_comile_ss, COMI, X86ISD::COMI, ISD::SETLE),
- X86_INTRINSIC_DATA(sse_comilt_ss, COMI, X86ISD::COMI, ISD::SETLT),
- X86_INTRINSIC_DATA(sse_comineq_ss, COMI, X86ISD::COMI, ISD::SETNE),
- X86_INTRINSIC_DATA(sse_max_ps, INTR_TYPE_2OP, X86ISD::FMAX, 0),
- X86_INTRINSIC_DATA(sse_min_ps, INTR_TYPE_2OP, X86ISD::FMIN, 0),
- X86_INTRINSIC_DATA(sse_rcp_ps, INTR_TYPE_1OP, X86ISD::FRCP, 0),
- X86_INTRINSIC_DATA(sse_rsqrt_ps, INTR_TYPE_1OP, X86ISD::FRSQRT, 0),
- X86_INTRINSIC_DATA(sse_sqrt_ps, INTR_TYPE_1OP, ISD::FSQRT, 0),
- X86_INTRINSIC_DATA(sse_ucomieq_ss, COMI, X86ISD::UCOMI, ISD::SETEQ),
- X86_INTRINSIC_DATA(sse_ucomige_ss, COMI, X86ISD::UCOMI, ISD::SETGE),
- X86_INTRINSIC_DATA(sse_ucomigt_ss, COMI, X86ISD::UCOMI, ISD::SETGT),
- X86_INTRINSIC_DATA(sse_ucomile_ss, COMI, X86ISD::UCOMI, ISD::SETLE),
- X86_INTRINSIC_DATA(sse_ucomilt_ss, COMI, X86ISD::UCOMI, ISD::SETLT),
- X86_INTRINSIC_DATA(sse_ucomineq_ss, COMI, X86ISD::UCOMI, ISD::SETNE),
+ X86_INTRINSIC_DATA(ssse3_pabs_b_128, INTR_TYPE_1OP, X86ISD::ABS, 0),
+ X86_INTRINSIC_DATA(ssse3_pabs_d_128, INTR_TYPE_1OP, X86ISD::ABS, 0),
+ X86_INTRINSIC_DATA(ssse3_pabs_w_128, INTR_TYPE_1OP, X86ISD::ABS, 0),
X86_INTRINSIC_DATA(ssse3_phadd_d_128, INTR_TYPE_2OP, X86ISD::HADD, 0),
X86_INTRINSIC_DATA(ssse3_phadd_w_128, INTR_TYPE_2OP, X86ISD::HADD, 0),
X86_INTRINSIC_DATA(ssse3_phsub_d_128, INTR_TYPE_2OP, X86ISD::HSUB, 0),
X86_INTRINSIC_DATA(ssse3_phsub_w_128, INTR_TYPE_2OP, X86ISD::HSUB, 0),
X86_INTRINSIC_DATA(ssse3_pshuf_b_128, INTR_TYPE_2OP, X86ISD::PSHUFB, 0),
- X86_INTRINSIC_DATA(ssse3_psign_b_128, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
- X86_INTRINSIC_DATA(ssse3_psign_d_128, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
- X86_INTRINSIC_DATA(ssse3_psign_w_128, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
X86_INTRINSIC_DATA(xop_vpcomb, INTR_TYPE_3OP, X86ISD::VPCOM, 0),
X86_INTRINSIC_DATA(xop_vpcomd, INTR_TYPE_3OP, X86ISD::VPCOM, 0),
X86_INTRINSIC_DATA(xop_vpcomq, INTR_TYPE_3OP, X86ISD::VPCOM, 0),
@@ -2047,6 +1952,11 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(xop_vpcomuq, INTR_TYPE_3OP, X86ISD::VPCOMU, 0),
X86_INTRINSIC_DATA(xop_vpcomuw, INTR_TYPE_3OP, X86ISD::VPCOMU, 0),
X86_INTRINSIC_DATA(xop_vpcomw, INTR_TYPE_3OP, X86ISD::VPCOM, 0),
+ X86_INTRINSIC_DATA(xop_vpermil2pd, INTR_TYPE_4OP, X86ISD::VPERMIL2, 0),
+ X86_INTRINSIC_DATA(xop_vpermil2pd_256, INTR_TYPE_4OP, X86ISD::VPERMIL2, 0),
+ X86_INTRINSIC_DATA(xop_vpermil2ps, INTR_TYPE_4OP, X86ISD::VPERMIL2, 0),
+ X86_INTRINSIC_DATA(xop_vpermil2ps_256, INTR_TYPE_4OP, X86ISD::VPERMIL2, 0),
+ X86_INTRINSIC_DATA(xop_vpperm, INTR_TYPE_3OP, X86ISD::VPPERM, 0),
X86_INTRINSIC_DATA(xop_vprotb, INTR_TYPE_2OP, X86ISD::VPROT, 0),
X86_INTRINSIC_DATA(xop_vprotbi, INTR_TYPE_2OP, X86ISD::VPROTI, 0),
X86_INTRINSIC_DATA(xop_vprotd, INTR_TYPE_2OP, X86ISD::VPROT, 0),
@@ -2069,7 +1979,7 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
* Retrieve data for Intrinsic without chain.
* Return nullptr if intrinsic is not defined in the table.
*/
-static const IntrinsicData* getIntrinsicWithoutChain(unsigned IntNo) {
+static const IntrinsicData* getIntrinsicWithoutChain(uint16_t IntNo) {
IntrinsicData IntrinsicToFind = { IntNo, INTR_NO_TYPE, 0, 0 };
const IntrinsicData *Data = std::lower_bound(std::begin(IntrinsicsWithoutChain),
std::end(IntrinsicsWithoutChain),
@@ -2093,96 +2003,6 @@ static void verifyIntrinsicTables() {
std::end(IntrinsicsWithChain)) &&
"Intrinsic data tables should have unique entries");
}
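
getIntrinsicWithoutChain above depends on the invariant these asserts enforce: the tables stay sorted (and unique) by intrinsic number, so a std::lower_bound binary search is valid. A minimal standalone sketch of the same scheme — the table contents and names here are illustrative, not LLVM's:

#include <algorithm>
#include <cstdio>
#include <iterator>

struct Entry {
  unsigned Id;
  const char *Name;
  bool operator<(const Entry &Other) const { return Id < Other.Id; }
};

// Kept sorted by Id, mirroring the sortedness assert above.
static const Entry Table[] = {{3, "three"}, {7, "seven"}, {9, "nine"}};

static const Entry *lookup(unsigned Id) {
  Entry Key{Id, nullptr};
  const Entry *I = std::lower_bound(std::begin(Table), std::end(Table), Key);
  return (I != std::end(Table) && I->Id == Id) ? I : nullptr;
}

int main() {
  std::printf("%s\n", lookup(7) ? lookup(7)->Name : "none"); // seven
  std::printf("%s\n", lookup(8) ? lookup(8)->Name : "none"); // none
}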
-
-// X86 specific compare constants.
-// They must be kept in synch with avxintrin.h
-#define _X86_CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */
-#define _X86_CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */
-#define _X86_CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */
-#define _X86_CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */
-#define _X86_CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */
-#define _X86_CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */
-#define _X86_CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */
-#define _X86_CMP_ORD_Q 0x07 /* Ordered (nonsignaling) */
-#define _X86_CMP_EQ_UQ 0x08 /* Equal (unordered, non-signaling) */
-#define _X86_CMP_NGE_US 0x09 /* Not-greater-than-or-equal (unord, signaling) */
-#define _X86_CMP_NGT_US 0x0a /* Not-greater-than (unordered, signaling) */
-#define _X86_CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling) */
-#define _X86_CMP_NEQ_OQ 0x0c /* Not-equal (ordered, non-signaling) */
-#define _X86_CMP_GE_OS 0x0d /* Greater-than-or-equal (ordered, signaling) */
-#define _X86_CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */
-#define _X86_CMP_TRUE_UQ 0x0f /* True (unordered, non-signaling) */
-#define _X86_CMP_EQ_OS 0x10 /* Equal (ordered, signaling) */
-#define _X86_CMP_LT_OQ 0x11 /* Less-than (ordered, non-signaling) */
-#define _X86_CMP_LE_OQ 0x12 /* Less-than-or-equal (ordered, non-signaling) */
-#define _X86_CMP_UNORD_S 0x13 /* Unordered (signaling) */
-#define _X86_CMP_NEQ_US 0x14 /* Not-equal (unordered, signaling) */
-#define _X86_CMP_NLT_UQ 0x15 /* Not-less-than (unordered, non-signaling) */
-#define _X86_CMP_NLE_UQ 0x16 /* Not-less-than-or-equal (unord, non-signaling) */
-#define _X86_CMP_ORD_S 0x17 /* Ordered (signaling) */
-#define _X86_CMP_EQ_US 0x18 /* Equal (unordered, signaling) */
-#define _X86_CMP_NGE_UQ 0x19 /* Not-greater-than-or-equal (unord, non-sign) */
-#define _X86_CMP_NGT_UQ 0x1a /* Not-greater-than (unordered, non-signaling) */
-#define _X86_CMP_FALSE_OS 0x1b /* False (ordered, signaling) */
-#define _X86_CMP_NEQ_OS 0x1c /* Not-equal (ordered, signaling) */
-#define _X86_CMP_GE_OQ 0x1d /* Greater-than-or-equal (ordered, non-signaling) */
-#define _X86_CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */
-#define _X86_CMP_TRUE_US 0x1f /* True (unordered, signaling) */
-
-/*
-* Get comparison modifier from _mm_comi_round_sd/ss intrinsic
-* Return tuple <isOrdered, X86 condcode>
-*/
-static std::tuple<bool,unsigned> TranslateX86ConstCondToX86CC(SDValue &imm) {
- ConstantSDNode *CImm = dyn_cast<ConstantSDNode>(imm);
- unsigned IntImm = CImm->getZExtValue();
- // On a floating point condition, the flags are set as follows:
- // ZF PF CF op
- // 0 | 0 | 0 | X > Y
- // 0 | 0 | 1 | X < Y
- // 1 | 0 | 0 | X == Y
- // 1 | 1 | 1 | unordered
- switch (IntImm) {
- default: llvm_unreachable("Invalid floating point compare value for Comi!");
- case _X86_CMP_EQ_OQ: // 0x00 - Equal (ordered, nonsignaling)
- case _X86_CMP_EQ_OS: // 0x10 - Equal (ordered, signaling)
- return std::make_tuple(true, X86::COND_E);
- case _X86_CMP_EQ_UQ: // 0x08 - Equal (unordered, non-signaling)
- case _X86_CMP_EQ_US: // 0x18 - Equal (unordered, signaling)
- return std::make_tuple(false , X86::COND_E);
- case _X86_CMP_LT_OS: // 0x01 - Less-than (ordered, signaling)
- case _X86_CMP_LT_OQ: // 0x11 - Less-than (ordered, nonsignaling)
- return std::make_tuple(true, X86::COND_B);
- case _X86_CMP_NGE_US: // 0x09 - Not-greater-than-or-equal (unordered, signaling)
- case _X86_CMP_NGE_UQ: // 0x19 - Not-greater-than-or-equal (unordered, nonsignaling)
- return std::make_tuple(false , X86::COND_B);
- case _X86_CMP_LE_OS: // 0x02 - Less-than-or-equal (ordered, signaling)
- case _X86_CMP_LE_OQ: // 0x12 - Less-than-or-equal (ordered, nonsignaling)
- return std::make_tuple(true, X86::COND_BE);
- case _X86_CMP_NGT_US: // 0x0A - Not-greater-than (unordered, signaling)
- case _X86_CMP_NGT_UQ: // 0x1A - Not-greater-than (unordered, nonsignaling)
- return std::make_tuple(false, X86::COND_BE);
- case _X86_CMP_GT_OS: // 0x0E - Greater-than (ordered, signaling)
- case _X86_CMP_GT_OQ: // 0x1E - Greater-than (ordered, nonsignaling)
- return std::make_tuple(true, X86::COND_A);
- case _X86_CMP_NLE_US: // 0x06 - Not-less-than-or-equal (unordered,signaling)
- case _X86_CMP_NLE_UQ: // 0x16 - Not-less-than-or-equal (unordered, nonsignaling)
- return std::make_tuple(false, X86::COND_A);
- case _X86_CMP_GE_OS: // 0x0D - Greater-than-or-equal (ordered, signaling)
- case _X86_CMP_GE_OQ: // 0x1D - Greater-than-or-equal (ordered, nonsignaling)
- return std::make_tuple(true, X86::COND_AE);
- case _X86_CMP_NLT_US: // 0x05 - Not-less-than (unordered, signaling)
- case _X86_CMP_NLT_UQ: // 0x15 - Not-less-than (unordered, nonsignaling)
- return std::make_tuple(false, X86::COND_AE);
- case _X86_CMP_NEQ_OQ: // 0x0C - Not-equal (ordered, non-signaling)
- case _X86_CMP_NEQ_OS: // 0x1C - Not-equal (ordered, signaling)
- return std::make_tuple(true, X86::COND_NE);
- case _X86_CMP_NEQ_UQ: // 0x04 - Not-equal (unordered, nonsignaling)
- case _X86_CMP_NEQ_US: // 0x14 - Not-equal (unordered, signaling)
- return std::make_tuple(false, X86::COND_NE);
- }
-}
-
} // End llvm namespace
#endif
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index e1ca558f0f2c..906e3427b2ff 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -20,6 +20,7 @@
#include "Utils/X86ShuffleDecode.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineOperand.h"
@@ -35,9 +36,15 @@
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
+#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+
using namespace llvm;
namespace {
@@ -72,47 +79,33 @@ private:
static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
const MCSubtargetInfo &STI);
-namespace llvm {
- X86AsmPrinter::StackMapShadowTracker::StackMapShadowTracker(TargetMachine &TM)
- : TM(TM), InShadow(false), RequiredShadowSize(0), CurrentShadowSize(0) {}
-
- X86AsmPrinter::StackMapShadowTracker::~StackMapShadowTracker() {}
-
- void
- X86AsmPrinter::StackMapShadowTracker::startFunction(MachineFunction &F) {
- MF = &F;
- CodeEmitter.reset(TM.getTarget().createMCCodeEmitter(
- *MF->getSubtarget().getInstrInfo(),
- *MF->getSubtarget().getRegisterInfo(), MF->getContext()));
- }
-
- void X86AsmPrinter::StackMapShadowTracker::count(MCInst &Inst,
- const MCSubtargetInfo &STI) {
- if (InShadow) {
- SmallString<256> Code;
- SmallVector<MCFixup, 4> Fixups;
- raw_svector_ostream VecOS(Code);
- CodeEmitter->encodeInstruction(Inst, VecOS, Fixups, STI);
- CurrentShadowSize += Code.size();
- if (CurrentShadowSize >= RequiredShadowSize)
- InShadow = false; // The shadow is big enough. Stop counting.
- }
+void X86AsmPrinter::StackMapShadowTracker::count(MCInst &Inst,
+ const MCSubtargetInfo &STI,
+ MCCodeEmitter *CodeEmitter) {
+ if (InShadow) {
+ SmallString<256> Code;
+ SmallVector<MCFixup, 4> Fixups;
+ raw_svector_ostream VecOS(Code);
+ CodeEmitter->encodeInstruction(Inst, VecOS, Fixups, STI);
+ CurrentShadowSize += Code.size();
+ if (CurrentShadowSize >= RequiredShadowSize)
+ InShadow = false; // The shadow is big enough. Stop counting.
}
+}
- void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding(
+void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding(
MCStreamer &OutStreamer, const MCSubtargetInfo &STI) {
- if (InShadow && CurrentShadowSize < RequiredShadowSize) {
- InShadow = false;
- EmitNops(OutStreamer, RequiredShadowSize - CurrentShadowSize,
- MF->getSubtarget<X86Subtarget>().is64Bit(), STI);
- }
+ if (InShadow && CurrentShadowSize < RequiredShadowSize) {
+ InShadow = false;
+ EmitNops(OutStreamer, RequiredShadowSize - CurrentShadowSize,
+ MF->getSubtarget<X86Subtarget>().is64Bit(), STI);
}
+}
- void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) {
- OutStreamer->EmitInstruction(Inst, getSubtargetInfo());
- SMShadowTracker.count(Inst, getSubtargetInfo());
- }
-} // end llvm namespace
+void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) {
+ OutStreamer->EmitInstruction(Inst, getSubtargetInfo());
+ SMShadowTracker.count(Inst, getSubtargetInfo(), CodeEmitter.get());
+}
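
A standalone sketch of the bookkeeping the three methods above implement (the class here is a stand-in, not the LLVM type): each encoded instruction's size counts toward the pending stackmap shadow, and whatever is still missing at the next patchable point gets filled with nops.

#include <cstdio>

struct ShadowSketch {
  bool InShadow = false;
  unsigned Required = 0, Current = 0;

  void startShadow(unsigned RequiredShadowSize) {
    InShadow = true;
    Required = RequiredShadowSize;
    Current = 0;
  }
  // Mirrors count(): accumulate encoded bytes until the shadow is covered.
  void count(unsigned InstrBytes) {
    if (InShadow && (Current += InstrBytes) >= Required)
      InShadow = false; // the shadow is big enough, stop counting
  }
  // Mirrors emitShadowPadding(): how many nop bytes are still owed.
  unsigned paddingNeeded() const {
    return (InShadow && Current < Required) ? Required - Current : 0;
  }
};

int main() {
  ShadowSketch T;
  T.startShadow(8); // a stackmap requested an 8-byte shadow
  T.count(5);       // a 5-byte instruction was emitted after it
  std::printf("%u\n", T.paddingNeeded()); // 3 bytes of nops are still owed
}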
X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
X86AsmPrinter &asmprinter)
@@ -140,12 +133,8 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
// Handle dllimport linkage.
Name += "__imp_";
break;
- case X86II::MO_DARWIN_STUB:
- Suffix = "$stub";
- break;
case X86II::MO_DARWIN_NONLAZY:
case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
- case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
Suffix = "$non_lazy_ptr";
break;
}
@@ -153,8 +142,6 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
if (!Suffix.empty())
Name += DL.getPrivateGlobalPrefix();
- unsigned PrefixLen = Name.size();
-
if (MO.isGlobal()) {
const GlobalValue *GV = MO.getGlobal();
AsmPrinter.getNameWithPrefix(Name, GV);
@@ -164,14 +151,11 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
assert(Suffix.empty());
Sym = MO.getMBB()->getSymbol();
}
- unsigned OrigLen = Name.size() - PrefixLen;
Name += Suffix;
if (!Sym)
Sym = Ctx.getOrCreateSymbol(Name);
- StringRef OrigName = StringRef(Name).substr(PrefixLen, OrigLen);
-
// If the target flags on the operand changes the name of the symbol, do that
// before we return the symbol.
switch (MO.getTargetFlags()) {
@@ -189,36 +173,6 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
}
break;
}
- case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: {
- MachineModuleInfoImpl::StubValueTy &StubSym =
- getMachOMMI().getHiddenGVStubEntry(Sym);
- if (!StubSym.getPointer()) {
- assert(MO.isGlobal() && "Extern symbol not handled yet");
- StubSym =
- MachineModuleInfoImpl::
- StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
- !MO.getGlobal()->hasInternalLinkage());
- }
- break;
- }
- case X86II::MO_DARWIN_STUB: {
- MachineModuleInfoImpl::StubValueTy &StubSym =
- getMachOMMI().getFnStubEntry(Sym);
- if (StubSym.getPointer())
- return Sym;
-
- if (MO.isGlobal()) {
- StubSym =
- MachineModuleInfoImpl::
- StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
- !MO.getGlobal()->hasInternalLinkage());
- } else {
- StubSym =
- MachineModuleInfoImpl::
- StubValueTy(Ctx.getOrCreateSymbol(OrigName), false);
- }
- break;
- }
}
return Sym;
@@ -237,7 +191,6 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
// These affect the name of the symbol, not any suffix.
case X86II::MO_DARWIN_NONLAZY:
case X86II::MO_DLLIMPORT:
- case X86II::MO_DARWIN_STUB:
break;
case X86II::MO_TLVP: RefKind = MCSymbolRefExpr::VK_TLVP; break;
@@ -265,14 +218,13 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
case X86II::MO_PLT: RefKind = MCSymbolRefExpr::VK_PLT; break;
case X86II::MO_PIC_BASE_OFFSET:
case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
- case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
Expr = MCSymbolRefExpr::create(Sym, Ctx);
// Subtract the pic base.
Expr = MCBinaryExpr::createSub(Expr,
MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx),
Ctx);
if (MO.isJTI()) {
- assert(MAI.doesSetDirectiveSuppressesReloc());
+ assert(MAI.doesSetDirectiveSuppressReloc());
// If .set directive is supported, use it to reduce the number of
// relocations the assembler will generate for differences between
// local labels. This is only safe when the symbols are in the same
@@ -653,50 +605,81 @@ ReSimplify:
// MOV64ao8, MOV64o8a
// XCHG16ar, XCHG32ar, XCHG64ar
case X86::MOV8mr_NOREX:
- case X86::MOV8mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8o32a); break;
+ case X86::MOV8mr:
case X86::MOV8rm_NOREX:
- case X86::MOV8rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8ao32); break;
- case X86::MOV16mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16o32a); break;
- case X86::MOV16rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16ao32); break;
- case X86::MOV32mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32o32a); break;
- case X86::MOV32rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32ao32); break;
-
- case X86::ADC8ri: SimplifyShortImmForm(OutMI, X86::ADC8i8); break;
- case X86::ADC16ri: SimplifyShortImmForm(OutMI, X86::ADC16i16); break;
- case X86::ADC32ri: SimplifyShortImmForm(OutMI, X86::ADC32i32); break;
- case X86::ADC64ri32: SimplifyShortImmForm(OutMI, X86::ADC64i32); break;
- case X86::ADD8ri: SimplifyShortImmForm(OutMI, X86::ADD8i8); break;
- case X86::ADD16ri: SimplifyShortImmForm(OutMI, X86::ADD16i16); break;
- case X86::ADD32ri: SimplifyShortImmForm(OutMI, X86::ADD32i32); break;
- case X86::ADD64ri32: SimplifyShortImmForm(OutMI, X86::ADD64i32); break;
- case X86::AND8ri: SimplifyShortImmForm(OutMI, X86::AND8i8); break;
- case X86::AND16ri: SimplifyShortImmForm(OutMI, X86::AND16i16); break;
- case X86::AND32ri: SimplifyShortImmForm(OutMI, X86::AND32i32); break;
- case X86::AND64ri32: SimplifyShortImmForm(OutMI, X86::AND64i32); break;
- case X86::CMP8ri: SimplifyShortImmForm(OutMI, X86::CMP8i8); break;
- case X86::CMP16ri: SimplifyShortImmForm(OutMI, X86::CMP16i16); break;
- case X86::CMP32ri: SimplifyShortImmForm(OutMI, X86::CMP32i32); break;
- case X86::CMP64ri32: SimplifyShortImmForm(OutMI, X86::CMP64i32); break;
- case X86::OR8ri: SimplifyShortImmForm(OutMI, X86::OR8i8); break;
- case X86::OR16ri: SimplifyShortImmForm(OutMI, X86::OR16i16); break;
- case X86::OR32ri: SimplifyShortImmForm(OutMI, X86::OR32i32); break;
- case X86::OR64ri32: SimplifyShortImmForm(OutMI, X86::OR64i32); break;
- case X86::SBB8ri: SimplifyShortImmForm(OutMI, X86::SBB8i8); break;
- case X86::SBB16ri: SimplifyShortImmForm(OutMI, X86::SBB16i16); break;
- case X86::SBB32ri: SimplifyShortImmForm(OutMI, X86::SBB32i32); break;
- case X86::SBB64ri32: SimplifyShortImmForm(OutMI, X86::SBB64i32); break;
- case X86::SUB8ri: SimplifyShortImmForm(OutMI, X86::SUB8i8); break;
- case X86::SUB16ri: SimplifyShortImmForm(OutMI, X86::SUB16i16); break;
- case X86::SUB32ri: SimplifyShortImmForm(OutMI, X86::SUB32i32); break;
- case X86::SUB64ri32: SimplifyShortImmForm(OutMI, X86::SUB64i32); break;
- case X86::TEST8ri: SimplifyShortImmForm(OutMI, X86::TEST8i8); break;
- case X86::TEST16ri: SimplifyShortImmForm(OutMI, X86::TEST16i16); break;
- case X86::TEST32ri: SimplifyShortImmForm(OutMI, X86::TEST32i32); break;
- case X86::TEST64ri32: SimplifyShortImmForm(OutMI, X86::TEST64i32); break;
- case X86::XOR8ri: SimplifyShortImmForm(OutMI, X86::XOR8i8); break;
- case X86::XOR16ri: SimplifyShortImmForm(OutMI, X86::XOR16i16); break;
- case X86::XOR32ri: SimplifyShortImmForm(OutMI, X86::XOR32i32); break;
- case X86::XOR64ri32: SimplifyShortImmForm(OutMI, X86::XOR64i32); break;
+ case X86::MOV8rm:
+ case X86::MOV16mr:
+ case X86::MOV16rm:
+ case X86::MOV32mr:
+ case X86::MOV32rm: {
+ unsigned NewOpc;
+ switch (OutMI.getOpcode()) {
+ default: llvm_unreachable("Invalid opcode");
+ case X86::MOV8mr_NOREX:
+ case X86::MOV8mr: NewOpc = X86::MOV8o32a; break;
+ case X86::MOV8rm_NOREX:
+ case X86::MOV8rm: NewOpc = X86::MOV8ao32; break;
+ case X86::MOV16mr: NewOpc = X86::MOV16o32a; break;
+ case X86::MOV16rm: NewOpc = X86::MOV16ao32; break;
+ case X86::MOV32mr: NewOpc = X86::MOV32o32a; break;
+ case X86::MOV32rm: NewOpc = X86::MOV32ao32; break;
+ }
+ SimplifyShortMoveForm(AsmPrinter, OutMI, NewOpc);
+ break;
+ }
+
+ case X86::ADC8ri: case X86::ADC16ri: case X86::ADC32ri: case X86::ADC64ri32:
+ case X86::ADD8ri: case X86::ADD16ri: case X86::ADD32ri: case X86::ADD64ri32:
+ case X86::AND8ri: case X86::AND16ri: case X86::AND32ri: case X86::AND64ri32:
+ case X86::CMP8ri: case X86::CMP16ri: case X86::CMP32ri: case X86::CMP64ri32:
+ case X86::OR8ri: case X86::OR16ri: case X86::OR32ri: case X86::OR64ri32:
+ case X86::SBB8ri: case X86::SBB16ri: case X86::SBB32ri: case X86::SBB64ri32:
+ case X86::SUB8ri: case X86::SUB16ri: case X86::SUB32ri: case X86::SUB64ri32:
+ case X86::TEST8ri:case X86::TEST16ri:case X86::TEST32ri:case X86::TEST64ri32:
+ case X86::XOR8ri: case X86::XOR16ri: case X86::XOR32ri: case X86::XOR64ri32: {
+ unsigned NewOpc;
+ switch (OutMI.getOpcode()) {
+ default: llvm_unreachable("Invalid opcode");
+ case X86::ADC8ri: NewOpc = X86::ADC8i8; break;
+ case X86::ADC16ri: NewOpc = X86::ADC16i16; break;
+ case X86::ADC32ri: NewOpc = X86::ADC32i32; break;
+ case X86::ADC64ri32: NewOpc = X86::ADC64i32; break;
+ case X86::ADD8ri: NewOpc = X86::ADD8i8; break;
+ case X86::ADD16ri: NewOpc = X86::ADD16i16; break;
+ case X86::ADD32ri: NewOpc = X86::ADD32i32; break;
+ case X86::ADD64ri32: NewOpc = X86::ADD64i32; break;
+ case X86::AND8ri: NewOpc = X86::AND8i8; break;
+ case X86::AND16ri: NewOpc = X86::AND16i16; break;
+ case X86::AND32ri: NewOpc = X86::AND32i32; break;
+ case X86::AND64ri32: NewOpc = X86::AND64i32; break;
+ case X86::CMP8ri: NewOpc = X86::CMP8i8; break;
+ case X86::CMP16ri: NewOpc = X86::CMP16i16; break;
+ case X86::CMP32ri: NewOpc = X86::CMP32i32; break;
+ case X86::CMP64ri32: NewOpc = X86::CMP64i32; break;
+ case X86::OR8ri: NewOpc = X86::OR8i8; break;
+ case X86::OR16ri: NewOpc = X86::OR16i16; break;
+ case X86::OR32ri: NewOpc = X86::OR32i32; break;
+ case X86::OR64ri32: NewOpc = X86::OR64i32; break;
+ case X86::SBB8ri: NewOpc = X86::SBB8i8; break;
+ case X86::SBB16ri: NewOpc = X86::SBB16i16; break;
+ case X86::SBB32ri: NewOpc = X86::SBB32i32; break;
+ case X86::SBB64ri32: NewOpc = X86::SBB64i32; break;
+ case X86::SUB8ri: NewOpc = X86::SUB8i8; break;
+ case X86::SUB16ri: NewOpc = X86::SUB16i16; break;
+ case X86::SUB32ri: NewOpc = X86::SUB32i32; break;
+ case X86::SUB64ri32: NewOpc = X86::SUB64i32; break;
+ case X86::TEST8ri: NewOpc = X86::TEST8i8; break;
+ case X86::TEST16ri: NewOpc = X86::TEST16i16; break;
+ case X86::TEST32ri: NewOpc = X86::TEST32i32; break;
+ case X86::TEST64ri32: NewOpc = X86::TEST64i32; break;
+ case X86::XOR8ri: NewOpc = X86::XOR8i8; break;
+ case X86::XOR16ri: NewOpc = X86::XOR16i16; break;
+ case X86::XOR32ri: NewOpc = X86::XOR32i32; break;
+ case X86::XOR64ri32: NewOpc = X86::XOR64i32; break;
+ }
+ SimplifyShortImmForm(OutMI, NewOpc);
+ break;
+ }
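
The remap is profitable because x86 provides dedicated accumulator-with-immediate encodings that omit the ModRM byte; SimplifyShortImmForm only performs the swap when the register operand really is the accumulator (AL/AX/EAX/RAX). A rough sketch of the saving — the byte counts come from the x86 manual, not from this file:

#include <cstdio>

int main() {
  const unsigned GenericCmp32ri = 6; // 81 /7 id: opcode + ModRM + imm32
  const unsigned ShortCmp32i32 = 5;  // 3D id: opcode + imm32
  std::printf("saved %u byte(s)\n", GenericCmp32ri - ShortCmp32i32); // 1
}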
// Try to shrink some forms of movsx.
case X86::MOVSX16rr8:
@@ -785,55 +768,77 @@ void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering,
.addExpr(tlsRef));
}
-/// \brief Emit the optimal amount of multi-byte nops on X86.
-static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit, const MCSubtargetInfo &STI) {
+/// \brief Emit the largest nop instruction smaller than or equal to \p NumBytes
+/// bytes. Return the size of nop emitted.
+static unsigned EmitNop(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
+ const MCSubtargetInfo &STI) {
 // This works only for 64-bit. For 32-bit we would have to check whether
 // the CPU supports multi-byte nops.
assert(Is64Bit && "EmitNops only supports X86-64");
- while (NumBytes) {
- unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg;
- Opc = IndexReg = Displacement = SegmentReg = 0;
- BaseReg = X86::RAX; ScaleVal = 1;
- switch (NumBytes) {
- case 0: llvm_unreachable("Zero nops?"); break;
- case 1: NumBytes -= 1; Opc = X86::NOOP; break;
- case 2: NumBytes -= 2; Opc = X86::XCHG16ar; break;
- case 3: NumBytes -= 3; Opc = X86::NOOPL; break;
- case 4: NumBytes -= 4; Opc = X86::NOOPL; Displacement = 8; break;
- case 5: NumBytes -= 5; Opc = X86::NOOPL; Displacement = 8;
- IndexReg = X86::RAX; break;
- case 6: NumBytes -= 6; Opc = X86::NOOPW; Displacement = 8;
- IndexReg = X86::RAX; break;
- case 7: NumBytes -= 7; Opc = X86::NOOPL; Displacement = 512; break;
- case 8: NumBytes -= 8; Opc = X86::NOOPL; Displacement = 512;
- IndexReg = X86::RAX; break;
- case 9: NumBytes -= 9; Opc = X86::NOOPW; Displacement = 512;
- IndexReg = X86::RAX; break;
- default: NumBytes -= 10; Opc = X86::NOOPW; Displacement = 512;
- IndexReg = X86::RAX; SegmentReg = X86::CS; break;
- }
- unsigned NumPrefixes = std::min(NumBytes, 5U);
- NumBytes -= NumPrefixes;
- for (unsigned i = 0; i != NumPrefixes; ++i)
- OS.EmitBytes("\x66");
+ unsigned NopSize;
+ unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg;
+ Opc = IndexReg = Displacement = SegmentReg = 0;
+ BaseReg = X86::RAX;
+ ScaleVal = 1;
+ switch (NumBytes) {
+ case 0: llvm_unreachable("Zero nops?"); break;
+ case 1: NopSize = 1; Opc = X86::NOOP; break;
+ case 2: NopSize = 2; Opc = X86::XCHG16ar; break;
+ case 3: NopSize = 3; Opc = X86::NOOPL; break;
+ case 4: NopSize = 4; Opc = X86::NOOPL; Displacement = 8; break;
+ case 5: NopSize = 5; Opc = X86::NOOPL; Displacement = 8;
+ IndexReg = X86::RAX; break;
+ case 6: NopSize = 6; Opc = X86::NOOPW; Displacement = 8;
+ IndexReg = X86::RAX; break;
+ case 7: NopSize = 7; Opc = X86::NOOPL; Displacement = 512; break;
+ case 8: NopSize = 8; Opc = X86::NOOPL; Displacement = 512;
+ IndexReg = X86::RAX; break;
+ case 9: NopSize = 9; Opc = X86::NOOPW; Displacement = 512;
+ IndexReg = X86::RAX; break;
+ default: NopSize = 10; Opc = X86::NOOPW; Displacement = 512;
+ IndexReg = X86::RAX; SegmentReg = X86::CS; break;
+ }
- switch (Opc) {
- default: llvm_unreachable("Unexpected opcode"); break;
- case X86::NOOP:
- OS.EmitInstruction(MCInstBuilder(Opc), STI);
- break;
- case X86::XCHG16ar:
- OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX), STI);
- break;
- case X86::NOOPL:
- case X86::NOOPW:
- OS.EmitInstruction(MCInstBuilder(Opc).addReg(BaseReg)
- .addImm(ScaleVal).addReg(IndexReg)
- .addImm(Displacement).addReg(SegmentReg), STI);
- break;
- }
- } // while (NumBytes)
+ unsigned NumPrefixes = std::min(NumBytes - NopSize, 5U);
+ NopSize += NumPrefixes;
+ for (unsigned i = 0; i != NumPrefixes; ++i)
+ OS.EmitBytes("\x66");
+
+ switch (Opc) {
+ default:
+ llvm_unreachable("Unexpected opcode");
+ break;
+ case X86::NOOP:
+ OS.EmitInstruction(MCInstBuilder(Opc), STI);
+ break;
+ case X86::XCHG16ar:
+ OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX), STI);
+ break;
+ case X86::NOOPL:
+ case X86::NOOPW:
+ OS.EmitInstruction(MCInstBuilder(Opc)
+ .addReg(BaseReg)
+ .addImm(ScaleVal)
+ .addReg(IndexReg)
+ .addImm(Displacement)
+ .addReg(SegmentReg),
+ STI);
+ break;
+ }
+ assert(NopSize <= NumBytes && "We overemitted?");
+ return NopSize;
+}
+
+/// \brief Emit the optimal amount of multi-byte nops on X86.
+static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
+ const MCSubtargetInfo &STI) {
+ unsigned NopsToEmit = NumBytes;
+ (void)NopsToEmit;
+ while (NumBytes) {
+ NumBytes -= EmitNop(OS, NumBytes, Is64Bit, STI);
+ assert(NopsToEmit >= NumBytes && "Emitted more than I asked for!");
+ }
}
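
A standalone sketch of the greedy split the loop above performs, using the sizes from EmitNop: each iteration emits one base nop of at most 10 bytes plus up to five 0x66 prefixes, so at most 15 bytes per instruction.

#include <algorithm>
#include <cstdio>

static unsigned nopChunk(unsigned NumBytes) {
  unsigned NopSize = std::min(NumBytes, 10u);           // largest base nop
  unsigned Prefixes = std::min(NumBytes - NopSize, 5u); // 0x66 padding
  return NopSize + Prefixes;
}

int main() {
  unsigned NumBytes = 37;
  while (NumBytes) {
    unsigned Emitted = nopChunk(NumBytes);
    std::printf("emit a %u-byte nop\n", Emitted); // 15, 15, 7
    NumBytes -= Emitted;
  }
}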
void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
@@ -891,10 +896,10 @@ void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
void X86AsmPrinter::LowerFAULTING_LOAD_OP(const MachineInstr &MI,
X86MCInstLower &MCIL) {
- // FAULTING_LOAD_OP <def>, <handler label>, <load opcode>, <load operands>
+ // FAULTING_LOAD_OP <def>, <MBB handler>, <load opcode>, <load operands>
unsigned LoadDefRegister = MI.getOperand(0).getReg();
- MCSymbol *HandlerLabel = MI.getOperand(1).getMCSymbol();
+ MCSymbol *HandlerLabel = MI.getOperand(1).getMBB()->getSymbol();
unsigned LoadOpcode = MI.getOperand(2).getImm();
unsigned LoadOperandsBeginIdx = 3;
@@ -915,6 +920,43 @@ void X86AsmPrinter::LowerFAULTING_LOAD_OP(const MachineInstr &MI,
OutStreamer->EmitInstruction(LoadMI, getSubtargetInfo());
}
+void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
+ X86MCInstLower &MCIL) {
+ // PATCHABLE_OP minsize, opcode, operands
+
+ unsigned MinSize = MI.getOperand(0).getImm();
+ unsigned Opcode = MI.getOperand(1).getImm();
+
+ MCInst MCI;
+ MCI.setOpcode(Opcode);
+ for (auto &MO : make_range(MI.operands_begin() + 2, MI.operands_end()))
+ if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
+ MCI.addOperand(MaybeOperand.getValue());
+
+ SmallString<256> Code;
+ SmallVector<MCFixup, 4> Fixups;
+ raw_svector_ostream VecOS(Code);
+ CodeEmitter->encodeInstruction(MCI, VecOS, Fixups, getSubtargetInfo());
+
+ if (Code.size() < MinSize) {
+ if (MinSize == 2 && Opcode == X86::PUSH64r) {
+ // This is an optimization that lets us get away without emitting a nop in
+ // many cases.
+ //
+ // NB! In some cases the encoding for PUSH64r (e.g. PUSH64r %R9) takes two
+ // bytes too, so the check on MinSize is important.
+ MCI.setOpcode(X86::PUSH64rmr);
+ } else {
+ unsigned NopSize = EmitNop(*OutStreamer, MinSize, Subtarget->is64Bit(),
+ getSubtargetInfo());
+ assert(NopSize == MinSize && "Could not implement MinSize!");
+ (void) NopSize;
+ }
+ }
+
+ OutStreamer->EmitInstruction(MCI, getSubtargetInfo());
+}
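
A sketch (an assumption-level restatement, not the LLVM API) of the padding decision above: an instruction already at least MinSize bytes long needs nothing; otherwise the pass either picks a longer encoding, as with PUSH64r, or emits nops for the difference.

#include <cstdio>

enum class Pad { None, LongerEncoding, Nops };

static Pad choosePadding(unsigned EncodedSize, unsigned MinSize,
                         bool IsPush64r) {
  if (EncodedSize >= MinSize)
    return Pad::None;
  // PUSH64r has a two-byte mod/rm form (PUSH64rmr), so a two-byte minimum
  // can be met by re-encoding instead of padding.
  if (MinSize == 2 && IsPush64r)
    return Pad::LongerEncoding;
  return Pad::Nops;
}

int main() {
  std::printf("%d\n", (int)choosePadding(1, 2, true));  // 1: re-encode
  std::printf("%d\n", (int)choosePadding(1, 2, false)); // 2: emit nops
}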
+
// Lower a stackmap of the form:
// <id>, <shadowBytes>, ...
void X86AsmPrinter::LowerSTACKMAP(const MachineInstr &MI) {
@@ -982,14 +1024,107 @@ void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
getSubtargetInfo());
}
+void X86AsmPrinter::recordSled(MCSymbol *Sled, const MachineInstr &MI,
+ SledKind Kind) {
+ auto Fn = MI.getParent()->getParent()->getFunction();
+ auto Attr = Fn->getFnAttribute("function-instrument");
+ bool AlwaysInstrument =
+ Attr.isStringAttribute() && Attr.getValueAsString() == "xray-always";
+ Sleds.emplace_back(
+ XRayFunctionEntry{Sled, CurrentFnSym, Kind, AlwaysInstrument, Fn});
+}
+
+void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
+ X86MCInstLower &MCIL) {
+ // We want to emit the following pattern:
+ //
+ // .Lxray_sled_N:
+ // .palign 2, ...
+ // jmp .tmpN
+ // # 9 bytes worth of noops
+ // .tmpN
+ //
+ // We need the 9 bytes because at runtime, we'd be patching over the full 11
+ // bytes with the following pattern:
+ //
+ // mov %r10, <function id, 32-bit> // 6 bytes
+ // call <relative offset, 32-bits> // 5 bytes
+ //
+ auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
+ OutStreamer->EmitLabel(CurSled);
+ OutStreamer->EmitCodeAlignment(4);
+ auto Target = OutContext.createTempSymbol();
+
+ // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
+ // an operand (computed as an offset from the jmp instruction).
+ // FIXME: Find another less hacky way to force the relative jump.
+ OutStreamer->EmitBytes("\xeb\x09");
+ EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo());
+ OutStreamer->EmitLabel(Target);
+ recordSled(CurSled, MI, SledKind::FUNCTION_ENTER);
+}
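
The byte budget behind the comment above, checked explicitly (the 2/9/6/5 sizes are the ones the comment states): the short jmp plus its nop filler leaves exactly the window the runtime patcher overwrites with the mov/call pair.

#include <cassert>

int main() {
  const unsigned JmpShort = 2;  // eb 09
  const unsigned SledNops = 9;  // EmitNops(..., 9, ...)
  const unsigned MovR10 = 6;    // mov %r10, <32-bit function id>
  const unsigned CallRel32 = 5; // call <32-bit relative offset>
  assert(JmpShort + SledNops == MovR10 + CallRel32); // both are 11 bytes
}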
+
+void X86AsmPrinter::LowerPATCHABLE_RET(const MachineInstr &MI,
+ X86MCInstLower &MCIL) {
+ // Since PATCHABLE_RET takes the opcode of the return statement as an
+ // argument, we use that to emit the correct form of the RET that we want.
+ // i.e. when we see this:
+ //
+ // PATCHABLE_RET X86::RET ...
+ //
+ // We should emit the RET followed by sleds.
+ //
+ // .Lxray_sled_N:
+ // ret # or equivalent instruction
+ // # 10 bytes worth of noops
+ //
+ // This just makes sure that the alignment for the next instruction is 2.
+ auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
+ OutStreamer->EmitLabel(CurSled);
+ unsigned OpCode = MI.getOperand(0).getImm();
+ MCInst Ret;
+ Ret.setOpcode(OpCode);
+ for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end()))
+ if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
+ Ret.addOperand(MaybeOperand.getValue());
+ OutStreamer->EmitInstruction(Ret, getSubtargetInfo());
+ EmitNops(*OutStreamer, 10, Subtarget->is64Bit(), getSubtargetInfo());
+ recordSled(CurSled, MI, SledKind::FUNCTION_EXIT);
+}
+
+void X86AsmPrinter::EmitXRayTable() {
+ if (Sleds.empty())
+ return;
+ if (Subtarget->isTargetELF()) {
+ auto *Section = OutContext.getELFSection(
+ "xray_instr_map", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_GROUP | ELF::SHF_MERGE, 0,
+ CurrentFnSym->getName());
+ auto PrevSection = OutStreamer->getCurrentSectionOnly();
+ OutStreamer->SwitchSection(Section);
+ for (const auto &Sled : Sleds) {
+ OutStreamer->EmitSymbolValue(Sled.Sled, 8);
+ OutStreamer->EmitSymbolValue(CurrentFnSym, 8);
+ auto Kind = static_cast<uint8_t>(Sled.Kind);
+ OutStreamer->EmitBytes(
+ StringRef(reinterpret_cast<const char *>(&Kind), 1));
+ OutStreamer->EmitBytes(
+ StringRef(reinterpret_cast<const char *>(&Sled.AlwaysInstrument), 1));
+ OutStreamer->EmitZeros(14);
+ }
+ OutStreamer->SwitchSection(PrevSection);
+ }
+ Sleds.clear();
+}
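
A sketch (an assumed layout, not an LLVM type) of the 32-byte xray_instr_map record this loop serializes: two 8-byte symbol values, two 1-byte flags, and 14 bytes of zero padding.

#include <cstdint>

struct XRayMapEntrySketch {
  uint64_t SledAddress;     // EmitSymbolValue(Sled.Sled, 8)
  uint64_t FunctionAddress; // EmitSymbolValue(CurrentFnSym, 8)
  uint8_t Kind;             // FUNCTION_ENTER or FUNCTION_EXIT
  uint8_t AlwaysInstrument; // from the "function-instrument" attribute
  uint8_t Padding[14];      // EmitZeros(14)
};

static_assert(sizeof(XRayMapEntrySketch) == 32,
              "records are laid out 32 bytes apart");

int main() { return 0; }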
+
// Returns instruction preceding MBBI in MachineFunction.
// If MBBI is the first instruction of the first basic block, returns null.
static MachineBasicBlock::const_iterator
PrevCrossBBInst(MachineBasicBlock::const_iterator MBBI) {
const MachineBasicBlock *MBB = MBBI->getParent();
while (MBBI == MBB->begin()) {
- if (MBB == MBB->getParent()->begin())
- return nullptr;
+ if (MBB == &MBB->getParent()->front())
+ return MachineBasicBlock::const_iterator();
MBB = MBB->getPrevNode();
MBBI = MBB->end();
}
@@ -1018,7 +1153,8 @@ static const Constant *getConstantFromPool(const MachineInstr &MI,
}
static std::string getShuffleComment(const MachineOperand &DstOp,
- const MachineOperand &SrcOp,
+ const MachineOperand &SrcOp1,
+ const MachineOperand &SrcOp2,
ArrayRef<int> Mask) {
std::string Comment;
@@ -1031,40 +1167,51 @@ static std::string getShuffleComment(const MachineOperand &DstOp,
return X86ATTInstPrinter::getRegisterName(RegNum);
};
+ // TODO: Add support for specifying an AVX512 style mask register in the
+ // comment.
StringRef DstName = DstOp.isReg() ? GetRegisterName(DstOp.getReg()) : "mem";
- StringRef SrcName = SrcOp.isReg() ? GetRegisterName(SrcOp.getReg()) : "mem";
+ StringRef Src1Name =
+ SrcOp1.isReg() ? GetRegisterName(SrcOp1.getReg()) : "mem";
+ StringRef Src2Name =
+ SrcOp2.isReg() ? GetRegisterName(SrcOp2.getReg()) : "mem";
+
+ // One source operand, fix the mask to print all elements in one span.
+ SmallVector<int, 8> ShuffleMask(Mask.begin(), Mask.end());
+ if (Src1Name == Src2Name)
+ for (int i = 0, e = ShuffleMask.size(); i != e; ++i)
+ if (ShuffleMask[i] >= e)
+ ShuffleMask[i] -= e;
raw_string_ostream CS(Comment);
CS << DstName << " = ";
- bool NeedComma = false;
- bool InSrc = false;
- for (int M : Mask) {
- // Wrap up any prior entry...
- if (M == SM_SentinelZero && InSrc) {
- InSrc = false;
- CS << "]";
- }
- if (NeedComma)
+ for (int i = 0, e = ShuffleMask.size(); i != e; ++i) {
+ if (i != 0)
CS << ",";
- else
- NeedComma = true;
-
- // Print this shuffle...
- if (M == SM_SentinelZero) {
+ if (ShuffleMask[i] == SM_SentinelZero) {
CS << "zero";
- } else {
- if (!InSrc) {
- InSrc = true;
- CS << SrcName << "[";
- }
- if (M == SM_SentinelUndef)
+ continue;
+ }
+
+ // Otherwise, it must come from src1 or src2. Print the span of elements
+ // that comes from this src.
+ bool isSrc1 = ShuffleMask[i] < (int)e;
+ CS << (isSrc1 ? Src1Name : Src2Name) << '[';
+
+ bool IsFirst = true;
+ while (i != e && ShuffleMask[i] != SM_SentinelZero &&
+ (ShuffleMask[i] < (int)e) == isSrc1) {
+ if (!IsFirst)
+ CS << ',';
+ else
+ IsFirst = false;
+ if (ShuffleMask[i] == SM_SentinelUndef)
CS << "u";
else
- CS << M;
+ CS << ShuffleMask[i] % (int)e;
+ ++i;
}
+ CS << ']';
+ --i; // The enclosing for loop re-increments the element index.
}
- if (InSrc)
- CS << "]";
CS.flush();
return Comment;
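
A standalone sketch of the span grouping this rewrite introduces, using the sentinel values from LLVM's shuffle decoder (SM_SentinelUndef = -1, SM_SentinelZero = -2); the register names are illustrative, and the one-source canonicalization (Src1Name == Src2Name) is omitted here. Runs of indices from the same source print as one "src[...]" span.

#include <cstdio>
#include <string>
#include <vector>

static const int SentinelUndef = -1, SentinelZero = -2;

static std::string shuffleComment(const std::vector<int> &Mask,
                                  const char *Src1, const char *Src2) {
  const int E = (int)Mask.size();
  std::string S;
  for (int i = 0; i != E; ++i) {
    if (i)
      S += ',';
    if (Mask[i] == SentinelZero) {
      S += "zero";
      continue;
    }
    bool IsSrc1 = Mask[i] < E; // indices >= E select from Src2
    S += (IsSrc1 ? Src1 : Src2);
    S += '[';
    bool First = true;
    while (i != E && Mask[i] != SentinelZero && (Mask[i] < E) == IsSrc1) {
      if (!First)
        S += ',';
      First = false;
      if (Mask[i] == SentinelUndef)
        S += 'u';
      else
        S += std::to_string(Mask[i] % E);
      ++i;
    }
    S += ']';
    --i; // the enclosing for loop re-increments
  }
  return S;
}

int main() {
  // Prints: xmm1[0,1],zero,xmm2[2]
  std::printf("%s\n",
              shuffleComment({0, 1, SentinelZero, 6}, "xmm1", "xmm2").c_str());
}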
@@ -1202,12 +1349,21 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case TargetOpcode::FAULTING_LOAD_OP:
return LowerFAULTING_LOAD_OP(*MI, MCInstLowering);
+ case TargetOpcode::PATCHABLE_OP:
+ return LowerPATCHABLE_OP(*MI, MCInstLowering);
+
case TargetOpcode::STACKMAP:
return LowerSTACKMAP(*MI);
case TargetOpcode::PATCHPOINT:
return LowerPATCHPOINT(*MI, MCInstLowering);
+ case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
+ return LowerPATCHABLE_FUNCTION_ENTER(*MI, MCInstLowering);
+
+ case TargetOpcode::PATCHABLE_RET:
+ return LowerPATCHABLE_RET(*MI, MCInstLowering);
+
case X86::MORESTACK_RET:
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
return;
@@ -1254,7 +1410,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case X86::SEH_Epilogue: {
MachineBasicBlock::const_iterator MBBI(MI);
// Check if preceded by a call and emit nop if so.
- for (MBBI = PrevCrossBBInst(MBBI); MBBI; MBBI = PrevCrossBBInst(MBBI)) {
+ for (MBBI = PrevCrossBBInst(MBBI);
+ MBBI != MachineBasicBlock::const_iterator();
+ MBBI = PrevCrossBBInst(MBBI)) {
// Conservatively assume that pseudo instructions don't emit code and keep
// looking for a call. We may emit an unnecessary nop in some cases.
if (!MBBI->isPseudo()) {
@@ -1313,14 +1471,38 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
SmallVector<int, 16> Mask;
DecodePSHUFBMask(C, Mask);
if (!Mask.empty())
- OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp, Mask));
+ OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp, SrcOp, Mask));
}
break;
}
- case X86::VPERMILPSrm:
+
case X86::VPERMILPDrm:
+ case X86::VPERMILPDYrm:
+ case X86::VPERMILPDZ128rm:
+ case X86::VPERMILPDZ256rm:
+ case X86::VPERMILPDZrm: {
+ if (!OutStreamer->isVerboseAsm())
+ break;
+ assert(MI->getNumOperands() > 5 &&
+ "We should always have at least 5 operands!");
+ const MachineOperand &DstOp = MI->getOperand(0);
+ const MachineOperand &SrcOp = MI->getOperand(1);
+ const MachineOperand &MaskOp = MI->getOperand(5);
+
+ if (auto *C = getConstantFromPool(*MI, MaskOp)) {
+ SmallVector<int, 8> Mask;
+ DecodeVPERMILPMask(C, 64, Mask);
+ if (!Mask.empty())
+ OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp, SrcOp, Mask));
+ }
+ break;
+ }
+
+ case X86::VPERMILPSrm:
case X86::VPERMILPSYrm:
- case X86::VPERMILPDYrm: {
+ case X86::VPERMILPSZ128rm:
+ case X86::VPERMILPSZ256rm:
+ case X86::VPERMILPSZrm: {
if (!OutStreamer->isVerboseAsm())
break;
assert(MI->getNumOperands() > 5 &&
@@ -1329,18 +1511,63 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
const MachineOperand &SrcOp = MI->getOperand(1);
const MachineOperand &MaskOp = MI->getOperand(5);
+ if (auto *C = getConstantFromPool(*MI, MaskOp)) {
+ SmallVector<int, 16> Mask;
+ DecodeVPERMILPMask(C, 32, Mask);
+ if (!Mask.empty())
+ OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp, SrcOp, Mask));
+ }
+ break;
+ }
+
+ case X86::VPERMIL2PDrm:
+ case X86::VPERMIL2PSrm:
+ case X86::VPERMIL2PDrmY:
+ case X86::VPERMIL2PSrmY: {
+ if (!OutStreamer->isVerboseAsm())
+ break;
+ assert(MI->getNumOperands() > 7 &&
+ "We should always have at least 7 operands!");
+ const MachineOperand &DstOp = MI->getOperand(0);
+ const MachineOperand &SrcOp1 = MI->getOperand(1);
+ const MachineOperand &SrcOp2 = MI->getOperand(2);
+ const MachineOperand &MaskOp = MI->getOperand(6);
+ const MachineOperand &CtrlOp = MI->getOperand(MI->getNumOperands() - 1);
+
+ if (!CtrlOp.isImm())
+ break;
+
unsigned ElSize;
switch (MI->getOpcode()) {
default: llvm_unreachable("Invalid opcode");
- case X86::VPERMILPSrm: case X86::VPERMILPSYrm: ElSize = 32; break;
- case X86::VPERMILPDrm: case X86::VPERMILPDYrm: ElSize = 64; break;
+ case X86::VPERMIL2PSrm: case X86::VPERMIL2PSrmY: ElSize = 32; break;
+ case X86::VPERMIL2PDrm: case X86::VPERMIL2PDrmY: ElSize = 64; break;
+ }
+
+ if (auto *C = getConstantFromPool(*MI, MaskOp)) {
+ SmallVector<int, 16> Mask;
+ DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Mask);
+ if (!Mask.empty())
+ OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp1, SrcOp2, Mask));
}
+ break;
+ }
+
+ case X86::VPPERMrrm: {
+ if (!OutStreamer->isVerboseAsm())
+ break;
+ assert(MI->getNumOperands() > 6 &&
+ "We should always have at least 6 operands!");
+ const MachineOperand &DstOp = MI->getOperand(0);
+ const MachineOperand &SrcOp1 = MI->getOperand(1);
+ const MachineOperand &SrcOp2 = MI->getOperand(2);
+ const MachineOperand &MaskOp = MI->getOperand(6);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
SmallVector<int, 16> Mask;
- DecodeVPERMILPMask(C, ElSize, Mask);
+ DecodeVPPERMMask(C, Mask);
if (!Mask.empty())
- OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp, Mask));
+ OutStreamer->AddComment(getShuffleComment(DstOp, SrcOp1, SrcOp2, Mask));
}
break;
}
@@ -1413,7 +1640,7 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
CS << CI->getZExtValue();
} else {
// print multi-word constant as (w0,w1)
- auto Val = CI->getValue();
+ const auto &Val = CI->getValue();
CS << "(";
for (int i = 0, N = Val.getNumWords(); i < N; ++i) {
if (i > 0)
@@ -1446,7 +1673,7 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
// is at the end of the shadow.
if (MI->isCall()) {
 // Count the size of the call towards the shadow
- SMShadowTracker.count(TmpInst, getSubtargetInfo());
+ SMShadowTracker.count(TmpInst, getSubtargetInfo(), CodeEmitter.get());
// Then flush the shadow so that we fill with nops before the call, not
// after it.
SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
diff --git a/lib/Target/X86/X86MachineFunctionInfo.h b/lib/Target/X86/X86MachineFunctionInfo.h
index 00515dde5568..d517d82537a7 100644
--- a/lib/Target/X86/X86MachineFunctionInfo.h
+++ b/lib/Target/X86/X86MachineFunctionInfo.h
@@ -17,7 +17,6 @@
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineValueType.h"
-#include <vector>
namespace llvm {
@@ -96,6 +95,12 @@ class X86MachineFunctionInfo : public MachineFunctionInfo {
/// copies.
bool IsSplitCSR = false;
+ /// True if this function uses the red zone.
+ bool UsesRedZone = false;
+
+ /// True if this function has WIN_ALLOCA instructions.
+ bool HasWinAlloca = false;
+
private:
/// ForwardedMustTailRegParms - A list of virtual and physical registers
/// that must be forwarded to every musttail call.
@@ -167,6 +172,12 @@ public:
bool isSplitCSR() const { return IsSplitCSR; }
void setIsSplitCSR(bool s) { IsSplitCSR = s; }
+
+ bool getUsesRedZone() const { return UsesRedZone; }
+ void setUsesRedZone(bool V) { UsesRedZone = V; }
+
+ bool hasWinAlloca() const { return HasWinAlloca; }
+ void setHasWinAlloca(bool v) { HasWinAlloca = v; }
};
} // End llvm namespace
diff --git a/lib/Target/X86/X86OptimizeLEAs.cpp b/lib/Target/X86/X86OptimizeLEAs.cpp
index 45cc0aef1d93..4da0fddda395 100644
--- a/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
// This file defines the pass that performs some optimizations with LEA
-// instructions in order to improve code size.
+// instructions in order to improve performance and code size.
// Currently, it does two things:
// 1) If there are two LEA instructions calculating addresses which only differ
// by displacement inside a basic block, one of them is removed.
@@ -24,6 +24,7 @@
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
@@ -35,13 +36,186 @@ using namespace llvm;
#define DEBUG_TYPE "x86-optimize-LEAs"
-static cl::opt<bool> EnableX86LEAOpt("enable-x86-lea-opt", cl::Hidden,
- cl::desc("X86: Enable LEA optimizations."),
- cl::init(false));
+static cl::opt<bool>
+ DisableX86LEAOpt("disable-x86-lea-opt", cl::Hidden,
+ cl::desc("X86: Disable LEA optimizations."),
+ cl::init(false));
STATISTIC(NumSubstLEAs, "Number of LEA instruction substitutions");
STATISTIC(NumRedundantLEAs, "Number of redundant LEA instructions removed");
+class MemOpKey;
+
+/// \brief Returns a hash table key based on memory operands of \p MI. The
+/// number of the first memory operand of \p MI is specified through \p N.
+static inline MemOpKey getMemOpKey(const MachineInstr &MI, unsigned N);
+
+/// \brief Returns true if two machine operands are identical and they are not
+/// physical registers.
+static inline bool isIdenticalOp(const MachineOperand &MO1,
+ const MachineOperand &MO2);
+
+/// \brief Returns true if two address displacement operands are of the same
+/// type and use the same symbol/index/address regardless of the offset.
+static bool isSimilarDispOp(const MachineOperand &MO1,
+ const MachineOperand &MO2);
+
+/// \brief Returns true if the instruction is LEA.
+static inline bool isLEA(const MachineInstr &MI);
+
+/// A key based on instruction's memory operands.
+class MemOpKey {
+public:
+ MemOpKey(const MachineOperand *Base, const MachineOperand *Scale,
+ const MachineOperand *Index, const MachineOperand *Segment,
+ const MachineOperand *Disp)
+ : Disp(Disp) {
+ Operands[0] = Base;
+ Operands[1] = Scale;
+ Operands[2] = Index;
+ Operands[3] = Segment;
+ }
+
+ bool operator==(const MemOpKey &Other) const {
+ // Addresses' bases, scales, indices and segments must be identical.
+ for (int i = 0; i < 4; ++i)
+ if (!isIdenticalOp(*Operands[i], *Other.Operands[i]))
+ return false;
+
+ // Addresses' displacements don't have to be exactly the same. It only
+ // matters that they use the same symbol/index/address. Immediates' or
+ // offsets' differences will be taken care of during instruction
+ // substitution.
+ return isSimilarDispOp(*Disp, *Other.Disp);
+ }
+
+ // Address' base, scale, index and segment operands.
+ const MachineOperand *Operands[4];
+
+ // Address' displacement operand.
+ const MachineOperand *Disp;
+};
+
+/// Provide DenseMapInfo for MemOpKey.
+namespace llvm {
+template <> struct DenseMapInfo<MemOpKey> {
+ typedef DenseMapInfo<const MachineOperand *> PtrInfo;
+
+ static inline MemOpKey getEmptyKey() {
+ return MemOpKey(PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
+ PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
+ PtrInfo::getEmptyKey());
+ }
+
+ static inline MemOpKey getTombstoneKey() {
+ return MemOpKey(PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
+ PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
+ PtrInfo::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const MemOpKey &Val) {
+ // Checking any field of MemOpKey is enough to determine if the key is
+ // empty or tombstone.
+ assert(Val.Disp != PtrInfo::getEmptyKey() && "Cannot hash the empty key");
+ assert(Val.Disp != PtrInfo::getTombstoneKey() &&
+ "Cannot hash the tombstone key");
+
+ hash_code Hash = hash_combine(*Val.Operands[0], *Val.Operands[1],
+ *Val.Operands[2], *Val.Operands[3]);
+
+ // If the address displacement is an immediate, it should not affect the
+ // hash so that memory operands which differ only by immediate displacement
+ // would have the same hash. If the address displacement is something else,
+ // we should reflect symbol/index/address in the hash.
+ switch (Val.Disp->getType()) {
+ case MachineOperand::MO_Immediate:
+ break;
+ case MachineOperand::MO_ConstantPoolIndex:
+ case MachineOperand::MO_JumpTableIndex:
+ Hash = hash_combine(Hash, Val.Disp->getIndex());
+ break;
+ case MachineOperand::MO_ExternalSymbol:
+ Hash = hash_combine(Hash, Val.Disp->getSymbolName());
+ break;
+ case MachineOperand::MO_GlobalAddress:
+ Hash = hash_combine(Hash, Val.Disp->getGlobal());
+ break;
+ case MachineOperand::MO_BlockAddress:
+ Hash = hash_combine(Hash, Val.Disp->getBlockAddress());
+ break;
+ case MachineOperand::MO_MCSymbol:
+ Hash = hash_combine(Hash, Val.Disp->getMCSymbol());
+ break;
+ case MachineOperand::MO_MachineBasicBlock:
+ Hash = hash_combine(Hash, Val.Disp->getMBB());
+ break;
+ default:
+ llvm_unreachable("Invalid address displacement operand");
+ }
+
+ return (unsigned)Hash;
+ }
+
+ static bool isEqual(const MemOpKey &LHS, const MemOpKey &RHS) {
+ // Checking any field of MemOpKey is enough to determine if the key is
+ // empty or tombstone.
+ if (RHS.Disp == PtrInfo::getEmptyKey())
+ return LHS.Disp == PtrInfo::getEmptyKey();
+ if (RHS.Disp == PtrInfo::getTombstoneKey())
+ return LHS.Disp == PtrInfo::getTombstoneKey();
+ return LHS == RHS;
+ }
+};
+}
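
The reason the hash skips immediate displacements, shown standalone (the Addr struct and hash mixing are stand-ins for the MachineOperand-based code above): two addresses that differ only in their immediate offset must land in the same bucket, or the pass could never pair the LEAs that compute them.

#include <cstdio>
#include <functional>

struct Addr {
  int Base, Scale, Index, Segment; // stand-ins for the four operands
  long Disp;                       // immediate displacement
};

// Mirrors getHashValue's MO_Immediate case: everything but Disp is mixed in.
static size_t hashAddr(const Addr &A) {
  size_t H = std::hash<int>()(A.Base);
  H = H * 31 + std::hash<int>()(A.Scale);
  H = H * 31 + std::hash<int>()(A.Index);
  H = H * 31 + std::hash<int>()(A.Segment);
  return H;
}

int main() {
  Addr A{/*base*/5, /*scale*/4, /*index*/4, /*seg*/0, /*disp*/16};
  Addr B{5, 4, 4, 0, 24};
  std::printf("%d\n", hashAddr(A) == hashAddr(B)); // 1: same bucket
}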
+
+static inline MemOpKey getMemOpKey(const MachineInstr &MI, unsigned N) {
+ assert((isLEA(MI) || MI.mayLoadOrStore()) &&
+ "The instruction must be a LEA, a load or a store");
+ return MemOpKey(&MI.getOperand(N + X86::AddrBaseReg),
+ &MI.getOperand(N + X86::AddrScaleAmt),
+ &MI.getOperand(N + X86::AddrIndexReg),
+ &MI.getOperand(N + X86::AddrSegmentReg),
+ &MI.getOperand(N + X86::AddrDisp));
+}
+
+static inline bool isIdenticalOp(const MachineOperand &MO1,
+ const MachineOperand &MO2) {
+ return MO1.isIdenticalTo(MO2) &&
+ (!MO1.isReg() ||
+ !TargetRegisterInfo::isPhysicalRegister(MO1.getReg()));
+}
+
+#ifndef NDEBUG
+static bool isValidDispOp(const MachineOperand &MO) {
+ return MO.isImm() || MO.isCPI() || MO.isJTI() || MO.isSymbol() ||
+ MO.isGlobal() || MO.isBlockAddress() || MO.isMCSymbol() || MO.isMBB();
+}
+#endif
+
+static bool isSimilarDispOp(const MachineOperand &MO1,
+ const MachineOperand &MO2) {
+ assert(isValidDispOp(MO1) && isValidDispOp(MO2) &&
+ "Address displacement operand is not valid");
+ return (MO1.isImm() && MO2.isImm()) ||
+ (MO1.isCPI() && MO2.isCPI() && MO1.getIndex() == MO2.getIndex()) ||
+ (MO1.isJTI() && MO2.isJTI() && MO1.getIndex() == MO2.getIndex()) ||
+ (MO1.isSymbol() && MO2.isSymbol() &&
+ MO1.getSymbolName() == MO2.getSymbolName()) ||
+ (MO1.isGlobal() && MO2.isGlobal() &&
+ MO1.getGlobal() == MO2.getGlobal()) ||
+ (MO1.isBlockAddress() && MO2.isBlockAddress() &&
+ MO1.getBlockAddress() == MO2.getBlockAddress()) ||
+ (MO1.isMCSymbol() && MO2.isMCSymbol() &&
+ MO1.getMCSymbol() == MO2.getMCSymbol()) ||
+ (MO1.isMBB() && MO2.isMBB() && MO1.getMBB() == MO2.getMBB());
+}
+
+static inline bool isLEA(const MachineInstr &MI) {
+ unsigned Opcode = MI.getOpcode();
+ return Opcode == X86::LEA16r || Opcode == X86::LEA32r ||
+ Opcode == X86::LEA64r || Opcode == X86::LEA64_32r;
+}
+
namespace {
class OptimizeLEAPass : public MachineFunctionPass {
public:
@@ -55,51 +229,43 @@ public:
bool runOnMachineFunction(MachineFunction &MF) override;
private:
+ typedef DenseMap<MemOpKey, SmallVector<MachineInstr *, 16>> MemOpMap;
+
 /// \brief Returns the distance between two instructions inside one basic block.
 /// A negative result means that the instructions occur in reverse order.
int calcInstrDist(const MachineInstr &First, const MachineInstr &Last);
/// \brief Choose the best \p LEA instruction from the \p List to replace
/// address calculation in \p MI instruction. Return the address displacement
- /// and the distance between \p MI and the choosen \p LEA in \p AddrDispShift
- /// and \p Dist.
+ /// and the distance between \p MI and the chosen \p BestLEA in
+ /// \p AddrDispShift and \p Dist.
bool chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
- const MachineInstr &MI, MachineInstr *&LEA,
+ const MachineInstr &MI, MachineInstr *&BestLEA,
int64_t &AddrDispShift, int &Dist);
- /// \brief Returns true if two machine operand are identical and they are not
- /// physical registers.
- bool isIdenticalOp(const MachineOperand &MO1, const MachineOperand &MO2);
-
- /// \brief Returns true if the instruction is LEA.
- bool isLEA(const MachineInstr &MI);
+ /// \brief Returns the difference between addresses' displacements of \p MI1
+ /// and \p MI2. The numbers of the first memory operands for the instructions
+ /// are specified through \p N1 and \p N2.
+ int64_t getAddrDispShift(const MachineInstr &MI1, unsigned N1,
+ const MachineInstr &MI2, unsigned N2) const;
/// \brief Returns true if the \p Last LEA instruction can be replaced by the
/// \p First. The difference between displacements of the addresses calculated
/// by these LEAs is returned in \p AddrDispShift. It'll be used for proper
/// replacement of the \p Last LEA's uses with the \p First's def register.
bool isReplaceable(const MachineInstr &First, const MachineInstr &Last,
- int64_t &AddrDispShift);
-
- /// \brief Returns true if two instructions have memory operands that only
- /// differ by displacement. The numbers of the first memory operands for both
- /// instructions are specified through \p N1 and \p N2. The address
- /// displacement is returned through AddrDispShift.
- bool isSimilarMemOp(const MachineInstr &MI1, unsigned N1,
- const MachineInstr &MI2, unsigned N2,
- int64_t &AddrDispShift);
+ int64_t &AddrDispShift) const;
/// \brief Find all LEA instructions in the basic block. Also, assign position
/// numbers to all instructions in the basic block to speed up calculation of
/// distance between them.
- void findLEAs(const MachineBasicBlock &MBB,
- SmallVectorImpl<MachineInstr *> &List);
+ void findLEAs(const MachineBasicBlock &MBB, MemOpMap &LEAs);
/// \brief Removes redundant address calculations.
- bool removeRedundantAddrCalc(const SmallVectorImpl<MachineInstr *> &List);
+ bool removeRedundantAddrCalc(MemOpMap &LEAs);
/// \brief Removes LEAs which calculate similar addresses.
- bool removeRedundantLEAs(SmallVectorImpl<MachineInstr *> &List);
+ bool removeRedundantLEAs(MemOpMap &LEAs);
DenseMap<const MachineInstr *, unsigned> InstrPos;
@@ -137,22 +303,20 @@ int OptimizeLEAPass::calcInstrDist(const MachineInstr &First,
// 4) The LEA should be as close to MI as possible, and prior to it if
// possible.
bool OptimizeLEAPass::chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
- const MachineInstr &MI, MachineInstr *&LEA,
+ const MachineInstr &MI,
+ MachineInstr *&BestLEA,
int64_t &AddrDispShift, int &Dist) {
const MachineFunction *MF = MI.getParent()->getParent();
const MCInstrDesc &Desc = MI.getDesc();
- int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags, MI.getOpcode()) +
+ int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags) +
X86II::getOperandBias(Desc);
- LEA = nullptr;
+ BestLEA = nullptr;
// Loop over all LEA instructions.
for (auto DefMI : List) {
- int64_t AddrDispShiftTemp = 0;
-
- // Compare instructions memory operands.
- if (!isSimilarMemOp(MI, MemOpNo, *DefMI, 1, AddrDispShiftTemp))
- continue;
+ // Get new address displacement.
+ int64_t AddrDispShiftTemp = getAddrDispShift(MI, MemOpNo, *DefMI, 1);
// Make sure address displacement fits 4 bytes.
if (!isInt<32>(AddrDispShiftTemp))
@@ -174,14 +338,14 @@ bool OptimizeLEAPass::chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
int DistTemp = calcInstrDist(*DefMI, MI);
assert(DistTemp != 0 &&
"The distance between two different instructions cannot be zero");
- if (DistTemp > 0 || LEA == nullptr) {
+ if (DistTemp > 0 || BestLEA == nullptr) {
// Do not update the returned LEA if the current one provides a displacement
// which fits in 1 byte, while the new candidate does not.
- if (LEA != nullptr && !isInt<8>(AddrDispShiftTemp) &&
+ if (BestLEA != nullptr && !isInt<8>(AddrDispShiftTemp) &&
isInt<8>(AddrDispShift))
continue;
- LEA = DefMI;
+ BestLEA = DefMI;
AddrDispShift = AddrDispShiftTemp;
Dist = DistTemp;
}
@@ -191,20 +355,28 @@ bool OptimizeLEAPass::chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
break;
}
- return LEA != nullptr;
-}
-
-bool OptimizeLEAPass::isIdenticalOp(const MachineOperand &MO1,
- const MachineOperand &MO2) {
- return MO1.isIdenticalTo(MO2) &&
- (!MO1.isReg() ||
- !TargetRegisterInfo::isPhysicalRegister(MO1.getReg()));
+ return BestLEA != nullptr;
}
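For intuition, the selection heuristic above can be boiled down to a minimal standalone sketch; the Cand/Dist/Disp names are hypothetical stand-ins for the pass's MachineInstr machinery, and it assumes, as the code does, that a candidate placed before the user (positive distance) with a 1-byte displacement is good enough to stop the search:

#include <cstdint>
#include <vector>

struct Cand { int Dist; int64_t Disp; }; // hypothetical stand-in for an LEA

static bool fitsInt8(int64_t V) { return V >= -128 && V <= 127; }

// Mirrors chooseBestLEA's preferences: take a candidate prior to the user
// (Dist > 0) when possible, and never trade a displacement that encodes in
// one signed byte for one that does not.
const Cand *chooseBest(const std::vector<Cand> &List) {
  const Cand *Best = nullptr;
  for (const Cand &C : List) {
    if (C.Dist > 0 || !Best) {
      if (Best && !fitsInt8(C.Disp) && fitsInt8(Best->Disp))
        continue; // keep the shorter-encoding candidate
      Best = &C;
      if (C.Dist > 0 && fitsInt8(C.Disp))
        break;    // prior to the user and 1-byte displacement: stop early
    }
  }
  return Best;
}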
-bool OptimizeLEAPass::isLEA(const MachineInstr &MI) {
- unsigned Opcode = MI.getOpcode();
- return Opcode == X86::LEA16r || Opcode == X86::LEA32r ||
- Opcode == X86::LEA64r || Opcode == X86::LEA64_32r;
+// Get the difference between the addresses' displacements of the two
+// instructions \p MI1 and \p MI2. The numbers of the first memory operands are
+// passed through \p N1 and \p N2.
+int64_t OptimizeLEAPass::getAddrDispShift(const MachineInstr &MI1, unsigned N1,
+ const MachineInstr &MI2,
+ unsigned N2) const {
+ const MachineOperand &Op1 = MI1.getOperand(N1 + X86::AddrDisp);
+ const MachineOperand &Op2 = MI2.getOperand(N2 + X86::AddrDisp);
+
+ assert(isSimilarDispOp(Op1, Op2) &&
+ "Address displacement operands are not compatible");
+
+ // After the assert above we can be sure that both operands are of the same
+ // valid type and use the same symbol/index/address, so the displacement
+ // shift calculation is simple.
+ if (Op1.isJTI())
+ return 0;
+ return Op1.isImm() ? Op1.getImm() - Op2.getImm()
+ : Op1.getOffset() - Op2.getOffset();
}
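Stripped of the MachineOperand plumbing, the helper above is just a signed subtraction once the operands are known compatible; a minimal sketch with a hypothetical AddrDisp type:

#include <cassert>
#include <cstdint>

// Hypothetical decoded displacement operand: base, scale, index and segment
// are assumed identical, as the assert in getAddrDispShift() guarantees.
struct AddrDisp {
  bool IsImm;     // immediate displacement vs. symbol plus offset
  int64_t Value;  // the immediate, or the symbol's offset
};

int64_t addrDispShift(const AddrDisp &A, const AddrDisp &B) {
  assert(A.IsImm == B.IsImm && "operands must be of the same kind");
  return A.Value - B.Value; // jump-table operands would yield 0 instead
}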
// Check that the Last LEA can be replaced by the First LEA. To be so,
@@ -215,13 +387,12 @@ bool OptimizeLEAPass::isLEA(const MachineInstr &MI) {
// register is used only as address base.
bool OptimizeLEAPass::isReplaceable(const MachineInstr &First,
const MachineInstr &Last,
- int64_t &AddrDispShift) {
+ int64_t &AddrDispShift) const {
assert(isLEA(First) && isLEA(Last) &&
"The function works only with LEA instructions");
- // Compare instructions' memory operands.
- if (!isSimilarMemOp(Last, 1, First, 1, AddrDispShift))
- return false;
+ // Get new address displacement.
+ AddrDispShift = getAddrDispShift(Last, 1, First, 1);
// Make sure that LEA def registers belong to the same class. There may be
// instructions (like MOV8mr_NOREX) which allow a limited set of registers to
@@ -239,7 +410,7 @@ bool OptimizeLEAPass::isReplaceable(const MachineInstr &First,
// Get the number of the first memory operand.
const MCInstrDesc &Desc = MI.getDesc();
- int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags, MI.getOpcode());
+ int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);
// If the use instruction has no memory operand - the LEA is not
// replaceable.
@@ -270,36 +441,7 @@ bool OptimizeLEAPass::isReplaceable(const MachineInstr &First,
return true;
}
-// Check if MI1 and MI2 have memory operands which represent addresses that
-// differ only by displacement.
-bool OptimizeLEAPass::isSimilarMemOp(const MachineInstr &MI1, unsigned N1,
- const MachineInstr &MI2, unsigned N2,
- int64_t &AddrDispShift) {
- // Address base, scale, index and segment operands must be identical.
- static const int IdenticalOpNums[] = {X86::AddrBaseReg, X86::AddrScaleAmt,
- X86::AddrIndexReg, X86::AddrSegmentReg};
- for (auto &N : IdenticalOpNums)
- if (!isIdenticalOp(MI1.getOperand(N1 + N), MI2.getOperand(N2 + N)))
- return false;
-
- // Address displacement operands may differ by a constant.
- const MachineOperand *Op1 = &MI1.getOperand(N1 + X86::AddrDisp);
- const MachineOperand *Op2 = &MI2.getOperand(N2 + X86::AddrDisp);
- if (!isIdenticalOp(*Op1, *Op2)) {
- if (Op1->isImm() && Op2->isImm())
- AddrDispShift = Op1->getImm() - Op2->getImm();
- else if (Op1->isGlobal() && Op2->isGlobal() &&
- Op1->getGlobal() == Op2->getGlobal())
- AddrDispShift = Op1->getOffset() - Op2->getOffset();
- else
- return false;
- }
-
- return true;
-}
-
-void OptimizeLEAPass::findLEAs(const MachineBasicBlock &MBB,
- SmallVectorImpl<MachineInstr *> &List) {
+void OptimizeLEAPass::findLEAs(const MachineBasicBlock &MBB, MemOpMap &LEAs) {
unsigned Pos = 0;
for (auto &MI : MBB) {
// Assign the position number to the instruction. Note that we are going to
@@ -310,24 +452,22 @@ void OptimizeLEAPass::findLEAs(const MachineBasicBlock &MBB,
InstrPos[&MI] = Pos += 2;
if (isLEA(MI))
- List.push_back(const_cast<MachineInstr *>(&MI));
+ LEAs[getMemOpKey(MI, 1)].push_back(const_cast<MachineInstr *>(&MI));
}
}
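The stride of two in InstrPos is deliberate: it leaves an unused odd slot below every instruction, so a moved LEA can later be numbered InstrPos[&MI] - 1 without renumbering the rest of the block. A toy illustration of that scheme, with plain strings standing in for instructions:

#include <cstdio>
#include <map>
#include <string>

int main() {
  std::map<std::string, unsigned> InstrPos;
  unsigned Pos = 0;
  // Number instructions 2, 4, 6, ... as findLEAs() does.
  for (const char *I : {"lea", "mov", "add"})
    InstrPos[I] = Pos += 2;

  // Move "lea" to sit just before "add": it takes the free odd slot,
  // and every other position number stays valid.
  InstrPos["lea"] = InstrPos["add"] - 1;   // 5, between mov(4) and add(6)
  std::printf("lea is now at %u\n", InstrPos["lea"]);
}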
// Try to find load and store instructions which recalculate addresses already
// calculated by some LEA and replace their memory operands with its def
// register.
-bool OptimizeLEAPass::removeRedundantAddrCalc(
- const SmallVectorImpl<MachineInstr *> &List) {
+bool OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
bool Changed = false;
- assert(List.size() > 0);
- MachineBasicBlock *MBB = List[0]->getParent();
+ assert(!LEAs.empty());
+ MachineBasicBlock *MBB = (*LEAs.begin()->second.begin())->getParent();
// Process all instructions in basic block.
for (auto I = MBB->begin(), E = MBB->end(); I != E;) {
MachineInstr &MI = *I++;
- unsigned Opcode = MI.getOpcode();
// Instruction must be load or store.
if (!MI.mayLoadOrStore())
@@ -335,7 +475,7 @@ bool OptimizeLEAPass::removeRedundantAddrCalc(
// Get the number of the first memory operand.
const MCInstrDesc &Desc = MI.getDesc();
- int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags, Opcode);
+ int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);
// If instruction has no memory operand - skip it.
if (MemOpNo < 0)
@@ -347,7 +487,8 @@ bool OptimizeLEAPass::removeRedundantAddrCalc(
MachineInstr *DefMI;
int64_t AddrDispShift;
int Dist;
- if (!chooseBestLEA(List, MI, DefMI, AddrDispShift, Dist))
+ if (!chooseBestLEA(LEAs[getMemOpKey(MI, MemOpNo)], MI, DefMI, AddrDispShift,
+ Dist))
continue;
// If LEA occurs before current instruction, we can freely replace
@@ -362,9 +503,10 @@ bool OptimizeLEAPass::removeRedundantAddrCalc(
InstrPos[DefMI] = InstrPos[&MI] - 1;
// Make sure the instructions' position numbers are sane.
- assert(((InstrPos[DefMI] == 1 && DefMI == MBB->begin()) ||
+ assert(((InstrPos[DefMI] == 1 &&
+ MachineBasicBlock::iterator(DefMI) == MBB->begin()) ||
InstrPos[DefMI] >
- InstrPos[std::prev(MachineBasicBlock::iterator(DefMI))]) &&
+ InstrPos[&*std::prev(MachineBasicBlock::iterator(DefMI))]) &&
"Instruction positioning is broken");
}
@@ -393,75 +535,78 @@ bool OptimizeLEAPass::removeRedundantAddrCalc(
}
// Try to find similar LEAs in the list and replace one with another.
-bool
-OptimizeLEAPass::removeRedundantLEAs(SmallVectorImpl<MachineInstr *> &List) {
+bool OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
bool Changed = false;
- // Loop over all LEA pairs.
- auto I1 = List.begin();
- while (I1 != List.end()) {
- MachineInstr &First = **I1;
- auto I2 = std::next(I1);
- while (I2 != List.end()) {
- MachineInstr &Last = **I2;
- int64_t AddrDispShift;
-
- // LEAs should be in occurence order in the list, so we can freely
- // replace later LEAs with earlier ones.
- assert(calcInstrDist(First, Last) > 0 &&
- "LEAs must be in occurence order in the list");
-
- // Check that the Last LEA instruction can be replaced by the First.
- if (!isReplaceable(First, Last, AddrDispShift)) {
- ++I2;
- continue;
- }
-
- // Loop over all uses of the Last LEA and update their operands. Note that
- // the correctness of this has already been checked in the isReplaceable
- // function.
- for (auto UI = MRI->use_begin(Last.getOperand(0).getReg()),
- UE = MRI->use_end();
- UI != UE;) {
- MachineOperand &MO = *UI++;
- MachineInstr &MI = *MO.getParent();
-
- // Get the number of the first memory operand.
- const MCInstrDesc &Desc = MI.getDesc();
- int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags, MI.getOpcode()) +
- X86II::getOperandBias(Desc);
-
- // Update address base.
- MO.setReg(First.getOperand(0).getReg());
-
- // Update address disp.
- MachineOperand *Op = &MI.getOperand(MemOpNo + X86::AddrDisp);
- if (Op->isImm())
- Op->setImm(Op->getImm() + AddrDispShift);
- else if (Op->isGlobal())
- Op->setOffset(Op->getOffset() + AddrDispShift);
- else
- llvm_unreachable("Invalid address displacement operand");
+ // Loop over all entries in the table.
+ for (auto &E : LEAs) {
+ auto &List = E.second;
+
+ // Loop over all LEA pairs.
+ auto I1 = List.begin();
+ while (I1 != List.end()) {
+ MachineInstr &First = **I1;
+ auto I2 = std::next(I1);
+ while (I2 != List.end()) {
+ MachineInstr &Last = **I2;
+ int64_t AddrDispShift;
+
+ // LEAs should be in occurrence order in the list, so we can freely
+ // replace later LEAs with earlier ones.
+ assert(calcInstrDist(First, Last) > 0 &&
+ "LEAs must be in occurence order in the list");
+
+ // Check that the Last LEA instruction can be replaced by the First.
+ if (!isReplaceable(First, Last, AddrDispShift)) {
+ ++I2;
+ continue;
+ }
+
+ // Loop over all uses of the Last LEA and update their operands. Note
+ // that the correctness of this has already been checked in the
+ // isReplaceable function.
+ for (auto UI = MRI->use_begin(Last.getOperand(0).getReg()),
+ UE = MRI->use_end();
+ UI != UE;) {
+ MachineOperand &MO = *UI++;
+ MachineInstr &MI = *MO.getParent();
+
+ // Get the number of the first memory operand.
+ const MCInstrDesc &Desc = MI.getDesc();
+ int MemOpNo =
+ X86II::getMemoryOperandNo(Desc.TSFlags) +
+ X86II::getOperandBias(Desc);
+
+ // Update address base.
+ MO.setReg(First.getOperand(0).getReg());
+
+ // Update address disp.
+ MachineOperand &Op = MI.getOperand(MemOpNo + X86::AddrDisp);
+ if (Op.isImm())
+ Op.setImm(Op.getImm() + AddrDispShift);
+ else if (!Op.isJTI())
+ Op.setOffset(Op.getOffset() + AddrDispShift);
+ }
+
+ // Since we can possibly extend register lifetime, clear kill flags.
+ MRI->clearKillFlags(First.getOperand(0).getReg());
+
+ ++NumRedundantLEAs;
+ DEBUG(dbgs() << "OptimizeLEAs: Remove redundant LEA: "; Last.dump(););
+
+ // By this moment, all of the Last LEA's uses must be replaced. So we
+ // can freely remove it.
+ assert(MRI->use_empty(Last.getOperand(0).getReg()) &&
+ "The LEA's def register must have no uses");
+ Last.eraseFromParent();
+
+ // Erase removed LEA from the list.
+ I2 = List.erase(I2);
+
+ Changed = true;
}
-
- // Since we can possibly extend register lifetime, clear kill flags.
- MRI->clearKillFlags(First.getOperand(0).getReg());
-
- ++NumRedundantLEAs;
- DEBUG(dbgs() << "OptimizeLEAs: Remove redundant LEA: "; Last.dump(););
-
- // By this moment, all of the Last LEA's uses must be replaced. So we can
- // freely remove it.
- assert(MRI->use_empty(Last.getOperand(0).getReg()) &&
- "The LEA's def register must have no uses");
- Last.eraseFromParent();
-
- // Erase removed LEA from the list.
- I2 = List.erase(I2);
-
- Changed = true;
+ ++I1;
}
- ++I1;
}
return Changed;
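The payoff of the MemOpMap rework above is that LEAs are bucketed by everything in their address except the displacement, so the pairwise replaceability scan runs only within a bucket instead of across every LEA in the block. A small sketch of the idea, with a hypothetical tuple key in place of the pass's hashed MemOpKey:

#include <cstdint>
#include <map>
#include <tuple>
#include <vector>

// Everything in the address except the displacement.
using Key = std::tuple<unsigned /*Base*/, unsigned /*Scale*/,
                       unsigned /*Index*/, unsigned /*Segment*/>;

struct Lea { Key K; int64_t Disp; };

int main() {
  std::vector<Lea> Leas = {{std::make_tuple(1u, 1u, 0u, 0u), 8},
                           {std::make_tuple(1u, 1u, 0u, 0u), 16},
                           {std::make_tuple(2u, 4u, 3u, 0u), 0}};
  std::map<Key, std::vector<Lea>> Buckets;
  for (const Lea &L : Leas)
    Buckets[L.K].push_back(L);
  // The first two LEAs share a bucket and differ only by displacement,
  // so one can be rewritten in terms of the other; the third is never
  // compared against them at all.
}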
@@ -470,8 +615,7 @@ OptimizeLEAPass::removeRedundantLEAs(SmallVectorImpl<MachineInstr *> &List) {
bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
- // Perform this optimization only if we care about code size.
- if (!EnableX86LEAOpt || !MF.getFunction()->optForSize())
+ if (DisableX86LEAOpt || skipFunction(*MF.getFunction()))
return false;
MRI = &MF.getRegInfo();
@@ -480,7 +624,7 @@ bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
// Process all basic blocks.
for (auto &MBB : MF) {
- SmallVector<MachineInstr *, 16> LEAs;
+ MemOpMap LEAs;
InstrPos.clear();
// Find all LEA instructions in basic block.
@@ -490,13 +634,13 @@ bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
if (LEAs.empty())
continue;
- // Remove redundant LEA instructions. The optimization may have a negative
- // effect on performance, so do it only for -Oz.
- if (MF.getFunction()->optForMinSize())
- Changed |= removeRedundantLEAs(LEAs);
+ // Remove redundant LEA instructions.
+ Changed |= removeRedundantLEAs(LEAs);
- // Remove redundant address calculations.
- Changed |= removeRedundantAddrCalc(LEAs);
+ // Remove redundant address calculations. Do it only for -Os/-Oz since only
+ // a code size gain is expected from this part of the pass.
+ if (MF.getFunction()->optForSize())
+ Changed |= removeRedundantAddrCalc(LEAs);
}
return Changed;
diff --git a/lib/Target/X86/X86PadShortFunction.cpp b/lib/Target/X86/X86PadShortFunction.cpp
index 0f425e28fa7d..62a9aafc2cf3 100644
--- a/lib/Target/X86/X86PadShortFunction.cpp
+++ b/lib/Target/X86/X86PadShortFunction.cpp
@@ -55,6 +55,11 @@ namespace {
bool runOnMachineFunction(MachineFunction &MF) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
+
const char *getPassName() const override {
return "X86 Atom pad short functions";
}
@@ -93,6 +98,9 @@ FunctionPass *llvm::createX86PadShortFunctions() {
/// runOnMachineFunction - Loop over all of the basic blocks, inserting
/// NOOP instructions before early exits.
bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
+ if (skipFunction(*MF.getFunction()))
+ return false;
+
if (MF.getFunction()->optForSize()) {
return false;
}
@@ -179,13 +187,11 @@ bool PadShortFunc::cyclesUntilReturn(MachineBasicBlock *MBB,
unsigned int CyclesToEnd = 0;
- for (MachineBasicBlock::iterator MBBI = MBB->begin();
- MBBI != MBB->end(); ++MBBI) {
- MachineInstr *MI = MBBI;
+ for (MachineInstr &MI : *MBB) {
// Mark basic blocks with a return instruction. Calls to other
// functions do not count because the called function will be padded,
// if necessary.
- if (MI->isReturn() && !MI->isCall()) {
+ if (MI.isReturn() && !MI.isCall()) {
VisitedBBs[MBB] = VisitedBBInfo(true, CyclesToEnd);
Cycles += CyclesToEnd;
return true;
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 274b56688558..86750633aecc 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -52,7 +52,7 @@ X86RegisterInfo::X86RegisterInfo(const Triple &TT)
X86_MC::getDwarfRegFlavour(TT, false),
X86_MC::getDwarfRegFlavour(TT, true),
(TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
- X86_MC::InitLLVM2SEHRegisterMapping(this);
+ X86_MC::initLLVMToSEHAndCVRegMapping(this);
// Cache some information.
Is64Bit = TT.isArch64Bit();
@@ -162,10 +162,23 @@ X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
case 0: // Normal GPRs.
if (Subtarget.isTarget64BitLP64())
return &X86::GR64RegClass;
+ // If the target is 64-bit but we have been told to use 32-bit addresses,
+ // we can still use a 64-bit register as long as we know the high bits
+ // are zero.
+ // Reflect that in the returned register class.
+ if (Is64Bit) {
+ // When the target also allows a 64-bit frame pointer and we do have a
+ // frame, it is fine to use it for address accesses as well.
+ const X86FrameLowering *TFI = getFrameLowering(MF);
+ return TFI->hasFP(MF) && TFI->Uses64BitFramePtr
+ ? &X86::LOW32_ADDR_ACCESS_RBPRegClass
+ : &X86::LOW32_ADDR_ACCESSRegClass;
+ }
return &X86::GR32RegClass;
case 1: // Normal GPRs except the stack pointer (for encoding reasons).
if (Subtarget.isTarget64BitLP64())
return &X86::GR64_NOSPRegClass;
+ // NOSP does not contain RIP, so no special case here.
return &X86::GR32_NOSPRegClass;
case 2: // NOREX GPRs.
if (Subtarget.isTarget64BitLP64())
@@ -174,6 +187,7 @@ X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
case 3: // NOREX GPRs except the stack pointer (for encoding reasons).
if (Subtarget.isTarget64BitLP64())
return &X86::GR64_NOREX_NOSPRegClass;
+ // NOSP does not contain RIP, so no special case here.
return &X86::GR32_NOREX_NOSPRegClass;
case 4: // Available for tailcall (not callee-saved GPRs).
return getGPRsForTailCall(MF);
@@ -280,15 +294,19 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return CSR_64_SaveList;
case CallingConv::X86_INTR:
if (Is64Bit) {
+ if (HasAVX512)
+ return CSR_64_AllRegs_AVX512_SaveList;
if (HasAVX)
return CSR_64_AllRegs_AVX_SaveList;
- else
- return CSR_64_AllRegs_SaveList;
+ return CSR_64_AllRegs_SaveList;
} else {
+ if (HasAVX512)
+ return CSR_32_AllRegs_AVX512_SaveList;
+ if (HasAVX)
+ return CSR_32_AllRegs_AVX_SaveList;
if (HasSSE)
return CSR_32_AllRegs_SSE_SaveList;
- else
- return CSR_32_AllRegs_SaveList;
+ return CSR_32_AllRegs_SaveList;
}
default:
break;
@@ -299,6 +317,10 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return CSR_Win64_SaveList;
if (CallsEHReturn)
return CSR_64EHRet_SaveList;
+ if (Subtarget.getTargetLowering()->supportSwiftError() &&
+ MF->getFunction()->getAttributes().hasAttrSomewhere(
+ Attribute::SwiftError))
+ return CSR_64_SwiftError_SaveList;
return CSR_64_SaveList;
}
if (CallsEHReturn)
@@ -366,18 +388,22 @@ X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
return CSR_64_RegMask;
case CallingConv::X86_INTR:
if (Is64Bit) {
+ if (HasAVX512)
+ return CSR_64_AllRegs_AVX512_RegMask;
if (HasAVX)
return CSR_64_AllRegs_AVX_RegMask;
- else
- return CSR_64_AllRegs_RegMask;
+ return CSR_64_AllRegs_RegMask;
} else {
+ if (HasAVX512)
+ return CSR_32_AllRegs_AVX512_RegMask;
+ if (HasAVX)
+ return CSR_32_AllRegs_AVX_RegMask;
if (HasSSE)
return CSR_32_AllRegs_SSE_RegMask;
- else
- return CSR_32_AllRegs_RegMask;
+ return CSR_32_AllRegs_RegMask;
}
- default:
- break;
+ default:
+ break;
}
// Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
@@ -385,6 +411,10 @@ X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
if (Is64Bit) {
if (IsWin64)
return CSR_Win64_RegMask;
+ if (Subtarget.getTargetLowering()->supportSwiftError() &&
+ MF.getFunction()->getAttributes().hasAttrSomewhere(
+ Attribute::SwiftError))
+ return CSR_64_SwiftError_RegMask;
return CSR_64_RegMask;
}
return CSR_32_RegMask;
diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td
index 56f0d9352d30..373f9b4c65f2 100644
--- a/lib/Target/X86/X86RegisterInfo.td
+++ b/lib/Target/X86/X86RegisterInfo.td
@@ -226,14 +226,14 @@ let SubRegIndices = [sub_ymm] in {
}
// Mask Registers, used by AVX-512 instructions.
-def K0 : X86Reg<"k0", 0>, DwarfRegNum<[118, -2, -2]>;
-def K1 : X86Reg<"k1", 1>, DwarfRegNum<[119, -2, -2]>;
-def K2 : X86Reg<"k2", 2>, DwarfRegNum<[120, -2, -2]>;
-def K3 : X86Reg<"k3", 3>, DwarfRegNum<[121, -2, -2]>;
-def K4 : X86Reg<"k4", 4>, DwarfRegNum<[122, -2, -2]>;
-def K5 : X86Reg<"k5", 5>, DwarfRegNum<[123, -2, -2]>;
-def K6 : X86Reg<"k6", 6>, DwarfRegNum<[124, -2, -2]>;
-def K7 : X86Reg<"k7", 7>, DwarfRegNum<[125, -2, -2]>;
+def K0 : X86Reg<"k0", 0>, DwarfRegNum<[118, 93, 93]>;
+def K1 : X86Reg<"k1", 1>, DwarfRegNum<[119, 94, 94]>;
+def K2 : X86Reg<"k2", 2>, DwarfRegNum<[120, 95, 95]>;
+def K3 : X86Reg<"k3", 3>, DwarfRegNum<[121, 96, 96]>;
+def K4 : X86Reg<"k4", 4>, DwarfRegNum<[122, 97, 97]>;
+def K5 : X86Reg<"k5", 5>, DwarfRegNum<[123, 98, 98]>;
+def K6 : X86Reg<"k6", 6>, DwarfRegNum<[124, 99, 99]>;
+def K7 : X86Reg<"k7", 7>, DwarfRegNum<[125, 100, 100]>;
// Floating point stack registers. These don't map one-to-one to the FP
// pseudo registers, but we still mark them as aliasing FP registers. That
@@ -415,6 +415,26 @@ def GR32_NOREX_NOSP : RegisterClass<"X86", [i32], 32,
def GR64_NOREX_NOSP : RegisterClass<"X86", [i64], 64,
(and GR64_NOREX, GR64_NOSP)>;
+// Register classes used for ABIs that use 32-bit address accesses,
+// while using the whole x86_64 ISA.
+
+// In such cases, it is fine to use RIP as we are sure the 32 high
+// bits are not set. We do not need variants for NOSP as RIP is not
+// allowed there.
+// RIP is not spilled anywhere for now, so stick to 32-bit alignment
+// to save on memory space.
+// FIXME: We could allow all 64bit registers, but we would need
+// something to check that the 32 high bits are not set,
+// which we do not have right now.
+def LOW32_ADDR_ACCESS : RegisterClass<"X86", [i32], 32, (add GR32, RIP)>;
+
+// When RBP is used as a base pointer in a 32-bit address environment,
+// it is also safe to use the full register to access addresses.
+// Since RBP will never be spilled, stick to a 32-bit alignment to save
+// on memory consumption.
+def LOW32_ADDR_ACCESS_RBP : RegisterClass<"X86", [i32], 32,
+ (add LOW32_ADDR_ACCESS, RBP)>;
+
// A class to support the 'A' assembler constraint: EAX then EDX.
def GR32_AD : RegisterClass<"X86", [i32], 32, (add EAX, EDX)>;
@@ -451,6 +471,17 @@ def VR128 : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
def VR256 : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
256, (sequence "YMM%u", 0, 15)>;
+// Special classes that help the assembly parser choose some alternate
+// instructions to favor 2-byte VEX encodings.
+def VR128L : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
+ 128, (sequence "XMM%u", 0, 7)>;
+def VR128H : RegisterClass<"X86", [v4f32, v2f64, v16i8, v8i16, v4i32, v2i64],
+ 128, (sequence "XMM%u", 8, 15)>;
+def VR256L : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
+ 256, (sequence "YMM%u", 0, 7)>;
+def VR256H : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
+ 256, (sequence "YMM%u", 8, 15)>;
+
// Status flags registers.
def CCR : RegisterClass<"X86", [i32], 32, (add EFLAGS)> {
let CopyCost = -1; // Don't allow copying of status registers.
@@ -477,18 +508,18 @@ def VR256X : RegisterClass<"X86", [v8f32, v4f64, v32i8, v16i16, v8i32, v4i64],
256, (sequence "YMM%u", 0, 31)>;
// Mask registers
-def VK1 : RegisterClass<"X86", [i1], 8, (sequence "K%u", 0, 7)> {let Size = 8;}
-def VK2 : RegisterClass<"X86", [v2i1], 8, (add VK1)> {let Size = 8;}
-def VK4 : RegisterClass<"X86", [v4i1], 8, (add VK2)> {let Size = 8;}
-def VK8 : RegisterClass<"X86", [v8i1], 8, (add VK4)> {let Size = 8;}
+def VK1 : RegisterClass<"X86", [i1], 16, (sequence "K%u", 0, 7)> {let Size = 16;}
+def VK2 : RegisterClass<"X86", [v2i1], 16, (add VK1)> {let Size = 16;}
+def VK4 : RegisterClass<"X86", [v4i1], 16, (add VK2)> {let Size = 16;}
+def VK8 : RegisterClass<"X86", [v8i1], 16, (add VK4)> {let Size = 16;}
def VK16 : RegisterClass<"X86", [v16i1], 16, (add VK8)> {let Size = 16;}
def VK32 : RegisterClass<"X86", [v32i1], 32, (add VK16)> {let Size = 32;}
def VK64 : RegisterClass<"X86", [v64i1], 64, (add VK32)> {let Size = 64;}
-def VK1WM : RegisterClass<"X86", [i1], 8, (sub VK1, K0)> {let Size = 8;}
-def VK2WM : RegisterClass<"X86", [v2i1], 8, (sub VK2, K0)> {let Size = 8;}
-def VK4WM : RegisterClass<"X86", [v4i1], 8, (sub VK4, K0)> {let Size = 8;}
-def VK8WM : RegisterClass<"X86", [v8i1], 8, (sub VK8, K0)> {let Size = 8;}
+def VK1WM : RegisterClass<"X86", [i1], 16, (sub VK1, K0)> {let Size = 16;}
+def VK2WM : RegisterClass<"X86", [v2i1], 16, (sub VK2, K0)> {let Size = 16;}
+def VK4WM : RegisterClass<"X86", [v4i1], 16, (sub VK4, K0)> {let Size = 16;}
+def VK8WM : RegisterClass<"X86", [v8i1], 16, (sub VK8, K0)> {let Size = 16;}
def VK16WM : RegisterClass<"X86", [v16i1], 16, (add VK8WM)> {let Size = 16;}
def VK32WM : RegisterClass<"X86", [v32i1], 32, (add VK16WM)> {let Size = 32;}
def VK64WM : RegisterClass<"X86", [v64i1], 64, (add VK32WM)> {let Size = 64;}
diff --git a/lib/Target/X86/X86Schedule.td b/lib/Target/X86/X86Schedule.td
index a261356afe6a..35257f89100c 100644
--- a/lib/Target/X86/X86Schedule.td
+++ b/lib/Target/X86/X86Schedule.td
@@ -364,6 +364,8 @@ def IIC_SSE_PALIGNRR : InstrItinClass;
def IIC_SSE_PALIGNRM : InstrItinClass;
def IIC_SSE_MWAIT : InstrItinClass;
def IIC_SSE_MONITOR : InstrItinClass;
+def IIC_SSE_MWAITX : InstrItinClass;
+def IIC_SSE_MONITORX : InstrItinClass;
def IIC_SSE_PREFETCH : InstrItinClass;
def IIC_SSE_PAUSE : InstrItinClass;
@@ -633,13 +635,22 @@ def IIC_NOP : InstrItinClass;
// latencies. Since these latencies are not used for pipeline hazards,
// they do not need to be exact.
//
-// The GenericModel contains no instruction itineraries.
-def GenericModel : SchedMachineModel {
+// The GenericX86Model contains no instruction itineraries
+// and disables PostRAScheduler.
+class GenericX86Model : SchedMachineModel {
let IssueWidth = 4;
let MicroOpBufferSize = 32;
let LoadLatency = 4;
let HighLatency = 10;
let PostRAScheduler = 0;
+ let CompleteModel = 0;
+}
+
+def GenericModel : GenericX86Model;
+
+// Define a model with the PostRAScheduler enabled.
+def GenericPostRAModel : GenericX86Model {
+ let PostRAScheduler = 1;
}
include "X86ScheduleAtom.td"
diff --git a/lib/Target/X86/X86ScheduleAtom.td b/lib/Target/X86/X86ScheduleAtom.td
index 4c559c9c1798..a5b440182aa9 100644
--- a/lib/Target/X86/X86ScheduleAtom.td
+++ b/lib/Target/X86/X86ScheduleAtom.td
@@ -544,6 +544,7 @@ def AtomModel : SchedMachineModel {
// simple loops, expand by a small factor to hide the backedge cost.
let LoopMicroOpBufferSize = 10;
let PostRAScheduler = 1;
+ let CompleteModel = 0;
let Itineraries = AtomItineraries;
}
diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp
index b1a01614b4a1..d02859b3dcbd 100644
--- a/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -25,7 +25,7 @@ using namespace llvm;
#define DEBUG_TYPE "x86-selectiondag-info"
bool X86SelectionDAGInfo::isBaseRegConflictPossible(
- SelectionDAG &DAG, ArrayRef<unsigned> ClobberSet) const {
+ SelectionDAG &DAG, ArrayRef<MCPhysReg> ClobberSet) const {
// We cannot use TRI->hasBasePointer() until *after* we select all basic
// blocks. Legalization may introduce new stack temporaries with large
// alignment requirements. Fall back to generic code if there are any
@@ -45,7 +45,7 @@ bool X86SelectionDAGInfo::isBaseRegConflictPossible(
}
SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
- SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src,
+ SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const {
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
@@ -54,8 +54,8 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
#ifndef NDEBUG
// If the base register might conflict with our physical registers, bail out.
- const unsigned ClobberSet[] = {X86::RCX, X86::RAX, X86::RDI,
- X86::ECX, X86::EAX, X86::EDI};
+ const MCPhysReg ClobberSet[] = {X86::RCX, X86::RAX, X86::RDI,
+ X86::ECX, X86::EAX, X86::EDI};
assert(!isBaseRegConflictPossible(DAG, ClobberSet));
#endif
@@ -87,8 +87,7 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl).setChain(Chain)
.setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
- DAG.getExternalSymbol(bzeroEntry, IntPtr), std::move(Args),
- 0)
+ DAG.getExternalSymbol(bzeroEntry, IntPtr), std::move(Args))
.setDiscardResult();
std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
@@ -195,7 +194,7 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
}
SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
- SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src,
+ SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
// This requires the copy size to be a constant, preferably
@@ -222,8 +221,8 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
return SDValue();
// If the base register might conflict with our physical registers, bail out.
- const unsigned ClobberSet[] = {X86::RCX, X86::RSI, X86::RDI,
- X86::ECX, X86::ESI, X86::EDI};
+ const MCPhysReg ClobberSet[] = {X86::RCX, X86::RSI, X86::RDI,
+ X86::ECX, X86::ESI, X86::EDI};
if (isBaseRegConflictPossible(DAG, ClobberSet))
return SDValue();
diff --git a/lib/Target/X86/X86SelectionDAGInfo.h b/lib/Target/X86/X86SelectionDAGInfo.h
index 961bd8c8d5ef..f4a285a5f916 100644
--- a/lib/Target/X86/X86SelectionDAGInfo.h
+++ b/lib/Target/X86/X86SelectionDAGInfo.h
@@ -7,14 +7,15 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the X86 subclass for TargetSelectionDAGInfo.
+// This file defines the X86 subclass for SelectionDAGTargetInfo.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_X86_X86SELECTIONDAGINFO_H
#define LLVM_LIB_TARGET_X86_X86SELECTIONDAGINFO_H
-#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
namespace llvm {
@@ -22,27 +23,24 @@ class X86TargetLowering;
class X86TargetMachine;
class X86Subtarget;
-class X86SelectionDAGInfo : public TargetSelectionDAGInfo {
+class X86SelectionDAGInfo : public SelectionDAGTargetInfo {
/// Returns true if it is possible for the base register to conflict with the
/// given set of clobbers for a memory intrinsic.
bool isBaseRegConflictPossible(SelectionDAG &DAG,
- ArrayRef<unsigned> ClobberSet) const;
+ ArrayRef<MCPhysReg> ClobberSet) const;
public:
explicit X86SelectionDAGInfo() = default;
- SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool isVolatile,
+ SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl,
+ SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const override;
- SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool isVolatile, bool AlwaysInline,
+ SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
+ SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVolatile,
+ bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const override;
};
diff --git a/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp b/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp
index ef16c5bdbfd8..1adc92cfda63 100644
--- a/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp
+++ b/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp
@@ -40,24 +40,43 @@ void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
assert(MaskTySize == 128 || MaskTySize == 256 || MaskTySize == 512);
#endif
- // This is a straightforward byte vector.
- if (MaskTy->isVectorTy() && MaskTy->getVectorElementType()->isIntegerTy(8)) {
- int NumElements = MaskTy->getVectorNumElements();
- ShuffleMask.reserve(NumElements);
+ if (!MaskTy->isVectorTy())
+ return;
+ int NumElts = MaskTy->getVectorNumElements();
+
+ Type *EltTy = MaskTy->getVectorElementType();
+ if (!EltTy->isIntegerTy())
+ return;
- for (int i = 0; i < NumElements; ++i) {
+ // The shuffle mask requires a byte vector - decode cases with
+ // wider elements as well.
+ unsigned BitWidth = cast<IntegerType>(EltTy)->getBitWidth();
+ if ((BitWidth % 8) != 0)
+ return;
+
+ int Scale = BitWidth / 8;
+ int NumBytes = NumElts * Scale;
+ ShuffleMask.reserve(NumBytes);
+
+ for (int i = 0; i != NumElts; ++i) {
+ Constant *COp = C->getAggregateElement(i);
+ if (!COp) {
+ ShuffleMask.clear();
+ return;
+ } else if (isa<UndefValue>(COp)) {
+ ShuffleMask.append(Scale, SM_SentinelUndef);
+ continue;
+ }
+
+ APInt APElt = cast<ConstantInt>(COp)->getValue();
+ for (int j = 0; j != Scale; ++j) {
// For AVX vectors with 32 bytes the base of the shuffle is the 16-byte
// lane of the vector we're inside.
- int Base = i & ~0xf;
- Constant *COp = C->getAggregateElement(i);
- if (!COp) {
- ShuffleMask.clear();
- return;
- } else if (isa<UndefValue>(COp)) {
- ShuffleMask.push_back(SM_SentinelUndef);
- continue;
- }
- uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
+ int Base = ((i * Scale) + j) & ~0xf;
+
+ uint64_t Element = APElt.getLoBits(8).getZExtValue();
+ APElt = APElt.lshr(8);
+
// If the high bit (7) of the byte is set, the element is zeroed.
if (Element & (1 << 7))
ShuffleMask.push_back(SM_SentinelZero);
@@ -68,7 +87,8 @@ void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
}
}
}
- // TODO: Handle funny-looking vectors too.
+
+ assert(NumBytes == (int)ShuffleMask.size() && "Unexpected shuffle mask size");
}
void DecodeVPERMILPMask(const Constant *C, unsigned ElSize,
@@ -84,9 +104,11 @@ void DecodeVPERMILPMask(const Constant *C, unsigned ElSize,
// <4 x i32> <i32 -2147483648, i32 -2147483648,
// i32 -2147483648, i32 -2147483648>
- unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
+ if (ElSize != 32 && ElSize != 64)
+ return;
- if (MaskTySize != 128 && MaskTySize != 256) // FIXME: Add support for AVX-512.
+ unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
+ if (MaskTySize != 128 && MaskTySize != 256 && MaskTySize != 512)
return;
// Only support vector types.
@@ -99,14 +121,15 @@ void DecodeVPERMILPMask(const Constant *C, unsigned ElSize,
return;
// Support any element type from byte up to element size.
- // This is necesary primarily because 64-bit elements get split to 32-bit
+ // This is necessary primarily because 64-bit elements get split to 32-bit
// in the constant pool on 32-bit target.
unsigned EltTySize = VecEltTy->getIntegerBitWidth();
if (EltTySize < 8 || EltTySize > ElSize)
return;
unsigned NumElements = MaskTySize / ElSize;
- assert((NumElements == 2 || NumElements == 4 || NumElements == 8) &&
+ assert((NumElements == 2 || NumElements == 4 || NumElements == 8 ||
+ NumElements == 16) &&
"Unexpected number of vector elements.");
ShuffleMask.reserve(NumElements);
unsigned NumElementsPerLane = 128 / ElSize;
@@ -133,12 +156,154 @@ void DecodeVPERMILPMask(const Constant *C, unsigned ElSize,
// TODO: Handle funny-looking vectors too.
}
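The Factor logic shared by these decoders compensates for constant pools on 32-bit targets, where a 64-bit mask element is stored as two 32-bit halves; the logical element is recovered by striding through the aggregate. A small worked sketch under that assumption:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // A <2 x i64> VPERMILPD-style mask stored as four i32s on a 32-bit
  // target: each pair holds the low/high half of one logical element.
  std::vector<uint32_t> Pool = {2, 0, 3, 0};
  unsigned ElSize = 64, EltTySize = 32;
  unsigned Factor = ElSize / EltTySize;            // == 2
  for (unsigned i = 0; i < Pool.size() / Factor; ++i)
    std::printf("element %u selector = %u\n", i, Pool[i * Factor]);
}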
+void DecodeVPERMIL2PMask(const Constant *C, unsigned M2Z, unsigned ElSize,
+ SmallVectorImpl<int> &ShuffleMask) {
+ Type *MaskTy = C->getType();
+
+ unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
+ if (MaskTySize != 128 && MaskTySize != 256)
+ return;
+
+ // Only support vector types.
+ if (!MaskTy->isVectorTy())
+ return;
+
+ // Make sure its an integer type.
+ Type *VecEltTy = MaskTy->getVectorElementType();
+ if (!VecEltTy->isIntegerTy())
+ return;
+
+ // Support any element type from byte up to element size.
+ // This is necessary primarily because 64-bit elements get split to 32-bit
+ // in the constant pool on 32-bit target.
+ unsigned EltTySize = VecEltTy->getIntegerBitWidth();
+ if (EltTySize < 8 || EltTySize > ElSize)
+ return;
+
+ unsigned NumElements = MaskTySize / ElSize;
+ assert((NumElements == 2 || NumElements == 4 || NumElements == 8) &&
+ "Unexpected number of vector elements.");
+ ShuffleMask.reserve(NumElements);
+ unsigned NumElementsPerLane = 128 / ElSize;
+ unsigned Factor = ElSize / EltTySize;
+
+ for (unsigned i = 0; i < NumElements; ++i) {
+ Constant *COp = C->getAggregateElement(i * Factor);
+ if (!COp) {
+ ShuffleMask.clear();
+ return;
+ } else if (isa<UndefValue>(COp)) {
+ ShuffleMask.push_back(SM_SentinelUndef);
+ continue;
+ }
+
+ // VPERMIL2 Operation.
+ // Bits[3] - Match Bit.
+ // Bits[2:1] - (Per Lane) PD Shuffle Mask.
+ // Bits[2:0] - (Per Lane) PS Shuffle Mask.
+ uint64_t Selector = cast<ConstantInt>(COp)->getZExtValue();
+ unsigned MatchBit = (Selector >> 3) & 0x1;
+
+ // M2Z[0:1] MatchBit
+ // 0Xb X Source selected by Selector index.
+ // 10b 0 Source selected by Selector index.
+ // 10b 1 Zero.
+ // 11b 0 Zero.
+ // 11b 1 Source selected by Selector index.
+ if ((M2Z & 0x2) != 0u && MatchBit != (M2Z & 0x1)) {
+ ShuffleMask.push_back(SM_SentinelZero);
+ continue;
+ }
+
+ int Index = i & ~(NumElementsPerLane - 1);
+ if (ElSize == 64)
+ Index += (Selector >> 1) & 0x1;
+ else
+ Index += Selector & 0x3;
+
+ int Src = (Selector >> 2) & 0x1;
+ Index += Src * NumElements;
+ ShuffleMask.push_back(Index);
+ }
+
+ // TODO: Handle funny-looking vectors too.
+}
+
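Condensed, the per-element decode in DecodeVPERMIL2PMask is a handful of bit tests; a sketch of the 32-bit (PS) case only, with hypothetical names, where indices at or above NumElements refer to the second source and -1 marks a zeroed lane:

#include <cstdint>

// Selector is one decoded control element, i the element's position, and
// M2Z the instruction's 2-bit immediate.
int decodeVPermil2PsElt(uint64_t Selector, unsigned i, unsigned M2Z,
                        unsigned NumElements) {
  unsigned NumEltsPerLane = 4;                    // 128 / 32
  unsigned MatchBit = (Selector >> 3) & 0x1;
  if ((M2Z & 0x2) && MatchBit != (M2Z & 0x1))
    return -1;                                    // zero per the table above
  unsigned Index = i & ~(NumEltsPerLane - 1);     // 128-bit lane base
  Index += Selector & 0x3;                        // element within the lane
  Index += ((Selector >> 2) & 0x1) * NumElements; // select source 0 or 1
  return (int)Index;
}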
+void DecodeVPPERMMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
+ Type *MaskTy = C->getType();
+ assert(MaskTy->getPrimitiveSizeInBits() == 128);
+
+ // Only support vector types.
+ if (!MaskTy->isVectorTy())
+ return;
+
+ // Make sure its an integer type.
+ Type *VecEltTy = MaskTy->getVectorElementType();
+ if (!VecEltTy->isIntegerTy())
+ return;
+
+ // The shuffle mask requires a byte vector - decode cases with
+ // wider elements as well.
+ unsigned BitWidth = cast<IntegerType>(VecEltTy)->getBitWidth();
+ if ((BitWidth % 8) != 0)
+ return;
+
+ int NumElts = MaskTy->getVectorNumElements();
+ int Scale = BitWidth / 8;
+ int NumBytes = NumElts * Scale;
+ ShuffleMask.reserve(NumBytes);
+
+ for (int i = 0; i != NumElts; ++i) {
+ Constant *COp = C->getAggregateElement(i);
+ if (!COp) {
+ ShuffleMask.clear();
+ return;
+ } else if (isa<UndefValue>(COp)) {
+ ShuffleMask.append(Scale, SM_SentinelUndef);
+ continue;
+ }
+
+ // VPPERM Operation
+ // Bits[4:0] - Byte Index (0 - 31)
+ // Bits[7:5] - Permute Operation
+ //
+ // Permute Operation:
+ // 0 - Source byte (no logical operation).
+ // 1 - Invert source byte.
+ // 2 - Bit reverse of source byte.
+ // 3 - Bit reverse of inverted source byte.
+ // 4 - 00h (zero - fill).
+ // 5 - FFh (ones - fill).
+ // 6 - Most significant bit of source byte replicated in all bit positions.
+ // 7 - Invert most significant bit of source byte and replicate in all bit positions.
+ APInt MaskElt = cast<ConstantInt>(COp)->getValue();
+ for (int j = 0; j != Scale; ++j) {
+ APInt Index = MaskElt.getLoBits(5);
+ APInt PermuteOp = MaskElt.lshr(5).getLoBits(3);
+ MaskElt = MaskElt.lshr(8);
+
+ if (PermuteOp == 4) {
+ ShuffleMask.push_back(SM_SentinelZero);
+ continue;
+ }
+ if (PermuteOp != 0) {
+ ShuffleMask.clear();
+ return;
+ }
+ ShuffleMask.push_back((int)Index.getZExtValue());
+ }
+ }
+
+ assert(NumBytes == (int)ShuffleMask.size() && "Unexpected shuffle mask size");
+}
+
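The VPPERM operation table above reduces to two cases that are representable in a shuffle mask; everything else forces the decoder to give up. A byte-level sketch with hypothetical sentinel values:

#include <cstdint>

// Decode one VPPERM control byte: bits [4:0] pick one of 32 source bytes,
// bits [7:5] pick the operation. Returns the source index, -1 for a
// zero-filled byte, or -2 when the operation (invert, bit-reverse, ...)
// cannot be expressed as a plain shuffle.
int decodeVPPermByte(uint8_t Ctl) {
  unsigned Index = Ctl & 0x1f;
  unsigned Op = (Ctl >> 5) & 0x7;
  if (Op == 4)
    return -1; // 00h zero-fill
  if (Op != 0)
    return -2; // not a byte copy: undecodable as a shuffle mask
  return (int)Index;
}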
void DecodeVPERMVMask(const Constant *C, MVT VT,
SmallVectorImpl<int> &ShuffleMask) {
Type *MaskTy = C->getType();
if (MaskTy->isVectorTy()) {
unsigned NumElements = MaskTy->getVectorNumElements();
if (NumElements == VT.getVectorNumElements()) {
+ unsigned EltMaskSize = Log2_64(NumElements);
for (unsigned i = 0; i < NumElements; ++i) {
Constant *COp = C->getAggregateElement(i);
if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp))) {
@@ -148,9 +313,9 @@ void DecodeVPERMVMask(const Constant *C, MVT VT,
if (isa<UndefValue>(COp))
ShuffleMask.push_back(SM_SentinelUndef);
else {
- uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
- Element &= (1 << NumElements) - 1;
- ShuffleMask.push_back(Element);
+ APInt Element = cast<ConstantInt>(COp)->getValue();
+ Element = Element.getLoBits(EltMaskSize);
+ ShuffleMask.push_back(Element.getZExtValue());
}
}
}
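The switch to Log2_64 here is worth noting: an index only needs log2(NumElements) bits, since the hardware treats it modulo the element count, and the old (1 << NumElements) - 1 expression would overflow once 32- or 64-element vectors (such as VPERMW/VPERMB) appear. A sketch of the masking, assuming a power-of-two element count:

#include <cstdint>

uint64_t maskVPermIndex(uint64_t Element, unsigned NumElements) {
  unsigned Bits = 0;
  for (unsigned N = NumElements; N > 1; N >>= 1)
    ++Bits;                          // Bits == log2(NumElements)
  return Element & ((1ULL << Bits) - 1);
}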
@@ -171,6 +336,7 @@ void DecodeVPERMV3Mask(const Constant *C, MVT VT,
Type *MaskTy = C->getType();
unsigned NumElements = MaskTy->getVectorNumElements();
if (NumElements == VT.getVectorNumElements()) {
+ unsigned EltMaskSize = Log2_64(NumElements * 2);
for (unsigned i = 0; i < NumElements; ++i) {
Constant *COp = C->getAggregateElement(i);
if (!COp) {
@@ -180,9 +346,9 @@ void DecodeVPERMV3Mask(const Constant *C, MVT VT,
if (isa<UndefValue>(COp))
ShuffleMask.push_back(SM_SentinelUndef);
else {
- uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
- Element &= (1 << NumElements*2) - 1;
- ShuffleMask.push_back(Element);
+ APInt Element = cast<ConstantInt>(COp)->getValue();
+ Element = Element.getLoBits(EltMaskSize);
+ ShuffleMask.push_back(Element.getZExtValue());
}
}
}
diff --git a/lib/Target/X86/X86ShuffleDecodeConstantPool.h b/lib/Target/X86/X86ShuffleDecodeConstantPool.h
index bcf46322c8cd..d2565b849807 100644
--- a/lib/Target/X86/X86ShuffleDecodeConstantPool.h
+++ b/lib/Target/X86/X86ShuffleDecodeConstantPool.h
@@ -25,18 +25,25 @@ namespace llvm {
class Constant;
class MVT;
-/// \brief Decode a PSHUFB mask from an IR-level vector constant.
+/// Decode a PSHUFB mask from an IR-level vector constant.
void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a VPERMILP variable mask from an IR-level vector constant.
+/// Decode a VPERMILP variable mask from an IR-level vector constant.
void DecodeVPERMILPMask(const Constant *C, unsigned ElSize,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a VPERM W/D/Q/PS/PD mask from an IR-level vector constant.
+/// Decode a VPERMILP2 variable mask from an IR-level vector constant.
+void DecodeVPERMIL2PMask(const Constant *C, unsigned MatchImm, unsigned ElSize,
+ SmallVectorImpl<int> &ShuffleMask);
+
+/// Decode a VPPERM variable mask from an IR-level vector constant.
+void DecodeVPPERMMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask);
+
+/// Decode a VPERM W/D/Q/PS/PD mask from an IR-level vector constant.
void DecodeVPERMVMask(const Constant *C, MVT VT,
SmallVectorImpl<int> &ShuffleMask);
-/// \brief Decode a VPERMT2 W/D/Q/PS/PD mask from an IR-level vector constant.
+/// Decode a VPERMT2 W/D/Q/PS/PD mask from an IR-level vector constant.
void DecodeVPERMV3Mask(const Constant *C, MVT VT,
SmallVectorImpl<int> &ShuffleMask);
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index 8ef08c960f0b..8f77682d2276 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -46,105 +46,99 @@ X86EarlyIfConv("x86-early-ifcvt", cl::Hidden,
/// Classify a blockaddress reference for the current subtarget according to how
/// we should reference it in a non-pcrel context.
-unsigned char X86Subtarget::ClassifyBlockAddressReference() const {
- if (isPICStyleGOT()) // 32-bit ELF targets.
- return X86II::MO_GOTOFF;
-
- if (isPICStyleStubPIC()) // Darwin/32 in PIC mode.
- return X86II::MO_PIC_BASE_OFFSET;
-
- // Direct static reference to label.
- return X86II::MO_NO_FLAG;
+unsigned char X86Subtarget::classifyBlockAddressReference() const {
+ return classifyLocalReference(nullptr);
}
/// Classify a global variable reference for the current subtarget according to
/// how we should reference it in a non-pcrel context.
-unsigned char X86Subtarget::
-ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const {
- // DLLImport only exists on windows, it is implemented as a load from a
- // DLLIMPORT stub.
- if (GV->hasDLLImportStorageClass())
- return X86II::MO_DLLIMPORT;
+unsigned char
+X86Subtarget::classifyGlobalReference(const GlobalValue *GV) const {
+ return classifyGlobalReference(GV, *GV->getParent());
+}
- bool isDef = GV->isStrongDefinitionForLinker();
+unsigned char
+X86Subtarget::classifyLocalReference(const GlobalValue *GV) const {
+ // 64 bits can use %rip addressing for anything local.
+ if (is64Bit())
+ return X86II::MO_NO_FLAG;
- // X86-64 in PIC mode.
- if (isPICStyleRIPRel()) {
- // Large model never uses stubs.
- if (TM.getCodeModel() == CodeModel::Large)
- return X86II::MO_NO_FLAG;
+ // If this is for a position dependent executable, the static linker can
+ // figure it out.
+ if (!isPositionIndependent())
+ return X86II::MO_NO_FLAG;
- if (isTargetDarwin()) {
- // If symbol visibility is hidden, the extra load is not needed if
- // target is x86-64 or the symbol is definitely defined in the current
- // translation unit.
- if (GV->hasDefaultVisibility() && !isDef)
- return X86II::MO_GOTPCREL;
- } else if (!isTargetWin64()) {
- assert(isTargetELF() && "Unknown rip-relative target");
+ // The COFF dynamic linker just patches the executable sections.
+ if (isTargetCOFF())
+ return X86II::MO_NO_FLAG;
- // Extra load is needed for all externally visible.
- if (!GV->hasLocalLinkage() && GV->hasDefaultVisibility())
- return X86II::MO_GOTPCREL;
- }
+ if (isTargetDarwin()) {
+ // 32-bit Mach-O has no relocation for a-b if a is undefined, even if
+ // b is in the section that is being relocated.
+ // This means we have to use a load even for GVs that are known to be
+ // local to the DSO.
+ if (GV && (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
+ return X86II::MO_DARWIN_NONLAZY_PIC_BASE;
- return X86II::MO_NO_FLAG;
+ return X86II::MO_PIC_BASE_OFFSET;
}
- if (isPICStyleGOT()) { // 32-bit ELF targets.
- // Extra load is needed for all externally visible.
- if (GV->hasLocalLinkage() || GV->hasHiddenVisibility())
- return X86II::MO_GOTOFF;
- return X86II::MO_GOT;
- }
+ return X86II::MO_GOTOFF;
+}
- if (isPICStyleStubPIC()) { // Darwin/32 in PIC mode.
- // Determine whether we have a stub reference and/or whether the reference
- // is relative to the PIC base or not.
+unsigned char X86Subtarget::classifyGlobalReference(const GlobalValue *GV,
+ const Module &M) const {
+ // Large model never uses stubs.
+ if (TM.getCodeModel() == CodeModel::Large)
+ return X86II::MO_NO_FLAG;
- // If this is a strong reference to a definition, it is definitely not
- // through a stub.
- if (isDef)
- return X86II::MO_PIC_BASE_OFFSET;
+ if (TM.shouldAssumeDSOLocal(M, GV))
+ return classifyLocalReference(GV);
- // Unless we have a symbol with hidden visibility, we have to go through a
- // normal $non_lazy_ptr stub because this symbol might be resolved late.
- if (!GV->hasHiddenVisibility()) // Non-hidden $non_lazy_ptr reference.
- return X86II::MO_DARWIN_NONLAZY_PIC_BASE;
+ if (isTargetCOFF())
+ return X86II::MO_DLLIMPORT;
- // If symbol visibility is hidden, we have a stub for common symbol
- // references and external declarations.
- if (GV->isDeclarationForLinker() || GV->hasCommonLinkage()) {
- // Hidden $non_lazy_ptr reference.
- return X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE;
- }
+ if (is64Bit())
+ return X86II::MO_GOTPCREL;
- // Otherwise, no stub.
- return X86II::MO_PIC_BASE_OFFSET;
+ if (isTargetDarwin()) {
+ if (!isPositionIndependent())
+ return X86II::MO_DARWIN_NONLAZY;
+ return X86II::MO_DARWIN_NONLAZY_PIC_BASE;
}
- if (isPICStyleStubNoDynamic()) { // Darwin/32 in -mdynamic-no-pic mode.
- // Determine whether we have a stub reference.
+ return X86II::MO_GOT;
+}
- // If this is a strong reference to a definition, it is definitely not
- // through a stub.
- if (isDef)
- return X86II::MO_NO_FLAG;
+unsigned char
+X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV) const {
+ return classifyGlobalFunctionReference(GV, *GV->getParent());
+}
- // Unless we have a symbol with hidden visibility, we have to go through a
- // normal $non_lazy_ptr stub because this symbol might be resolved late.
- if (!GV->hasHiddenVisibility()) // Non-hidden $non_lazy_ptr reference.
- return X86II::MO_DARWIN_NONLAZY;
+unsigned char
+X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV,
+ const Module &M) const {
+ if (TM.shouldAssumeDSOLocal(M, GV))
+ return X86II::MO_NO_FLAG;
+
+ assert(!isTargetCOFF());
- // Otherwise, no stub.
+ if (isTargetELF())
+ return X86II::MO_PLT;
+
+ if (is64Bit()) {
+ auto *F = dyn_cast_or_null<Function>(GV);
+ if (F && F->hasFnAttribute(Attribute::NonLazyBind))
+ // If the function is marked as non-lazy, generate an indirect call
+ // which loads from the GOT directly. This avoids runtime overhead
+ // at the cost of eager binding (and one extra byte of encoding).
+ return X86II::MO_GOTPCREL;
return X86II::MO_NO_FLAG;
}
- // Direct static reference to global.
return X86II::MO_NO_FLAG;
}
-
/// This function returns the name of a function which has an interface like
/// the non-standard bzero function, if such a function exists on the
/// current subtarget and it is considered preferable over memset with zero
@@ -165,7 +159,7 @@ bool X86Subtarget::hasSinCos() const {
}
/// Return true if the subtarget allows calls to immediate address.
-bool X86Subtarget::IsLegalToCallImmediateAddr(const TargetMachine &TM) const {
+bool X86Subtarget::isLegalToCallImmediateAddr() const {
// FIXME: I386 PE/COFF supports PC relative calls using IMAGE_REL_I386_REL32
// but WinCOFFObjectWriter::RecordRelocation cannot emit them. Once it does,
// the following check for Win32 should be removed.
@@ -227,18 +221,19 @@ void X86Subtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
assert((!In64BitMode || HasX86_64) &&
"64-bit code requested on a subtarget that doesn't support it!");
- // Stack alignment is 16 bytes on Darwin, Linux and Solaris (both
+ // Stack alignment is 16 bytes on Darwin, Linux, kFreeBSD and Solaris (both
// 32 and 64 bit) and for all 64-bit targets.
if (StackAlignOverride)
stackAlignment = StackAlignOverride;
else if (isTargetDarwin() || isTargetLinux() || isTargetSolaris() ||
- In64BitMode)
+ isTargetKFreeBSD() || In64BitMode)
stackAlignment = 16;
}
void X86Subtarget::initializeEnvironment() {
X86SSELevel = NoSSE;
X863DNowLevel = NoThreeDNow;
+ HasX87 = false;
HasCMov = false;
HasX86_64 = false;
HasPOPCNT = false;
@@ -261,6 +256,8 @@ void X86Subtarget::initializeEnvironment() {
HasLZCNT = false;
HasBMI = false;
HasBMI2 = false;
+ HasVBMI = false;
+ HasIFMA = false;
HasRTM = false;
HasHLE = false;
HasERI = false;
@@ -275,6 +272,7 @@ void X86Subtarget::initializeEnvironment() {
HasPRFCHW = false;
HasRDSEED = false;
HasLAHFSAHF = false;
+ HasMWAITX = false;
HasMPX = false;
IsBTMemSlow = false;
IsSHLDSlow = false;
@@ -283,6 +281,7 @@ void X86Subtarget::initializeEnvironment() {
HasSSEUnalignedMem = false;
HasCmpxchg16b = false;
UseLeaForSP = false;
+ HasFastPartialYMMWrite = false;
HasSlowDivide32 = false;
HasSlowDivide64 = false;
PadShortFunctions = false;
@@ -303,11 +302,11 @@ X86Subtarget &X86Subtarget::initializeSubtargetDependencies(StringRef CPU,
return *this;
}
-X86Subtarget::X86Subtarget(const Triple &TT, const std::string &CPU,
- const std::string &FS, const X86TargetMachine &TM,
+X86Subtarget::X86Subtarget(const Triple &TT, StringRef CPU, StringRef FS,
+ const X86TargetMachine &TM,
unsigned StackAlignOverride)
: X86GenSubtargetInfo(TT, CPU, FS), X86ProcFamily(Others),
- PICStyle(PICStyles::None), TargetTriple(TT),
+ PICStyle(PICStyles::None), TM(TM), TargetTriple(TT),
StackAlignOverride(StackAlignOverride),
In64BitMode(TargetTriple.getArch() == Triple::x86_64),
In32BitMode(TargetTriple.getArch() == Triple::x86 &&
@@ -317,24 +316,16 @@ X86Subtarget::X86Subtarget(const Triple &TT, const std::string &CPU,
TSInfo(), InstrInfo(initializeSubtargetDependencies(CPU, FS)),
TLInfo(TM, *this), FrameLowering(*this, getStackAlignment()) {
// Determine the PICStyle based on the target selected.
- if (TM.getRelocationModel() == Reloc::Static) {
- // Unless we're in PIC or DynamicNoPIC mode, set the PIC style to None.
+ if (!isPositionIndependent())
setPICStyle(PICStyles::None);
- } else if (is64Bit()) {
- // PIC in 64 bit mode is always rip-rel.
+ else if (is64Bit())
setPICStyle(PICStyles::RIPRel);
- } else if (isTargetCOFF()) {
+ else if (isTargetCOFF())
setPICStyle(PICStyles::None);
- } else if (isTargetDarwin()) {
- if (TM.getRelocationModel() == Reloc::PIC_)
- setPICStyle(PICStyles::StubPIC);
- else {
- assert(TM.getRelocationModel() == Reloc::DynamicNoPIC);
- setPICStyle(PICStyles::StubDynamicNoPIC);
- }
- } else if (isTargetELF()) {
+ else if (isTargetDarwin())
+ setPICStyle(PICStyles::StubPIC);
+ else if (isTargetELF())
setPICStyle(PICStyles::GOT);
- }
}
bool X86Subtarget::enableEarlyIfConversion() const {
diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h
index 13d1026dcaa0..a274b797ca8f 100644
--- a/lib/Target/X86/X86Subtarget.h
+++ b/lib/Target/X86/X86Subtarget.h
@@ -35,11 +35,10 @@ class TargetMachine;
///
namespace PICStyles {
enum Style {
- StubPIC, // Used on i386-darwin in -fPIC mode.
- StubDynamicNoPIC, // Used on i386-darwin in -mdynamic-no-pic mode.
- GOT, // Used on many 32-bit unices in -fPIC mode.
- RIPRel, // Used on X86-64 when not in -static mode.
- None // Set when in -static mode (not PIC or DynamicNoPIC mode).
+ StubPIC, // Used on i386-darwin in pic mode.
+ GOT, // Used on 32-bit ELF when in pic mode.
+ RIPRel, // Used on X86-64 when in pic mode.
+ None // Set when not in pic mode.
};
}
@@ -64,12 +63,17 @@ protected:
/// Which PIC style to use
PICStyles::Style PICStyle;
+ const TargetMachine &TM;
+
/// SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, or none supported.
X86SSEEnum X86SSELevel;
/// MMX, 3DNow, 3DNow Athlon, or none supported.
X863DNowEnum X863DNowLevel;
+ /// True if the processor supports X87 instructions.
+ bool HasX87;
+
/// True if this processor has conditional move instructions
/// (generally pentium pro+).
bool HasCMov;
@@ -134,6 +138,12 @@ protected:
/// Processor has BMI2 instructions.
bool HasBMI2;
+ /// Processor has VBMI instructions.
+ bool HasVBMI;
+
+ /// Processor has Integer Fused Multiply Add
+ bool HasIFMA;
+
/// Processor has RTM instructions.
bool HasRTM;
@@ -155,6 +165,12 @@ protected:
/// Processor has LAHF/SAHF instructions.
bool HasLAHFSAHF;
+ /// Processor has MONITORX/MWAITX instructions.
+ bool HasMWAITX;
+
+ /// Processor has the PREFETCHWT1 (prefetch with intent to write) instruction
+ bool HasPFPREFETCHWT1;
+
/// True if BT (bit test) of memory instructions are slow.
bool IsBTMemSlow;
@@ -179,6 +195,10 @@ protected:
/// the stack pointer. This is an optimization for Intel Atom processors.
bool UseLeaForSP;
+ /// True if there is no performance penalty to writing only the lower parts
+ /// of a YMM register without clearing the upper part.
+ bool HasFastPartialYMMWrite;
+
/// True if 8-bit divisions are significantly faster than
/// 32-bit divisions and should be used when possible.
bool HasSlowDivide32;
@@ -226,9 +246,30 @@ protected:
/// Processor has PKU extenstions
bool HasPKU;
- /// Processot supports MPX - Memory Protection Extensions
+ /// Processor supports MPX - Memory Protection Extensions
bool HasMPX;
+ /// Processor supports Invalidate Process-Context Identifier
+ bool HasInvPCId;
+
+ /// Processor has VM Functions
+ bool HasVMFUNC;
+
+ /// Processor has Supervisor Mode Access Protection
+ bool HasSMAP;
+
+ /// Processor has Software Guard Extensions
+ bool HasSGX;
+
+ /// Processor supports Flush Cache Line instruction
+ bool HasCLFLUSHOPT;
+
+ /// Processor has Persistent Commit feature
+ bool HasPCOMMIT;
+
+ /// Processor supports Cache Line Write Back instruction
+ bool HasCLWB;
+
/// Use software floating point for code generation.
bool UseSoftFloat;
@@ -271,7 +312,7 @@ public:
/// This constructor initializes the data members to match that
/// of the specified triple.
///
- X86Subtarget(const Triple &TT, const std::string &CPU, const std::string &FS,
+ X86Subtarget(const Triple &TT, StringRef CPU, StringRef FS,
const X86TargetMachine &TM, unsigned StackAlignOverride);
const X86TargetLowering *getTargetLowering() const override {
@@ -336,6 +377,7 @@ public:
PICStyles::Style getPICStyle() const { return PICStyle; }
void setPICStyle(PICStyles::Style Style) { PICStyle = Style; }
+ bool hasX87() const { return HasX87; }
bool hasCMov() const { return HasCMov; }
bool hasSSE1() const { return X86SSELevel >= SSE1; }
bool hasSSE2() const { return X86SSELevel >= SSE2; }
@@ -374,6 +416,8 @@ public:
bool hasLZCNT() const { return HasLZCNT; }
bool hasBMI() const { return HasBMI; }
bool hasBMI2() const { return HasBMI2; }
+ bool hasVBMI() const { return HasVBMI; }
+ bool hasIFMA() const { return HasIFMA; }
bool hasRTM() const { return HasRTM; }
bool hasHLE() const { return HasHLE; }
bool hasADX() const { return HasADX; }
@@ -381,6 +425,7 @@ public:
bool hasPRFCHW() const { return HasPRFCHW; }
bool hasRDSEED() const { return HasRDSEED; }
bool hasLAHFSAHF() const { return HasLAHFSAHF; }
+ bool hasMWAITX() const { return HasMWAITX; }
bool isBTMemSlow() const { return IsBTMemSlow; }
bool isSHLDSlow() const { return IsSHLDSlow; }
bool isUnalignedMem16Slow() const { return IsUAMem16Slow; }
@@ -388,6 +433,7 @@ public:
bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
bool hasCmpxchg16b() const { return HasCmpxchg16b; }
bool useLeaForSP() const { return UseLeaForSP; }
+ bool hasFastPartialYMMWrite() const { return HasFastPartialYMMWrite; }
bool hasSlowDivide32() const { return HasSlowDivide32; }
bool hasSlowDivide64() const { return HasSlowDivide64; }
bool padShortFunctions() const { return PadShortFunctions; }
@@ -408,6 +454,11 @@ public:
bool isSLM() const { return X86ProcFamily == IntelSLM; }
bool useSoftFloat() const { return UseSoftFloat; }
+ /// Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
+ /// no-sse2). There isn't any reason to disable it if the target processor
+ /// supports it.
+ bool hasMFence() const { return hasSSE2() || is64Bit(); }
+
const Triple &getTargetTriple() const { return TargetTriple; }
bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
@@ -421,6 +472,8 @@ public:
bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
+ bool isTargetKFreeBSD() const { return TargetTriple.isOSKFreeBSD(); }
+ bool isTargetGlibc() const { return TargetTriple.isOSGlibc(); }
bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
bool isTargetNaCl32() const { return isTargetNaCl() && !is64Bit(); }
@@ -463,7 +516,6 @@ public:
return !In64BitMode && (isTargetCygMing() || isTargetKnownWindowsMSVC());
}
- bool isPICStyleSet() const { return PICStyle != PICStyles::None; }
bool isPICStyleGOT() const { return PICStyle == PICStyles::GOT; }
bool isPICStyleRIPRel() const { return PICStyle == PICStyles::RIPRel; }
@@ -471,13 +523,7 @@ public:
return PICStyle == PICStyles::StubPIC;
}
- bool isPICStyleStubNoDynamic() const {
- return PICStyle == PICStyles::StubDynamicNoPIC;
- }
- bool isPICStyleStubAny() const {
- return PICStyle == PICStyles::StubDynamicNoPIC ||
- PICStyle == PICStyles::StubPIC;
- }
+ bool isPositionIndependent() const { return TM.isPositionIndependent(); }
bool isCallingConvWin64(CallingConv::ID CC) const {
switch (CC) {
@@ -502,18 +548,25 @@ public:
}
}
- /// ClassifyGlobalReference - Classify a global variable reference for the
- /// current subtarget according to how we should reference it in a non-pcrel
- /// context.
- unsigned char ClassifyGlobalReference(const GlobalValue *GV,
- const TargetMachine &TM)const;
+ /// Classify a global variable reference for the current subtarget according
+ /// to how we should reference it in a non-pcrel context.
+ unsigned char classifyLocalReference(const GlobalValue *GV) const;
+
+ unsigned char classifyGlobalReference(const GlobalValue *GV,
+ const Module &M) const;
+ unsigned char classifyGlobalReference(const GlobalValue *GV) const;
+
+ /// Classify a global function reference for the current subtarget.
+ unsigned char classifyGlobalFunctionReference(const GlobalValue *GV,
+ const Module &M) const;
+ unsigned char classifyGlobalFunctionReference(const GlobalValue *GV) const;
/// Classify a blockaddress reference for the current subtarget according to
/// how we should reference it in a non-pcrel context.
- unsigned char ClassifyBlockAddressReference() const;
+ unsigned char classifyBlockAddressReference() const;
/// Return true if the subtarget allows calls to immediate address.
- bool IsLegalToCallImmediateAddr(const TargetMachine &TM) const;
+ bool isLegalToCallImmediateAddr() const;
/// This function returns the name of a function which has an interface
/// like the non-standard bzero function, if such a function exists on
diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp
index 0e7e4c0c84a9..50c9c25a27c0 100644
--- a/lib/Target/X86/X86TargetMachine.cpp
+++ b/lib/Target/X86/X86TargetMachine.cpp
@@ -16,6 +16,7 @@
#include "X86TargetObjectFile.h"
#include "X86TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/CommandLine.h"
@@ -39,6 +40,7 @@ extern "C" void LLVMInitializeX86Target() {
PassRegistry &PR = *PassRegistry::getPassRegistry();
initializeWinEHStatePassPass(PR);
+ initializeFixupBWInstPassPass(PR);
}
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
@@ -73,17 +75,22 @@ static std::string computeDataLayout(const Triple &TT) {
// Some ABIs align 64 bit integers and doubles to 64 bits, others to 32.
if (TT.isArch64Bit() || TT.isOSWindows() || TT.isOSNaCl())
Ret += "-i64:64";
+ else if (TT.isOSIAMCU())
+ Ret += "-i64:32-f64:32";
else
Ret += "-f64:32:64";
// Some ABIs align long double to 128 bits, others to 32.
- if (TT.isOSNaCl())
+ if (TT.isOSNaCl() || TT.isOSIAMCU())
; // No f80
else if (TT.isArch64Bit() || TT.isOSDarwin())
Ret += "-f80:128";
else
Ret += "-f80:32";
+ if (TT.isOSIAMCU())
+ Ret += "-f128:32";
+
// The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
if (TT.isArch64Bit())
Ret += "-n8:16:32:64";
@@ -91,7 +98,7 @@ static std::string computeDataLayout(const Triple &TT) {
Ret += "-n8:16:32";
// The stack is aligned to 32 bits on some ABIs and 128 bits on others.
- if (!TT.isArch64Bit() && TT.isOSWindows())
+ if ((!TT.isArch64Bit() && TT.isOSWindows()) || TT.isOSIAMCU())
Ret += "-a:0:32-S32";
else
Ret += "-S128";
@@ -99,22 +106,60 @@ static std::string computeDataLayout(const Triple &TT) {
return Ret;
}
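For a concrete feel of what the IAMCU branches above produce, a hypothetical IAMCU triple would yield a layout string ending in roughly

    -i64:32-f64:32-f128:32-n8:16:32-a:0:32-S32

that is, 64-bit integers, doubles and fp128 all aligned to 32 bits, no f80, and a 32-bit-aligned stack. The leading part of the string is elided here because it is computed earlier in the function, outside this hunk.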
-/// X86TargetMachine ctor - Create an X86 target.
+static Reloc::Model getEffectiveRelocModel(const Triple &TT,
+ Optional<Reloc::Model> RM) {
+ bool is64Bit = TT.getArch() == Triple::x86_64;
+ if (!RM.hasValue()) {
+ // Darwin defaults to PIC in 64 bit mode and dynamic-no-pic in 32 bit mode.
+ // Win64 requires rip-rel addressing, thus we force it to PIC. Otherwise we
+ // use static relocation model by default.
+ if (TT.isOSDarwin()) {
+ if (is64Bit)
+ return Reloc::PIC_;
+ return Reloc::DynamicNoPIC;
+ }
+ if (TT.isOSWindows() && is64Bit)
+ return Reloc::PIC_;
+ return Reloc::Static;
+ }
+
+ // ELF and X86-64 don't have a distinct DynamicNoPIC model. DynamicNoPIC
+ // is defined as a model for code which may be used in static or dynamic
+ // executables but not necessarily a shared library. On X86-32 we just
+ // compile in -static mode, in x86-64 we use PIC.
+ if (*RM == Reloc::DynamicNoPIC) {
+ if (is64Bit)
+ return Reloc::PIC_;
+ if (!TT.isOSDarwin())
+ return Reloc::Static;
+ }
+
+ // If we are on Darwin, disallow static relocation model in X86-64 mode, since
+ // the Mach-O file format doesn't support it.
+ if (*RM == Reloc::Static && TT.isOSDarwin() && is64Bit)
+ return Reloc::PIC_;
+
+ return *RM;
+}
+
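A few hypothetical spot checks of the defaults this helper encodes, with None meaning no relocation model was requested explicitly (a sketch, not part of the patch):

    // Darwin: PIC in 64-bit mode, dynamic-no-pic in 32-bit mode.
    assert(getEffectiveRelocModel(Triple("x86_64-apple-darwin"), None) ==
           Reloc::PIC_);
    assert(getEffectiveRelocModel(Triple("i386-apple-darwin"), None) ==
           Reloc::DynamicNoPIC);
    // Win64 needs rip-relative addressing, so it is forced to PIC.
    assert(getEffectiveRelocModel(Triple("x86_64-pc-win32"), None) ==
           Reloc::PIC_);
    // Everything else defaults to static relocations.
    assert(getEffectiveRelocModel(Triple("i686-pc-linux-gnu"), None) ==
           Reloc::Static);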
+/// Create an X86 target.
///
X86TargetMachine::X86TargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options, RM, CM,
- OL),
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL)
+ : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options,
+ getEffectiveRelocModel(TT, RM), CM, OL),
TLOF(createTLOF(getTargetTriple())),
Subtarget(TT, CPU, FS, *this, Options.StackAlignmentOverride) {
// Windows stack unwinder gets confused when execution flow "falls through"
// after a call to 'noreturn' function.
// To prevent that, we emit a trap for 'unreachable' IR instructions.
// (which on X86, happens to be the 'ud2' instruction)
- if (Subtarget.isTargetWin64())
+ // On PS4, the "return address" of a 'noreturn' call must still be within
+ // the calling function, and TrapUnreachable is an easy way to get that.
+ if (Subtarget.isTargetWin64() || Subtarget.isTargetPS4())
this->Options.TrapUnreachable = true;
// By default (and when -ffast-math is on), enable estimate codegen for
@@ -137,12 +182,17 @@ X86TargetMachine::getSubtargetImpl(const Function &F) const {
Attribute CPUAttr = F.getFnAttribute("target-cpu");
Attribute FSAttr = F.getFnAttribute("target-features");
- std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
- ? CPUAttr.getValueAsString().str()
- : TargetCPU;
- std::string FS = !FSAttr.hasAttribute(Attribute::None)
- ? FSAttr.getValueAsString().str()
- : TargetFS;
+ StringRef CPU = !CPUAttr.hasAttribute(Attribute::None)
+ ? CPUAttr.getValueAsString()
+ : (StringRef)TargetCPU;
+ StringRef FS = !FSAttr.hasAttribute(Attribute::None)
+ ? FSAttr.getValueAsString()
+ : (StringRef)TargetFS;
+
+ SmallString<512> Key;
+ Key.reserve(CPU.size() + FS.size());
+ Key += CPU;
+ Key += FS;
// FIXME: This is related to the code below to reset the target options,
// we need to know whether or not the soft float flag is set on the
@@ -150,14 +200,15 @@ X86TargetMachine::getSubtargetImpl(const Function &F) const {
// it as a key for the subtarget since that can be the only difference
// between two functions.
bool SoftFloat =
- F.hasFnAttribute("use-soft-float") &&
F.getFnAttribute("use-soft-float").getValueAsString() == "true";
// If the soft float attribute is set on the function turn on the soft float
// subtarget feature.
if (SoftFloat)
- FS += FS.empty() ? "+soft-float" : ",+soft-float";
+ Key += FS.empty() ? "+soft-float" : ",+soft-float";
- auto &I = SubtargetMap[CPU + FS];
+ FS = Key.substr(CPU.size());
+
+ auto &I = SubtargetMap[Key];
if (!I) {
// This needs to be done before we create a new subtarget since any
// creation will depend on the TM and the code generation flags on the
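To make the key construction above concrete, a minimal sketch with made-up attribute values: CPU "haswell", features "+avx2", and "use-soft-float"="true" on the function.

    StringRef CPU = "haswell";              // hypothetical target-cpu
    SmallString<512> Key;                   // becomes "haswell+avx2,+soft-float"
    Key += CPU;
    Key += "+avx2";                         // hypothetical target-features
    Key += ",+soft-float";                  // appended because of the attribute
    StringRef FS = Key.substr(CPU.size());  // "+avx2,+soft-float"

Two functions differing only in the soft-float attribute therefore map to different SubtargetMap entries, and the feature string handed to the new X86Subtarget carries the soft-float bit as well.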
@@ -234,7 +285,6 @@ bool X86PassConfig::addInstSelector() {
addPass(createCleanupLocalDynamicTLSPass());
addPass(createX86GlobalBaseRegPass());
-
return false;
}
@@ -254,10 +304,13 @@ bool X86PassConfig::addPreISel() {
}
void X86PassConfig::addPreRegAlloc() {
- if (getOptLevel() != CodeGenOpt::None)
+ if (getOptLevel() != CodeGenOpt::None) {
+ addPass(createX86FixupSetCC());
addPass(createX86OptimizeLEAs());
+ addPass(createX86CallFrameOptimization());
+ }
- addPass(createX86CallFrameOptimization());
+ addPass(createX86WinAllocaExpander());
}
void X86PassConfig::addPostRegAlloc() {
@@ -274,6 +327,7 @@ void X86PassConfig::addPreEmitPass() {
addPass(createX86IssueVZeroUpperPass());
if (getOptLevel() != CodeGenOpt::None) {
+ addPass(createX86FixupBWInsts());
addPass(createX86PadShortFunctions());
addPass(createX86FixupLEAs());
}
diff --git a/lib/Target/X86/X86TargetMachine.h b/lib/Target/X86/X86TargetMachine.h
index 262955698e44..4734a44315a9 100644
--- a/lib/Target/X86/X86TargetMachine.h
+++ b/lib/Target/X86/X86TargetMachine.h
@@ -30,8 +30,9 @@ class X86TargetMachine final : public LLVMTargetMachine {
public:
X86TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
- StringRef FS, const TargetOptions &Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OL);
+ StringRef FS, const TargetOptions &Options,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
~X86TargetMachine() override;
const X86Subtarget *getSubtargetImpl(const Function &F) const override;
diff --git a/lib/Target/X86/X86TargetObjectFile.cpp b/lib/Target/X86/X86TargetObjectFile.cpp
index 782768d0ab16..d664cff5f2c1 100644
--- a/lib/Target/X86/X86TargetObjectFile.cpp
+++ b/lib/Target/X86/X86TargetObjectFile.cpp
@@ -73,53 +73,30 @@ X86LinuxNaClTargetObjectFile::Initialize(MCContext &Ctx,
InitializeELF(TM.Options.UseInitArray);
}
-const MCExpr *X86WindowsTargetObjectFile::getExecutableRelativeSymbol(
- const ConstantExpr *CE, Mangler &Mang, const TargetMachine &TM) const {
- // We are looking for the difference of two symbols, need a subtraction
- // operation.
- const SubOperator *Sub = dyn_cast<SubOperator>(CE);
- if (!Sub)
- return nullptr;
-
- // Symbols must first be numbers before we can subtract them, we need to see a
- // ptrtoint on both subtraction operands.
- const PtrToIntOperator *SubLHS =
- dyn_cast<PtrToIntOperator>(Sub->getOperand(0));
- const PtrToIntOperator *SubRHS =
- dyn_cast<PtrToIntOperator>(Sub->getOperand(1));
- if (!SubLHS || !SubRHS)
- return nullptr;
-
+const MCExpr *X86WindowsTargetObjectFile::lowerRelativeReference(
+ const GlobalValue *LHS, const GlobalValue *RHS, Mangler &Mang,
+ const TargetMachine &TM) const {
// Our symbols should exist in address space zero, cowardly no-op if
// otherwise.
- if (SubLHS->getPointerAddressSpace() != 0 ||
- SubRHS->getPointerAddressSpace() != 0)
+ if (LHS->getType()->getPointerAddressSpace() != 0 ||
+ RHS->getType()->getPointerAddressSpace() != 0)
return nullptr;
// Both ptrtoint instructions must wrap global objects:
// - Only global variables are eligible for image relative relocations.
// - The subtrahend refers to the special symbol __ImageBase, a GlobalVariable.
- const auto *GOLHS = dyn_cast<GlobalObject>(SubLHS->getPointerOperand());
- const auto *GVRHS = dyn_cast<GlobalVariable>(SubRHS->getPointerOperand());
- if (!GOLHS || !GVRHS)
- return nullptr;
-
// We expect __ImageBase to be a global variable without a section, externally
// defined.
//
// It should look something like this: @__ImageBase = external constant i8
- if (GVRHS->isThreadLocal() || GVRHS->getName() != "__ImageBase" ||
- !GVRHS->hasExternalLinkage() || GVRHS->hasInitializer() ||
- GVRHS->hasSection())
- return nullptr;
-
- // An image-relative, thread-local, symbol makes no sense.
- if (GOLHS->isThreadLocal())
+ if (!isa<GlobalObject>(LHS) || !isa<GlobalVariable>(RHS) ||
+ LHS->isThreadLocal() || RHS->isThreadLocal() ||
+ RHS->getName() != "__ImageBase" || !RHS->hasExternalLinkage() ||
+ cast<GlobalVariable>(RHS)->hasInitializer() || RHS->hasSection())
return nullptr;
- return MCSymbolRefExpr::create(TM.getSymbol(GOLHS, Mang),
- MCSymbolRefExpr::VK_COFF_IMGREL32,
- getContext());
+ return MCSymbolRefExpr::create(
+ TM.getSymbol(LHS, Mang), MCSymbolRefExpr::VK_COFF_IMGREL32, getContext());
}
static std::string APIntToHexString(const APInt &AI) {
@@ -154,16 +131,34 @@ static std::string scalarConstantToHexString(const Constant *C) {
}
MCSection *X86WindowsTargetObjectFile::getSectionForConstant(
- const DataLayout &DL, SectionKind Kind, const Constant *C) const {
+ const DataLayout &DL, SectionKind Kind, const Constant *C,
+ unsigned &Align) const {
if (Kind.isMergeableConst() && C) {
const unsigned Characteristics = COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
COFF::IMAGE_SCN_MEM_READ |
COFF::IMAGE_SCN_LNK_COMDAT;
std::string COMDATSymName;
- if (Kind.isMergeableConst4() || Kind.isMergeableConst8())
- COMDATSymName = "__real@" + scalarConstantToHexString(C);
- else if (Kind.isMergeableConst16())
- COMDATSymName = "__xmm@" + scalarConstantToHexString(C);
+ if (Kind.isMergeableConst4()) {
+ if (Align <= 4) {
+ COMDATSymName = "__real@" + scalarConstantToHexString(C);
+ Align = 4;
+ }
+ } else if (Kind.isMergeableConst8()) {
+ if (Align <= 8) {
+ COMDATSymName = "__real@" + scalarConstantToHexString(C);
+ Align = 8;
+ }
+ } else if (Kind.isMergeableConst16()) {
+ if (Align <= 16) {
+ COMDATSymName = "__xmm@" + scalarConstantToHexString(C);
+ Align = 16;
+ }
+ } else if (Kind.isMergeableConst32()) {
+ if (Align <= 32) {
+ COMDATSymName = "__ymm@" + scalarConstantToHexString(C);
+ Align = 32;
+ }
+ }
if (!COMDATSymName.empty())
return getContext().getCOFFSection(".rdata", Characteristics, Kind,
@@ -171,5 +166,5 @@ MCSection *X86WindowsTargetObjectFile::getSectionForConstant(
COFF::IMAGE_COMDAT_SELECT_ANY);
}
- return TargetLoweringObjectFile::getSectionForConstant(DL, Kind, C);
+ return TargetLoweringObjectFile::getSectionForConstant(DL, Kind, C, Align);
}
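The COMDAT naming scheme above reduces to a small rule. A minimal sketch of it (hypothetical helper, not in the patch; the real code keys off SectionKind and also raises Align to the constant's size):

    // Sketch of the merged-constant naming rule, assuming Hex matches
    // what scalarConstantToHexString would produce for the constant.
    static std::string comdatNameFor(unsigned Size, unsigned Align,
                                     const std::string &Hex) {
      if ((Size == 4 || Size == 8) && Align <= Size)
        return "__real@" + Hex;        // scalar FP constants
      if (Size == 16 && Align <= 16)
        return "__xmm@" + Hex;         // 128-bit vector constants
      if (Size == 32 && Align <= 32)
        return "__ymm@" + Hex;         // the new 256-bit case
      return std::string();            // over-aligned: no COMDAT merging
    }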
diff --git a/lib/Target/X86/X86TargetObjectFile.h b/lib/Target/X86/X86TargetObjectFile.h
index 6b2448cc9de6..2e703f1494fa 100644
--- a/lib/Target/X86/X86TargetObjectFile.h
+++ b/lib/Target/X86/X86TargetObjectFile.h
@@ -40,6 +40,11 @@ namespace llvm {
 /// \brief This implementation is used for X86 ELF targets that don't
/// have a further specialization.
class X86ELFTargetObjectFile : public TargetLoweringObjectFileELF {
+ public:
+ X86ELFTargetObjectFile() {
+ PLTRelativeVariantKind = MCSymbolRefExpr::VK_PLT;
+ }
+
/// \brief Describe a TLS variable address within debug info.
const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const override;
};
@@ -53,13 +58,15 @@ namespace llvm {
/// \brief This implementation is used for Windows targets on x86 and x86-64.
class X86WindowsTargetObjectFile : public TargetLoweringObjectFileCOFF {
const MCExpr *
- getExecutableRelativeSymbol(const ConstantExpr *CE, Mangler &Mang,
- const TargetMachine &TM) const override;
+ lowerRelativeReference(const GlobalValue *LHS, const GlobalValue *RHS,
+ Mangler &Mang,
+ const TargetMachine &TM) const override;
/// \brief Given a mergeable constant with the specified size and relocation
/// information, return a section that it should be placed in.
MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
- const Constant *C) const override;
+ const Constant *C,
+ unsigned &Align) const override;
};
} // end namespace llvm
diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp
index 2e7bbb208743..f44a8c662028 100644
--- a/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -532,21 +532,24 @@ int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
// potential massive combinations (elem_num x src_type x dst_type).
static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
- { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
- { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
{ ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
{ ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
+ { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
{ ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
+ { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
- { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
- { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
- { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
{ ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 },
{ ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
{ ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
+ { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
+ { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
+ { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
};
+ // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
+ // 256-bit wide vectors.
+
static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
{ ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
{ ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
@@ -560,43 +563,46 @@ int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
// v16i1 -> v16i32 - load + broadcast
{ ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
{ ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
-
{ ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
{ ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
{ ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
{ ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
- { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
- { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
{ ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
{ ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
+ { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
+ { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
+ { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
{ ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
+ { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
{ ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
+ { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
{ ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
{ ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
- { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
- { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
- { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
{ ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
+ { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 },
+ { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
{ ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 },
+ { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 },
+ { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
{ ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 },
+ { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
+ { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
{ ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
- { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
- { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
+ { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
{ ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
- { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
- { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
- { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
- { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 },
- { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 },
- { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
- { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
- { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
{ ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 },
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 },
- { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
+ { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
+ { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
+ { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
{ ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
{ ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 12 },
{ ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 },
@@ -608,20 +614,20 @@ int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
};
static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
- { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
- { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
+ { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
+ { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
@@ -639,66 +645,69 @@ int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
};
static const TypeConversionCostTblEntry AVXConversionTbl[] = {
- { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
- { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
+ { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
+ { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
{ ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
+ { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 },
+ { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
+ { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
{ ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 },
{ ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 },
{ ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 4 },
- { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
- { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
- { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 },
{ ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 9 },
- { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
- { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
- { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
- { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
{ ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
- { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
- { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 },
- { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
{ ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
+ { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
{ ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 },
+ { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 },
{ ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 },
+ { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
{ ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
+ { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
- { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
- { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 },
- { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
- { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 },
{ ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
- { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 },
- { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
- { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 },
{ ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 },
{ ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
{ ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 },
{ ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
+ { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 },
// The generic code to compute the scalar overhead is currently broken.
// Workaround this limitation by estimating the scalarization overhead
// here. We have roughly 10 instructions per scalar element.
// Multiply that by the vector width.
// FIXME: remove that when PR19268 is fixed.
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
- { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 4*10 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 10 },
+ { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 20 },
+ { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },
+ { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },
- { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 7 },
{ ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 },
+ { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 7 },
// This node is expanded into scalarized operations but BasicTTI is overly
// optimistic estimating its cost. It computes 3 per element (one
// vector-extract, one scalar conversion and one vector-insert). The
@@ -706,89 +715,104 @@ int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
// should be factored in too. Inflating the cost per element by 1.
{ ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 },
{ ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 },
+
+ { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
+ { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
};
static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
- { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
- { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
- { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
- { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
- { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
- { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
+
+ { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
+ { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 },
{ ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
{ ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
- { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
- { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
{ ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
{ ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
- { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
- { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
+ { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
+ { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
+ { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
+ { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
+ { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
+ { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
+ { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
+ { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
- { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 },
- { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 },
+ { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 },
+ { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 },
+ { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 },
{ ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },
- { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 30 },
{ ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },
- { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 },
- { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
- { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 },
- { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 },
+ { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 },
+ { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 },
+
};
static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
// These are somewhat magic numbers justified by looking at the output of
// Intel's IACA, running some kernels and making sure when we take
// legalization into account the throughput will be overestimated.
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
- { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
- { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
- { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
- { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
{ ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
- // There are faster sequences for float conversions.
- { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
- { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
- { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
- { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
- { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
- { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
{ ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
- { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
+ { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
+ { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
+ { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
- { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
- { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
- { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
- { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 },
- { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 },
- { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 },
- { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
- { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
+
+ { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
+ { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 },
{ ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
{ ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 },
- { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
- { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 },
{ ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
{ ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 },
- { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
- { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
+ { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
+ { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
+ { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 },
+ { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 },
+ { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
+ { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 },
+ { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
+ { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
+ { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
+ { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
+ { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
+ { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 },
- { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
- { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
+ { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 4 },
+ { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 },
+ { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
+ { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 },
{ ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 },
- { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
{ ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
- { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 },
- { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
- { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 },
- { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 4 },
+ { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
+ { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
+ { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
};
std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
@@ -859,13 +883,17 @@ int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
+ static const CostTblEntry SSE2CostTbl[] = {
+ { ISD::SETCC, MVT::v2i64, 8 },
+ { ISD::SETCC, MVT::v4i32, 1 },
+ { ISD::SETCC, MVT::v8i16, 1 },
+ { ISD::SETCC, MVT::v16i8, 1 },
+ };
+
static const CostTblEntry SSE42CostTbl[] = {
{ ISD::SETCC, MVT::v2f64, 1 },
{ ISD::SETCC, MVT::v4f32, 1 },
{ ISD::SETCC, MVT::v2i64, 1 },
- { ISD::SETCC, MVT::v4i32, 1 },
- { ISD::SETCC, MVT::v8i16, 1 },
- { ISD::SETCC, MVT::v16i8, 1 },
};
static const CostTblEntry AVX1CostTbl[] = {
@@ -908,12 +936,112 @@ int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
return LT.first * Entry->Cost;
+ if (ST->hasSSE2())
+ if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
+ return LT.first * Entry->Cost;
+
return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
+int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<Type *> Tys, FastMathFlags FMF) {
+ static const CostTblEntry XOPCostTbl[] = {
+ { ISD::BITREVERSE, MVT::v4i64, 4 },
+ { ISD::BITREVERSE, MVT::v8i32, 4 },
+ { ISD::BITREVERSE, MVT::v16i16, 4 },
+ { ISD::BITREVERSE, MVT::v32i8, 4 },
+ { ISD::BITREVERSE, MVT::v2i64, 1 },
+ { ISD::BITREVERSE, MVT::v4i32, 1 },
+ { ISD::BITREVERSE, MVT::v8i16, 1 },
+ { ISD::BITREVERSE, MVT::v16i8, 1 },
+ { ISD::BITREVERSE, MVT::i64, 3 },
+ { ISD::BITREVERSE, MVT::i32, 3 },
+ { ISD::BITREVERSE, MVT::i16, 3 },
+ { ISD::BITREVERSE, MVT::i8, 3 }
+ };
+ static const CostTblEntry AVX2CostTbl[] = {
+ { ISD::BITREVERSE, MVT::v4i64, 5 },
+ { ISD::BITREVERSE, MVT::v8i32, 5 },
+ { ISD::BITREVERSE, MVT::v16i16, 5 },
+ { ISD::BITREVERSE, MVT::v32i8, 5 },
+ { ISD::BSWAP, MVT::v4i64, 1 },
+ { ISD::BSWAP, MVT::v8i32, 1 },
+ { ISD::BSWAP, MVT::v16i16, 1 }
+ };
+ static const CostTblEntry AVX1CostTbl[] = {
+ { ISD::BITREVERSE, MVT::v4i64, 10 },
+ { ISD::BITREVERSE, MVT::v8i32, 10 },
+ { ISD::BITREVERSE, MVT::v16i16, 10 },
+ { ISD::BITREVERSE, MVT::v32i8, 10 },
+ { ISD::BSWAP, MVT::v4i64, 4 },
+ { ISD::BSWAP, MVT::v8i32, 4 },
+ { ISD::BSWAP, MVT::v16i16, 4 }
+ };
+ static const CostTblEntry SSSE3CostTbl[] = {
+ { ISD::BITREVERSE, MVT::v2i64, 5 },
+ { ISD::BITREVERSE, MVT::v4i32, 5 },
+ { ISD::BITREVERSE, MVT::v8i16, 5 },
+ { ISD::BITREVERSE, MVT::v16i8, 5 },
+ { ISD::BSWAP, MVT::v2i64, 1 },
+ { ISD::BSWAP, MVT::v4i32, 1 },
+ { ISD::BSWAP, MVT::v8i16, 1 }
+ };
+ static const CostTblEntry SSE2CostTbl[] = {
+ { ISD::BSWAP, MVT::v2i64, 7 },
+ { ISD::BSWAP, MVT::v4i32, 7 },
+ { ISD::BSWAP, MVT::v8i16, 7 }
+ };
+
+ unsigned ISD = ISD::DELETED_NODE;
+ switch (IID) {
+ default:
+ break;
+ case Intrinsic::bitreverse:
+ ISD = ISD::BITREVERSE;
+ break;
+ case Intrinsic::bswap:
+ ISD = ISD::BSWAP;
+ break;
+ }
+
+ // Legalize the type.
+ std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
+ MVT MTy = LT.second;
+
+ // Attempt to lookup cost.
+ if (ST->hasXOP())
+ if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
+ return LT.first * Entry->Cost;
+
+ if (ST->hasAVX2())
+ if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
+ return LT.first * Entry->Cost;
+
+ if (ST->hasAVX())
+ if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
+ return LT.first * Entry->Cost;
+
+ if (ST->hasSSSE3())
+ if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
+ return LT.first * Entry->Cost;
+
+ if (ST->hasSSE2())
+ if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
+ return LT.first * Entry->Cost;
+
+ return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF);
+}
+
+int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<Value *> Args, FastMathFlags FMF) {
+ return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF);
+}
+
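A hypothetical query against the tables above (Ctx standing in for an LLVMContext and TTI for a TargetTransformInfo, both assumed to be in scope):

    FastMathFlags FMF;
    Type *V4I32 = VectorType::get(Type::getInt32Ty(Ctx), 4);
    int Cost = TTI.getIntrinsicInstrCost(Intrinsic::bswap, V4I32,
                                         ArrayRef<Type *>(), FMF);
    // On SSSE3 and later the SSSE3 table answers first (cost 1, a single
    // PSHUFB); plain SSE2 falls through to the entry costed at 7. The
    // lookup order (XOP, AVX2, AVX, SSSE3, SSE2) lets the cheapest
    // applicable implementation win.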
int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
assert(Val->isVectorTy() && "This must be a vector type");
+ Type *ScalarType = Val->getScalarType();
+
if (Index != -1U) {
// Legalize the type.
std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
@@ -927,11 +1055,17 @@ int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
Index = Index % Width;
// Floating point scalars are already located in index #0.
- if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
+ if (ScalarType->isFloatingPointTy() && Index == 0)
return 0;
}
- return BaseT::getVectorInstrCost(Opcode, Val, Index);
+ // Add to the base cost if we know that the extracted element of a vector is
+ // destined to be moved to and used in the integer register file.
+ int RegisterFileMoveCost = 0;
+ if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
+ RegisterFileMoveCost = 1;
+
+ return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}
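For example (hypothetical types; Ctx and TTI assumed in scope), extracting a pointer element is charged one extra unit for the move into the integer register file:

    Type *PtrVec = VectorType::get(Type::getInt8PtrTy(Ctx), 4);
    int Cost = TTI.getVectorInstrCost(Instruction::ExtractElement, PtrVec, 0);
    // == base extract cost + 1 (RegisterFileMoveCost)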
int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
@@ -983,10 +1117,10 @@ int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
// Each load/store unit costs 1.
int Cost = LT.first * 1;
- // On Sandybridge 256bit load/stores are double pumped
- // (but not on Haswell).
- if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
- Cost*=2;
+ // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
+ // proxy for a double-pumped AVX memory interface such as on Sandybridge.
+ if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
+ Cost *= 2;
return Cost;
}
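A hypothetical instance of the heuristic (Ctx and TTI assumed in scope): on an AVX subtarget a 32-byte <8 x float> load legalizes to a single YMM access, so

    Type *V8F32 = VectorType::get(Type::getFloatTy(Ctx), 8);
    int LoadCost = TTI.getMemoryOpCost(Instruction::Load, V8F32,
                                       /*Alignment=*/32, /*AddressSpace=*/0);
    // == 2 when ST->isUnalignedMem32Slow() (the Sandybridge-class proxy),
    // == 1 on subtargets with fast 32-byte access (e.g. Haswell).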
@@ -1001,14 +1135,14 @@ int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
unsigned NumElem = SrcVTy->getVectorNumElements();
VectorType *MaskTy =
- VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
+ VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
(Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
!isPowerOf2_32(NumElem)) {
// Scalarization
int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
int ScalarCompareCost = getCmpSelInstrCost(
- Instruction::ICmp, Type::getInt8Ty(getGlobalContext()), nullptr);
+ Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
int BranchCost = getCFInstrCost(Instruction::Br);
int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
@@ -1171,7 +1305,7 @@ int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
int64_t Val = Tmp.getSExtValue();
Cost += getIntImmCost(Val);
}
- // We need at least one instruction to materialze the constant.
+ // We need at least one instruction to materialize the constant.
return std::max(1, Cost);
}
@@ -1314,7 +1448,7 @@ int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
if (IndexSize < 64 || !GEP)
return IndexSize;
-
+
unsigned NumOfVarIndices = 0;
Value *Ptrs = GEP->getPointerOperand();
if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
@@ -1339,7 +1473,7 @@ int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
unsigned IndexSize = (VF >= 16) ? getIndexSizeInBits(Ptr, DL) :
DL.getPointerSizeInBits();
- Type *IndexVTy = VectorType::get(IntegerType::get(getGlobalContext(),
+ Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(),
IndexSize), VF);
std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
@@ -1374,10 +1508,10 @@ int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
int MaskUnpackCost = 0;
if (VariableMask) {
VectorType *MaskTy =
- VectorType::get(Type::getInt1Ty(getGlobalContext()), VF);
+ VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
int ScalarCompareCost =
- getCmpSelInstrCost(Instruction::ICmp, Type::getInt1Ty(getGlobalContext()),
+ getCmpSelInstrCost(Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()),
nullptr);
int BranchCost = getCFInstrCost(Instruction::Br);
MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
@@ -1438,7 +1572,8 @@ bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
int DataWidth = isa<PointerType>(ScalarTy) ?
DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();
- return (DataWidth >= 32 && ST->hasAVX2());
+ return (DataWidth >= 32 && ST->hasAVX()) ||
+ (DataWidth >= 8 && ST->hasBWI());
}
bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
diff --git a/lib/Target/X86/X86TargetTransformInfo.h b/lib/Target/X86/X86TargetTransformInfo.h
index adb745e912d1..ab8046bb9fd4 100644
--- a/lib/Target/X86/X86TargetTransformInfo.h
+++ b/lib/Target/X86/X86TargetTransformInfo.h
@@ -80,6 +80,11 @@ public:
bool VariableMask, unsigned Alignment);
int getAddressComputationCost(Type *PtrTy, bool IsComplex);
+ int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<Type *> Tys, FastMathFlags FMF);
+ int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<Value *> Args, FastMathFlags FMF);
+
int getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwiseForm);
int getIntImmCost(int64_t);
diff --git a/lib/Target/X86/X86VZeroUpper.cpp b/lib/Target/X86/X86VZeroUpper.cpp
index 6925b272b4a5..9320e1e2226f 100644
--- a/lib/Target/X86/X86VZeroUpper.cpp
+++ b/lib/Target/X86/X86VZeroUpper.cpp
@@ -38,6 +38,10 @@ namespace {
VZeroUpperInserter() : MachineFunctionPass(ID) {}
bool runOnMachineFunction(MachineFunction &MF) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
const char *getPassName() const override {return "X86 vzeroupper inserter";}
private:
@@ -80,6 +84,7 @@ namespace {
BlockStateMap BlockStates;
DirtySuccessorsWorkList DirtySuccessors;
bool EverMadeChange;
+ bool IsX86INTR;
const TargetInstrInfo *TII;
static char ID;
@@ -122,10 +127,9 @@ static bool clobbersAllYmmRegs(const MachineOperand &MO) {
return true;
}
-static bool hasYmmReg(MachineInstr *MI) {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
- if (MI->isCall() && MO.isRegMask() && !clobbersAllYmmRegs(MO))
+static bool hasYmmReg(MachineInstr &MI) {
+ for (const MachineOperand &MO : MI.operands()) {
+ if (MI.isCall() && MO.isRegMask() && !clobbersAllYmmRegs(MO))
return true;
if (!MO.isReg())
continue;
@@ -137,12 +141,10 @@ static bool hasYmmReg(MachineInstr *MI) {
return false;
}
-/// clobbersAnyYmmReg() - Check if any YMM register will be clobbered by this
-/// instruction.
-static bool callClobbersAnyYmmReg(MachineInstr *MI) {
- assert(MI->isCall() && "Can only be called on call instructions.");
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+/// Check if any YMM register will be clobbered by this instruction.
+static bool callClobbersAnyYmmReg(MachineInstr &MI) {
+ assert(MI.isCall() && "Can only be called on call instructions.");
+ for (const MachineOperand &MO : MI.operands()) {
if (!MO.isRegMask())
continue;
for (unsigned reg = X86::YMM0; reg <= X86::YMM15; ++reg) {
@@ -153,16 +155,16 @@ static bool callClobbersAnyYmmReg(MachineInstr *MI) {
return false;
}
-// Insert a vzeroupper instruction before I.
+/// Insert a vzeroupper instruction before I.
void VZeroUpperInserter::insertVZeroUpper(MachineBasicBlock::iterator I,
- MachineBasicBlock &MBB) {
+ MachineBasicBlock &MBB) {
DebugLoc dl = I->getDebugLoc();
BuildMI(MBB, I, dl, TII->get(X86::VZEROUPPER));
++NumVZU;
EverMadeChange = true;
}
-// Add MBB to the DirtySuccessors list if it hasn't already been added.
+/// Add MBB to the DirtySuccessors list if it hasn't already been added.
void VZeroUpperInserter::addDirtySuccessor(MachineBasicBlock &MBB) {
if (!BlockStates[MBB.getNumber()].AddedToDirtySuccessors) {
DirtySuccessors.push_back(&MBB);
@@ -170,21 +172,29 @@ void VZeroUpperInserter::addDirtySuccessor(MachineBasicBlock &MBB) {
}
}
-/// processBasicBlock - Loop over all of the instructions in the basic block,
-/// inserting vzeroupper instructions before function calls.
+/// Loop over all of the instructions in the basic block, inserting vzeroupper
+/// instructions before function calls.
void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
- // Start by assuming that the block PASS_THROUGH, which implies no unguarded
+ // Start by assuming that the block is PASS_THROUGH which implies no unguarded
// calls.
BlockExitState CurState = PASS_THROUGH;
BlockStates[MBB.getNumber()].FirstUnguardedCall = MBB.end();
- for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I) {
- MachineInstr *MI = I;
- bool isControlFlow = MI->isCall() || MI->isReturn();
+ for (MachineInstr &MI : MBB) {
+ // No need for vzeroupper before iret in an interrupt handler function;
+ // the epilogue will restore YMM registers if needed.
+ bool IsReturnFromX86INTR = IsX86INTR && MI.isReturn();
+ bool IsControlFlow = MI.isCall() || MI.isReturn();
+
+ // An existing VZERO* instruction resets the state.
+ if (MI.getOpcode() == X86::VZEROALL || MI.getOpcode() == X86::VZEROUPPER) {
+ CurState = EXITS_CLEAN;
+ continue;
+ }
// Shortcut: don't need to check regular instructions in dirty state.
- if (!isControlFlow && CurState == EXITS_DIRTY)
+ if ((!IsControlFlow || IsReturnFromX86INTR) && CurState == EXITS_DIRTY)
continue;
if (hasYmmReg(MI)) {
@@ -196,7 +206,7 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
// Check for control-flow out of the current function (which might
// indirectly execute SSE instructions).
- if (!isControlFlow)
+ if (!IsControlFlow || IsReturnFromX86INTR)
continue;
// If the call won't clobber any YMM register, skip it as well. It usually
@@ -204,22 +214,21 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
// standard calling convention is not used (RegMask is not used to mark
// register clobbered and register usage (def/imp-def/use) is well-defined
// and explicitly specified.
- if (MI->isCall() && !callClobbersAnyYmmReg(MI))
+ if (MI.isCall() && !callClobbersAnyYmmReg(MI))
continue;
- // The VZEROUPPER instruction resets the upper 128 bits of all Intel AVX
- // registers. This instruction has zero latency. In addition, the processor
- // changes back to Clean state, after which execution of Intel SSE
- // instructions or Intel AVX instructions has no transition penalty. Add
- // the VZEROUPPER instruction before any function call/return that might
- // execute SSE code.
+ // The VZEROUPPER instruction resets the upper 128 bits of all AVX
+ // registers. In addition, the processor changes back to Clean state, after
+ // which execution of SSE instructions or AVX instructions has no transition
+ // penalty. Add the VZEROUPPER instruction before any function call/return
+ // that might execute SSE code.
// FIXME: In some cases, we may want to move the VZEROUPPER into a
// predecessor block.
if (CurState == EXITS_DIRTY) {
// After the inserted VZEROUPPER the state becomes clean again, but
// other YMM may appear before other subsequent calls or even before
// the end of the BB.
- insertVZeroUpper(I, MBB);
+ insertVZeroUpper(MI, MBB);
CurState = EXITS_CLEAN;
} else if (CurState == PASS_THROUGH) {
// If this block is currently in pass-through state and we encounter a
@@ -227,7 +236,7 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
// block has successors that exit dirty. Record the location of the call,
// and set the state to EXITS_CLEAN, but do not insert the vzeroupper yet.
// It will be inserted later if necessary.
- BlockStates[MBB.getNumber()].FirstUnguardedCall = I;
+ BlockStates[MBB.getNumber()].FirstUnguardedCall = MI;
CurState = EXITS_CLEAN;
}
}
@@ -244,15 +253,16 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
BlockStates[MBB.getNumber()].ExitState = CurState;
}
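A hypothetical single-block trace of the state machine above (mnemonics are illustrative only):

    // vaddps %ymm1, %ymm0, %ymm0   hasYmmReg          -> CurState = EXITS_DIRTY
    // callq foo                    call while dirty   -> insertVZeroUpper(...),
    //                                                    CurState = EXITS_CLEAN
    // callq bar                    already clean, and the block is no longer
    //                              PASS_THROUGH, so nothing is recorded
    // retq                         block exits EXITS_CLEAN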
-/// runOnMachineFunction - Loop over all of the basic blocks, inserting
-/// vzeroupper instructions before function calls.
+/// Loop over all of the basic blocks, inserting vzeroupper instructions before
+/// function calls.
bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
- if (!ST.hasAVX() || ST.hasAVX512())
+ if (!ST.hasAVX() || ST.hasAVX512() || ST.hasFastPartialYMMWrite())
return false;
TII = ST.getInstrInfo();
MachineRegisterInfo &MRI = MF.getRegInfo();
EverMadeChange = false;
+ IsX86INTR = MF.getFunction()->getCallingConv() == CallingConv::X86_INTR;
bool FnHasLiveInYmm = checkFnHasLiveInYmm(MRI);
@@ -284,12 +294,12 @@ bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
for (MachineBasicBlock &MBB : MF)
processBasicBlock(MBB);
- // If any YMM regs are live in to this function, add the entry block to the
+ // If any YMM regs are live-in to this function, add the entry block to the
// DirtySuccessors list
if (FnHasLiveInYmm)
addDirtySuccessor(MF.front());
- // Re-visit all blocks that are successors of EXITS_DIRTY bsocks. Add
+ // Re-visit all blocks that are successors of EXITS_DIRTY blocks. Add
// vzeroupper instructions to unguarded calls, and propagate EXITS_DIRTY
// through PASS_THROUGH blocks.
while (!DirtySuccessors.empty()) {
@@ -302,16 +312,14 @@ bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
if (BBState.FirstUnguardedCall != MBB.end())
insertVZeroUpper(BBState.FirstUnguardedCall, MBB);
- // If this successor was a pass-through block then it is now dirty, and its
+ // If this successor was a pass-through block, then it is now dirty. Its
// successors need to be added to the worklist (if they haven't been
// already).
if (BBState.ExitState == PASS_THROUGH) {
DEBUG(dbgs() << "MBB #" << MBB.getNumber()
<< " was Pass-through, is now Dirty-out.\n");
- for (MachineBasicBlock::succ_iterator SI = MBB.succ_begin(),
- SE = MBB.succ_end();
- SI != SE; ++SI)
- addDirtySuccessor(**SI);
+ for (MachineBasicBlock *Succ : MBB.successors())
+ addDirtySuccessor(*Succ);
}
}
diff --git a/lib/Target/X86/X86WinAllocaExpander.cpp b/lib/Target/X86/X86WinAllocaExpander.cpp
new file mode 100644
index 000000000000..cc82074e685f
--- /dev/null
+++ b/lib/Target/X86/X86WinAllocaExpander.cpp
@@ -0,0 +1,294 @@
+//===----- X86WinAllocaExpander.cpp - Expand WinAlloca pseudo instruction -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a pass that expands WinAlloca pseudo-instructions.
+//
+// It performs a conservative analysis to determine whether each allocation
+// falls within a region of the stack that is safe to use, or whether stack
+// probes must be emitted.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86InstrBuilder.h"
+#include "X86InstrInfo.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+using namespace llvm;
+
+namespace {
+
+class X86WinAllocaExpander : public MachineFunctionPass {
+public:
+ X86WinAllocaExpander() : MachineFunctionPass(ID) {}
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+ /// Strategies for lowering a WinAlloca.
+ enum Lowering { TouchAndSub, Sub, Probe };
+
+ /// Deterministic-order map from WinAlloca instruction to desired lowering.
+ typedef MapVector<MachineInstr*, Lowering> LoweringMap;
+
+ /// Compute which lowering to use for each WinAlloca instruction.
+ void computeLowerings(MachineFunction &MF, LoweringMap& Lowerings);
+
+ /// Get the appropriate lowering based on current offset and amount.
+ Lowering getLowering(int64_t CurrentOffset, int64_t AllocaAmount);
+
+ /// Lower a WinAlloca instruction.
+ void lower(MachineInstr* MI, Lowering L);
+
+ MachineRegisterInfo *MRI;
+ const X86Subtarget *STI;
+ const TargetInstrInfo *TII;
+ const X86RegisterInfo *TRI;
+ unsigned StackPtr;
+ unsigned SlotSize;
+ int64_t StackProbeSize;
+
+ const char *getPassName() const override { return "X86 WinAlloca Expander"; }
+ static char ID;
+};
+
+char X86WinAllocaExpander::ID = 0;
+
+} // end anonymous namespace
+
+FunctionPass *llvm::createX86WinAllocaExpander() {
+ return new X86WinAllocaExpander();
+}
+
+/// Return the allocation amount for a WinAlloca instruction, or -1 if unknown.
+static int64_t getWinAllocaAmount(MachineInstr *MI, MachineRegisterInfo *MRI) {
+ assert(MI->getOpcode() == X86::WIN_ALLOCA_32 ||
+ MI->getOpcode() == X86::WIN_ALLOCA_64);
+ assert(MI->getOperand(0).isReg());
+
+ unsigned AmountReg = MI->getOperand(0).getReg();
+ MachineInstr *Def = MRI->getUniqueVRegDef(AmountReg);
+
+ // Look through copies.
+ while (Def && Def->isCopy() && Def->getOperand(1).isReg())
+ Def = MRI->getUniqueVRegDef(Def->getOperand(1).getReg());
+
+ if (!Def ||
+ (Def->getOpcode() != X86::MOV32ri && Def->getOpcode() != X86::MOV64ri) ||
+ !Def->getOperand(1).isImm())
+ return -1;
+
+ return Def->getOperand(1).getImm();
+}
+
+X86WinAllocaExpander::Lowering
+X86WinAllocaExpander::getLowering(int64_t CurrentOffset,
+ int64_t AllocaAmount) {
+ // For a non-constant amount or a large amount, we have to probe.
+ if (AllocaAmount < 0 || AllocaAmount > StackProbeSize)
+ return Probe;
+
+ // If it fits within the safe region of the stack, just subtract.
+ if (CurrentOffset + AllocaAmount <= StackProbeSize)
+ return Sub;
+
+ // Otherwise, touch the current tip of the stack, then subtract.
+ return TouchAndSub;
+}
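+
+// Worked example, assuming the default StackProbeSize of 4096: at offset 0,
+// an alloca of 4000 fits within the safe region (Sub); at offset 512 the
+// same 4000 bytes would extend past the safe region, so the tip is touched
+// first (TouchAndSub); an alloca of 5000, or of unknown size, needs a Probe.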
+
+static bool isPushPop(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ case X86::PUSH32i8:
+ case X86::PUSH32r:
+ case X86::PUSH32rmm:
+ case X86::PUSH32rmr:
+ case X86::PUSHi32:
+ case X86::PUSH64i8:
+ case X86::PUSH64r:
+ case X86::PUSH64rmm:
+ case X86::PUSH64rmr:
+ case X86::PUSH64i32:
+ case X86::POP32r:
+ case X86::POP64r:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void X86WinAllocaExpander::computeLowerings(MachineFunction &MF,
+ LoweringMap &Lowerings) {
+ // Do a one-pass reverse post-order walk of the CFG to conservatively estimate
+ // the offset between the stack pointer and the lowest touched part of the
+ // stack, and use that to decide how to lower each WinAlloca instruction.
+
+ // Initialize OutOffset[B], the stack offset at exit from B, to something big.
+ DenseMap<MachineBasicBlock *, int64_t> OutOffset;
+ for (MachineBasicBlock &MBB : MF)
+ OutOffset[&MBB] = INT32_MAX;
+
+ // Note: we don't know the offset at the start of the entry block since the
+ // prologue hasn't been inserted yet, and how much that will adjust the stack
+ // pointer depends on register spills, which have not been computed yet.
+
+ // Compute the reverse post-order.
+ ReversePostOrderTraversal<MachineFunction*> RPO(&MF);
+
+ for (MachineBasicBlock *MBB : RPO) {
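+    // Merge conservatively: assume the largest offset among all predecessors.
+    // Predecessors not yet visited (e.g. via loop back-edges) still hold
+    // INT32_MAX, which forces the pessimistic estimate.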
+ int64_t Offset = -1;
+ for (MachineBasicBlock *Pred : MBB->predecessors())
+ Offset = std::max(Offset, OutOffset[Pred]);
+ if (Offset == -1) Offset = INT32_MAX;
+
+ for (MachineInstr &MI : *MBB) {
+ if (MI.getOpcode() == X86::WIN_ALLOCA_32 ||
+ MI.getOpcode() == X86::WIN_ALLOCA_64) {
+ // A WinAlloca moves StackPtr, and potentially touches it.
+ int64_t Amount = getWinAllocaAmount(&MI, MRI);
+ Lowering L = getLowering(Offset, Amount);
+ Lowerings[&MI] = L;
+ switch (L) {
+ case Sub:
+ Offset += Amount;
+ break;
+ case TouchAndSub:
+ Offset = Amount;
+ break;
+ case Probe:
+ Offset = 0;
+ break;
+ }
+ } else if (MI.isCall() || isPushPop(MI)) {
+ // Calls, pushes and pops touch the tip of the stack.
+ Offset = 0;
+ } else if (MI.getOpcode() == X86::ADJCALLSTACKUP32 ||
+ MI.getOpcode() == X86::ADJCALLSTACKUP64) {
+ Offset -= MI.getOperand(0).getImm();
+ } else if (MI.getOpcode() == X86::ADJCALLSTACKDOWN32 ||
+ MI.getOpcode() == X86::ADJCALLSTACKDOWN64) {
+ Offset += MI.getOperand(0).getImm();
+ } else if (MI.modifiesRegister(StackPtr, TRI)) {
+ // Any other modification of SP means we've lost track of it.
+ Offset = INT32_MAX;
+ }
+ }
+
+ OutOffset[MBB] = Offset;
+ }
+}
+
+static unsigned getSubOpcode(bool Is64Bit, int64_t Amount) {
+ if (Is64Bit)
+ return isInt<8>(Amount) ? X86::SUB64ri8 : X86::SUB64ri32;
+ return isInt<8>(Amount) ? X86::SUB32ri8 : X86::SUB32ri;
+}
+
+void X86WinAllocaExpander::lower(MachineInstr* MI, Lowering L) {
+ DebugLoc DL = MI->getDebugLoc();
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineBasicBlock::iterator I = *MI;
+
+ int64_t Amount = getWinAllocaAmount(MI, MRI);
+ if (Amount == 0) {
+ MI->eraseFromParent();
+ return;
+ }
+
+ bool Is64Bit = STI->is64Bit();
+ assert(SlotSize == 4 || SlotSize == 8);
+ unsigned RegA = (SlotSize == 8) ? X86::RAX : X86::EAX;
+
+ switch (L) {
+ case TouchAndSub:
+ assert(Amount >= SlotSize);
+
+ // Use a push to touch the top of the stack.
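+    // The pushed value is irrelevant (hence RegState::Undef); the point is
+    // the memory write at the stack tip, which also moves SP by SlotSize.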
+ BuildMI(*MBB, I, DL, TII->get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
+ .addReg(RegA, RegState::Undef);
+ Amount -= SlotSize;
+ if (!Amount)
+ break;
+
+ // Fall through to make any remaining adjustment.
+ case Sub:
+ assert(Amount > 0);
+ if (Amount == SlotSize) {
+ // Use push to save size.
+ BuildMI(*MBB, I, DL, TII->get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
+ .addReg(RegA, RegState::Undef);
+ } else {
+ // Sub.
+ BuildMI(*MBB, I, DL, TII->get(getSubOpcode(Is64Bit, Amount)), StackPtr)
+ .addReg(StackPtr)
+ .addImm(Amount);
+ }
+ break;
+ case Probe:
+ // The probe lowering expects the amount in RAX/EAX.
+ BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::COPY), RegA)
+ .addReg(MI->getOperand(0).getReg());
+
+ // Do the probe.
+ STI->getFrameLowering()->emitStackProbe(*MBB->getParent(), *MBB, MI, DL,
+ /*InPrologue=*/false);
+ break;
+ }
+
+ unsigned AmountReg = MI->getOperand(0).getReg();
+ MI->eraseFromParent();
+
+  // Delete the dead definition of AmountReg, walking a chain of copies if the
+  // amount was propagated through virtual-register copies.
+  for (;;) {
+    if (!MRI->use_empty(AmountReg))
+      break;
+    MachineInstr *AmountDef = MRI->getUniqueVRegDef(AmountReg);
+    if (!AmountDef)
+      break;
+    bool IsCopy = AmountDef->isCopy() && AmountDef->getOperand(1).isReg();
+    unsigned NextReg = IsCopy ? AmountDef->getOperand(1).getReg() : 0;
+    AmountDef->eraseFromParent();
+    if (!IsCopy)
+      break;
+    AmountReg = NextReg;
+  }
+}
+
+bool X86WinAllocaExpander::runOnMachineFunction(MachineFunction &MF) {
+ if (!MF.getInfo<X86MachineFunctionInfo>()->hasWinAlloca())
+ return false;
+
+ MRI = &MF.getRegInfo();
+ STI = &MF.getSubtarget<X86Subtarget>();
+ TII = STI->getInstrInfo();
+ TRI = STI->getRegisterInfo();
+ StackPtr = TRI->getStackRegister();
+ SlotSize = TRI->getSlotSize();
+
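+  // Default to the 4 KiB Windows page granularity; honor a per-function
+  // override from the "stack-probe-size" attribute if present.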
+ StackProbeSize = 4096;
+ if (MF.getFunction()->hasFnAttribute("stack-probe-size")) {
+ MF.getFunction()
+ ->getFnAttribute("stack-probe-size")
+ .getValueAsString()
+ .getAsInteger(0, StackProbeSize);
+ }
+
+ LoweringMap Lowerings;
+ computeLowerings(MF, Lowerings);
+ for (auto &P : Lowerings)
+ lower(P.first, P.second);
+
+ return true;
+}
diff --git a/lib/Target/X86/X86WinEHState.cpp b/lib/Target/X86/X86WinEHState.cpp
index dce94a9e9ef7..99387edef99a 100644
--- a/lib/Target/X86/X86WinEHState.cpp
+++ b/lib/Target/X86/X86WinEHState.cpp
@@ -15,33 +15,32 @@
//===----------------------------------------------------------------------===//
#include "X86.h"
+#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
-#include "llvm/IR/Dominators.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/Function.h"
-#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
-#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/Cloning.h"
-#include "llvm/Transforms/Utils/Local.h"
+#include <deque>
using namespace llvm;
-using namespace llvm::PatternMatch;
#define DEBUG_TYPE "winehstate"
-namespace llvm { void initializeWinEHStatePassPass(PassRegistry &); }
+namespace llvm {
+void initializeWinEHStatePassPass(PassRegistry &);
+}
namespace {
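+// Sentinel state value: no single state number can be assumed at a program
+// point, either because conflicting states reach it or none is known.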
+const int OverdefinedState = INT_MIN;
+
class WinEHStatePass : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid.
@@ -68,12 +67,20 @@ private:
void linkExceptionRegistration(IRBuilder<> &Builder, Function *Handler);
void unlinkExceptionRegistration(IRBuilder<> &Builder);
void addStateStores(Function &F, WinEHFuncInfo &FuncInfo);
- void insertStateNumberStore(Value *ParentRegNode, Instruction *IP, int State);
+ void insertStateNumberStore(Instruction *IP, int State);
Value *emitEHLSDA(IRBuilder<> &Builder, Function *F);
Function *generateLSDAInEAXThunk(Function *ParentFunc);
+ bool isStateStoreNeeded(EHPersonality Personality, CallSite CS);
+ void rewriteSetJmpCallSite(IRBuilder<> &Builder, Function &F, CallSite CS,
+ Value *State);
+ int getBaseStateForBB(DenseMap<BasicBlock *, ColorVector> &BlockColors,
+ WinEHFuncInfo &FuncInfo, BasicBlock *BB);
+ int getStateForCallSite(DenseMap<BasicBlock *, ColorVector> &BlockColors,
+ WinEHFuncInfo &FuncInfo, CallSite CS);
+
// Module-level type getters.
Type *getEHLinkRegistrationType();
Type *getSEHRegistrationType();
@@ -84,20 +91,23 @@ private:
StructType *EHLinkRegistrationTy = nullptr;
StructType *CXXEHRegistrationTy = nullptr;
StructType *SEHRegistrationTy = nullptr;
- Function *FrameRecover = nullptr;
- Function *FrameAddress = nullptr;
- Function *FrameEscape = nullptr;
+ Constant *SetJmp3 = nullptr;
+ Constant *CxxLongjmpUnwind = nullptr;
// Per-function state
EHPersonality Personality = EHPersonality::Unknown;
Function *PersonalityFn = nullptr;
+ bool UseStackGuard = false;
+ int ParentBaseState;
+ Constant *SehLongjmpUnwind = nullptr;
+ Constant *Cookie = nullptr;
/// The stack allocation containing all EH data, including the link in the
/// fs:00 chain and the current state.
AllocaInst *RegNode = nullptr;
- /// Struct type of RegNode. Used for GEPing.
- Type *RegNodeTy = nullptr;
+ // The allocation containing the EH security guard.
+ AllocaInst *EHGuardNode = nullptr;
/// The index of the state field of RegNode.
int StateFieldIndex = ~0U;
@@ -116,9 +126,6 @@ INITIALIZE_PASS(WinEHStatePass, "x86-winehstate",
bool WinEHStatePass::doInitialization(Module &M) {
TheModule = &M;
- FrameEscape = Intrinsic::getDeclaration(TheModule, Intrinsic::localescape);
- FrameRecover = Intrinsic::getDeclaration(TheModule, Intrinsic::localrecover);
- FrameAddress = Intrinsic::getDeclaration(TheModule, Intrinsic::frameaddress);
return false;
}
@@ -128,9 +135,10 @@ bool WinEHStatePass::doFinalization(Module &M) {
EHLinkRegistrationTy = nullptr;
CXXEHRegistrationTy = nullptr;
SEHRegistrationTy = nullptr;
- FrameEscape = nullptr;
- FrameRecover = nullptr;
- FrameAddress = nullptr;
+ SetJmp3 = nullptr;
+ CxxLongjmpUnwind = nullptr;
+ SehLongjmpUnwind = nullptr;
+ Cookie = nullptr;
return false;
}
@@ -164,6 +172,13 @@ bool WinEHStatePass::runOnFunction(Function &F) {
if (!HasPads)
return false;
+ Type *Int8PtrType = Type::getInt8PtrTy(TheModule->getContext());
+ SetJmp3 = TheModule->getOrInsertFunction(
+ "_setjmp3", FunctionType::get(
+ Type::getInt32Ty(TheModule->getContext()),
+ {Int8PtrType, Type::getInt32Ty(TheModule->getContext())},
+ /*isVarArg=*/true));
+
// Disable frame pointer elimination in this function.
// FIXME: Do the nested handlers need to keep the parent ebp in ebp, or can we
// use an arbitrary register?
@@ -182,6 +197,10 @@ bool WinEHStatePass::runOnFunction(Function &F) {
// Reset per-function state.
PersonalityFn = nullptr;
Personality = EHPersonality::Unknown;
+ UseStackGuard = false;
+ RegNode = nullptr;
+ EHGuardNode = nullptr;
+
return true;
}
@@ -256,9 +275,14 @@ void WinEHStatePass::emitExceptionRegistrationRecord(Function *F) {
assert(Personality == EHPersonality::MSVC_CXX ||
Personality == EHPersonality::MSVC_X86SEH);
- StringRef PersonalityName = PersonalityFn->getName();
+ // Struct type of RegNode. Used for GEPing.
+ Type *RegNodeTy;
+
IRBuilder<> Builder(&F->getEntryBlock(), F->getEntryBlock().begin());
Type *Int8PtrType = Builder.getInt8PtrTy();
+ Type *Int32Ty = Builder.getInt32Ty();
+ Type *VoidTy = Builder.getVoidTy();
+
if (Personality == EHPersonality::MSVC_CXX) {
RegNodeTy = getCXXEHRegistrationType();
RegNode = Builder.CreateAlloca(RegNodeTy);
@@ -268,42 +292,71 @@ void WinEHStatePass::emitExceptionRegistrationRecord(Function *F) {
Builder.CreateStore(SP, Builder.CreateStructGEP(RegNodeTy, RegNode, 0));
// TryLevel = -1
StateFieldIndex = 2;
- insertStateNumberStore(RegNode, &*Builder.GetInsertPoint(), -1);
+ ParentBaseState = -1;
+ insertStateNumberStore(&*Builder.GetInsertPoint(), ParentBaseState);
// Handler = __ehhandler$F
Function *Trampoline = generateLSDAInEAXThunk(F);
Link = Builder.CreateStructGEP(RegNodeTy, RegNode, 1);
linkExceptionRegistration(Builder, Trampoline);
+
+ CxxLongjmpUnwind = TheModule->getOrInsertFunction(
+ "__CxxLongjmpUnwind",
+ FunctionType::get(VoidTy, Int8PtrType, /*isVarArg=*/false));
+ cast<Function>(CxxLongjmpUnwind->stripPointerCasts())
+ ->setCallingConv(CallingConv::X86_StdCall);
} else if (Personality == EHPersonality::MSVC_X86SEH) {
// If _except_handler4 is in use, some additional guard checks and prologue
// stuff is required.
- bool UseStackGuard = (PersonalityName == "_except_handler4");
+ StringRef PersonalityName = PersonalityFn->getName();
+ UseStackGuard = (PersonalityName == "_except_handler4");
+
+ // Allocate local structures.
RegNodeTy = getSEHRegistrationType();
RegNode = Builder.CreateAlloca(RegNodeTy);
+ if (UseStackGuard)
+ EHGuardNode = Builder.CreateAlloca(Int32Ty);
+
// SavedESP = llvm.stacksave()
Value *SP = Builder.CreateCall(
Intrinsic::getDeclaration(TheModule, Intrinsic::stacksave), {});
Builder.CreateStore(SP, Builder.CreateStructGEP(RegNodeTy, RegNode, 0));
// TryLevel = -2 / -1
StateFieldIndex = 4;
- insertStateNumberStore(RegNode, &*Builder.GetInsertPoint(),
- UseStackGuard ? -2 : -1);
+ ParentBaseState = UseStackGuard ? -2 : -1;
+ insertStateNumberStore(&*Builder.GetInsertPoint(), ParentBaseState);
// ScopeTable = llvm.x86.seh.lsda(F)
- Value *FI8 = Builder.CreateBitCast(F, Int8PtrType);
- Value *LSDA = Builder.CreateCall(
- Intrinsic::getDeclaration(TheModule, Intrinsic::x86_seh_lsda), FI8);
- Type *Int32Ty = Type::getInt32Ty(TheModule->getContext());
+ Value *LSDA = emitEHLSDA(Builder, F);
LSDA = Builder.CreatePtrToInt(LSDA, Int32Ty);
// If using _except_handler4, xor the address of the table with
// __security_cookie.
if (UseStackGuard) {
- Value *Cookie =
- TheModule->getOrInsertGlobal("__security_cookie", Int32Ty);
- Value *Val = Builder.CreateLoad(Int32Ty, Cookie);
+ Cookie = TheModule->getOrInsertGlobal("__security_cookie", Int32Ty);
+ Value *Val = Builder.CreateLoad(Int32Ty, Cookie, "cookie");
LSDA = Builder.CreateXor(LSDA, Val);
}
Builder.CreateStore(LSDA, Builder.CreateStructGEP(RegNodeTy, RegNode, 3));
+
+ // If using _except_handler4, the EHGuard contains: FramePtr xor Cookie.
+ if (UseStackGuard) {
+ Value *Val = Builder.CreateLoad(Int32Ty, Cookie);
+ Value *FrameAddr = Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::frameaddress),
+ Builder.getInt32(0), "frameaddr");
+ Value *FrameAddrI32 = Builder.CreatePtrToInt(FrameAddr, Int32Ty);
+ FrameAddrI32 = Builder.CreateXor(FrameAddrI32, Val);
+ Builder.CreateStore(FrameAddrI32, EHGuardNode);
+ }
+
+ // Register the exception handler.
Link = Builder.CreateStructGEP(RegNodeTy, RegNode, 2);
linkExceptionRegistration(Builder, PersonalityFn);
+
+ SehLongjmpUnwind = TheModule->getOrInsertFunction(
+ UseStackGuard ? "_seh_longjmp_unwind4" : "_seh_longjmp_unwind",
+ FunctionType::get(Type::getVoidTy(TheModule->getContext()), Int8PtrType,
+ /*isVarArg=*/false));
+ cast<Function>(SehLongjmpUnwind->stripPointerCasts())
+ ->setCallingConv(CallingConv::X86_StdCall);
} else {
llvm_unreachable("unexpected personality function");
}
@@ -398,15 +451,203 @@ void WinEHStatePass::unlinkExceptionRegistration(IRBuilder<> &Builder) {
Builder.CreateStore(Next, FSZero);
}
+// Calls to setjmp(p) are lowered to _setjmp3(p, 0) by the frontend.
+// The idea behind _setjmp3 is that it takes an optional number of personality
+// specific parameters to indicate how to restore the personality-specific frame
+// state when longjmp is initiated. Typically, the current TryLevel is saved.
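+//
+// Illustrative example (names abbreviated, types approximate): for the MSVC
+// C++ personality,
+//   %r = call i32 (i8*, i32, ...) @_setjmp3(i8* %buf, i32 0)
+// is rewritten to
+//   %r = call i32 (i8*, i32, ...) @_setjmp3(i8* %buf, i32 3,
+//            i8* @__CxxLongjmpUnwind, i32 %state, i8* %lsda)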
+void WinEHStatePass::rewriteSetJmpCallSite(IRBuilder<> &Builder, Function &F,
+ CallSite CS, Value *State) {
+ // Don't rewrite calls with a weird number of arguments.
+ if (CS.getNumArgOperands() != 2)
+ return;
+
+ Instruction *Inst = CS.getInstruction();
+
+ SmallVector<OperandBundleDef, 1> OpBundles;
+ CS.getOperandBundlesAsDefs(OpBundles);
+
+ SmallVector<Value *, 3> OptionalArgs;
+ if (Personality == EHPersonality::MSVC_CXX) {
+ OptionalArgs.push_back(CxxLongjmpUnwind);
+ OptionalArgs.push_back(State);
+ OptionalArgs.push_back(emitEHLSDA(Builder, &F));
+ } else if (Personality == EHPersonality::MSVC_X86SEH) {
+ OptionalArgs.push_back(SehLongjmpUnwind);
+ OptionalArgs.push_back(State);
+ if (UseStackGuard)
+ OptionalArgs.push_back(Cookie);
+ } else {
+ llvm_unreachable("unhandled personality!");
+ }
+
+ SmallVector<Value *, 5> Args;
+ Args.push_back(
+ Builder.CreateBitCast(CS.getArgOperand(0), Builder.getInt8PtrTy()));
+ Args.push_back(Builder.getInt32(OptionalArgs.size()));
+ Args.append(OptionalArgs.begin(), OptionalArgs.end());
+
+ CallSite NewCS;
+ if (CS.isCall()) {
+ auto *CI = cast<CallInst>(Inst);
+ CallInst *NewCI = Builder.CreateCall(SetJmp3, Args, OpBundles);
+ NewCI->setTailCallKind(CI->getTailCallKind());
+ NewCS = NewCI;
+ } else {
+ auto *II = cast<InvokeInst>(Inst);
+ NewCS = Builder.CreateInvoke(
+ SetJmp3, II->getNormalDest(), II->getUnwindDest(), Args, OpBundles);
+ }
+ NewCS.setCallingConv(CS.getCallingConv());
+ NewCS.setAttributes(CS.getAttributes());
+ NewCS->setDebugLoc(CS->getDebugLoc());
+
+ Instruction *NewInst = NewCS.getInstruction();
+ NewInst->takeName(Inst);
+ Inst->replaceAllUsesWith(NewInst);
+ Inst->eraseFromParent();
+}
+
+// Figure out what state we should assign calls in this block.
+int WinEHStatePass::getBaseStateForBB(
+ DenseMap<BasicBlock *, ColorVector> &BlockColors, WinEHFuncInfo &FuncInfo,
+ BasicBlock *BB) {
+ int BaseState = ParentBaseState;
+ auto &BBColors = BlockColors[BB];
+
+ assert(BBColors.size() == 1 && "multi-color BB not removed by preparation");
+ BasicBlock *FuncletEntryBB = BBColors.front();
+ if (auto *FuncletPad =
+ dyn_cast<FuncletPadInst>(FuncletEntryBB->getFirstNonPHI())) {
+ auto BaseStateI = FuncInfo.FuncletBaseStateMap.find(FuncletPad);
+ if (BaseStateI != FuncInfo.FuncletBaseStateMap.end())
+ BaseState = BaseStateI->second;
+ }
+
+ return BaseState;
+}
+
+// Calculate the state a call-site is in.
+int WinEHStatePass::getStateForCallSite(
+ DenseMap<BasicBlock *, ColorVector> &BlockColors, WinEHFuncInfo &FuncInfo,
+ CallSite CS) {
+ if (auto *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
+ // Look up the state number of the EH pad this unwinds to.
+ assert(FuncInfo.InvokeStateMap.count(II) && "invoke has no state!");
+ return FuncInfo.InvokeStateMap[II];
+ }
+  // Possibly throwing call instructions have no actions to take after
+  // an unwind. Ensure they are in the enclosing funclet's base state.
+  return getBaseStateForBB(BlockColors, FuncInfo, CS.getParent());
+}
+
+// Calculate the intersection of all the FinalStates for a BasicBlock's
+// predecessors.
+static int getPredState(DenseMap<BasicBlock *, int> &FinalStates, Function &F,
+ int ParentBaseState, BasicBlock *BB) {
+ // The entry block has no predecessors but we know that the prologue always
+ // sets us up with a fixed state.
+ if (&F.getEntryBlock() == BB)
+ return ParentBaseState;
+
+ // This is an EH Pad, conservatively report this basic block as overdefined.
+ if (BB->isEHPad())
+ return OverdefinedState;
+
+ int CommonState = OverdefinedState;
+ for (BasicBlock *PredBB : predecessors(BB)) {
+ // We didn't manage to get a state for one of these predecessors,
+ // conservatively report this basic block as overdefined.
+ auto PredEndState = FinalStates.find(PredBB);
+ if (PredEndState == FinalStates.end())
+ return OverdefinedState;
+
+ // This code is reachable via exceptional control flow,
+ // conservatively report this basic block as overdefined.
+ if (isa<CatchReturnInst>(PredBB->getTerminator()))
+ return OverdefinedState;
+
+ int PredState = PredEndState->second;
+ assert(PredState != OverdefinedState &&
+ "overdefined BBs shouldn't be in FinalStates");
+ if (CommonState == OverdefinedState)
+ CommonState = PredState;
+
+ // At least two predecessors have different FinalStates,
+ // conservatively report this basic block as overdefined.
+ if (CommonState != PredState)
+ return OverdefinedState;
+ }
+
+ return CommonState;
+}
+
+// Calculate the intersection of all the InitialStates for a BasicBlock's
+// successors.
+static int getSuccState(DenseMap<BasicBlock *, int> &InitialStates, Function &F,
+ int ParentBaseState, BasicBlock *BB) {
+ // This block rejoins normal control flow,
+ // conservatively report this basic block as overdefined.
+ if (isa<CatchReturnInst>(BB->getTerminator()))
+ return OverdefinedState;
+
+ int CommonState = OverdefinedState;
+ for (BasicBlock *SuccBB : successors(BB)) {
+    // We didn't manage to get a state for one of these successors,
+ // conservatively report this basic block as overdefined.
+ auto SuccStartState = InitialStates.find(SuccBB);
+ if (SuccStartState == InitialStates.end())
+ return OverdefinedState;
+
+ // This is an EH Pad, conservatively report this basic block as overdefined.
+ if (SuccBB->isEHPad())
+ return OverdefinedState;
+
+ int SuccState = SuccStartState->second;
+ assert(SuccState != OverdefinedState &&
+ "overdefined BBs shouldn't be in FinalStates");
+ if (CommonState == OverdefinedState)
+ CommonState = SuccState;
+
+ // At least two successors have different InitialStates,
+ // conservatively report this basic block as overdefined.
+ if (CommonState != SuccState)
+ return OverdefinedState;
+ }
+
+ return CommonState;
+}
+
+bool WinEHStatePass::isStateStoreNeeded(EHPersonality Personality,
+ CallSite CS) {
+ if (!CS)
+ return false;
+
+  // Under an asynchronous (SEH) personality, any memory access may fault and
+  // dispatch to a handler, so a state store is needed unless the callee
+  // provably does not touch memory.
+  if (isAsynchronousEHPersonality(Personality))
+    return !CS.doesNotAccessMemory();
+
+  // Otherwise a state store is only needed if the call may throw.
+  return !CS.doesNotThrow();
+}
+
void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) {
// Mark the registration node. The backend needs to know which alloca it is so
// that it can recover the original frame pointer.
- IRBuilder<> Builder(RegNode->getParent(), std::next(RegNode->getIterator()));
+ IRBuilder<> Builder(RegNode->getNextNode());
Value *RegNodeI8 = Builder.CreateBitCast(RegNode, Builder.getInt8PtrTy());
Builder.CreateCall(
Intrinsic::getDeclaration(TheModule, Intrinsic::x86_seh_ehregnode),
{RegNodeI8});
+ if (EHGuardNode) {
+ IRBuilder<> Builder(EHGuardNode->getNextNode());
+ Value *EHGuardNodeI8 =
+ Builder.CreateBitCast(EHGuardNode, Builder.getInt8PtrTy());
+ Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_seh_ehguard),
+ {EHGuardNodeI8});
+ }
+
// Calculate state numbers.
if (isAsynchronousEHPersonality(Personality))
calculateSEHStateNumbers(&F, FuncInfo);
@@ -415,42 +656,141 @@ void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) {
// Iterate all the instructions and emit state number stores.
DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(F);
- for (BasicBlock &BB : F) {
- // Figure out what state we should assign calls in this block.
- int BaseState = -1;
- auto &BBColors = BlockColors[&BB];
+ ReversePostOrderTraversal<Function *> RPOT(&F);
+
+ // InitialStates yields the state of the first call-site for a BasicBlock.
+ DenseMap<BasicBlock *, int> InitialStates;
+ // FinalStates yields the state of the last call-site for a BasicBlock.
+ DenseMap<BasicBlock *, int> FinalStates;
+ // Worklist used to revisit BasicBlocks with indeterminate
+ // Initial/Final-States.
+ std::deque<BasicBlock *> Worklist;
+ // Fill in InitialStates and FinalStates for BasicBlocks with call-sites.
+ for (BasicBlock *BB : RPOT) {
+ int InitialState = OverdefinedState;
+ int FinalState;
+ if (&F.getEntryBlock() == BB)
+ InitialState = FinalState = ParentBaseState;
+ for (Instruction &I : *BB) {
+ CallSite CS(&I);
+ if (!isStateStoreNeeded(Personality, CS))
+ continue;
+
+ int State = getStateForCallSite(BlockColors, FuncInfo, CS);
+ if (InitialState == OverdefinedState)
+ InitialState = State;
+ FinalState = State;
+ }
+ // No call-sites in this basic block? That's OK, we will come back to these
+ // in a later pass.
+ if (InitialState == OverdefinedState) {
+ Worklist.push_back(BB);
+ continue;
+ }
+ DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
+ << " InitialState=" << InitialState << '\n');
+ DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
+ << " FinalState=" << FinalState << '\n');
+ InitialStates.insert({BB, InitialState});
+ FinalStates.insert({BB, FinalState});
+ }
- assert(BBColors.size() == 1 &&
- "multi-color BB not removed by preparation");
+ // Try to fill-in InitialStates and FinalStates which have no call-sites.
+ while (!Worklist.empty()) {
+ BasicBlock *BB = Worklist.front();
+ Worklist.pop_front();
+ // This BasicBlock has already been figured out, nothing more we can do.
+ if (InitialStates.count(BB) != 0)
+ continue;
+
+ int PredState = getPredState(FinalStates, F, ParentBaseState, BB);
+ if (PredState == OverdefinedState)
+ continue;
+
+    // We successfully inferred this BasicBlock's state via its predecessors;
+    // enqueue its successors to see if we can infer their states.
+ InitialStates.insert({BB, PredState});
+ FinalStates.insert({BB, PredState});
+ for (BasicBlock *SuccBB : successors(BB))
+ Worklist.push_back(SuccBB);
+ }
+
+ // Try to hoist stores from successors.
+ for (BasicBlock *BB : RPOT) {
+ int SuccState = getSuccState(InitialStates, F, ParentBaseState, BB);
+ if (SuccState == OverdefinedState)
+ continue;
+
+ // Update our FinalState to reflect the common InitialState of our
+ // successors.
+ FinalStates.insert({BB, SuccState});
+ }
+
+ // Finally, insert state stores before call-sites which transition us to a new
+ // state.
+ for (BasicBlock *BB : RPOT) {
+ auto &BBColors = BlockColors[BB];
BasicBlock *FuncletEntryBB = BBColors.front();
- if (auto *FuncletPad =
- dyn_cast<FuncletPadInst>(FuncletEntryBB->getFirstNonPHI())) {
- auto BaseStateI = FuncInfo.FuncletBaseStateMap.find(FuncletPad);
- if (BaseStateI != FuncInfo.FuncletBaseStateMap.end())
- BaseState = BaseStateI->second;
+ if (isa<CleanupPadInst>(FuncletEntryBB->getFirstNonPHI()))
+ continue;
+
+ int PrevState = getPredState(FinalStates, F, ParentBaseState, BB);
+ DEBUG(dbgs() << "X86WinEHState: " << BB->getName()
+ << " PrevState=" << PrevState << '\n');
+
+ for (Instruction &I : *BB) {
+ CallSite CS(&I);
+ if (!isStateStoreNeeded(Personality, CS))
+ continue;
+
+ int State = getStateForCallSite(BlockColors, FuncInfo, CS);
+ if (State != PrevState)
+ insertStateNumberStore(&I, State);
+ PrevState = State;
}
- for (Instruction &I : BB) {
- if (auto *CI = dyn_cast<CallInst>(&I)) {
- // Possibly throwing call instructions have no actions to take after
- // an unwind. Ensure they are in the -1 state.
- if (CI->doesNotThrow())
- continue;
- insertStateNumberStore(RegNode, CI, BaseState);
- } else if (auto *II = dyn_cast<InvokeInst>(&I)) {
- // Look up the state number of the landingpad this unwinds to.
- assert(FuncInfo.InvokeStateMap.count(II) && "invoke has no state!");
- int State = FuncInfo.InvokeStateMap[II];
- insertStateNumberStore(RegNode, II, State);
- }
+ // We might have hoisted a state store into this block, emit it now.
+ auto EndState = FinalStates.find(BB);
+ if (EndState != FinalStates.end())
+ if (EndState->second != PrevState)
+ insertStateNumberStore(BB->getTerminator(), EndState->second);
+ }
+
+ SmallVector<CallSite, 1> SetJmp3CallSites;
+ for (BasicBlock *BB : RPOT) {
+ for (Instruction &I : *BB) {
+ CallSite CS(&I);
+ if (!CS)
+ continue;
+ if (CS.getCalledValue()->stripPointerCasts() !=
+ SetJmp3->stripPointerCasts())
+ continue;
+
+ SetJmp3CallSites.push_back(CS);
+ }
+ }
+
+ for (CallSite CS : SetJmp3CallSites) {
+ auto &BBColors = BlockColors[CS->getParent()];
+ BasicBlock *FuncletEntryBB = BBColors.front();
+ bool InCleanup = isa<CleanupPadInst>(FuncletEntryBB->getFirstNonPHI());
+
+ IRBuilder<> Builder(CS.getInstruction());
+ Value *State;
+ if (InCleanup) {
+ Value *StateField =
+ Builder.CreateStructGEP(nullptr, RegNode, StateFieldIndex);
+ State = Builder.CreateLoad(StateField);
+ } else {
+ State = Builder.getInt32(getStateForCallSite(BlockColors, FuncInfo, CS));
}
+ rewriteSetJmpCallSite(Builder, F, CS, State);
}
}
-void WinEHStatePass::insertStateNumberStore(Value *ParentRegNode,
- Instruction *IP, int State) {
+void WinEHStatePass::insertStateNumberStore(Instruction *IP, int State) {
IRBuilder<> Builder(IP);
Value *StateField =
- Builder.CreateStructGEP(RegNodeTy, ParentRegNode, StateFieldIndex);
+ Builder.CreateStructGEP(nullptr, RegNode, StateFieldIndex);
Builder.CreateStore(Builder.getInt32(State), StateField);
}
diff --git a/lib/Target/XCore/Disassembler/Makefile b/lib/Target/XCore/Disassembler/Makefile
deleted file mode 100644
index 4caffdd1da6a..000000000000
--- a/lib/Target/XCore/Disassembler/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/XCore/Disassembler/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMXCoreDisassembler
-
-# Hack: we need to include 'main' XCore target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp b/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
index aaf267af5311..2e8f762458a7 100644
--- a/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
+++ b/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
@@ -15,7 +15,7 @@
#include "XCore.h"
#include "XCoreRegisterInfo.h"
#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
diff --git a/lib/Target/XCore/InstPrinter/Makefile b/lib/Target/XCore/InstPrinter/Makefile
deleted file mode 100644
index 1c1c61299c39..000000000000
--- a/lib/Target/XCore/InstPrinter/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/XCore/AsmPrinter/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMXCoreAsmPrinter
-
-# Hack: we need to include 'main' xcore target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/XCore/MCTargetDesc/Makefile b/lib/Target/XCore/MCTargetDesc/Makefile
deleted file mode 100644
index de61543bfe9c..000000000000
--- a/lib/Target/XCore/MCTargetDesc/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/XCore/TargetDesc/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMXCoreDesc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp b/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
index b4085835f285..63ca1e7d4646 100644
--- a/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
+++ b/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
@@ -15,7 +15,6 @@
#include "InstPrinter/XCoreInstPrinter.h"
#include "XCoreMCAsmInfo.h"
#include "XCoreTargetStreamer.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
@@ -62,22 +61,13 @@ static MCAsmInfo *createXCoreMCAsmInfo(const MCRegisterInfo &MRI,
return MAI;
}
-static MCCodeGenInfo *createXCoreMCCodeGenInfo(const Triple &TT,
- Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
- if (RM == Reloc::Default) {
- RM = Reloc::Static;
- }
+static void adjustCodeGenOpts(const Triple &TT, Reloc::Model RM,
+ CodeModel::Model &CM) {
if (CM == CodeModel::Default) {
CM = CodeModel::Small;
}
if (CM != CodeModel::Small && CM != CodeModel::Large)
report_fatal_error("Target only supports CodeModel Small or Large");
-
- X->initMCCodeGenInfo(RM, CM, OL);
- return X;
}
static MCInstPrinter *createXCoreMCInstPrinter(const Triple &T,
@@ -137,8 +127,8 @@ extern "C" void LLVMInitializeXCoreTargetMC() {
RegisterMCAsmInfoFn X(TheXCoreTarget, createXCoreMCAsmInfo);
// Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(TheXCoreTarget,
- createXCoreMCCodeGenInfo);
+ TargetRegistry::registerMCAdjustCodeGenOpts(TheXCoreTarget,
+ adjustCodeGenOpts);
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(TheXCoreTarget, createXCoreMCInstrInfo);
diff --git a/lib/Target/XCore/Makefile b/lib/Target/XCore/Makefile
deleted file mode 100644
index 92ddc8860876..000000000000
--- a/lib/Target/XCore/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-##===- lib/Target/XCore/Makefile ---------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMXCoreCodeGen
-TARGET = XCore
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = XCoreGenRegisterInfo.inc XCoreGenInstrInfo.inc \
- XCoreGenAsmWriter.inc \
- XCoreGenDAGISel.inc XCoreGenCallingConv.inc \
- XCoreGenDisassemblerTables.inc XCoreGenSubtargetInfo.inc
-
-DIRS = Disassembler InstPrinter TargetInfo MCTargetDesc
-
-include $(LEVEL)/Makefile.common
-
diff --git a/lib/Target/XCore/TargetInfo/Makefile b/lib/Target/XCore/TargetInfo/Makefile
deleted file mode 100644
index f8a409517497..000000000000
--- a/lib/Target/XCore/TargetInfo/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- lib/Target/XCore/TargetInfo/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMXCoreInfo
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/lib/Target/XCore/XCoreAsmPrinter.cpp b/lib/Target/XCore/XCoreAsmPrinter.cpp
index b00cdd5040eb..be66e6cb8124 100644
--- a/lib/Target/XCore/XCoreAsmPrinter.cpp
+++ b/lib/Target/XCore/XCoreAsmPrinter.cpp
@@ -93,8 +93,7 @@ void XCoreAsmPrinter::emitArrayBound(MCSymbol *Sym, const GlobalVariable *GV) {
assert( ( GV->hasExternalLinkage() || GV->hasWeakLinkage() ||
GV->hasLinkOnceLinkage() || GV->hasCommonLinkage() ) &&
"Unexpected linkage");
- if (ArrayType *ATy = dyn_cast<ArrayType>(
- cast<PointerType>(GV->getType())->getElementType())) {
+ if (ArrayType *ATy = dyn_cast<ArrayType>(GV->getValueType())) {
MCSymbol *SymGlob = OutContext.getOrCreateSymbol(
Twine(Sym->getName() + StringRef(".globound")));
diff --git a/lib/Target/XCore/XCoreFrameLowering.cpp b/lib/Target/XCore/XCoreFrameLowering.cpp
index ae493de083b8..75a2eb0fdd2f 100644
--- a/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -58,10 +58,9 @@ static bool CompareSSIOffset(const StackSlotInfo& a, const StackSlotInfo& b) {
return a.Offset < b.Offset;
}
-
static void EmitDefCfaRegister(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc dl,
- const TargetInstrInfo &TII,
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &dl, const TargetInstrInfo &TII,
MachineModuleInfo *MMI, unsigned DRegNum) {
unsigned CFIIndex = MMI->addFrameInst(
MCCFIInstruction::createDefCfaRegister(nullptr, DRegNum));
@@ -70,8 +69,8 @@ static void EmitDefCfaRegister(MachineBasicBlock &MBB,
}
static void EmitDefCfaOffset(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc dl,
- const TargetInstrInfo &TII,
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &dl, const TargetInstrInfo &TII,
MachineModuleInfo *MMI, int Offset) {
unsigned CFIIndex =
MMI->addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, -Offset));
@@ -80,7 +79,7 @@ static void EmitDefCfaOffset(MachineBasicBlock &MBB,
}
static void EmitCfiOffset(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc dl,
+ MachineBasicBlock::iterator MBBI, const DebugLoc &dl,
const TargetInstrInfo &TII, MachineModuleInfo *MMI,
unsigned DRegNum, int Offset) {
unsigned CFIIndex = MMI->addFrameInst(
@@ -96,7 +95,7 @@ static void EmitCfiOffset(MachineBasicBlock &MBB,
/// \param OffsetFromTop the spill offset from the top of the frame.
/// \param [in,out] Adjusted the current SP offset from the top of the frame.
static void IfNeededExtSP(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc dl,
+ MachineBasicBlock::iterator MBBI, const DebugLoc &dl,
const TargetInstrInfo &TII, MachineModuleInfo *MMI,
int OffsetFromTop, int &Adjusted, int FrameSize,
bool emitFrameMoves) {
@@ -120,7 +119,7 @@ static void IfNeededExtSP(MachineBasicBlock &MBB,
/// \param [in,out] RemainingAdj the current SP offset from the top of the
/// frame.
static void IfNeededLDAWSP(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc dl,
+ MachineBasicBlock::iterator MBBI, const DebugLoc &dl,
const TargetInstrInfo &TII, int OffsetFromTop,
int &RemainingAdj) {
while (OffsetFromTop < RemainingAdj - MaxImmU16) {
@@ -173,8 +172,9 @@ static void GetEHSpillList(SmallVectorImpl<StackSlotInfo> &SpillList,
std::sort(SpillList.begin(), SpillList.end(), CompareSSIOffset);
}
-static MachineMemOperand *
-getFrameIndexMMO(MachineBasicBlock &MBB, int FrameIndex, unsigned flags) {
+static MachineMemOperand *getFrameIndexMMO(MachineBasicBlock &MBB,
+ int FrameIndex,
+ MachineMemOperand::Flags flags) {
MachineFunction *MF = MBB.getParent();
const MachineFrameInfo &MFI = *MF->getFrameInfo();
MachineMemOperand *MMO = MF->getMachineMemOperand(
@@ -187,10 +187,11 @@ getFrameIndexMMO(MachineBasicBlock &MBB, int FrameIndex, unsigned flags) {
/// Restore clobbered registers with their spill slot value.
/// The SP will be adjusted at the same time, thus the SpillList must be ordered
/// with the largest (negative) offsets first.
-static void
-RestoreSpillList(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
- DebugLoc dl, const TargetInstrInfo &TII, int &RemainingAdj,
- SmallVectorImpl<StackSlotInfo> &SpillList) {
+static void RestoreSpillList(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ const DebugLoc &dl, const TargetInstrInfo &TII,
+ int &RemainingAdj,
+ SmallVectorImpl<StackSlotInfo> &SpillList) {
for (unsigned i = 0, e = SpillList.size(); i != e; ++i) {
assert(SpillList[i].Offset % 4 == 0 && "Misaligned stack offset");
assert(SpillList[i].Offset <= 0 && "Unexpected positive stack offset");
@@ -482,9 +483,9 @@ restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
// This function eliminates ADJCALLSTACKDOWN,
// ADJCALLSTACKUP pseudo instructions
-void XCoreFrameLowering::
-eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
+MachineBasicBlock::iterator XCoreFrameLowering::eliminateCallFramePseudoInstr(
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
if (!hasReservedCallFrame(MF)) {
// Turn the adjcallstackdown instruction into 'extsp <amt>' and the
@@ -528,7 +529,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
}
}
- MBB.erase(I);
+ return MBB.erase(I);
}
void XCoreFrameLowering::determineCalleeSaves(MachineFunction &MF,
diff --git a/lib/Target/XCore/XCoreFrameLowering.h b/lib/Target/XCore/XCoreFrameLowering.h
index 69c71adc8d3f..8729d2208bb2 100644
--- a/lib/Target/XCore/XCoreFrameLowering.h
+++ b/lib/Target/XCore/XCoreFrameLowering.h
@@ -41,8 +41,8 @@ namespace llvm {
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const override;
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator
+ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const override;
bool hasFP(const MachineFunction &MF) const override;
diff --git a/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp b/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp
index 77292c4f8f52..bd6baef3271e 100644
--- a/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp
+++ b/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp
@@ -17,7 +17,6 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
@@ -28,6 +27,10 @@ namespace {
XCoreFTAOElim() : MachineFunctionPass(ID) {}
bool runOnMachineFunction(MachineFunction &Fn) override;
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::AllVRegsAllocated);
+ }
const char *getPassName() const override {
return "XCore FRAME_TO_ARGS_OFFSET Elimination";
diff --git a/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/lib/Target/XCore/XCoreISelDAGToDAG.cpp
index 9f61c84cd445..ce25cbcfd124 100644
--- a/lib/Target/XCore/XCoreISelDAGToDAG.cpp
+++ b/lib/Target/XCore/XCoreISelDAGToDAG.cpp
@@ -25,7 +25,6 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -42,12 +41,12 @@ namespace {
XCoreDAGToDAGISel(XCoreTargetMachine &TM, CodeGenOpt::Level OptLevel)
: SelectionDAGISel(TM, OptLevel) {}
- SDNode *Select(SDNode *N) override;
- SDNode *SelectBRIND(SDNode *N);
+ void Select(SDNode *N) override;
+ bool tryBRIND(SDNode *N);
/// getI32Imm - Return a target constant with the specified value, of type
/// i32.
- inline SDValue getI32Imm(unsigned Imm, SDLoc dl) {
+ inline SDValue getI32Imm(unsigned Imm, const SDLoc &dl) {
return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
}
@@ -70,14 +69,14 @@ namespace {
const char *getPassName() const override {
return "XCore DAG->DAG Pattern Instruction Selection";
- }
-
+ }
+
// Include the pieces autogenerated from the target description.
#include "XCoreGenDAGISel.inc"
};
} // end anonymous namespace
-/// createXCoreISelDag - This pass converts a legalized DAG into a
+/// createXCoreISelDag - This pass converts a legalized DAG into a
/// XCore-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createXCoreISelDag(XCoreTargetMachine &TM,
@@ -130,7 +129,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
return false;
}
-SDNode *XCoreDAGToDAGISel::Select(SDNode *N) {
+void XCoreDAGToDAGISel::Select(SDNode *N) {
SDLoc dl(N);
switch (N->getOpcode()) {
default: break;
@@ -140,8 +139,9 @@ SDNode *XCoreDAGToDAGISel::Select(SDNode *N) {
// Transformation function: get the size of a mask
// Look for the first non-zero bit
SDValue MskSize = getI32Imm(32 - countLeadingZeros((uint32_t)Val), dl);
- return CurDAG->getMachineNode(XCore::MKMSK_rus, dl,
- MVT::i32, MskSize);
+ ReplaceNode(N, CurDAG->getMachineNode(XCore::MKMSK_rus, dl,
+ MVT::i32, MskSize));
+ return;
}
else if (!isUInt<16>(Val)) {
SDValue CPIdx = CurDAG->getTargetConstantPool(
@@ -155,57 +155,64 @@ SDNode *XCoreDAGToDAGISel::Select(SDNode *N) {
MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
MachineMemOperand::MOLoad, 4, 4);
cast<MachineSDNode>(node)->setMemRefs(MemOp, MemOp + 1);
- return node;
+ ReplaceNode(N, node);
+ return;
}
break;
}
case XCoreISD::LADD: {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
N->getOperand(2) };
- return CurDAG->getMachineNode(XCore::LADD_l5r, dl, MVT::i32, MVT::i32,
- Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(XCore::LADD_l5r, dl, MVT::i32,
+ MVT::i32, Ops));
+ return;
}
case XCoreISD::LSUB: {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
N->getOperand(2) };
- return CurDAG->getMachineNode(XCore::LSUB_l5r, dl, MVT::i32, MVT::i32,
- Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(XCore::LSUB_l5r, dl, MVT::i32,
+ MVT::i32, Ops));
+ return;
}
case XCoreISD::MACCU: {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
N->getOperand(2), N->getOperand(3) };
- return CurDAG->getMachineNode(XCore::MACCU_l4r, dl, MVT::i32, MVT::i32,
- Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(XCore::MACCU_l4r, dl, MVT::i32,
+ MVT::i32, Ops));
+ return;
}
case XCoreISD::MACCS: {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
N->getOperand(2), N->getOperand(3) };
- return CurDAG->getMachineNode(XCore::MACCS_l4r, dl, MVT::i32, MVT::i32,
- Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(XCore::MACCS_l4r, dl, MVT::i32,
+ MVT::i32, Ops));
+ return;
}
case XCoreISD::LMUL: {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
N->getOperand(2), N->getOperand(3) };
- return CurDAG->getMachineNode(XCore::LMUL_l6r, dl, MVT::i32, MVT::i32,
- Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(XCore::LMUL_l6r, dl, MVT::i32,
+ MVT::i32, Ops));
+ return;
}
case XCoreISD::CRC8: {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2) };
- return CurDAG->getMachineNode(XCore::CRC8_l4r, dl, MVT::i32, MVT::i32,
- Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(XCore::CRC8_l4r, dl, MVT::i32,
+ MVT::i32, Ops));
+ return;
}
case ISD::BRIND:
- if (SDNode *ResNode = SelectBRIND(N))
- return ResNode;
+ if (tryBRIND(N))
+ return;
break;
// Other cases are autogenerated.
}
- return SelectCode(N);
+ SelectCode(N);
}
/// Given a chain return a new chain where any appearance of Old is replaced
/// by New. There must be at most one instruction between Old and Chain and
-/// this instruction must be a TokenFactor. Returns an empty SDValue if
+/// this instruction must be a TokenFactor. Returns an empty SDValue if
/// these conditions don't hold.
static SDValue
replaceInChain(SelectionDAG *CurDAG, SDValue Chain, SDValue Old, SDValue New)
@@ -229,16 +236,16 @@ replaceInChain(SelectionDAG *CurDAG, SDValue Chain, SDValue Old, SDValue New)
return CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, Ops);
}
-SDNode *XCoreDAGToDAGISel::SelectBRIND(SDNode *N) {
+bool XCoreDAGToDAGISel::tryBRIND(SDNode *N) {
SDLoc dl(N);
// (brind (int_xcore_checkevent (addr)))
SDValue Chain = N->getOperand(0);
SDValue Addr = N->getOperand(1);
if (Addr->getOpcode() != ISD::INTRINSIC_W_CHAIN)
- return nullptr;
+ return false;
unsigned IntNo = cast<ConstantSDNode>(Addr->getOperand(1))->getZExtValue();
if (IntNo != Intrinsic::xcore_checkevent)
- return nullptr;
+ return false;
SDValue nextAddr = Addr->getOperand(2);
SDValue CheckEventChainOut(Addr.getNode(), 1);
if (!CheckEventChainOut.use_empty()) {
@@ -250,7 +257,7 @@ SDNode *XCoreDAGToDAGISel::SelectBRIND(SDNode *N) {
SDValue NewChain = replaceInChain(CurDAG, Chain, CheckEventChainOut,
CheckEventChainIn);
if (!NewChain.getNode())
- return nullptr;
+ return false;
Chain = NewChain;
}
// Enable events on the thread using setsr 1 and then disable them immediately
@@ -266,8 +273,10 @@ SDNode *XCoreDAGToDAGISel::SelectBRIND(SDNode *N) {
constOne, Glue), 0);
if (nextAddr->getOpcode() == XCoreISD::PCRelativeWrapper &&
nextAddr->getOperand(0)->getOpcode() == ISD::TargetBlockAddress) {
- return CurDAG->SelectNodeTo(N, XCore::BRFU_lu6, MVT::Other,
- nextAddr->getOperand(0), Glue);
+ CurDAG->SelectNodeTo(N, XCore::BRFU_lu6, MVT::Other,
+ nextAddr->getOperand(0), Glue);
+ return true;
}
- return CurDAG->SelectNodeTo(N, XCore::BAU_1r, MVT::Other, nextAddr, Glue);
+ CurDAG->SelectNodeTo(N, XCore::BAU_1r, MVT::Other, nextAddr, Glue);
+ return true;
}
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 105b2cfb1be6..6f6ac3bd5f4c 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -110,8 +110,6 @@ XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::CTPOP, MVT::i32, Expand);
setOperationAction(ISD::ROTL , MVT::i32, Expand);
setOperationAction(ISD::ROTR , MVT::i32, Expand);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
setOperationAction(ISD::TRAP, MVT::Other, Legal);
@@ -156,7 +154,6 @@ XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
// Atomic operations
// We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
// As we are always Sequential Consistent, an ATOMIC_FENCE becomes a no OP.
- setInsertFencesForAtomic(true);
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
@@ -257,11 +254,11 @@ SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
// FIXME there is no actual debug info here
SDLoc dl(GA);
- if (GV->getType()->getElementType()->isFunctionTy())
+ if (GV->getValueType()->isFunctionTy())
return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
const auto *GVar = dyn_cast<GlobalVariable>(GV);
- if ((GV->hasSection() && StringRef(GV->getSection()).startswith(".cp.")) ||
+ if ((GV->hasSection() && GV->getSection().startswith(".cp.")) ||
(GVar && GVar->isConstant() && GV->hasLocalLinkage()))
return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
@@ -272,7 +269,7 @@ static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL)
if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
return true;
- Type *ObjType = GV->getType()->getPointerElementType();
+ Type *ObjType = GV->getValueType();
if (!ObjType->isSized())
return false;
@@ -309,8 +306,7 @@ LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
Type::getInt8Ty(*DAG.getContext()), GA, Idx);
SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
- DAG.getEntryNode(), CP, MachinePointerInfo(), false,
- false, false, 0);
+ DAG.getEntryNode(), CP, MachinePointerInfo());
}
}
@@ -371,17 +367,15 @@ LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
ScaledIndex);
}
-SDValue XCoreTargetLowering::
-lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
- int64_t Offset, SelectionDAG &DAG) const
-{
+SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
+ const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
+ SelectionDAG &DAG) const {
auto PtrVT = getPointerTy(DAG.getDataLayout());
if ((Offset & 0x3) == 0) {
- return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo(), false,
- false, false, 0);
+ return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
}
// Lower to pair of consecutive word aligned loads plus some bit shifting.
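+  // For example, Offset = 6 gives LowOffset = 4 and HighOffset = 8; the
+  // result is (Low >> 16) | (High << 16), reassembling the word that
+  // straddles the two aligned loads.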
- int32_t HighOffset = RoundUpToAlignment(Offset, 4);
+ int32_t HighOffset = alignTo(Offset, 4);
int32_t LowOffset = HighOffset - 4;
SDValue LowAddr, HighAddr;
if (GlobalAddressSDNode *GASD =
@@ -399,10 +393,8 @@ lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);
- SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo(),
- false, false, false, 0);
- SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo(),
- false, false, false, 0);
+ SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
+ SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
@@ -462,17 +454,16 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
}
if (LD->getAlignment() == 2) {
- SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
- BasePtr, LD->getPointerInfo(), MVT::i16,
- LD->isVolatile(), LD->isNonTemporal(),
- LD->isInvariant(), 2);
+ SDValue Low =
+ DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
+ LD->getPointerInfo(), MVT::i16,
+ /* Alignment = */ 2, LD->getMemOperand()->getFlags());
SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
DAG.getConstant(2, DL, MVT::i32));
- SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
- HighAddr,
- LD->getPointerInfo().getWithOffset(2),
- MVT::i16, LD->isVolatile(),
- LD->isNonTemporal(), LD->isInvariant(), 2);
+ SDValue High =
+ DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
+ LD->getPointerInfo().getWithOffset(2), MVT::i16,
+ /* Alignment = */ 2, LD->getMemOperand()->getFlags());
SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
DAG.getConstant(16, DL, MVT::i32));
SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
@@ -496,7 +487,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
CallingConv::C, IntPtrTy,
DAG.getExternalSymbol("__misaligned_load",
getPointerTy(DAG.getDataLayout())),
- std::move(Args), 0);
+ std::move(Args));
std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
SDValue Ops[] = { CallResult.first, CallResult.second };
@@ -529,16 +520,14 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
SDValue Low = Value;
SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
DAG.getConstant(16, dl, MVT::i32));
- SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
- ST->getPointerInfo(), MVT::i16,
- ST->isVolatile(), ST->isNonTemporal(),
- 2);
+ SDValue StoreLow = DAG.getTruncStore(
+ Chain, dl, Low, BasePtr, ST->getPointerInfo(), MVT::i16,
+ /* Alignment = */ 2, ST->getMemOperand()->getFlags());
SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
DAG.getConstant(2, dl, MVT::i32));
- SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
- ST->getPointerInfo().getWithOffset(2),
- MVT::i16, ST->isVolatile(),
- ST->isNonTemporal(), 2);
+ SDValue StoreHigh = DAG.getTruncStore(
+ Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
+ MVT::i16, /* Alignment = */ 2, ST->getMemOperand()->getFlags());
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
}
@@ -559,7 +548,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
CallingConv::C, Type::getVoidTy(*DAG.getContext()),
DAG.getExternalSymbol("__misaligned_store",
getPointerTy(DAG.getDataLayout())),
- std::move(Args), 0);
+ std::move(Args));
std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
return CallResult.second;
@@ -725,11 +714,9 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
(N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
"Unknown operand to lower!");
- if (N->getOpcode() == ISD::ADD) {
- SDValue Result = TryExpandADDWithMul(N, DAG);
- if (Result.getNode())
+ if (N->getOpcode() == ISD::ADD)
+ if (SDValue Result = TryExpandADDWithMul(N, DAG))
return Result;
- }
SDLoc dl(N);
@@ -774,19 +761,17 @@ LowerVAARG(SDValue Op, SelectionDAG &DAG) const
EVT PtrVT = VAListPtr.getValueType();
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
SDLoc dl(Node);
- SDValue VAList = DAG.getLoad(PtrVT, dl, InChain,
- VAListPtr, MachinePointerInfo(SV),
- false, false, false, 0);
+ SDValue VAList =
+ DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
// Increment the pointer, VAList, to the next vararg
SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
dl));
// Store the incremented VAList to the legalized pointer
InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
- MachinePointerInfo(SV), false, false, 0);
+ MachinePointerInfo(SV));
// Load the actual argument out of the pointer VAList
- return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo(),
- false, false, false, 0);
+ return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
}
SDValue XCoreTargetLowering::
@@ -799,7 +784,7 @@ LowerVASTART(SDValue Op, SelectionDAG &DAG) const
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo());
}
SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
@@ -832,9 +817,9 @@ LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
int FI = XFI->createLRSpillSlot(MF);
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
- return DAG.getLoad(
- getPointerTy(DAG.getDataLayout()), SDLoc(Op), DAG.getEntryNode(), FIN,
- MachinePointerInfo::getFixedStack(MF, FI), false, false, false, 0);
+ return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
+ DAG.getEntryNode(), FIN,
+ MachinePointerInfo::getFixedStack(MF, FI));
}
SDValue XCoreTargetLowering::
@@ -915,33 +900,31 @@ LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
SDValue Addr = Trmp;
SDLoc dl(Op);
- OutChains[0] = DAG.getStore(Chain, dl,
- DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
- MachinePointerInfo(TrmpAddr), false, false, 0);
+ OutChains[0] =
+ DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
+ MachinePointerInfo(TrmpAddr));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(4, dl, MVT::i32));
- OutChains[1] = DAG.getStore(Chain, dl,
- DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
- MachinePointerInfo(TrmpAddr, 4), false, false, 0);
+ OutChains[1] =
+ DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
+ MachinePointerInfo(TrmpAddr, 4));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(8, dl, MVT::i32));
- OutChains[2] = DAG.getStore(Chain, dl,
- DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
- MachinePointerInfo(TrmpAddr, 8), false, false, 0);
+ OutChains[2] =
+ DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
+ MachinePointerInfo(TrmpAddr, 8));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(12, dl, MVT::i32));
- OutChains[3] = DAG.getStore(Chain, dl, Nest, Addr,
- MachinePointerInfo(TrmpAddr, 12), false, false,
- 0);
+ OutChains[3] =
+ DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(16, dl, MVT::i32));
- OutChains[4] = DAG.getStore(Chain, dl, FPtr, Addr,
- MachinePointerInfo(TrmpAddr, 16), false, false,
- 0);
+ OutChains[4] =
+ DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
@@ -973,29 +956,30 @@ SDValue XCoreTargetLowering::
LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
AtomicSDNode *N = cast<AtomicSDNode>(Op);
assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
- assert(N->getOrdering() <= Monotonic &&
- "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+ assert((N->getOrdering() == AtomicOrdering::Unordered ||
+ N->getOrdering() == AtomicOrdering::Monotonic) &&
+ "setInsertFencesForAtomic(true) expects unordered / monotonic");
if (N->getMemoryVT() == MVT::i32) {
if (N->getAlignment() < 4)
report_fatal_error("atomic load must be aligned");
return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
N->getChain(), N->getBasePtr(), N->getPointerInfo(),
- N->isVolatile(), N->isNonTemporal(), N->isInvariant(),
- N->getAlignment(), N->getAAInfo(), N->getRanges());
+ N->getAlignment(), N->getMemOperand()->getFlags(),
+ N->getAAInfo(), N->getRanges());
}
if (N->getMemoryVT() == MVT::i16) {
if (N->getAlignment() < 2)
report_fatal_error("atomic load must be aligned");
return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
N->getBasePtr(), N->getPointerInfo(), MVT::i16,
- N->isVolatile(), N->isNonTemporal(),
- N->isInvariant(), N->getAlignment(), N->getAAInfo());
+ N->getAlignment(), N->getMemOperand()->getFlags(),
+ N->getAAInfo());
}
if (N->getMemoryVT() == MVT::i8)
return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
N->getBasePtr(), N->getPointerInfo(), MVT::i8,
- N->isVolatile(), N->isNonTemporal(),
- N->isInvariant(), N->getAlignment(), N->getAAInfo());
+ N->getAlignment(), N->getMemOperand()->getFlags(),
+ N->getAAInfo());
return SDValue();
}
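The rewritten assert reflects AtomicOrdering becoming a scoped enum in 3.9 (llvm/Support/AtomicOrdering.h), so the old ordinal comparison "<= Monotonic" no longer compiles. A fragment stating the same predicate, assuming those headers (isLoweredAsPlainLoad is a hypothetical name):

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    #include "llvm/Support/AtomicOrdering.h"
    using namespace llvm;

    static bool isLoweredAsPlainLoad(const AtomicSDNode *N) {
      // Only fence-free orderings may reach this point once the target
      // asks for explicit fences around atomics.
      return N->getOrdering() == AtomicOrdering::Unordered ||
             N->getOrdering() == AtomicOrdering::Monotonic;
    }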
@@ -1003,29 +987,29 @@ SDValue XCoreTargetLowering::
LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
AtomicSDNode *N = cast<AtomicSDNode>(Op);
assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
- assert(N->getOrdering() <= Monotonic &&
- "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+ assert((N->getOrdering() == AtomicOrdering::Unordered ||
+ N->getOrdering() == AtomicOrdering::Monotonic) &&
+ "setInsertFencesForAtomic(true) expects unordered / monotonic");
if (N->getMemoryVT() == MVT::i32) {
if (N->getAlignment() < 4)
report_fatal_error("atomic store must be aligned");
- return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(),
- N->getBasePtr(), N->getPointerInfo(),
- N->isVolatile(), N->isNonTemporal(),
- N->getAlignment(), N->getAAInfo());
+ return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
+ N->getPointerInfo(), N->getAlignment(),
+ N->getMemOperand()->getFlags(), N->getAAInfo());
}
if (N->getMemoryVT() == MVT::i16) {
if (N->getAlignment() < 2)
report_fatal_error("atomic store must be aligned");
return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
N->getBasePtr(), N->getPointerInfo(), MVT::i16,
- N->isVolatile(), N->isNonTemporal(),
- N->getAlignment(), N->getAAInfo());
+ N->getAlignment(), N->getMemOperand()->getFlags(),
+ N->getAAInfo());
}
if (N->getMemoryVT() == MVT::i8)
return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
N->getBasePtr(), N->getPointerInfo(), MVT::i8,
- N->isVolatile(), N->isNonTemporal(),
- N->getAlignment(), N->getAAInfo());
+ N->getAlignment(), N->getMemOperand()->getFlags(),
+ N->getAAInfo());
return SDValue();
}
@@ -1071,11 +1055,10 @@ XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of physical registers or memory locations.
-static SDValue
-LowerCallResult(SDValue Chain, SDValue InFlag,
- const SmallVectorImpl<CCValAssign> &RVLocs,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
+static SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
+ const SmallVectorImpl<CCValAssign> &RVLocs,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) {
SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
// Copy results out of physical registers.
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
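This hunk and the ones below it are part of a tree-wide 3.9 change: SDLoc parameters are now passed as const SDLoc& rather than by value. Call sites are untouched, since an SDLoc temporary binds directly to the const reference; a fragment, with lowerSomething a made-up stand-in for the converted helpers:

    static SDValue lowerSomething(SDValue Chain, const SDLoc &dl,
                                  SelectionDAG &DAG) {
      // The SDLoc is only read, so a const reference avoids the copy.
      return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chain);
    }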
@@ -1118,15 +1101,12 @@ LowerCallResult(SDValue Chain, SDValue InFlag,
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
-SDValue
-XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue XCoreTargetLowering::LowerCCCCallTo(
+ SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
+ bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
@@ -1256,15 +1236,10 @@ namespace {
}
/// XCore formal arguments implementation
-SDValue
-XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl,
- SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals)
- const {
+SDValue XCoreTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
switch (CallConv)
{
default:
@@ -1280,15 +1255,10 @@ XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
-SDValue
-XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
- SDLoc dl,
- SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue XCoreTargetLowering::LowerCCCArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
@@ -1333,7 +1303,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
{
#ifndef NDEBUG
errs() << "LowerFormalArguments Unhandled argument type: "
- << RegVT.getSimpleVT().SimpleTy << "\n";
+ << RegVT.getEVTString() << "\n";
#endif
llvm_unreachable(nullptr);
}
@@ -1362,8 +1332,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
// from this parameter
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
- MachinePointerInfo::getFixedStack(MF, FI), false,
- false, false, 0);
+ MachinePointerInfo::getFixedStack(MF, FI));
}
const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
ArgData.push_back(ADP);
@@ -1395,8 +1364,8 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
// Move argument from virt reg -> stack
- SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo(), false, false, 0);
+ SDValue Store =
+ DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
MemOps.push_back(Store);
}
} else {
@@ -1463,11 +1432,11 @@ CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
}
SDValue
-XCoreTargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
+XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const {
+ const SDLoc &dl, SelectionDAG &DAG) const {
XCoreFunctionInfo *XFI =
DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
@@ -1514,8 +1483,7 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
MemOpChains.push_back(DAG.getStore(
Chain, dl, OutVals[i], FIN,
- MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false,
- false, 0));
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
}
// Transform all store nodes into one single node because
@@ -1551,11 +1519,11 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
//===----------------------------------------------------------------------===//
MachineBasicBlock *
-XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
- DebugLoc dl = MI->getDebugLoc();
- assert((MI->getOpcode() == XCore::SELECT_CC) &&
+ DebugLoc dl = MI.getDebugLoc();
+ assert((MI.getOpcode() == XCore::SELECT_CC) &&
"Unexpected instr type to insert");
// To "insert" a SELECT_CC instruction, we actually have to insert the diamond
@@ -1588,7 +1556,8 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
BB->addSuccessor(sinkMBB);
BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
- .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
+ .addReg(MI.getOperand(1).getReg())
+ .addMBB(sinkMBB);
// copy0MBB:
// %FalseValue = ...
@@ -1602,12 +1571,13 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = sinkMBB;
- BuildMI(*BB, BB->begin(), dl,
- TII.get(XCore::PHI), MI->getOperand(0).getReg())
- .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
- .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
+ BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
+ .addReg(MI.getOperand(3).getReg())
+ .addMBB(copy0MBB)
+ .addReg(MI.getOperand(2).getReg())
+ .addMBB(thisMBB);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
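The custom-inserter hook now receives the pseudo instruction by reference, part of 3.9's broader MachineInstr*-to-MachineInstr& migration, so operand access uses '.' and the pseudo erases itself via MI.eraseFromParent(). A skeleton of the shape, with emitSelectCC a hypothetical name:

    MachineBasicBlock *emitSelectCC(MachineInstr &MI, MachineBasicBlock *BB) {
      DebugLoc dl = MI.getDebugLoc();   // was MI->getDebugLoc()
      // ... split BB and wire up the select diamond here ...
      MI.eraseFromParent();             // was MI->eraseFromParent()
      return BB;
    }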
diff --git a/lib/Target/XCore/XCoreISelLowering.h b/lib/Target/XCore/XCoreISelLowering.h
index b6f09ff418b5..41813bbb8156 100644
--- a/lib/Target/XCore/XCoreISelLowering.h
+++ b/lib/Target/XCore/XCoreISelLowering.h
@@ -119,8 +119,8 @@ namespace llvm {
const char *getTargetNodeName(unsigned Opcode) const override;
MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const override;
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *MBB) const override;
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
Type *Ty, unsigned AS) const override;
@@ -144,11 +144,10 @@ namespace llvm {
const XCoreSubtarget &Subtarget;
// Lower Operand helpers
- SDValue LowerCCCArguments(SDValue Chain,
- CallingConv::ID CallConv,
+ SDValue LowerCCCArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerCCCCallTo(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
@@ -156,13 +155,14 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
SDValue getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
SelectionDAG &DAG) const;
- SDValue lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain,
- SDValue Base, int64_t Offset,
+ SDValue lowerLoadWordFromAlignedBasePlusOffset(const SDLoc &DL,
+ SDValue Chain, SDValue Base,
+ int64_t Offset,
SelectionDAG &DAG) const;
// Lower Operand specifics
@@ -206,29 +206,28 @@ namespace llvm {
unsigned Depth = 0) const override;
SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const override;
+ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
SDValue
LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
- SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const override;
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &dl, SelectionDAG &DAG) const override;
bool
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
LLVMContext &Context) const override;
+ bool shouldInsertFencesForAtomic(const Instruction *I) const override {
+ return true;
+ }
};
}
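The new override replaces the constructor-time setInsertFencesForAtomic(true) flag, which 3.9 removed in favor of a per-instruction hook; the fence insertion itself is handled generically (in 3.9 by AtomicExpandPass), and returning true unconditionally keeps XCore's previous behavior of bracketing every atomic with fences:

    bool shouldInsertFencesForAtomic(const Instruction *I) const override {
      return true; // fence every atomic, matching the removed global flag
    }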
diff --git a/lib/Target/XCore/XCoreInstrInfo.cpp b/lib/Target/XCore/XCoreInstrInfo.cpp
index e4129aee9479..e0b3e7153da9 100644
--- a/lib/Target/XCore/XCoreInstrInfo.cpp
+++ b/lib/Target/XCore/XCoreInstrInfo.cpp
@@ -60,17 +60,16 @@ static bool isZeroImm(const MachineOperand &op) {
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
-unsigned
-XCoreInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const{
- int Opcode = MI->getOpcode();
+unsigned XCoreInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const {
+ int Opcode = MI.getOpcode();
if (Opcode == XCore::LDWFI)
{
- if ((MI->getOperand(1).isFI()) && // is a stack slot
- (MI->getOperand(2).isImm()) && // the imm is zero
- (isZeroImm(MI->getOperand(2))))
- {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
+ if ((MI.getOperand(1).isFI()) && // is a stack slot
+ (MI.getOperand(2).isImm()) && // the imm is zero
+ (isZeroImm(MI.getOperand(2)))) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
}
}
return 0;
@@ -81,18 +80,16 @@ XCoreInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) con
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
-unsigned
-XCoreInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- int Opcode = MI->getOpcode();
+unsigned XCoreInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const {
+ int Opcode = MI.getOpcode();
if (Opcode == XCore::STWFI)
{
- if ((MI->getOperand(1).isFI()) && // is a stack slot
- (MI->getOperand(2).isImm()) && // the imm is zero
- (isZeroImm(MI->getOperand(2))))
- {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
+ if ((MI.getOperand(1).isFI()) && // is a stack slot
+ (MI.getOperand(2).isImm()) && // the imm is zero
+ (isZeroImm(MI.getOperand(2)))) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
}
}
return 0;
@@ -190,24 +187,24 @@ static inline XCore::CondCode GetOppositeBranchCondition(XCore::CondCode CC)
/// Note that RemoveBranch and InsertBranch must be implemented to support
/// cases where this method returns success.
///
-bool
-XCoreInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const {
+bool XCoreInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
// If the block has no terminators, it just falls into the block after it.
MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
if (I == MBB.end())
return false;
- if (!isUnpredicatedTerminator(I))
+ if (!isUnpredicatedTerminator(*I))
return false;
// Get the last instruction in the block.
MachineInstr *LastInst = I;
// If there is only one terminator instruction, process it.
- if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
+ if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
if (IsBRU(LastInst->getOpcode())) {
TBB = LastInst->getOperand(0).getMBB();
return false;
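Besides the lower-case rename to analyzeBranch, isUnpredicatedTerminator() now takes const MachineInstr&, which is why the iterators above are dereferenced with '*I' and '*--I'. A fragment of the scan under those 3.9 signatures:

    MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
    if (I == MBB.end() || !isUnpredicatedTerminator(*I))
      return false; // no terminator work to do: plain fall-through block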
@@ -230,8 +227,7 @@ XCoreInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineInstr *SecondLastInst = I;
// If there are three terminators, we don't know what sort of block this is.
- if (SecondLastInst && I != MBB.begin() &&
- isUnpredicatedTerminator(--I))
+ if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
return true;
unsigned SecondLastOpc = SecondLastInst->getOpcode();
@@ -273,11 +269,11 @@ XCoreInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
return true;
}
-unsigned
-XCoreInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- ArrayRef<MachineOperand> Cond,
- DebugLoc DL)const{
+unsigned XCoreInstrInfo::InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 2 || Cond.size() == 0) &&
@@ -330,9 +326,9 @@ XCoreInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
}
void XCoreInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
bool GRDest = XCore::GRRegsRegClass.contains(DestReg);
bool GRSrc = XCore::GRRegsRegClass.contains(SrcReg);
diff --git a/lib/Target/XCore/XCoreInstrInfo.h b/lib/Target/XCore/XCoreInstrInfo.h
index b958c361f5a2..783bc6bab5d5 100644
--- a/lib/Target/XCore/XCoreInstrInfo.h
+++ b/lib/Target/XCore/XCoreInstrInfo.h
@@ -39,7 +39,7 @@ public:
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
- unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
/// isStoreToStackSlot - If the specified machine instruction is a direct
@@ -47,23 +47,22 @@ public:
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
- unsigned isStoreToStackSlot(const MachineInstr *MI,
+ unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
- bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const override;
+ const DebugLoc &DL) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
- void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
diff --git a/lib/Target/XCore/XCoreLowerThreadLocal.cpp b/lib/Target/XCore/XCoreLowerThreadLocal.cpp
index f0b720151b17..5cc51cd7a992 100644
--- a/lib/Target/XCore/XCoreLowerThreadLocal.cpp
+++ b/lib/Target/XCore/XCoreLowerThreadLocal.cpp
@@ -76,7 +76,7 @@ createLoweredInitializer(ArrayType *NewType, Constant *OriginalInitializer) {
static Instruction *
createReplacementInstr(ConstantExpr *CE, Instruction *Instr) {
- IRBuilder<true,NoFolder> Builder(Instr);
+ IRBuilder<NoFolder> Builder(Instr);
unsigned OpCode = CE->getOpcode();
switch (OpCode) {
case Instruction::GetElementPtr: {
@@ -179,7 +179,6 @@ static bool isZeroLengthArray(Type *Ty) {
bool XCoreLowerThreadLocal::lowerGlobal(GlobalVariable *GV) {
Module *M = GV->getParent();
- LLVMContext &Ctx = M->getContext();
if (!GV->isThreadLocal())
return false;
@@ -189,7 +188,7 @@ bool XCoreLowerThreadLocal::lowerGlobal(GlobalVariable *GV) {
return false;
// Create replacement global.
- ArrayType *NewType = createLoweredType(GV->getType()->getElementType());
+ ArrayType *NewType = createLoweredType(GV->getValueType());
Constant *NewInitializer = nullptr;
if (GV->hasInitializer())
NewInitializer = createLoweredInitializer(NewType,
@@ -210,11 +209,8 @@ bool XCoreLowerThreadLocal::lowerGlobal(GlobalVariable *GV) {
Function *GetID = Intrinsic::getDeclaration(GV->getParent(),
Intrinsic::xcore_getid);
Value *ThreadID = Builder.CreateCall(GetID, {});
- SmallVector<Value *, 2> Indices;
- Indices.push_back(Constant::getNullValue(Type::getInt64Ty(Ctx)));
- Indices.push_back(ThreadID);
- Value *Addr =
- Builder.CreateInBoundsGEP(NewGV->getValueType(), NewGV, Indices);
+ Value *Addr = Builder.CreateInBoundsGEP(NewGV->getValueType(), NewGV,
+ {Builder.getInt64(0), ThreadID});
U->replaceUsesOfWith(GV, Addr);
}
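Two 3.9 IR API updates land in this file: IRBuilder lost its leading preserveNames template parameter (IRBuilder<true, NoFolder> becomes IRBuilder<NoFolder>), and GEP indices can be passed as a braced initializer list instead of building a SmallVector first. A sketch assuming those headers (indexPerThread is a made-up name):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/NoFolder.h"
    using namespace llvm;

    static Value *indexPerThread(Instruction *InsertPt, GlobalVariable *NewGV,
                                 Value *ThreadID) {
      IRBuilder<NoFolder> Builder(InsertPt); // was IRBuilder<true, NoFolder>
      // {0, ThreadID} selects this thread's copy of the lowered TLS array.
      return Builder.CreateInBoundsGEP(NewGV->getValueType(), NewGV,
                                       {Builder.getInt64(0), ThreadID});
    }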
diff --git a/lib/Target/XCore/XCoreSelectionDAGInfo.cpp b/lib/Target/XCore/XCoreSelectionDAGInfo.cpp
index 40568d124de0..61fbf0dc3d2c 100644
--- a/lib/Target/XCore/XCoreSelectionDAGInfo.cpp
+++ b/lib/Target/XCore/XCoreSelectionDAGInfo.cpp
@@ -16,13 +16,10 @@ using namespace llvm;
#define DEBUG_TYPE "xcore-selectiondag-info"
-SDValue XCoreSelectionDAGInfo::
-EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
- SDValue Dst, SDValue Src, SDValue Size, unsigned Align,
- bool isVolatile, bool AlwaysInline,
- MachinePointerInfo DstPtrInfo,
- MachinePointerInfo SrcPtrInfo) const
-{
+SDValue XCoreSelectionDAGInfo::EmitTargetCodeForMemcpy(
+ SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
unsigned SizeBitWidth = Size.getValueType().getSizeInBits();
// Call __memcpy_4 if the src, dst and size are all 4 byte aligned.
if (!AlwaysInline && (Align & 3) == 0 &&
@@ -42,7 +39,7 @@ EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
Type::getVoidTy(*DAG.getContext()),
DAG.getExternalSymbol("__memcpy_4",
TLI.getPointerTy(DAG.getDataLayout())),
- std::move(Args), 0)
+ std::move(Args))
.setDiscardResult();
std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
diff --git a/lib/Target/XCore/XCoreSelectionDAGInfo.h b/lib/Target/XCore/XCoreSelectionDAGInfo.h
index 77b3527d77e3..7cd0d8216e91 100644
--- a/lib/Target/XCore/XCoreSelectionDAGInfo.h
+++ b/lib/Target/XCore/XCoreSelectionDAGInfo.h
@@ -7,30 +7,27 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the XCore subclass for TargetSelectionDAGInfo.
+// This file defines the XCore subclass for SelectionDAGTargetInfo.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_XCORE_XCORESELECTIONDAGINFO_H
#define LLVM_LIB_TARGET_XCORE_XCORESELECTIONDAGINFO_H
-#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
namespace llvm {
class XCoreTargetMachine;
-class XCoreSelectionDAGInfo : public TargetSelectionDAGInfo {
+class XCoreSelectionDAGInfo : public SelectionDAGTargetInfo {
public:
-
- SDValue
- EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
- SDValue Chain,
- SDValue Op1, SDValue Op2,
- SDValue Op3, unsigned Align, bool isVolatile,
- bool AlwaysInline,
- MachinePointerInfo DstPtrInfo,
- MachinePointerInfo SrcPtrInfo) const override;
+ SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
+ SDValue Chain, SDValue Op1, SDValue Op2,
+ SDValue Op3, unsigned Align, bool isVolatile,
+ bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const override;
};
}
diff --git a/lib/Target/XCore/XCoreTargetMachine.cpp b/lib/Target/XCore/XCoreTargetMachine.cpp
index 4a79dac0bed9..c3eab802f815 100644
--- a/lib/Target/XCore/XCoreTargetMachine.cpp
+++ b/lib/Target/XCore/XCoreTargetMachine.cpp
@@ -15,21 +15,29 @@
#include "XCoreTargetTransformInfo.h"
#include "XCore.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
-/// XCoreTargetMachine ctor - Create an ILP32 architecture model
+static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
+ if (!RM.hasValue())
+ return Reloc::Static;
+ return *RM;
+}
+
+/// Create an ILP32 architecture model
///
XCoreTargetMachine::XCoreTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM,
+ CodeModel::Model CM,
CodeGenOpt::Level OL)
: LLVMTargetMachine(
T, "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:32-f64:32-a:0:32-n32",
- TT, CPU, FS, Options, RM, CM, OL),
+ TT, CPU, FS, Options, getEffectiveRelocModel(RM), CM, OL),
TLOF(make_unique<XCoreTargetObjectFile>()),
Subtarget(TT, CPU, FS, *this) {
initAsmInfo();
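Target constructors now take Optional<Reloc::Model>, so each backend maps "not specified on the command line" to its own default; XCore picks Reloc::Static. The helper, reproduced as a standalone sketch with its includes (assumes LLVM 3.9):

    #include "llvm/ADT/Optional.h"
    #include "llvm/Support/CodeGen.h"
    using namespace llvm;

    static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
      if (!RM.hasValue())
        return Reloc::Static; // default when -relocation-model is unset
      return *RM;
    }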
diff --git a/lib/Target/XCore/XCoreTargetMachine.h b/lib/Target/XCore/XCoreTargetMachine.h
index a8addfc3e429..4bd25bc8776c 100644
--- a/lib/Target/XCore/XCoreTargetMachine.h
+++ b/lib/Target/XCore/XCoreTargetMachine.h
@@ -25,7 +25,7 @@ class XCoreTargetMachine : public LLVMTargetMachine {
public:
XCoreTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
+ Optional<Reloc::Model> RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
~XCoreTargetMachine() override;
diff --git a/lib/Target/XCore/XCoreTargetObjectFile.cpp b/lib/Target/XCore/XCoreTargetObjectFile.cpp
index aa16ecc148db..abe1deddc56e 100644
--- a/lib/Target/XCore/XCoreTargetObjectFile.cpp
+++ b/lib/Target/XCore/XCoreTargetObjectFile.cpp
@@ -122,7 +122,7 @@ XCoreTargetObjectFile::SelectSectionForGlobal(const GlobalValue *GV,
if (Kind.isMergeableConst8()) return MergeableConst8Section;
if (Kind.isMergeableConst16()) return MergeableConst16Section;
}
- Type *ObjType = GV->getType()->getPointerElementType();
+ Type *ObjType = GV->getValueType();
auto &DL = GV->getParent()->getDataLayout();
if (TM.getCodeModel() == CodeModel::Small || !ObjType->isSized() ||
DL.getTypeAllocSize(ObjType) < CodeModelLargeSize) {
@@ -145,8 +145,10 @@ XCoreTargetObjectFile::SelectSectionForGlobal(const GlobalValue *GV,
report_fatal_error("Target does not support TLS or Common sections");
}
-MCSection *XCoreTargetObjectFile::getSectionForConstant(
- const DataLayout &DL, SectionKind Kind, const Constant *C) const {
+MCSection *XCoreTargetObjectFile::getSectionForConstant(const DataLayout &DL,
+ SectionKind Kind,
+ const Constant *C,
+ unsigned &Align) const {
if (Kind.isMergeableConst4()) return MergeableConst4Section;
if (Kind.isMergeableConst8()) return MergeableConst8Section;
if (Kind.isMergeableConst16()) return MergeableConst16Section;
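The override gains an unsigned &Align in/out parameter in 3.9, letting a target raise the alignment of the section it picks for a constant; XCore's body, as shown, leaves the caller's value in place. The matching declaration, assuming the 3.9 TargetLoweringObjectFile interface:

    MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
                                     const Constant *C,
                                     unsigned &Align) const override;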
diff --git a/lib/Target/XCore/XCoreTargetObjectFile.h b/lib/Target/XCore/XCoreTargetObjectFile.h
index 6701c661a73e..c129d757dea3 100644
--- a/lib/Target/XCore/XCoreTargetObjectFile.h
+++ b/lib/Target/XCore/XCoreTargetObjectFile.h
@@ -34,7 +34,8 @@ static const unsigned CodeModelLargeSize = 256;
const TargetMachine &TM) const override;
MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
- const Constant *C) const override;
+ const Constant *C,
+ unsigned &Align) const override;
};
} // end namespace llvm